ext | sha | content |
---|---|---|
py | b403742cbd4d69ba539a3460041818a0fc199bb6 | # Copyright (C) 2015 Red Hat, Inc
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import fixtures
import mock
from oslo_config import cfg
from oslo_log import log as logging
from nova import test
from nova.tests.functional.test_servers import ServersTestBase
from nova.tests.unit import fake_network
from nova.tests.unit.virt.libvirt import fake_imagebackend
from nova.tests.unit.virt.libvirt import fake_libvirt_utils
from nova.tests.unit.virt.libvirt import fakelibvirt
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
class NumaHostInfo(fakelibvirt.HostInfo):
def __init__(self, **kwargs):
super(NumaHostInfo, self).__init__(**kwargs)
self.numa_mempages_list = []
def get_numa_topology(self):
if self.numa_topology:
return self.numa_topology
topology = self._gen_numa_topology(self.cpu_nodes, self.cpu_sockets,
self.cpu_cores, self.cpu_threads,
self.kB_mem)
self.numa_topology = topology
# update number of active cpus
cpu_count = len(topology.cells) * len(topology.cells[0].cpus)
self.cpus = cpu_count - len(self.disabled_cpus_list)
return topology
def set_custom_numa_topology(self, topology):
self.numa_topology = topology
class NUMAServersTest(ServersTestBase):
def setUp(self):
super(NUMAServersTest, self).setUp()
# Replace libvirt with fakelibvirt
self.useFixture(fake_imagebackend.ImageBackendFixture())
self.useFixture(fixtures.MonkeyPatch(
'nova.virt.libvirt.driver.libvirt_utils',
fake_libvirt_utils))
self.useFixture(fixtures.MonkeyPatch(
'nova.virt.libvirt.driver.libvirt',
fakelibvirt))
self.useFixture(fixtures.MonkeyPatch(
'nova.virt.libvirt.host.libvirt',
fakelibvirt))
self.useFixture(fixtures.MonkeyPatch(
'nova.virt.libvirt.guest.libvirt',
fakelibvirt))
self.useFixture(fakelibvirt.FakeLibvirtFixture())
def _setup_compute_service(self):
pass
def _setup_scheduler_service(self):
self.flags(compute_driver='libvirt.LibvirtDriver')
self.flags(scheduler_driver='filter_scheduler')
self.flags(scheduler_default_filters=CONF.scheduler_default_filters
+ ['NUMATopologyFilter'])
return self.start_service('scheduler')
def _run_build_test(self, flavor_id, filter_mock, end_status='ACTIVE'):
self.compute = self.start_service('compute', host='test_compute0')
fake_network.set_stub_network_methods(self)
# Create server
good_server = self._build_server(flavor_id)
post = {'server': good_server}
created_server = self.api.post_server(post)
LOG.debug("created_server: %s" % created_server)
self.assertTrue(created_server['id'])
created_server_id = created_server['id']
# Validate that the server has been created
found_server = self.api.get_server(created_server_id)
self.assertEqual(created_server_id, found_server['id'])
# It should also be in the all-servers list
servers = self.api.get_servers()
server_ids = [s['id'] for s in servers]
self.assertIn(created_server_id, server_ids)
# Validate that NUMATopologyFilter has been called
self.assertTrue(filter_mock.called)
found_server = self._wait_for_state_change(found_server, 'BUILD')
self.assertEqual(end_status, found_server['status'])
self._delete_server(created_server_id)
def _get_topology_filter_spy(self):
host_manager = self.scheduler.manager.driver.host_manager
numa_filter_class = host_manager.filter_cls_map['NUMATopologyFilter']
host_pass_mock = mock.Mock(wraps=numa_filter_class().host_passes)
return host_pass_mock
def test_create_server_with_numa_topology(self):
host_info = NumaHostInfo(cpu_nodes=2, cpu_sockets=1, cpu_cores=2,
cpu_threads=2, kB_mem=15740000)
fake_connection = fakelibvirt.Connection('qemu:///system',
version=1002007,
hv_version=2001000,
host_info=host_info)
# Create a flavor
extra_spec = {'hw:numa_nodes': '2'}
flavor_id = self._create_flavor(extra_spec=extra_spec)
host_pass_mock = self._get_topology_filter_spy()
with test.nested(
mock.patch('nova.virt.libvirt.host.Host.get_connection',
return_value=fake_connection),
mock.patch('nova.scheduler.filters'
'.numa_topology_filter.NUMATopologyFilter.host_passes',
side_effect=host_pass_mock)) as (conn_mock,
filter_mock):
self._run_build_test(flavor_id, filter_mock)
def test_create_server_with_numa_fails(self):
host_info = NumaHostInfo(cpu_nodes=1, cpu_sockets=1, cpu_cores=2,
kB_mem=15740000)
fake_connection = fakelibvirt.Connection('qemu:///system',
version=1002007,
host_info=host_info)
# Create a flavor
extra_spec = {'hw:numa_nodes': '2'}
flavor_id = self._create_flavor(extra_spec=extra_spec)
host_pass_mock = self._get_topology_filter_spy()
with test.nested(
mock.patch('nova.virt.libvirt.host.Host.get_connection',
return_value=fake_connection),
mock.patch('nova.scheduler.filters'
'.numa_topology_filter.NUMATopologyFilter.host_passes',
side_effect=host_pass_mock)) as (conn_mock,
filter_mock):
self._run_build_test(flavor_id, filter_mock, end_status='ERROR')
|
py | b403747c5c9d7db86814f12083f476546b44e02e | import os
import signal
import redis
from pi import *
redis_host = os.environ.get('REDIS_HOST')
assert redis_host is not None
r = redis.Redis(host=redis_host, port=6379, decode_responses=True)
running = True
def signal_handler(signum, frame):
global running  # without this, the assignment below would only create a local variable
print("got signal")
running = False
signal.signal(signal.SIGTERM, signal_handler)
print("starting")
while running:
task = r.blpop('queue:task', 5)
if task is not None:
iterations = int(task[1])
print("got task: " + str(iterations))
pi = leibniz_pi(iterations)
print (pi)
else:
#todo (if terminate?)
print ("no more work")
running = False
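# A minimal producer-side sketch for the worker loop above, assuming tasks are
# plain iteration counts pushed onto the same 'queue:task' list that blpop
# consumes; the function name enqueue_tasks is illustrative.
def enqueue_tasks(iteration_counts, host=redis_host, port=6379):
    producer = redis.Redis(host=host, port=port, decode_responses=True)
    for iterations in iteration_counts:
        # rpush appends to the tail, so blpop consumers receive tasks in FIFO order
        producer.rpush('queue:task', iterations)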
|
py | b40374a7dcaa29a79ff333b284b55471fdb03c65 | #!/usr/bin/env python
##############################################################################
# Copyright 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
##############################################################################
import platform
import os
import re
import shlex
import shutil
import socket
from platforms.host.hdb import HDB
from platforms.platform_base import PlatformBase
from utils.arg_parse import getArgs
from utils.subprocess_with_logger import processRun
class HostPlatform(PlatformBase):
def __init__(self, tempdir):
platform_hash = str(socket.gethostname())
if getArgs().platform_sig is not None:
platform_name = str(getArgs().platform_sig)
else:
platform_name = platform.platform() + "-" + \
self._getProcessorName()
self.tempdir = os.path.join(tempdir, platform_hash)
hdb = HDB(platform_hash, tempdir)
super(HostPlatform, self).__init__(self.tempdir, self.tempdir, hdb)
# reset the platform and platform hash
self.setPlatform(platform_name)
self.setPlatformHash(platform_hash)
if os.path.exists(self.tempdir):
shutil.rmtree(self.tempdir)
os.makedirs(self.tempdir, 0o777)
self.type = "host"
def runBenchmark(self, cmd, *args, **kwargs):
if not isinstance(cmd, list):
cmd = shlex.split(cmd)
host_kwargs = {}
env = os.environ
if "platform_args" in kwargs:
platform_args = kwargs["platform_args"]
if "timeout" in platform_args:
host_kwargs["timeout"] = platform_args["timeout"]
# used for local or remote log control
host_kwargs["log_output"] = platform_args.get("log_output", False)
if "env" in platform_args:
customized_env = platform_args["env"]
for k in customized_env:
env[k] = str(customized_env[k])
host_kwargs["env"] = env
output, _ = processRun(cmd, **host_kwargs)
return output
def _getProcessorName(self):
if platform.system() == "Windows":
return platform.processor()
elif platform.system() == "Darwin":
processor_info, _ = processRun(
["sysctl", "-n", "machdep.cpu.brand_string"])
if processor_info:
return processor_info.rstrip()
elif platform.system() == "Linux":
processor_info, _ = processRun(["cat", "/proc/cpuinfo"])
if processor_info:
for line in processor_info.split("\n"):
if "model name" in line:
return re.sub(".*model name.*:", "", line, 1)
return ""
def getOutputDir(self):
out_dir = os.path.join(self.tempdir, "output")
if not os.path.isdir(out_dir):
os.makedirs(out_dir, 0o777)
return out_dir
|
py | b4037502581ec6cda8868cbbf95ee680fd5e8816 | #!/usr/bin/env python
# encoding: utf-8
"""
reverse_integer.py
Created by Shengwei on 2014-07-16.
"""
# https://oj.leetcode.com/problems/reverse-integer/
"""
Reverse digits of an integer.
Example1: x = 123, return 321
Example2: x = -123, return -321
click to show spoilers.
Have you thought about this?
Here are some good questions to ask before coding. Bonus points for you if you have already thought through this!
If the integer's last digit is 0, what should the output be? ie, cases such as 10, 100.
Did you notice that the reversed integer might overflow? Assume the input is a 32-bit integer, then the reverse of 1000000003 overflows. How should you handle such cases?
Throw an exception? Good, but what if throwing an exception is not an option? You would then have to re-design the function (ie, add an extra parameter).
"""
class Solution:
# @return an integer
def reverse(self, x):
sign = -1 if x < 0 else 1
x *= sign
rev = 0
while x > 0:
x, remainder = divmod(x, 10)
rev = rev * 10 + remainder
return rev * sign
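# A hedged sketch of the overflow handling the docstring asks about: clamp to
# the signed 32-bit range and return 0 on overflow (one common convention; the
# problem statement leaves the exact behaviour open). The subclass name is
# illustrative and not part of the original solution.
class SolutionWithOverflowCheck(Solution):
    INT_MIN, INT_MAX = -2 ** 31, 2 ** 31 - 1

    def reverse(self, x):
        rev = Solution.reverse(self, x)
        return rev if self.INT_MIN <= rev <= self.INT_MAX else 0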
|
py | b40375f3bdbd20fe8cc21ef3a0c55314d1f6a0c0 | #!/usr/bin/env python
##################################################
## DEPENDENCIES
import sys
import os
import os.path
try:
import builtins as builtin
except ImportError:
import __builtin__ as builtin
from os.path import getmtime, exists
import time
import types
from Cheetah.Version import MinCompatibleVersion as RequiredCheetahVersion
from Cheetah.Version import MinCompatibleVersionTuple as RequiredCheetahVersionTuple
from Cheetah.Template import Template
from Cheetah.DummyTransaction import *
from Cheetah.NameMapper import NotFound, valueForName, valueFromSearchList, valueFromFrameOrSearchList
from Cheetah.CacheRegion import CacheRegion
import Cheetah.Filters as Filters
import Cheetah.ErrorCatchers as ErrorCatchers
##################################################
## MODULE CONSTANTS
VFFSL=valueFromFrameOrSearchList
VFSL=valueFromSearchList
VFN=valueForName
currentTime=time.time
__CHEETAH_version__ = '2.4.4'
__CHEETAH_versionTuple__ = (2, 4, 4, 'development', 0)
__CHEETAH_genTime__ = 1511303758.733848
__CHEETAH_genTimestamp__ = 'Tue Nov 21 20:35:58 2017'
__CHEETAH_src__ = '../m/Context_c.tmpl'
__CHEETAH_srcLastModified__ = 'Tue Nov 21 20:33:00 2017'
__CHEETAH_docstring__ = 'Autogenerated by Cheetah: The Python-Powered Template Engine'
if __CHEETAH_versionTuple__ < RequiredCheetahVersionTuple:
raise AssertionError(
'This template was compiled with Cheetah version'
' %s. Templates compiled before version %s must be recompiled.'%(
__CHEETAH_version__, RequiredCheetahVersion))
##################################################
## CLASSES
class Context_c(Template):
##################################################
## CHEETAH GENERATED METHODS
def __init__(self, *args, **KWs):
super(Context_c, self).__init__(*args, **KWs)
if not self._CHEETAH__instanceInitialized:
cheetahKWArgs = {}
allowedKWs = 'searchList namespaces filter filtersLib errorCatcher'.split()
for k,v in KWs.items():
if k in allowedKWs: cheetahKWArgs[k] = v
self._initCheetahInstance(**cheetahKWArgs)
def respond(self, trans=None):
## CHEETAH: main method generated for this template
if (not trans and not self._CHEETAH__isBuffering and not callable(self.transaction)):
trans = self.transaction # is None unless self.awake() was called
if not trans:
trans = DummyTransaction()
_dummyTrans = True
else: _dummyTrans = False
write = trans.response().write
SL = self._CHEETAH__searchList
_filter = self._CHEETAH__currentFilter
########################################
## START - generated method body
write(u'''#include "''')
_v = VFFSL(SL,"state.context.name",True) # u'${state.context.name}' on line 1, col 12
if _v is not None: write(_filter(_v, rawExpr=u'${state.context.name}')) # from line 1, col 12.
write(u'''.h"
/* We need to know about or initial state: */
#include "''')
_v = VFN(VFFSL(SL,"state.stop.name",True),"capitalize",False)() # u'${state.stop.name.capitalize()}' on line 4, col 12
if _v is not None: write(_filter(_v, rawExpr=u'${state.stop.name.capitalize()}')) # from line 4, col 12.
write(u'''State.h"
#include "''')
_v = VFN(VFFSL(SL,"state.name",True),"capitalize",False)() # u'${state.name.capitalize()}' on line 6, col 12
if _v is not None: write(_filter(_v, rawExpr=u'${state.name.capitalize()}')) # from line 6, col 12.
write(u'''StateInternal.h"
''')
if len(VFFSL(SL,"state.context.include",True)) > 0: # generated from line 7, col 1
for i in range(0, len(VFFSL(SL,"state.context.include",True))): # generated from line 8, col 1
write(u'''#include "''')
_v = VFN(VFN(VFFSL(SL,"state.context",True),"include",True)[i],"header",True) # u'${state.context.include[i].header}' on line 9, col 12
if _v is not None: write(_filter(_v, rawExpr=u'${state.context.include[i].header}')) # from line 9, col 12.
write(u'''"
''')
write(u'''#include <stdlib.h>
#include "message.h" // DEBUG
struct ''')
_v = VFFSL(SL,"state.context.name",True) # u'${state.context.name}' on line 16, col 8
if _v is not None: write(_filter(_v, rawExpr=u'${state.context.name}')) # from line 16, col 8.
write(u'''
{
struct ''')
_v = VFN(VFFSL(SL,"state.name",True),"capitalize",False)() # u'${state.name.capitalize()}' on line 18, col 12
if _v is not None: write(_filter(_v, rawExpr=u'${state.name.capitalize()}')) # from line 18, col 12.
write(u'''State state;
''')
if len(VFFSL(SL,"state.context.struct",True)) > 0: # generated from line 19, col 1
for i in range(0, len(VFFSL(SL,"state.context.struct",True))): # generated from line 20, col 1
write(u''' ''')
_v = VFN(VFN(VFFSL(SL,"state.context",True),"struct",True)[i],"type",True) # u'${state.context.struct[i].type}' on line 21, col 5
if _v is not None: write(_filter(_v, rawExpr=u'${state.context.struct[i].type}')) # from line 21, col 5.
write(u''' ''')
_v = VFN(VFN(VFFSL(SL,"state.context",True),"struct",True)[i],"name",True) # u'${state.context.struct[i].name}' on line 21, col 37
if _v is not None: write(_filter(_v, rawExpr=u'${state.context.struct[i].name}')) # from line 21, col 37.
write(u''';
''')
write(u'''};
/* Estatico */
/* USING:
''')
_v = VFFSL(SL,"state.context.name",True) # u'${state.context.name}' on line 28, col 5
if _v is not None: write(_filter(_v, rawExpr=u'${state.context.name}')) # from line 28, col 5.
write(u'''Ptr p = create''')
_v = VFN(VFFSL(SL,"state.name",True),"capitalize",False)() # u'${state.name.capitalize()}' on line 28, col 40
if _v is not None: write(_filter(_v, rawExpr=u'${state.name.capitalize()}')) # from line 28, col 40.
write(u'''();
''')
_v = VFFSL(SL,"state.start.name",True) # u'${state.start.name}' on line 29, col 5
if _v is not None: write(_filter(_v, rawExpr=u'${state.start.name}')) # from line 29, col 5.
_v = VFN(VFFSL(SL,"state.name",True),"capitalize",False)() # u'${state.name.capitalize()}' on line 29, col 24
if _v is not None: write(_filter(_v, rawExpr=u'${state.name.capitalize()}')) # from line 29, col 24.
write(u'''(p);
*/
#define MAX_NO_OF_''')
_v = VFN(VFFSL(SL,"state.context.name",True),"upper",False)() # u'${state.context.name.upper()}' on line 31, col 19
if _v is not None: write(_filter(_v, rawExpr=u'${state.context.name.upper()}')) # from line 31, col 19.
write(u'''S 2
static struct ''')
_v = VFFSL(SL,"state.context.name",True) # u'${state.context.name}' on line 32, col 15
if _v is not None: write(_filter(_v, rawExpr=u'${state.context.name}')) # from line 32, col 15.
write(u''' objectPool[MAX_NO_OF_''')
_v = VFN(VFFSL(SL,"state.context.name",True),"upper",False)() # u'${state.context.name.upper()}' on line 32, col 58
if _v is not None: write(_filter(_v, rawExpr=u'${state.context.name.upper()}')) # from line 32, col 58.
write(u'''S];
static size_t numberOfObjects = 0;
''')
_v = VFFSL(SL,"state.context.name",True) # u'${state.context.name}' on line 35, col 1
if _v is not None: write(_filter(_v, rawExpr=u'${state.context.name}')) # from line 35, col 1.
write(u'''Ptr create''')
_v = VFN(VFFSL(SL,"state.name",True),"capitalize",False)() # u'${state.name.capitalize()}' on line 35, col 32
if _v is not None: write(_filter(_v, rawExpr=u'${state.name.capitalize()}')) # from line 35, col 32.
write(u''' (void)
{
message("** create''')
_v = VFN(VFFSL(SL,"state.name",True),"capitalize",False)() # u'${state.name.capitalize()}' on line 37, col 23
if _v is not None: write(_filter(_v, rawExpr=u'${state.name.capitalize()}')) # from line 37, col 23.
write(u''' **");
''')
_v = VFFSL(SL,"state.context.name",True) # u'${state.context.name}' on line 39, col 5
if _v is not None: write(_filter(_v, rawExpr=u'${state.context.name}')) # from line 39, col 5.
write(u'''Ptr instance = NULL;
if (numberOfObjects < MAX_NO_OF_''')
_v = VFN(VFFSL(SL,"state.context.name",True),"upper",False)() # u'${state.context.name.upper()}' on line 41, col 37
if _v is not None: write(_filter(_v, rawExpr=u'${state.context.name.upper()}')) # from line 41, col 37.
write(u'''S)
{
instance = &objectPool[numberOfObjects++];
/* Initialize the object... */
/* Specify the initial state. */
''')
_v = VFFSL(SL,"state.transitionTo.name",True) # u'${state.transitionTo.name}' on line 47, col 9
if _v is not None: write(_filter(_v, rawExpr=u'${state.transitionTo.name}')) # from line 47, col 9.
_v = VFN(VFFSL(SL,"state.stop.name",True),"capitalize",False)() # u'${state.stop.name.capitalize()}' on line 47, col 35
if _v is not None: write(_filter(_v, rawExpr=u'${state.stop.name.capitalize()}')) # from line 47, col 35.
write(u''' (&instance->state);
/* Initialize the other attributes here.*/
}
return instance;
}
/* Dinamico */
/* USING:
''')
_v = VFFSL(SL,"state.context.name",True) # u'${state.context.name}' on line 56, col 5
if _v is not None: write(_filter(_v, rawExpr=u'${state.context.name}')) # from line 56, col 5.
write(u'''Ptr p = create''')
_v = VFN(VFFSL(SL,"state.name",True),"capitalize",False)() # u'${state.name.capitalize()}' on line 56, col 40
if _v is not None: write(_filter(_v, rawExpr=u'${state.name.capitalize()}')) # from line 56, col 40.
write(u'''();
''')
_v = VFFSL(SL,"state.start.name",True) # u'${state.start.name}' on line 57, col 5
if _v is not None: write(_filter(_v, rawExpr=u'${state.start.name}')) # from line 57, col 5.
_v = VFN(VFFSL(SL,"state.name",True),"capitalize",False)() # u'${state.name.capitalize()}' on line 57, col 24
if _v is not None: write(_filter(_v, rawExpr=u'${state.name.capitalize()}')) # from line 57, col 24.
write(u'''(p);
*/
/*
''')
_v = VFFSL(SL,"state.context.name",True) # u'${state.context.name}' on line 60, col 1
if _v is not None: write(_filter(_v, rawExpr=u'${state.context.name}')) # from line 60, col 1.
write(u'''Ptr create''')
_v = VFN(VFFSL(SL,"state.name",True),"capitalize",False)() # u'${state.name.capitalize()}' on line 60, col 32
if _v is not None: write(_filter(_v, rawExpr=u'${state.name.capitalize()}')) # from line 60, col 32.
write(u''' (void)
{
message("** create''')
_v = VFN(VFFSL(SL,"state.name",True),"capitalize",False)() # u'${state.name.capitalize()}' on line 62, col 23
if _v is not None: write(_filter(_v, rawExpr=u'${state.name.capitalize()}')) # from line 62, col 23.
write(u''' **");
''')
_v = VFFSL(SL,"state.context.name",True) # u'${state.context.name}' on line 64, col 5
if _v is not None: write(_filter(_v, rawExpr=u'${state.context.name}')) # from line 64, col 5.
write(u'''Ptr instance = malloc (sizeof *instance);
if (NULL != instance)
{
''')
_v = VFFSL(SL,"state.transitionTo.name",True) # u'${state.transitionTo.name}' on line 68, col 9
if _v is not None: write(_filter(_v, rawExpr=u'${state.transitionTo.name}')) # from line 68, col 9.
_v = VFN(VFFSL(SL,"state.stop.name",True),"capitalize",False)() # u'${state.stop.name.capitalize()}' on line 68, col 35
if _v is not None: write(_filter(_v, rawExpr=u'${state.stop.name.capitalize()}')) # from line 68, col 35.
write(u''' (&instance->state);
}
return instance;
}
*/
/*
void statemachine(void)
{
message("** statemachine **");
''')
_v = VFFSL(SL,"state.context.name",True) # u'${state.context.name}' on line 79, col 5
if _v is not None: write(_filter(_v, rawExpr=u'${state.context.name}')) # from line 79, col 5.
write(u'''Ptr instance = NULL;
if (numberOfObjects < MAX_NO_OF_''')
_v = VFN(VFFSL(SL,"state.context.name",True),"upper",False)() # u'${state.context.name.upper()}' on line 81, col 37
if _v is not None: write(_filter(_v, rawExpr=u'${state.context.name.upper()}')) # from line 81, col 37.
write(u'''S)
{
instance = &objectPool[numberOfObjects++];
''')
_v = VFFSL(SL,"state.transitionTo.name",True) # u'${state.transitionTo.name}' on line 85, col 9
if _v is not None: write(_filter(_v, rawExpr=u'${state.transitionTo.name}')) # from line 85, col 9.
_v = VFN(VFFSL(SL,"state.stop.name",True),"capitalize",False)() # u'${state.stop.name.capitalize()}' on line 85, col 35
if _v is not None: write(_filter(_v, rawExpr=u'${state.stop.name.capitalize()}')) # from line 85, col 35.
write(u''' (&instance->state);
while(1)
{
instance->state.func (&instance->state);
}
}
}
*/
void destroy''')
_v = VFN(VFFSL(SL,"state.name",True),"capitalize",False)() # u'${state.name.capitalize()}' on line 95, col 13
if _v is not None: write(_filter(_v, rawExpr=u'${state.name.capitalize()}')) # from line 95, col 13.
write(u''' (''')
_v = VFFSL(SL,"state.context.name",True) # u'${state.context.name}' on line 95, col 41
if _v is not None: write(_filter(_v, rawExpr=u'${state.context.name}')) # from line 95, col 41.
write(u'''Ptr instance)
{
message("** destroy''')
_v = VFN(VFFSL(SL,"state.name",True),"capitalize",False)() # u'${state.name.capitalize()}' on line 97, col 24
if _v is not None: write(_filter(_v, rawExpr=u'${state.name.capitalize()}')) # from line 97, col 24.
write(u''' **");
free (instance);
}
void ''')
_v = VFFSL(SL,"state.start.name",True) # u'${state.start.name}' on line 102, col 6
if _v is not None: write(_filter(_v, rawExpr=u'${state.start.name}')) # from line 102, col 6.
_v = VFN(VFFSL(SL,"state.name",True),"capitalize",False)() # u'${state.name.capitalize()}' on line 102, col 25
if _v is not None: write(_filter(_v, rawExpr=u'${state.name.capitalize()}')) # from line 102, col 25.
write(u''' (''')
_v = VFFSL(SL,"state.context.name",True) # u'${state.context.name}' on line 102, col 53
if _v is not None: write(_filter(_v, rawExpr=u'${state.context.name}')) # from line 102, col 53.
write(u'''Ptr instance)
{
/*
message("** ABSTRACT: ''')
_v = VFFSL(SL,"state.start.name",True) # u'${state.start.name}' on line 105, col 27
if _v is not None: write(_filter(_v, rawExpr=u'${state.start.name}')) # from line 105, col 27.
_v = VFN(VFFSL(SL,"state.name",True),"capitalize",False)() # u'${state.name.capitalize()}' on line 105, col 46
if _v is not None: write(_filter(_v, rawExpr=u'${state.name.capitalize()}')) # from line 105, col 46.
write(u''' **");
*/
instance->state.''')
_v = VFN(VFFSL(SL,"state.start.name",True),"lower",False)() # u'${state.start.name.lower()}' on line 107, col 21
if _v is not None: write(_filter(_v, rawExpr=u'${state.start.name.lower()}')) # from line 107, col 21.
write(u''' (&instance->state);
}
void ''')
_v = VFFSL(SL,"state.stop.name",True) # u'${state.stop.name}' on line 110, col 6
if _v is not None: write(_filter(_v, rawExpr=u'${state.stop.name}')) # from line 110, col 6.
_v = VFN(VFFSL(SL,"state.name",True),"capitalize",False)() # u'${state.name.capitalize()}' on line 110, col 24
if _v is not None: write(_filter(_v, rawExpr=u'${state.name.capitalize()}')) # from line 110, col 24.
write(u''' (''')
_v = VFFSL(SL,"state.context.name",True) # u'${state.context.name}' on line 110, col 52
if _v is not None: write(_filter(_v, rawExpr=u'${state.context.name}')) # from line 110, col 52.
write(u'''Ptr instance)
{
/*
message("** ABSTRACT: ''')
_v = VFFSL(SL,"state.stop.name",True) # u'${state.stop.name}' on line 113, col 27
if _v is not None: write(_filter(_v, rawExpr=u'${state.stop.name}')) # from line 113, col 27.
_v = VFN(VFFSL(SL,"state.name",True),"capitalize",False)() # u'${state.name.capitalize()}' on line 113, col 45
if _v is not None: write(_filter(_v, rawExpr=u'${state.name.capitalize()}')) # from line 113, col 45.
write(u''' **");
*/
instance->state.''')
_v = VFN(VFFSL(SL,"state.stop.name",True),"lower",False)() # u'${state.stop.name.lower()}' on line 116, col 21
if _v is not None: write(_filter(_v, rawExpr=u'${state.stop.name.lower()}')) # from line 116, col 21.
write(u''' (&instance->state);
}
''')
if len(VFFSL(SL,"state.states",True)) > 0: # generated from line 119, col 1
for i in range(0, len(VFFSL(SL,"state.states",True))): # generated from line 120, col 1
write(u'''void ''')
_v = VFN(VFN(VFFSL(SL,"state",True),"states",True)[i],"name",True) # u'${state.states[i].name}' on line 121, col 6
if _v is not None: write(_filter(_v, rawExpr=u'${state.states[i].name}')) # from line 121, col 6.
_v = VFN(VFFSL(SL,"state.name",True),"capitalize",False)() # u'${state.name.capitalize()}' on line 121, col 29
if _v is not None: write(_filter(_v, rawExpr=u'${state.name.capitalize()}')) # from line 121, col 29.
write(u''' (''')
_v = VFFSL(SL,"state.context.name",True) # u'${state.context.name}' on line 121, col 57
if _v is not None: write(_filter(_v, rawExpr=u'${state.context.name}')) # from line 121, col 57.
write(u'''Ptr instance)
{
/*
message("** ABSTRACT: ''')
_v = VFN(VFN(VFFSL(SL,"state",True),"states",True)[i],"name",True) # u'${state.states[i].name}' on line 124, col 27
if _v is not None: write(_filter(_v, rawExpr=u'${state.states[i].name}')) # from line 124, col 27.
_v = VFN(VFFSL(SL,"state.name",True),"capitalize",False)() # u'${state.name.capitalize()}' on line 124, col 50
if _v is not None: write(_filter(_v, rawExpr=u'${state.name.capitalize()}')) # from line 124, col 50.
write(u''' **");
*/
instance->state.''')
_v = VFN(VFN(VFN(VFFSL(SL,"state",True),"states",True)[i],"name",True),"lower",False)() # u'${state.states[i].name.lower()}' on line 127, col 21
if _v is not None: write(_filter(_v, rawExpr=u'${state.states[i].name.lower()}')) # from line 127, col 21.
write(u''' (&instance->state);
}
''')
########################################
## END - generated method body
return _dummyTrans and trans.response().getvalue() or ""
##################################################
## CHEETAH GENERATED ATTRIBUTES
_CHEETAH__instanceInitialized = False
_CHEETAH_version = __CHEETAH_version__
_CHEETAH_versionTuple = __CHEETAH_versionTuple__
_CHEETAH_genTime = __CHEETAH_genTime__
_CHEETAH_genTimestamp = __CHEETAH_genTimestamp__
_CHEETAH_src = __CHEETAH_src__
_CHEETAH_srcLastModified = __CHEETAH_srcLastModified__
_mainCheetahMethod_for_Context_c= 'respond'
## END CLASS DEFINITION
if not hasattr(Context_c, '_initCheetahAttributes'):
templateAPIClass = getattr(Context_c, '_CHEETAH_templateClass', Template)
templateAPIClass._addCheetahPlumbingCodeToClass(Context_c)
# CHEETAH was developed by Tavis Rudd and Mike Orr
# with code, advice and input from many other volunteers.
# For more information visit http://www.CheetahTemplate.org/
##################################################
## if run from command line:
if __name__ == '__main__':
from Cheetah.TemplateCmdLineIface import CmdLineIface
CmdLineIface(templateObj=Context_c()).run()
|
py | b40376fbc29f28de481e3064bd7988282693a5c7 | # This code is part of Qiskit.
#
# (C) Copyright IBM 2017.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"The Qiskit Terra setup file."
import os
import re
import sys
from setuptools import setup, find_packages, Extension
from setuptools_rust import Binding, RustExtension
try:
from Cython.Build import cythonize
except ImportError:
import subprocess
subprocess.call([sys.executable, "-m", "pip", "install", "Cython>=0.27.1"])
from Cython.Build import cythonize
with open("requirements.txt") as f:
REQUIREMENTS = f.read().splitlines()
# Add Cython extensions here
CYTHON_EXTS = {
"qiskit/quantum_info/states/cython/exp_value": "qiskit.quantum_info.states.cython.exp_value",
}
INCLUDE_DIRS = []
# Extra link args
LINK_FLAGS = []
# If on Win and not in MSYS2 (i.e. Visual studio compile)
if sys.platform == "win32" and os.environ.get("MSYSTEM") is None:
COMPILER_FLAGS = ["/O2"]
# Everything else
else:
COMPILER_FLAGS = ["-O2", "-funroll-loops", "-std=c++11"]
if sys.platform == "darwin":
# These are needed for compiling on OSX 10.14+
COMPILER_FLAGS.append("-mmacosx-version-min=10.9")
LINK_FLAGS.append("-mmacosx-version-min=10.9")
EXT_MODULES = []
# Add Cython Extensions
for src, module in CYTHON_EXTS.items():
ext = Extension(
module,
sources=[src + ".pyx"],
include_dirs=INCLUDE_DIRS,
extra_compile_args=COMPILER_FLAGS,
extra_link_args=LINK_FLAGS,
language="c++",
)
EXT_MODULES.append(ext)
# Read long description from README.
README_PATH = os.path.join(os.path.abspath(os.path.dirname(__file__)), "README.md")
with open(README_PATH) as readme_file:
README = re.sub(
"<!--- long-description-skip-begin -->.*<!--- long-description-skip-end -->",
"",
readme_file.read(),
flags=re.S | re.M,
)
visualization_extras = [
"matplotlib>=3.3",
"ipywidgets>=7.3.0",
"pydot",
"pillow>=4.2.1",
"pylatexenc>=1.4",
"seaborn>=0.9.0",
"pygments>=2.4",
]
z3_requirements = [
"z3-solver>=4.7",
]
bip_requirements = ["cplex", "docplex"]
setup(
name="qiskit-terra",
version="0.20.0",
description="Software for developing quantum computing programs",
long_description=README,
long_description_content_type="text/markdown",
url="https://github.com/Qiskit/qiskit-terra",
author="Qiskit Development Team",
author_email="[email protected]",
license="Apache 2.0",
classifiers=[
"Environment :: Console",
"License :: OSI Approved :: Apache Software License",
"Intended Audience :: Developers",
"Intended Audience :: Science/Research",
"Operating System :: Microsoft :: Windows",
"Operating System :: MacOS",
"Operating System :: POSIX :: Linux",
"Programming Language :: Python :: 3 :: Only",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"Programming Language :: Python :: 3.10",
"Topic :: Scientific/Engineering",
],
keywords="qiskit sdk quantum",
packages=find_packages(exclude=["test*"]),
install_requires=REQUIREMENTS,
setup_requires=["Cython>=0.27.1"],
include_package_data=True,
python_requires=">=3.7",
extras_require={
"visualization": visualization_extras,
"bip-mapper": bip_requirements,
"crosstalk-pass": z3_requirements,
# Note: 'all' does not include 'bip-mapper' because cplex is too fiddly and too little
# supported on various Python versions and OSes compared to Terra. You have to ask for it
# explicitly.
"all": visualization_extras + z3_requirements,
},
project_urls={
"Bug Tracker": "https://github.com/Qiskit/qiskit-terra/issues",
"Documentation": "https://qiskit.org/documentation/",
"Source Code": "https://github.com/Qiskit/qiskit-terra",
},
ext_modules=cythonize(EXT_MODULES),
rust_extensions=[RustExtension("qiskit._accelerate", "Cargo.toml", binding=Binding.PyO3)],
zip_safe=False,
entry_points={
"qiskit.unitary_synthesis": [
"default = qiskit.transpiler.passes.synthesis.unitary_synthesis:DefaultUnitarySynthesis",
"aqc = qiskit.transpiler.synthesis.aqc.aqc_plugin:AQCSynthesisPlugin",
]
},
)
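# With the extras_require entries defined above, the optional dependency groups
# are installed through pip extras, for example:
#   pip install qiskit-terra[visualization]
#   pip install qiskit-terra[all]                     # everything except 'bip-mapper'
#   pip install -e ".[visualization,crosstalk-pass]"  # editable install from a source checkout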
|
py | b403786e3a896119cc52f7f744e2ac0d9a742dfa | import time
from PIL import Image
from glob import glob
import numpy as np
import os, sys
from contextlib import contextmanager
from numba import cuda as ncuda
from PIL import ImageFilter
import cv2
fast_denoising = False
import ImagePipeline_utils as IP
from ImagePipeline_utils import Quiet
import shutil
import warnings
from os import path as osp
import subprocess
class ImageRestorer:
def __init__(self, resetgpu = True):
self._history = []
self._resetgpu = resetgpu
#record current interpreter path (to ensure calling the right interpreter when calling another process)
self._python_dir = sys.executable
self.denoise = self.remove_gaussian_noise
self.Q = Quiet()
def preprocess(self, inputdir = None, outputdir = None, **kwargs):
"""
preprocess images: convert them to RGB format, resize them if too large (optional, True by default), convert them to grayscale (optional, False by default)
Defaults options (you can override any option with a keyword argument):
options = {'gray':False, 'resize':True, 'size':(1000,1000), 'quiet':True, 'raising':True}
"""
#default parameters
options = {'gray':False, 'resize':True, 'size':(1000,1000), 'quiet':True, 'raising':True}
#default parameters are overridden by keyword arguments (e.g. gray = True) (passed by **kwargs) and are unpacked as class attributes (self.gray = True, ...)
inputdir, outputdir = self._init_process(inputdir, outputdir, "preprocess", options, **kwargs)
with self.Q.quiet_and_timeit("Image preprocessing", raising = self.raising, quiet = self.quiet):
imname = '*'
orignames = glob(os.path.join(inputdir, imname))
for orig in orignames:
try:
im = Image.open(orig)
#print(orig)
#remove alpha component
#print("torgb")
im = IP.to_RGB(im)
#convert to grayscale
if self.gray:
#print("togray")
im = IP.to_grayscale(im)
#resize
if self.resize:
width, height = im.size
#resize only if larger than limit
if width > self.size[0] or height > self.size[1]:
im.thumbnail(self.size,Image.ANTIALIAS)
#save as png (and remove previous version if inputdir = outputdir)
path, file = os.path.split(orig)
f, e = os.path.splitext(file)
if inputdir == outputdir:
os.remove(orig)
output_name = os.path.join(outputdir, f+".png")
im.save(output_name)
print(output_name)
except Exception as e:
self.Q.force_print(e)
def filter(self, inputdir = None, outputdir = None, **kwargs):
"""
Perform basic filtering (median, gaussian and/or mean filtering) on images
Defaults options (you can override any option with a keyword argument):
options = {'median':True, 'median_winsize':5, 'gaussian':True, 'gaussian_x':5, 'gaussian_y':5, 'gaussian_std':1, 'mean':True, 'mean_winsize':3, 'raising':True, 'quiet':True}
"""
options = {'median':True, 'median_winsize':5, 'gaussian':True, 'gaussian_x':5, 'gaussian_y':5, 'gaussian_std':1, 'mean':True, 'mean_winsize':3, 'raising':True, 'quiet':True}
inputdir, outputdir = self._init_process(inputdir, outputdir, "filter", options, **kwargs)
with self.Q.quiet_and_timeit("Image filtering", self.raising, self.quiet):
imname = '*'
orignames = glob(os.path.join(inputdir, imname))
for orig in orignames:
print(orig)
try:
im = cv2.imread(orig, cv2.IMREAD_COLOR)
#median blur
if self.median:
im = cv2.medianBlur(im,self.median_winsize)
if self.gaussian:
im = cv2.GaussianBlur(im,(self.gaussian_x,self.gaussian_y),self.gaussian_std)
#mean blur
if self.mean:
im = cv2.blur(im,(self.mean_winsize,self.mean_winsize))
#save as png (and remove previous version if inputdir = outputdir)
path, file = os.path.split(orig)
f, e = os.path.splitext(file)
if inputdir == outputdir:
os.remove(orig)
output_name = os.path.join(outputdir, f+".png")
cv2.imwrite(output_name, im)
print(output_name)
except Exception as e:
self.Q.force_print(e)
def remove_stripes(self, inputdir = None, outputdir = None, **kwargs):
"""
Remove vertical and horizontal stripes from images
Defaults options (you can override any option with a keyword argument):
options = {'working_dir':'./WDNN', 'raising':True, 'quiet':True, 'python_dir':sys.executable, 'process_args':'', 'command_suffix':" 2> log.err 1>> log.out"}
"""
options = {'working_dir':'./WDNN', 'raising':True, 'quiet':True, 'python_dir':sys.executable, 'process_args':'', 'command_suffix':" 2> log.err 1>> log.out"}
inputdir, outputdir = self._init_process(inputdir, outputdir, "remove_stripes", options, **kwargs)
command = "%s -W ignore -u striperemover.py -i %s -o %s %s %s" % (self.python_dir, inputdir, outputdir, self.process_args, self.command_suffix)
#Remove vertical stripes
with self.Q.quiet_and_timeit("Removing stripes", self.raising, self.quiet):
IP.createdir_ifnotexists(outputdir)
subprocess.run(command, shell = True)
if self.raising:
self.logerr()
if not self.quiet:
for l in self.log():
print(l)
def remove_gaussian_noise(self, inputdir = None, outputdir = None, **kwargs):
"""
Remove gaussian noise using NLRN (or DNCNN if fast is True).
Defaults options (you can override any option with a keyword argument):
options = {'fast':False, 'working_dir':'./', 'raising':True, 'quiet':True, 'python_dir':sys.executable, 'process_args':'', 'command_suffix':" 2> log.err 1>> log.out"}
Note that we use a command suffix to export console outputs to a log file. If you remove this, you could have bugs in jupyter notebooks. It should work in standard python.
You can monitor log.out output by using the command "tail -f log.out" in a terminal.
You can launch the process with a specific python environment by providing its path with the keyword argument "python_dir".
"""
#defaults attributes to instance for method
options = {'fast':False, 'working_dir':'./', 'raising':True, 'quiet':True, 'python_dir':sys.executable, 'process_args':'', 'command_suffix':" 2> log.err 1>> log.out"}
#Note that we use a command suffix to export console outputs to a log file. If you remove this, you could have bugs in jupyter notebooks. It should work in standard python
inputdir, outputdir = self._init_process(inputdir, outputdir, "remove_gaussian_noise", options, **kwargs)
command = "%s -W ignore -u denoiser.py -i %s -o %s %s %s" % (self.python_dir, inputdir, outputdir, self.process_args, self.command_suffix)
if self.fast:
#IP.reset_gpu(0)
command = '%s -W ignore -u denoiser_NLRN_DNCNN.py -i %s -o %s --method DNCNN %s %s' % (self.python_dir, inputdir, outputdir, self.process_args, self.command_suffix)
#print("raising", self.raising)
with self.Q.quiet_and_timeit("Removing gaussian noise", self.raising, self.quiet):
IP.createdir_ifnotexists(outputdir)
subprocess.run(command, shell = True)
if self.raising:
self.logerr()
if not self.quiet:
for l in self.log():
print(l)
#if self.fast:
#IP.reset_gpu(0)
def colorize(self, inputdir = None, outputdir = None, **kwargs):
"""
Colorize images using deoldify.
Defaults options (you can override any option with a keyword argument):
options = {'working_dir':'./', 'raising':True, 'quiet':True, 'python_dir':sys.executable, 'process_args':'', 'command_suffix':" 2> log.err 1>> log.out"}
Note that we use a command suffix to export console outputs to a log file. If you remove this, you could have bugs in jupyter notebooks. It should work in standard python.
You can monitor log.out output by using the command "tail -f log.out" in a terminal.
You can launch the process with a specific python environment by providing its path with the keyword argument "python_dir".
"""
#defaults attributes to instance for method
options = {'working_dir':'./', 'raising':True, 'quiet':True, 'python_dir':sys.executable, 'process_args':'', 'command_suffix':" 2> log.err 1>> log.out"}
inputdir, outputdir = self._init_process(inputdir, outputdir, "colorize", options, **kwargs)
command = "%s -W ignore -u colorizer.py -i %s -o %s %s %s" % (self.python_dir, inputdir, outputdir, self.process_args, self.command_suffix)
with self.Q.quiet_and_timeit("Colorizing", self.raising, self.quiet):
IP.createdir_ifnotexists(outputdir)
subprocess.run(command, shell = True)
if self.raising:
self.logerr()
if not self.quiet:
for l in self.log():
print(l)
def super_resolution(self, inputdir = None, outputdir = None, **kwargs):
"""
Upsample images using ESRGAN.
Defaults options (you can override any option with a keyword argument):
options = {'working_dir':'./', 'raising':True, 'quiet':True, 'python_dir':sys.executable, 'process_args':'', 'command_suffix':" 2> log.err 1>> log.out"}
Note that we use a command suffix to export console outputs to a log file. If you remove this, you could have bugs in jupyter notebooks. It should work in standard python.
You can monitor log.out output by using the command "tail -f log.out" in a terminal.
You can launch the process with a specific python environment by providing its path with the keyword argument "python_dir".
"""
#defaults attributes to instance for method
options = {'working_dir':'./', 'raising':True, 'quiet':True, 'python_dir':sys.executable, 'process_args':'', 'command_suffix':" 2> log.err 1>> log.out"}
inputdir, outputdir = self._init_process(inputdir, outputdir, "super_resolution", options, **kwargs)
command = "%s -W ignore -u superresolution.py -i %s -o %s %s %s" % (self.python_dir, inputdir, outputdir, self.process_args, self.command_suffix)
with self.Q.quiet_and_timeit("Super-resolving", self.raising, self.quiet):
IP.createdir_ifnotexists(outputdir)
subprocess.run(command, shell = True)
if self.raising:
self.logerr()
if not self.quiet:
for l in self.log():
print(l)
def merge(self, inputdirs, outputdir, **kwargs):
"""
merge (compute the average of) the folders' pairwise images
inputdirs: list of input directories
Defaults options (you can override any option with a keyword argument):
options = {'weights':[1/len(inputdirs)]*len(inputdirs), 'raising':True, 'quiet':True}
"""
options = {'weights':[1/len(inputdirs)]*len(inputdirs), 'raising':True, 'quiet':True}
inputdirs, outputdir = self._init_process(inputdirs, outputdir, "merge", options, **kwargs)
with self.Q.quiet_and_timeit("Image merging", self.raising, self.quiet):
names = IP.get_filenames(inputdirs[0])
for n in names:
try:
files = [os.path.join(i,n) for i in inputdirs]
merged = IP.image_average(files,self.weights)
merged.save(os.path.join(outputdir,n))
except Exception as e:
self.Q.force_print(e)
def _init_process(self, inputdir, outputdir, process, default_options, **kwargs):
"""
This method can (should) be used at the beginning of any other method, to manage process options and history log
"""
if inputdir == None:
if len(self._history) == 0:
raise ValueError("Please set the inputdir at least for the first processing step")
else:
#If no inputdir is provided, take last outputdir as new inputdir
inputdir = self._history[-1]['output']
#if outputdir is not provided, override inputdir with process result
if outputdir == None:
outputdir = inputdir
elif outputdir != inputdir:
if not ( (type(inputdir) is list) and (outputdir in inputdir) ):
#initialize only if different from inputdir
IP.initdir(outputdir)
options = default_options
#override default parameters for the process, and unpack all arguments as class attributes
options.update(kwargs)
#create/update (unpack) class attributes with options
for key in options:
self.__setattr__(key, options[key])
self._history.append({"input":inputdir,"output":outputdir,"process":process,"options" : options})
IP.createdir_ifnotexists(outputdir)
if self._resetgpu:
IP.reset_gpu(0)
return inputdir, outputdir
def display(self, **kwargs):
if len(self._history) == 0:
print("You did not perform any process yet. No image folder to display.")
return
last_folder = self._history[-1]["output"]
IP.display_folder(last_folder, **kwargs)
def log(self, lines = 10):
logdata = []
if os.path.exists('log.out'):
with open('log.out', 'r') as myfile:
logdata = myfile.readlines()
if os.path.exists('log.err'):
with open('log.err', 'r') as myfile:
logdataerr = myfile.readlines()
logdata = logdata+logdataerr
if len(logdata) > 0:
if len(logdata) < lines:
return logdata
return logdata[-lines:]
else:
logdata = 'No log.out file. Using restorer history instead.\n\n %s' % (self._history)
return logdata
def history(self):
return self._history
def logerr(self, raising = False):
if os.path.exists('log.err'):
with open('log.err') as f:
logdata = f.readlines()
if len(logdata) > 0:
if any(['error' in l.lower() for l in logdata]):
print('Error or warning occurred during the process. Please check the output below.')
for l in logdata:
if raising:
raise Exception(l)
else:
print(l)
return logdata
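# A usage sketch of the pipeline above, assuming the helper scripts it shells
# out to are available: when inputdir is omitted, each step reads from the
# previous step's outputdir (see _init_process). Directory names are
# illustrative; method names and keyword options come from the class itself.
if __name__ == '__main__':
    restorer = ImageRestorer(resetgpu=False)
    restorer.preprocess(inputdir='./input_images', outputdir='./work/preprocessed', gray=True)
    restorer.filter(outputdir='./work/filtered', median=True, gaussian=False, mean=False)
    restorer.remove_gaussian_noise(outputdir='./work/denoised', fast=True)
    restorer.super_resolution(outputdir='./work/upscaled')
    print(restorer.history())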
|
py | b4037870f24a50b670677395f9751a46b2858ae9 | """Convert IceCat Data into unified schema"""
def reduce_schema(df_original_dataset):
df_dataset = df_original_dataset.copy()
df_dataset['description'] = df_dataset['Description.LongDesc'] + \
df_dataset['SummaryDescription.LongSummaryDescription']
df_dataset.rename(columns={'Title': 'title', 'Category.Name.Value': 'category', 'Brand': 'brand',
'pathlist_names': 'path_list'}, inplace=True)
# Convert dtype to string
df_dataset['title'] = df_dataset['title'].astype(str)
df_dataset['category'] = df_dataset['category'].astype(str)
df_dataset['description'] = df_dataset['description'].astype(str)
df_dataset['brand'] = df_dataset['brand'].astype(str)
return df_dataset[['title', 'description', 'brand', 'category', 'path_list']]
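# A minimal illustration of the column mapping above; the sample record is
# invented and only the raw IceCat column names referenced in reduce_schema
# are assumed.
if __name__ == '__main__':
    import pandas as pd
    sample = pd.DataFrame([{
        'Title': 'Example laptop',
        'Description.LongDesc': 'A thin and light notebook. ',
        'SummaryDescription.LongSummaryDescription': '13-inch display.',
        'Category.Name.Value': 'Notebooks',
        'Brand': 'ExampleBrand',
        'pathlist_names': ['Computers', 'Notebooks'],
    }])
    print(reduce_schema(sample).iloc[0]['description'])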
|
py | b403787f536206c8ab892ad5b1f9b06e355d1cc1 | # f = open('xiaoxiao.txt','a',encoding='utf-8')
# print(type(f))
# f.write('I like you, do you like me?')
# f.write('\n')
# f.write('Do you still miss me?')
# f.write('\n')
# f.write('I have already forgotten you')
# f.write('\n')
# print('I am executing this once')
# f = open('xiaoxiao.txt','r',encoding='utf-8')
# # content = f.read()
# # print(content)
#
# text = f.read(1024)
# print(text)
# f.close()
# f = open('xiaoxiao.txt','r',encoding='utf-8')
# content = f.readlines()  # read everything and put it into a list
#
# print(content)
#
# # text = f.read(1024)
# # print(text)
# f.close()
# f = open('xiaoxiao.txt','r',encoding='utf-8' )
# for line in f:
# print(line[2])
# with open('xiaoxiao.txt','r',encoding='utf-8') as f:
# content = f.readlines();
# i= 0
# print(content[2])
# for line in content:
# i = i+1
# print(i,line)
def make():
print(2222)
make()
|
py | b403793fb5ce1408d1397f20714d8e7645b96d9f | import logging
import os
import pickle
import sys
from contextlib import nullcontext
import numpy as np
from tqdm import tqdm
import torch
from torch.utils.data import DataLoader
from transformers import AutoConfig, AutoTokenizer
from transformers import (
HfArgumentParser,
)
from tevatron.arguments import ModelArguments, DataArguments, \
DenseTrainingArguments as TrainingArguments
from tevatron.data import EncodeDataset, EncodeCollator
from tevatron.modeling import DenseOutput, DenseModelForInference
from tevatron.datasets import HFQueryDataset, HFCorpusDataset
logger = logging.getLogger(__name__)
def main():
parser = HfArgumentParser((ModelArguments, DataArguments, TrainingArguments))
if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
else:
model_args, data_args, training_args = parser.parse_args_into_dataclasses()
model_args: ModelArguments
data_args: DataArguments
training_args: TrainingArguments
if training_args.local_rank > 0 or training_args.n_gpu > 1:
raise NotImplementedError('Multi-GPU encoding is not supported.')
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN,
)
num_labels = 1
config = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path,
num_labels=num_labels,
cache_dir=model_args.cache_dir,
)
tokenizer = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
cache_dir=model_args.cache_dir,
use_fast=False,
)
model = DenseModelForInference.build(
model_name_or_path=model_args.model_name_or_path,
config=config,
cache_dir=model_args.cache_dir,
)
text_max_length = data_args.q_max_len if data_args.encode_is_qry else data_args.p_max_len
if data_args.encode_is_qry:
encode_dataset = HFQueryDataset(tokenizer=tokenizer, data_args=data_args,
cache_dir=data_args.data_cache_dir or model_args.cache_dir)
else:
encode_dataset = HFCorpusDataset(tokenizer=tokenizer, data_args=data_args,
cache_dir=data_args.data_cache_dir or model_args.cache_dir)
encode_dataset = EncodeDataset(encode_dataset.process(data_args.encode_num_shard, data_args.encode_shard_index),
tokenizer, max_len=text_max_length)
encode_loader = DataLoader(
encode_dataset,
batch_size=training_args.per_device_eval_batch_size,
collate_fn=EncodeCollator(
tokenizer,
max_length=text_max_length,
padding='max_length'
),
shuffle=False,
drop_last=False,
num_workers=training_args.dataloader_num_workers,
)
encoded = []
lookup_indices = []
model = model.to(training_args.device)
model.eval()
for (batch_ids, batch) in tqdm(encode_loader):
lookup_indices.extend(batch_ids)
with torch.cuda.amp.autocast() if training_args.fp16 else nullcontext():
with torch.no_grad():
for k, v in batch.items():
batch[k] = v.to(training_args.device)
if data_args.encode_is_qry:
model_output: DenseOutput = model(query=batch)
encoded.append(model_output.q_reps.cpu().detach().numpy())
else:
model_output: DenseOutput = model(passage=batch)
encoded.append(model_output.p_reps.cpu().detach().numpy())
encoded = np.concatenate(encoded)
with open(data_args.encoded_save_path, 'wb') as f:
pickle.dump((encoded, lookup_indices), f)
if __name__ == "__main__":
main()
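# HfArgumentParser turns the dataclass fields read in main() into CLI flags; a
# typical corpus-encoding invocation might look like the sketch below. The
# script name, model path, and values are assumptions; only the flag names
# appear in the code above (plus --output_dir, required by TrainingArguments).
#
#   python encode.py \
#       --model_name_or_path <trained-dense-retriever> \
#       --fp16 \
#       --per_device_eval_batch_size 128 \
#       --p_max_len 128 \
#       --encoded_save_path corpus_emb.pkl \
#       --output_dir /tmp/encode_out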
|
py | b40379412cb631aa71cfefed580904a68525b080 | __author__ = "Stacy Smith"
__credits__ = "Jeremy Schulman, Nitin Kumar"
import unittest
from nose.plugins.attrib import attr
from mock import patch, MagicMock
import os
from lxml import etree
from jnpr.junos import Device
from jnpr.junos.exception import RpcError
from ncclient.manager import Manager, make_device_handler
from ncclient.transport import SSHSession
@attr('unit')
class TestIfdStyle(unittest.TestCase):
@patch('ncclient.manager.connect')
def setUp(self, mock_connect):
mock_connect.side_effect = self._mock_manager_setup
self.dev = Device(host='1.1.1.1', user='rick', password='password123',
gather_facts=False)
self.dev.open()
@patch('jnpr.junos.Device.execute')
def test_ifd_style_switch(self, mock_execute):
mock_execute.side_effect = self._mock_manager_ifd_style_switch
self.assertEqual(self.dev.facts['ifd_style'], 'SWITCH')
@patch('jnpr.junos.Device.execute')
def test_ifd_style_classic(self, mock_execute):
mock_execute.side_effect = self._mock_manager_ifd_style_classic
self.assertEqual(self.dev.facts['ifd_style'], 'CLASSIC')
def test_ifd_style_None(self):
self.dev.facts._cache['personality'] = 'JDM'
self.assertEqual(self.dev.facts['ifd_style'], None)
def _read_file(self, fname):
from ncclient.xml_ import NCElement
fpath = os.path.join(os.path.dirname(__file__),
'rpc-reply', fname)
foo = open(fpath).read()
rpc_reply = NCElement(foo,
self.dev._conn._device_handler
.transform_reply())._NCElement__doc[0]
return rpc_reply
def _mock_manager_setup(self, *args, **kwargs):
if kwargs:
device_params = kwargs['device_params']
device_handler = make_device_handler(device_params)
session = SSHSession(device_handler)
return Manager(session, device_handler)
def _mock_manager_ifd_style_classic(self, *args, **kwargs):
if args:
return self._read_file('ifd_style_classic_' + args[0].tag +
'.xml')
def _mock_manager_ifd_style_switch(self, *args, **kwargs):
if args:
if (args[0].tag == 'command'):
raise RpcError()
else:
return self._read_file('ifd_style_switch_' + args[0].tag +
'.xml')
|
py | b40379f6824d67e2664fa44cac492d38a8cfd4e7 | import math
import json
class ActionTypes():
DefaultAction, MoveAction, AttackAction, CollectAction, UpgradeAction, StealAction, PurchaseAction, HealAction = \
range(8)
class UpgradeType():
CarryingCapacity, AttackPower, Defence, MaximumHealth, CollectingSpeed = range(5)
class TileType():
Tile, Wall, House, Lava, Resource, Shop = range(6)
class TileContent():
Empty, Wall, House, Lava, Resource, Shop, Player = range(7)
class PurchasableItem():
MicrosoftSword, UbisoftShield, DevolutionBackpack, DevolutionPickaxe, HealthPotion = range(5)
class Point(object):
# Constructor
def __init__(self, X=0, Y=0):
self.X = X
self.Y = Y
# Overloaded operators
def __add__(self, point):
return Point(self.X + point.X, self.Y + point.Y)
def __sub__(self, point):
return Point(self.X - point.X, self.Y - point.Y)
def __str__(self):
return "{{{0}, {1}}}".format(self.X, self.Y)
# Distance between two Points
@staticmethod
def Distance(p1, p2):
delta_x = p1.X - p2.X
delta_y = p1.Y - p2.Y
return math.sqrt(math.pow(delta_x, 2) + math.pow(delta_y, 2))
class GameInfo(object):
def __init__(self, json_dict):
self.__dict__ = json_dict
self.HouseLocation = Point(json_dict["HouseLocation"])
self.Map = None
self.OtherPlayers = dict()
class Tile(object):
def __init__(self, content=None, x=0, y=0):
self.Content = content
self.X = x
self.Y = y
class Player(object):
def __init__(self, health, maxHealth, position, houseLocation, score, carriedRessources,
carryingCapacity=1000):
self.Health = health
self.MaxHealth = maxHealth
self.Position = position
self.HouseLocation = houseLocation
self.Score = score
self.CarriedRessources = carriedRessources
self.CarryingCapacity = carryingCapacity
class PlayerInfo(object):
def __init__(self, health, maxHealth, position):
self.Health = health
self.MaxHealth = maxHealth
self.Position = position
class ActionContent(object):
def __init__(self, action_name, content):
self.ActionName = action_name
self.Content = str(content)
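# A small illustration of the Point helpers above; the coordinates are
# arbitrary and chosen so the expected results are easy to verify by hand.
if __name__ == '__main__':
    home = Point(0, 0)
    target = Point(3, 4)
    print(target - home)                 # {3, 4}
    print(Point.Distance(home, target))  # 5.0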
|
py | b4037ad11a20b61c36390f6e96750ee6725c1e6b | """
Manage Security Groups
======================
.. versionadded:: 2014.7.0
Create and destroy Security Groups. Be aware that this interacts with Amazon's
services, and so may incur charges.
This module uses ``boto``, which can be installed via package, or pip.
This module accepts explicit EC2 credentials but can also utilize
IAM roles assigned to the instance through Instance Profiles. Dynamic
credentials are then automatically obtained from AWS API and no further
configuration is necessary. More information available `here
<http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html>`_.
If IAM roles are not used you need to specify them either in a pillar file or
in the minion's config file:
.. code-block:: yaml
secgroup.keyid: GKTADJGHEIQSXMKKRBJ08H
secgroup.key: askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs
It's also possible to specify ``key``, ``keyid`` and ``region`` via a profile, either
passed in as a dict, or as a string to pull from pillars or minion config:
.. code-block:: yaml
myprofile:
keyid: GKTADJGHEIQSXMKKRBJ08H
key: askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs
region: us-east-1
.. code-block:: yaml
Ensure mysecgroup exists:
boto_secgroup.present:
- name: mysecgroup
- description: My security group
- vpc_name: myvpc
- rules:
- ip_protocol: tcp
from_port: 80
to_port: 80
cidr_ip:
- 10.0.0.0/8
- 192.168.0.0/16
- ip_protocol: tcp
from_port: 8080
to_port: 8090
cidr_ip:
- 10.0.0.0/8
- 192.168.0.0/16
- ip_protocol: icmp
from_port: -1
to_port: -1
source_group_name: mysecgroup
- ip_protocol: tcp
from_port: 8080
to_port: 8080
source_group_name: MyOtherSecGroup
source_group_name_vpc: MyPeeredVPC
- rules_egress:
- ip_protocol: all
from_port: -1
to_port: -1
cidr_ip:
- 10.0.0.0/8
- 192.168.0.0/16
- tags:
SomeTag: 'My Tag Value'
SomeOtherTag: 'Other Tag Value'
- region: us-east-1
- keyid: GKTADJGHEIQSXMKKRBJ08H
- key: askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs
# Using a profile from pillars
Ensure mysecgroup exists:
boto_secgroup.present:
- name: mysecgroup
- description: My security group
- profile: myprofile
# Passing in a profile
Ensure mysecgroup exists:
boto_secgroup.present:
- name: mysecgroup
- description: My security group
- profile:
keyid: GKTADJGHEIQSXMKKRBJ08H
key: askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs
region: us-east-1
.. note::
When using the ``profile`` parameter and ``region`` is set outside of
the profile group, region is ignored and a default region will be used.
If ``region`` is missing from the ``profile`` data set, ``us-east-1``
will be used as the default region.
"""
import logging
import pprint
import salt.utils.dictupdate as dictupdate
from salt.exceptions import SaltInvocationError
log = logging.getLogger(__name__)
def __virtual__():
"""
Only load if boto is available.
"""
if "boto_secgroup.exists" in __salt__:
return "boto_secgroup"
return (False, "boto_secgroup module could not be loaded")
def present(
name,
description,
vpc_id=None,
vpc_name=None,
rules=None,
rules_egress=None,
delete_ingress_rules=True,
delete_egress_rules=True,
region=None,
key=None,
keyid=None,
profile=None,
tags=None,
):
"""
Ensure the security group exists with the specified rules.
name
Name of the security group.
description
A description of this security group.
vpc_id
The ID of the VPC to create the security group in, if any. Exclusive with vpc_name.
vpc_name
The name of the VPC to create the security group in, if any. Exclusive with vpc_id.
.. versionadded:: 2016.3.0
.. versionadded:: 2015.8.2
rules
A list of ingress rule dicts. If not specified, ``rules=None``,
the ingress rules will be unmanaged. If set to an empty list, ``[]``,
then all ingress rules will be removed.
rules_egress
A list of egress rule dicts. If not specified, ``rules_egress=None``,
the egress rules will be unmanaged. If set to an empty list, ``[]``,
then all egress rules will be removed.
delete_ingress_rules
Some tools (EMR comes to mind) insist on adding rules on-the-fly, which
salt will happily remove on the next run. Set this param to False to
avoid deleting rules which were added outside of salt.
delete_egress_rules
Some tools (EMR comes to mind) insist on adding rules on-the-fly, which
salt will happily remove on the next run. Set this param to False to
avoid deleting rules which were added outside of salt.
region
Region to connect to.
key
Secret key to be used.
keyid
Access key to be used.
profile
A dict with region, key and keyid, or a pillar key (string)
that contains a dict with region, key, and keyid.
tags
        Dict of key:value pairs of tags to set on the security group
.. versionadded:: 2016.3.0
"""
ret = {"name": name, "result": True, "comment": "", "changes": {}}
_ret = _security_group_present(
name,
description,
vpc_id=vpc_id,
vpc_name=vpc_name,
region=region,
key=key,
keyid=keyid,
profile=profile,
)
ret["changes"] = _ret["changes"]
ret["comment"] = " ".join([ret["comment"], _ret["comment"]])
if not _ret["result"]:
ret["result"] = _ret["result"]
if ret["result"] is False:
return ret
elif ret["result"] is None:
return ret
if rules is not None:
_ret = _rules_present(
name,
rules,
delete_ingress_rules,
vpc_id=vpc_id,
vpc_name=vpc_name,
region=region,
key=key,
keyid=keyid,
profile=profile,
)
ret["changes"] = dictupdate.update(ret["changes"], _ret["changes"])
ret["comment"] = " ".join([ret["comment"], _ret["comment"]])
if not _ret["result"]:
ret["result"] = _ret["result"]
if rules_egress is not None:
_ret = _rules_egress_present(
name,
rules_egress,
delete_egress_rules,
vpc_id=vpc_id,
vpc_name=vpc_name,
region=region,
key=key,
keyid=keyid,
profile=profile,
)
ret["changes"] = dictupdate.update(ret["changes"], _ret["changes"])
ret["comment"] = " ".join([ret["comment"], _ret["comment"]])
if not _ret["result"]:
ret["result"] = _ret["result"]
_ret = _tags_present(
name=name,
tags=tags,
vpc_id=vpc_id,
vpc_name=vpc_name,
region=region,
key=key,
keyid=keyid,
profile=profile,
)
ret["changes"] = dictupdate.update(ret["changes"], _ret["changes"])
ret["comment"] = " ".join([ret["comment"], _ret["comment"]])
if not _ret["result"]:
ret["result"] = _ret["result"]
return ret
def _security_group_present(
name,
description,
vpc_id=None,
vpc_name=None,
region=None,
key=None,
keyid=None,
profile=None,
):
"""
given a group name or a group name and vpc id (or vpc name):
1. determine if the group exists
2. if the group does not exist, creates the group
3. return the group's configuration and any changes made
"""
ret = {"result": True, "comment": "", "changes": {}}
exists = __salt__["boto_secgroup.exists"](
name, region, key, keyid, profile, vpc_id, vpc_name
)
if not exists:
if __opts__["test"]:
ret["comment"] = "Security group {} is set to be created.".format(name)
ret["result"] = None
return ret
created = __salt__["boto_secgroup.create"](
name=name,
description=description,
vpc_id=vpc_id,
vpc_name=vpc_name,
region=region,
key=key,
keyid=keyid,
profile=profile,
)
if created:
ret["changes"]["old"] = {"secgroup": None}
sg = __salt__["boto_secgroup.get_config"](
name=name,
group_id=None,
region=region,
key=key,
keyid=keyid,
profile=profile,
vpc_id=vpc_id,
vpc_name=vpc_name,
)
ret["changes"]["new"] = {"secgroup": sg}
ret["comment"] = "Security group {} created.".format(name)
else:
ret["result"] = False
ret["comment"] = "Failed to create {} security group.".format(name)
else:
ret["comment"] = "Security group {} present.".format(name)
return ret
def _split_rules(rules):
"""
Split rules with lists into individual rules.
We accept some attributes as lists or strings. The data we get back from
the execution module lists rules as individual rules. We need to split the
provided rules into individual rules to compare them.
"""
split = []
for rule in rules:
cidr_ip = rule.get("cidr_ip")
group_name = rule.get("source_group_name")
group_id = rule.get("source_group_group_id")
if cidr_ip and not isinstance(cidr_ip, str):
for ip in cidr_ip:
_rule = rule.copy()
_rule["cidr_ip"] = ip
split.append(_rule)
elif group_name and not isinstance(group_name, str):
for name in group_name:
_rule = rule.copy()
_rule["source_group_name"] = name
split.append(_rule)
elif group_id and not isinstance(group_id, str):
for _id in group_id:
_rule = rule.copy()
_rule["source_group_group_id"] = _id
split.append(_rule)
else:
split.append(rule)
return split
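# Illustrative sketch of _split_rules with hypothetical data (not from this module):
#
#   _split_rules([{"ip_protocol": "tcp", "from_port": 80, "to_port": 80,
#                  "cidr_ip": ["10.0.0.0/8", "192.168.0.0/16"]}])
#
# returns one rule per CIDR block, matching how boto reports rules:
#
#   [{"ip_protocol": "tcp", "from_port": 80, "to_port": 80, "cidr_ip": "10.0.0.0/8"},
#    {"ip_protocol": "tcp", "from_port": 80, "to_port": 80, "cidr_ip": "192.168.0.0/16"}]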
def _check_rule(rule, _rule):
"""
Check to see if two rules are the same. Needed to compare rules fetched
from boto, since they may not completely match rules defined in sls files
but may be functionally equivalent.
"""
# We need to alter what Boto returns if no ports are specified
# so that we can compare rules fairly.
#
# Boto returns None for from_port and to_port where we're required
# to pass in "-1" instead.
if _rule.get("from_port") is None:
_rule["from_port"] = -1
if _rule.get("to_port") is None:
_rule["to_port"] = -1
if (
rule["ip_protocol"] == _rule["ip_protocol"]
and str(rule["from_port"]) == str(_rule["from_port"])
and str(rule["to_port"]) == str(_rule["to_port"])
):
_cidr_ip = _rule.get("cidr_ip")
if _cidr_ip and _cidr_ip == rule.get("cidr_ip"):
return True
_owner_id = _rule.get("source_group_owner_id")
if _owner_id and _owner_id == rule.get("source_group_owner_id"):
return True
_group_id = _rule.get("source_group_group_id")
if _group_id and _group_id == rule.get("source_group_group_id"):
return True
_group_name = _rule.get("source_group_name")
        if _group_name and _group_name == rule.get("source_group_name"):
return True
return False
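# Illustrative sketch with hypothetical data: boto may report an ICMP rule as
# {"ip_protocol": "icmp", "from_port": None, "to_port": None, "cidr_ip": "10.0.0.0/8"}
# while the SLS rule uses -1 for both ports; after the None -> -1 normalisation
# above, _check_rule treats the two as the same rule and returns True.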
def _get_rule_changes(rules, _rules):
"""
given a list of desired rules (rules) and existing rules (_rules) return
a list of rules to delete (to_delete) and to create (to_create)
"""
to_delete = []
to_create = []
# for each rule in state file
# 1. validate rule
# 2. determine if rule exists in existing security group rules
for rule in rules:
try:
            ip_protocol = str(rule["ip_protocol"])
except KeyError:
raise SaltInvocationError(
"ip_protocol, to_port, and from_port are"
" required arguments for security group"
" rules."
)
supported_protocols = [
"tcp",
"6",
6,
"udp",
"17",
17,
"icmp",
"1",
1,
"all",
"-1",
-1,
]
if ip_protocol not in supported_protocols and (
not "{}".format(ip_protocol).isdigit() or int(ip_protocol) > 255
):
raise SaltInvocationError(
"Invalid ip_protocol {} specified in security group rule.".format(
ip_protocol
)
)
# For the 'all' case, we need to change the protocol name to '-1'.
if ip_protocol == "all":
rule["ip_protocol"] = "-1"
cidr_ip = rule.get("cidr_ip", None)
group_name = rule.get("source_group_name", None)
group_id = rule.get("source_group_group_id", None)
if cidr_ip and (group_id or group_name):
raise SaltInvocationError(
"cidr_ip and source groups can not both"
" be specified in security group rules."
)
if group_id and group_name:
raise SaltInvocationError(
"Either source_group_group_id or"
" source_group_name can be specified in"
" security group rules, but not both."
)
if not (cidr_ip or group_id or group_name):
raise SaltInvocationError(
"cidr_ip, source_group_group_id, or"
" source_group_name must be provided for"
" security group rules."
)
rule_found = False
# for each rule in existing security group ruleset determine if
# new rule exists
for _rule in _rules:
if _check_rule(rule, _rule):
rule_found = True
break
if not rule_found:
to_create.append(rule)
# for each rule in existing security group configuration
# 1. determine if rules needed to be deleted
for _rule in _rules:
rule_found = False
for rule in rules:
if _check_rule(rule, _rule):
rule_found = True
break
if not rule_found:
# Can only supply name or id, not both. Since we're deleting
# entries, it doesn't matter which we pick.
_rule.pop("source_group_name", None)
to_delete.append(_rule)
log.debug("Rules to be deleted: %s", to_delete)
log.debug("Rules to be created: %s", to_create)
return (to_delete, to_create)
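# Illustrative sketch with hypothetical data: if the desired rules allow only
# tcp/80 from 10.0.0.0/8 but the existing group also allows tcp/22 from
# 0.0.0.0/0, _get_rule_changes returns ([<tcp/22 rule>], []) -- the stray
# tcp/22 rule is queued for revocation and nothing new needs to be authorized.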
def _rules_present(
name,
rules,
delete_ingress_rules=True,
vpc_id=None,
vpc_name=None,
region=None,
key=None,
keyid=None,
profile=None,
):
"""
given a group name or group name and vpc_id (or vpc name):
1. get lists of desired rule changes (using _get_rule_changes)
    2. authorize/create missing rules
3. if delete_ingress_rules is True, delete/revoke non-requested rules
4. return 'old' and 'new' group rules
"""
ret = {"result": True, "comment": "", "changes": {}}
sg = __salt__["boto_secgroup.get_config"](
name=name,
group_id=None,
region=region,
key=key,
keyid=keyid,
profile=profile,
vpc_id=vpc_id,
vpc_name=vpc_name,
)
if not sg:
ret[
"comment"
] = "{} security group configuration could not be retrieved.".format(name)
ret["result"] = False
return ret
rules = _split_rules(rules)
if vpc_id or vpc_name:
for rule in rules:
_source_group_name = rule.get("source_group_name", None)
if _source_group_name:
_group_vpc_name = vpc_name
_group_vpc_id = vpc_id
_source_group_name_vpc = rule.get("source_group_name_vpc", None)
if _source_group_name_vpc:
_group_vpc_name = _source_group_name_vpc
_group_vpc_id = None
_group_id = __salt__["boto_secgroup.get_group_id"](
name=_source_group_name,
vpc_id=_group_vpc_id,
vpc_name=_group_vpc_name,
region=region,
key=key,
keyid=keyid,
profile=profile,
)
if not _group_id:
raise SaltInvocationError(
"source_group_name {} does not map to a valid "
"source group id.".format(_source_group_name)
)
rule["source_group_name"] = None
if _source_group_name_vpc:
rule.pop("source_group_name_vpc")
rule["source_group_group_id"] = _group_id
# rules = rules that exist in salt state
    # sg['rules'] = rules that exist in the current security group
to_delete, to_create = _get_rule_changes(rules, sg["rules"])
to_delete = to_delete if delete_ingress_rules else []
if to_create or to_delete:
if __opts__["test"]:
msg = """Security group {} set to have rules modified.
To be created: {}
To be deleted: {}""".format(
name, pprint.pformat(to_create), pprint.pformat(to_delete)
)
ret["comment"] = msg
ret["result"] = None
return ret
if to_delete:
deleted = True
for rule in to_delete:
_deleted = __salt__["boto_secgroup.revoke"](
name,
vpc_id=vpc_id,
vpc_name=vpc_name,
region=region,
key=key,
keyid=keyid,
profile=profile,
**rule
)
if not _deleted:
deleted = False
if deleted:
ret["comment"] = "Removed rules on {} security group.".format(name)
else:
ret["comment"] = "Failed to remove rules on {} security group.".format(
name
)
ret["result"] = False
if to_create:
created = True
for rule in to_create:
_created = __salt__["boto_secgroup.authorize"](
name,
vpc_id=vpc_id,
vpc_name=vpc_name,
region=region,
key=key,
keyid=keyid,
profile=profile,
**rule
)
if not _created:
created = False
if created:
ret["comment"] = " ".join(
[
ret["comment"],
"Created rules on {} security group.".format(name),
]
)
else:
ret["comment"] = " ".join(
[
ret["comment"],
"Failed to create rules on {} security group.".format(name),
]
)
ret["result"] = False
ret["changes"]["old"] = {"rules": sg["rules"]}
sg = __salt__["boto_secgroup.get_config"](
name=name,
group_id=None,
region=region,
key=key,
keyid=keyid,
profile=profile,
vpc_id=vpc_id,
vpc_name=vpc_name,
)
ret["changes"]["new"] = {"rules": sg["rules"]}
return ret
def _rules_egress_present(
name,
rules_egress,
delete_egress_rules=True,
vpc_id=None,
vpc_name=None,
region=None,
key=None,
keyid=None,
profile=None,
):
"""
given a group name or group name and vpc_id (or vpc name):
1. get lists of desired rule changes (using _get_rule_changes)
2. authorize/create missing rules
3. if delete_egress_rules is True, delete/revoke non-requested rules
4. return 'old' and 'new' group rules
"""
ret = {"result": True, "comment": "", "changes": {}}
sg = __salt__["boto_secgroup.get_config"](
name=name,
group_id=None,
region=region,
key=key,
keyid=keyid,
profile=profile,
vpc_id=vpc_id,
vpc_name=vpc_name,
)
if not sg:
ret[
"comment"
] = "{} security group configuration could not be retrieved.".format(name)
ret["result"] = False
return ret
rules_egress = _split_rules(rules_egress)
if vpc_id or vpc_name:
for rule in rules_egress:
_source_group_name = rule.get("source_group_name", None)
if _source_group_name:
_group_vpc_name = vpc_name
_group_vpc_id = vpc_id
_source_group_name_vpc = rule.get("source_group_name_vpc", None)
if _source_group_name_vpc:
_group_vpc_name = _source_group_name_vpc
_group_vpc_id = None
_group_id = __salt__["boto_secgroup.get_group_id"](
name=_source_group_name,
vpc_id=_group_vpc_id,
vpc_name=_group_vpc_name,
region=region,
key=key,
keyid=keyid,
profile=profile,
)
if not _group_id:
raise SaltInvocationError(
"source_group_name {} does not map to a valid "
"source group id.".format(_source_group_name)
)
rule["source_group_name"] = None
if _source_group_name_vpc:
rule.pop("source_group_name_vpc")
rule["source_group_group_id"] = _group_id
# rules_egress = rules that exist in salt state
    # sg['rules_egress'] = egress rules that exist in the current security group
to_delete, to_create = _get_rule_changes(rules_egress, sg["rules_egress"])
to_delete = to_delete if delete_egress_rules else []
if to_create or to_delete:
if __opts__["test"]:
msg = """Security group {} set to have rules modified.
To be created: {}
To be deleted: {}""".format(
name, pprint.pformat(to_create), pprint.pformat(to_delete)
)
ret["comment"] = msg
ret["result"] = None
return ret
if to_delete:
deleted = True
for rule in to_delete:
_deleted = __salt__["boto_secgroup.revoke"](
name,
vpc_id=vpc_id,
vpc_name=vpc_name,
region=region,
key=key,
keyid=keyid,
profile=profile,
egress=True,
**rule
)
if not _deleted:
deleted = False
if deleted:
ret["comment"] = " ".join(
[
ret["comment"],
"Removed egress rule on {} security group.".format(name),
]
)
else:
ret["comment"] = " ".join(
[
ret["comment"],
"Failed to remove egress rule on {} security group.".format(
name
),
]
)
ret["result"] = False
if to_create:
created = True
for rule in to_create:
_created = __salt__["boto_secgroup.authorize"](
name,
vpc_id=vpc_id,
vpc_name=vpc_name,
region=region,
key=key,
keyid=keyid,
profile=profile,
egress=True,
**rule
)
if not _created:
created = False
if created:
ret["comment"] = " ".join(
[
ret["comment"],
"Created egress rules on {} security group.".format(name),
]
)
else:
ret["comment"] = " ".join(
[
ret["comment"],
"Failed to create egress rules on {} security group.".format(
name
),
]
)
ret["result"] = False
ret["changes"]["old"] = {"rules_egress": sg["rules_egress"]}
sg = __salt__["boto_secgroup.get_config"](
name=name,
group_id=None,
region=region,
key=key,
keyid=keyid,
profile=profile,
vpc_id=vpc_id,
vpc_name=vpc_name,
)
ret["changes"]["new"] = {"rules_egress": sg["rules_egress"]}
return ret
def absent(
name, vpc_id=None, vpc_name=None, region=None, key=None, keyid=None, profile=None
):
"""
Ensure a security group with the specified name does not exist.
name
Name of the security group.
vpc_id
The ID of the VPC to remove the security group from, if any. Exclusive with vpc_name.
vpc_name
        The name of the VPC to remove the security group from, if any. Exclusive with vpc_id.
.. versionadded:: 2016.3.0
region
Region to connect to.
key
Secret key to be used.
keyid
Access key to be used.
profile
A dict with region, key and keyid, or a pillar key (string)
that contains a dict with region, key and keyid.
.. versionadded:: 2016.3.0
"""
ret = {"name": name, "result": True, "comment": "", "changes": {}}
sg = __salt__["boto_secgroup.get_config"](
name=name,
group_id=None,
region=region,
key=key,
keyid=keyid,
profile=profile,
vpc_id=vpc_id,
vpc_name=vpc_name,
)
if sg:
if __opts__["test"]:
ret["comment"] = "Security group {} is set to be removed.".format(name)
ret["result"] = None
return ret
deleted = __salt__["boto_secgroup.delete"](
name=name,
group_id=None,
region=region,
key=key,
keyid=keyid,
profile=profile,
vpc_id=vpc_id,
vpc_name=vpc_name,
)
if deleted:
ret["changes"]["old"] = {"secgroup": sg}
ret["changes"]["new"] = {"secgroup": None}
ret["comment"] = "Security group {} deleted.".format(name)
else:
ret["result"] = False
ret["comment"] = "Failed to delete {} security group.".format(name)
else:
ret["comment"] = "{} security group does not exist.".format(name)
return ret
def _tags_present(
name,
tags,
vpc_id=None,
vpc_name=None,
region=None,
key=None,
keyid=None,
profile=None,
):
"""
    helper function to ensure the security group's tags match the requested tags
"""
ret = {"result": True, "comment": "", "changes": {}}
if tags:
sg = __salt__["boto_secgroup.get_config"](
name=name,
group_id=None,
region=region,
key=key,
keyid=keyid,
profile=profile,
vpc_id=vpc_id,
vpc_name=vpc_name,
)
if not sg:
ret[
"comment"
] = "{} security group configuration could not be retrieved.".format(name)
ret["result"] = False
return ret
tags_to_add = tags
tags_to_update = {}
tags_to_remove = []
if sg.get("tags"):
for existing_tag in sg["tags"]:
if existing_tag not in tags:
if existing_tag not in tags_to_remove:
tags_to_remove.append(existing_tag)
else:
if tags[existing_tag] != sg["tags"][existing_tag]:
tags_to_update[existing_tag] = tags[existing_tag]
tags_to_add.pop(existing_tag)
if tags_to_remove:
if __opts__["test"]:
msg = "The following tag{} set to be removed: {}.".format(
("s are" if len(tags_to_remove) > 1 else " is"),
", ".join(tags_to_remove),
)
ret["comment"] = " ".join([ret["comment"], msg])
ret["result"] = None
else:
temp_ret = __salt__["boto_secgroup.delete_tags"](
tags_to_remove,
name=name,
group_id=None,
vpc_name=vpc_name,
vpc_id=vpc_id,
region=region,
key=key,
keyid=keyid,
profile=profile,
)
if not temp_ret:
ret["result"] = False
ret["comment"] = " ".join(
[
ret["comment"],
"Error attempting to delete tags {}.".format(
tags_to_remove
),
]
)
return ret
if "old" not in ret["changes"]:
ret["changes"] = dictupdate.update(
ret["changes"], {"old": {"tags": {}}}
)
for rem_tag in tags_to_remove:
ret["changes"]["old"]["tags"][rem_tag] = sg["tags"][rem_tag]
if tags_to_add or tags_to_update:
if __opts__["test"]:
if tags_to_add:
msg = "The following tag{} set to be added: {}.".format(
("s are" if len(tags_to_add.keys()) > 1 else " is"),
", ".join(tags_to_add.keys()),
)
ret["comment"] = " ".join([ret["comment"], msg])
ret["result"] = None
if tags_to_update:
msg = "The following tag {} set to be updated: {}.".format(
(
"values are"
if len(tags_to_update.keys()) > 1
else "value is"
),
", ".join(tags_to_update.keys()),
)
ret["comment"] = " ".join([ret["comment"], msg])
ret["result"] = None
else:
all_tag_changes = dictupdate.update(tags_to_add, tags_to_update)
temp_ret = __salt__["boto_secgroup.set_tags"](
all_tag_changes,
name=name,
group_id=None,
vpc_name=vpc_name,
vpc_id=vpc_id,
region=region,
key=key,
keyid=keyid,
profile=profile,
)
if not temp_ret:
ret["result"] = False
msg = "Error attempting to set tags."
ret["comment"] = " ".join([ret["comment"], msg])
return ret
if "old" not in ret["changes"]:
ret["changes"] = dictupdate.update(
ret["changes"], {"old": {"tags": {}}}
)
if "new" not in ret["changes"]:
ret["changes"] = dictupdate.update(
ret["changes"], {"new": {"tags": {}}}
)
for tag in all_tag_changes:
ret["changes"]["new"]["tags"][tag] = tags[tag]
if "tags" in sg:
if sg["tags"]:
if tag in sg["tags"]:
ret["changes"]["old"]["tags"][tag] = sg["tags"][tag]
if not tags_to_update and not tags_to_remove and not tags_to_add:
ret["comment"] = " ".join([ret["comment"], "Tags are already set."])
return ret
|
py | b4037bdd46054fe42273a6602ac9c2737d6ef3fa | from django.conf.urls import url
from .views import (
SearchPropertyView
)
urlpatterns = [
url(r'^$', SearchPropertyView.as_view(), name='query'),
]
|
py | b4037c5d3b29bcf56f77ed11f3ce1ea938fb13b5 |
from __future__ import absolute_import, unicode_literals
import json
import sys
from django.contrib.contenttypes.models import ContentType
from django.core.urlresolvers import reverse, reverse_lazy
from django.http import HttpResponse, HttpResponseRedirect, JsonResponse
from django.utils import six
from django.utils.safestring import mark_safe
from django.utils.translation import ugettext_lazy as _
from django.utils.translation import ugettext
from leonardo import leonardo, messages
from leonardo.utils import render_region
from leonardo.views import (ContextMixin, CreateView, ModalFormView,
ModelFormMixin, UpdateView)
from ..models import Page
from .forms import (WidgetDeleteForm, WidgetMoveForm, WidgetSelectForm,
WidgetUpdateForm, form_repository)
from .tables import WidgetDimensionTable
from .utils import get_widget_from_id
class WidgetViewMixin(object):
def handle_dimensions(self, obj):
"""save dimensions
"""
from .tables import WidgetDimensionFormset
from ..models import WidgetDimension
formset = WidgetDimensionFormset(
self.request.POST, prefix='dimensions')
if formset.is_valid():
formset.save()
else:
for form in formset.forms:
if form.is_valid():
if 'id' in form.cleaned_data:
form.save()
else:
# little ugly
data = form.cleaned_data
data['widget_type'] = \
ContentType.objects.get_for_model(obj)
data['widget_id'] = obj.id
data.pop('DELETE', None)
wd = WidgetDimension(**data)
# do not update widget view
wd.update_view = False
wd.save()
if formset.is_valid():
# delete objects
for obj in formset.deleted_objects:
                if obj.id is not None:
obj.delete()
return True
def get_page(self):
if not hasattr(self, '_page'):
self._page = self.model.objects.get(id=self.kwargs['page_id'])
return self._page
def get_form_kwargs(self):
kwargs = super(WidgetViewMixin, self).get_form_kwargs()
kwargs.update({
'request': self.request,
'model': self.model
})
return kwargs
def get_classes(self, **kwargs):
return ' '.join(getattr(self, 'classes', ['admin']))
class WidgetUpdateView(WidgetViewMixin, UpdateView):
template_name = 'leonardo/common/modal.html'
form_class = WidgetUpdateForm
def get_context_data(self, **kwargs):
context = super(WidgetUpdateView, self).get_context_data(**kwargs)
context['modal_classes'] = self.get_classes()
context['url'] = reverse('widget_update', kwargs=self.kwargs)
context['actions'] = [{
'url': reverse_lazy('page_update', args=(self.object.parent.id,)),
'icon': 'fa fa-pencil',
'classes': 'ajax-modal',
'description': _('Edit parent page')
},
{
'url': reverse_lazy('widget_delete', args=(
self.kwargs['cls_name'],
self.kwargs['id'],)),
'icon': 'fa fa-trash',
'classes': 'ajax-modal',
'description': _('Delete widget')
}]
return context
def get_form_class(self):
if not hasattr(self, '_form_class'):
self._form_class = form_repository.get_form(**self.kwargs)
return self._form_class
def get_form(self, form_class):
"""Returns an instance of the form to be used in this view."""
if not hasattr(self, '_form'):
kwargs = self.get_form_kwargs()
self._form = form_class(**kwargs)
return self._form
def form_valid(self, form):
response = super(WidgetUpdateView, self).form_valid(form)
obj = self.object
self.handle_dimensions(obj)
if not self.request.is_ajax():
return response
request = self.request
request.method = 'GET'
return JsonResponse(data={
'id': obj.fe_identifier,
'parent_slug': obj.parent.slug,
'content': self.model.objects.get(
id=self.kwargs["id"]).render_content({'request': request})
})
class WidgetCreateView(WidgetViewMixin, CreateView):
template_name = 'leonardo/common/modal.html'
def get_form_class(self):
if not hasattr(self, '_form_class'):
self._form_class = form_repository.get_form(**self.kwargs)
return self._form_class
def get_context_data(self, **kwargs):
context = super(WidgetCreateView, self).get_context_data(**kwargs)
context['table'] = WidgetDimensionTable(self.request, data=[])
# add extra context for template
context['url'] = reverse("widget_create_full", kwargs=self.kwargs)
context['modal_classes'] = self.get_classes()
return context
def form_valid(self, form):
try:
obj = form.save(commit=False)
obj.save(created=False)
self.handle_dimensions(obj)
success_url = self.get_success_url()
response = HttpResponseRedirect(success_url)
response['X-Horizon-Location'] = success_url
except Exception:
exc_info = sys.exc_info()
            six.reraise(*exc_info)
if not self.request.is_ajax():
return response
data = {
'id': obj.fe_identifier,
'ordering': obj.ordering
}
# this is not necessary if websocket is installed
if not leonardo.config.get_attr("is_websocket_enabled", None):
data['region_content'] = render_region(
obj, request=self.request, view=self)
data['region'] = '%s-%s' % (
obj.region,
obj.parent.id)
return JsonResponse(data=data)
def get_initial(self):
return self.kwargs
class WidgetPreCreateView(CreateView, WidgetViewMixin):
form_class = WidgetSelectForm
template_name = 'leonardo/common/modal.html'
def get_label(self):
return ugettext("Add new Widget to {}".format(self.get_page()))
def get_context_data(self, **kwargs):
context = super(WidgetPreCreateView, self).get_context_data(**kwargs)
context['modal_size'] = 'md'
context['form_submit'] = _('Continue')
context['modal_classes'] = self.get_classes()
return context
def get_form(self, form_class):
"""Returns an instance of the form to be used in this view."""
kwargs = self.kwargs
kwargs.update(self.get_form_kwargs())
kwargs.update({
'request': self.request,
'next_view': WidgetCreateView
})
return form_class(**kwargs)
class WidgetInfoView(UpdateView, WidgetViewMixin):
template_name = 'leonardo/common/modal.html'
form_class = WidgetUpdateForm
def get(self, request, cls_name, id):
widget = self.object
widget_info = """
<ul>
<li><span><b>widget:</b> {name} ({id})</span></li>
<li><span><b>parent:</b> {parent} ({parent_id})</span></li>
<li><span><b>region:</b> {region}</span></li>
<li><span><b>ordering:</b> {ordering}</span></li>
</ul>""".format(**{
'name': widget.__class__.__name__,
'id': widget.id,
'region': widget.region,
'parent': widget.parent,
'parent_id': widget.parent.pk,
'ordering': widget.ordering,
})
messages.info(request, mark_safe(widget_info))
return HttpResponse(mark_safe(widget_info))
class SuccessUrlMixin(object):
def get_success_url(self):
if self.request.META.get("HTTP_REFERER") != \
self.request.build_absolute_uri():
return self.request.META.get('HTTP_REFERER')
try:
success_url = self.object.parent.get_absolute_url()
        except Exception:
pass
else:
return success_url
        return super(SuccessUrlMixin, self).get_success_url()
class WidgetDeleteView(SuccessUrlMixin, ModalFormView,
ContextMixin, ModelFormMixin, WidgetViewMixin):
form_class = WidgetDeleteForm
template_name = 'leonardo/common/modal.html'
def get_label(self):
return ugettext("Delete {}".format(self.object._meta.verbose_name))
def get_context_data(self, **kwargs):
context = super(WidgetDeleteView, self).get_context_data(**kwargs)
# add extra context for template
context['url'] = self.request.build_absolute_uri()
context['modal_header'] = self.get_header()
context['title'] = self.get_header()
context['form_submit'] = self.get_label()
context['heading'] = self.get_header()
context['help_text'] = self.get_help_text()
context['modal_classes'] = self.get_classes()
return context
def form_valid(self, form):
obj = self.object
fe_identifier = obj.fe_identifier
obj.delete()
success_url = self.get_success_url()
response = HttpResponseRedirect(success_url)
response['X-Horizon-Location'] = success_url
if not self.request.is_ajax():
return response
return JsonResponse(data={
'id': fe_identifier,
})
def get_initial(self):
return self.kwargs
class WidgetActionMixin(SuccessUrlMixin):
template_name = 'leonardo/common/modal.html'
form_class = WidgetUpdateForm
success_url = "/"
def get(self, *args, **kwargs):
return self.post(*args, **kwargs)
class WidgetSortView(WidgetActionMixin, ModalFormView):
    '''Simple handler for jQuery sortable requests'''
def post(self, *args, **kwargs):
widgets = self.request.POST.getlist('widgets[]', [])
widget_list = []
try:
for widget_id in widgets:
widget = get_widget_from_id(widget_id)
if widget:
widget_list.append(widget)
        except Exception:
            messages.error(
                self.request, _('Error occurred while sorting widgets.'))
        for i, widget in enumerate(widget_list):
            widget.ordering = i
            widget.save()
messages.success(self.request, _('Widget sorting success.'))
return HttpResponse('ok')
class WidgetReorderView(WidgetActionMixin, ModalFormView, ModelFormMixin):
    '''Handle widget reordering: 0 = move to first, -1 = move up one, 1 = move down one, other values use widget.next_ordering'''
def post(self, *args, **kwargs):
widget = self.object
ordering = self.kwargs.get('ordering')
if int(ordering) == 0:
widget.ordering = 0
widget.save()
widgets = getattr(widget.parent.content, widget.region)
widgets = [w for w in widgets if w.id != widget.id]
for i, _widget in enumerate(widgets):
_widget.ordering = i + 1
_widget.save()
elif int(ordering) == -1:
widgets = getattr(widget.parent.content, widget.region)
widgets.sort(key=lambda w: w.ordering)
for i, w in enumerate(widgets):
if w.id == widget.id:
w.ordering = i - 1
w.save()
try:
next_widget = widgets[i - 1]
except IndexError:
pass
else:
next_widget.ordering += 1
next_widget.save()
elif int(ordering) == 1:
widgets = getattr(widget.parent.content, widget.region)
widgets.sort(key=lambda w: w.ordering)
for i, w in enumerate(widgets):
if w.id == widget.id:
w.ordering = i + 1
w.save()
try:
next_widget = widgets[i + 1]
except IndexError:
pass
else:
next_widget.ordering -= 1
next_widget.save()
else:
widget.ordering = widget.next_ordering
widget.save()
widgets = getattr(widget.parent.content, widget.region)
widgets = [w for w in widgets if w.id != widget.id]
widgets.sort(key=lambda w: w.ordering)
for i, _widget in enumerate(widgets):
_widget.ordering = i
_widget.save()
widget.parent.invalidate_cache()
messages.success(self.request, _('Widget was successfully moved.'))
success_url = self.get_success_url()
response = HttpResponseRedirect(success_url)
response['X-Horizon-Location'] = success_url
return response
class WidgetCopyView(WidgetReorderView):
'''Create widget copy.'''
def post(self, *args, **kwargs):
widget = self.object
widget.pk = None
widget.save(created=False)
# also copy dimensions
for dimension in self.model.objects.get(
id=self.kwargs["id"]).dimensions:
dimension.pk = None
dimension.widget_id = widget.id
dimension.save()
messages.success(self.request, _('Widget was successfully cloned.'))
# TODO try HTTP_REFERER
success_url = self.get_success_url()
response = HttpResponseRedirect(success_url)
response['X-Horizon-Location'] = success_url
return response
class JSReverseView(WidgetReorderView):
    '''Return a reversed URL for the given view name, args and kwargs.'''
def clean_kwargs(self, kwargs):
_kwargs = {}
for key, value in kwargs.items():
if value != '':
_kwargs[key] = value
return _kwargs
def post(self, *args, **kwargs):
view_name = self.request.POST.get('viewname')
args = json.loads(self.request.POST.get('args', "{}")).values()
kwargs = json.loads(self.request.POST.get('kwargs', "{}"))
return JsonResponse({'url': reverse(
view_name, args=args, kwargs=self.clean_kwargs(kwargs))})
class WidgetMoveView(WidgetUpdateView):
'''Move action'''
form_class = WidgetMoveForm
def get_form_class(self):
if not hasattr(self, '_form_class'):
kw = self.kwargs
kw['form_cls'] = self.form_class
kw['widgets'] = self.form_class.Meta.widgets
self._form_class = form_repository.get_generic_form(**self.kwargs)
return self._form_class
def get_form(self, form_class):
"""Returns an instance of the form to be used in this view."""
if not hasattr(self, '_form'):
kwargs = self.get_form_kwargs()
self._form = form_class(instance=self.object, **kwargs)
return self._form
def form_valid(self, form):
obj = self.object
obj.parent = form.cleaned_data['parent']
obj.region = form.cleaned_data['region']
obj.save()
obj.parent.save()
if not self.request.is_ajax():
success_url = obj.parent.get_absolute_url()
response = HttpResponseRedirect(success_url)
            response['X-Horizon-Location'] = success_url
            return response
return JsonResponse(data={
'needs_reload': True,
# 'target': obj.parent.get_absolute_url(),
})
|
py | b4037c9784bb2abe1920b667278a7ba129466a31 | from app.docs.v2 import jwt_header
AUTH_CHECK_GET = {
    'tags': ['[Mixed] JWT related'],
    'description': 'Checks whether the given Access Token is valid (i.e. whether the user can be described as \'logged in\').',
'parameters': [jwt_header],
'responses': {
'200': {
            'description': 'The Access Token is valid',
},
'204': {
            'description': 'The Access Token is not valid'
}
}
}
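# Hedged note: this dict looks like a flasgger/Swagger operation spec; if so,
# it would typically be attached to the view with @swag_from(AUTH_CHECK_GET).
# That usage is an assumption, not something confirmed by this file.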
|
py | b4037e79d4e7c8eab544d05fca51aab884c29e30 | #
# PySNMP MIB module HUAWEI-LswRSTP-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/HUAWEI-LswRSTP-MIB
# Produced by pysmi-0.3.4 at Wed May 1 13:46:25 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
Integer, ObjectIdentifier, OctetString = mibBuilder.importSymbols("ASN1", "Integer", "ObjectIdentifier", "OctetString")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ValueRangeConstraint, ValueSizeConstraint, SingleValueConstraint, ConstraintsUnion, ConstraintsIntersection = mibBuilder.importSymbols("ASN1-REFINEMENT", "ValueRangeConstraint", "ValueSizeConstraint", "SingleValueConstraint", "ConstraintsUnion", "ConstraintsIntersection")
dot1dStpPortEntry, dot1dStpPort = mibBuilder.importSymbols("BRIDGE-MIB", "dot1dStpPortEntry", "dot1dStpPort")
lswCommon, = mibBuilder.importSymbols("HUAWEI-3COM-OID-MIB", "lswCommon")
NotificationGroup, ModuleCompliance = mibBuilder.importSymbols("SNMPv2-CONF", "NotificationGroup", "ModuleCompliance")
ModuleIdentity, Gauge32, ObjectIdentity, Counter32, MibScalar, MibTable, MibTableRow, MibTableColumn, IpAddress, Unsigned32, Integer32, Bits, NotificationType, iso, TimeTicks, MibIdentifier, Counter64 = mibBuilder.importSymbols("SNMPv2-SMI", "ModuleIdentity", "Gauge32", "ObjectIdentity", "Counter32", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "IpAddress", "Unsigned32", "Integer32", "Bits", "NotificationType", "iso", "TimeTicks", "MibIdentifier", "Counter64")
TextualConvention, TruthValue, DisplayString, MacAddress = mibBuilder.importSymbols("SNMPv2-TC", "TextualConvention", "TruthValue", "DisplayString", "MacAddress")
hwLswRstpMib = ModuleIdentity((1, 3, 6, 1, 4, 1, 2011, 2, 23, 1, 6))
hwLswRstpMib.setRevisions(('2001-06-29 00:00',))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
if mibBuilder.loadTexts: hwLswRstpMib.setRevisionsDescriptions(('',))
if mibBuilder.loadTexts: hwLswRstpMib.setLastUpdated('200106290000Z')
if mibBuilder.loadTexts: hwLswRstpMib.setOrganization('')
if mibBuilder.loadTexts: hwLswRstpMib.setContactInfo('')
if mibBuilder.loadTexts: hwLswRstpMib.setDescription('')
class EnabledStatus(TextualConvention, Integer32):
description = 'A simple status value for the object.'
status = 'current'
subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(1, 2))
namedValues = NamedValues(("enabled", 1), ("disabled", 2))
hwLswRstpMibObject = MibIdentifier((1, 3, 6, 1, 4, 1, 2011, 2, 23, 1, 6, 1))
hwdot1dStpStatus = MibScalar((1, 3, 6, 1, 4, 1, 2011, 2, 23, 1, 6, 1, 1), EnabledStatus()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwdot1dStpStatus.setStatus('current')
if mibBuilder.loadTexts: hwdot1dStpStatus.setDescription(' Bridge STP enabled/disabled state')
hwdot1dStpForceVersion = MibScalar((1, 3, 6, 1, 4, 1, 2011, 2, 23, 1, 6, 1, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 2))).clone(namedValues=NamedValues(("stp", 0), ("rstp", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwdot1dStpForceVersion.setStatus('current')
if mibBuilder.loadTexts: hwdot1dStpForceVersion.setDescription(' Running mode of the bridge RSTP state machine')
hwdot1dStpDiameter = MibScalar((1, 3, 6, 1, 4, 1, 2011, 2, 23, 1, 6, 1, 3), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 7))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwdot1dStpDiameter.setStatus('current')
if mibBuilder.loadTexts: hwdot1dStpDiameter.setDescription(' Permitted amount of bridges between any two ends on the network.')
hwdot1dStpRootBridgeAddress = MibScalar((1, 3, 6, 1, 4, 1, 2011, 2, 23, 1, 6, 1, 4), MacAddress()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwdot1dStpRootBridgeAddress.setStatus('current')
if mibBuilder.loadTexts: hwdot1dStpRootBridgeAddress.setDescription(' MAC address of the root bridge')
hwDot1dStpBpduGuard = MibScalar((1, 3, 6, 1, 4, 1, 2011, 2, 23, 1, 6, 1, 6), EnabledStatus()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwDot1dStpBpduGuard.setStatus('current')
if mibBuilder.loadTexts: hwDot1dStpBpduGuard.setDescription(' If BPDU guard enabled. The edge port will discard illegal BPDU when enabled')
hwDot1dStpRootType = MibScalar((1, 3, 6, 1, 4, 1, 2011, 2, 23, 1, 6, 1, 7), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("normal", 1), ("primary", 2), ("secondary", 3)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwDot1dStpRootType.setStatus('current')
if mibBuilder.loadTexts: hwDot1dStpRootType.setDescription(' Root type of the bridge')
hwDot1dTimeOutFactor = MibScalar((1, 3, 6, 1, 4, 1, 2011, 2, 23, 1, 6, 1, 8), Integer32().subtype(subtypeSpec=ValueRangeConstraint(3, 7))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwDot1dTimeOutFactor.setStatus('current')
if mibBuilder.loadTexts: hwDot1dTimeOutFactor.setDescription(' Time Out Factor of the bridge.')
hwDot1dStpPathCostStandard = MibScalar((1, 3, 6, 1, 4, 1, 2011, 2, 23, 1, 6, 1, 9), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("dot1d-1998", 1), ("dot1t", 2), ("legacy", 3)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwDot1dStpPathCostStandard.setStatus('current')
if mibBuilder.loadTexts: hwDot1dStpPathCostStandard.setDescription(" Path Cost Standard of the bridge. Value 'dot1d-1998' is IEEE 802.1d standard in 1998, value 'dot1t' is IEEE 802.1t standard, and value 'legacy' is a private legacy standard.")
hwdot1dStpPortXTable = MibTable((1, 3, 6, 1, 4, 1, 2011, 2, 23, 1, 6, 1, 5), )
if mibBuilder.loadTexts: hwdot1dStpPortXTable.setStatus('current')
if mibBuilder.loadTexts: hwdot1dStpPortXTable.setDescription('RSTP extended information of the port ')
hwdot1dStpPortXEntry = MibTableRow((1, 3, 6, 1, 4, 1, 2011, 2, 23, 1, 6, 1, 5, 1), )
dot1dStpPortEntry.registerAugmentions(("HUAWEI-LswRSTP-MIB", "hwdot1dStpPortXEntry"))
hwdot1dStpPortXEntry.setIndexNames(*dot1dStpPortEntry.getIndexNames())
if mibBuilder.loadTexts: hwdot1dStpPortXEntry.setStatus('current')
if mibBuilder.loadTexts: hwdot1dStpPortXEntry.setDescription(' RSTP extended information of the port ')
hwdot1dStpPortStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 2, 23, 1, 6, 1, 5, 1, 1), EnabledStatus()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwdot1dStpPortStatus.setStatus('current')
if mibBuilder.loadTexts: hwdot1dStpPortStatus.setDescription(' RSTP status of the port')
hwdot1dStpPortEdgeport = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 2, 23, 1, 6, 1, 5, 1, 2), TruthValue()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwdot1dStpPortEdgeport.setStatus('current')
if mibBuilder.loadTexts: hwdot1dStpPortEdgeport.setDescription(' Whether the port can be an edge port')
hwdot1dStpPortPointToPoint = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 2, 23, 1, 6, 1, 5, 1, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("forceTrue", 1), ("forceFalse", 2), ("auto", 3)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwdot1dStpPortPointToPoint.setStatus('current')
if mibBuilder.loadTexts: hwdot1dStpPortPointToPoint.setDescription(" It is the administrative value indicates whether the port can be connected to a point-to-point link or not. If the value is 'auto', the operative value of a point-to-point link state is determined by device itself, and can be read from hwdot1dStpOperPortPointToPoint.")
hwdot1dStpMcheck = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 2, 23, 1, 6, 1, 5, 1, 4), TruthValue()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwdot1dStpMcheck.setStatus('current')
if mibBuilder.loadTexts: hwdot1dStpMcheck.setDescription(' Check if the port transfer state machine enters')
hwdot1dStpTransLimit = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 2, 23, 1, 6, 1, 5, 1, 5), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 255))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwdot1dStpTransLimit.setStatus('current')
if mibBuilder.loadTexts: hwdot1dStpTransLimit.setDescription(' Packet transmission limit of the bridge in a duration of Hello Time.')
hwdot1dStpRXStpBPDU = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 2, 23, 1, 6, 1, 5, 1, 6), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwdot1dStpRXStpBPDU.setStatus('current')
if mibBuilder.loadTexts: hwdot1dStpRXStpBPDU.setDescription(' Number of STP BPDU received ')
hwdot1dStpTXStpBPDU = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 2, 23, 1, 6, 1, 5, 1, 7), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwdot1dStpTXStpBPDU.setStatus('current')
if mibBuilder.loadTexts: hwdot1dStpTXStpBPDU.setDescription(' Number of STP BPDU transmitted ')
hwdot1dStpRXTCNBPDU = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 2, 23, 1, 6, 1, 5, 1, 8), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwdot1dStpRXTCNBPDU.setStatus('current')
if mibBuilder.loadTexts: hwdot1dStpRXTCNBPDU.setDescription(' Number of TCN BPDU received ')
hwdot1dStpTXTCNBPDU = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 2, 23, 1, 6, 1, 5, 1, 9), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwdot1dStpTXTCNBPDU.setStatus('current')
if mibBuilder.loadTexts: hwdot1dStpTXTCNBPDU.setDescription(' Number of TCN BPDU transmitted ')
hwdot1dStpRXRSTPBPDU = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 2, 23, 1, 6, 1, 5, 1, 10), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwdot1dStpRXRSTPBPDU.setStatus('current')
if mibBuilder.loadTexts: hwdot1dStpRXRSTPBPDU.setDescription('Number of RSTP BPDU received')
hwdot1dStpTXRSTPBPDU = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 2, 23, 1, 6, 1, 5, 1, 11), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwdot1dStpTXRSTPBPDU.setStatus('current')
if mibBuilder.loadTexts: hwdot1dStpTXRSTPBPDU.setDescription(' Number of RSTP BPDU transmitted ')
hwdot1dStpClearStatistics = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 2, 23, 1, 6, 1, 5, 1, 12), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1))).clone(namedValues=NamedValues(("clear", 1)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwdot1dStpClearStatistics.setStatus('current')
if mibBuilder.loadTexts: hwdot1dStpClearStatistics.setDescription('Clear RSTP statistics. Read operation not supported. ')
hwdot1dSetStpDefaultPortCost = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 2, 23, 1, 6, 1, 5, 1, 13), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1))).clone(namedValues=NamedValues(("enable", 1)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwdot1dSetStpDefaultPortCost.setStatus('current')
if mibBuilder.loadTexts: hwdot1dSetStpDefaultPortCost.setDescription('Set PathCost back to the default setting. Read operation not supported.')
hwdot1dStpRootGuard = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 2, 23, 1, 6, 1, 5, 1, 14), EnabledStatus()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwdot1dStpRootGuard.setStatus('current')
if mibBuilder.loadTexts: hwdot1dStpRootGuard.setDescription(' If the port guard root bridge. Other bridge which want to be root can not become root through this port if enabled. ')
hwdot1dStpLoopGuard = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 2, 23, 1, 6, 1, 5, 1, 15), EnabledStatus()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwdot1dStpLoopGuard.setStatus('current')
if mibBuilder.loadTexts: hwdot1dStpLoopGuard.setDescription(' Loop guard function that keep a root port or an alternate port in discarding state while the information on the port is aged out.')
hwdot1dStpPortBlockedReason = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 2, 23, 1, 6, 1, 5, 1, 16), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5))).clone(namedValues=NamedValues(("notBlock", 1), ("blockForProtocol", 2), ("blockForRootGuard", 3), ("blockForBPDUGuard", 4), ("blockForLoopGuard", 5)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwdot1dStpPortBlockedReason.setStatus('current')
if mibBuilder.loadTexts: hwdot1dStpPortBlockedReason.setDescription(' Record the block reason of the port. notBlock (1) means that the port is not in block state,. blockForProtocol (2) means that the port is blocked by stp protocol to avoid loop. blockForRootGuard(3) means that the root guard flag of bridge is set and a better message received from the port,and the port is blocked. blockForBPDUGuard(4) means that the port has been configured as an edge port and receive a BPDU and thus blocked. blockForLoopGuard(5) means that the port is blocked for loopguarded. ')
hwdot1dStpRXTCBPDU = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 2, 23, 1, 6, 1, 5, 1, 17), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwdot1dStpRXTCBPDU.setStatus('current')
if mibBuilder.loadTexts: hwdot1dStpRXTCBPDU.setDescription(' The number of received TC BPDUs ')
hwdot1dStpPortSendingBPDUType = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 2, 23, 1, 6, 1, 5, 1, 18), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 2))).clone(namedValues=NamedValues(("stp", 0), ("rstp", 2)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwdot1dStpPortSendingBPDUType.setStatus('current')
if mibBuilder.loadTexts: hwdot1dStpPortSendingBPDUType.setDescription(' Type of BPDU which the port is sending. ')
hwdot1dStpOperPortPointToPoint = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 2, 23, 1, 6, 1, 5, 1, 19), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("true", 1), ("false", 2)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwdot1dStpOperPortPointToPoint.setStatus('current')
if mibBuilder.loadTexts: hwdot1dStpOperPortPointToPoint.setDescription(' This object indicates whether the port has connected to a point-to-point link or not. The administrative value should be read from hwdot1dStpPortPointToPoint. ')
hwRstpEventsV2 = ObjectIdentity((1, 3, 6, 1, 4, 1, 2011, 2, 23, 1, 6, 1, 0))
if mibBuilder.loadTexts: hwRstpEventsV2.setStatus('current')
if mibBuilder.loadTexts: hwRstpEventsV2.setDescription('Definition point for RSTP notifications.')
hwRstpBpduGuarded = NotificationType((1, 3, 6, 1, 4, 1, 2011, 2, 23, 1, 6, 1, 0, 1)).setObjects(("BRIDGE-MIB", "dot1dStpPort"))
if mibBuilder.loadTexts: hwRstpBpduGuarded.setStatus('current')
if mibBuilder.loadTexts: hwRstpBpduGuarded.setDescription('The SNMP trap that is generated when an edge port of the BPDU-guard switch receives BPDU packets.')
hwRstpRootGuarded = NotificationType((1, 3, 6, 1, 4, 1, 2011, 2, 23, 1, 6, 1, 0, 2)).setObjects(("BRIDGE-MIB", "dot1dStpPort"))
if mibBuilder.loadTexts: hwRstpRootGuarded.setStatus('current')
if mibBuilder.loadTexts: hwRstpRootGuarded.setDescription('The SNMP trap that is generated when a root-guard port receives a superior bpdu.')
hwRstpBridgeLostRootPrimary = NotificationType((1, 3, 6, 1, 4, 1, 2011, 2, 23, 1, 6, 1, 0, 3))
if mibBuilder.loadTexts: hwRstpBridgeLostRootPrimary.setStatus('current')
if mibBuilder.loadTexts: hwRstpBridgeLostRootPrimary.setDescription('The SNMP trap that is generated when the bridge is no longer the root bridge of the spanning tree. Another switch with higher priority has already been the root bridge. ')
hwRstpLoopGuarded = NotificationType((1, 3, 6, 1, 4, 1, 2011, 2, 23, 1, 6, 1, 0, 4)).setObjects(("BRIDGE-MIB", "dot1dStpPort"))
if mibBuilder.loadTexts: hwRstpLoopGuarded.setStatus('current')
if mibBuilder.loadTexts: hwRstpLoopGuarded.setDescription('The SNMP trap that is generated when a loop-guard port is aged out .')
hwdot1dStpIgnoredVlanTable = MibTable((1, 3, 6, 1, 4, 1, 2011, 2, 23, 1, 6, 1, 10), )
if mibBuilder.loadTexts: hwdot1dStpIgnoredVlanTable.setStatus('current')
if mibBuilder.loadTexts: hwdot1dStpIgnoredVlanTable.setDescription('RSTP extended information of vlan ')
hwdot1dStpIgnoredVlanEntry = MibTableRow((1, 3, 6, 1, 4, 1, 2011, 2, 23, 1, 6, 1, 10, 1), ).setIndexNames((0, "HUAWEI-LswRSTP-MIB", "hwdot1dVlan"))
if mibBuilder.loadTexts: hwdot1dStpIgnoredVlanEntry.setStatus('current')
if mibBuilder.loadTexts: hwdot1dStpIgnoredVlanEntry.setDescription(' RSTP extended information of the vlan ')
hwdot1dVlan = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 2, 23, 1, 6, 1, 10, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 4094))).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwdot1dVlan.setStatus('current')
if mibBuilder.loadTexts: hwdot1dVlan.setDescription(' Vlan id supported')
hwdot1dStpIgnore = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 2, 23, 1, 6, 1, 10, 1, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("enable", 1), ("disable", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwdot1dStpIgnore.setStatus('current')
if mibBuilder.loadTexts: hwdot1dStpIgnore.setDescription(' Whether the vlan is stp Ignored')
mibBuilder.exportSymbols("HUAWEI-LswRSTP-MIB", hwdot1dStpTransLimit=hwdot1dStpTransLimit, hwdot1dStpPortBlockedReason=hwdot1dStpPortBlockedReason, hwdot1dStpIgnoredVlanTable=hwdot1dStpIgnoredVlanTable, hwdot1dStpPortSendingBPDUType=hwdot1dStpPortSendingBPDUType, PYSNMP_MODULE_ID=hwLswRstpMib, EnabledStatus=EnabledStatus, hwdot1dStpRXTCBPDU=hwdot1dStpRXTCBPDU, hwdot1dStpDiameter=hwdot1dStpDiameter, hwdot1dVlan=hwdot1dVlan, hwRstpEventsV2=hwRstpEventsV2, hwDot1dStpBpduGuard=hwDot1dStpBpduGuard, hwdot1dStpTXTCNBPDU=hwdot1dStpTXTCNBPDU, hwRstpLoopGuarded=hwRstpLoopGuarded, hwdot1dStpPortXEntry=hwdot1dStpPortXEntry, hwdot1dStpTXStpBPDU=hwdot1dStpTXStpBPDU, hwdot1dStpPortStatus=hwdot1dStpPortStatus, hwLswRstpMibObject=hwLswRstpMibObject, hwDot1dStpPathCostStandard=hwDot1dStpPathCostStandard, hwdot1dStpLoopGuard=hwdot1dStpLoopGuard, hwdot1dStpIgnore=hwdot1dStpIgnore, hwDot1dStpRootType=hwDot1dStpRootType, hwdot1dStpRXStpBPDU=hwdot1dStpRXStpBPDU, hwDot1dTimeOutFactor=hwDot1dTimeOutFactor, hwdot1dStpRXTCNBPDU=hwdot1dStpRXTCNBPDU, hwdot1dStpRXRSTPBPDU=hwdot1dStpRXRSTPBPDU, hwdot1dStpTXRSTPBPDU=hwdot1dStpTXRSTPBPDU, hwdot1dStpOperPortPointToPoint=hwdot1dStpOperPortPointToPoint, hwdot1dStpForceVersion=hwdot1dStpForceVersion, hwdot1dStpIgnoredVlanEntry=hwdot1dStpIgnoredVlanEntry, hwLswRstpMib=hwLswRstpMib, hwdot1dStpRootBridgeAddress=hwdot1dStpRootBridgeAddress, hwRstpRootGuarded=hwRstpRootGuarded, hwRstpBridgeLostRootPrimary=hwRstpBridgeLostRootPrimary, hwdot1dStpRootGuard=hwdot1dStpRootGuard, hwdot1dSetStpDefaultPortCost=hwdot1dSetStpDefaultPortCost, hwdot1dStpStatus=hwdot1dStpStatus, hwdot1dStpMcheck=hwdot1dStpMcheck, hwdot1dStpPortPointToPoint=hwdot1dStpPortPointToPoint, hwRstpBpduGuarded=hwRstpBpduGuarded, hwdot1dStpPortXTable=hwdot1dStpPortXTable, hwdot1dStpClearStatistics=hwdot1dStpClearStatistics, hwdot1dStpPortEdgeport=hwdot1dStpPortEdgeport)
|
py | b4037ea2bad6b58e202f2ed7ead5b521fc00dce9 | scores = [1,2,3,4,5,6,7,8,9,0,11,12,13,14,15]
x = 0
while x < len(scores):
    print(scores[x])
    x = x + 1
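# Equivalent, more idiomatic iteration (same output as the loop above):
# for score in scores:
#     print(score)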
|
py | b4037edd92340c22b512cf252cefd5c780fd69f7 | from bs4 import BeautifulSoup
from django.forms import (
BaseForm,
BaseFormSet,
BoundField,
CheckboxInput,
CheckboxSelectMultiple,
DateInput,
EmailInput,
FileInput,
MultiWidget,
NumberInput,
PasswordInput,
RadioSelect,
Select,
SelectDateWidget,
TextInput,
)
from django.utils.html import conditional_escape, escape, strip_tags
from django.utils.safestring import mark_safe
from .bootstrap import get_bootstrap_setting
from .exceptions import BootstrapError
from .forms import (
FORM_GROUP_CLASS,
is_widget_with_placeholder,
render_field,
render_form,
render_form_group,
render_label,
)
from .text import text_value
from .utils import add_css_class, render_template_file
try:
# If Django is set up without a database, importing this widget gives RuntimeError
from django.contrib.auth.forms import ReadOnlyPasswordHashWidget
except RuntimeError:
ReadOnlyPasswordHashWidget = None
class BaseRenderer(object):
"""A content renderer."""
def __init__(self, *args, **kwargs):
self.layout = kwargs.get("layout", "")
self.form_group_class = kwargs.get("form_group_class", FORM_GROUP_CLASS)
self.field_class = kwargs.get("field_class", "")
self.label_class = kwargs.get("label_class", "")
self.show_help = kwargs.get("show_help", True)
self.show_label = kwargs.get("show_label", True)
self.exclude = kwargs.get("exclude", "")
self.set_placeholder = kwargs.get("set_placeholder", True)
self.size = self.parse_size(kwargs.get("size", ""))
self.horizontal_label_class = kwargs.get(
"horizontal_label_class", get_bootstrap_setting("horizontal_label_class")
)
self.horizontal_field_class = kwargs.get(
"horizontal_field_class", get_bootstrap_setting("horizontal_field_class")
)
def parse_size(self, size):
size = text_value(size).lower().strip()
if size in ("sm", "small"):
return "small"
if size in ("lg", "large"):
return "large"
if size in ("md", "medium", ""):
return "medium"
raise BootstrapError('Invalid value "%s" for parameter "size" (expected "sm", "md", "lg" or "").' % size)
def get_size_class(self, prefix="form-control"):
if self.size == "small":
return prefix + "-sm"
if self.size == "large":
return prefix + "-lg"
return ""
def _render(self):
return ""
def render(self):
return mark_safe(self._render())
class FormsetRenderer(BaseRenderer):
"""Default formset renderer."""
def __init__(self, formset, *args, **kwargs):
if not isinstance(formset, BaseFormSet):
raise BootstrapError('Parameter "formset" should contain a valid Django Formset.')
self.formset = formset
super().__init__(*args, **kwargs)
def render_management_form(self):
return text_value(self.formset.management_form)
def render_form(self, form, **kwargs):
return render_form(form, **kwargs)
def render_forms(self):
rendered_forms = []
for form in self.formset.forms:
rendered_forms.append(
self.render_form(
form,
layout=self.layout,
form_group_class=self.form_group_class,
field_class=self.field_class,
label_class=self.label_class,
show_label=self.show_label,
show_help=self.show_help,
exclude=self.exclude,
set_placeholder=self.set_placeholder,
size=self.size,
horizontal_label_class=self.horizontal_label_class,
horizontal_field_class=self.horizontal_field_class,
)
)
return "\n".join(rendered_forms)
def get_formset_errors(self):
return self.formset.non_form_errors()
def render_errors(self):
formset_errors = self.get_formset_errors()
if formset_errors:
return render_template_file(
"bootstrap5/form_errors.html",
context={"errors": formset_errors, "form": self.formset, "layout": self.layout},
)
return ""
def _render(self):
return "".join([self.render_errors(), self.render_management_form(), self.render_forms()])
class FormRenderer(BaseRenderer):
"""Default form renderer."""
def __init__(self, form, *args, **kwargs):
if not isinstance(form, BaseForm):
raise BootstrapError('Parameter "form" should contain a valid Django Form.')
self.form = form
super().__init__(*args, **kwargs)
self.error_css_class = kwargs.get("error_css_class", None)
self.required_css_class = kwargs.get("required_css_class", None)
self.bound_css_class = kwargs.get("bound_css_class", None)
self.alert_error_type = kwargs.get("alert_error_type", "non_fields")
self.form_check_class = kwargs.get("form_check_class", "form-check")
def render_fields(self):
rendered_fields = []
for field in self.form:
rendered_fields.append(
render_field(
field,
layout=self.layout,
form_group_class=self.form_group_class,
field_class=self.field_class,
label_class=self.label_class,
form_check_class=self.form_check_class,
show_label=self.show_label,
show_help=self.show_help,
exclude=self.exclude,
set_placeholder=self.set_placeholder,
size=self.size,
horizontal_label_class=self.horizontal_label_class,
horizontal_field_class=self.horizontal_field_class,
error_css_class=self.error_css_class,
required_css_class=self.required_css_class,
bound_css_class=self.bound_css_class,
)
)
return "\n".join(rendered_fields)
def get_fields_errors(self):
form_errors = []
for field in self.form:
if not field.is_hidden and field.errors:
form_errors += field.errors
return form_errors
def render_errors(self, type="all"):
form_errors = None
if type == "all":
form_errors = self.get_fields_errors() + self.form.non_field_errors()
elif type == "fields":
form_errors = self.get_fields_errors()
elif type == "non_fields":
form_errors = self.form.non_field_errors()
if form_errors:
return render_template_file(
"bootstrap5/form_errors.html",
context={"errors": form_errors, "form": self.form, "layout": self.layout, "type": type},
)
return ""
def _render(self):
return self.render_errors(self.alert_error_type) + self.render_fields()
class FieldRenderer(BaseRenderer):
"""Default field renderer."""
# These widgets will not be wrapped in a form-control class
WIDGETS_NO_FORM_CONTROL = (CheckboxInput, RadioSelect, CheckboxSelectMultiple, FileInput)
def __init__(self, field, *args, **kwargs):
if not isinstance(field, BoundField):
raise BootstrapError('Parameter "field" should contain a valid Django BoundField.')
self.field = field
super().__init__(*args, **kwargs)
self.widget = field.field.widget
self.is_multi_widget = isinstance(field.field.widget, MultiWidget)
self.initial_attrs = self.widget.attrs.copy()
self.field_help = text_value(mark_safe(field.help_text)) if self.show_help and field.help_text else ""
self.field_errors = [conditional_escape(text_value(error)) for error in field.errors]
self.form_check_class = kwargs.get("form_check_class", "form-check")
if "placeholder" in kwargs:
# Find the placeholder in kwargs, even if it's empty
self.placeholder = kwargs["placeholder"]
elif get_bootstrap_setting("set_placeholder"):
# Otherwise fall back to the field label when the set_placeholder setting is enabled
self.placeholder = field.label
else:
# Or just set it to empty
self.placeholder = ""
if self.placeholder:
self.placeholder = text_value(self.placeholder)
self.addon_before = kwargs.get("addon_before", self.widget.attrs.pop("addon_before", ""))
self.addon_after = kwargs.get("addon_after", self.widget.attrs.pop("addon_after", ""))
self.addon_before_class = kwargs.get(
"addon_before_class", self.widget.attrs.pop("addon_before_class", "input-group-text")
)
self.addon_after_class = kwargs.get(
"addon_after_class", self.widget.attrs.pop("addon_after_class", "input-group-text")
)
# These are set in Django or in the global BOOTSTRAP5 settings, and
# they can be overwritten in the template
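# e.g. (illustrative, hypothetical template usage):
#   {% bootstrap_field form.email error_css_class="is-invalid" %}
# would override the error class for that single field only.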
error_css_class = kwargs.get("error_css_class", None)
required_css_class = kwargs.get("required_css_class", None)
bound_css_class = kwargs.get("bound_css_class", None)
if error_css_class is not None:
self.error_css_class = error_css_class
else:
self.error_css_class = getattr(field.form, "error_css_class", get_bootstrap_setting("error_css_class"))
if required_css_class is not None:
self.required_css_class = required_css_class
else:
self.required_css_class = getattr(
field.form, "required_css_class", get_bootstrap_setting("required_css_class")
)
if bound_css_class is not None:
self.success_css_class = bound_css_class
else:
self.success_css_class = getattr(field.form, "bound_css_class", get_bootstrap_setting("success_css_class"))
# If the form is marked as form.empty_permitted, do not set required class
if self.field.form.empty_permitted:
self.required_css_class = ""
def restore_widget_attrs(self):
self.widget.attrs = self.initial_attrs.copy()
def add_class_attrs(self, widget=None):
if widget is None:
widget = self.widget
classes = widget.attrs.get("class", "")
if ReadOnlyPasswordHashWidget is not None and isinstance(widget, ReadOnlyPasswordHashWidget):
# Render this as a static control
classes = add_css_class(classes, "form-control-static", prepend=True)
elif not isinstance(widget, self.WIDGETS_NO_FORM_CONTROL):
classes = add_css_class(classes, "form-control", prepend=True)
# For these widget types, add the size class here
classes = add_css_class(classes, self.get_size_class())
elif isinstance(widget, CheckboxInput):
classes = add_css_class(classes, "form-check-input", prepend=True)
elif isinstance(widget, FileInput):
classes = add_css_class(classes, "form-control-file", prepend=True)
if self.field.errors:
if self.error_css_class:
classes = add_css_class(classes, self.error_css_class)
else:
if self.field.form.is_bound:
classes = add_css_class(classes, self.success_css_class)
widget.attrs["class"] = classes
def add_placeholder_attrs(self, widget=None):
if widget is None:
widget = self.widget
placeholder = widget.attrs.get("placeholder", self.placeholder)
if placeholder and self.set_placeholder and is_widget_with_placeholder(widget):
# TODO: Should this be stripped and/or escaped?
widget.attrs["placeholder"] = placeholder
def add_help_attrs(self, widget=None):
if widget is None:
widget = self.widget
if not isinstance(widget, CheckboxInput):
widget.attrs["title"] = widget.attrs.get("title", escape(strip_tags(self.field_help)))
def add_widget_attrs(self):
if self.is_multi_widget:
widgets = self.widget.widgets
else:
widgets = [self.widget]
for widget in widgets:
self.add_class_attrs(widget)
self.add_placeholder_attrs(widget)
self.add_help_attrs(widget)
def list_to_class(self, html, klass):
classes = add_css_class(klass, self.get_size_class())
mapping = [
("<ul", '<div class="{classes}"'.format(classes=classes)),
("</ul>", "</div>"),
("<li", '<div class="{form_check_class}"'.format(form_check_class=self.form_check_class)),
("</li>", "</div>"),
]
for k, v in mapping:
html = html.replace(k, v)
# Apply bootstrap5 classes to labels and inputs.
# A simple 'replace' isn't enough as we don't want to have several 'class' attr definition, which would happen
# if we tried to 'html.replace("input", "input class=...")'
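# Illustrative only (hypothetical markup): an input like
#   <ul id="id_x"><li><label for="id_x_0"><input type="radio" ...> A</label></li></ul>
# ends up roughly as
#   <div class="radio ..."><div class="form-check">
#     <label class="form-check-label" for="id_x_0"><input class="form-check-input" type="radio" ...> A</label>
#   </div></div>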
soup = BeautifulSoup(html, features="html.parser")
enclosing_div = soup.find("div", {"class": classes})
if enclosing_div:
for label in enclosing_div.find_all("label"):
label.attrs["class"] = label.attrs.get("class", []) + ["form-check-label"]
try:
label.input.attrs["class"] = label.input.attrs.get("class", []) + ["form-check-input"]
except AttributeError:
pass
return str(soup)
def add_checkbox_label(self, html):
return html + render_label(
content=self.field.label,
label_for=self.field.id_for_label,
label_title=escape(strip_tags(self.field_help)),
label_class="form-check-label",
)
def fix_date_select_input(self, html):
div1 = '<div class="col-4">'
div2 = "</div>"
html = html.replace("<select", div1 + "<select")
html = html.replace("</select>", "</select>" + div2)
return '<div class="row bootstrap5-multi-input">{html}</div>'.format(html=html)
def fix_file_input_label(self, html):
html = "<br>" + html
return html
def post_widget_render(self, html):
if isinstance(self.widget, RadioSelect):
html = self.list_to_class(html, "radio radio-success")
elif isinstance(self.widget, CheckboxSelectMultiple):
html = self.list_to_class(html, "checkbox")
elif isinstance(self.widget, SelectDateWidget):
html = self.fix_date_select_input(html)
elif isinstance(self.widget, CheckboxInput):
html = self.add_checkbox_label(html)
elif isinstance(self.widget, FileInput):
html = self.fix_file_input_label(html)
return html
def wrap_widget(self, html):
if isinstance(self.widget, CheckboxInput):
# Wrap checkboxes
# Note checkboxes do not get size classes, see #318
html = '<div class="form-check">{html}</div>'.format(html=html)
return html
def make_input_group_addon(self, inner_class, outer_class, content):
if not content:
return ""
if inner_class:
content = '<span class="{inner_class}">{content}</span>'.format(inner_class=inner_class, content=content)
return '<div class="{outer_class}">{content}</div>'.format(outer_class=outer_class, content=content)
@property
def is_input_group(self):
allowed_widget_types = (TextInput, PasswordInput, DateInput, NumberInput, Select, EmailInput)
return (self.addon_before or self.addon_after) and isinstance(self.widget, allowed_widget_types)
def make_input_group(self, html):
if self.is_input_group:
before = self.make_input_group_addon(self.addon_before_class, "input-group-prepend", self.addon_before)
after = self.make_input_group_addon(self.addon_after_class, "input-group-append", self.addon_after)
html = self.append_errors("{before}{html}{after}".format(before=before, html=html, after=after))
html = '<div class="input-group">{html}</div>'.format(html=html)
return html
def append_help(self, html):
field_help = self.field_help or None
if field_help:
help_html = render_template_file(
"bootstrap5/field_help_text.html",
context={
"field": self.field,
"field_help": field_help,
"layout": self.layout,
"show_help": self.show_help,
},
)
html += help_html
return html
def append_errors(self, html):
field_errors = self.field_errors
if field_errors:
errors_html = render_template_file(
"bootstrap5/field_errors.html",
context={
"field": self.field,
"field_errors": field_errors,
"layout": self.layout,
"show_help": self.show_help,
},
)
html += errors_html
return html
def append_to_field(self, html):
if isinstance(self.widget, CheckboxInput):
# we have already appended errors and help to checkboxes
# in append_to_checkbox_field
return html
if not self.is_input_group:
# we already appended errors for input groups in make_input_group
html = self.append_errors(html)
return self.append_help(html)
def append_to_checkbox_field(self, html):
if not isinstance(self.widget, CheckboxInput):
# we will append errors and help to normal fields later in append_to_field
return html
html = self.append_errors(html)
return self.append_help(html)
def get_field_class(self):
field_class = self.field_class
if not field_class and self.layout == "horizontal":
field_class = self.horizontal_field_class
return field_class
def wrap_field(self, html):
field_class = self.get_field_class()
if field_class:
html = '<div class="{field_class}">{html}</div>'.format(field_class=field_class, html=html)
return html
def get_label_class(self):
label_class = self.label_class
if not label_class and self.layout == "horizontal":
label_class = self.horizontal_label_class
label_class = add_css_class(label_class, "col-form-label")
label_class = text_value(label_class)
if not self.show_label or self.show_label == "sr-only":
label_class = add_css_class(label_class, "sr-only")
return label_class
def get_label(self):
if self.show_label == "skip":
return None
elif isinstance(self.widget, CheckboxInput):
label = None
else:
label = self.field.label
if self.layout == "horizontal" and not label:
return mark_safe(" ")
return label
def add_label(self, html):
label = self.get_label()
if label:
html = render_label(label, label_for=self.field.id_for_label, label_class=self.get_label_class()) + html
return html
def get_form_group_class(self):
form_group_class = self.form_group_class
if self.field.errors:
if self.error_css_class:
form_group_class = add_css_class(form_group_class, self.error_css_class)
else:
if self.field.form.is_bound:
form_group_class = add_css_class(form_group_class, self.success_css_class)
if self.field.field.required and self.required_css_class:
form_group_class = add_css_class(form_group_class, self.required_css_class)
if self.layout == "horizontal":
form_group_class = add_css_class(form_group_class, "row")
return form_group_class
def wrap_label_and_field(self, html):
return render_form_group(html, self.get_form_group_class())
def _render(self):
# See if we're not excluded
if self.field.name in self.exclude.replace(" ", "").split(","):
return ""
# Hidden input requires no special treatment
if self.field.is_hidden:
return text_value(self.field)
# Render the widget
self.add_widget_attrs()
html = self.field.as_widget(attrs=self.widget.attrs)
self.restore_widget_attrs()
# Start post render
html = self.post_widget_render(html)
html = self.append_to_checkbox_field(html)
html = self.wrap_widget(html)
html = self.make_input_group(html)
html = self.append_to_field(html)
html = self.wrap_field(html)
html = self.add_label(html)
html = self.wrap_label_and_field(html)
return html
class InlineFieldRenderer(FieldRenderer):
"""Inline field renderer."""
def add_error_attrs(self):
field_title = self.widget.attrs.get("title", "")
field_title += " " + " ".join([strip_tags(e) for e in self.field_errors])
self.widget.attrs["title"] = field_title.strip()
def add_widget_attrs(self):
super().add_widget_attrs()
self.add_error_attrs()
def append_to_field(self, html):
return html
def get_field_class(self):
return self.field_class
def get_label_class(self):
return add_css_class(self.label_class, "sr-only")
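# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the library): driving FieldRenderer by hand
# to inspect the CSS classes it would apply. Assumes Django is installed and
# that this module's own imports resolve as in django-bootstrap5; ContactForm
# and its "name" field are hypothetical.
# ---------------------------------------------------------------------------
if __name__ == "__main__":  # pragma: no cover
    import django
    from django import forms
    from django.conf import settings
    if not settings.configured:
        # Minimal settings so Django's form machinery works outside a project.
        settings.configure()
        django.setup()
    class ContactForm(forms.Form):
        name = forms.CharField()
    renderer = FieldRenderer(ContactForm()["name"])
    # Pure string helpers; no template rendering is needed for these.
    print(renderer.get_form_group_class())
    print(renderer.get_label_class())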
|
py | b4037ef8c2864cea22a202b2390b90a7e144685e | import datetime as date
from block import Block
def create_genesis_block():
# Manually construct a block with
# index zero and arbitrary previous hash
return Block(0, date.datetime.now(), "Genesis Block", "0")
def next_block(last_block):
this_index = last_block.index + 1
this_timestamp = date.datetime.now()
this_data = "Hey! I'm block " + str(this_index)
this_hash = last_block.hash
return Block(this_index, this_timestamp, this_data, this_hash)
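# Hedged usage sketch: chain a few blocks with the helpers above. Assumes the
# imported block.Block exposes the index and hash attributes that next_block()
# already relies on; nothing else is required.
if __name__ == "__main__":
    blockchain = [create_genesis_block()]
    for _ in range(5):
        blockchain.append(next_block(blockchain[-1]))
        latest = blockchain[-1]
        print("Block #{} has been added to the blockchain (hash: {})".format(latest.index, latest.hash))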
|
py | b40380180b06c9e2e735b1b2f6edaae96835f6c7 | import numpy
import pandas as pd
import pytest
from pymilvus import DataType
from base.client_base import TestcaseBase
from utils.util_log import test_log as log
from common import common_func as cf
from common import common_type as ct
from common.common_type import CaseLabel, CheckTasks
from utils.utils import *
from common import constants as cons
prefix = "collection"
exp_name = "name"
exp_schema = "schema"
exp_num = "num_entities"
exp_primary = "primary"
exp_shards_num = "shards_num"
default_schema = cf.gen_default_collection_schema()
default_binary_schema = cf.gen_default_binary_collection_schema()
default_shards_num = 2
uid_count = "collection_count"
tag = "collection_count_tag"
uid_stats = "get_collection_stats"
uid_create = "create_collection"
uid_describe = "describe_collection"
uid_drop = "drop_collection"
uid_has = "has_collection"
uid_list = "list_collections"
uid_load = "load_collection"
field_name = default_float_vec_field_name
default_single_query = {
"data": gen_vectors(1, default_dim),
"anns_field": default_float_vec_field_name,
"param": {"metric_type": "L2", "params": {"nprobe": 10}},
"limit": default_top_k,
}
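# NOTE (illustrative): default_single_query mirrors the keyword arguments of a
# pymilvus ORM search call, so it could be unpacked as e.g.
#   collection.search(**default_single_query)
# assuming a loaded Collection object named `collection`.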
class TestCollectionParams(TestcaseBase):
""" Test case of collection interface """
@pytest.fixture(scope="function", params=ct.get_invalid_strs)
def get_none_removed_invalid_strings(self, request):
if request.param is None:
pytest.skip("None schema is valid")
yield request.param
@pytest.fixture(scope="function", params=ct.get_invalid_strs)
def get_invalid_type_fields(self, request):
if isinstance(request.param, list):
pytest.skip("list is valid fields")
yield request.param
@pytest.fixture(scope="function", params=cf.gen_all_type_fields())
def get_unsupported_primary_field(self, request):
if request.param.dtype == DataType.INT64:
pytest.skip("int64 type is valid primary key")
yield request.param
@pytest.fixture(scope="function", params=ct.get_invalid_strs)
def get_invalid_dim(self, request):
if request.param == 1:
pytest.skip("1 is valid dim")
yield request.param
@pytest.mark.tags(CaseLabel.L0)
def test_collection(self):
"""
target: test collection with default schema
method: create collection with default schema
expected: assert collection property
"""
self._connect()
c_name = cf.gen_unique_str(prefix)
self.collection_wrap.init_collection(c_name, schema=default_schema,
check_task=CheckTasks.check_collection_property,
check_items={exp_name: c_name, exp_schema: default_schema, exp_num: 0,
exp_primary: ct.default_int64_field_name})
assert c_name in self.utility_wrap.list_collections()[0]
@pytest.mark.tags(CaseLabel.L0)
@pytest.mark.xfail(reason="exception not MilvusException")
def test_collection_empty_name(self):
"""
target: test collection with empty name
method: create collection with an empty name
expected: raise exception
"""
self._connect()
c_name = ""
error = {ct.err_code: 1, ct.err_msg: f'`collection_name` value is illegal'}
self.collection_wrap.init_collection(c_name, schema=default_schema, check_task=CheckTasks.err_res,
check_items=error)
@pytest.mark.tags(CaseLabel.L1)
@pytest.mark.xfail(reason="exception not MilvusException")
@pytest.mark.parametrize("name", [[], 1, [1, "2", 3], (1,), {1: 1}, None])
def test_collection_illegal_name(self, name):
"""
target: test collection with illegal name
method: create collection with illegal name
expected: raise exception
"""
self._connect()
error = {ct.err_code: 1, ct.err_msg: "`collection_name` value {} is illegal".format(name)}
self.collection_wrap.init_collection(name, schema=default_schema, check_task=CheckTasks.err_res,
check_items=error)
@pytest.mark.tags(CaseLabel.L1)
@pytest.mark.parametrize("name", ["12-s", "12 s", "(mn)", "中文", "%$#", "a".join("a" for i in range(256))])
def test_collection_invalid_name(self, name):
"""
target: test collection with invalid name
method: create collection with invalid name
expected: raise exception
"""
self._connect()
error = {ct.err_code: 1, ct.err_msg: "Invalid collection name: {}".format(name)}
self.collection_wrap.init_collection(name, schema=default_schema, check_task=CheckTasks.err_res,
check_items=error)
@pytest.mark.tags(CaseLabel.L0)
def test_collection_dup_name(self):
"""
target: test collection with dup name
method: create collection with dup name and none schema and data
expected: collection properties consistent
"""
self._connect()
c_name = cf.gen_unique_str(prefix)
collection_w = self.init_collection_wrap(name=c_name, check_task=CheckTasks.check_collection_property,
check_items={exp_name: c_name, exp_schema: default_schema})
self.collection_wrap.init_collection(collection_w.name)
assert collection_w.name == self.collection_wrap.name
assert collection_w.schema == self.collection_wrap.schema
assert collection_w.num_entities == self.collection_wrap.num_entities
assert collection_w.name in self.utility_wrap.list_collections()[0]
@pytest.mark.tags(CaseLabel.L1)
def test_collection_dup_name_with_desc(self):
"""
target: test collection with dup name
method: 1. default schema with desc 2. dup name collection
expected: desc consistent
"""
self._connect()
c_name = cf.gen_unique_str(prefix)
schema = cf.gen_default_collection_schema(description=ct.collection_desc)
collection_w = self.init_collection_wrap(name=c_name, schema=schema,
check_task=CheckTasks.check_collection_property,
check_items={exp_name: c_name, exp_schema: schema})
self.collection_wrap.init_collection(c_name,
check_task=CheckTasks.check_collection_property,
check_items={exp_name: c_name, exp_schema: schema})
assert collection_w.description == self.collection_wrap.description
@pytest.mark.tags(CaseLabel.L1)
def test_collection_dup_name_new_schema(self):
"""
target: test collection with dup name and new schema
method: 1.create collection with default schema
2. collection with dup name and new schema
expected: raise exception
"""
self._connect()
c_name = cf.gen_unique_str(prefix)
self.init_collection_wrap(name=c_name, check_task=CheckTasks.check_collection_property,
check_items={exp_name: c_name, exp_schema: default_schema})
fields = [cf.gen_int64_field(is_primary=True)]
schema = cf.gen_collection_schema(fields=fields)
error = {ct.err_code: 0, ct.err_msg: "The collection already exist, but the schema is not the same as the "
"schema passed in."}
self.collection_wrap.init_collection(c_name, schema=schema, check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L1)
def test_collection_dup_name_new_primary(self):
"""
target: test collection with dup name and new primary_field schema
method: 1.collection with default schema
2. collection with same fields and new primary_field schema
expected: raise exception
"""
self._connect()
c_name = cf.gen_unique_str(prefix)
int_field_one = cf.gen_int64_field()
int_field_two = cf.gen_int64_field(name="int2")
fields = [int_field_one, int_field_two, cf.gen_float_vec_field()]
schema = cf.gen_collection_schema(fields, primary_field=int_field_one.name)
collection_w = self.init_collection_wrap(name=c_name, schema=schema,
check_task=CheckTasks.check_collection_property,
check_items={exp_name: c_name, exp_schema: schema,
exp_primary: int_field_one.name})
new_schema = cf.gen_collection_schema(fields, primary_field=int_field_two.name)
error = {ct.err_code: 0, ct.err_msg: "The collection already exist, but the schema is not the same as the "
"schema passed in."}
self.collection_wrap.init_collection(c_name, schema=new_schema, check_task=CheckTasks.err_res,
check_items=error)
assert collection_w.primary_field.name == int_field_one.name
@pytest.mark.tags(CaseLabel.L1)
def test_collection_dup_name_new_dim(self):
"""
target: test collection with dup name and new dim schema
method: 1. default schema 2. schema with new dim
expected: raise exception
"""
self._connect()
new_dim = 120
c_name = cf.gen_unique_str(prefix)
collection_w = self.init_collection_wrap(name=c_name, check_task=CheckTasks.check_collection_property,
check_items={exp_name: c_name, exp_schema: default_schema})
schema = cf.gen_default_collection_schema()
new_fields = cf.gen_float_vec_field(dim=new_dim)
schema.fields[-1] = new_fields
error = {ct.err_code: 0, ct.err_msg: "The collection already exist, but the schema is not the same as the "
"schema passed in."}
self.collection_wrap.init_collection(c_name, schema=schema, check_task=CheckTasks.err_res, check_items=error)
dim = collection_w.schema.fields[-1].params['dim']
assert dim == ct.default_dim
@pytest.mark.tags(CaseLabel.L2)
def test_collection_dup_name_invalid_schema_type(self, get_none_removed_invalid_strings):
"""
target: test collection with dup name and invalid schema
method: 1. default schema 2. invalid schema
expected: raise exception
"""
self._connect()
c_name = cf.gen_unique_str(prefix)
collection_w = self.init_collection_wrap(name=c_name, check_task=CheckTasks.check_collection_property,
check_items={exp_name: c_name, exp_schema: default_schema})
error = {ct.err_code: 0, ct.err_msg: "Schema type must be schema.CollectionSchema"}
schema = get_none_removed_invalid_strings
self.collection_wrap.init_collection(collection_w.name, schema=schema,
check_task=CheckTasks.err_res, check_items=error)
assert collection_w.name == c_name
@pytest.mark.tags(CaseLabel.L1)
def test_collection_dup_name_same_schema(self):
"""
target: test collection with dup name and same schema
method: dup name and same schema
expected: two collection object is available
"""
self._connect()
c_name = cf.gen_unique_str(prefix)
collection_w = self.init_collection_wrap(name=c_name, schema=default_schema,
check_task=CheckTasks.check_collection_property,
check_items={exp_name: c_name, exp_schema: default_schema})
self.collection_wrap.init_collection(name=c_name, schema=default_schema,
check_task=CheckTasks.check_collection_property,
check_items={exp_name: c_name, exp_schema: default_schema})
assert collection_w.name == self.collection_wrap.name
@pytest.mark.tags(CaseLabel.L0)
def test_collection_none_schema(self):
"""
target: test collection with none schema
method: create collection with none schema
expected: raise exception
"""
self._connect()
c_name = cf.gen_unique_str(prefix)
error = {ct.err_code: 0, ct.err_msg: "Should be passed into the schema"}
self.collection_wrap.init_collection(c_name, schema=None, check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L0)
def test_collection_invalid_type_schema(self, get_none_removed_invalid_strings):
"""
target: test collection with invalid schema
method: create collection with non-CollectionSchema type schema
expected: raise exception
"""
self._connect()
c_name = cf.gen_unique_str(prefix)
error = {ct.err_code: 0, ct.err_msg: "Schema type must be schema.CollectionSchema"}
self.collection_wrap.init_collection(c_name, schema=get_none_removed_invalid_strings,
check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L1)
def test_collection_invalid_type_fields(self, get_invalid_type_fields):
"""
target: test collection with invalid fields type, non-list
method: create collection schema with non-list invalid fields
expected: exception
"""
self._connect()
fields = get_invalid_type_fields
error = {ct.err_code: 0, ct.err_msg: "The fields of schema must be type list"}
self.collection_schema_wrap.init_collection_schema(fields=fields,
check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L1)
def test_collection_with_unknown_type(self):
"""
target: test collection with unknown type
method: create with DataType.UNKNOWN
expected: raise exception
"""
self._connect()
error = {ct.err_code: 0, ct.err_msg: "Field dtype must be of DataType"}
self.field_schema_wrap.init_field_schema(name="unknown", dtype=DataType.UNKNOWN,
check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L1)
@pytest.mark.xfail(reason="exception not MilvusException")
@pytest.mark.parametrize("name", [[], 1, (1,), {1: 1}, "12-s"])
def test_collection_invalid_type_field(self, name):
"""
target: test collection with invalid field name
method: invalid string name
expected: raise exception
"""
self._connect()
c_name = cf.gen_unique_str(prefix)
field, _ = self.field_schema_wrap.init_field_schema(name=name, dtype=5, is_primary=True)
vec_field = cf.gen_float_vec_field()
schema = cf.gen_collection_schema(fields=[field, vec_field])
error = {ct.err_code: 1, ct.err_msg: "expected one of: bytes, unicode"}
self.collection_wrap.init_collection(c_name, schema=schema, check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L1)
@pytest.mark.parametrize("name", ["12-s", "12 s", "(mn)", "中文", "%$#", "a".join("a" for i in range(256))])
def test_collection_invalid_field_name(self, name):
"""
target: test collection with invalid field name
method: invalid string name
expected: raise exception
"""
self._connect()
c_name = cf.gen_unique_str(prefix)
field, _ = self.field_schema_wrap.init_field_schema(name=name, dtype=DataType.INT64, is_primary=True)
vec_field = cf.gen_float_vec_field()
schema = cf.gen_collection_schema(fields=[field, vec_field])
error = {ct.err_code: 1, ct.err_msg: "Invalid field name"}
self.collection_wrap.init_collection(c_name, schema=schema, check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L1)
@pytest.mark.xfail(reason="exception not MilvusException")
def test_collection_none_field_name(self):
"""
target: test field schema with None name
method: None field name
expected: raise exception
"""
self._connect()
c_name = cf.gen_unique_str(prefix)
field, _ = self.field_schema_wrap.init_field_schema(name=None, dtype=DataType.INT64, is_primary=True)
schema = cf.gen_collection_schema(fields=[field, cf.gen_float_vec_field()])
error = {ct.err_code: 1, ct.err_msg: "You should specify the name of field"}
self.collection_wrap.init_collection(c_name, schema=schema, check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L1)
@pytest.mark.parametrize("dtype", [6, [[]], {}, (), "", "a"])
def test_collection_invalid_field_type(self, dtype):
"""
target: test collection with invalid field type
method: invalid DataType
expected: raise exception
"""
self._connect()
error = {ct.err_code: 0, ct.err_msg: "Field dtype must be of DataType"}
self.field_schema_wrap.init_field_schema(name="test", dtype=dtype, is_primary=True,
check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L2)
@pytest.mark.xfail(reason="exception not MilvusException")
def test_collection_field_dtype_float_value(self):
"""
target: test field schema with a float value as dtype
method: create field with dtype=5.0
expected: raise exception
"""
self._connect()
c_name = cf.gen_unique_str(prefix)
field, _ = self.field_schema_wrap.init_field_schema(name=ct.default_int64_field_name, dtype=5.0,
is_primary=True)
schema = cf.gen_collection_schema(fields=[field, cf.gen_float_vec_field()])
error = {ct.err_code: 0, ct.err_msg: "Field type must be of DataType!"}
self.collection_wrap.init_collection(c_name, schema=schema, check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L0)
def test_collection_empty_fields(self):
"""
target: test collection with empty fields
method: create collection with fields = []
expected: exception
"""
self._connect()
error = {ct.err_code: 0, ct.err_msg: "Primary field must in dataframe."}
self.collection_schema_wrap.init_collection_schema(fields=[], primary_field=ct.default_int64_field_name,
check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L1)
def test_collection_dup_field(self):
"""
target: test collection with dup field name
method: Two FieldSchema have same name
expected: raise exception
"""
self._connect()
c_name = cf.gen_unique_str(prefix)
field_one = cf.gen_int64_field(is_primary=True)
field_two = cf.gen_int64_field()
schema = cf.gen_collection_schema(fields=[field_one, field_two, cf.gen_float_vec_field()])
error = {ct.err_code: 1, ct.err_msg: "duplicated field name"}
self.collection_wrap.init_collection(c_name, schema=schema, check_task=CheckTasks.err_res, check_items=error)
assert not self.utility_wrap.has_collection(c_name)[0]
@pytest.mark.tags(CaseLabel.L0)
@pytest.mark.parametrize("field", [cf.gen_float_vec_field(), cf.gen_binary_vec_field()])
def test_collection_only_vector_field(self, field):
"""
target: test collection just with vec field
method: create with float-vec fields
expected: raise exception
"""
self._connect()
error = {ct.err_code: 0, ct.err_msg: "Primary field must in dataframe"}
self.collection_schema_wrap.init_collection_schema([field], check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L1)
def test_collection_multi_float_vectors(self):
"""
target: test collection with multi float vectors
method: create collection with two float-vec fields
expected: create collection successfully
"""
self._connect()
c_name = cf.gen_unique_str(prefix)
fields = [cf.gen_int64_field(is_primary=True), cf.gen_float_vec_field(), cf.gen_float_vec_field(name="tmp")]
schema = cf.gen_collection_schema(fields=fields, auto_id=True)
self.collection_wrap.init_collection(c_name, schema=schema, check_task=CheckTasks.check_collection_property,
check_items={exp_name: c_name, exp_schema: schema})
@pytest.mark.tags(CaseLabel.L1)
def test_collection_mix_vectors(self):
"""
target: test collection with mix vectors
method: create with float and binary vec
expected: create collection successfully
"""
self._connect()
c_name = cf.gen_unique_str(prefix)
fields = [cf.gen_int64_field(is_primary=True), cf.gen_float_vec_field(), cf.gen_binary_vec_field()]
schema = cf.gen_collection_schema(fields=fields, auto_id=True)
self.collection_wrap.init_collection(c_name, schema=schema, check_task=CheckTasks.check_collection_property,
check_items={exp_name: c_name, exp_schema: schema})
@pytest.mark.tags(CaseLabel.L0)
def test_collection_without_vectors(self):
"""
target: test collection without vectors
method: create collection only with int field
expected: raise exception
"""
self._connect()
c_name = cf.gen_unique_str(prefix)
schema = cf.gen_collection_schema([cf.gen_int64_field(is_primary=True)])
error = {ct.err_code: 0, ct.err_msg: "No vector field is found."}
self.collection_wrap.init_collection(c_name, schema=schema, check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L1)
def test_collection_without_primary_field(self):
"""
target: test collection without primary field
method: no primary field specified in collection schema and fields
expected: raise exception
"""
self._connect()
int_fields, _ = self.field_schema_wrap.init_field_schema(name=ct.default_int64_field_name, dtype=DataType.INT64)
vec_fields, _ = self.field_schema_wrap.init_field_schema(name=ct.default_float_vec_field_name,
dtype=DataType.FLOAT_VECTOR, dim=ct.default_dim)
error = {ct.err_code: 0, ct.err_msg: "Primary field must in dataframe."}
self.collection_schema_wrap.init_collection_schema([int_fields, vec_fields],
check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L1)
def test_collection_is_primary_false(self):
"""
target: test collection with all is_primary false
method: set is_primary=False on all fields
expected: raise exception
"""
self._connect()
fields = [cf.gen_int64_field(is_primary=False), cf.gen_float_field(is_primary=False),
cf.gen_float_vec_field(is_primary=False)]
error = {ct.err_code: 0, ct.err_msg: "Primary field must in dataframe."}
self.collection_schema_wrap.init_collection_schema(fields, check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L1)
@pytest.mark.parametrize("is_primary", ct.get_invalid_strs)
def test_collection_invalid_is_primary(self, is_primary):
"""
target: test collection with invalid primary
method: define field with is_primary=non-bool
expected: raise exception
"""
self._connect()
name = cf.gen_unique_str(prefix)
error = {ct.err_code: 0, ct.err_msg: "Param is_primary must be bool type"}
self.field_schema_wrap.init_field_schema(name=name, dtype=DataType.INT64, is_primary=is_primary,
check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L1)
@pytest.mark.parametrize("primary_field", ["12-s", "12 s", "(mn)", "中文", "%$#", "a".join("a" for i in range(256))])
def test_collection_invalid_primary_field(self, primary_field):
"""
target: test collection with invalid primary_field
method: specify invalid string primary_field in collection schema
expected: raise exception
"""
self._connect()
fields = [cf.gen_int64_field(), cf.gen_float_vec_field()]
error = {ct.err_code: 0, ct.err_msg: "Primary field must in dataframe."}
self.collection_schema_wrap.init_collection_schema(fields=fields, primary_field=primary_field,
check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L1)
@pytest.mark.parametrize("primary_field", [[], 1, [1, "2", 3], (1,), {1: 1}, None])
def test_collection_non_string_primary_field(self, primary_field):
"""
target: test collection with non-string primary_field
method: primary_field type is not string
expected: raise exception
"""
self._connect()
fields = [cf.gen_int64_field(), cf.gen_float_vec_field()]
error = {ct.err_code: 0, ct.err_msg: "Primary field must in dataframe."}
self.collection_schema_wrap.init_collection_schema(fields, primary_field=primary_field,
check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L1)
def test_collection_not_existed_primary_field(self):
"""
target: test collection with a non-existent primary field
method: specify a non-existent field as primary_field
expected: raise exception
"""
self._connect()
fake_field = cf.gen_unique_str()
fields = [cf.gen_int64_field(), cf.gen_float_vec_field()]
error = {ct.err_code: 0, ct.err_msg: "Primary field must in dataframe."}
self.collection_schema_wrap.init_collection_schema(fields, primary_field=fake_field,
check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L0)
def test_collection_primary_in_schema(self):
"""
target: test collection with primary field
method: specify primary field in CollectionSchema
expected: collection.primary_field
"""
self._connect()
c_name = cf.gen_unique_str(prefix)
schema = cf.gen_default_collection_schema(primary_field=ct.default_int64_field_name)
self.collection_wrap.init_collection(c_name, schema=schema)
assert self.collection_wrap.primary_field.name == ct.default_int64_field_name
@pytest.mark.tags(CaseLabel.L0)
def test_collection_primary_in_field(self):
"""
target: test collection with primary field
method: specify primary field in FieldSchema
expected: collection.primary_field
"""
self._connect()
fields = [cf.gen_int64_field(is_primary=True), cf.gen_float_field(), cf.gen_float_vec_field()]
schema, _ = self.collection_schema_wrap.init_collection_schema(fields)
self.collection_wrap.init_collection(cf.gen_unique_str(prefix), schema=schema)
assert self.collection_wrap.primary_field.name == ct.default_int64_field_name
@pytest.mark.tags(CaseLabel.L1)
@pytest.mark.xfail(reason="exception not MilvusException")
def test_collection_unsupported_primary_field(self, get_unsupported_primary_field):
"""
target: test collection with unsupported primary field type
method: specify non-int64 as primary field
expected: raise exception
"""
self._connect()
field = get_unsupported_primary_field
vec_field = cf.gen_float_vec_field(name="vec")
error = {ct.err_code: 1, ct.err_msg: "Primary key type must be DataType.INT64."}
self.collection_schema_wrap.init_collection_schema(fields=[field, vec_field], primary_field=field.name,
check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L1)
def test_collection_multi_primary_fields(self):
"""
target: test collection with multi primary
method: collection with two primary fields
expected: raise exception
"""
self._connect()
int_field_one = cf.gen_int64_field(is_primary=True)
int_field_two = cf.gen_int64_field(name="int2", is_primary=True)
error = {ct.err_code: 0, ct.err_msg: "Primary key field can only be one."}
self.collection_schema_wrap.init_collection_schema(
fields=[int_field_one, int_field_two, cf.gen_float_vec_field()],
check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L1)
def test_collection_primary_inconsistent(self):
"""
target: test collection with different primary field setting
method: 1. set A field is_primary 2. set primary_field is B
expected: raise exception
"""
self._connect()
int_field_one = cf.gen_int64_field(is_primary=True)
int_field_two = cf.gen_int64_field(name="int2")
fields = [int_field_one, int_field_two, cf.gen_float_vec_field()]
error = {ct.err_code: 0, ct.err_msg: "Primary key field can only be one"}
self.collection_schema_wrap.init_collection_schema(fields, primary_field=int_field_two.name,
check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L1)
def test_collection_primary_consistent(self):
"""
target: test collection with both collection schema and field schema
method: 1. set A field is_primary 2.set primary_field is A
expected: verify primary field
"""
self._connect()
c_name = cf.gen_unique_str(prefix)
int_field_one = cf.gen_int64_field(is_primary=True)
schema = cf.gen_collection_schema(fields=[int_field_one, cf.gen_float_vec_field()],
primary_field=int_field_one.name)
self.collection_wrap.init_collection(c_name, schema=schema, check_task=CheckTasks.check_collection_property,
check_items={exp_name: c_name, exp_schema: schema})
@pytest.mark.tags(CaseLabel.L0)
@pytest.mark.parametrize("auto_id", [True, False])
def test_collection_auto_id_in_field_schema(self, auto_id):
"""
target: test collection with auto_id in field schema
method: specify auto_id True in field schema
expected: verify schema's auto_id
"""
self._connect()
c_name = cf.gen_unique_str(prefix)
int_field = cf.gen_int64_field(is_primary=True, auto_id=auto_id)
vec_field = cf.gen_float_vec_field(name='vec')
schema, _ = self.collection_schema_wrap.init_collection_schema([int_field, vec_field])
assert schema.auto_id == auto_id
self.collection_wrap.init_collection(c_name, schema=schema, check_task=CheckTasks.check_collection_property,
check_items={exp_name: c_name, exp_schema: schema})
@pytest.mark.tags(CaseLabel.L0)
@pytest.mark.parametrize("auto_id", [True, False])
def test_collection_auto_id_in_collection_schema(self, auto_id):
"""
target: test collection with auto_id in collection schema
method: specify auto_id True in collection schema
expected: verify schema auto_id and collection schema
"""
self._connect()
c_name = cf.gen_unique_str(prefix)
int_field = cf.gen_int64_field(is_primary=True)
vec_field = cf.gen_float_vec_field(name='vec')
schema, _ = self.collection_schema_wrap.init_collection_schema([int_field, vec_field], auto_id=auto_id)
assert schema.auto_id == auto_id
self.collection_wrap.init_collection(c_name, schema=schema, check_task=CheckTasks.check_collection_property,
check_items={exp_name: c_name, exp_schema: schema})
@pytest.mark.tags(CaseLabel.L0)
def test_collection_auto_id_non_primary_field(self):
"""
target: test collection set auto_id in non-primary field
method: set auto_id=True in non-primary field
expected: raise exception
"""
self._connect()
error = {ct.err_code: 0, ct.err_msg: "auto_id can only be specified on the primary key field"}
self.field_schema_wrap.init_field_schema(name=ct.default_int64_field_name, dtype=DataType.INT64, auto_id=True,
check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L1)
def test_collection_auto_id_false_non_primary(self):
"""
target: test collection set auto_id in non-primary field
method: set auto_id=False in non-primary field
expected: verify schema auto_id is False
"""
self._connect()
int_field_one = cf.gen_int64_field(is_primary=True)
int_field_two = cf.gen_int64_field(name='int2', auto_id=False)
fields = [int_field_one, int_field_two, cf.gen_float_vec_field()]
schema, _ = self.collection_schema_wrap.init_collection_schema(fields)
assert not schema.auto_id
@pytest.mark.tags(CaseLabel.L1)
def test_collection_auto_id_inconsistent(self):
"""
target: test collection auto_id with both collection schema and field schema
method: 1.set primary field auto_id=True in field schema 2.set auto_id=False in collection schema
expected: raise exception
"""
self._connect()
int_field = cf.gen_int64_field(is_primary=True, auto_id=True)
vec_field = cf.gen_float_vec_field(name='vec')
error = {ct.err_code: 0, ct.err_msg: "The auto_id of the collection is inconsistent with "
"the auto_id of the primary key field"}
self.collection_schema_wrap.init_collection_schema([int_field, vec_field], auto_id=False,
check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L1)
@pytest.mark.parametrize("auto_id", [True, False])
def test_collection_auto_id_consistent(self, auto_id):
"""
target: test collection auto_id with both collection schema and field schema
method: set auto_id=True/False both field and schema
expected: verify auto_id
"""
self._connect()
int_field = cf.gen_int64_field(is_primary=True, auto_id=auto_id)
vec_field = cf.gen_float_vec_field(name='vec')
schema, _ = self.collection_schema_wrap.init_collection_schema([int_field, vec_field], auto_id=auto_id)
assert schema.auto_id == auto_id
@pytest.mark.tags(CaseLabel.L1)
def test_collection_auto_id_none_in_field(self):
"""
target: test collection with auto_id is None
method: set auto_id=None
expected: raise exception
"""
self._connect()
error = {ct.err_code: 0, ct.err_msg: "Param auto_id must be bool type"}
self.field_schema_wrap.init_field_schema(name=ct.default_int64_field_name, dtype=DataType.INT64,
is_primary=True,
auto_id=None, check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L1)
@pytest.mark.parametrize("auto_id", ct.get_invalid_strs)
def test_collection_invalid_auto_id(self, auto_id):
"""
target: test collection with invalid auto_id
method: define field with auto_id=non-bool
expected: raise exception
"""
self._connect()
int_field = cf.gen_int64_field(is_primary=True)
vec_field = cf.gen_float_vec_field(name='vec')
error = {ct.err_code: 0, ct.err_msg: "Param auto_id must be bool type"}
self.collection_schema_wrap.init_collection_schema([int_field, vec_field], auto_id=auto_id,
check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L1)
def test_collection_multi_fields_auto_id(self):
"""
target: test collection auto_id with multi fields
method: specify auto_id=True for multi int64 fields
expected: raise exception
"""
self._connect()
error = {ct.err_code: 0, ct.err_msg: "auto_id can only be specified on the primary key field"}
cf.gen_int64_field(is_primary=True, auto_id=True)
self.field_schema_wrap.init_field_schema(name="int", dtype=DataType.INT64, auto_id=True,
check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L1)
@pytest.mark.parametrize("dtype", [DataType.FLOAT_VECTOR, DataType.BINARY_VECTOR])
def test_collection_vector_without_dim(self, dtype):
"""
target: test collection without dimension
method: define vector field without dim
expected: raise exception
"""
self._connect()
c_name = cf.gen_unique_str(prefix)
float_vec_field, _ = self.field_schema_wrap.init_field_schema(name="vec", dtype=dtype)
schema = cf.gen_collection_schema(fields=[cf.gen_int64_field(is_primary=True), float_vec_field])
error = {ct.err_code: 1, ct.err_msg: "dimension is not defined in field type params"}
self.collection_wrap.init_collection(c_name, schema=schema, check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L1)
@pytest.mark.xfail(reason="exception not MilvusException")
def test_collection_vector_invalid_dim(self, get_invalid_dim):
"""
target: test collection with invalid dimension
method: define float-vec field with invalid dimension
expected: raise exception
"""
self._connect()
c_name = cf.gen_unique_str(prefix)
float_vec_field = cf.gen_float_vec_field(dim=get_invalid_dim)
schema = cf.gen_collection_schema(fields=[cf.gen_int64_field(is_primary=True), float_vec_field])
error = {ct.err_code: 1, ct.err_msg: f'invalid dim: {get_invalid_dim}'}
self.collection_wrap.init_collection(c_name, schema=schema, check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L1)
@pytest.mark.parametrize("dim", [-1, 0, 32769])
def test_collection_vector_out_bounds_dim(self, dim):
"""
target: test collection with out of bounds dim
method: invalid dims -1, 0 and 32769
expected: raise exception
"""
self._connect()
c_name = cf.gen_unique_str(prefix)
float_vec_field = cf.gen_float_vec_field(dim=dim)
schema = cf.gen_collection_schema(fields=[cf.gen_int64_field(is_primary=True), float_vec_field])
error = {ct.err_code: 1, ct.err_msg: "invalid dimension: {}. should be in range 1 ~ 32768".format(dim)}
self.collection_wrap.init_collection(c_name, schema=schema, check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L1)
def test_collection_non_vector_field_dim(self):
"""
target: test collection with dim for non-vector field
method: define int64 field with dim
expected: no exception
"""
self._connect()
c_name = cf.gen_unique_str(prefix)
int_field, _ = self.field_schema_wrap.init_field_schema(name=ct.default_int64_field_name, dtype=DataType.INT64,
dim=ct.default_dim)
float_vec_field = cf.gen_float_vec_field()
schema = cf.gen_collection_schema(fields=[int_field, float_vec_field],
primary_field=ct.default_int64_field_name)
self.collection_wrap.init_collection(c_name, schema=schema, check_task=CheckTasks.check_collection_property,
check_items={exp_name: c_name, exp_schema: schema})
@pytest.mark.tags(CaseLabel.L1)
def test_collection_desc(self):
"""
target: test collection with description
method: create with description
expected: assert default description
"""
self._connect()
c_name = cf.gen_unique_str(prefix)
schema = cf.gen_default_collection_schema(description=ct.collection_desc)
self.collection_wrap.init_collection(c_name, schema=schema, check_task=CheckTasks.check_collection_property,
check_items={exp_name: c_name, exp_schema: schema})
@pytest.mark.tags(CaseLabel.L1)
@pytest.mark.xfail(reason="exception not MilvusException")
def test_collection_none_desc(self):
"""
target: test collection with none description
method: create with none description
expected: raise exception
"""
self._connect()
c_name = cf.gen_unique_str(prefix)
schema = cf.gen_default_collection_schema(description=None)
error = {ct.err_code: 1, ct.err_msg: "None has type NoneType, but expected one of: bytes, unicode"}
self.collection_wrap.init_collection(c_name, schema=schema, check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L1)
def test_collection_long_desc(self):
"""
target: test collection with long desc
method: create with long desc
expected: create collection successfully
"""
self._connect()
c_name = cf.gen_unique_str(prefix)
desc = "a".join("a" for _ in range(256))
schema = cf.gen_default_collection_schema(description=desc)
self.collection_wrap.init_collection(c_name, schema=schema,
check_task=CheckTasks.check_collection_property,
check_items={exp_name: c_name, exp_schema: schema})
@pytest.mark.tags(CaseLabel.L0)
def test_collection_binary(self):
"""
target: test collection with binary-vec
method: create collection with binary field
expected: assert binary field
"""
self._connect()
c_name = cf.gen_unique_str(prefix)
self.collection_wrap.init_collection(c_name, schema=default_binary_schema,
check_task=CheckTasks.check_collection_property,
check_items={exp_name: c_name, exp_schema: default_binary_schema})
assert c_name in self.utility_wrap.list_collections()[0]
@pytest.mark.tags(CaseLabel.L0)
def test_collection_shards_num_with_default_value(self):
"""
target: test collection with shards_num
method: create collection with shards_num
expected: no exception
"""
self._connect()
c_name = cf.gen_unique_str(prefix)
self.collection_wrap.init_collection(c_name, schema=default_schema, shards_num=default_shards_num,
check_task=CheckTasks.check_collection_property,
check_items={exp_name: c_name, exp_shards_num: default_shards_num})
assert c_name in self.utility_wrap.list_collections()[0]
@pytest.mark.tags(CaseLabel.L0)
@pytest.mark.parametrize("shards_num", [-256, 0, 10, 256])
def test_collection_shards_num_with_not_default_value(self, shards_num):
"""
target: test collection with shards_num
method: create collection with non-default shards_num
expected: no exception
"""
self._connect()
c_name = cf.gen_unique_str(prefix)
self.collection_wrap.init_collection(c_name, schema=default_schema, shards_num=shards_num,
check_task=CheckTasks.check_collection_property,
check_items={exp_name: c_name, exp_shards_num: shards_num})
assert c_name in self.utility_wrap.list_collections()[0]
@pytest.mark.tags(CaseLabel.L0)
def test_collection_shards_num_with_error_type(self):
"""
target: test collection with error type shards_num
method: create collection with error type shards_num
expected: raise exception
"""
self._connect()
c_name = cf.gen_unique_str(prefix)
error_type_shards_num = "2" # suppose to be int rather than str
error = {ct.err_code: -1, ct.err_msg: f"expected one of: int, long"}
self.collection_wrap.init_collection(c_name, schema=default_schema, shards_num=error_type_shards_num,
check_task=CheckTasks.err_res,
check_items=error)
class TestCollectionOperation(TestcaseBase):
"""
******************************************************************
The following cases are used to test collection interface operations
******************************************************************
"""
# def teardown_method(self):
# if self.collection_wrap is not None and self.collection_wrap.collection is not None:
# self.collection_wrap.drop()
@pytest.mark.tags(CaseLabel.L1)
def test_collection_without_connection(self):
"""
target: test collection without connection
method: 1.create collection after connection removed
expected: raise exception
"""
self._connect()
c_name = cf.gen_unique_str(prefix)
self.connection_wrap.remove_connection(ct.default_alias)
res_list, _ = self.connection_wrap.list_connections()
assert ct.default_alias not in res_list
error = {ct.err_code: 0, ct.err_msg: 'should create connect first'}
self.collection_wrap.init_collection(c_name, schema=default_schema,
check_task=CheckTasks.err_res, check_items=error)
assert self.collection_wrap.collection is None
@pytest.mark.tags(CaseLabel.L2)
def test_collection_multi_create_drop(self):
"""
target: test cycle creation and deletion of multiple collections
method: in a loop, collections are created and deleted sequentially
expected: no exception
"""
self._connect()
c_num = 20
for _ in range(c_num):
c_name = cf.gen_unique_str(prefix)
self.collection_wrap.init_collection(c_name, schema=default_schema,
check_task=CheckTasks.check_collection_property,
check_items={exp_name: c_name, exp_schema: default_schema})
self.collection_wrap.drop()
assert c_name not in self.utility_wrap.list_collections()[0]
@pytest.mark.tags(CaseLabel.L1)
def test_collection_dup_name_drop(self):
"""
target: test collection with dup name, and drop
method: 1. two dup name collection object
2. one object drop collection
expected: collection dropped
"""
self._connect()
c_name = cf.gen_unique_str(prefix)
collection_w = self.init_collection_wrap(name=c_name, check_task=CheckTasks.check_collection_property,
check_items={exp_name: c_name, exp_schema: default_schema})
self.collection_wrap.init_collection(c_name, check_task=CheckTasks.check_collection_property,
check_items={exp_name: c_name, exp_schema: default_schema})
self.collection_wrap.drop()
assert not self.utility_wrap.has_collection(c_name)[0]
error = {ct.err_code: 1, ct.err_msg: f'HasPartition failed: can\'t find collection: {c_name}'}
collection_w.has_partition("p", check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L1)
def test_collection_after_drop(self):
"""
target: test create collection after create and drop
method: 1. create a 2. drop a 3. re-create a
expected: no exception
"""
c_name = cf.gen_unique_str(prefix)
collection_w = self.init_collection_wrap(name=c_name, check_task=CheckTasks.check_collection_property,
check_items={exp_name: c_name, exp_schema: default_schema})
collection_w.drop()
assert not self.utility_wrap.has_collection(collection_w.name)[0]
self.init_collection_wrap(name=c_name, check_task=CheckTasks.check_collection_property,
check_items={exp_name: c_name, exp_schema: default_schema})
assert self.utility_wrap.has_collection(c_name)[0]
@pytest.mark.tags(CaseLabel.L2)
def test_collection_all_datatype_fields(self):
"""
target: test create collection with all dataType fields
method: create collection with all dataType schema
expected: create successfully
"""
self._connect()
fields = []
for k, v in DataType.__members__.items():
if v and v != DataType.UNKNOWN and v != DataType.FLOAT_VECTOR and v != DataType.BINARY_VECTOR:
field, _ = self.field_schema_wrap.init_field_schema(name=k.lower(), dtype=v)
fields.append(field)
fields.append(cf.gen_float_vec_field())
schema, _ = self.collection_schema_wrap.init_collection_schema(fields,
primary_field=ct.default_int64_field_name)
c_name = cf.gen_unique_str(prefix)
self.collection_wrap.init_collection(c_name, schema=schema, check_task=CheckTasks.check_collection_property,
check_items={exp_name: c_name, exp_schema: schema})
class TestCollectionDataframe(TestcaseBase):
"""
******************************************************************
The following cases are used to test construct_from_dataframe
******************************************************************
"""
@pytest.fixture(scope="function", params=ct.get_invalid_strs)
def get_non_df(self, request):
if request.param is None:
pytest.skip("skip None")
yield request.param
@pytest.mark.tags(CaseLabel.L0)
def test_construct_from_dataframe(self):
"""
target: test collection with dataframe data
method: create collection and insert with dataframe
expected: collection num entities equal to nb
"""
conn = self._connect()
c_name = cf.gen_unique_str(prefix)
df = cf.gen_default_dataframe_data(ct.default_nb)
self.collection_wrap.construct_from_dataframe(c_name, df, primary_field=ct.default_int64_field_name,
check_task=CheckTasks.check_collection_property,
check_items={exp_name: c_name, exp_schema: default_schema})
conn.flush([c_name])
assert self.collection_wrap.num_entities == ct.default_nb
@pytest.mark.tags(CaseLabel.L0)
def test_construct_from_binary_dataframe(self):
"""
target: test binary collection with dataframe
method: create binary collection with dataframe
expected: collection num entities equal to nb
"""
self._connect()
c_name = cf.gen_unique_str(prefix)
df, _ = cf.gen_default_binary_dataframe_data(nb=ct.default_nb)
self.collection_wrap.construct_from_dataframe(c_name, df, primary_field=ct.default_int64_field_name,
check_task=CheckTasks.check_collection_property,
check_items={exp_name: c_name, exp_schema: default_binary_schema})
assert self.collection_wrap.num_entities == ct.default_nb
@pytest.mark.tags(CaseLabel.L1)
def test_construct_from_none_dataframe(self):
"""
target: test create collection from a None dataframe
method: pass None as the dataframe when constructing the collection
expected: raise exception
"""
self._connect()
c_name = cf.gen_unique_str(prefix)
error = {ct.err_code: 0, ct.err_msg: "Dataframe can not be None."}
self.collection_wrap.construct_from_dataframe(c_name, None, check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L1)
def test_construct_from_dataframe_only_column(self):
"""
target: test collection with dataframe only columns
method: dataframe only has columns
expected: raise exception
"""
self._connect()
c_name = cf.gen_unique_str(prefix)
df = pd.DataFrame(columns=[ct.default_int64_field_name, ct.default_float_vec_field_name])
error = {ct.err_code: 0, ct.err_msg: "Cannot infer schema from empty dataframe"}
self.collection_wrap.construct_from_dataframe(c_name, df, primary_field=ct.default_int64_field_name,
check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L1)
def test_construct_from_inconsistent_dataframe(self):
"""
target: test collection with inconsistent data
method: construct from a dataframe whose column mixes value types
expected: raise exception
"""
self._connect()
c_name = cf.gen_unique_str(prefix)
# dataframe where column 'C' mixes value types across rows
mix_data = [(1, 2., [0.1, 0.2]), (2, 3., 4)]
df = pd.DataFrame(data=mix_data, columns=list("ABC"))
error = {ct.err_code: 0, ct.err_msg: "The data in the same column must be of the same type"}
self.collection_wrap.construct_from_dataframe(c_name, df, primary_field='A', check_task=CheckTasks.err_res,
check_items=error)
@pytest.mark.tags(CaseLabel.L1)
def test_construct_from_non_dataframe(self, get_non_df):
"""
target: test create collection by invalid dataframe
method: non-dataframe type create collection
expected: raise exception
"""
self._connect()
c_name = cf.gen_unique_str(prefix)
error = {ct.err_code: 0, ct.err_msg: "Data type must be pandas.DataFrame."}
df = get_non_df
self.collection_wrap.construct_from_dataframe(c_name, df, check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L1)
def test_construct_from_data_type_dataframe(self):
"""
target: test collection with a dataframe containing an unsupported data type
method: create with a dataframe that has a datetime column
expected: raise exception
"""
self._connect()
c_name = cf.gen_unique_str(prefix)
df = pd.DataFrame({"date": pd.date_range('20210101', periods=3), ct.default_int64_field_name: [1, 2, 3]})
error = {ct.err_code: 0, ct.err_msg: "Cannot infer schema from empty dataframe."}
self.collection_wrap.construct_from_dataframe(c_name, df, primary_field=ct.default_int64_field_name,
check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L1)
def test_construct_from_invalid_field_name(self):
"""
target: test collection with invalid field name
method: create with invalid field name dataframe
expected: raise exception
"""
self._connect()
c_name = cf.gen_unique_str(prefix)
df = pd.DataFrame({'%$#': cf.gen_vectors(3, 2), ct.default_int64_field_name: [1, 2, 3]})
error = {ct.err_code: 1, ct.err_msg: "Invalid field name"}
self.collection_wrap.construct_from_dataframe(c_name, df, primary_field=ct.default_int64_field_name,
check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L1)
def test_construct_none_primary_field(self):
"""
target: test collection with none primary field
method: primary_field is none
expected: raise exception
"""
self._connect()
c_name = cf.gen_unique_str(prefix)
df = cf.gen_default_dataframe_data(ct.default_nb)
error = {ct.err_code: 0, ct.err_msg: "Schema must have a primary key field."}
self.collection_wrap.construct_from_dataframe(c_name, df, primary_field=None,
check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L1)
def test_construct_not_existed_primary_field(self):
"""
target: test collection with a primary field that does not exist
method: specify a primary field that is not in the dataframe
expected: raise exception
"""
self._connect()
c_name = cf.gen_unique_str(prefix)
df = cf.gen_default_dataframe_data(ct.default_nb)
error = {ct.err_code: 0, ct.err_msg: "Primary field must in dataframe."}
self.collection_wrap.construct_from_dataframe(c_name, df, primary_field=c_name,
check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L1)
def test_construct_with_none_auto_id(self):
"""
target: test construct with auto_id set to None
method: pass auto_id=None when constructing the collection
expected: raise exception
"""
self._connect()
c_name = cf.gen_unique_str(prefix)
df = cf.gen_default_dataframe_data(ct.default_nb)
error = {ct.err_code: 0, ct.err_msg: "Param auto_id must be bool type"}
self.collection_wrap.construct_from_dataframe(c_name, df, primary_field=ct.default_int64_field_name,
auto_id=None, check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L1)
def test_construct_auto_id_true_insert(self):
"""
target: test construct with true auto_id
method: auto_id=True and insert values
expected: raise exception
"""
self._connect()
c_name = cf.gen_unique_str(prefix)
df = cf.gen_default_dataframe_data(nb=100)
error = {ct.err_code: 0, ct.err_msg: "Auto_id is True, primary field should not have data."}
self.collection_wrap.construct_from_dataframe(c_name, df, primary_field=ct.default_int64_field_name,
auto_id=True, check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L1)
def test_construct_auto_id_true_no_insert(self):
"""
target: test construct with true auto_id
method: auto_id=True and not insert ids(primary fields all values are None)
expected: verify num entities
"""
self._connect()
c_name = cf.gen_unique_str(prefix)
df = cf.gen_default_dataframe_data(ct.default_nb)
# df.drop(ct.default_int64_field_name, axis=1, inplace=True)
df[ct.default_int64_field_name] = None
self.collection_wrap.construct_from_dataframe(c_name, df, primary_field=ct.default_int64_field_name,
auto_id=True)
assert self.collection_wrap.num_entities == ct.default_nb
@pytest.mark.tags(CaseLabel.L2)
def test_construct_none_value_auto_id_true(self):
"""
target: test construct with none value, auto_id
method: df primary field with none value, auto_id=true
expected: primary keys are auto generated and num entities equals nb
"""
self._connect()
nb = 100
df = cf.gen_default_dataframe_data(nb)
df.iloc[:, 0] = numpy.NaN
res, _ = self.collection_wrap.construct_from_dataframe(cf.gen_unique_str(prefix), df,
primary_field=ct.default_int64_field_name, auto_id=True)
mutation_res = res[1]
assert cf._check_primary_keys(mutation_res.primary_keys, nb)
assert self.collection_wrap.num_entities == nb
@pytest.mark.tags(CaseLabel.L1)
def test_construct_auto_id_false(self):
"""
target: test construct with false auto_id
method: auto_id=False, primary_field correct
expected: verify auto_id
"""
self._connect()
c_name = cf.gen_unique_str(prefix)
df = cf.gen_default_dataframe_data(ct.default_nb)
self.collection_wrap.construct_from_dataframe(c_name, df, primary_field=ct.default_int64_field_name,
auto_id=False)
assert not self.collection_wrap.schema.auto_id
assert self.collection_wrap.num_entities == ct.default_nb
@pytest.mark.tags(CaseLabel.L1)
def test_construct_none_value_auto_id_false(self):
"""
target: test construct with none value, auto_id
method: df primary field with none value, auto_id=false
expected: raise exception
"""
self._connect()
nb = 100
df = cf.gen_default_dataframe_data(nb)
df.iloc[:, 0] = numpy.NaN
error = {ct.err_code: 0, ct.err_msg: "Primary key type must be DataType.INT64"}
self.collection_wrap.construct_from_dataframe(cf.gen_unique_str(prefix), df,
primary_field=ct.default_int64_field_name, auto_id=False,
check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L1)
def test_construct_auto_id_false_same_values(self):
"""
target: test construct with false auto_id and same value
method: auto_id=False, primary field same values
expected: verify num entities
"""
self._connect()
nb = 100
df = cf.gen_default_dataframe_data(nb)
df.iloc[1:, 0] = 1
res, _ = self.collection_wrap.construct_from_dataframe(cf.gen_unique_str(prefix), df,
primary_field=ct.default_int64_field_name, auto_id=False)
collection_w = res[0]
assert collection_w.num_entities == nb
mutation_res = res[1]
assert mutation_res.primary_keys == df[ct.default_int64_field_name].values.tolist()
@pytest.mark.tags(CaseLabel.L1)
def test_construct_auto_id_false_negative_values(self):
"""
target: test construct with negative values
method: auto_id=False, primary field values are negative
expected: verify num entities
"""
self._connect()
nb = 100
df = cf.gen_default_dataframe_data(nb)
new_values = pd.Series(data=[i for i in range(0, -nb, -1)])
df[ct.default_int64_field_name] = new_values
self.collection_wrap.construct_from_dataframe(cf.gen_unique_str(prefix), df,
primary_field=ct.default_int64_field_name, auto_id=False)
assert self.collection_wrap.num_entities == nb
@pytest.mark.tags(CaseLabel.L1)
def test_construct_from_dataframe_dup_name(self):
"""
target: test collection with dup name and insert dataframe
method: create collection with dup name, none schema, dataframe
expected: the two collection objects refer to the same collection and report the same entity count
"""
conn = self._connect()
c_name = cf.gen_unique_str(prefix)
collection_w = self.init_collection_wrap(name=c_name, primary_field=ct.default_int64_field_name,
check_task=CheckTasks.check_collection_property,
check_items={exp_name: c_name, exp_schema: default_schema})
df = cf.gen_default_dataframe_data(ct.default_nb)
self.collection_wrap.construct_from_dataframe(c_name, df, primary_field=ct.default_int64_field_name,
check_task=CheckTasks.check_collection_property,
check_items={exp_name: c_name, exp_schema: default_schema})
conn.flush([collection_w.name])
assert collection_w.num_entities == ct.default_nb
assert collection_w.num_entities == self.collection_wrap.num_entities
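# --- Illustrative sketch (not part of the test suite) -----------------------
# The dataframe cases above go through the collection wrapper. The helper
# below is a minimal, hedged sketch of the same flow using the pymilvus ORM
# directly; the connection parameters and the column names ("id", "vector")
# are assumptions for illustration only, and the exact return value of
# Collection.construct_from_dataframe may differ between pymilvus versions.
def _sketch_collection_from_dataframe(host="localhost", port="19530", dim=8, nb=10):
    import numpy as np
    import pandas as pd
    from pymilvus import connections, Collection

    connections.connect(alias="default", host=host, port=port)
    # Build a dataframe with an explicit int64 primary-key column and a
    # float-vector column; each vector cell is a python list of length `dim`.
    df = pd.DataFrame({
        "id": np.arange(nb, dtype=np.int64),
        "vector": [np.random.random(dim).tolist() for _ in range(nb)],
    })
    # primary_field selects the int64 column as the primary key; the schema
    # itself is inferred from the dataframe dtypes.
    result = Collection.construct_from_dataframe("sketch_df_collection", df,
                                                 primary_field="id")
    return result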
class TestCollectionCount:
"""
params are different nb values; some values may trigger a segment merge, others may not
"""
@pytest.fixture(
scope="function",
params=[
1,
1000,
2001
],
)
def insert_count(self, request):
yield request.param
"""
generate valid create_index params
"""
@pytest.fixture(
scope="function",
params=gen_simple_index()
)
def get_simple_index(self, request, connect):
return request.param
@pytest.mark.tags(CaseLabel.L2)
def test_count_without_connection(self, collection, dis_connect):
"""
target: test count_entities, without connection
method: calling count_entities with correct params, with a disconnected instance
expected: count_entities raise exception
"""
with pytest.raises(Exception) as e:
dis_connect.count_entities(collection)
@pytest.mark.tags(CaseLabel.L0)
def test_collection_count_no_vectors(self, connect, collection):
"""
target: test collection rows_count is correct or not, if collection is empty
method: create collection and no vectors in it,
assert the value returned by count_entities method is equal to 0
expected: the count is equal to 0
"""
stats = connect.get_collection_stats(collection)
assert stats[row_count] == 0
class TestCollectionCountIP:
"""
params are different nb values; some values may trigger a segment merge, others may not
"""
@pytest.fixture(
scope="function",
params=[
1,
1000,
2001
],
)
def insert_count(self, request):
yield request.param
"""
generate valid create_index params
"""
@pytest.fixture(
scope="function",
params=gen_simple_index()
)
def get_simple_index(self, request, connect):
request.param.update({"metric_type": "IP"})
return request.param
@pytest.mark.tags(CaseLabel.L2)
def test_collection_count_after_index_created(self, connect, collection, get_simple_index, insert_count):
"""
target: test count_entities, after index have been created
method: add vectors in db, and create index, then calling count_entities with correct params
expected: the row count equals the number of inserted entities
"""
entities = gen_entities(insert_count)
connect.insert(collection, entities)
connect.flush([collection])
connect.create_index(collection, default_float_vec_field_name, get_simple_index)
stats = connect.get_collection_stats(collection)
assert stats[row_count] == insert_count
class TestCollectionCountBinary:
"""
params are different nb values; some values may trigger a segment merge, others may not
"""
@pytest.fixture(
scope="function",
params=[
1,
1000,
2001
],
)
def insert_count(self, request):
yield request.param
@pytest.fixture(
scope="function",
params=gen_binary_index()
)
def get_jaccard_index(self, request, connect):
request.param["metric_type"] = "JACCARD"
return request.param
@pytest.fixture(
scope="function",
params=gen_binary_index()
)
def get_hamming_index(self, request, connect):
request.param["metric_type"] = "HAMMING"
return request.param
@pytest.fixture(
scope="function",
params=gen_simple_index()
)
def get_substructure_index(self, request, connect):
request.param["metric_type"] = "SUBSTRUCTURE"
return request.param
@pytest.fixture(
scope="function",
params=gen_simple_index()
)
def get_superstructure_index(self, request, connect):
request.param["metric_type"] = "SUPERSTRUCTURE"
return request.param
# TODO: need to update and enable
@pytest.mark.tags(CaseLabel.L2)
def test_collection_count_after_index_created_A(self, connect, binary_collection, get_hamming_index, insert_count):
"""
target: test count_entities, after index have been created
method: add vectors in db, and create index, then calling count_entities with correct params
expected: the row count equals the number of inserted entities
"""
raw_vectors, entities = gen_binary_entities(insert_count)
connect.insert(binary_collection, entities)
connect.flush([binary_collection])
# connect.load_collection(binary_collection)
connect.create_index(binary_collection, default_binary_vec_field_name, get_hamming_index)
stats = connect.get_collection_stats(binary_collection)
assert stats[row_count] == insert_count
@pytest.mark.tags(CaseLabel.L2)
def test_collection_count_no_entities(self, connect, binary_collection):
"""
target: test collection rows_count is correct or not, if collection is empty
method: create collection and no vectors in it,
assert the value returned by count_entities method is equal to 0
expected: the count is equal to 0
"""
stats = connect.get_collection_stats(binary_collection)
assert stats[row_count] == 0
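# --- Illustrative sketch (not part of the test suite) -----------------------
# The binary-collection cases above rely on gen_binary_entities(). As a
# hedged sketch of what such entities look like when built by hand with the
# pymilvus ORM, the helper below creates a collection with a BINARY_VECTOR
# field and inserts packed-bit vectors. The collection/field names and the
# connection parameters are illustrative assumptions.
def _sketch_binary_collection(host="localhost", port="19530", dim=128, nb=10):
    import numpy as np
    from pymilvus import (connections, Collection, CollectionSchema,
                          FieldSchema, DataType)

    connections.connect(alias="default", host=host, port=port)
    fields = [
        FieldSchema(name="id", dtype=DataType.INT64, is_primary=True),
        FieldSchema(name="bin_vec", dtype=DataType.BINARY_VECTOR, dim=dim),
    ]
    schema = CollectionSchema(fields, description="binary sketch")
    collection = Collection(name="sketch_binary_collection", schema=schema)
    # A binary vector of dimension `dim` occupies dim/8 bytes; pack random
    # bits into bytes for each entity.
    vectors = [np.packbits(np.random.randint(0, 2, dim)).tobytes()
               for _ in range(nb)]
    collection.insert([list(range(nb)), vectors])
    # Depending on the pymilvus version, reading num_entities may require or
    # implicitly trigger a flush before the inserted rows become visible.
    return collection.num_entities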
class TestCollectionMultiCollections:
"""
params are different nb values; some values may trigger a segment merge, others may not
"""
@pytest.fixture(
scope="function",
params=[
1,
1000,
2001
],
)
def insert_count(self, request):
yield request.param
@pytest.mark.tags(CaseLabel.L0)
def test_collection_count_multi_collections_l2(self, connect, insert_count):
"""
target: test collection rows_count is correct or not with multiple collections of L2
method: create collection and add entities in it,
assert the value returned by count_entities method is equal to length of entities
expected: the count is equal to the length of entities
"""
entities = gen_entities(insert_count)
collection_list = []
collection_num = 20
for i in range(collection_num):
collection_name = gen_unique_str(uid_count)
collection_list.append(collection_name)
connect.create_collection(collection_name, cons.default_fields)
connect.insert(collection_name, entities)
connect.flush(collection_list)
for i in range(collection_num):
stats = connect.get_collection_stats(collection_list[i])
assert stats[row_count] == insert_count
connect.drop_collection(collection_list[i])
@pytest.mark.tags(CaseLabel.L2)
def test_collection_count_multi_collections_binary(self, connect, binary_collection, insert_count):
"""
target: test collection rows_count is correct or not with multiple collections of JACCARD
method: create collection and add entities in it,
assert the value returned by count_entities method is equal to length of entities
expected: the count is equal to the length of entities
"""
raw_vectors, entities = gen_binary_entities(insert_count)
connect.insert(binary_collection, entities)
collection_list = []
collection_num = 20
for i in range(collection_num):
collection_name = gen_unique_str(uid_count)
collection_list.append(collection_name)
connect.create_collection(collection_name, cons.default_binary_fields)
connect.insert(collection_name, entities)
connect.flush(collection_list)
for i in range(collection_num):
stats = connect.get_collection_stats(collection_list[i])
assert stats[row_count] == insert_count
connect.drop_collection(collection_list[i])
@pytest.mark.tags(CaseLabel.L2)
def test_collection_count_multi_collections_mix(self, connect):
"""
target: test collection rows_count is correct or not with a mix of float and binary collections
method: create collection and add entities in it,
assert the value returned by count_entities method is equal to length of entities
expected: the count is equal to the length of entities
"""
collection_list = []
collection_num = 20
for i in range(0, int(collection_num / 2)):
collection_name = gen_unique_str(uid_count)
collection_list.append(collection_name)
connect.create_collection(collection_name, cons.default_fields)
connect.insert(collection_name, cons.default_entities)
for i in range(int(collection_num / 2), collection_num):
collection_name = gen_unique_str(uid_count)
collection_list.append(collection_name)
connect.create_collection(collection_name, cons.default_binary_fields)
res = connect.insert(collection_name, cons.default_binary_entities)
connect.flush(collection_list)
for i in range(collection_num):
stats = connect.get_collection_stats(collection_list[i])
assert stats[row_count] == default_nb
connect.drop_collection(collection_list[i])
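# --- Illustrative sketch (not part of the test suite) -----------------------
# The multi-collection cases above assert stats[row_count] for every
# collection they created. A hedged ORM-level equivalent is sketched below:
# it creates a few collections, inserts the same batch into each, and reads
# num_entities per collection. Names, dims and counts are assumptions.
def _sketch_multi_collection_row_count(host="localhost", port="19530",
                                       num_collections=3, dim=8, nb=100):
    import numpy as np
    from pymilvus import (connections, Collection, CollectionSchema,
                          FieldSchema, DataType, utility)

    connections.connect(alias="default", host=host, port=port)
    ids = list(range(nb))
    vectors = [np.random.random(dim).tolist() for _ in range(nb)]
    created = []
    for i in range(num_collections):
        schema = CollectionSchema([
            FieldSchema(name="id", dtype=DataType.INT64, is_primary=True),
            FieldSchema(name="vec", dtype=DataType.FLOAT_VECTOR, dim=dim),
        ])
        collection = Collection(name="sketch_count_%d" % i, schema=schema)
        collection.insert([ids, vectors])
        created.append(collection)
    # num_entities reflects flushed data; a flush may happen implicitly when
    # the property is read, depending on the pymilvus version.
    counts = {c.name: c.num_entities for c in created}
    for c in created:
        utility.drop_collection(c.name)
    return counts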
class TestGetCollectionStats:
"""
******************************************************************
The following cases are used to test `collection_stats` function
******************************************************************
"""
@pytest.fixture(
scope="function",
params=gen_invalid_strs()
)
def get_invalid_collection_name(self, request):
yield request.param
@pytest.fixture(
scope="function",
params=gen_simple_index()
)
def get_simple_index(self, request, connect):
# if str(connect._cmd("mode")) == "CPU":
# if request.param["index_type"] in index_cpu_not_support():
# pytest.skip("CPU not support index_type: ivf_sq8h")
return request.param
@pytest.fixture(
scope="function",
params=gen_binary_index()
)
def get_jaccard_index(self, request, connect):
logging.getLogger().info(request.param)
if request.param["index_type"] in binary_support():
request.param["metric_type"] = "JACCARD"
return request.param
else:
pytest.skip("Skip index Temporary")
@pytest.fixture(
scope="function",
params=[
1,
1000,
2001
],
)
def insert_count(self, request):
yield request.param
@pytest.mark.tags(CaseLabel.L0)
def test_get_collection_stats_name_not_existed(self, connect, collection):
"""
target: get collection stats where collection name does not exist
method: call collection_stats with a random collection_name, which is not in db
expected: status not ok
"""
collection_name = gen_unique_str(uid_stats)
with pytest.raises(Exception) as e:
connect.get_collection_stats(collection_name)
@pytest.mark.tags(CaseLabel.L2)
def test_get_collection_stats_name_invalid(self, connect, get_invalid_collection_name):
"""
target: get collection stats where collection name is invalid
method: call collection_stats with invalid collection_name
expected: status not ok
"""
collection_name = get_invalid_collection_name
with pytest.raises(Exception) as e:
connect.get_collection_stats(collection_name)
@pytest.mark.tags(CaseLabel.L0)
def test_get_collection_stats_empty(self, connect, collection):
"""
target: get collection stats where no entity in collection
method: call collection_stats in empty collection
expected: the row count is 0
"""
stats = connect.get_collection_stats(collection)
connect.flush([collection])
assert stats[row_count] == 0
@pytest.mark.tags(CaseLabel.L2)
def test_get_collection_stats_without_connection(self, collection, dis_connect):
"""
target: test get_collection_stats, without connection
method: call get_collection_stats with correct params, with a disconnected instance
expected: get_collection_stats raises an exception
"""
with pytest.raises(Exception) as e:
dis_connect.get_collection_stats(collection)
@pytest.mark.tags(CaseLabel.L0)
def test_get_collection_stats_batch(self, connect, collection):
"""
target: get row count with collection_stats
method: add entities, check count in collection info
expected: count as expected
"""
result = connect.insert(collection, cons.default_entities)
assert len(result.primary_keys) == default_nb
connect.flush([collection])
stats = connect.get_collection_stats(collection)
assert int(stats[row_count]) == default_nb
@pytest.mark.tags(CaseLabel.L0)
def test_get_collection_stats_single(self, connect, collection):
"""
target: get row count with collection_stats
method: add entity one by one, check count in collection info
expected: count as expected
"""
nb = 10
for i in range(nb):
connect.insert(collection, cons.default_entity)
connect.flush([collection])
stats = connect.get_collection_stats(collection)
assert stats[row_count] == nb
@pytest.mark.tags(CaseLabel.L2)
def _test_get_collection_stats_after_delete(self, connect, collection):
"""
target: get row count with collection_stats
method: add and delete entities, check count in collection info
expected: status ok, count as expected
"""
ids = connect.insert(collection, cons.default_entities)
status = connect.flush([collection])
delete_ids = [ids[0], ids[-1]]
connect.delete_entity_by_id(collection, delete_ids)
connect.flush([collection])
stats = connect.get_collection_stats(collection)
assert stats["row_count"] == default_nb - 2
assert stats["partitions"][0]["row_count"] == default_nb - 2
assert stats["partitions"][0]["segments"][0]["data_size"] > 0
# TODO: enable
@pytest.mark.tags(CaseLabel.L2)
def _test_get_collection_stats_after_compact_parts(self, connect, collection):
"""
target: get row count with collection_stats
method: add and delete entities, and compact collection, check count in collection info
expected: status ok, count as expected
"""
delete_length = 1000
ids = connect.insert(collection, cons.default_entities)
status = connect.flush([collection])
delete_ids = ids[:delete_length]
connect.delete_entity_by_id(collection, delete_ids)
connect.flush([collection])
stats = connect.get_collection_stats(collection)
logging.getLogger().info(stats)
assert stats["row_count"] == default_nb - delete_length
compact_before = stats["partitions"][0]["segments"][0]["data_size"]
connect.compact(collection)
stats = connect.get_collection_stats(collection)
logging.getLogger().info(stats)
compact_after = stats["partitions"][0]["segments"][0]["data_size"]
assert compact_before == compact_after
@pytest.mark.tags(CaseLabel.L2)
def _test_get_collection_stats_after_compact_delete_one(self, connect, collection):
"""
target: get row count with collection_stats
method: add and delete one entity, and compact collection, check count in collection info
expected: status ok, count as expected
"""
ids = connect.insert(collection, cons.default_entities)
status = connect.flush([collection])
delete_ids = ids[:1]
connect.delete_entity_by_id(collection, delete_ids)
connect.flush([collection])
stats = connect.get_collection_stats(collection)
logging.getLogger().info(stats)
compact_before = stats["partitions"][0]["row_count"]
connect.compact(collection)
stats = connect.get_collection_stats(collection)
logging.getLogger().info(stats)
compact_after = stats["partitions"][0]["row_count"]
# pdb.set_trace()
assert compact_before == compact_after
@pytest.mark.tags(CaseLabel.L2)
def test_get_collection_stats_partition(self, connect, collection):
"""
target: get partition info in a collection
method: call collection_stats after partition created and check partition_stats
expected: status ok, vectors added to partition
"""
connect.create_partition(collection, default_tag)
result = connect.insert(collection, cons.default_entities, partition_name=default_tag)
assert len(result.primary_keys) == default_nb
connect.flush([collection])
stats = connect.get_collection_stats(collection)
assert stats[row_count] == default_nb
@pytest.mark.tags(CaseLabel.L0)
def test_get_collection_stats_partitions(self, connect, collection):
"""
target: get partition info in a collection
method: create two partitions, insert entities into each partition and into the default partition in turn, calling collection_stats after each insert
expected: status ok, the row count grows by default_nb after every insert
"""
new_tag = "new_tag"
connect.create_partition(collection, default_tag)
connect.create_partition(collection, new_tag)
connect.insert(collection, cons.default_entities, partition_name=default_tag)
connect.flush([collection])
stats = connect.get_collection_stats(collection)
assert stats[row_count] == default_nb
connect.insert(collection, cons.default_entities, partition_name=new_tag)
connect.flush([collection])
stats = connect.get_collection_stats(collection)
assert stats[row_count] == default_nb * 2
connect.insert(collection, cons.default_entities)
connect.flush([collection])
stats = connect.get_collection_stats(collection)
assert stats[row_count] == default_nb * 3
@pytest.mark.tags(CaseLabel.L2)
def test_get_collection_stats_partitions_A(self, connect, collection, insert_count):
"""
target: test collection rows_count is correct or not
method: create collection, create partitions and add entities in it,
assert the value returned by count_entities method is equal to length of entities
expected: the count is equal to the length of entities
"""
new_tag = "new_tag"
entities = gen_entities(insert_count)
connect.create_partition(collection, default_tag)
connect.create_partition(collection, new_tag)
connect.insert(collection, entities)
connect.flush([collection])
stats = connect.get_collection_stats(collection)
assert stats[row_count] == insert_count
@pytest.mark.tags(CaseLabel.L2)
def test_get_collection_stats_partitions_B(self, connect, collection, insert_count):
"""
target: test collection rows_count is correct or not
method: create collection, create partitions and add entities in one of the partitions,
assert the value returned by count_entities method is equal to length of entities
expected: the count is equal to the length of entities
"""
new_tag = "new_tag"
entities = gen_entities(insert_count)
connect.create_partition(collection, default_tag)
connect.create_partition(collection, new_tag)
connect.insert(collection, entities, partition_name=default_tag)
connect.flush([collection])
stats = connect.get_collection_stats(collection)
assert stats[row_count] == insert_count
@pytest.mark.tags(CaseLabel.L0)
def test_get_collection_stats_partitions_C(self, connect, collection, insert_count):
"""
target: test collection rows_count is correct or not
method: create collection, create partitions, then insert the same entities into the default partition and into one of the new partitions,
assert the value returned by count_entities method
expected: the count is equal to twice the length of the entities
"""
new_tag = "new_tag"
entities = gen_entities(insert_count)
connect.create_partition(collection, default_tag)
connect.create_partition(collection, new_tag)
connect.insert(collection, entities)
connect.insert(collection, entities, partition_name=default_tag)
connect.flush([collection])
stats = connect.get_collection_stats(collection)
assert stats[row_count] == insert_count * 2
@pytest.mark.tags(CaseLabel.L2)
def test_get_collection_stats_partitions_D(self, connect, collection, insert_count):
"""
target: test collection rows_count is correct or not
method: create collection, create partitions, then insert the same entities into both new partitions,
assert the value returned by count_entities method
expected: the collection count is equal to twice the length of the entities
"""
new_tag = "new_tag"
entities = gen_entities(insert_count)
connect.create_partition(collection, default_tag)
connect.create_partition(collection, new_tag)
connect.insert(collection, entities, partition_name=default_tag)
connect.insert(collection, entities, partition_name=new_tag)
connect.flush([collection])
stats = connect.get_collection_stats(collection)
assert stats[row_count] == insert_count * 2
# TODO: assert metric type in stats response
@pytest.mark.tags(CaseLabel.L0)
def test_get_collection_stats_after_index_created(self, connect, collection, get_simple_index):
"""
target: test collection info after index created
method: create collection, add vectors, create index and call collection_stats
expected: status ok, index created and shown in segments
"""
connect.insert(collection, cons.default_entities)
connect.flush([collection])
connect.create_index(collection, default_float_vec_field_name, get_simple_index)
stats = connect.get_collection_stats(collection)
assert stats[row_count] == default_nb
# TODO: assert metric type in stats response
@pytest.mark.tags(CaseLabel.L2)
def test_get_collection_stats_after_index_created_ip(self, connect, collection, get_simple_index):
"""
target: test collection info after index created
method: create collection, add vectors, create index and call collection_stats
expected: status ok, index created and shown in segments
"""
get_simple_index["metric_type"] = "IP"
result = connect.insert(collection, cons.default_entities)
assert len(result.primary_keys) == default_nb
connect.flush([collection])
connect.create_index(collection, default_float_vec_field_name, get_simple_index)
stats = connect.get_collection_stats(collection)
assert stats[row_count] == default_nb
# TODO: assert metric type in stats response
@pytest.mark.tags(CaseLabel.L2)
def test_get_collection_stats_after_index_created_jac(self, connect, binary_collection, get_jaccard_index):
"""
target: test collection info after index created
method: create collection, add binary entities, create index and call collection_stats
expected: status ok, index created and shown in segments
"""
ids = connect.insert(binary_collection, cons.default_binary_entities)
connect.flush([binary_collection])
connect.create_index(binary_collection, default_binary_vec_field_name, get_jaccard_index)
stats = connect.get_collection_stats(binary_collection)
assert stats[row_count] == default_nb
@pytest.mark.tags(CaseLabel.L0)
def test_get_collection_stats_after_create_different_index(self, connect, collection):
"""
target: test collection info after index created repeatedly
method: create collection, add vectors, create index and call collection_stats multiple times
expected: status ok, index info shown in segments
"""
result = connect.insert(collection, cons.default_entities)
connect.flush([collection])
for index_type in ["IVF_FLAT", "IVF_SQ8"]:
connect.create_index(collection, default_float_vec_field_name,
{"index_type": index_type, "params": {"nlist": 1024}, "metric_type": "L2"})
stats = connect.get_collection_stats(collection)
assert stats[row_count] == default_nb
@pytest.mark.tags(CaseLabel.L2)
def test_collection_count_multi_collections_indexed(self, connect):
"""
target: test collection rows_count is correct or not with multiple collections of L2
method: create collection and add entities in it,
assert the value returned by count_entities method is equal to length of entities
expected: row count in segments
"""
collection_list = []
collection_num = 10
for i in range(collection_num):
collection_name = gen_unique_str(uid_stats)
collection_list.append(collection_name)
connect.create_collection(collection_name, cons.default_fields)
res = connect.insert(collection_name, cons.default_entities)
connect.flush(collection_list)
index_1 = {"index_type": "IVF_SQ8", "params": {"nlist": 1024}, "metric_type": "L2"}
index_2 = {"index_type": "IVF_FLAT", "params": {"nlist": 1024}, "metric_type": "L2"}
if i % 2:
connect.create_index(collection_name, default_float_vec_field_name, index_1)
else:
connect.create_index(collection_name, default_float_vec_field_name, index_2)
for i in range(collection_num):
stats = connect.get_collection_stats(collection_list[i])
assert stats[row_count] == default_nb
index = connect.describe_index(collection_list[i], "")
if i % 2:
create_target_index(index_1, default_float_vec_field_name)
assert index == index_1
else:
create_target_index(index_2, default_float_vec_field_name)
assert index == index_2
# break
connect.drop_collection(collection_list[i])
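# --- Illustrative sketch (not part of the test suite) -----------------------
# The stats-after-index cases above pass index parameter dicts shaped like
# {"index_type": ..., "params": {"nlist": ...}, "metric_type": ...}. The
# hedged helper below shows the same dict used against the pymilvus ORM: it
# builds a small collection, creates an IVF_FLAT index, and reads the entity
# count back. The collection/field names are illustrative assumptions.
def _sketch_stats_after_index(host="localhost", port="19530", dim=8, nb=100):
    import numpy as np
    from pymilvus import (connections, Collection, CollectionSchema,
                          FieldSchema, DataType)

    connections.connect(alias="default", host=host, port=port)
    schema = CollectionSchema([
        FieldSchema(name="id", dtype=DataType.INT64, is_primary=True),
        FieldSchema(name="vec", dtype=DataType.FLOAT_VECTOR, dim=dim),
    ])
    collection = Collection(name="sketch_index_stats", schema=schema)
    collection.insert([list(range(nb)),
                       [np.random.random(dim).tolist() for _ in range(nb)]])
    index_params = {"index_type": "IVF_FLAT",
                    "params": {"nlist": 128},
                    "metric_type": "L2"}
    collection.create_index(field_name="vec", index_params=index_params)
    # Creating an index does not change the row count; once the inserted data
    # is flushed/visible the count should still equal nb.
    return collection.num_entities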
class TestCreateCollection:
"""
******************************************************************
The following cases are used to test `create_collection` function
******************************************************************
"""
@pytest.fixture(
scope="function",
params=gen_single_filter_fields()
)
def get_filter_field(self, request):
yield request.param
@pytest.fixture(
scope="function",
params=gen_single_vector_fields()
)
def get_vector_field(self, request):
yield request.param
@pytest.fixture(
scope="function",
params=gen_segment_row_limits()
)
def get_segment_row_limit(self, request):
yield request.param
@pytest.mark.tags(CaseLabel.L2)
def _test_create_collection_segment_row_limit(self, connect, get_segment_row_limit):
"""
target: test create normal collection with different fields
method: create collection with diff segment_row_limit
expected: no exception raised
"""
collection_name = gen_unique_str(uid_create)
fields = copy.deepcopy(cons.default_fields)
# fields["segment_row_limit"] = get_segment_row_limit
connect.create_collection(collection_name, fields)
assert connect.has_collection(collection_name)
@pytest.mark.tags(CaseLabel.L0)
def test_create_collection_after_insert(self, connect, collection):
"""
target: test insert vector, then create collection again
method: insert vector and create collection
expected: error raised
"""
# pdb.set_trace()
connect.insert(collection, cons.default_entity)
try:
connect.create_collection(collection, cons.default_fields)
except Exception as e:
code = getattr(e, 'code', "The exception does not contain the field of code.")
assert code == 1
message = getattr(e, 'message', "The exception does not contain the field of message.")
assert message == "Create collection failed: meta table add collection failed," \
"error = collection %s exist" % collection
@pytest.mark.tags(CaseLabel.L0)
def test_create_collection_after_insert_flush(self, connect, collection):
"""
target: test insert vector, then create collection again
method: insert vector and create collection
expected: error raised
"""
connect.insert(collection, cons.default_entity)
connect.flush([collection])
try:
connect.create_collection(collection, cons.default_fields)
except Exception as e:
code = getattr(e, 'code', "The exception does not contain the field of code.")
assert code == 1
message = getattr(e, 'message', "The exception does not contain the field of message.")
assert message == "Create collection failed: meta table add collection failed," \
"error = collection %s exist" % collection
@pytest.mark.tags(CaseLabel.L2)
def test_create_collection_multithread(self, connect):
"""
target: test create collection with multithread
method: create collection using multithread,
expected: collections are created
"""
threads_num = 8
threads = []
collection_names = []
def create():
collection_name = gen_unique_str(uid_create)
collection_names.append(collection_name)
connect.create_collection(collection_name, cons.default_fields)
for i in range(threads_num):
t = MyThread(target=create, args=())
threads.append(t)
t.start()
time.sleep(0.2)
for t in threads:
t.join()
for item in collection_names:
assert item in connect.list_collections()
connect.drop_collection(item)
class TestCreateCollectionInvalid(object):
"""
Test creating collections with invalid params
"""
@pytest.fixture(
scope="function",
params=gen_invalid_metric_types()
)
def get_metric_type(self, request):
yield request.param
@pytest.fixture(
scope="function",
params=gen_invalid_ints()
)
def get_segment_row_limit(self, request):
yield request.param
@pytest.fixture(
scope="function",
params=gen_invalid_ints()
)
def get_dim(self, request):
yield request.param
@pytest.fixture(
scope="function",
params=gen_invalid_strs()
)
def get_invalid_string(self, request):
yield request.param
@pytest.fixture(
scope="function",
params=gen_invalid_field_types()
)
def get_field_type(self, request):
yield request.param
@pytest.mark.tags(CaseLabel.L2)
def _test_create_collection_with_invalid_segment_row_limit(self, connect, get_segment_row_limit):
collection_name = gen_unique_str()
fields = copy.deepcopy(cons.default_fields)
fields["segment_row_limit"] = get_segment_row_limit
with pytest.raises(Exception) as e:
connect.create_collection(collection_name, fields)
@pytest.mark.tags(CaseLabel.L2)
def _test_create_collection_no_segment_row_limit(self, connect):
"""
target: test create collection with no segment_row_limit params
method: create collection with correct params
expected: use default default_segment_row_limit
"""
collection_name = gen_unique_str(uid_create)
fields = copy.deepcopy(cons.default_fields)
fields.pop("segment_row_limit")
connect.create_collection(collection_name, fields)
res = connect.get_collection_info(collection_name)
logging.getLogger().info(res)
assert res["segment_row_limit"] == default_server_segment_row_limit
# TODO: assert exception
@pytest.mark.tags(CaseLabel.L2)
def test_create_collection_limit_fields(self, connect):
"""
target: test create collection exceeding the maximum number of fields
method: create collection with more fields than the maximum allowed (64)
expected: raise exception
"""
collection_name = gen_unique_str(uid_create)
limit_num = 64
fields = copy.deepcopy(cons.default_fields)
for i in range(limit_num):
field_name = gen_unique_str("field_name")
field = {"name": field_name, "type": DataType.INT64}
fields["fields"].append(field)
try:
connect.create_collection(collection_name, fields)
except Exception as e:
code = getattr(e, 'code', "The exception does not contain the field of code.")
assert code == 1
message = getattr(e, 'message', "The exception does not contain the field of message.")
assert message == "maximum field's number should be limited to 64"
class TestDescribeCollection:
@pytest.fixture(
scope="function",
params=gen_single_filter_fields()
)
def get_filter_field(self, request):
yield request.param
@pytest.fixture(
scope="function",
params=gen_single_vector_fields()
)
def get_vector_field(self, request):
yield request.param
@pytest.fixture(
scope="function",
params=gen_simple_index()
)
def get_simple_index(self, request, connect):
logging.getLogger().info(request.param)
# if str(connect._cmd("mode")) == "CPU":
# if request.param["index_type"] in index_cpu_not_support():
# pytest.skip("sq8h not support in CPU mode")
return request.param
"""
******************************************************************
The following cases are used to test `describe_collection` function, no data in collection
******************************************************************
"""
@pytest.mark.tags(CaseLabel.L0)
def test_collection_fields(self, connect, get_filter_field, get_vector_field):
"""
target: test create normal collection with different fields, check info returned
method: create collection with diff fields: metric/field_type/..., calling `describe_collection`
expected: no exception raised, and value returned correct
"""
filter_field = get_filter_field
vector_field = get_vector_field
collection_name = gen_unique_str(uid_describe)
fields = {
"fields": [gen_primary_field(), filter_field, vector_field],
# "segment_row_limit": default_segment_row_limit
}
connect.create_collection(collection_name, fields)
res = connect.describe_collection(collection_name)
# assert res['segment_row_limit'] == default_segment_row_limit
assert len(res["fields"]) == len(fields.get("fields"))
for field in res["fields"]:
if field["type"] == filter_field:
assert field["name"] == filter_field["name"]
elif field["type"] == vector_field:
assert field["name"] == vector_field["name"]
assert field["params"] == vector_field["params"]
@pytest.mark.tags(CaseLabel.L0)
def test_describe_collection_after_index_created(self, connect, collection, get_simple_index):
connect.create_index(collection, default_float_vec_field_name, get_simple_index)
if get_simple_index["index_type"] != "FLAT":
index = connect.describe_index(collection, "")
assert index["index_type"] == get_simple_index["index_type"]
assert index["metric_type"] == get_simple_index["metric_type"]
assert index["params"] == get_simple_index["params"]
@pytest.mark.tags(CaseLabel.L2)
def test_describe_collection_without_connection(self, collection, dis_connect):
"""
target: test get collection info, without connection
method: calling get collection info with correct params, with a disconnected instance
expected: get collection info raise exception
"""
with pytest.raises(Exception) as e:
dis_connect.describe_collection(collection)
@pytest.mark.tags(CaseLabel.L0)
def test_describe_collection_not_existed(self, connect):
"""
target: test if collection not created
method: generate a random collection name, create this collection, then drop it and call describe_collection again
expected: raise exception indicating the collection cannot be found
"""
collection_name = gen_unique_str(uid_describe)
connect.create_collection(collection_name, cons.default_fields)
connect.describe_collection(collection_name)
connect.drop_collection(collection_name)
try:
connect.describe_collection(collection_name)
except Exception as e:
code = getattr(e, 'code', "The exception does not contain the field of code.")
assert code == 1
message = getattr(e, 'message', "The exception does not contain the field of message.")
assert message == "describe collection failed: can't find collection: %s" % collection_name
@pytest.mark.tags(CaseLabel.L2)
def test_describe_collection_multithread(self, connect):
"""
target: test describe collection with multi-threads
method: describe the same collection from multiple threads
expected: describe_collection succeeds in every thread
"""
threads_num = 4
threads = []
collection_name = gen_unique_str(uid_describe)
connect.create_collection(collection_name, cons.default_fields)
def get_info():
connect.describe_collection(collection_name)
for i in range(threads_num):
t = MyThread(target=get_info)
threads.append(t)
t.start()
time.sleep(0.2)
for t in threads:
t.join()
"""
******************************************************************
The following cases are used to test `describe_collection` function, and insert data in collection
******************************************************************
"""
@pytest.mark.tags(CaseLabel.L0)
def test_describe_collection_fields_after_insert(self, connect, get_filter_field, get_vector_field):
"""
target: test create normal collection with different fields, check info returned
method: create collection with diff fields: metric/field_type/..., calling `describe_collection`
expected: no exception raised, and value returned correct
"""
filter_field = get_filter_field
vector_field = get_vector_field
collection_name = gen_unique_str(uid_describe)
fields = {
"fields": [gen_primary_field(), filter_field, vector_field],
# "segment_row_limit": default_segment_row_limit
}
connect.create_collection(collection_name, fields)
entities = gen_entities_by_fields(fields["fields"], default_nb, vector_field["params"]["dim"])
res_ids = connect.insert(collection_name, entities)
connect.flush([collection_name])
res = connect.describe_collection(collection_name)
# assert res['segment_row_limit'] == default_segment_row_limit
assert len(res["fields"]) == len(fields.get("fields"))
for field in res["fields"]:
if field["type"] == filter_field:
assert field["name"] == filter_field["name"]
elif field["type"] == vector_field:
assert field["name"] == vector_field["name"]
assert field["params"] == vector_field["params"]
class TestDescribeCollectionInvalid(object):
"""
Test describe collection with invalid params
"""
@pytest.fixture(
scope="function",
params=gen_invalid_strs()
)
def get_collection_name(self, request):
yield request.param
@pytest.mark.tags(CaseLabel.L2)
def test_describe_collection_with_invalid_collection_name(self, connect, get_collection_name):
"""
target: test describe collection which name invalid
method: call describe_collection with invalid names
expected: raise exception
"""
collection_name = get_collection_name
with pytest.raises(Exception) as e:
connect.describe_collection(collection_name)
@pytest.mark.tags(CaseLabel.L2)
@pytest.mark.parametrize("collection_name", ('', None))
def test_describe_collection_with_empty_or_None_collection_name(self, connect, collection_name):
"""
target: test describe collection which name is empty or None
method: call describe_collection with '' or None name
expected: raise exception
"""
with pytest.raises(Exception) as e:
connect.describe_collection(collection_name)
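# --- Illustrative sketch (not part of the test suite) -----------------------
# The describe cases above compare field names/params returned by
# describe_collection against the fields used at creation time. A hedged ORM
# equivalent: build an explicit schema, create the collection, and read the
# schema back from the Collection object. Names and the dim are assumptions.
def _sketch_create_and_describe(host="localhost", port="19530", dim=8):
    from pymilvus import (connections, Collection, CollectionSchema,
                          FieldSchema, DataType)

    connections.connect(alias="default", host=host, port=port)
    fields = [
        FieldSchema(name="id", dtype=DataType.INT64, is_primary=True),
        FieldSchema(name="age", dtype=DataType.INT64),
        FieldSchema(name="vec", dtype=DataType.FLOAT_VECTOR, dim=dim),
    ]
    schema = CollectionSchema(fields, description="describe sketch")
    collection = Collection(name="sketch_describe_collection", schema=schema)
    # The schema read back should contain the same field names, and the
    # vector field should still carry its dim parameter.
    described = collection.schema
    return [(f.name, f.dtype) for f in described.fields]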
class TestDropCollection:
"""
******************************************************************
The following cases are used to test `drop_collection` function
******************************************************************
"""
@pytest.mark.tags(CaseLabel.L0)
def test_drop_collection_A(self, connect, collection):
"""
target: test delete collection created with correct params
method: create collection and then delete,
assert the value returned by delete method
expected: status ok, and no collection in collections
"""
connect.drop_collection(collection)
time.sleep(2)
assert not connect.has_collection(collection)
@pytest.mark.tags(CaseLabel.L2)
def test_drop_collection_without_connection(self, collection, dis_connect):
"""
target: test drop collection, without connection
method: drop collection with correct params, with a disconnected instance
expected: drop raise exception
"""
with pytest.raises(Exception) as e:
dis_connect.drop_collection(collection)
@pytest.mark.tags(CaseLabel.L0)
def test_drop_collection_not_existed(self, connect):
"""
target: test dropping a collection that was never created
method: generate a random collection name which does not exist in db, then call drop_collection
expected: raise exception indicating the collection cannot be found
"""
collection_name = gen_unique_str(uid_drop)
try:
connect.drop_collection(collection_name)
except Exception as e:
code = getattr(e, 'code', "The exception does not contain the field of code.")
assert code == 1
message = getattr(e, 'message', "The exception does not contain the field of message.")
assert message == "describe collection failed: can't find collection: %s" % collection_name
@pytest.mark.tags(CaseLabel.L2)
def test_create_drop_collection_multithread(self, connect):
"""
target: test create and drop collection with multithread
method: create and drop collection using multithread,
expected: collections are created, and dropped
"""
threads_num = 8
threads = []
collection_names = []
def create():
collection_name = gen_unique_str(uid_drop)
collection_names.append(collection_name)
connect.create_collection(collection_name, cons.default_fields)
connect.drop_collection(collection_name)
for i in range(threads_num):
t = MyThread(target=create, args=())
threads.append(t)
t.start()
time.sleep(0.2)
for t in threads:
t.join()
for item in collection_names:
assert not connect.has_collection(item)
class TestDropCollectionInvalid(object):
"""
Test drop collection with invalid params
"""
@pytest.fixture(
scope="function",
params=gen_invalid_strs()
)
def get_collection_name(self, request):
yield request.param
@pytest.mark.tags(CaseLabel.L2)
def test_drop_collection_with_invalid_collection_name(self, connect, get_collection_name):
"""
target: test drop invalid collection
method: drop collection with invalid collection name
expected: raise exception
"""
collection_name = get_collection_name
with pytest.raises(Exception) as e:
connect.drop_collection(collection_name)
@pytest.mark.tags(CaseLabel.L2)
@pytest.mark.parametrize("collection_name", ('', None))
def test_drop_collection_with_empty_or_None_collection_name(self, connect, collection_name):
"""
target: test drop invalid collection
method: drop collection with empty or None collection name
expected: raise exception
"""
with pytest.raises(Exception) as e:
connect.drop_collection(collection_name)
class TestHasCollection:
"""
******************************************************************
The following cases are used to test `has_collection` function
******************************************************************
"""
@pytest.mark.tags(CaseLabel.L2)
def test_has_collection_without_connection(self, collection, dis_connect):
"""
target: test has collection, without connection
method: calling has collection with correct params, with a disconnected instance
expected: has collection raise exception
"""
with pytest.raises(Exception) as e:
assert dis_connect.has_collection(collection)
@pytest.mark.tags(CaseLabel.L0)
def test_has_collection_not_existed(self, connect):
"""
target: test if collection not created
method: random a collection name, create this collection then drop it,
assert the value returned by has_collection method
expected: False
"""
collection_name = gen_unique_str(uid_has)
connect.create_collection(collection_name, cons.default_fields)
assert connect.has_collection(collection_name)
connect.drop_collection(collection_name)
assert not connect.has_collection(collection_name)
@pytest.mark.tags(CaseLabel.L2)
def test_has_collection_multithread(self, connect):
"""
target: test has collection with multi-threads
method: check the same collection from multiple threads
expected: has_collection returns True in every thread
"""
threads_num = 4
threads = []
collection_name = gen_unique_str(uid_has)
connect.create_collection(collection_name, cons.default_fields)
def has():
assert connect.has_collection(collection_name)
# assert not assert_collection(connect, collection_name)
for i in range(threads_num):
t = MyThread(target=has, args=())
threads.append(t)
t.start()
time.sleep(0.2)
for t in threads:
t.join()
class TestHasCollectionInvalid(object):
"""
Test has collection with invalid params
"""
@pytest.fixture(
scope="function",
params=gen_invalid_strs()
)
def get_collection_name(self, request):
yield request.param
@pytest.mark.tags(CaseLabel.L2)
def test_has_collection_with_invalid_collection_name(self, connect, get_collection_name):
"""
target: test has collection with invalid collection name
method: call has_collection with an invalid collection name
expected: raise exception
"""
collection_name = get_collection_name
with pytest.raises(Exception) as e:
connect.has_collection(collection_name)
@pytest.mark.tags(CaseLabel.L2)
def test_has_collection_with_empty_collection_name(self, connect):
"""
target: test has collection with an empty collection name
method: call has_collection with an empty string as the name
expected: raise exception
"""
collection_name = ''
with pytest.raises(Exception) as e:
connect.has_collection(collection_name)
@pytest.mark.tags(CaseLabel.L2)
def test_has_collection_with_none_collection_name(self, connect):
"""
target: test has collection with a None collection name
method: call has_collection with None as the name
expected: raise exception
"""
collection_name = None
with pytest.raises(Exception) as e:
connect.has_collection(collection_name)
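# --- Illustrative sketch (not part of the test suite) -----------------------
# The has/drop cases above go through the legacy `connect` fixture. A hedged
# sketch of the same checks with the pymilvus `utility` module: create a
# collection, confirm it is visible, then drop it and confirm it is gone.
# The collection name and connection parameters are illustrative assumptions.
def _sketch_has_list_drop(host="localhost", port="19530", dim=8):
    from pymilvus import (connections, Collection, CollectionSchema,
                          FieldSchema, DataType, utility)

    connections.connect(alias="default", host=host, port=port)
    name = "sketch_has_list_drop"
    schema = CollectionSchema([
        FieldSchema(name="id", dtype=DataType.INT64, is_primary=True),
        FieldSchema(name="vec", dtype=DataType.FLOAT_VECTOR, dim=dim),
    ])
    Collection(name=name, schema=schema)
    assert utility.has_collection(name)
    assert name in utility.list_collections()
    utility.drop_collection(name)
    assert not utility.has_collection(name)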
class TestListCollections:
"""
******************************************************************
The following cases are used to test `list_collections` function
******************************************************************
"""
@pytest.mark.tags(CaseLabel.L0)
def test_list_collections_multi_collections(self, connect):
"""
target: test list collections
method: create multiple collections, assert each name is returned by list_collections
expected: every created collection name appears in the list
"""
collection_num = 50
collection_names = []
for i in range(collection_num):
collection_name = gen_unique_str(uid_list)
collection_names.append(collection_name)
connect.create_collection(collection_name, cons.default_fields)
assert collection_name in connect.list_collections()
for i in range(collection_num):
connect.drop_collection(collection_names[i])
@pytest.mark.tags(CaseLabel.L2)
def test_list_collections_without_connection(self, dis_connect):
"""
target: test list collections, without connection
method: calling list collections with correct params, with a disconnected instance
expected: list collections raise exception
"""
with pytest.raises(Exception) as e:
dis_connect.list_collections()
# TODO: make sure to run this case in the end
@pytest.mark.skip("r0.3-test")
@pytest.mark.tags(CaseLabel.L2)
def test_list_collections_no_collection(self, connect):
"""
target: test show collections is correct or not, if no collection in db
method: delete all collections,
assert the value returned by list_collections method is equal to []
expected: the status is ok, and the result is equal to []
"""
result = connect.list_collections()
if result:
for collection_name in result:
assert connect.has_collection(collection_name)
@pytest.mark.tags(CaseLabel.L2)
def test_list_collections_multithread(self, connect):
"""
target: test list collection with multi-threads
method: list collection using multi-threads
expected: list collections correctly
"""
threads_num = 10
threads = []
collection_name = gen_unique_str(uid_list)
connect.create_collection(collection_name, cons.default_fields)
def _list():
assert collection_name in connect.list_collections()
for i in range(threads_num):
t = MyThread(target=_list)
threads.append(t)
t.start()
time.sleep(0.2)
for t in threads:
t.join()
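# --- Illustrative sketch (not part of the test suite) -----------------------
# The load/release cases in the next class follow the pattern
# insert -> flush -> index -> load -> (search) -> release. A hedged ORM
# sketch of that lifecycle is given below; the index and search parameters
# are plausible defaults, not values taken from this suite.
def _sketch_load_release(host="localhost", port="19530", dim=8, nb=100):
    import numpy as np
    from pymilvus import (connections, Collection, CollectionSchema,
                          FieldSchema, DataType)

    connections.connect(alias="default", host=host, port=port)
    schema = CollectionSchema([
        FieldSchema(name="id", dtype=DataType.INT64, is_primary=True),
        FieldSchema(name="vec", dtype=DataType.FLOAT_VECTOR, dim=dim),
    ])
    collection = Collection(name="sketch_load_release", schema=schema)
    collection.insert([list(range(nb)),
                       [np.random.random(dim).tolist() for _ in range(nb)]])
    collection.create_index(field_name="vec",
                            index_params={"index_type": "IVF_FLAT",
                                          "params": {"nlist": 128},
                                          "metric_type": "L2"})
    # Searching requires the collection to be loaded into memory first.
    collection.load()
    res = collection.search(data=[np.random.random(dim).tolist()],
                            anns_field="vec",
                            param={"metric_type": "L2",
                                   "params": {"nprobe": 8}},
                            limit=5)
    collection.release()
    return len(res[0])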
class TestLoadCollection:
"""
******************************************************************
The following cases are used to test `load_collection` function
******************************************************************
"""
@pytest.fixture(
scope="function",
params=gen_simple_index()
)
def get_simple_index(self, request, connect):
return request.param
@pytest.fixture(
scope="function",
params=gen_binary_index()
)
def get_binary_index(self, request, connect):
return request.param
@pytest.mark.tags(CaseLabel.L0)
def test_load_collection_after_index(self, connect, collection, get_simple_index):
"""
target: test load collection, after index created
method: insert and create index, load collection with correct params
expected: no error raised
"""
connect.insert(collection, cons.default_entities)
connect.flush([collection])
connect.create_index(collection, default_float_vec_field_name, get_simple_index)
connect.load_collection(collection)
connect.release_collection(collection)
@pytest.mark.tags(CaseLabel.L2)
def test_load_collection_after_index_binary(self, connect, binary_collection, get_binary_index):
"""
target: test load binary_collection, after index created
method: insert and create index, load binary_collection with correct params
expected: no error raised
"""
result = connect.insert(binary_collection, cons.default_binary_entities)
assert len(result.primary_keys) == default_nb
connect.flush([binary_collection])
for metric_type in binary_metrics():
get_binary_index["metric_type"] = metric_type
connect.drop_index(binary_collection, default_binary_vec_field_name)
if get_binary_index["index_type"] == "BIN_IVF_FLAT" and metric_type in structure_metrics():
with pytest.raises(Exception) as e:
connect.create_index(binary_collection, default_binary_vec_field_name, get_binary_index)
else:
connect.create_index(binary_collection, default_binary_vec_field_name, get_binary_index)
index = connect.describe_index(binary_collection, "")
create_target_index(get_binary_index, default_binary_vec_field_name)
assert index == get_binary_index
connect.load_collection(binary_collection)
connect.release_collection(binary_collection)
@pytest.mark.tags(CaseLabel.L0)
def test_load_empty_collection(self, connect, collection):
"""
target: test load an empty collection with no data inserted
method: no entities in collection, load and release the collection
expected: load and release successfully
"""
connect.load_collection(collection)
connect.release_collection(collection)
@pytest.mark.tags(CaseLabel.L2)
def test_load_collection_dis_connect(self, dis_connect, collection):
"""
target: test load collection, without connection
method: load collection with correct params, with a disconnected instance
expected: load raise exception
"""
with pytest.raises(Exception) as e:
dis_connect.load_collection(collection)
@pytest.mark.tags(CaseLabel.L2)
def test_release_collection_dis_connect(self, dis_connect, collection):
"""
target: test release collection, without connection
method: release collection with correct params, with a disconnected instance
expected: release raise exception
"""
with pytest.raises(Exception) as e:
dis_connect.release_collection(collection)
@pytest.mark.tags(CaseLabel.L2)
def test_load_collection_not_existed(self, connect, collection):
"""
target: test load invalid collection
method: load not existed collection
expected: raise exception
"""
collection_name = gen_unique_str(uid_load)
try:
connect.load_collection(collection_name)
except Exception as e:
code = getattr(e, 'code', "The exception does not contain the field of code.")
assert code == 1
message = getattr(e, 'message', "The exception does not contain the field of message.")
assert message == "describe collection failed: can't find collection: %s" % collection_name
@pytest.mark.tags(CaseLabel.L2)
def test_release_collection_not_existed(self, connect, collection):
"""
target: test release a not existed collection
method: release with a not existed collection name
expected: raise exception
"""
collection_name = gen_unique_str(uid_load)
try:
connect.release_collection(collection_name)
except Exception as e:
code = getattr(e, 'code', "The exception does not contain the field of code.")
assert code == 1
message = getattr(e, 'message', "The exception does not contain the field of message.")
assert message == "describe collection failed: can't find collection: %s" % collection_name
@pytest.mark.tags(CaseLabel.L0)
def test_release_collection_not_load(self, connect, collection):
"""
target: test release collection without load
method: release collection without load
expected: release successfully
"""
result = connect.insert(collection, cons.default_entities)
assert len(result.primary_keys) == default_nb
connect.flush([collection])
connect.release_collection(collection)
@pytest.mark.tags(CaseLabel.L0)
def test_load_collection_after_load_release(self, connect, collection):
"""
target: test load collection after load and release
method: 1.load and release collection after entities flushed
2.re-load collection
expected: No exception
"""
result = connect.insert(collection, cons.default_entities)
assert len(result.primary_keys) == default_nb
connect.flush([collection])
connect.load_collection(collection)
connect.release_collection(collection)
connect.load_collection(collection)
@pytest.mark.tags(CaseLabel.L2)
def test_load_collection_repeatedly(self, connect, collection):
"""
target: test load collection repeatedly
method: load collection twice
expected: No exception
"""
result = connect.insert(collection, cons.default_entities)
assert len(result.primary_keys) == default_nb
connect.flush([collection])
connect.load_collection(collection)
connect.load_collection(collection)
@pytest.mark.tags(CaseLabel.L2)
def test_load_release_collection(self, connect, collection):
"""
target: test load, release non-exist collection
method: 1. load, release and drop collection
2. load and release dropped collection
expected: raise exception
"""
collection_name = gen_unique_str(uid_load)
connect.create_collection(collection_name, cons.default_fields)
connect.insert(collection_name, cons.default_entities)
connect.flush([collection_name])
connect.load_collection(collection_name)
connect.release_collection(collection_name)
connect.drop_collection(collection_name)
try:
connect.load_collection(collection_name)
except Exception as e:
code = getattr(e, 'code', "The exception does not contain the field of code.")
assert code == 1
message = getattr(e, 'message', "The exception does not contain the field of message.")
assert message == "describe collection failed: can't find collection: %s" % collection_name
try:
connect.release_collection(collection_name)
except Exception as e:
code = getattr(e, 'code', "The exception does not contain the field of code.")
assert code == 1
message = getattr(e, 'message', "The exception does not contain the field of message.")
assert message == "describe collection failed: can't find collection: %s" % collection_name
@pytest.mark.tags(CaseLabel.L0)
def test_release_collection_after_drop(self, connect, collection):
"""
target: test release collection after drop
method: insert and flush, then release collection after load and drop
expected: raise exception
"""
result = connect.insert(collection, cons.default_entities)
assert len(result.primary_keys) == default_nb
connect.flush([collection])
connect.load_collection(collection)
connect.drop_collection(collection)
try:
connect.release_collection(collection)
except Exception as e:
code = getattr(e, 'code', "The exception does not contain the field of code.")
assert code == 1
message = getattr(e, 'message', "The exception does not contain the field of message.")
assert message == "describe collection failed: can't find collection: %s" % collection
@pytest.mark.tags(CaseLabel.L0)
def test_load_collection_without_flush(self, connect, collection):
"""
target: test load collection without flush
method: insert entities without flush, then load collection
expected: load collection failed
"""
result = connect.insert(collection, cons.default_entities)
assert len(result.primary_keys) == default_nb
connect.load_collection(collection)
# TODO
@pytest.mark.tags(CaseLabel.L2)
def _test_load_collection_larger_than_memory(self):
"""
target: test load collection when memory less than collection size
        method: to be determined (test not implemented yet)
expected: raise exception
"""
@pytest.mark.tags(CaseLabel.L0)
def test_load_collection_release_part_partitions(self, connect, collection):
"""
target: test release part partitions after load collection
method: load collection and release part partitions
        expected: searching the released partition raises an exception; the still-loaded partition returns results
"""
result = connect.insert(collection, cons.default_entities)
assert len(result.primary_keys) == default_nb
connect.create_partition(collection, default_tag)
result = connect.insert(collection, cons.default_entities, partition_name=default_tag)
assert len(result.primary_keys) == default_nb
connect.flush([collection])
connect.load_collection(collection)
connect.release_partitions(collection, [default_tag])
with pytest.raises(Exception) as e:
connect.search(collection, **default_single_query, partition_names=[default_tag])
res = connect.search(collection, **default_single_query, partition_names=[default_partition_name])
assert len(res[0]) == default_top_k
@pytest.mark.tags(CaseLabel.L2)
def test_load_collection_release_all_partitions(self, connect, collection):
"""
target: test release all partitions after load collection
method: load collection and release all partitions
expected: search empty
"""
result = connect.insert(collection, cons.default_entities)
assert len(result.primary_keys) == default_nb
connect.create_partition(collection, default_tag)
result = connect.insert(collection, cons.default_entities, partition_name=default_tag)
assert len(result.primary_keys) == default_nb
connect.flush([collection])
connect.load_collection(collection)
connect.release_partitions(collection, [default_partition_name, default_tag])
res = connect.search(collection, **default_single_query)
assert len(res[0]) == 0
@pytest.mark.tags(CaseLabel.L0)
def test_load_partitions_release_collection(self, connect, collection):
"""
target: test release collection after load partitions
        method: insert entities into a partition, load the partition, then release the collection
        expected: searching the released collection raises an exception
"""
connect.create_partition(collection, default_tag)
result = connect.insert(collection, cons.default_entities, partition_name=default_tag)
assert len(result.primary_keys) == default_nb
connect.flush([collection])
connect.load_partitions(collection, [default_tag])
connect.release_collection(collection)
with pytest.raises(Exception):
connect.search(collection, **default_single_query)
# assert len(res[0]) == 0
class TestReleaseAdvanced:
@pytest.mark.tags(CaseLabel.L0)
def test_release_collection_during_searching(self, connect, collection):
"""
target: test release collection during searching
method: insert entities into collection, flush and load collection, release collection during searching
expected: raise exception
"""
nq = 1000
top_k = 1
connect.insert(collection, cons.default_entities)
connect.flush([collection])
connect.load_collection(collection)
params, _ = gen_search_vectors_params(field_name, cons.default_entities, top_k, nq)
future = connect.search(collection, **params, _async=True)
connect.release_collection(collection)
with pytest.raises(Exception):
connect.search(collection, **default_single_query)
@pytest.mark.tags(CaseLabel.L2)
def test_release_partition_during_searching(self, connect, collection):
"""
target: test release partition during searching
method: insert entities into partition, flush and load partition, release partition during searching
expected: raise exception
"""
nq = 1000
top_k = 1
connect.create_partition(collection, default_tag)
query, _ = gen_search_vectors_params(field_name, cons.default_entities, top_k, nq)
connect.insert(collection, cons.default_entities, partition_name=default_tag)
connect.flush([collection])
connect.load_partitions(collection, [default_tag])
res = connect.search(collection, **query, _async=True)
connect.release_partitions(collection, [default_tag])
with pytest.raises(Exception) as e:
res = connect.search(collection, **default_single_query)
@pytest.mark.tags(CaseLabel.L0)
def test_release_collection_during_searching_A(self, connect, collection):
"""
target: test release collection during searching
method: insert entities into partition, flush and load partition, release collection during searching
expected: raise exception
"""
nq = 1000
top_k = 1
connect.create_partition(collection, default_tag)
query, _ = gen_search_vectors_params(field_name, cons.default_entities, top_k, nq)
connect.insert(collection, cons.default_entities, partition_name=default_tag)
connect.flush([collection])
connect.load_partitions(collection, [default_tag])
res = connect.search(collection, **query, _async=True)
connect.release_collection(collection)
with pytest.raises(Exception):
connect.search(collection, **default_single_query)
def _test_release_collection_during_loading(self, connect, collection):
"""
target: test release collection during loading
method: insert entities into collection, flush, release collection during loading
expected: raise exception
"""
connect.insert(collection, cons.default_entities)
connect.flush([collection])
def load():
connect.load_collection(collection)
t = threading.Thread(target=load, args=())
t.start()
connect.release_collection(collection)
with pytest.raises(Exception):
connect.search(collection, **default_single_query)
def _test_release_partition_during_loading(self, connect, collection):
"""
target: test release partition during loading
method: insert entities into partition, flush, release partition during loading
expected:
"""
connect.create_partition(collection, default_tag)
connect.insert(collection, cons.default_entities, partition_name=default_tag)
connect.flush([collection])
def load():
connect.load_collection(collection)
t = threading.Thread(target=load, args=())
t.start()
connect.release_partitions(collection, [default_tag])
res = connect.search(collection, **default_single_query)
assert len(res[0]) == 0
def _test_release_collection_during_inserting(self, connect, collection):
"""
target: test release collection during inserting
method: load collection, do release collection during inserting
expected:
"""
connect.insert(collection, cons.default_entities)
connect.flush([collection])
connect.load_collection(collection)
def insert():
connect.insert(collection, cons.default_entities)
t = threading.Thread(target=insert, args=())
t.start()
connect.release_collection(collection)
with pytest.raises(Exception):
res = connect.search(collection, **default_single_query)
def _test_release_collection_during_indexing(self, connect, collection):
"""
target: test release collection during building index
method: insert and flush, load collection, do release collection during creating index
expected:
"""
pass
def _test_release_collection_during_droping_index(self, connect, collection):
"""
        target: test release collection during dropping index
        method: insert, create index and flush, load collection, do release collection during dropping index
expected:
"""
pass
class TestLoadCollectionInvalid(object):
"""
Test load collection with invalid params
"""
@pytest.fixture(
scope="function",
params=gen_invalid_strs()
)
def get_collection_name(self, request):
yield request.param
@pytest.mark.tags(CaseLabel.L2)
def test_load_collection_with_invalid_collection_name(self, connect, get_collection_name):
"""
target: test load invalid collection
method: load collection with invalid name
expected: raise exception
"""
collection_name = get_collection_name
with pytest.raises(Exception) as e:
connect.load_collection(collection_name)
@pytest.mark.tags(CaseLabel.L2)
def test_release_collection_with_invalid_collection_name(self, connect, get_collection_name):
"""
target: test release invalid collection
method: release collection with invalid name
expected: raise exception
"""
collection_name = get_collection_name
with pytest.raises(Exception) as e:
connect.release_collection(collection_name)
class TestLoadPartition:
"""
******************************************************************
    The following cases are used to test `load_partitions` and `release_partitions` functions
******************************************************************
"""
@pytest.fixture(
scope="function",
params=gen_simple_index()
)
def get_simple_index(self, request, connect):
# if str(connect._cmd("mode")) == "CPU":
# if request.param["index_type"] in index_cpu_not_support():
# pytest.skip("sq8h not support in cpu mode")
return request.param
@pytest.fixture(
scope="function",
params=gen_binary_index()
)
def get_binary_index(self, request, connect):
logging.getLogger().info(request.param)
if request.param["index_type"] in binary_support():
return request.param
else:
pytest.skip("Skip index Temporary")
@pytest.mark.tags(CaseLabel.L0)
def test_load_partition_after_index_binary(self, connect, binary_collection, get_binary_index):
"""
target: test load binary_collection, after index created
method: insert and create index, load binary_collection with correct params
expected: no error raised
"""
connect.create_partition(binary_collection, default_tag)
result = connect.insert(binary_collection, cons.default_binary_entities, partition_name=default_tag)
assert len(result.primary_keys) == default_nb
connect.flush([binary_collection])
for metric_type in binary_metrics():
logging.getLogger().info(metric_type)
get_binary_index["metric_type"] = metric_type
if get_binary_index["index_type"] == "BIN_IVF_FLAT" and metric_type in structure_metrics():
with pytest.raises(Exception) as e:
connect.create_index(binary_collection, default_binary_vec_field_name, get_binary_index)
else:
connect.create_index(binary_collection, default_binary_vec_field_name, get_binary_index)
connect.load_partitions(binary_collection, [default_tag])
@pytest.mark.tags(CaseLabel.L2)
def test_load_collection_dis_connect(self, connect, dis_connect, collection):
"""
        target: test load partitions, without connection
        method: load partitions with correct params, with a disconnected instance
        expected: load raise exception
"""
connect.create_partition(collection, default_tag)
with pytest.raises(Exception) as e:
dis_connect.load_partitions(collection, [default_tag])
@pytest.mark.tags(CaseLabel.L2)
def test_release_partition_dis_connect(self, connect, dis_connect, collection):
"""
        target: test release partitions, without connection
        method: release partitions with correct params, with a disconnected instance
        expected: release raise exception
"""
connect.create_partition(collection, default_tag)
connect.load_partitions(collection, [default_tag])
with pytest.raises(Exception) as e:
dis_connect.release_partitions(collection, [default_tag])
@pytest.mark.tags(CaseLabel.L0)
def test_load_partition_not_existed(self, connect, collection):
"""
target: test load partition for invalid scenario
        method: load a partition that does not exist
expected: raise exception and report the error
"""
partition_name = gen_unique_str(uid_load)
try:
connect.load_partitions(collection, [partition_name])
except Exception as e:
code = getattr(e, 'code', "The exception does not contain the field of code.")
assert code == 1
message = getattr(e, 'message', "The exception does not contain the field of message.")
assert message == "partitionID of partitionName:%s can not be find" % partition_name
@pytest.mark.tags(CaseLabel.L0)
def test_release_partition_not_load(self, connect, collection):
"""
target: test release partition without load
method: release partition without load
expected: raise exception
"""
connect.create_partition(collection, default_tag)
result = connect.insert(collection, cons.default_entities, partition_name=default_tag)
assert len(result.primary_keys) == default_nb
connect.flush([collection])
connect.release_partitions(collection, [default_tag])
@pytest.mark.tags(CaseLabel.L2)
def test_load_release_after_drop(self, connect, collection):
"""
target: test load and release partition after drop
method: drop partition and then load and release it
expected: raise exception
"""
connect.create_partition(collection, default_tag)
result = connect.insert(collection, cons.default_entities, partition_name=default_tag)
assert len(result.primary_keys) == default_nb
connect.flush([collection])
connect.load_partitions(collection, [default_tag])
connect.release_partitions(collection, [default_tag])
connect.drop_partition(collection, default_tag)
try:
connect.load_partitions(collection, [default_tag])
except Exception as e:
code = getattr(e, 'code', "The exception does not contain the field of code.")
assert code == 1
message = getattr(e, 'message', "The exception does not contain the field of message.")
assert message == "partitionID of partitionName:%s can not be find" % default_tag
try:
connect.release_partitions(collection, [default_tag])
except Exception as e:
code = getattr(e, 'code', "The exception does not contain the field of code.")
assert code == 1
message = getattr(e, 'message', "The exception does not contain the field of message.")
assert message == "partitionID of partitionName:%s can not be find" % default_tag
@pytest.mark.tags(CaseLabel.L0)
def test_release_partition_after_drop(self, connect, collection):
"""
        target: test load partition after the partition is dropped
        method: create partition, insert and flush, load the partition, drop it, then load it again
        expected: raise exception
"""
connect.create_partition(collection, default_tag)
result = connect.insert(collection, cons.default_entities, partition_name=default_tag)
assert len(result.primary_keys) == default_nb
connect.flush([collection])
connect.load_partitions(collection, [default_tag])
connect.drop_partition(collection, default_tag)
try:
connect.load_partitions(collection, [default_tag])
except Exception as e:
code = getattr(e, 'code', "The exception does not contain the field of code.")
assert code == 1
message = getattr(e, 'message', "The exception does not contain the field of message.")
assert message == "partitionID of partitionName:%s can not be find" % default_tag
@pytest.mark.tags(CaseLabel.L0)
def test_load_release_after_collection_drop(self, connect, collection):
"""
        target: test load and release a partition after its collection is dropped
        method: load and release the partition, drop the collection, then load and release the partition again
        expected: raise exception
"""
connect.create_partition(collection, default_tag)
result = connect.insert(collection, cons.default_entities, partition_name=default_tag)
assert len(result.primary_keys) == default_nb
connect.flush([collection])
connect.load_partitions(collection, [default_tag])
connect.release_partitions(collection, [default_tag])
connect.drop_collection(collection)
try:
connect.load_partitions(collection, [default_tag])
except Exception as e:
code = getattr(e, 'code', "The exception does not contain the field of code.")
assert code == 1
message = getattr(e, 'message', "The exception does not contain the field of message.")
assert message == "describe collection failed: can't find collection: %s" % collection
try:
connect.release_partitions(collection, [default_tag])
except Exception as e:
code = getattr(e, 'code', "The exception does not contain the field of code.")
assert code == 1
message = getattr(e, 'message', "The exception does not contain the field of message.")
assert message == "describe collection failed: can't find collection: %s" % collection
class TestLoadPartitionInvalid(object):
"""
Test load collection with invalid params
"""
@pytest.fixture(
scope="function",
params=gen_invalid_strs()
)
def get_partition_name(self, request):
yield request.param
@pytest.mark.tags(CaseLabel.L2)
def test_load_partition_with_invalid_partition_name(self, connect, collection, get_partition_name):
"""
target: test load invalid partition
method: load partition with invalid partition name
expected: raise exception
"""
partition_name = get_partition_name
with pytest.raises(Exception) as e:
connect.load_partitions(collection, [partition_name])
@pytest.mark.tags(CaseLabel.L2)
def test_release_partition_with_invalid_partition_name(self, connect, collection, get_partition_name):
"""
target: test release invalid partition
method: release partition with invalid partition name
expected: raise exception
"""
partition_name = get_partition_name
with pytest.raises(Exception) as e:
            connect.release_partitions(collection, [partition_name])
|
py | b40380814f49a7efac9fe1f6b85a647a4686fd4f | # Generated by Django 2.2.11 on 2021-07-29 07:57
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('facility', '0267_dailyround_meta'),
]
operations = [
migrations.AlterField(
model_name='historicalpatientregistration',
name='vaccine_name',
field=models.CharField(choices=[('CoviShield', 'COVISHIELD'), ('Covaxin', 'COVAXIN'), ('Sputnik', 'SPUTNIK'), ('Moderna', 'MODERNA'), ('Pfizer', 'PFIZER'), ('Janssen', 'JANSSEN'), ('Sinovac', 'SINOVAC')], default=None, max_length=15, null=True),
),
migrations.AlterField(
model_name='patientregistration',
name='vaccine_name',
field=models.CharField(choices=[('CoviShield', 'COVISHIELD'), ('Covaxin', 'COVAXIN'), ('Sputnik', 'SPUTNIK'), ('Moderna', 'MODERNA'), ('Pfizer', 'PFIZER'), ('Janssen', 'JANSSEN'), ('Sinovac', 'SINOVAC')], default=None, max_length=15, null=True),
),
]
|
py | b4038095bb34f6ff21f223bab8aa456c9ca49c72 | """
dolbyio_rest_apis.media.io
~~~~~~~~~~~~~~~
This module contains the functions to work with the IO APIs.
"""
from dolbyio_rest_apis.media.internal.http_context import MediaHttpContext
async def get_upload_url(
api_key: str,
dlb_url: str,
) -> str or None:
r"""
Start Media Input
To use the Dolby provided temporary storage is a two step process.
You start by declaring a dlb:// url that you can reference in any other Media API calls.
The response will provide a url where you can put your media.
This allows you to use the dlb:// url as a short-cut for a temporary storage location.
You'll be returned a pre-signed url you can use to PUT and upload your media file.
The temporary storage should allow you to read and write to the dlb:// locations for a period of at least 24 hours before it is removed.
See: https://docs.dolby.io/media-apis/reference/media-input-post
Args:
api_key: Your Dolby.io Media API Key.
dlb_url: The `url` should be in the form `dlb://object-key` where the object-key can be any alpha-numeric string.
The object-key is unique to your account API Key so there is no risk of collision with other users.
Raises:
        HttpRequestError: If a client error occurred.
HTTPError: If one occurred.
"""
payload = {
'url': dlb_url
}
async with MediaHttpContext() as http_context:
json_response = await http_context.requests_post(
api_key=api_key,
url='https://api.dolby.com/media/input',
payload=payload
)
if 'url' in json_response:
return json_response['url']
async def upload_file(
upload_url: str,
file_path: str,
) -> None:
r"""
Upload a file.
Args:
upload_url: URL where to upload the file to.
file_path: Local file path to upload.
Raises:
HTTPError: If one occurred.
"""
async with MediaHttpContext() as http_context:
await http_context.upload(
upload_url=upload_url,
file_path=file_path,
)
async def download_file(
api_key: str,
dlb_url: str,
file_path: str,
) -> None:
r"""
Start Media Download
You can download media you previously uploaded with /media/input or media that was generated through another Dolby Media API.
The temporary storage should allow you to read and write to the dlb:// locations for a period of at least 24 hours before it is removed.
See: https://docs.dolby.io/media-apis/reference/media-output-get
Args:
api_key: Your Dolby.io Media API Key.
dlb_url: The `url` should be in the form `dlb://object-key` where the object-key can be any alpha-numeric string.
The object-key is unique to your account API Key so there is no risk of collision with other users.
file_path: Local file path where to download the file to.
Raises:
HTTPError: If one occurred.
"""
params = {
'url': dlb_url,
}
async with MediaHttpContext() as http_context:
await http_context.download(
api_key=api_key,
url='https://api.dolby.com/media/output',
file_path=file_path,
params=params,
)
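# Illustrative end-to-end sketch (added for clarity, not part of the original
# module). The API key and file paths below are placeholders; error handling
# is omitted.
#
#     import asyncio
#     from dolbyio_rest_apis.media import io
#
#     async def roundtrip():
#         dlb_url = 'dlb://in/example.wav'
#         # 1. Declare the dlb:// location and get a pre-signed upload URL.
#         upload_url = await io.get_upload_url(api_key='YOUR_API_KEY', dlb_url=dlb_url)
#         # 2. PUT the local file to the pre-signed URL.
#         await io.upload_file(upload_url=upload_url, file_path='example.wav')
#         # 3. Later, download the media referenced by the dlb:// URL.
#         await io.download_file(api_key='YOUR_API_KEY', dlb_url=dlb_url,
#                                file_path='downloaded.wav')
#
#     asyncio.run(roundtrip())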
|
py | b4038145b582aedd0af40b49f1600cda7fe1bc3d | from django.db.models import Q
class SearchModel(object):
search_fields = ('title', 'teaser', 'story',)
@classmethod
def get_query(cls, query_string):
query = Q()
for field in cls.search_fields:
query |= Q(**{'translations__%s__icontains' % field: query_string})
return query
@classmethod
def search(cls, query_string):
query = cls.get_query(query_string)
objects = list(cls.objects.filter(query))
return objects
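# Illustrative usage sketch (added for clarity, not part of the original
# module). It assumes a hypothetical translatable model that exposes a
# 'translations' relation (django-parler style) and mixes in SearchModel:
#
#     from django.db import models
#
#     class Article(SearchModel, models.Model):
#         pass  # translated fields 'title', 'teaser', 'story' live on the translations relation
#
#     matches = Article.search('climate')  # OR of icontains lookups over search_fields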
|
py | b403831f79620ee2f82379b771f67e8bfb54c5ea | import re
import os
import srsly
from pathlib import Path
from typing import Dict, List
from collections import defaultdict, OrderedDict
from spacy.util import ensure_path
from spacy.language import Language
from spacy.pipeline import EntityRuler
from spacy.tokens import Doc, Span, DocBin
DOC_BIN_FILE = Path(os.path.dirname(os.path.realpath(__file__))) / 'resources/doc_bins.msgpack'
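# Illustrative usage sketch (added for clarity, not part of the original
# module). It assumes a spaCy v2-style pipeline and an installed model named
# 'en_core_web_sm':
#
#     import spacy
#     nlp = spacy.load('en_core_web_sm')
#     nlp.add_pipe(WaterWheel(nlp))
#     doc = nlp('The Mackenzie River drains into the Arctic Ocean.')
#     for ent in doc.ents:
#         print(ent.text, ent.label_, ent._.wikilink)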
class WaterWheel(EntityRuler):
"""WATERWHEEL (WATERloo Water and Hydrologic Entity Extractor and Linker)
is a spaCy pipeline component that detects rivers, lakes, and other
hydrologic entities, and links these entities to Wikidata. The
component is typically added to the pipeline using `nlp.add_pipe`
"""
name = 'waterwheel'
def __init__(self, nlp: Language, overwrite_ents: bool = True):
"""Initialize the class.
Parameters
----------
nlp : Language
The shared nlp object to pass the vocab to the matchers
and process phrase patterns.
overwrite_ents : bool, optional
            If True then doc.ents from preceding pipeline components
are removed/overwritten. Otherwise, previous doc.ents are kept
intact.
"""
super().__init__(nlp, phrase_matcher_attr='LOWER', overwrite_ents=overwrite_ents)
self._ent_ids = defaultdict(lambda: "WATER_BODY")
self._stop_words = set()
self._wikidata = {}
self._doc_bins = {}
# if a match without a qualifier can be of multiple potential types then
# this is used to set priority.
        self._pq = {label: idx for idx, label in
                    enumerate(['RIVER', 'LAKE', 'DRAINAGEBASIN', 'WATERCOURSE', 'WATER_BODY'])}
self.from_disk(DOC_BIN_FILE)
Span.set_extension('wikilink', default=None, force=True)
def __call__(self, doc: Doc):
"""Find matches in document and add them as entities
Parameters
----------
doc : Doc
The Doc object in the pipeline.
Returns
-------
doc : Doc
The Doc with added entities, if available.
"""
if self.overwrite:
doc.ents = []
matches = list(self.phrase_matcher(doc))
matches = sorted([(start, end, self._ent_ids[m_id]) for m_id, start, end in matches if start != end])
current_range = set()
match_groups = []
for start, end, label in matches:
if any(t.ent_type for t in doc[start:end]) and not self.overwrite:
continue
match_str = str(doc[start:end])
            is_improper_noun = re.search(r'^[\sA-Z]+$', match_str) or re.search(r'^[\sa-z]+$', match_str)
is_improper_noun = is_improper_noun is not None
is_stop_word = match_str.lower() in self._stop_words
q_before = str(doc[start-1:start]).lower() == label.lower()
q_after = str(doc[end:end+1]).lower() == label.lower()
end += q_after
            # precedence is given to a qualifier that follows the match over one that precedes it.
start -= q_before and not q_after
if not (q_before or q_after) and (is_stop_word or is_improper_noun):
                # skip stop_words/improper nouns without qualifiers.
continue
if start in current_range or end - 1 in current_range:
current_range.update(range(start, end))
else:
current_range = set(range(start, end))
match_groups.append([])
match_groups[-1].append({
'match_str': match_str,
'start': start,
'end': end,
'label': label,
'is_qualified': q_before or q_after,
'is_uncommon': not is_stop_word,
'is_proper_noun': not is_improper_noun,
'length': end - start,
'priority': self._pq[label]
})
final_matches = self._filter_matches(match_groups)
for match in final_matches:
span = Span(doc, match['start'], match['end'], label = match['label'])
span._.set(
'wikilink',
'https://www.wikidata.org/wiki/' + self._wikidata[match['label']][match['match_str'].lower()]
)
try:
doc.ents = list(doc.ents) + [span]
except ValueError:
# skip overlapping or intersecting matches.
continue
return doc
def __len__(self):
"""The number of all water_bodies."""
n_phrases = 0
for key in self._doc_bins:
n_phrases += len(self._doc_bins[key])
return n_phrases
def _filter_matches(self, match_groups: List):
"""Filter matches according to following procedure:
In case of overlap, give precedence to uncommon words over common words.
Example: In 'The Lake Ontario', 'Lake Ontario' is chosen over 'The Lake'.
In case of multiple potential match types, decide according to qualifier.
Example: In 'Mississippi River', type 'RIVER' is set over 'LAKE'.
In case of overlap, give precedence to maximal span.
Example: In 'Nile River', 'Nile River' is matched over just 'Nile'.
In case of multiple potential match types and no qualifier,
        decide according to default type priority.
        Example: In 'Mississippi is something', type 'RIVER' is set over 'LAKE'.
In case of overlap, give precedence to match with proper noun.
Example: In 'superior lake Ontario', 'lake Ontario' is chosen over
'superior lake'.
In case of overlap, consume match from left to right and ignore leftovers.
Example: In 'Great Slave Lake Ontario', 'Great Slave Lake' is chosen
and 'Lake Ontario' is ignored/skipped.
Parameters
----------
match_groups : List
            A list of match groups, one group per overlapping region.
Returns
-------
final_matches : List
List of non overlapping matches filtered by the procedure.
"""
final_matches = []
seen = set()
for group in match_groups:
ordered_lists = [[i for i in range(len(group))]]
new_ordered_lists = []
for lst in ordered_lists:
new_ordered_lists.extend(self._separator(group, lst, 'is_uncommon'))
ordered_lists = new_ordered_lists
new_ordered_lists = []
for lst in ordered_lists:
new_ordered_lists.extend(self._separator(group, lst, 'is_qualified'))
ordered_lists = new_ordered_lists
new_ordered_lists = []
for lst in ordered_lists:
new_ordered_lists.append(self._sorter(group, lst, 'length', reverse = True))
ordered_lists = new_ordered_lists
new_ordered_lists = []
for lst in ordered_lists:
new_ordered_lists.append(self._sorter(group, lst, 'priority'))
ordered_lists = new_ordered_lists
new_ordered_lists = []
for lst in ordered_lists:
new_ordered_lists.extend(self._separator(group, lst, 'is_proper_noun'))
ordered_lists = new_ordered_lists
new_ordered_lists = []
for lst in ordered_lists:
new_ordered_lists.append(self._sorter(group, lst, 'start'))
ordered_lists = new_ordered_lists
new_ordered_lists = []
for lst in ordered_lists:
for index in lst:
if group[index]['start'] in seen and group[index]['end'] - 1 in seen:
continue
seen.update(range(group[index]['start'], group[index]['end']))
final_matches.append(group[index])
return final_matches
def _separator(self, group: List, lst: List, attr: str):
"""Separates a list into two lists according to
bool value of match attr in the list.
Parameters
----------
group : List
A list of overlapping matches.
lst : List
            An ordering of matches in group.
attr : str
Attribute to check for in matches.
Returns
-------
ret_list : List
            A list of the non-empty sub-lists, in order:
            1) matches with attr value True,
            2) matches with attr value False.
"""
list_a = [i for i in lst if group[i][attr]]
list_b = [i for i in lst if not group[i][attr]]
ret_list = []
if len(list_a) > 0:
ret_list.append(list_a)
if len(list_b) > 0:
ret_list.append(list_b)
        return ret_list
def _sorter(self, group: List, lst: List, attr: str, reverse: bool = False):
"""Separates a list into two lists according to
bool value of match attr in the list.
Parameters
----------
group : List
A list of overlapping matches.
lst : List
            An ordering of matches in group.
attr : str
Attribute to check for in matches.
reverse : bool, optional
True for descending sort.
False (by default) for ascending sort.
Returns
-------
ret_list : List
            A list of indices sorted according to attr value.
"""
return sorted(lst, key = lambda x: group[x][attr], reverse = reverse)
def to_bytes(self, **kwargs):
"""Serialize waterwheel data to a bytestring.
Returns
-------
serial : bytes
The serialized bytes data.
"""
doc_bins_bytes = {key: bin.to_bytes() for key, bin in self._doc_bins.items()}
serial = OrderedDict(
(
('stop_words', list(self._stop_words)),
('vocab', self._ent_ids),
('wikidata', self._wikidata),
('doc_bins', doc_bins_bytes),
)
)
return srsly.msgpack_dumps(serial)
def from_bytes(self, serial: bytes, **kwargs):
"""Load waterwheel from a bytestring.
Parameters
----------
serial : bytes
The serialized bytes data.
Returns
-------
self : WaterWheel
The loaded WaterWheel object.
"""
cfg = srsly.msgpack_loads(serial)
if isinstance(cfg, dict):
vocab = cfg.get('vocab', {})
for hash, label in vocab.items():
self._ent_ids[int(hash)] = label
self._stop_words = cfg.get('stop_words', [])
self._stop_words = set(self._stop_words)
self._wikidata = cfg.get('wikidata', {})
doc_bins_bytes = cfg.get('doc_bins', {})
self._doc_bins = {key: DocBin().from_bytes(value) for key, value in doc_bins_bytes.items()}
phrases_bin = {key: list(bin.get_docs(self.nlp.vocab)) for key, bin in self._doc_bins.items()}
for key, phrases in phrases_bin.items():
self.phrase_matcher.add(key.upper(), phrases)
return self
def to_disk(self, path, **kwargs):
"""Serialize waterwheel data to a file.
Parameters
----------
path : Path
path to file.
"""
path = ensure_path(path)
serial = self.to_bytes()
srsly.write_msgpack(path, serial)
def from_disk(self, path, **kwargs):
"""Load waterwheel from a file. Expects file to contain
a bytestring of the following dict format:
{
'stop_words': {},
'vocab': {},
'wikidata': {},
'doc_bins': doc_bins_bytes,
}
Parameters
----------
path : Path
path to the serialized file.
Returns
-------
self : WaterWheel
The loaded WaterWheel object.
"""
path = ensure_path(path)
with open(path, 'rb') as file:
serial = file.read()
self.from_bytes(serial)
return self |
py | b403836be1f8f07d7fc70819b70aba987f75d4f6 | # -*- coding: UTF-8 -*-
'''
@author: 'FenG_Vnc'
@date: 2017-08-08 10:42
@file: error.py
'''
from __future__ import unicode_literals
class Thirdy_OAuthException(RuntimeError):
def __init__(self, message, type=None, data=None):
self.message = message
self.type = type
self.data = data |
py | b403843b5d39f2c24373d965064e9ec958f9c558 | """
Django settings for ilovemyself_fitness_30427 project.
Generated by 'django-admin startproject' using Django 2.2.2.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
import environ
import logging
from modules.manifest import get_modules
env = environ.Env()
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = env.bool("DEBUG", default=False)
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = env.str("SECRET_KEY")
ALLOWED_HOSTS = env.list("HOST", default=["*"])
SITE_ID = 1
SECURE_PROXY_SSL_HEADER = ("HTTP_X_FORWARDED_PROTO", "https")
SECURE_SSL_REDIRECT = env.bool("SECURE_REDIRECT", default=False)
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.sites'
]
LOCAL_APPS = [
'home',
'users.apps.UsersConfig',
]
THIRD_PARTY_APPS = [
'rest_framework',
'rest_framework.authtoken',
'rest_auth',
'rest_auth.registration',
'bootstrap4',
'allauth',
'allauth.account',
'allauth.socialaccount',
'allauth.socialaccount.providers.google',
'django_extensions',
'drf_yasg',
'storages',
]
MODULES_APPS = get_modules()
INSTALLED_APPS += LOCAL_APPS + THIRD_PARTY_APPS + MODULES_APPS
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'ilovemyself_fitness_30427.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'web_build')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'ilovemyself_fitness_30427.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
if env.str("DATABASE_URL", default=None):
DATABASES = {
'default': env.db()
}
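# Note (added for clarity, not part of the original settings): django-environ's
# env.db() parses the DATABASE_URL environment variable, e.g.
#   DATABASE_URL=postgres://user:password@localhost:5432/mydb
# When that variable is set, it replaces the sqlite3 default defined above.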
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
MIDDLEWARE += ['whitenoise.middleware.WhiteNoiseMiddleware']
AUTHENTICATION_BACKENDS = (
'django.contrib.auth.backends.ModelBackend',
'allauth.account.auth_backends.AuthenticationBackend'
)
STATIC_ROOT = os.path.join(BASE_DIR, "staticfiles")
STATICFILES_DIRS = [os.path.join(BASE_DIR, 'static'), os.path.join(BASE_DIR, 'web_build/static')]
STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'
# allauth / users
ACCOUNT_EMAIL_REQUIRED = True
ACCOUNT_AUTHENTICATION_METHOD = 'email'
ACCOUNT_USERNAME_REQUIRED = False
ACCOUNT_EMAIL_VERIFICATION = "optional"
ACCOUNT_CONFIRM_EMAIL_ON_GET = True
ACCOUNT_LOGIN_ON_EMAIL_CONFIRMATION = True
ACCOUNT_UNIQUE_EMAIL = True
LOGIN_REDIRECT_URL = "users:redirect"
ACCOUNT_ADAPTER = "users.adapters.AccountAdapter"
SOCIALACCOUNT_ADAPTER = "users.adapters.SocialAccountAdapter"
ACCOUNT_ALLOW_REGISTRATION = env.bool("ACCOUNT_ALLOW_REGISTRATION", True)
SOCIALACCOUNT_ALLOW_REGISTRATION = env.bool("SOCIALACCOUNT_ALLOW_REGISTRATION", True)
REST_AUTH_SERIALIZERS = {
# Replace password reset serializer to fix 500 error
"PASSWORD_RESET_SERIALIZER": "home.api.v1.serializers.PasswordSerializer",
}
REST_AUTH_REGISTER_SERIALIZERS = {
# Use custom serializer that has no username and matches web signup
"REGISTER_SERIALIZER": "home.api.v1.serializers.SignupSerializer",
}
# Custom user model
AUTH_USER_MODEL = "users.User"
EMAIL_HOST = env.str("EMAIL_HOST", "smtp.sendgrid.net")
EMAIL_HOST_USER = env.str("SENDGRID_USERNAME", "")
EMAIL_HOST_PASSWORD = env.str("SENDGRID_PASSWORD", "")
EMAIL_PORT = 587
EMAIL_USE_TLS = True
# AWS S3 config
AWS_ACCESS_KEY_ID = env.str("AWS_ACCESS_KEY_ID", "")
AWS_SECRET_ACCESS_KEY = env.str("AWS_SECRET_ACCESS_KEY", "")
AWS_STORAGE_BUCKET_NAME = env.str("AWS_STORAGE_BUCKET_NAME", "")
AWS_STORAGE_REGION = env.str("AWS_STORAGE_REGION", "")
USE_S3 = (
AWS_ACCESS_KEY_ID and
AWS_SECRET_ACCESS_KEY and
AWS_STORAGE_BUCKET_NAME and
AWS_STORAGE_REGION
)
if USE_S3:
AWS_S3_CUSTOM_DOMAIN = env.str("AWS_S3_CUSTOM_DOMAIN", "")
AWS_S3_OBJECT_PARAMETERS = {"CacheControl": "max-age=86400"}
AWS_DEFAULT_ACL = env.str("AWS_DEFAULT_ACL", "public-read")
AWS_MEDIA_LOCATION = env.str("AWS_MEDIA_LOCATION", "media")
AWS_AUTO_CREATE_BUCKET = env.bool("AWS_AUTO_CREATE_BUCKET", True)
DEFAULT_FILE_STORAGE = env.str(
"DEFAULT_FILE_STORAGE", "home.storage_backends.MediaStorage"
)
MEDIA_URL = '/mediafiles/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'mediafiles')
# Swagger settings for api docs
SWAGGER_SETTINGS = {
"DEFAULT_INFO": f"{ROOT_URLCONF}.api_info",
}
if DEBUG or not (EMAIL_HOST_USER and EMAIL_HOST_PASSWORD):
# output email to console instead of sending
if not DEBUG:
logging.warning("You should setup `SENDGRID_USERNAME` and `SENDGRID_PASSWORD` env vars to send emails.")
EMAIL_BACKEND = "django.core.mail.backends.console.EmailBackend"
|
py | b403846f09baa72f19b3af3ced0a5f98d1fe5436 | #!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""A thin wrapper around datastore query RPC calls.
This provides wrappers around the internal only datastore_pb library and is
designed to be the lowest-level API to be used by all Python datastore client
libraries for executing queries. It provides a layer of protection so the actual
RPC syntax can change without affecting client libraries.
Any class, function, field or argument starting with an '_' is for INTERNAL use
only and should not be used by developers!
"""
__all__ = ['Batch',
'Batcher',
'CompositeFilter',
'CompositeOrder',
'CorrelationFilter',
'Cursor',
'FetchOptions',
'FilterPredicate',
'Order',
'PropertyFilter',
'PropertyOrder',
'Query',
'QueryOptions',
'ResultsIterator',
'make_filter',
'apply_query',
'inject_results',
]
import base64
import pickle
import collections
from google.appengine.datastore import entity_pb
from google.appengine.api import datastore_errors
from google.appengine.api import datastore_types
from google.appengine.datastore import datastore_index
from google.appengine.datastore import datastore_pb
from google.appengine.datastore import datastore_rpc
class _BaseComponent(object):
"""A base class for query components.
Currently just implements basic == and != functions.
"""
def __eq__(self, other):
if self.__class__ is not other.__class__:
return NotImplemented
return self is other or self.__dict__ == other.__dict__
def __ne__(self, other):
equal = self.__eq__(other)
if equal is NotImplemented:
return equal
return not equal
def make_filter(name, op, values):
"""Constructs a FilterPredicate from the given name, op and values.
Args:
name: A non-empty string, the name of the property to filter.
op: One of PropertyFilter._OPERATORS.keys(), the operator to use.
values: A supported value, the value to compare against.
Returns:
if values is a list, a CompositeFilter that uses AND to combine all
values, otherwise a PropertyFilter for the single value.
Raises:
datastore_errors.BadPropertyError: if the property name is invalid.
datastore_errors.BadValueError: if the property did not validate correctly
or the value was an empty list.
Other exception types (like OverflowError): if the property value does not
meet type-specific criteria.
"""
datastore_types.ValidateProperty(name, values, read_only=True)
properties = datastore_types.ToPropertyPb(name, values)
if isinstance(properties, list):
filters = [PropertyFilter(op, prop) for prop in properties]
return CompositeFilter(CompositeFilter.AND, filters)
else:
return PropertyFilter(op, properties)
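# Illustrative sketch (added for clarity, not part of the original module);
# the property names and values are made up for the example:
#
#     single = make_filter('height', '>=', 100)            # one PropertyFilter
#     multi = make_filter('tag', '=', ['blue', 'round'])    # CompositeFilter(AND, [...])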
def _make_key_value_map(entity, property_names):
"""Extracts key values from the given entity.
Args:
entity: The entity_pb.EntityProto to extract values from.
property_names: The names of the properties from which to extract values.
Returns:
A dict mapping property names to a lists of key values.
"""
value_map = dict((name, []) for name in property_names)
for prop in entity.property_list():
if prop.name() in value_map:
value_map[prop.name()].append(
datastore_types.PropertyValueToKeyValue(prop.value()))
if datastore_types.KEY_SPECIAL_PROPERTY in value_map:
value_map[datastore_types.KEY_SPECIAL_PROPERTY] = [
datastore_types.ReferenceToKeyValue(entity.key())]
return value_map
class _PropertyComponent(_BaseComponent):
"""A component that operates on a specific set of properties."""
def _get_prop_names(self):
"""Returns a set of property names used by the filter."""
raise NotImplementedError
class FilterPredicate(_PropertyComponent):
"""An abstract base class for all query filters.
All sub-classes must be immutable as these are often stored without creating a
defensive copy.
"""
def __call__(self, entity):
"""Applies the filter predicate to the given entity.
Args:
entity: the datastore_pb.EntityProto to test.
Returns:
True if the given entity matches the filter, False otherwise.
"""
return self._apply(_make_key_value_map(entity, self._get_prop_names()))
def _apply(self, key_value_map):
"""Apply the given component to the comparable value map.
A filter matches a list of values if at least one value in the list
matches the filter, for example:
'prop: [1, 2]' matches both 'prop = 1' and 'prop = 2' but not 'prop = 3'
Note: the values are actually represented as tuples whose first item
encodes the type; see datastore_types.PropertyValueToKeyValue().
Args:
key_value_map: A dict mapping property names to a list of
comparable values.
Return:
A boolean indicating if the given map matches the filter.
"""
raise NotImplementedError
def _prune(self, key_value_map):
"""Removes values from the given map that do not match the filter.
When doing a scan in the datastore, only index values that match the filters
are seen. When multiple values that point to the same entity are seen, the
entity only appears where the first value is found. This function removes
all values that don't match the query so that the first value in the map
is the same one the datastore would see first.
Args:
key_value_map: the comparable value map from which to remove
values. Does not need to contain values for all filtered properties.
Returns:
A value that evaluates to False if every value in a single list was
completely removed. This effectively applies the filter but is less
efficient than _apply().
"""
raise NotImplementedError
def _to_pb(self):
"""Internal only function to generate a pb."""
raise NotImplementedError(
'This filter only supports in memory operations (%r)' % self)
def _to_pbs(self):
"""Internal only function to generate a list of pbs."""
return [self._to_pb()]
class _SinglePropertyFilter(FilterPredicate):
"""Base class for a filter that operates on a single property."""
def _get_prop_name(self):
"""Returns the name of the property being filtered."""
raise NotImplementedError
def _apply_to_value(self, value):
"""Apply the filter to the given value.
Args:
value: The comparable value to check.
Returns:
A boolean indicating if the given value matches the filter.
"""
raise NotImplementedError
def _get_prop_names(self):
return set([self._get_prop_name()])
def _apply(self, value_map):
for other_value in value_map[self._get_prop_name()]:
if self._apply_to_value(other_value):
return True
return False
def _prune(self, value_map):
if self._get_prop_name() not in value_map:
return True
values = [value for value in value_map[self._get_prop_name()]
if self._apply_to_value(value)]
value_map[self._get_prop_name()] = values
return bool(values)
class PropertyFilter(_SinglePropertyFilter):
"""An immutable filter predicate that constrains a single property."""
_OPERATORS = {
'<': datastore_pb.Query_Filter.LESS_THAN,
'<=': datastore_pb.Query_Filter.LESS_THAN_OR_EQUAL,
'>': datastore_pb.Query_Filter.GREATER_THAN,
'>=': datastore_pb.Query_Filter.GREATER_THAN_OR_EQUAL,
'=': datastore_pb.Query_Filter.EQUAL,
}
_OPERATORS_INVERSE = dict((value, key)
for key, value in _OPERATORS.iteritems())
_OPERATORS_TO_PYTHON_OPERATOR = {
datastore_pb.Query_Filter.LESS_THAN: '<',
datastore_pb.Query_Filter.LESS_THAN_OR_EQUAL: '<=',
datastore_pb.Query_Filter.GREATER_THAN: '>',
datastore_pb.Query_Filter.GREATER_THAN_OR_EQUAL: '>=',
datastore_pb.Query_Filter.EQUAL: '==',
}
_INEQUALITY_OPERATORS = frozenset(['<', '<=', '>', '>='])
_INEQUALITY_OPERATORS_ENUM = frozenset([
datastore_pb.Query_Filter.LESS_THAN,
datastore_pb.Query_Filter.LESS_THAN_OR_EQUAL,
datastore_pb.Query_Filter.GREATER_THAN,
datastore_pb.Query_Filter.GREATER_THAN_OR_EQUAL,
])
_UPPERBOUND_INEQUALITY_OPERATORS = frozenset(['<', '<='])
def __init__(self, op, value):
"""Constructor.
Args:
op: A string representing the operator to use.
value: A entity_pb.Property, the property and value to compare against.
Raises:
datastore_errors.BadArgumentError if op has an unsupported value or value
is not an entity_pb.Property.
"""
if op not in self._OPERATORS:
raise datastore_errors.BadArgumentError('unknown operator: %r' % (op,))
if not isinstance(value, entity_pb.Property):
raise datastore_errors.BadArgumentError(
'value argument should be entity_pb.Property (%r)' % (value,))
super(PropertyFilter, self).__init__()
self._filter = datastore_pb.Query_Filter()
self._filter.set_op(self._OPERATORS[op])
self._filter.add_property().CopyFrom(value)
@property
def op(self):
raw_op = self._filter.op()
return self._OPERATORS_INVERSE.get(raw_op, str(raw_op))
@property
def value(self):
return self._filter.property(0)
def __repr__(self):
prop = self.value
name = prop.name()
value = datastore_types.FromPropertyPb(prop)
return '%s(%r, <%r, %r>)' % (self.__class__.__name__, self.op, name, value)
def _get_prop_name(self):
return self._filter.property(0).name()
def _apply_to_value(self, value):
if not hasattr(self, '_cmp_value'):
self._cmp_value = datastore_types.PropertyValueToKeyValue(
self._filter.property(0).value())
self._condition = ('value %s self._cmp_value' %
self._OPERATORS_TO_PYTHON_OPERATOR[self._filter.op()])
return eval(self._condition)
def _has_inequality(self):
"""Returns True if the filter predicate contains inequalities filters."""
return self._filter.op() in self._INEQUALITY_OPERATORS_ENUM
@classmethod
def _from_pb(cls, filter_pb):
self = cls.__new__(cls)
self._filter = filter_pb
return self
def _to_pb(self):
"""Returns the internal only pb representation."""
return self._filter
def __getstate__(self):
raise pickle.PicklingError(
'Pickling of datastore_query.PropertyFilter is unsupported.')
def __eq__(self, other):
if self.__class__ is not other.__class__:
if other.__class__ is _PropertyRangeFilter:
return [self._filter] == other._to_pbs()
return NotImplemented
return self._filter == other._filter
class _PropertyRangeFilter(_SinglePropertyFilter):
"""A filter predicate that represents a range of values.
Since we allow multi-valued properties there is a large difference between
"x > 0 AND x < 1" and "0 < x < 1." An entity with x = [-1, 2] will match the
first but not the second.
Since the datastore only allows a single inequality filter, multiple
in-equality filters are merged into a single range filter in the
datastore (unlike equality filters). This class is used by
datastore_query.CompositeFilter to implement the same logic.
"""
_start_key_value = None
_end_key_value = None
@datastore_rpc._positional(1)
def __init__(self, start=None, start_incl=True, end=None, end_incl=True):
"""Constructs a range filter using start and end properties.
Args:
start: A entity_pb.Property to use as a lower bound or None to indicate
no lower bound.
start_incl: A boolean that indicates if the lower bound is inclusive.
end: A entity_pb.Property to use as an upper bound or None to indicate
no upper bound.
end_incl: A boolean that indicates if the upper bound is inclusive.
"""
if start is not None and not isinstance(start, entity_pb.Property):
raise datastore_errors.BadArgumentError(
'start argument should be entity_pb.Property (%r)' % (start,))
if end is not None and not isinstance(end, entity_pb.Property):
raise datastore_errors.BadArgumentError(
          'end argument should be entity_pb.Property (%r)' % (end,))
if start and end and start.name() != end.name():
raise datastore_errors.BadArgumentError(
'start and end arguments must be on the same property (%s != %s)' %
(start.name(), end.name()))
if not start and not end:
raise datastore_errors.BadArgumentError(
'Unbounded ranges are not supported.')
super(_PropertyRangeFilter, self).__init__()
self._start = start
self._start_incl = start_incl
self._end = end
self._end_incl = end_incl
@classmethod
def from_property_filter(cls, prop_filter):
op = prop_filter._filter.op()
if op == datastore_pb.Query_Filter.GREATER_THAN:
return cls(start=prop_filter._filter.property(0), start_incl=False)
elif op == datastore_pb.Query_Filter.GREATER_THAN_OR_EQUAL:
return cls(start=prop_filter._filter.property(0))
elif op == datastore_pb.Query_Filter.LESS_THAN:
return cls(end=prop_filter._filter.property(0), end_incl=False)
elif op == datastore_pb.Query_Filter.LESS_THAN_OR_EQUAL:
return cls(end=prop_filter._filter.property(0))
else:
raise datastore_errors.BadArgumentError(
'Unsupported operator (%s)' % (op,))
def intersect(self, other):
"""Returns a filter representing the intersection of self and other."""
if isinstance(other, PropertyFilter):
other = self.from_property_filter(other)
elif not isinstance(other, _PropertyRangeFilter):
raise datastore_errors.BadArgumentError(
'other argument should be a _PropertyRangeFilter (%r)' % (other,))
if other._get_prop_name() != self._get_prop_name():
raise datastore_errors.BadArgumentError(
'other argument must be on the same property (%s != %s)' %
(other._get_prop_name(), self._get_prop_name()))
start_source = None
if other._start:
if self._start:
result = cmp(self._get_start_key_value(), other._get_start_key_value())
if result == 0:
result = cmp(other._start_incl, self._start_incl)
if result > 0:
start_source = self
elif result < 0:
start_source = other
else:
start_source = other
elif self._start:
start_source = self
end_source = None
if other._end:
if self._end:
result = cmp(self._get_end_key_value(), other._get_end_key_value())
if result == 0:
result = cmp(self._end_incl, other._end_incl)
if result < 0:
end_source = self
elif result > 0:
end_source = other
else:
end_source = other
elif self._end:
end_source = self
if start_source:
if end_source in (start_source, None):
return start_source
result = _PropertyRangeFilter(start=start_source._start,
start_incl=start_source._start_incl,
end=end_source._end,
end_incl=end_source._end_incl)
result._start_key_value = start_source._start_key_value
result._end_key_value = end_source._end_key_value
return result
else:
return end_source or self
def _get_start_key_value(self):
if self._start_key_value is None:
self._start_key_value = datastore_types.PropertyValueToKeyValue(
self._start.value())
return self._start_key_value
def _get_end_key_value(self):
if self._end_key_value is None:
self._end_key_value = datastore_types.PropertyValueToKeyValue(
self._end.value())
return self._end_key_value
def _apply_to_value(self, value):
"""Apply the filter to the given value.
Args:
value: The comparable value to check.
Returns:
A boolean indicating if the given value matches the filter.
"""
if self._start:
result = cmp(self._get_start_key_value(), value)
if result > 0 or (result == 0 and not self._start_incl):
return False
if self._end:
result = cmp(self._get_end_key_value(), value)
if result < 0 or (result == 0 and not self._end_incl):
return False
return True
def _get_prop_name(self):
if self._start:
return self._start.name()
if self._end:
return self._end.name()
assert False
def _to_pbs(self):
pbs = []
if self._start:
if self._start_incl:
op = datastore_pb.Query_Filter.GREATER_THAN_OR_EQUAL
else:
op = datastore_pb.Query_Filter.GREATER_THAN
pb = datastore_pb.Query_Filter()
pb.set_op(op)
pb.add_property().CopyFrom(self._start)
pbs.append(pb)
if self._end:
if self._end_incl:
op = datastore_pb.Query_Filter.LESS_THAN_OR_EQUAL
else:
op = datastore_pb.Query_Filter.LESS_THAN
pb = datastore_pb.Query_Filter()
pb.set_op(op)
pb.add_property().CopyFrom(self._end)
pbs.append(pb)
return pbs
def __getstate__(self):
raise pickle.PicklingError(
'Pickling of %r is unsupported.' % self)
def __eq__(self, other):
if self.__class__ is not other.__class__:
return NotImplemented
return (self._start == other._start and
self._end == other._end and
(self._start_incl == other._start_incl or self._start is None) and
(self._end_incl == other._end_incl or self._end is None))
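# Illustrative sketch (added for clarity, not part of the original module):
# when two inequality filters on the same property are combined under AND,
# CompositeFilter merges them into a single _PropertyRangeFilter, so
# "x > 0 AND x <= 5" is evaluated as one range rather than two filters:
#
#     lower = make_filter('x', '>', 0)
#     upper = make_filter('x', '<=', 5)
#     merged = CompositeFilter(CompositeFilter.AND, [lower, upper])
#     # merged.filters now contains one _PropertyRangeFilter covering (0, 5]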
class _PropertyExistsFilter(FilterPredicate):
"""A FilterPredicate that matches entities containing specific properties.
Only works as an in-memory filter. Used internally to filter out entities
that don't have all properties in a given Order.
"""
def __init__(self, names):
super(_PropertyExistsFilter, self).__init__()
self._names = frozenset(names)
def _apply(self, value_map):
for name in self._names:
if not value_map.get(name):
return False
return True
def _get_prop_names(self):
return self._names
def _prune(self, _):
raise NotImplementedError
def __getstate__(self):
raise pickle.PicklingError(
'Pickling of %r is unsupported.' % self)
class CorrelationFilter(FilterPredicate):
"""A filter that isolates correlated values and applies a sub-filter on them.
This filter assumes that every property used by the sub-filter should be
grouped before being passed to the sub-filter. The default grouping puts
each value in its own group. Consider:
e = {a: [1, 2], b: [2, 1, 3], c: 4}
A correlation filter with a sub-filter that operates on (a, b) will be tested
against the following 3 sets of values:
{a: 1, b: 2}
{a: 2, b: 1}
{b: 3}
In this case CorrelationFilter('a = 2 AND b = 2') won't match this entity but
CorrelationFilter('a = 2 AND b = 1') will. To apply an uncorrelated filter on
c, the filter must be applied in parallel to the correlation filter. For
example:
CompositeFilter(AND, [CorrelationFilter('a = 2 AND b = 1'), 'c = 3'])
If 'c = 3' was included in the correlation filter, c would be grouped as well.
This would result in the following values:
{a: 1, b: 2, c: 3}
{a: 2, b: 1}
{b: 3}
If any set of correlated values match the sub-filter then the entity matches
the correlation filter.
"""
def __init__(self, subfilter):
"""Constructor.
Args:
subfilter: A FilterPredicate to apply to the correlated values
"""
self._subfilter = subfilter
@property
def subfilter(self):
return self._subfilter
def __repr__(self):
return '%s(%r)' % (self.__class__.__name__, self.subfilter)
def _apply(self, value_map):
base_map = dict((prop, []) for prop in self._get_prop_names())
value_maps = []
for prop in base_map:
grouped = self._group_values(prop, value_map[prop])
while len(value_maps) < len(grouped):
value_maps.append(base_map.copy())
for value, map in zip(grouped, value_maps):
map[prop] = value
return self._apply_correlated(value_maps)
def _apply_correlated(self, value_maps):
"""Applies sub-filter to the correlated value maps.
The default implementation matches when any value_map in value_maps
matches the sub-filter.
Args:
value_maps: A list of correlated value_maps.
Returns:
True if any of the correlated value maps matches the sub-filter.
"""
for map in value_maps:
if self._subfilter._apply(map):
return True
return False
def _group_values(self, prop, values):
"""A function that groups the given values.
Override this function to introduce custom grouping logic. The default
implementation assumes each value belongs in its own group.
Args:
prop: The name of the property whose values are being grouped.
values: A list of opaque values.
Returns:
A list of lists of grouped values.
"""
return [[value] for value in values]
def _get_prop_names(self):
return self._subfilter._get_prop_names()
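# Illustrative sketch (editor's addition, not part of the original module): the
# pairing behaviour described in the CorrelationFilter docstring can be
# reproduced with plain Python. Given property values {'a': [1, 2],
# 'b': [2, 1, 3]}, the default grouping pairs values by position and pads
# missing properties with an empty list:
#
#   value_map = {'a': [1, 2], 'b': [2, 1, 3]}
#   width = max(len(v) for v in value_map.itervalues())
#   correlated = [dict((prop, values[i:i + 1])
#                      for prop, values in value_map.iteritems())
#                 for i in range(width)]
#   # correlated == [{'a': [1], 'b': [2]}, {'a': [2], 'b': [1]},
#   #                {'a': [], 'b': [3]}]
#
# CorrelationFilter._apply() matches when any one of these correlated maps
# satisfies the wrapped sub-filter.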
class CompositeFilter(FilterPredicate):
"""An immutable filter predicate that combines other predicates.
This class proactively merges sub-filters that are combined using the same
operator. For example:
CompositeFilter(AND, [f1, f2, CompositeFilter(AND, [f3, f4]), f5, f6])
is equivalent to:
CompositeFilter(AND, [f1, f2, f3, f4, f5, f6])
Currently filters can only be combined using an AND operator.
"""
AND = 'and'
_OPERATORS = frozenset([AND])
def __init__(self, op, filters):
"""Constructor.
Args:
op: The operator to use to combine the given filters
filters: A list of one or more filters to combine
Raises:
datastore_errors.BadArgumentError if op is not in CompositeFilter._OPERATORS
or filters is not a non-empty list containing only FilterPredicates.
"""
if not op in self._OPERATORS:
raise datastore_errors.BadArgumentError('unknown operator (%s)' % (op,))
if not filters or not isinstance(filters, (list, tuple)):
raise datastore_errors.BadArgumentError(
'filters argument should be a non-empty list (%r)' % (filters,))
super(CompositeFilter, self).__init__()
self._op = op
flattened = []
for f in filters:
if isinstance(f, CompositeFilter) and f._op == self._op:
flattened.extend(f._filters)
elif isinstance(f, FilterPredicate):
flattened.append(f)
else:
raise datastore_errors.BadArgumentError(
'filters argument must be a list of FilterPredicates, found (%r)' %
(f,))
if op == self.AND:
filters = flattened
flattened = []
ineq_map = {}
for f in filters:
if (isinstance(f, _PropertyRangeFilter) or
(isinstance(f, PropertyFilter) and f._has_inequality())):
name = f._get_prop_name()
index = ineq_map.get(name)
if index is not None:
range_filter = flattened[index]
flattened[index] = range_filter.intersect(f)
else:
if isinstance(f, PropertyFilter):
range_filter = _PropertyRangeFilter.from_property_filter(f)
else:
range_filter = f
ineq_map[name] = len(flattened)
flattened.append(range_filter)
else:
flattened.append(f)
self._filters = tuple(flattened)
@property
def op(self):
return self._op
@property
def filters(self):
return self._filters
def __repr__(self):
op = self.op
if op == self.AND:
op = 'AND'
else:
op = str(op)
return '%s(%s, %r)' % (self.__class__.__name__, op, list(self.filters))
def _get_prop_names(self):
names = set()
for f in self._filters:
names |= f._get_prop_names()
return names
def _apply(self, value_map):
if self._op == self.AND:
for f in self._filters:
if not f._apply(value_map):
return False
return True
raise NotImplementedError
def _prune(self, value_map):
if self._op == self.AND:
matches = collections.defaultdict(set)
for f in self._filters:
props = f._get_prop_names()
local_value_map = dict((k, v) for k, v in value_map.iteritems()
if k in props)
if not f._prune(local_value_map):
return False
for (prop, values) in local_value_map.iteritems():
matches[prop].update(values)
for prop, value_set in matches.iteritems():
value_map[prop] = sorted(value_set)
return True
raise NotImplementedError
def _to_pbs(self):
"""Returns the internal only pb representation."""
pbs = []
for f in self._filters:
pbs.extend(f._to_pbs())
return pbs
def __eq__(self, other):
if self.__class__ is other.__class__:
return super(CompositeFilter, self).__eq__(other)
if len(self._filters) == 1:
result = self._filters[0].__eq__(other)
if result is NotImplemented and hasattr(other, '__eq__'):
return other.__eq__(self._filters[0])
return result
return NotImplemented
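# Illustrative sketch (editor's addition): combining predicates with AND. The
# eq()/gt() helpers below are hypothetical factories returning PropertyFilter
# objects (their construction depends on PropertyFilter, defined earlier in
# this module); they are shown only to make the flattening behaviour concrete.
#
#   f = CompositeFilter(CompositeFilter.AND,
#                       [gt('age', 18),
#                        CompositeFilter(CompositeFilter.AND,
#                                        [eq('name', 'bob'),
#                                         eq('city', 'nyc')])])
#   # f.filters is the flat tuple (gt('age', 18), eq('name', 'bob'),
#   # eq('city', 'nyc')). Two inequality filters on the same property would
#   # instead be merged into a single _PropertyRangeFilter via intersect().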
class _IgnoreFilter(_SinglePropertyFilter):
"""A filter that removes all entities with the given keys."""
def __init__(self, key_value_set):
super(_IgnoreFilter, self).__init__()
self._keys = key_value_set
def _get_prop_name(self):
return datastore_types.KEY_SPECIAL_PROPERTY
def _apply_to_value(self, value):
return value not in self._keys
class _DedupingFilter(_IgnoreFilter):
"""A filter that removes duplicate keys."""
def __init__(self, key_value_set=None):
super(_DedupingFilter, self).__init__(key_value_set or set())
def _apply_to_value(self, value):
if super(_DedupingFilter, self)._apply_to_value(value):
self._keys.add(value)
return True
return False
class Order(_PropertyComponent):
"""A base class that represents a sort order on a query.
All sub-classes must be immutable as these are often stored without creating a
defensive copy.
This class can be used as either the cmp or key arg in sorted() or
list.sort(). To provide a stable ordering a trailing key ascending order is
always used.
"""
def reversed(self):
"""Constructs an order representing the reverse of the current order.
Returns:
A new order representing the reverse direction.
"""
raise NotImplementedError
def _key(self, lhs_value_map):
"""Creates a key for the given value map."""
raise NotImplementedError
def _cmp(self, lhs_value_map, rhs_value_map):
"""Compares the given value maps."""
raise NotImplementedError
def _to_pb(self):
"""Internal only function to generate a filter pb."""
raise NotImplementedError
def key_for_filter(self, filter_predicate):
if filter_predicate:
return lambda x: self.key(x, filter_predicate)
return self.key
def cmp_for_filter(self, filter_predicate):
if filter_predicate:
return lambda x, y: self.cmp(x, y, filter_predicate)
return self.cmp
def key(self, entity, filter_predicate=None):
"""Constructs a "key" value for the given entity based on the current order.
This function can be used as the key argument for list.sort() and sorted().
Args:
entity: The entity_pb.EntityProto to convert
filter_predicate: A FilterPredicate used to prune values before comparing
entities or None.
Returns:
A key value that identifies the position of the entity when sorted by
the current order.
"""
names = self._get_prop_names()
names.add(datastore_types.KEY_SPECIAL_PROPERTY)
if filter_predicate is not None:
names |= filter_predicate._get_prop_names()
value_map = _make_key_value_map(entity, names)
if filter_predicate is not None:
filter_predicate._prune(value_map)
return (self._key(value_map),
value_map[datastore_types.KEY_SPECIAL_PROPERTY])
def cmp(self, lhs, rhs, filter_predicate=None):
"""Compares the given values taking into account any filters.
This function can be used as the cmp argument for list.sort() and sorted().
This function is slightly more efficient than Order.key when comparing two
entities; however, it is much less efficient when sorting a list of entities.
Args:
lhs: An entity_pb.EntityProto
rhs: An entity_pb.EntityProto
filter_predicate: A FilterPredicate used to prune values before comparing
entities or None.
Returns:
An integer <, = or > 0 representing the operator that goes between lhs
and rhs to create a true statement.
"""
names = self._get_prop_names()
if filter_predicate is not None:
names |= filter_predicate._get_prop_names()
lhs_value_map = _make_key_value_map(lhs, names)
rhs_value_map = _make_key_value_map(rhs, names)
if filter_predicate is not None:
filter_predicate._prune(lhs_value_map)
filter_predicate._prune(rhs_value_map)
result = self._cmp(lhs_value_map, rhs_value_map)
if result:
return result
lhs_key = (lhs_value_map.get(datastore_types.KEY_SPECIAL_PROPERTY) or
datastore_types.ReferenceToKeyValue(lhs.key()))
rhs_key = (rhs_value_map.get(datastore_types.KEY_SPECIAL_PROPERTY) or
datastore_types.ReferenceToKeyValue(rhs.key()))
return cmp(lhs_key, rhs_key)
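# Illustrative sketch (editor's addition): an Order can be handed straight to
# Python's sorting machinery when working with entity_pb.EntityProto objects.
# `entities`, `order` and `filter_predicate` below are hypothetical.
#
#   by_key = sorted(entities, key=order.key)        # key-based sort
#   by_cmp = sorted(entities, cmp=order.cmp)        # cmp-based sort (Python 2)
#   pruned = sorted(entities, key=order.key_for_filter(filter_predicate))
#
# The trailing __key__ component appended by Order.key() keeps the sort stable
# even when several entities share the same property value.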
class _ReverseOrder(_BaseComponent):
"""Reverses the comparison for the given object."""
def __init__(self, obj):
"""Constructor for _ReverseOrder.
Args:
obj: Any comparable and hashable object.
"""
super(_ReverseOrder, self).__init__()
self._obj = obj
def __hash__(self):
return hash(self._obj)
def __cmp__(self, other):
assert self.__class__ == other.__class__, (
'A datastore_query._ReverseOrder object can only be compared to '
'an object of the same type.')
return -cmp(self._obj, other._obj)
class PropertyOrder(Order):
"""An immutable class that represents a sort order for a single property."""
ASCENDING = datastore_pb.Query_Order.ASCENDING
DESCENDING = datastore_pb.Query_Order.DESCENDING
_DIRECTIONS = frozenset([ASCENDING, DESCENDING])
def __init__(self, prop, direction=ASCENDING):
"""Constructor.
Args:
prop: the name of the prop by which to sort.
direction: the direction in which to sort the given prop.
Raises:
datastore_errors.BadArgumentError if the prop name or direction is
invalid.
"""
datastore_types.ValidateString(prop,
'prop',
datastore_errors.BadArgumentError)
if not direction in self._DIRECTIONS:
raise datastore_errors.BadArgumentError('unknown direction: %r' %
(direction,))
super(PropertyOrder, self).__init__()
self.__order = datastore_pb.Query_Order()
self.__order.set_property(prop.encode('utf-8'))
self.__order.set_direction(direction)
@property
def prop(self):
return self.__order.property()
@property
def direction(self):
return self.__order.direction()
def __repr__(self):
name = self.prop
direction = self.direction
extra = ''
if direction == self.DESCENDING:
extra = ', DESCENDING'
name = repr(name).encode('utf-8')[1:-1]
return '%s(<%s>%s)' % (self.__class__.__name__, name, extra)
def reversed(self):
if self.__order.direction() == self.ASCENDING:
return PropertyOrder(self.__order.property().decode('utf-8'),
self.DESCENDING)
else:
return PropertyOrder(self.__order.property().decode('utf-8'),
self.ASCENDING)
def _get_prop_names(self):
return set([self.__order.property()])
def _key(self, lhs_value_map):
lhs_values = lhs_value_map[self.__order.property()]
if not lhs_values:
raise datastore_errors.BadArgumentError(
'Missing value for property (%s)' % self.__order.property())
if self.__order.direction() == self.ASCENDING:
return min(lhs_values)
else:
return _ReverseOrder(max(lhs_values))
def _cmp(self, lhs_value_map, rhs_value_map):
lhs_values = lhs_value_map[self.__order.property()]
rhs_values = rhs_value_map[self.__order.property()]
if not lhs_values:
raise datastore_errors.BadArgumentError(
'LHS missing value for property (%s)' % self.__order.property())
if not rhs_values:
raise datastore_errors.BadArgumentError(
'RHS missing value for property (%s)' % self.__order.property())
if self.__order.direction() == self.ASCENDING:
return cmp(min(lhs_values), min(rhs_values))
else:
return cmp(max(rhs_values), max(lhs_values))
@classmethod
def _from_pb(cls, order_pb):
self = cls.__new__(cls)
self.__order = order_pb
return self
def _to_pb(self):
"""Returns the internal only pb representation."""
return self.__order
def __getstate__(self):
raise pickle.PicklingError(
'Pickling of datastore_query.PropertyOrder is unsupported.')
class CompositeOrder(Order):
"""An immutable class that represents a sequence of Orders.
This class proactively flattens sub-orders that are of type CompositeOrder.
For example:
CompositeOrder([O1, CompositeOrder([02, 03]), O4])
is equivalent to:
CompositeOrder([O1, 02, 03, O4])
"""
def __init__(self, orders):
"""Constructor.
Args:
orders: A list of Orders which are applied in order.
"""
if not isinstance(orders, (list, tuple)):
raise datastore_errors.BadArgumentError(
'orders argument should be list or tuple (%r)' % (orders,))
super(CompositeOrder, self).__init__()
flattened = []
for order in orders:
if isinstance(order, CompositeOrder):
flattened.extend(order._orders)
elif isinstance(order, Order):
flattened.append(order)
else:
raise datastore_errors.BadArgumentError(
'orders argument should only contain Order (%r)' % (order,))
self._orders = tuple(flattened)
@property
def orders(self):
return self._orders
def __repr__(self):
return '%s(%r)' % (self.__class__.__name__, list(self.orders))
def reversed(self):
return CompositeOrder([order.reversed() for order in self._orders])
def _get_prop_names(self):
names = set()
for order in self._orders:
names |= order._get_prop_names()
return names
def _key(self, lhs_value_map):
result = []
for order in self._orders:
result.append(order._key(lhs_value_map))
return tuple(result)
def _cmp(self, lhs_value_map, rhs_value_map):
for order in self._orders:
result = order._cmp(lhs_value_map, rhs_value_map)
if result != 0:
return result
return 0
def size(self):
"""Returns the number of sub-orders the instance contains."""
return len(self._orders)
def _to_pbs(self):
"""Returns an ordered list of internal only pb representations."""
return [order._to_pb() for order in self._orders]
def __eq__(self, other):
if self.__class__ is other.__class__:
return super(CompositeOrder, self).__eq__(other)
if len(self._orders) == 1:
result = self._orders[0].__eq__(other)
if result is NotImplemented and hasattr(other, '__eq__'):
return other.__eq__(self._orders[0])
return result
return NotImplemented
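# Illustrative sketch (editor's addition): building a compound sort order. The
# property names below are hypothetical.
#
#   order = CompositeOrder([PropertyOrder('last_name'),
#                           PropertyOrder('age', PropertyOrder.DESCENDING)])
#   order.size()      # 2
#   order.reversed()  # last_name descending, then age ascending
#
# Because nested CompositeOrders are flattened,
# CompositeOrder([order, PropertyOrder('city')]).size() == 3.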
class FetchOptions(datastore_rpc.Configuration):
"""An immutable class that contains all options for fetching results.
These options apply to any request that pulls results from a query.
This class reserves the right to define configuration options of any name
except those that start with 'user_'. External subclasses should only define
functions or variables with names that start with 'user_'.
Options are set by passing keyword arguments to the constructor corresponding
to the configuration options defined below and in datastore_rpc.Configuration.
This object can be used as the default config for a datastore_rpc.Connection
but in that case some options will be ignored, see option documentation below
for details.
"""
@datastore_rpc.ConfigOption
def produce_cursors(value):
"""If a Cursor should be returned with the fetched results.
Raises:
datastore_errors.BadArgumentError if value is not a bool.
"""
if not isinstance(value, bool):
raise datastore_errors.BadArgumentError(
'produce_cursors argument should be bool (%r)' % (value,))
return value
@datastore_rpc.ConfigOption
def offset(value):
"""The number of results to skip before returning the first result.
Only applies to the first request it is used with and is ignored if present
on datastore_rpc.Connection.config.
Raises:
datastore_errors.BadArgumentError if value is not an integer or is less
than zero.
"""
datastore_types.ValidateInteger(value,
'offset',
datastore_errors.BadArgumentError,
zero_ok=True)
return value
@datastore_rpc.ConfigOption
def batch_size(value):
"""The number of results to attempt to retrieve in a batch.
Raises:
datastore_errors.BadArgumentError if value is not an integer or is not
greater than zero.
"""
datastore_types.ValidateInteger(value,
'batch_size',
datastore_errors.BadArgumentError)
return value
class QueryOptions(FetchOptions):
"""An immutable class that contains all options for running a query.
This class reserves the right to define configuration options of any name
except those that start with 'user_'. External subclasses should only define
functions or variables with names that start with 'user_'.
Options are set by passing keyword arguments to the constructor corresponding
to the configuration options defined below and in FetchOptions and
datastore_rpc.Configuration.
This object can be used as the default config for a datastore_rpc.Connection
but in that case some options will be ignored, see below for details.
"""
ORDER_FIRST = datastore_pb.Query.ORDER_FIRST
ANCESTOR_FIRST = datastore_pb.Query.ANCESTOR_FIRST
FILTER_FIRST = datastore_pb.Query.FILTER_FIRST
_HINTS = frozenset([ORDER_FIRST, ANCESTOR_FIRST, FILTER_FIRST])
@datastore_rpc.ConfigOption
def keys_only(value):
"""If the query should only return keys.
Raises:
datastore_errors.BadArgumentError if value is not a bool.
"""
if not isinstance(value, bool):
raise datastore_errors.BadArgumentError(
'keys_only argument should be bool (%r)' % (value,))
return value
@datastore_rpc.ConfigOption
def limit(value):
"""Limit on the number of results to return.
Raises:
datastore_errors.BadArgumentError if value is not an integer or is less
than zero.
"""
datastore_types.ValidateInteger(value,
'limit',
datastore_errors.BadArgumentError,
zero_ok=True)
return value
@datastore_rpc.ConfigOption
def prefetch_size(value):
"""Number of results to attempt to return on the initial request.
Raises:
datastore_errors.BadArgumentError if value is not an integer or is not
greater than zero.
"""
datastore_types.ValidateInteger(value,
'prefetch_size',
datastore_errors.BadArgumentError,
zero_ok=True)
return value
@datastore_rpc.ConfigOption
def start_cursor(value):
"""Cursor to use a start position.
Ignored if present on datastore_rpc.Connection.config.
Raises:
datastore_errors.BadArgumentError if value is not a Cursor.
"""
if not isinstance(value, Cursor):
raise datastore_errors.BadArgumentError(
'start_cursor argument should be datastore_query.Cursor (%r)' %
(value,))
return value
@datastore_rpc.ConfigOption
def end_cursor(value):
"""Cursor to use as an end position.
Ignored if present on datastore_rpc.Connection.config.
Raises:
datastore_errors.BadArgumentError if value is not a Cursor.
"""
if not isinstance(value, Cursor):
raise datastore_errors.BadArgumentError(
'end_cursor argument should be datastore_query.Cursor (%r)' %
(value,))
return value
@datastore_rpc.ConfigOption
def hint(value):
"""Hint on how the datastore should plan the query.
Raises:
datastore_errors.BadArgumentError if value is not a known hint.
"""
if value not in QueryOptions._HINTS:
raise datastore_errors.BadArgumentError('Unknown query hint (%r)' %
(value,))
return value
class Cursor(_BaseComponent):
"""An immutable class that represents a relative position in a query.
The position denoted by a Cursor is relative to a result in a query even
if the result has been removed from the given query. It is usually used to position
immediately after the last result returned by a batch.
A cursor should only be used on a query with an identical signature to the
one that produced it.
"""
@datastore_rpc._positional(1)
def __init__(self, _cursor_pb=None):
"""Constructor.
A Cursor constructed with no arguments points to the first result of any
query. If such a Cursor is used as an end_cursor no results will ever be
returned.
"""
super(Cursor, self).__init__()
if _cursor_pb is not None:
if not isinstance(_cursor_pb, datastore_pb.CompiledCursor):
raise datastore_errors.BadArgumentError(
'_cursor_pb argument should be datastore_pb.CompiledCursor (%r)' %
(_cursor_pb,))
self.__compiled_cursor = _cursor_pb
else:
self.__compiled_cursor = datastore_pb.CompiledCursor()
def __repr__(self):
arg = self.to_websafe_string()
if arg:
arg = '<%s>' % arg
return '%s(%s)' % (self.__class__.__name__, arg)
def reversed(self):
"""Creates a cursor for use in a query with a reversed sort order."""
for pos in self.__compiled_cursor.position_list():
if pos.has_start_key():
raise datastore_errors.BadRequestError('Cursor cannot be reversed.')
rev_pb = datastore_pb.CompiledCursor()
rev_pb.CopyFrom(self.__compiled_cursor)
for pos in rev_pb.position_list():
pos.set_start_inclusive(not pos.start_inclusive())
return Cursor(_cursor_pb=rev_pb)
def to_bytes(self):
"""Serialize cursor as a byte string."""
return self.__compiled_cursor.Encode()
@staticmethod
def from_bytes(cursor):
"""Gets a Cursor given its byte string serialized form.
The serialized form of a cursor may change in a non-backwards compatible
way. In this case cursors must be regenerated from a new Query request.
Args:
cursor: A serialized cursor as returned by .to_bytes.
Returns:
A Cursor.
Raises:
datastore_errors.BadValueError if the cursor argument does not represent a
serialized cursor.
"""
try:
cursor_pb = datastore_pb.CompiledCursor(cursor)
except (ValueError, TypeError), e:
raise datastore_errors.BadValueError(
'Invalid cursor (%r). Details: %s' % (cursor, e))
except Exception, e:
if e.__class__.__name__ == 'ProtocolBufferDecodeError':
raise datastore_errors.BadValueError(
'Invalid cursor %s. Details: %s' % (cursor, e))
else:
raise
return Cursor(_cursor_pb=cursor_pb)
def to_websafe_string(self):
"""Serialize cursor as a websafe string.
Returns:
A base64-encoded serialized cursor.
"""
return base64.urlsafe_b64encode(self.to_bytes())
@staticmethod
def from_websafe_string(cursor):
"""Gets a Cursor given its websafe serialized form.
The serialized form of a cursor may change in a non-backwards compatible
way. In this case cursors must be regenerated from a new Query request.
Args:
cursor: A serialized cursor as returned by .to_websafe_string.
Returns:
A Cursor.
Raises:
datastore_errors.BadValueError if the cursor argument is not a string
type or does not represent a serialized cursor.
"""
if not isinstance(cursor, basestring):
raise datastore_errors.BadValueError(
'cursor argument should be str or unicode (%r)' % (cursor,))
try:
decoded_bytes = base64.b64decode(str(cursor).replace('-', '+').replace('_', '/'))
except (ValueError, TypeError), e:
raise datastore_errors.BadValueError(
'Invalid cursor %s. Details: %s' % (cursor, e))
return Cursor.from_bytes(decoded_bytes)
@staticmethod
def _from_query_result(query_result):
if query_result.has_compiled_cursor():
return Cursor(_cursor_pb=query_result.compiled_cursor())
return None
def advance(self, offset, query, conn):
"""Advances a Cursor by the given offset.
Args:
offset: The amount to advance the current query.
query: A Query identical to the one this cursor was created from.
conn: The datastore_rpc.Connection to use.
Returns:
A new cursor that is advanced by offset using the given query.
"""
datastore_types.ValidateInteger(offset,
'offset',
datastore_errors.BadArgumentError)
if not isinstance(query, Query):
raise datastore_errors.BadArgumentError(
'query argument should be datastore_query.Query (%r)' % (query,))
query_options = QueryOptions(
start_cursor=self, offset=offset, limit=0, produce_cursors=True)
return query.run(conn, query_options).next_batch(0).cursor(0)
def _to_pb(self):
"""Returns the internal only pb representation."""
return self.__compiled_cursor
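# Illustrative sketch (editor's addition): cursors round-trip through their
# websafe form, which is how they are usually handed to and received from HTTP
# clients. `batch` below is a hypothetical Batch (defined later in this module)
# obtained from a query run with produce_cursors=True.
#
#   token = batch.end_cursor.to_websafe_string()
#   # ... return token to the client, then later ...
#   resumed = Cursor.from_websafe_string(token)
#   next_options = QueryOptions(start_cursor=resumed)
#
# Since the serialized form may change in non-backwards-compatible ways, stored
# tokens should be treated as opaque and regenerated when they stop decoding.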
class _QueryKeyFilter(_BaseComponent):
"""A class that implements the key filters available on a Query."""
@datastore_rpc._positional(1)
def __init__(self, app=None, namespace=None, kind=None, ancestor=None):
"""Constructs a _QueryKeyFilter.
If app/namespace and ancestor are not defined, the app/namespace set in the
environment is used.
Args:
app: a string representing the required app id or None.
namespace: a string representing the required namespace or None.
kind: a string representing the required kind or None.
ancestor: an entity_pb.Reference representing the required ancestor or
None.
Raises:
datastore_errors.BadArgumentError if app and ancestor.app() do not match or
an unexpected type is passed in for any argument.
"""
if kind is not None:
datastore_types.ValidateString(
kind, 'kind', datastore_errors.BadArgumentError)
if ancestor is not None:
if not isinstance(ancestor, entity_pb.Reference):
raise datastore_errors.BadArgumentError(
'ancestor argument should be entity_pb.Reference (%r)' %
(ancestor,))
if app is None:
app = ancestor.app()
elif app != ancestor.app():
raise datastore_errors.BadArgumentError(
'ancestor argument should match app ("%r" != "%r")' %
(ancestor.app(), app))
if namespace is None:
namespace = ancestor.name_space()
elif namespace != ancestor.name_space():
raise datastore_errors.BadArgumentError(
'ancestor argument should match namespace ("%r" != "%r")' %
(ancestor.name_space(), namespace))
pb = entity_pb.Reference()
pb.CopyFrom(ancestor)
ancestor = pb
self.__ancestor = ancestor
self.__path = ancestor.path().element_list()
else:
self.__ancestor = None
self.__path = None
super(_QueryKeyFilter, self).__init__()
self.__app = datastore_types.ResolveAppId(app).encode('utf-8')
self.__namespace = (
datastore_types.ResolveNamespace(namespace).encode('utf-8'))
self.__kind = kind and kind.encode('utf-8')
@property
def app(self):
return self.__app
@property
def namespace(self):
return self.__namespace
@property
def kind(self):
return self.__kind
@property
def ancestor(self):
return self.__ancestor
def __call__(self, entity_or_reference):
"""Apply the filter.
Accepts either an entity or a reference to avoid the need to extract keys
from entities when we have a list of entities (which is a common case).
Args:
entity_or_reference: Either an entity_pb.EntityProto or
entity_pb.Reference.
"""
if isinstance(entity_or_reference, entity_pb.Reference):
key = entity_or_reference
elif isinstance(entity_or_reference, entity_pb.EntityProto):
key = entity_or_reference.key()
else:
raise datastore_errors.BadArgumentError(
'entity_or_reference argument must be an entity_pb.EntityProto ' +
'or entity_pb.Reference (%r)' % (entity_or_reference))
return (key.app() == self.__app and
key.name_space() == self.__namespace and
(not self.__kind or
key.path().element_list()[-1].type() == self.__kind) and
(not self.__path or
key.path().element_list()[0:len(self.__path)] == self.__path))
def _to_pb(self):
pb = datastore_pb.Query()
pb.set_app(self.__app)
datastore_types.SetNamespace(pb, self.__namespace)
if self.__kind is not None:
pb.set_kind(self.__kind)
if self.__ancestor:
ancestor = pb.mutable_ancestor()
ancestor.CopyFrom(self.__ancestor)
return pb
class _BaseQuery(_BaseComponent):
"""A base class for query implementations."""
def run(self, conn, query_options=None):
"""Runs the query using provided datastore_rpc.Connection.
Args:
conn: The datastore_rpc.Connection to use
query_options: Optional query options to use
Returns:
A Batcher that implicitly fetches query results asynchronously.
Raises:
datastore_errors.BadArgumentError if any of the arguments are invalid.
"""
return Batcher(query_options, self.run_async(conn, query_options))
def run_async(self, conn, query_options=None):
"""Runs the query using the provided datastore_rpc.Connection.
Args:
conn: the datastore_rpc.Connection on which to run the query.
query_options: Optional QueryOptions with which to run the query.
Returns:
An async object that can be used to grab the first Batch. Additional
batches can be retrieved by calling Batch.next_batch/next_batch_async.
Raises:
datastore_errors.BadArgumentError if any of the arguments are invalid.
"""
raise NotImplementedError
def __getstate__(self):
raise pickle.PicklingError(
'Pickling of %r is unsupported.' % self)
class Query(_BaseQuery):
"""An immutable class that represents a query signature.
A query signature consists of a source of entities (specified as app,
namespace and optionally kind and ancestor) as well as a FilterPredicate
and a desired ordering.
"""
@datastore_rpc._positional(1)
def __init__(self, app=None, namespace=None, kind=None, ancestor=None,
filter_predicate=None, order=None):
"""Constructor.
Args:
app: Optional app to query, derived from the environment if not specified.
namespace: Optional namespace to query, derived from the environment if
not specified.
kind: Optional kind to query.
ancestor: Optional ancestor to query, an entity_pb.Reference.
filter_predicate: Optional FilterPredicate by which to restrict the query.
order: Optional Order in which to return results.
Raises:
datastore_errors.BadArgumentError if any argument is invalid.
"""
if filter_predicate is not None and not isinstance(filter_predicate,
FilterPredicate):
raise datastore_errors.BadArgumentError(
'filter_predicate should be datastore_query.FilterPredicate (%r)' %
(filter_predicate,))
super(Query, self).__init__()
if isinstance(order, CompositeOrder):
if order.size() == 0:
order = None
elif isinstance(order, Order):
order = CompositeOrder([order])
elif order is not None:
raise datastore_errors.BadArgumentError(
'order should be Order (%r)' % (order,))
self._key_filter = _QueryKeyFilter(app=app, namespace=namespace, kind=kind,
ancestor=ancestor)
self._order = order
self._filter_predicate = filter_predicate
@property
def app(self):
return self._key_filter.app
@property
def namespace(self):
return self._key_filter.namespace
@property
def kind(self):
return self._key_filter.kind
@property
def ancestor(self):
return self._key_filter.ancestor
@property
def filter_predicate(self):
return self._filter_predicate
@property
def order(self):
return self._order
def __repr__(self):
args = []
args.append('app=%r' % self.app)
ns = self.namespace
if ns:
args.append('namespace=%r' % ns)
kind = self.kind
if kind is not None:
args.append('kind=%r' % kind)
ancestor = self.ancestor
if ancestor is not None:
websafe = base64.urlsafe_b64encode(ancestor.Encode())
args.append('ancestor=<%s>' % websafe)
filter_predicate = self.filter_predicate
if filter_predicate is not None:
args.append('filter_predicate=%r' % filter_predicate)
order = self.order
if order is not None:
args.append('order=%r' % order)
return '%s(%s)' % (self.__class__.__name__, ', '.join(args))
def run_async(self, conn, query_options=None):
if not isinstance(conn, datastore_rpc.BaseConnection):
raise datastore_errors.BadArgumentError(
'conn should be a datastore_rpc.BaseConnection (%r)' % (conn,))
if not QueryOptions.is_configuration(query_options):
query_options = QueryOptions(config=query_options)
start_cursor = query_options.start_cursor
if not start_cursor and query_options.produce_cursors:
start_cursor = Cursor()
batch0 = Batch(query_options, self, conn, start_cursor=start_cursor)
req = self._to_pb(conn, query_options)
return batch0._make_query_result_rpc_call('RunQuery', query_options, req)
@classmethod
def _from_pb(cls, query_pb):
filter_predicate = None
if query_pb.filter_size() > 0:
filter_predicate = CompositeFilter(
CompositeFilter.AND,
[PropertyFilter._from_pb(filter_pb)
for filter_pb in query_pb.filter_list()])
order = None
if query_pb.order_size() > 0:
order = CompositeOrder([PropertyOrder._from_pb(order_pb)
for order_pb in query_pb.order_list()])
ancestor = query_pb.has_ancestor() and query_pb.ancestor() or None
kind = query_pb.has_kind() and query_pb.kind().decode('utf-8') or None
return Query(app=query_pb.app().decode('utf-8'),
namespace=query_pb.name_space().decode('utf-8'),
kind=kind,
ancestor=ancestor,
filter_predicate=filter_predicate,
order=order)
def _to_pb(self, conn, query_options):
"""Returns the internal only pb representation."""
pb = self._key_filter._to_pb()
if self._filter_predicate:
for f in self._filter_predicate._to_pbs():
pb.add_filter().CopyFrom(f)
if self._order:
for order in self._order._to_pbs():
pb.add_order().CopyFrom(order)
if QueryOptions.keys_only(query_options, conn.config):
pb.set_keys_only(True)
if QueryOptions.produce_cursors(query_options, conn.config):
pb.set_compile(True)
limit = QueryOptions.limit(query_options, conn.config)
if limit is not None:
pb.set_limit(limit)
count = QueryOptions.prefetch_size(query_options, conn.config)
if count is None:
count = QueryOptions.batch_size(query_options, conn.config)
if count is not None:
pb.set_count(count)
if query_options.offset:
pb.set_offset(query_options.offset)
if query_options.start_cursor is not None:
pb.mutable_compiled_cursor().CopyFrom(query_options.start_cursor._to_pb())
if query_options.end_cursor is not None:
pb.mutable_end_compiled_cursor().CopyFrom(
query_options.end_cursor._to_pb())
if ((query_options.hint == QueryOptions.ORDER_FIRST and pb.order_size()) or
(query_options.hint == QueryOptions.ANCESTOR_FIRST and
pb.has_ancestor()) or
(query_options.hint == QueryOptions.FILTER_FIRST and
pb.filter_size() > 0)):
pb.set_hint(query_options.hint)
conn._set_request_read_policy(pb, query_options)
conn._set_request_transaction(pb)
return pb
def apply_query(query, entities):
"""Performs the given query on a set of in-memory entities.
This function can perform queries impossible in the datastore (e.g. a query
with multiple inequality filters on different properties) because all
operations are done in memory. For queries that can also be executed on the
datastore, the results produced by this function may not use the same
implicit ordering as the datastore. To ensure compatibility, explicit
ordering must be used (e.g. 'ORDER BY ineq_prop, ..., __key__').
Order by __key__ should always be used when a consistent result is desired
(unless there is a sort order on another globally unique property).
Args:
query: a datastore_query.Query to apply
entities: a list of entity_pb.EntityProto on which to apply the query.
Returns:
A list of entity_pb.EntityProto containing the results of the query.
"""
if not isinstance(query, Query):
raise datastore_errors.BadArgumentError(
"query argument must be a datastore_query.Query (%r)" % (query,))
if not isinstance(entities, list):
raise datastore_errors.BadArgumentError(
"entities argument must be a list (%r)" % (entities,))
filtered_entities = filter(query._key_filter, entities)
if not query._order:
if query._filter_predicate:
return filter(query._filter_predicate, filtered_entities)
return filtered_entities
names = query._order._get_prop_names()
if query._filter_predicate:
names |= query._filter_predicate._get_prop_names()
exists_filter = _PropertyExistsFilter(names)
value_maps = []
for entity in filtered_entities:
value_map = _make_key_value_map(entity, names)
if exists_filter._apply(value_map) and (
not query._filter_predicate or
query._filter_predicate._prune(value_map)):
value_map['__entity__'] = entity
value_maps.append(value_map)
value_maps.sort(query._order._cmp)
return [value_map['__entity__'] for value_map in value_maps]
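# Illustrative sketch (editor's addition): apply_query() evaluates a query
# signature against in-memory protos. The kind, property name and the
# `some_filter_predicate`/`protos` names below are hypothetical.
#
#   q = Query(kind='Person',
#             filter_predicate=some_filter_predicate,  # any FilterPredicate
#             order=CompositeOrder([PropertyOrder('age')]))
#   matching = apply_query(q, protos)  # protos: list of entity_pb.EntityProto
#
# Because the filtering happens entirely in memory, q may use combinations
# (such as inequalities on two different properties) that the datastore itself
# would reject.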
class _AugmentedQuery(_BaseQuery):
"""A query that combines a datastore query with in-memory filters/results."""
@datastore_rpc._positional(2)
def __init__(self, query, in_memory_results=None, in_memory_filter=None,
max_filtered_count=None):
"""Constructor for _AugmentedQuery.
Do not call directly. Use the utility functions instead (e.g.
datastore_query.inject_results)
Args:
query: A datastore_query.Query object to augment.
in_memory_results: a list of pre-sorted and pre-filtered results to add to the
stream of datastore results, or None.
in_memory_filter: a set of in-memory filters to apply to the datastore
results or None.
max_filtered_count: the maximum number of datastore entities that will be
filtered out by in_memory_filter if known.
"""
if not isinstance(query, Query):
raise datastore_errors.BadArgumentError(
'query argument should be datastore_query.Query (%r)' % (query,))
if (in_memory_filter is not None and
not isinstance(in_memory_filter, FilterPredicate)):
raise datastore_errors.BadArgumentError(
'in_memory_filter argument should be ' +
'datastore_query.FilterPredicate (%r)' % (in_memory_filter,))
if (in_memory_results is not None and
not isinstance(in_memory_results, list)):
raise datastore_errors.BadArgumentError(
'in_memory_results argument should be a list of ' +
'datastore_pb.EntityProto (%r)' % (in_memory_results,))
datastore_types.ValidateInteger(max_filtered_count,
'max_filtered_count',
empty_ok=True,
zero_ok=True)
self._query = query
self._max_filtered_count = max_filtered_count
self._in_memory_filter = in_memory_filter
self._in_memory_results = in_memory_results
def run_async(self, conn, query_options=None):
if not isinstance(conn, datastore_rpc.BaseConnection):
raise datastore_errors.BadArgumentError(
'conn should be a datastore_rpc.BaseConnection (%r)' % (conn,))
if not QueryOptions.is_configuration(query_options):
query_options = QueryOptions(config=query_options)
if self._query._order:
changes = {'keys_only': False}
else:
changes = {}
if self._in_memory_filter or self._in_memory_results:
in_memory_offset = query_options.offset
in_memory_limit = query_options.limit
if in_memory_limit is not None:
if self._in_memory_filter is None:
changes['limit'] = in_memory_limit
elif self._max_filtered_count is not None:
changes['limit'] = in_memory_limit + self._max_filtered_count
else:
changes['limit'] = None
if in_memory_offset:
changes['offset'] = None
if changes.get('limit', None) is not None:
changes['limit'] += in_memory_offset
else:
in_memory_offset = None
else:
in_memory_offset = None
in_memory_limit = None
req = self._query._to_pb(
conn, QueryOptions(config=query_options, **changes))
start_cursor = query_options.start_cursor
if not start_cursor and query_options.produce_cursors:
start_cursor = Cursor()
batch0 = _AugmentedBatch(query_options, self, conn,
in_memory_offset=in_memory_offset,
in_memory_limit=in_memory_limit,
start_cursor=start_cursor)
return batch0._make_query_result_rpc_call('RunQuery', query_options, req)
@datastore_rpc._positional(1)
def inject_results(query, updated_entities=None, deleted_keys=None):
"""Creates a query object that will inject changes into results.
Args:
query: The datastore_query.Query to augment
updated_entities: A list of entity_pb.EntityProto's that have been updated
and should take priority over any values returned by query.
deleted_keys: A list of entity_pb.Reference's for entities that have been
deleted and should be removed from query results.
Returns:
A datastore_query._AugmentedQuery if in-memory filtering is required,
query otherwise.
"""
if not isinstance(query, Query):
raise datastore_errors.BadArgumentError(
'query argument should be datastore_query.Query (%r)' % (query,))
overriden_keys = set()
if deleted_keys is not None:
if not isinstance(deleted_keys, list):
raise datastore_errors.BadArgumentError(
'deleted_keys argument must be a list (%r)' % (deleted_keys,))
deleted_keys = filter(query._key_filter, deleted_keys)
for key in deleted_keys:
overriden_keys.add(datastore_types.ReferenceToKeyValue(key))
if updated_entities is not None:
if not isinstance(updated_entities, list):
raise datastore_errors.BadArgumentError(
'updated_entities argument must be a list (%r)' % (updated_entities,))
updated_entities = filter(query._key_filter, updated_entities)
for entity in updated_entities:
overriden_keys.add(datastore_types.ReferenceToKeyValue(entity.key()))
updated_entities = apply_query(query, updated_entities)
else:
updated_entities = []
if not overriden_keys:
return query
return _AugmentedQuery(query,
in_memory_filter=_IgnoreFilter(overriden_keys),
in_memory_results=updated_entities,
max_filtered_count=len(overriden_keys))
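# Illustrative sketch (editor's addition): inject_results() is a way to layer
# uncommitted local changes over datastore results. The variable names below
# are hypothetical.
#
#   q = Query(kind='Person')
#   augmented = inject_results(q,
#                              updated_entities=locally_written_protos,
#                              deleted_keys=locally_deleted_refs)
#   batcher = augmented.run(conn)  # conn: a datastore_rpc.Connection
#
# When neither list affects the query, the original Query object is returned
# unchanged, so callers can always run whatever inject_results() hands back.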
class Batch(object):
"""A batch of results returned by a query.
This class contains a batch of results returned from the datastore and
relevant metadata. This metadata includes:
query: The query that produced this batch
query_options: The QueryOptions used to run the query. This does not
contained any options passed to the .next_batch() call that created the
current batch.
start_cursor, end_cursor: These are the cursors that can be used
with a query to re-fetch this batch. They can also be used to
find all entities before or after the given batch (by using start_cursor as
an end cursor or vice versa). start_cursor can also be advanced to
point to a position within the batch using Cursor.advance().
skipped_results: the number of results skipped because of the offset
given to the request that generated it. This can be set either on
the original Query.run() request or in subsequent .next_batch() calls.
more_results: If this is true there are more results that can be retrieved
either by .next_batch() or Batcher.next().
This class is also able to fetch the next batch of the query using
.next_batch(). As batches of results must be fetched serially, .next_batch()
can only be called once. Additional calls to .next_batch() will return None.
When there are no more batches .next_batch() will return None as well. Note
that batches returned by iterating over Batcher will always return None for
.next_batch() as the Batcher handles fetching the next batch automatically.
A Batch typically represents the result of a single RPC request. The datastore
operates on a "best effort" basis so the batch returned by .next_batch()
or Query.run_async().get_result() may not have satisfied the requested offset
or number of results (specified through FetchOptions.offset and
FetchOptions.batch_size respectively). To satisfy these restrictions
additional batches may be needed (with FetchOptions that specify the remaining
offset or results needed). The Batcher class hides these limitations.
"""
@datastore_rpc._positional(4)
def __init__(self, query_options, query, conn,
start_cursor=Cursor(), _compiled_query=None):
"""Constructor.
This class is constructed in stages (one when an RPC is sent and another
when an RPC is completed) and should not be constructed directly.
Use Query.run_async().get_result() to create a Batch or Query.run()
to use a batcher.
This constructor does not perform verification.
Args:
query_options: The QueryOptions used to run the given query.
query: The Query the batch is derived from.
conn: A datastore_rpc.Connection to use.
start_cursor: Optional cursor pointing before this batch.
"""
self.__query = query
self._conn = conn
self.__query_options = query_options
self.__start_cursor = start_cursor
self._compiled_query = _compiled_query
@property
def query_options(self):
"""The QueryOptions used to retrieve the first batch."""
return self.__query_options
@property
def query(self):
"""The query the current batch came from."""
return self.__query
@property
def results(self):
"""A list of entities in this batch."""
return self.__results
@property
def keys_only(self):
"""Whether the entities in this batch only contain keys."""
return self.__keys_only
@property
def start_cursor(self):
"""A cursor that points to the position just before the current batch."""
return self.__start_cursor
@property
def end_cursor(self):
"""A cursor that points to the position just after the current batch."""
return self.__end_cursor
@property
def skipped_results(self):
"""The number of results skipped because of an offset in the request.
An offset is satisfied before any results are returned. The start_cursor
points to the position in the query before the skipped results.
"""
return self._skipped_results
@property
def more_results(self):
"""Whether more results can be retrieved from the query."""
return self.__more_results
def next_batch(self, fetch_options=None):
"""Synchronously get the next batch or None if there are no more batches.
Args:
fetch_options: Optional fetch options to use when fetching the next batch.
Merged with both the fetch options on the original call and the
connection.
Returns:
A new Batch of results or None if either the next batch has already been
fetched or there are no more results.
"""
async = self.next_batch_async(fetch_options)
if async is None:
return None
return async.get_result()
def cursor(self, index):
"""Gets the cursor that points to the result at the given index.
The index is relative to first result in .results. Since start_cursor
points to the position before the first skipped result and the end_cursor
points to the position after the last result, the range of indexes this
function supports is limited to [-skipped_results, len(results)].
Args:
index: An int, the index relative to the first result before which the
cursor should point.
Returns:
A Cursor that points just before the result at the given index which, if
used as a start_cursor, will cause the first result to be result[index].
"""
if not isinstance(index, (int, long)):
raise datastore_errors.BadArgumentError(
'index argument should be an integer (%r)' % (index,))
if not -self._skipped_results <= index <= len(self.__results):
raise datastore_errors.BadArgumentError(
'index argument must be in the inclusive range [%d, %d]' %
(-self._skipped_results, len(self.__results)))
if index == len(self.__results):
return self.__end_cursor
elif index == -self._skipped_results:
return self.__start_cursor
else:
return self.__start_cursor.advance(index + self._skipped_results,
self.__query, self._conn)
def next_batch_async(self, fetch_options=None):
"""Asynchronously get the next batch or None if there are no more batches.
Args:
fetch_options: Optional fetch options to use when fetching the next batch.
Merged with both the fetch options on the original call and the
connection.
Returns:
An async object that can be used to get the next Batch or None if either
the next batch has already been fetched or there are no more results.
"""
if not self.__datastore_cursor:
return None
fetch_options, next_batch = self._make_next_batch(fetch_options)
req = self._to_pb(fetch_options)
config = self.__query_options.merge(fetch_options)
return next_batch._make_query_result_rpc_call(
'Next', config, req)
def _to_pb(self, fetch_options=None):
req = datastore_pb.NextRequest()
if FetchOptions.produce_cursors(fetch_options,
self.__query_options,
self._conn.config):
req.set_compile(True)
count = FetchOptions.batch_size(fetch_options,
self.__query_options,
self._conn.config)
if count is not None:
req.set_count(count)
if fetch_options is not None and fetch_options.offset:
req.set_offset(fetch_options.offset)
req.mutable_cursor().CopyFrom(self.__datastore_cursor)
self.__datastore_cursor = None
return req
def _extend(self, next_batch):
"""Combines the current batch with the next one. Called by batcher."""
self.__datastore_cursor = next_batch.__datastore_cursor
next_batch.__datastore_cursor = None
self.__more_results = next_batch.__more_results
self.__results.extend(next_batch.__results)
self.__end_cursor = next_batch.__end_cursor
self._skipped_results += next_batch._skipped_results
def _make_query_result_rpc_call(self, name, config, req):
"""Makes either a RunQuery or Next call that will modify the instance.
Args:
name: A string, the name of the call to invoke.
config: The datastore_rpc.Configuration to use for the call.
req: The request to send with the call.
Returns:
A UserRPC object that can be used to fetch the result of the RPC.
"""
return self._conn.make_rpc_call(config, name, req,
datastore_pb.QueryResult(),
self.__query_result_hook)
def __query_result_hook(self, rpc):
"""Internal method used as get_result_hook for RunQuery/Next operation."""
try:
self._conn.check_rpc_success(rpc)
except datastore_errors.NeedIndexError, exc:
if isinstance(rpc.request, datastore_pb.Query):
yaml = datastore_index.IndexYamlForQuery(
*datastore_index.CompositeIndexForQuery(rpc.request)[1:-1])
raise datastore_errors.NeedIndexError(
str(exc) + '\nThe suggested index for this query is:\n' + yaml)
raise
query_result = rpc.response
if query_result.has_compiled_query():
self._compiled_query = query_result.compiled_query()
self.__keys_only = query_result.keys_only()
self.__end_cursor = Cursor._from_query_result(query_result)
self._skipped_results = query_result.skipped_results()
if query_result.more_results():
self.__datastore_cursor = query_result.cursor()
self.__more_results = True
else:
self._end()
self.__results = self._process_results(query_result.result_list())
return self
def _end(self):
"""Changes the internal state so that no more batches can be produced."""
self.__datastore_cursor = None
self.__more_results = False
def _make_next_batch(self, fetch_options):
"""Creates the object to store the next batch.
Args:
fetch_options: The datastore_query.FetchOptions passed in by the user or
None.
Returns:
A tuple containing the fetch options that should be used internally and
the object that should be used to contain the next batch.
"""
return fetch_options, Batch(self.__query_options, self.__query, self._conn,
start_cursor=self.__end_cursor,
_compiled_query=self._compiled_query)
def _process_results(self, results):
"""Converts the datastore results into results returned to the user.
Args:
results: A list of entity_pb.EntityProto's returned by the datastore
Returns:
A list of results that should be returned to the user.
"""
return [self._conn.adapter.pb_to_query_result(result, self.__keys_only)
for result in results]
def __getstate__(self):
raise pickle.PicklingError(
'Pickling of datastore_query.Batch is unsupported.')
class _AugmentedBatch(Batch):
"""A batch produced by a datastore_query._AugmentedQuery."""
@datastore_rpc._positional(4)
def __init__(self, query_options, augmented_query, conn,
in_memory_offset=None, in_memory_limit=None,
start_cursor=Cursor(), _compiled_query=None,
next_index=0):
"""A Constructor for datastore_query._AugmentedBatch.
Constructed by datastore_query._AugmentedQuery. Should not be called
directly.
"""
super(_AugmentedBatch, self).__init__(query_options, augmented_query._query,
conn,
start_cursor=start_cursor,
_compiled_query=_compiled_query)
self.__augmented_query = augmented_query
self.__in_memory_offset = in_memory_offset
self.__in_memory_limit = in_memory_limit
self.__next_index = next_index
@property
def query(self):
"""The query the current batch came from."""
return self.__augmented_query
def cursor(self, index):
raise NotImplementedError
def _extend(self, next_batch):
super(_AugmentedBatch, self)._extend(next_batch)
self.__in_memory_limit = next_batch.__in_memory_limit
self.__in_memory_offset = next_batch.__in_memory_offset
self.__next_index = next_batch.__next_index
def _process_results(self, results):
in_memory_filter = self.__augmented_query._in_memory_filter
if in_memory_filter:
results = filter(in_memory_filter, results)
in_memory_results = self.__augmented_query._in_memory_results
if in_memory_results and self.__next_index < len(in_memory_results):
original_query = super(_AugmentedBatch, self).query
if original_query._order:
if results:
next_result = in_memory_results[self.__next_index]
next_key = original_query._order.key(next_result)
i = 0
while i < len(results):
result = results[i]
result_key = original_query._order.key(result)
while next_key <= result_key:
results.insert(i, next_result)
i += 1
self.__next_index += 1
if self.__next_index >= len(in_memory_results):
break
next_result = in_memory_results[self.__next_index]
next_key = original_query._order.key(next_result)
i += 1
elif results or not super(_AugmentedBatch, self).more_results:
results = in_memory_results + results
self.__next_index = len(in_memory_results)
if self.__in_memory_offset:
assert not self._skipped_results
offset = min(self.__in_memory_offset, len(results))
if offset:
self._skipped_results += offset
self.__in_memory_offset -= offset
results = results[offset:]
if self.__in_memory_limit is not None:
results = results[:self.__in_memory_limit]
self.__in_memory_limit -= len(results)
if self.__in_memory_limit <= 0:
self._end()
return super(_AugmentedBatch, self)._process_results(results)
def _make_next_batch(self, fetch_options):
in_memory_offset = FetchOptions.offset(fetch_options)
if in_memory_offset and (self.__augmented_query._in_memory_filter or
self.__augmented_query._in_memory_results):
fetch_options = FetchOptions(offset=0)
else:
in_memory_offset = None
return (fetch_options,
_AugmentedBatch(self.query_options, self.__augmented_query,
self._conn,
in_memory_offset=in_memory_offset,
in_memory_limit=self.__in_memory_limit,
start_cursor=self.end_cursor,
_compiled_query=self._compiled_query,
next_index=self.__next_index))
class Batcher(object):
"""A class that implements the Iterator interface for Batches.
Typically constructed by a call to Query.run().
The class hides the "best effort" nature of the datastore by potentially
making multiple requests to the datastore and merging the resulting batches.
This is accomplished efficiently by prefetching results and mixing both
non-blocking and blocking calls to the datastore as needed.
Iterating through batches is almost always more efficient than pulling all
results at once as RPC latency is hidden by asynchronously prefetching
results.
The batches produced by this class cannot be used to fetch the next batch
(through Batch.next_batch()) as before the current batch is returned the
request for the next batch has already been sent.
"""
ASYNC_ONLY = None
AT_LEAST_OFFSET = 0
AT_LEAST_ONE = object()
def __init__(self, query_options, first_async_batch):
"""Constructor.
Although this class can be manually constructed, it is preferable to use
Query.run(query_options).
Args:
query_options: The QueryOptions used to create the first batch.
first_async_batch: The first batch produced by
Query.run_async(query_options).
"""
self.__next_batch = first_async_batch
self.__initial_offset = QueryOptions.offset(query_options) or 0
self.__skipped_results = 0
def next(self):
"""Get the next batch. See .next_batch()."""
return self.next_batch(self.AT_LEAST_ONE)
def next_batch(self, min_batch_size):
"""Get the next batch.
The batch returned by this function cannot be used to fetch the next batch
(through Batch.next_batch()). Instead this function will always return None.
To retrieve the next batch use .next() or .next_batch(N).
This function may return a batch larger than min_batch_size, but will never
return a smaller one unless there are no more results.
Special values can be used for min_batch_size:
ASYNC_ONLY - Do not perform any synchronous fetches from the datastore
even if this produces a batch with no results.
AT_LEAST_OFFSET - Only pull enough results to satisfy the offset.
AT_LEAST_ONE - Pull batches until at least one result is returned.
Args:
min_batch_size: The minimum number of results to retrieve or one of
(ASYNC_ONLY, AT_LEAST_OFFSET, AT_LEAST_ONE)
Returns:
The next Batch of results.
"""
if min_batch_size in (Batcher.ASYNC_ONLY, Batcher.AT_LEAST_OFFSET,
Batcher.AT_LEAST_ONE):
exact = False
else:
exact = True
datastore_types.ValidateInteger(min_batch_size,
'min_batch_size',
datastore_errors.BadArgumentError)
if not self.__next_batch:
raise StopIteration
batch = self.__next_batch.get_result()
self.__next_batch = None
self.__skipped_results += batch.skipped_results
if min_batch_size is not Batcher.ASYNC_ONLY:
if min_batch_size is Batcher.AT_LEAST_ONE:
min_batch_size = 1
needed_results = min_batch_size - len(batch.results)
while (batch.more_results and
(self.__skipped_results < self.__initial_offset or
needed_results > 0)):
if batch.query_options.batch_size:
batch_size = max(batch.query_options.batch_size, needed_results)
elif exact:
batch_size = needed_results
else:
batch_size = None
next_batch = batch.next_batch(FetchOptions(
offset=max(0, self.__initial_offset - self.__skipped_results),
batch_size=batch_size))
self.__skipped_results += next_batch.skipped_results
needed_results = max(0, needed_results - len(next_batch.results))
batch._extend(next_batch)
self.__next_batch = batch.next_batch_async()
return batch
def __getstate__(self):
raise pickle.PicklingError(
'Pickling of datastore_query.Batcher is unsupported.')
def __iter__(self):
return self
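# Illustrative sketch (editor's addition): iterating over batches. `conn` is a
# hypothetical datastore_rpc.Connection and process() is a hypothetical
# callback.
#
#   batcher = Query(kind='Person').run(conn, QueryOptions(batch_size=100))
#   for batch in batcher:
#     for result in batch.results:
#       process(result)
#
# Each batch yielded here has already had its follow-up request issued
# asynchronously, which is why Batch.next_batch() returns None on them; keep
# pulling from the Batcher (or from a ResultsIterator, below) instead.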
class ResultsIterator(object):
"""An iterator over the results from Batches obtained from a Batcher.
ResultsIterator implements Python's iterator protocol, so results can be
accessed with the for-statement:
> it = ResultsIterator(Query(kind='Person').run())
> for person in it:
> print 'Hi, %s!' % person['name']
At any time ResultsIterator.cursor() can be used to grab the Cursor that
points just after the last result returned by the iterator.
"""
def __init__(self, batcher):
"""Constructor.
Args:
batcher: A datastore_query.Batcher
"""
if not isinstance(batcher, Batcher):
raise datastore_errors.BadArgumentError(
'batcher argument should be datastore_query.Batcher (%r)' %
(batcher,))
self.__batcher = batcher
self.__current_batch = None
self.__current_pos = 0
def cursor(self):
"""Returns a cursor that points just after the last result returned."""
if not self.__current_batch:
self.__current_batch = self.__batcher.next()
self.__current_pos = 0
return self.__current_batch.cursor(self.__current_pos)
def _compiled_query(self):
"""Returns the compiled query associated with the iterator.
Internal only do not use.
"""
if not self.__current_batch:
self.__current_batch = self.__batcher.next()
self.__current_pos = 0
return self.__current_batch._compiled_query
def next(self):
"""Returns the next query result."""
while (not self.__current_batch or
self.__current_pos >= len(self.__current_batch.results)):
next_batch = self.__batcher.next()
if not next_batch:
raise StopIteration
self.__current_pos = 0
self.__current_batch = next_batch
result = self.__current_batch.results[self.__current_pos]
self.__current_pos += 1
return result
def __iter__(self):
return self
|
py | b403847621cfb45ba468852e8c6ccec3862f1179 | # Copyright 2013 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import pecan
from wsme import types as wtypes
from inventory.api.controllers.v1 import base
def build_url(resource, resource_args, bookmark=False, base_url=None):
if base_url is None:
base_url = pecan.request.public_url
template = '%(url)s/%(res)s' if bookmark else '%(url)s/v1/%(res)s'
    # FIXME(lucasagomes): I'm getting a 404 when doing a GET on
    # a nested resource when the URL ends with a '/'.
# https://groups.google.com/forum/#!topic/pecan-dev/QfSeviLg5qs
template += '%(args)s' if resource_args.startswith('?') else '/%(args)s'
return template % {'url': base_url, 'res': resource, 'args': resource_args}
class Link(base.APIBase):
"""A link representation."""
href = wtypes.text
"""The url of a link."""
rel = wtypes.text
"""The name of a link."""
type = wtypes.text
"""Indicates the type of document/link."""
@staticmethod
def make_link(rel_name, url, resource, resource_args,
bookmark=False, type=wtypes.Unset):
href = build_url(resource, resource_args,
bookmark=bookmark, base_url=url)
return Link(href=href, rel=rel_name, type=type)
@classmethod
def sample(cls):
sample = cls(href="http://localhost:18002"
"eeaca217-e7d8-47b4-bb41-3f99f20ead81",
rel="bookmark")
return sample
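# Hedged usage sketch (not part of the original module): how make_link composes a
# versioned vs. a bookmark URL. The host, resource name ('hosts') and UUID are
# hypothetical; passing base_url explicitly keeps the sketch independent of
# pecan.request.
def _example_links(resource_uuid):
    base = 'http://localhost:18002'
    return [
        # -> href == 'http://localhost:18002/v1/hosts/<uuid>', rel == 'self'
        Link.make_link('self', base, 'hosts', resource_uuid),
        # -> href == 'http://localhost:18002/hosts/<uuid>', rel == 'bookmark'
        Link.make_link('bookmark', base, 'hosts', resource_uuid, bookmark=True),
    ]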
|
py | b40387df9ccd299875b2ed4250a51f3cd1359782 | from django.contrib import messages
from django.contrib.admin.views.decorators import staff_member_required
from django.db.models import Q
from django.shortcuts import render, get_object_or_404, redirect
from webapp.forms import TaskGroupCSVForm, TaskGroupAccessForm, TaskGroupInviteForm
from webapp.models import TaskGroupInviteToken, TaskGroupAccess
from webapp.utils.access import *
from webapp.utils.main import build_http_array
@staff_member_required
def index(request, group_id):
group, role = item_is_staff('group', request.user, group_id)
access_all = group.taskgroupaccess_set.filter(~Q(role='dev')).select_related('user').order_by(
'provider_last_name',
'provider_first_name',
'user__last_name',
'user__first_name'
)
access_user = []
access_staff = []
for access in access_all:
if access.role == 'user':
access_user.append(access)
else:
access_staff.append(access)
context = {
'user_role': role,
'group': group,
'access_user': access_user,
'access_staff': access_staff,
'invitations_count': group.taskgroupinvitetoken_set.count(),
'title': 'Access control for ' + group.name
}
return render(request, 'webapp/admin/group/access/index.html', context)
@staff_member_required
def invitation_list(request, group_id):
group, role = item_is_staff('group', request.user, group_id)
invitations = group.taskgroupinvitetoken_set.all().select_related('created_by').order_by('creation_date')
if not invitations:
messages.warning(request, 'There are no invitations for this group.')
return redirect('staff_group_access', group_id=group_id)
context = {
'group': group,
'role': role,
'title': 'Invitations to the {}'.format(group.name),
'invitations': invitations
}
return render(request, 'webapp/admin/group/access/invitations.html', context)
@staff_member_required
def revoke_invitation(request, invitation_id, group_id):
token = get_object_or_404(TaskGroupInviteToken, pk=invitation_id)
if token.check_valid():
token.is_valid = False
token.save()
messages.success(request, 'Invitation successfully revoked.')
else:
messages.error(request, 'Unable to revoke the invitation.')
return redirect('staff_group_invitation_list', group_id=group_id)
@staff_member_required
def change(request, group_id):
group, role = item_is_staff('group', request.user, group_id)
if group.archived:
messages.warning(request, 'This group is archived. No change of access type is allowed while archived.')
elif request.method == 'POST':
if not group.is_public:
# if group is not public - we are about to make it public.
# Hence all TaskGroupAccess'es for users should be revoked
group.taskgroupaccess_set.filter(role='user').delete()
group.is_public = not group.is_public
group.save()
messages.success(request, 'Access type successfully changed.')
else:
return render(request, 'webapp/admin/group/access/change_type.html', {
'group': group,
'title': 'Change access to ' + group.name
})
return redirect('staff_group_access', group_id=group.id)
@staff_member_required
def revoke_user_access(request, group_id, id_type, member_id):
group, role = item_is_staff('group', request.user, group_id)
rd = redirect('staff_group_access', group_id=group.id)
try:
if id_type == 'user':
cond = Q(user_id=member_id)
elif id_type == 'provider':
cond = Q(provider_id=member_id)
else:
messages.error(request, 'Undefined id type')
return rd
member = group.taskgroupaccess_set.get(cond & ~Q(role='owner'))
except TaskGroupAccess.DoesNotExist:
        messages.warning(request, 'The user whose access should be revoked cannot be found.')
return rd
own = member.user and member.user == request.user
if request.method == 'POST':
if own:
# if revoking own access - redirect to the dashboard
rd = redirect('staff_dashboard')
member.delete()
messages.success(request, 'Access successfully revoked')
return rd
context = {
'member': member,
'own': own,
'id_type': id_type,
'member_id': member_id,
'group': group,
'title': 'Revoke access'
}
return render(request, 'webapp/admin/group/access/revoke.html', context)
@staff_member_required
def csv_import(request, group_id):
users = build_http_array(request.POST, 'user')
counter = 0
selected_counter = 0
for key in users:
if 'add' in users[key] and users[key]['add']:
selected_counter += 1
if grant_abstract_user_access(group_id, users[key]):
counter += 1
if counter:
messages.success(request, 'Successfully imported {} user{}.'.format(
str(counter),
's' if counter > 1 else ''
))
elif selected_counter:
        messages.warning(request, 'No new users were imported because the selected ones are already on the list.')
else:
messages.warning(request, 'No new users were imported.')
return redirect('staff_group_access', group_id=group_id)
@staff_member_required
def user(request, group_id):
group, role = item_is_staff('group', request.user, group_id)
if group.is_public:
        messages.warning(request, 'Unable to add users because this group is public. All users already have access.')
return redirect('staff_group_access', group_id=group.id)
if request.method == 'POST':
if 'upload_csv' in request.POST:
csv_form = TaskGroupCSVForm(request.POST, request.FILES)
if csv_form.is_valid():
file = request.FILES['file']
if not file.name.endswith('.csv'):
messages.error(request, 'Wrong file type. Only .csv files are supported.')
elif file.size > 64000:
messages.error(request, 'File size exceeds 64KB.')
else:
try:
users = load_csv_users(file.read().decode('utf-8'), group)
return render(request, 'webapp/admin/group/access/csv_import.html', {
'users': users,
'group': group,
'title': 'Choose users to import'
})
except (UnicodeDecodeError, KeyError, IndexError):
messages.warning(request, 'File parsing failure. The file may be corrupted.')
else:
messages.error(request, 'No file was uploaded')
elif 'grant_single' in request.POST or 'send_invitation' in request.POST:
if 'grant_single' in request.POST:
level, message = grant_single_access(request, 'user', group)
else:
level, message = send_invitation(request, 'user', group)
messages.add_message(request, level, message)
if level == messages.SUCCESS:
return redirect('staff_group_access', group_id=group.id)
context = {
'type': 'user',
'csv_form': TaskGroupCSVForm(),
'grant_single_form': TaskGroupAccessForm(),
'single_invitation_form': TaskGroupInviteForm(),
'user_role': role,
'group': group,
'title': 'Add user access for ' + group.name
}
return render(request, 'webapp/admin/group/access/add.html', context)
@staff_member_required
def staff(request, group_id):
group, role = item_is_staff('group', request.user, group_id)
if request.method == 'POST':
if 'grant_single' in request.POST:
level, message = grant_single_access(request, 'staff', group)
elif 'send_invitation' in request.POST:
level, message = send_invitation(request, 'staff', group)
else:
return redirect('staff_group_access_staff', group_id=group.id)
messages.add_message(request, level, message)
if level == messages.SUCCESS:
return redirect('staff_group_access', group_id=group.id)
context = {
'type': 'staff',
'grant_single_form': TaskGroupAccessForm(),
'single_invitation_form': TaskGroupInviteForm(),
'user_role': role,
'group': group,
'title': 'Add staff access for ' + group.name
}
return render(request, 'webapp/admin/group/access/add.html', context)
|
py | b403883f535c07378811e4f6fdc73fdca11d15e8 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# All code provided from the http://gengo.com site, such as API example code
# and libraries, is provided under the New BSD license unless otherwise
# noted. Details are below.
#
# New BSD License
# Copyright (c) 2009-2020, Gengo, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# Neither the name of Gengo, Inc. nor the names of its contributors may
# be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from gengo import Gengo
# Get an instance of Gengo to work with...
gengo = Gengo(
public_key='your_public_key',
private_key='your_private_key',
sandbox=True,
debug=True
)
# Update a job that has an id of 42, and reject it, cite the reason
# and add a comment. See the docs for
# more information pertaining to this method, it can do quite a bit. :)
print(gengo.updateTranslationJob(id=42, action={
'action': 'reject',
'reason': 'quality',
'comment': 'My grandmother does better.',
}))
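# Hedged follow-up sketch (not part of the original example): two read-only calls
# that are assumed to exist on the same Gengo client - check your client version's
# documentation if the method names differ.
print(gengo.getTranslationJob(id=42))   # inspect the job before acting on it
print(gengo.getAccountBalance())        # check the remaining account credits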
|
py | b40388a0bc6e710c6642c53222ca6c99ee375105 | from django.contrib import admin
# Register your models here.
from skillsfinder.models import Skill, SkillCategory, Profile, Organisation
admin.site.register(Organisation)
admin.site.register(Profile)
admin.site.register(Skill)
admin.site.register(SkillCategory)
|
py | b40389004ef91e438ab638881aa342574e0f8e81 | from django.db import models
from django.contrib.auth.models import User
class UserProfile(models.Model):
user = models.OneToOneField(User)
firstname = models.CharField(max_length=30, blank=False, null=False)
lastname = models.CharField(max_length=30, blank=True, null=True)
status = models.CharField(max_length=7, blank=True, null=True)
created_date = models.DateTimeField(auto_now_add=True, auto_now=False)
updated_date = models.DateTimeField(auto_now_add=False, auto_now=True)
def __unicode__(self):
return self.firstname
class ProfileInvites(models.Model):
profile = models.ManyToManyField(UserProfile)
invitee = models.ManyToManyField(User)
status = models.CharField(max_length=20, blank=True, null=True)
created_date = models.DateTimeField(auto_now_add=True, auto_now=False)
updated_date = models.DateTimeField(auto_now_add=False, auto_now=True)
class ProfileFiles(models.Model):
profile = models.ManyToManyField(UserProfile)
locked = models.ManyToManyField(User, blank=True, null=True)
filename = models.CharField(max_length=256, blank=False, null=False)
fullpath = models.CharField(max_length=10240, blank=False, null=False)
created_date = models.DateTimeField(auto_now_add=True, auto_now=False)
updated_date = models.DateTimeField(auto_now_add=False, auto_now=True)
class ChatMessage(models.Model):
user = models.ForeignKey(UserProfile)
chatroom = models.CharField(max_length=30, blank=False, null=True)
message = models.CharField(max_length=1024, blank=False, null=True)
created_date = models.DateTimeField(auto_now_add=True, auto_now=False)
User.profile = property(lambda u: UserProfile.objects.get_or_create(user=u)[0])
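# Hedged usage sketch (not part of the original models): the monkey-patched
# User.profile property above lazily creates a UserProfile on first access via
# get_or_create, so view code can rely on it existing, e.g. (hypothetical view):
#
#     def whoami(request):
#         profile = request.user.profile  # created on demand if missing
#         return HttpResponse(profile.firstname)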
|
py | b403894d4ccce5b371737012da6b802469859edc | # coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import csv
import dataclasses
import json
from dataclasses import dataclass
from typing import List, Optional, Union
from ...utils import is_tf_available, is_torch_available, logging
logger = logging.get_logger(__name__)
@dataclass
class InputExample:
"""
A single training/test example for simple sequence classification.
Args:
guid: Unique id for the example.
text_a: string. The untokenized text of the first sequence. For single
sequence tasks, only this sequence must be specified.
text_b: (Optional) string. The untokenized text of the second sequence.
            Must only be specified for sequence pair tasks.
label: (Optional) string. The label of the example. This should be
specified for train and dev examples, but not for test examples.
"""
guid: str
text_a: str
text_b: Optional[str] = None
label: Optional[str] = None
def to_json_string(self):
"""Serializes this instance to a JSON string."""
return json.dumps(dataclasses.asdict(self), indent=2) + "\n"
@dataclass(frozen=True)
class InputFeatures:
"""
A single set of features of data. Property names are the same names as the corresponding inputs to a model.
Args:
input_ids: Indices of input sequence tokens in the vocabulary.
attention_mask: Mask to avoid performing attention on padding token indices.
Mask values selected in `[0, 1]`: Usually `1` for tokens that are NOT MASKED, `0` for MASKED (padded)
tokens.
token_type_ids: (Optional) Segment token indices to indicate first and second
portions of the inputs. Only some models use them.
label: (Optional) Label corresponding to the input. Int for classification problems,
float for regression problems.
"""
input_ids: List[int]
attention_mask: Optional[List[int]] = None
token_type_ids: Optional[List[int]] = None
label: Optional[Union[int, float]] = None
def to_json_string(self):
"""Serializes this instance to a JSON string."""
return json.dumps(dataclasses.asdict(self)) + "\n"
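# Hedged sketch (not part of the original module): constructing the two dataclasses
# above and serializing them. The guid, text, token ids and labels are made up.
def _example_inputs():
    example = InputExample(guid="train-0", text_a="a gripping thriller", label="pos")
    features = InputFeatures(input_ids=[101, 2023, 102], attention_mask=[1, 1, 1], label=1)
    return example.to_json_string(), features.to_json_string()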
class DataProcessor:
"""Base class for data converters for sequence classification data sets."""
def get_example_from_tensor_dict(self, tensor_dict):
"""
Gets an example from a dict with tensorflow tensors.
Args:
tensor_dict: Keys and values should match the corresponding Glue
tensorflow_dataset examples.
"""
raise NotImplementedError()
def get_train_examples(self, data_dir):
"""Gets a collection of [`InputExample`] for the train set."""
raise NotImplementedError()
def get_dev_examples(self, data_dir):
"""Gets a collection of [`InputExample`] for the dev set."""
raise NotImplementedError()
def get_test_examples(self, data_dir):
"""Gets a collection of [`InputExample`] for the test set."""
raise NotImplementedError()
def get_labels(self):
"""Gets the list of labels for this data set."""
raise NotImplementedError()
def tfds_map(self, example):
"""
Some tensorflow_datasets datasets are not formatted the same way the GLUE datasets are. This method converts
examples to the correct format.
"""
if len(self.get_labels()) > 1:
example.label = self.get_labels()[int(example.label)]
return example
@classmethod
def _read_tsv(cls, input_file, quotechar=None):
"""Reads a tab separated value file."""
with open(input_file, "r", encoding="utf-8-sig") as f:
return list(csv.reader(f, delimiter="\t", quotechar=quotechar))
class SingleSentenceClassificationProcessor(DataProcessor):
"""Generic processor for a single sentence classification data set."""
def __init__(self, labels=None, examples=None, mode="classification", verbose=False):
self.labels = [] if labels is None else labels
self.examples = [] if examples is None else examples
self.mode = mode
self.verbose = verbose
def __len__(self):
return len(self.examples)
def __getitem__(self, idx):
if isinstance(idx, slice):
return SingleSentenceClassificationProcessor(labels=self.labels, examples=self.examples[idx])
return self.examples[idx]
@classmethod
def create_from_csv(
cls, file_name, split_name="", column_label=0, column_text=1, column_id=None, skip_first_row=False, **kwargs
):
processor = cls(**kwargs)
processor.add_examples_from_csv(
file_name,
split_name=split_name,
column_label=column_label,
column_text=column_text,
column_id=column_id,
skip_first_row=skip_first_row,
overwrite_labels=True,
overwrite_examples=True,
)
return processor
@classmethod
def create_from_examples(cls, texts_or_text_and_labels, labels=None, **kwargs):
processor = cls(**kwargs)
processor.add_examples(texts_or_text_and_labels, labels=labels)
return processor
def add_examples_from_csv(
self,
file_name,
split_name="",
column_label=0,
column_text=1,
column_id=None,
skip_first_row=False,
overwrite_labels=False,
overwrite_examples=False,
):
lines = self._read_tsv(file_name)
if skip_first_row:
lines = lines[1:]
texts = []
labels = []
ids = []
for (i, line) in enumerate(lines):
texts.append(line[column_text])
labels.append(line[column_label])
if column_id is not None:
ids.append(line[column_id])
else:
guid = f"{split_name}-{i}" if split_name else str(i)
ids.append(guid)
return self.add_examples(
texts, labels, ids, overwrite_labels=overwrite_labels, overwrite_examples=overwrite_examples
)
def add_examples(
self, texts_or_text_and_labels, labels=None, ids=None, overwrite_labels=False, overwrite_examples=False
):
if labels is not None and len(texts_or_text_and_labels) != len(labels):
raise ValueError(
f"Text and labels have mismatched lengths {len(texts_or_text_and_labels)} and {len(labels)}"
)
if ids is not None and len(texts_or_text_and_labels) != len(ids):
raise ValueError(f"Text and ids have mismatched lengths {len(texts_or_text_and_labels)} and {len(ids)}")
if ids is None:
ids = [None] * len(texts_or_text_and_labels)
if labels is None:
labels = [None] * len(texts_or_text_and_labels)
examples = []
added_labels = set()
for (text_or_text_and_label, label, guid) in zip(texts_or_text_and_labels, labels, ids):
if isinstance(text_or_text_and_label, (tuple, list)) and label is None:
text, label = text_or_text_and_label
else:
text = text_or_text_and_label
added_labels.add(label)
examples.append(InputExample(guid=guid, text_a=text, text_b=None, label=label))
# Update examples
if overwrite_examples:
self.examples = examples
else:
self.examples.extend(examples)
# Update labels
if overwrite_labels:
self.labels = list(added_labels)
else:
self.labels = list(set(self.labels).union(added_labels))
return self.examples
def get_features(
self,
tokenizer,
max_length=None,
pad_on_left=False,
pad_token=0,
mask_padding_with_zero=True,
return_tensors=None,
):
"""
Convert examples in a list of `InputFeatures`
Args:
tokenizer: Instance of a tokenizer that will tokenize the examples
max_length: Maximum example length
pad_on_left: If set to `True`, the examples will be padded on the left rather than on the right (default)
pad_token: Padding token
mask_padding_with_zero: If set to `True`, the attention mask will be filled by `1` for actual values
and by `0` for padded values. If set to `False`, inverts it (`1` for padded values, `0` for actual
values)
Returns:
If the `examples` input is a `tf.data.Dataset`, will return a `tf.data.Dataset` containing the
task-specific features. If the input is a list of `InputExamples`, will return a list of task-specific
`InputFeatures` which can be fed to the model.
"""
if max_length is None:
max_length = tokenizer.max_len
label_map = {label: i for i, label in enumerate(self.labels)}
all_input_ids = []
for (ex_index, example) in enumerate(self.examples):
if ex_index % 10000 == 0:
logger.info(f"Tokenizing example {ex_index}")
input_ids = tokenizer.encode(
example.text_a,
add_special_tokens=True,
max_length=min(max_length, tokenizer.max_len),
)
all_input_ids.append(input_ids)
batch_length = max(len(input_ids) for input_ids in all_input_ids)
features = []
for (ex_index, (input_ids, example)) in enumerate(zip(all_input_ids, self.examples)):
if ex_index % 10000 == 0:
logger.info(f"Writing example {ex_index}/{len(self.examples)}")
# The mask has 1 for real tokens and 0 for padding tokens. Only real
# tokens are attended to.
attention_mask = [1 if mask_padding_with_zero else 0] * len(input_ids)
# Zero-pad up to the sequence length.
padding_length = batch_length - len(input_ids)
if pad_on_left:
input_ids = ([pad_token] * padding_length) + input_ids
attention_mask = ([0 if mask_padding_with_zero else 1] * padding_length) + attention_mask
else:
input_ids = input_ids + ([pad_token] * padding_length)
attention_mask = attention_mask + ([0 if mask_padding_with_zero else 1] * padding_length)
if len(input_ids) != batch_length:
raise ValueError(f"Error with input length {len(input_ids)} vs {batch_length}")
if len(attention_mask) != batch_length:
raise ValueError(f"Error with input length {len(attention_mask)} vs {batch_length}")
if self.mode == "classification":
label = label_map[example.label]
elif self.mode == "regression":
label = float(example.label)
else:
raise ValueError(self.mode)
if ex_index < 5 and self.verbose:
logger.info("*** Example ***")
logger.info(f"guid: {example.guid}")
logger.info(f"input_ids: {' '.join([str(x) for x in input_ids])}")
logger.info(f"attention_mask: {' '.join([str(x) for x in attention_mask])}")
logger.info(f"label: {example.label} (id = {label})")
features.append(InputFeatures(input_ids=input_ids, attention_mask=attention_mask, label=label))
if return_tensors is None:
return features
elif return_tensors == "tf":
if not is_tf_available():
raise RuntimeError("return_tensors set to 'tf' but TensorFlow 2.0 can't be imported")
import tensorflow as tf
def gen():
for ex in features:
yield ({"input_ids": ex.input_ids, "attention_mask": ex.attention_mask}, ex.label)
dataset = tf.data.Dataset.from_generator(
gen,
({"input_ids": tf.int32, "attention_mask": tf.int32}, tf.int64),
({"input_ids": tf.TensorShape([None]), "attention_mask": tf.TensorShape([None])}, tf.TensorShape([])),
)
return dataset
elif return_tensors == "pt":
if not is_torch_available():
raise RuntimeError("return_tensors set to 'pt' but PyTorch can't be imported")
import torch
from torch.utils.data import TensorDataset
all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)
all_attention_mask = torch.tensor([f.attention_mask for f in features], dtype=torch.long)
if self.mode == "classification":
all_labels = torch.tensor([f.label for f in features], dtype=torch.long)
elif self.mode == "regression":
all_labels = torch.tensor([f.label for f in features], dtype=torch.float)
dataset = TensorDataset(all_input_ids, all_attention_mask, all_labels)
return dataset
else:
raise ValueError("return_tensors should be one of 'tf' or 'pt'")
|
py | b403899e54f9195d8444b0e8a7bbc8feb6edfcf7 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=C,R,W
"""This module contains the 'Viz' objects
These objects represent the backend of all the visualizations that
Superset can render.
"""
import copy
import inspect
import logging
import math
import re
from collections import defaultdict, OrderedDict
from datetime import date, datetime, timedelta
from itertools import product
from typing import (
Any,
Callable,
cast,
Dict,
List,
Optional,
Set,
Tuple,
Type,
TYPE_CHECKING,
Union,
)
import geohash
import numpy as np
import pandas as pd
import polyline
import simplejson as json
from dateutil import relativedelta as rdelta
from flask import request
from flask_babel import lazy_gettext as _
from geopy.point import Point
from pandas.tseries.frequencies import to_offset
from superset import app, db, is_feature_enabled
from superset.constants import NULL_STRING
from superset.errors import ErrorLevel, SupersetError, SupersetErrorType
from superset.exceptions import (
CacheLoadError,
NullValueException,
QueryObjectValidationError,
SpatialException,
)
from superset.extensions import cache_manager, security_manager
from superset.models.cache import CacheKey
from superset.models.helpers import QueryResult
from superset.typing import QueryObjectDict, VizData, VizPayload
from superset.utils import core as utils
from superset.utils.cache import set_and_log_cache
from superset.utils.core import (
DTTM_ALIAS,
JS_MAX_INTEGER,
merge_extra_filters,
QueryMode,
to_adhoc,
)
from superset.utils.date_parser import get_since_until, parse_past_timedelta
from superset.utils.dates import datetime_to_epoch
from superset.utils.hashing import md5_sha_from_str
import dataclasses # isort:skip
if TYPE_CHECKING:
from superset.connectors.base.models import BaseDatasource
config = app.config
stats_logger = config["STATS_LOGGER"]
relative_start = config["DEFAULT_RELATIVE_START_TIME"]
relative_end = config["DEFAULT_RELATIVE_END_TIME"]
logger = logging.getLogger(__name__)
METRIC_KEYS = [
"metric",
"metrics",
"percent_metrics",
"metric_2",
"secondary_metric",
"x",
"y",
"size",
]
# This regex is used to extract the user-defined filter column name, which is the first param in the filter_values function.
# see the definition of filter_values template:
# https://github.com/apache/superset/blob/24ad6063d736c1f38ad6f962e586b9b1a21946af/superset/jinja_context.py#L63
FILTER_VALUES_REGEX = re.compile(r"filter_values\(['\"](\w+)['\"]\,")
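# Hedged sketch (not part of the original module): what the regex above extracts from
# a virtual datasource's SQL. Given "... where region in (filter_values('region',))",
# FILTER_VALUES_REGEX.findall(...) returns ['region'], i.e. the user-defined filter
# column name referenced in get_payload below.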
class BaseViz:
"""All visualizations derive this base class"""
viz_type: Optional[str] = None
verbose_name = "Base Viz"
credits = ""
is_timeseries = False
cache_type = "df"
enforce_numerical_metrics = True
def __init__(
self,
datasource: "BaseDatasource",
form_data: Dict[str, Any],
force: bool = False,
force_cached: bool = False,
) -> None:
if not datasource:
raise QueryObjectValidationError(_("Viz is missing a datasource"))
self.datasource = datasource
self.request = request
self.viz_type = form_data.get("viz_type")
self.form_data = form_data
self.query = ""
self.token = utils.get_form_data_token(form_data)
self.groupby: List[str] = self.form_data.get("groupby") or []
self.time_shift = timedelta()
self.status: Optional[str] = None
self.error_msg = ""
self.results: Optional[QueryResult] = None
self.errors: List[Dict[str, Any]] = []
self.force = force
self._force_cached = force_cached
self.from_dttm: Optional[datetime] = None
self.to_dttm: Optional[datetime] = None
self._extra_chart_data: List[Tuple[str, pd.DataFrame]] = []
self.process_metrics()
self.applied_filters: List[Dict[str, str]] = []
self.rejected_filters: List[Dict[str, str]] = []
@property
def force_cached(self) -> bool:
return self._force_cached
def process_metrics(self) -> None:
# metrics in Viz is order sensitive, so metric_dict should be
# OrderedDict
self.metric_dict = OrderedDict()
fd = self.form_data
for mkey in METRIC_KEYS:
val = fd.get(mkey)
if val:
if not isinstance(val, list):
val = [val]
for o in val:
label = utils.get_metric_name(o)
self.metric_dict[label] = o
# Cast to list needed to return serializable object in py3
self.all_metrics = list(self.metric_dict.values())
self.metric_labels = list(self.metric_dict.keys())
@staticmethod
def handle_js_int_overflow(
data: Dict[str, List[Dict[str, Any]]]
) -> Dict[str, List[Dict[str, Any]]]:
for d in data.get("records", {}):
for k, v in list(d.items()):
if isinstance(v, int):
# if an int is too big for Java Script to handle
# convert it to a string
if abs(v) > JS_MAX_INTEGER:
d[k] = str(v)
return data
def run_extra_queries(self) -> None:
"""Lifecycle method to use when more than one query is needed
In rare-ish cases, a visualization may need to execute multiple
queries. That is the case for FilterBox or for time comparison
in Line chart for instance.
In those cases, we need to make sure these queries run before the
main `get_payload` method gets called, so that the overall caching
metadata can be right. The way it works here is that if any of
the previous `get_df_payload` calls hit the cache, the main
payload's metadata will reflect that.
The multi-query support may need more work to become a first class
use case in the framework, and for the UI to reflect the subtleties
(show that only some of the queries were served from cache for
instance). In the meantime, since multi-query is rare, we treat
it with a bit of a hack. Note that the hack became necessary
when moving from caching the visualization's data itself, to caching
the underlying query(ies).
"""
pass
def apply_rolling(self, df: pd.DataFrame) -> pd.DataFrame:
fd = self.form_data
rolling_type = fd.get("rolling_type")
rolling_periods = int(fd.get("rolling_periods") or 0)
min_periods = int(fd.get("min_periods") or 0)
if rolling_type in ("mean", "std", "sum") and rolling_periods:
kwargs = dict(window=rolling_periods, min_periods=min_periods)
if rolling_type == "mean":
df = df.rolling(**kwargs).mean()
elif rolling_type == "std":
df = df.rolling(**kwargs).std()
elif rolling_type == "sum":
df = df.rolling(**kwargs).sum()
elif rolling_type == "cumsum":
df = df.cumsum()
if min_periods:
df = df[min_periods:]
if df.empty:
raise QueryObjectValidationError(
_(
"Applied rolling window did not return any data. Please make sure "
"the source query satisfies the minimum periods defined in the "
"rolling window."
)
)
return df
def get_samples(self) -> List[Dict[str, Any]]:
query_obj = self.query_obj()
query_obj.update(
{
"is_timeseries": False,
"groupby": [],
"metrics": [],
"orderby": [],
"row_limit": config["SAMPLES_ROW_LIMIT"],
"columns": [o.column_name for o in self.datasource.columns],
}
)
df = self.get_df_payload(query_obj)["df"] # leverage caching logic
return df.to_dict(orient="records")
def get_df(self, query_obj: Optional[QueryObjectDict] = None) -> pd.DataFrame:
"""Returns a pandas dataframe based on the query object"""
if not query_obj:
query_obj = self.query_obj()
if not query_obj:
return pd.DataFrame()
self.error_msg = ""
timestamp_format = None
if self.datasource.type == "table":
granularity_col = self.datasource.get_column(query_obj["granularity"])
if granularity_col:
timestamp_format = granularity_col.python_date_format
# The datasource here can be different backend but the interface is common
self.results = self.datasource.query(query_obj)
self.query = self.results.query
self.status = self.results.status
self.errors = self.results.errors
df = self.results.df
# Transform the timestamp we received from database to pandas supported
# datetime format. If no python_date_format is specified, the pattern will
# be considered as the default ISO date format
# If the datetime format is unix, the parse will use the corresponding
# parsing logic.
if not df.empty:
df = utils.normalize_dttm_col(
df=df,
timestamp_format=timestamp_format,
offset=self.datasource.offset,
time_shift=self.time_shift,
)
if self.enforce_numerical_metrics:
self.df_metrics_to_num(df)
df.replace([np.inf, -np.inf], np.nan, inplace=True)
return df
def df_metrics_to_num(self, df: pd.DataFrame) -> None:
"""Converting metrics to numeric when pandas.read_sql cannot"""
metrics = self.metric_labels
for col, dtype in df.dtypes.items():
if dtype.type == np.object_ and col in metrics:
df[col] = pd.to_numeric(df[col], errors="coerce")
def process_query_filters(self) -> None:
utils.convert_legacy_filters_into_adhoc(self.form_data)
merge_extra_filters(self.form_data)
utils.split_adhoc_filters_into_base_filters(self.form_data)
def query_obj(self) -> QueryObjectDict:
"""Building a query object"""
form_data = self.form_data
self.process_query_filters()
gb = self.groupby
metrics = self.all_metrics or []
columns = form_data.get("columns") or []
# merge list and dedup while preserving order
groupby = list(OrderedDict.fromkeys(gb + columns))
is_timeseries = self.is_timeseries
if DTTM_ALIAS in groupby:
groupby.remove(DTTM_ALIAS)
is_timeseries = True
granularity = form_data.get("granularity") or form_data.get("granularity_sqla")
limit = int(form_data.get("limit") or 0)
timeseries_limit_metric = form_data.get("timeseries_limit_metric")
row_limit = int(form_data.get("row_limit") or config["ROW_LIMIT"])
# default order direction
order_desc = form_data.get("order_desc", True)
try:
since, until = get_since_until(
relative_start=relative_start,
relative_end=relative_end,
time_range=form_data.get("time_range"),
since=form_data.get("since"),
until=form_data.get("until"),
)
except ValueError as ex:
raise QueryObjectValidationError(str(ex))
time_shift = form_data.get("time_shift", "")
self.time_shift = parse_past_timedelta(time_shift)
from_dttm = None if since is None else (since - self.time_shift)
to_dttm = None if until is None else (until - self.time_shift)
if from_dttm and to_dttm and from_dttm > to_dttm:
raise QueryObjectValidationError(
_("From date cannot be larger than to date")
)
self.from_dttm = from_dttm
self.to_dttm = to_dttm
# extras are used to query elements specific to a datasource type
# for instance the extra where clause that applies only to Tables
extras = {
"druid_time_origin": form_data.get("druid_time_origin", ""),
"having": form_data.get("having", ""),
"having_druid": form_data.get("having_filters", []),
"time_grain_sqla": form_data.get("time_grain_sqla"),
"time_range_endpoints": form_data.get("time_range_endpoints"),
"where": form_data.get("where", ""),
}
return {
"granularity": granularity,
"from_dttm": from_dttm,
"to_dttm": to_dttm,
"is_timeseries": is_timeseries,
"groupby": groupby,
"metrics": metrics,
"row_limit": row_limit,
"filter": self.form_data.get("filters", []),
"timeseries_limit": limit,
"extras": extras,
"timeseries_limit_metric": timeseries_limit_metric,
"order_desc": order_desc,
}
@property
def cache_timeout(self) -> int:
if self.form_data.get("cache_timeout") is not None:
return int(self.form_data["cache_timeout"])
if self.datasource.cache_timeout is not None:
return self.datasource.cache_timeout
if (
hasattr(self.datasource, "database")
and self.datasource.database.cache_timeout
) is not None:
return self.datasource.database.cache_timeout
if config["DATA_CACHE_CONFIG"].get("CACHE_DEFAULT_TIMEOUT") is not None:
return config["DATA_CACHE_CONFIG"]["CACHE_DEFAULT_TIMEOUT"]
return config["CACHE_DEFAULT_TIMEOUT"]
def get_json(self) -> str:
return json.dumps(
self.get_payload(), default=utils.json_int_dttm_ser, ignore_nan=True
)
def cache_key(self, query_obj: QueryObjectDict, **extra: Any) -> str:
"""
The cache key is made out of the key/values in `query_obj`, plus any
other key/values in `extra`.
We remove datetime bounds that are hard values, and replace them with
        the user-provided inputs to bounds, which may be time-relative (as in
"5 days ago" or "now").
The `extra` arguments are currently used by time shift queries, since
        different time shifts will differ only in the `from_dttm`, `to_dttm`,
`inner_from_dttm`, and `inner_to_dttm` values which are stripped.
"""
cache_dict = copy.copy(query_obj)
cache_dict.update(extra)
for k in ["from_dttm", "to_dttm", "inner_from_dttm", "inner_to_dttm"]:
if k in cache_dict:
del cache_dict[k]
cache_dict["time_range"] = self.form_data.get("time_range")
cache_dict["datasource"] = self.datasource.uid
cache_dict["extra_cache_keys"] = self.datasource.get_extra_cache_keys(query_obj)
cache_dict["rls"] = (
security_manager.get_rls_ids(self.datasource)
if is_feature_enabled("ROW_LEVEL_SECURITY")
and self.datasource.is_rls_supported
else []
)
cache_dict["changed_on"] = self.datasource.changed_on
json_data = self.json_dumps(cache_dict, sort_keys=True)
return md5_sha_from_str(json_data)
def get_payload(self, query_obj: Optional[QueryObjectDict] = None) -> VizPayload:
"""Returns a payload of metadata and data"""
self.run_extra_queries()
payload = self.get_df_payload(query_obj)
df = payload.get("df")
if self.status != utils.QueryStatus.FAILED:
payload["data"] = self.get_data(df)
if "df" in payload:
del payload["df"]
filters = self.form_data.get("filters", [])
filter_columns = [flt.get("col") for flt in filters]
columns = set(self.datasource.column_names)
filter_values_columns = []
# if using virtual datasource, check filter_values
if self.datasource.sql:
filter_values_columns = (
re.findall(FILTER_VALUES_REGEX, self.datasource.sql)
) or []
applied_time_extras = self.form_data.get("applied_time_extras", {})
applied_time_columns, rejected_time_columns = utils.get_time_filter_status(
self.datasource, applied_time_extras
)
payload["applied_filters"] = [
{"column": col}
for col in filter_columns
if col in columns or col in filter_values_columns
] + applied_time_columns
payload["rejected_filters"] = [
{"reason": "not_in_datasource", "column": col}
for col in filter_columns
if col not in columns and col not in filter_values_columns
] + rejected_time_columns
return payload
def get_df_payload(
self, query_obj: Optional[QueryObjectDict] = None, **kwargs: Any
) -> Dict[str, Any]:
"""Handles caching around the df payload retrieval"""
if not query_obj:
query_obj = self.query_obj()
cache_key = self.cache_key(query_obj, **kwargs) if query_obj else None
cache_value = None
logger.info("Cache key: {}".format(cache_key))
is_loaded = False
stacktrace = None
df = None
if cache_key and cache_manager.data_cache and not self.force:
cache_value = cache_manager.data_cache.get(cache_key)
if cache_value:
stats_logger.incr("loading_from_cache")
try:
df = cache_value["df"]
self.query = cache_value["query"]
self.status = utils.QueryStatus.SUCCESS
is_loaded = True
stats_logger.incr("loaded_from_cache")
except Exception as ex:
logger.exception(ex)
logger.error(
"Error reading cache: " + utils.error_msg_from_exception(ex)
)
logger.info("Serving from cache")
if query_obj and not is_loaded:
if self.force_cached:
logger.warning(
f"force_cached (viz.py): value not found for cache key {cache_key}"
)
raise CacheLoadError(_("Cached value not found"))
try:
invalid_columns = [
col
for col in (query_obj.get("columns") or [])
+ (query_obj.get("groupby") or [])
+ utils.get_column_names_from_metrics(
cast(
List[Union[str, Dict[str, Any]]], query_obj.get("metrics"),
)
)
if col not in self.datasource.column_names
]
if invalid_columns:
raise QueryObjectValidationError(
_(
"Columns missing in datasource: %(invalid_columns)s",
invalid_columns=invalid_columns,
)
)
df = self.get_df(query_obj)
if self.status != utils.QueryStatus.FAILED:
stats_logger.incr("loaded_from_source")
if not self.force:
stats_logger.incr("loaded_from_source_without_force")
is_loaded = True
except QueryObjectValidationError as ex:
error = dataclasses.asdict(
SupersetError(
message=str(ex),
level=ErrorLevel.ERROR,
error_type=SupersetErrorType.VIZ_GET_DF_ERROR,
)
)
self.errors.append(error)
self.status = utils.QueryStatus.FAILED
except Exception as ex:
logger.exception(ex)
error = dataclasses.asdict(
SupersetError(
message=str(ex),
level=ErrorLevel.ERROR,
error_type=SupersetErrorType.VIZ_GET_DF_ERROR,
)
)
self.errors.append(error)
self.status = utils.QueryStatus.FAILED
stacktrace = utils.get_stacktrace()
if is_loaded and cache_key and self.status != utils.QueryStatus.FAILED:
set_and_log_cache(
cache_manager.data_cache,
cache_key,
{"df": df, "query": self.query},
self.cache_timeout,
self.datasource.uid,
)
return {
"cache_key": cache_key,
"cached_dttm": cache_value["dttm"] if cache_value is not None else None,
"cache_timeout": self.cache_timeout,
"df": df,
"errors": self.errors,
"form_data": self.form_data,
"is_cached": cache_value is not None,
"query": self.query,
"from_dttm": self.from_dttm,
"to_dttm": self.to_dttm,
"status": self.status,
"stacktrace": stacktrace,
"rowcount": len(df.index) if df is not None else 0,
}
def json_dumps(self, obj: Any, sort_keys: bool = False) -> str:
return json.dumps(
obj, default=utils.json_int_dttm_ser, ignore_nan=True, sort_keys=sort_keys
)
def has_error(self, payload: VizPayload) -> bool:
return (
payload.get("status") == utils.QueryStatus.FAILED
or payload.get("error") is not None
or bool(payload.get("errors"))
)
def payload_json_and_has_error(self, payload: VizPayload) -> Tuple[str, bool]:
return self.json_dumps(payload), self.has_error(payload)
@property
def data(self) -> Dict[str, Any]:
"""This is the data object serialized to the js layer"""
content = {
"form_data": self.form_data,
"token": self.token,
"viz_name": self.viz_type,
"filter_select_enabled": self.datasource.filter_select_enabled,
}
return content
def get_csv(self) -> Optional[str]:
df = self.get_df_payload()["df"] # leverage caching logic
include_index = not isinstance(df.index, pd.RangeIndex)
return df.to_csv(index=include_index, **config["CSV_EXPORT"])
def get_data(self, df: pd.DataFrame) -> VizData:
return df.to_dict(orient="records")
@property
def json_data(self) -> str:
return json.dumps(self.data)
def raise_for_access(self) -> None:
"""
Raise an exception if the user cannot access the resource.
:raises SupersetSecurityException: If the user cannot access the resource
"""
security_manager.raise_for_access(viz=self)
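# Hedged sketch (not part of the original module): the core of BaseViz.apply_rolling
# expressed on a bare DataFrame, for a hypothetical form_data of
# {"rolling_type": "mean", "rolling_periods": 7, "min_periods": 7}.
def _example_rolling_mean(df: pd.DataFrame) -> pd.DataFrame:
    rolled = df.rolling(window=7, min_periods=7).mean()
    # apply_rolling additionally trims the first `min_periods` rows, which are all-NaN here
    return rolled[7:]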
class TableViz(BaseViz):
"""A basic html table that is sortable and searchable"""
viz_type = "table"
verbose_name = _("Table View")
credits = 'a <a href="https://github.com/airbnb/superset">Superset</a> original'
is_timeseries = False
enforce_numerical_metrics = False
def process_metrics(self) -> None:
"""Process form data and store parsed column configs.
1. Determine query mode based on form_data params.
- Use `query_mode` if it has a valid value
- Set as RAW mode if `all_columns` is set
- Otherwise defaults to AGG mode
2. Determine output columns based on query mode.
"""
# Verify form data first: if not specifying query mode, then cannot have both
# GROUP BY and RAW COLUMNS.
fd = self.form_data
if (
not fd.get("query_mode")
and fd.get("all_columns")
and (fd.get("groupby") or fd.get("metrics") or fd.get("percent_metrics"))
):
raise QueryObjectValidationError(
_(
"You cannot use [Columns] in combination with "
"[Group By]/[Metrics]/[Percentage Metrics]. "
"Please choose one or the other."
)
)
super().process_metrics()
self.query_mode: QueryMode = QueryMode.get(fd.get("query_mode")) or (
# infer query mode from the presence of other fields
QueryMode.RAW
if len(fd.get("all_columns") or []) > 0
else QueryMode.AGGREGATE
)
columns: List[str] = [] # output columns sans time and percent_metric column
percent_columns: List[str] = [] # percent columns that needs extra computation
if self.query_mode == QueryMode.RAW:
columns = utils.get_metric_names(fd.get("all_columns") or [])
else:
columns = utils.get_metric_names(self.groupby + (fd.get("metrics") or []))
percent_columns = utils.get_metric_names(fd.get("percent_metrics") or [])
self.columns = columns
self.percent_columns = percent_columns
self.is_timeseries = self.should_be_timeseries()
def should_be_timeseries(self) -> bool:
fd = self.form_data
# TODO handle datasource-type-specific code in datasource
conditions_met = (fd.get("granularity") and fd.get("granularity") != "all") or (
fd.get("granularity_sqla") and fd.get("time_grain_sqla")
)
if fd.get("include_time") and not conditions_met:
raise QueryObjectValidationError(
_("Pick a granularity in the Time section or " "uncheck 'Include Time'")
)
return bool(fd.get("include_time"))
def query_obj(self) -> QueryObjectDict:
d = super().query_obj()
fd = self.form_data
if self.query_mode == QueryMode.RAW:
d["columns"] = fd.get("all_columns")
order_by_cols = fd.get("order_by_cols") or []
d["orderby"] = [json.loads(t) for t in order_by_cols]
# must disable groupby and metrics in raw mode
d["groupby"] = []
d["metrics"] = []
# raw mode does not support timeseries queries
d["timeseries_limit_metric"] = None
d["timeseries_limit"] = None
d["is_timeseries"] = None
else:
sort_by = fd.get("timeseries_limit_metric")
if sort_by:
sort_by_label = utils.get_metric_name(sort_by)
if sort_by_label not in d["metrics"]:
d["metrics"].append(sort_by)
d["orderby"] = [(sort_by, not fd.get("order_desc", True))]
elif d["metrics"]:
# Legacy behavior of sorting by first metric by default
first_metric = d["metrics"][0]
d["orderby"] = [(first_metric, not fd.get("order_desc", True))]
return d
def get_data(self, df: pd.DataFrame) -> VizData:
"""
Transform the query result to the table representation.
:param df: The interim dataframe
:returns: The table visualization data
        The interim dataframe comprises the group-by and non-group-by columns and
the union of the metrics representing the non-percent and percent metrics. Note
the percent metrics have yet to be transformed.
"""
# Transform the data frame to adhere to the UI ordering of the columns and
# metrics whilst simultaneously computing the percentages (via normalization)
# for the percent metrics.
if df.empty:
return None
columns, percent_columns = self.columns, self.percent_columns
if DTTM_ALIAS in df and self.is_timeseries:
columns = [DTTM_ALIAS] + columns
df = pd.concat(
[
df[columns],
(df[percent_columns].div(df[percent_columns].sum()).add_prefix("%")),
],
axis=1,
)
return self.handle_js_int_overflow(
dict(records=df.to_dict(orient="records"), columns=list(df.columns))
)
def json_dumps(self, obj: Any, sort_keys: bool = False) -> str:
return json.dumps(
obj, default=utils.json_iso_dttm_ser, sort_keys=sort_keys, ignore_nan=True
)
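# Hedged sketch (not part of the original module): the percent-metric normalization
# performed in TableViz.get_data above, on a made-up frame with one percent metric.
def _example_percent_columns() -> pd.DataFrame:
    df = pd.DataFrame({"city": ["a", "b"], "sum__num": [30.0, 70.0]})
    # divide by the column total, then prefix with '%' -> column '%sum__num' = 0.3, 0.7
    percent = df[["sum__num"]].div(df[["sum__num"]].sum()).add_prefix("%")
    return pd.concat([df[["city"]], percent], axis=1)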
class TimeTableViz(BaseViz):
"""A data table with rich time-series related columns"""
viz_type = "time_table"
verbose_name = _("Time Table View")
credits = 'a <a href="https://github.com/airbnb/superset">Superset</a> original'
is_timeseries = True
def query_obj(self) -> QueryObjectDict:
d = super().query_obj()
fd = self.form_data
if not fd.get("metrics"):
raise QueryObjectValidationError(_("Pick at least one metric"))
if fd.get("groupby") and len(fd["metrics"]) > 1:
raise QueryObjectValidationError(
_("When using 'Group By' you are limited to use a single metric")
)
return d
def get_data(self, df: pd.DataFrame) -> VizData:
if df.empty:
return None
fd = self.form_data
columns = None
values: Union[List[str], str] = self.metric_labels
if fd.get("groupby"):
values = self.metric_labels[0]
columns = fd.get("groupby")
pt = df.pivot_table(index=DTTM_ALIAS, columns=columns, values=values)
pt.index = pt.index.map(str)
pt = pt.sort_index()
return dict(
records=pt.to_dict(orient="index"),
columns=list(pt.columns),
is_group_by=True if fd.get("groupby") else False,
)
class PivotTableViz(BaseViz):
"""A pivot table view, define your rows, columns and metrics"""
viz_type = "pivot_table"
verbose_name = _("Pivot Table")
credits = 'a <a href="https://github.com/airbnb/superset">Superset</a> original'
is_timeseries = False
enforce_numerical_metrics = False
def query_obj(self) -> QueryObjectDict:
d = super().query_obj()
groupby = self.form_data.get("groupby")
columns = self.form_data.get("columns")
metrics = self.form_data.get("metrics")
transpose = self.form_data.get("transpose_pivot")
if not columns:
columns = []
if not groupby:
groupby = []
if not groupby:
raise QueryObjectValidationError(
_("Please choose at least one 'Group by' field ")
)
if transpose and not columns:
raise QueryObjectValidationError(
_(
(
"Please choose at least one 'Columns' field when "
"select 'Transpose Pivot' option"
)
)
)
if not metrics:
raise QueryObjectValidationError(_("Please choose at least one metric"))
if set(groupby) & set(columns):
raise QueryObjectValidationError(_("Group By' and 'Columns' can't overlap"))
sort_by = self.form_data.get("timeseries_limit_metric")
if sort_by:
sort_by_label = utils.get_metric_name(sort_by)
if sort_by_label not in d["metrics"]:
d["metrics"].append(sort_by)
if self.form_data.get("order_desc"):
d["orderby"] = [(sort_by, not self.form_data.get("order_desc", True))]
return d
@staticmethod
def get_aggfunc(
metric: str, df: pd.DataFrame, form_data: Dict[str, Any]
) -> Union[str, Callable[[Any], Any]]:
aggfunc = form_data.get("pandas_aggfunc") or "sum"
if pd.api.types.is_numeric_dtype(df[metric]):
# Ensure that Pandas's sum function mimics that of SQL.
if aggfunc == "sum":
return lambda x: x.sum(min_count=1)
# only min and max work properly for non-numerics
return aggfunc if aggfunc in ("min", "max") else "max"
@staticmethod
def _format_datetime(value: Union[pd.Timestamp, datetime, date, str]) -> str:
"""
Format a timestamp in such a way that the viz will be able to apply
the correct formatting in the frontend.
:param value: the value of a temporal column
:return: formatted timestamp if it is a valid timestamp, otherwise
the original value
"""
tstamp: Optional[pd.Timestamp] = None
if isinstance(value, pd.Timestamp):
tstamp = value
if isinstance(value, datetime) or isinstance(value, date):
tstamp = pd.Timestamp(value)
if isinstance(value, str):
try:
tstamp = pd.Timestamp(value)
except ValueError:
pass
if tstamp:
return f"__timestamp:{datetime_to_epoch(tstamp)}"
# fallback in case something incompatible is returned
return cast(str, value)
def get_data(self, df: pd.DataFrame) -> VizData:
if df.empty:
return None
if self.form_data.get("granularity") == "all" and DTTM_ALIAS in df:
del df[DTTM_ALIAS]
metrics = [utils.get_metric_name(m) for m in self.form_data["metrics"]]
aggfuncs: Dict[str, Union[str, Callable[[Any], Any]]] = {}
for metric in metrics:
aggfuncs[metric] = self.get_aggfunc(metric, df, self.form_data)
groupby = self.form_data.get("groupby") or []
columns = self.form_data.get("columns") or []
for column_name in groupby + columns:
column = self.datasource.get_column(column_name)
if column and column.is_temporal:
ts = df[column_name].apply(self._format_datetime)
df[column_name] = ts
if self.form_data.get("transpose_pivot"):
groupby, columns = columns, groupby
df = df.pivot_table(
index=groupby,
columns=columns,
values=metrics,
aggfunc=aggfuncs,
margins=self.form_data.get("pivot_margins"),
)
# Re-order the columns adhering to the metric ordering.
df = df[metrics]
# Display metrics side by side with each column
if self.form_data.get("combine_metric"):
df = df.stack(0).unstack()
return dict(
columns=list(df.columns),
html=df.to_html(
na_rep="null",
classes=(
"dataframe table table-striped table-bordered "
"table-condensed table-hover"
).split(" "),
),
)
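# Hedged sketch (not part of the original module): why get_aggfunc above wraps "sum"
# with min_count=1 for numeric metrics - plain pandas sum turns an all-NULL group
# into 0, whereas SQL's SUM (and the wrapped version) returns NULL.
def _example_sql_like_sum() -> Tuple[float, float]:
    s = pd.Series([np.nan, np.nan])
    return s.sum(), s.sum(min_count=1)  # -> (0.0, nan)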
class TreemapViz(BaseViz):
"""Tree map visualisation for hierarchical data."""
viz_type = "treemap"
verbose_name = _("Treemap")
credits = '<a href="https://d3js.org">d3.js</a>'
is_timeseries = False
def query_obj(self) -> QueryObjectDict:
d = super().query_obj()
metrics = self.form_data.get("metrics")
sort_by = self.form_data.get("timeseries_limit_metric")
if sort_by:
sort_by_label = utils.get_metric_name(sort_by)
if sort_by_label not in d["metrics"]:
d["metrics"].append(sort_by)
if self.form_data.get("order_desc"):
d["orderby"] = [(sort_by, not self.form_data.get("order_desc", True))]
return d
def _nest(self, metric: str, df: pd.DataFrame) -> List[Dict[str, Any]]:
nlevels = df.index.nlevels
if nlevels == 1:
result = [{"name": n, "value": v} for n, v in zip(df.index, df[metric])]
else:
result = [
{"name": l, "children": self._nest(metric, df.loc[l])}
for l in df.index.levels[0]
]
return result
def get_data(self, df: pd.DataFrame) -> VizData:
if df.empty:
return None
df = df.set_index(self.form_data.get("groupby"))
chart_data = [
{"name": metric, "children": self._nest(metric, df)}
for metric in df.columns
]
return chart_data
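# Hedged sketch (not part of the original module): the hierarchical data
# TreemapViz.get_data returns (via _nest) for a two-level groupby (country, city)
# and a single metric, with made-up numbers:
#
#     [{"name": "count", "children": [
#         {"name": "US", "children": [{"name": "NYC", "value": 10},
#                                     {"name": "SF", "value": 7}]},
#     ]}]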
class CalHeatmapViz(BaseViz):
"""Calendar heatmap."""
viz_type = "cal_heatmap"
verbose_name = _("Calendar Heatmap")
credits = "<a href=https://github.com/wa0x6e/cal-heatmap>cal-heatmap</a>"
is_timeseries = True
def get_data(self, df: pd.DataFrame) -> VizData:
if df.empty:
return None
form_data = self.form_data
data = {}
records = df.to_dict("records")
for metric in self.metric_labels:
values = {}
for obj in records:
v = obj[DTTM_ALIAS]
if hasattr(v, "value"):
v = v.value
values[str(v / 10 ** 9)] = obj.get(metric)
data[metric] = values
try:
start, end = get_since_until(
relative_start=relative_start,
relative_end=relative_end,
time_range=form_data.get("time_range"),
since=form_data.get("since"),
until=form_data.get("until"),
)
except ValueError as ex:
raise QueryObjectValidationError(str(ex))
if not start or not end:
raise QueryObjectValidationError(
"Please provide both time bounds (Since and Until)"
)
domain = form_data.get("domain_granularity")
diff_delta = rdelta.relativedelta(end, start)
diff_secs = (end - start).total_seconds()
if domain == "year":
range_ = end.year - start.year + 1
elif domain == "month":
range_ = diff_delta.years * 12 + diff_delta.months + 1
elif domain == "week":
range_ = diff_delta.years * 53 + diff_delta.weeks + 1
elif domain == "day":
range_ = diff_secs // (24 * 60 * 60) + 1 # type: ignore
else:
range_ = diff_secs // (60 * 60) + 1 # type: ignore
return {
"data": data,
"start": start,
"domain": domain,
"subdomain": form_data.get("subdomain_granularity"),
"range": range_,
}
def query_obj(self) -> QueryObjectDict:
d = super().query_obj()
fd = self.form_data
d["metrics"] = fd.get("metrics")
mapping = {
"min": "PT1M",
"hour": "PT1H",
"day": "P1D",
"week": "P1W",
"month": "P1M",
"year": "P1Y",
}
time_grain = mapping[fd.get("subdomain_granularity", "min")]
if self.datasource.type == "druid":
d["granularity"] = time_grain
else:
d["extras"]["time_grain_sqla"] = time_grain
return d
class NVD3Viz(BaseViz):
"""Base class for all nvd3 vizs"""
credits = '<a href="http://nvd3.org/">NVD3.org</a>'
viz_type: Optional[str] = None
verbose_name = "Base NVD3 Viz"
is_timeseries = False
class BubbleViz(NVD3Viz):
"""Based on the NVD3 bubble chart"""
viz_type = "bubble"
verbose_name = _("Bubble Chart")
is_timeseries = False
def query_obj(self) -> QueryObjectDict:
form_data = self.form_data
d = super().query_obj()
d["groupby"] = [form_data.get("entity")]
if form_data.get("series"):
d["groupby"].append(form_data.get("series"))
# dedup groupby if it happens to be the same
d["groupby"] = list(dict.fromkeys(d["groupby"]))
self.x_metric = form_data["x"]
self.y_metric = form_data["y"]
self.z_metric = form_data["size"]
self.entity = form_data.get("entity")
self.series = form_data.get("series") or self.entity
d["row_limit"] = form_data.get("limit")
d["metrics"] = [self.z_metric, self.x_metric, self.y_metric]
if len(set(self.metric_labels)) < 3:
raise QueryObjectValidationError(_("Please use 3 different metric labels"))
if not all(d["metrics"] + [self.entity]):
raise QueryObjectValidationError(_("Pick a metric for x, y and size"))
return d
def get_data(self, df: pd.DataFrame) -> VizData:
if df.empty:
return None
df["x"] = df[[utils.get_metric_name(self.x_metric)]]
df["y"] = df[[utils.get_metric_name(self.y_metric)]]
df["size"] = df[[utils.get_metric_name(self.z_metric)]]
df["shape"] = "circle"
df["group"] = df[[self.series]]
series: Dict[Any, List[Any]] = defaultdict(list)
for row in df.to_dict(orient="records"):
series[row["group"]].append(row)
chart_data = []
for k, v in series.items():
chart_data.append({"key": k, "values": v})
return chart_data
class BulletViz(NVD3Viz):
"""Based on the NVD3 bullet chart"""
viz_type = "bullet"
verbose_name = _("Bullet Chart")
is_timeseries = False
def query_obj(self) -> QueryObjectDict:
form_data = self.form_data
d = super().query_obj()
self.metric = form_data["metric"]
d["metrics"] = [self.metric]
if not self.metric:
raise QueryObjectValidationError(_("Pick a metric to display"))
return d
def get_data(self, df: pd.DataFrame) -> VizData:
if df.empty:
return None
df["metric"] = df[[utils.get_metric_name(self.metric)]]
values = df["metric"].values
return {
"measures": values.tolist(),
}
class BigNumberViz(BaseViz):
"""Put emphasis on a single metric with this big number viz"""
viz_type = "big_number"
verbose_name = _("Big Number with Trendline")
credits = 'a <a href="https://github.com/airbnb/superset">Superset</a> original'
is_timeseries = True
def query_obj(self) -> QueryObjectDict:
d = super().query_obj()
metric = self.form_data.get("metric")
if not metric:
raise QueryObjectValidationError(_("Pick a metric!"))
d["metrics"] = [self.form_data.get("metric")]
self.form_data["metric"] = metric
return d
def get_data(self, df: pd.DataFrame) -> VizData:
if df.empty:
return None
df = df.pivot_table(
index=DTTM_ALIAS,
columns=[],
values=self.metric_labels,
dropna=False,
aggfunc=np.min, # looking for any (only) value, preserving `None`
)
df = self.apply_rolling(df)
df[DTTM_ALIAS] = df.index
return super().get_data(df)
class BigNumberTotalViz(BaseViz):
"""Put emphasis on a single metric with this big number viz"""
viz_type = "big_number_total"
verbose_name = _("Big Number")
credits = 'a <a href="https://github.com/airbnb/superset">Superset</a> original'
is_timeseries = False
def query_obj(self) -> QueryObjectDict:
d = super().query_obj()
metric = self.form_data.get("metric")
if not metric:
raise QueryObjectValidationError(_("Pick a metric!"))
d["metrics"] = [self.form_data.get("metric")]
self.form_data["metric"] = metric
# Limiting rows is not required as only one cell is returned
d["row_limit"] = None
return d
class NVD3TimeSeriesViz(NVD3Viz):
"""A rich line chart component with tons of options"""
viz_type = "line"
verbose_name = _("Time Series - Line Chart")
sort_series = False
is_timeseries = True
pivot_fill_value: Optional[int] = None
def to_series(
self, df: pd.DataFrame, classed: str = "", title_suffix: str = ""
) -> List[Dict[str, Any]]:
cols = []
for col in df.columns:
if col == "":
cols.append("N/A")
elif col is None:
cols.append("NULL")
else:
cols.append(col)
df.columns = cols
series = df.to_dict("series")
chart_data = []
for name in df.T.index.tolist():
ys = series[name]
if df[name].dtype.kind not in "biufc":
continue
series_title: Union[List[str], str, Tuple[str, ...]]
if isinstance(name, list):
series_title = [str(title) for title in name]
elif isinstance(name, tuple):
series_title = tuple(str(title) for title in name)
else:
series_title = str(name)
if (
isinstance(series_title, (list, tuple))
and len(series_title) > 1
and len(self.metric_labels) == 1
):
# Removing metric from series name if only one metric
series_title = series_title[1:]
if title_suffix:
if isinstance(series_title, str):
series_title = (series_title, title_suffix)
elif isinstance(series_title, list):
series_title = series_title + [title_suffix]
elif isinstance(series_title, tuple):
series_title = series_title + (title_suffix,)
values = []
non_nan_cnt = 0
for ds in df.index:
if ds in ys:
d = {"x": ds, "y": ys[ds]}
if not np.isnan(ys[ds]):
non_nan_cnt += 1
else:
d = {}
values.append(d)
if non_nan_cnt == 0:
continue
d = {"key": series_title, "values": values}
if classed:
d["classed"] = classed
chart_data.append(d)
return chart_data
def process_data(self, df: pd.DataFrame, aggregate: bool = False) -> VizData:
fd = self.form_data
if fd.get("granularity") == "all":
raise QueryObjectValidationError(
_("Pick a time granularity for your time series")
)
if df.empty:
return df
if aggregate:
df = df.pivot_table(
index=DTTM_ALIAS,
columns=fd.get("groupby"),
values=self.metric_labels,
fill_value=0,
aggfunc=sum,
)
else:
df = df.pivot_table(
index=DTTM_ALIAS,
columns=fd.get("groupby"),
values=self.metric_labels,
fill_value=self.pivot_fill_value,
)
rule = fd.get("resample_rule")
method = fd.get("resample_method")
if rule and method:
df = getattr(df.resample(rule), method)()
if self.sort_series:
dfs = df.sum()
dfs.sort_values(ascending=False, inplace=True)
df = df[dfs.index]
df = self.apply_rolling(df)
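        # Contribution mode: convert each value into its series' share of the
        # total across all series at that timestamp.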
if fd.get("contribution"):
dft = df.T
df = (dft / dft.sum()).T
return df
def run_extra_queries(self) -> None:
fd = self.form_data
time_compare = fd.get("time_compare") or []
# backwards compatibility
if not isinstance(time_compare, list):
time_compare = [time_compare]
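        # For each requested offset, run the same query shifted back by the offset,
        # then shift the resulting timestamps forward so the series can be overlaid
        # on the main one.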
for option in time_compare:
query_object = self.query_obj()
try:
delta = parse_past_timedelta(option)
except ValueError as ex:
raise QueryObjectValidationError(str(ex))
query_object["inner_from_dttm"] = query_object["from_dttm"]
query_object["inner_to_dttm"] = query_object["to_dttm"]
if not query_object["from_dttm"] or not query_object["to_dttm"]:
raise QueryObjectValidationError(
_(
"An enclosed time range (both start and end) must be specified "
"when using a Time Comparison."
)
)
query_object["from_dttm"] -= delta
query_object["to_dttm"] -= delta
df2 = self.get_df_payload(query_object, time_compare=option).get("df")
if df2 is not None and DTTM_ALIAS in df2:
label = "{} offset".format(option)
df2[DTTM_ALIAS] += delta
df2 = self.process_data(df2)
self._extra_chart_data.append((label, df2))
def get_data(self, df: pd.DataFrame) -> VizData:
fd = self.form_data
comparison_type = fd.get("comparison_type") or "values"
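        # "values" overlays the time-shifted series as-is; the other modes plot the
        # difference between the main series and each shifted series (absolute,
        # percentage change, or ratio).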
df = self.process_data(df)
if comparison_type == "values":
# Filter out series with all NaN
chart_data = self.to_series(df.dropna(axis=1, how="all"))
for i, (label, df2) in enumerate(self._extra_chart_data):
chart_data.extend(
self.to_series(
df2, classed="time-shift-{}".format(i), title_suffix=label
)
)
else:
chart_data = []
for i, (label, df2) in enumerate(self._extra_chart_data):
                # reindex df2 onto the combined index, interpolate, then align it back to df's index
combined_index = df.index.union(df2.index)
df2 = (
df2.reindex(combined_index)
.interpolate(method="time")
.reindex(df.index)
)
if comparison_type == "absolute":
diff = df - df2
elif comparison_type == "percentage":
diff = (df - df2) / df2
elif comparison_type == "ratio":
diff = df / df2
else:
raise QueryObjectValidationError(
"Invalid `comparison_type`: {0}".format(comparison_type)
)
# remove leading/trailing NaNs from the time shift difference
diff = diff[diff.first_valid_index() : diff.last_valid_index()]
chart_data.extend(
self.to_series(
diff, classed="time-shift-{}".format(i), title_suffix=label
)
)
if not self.sort_series:
chart_data = sorted(chart_data, key=lambda x: tuple(x["key"]))
return chart_data
class MultiLineViz(NVD3Viz):
"""Pile on multiple line charts"""
viz_type = "line_multi"
verbose_name = _("Time Series - Multiple Line Charts")
is_timeseries = True
def query_obj(self) -> QueryObjectDict:
return {}
def get_data(self, df: pd.DataFrame) -> VizData:
multiline_fd = self.form_data
# Late import to avoid circular import issues
from superset.charts.dao import ChartDAO
axis1_chart_ids = multiline_fd.get("line_charts", [])
axis2_chart_ids = multiline_fd.get("line_charts_2", [])
all_charts = {
chart.id: chart
for chart in ChartDAO.find_by_ids(axis1_chart_ids + axis2_chart_ids)
}
axis1_charts = [all_charts[chart_id] for chart_id in axis1_chart_ids]
axis2_charts = [all_charts[chart_id] for chart_id in axis2_chart_ids]
filters = multiline_fd.get("filters", [])
add_prefix = multiline_fd.get("prefix_metric_with_slice_name", False)
data = []
min_x, max_x = None, None
for chart, y_axis in [(chart, 1) for chart in axis1_charts] + [
(chart, 2) for chart in axis2_charts
]:
prefix = f"{chart.chart}: " if add_prefix else ""
chart_fd = chart.form_data
chart_fd["filters"] = chart_fd.get("filters", []) + filters
if "extra_filters" in multiline_fd:
chart_fd["extra_filters"] = multiline_fd["extra_filters"]
if "time_range" in multiline_fd:
chart_fd["time_range"] = multiline_fd["time_range"]
viz_obj = viz_types[chart.viz_type](
chart.datasource,
form_data=chart_fd,
force=self.force,
force_cached=self.force_cached,
)
df = viz_obj.get_df_payload()["df"]
chart_series = viz_obj.get_data(df) or []
for series in chart_series:
x_values = [value["x"] for value in series["values"]]
min_x = min(x_values + ([min_x] if min_x is not None else []))
max_x = max(x_values + ([max_x] if max_x is not None else []))
series_key = (
series["key"]
if isinstance(series["key"], (list, tuple))
else [series["key"]]
)
data.append(
{
"key": prefix + ", ".join(series_key),
"type": "line",
"values": series["values"],
"yAxis": y_axis,
}
)
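        # Append the global min/max x (with null y) to every series so all charts
        # share the same x-axis extent.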
bounds = []
if min_x is not None:
bounds.append({"x": min_x, "y": None})
if max_x is not None:
bounds.append({"x": max_x, "y": None})
for series in data:
series["values"].extend(bounds)
return data
class NVD3DualLineViz(NVD3Viz):
"""A rich line chart with dual axis"""
viz_type = "dual_line"
verbose_name = _("Time Series - Dual Axis Line Chart")
sort_series = False
is_timeseries = True
def query_obj(self) -> QueryObjectDict:
d = super().query_obj()
m1 = self.form_data.get("metric")
m2 = self.form_data.get("metric_2")
d["metrics"] = [m1, m2]
if not m1:
raise QueryObjectValidationError(_("Pick a metric for left axis!"))
if not m2:
raise QueryObjectValidationError(_("Pick a metric for right axis!"))
if m1 == m2:
raise QueryObjectValidationError(
_("Please choose different metrics" " on left and right axis")
)
return d
def to_series(self, df: pd.DataFrame, classed: str = "") -> List[Dict[str, Any]]:
cols = []
for col in df.columns:
if col == "":
cols.append("N/A")
elif col is None:
cols.append("NULL")
else:
cols.append(col)
df.columns = cols
series = df.to_dict("series")
chart_data = []
metrics = [self.form_data["metric"], self.form_data["metric_2"]]
for i, m in enumerate(metrics):
m = utils.get_metric_name(m)
ys = series[m]
if df[m].dtype.kind not in "biufc":
continue
series_title = m
d = {
"key": series_title,
"classed": classed,
"values": [
{"x": ds, "y": ys[ds] if ds in ys else None} for ds in df.index
],
"yAxis": i + 1,
"type": "line",
}
chart_data.append(d)
return chart_data
def get_data(self, df: pd.DataFrame) -> VizData:
if df.empty:
return None
fd = self.form_data
if self.form_data.get("granularity") == "all":
raise QueryObjectValidationError(
_("Pick a time granularity for your time series")
)
metric = utils.get_metric_name(fd["metric"])
metric_2 = utils.get_metric_name(fd["metric_2"])
df = df.pivot_table(index=DTTM_ALIAS, values=[metric, metric_2])
chart_data = self.to_series(df)
return chart_data
class NVD3TimeSeriesBarViz(NVD3TimeSeriesViz):
"""A bar chart where the x axis is time"""
viz_type = "bar"
sort_series = True
verbose_name = _("Time Series - Bar Chart")
class NVD3TimePivotViz(NVD3TimeSeriesViz):
"""Time Series - Periodicity Pivot"""
viz_type = "time_pivot"
sort_series = True
verbose_name = _("Time Series - Period Pivot")
def query_obj(self) -> QueryObjectDict:
d = super().query_obj()
d["metrics"] = [self.form_data.get("metric")]
return d
def get_data(self, df: pd.DataFrame) -> VizData:
if df.empty:
return None
fd = self.form_data
df = self.process_data(df)
freq = to_offset(fd.get("freq"))
try:
freq = type(freq)(freq.n, normalize=True, **freq.kwds)
except ValueError:
freq = type(freq)(freq.n, **freq.kwds)
df.index.name = None
df[DTTM_ALIAS] = df.index.map(freq.rollback)
df["ranked"] = df[DTTM_ALIAS].rank(method="dense", ascending=False) - 1
df.ranked = df.ranked.map(int)
df["series"] = "-" + df.ranked.map(str)
df["series"] = df["series"].str.replace("-0", "current")
rank_lookup = {
row["series"]: row["ranked"] for row in df.to_dict(orient="records")
}
max_ts = df[DTTM_ALIAS].max()
max_rank = df["ranked"].max()
df[DTTM_ALIAS] = df.index + (max_ts - df[DTTM_ALIAS])
df = df.pivot_table(
index=DTTM_ALIAS,
columns="series",
values=utils.get_metric_name(fd["metric"]),
)
chart_data = self.to_series(df)
for serie in chart_data:
serie["rank"] = rank_lookup[serie["key"]]
serie["perc"] = 1 - (serie["rank"] / (max_rank + 1))
return chart_data
class NVD3CompareTimeSeriesViz(NVD3TimeSeriesViz):
"""A line chart component where you can compare the % change over time"""
viz_type = "compare"
verbose_name = _("Time Series - Percent Change")
class NVD3TimeSeriesStackedViz(NVD3TimeSeriesViz):
"""A rich stack area chart"""
viz_type = "area"
verbose_name = _("Time Series - Stacked")
sort_series = True
pivot_fill_value = 0
def query_obj(self) -> QueryObjectDict:
d = super().query_obj()
metrics = self.form_data.get("metrics")
sort_by = self.form_data.get("timeseries_limit_metric")
if sort_by:
sort_by_label = utils.get_metric_name(sort_by)
if sort_by_label not in d["metrics"]:
d["metrics"].append(sort_by)
if self.form_data.get("order_desc"):
d["orderby"] = [(sort_by, not self.form_data.get("order_desc", True))]
return d
class HistogramViz(BaseViz):
"""Histogram"""
viz_type = "histogram"
verbose_name = _("Histogram")
is_timeseries = False
def query_obj(self) -> QueryObjectDict:
"""Returns the query object for this visualization"""
d = super().query_obj()
d["row_limit"] = self.form_data.get("row_limit", int(config["VIZ_ROW_LIMIT"]))
numeric_columns = self.form_data.get("all_columns_x")
if numeric_columns is None:
raise QueryObjectValidationError(
_("Must have at least one numeric column specified")
)
self.columns = numeric_columns
d["columns"] = numeric_columns + self.groupby
# override groupby entry to avoid aggregation
d["groupby"] = []
return d
def labelify(self, keys: Union[List[str], str], column: str) -> str:
if isinstance(keys, str):
keys = [keys]
# removing undesirable characters
labels = [re.sub(r"\W+", r"_", k) for k in keys]
if len(self.columns) > 1 or not self.groupby:
# Only show numeric column in label if there are many
labels = [column] + labels
return "__".join(labels)
def get_data(self, df: pd.DataFrame) -> VizData:
"""Returns the chart data"""
if df.empty:
return None
chart_data = []
if len(self.groupby) > 0:
groups = df.groupby(self.groupby)
else:
groups = [((), df)]
for keys, data in groups:
chart_data.extend(
[
{
"key": self.labelify(keys, column),
"values": data[column].tolist(),
}
for column in self.columns
]
)
return chart_data
class DistributionBarViz(BaseViz):
"""A good old bar chart"""
viz_type = "dist_bar"
verbose_name = _("Distribution - Bar Chart")
is_timeseries = False
def query_obj(self) -> QueryObjectDict:
d = super().query_obj()
fd = self.form_data
if len(d["groupby"]) < len(fd.get("groupby") or []) + len(
fd.get("columns") or []
):
raise QueryObjectValidationError(
_("Can't have overlap between Series and Breakdowns")
)
if not fd.get("metrics"):
raise QueryObjectValidationError(_("Pick at least one metric"))
if not fd.get("groupby"):
raise QueryObjectValidationError(_("Pick at least one field for [Series]"))
sort_by = fd.get("timeseries_limit_metric")
if sort_by:
sort_by_label = utils.get_metric_name(sort_by)
if sort_by_label not in d["metrics"]:
d["metrics"].append(sort_by)
d["orderby"] = [(sort_by, not fd.get("order_desc", True))]
elif d["metrics"]:
# Legacy behavior of sorting by first metric by default
first_metric = d["metrics"][0]
d["orderby"] = [(first_metric, not fd.get("order_desc", True))]
return d
def get_data(self, df: pd.DataFrame) -> VizData:
if df.empty:
return None
fd = self.form_data
metrics = self.metric_labels
columns = fd.get("columns") or []
# pandas will throw away nulls when grouping/pivoting,
# so we substitute NULL_STRING for any nulls in the necessary columns
filled_cols = self.groupby + columns
df = df.copy()
df[filled_cols] = df[filled_cols].fillna(value=NULL_STRING)
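        # Order the categories by the first metric's total, descending; the pivot
        # table is reindexed to this order below.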
row = df.groupby(self.groupby).sum()[metrics[0]].copy()
row.sort_values(ascending=False, inplace=True)
pt = df.pivot_table(index=self.groupby, columns=columns, values=metrics)
if fd.get("contribution"):
pt = pt.T
pt = (pt / pt.sum()).T
pt = pt.reindex(row.index)
# Re-order the columns adhering to the metric ordering.
pt = pt[metrics]
chart_data = []
for name, ys in pt.items():
if pt[name].dtype.kind not in "biufc" or name in self.groupby:
continue
if isinstance(name, str):
series_title = name
else:
offset = 0 if len(metrics) > 1 else 1
series_title = ", ".join([str(s) for s in name[offset:]])
values = []
for i, v in ys.items():
x = i
if isinstance(x, (tuple, list)):
x = ", ".join([str(s) for s in x])
else:
x = str(x)
values.append({"x": x, "y": v})
d = {"key": series_title, "values": values}
chart_data.append(d)
return chart_data
class SunburstViz(BaseViz):
"""A multi level sunburst chart"""
viz_type = "sunburst"
verbose_name = _("Sunburst")
is_timeseries = False
credits = (
"Kerry Rodden "
'@<a href="https://bl.ocks.org/kerryrodden/7090426">bl.ocks.org</a>'
)
def get_data(self, df: pd.DataFrame) -> VizData:
if df.empty:
return None
fd = copy.deepcopy(self.form_data)
cols = fd.get("groupby") or []
cols.extend(["m1", "m2"])
metric = utils.get_metric_name(fd["metric"])
secondary_metric = (
utils.get_metric_name(fd["secondary_metric"])
if "secondary_metric" in fd
else None
)
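        # Expose the primary metric as "m1" and the secondary as "m2"; if no distinct
        # secondary metric is set, "m2" simply mirrors "m1".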
if metric == secondary_metric or secondary_metric is None:
df.rename(columns={df.columns[-1]: "m1"}, inplace=True)
df["m2"] = df["m1"]
else:
df.rename(columns={df.columns[-2]: "m1"}, inplace=True)
df.rename(columns={df.columns[-1]: "m2"}, inplace=True)
# Re-order the columns as the query result set column ordering may differ from
# that listed in the hierarchy.
df = df[cols]
return df.to_numpy().tolist()
def query_obj(self) -> QueryObjectDict:
qry = super().query_obj()
fd = self.form_data
qry["metrics"] = [fd["metric"]]
secondary_metric = fd.get("secondary_metric")
if secondary_metric and secondary_metric != fd["metric"]:
qry["metrics"].append(secondary_metric)
if self.form_data.get("sort_by_metric", False):
qry["orderby"] = [(qry["metrics"][0], False)]
return qry
class SankeyViz(BaseViz):
"""A Sankey diagram that requires a parent-child dataset"""
viz_type = "sankey"
verbose_name = _("Sankey")
is_timeseries = False
credits = '<a href="https://www.npmjs.com/package/d3-sankey">d3-sankey on npm</a>'
def query_obj(self) -> QueryObjectDict:
qry = super().query_obj()
if len(qry["groupby"]) != 2:
raise QueryObjectValidationError(
_("Pick exactly 2 columns as [Source / Target]")
)
qry["metrics"] = [self.form_data["metric"]]
if self.form_data.get("sort_by_metric", False):
qry["orderby"] = [(qry["metrics"][0], False)]
return qry
def get_data(self, df: pd.DataFrame) -> VizData:
if df.empty:
return None
source, target = self.groupby
(value,) = self.metric_labels
        df.rename(
            columns={source: "source", target: "target", value: "value"},
            inplace=True,
        )
df["source"] = df["source"].astype(str)
df["target"] = df["target"].astype(str)
recs = df.to_dict(orient="records")
hierarchy: Dict[str, Set[str]] = defaultdict(set)
for row in recs:
hierarchy[row["source"]].add(row["target"])
def find_cycle(g: Dict[str, Set[str]]) -> Optional[Tuple[str, str]]:
"""Whether there's a cycle in a directed graph"""
path = set()
def visit(vertex: str) -> Optional[Tuple[str, str]]:
path.add(vertex)
for neighbour in g.get(vertex, ()):
if neighbour in path or visit(neighbour):
return (vertex, neighbour)
path.remove(vertex)
return None
for v in g:
cycle = visit(v)
if cycle:
return cycle
return None
cycle = find_cycle(hierarchy)
if cycle:
raise QueryObjectValidationError(
_(
"There's a loop in your Sankey, please provide a tree. "
"Here's a faulty link: {}"
).format(cycle)
)
return recs
class DirectedForceViz(BaseViz):
"""An animated directed force layout graph visualization"""
viz_type = "directed_force"
verbose_name = _("Directed Force Layout")
credits = 'd3noob @<a href="http://bl.ocks.org/d3noob/5141278">bl.ocks.org</a>'
is_timeseries = False
def query_obj(self) -> QueryObjectDict:
qry = super().query_obj()
if len(self.form_data["groupby"]) != 2:
raise QueryObjectValidationError(_("Pick exactly 2 columns to 'Group By'"))
qry["metrics"] = [self.form_data["metric"]]
if self.form_data.get("sort_by_metric", False):
qry["orderby"] = [(qry["metrics"][0], False)]
return qry
def get_data(self, df: pd.DataFrame) -> VizData:
if df.empty:
return None
df.columns = ["source", "target", "value"]
return df.to_dict(orient="records")
class ChordViz(BaseViz):
"""A Chord diagram"""
viz_type = "chord"
    verbose_name = _("Chord Diagram")
credits = '<a href="https://github.com/d3/d3-chord">Bostock</a>'
is_timeseries = False
def query_obj(self) -> QueryObjectDict:
qry = super().query_obj()
fd = self.form_data
qry["groupby"] = [fd.get("groupby"), fd.get("columns")]
qry["metrics"] = [fd.get("metric")]
if self.form_data.get("sort_by_metric", False):
qry["orderby"] = [(qry["metrics"][0], False)]
return qry
def get_data(self, df: pd.DataFrame) -> VizData:
if df.empty:
return None
df.columns = ["source", "target", "value"]
        # Preparing a symmetrical matrix like d3.chords calls for
nodes = list(set(df["source"]) | set(df["target"]))
matrix = {}
for source, target in product(nodes, nodes):
matrix[(source, target)] = 0
for source, target, value in df.to_records(index=False):
matrix[(source, target)] = value
m = [[matrix[(n1, n2)] for n1 in nodes] for n2 in nodes]
return {"nodes": list(nodes), "matrix": m}
class CountryMapViz(BaseViz):
"""A country centric"""
viz_type = "country_map"
verbose_name = _("Country Map")
is_timeseries = False
credits = "From bl.ocks.org By john-guerra"
def query_obj(self) -> QueryObjectDict:
qry = super().query_obj()
qry["metrics"] = [self.form_data["metric"]]
qry["groupby"] = [self.form_data["entity"]]
return qry
def get_data(self, df: pd.DataFrame) -> VizData:
if df.empty:
return None
fd = self.form_data
cols = [fd.get("entity")]
metric = self.metric_labels[0]
cols += [metric]
ndf = df[cols]
df = ndf
df.columns = ["country_id", "metric"]
d = df.to_dict(orient="records")
return d
class WorldMapViz(BaseViz):
"""A country centric world map"""
viz_type = "world_map"
verbose_name = _("World Map")
is_timeseries = False
credits = 'datamaps on <a href="https://www.npmjs.com/package/datamaps">npm</a>'
def query_obj(self) -> QueryObjectDict:
qry = super().query_obj()
qry["groupby"] = [self.form_data["entity"]]
if self.form_data.get("sort_by_metric", False):
qry["orderby"] = [(qry["metrics"][0], False)]
return qry
def get_data(self, df: pd.DataFrame) -> VizData:
if df.empty:
return None
from superset.examples import countries
fd = self.form_data
cols = [fd.get("entity")]
metric = utils.get_metric_name(fd["metric"])
secondary_metric = (
utils.get_metric_name(fd["secondary_metric"])
if "secondary_metric" in fd
else None
)
columns = ["country", "m1", "m2"]
if metric == secondary_metric:
ndf = df[cols]
ndf["m1"] = df[metric]
ndf["m2"] = ndf["m1"]
else:
if secondary_metric:
cols += [metric, secondary_metric]
else:
cols += [metric]
columns = ["country", "m1"]
ndf = df[cols]
df = ndf
df.columns = columns
d = df.to_dict(orient="records")
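        # Resolve each country value to its ISO alpha-3 code and coordinates via the
        # countries lookup; values that cannot be resolved fall back to "XXX".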
for row in d:
country = None
if isinstance(row["country"], str):
if "country_fieldtype" in fd:
country = countries.get(fd["country_fieldtype"], row["country"])
if country:
row["country"] = country["cca3"]
row["latitude"] = country["lat"]
row["longitude"] = country["lng"]
row["name"] = country["name"]
else:
row["country"] = "XXX"
return d
class FilterBoxViz(BaseViz):
"""A multi filter, multi-choice filter box to make dashboards interactive"""
viz_type = "filter_box"
verbose_name = _("Filters")
is_timeseries = False
credits = 'a <a href="https://github.com/airbnb/superset">Superset</a> original'
cache_type = "get_data"
filter_row_limit = 1000
def query_obj(self) -> QueryObjectDict:
return {}
def run_extra_queries(self) -> None:
qry = super().query_obj()
filters = self.form_data.get("filter_configs") or []
qry["row_limit"] = self.filter_row_limit
self.dataframes = {}
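        # Run one query per configured filter to build its list of choices, keyed by column.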
for flt in filters:
col = flt.get("column")
if not col:
raise QueryObjectValidationError(
_("Invalid filter configuration, please select a column")
)
qry["groupby"] = [col]
metric = flt.get("metric")
qry["metrics"] = [metric] if metric else []
df = self.get_df_payload(query_obj=qry).get("df")
self.dataframes[col] = df
def get_data(self, df: pd.DataFrame) -> VizData:
filters = self.form_data.get("filter_configs") or []
d = {}
for flt in filters:
col = flt.get("column")
metric = flt.get("metric")
df = self.dataframes.get(col)
if df is not None and not df.empty:
if metric:
df = df.sort_values(
utils.get_metric_name(metric), ascending=flt.get("asc")
)
d[col] = [
{"id": row[0], "text": row[0], "metric": row[1]}
for row in df.itertuples(index=False)
]
else:
df = df.sort_values(col, ascending=flt.get("asc"))
d[col] = [
{"id": row[0], "text": row[0]}
for row in df.itertuples(index=False)
]
else:
                d[col] = []
return d
class ParallelCoordinatesViz(BaseViz):
"""Interactive parallel coordinate implementation
Uses this amazing javascript library
https://github.com/syntagmatic/parallel-coordinates
"""
viz_type = "para"
verbose_name = _("Parallel Coordinates")
credits = (
'<a href="https://syntagmatic.github.io/parallel-coordinates/">'
"Syntagmatic's library</a>"
)
is_timeseries = False
def query_obj(self) -> QueryObjectDict:
d = super().query_obj()
fd = self.form_data
d["groupby"] = [fd.get("series")]
sort_by = self.form_data.get("timeseries_limit_metric")
if sort_by:
sort_by_label = utils.get_metric_name(sort_by)
if sort_by_label not in d["metrics"]:
d["metrics"].append(sort_by)
if self.form_data.get("order_desc"):
d["orderby"] = [(sort_by, not self.form_data.get("order_desc", True))]
return d
def get_data(self, df: pd.DataFrame) -> VizData:
return df.to_dict(orient="records")
class HeatmapViz(BaseViz):
"""A nice heatmap visualization that support high density through canvas"""
viz_type = "heatmap"
verbose_name = _("Heatmap")
is_timeseries = False
credits = (
'inspired from mbostock @<a href="http://bl.ocks.org/mbostock/3074470">'
"bl.ocks.org</a>"
)
def query_obj(self) -> QueryObjectDict:
d = super().query_obj()
fd = self.form_data
d["metrics"] = [fd.get("metric")]
d["groupby"] = [fd.get("all_columns_x"), fd.get("all_columns_y")]
if self.form_data.get("sort_by_metric", False):
d["orderby"] = [(d["metrics"][0], False)]
return d
def get_data(self, df: pd.DataFrame) -> VizData:
if df.empty:
return None
fd = self.form_data
x = fd.get("all_columns_x")
y = fd.get("all_columns_y")
v = self.metric_labels[0]
if x == y:
df.columns = ["x", "y", "v"]
else:
df = df[[x, y, v]]
df.columns = ["x", "y", "v"]
norm = fd.get("normalize_across")
overall = False
max_ = df.v.max()
min_ = df.v.min()
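        # Normalise cell values either across the whole heatmap or within each group of
        # the chosen axis column: "perc" is min-max scaled, "rank" is the percentile rank.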
if norm == "heatmap":
overall = True
else:
gb = df.groupby(norm, group_keys=False)
if len(gb) <= 1:
overall = True
else:
df["perc"] = gb.apply(
lambda x: (x.v - x.v.min()) / (x.v.max() - x.v.min())
)
df["rank"] = gb.apply(lambda x: x.v.rank(pct=True))
if overall:
df["perc"] = (df.v - min_) / (max_ - min_)
df["rank"] = df.v.rank(pct=True)
return {"records": df.to_dict(orient="records"), "extents": [min_, max_]}
class HorizonViz(NVD3TimeSeriesViz):
"""Horizon chart
https://www.npmjs.com/package/d3-horizon-chart
"""
viz_type = "horizon"
verbose_name = _("Horizon Charts")
credits = (
'<a href="https://www.npmjs.com/package/d3-horizon-chart">'
"d3-horizon-chart</a>"
)
def query_obj(self) -> QueryObjectDict:
d = super().query_obj()
metrics = self.form_data.get("metrics")
sort_by = self.form_data.get("timeseries_limit_metric")
if sort_by:
sort_by_label = utils.get_metric_name(sort_by)
if sort_by_label not in d["metrics"]:
d["metrics"].append(sort_by)
if self.form_data.get("order_desc"):
d["orderby"] = [(sort_by, not self.form_data.get("order_desc", True))]
return d
class MapboxViz(BaseViz):
"""Rich maps made with Mapbox"""
viz_type = "mapbox"
verbose_name = _("Mapbox")
is_timeseries = False
credits = "<a href=https://www.mapbox.com/mapbox-gl-js/api/>Mapbox GL JS</a>"
def query_obj(self) -> QueryObjectDict:
d = super().query_obj()
fd = self.form_data
label_col = fd.get("mapbox_label")
if not fd.get("groupby"):
if fd.get("all_columns_x") is None or fd.get("all_columns_y") is None:
raise QueryObjectValidationError(
_("[Longitude] and [Latitude] must be set")
)
d["columns"] = [fd.get("all_columns_x"), fd.get("all_columns_y")]
if label_col and len(label_col) >= 1:
if label_col[0] == "count":
raise QueryObjectValidationError(
_(
"Must have a [Group By] column to have 'count' as the "
+ "[Label]"
)
)
d["columns"].append(label_col[0])
if fd.get("point_radius") != "Auto":
d["columns"].append(fd.get("point_radius"))
d["columns"] = list(set(d["columns"]))
else:
# Ensuring columns chosen are all in group by
if (
label_col
and len(label_col) >= 1
and label_col[0] != "count"
and label_col[0] not in fd["groupby"]
):
raise QueryObjectValidationError(
_("Choice of [Label] must be present in [Group By]")
)
if (
fd.get("point_radius") != "Auto"
and fd.get("point_radius") not in fd["groupby"]
):
raise QueryObjectValidationError(
_("Choice of [Point Radius] must be present in [Group By]")
)
if (
fd.get("all_columns_x") not in fd["groupby"]
or fd.get("all_columns_y") not in fd["groupby"]
):
raise QueryObjectValidationError(
_(
"[Longitude] and [Latitude] columns must be present in "
+ "[Group By]"
)
)
return d
def get_data(self, df: pd.DataFrame) -> VizData:
if df.empty:
return None
fd = self.form_data
label_col = fd.get("mapbox_label")
has_custom_metric = label_col is not None and len(label_col) > 0
metric_col = [None] * len(df.index)
if has_custom_metric:
if label_col[0] == fd.get("all_columns_x"): # type: ignore
metric_col = df[fd.get("all_columns_x")]
elif label_col[0] == fd.get("all_columns_y"): # type: ignore
metric_col = df[fd.get("all_columns_y")]
else:
metric_col = df[label_col[0]] # type: ignore
point_radius_col = (
[None] * len(df.index)
if fd.get("point_radius") == "Auto"
else df[fd.get("point_radius")]
)
# limiting geo precision as long decimal values trigger issues
# around json-bignumber in Mapbox
GEO_PRECISION = 10
# using geoJSON formatting
geo_json = {
"type": "FeatureCollection",
"features": [
{
"type": "Feature",
"properties": {"metric": metric, "radius": point_radius},
"geometry": {
"type": "Point",
"coordinates": [
round(lon, GEO_PRECISION),
round(lat, GEO_PRECISION),
],
},
}
for lon, lat, metric, point_radius in zip(
df[fd.get("all_columns_x")],
df[fd.get("all_columns_y")],
metric_col,
point_radius_col,
)
],
}
x_series, y_series = df[fd.get("all_columns_x")], df[fd.get("all_columns_y")]
south_west = [x_series.min(), y_series.min()]
north_east = [x_series.max(), y_series.max()]
return {
"geoJSON": geo_json,
"hasCustomMetric": has_custom_metric,
"mapboxApiKey": config["MAPBOX_API_KEY"],
"mapStyle": fd.get("mapbox_style"),
"aggregatorName": fd.get("pandas_aggfunc"),
"clusteringRadius": fd.get("clustering_radius"),
"pointRadiusUnit": fd.get("point_radius_unit"),
"globalOpacity": fd.get("global_opacity"),
"bounds": [south_west, north_east],
"renderWhileDragging": fd.get("render_while_dragging"),
"tooltip": fd.get("rich_tooltip"),
"color": fd.get("mapbox_color"),
}
class DeckGLMultiLayer(BaseViz):
"""Pile on multiple DeckGL layers"""
viz_type = "deck_multi"
verbose_name = _("Deck.gl - Multiple Layers")
is_timeseries = False
credits = '<a href="https://uber.github.io/deck.gl/">deck.gl</a>'
def query_obj(self) -> QueryObjectDict:
return {}
def get_data(self, df: pd.DataFrame) -> VizData:
fd = self.form_data
# Late imports to avoid circular import issues
from superset import db
from superset.models.slice import Slice
slice_ids = fd.get("deck_slices")
slices = db.session.query(Slice).filter(Slice.id.in_(slice_ids)).all()
return {
"mapboxApiKey": config["MAPBOX_API_KEY"],
"slices": [slc.data for slc in slices],
}
class BaseDeckGLViz(BaseViz):
"""Base class for deck.gl visualizations"""
is_timeseries = False
credits = '<a href="https://uber.github.io/deck.gl/">deck.gl</a>'
spatial_control_keys: List[str] = []
def get_metrics(self) -> List[str]:
self.metric = self.form_data.get("size")
return [self.metric] if self.metric else []
def process_spatial_query_obj(self, key: str, group_by: List[str]) -> None:
group_by.extend(self.get_spatial_columns(key))
def get_spatial_columns(self, key: str) -> List[str]:
spatial = self.form_data.get(key)
if spatial is None:
raise ValueError(_("Bad spatial key"))
if spatial.get("type") == "latlong":
return [spatial.get("lonCol"), spatial.get("latCol")]
elif spatial.get("type") == "delimited":
return [spatial.get("lonlatCol")]
elif spatial.get("type") == "geohash":
return [spatial.get("geohashCol")]
return []
@staticmethod
def parse_coordinates(s: Any) -> Optional[Tuple[float, float]]:
if not s:
return None
try:
p = Point(s)
return (p.latitude, p.longitude)
except Exception:
            raise SpatialException(_("Invalid spatial point encountered: %s") % s)
@staticmethod
def reverse_geohash_decode(geohash_code: str) -> Tuple[str, str]:
lat, lng = geohash.decode(geohash_code)
return (lng, lat)
@staticmethod
def reverse_latlong(df: pd.DataFrame, key: str) -> None:
df[key] = [tuple(reversed(o)) for o in df[key] if isinstance(o, (list, tuple))]
def process_spatial_data_obj(self, key: str, df: pd.DataFrame) -> pd.DataFrame:
spatial = self.form_data.get(key)
if spatial is None:
raise ValueError(_("Bad spatial key"))
if spatial.get("type") == "latlong":
df[key] = list(
zip(
pd.to_numeric(df[spatial.get("lonCol")], errors="coerce"),
pd.to_numeric(df[spatial.get("latCol")], errors="coerce"),
)
)
elif spatial.get("type") == "delimited":
lon_lat_col = spatial.get("lonlatCol")
df[key] = df[lon_lat_col].apply(self.parse_coordinates)
del df[lon_lat_col]
elif spatial.get("type") == "geohash":
df[key] = df[spatial.get("geohashCol")].map(self.reverse_geohash_decode)
del df[spatial.get("geohashCol")]
if spatial.get("reverseCheckbox"):
self.reverse_latlong(df, key)
if df.get(key) is None:
raise NullValueException(
_(
"Encountered invalid NULL spatial entry, \
please consider filtering those out"
)
)
return df
def add_null_filters(self) -> None:
fd = self.form_data
spatial_columns = set()
for key in self.spatial_control_keys:
for column in self.get_spatial_columns(key):
spatial_columns.add(column)
if fd.get("adhoc_filters") is None:
fd["adhoc_filters"] = []
line_column = fd.get("line_column")
if line_column:
spatial_columns.add(line_column)
for column in sorted(spatial_columns):
filter_ = to_adhoc({"col": column, "op": "IS NOT NULL", "val": ""})
fd["adhoc_filters"].append(filter_)
def query_obj(self) -> QueryObjectDict:
fd = self.form_data
# add NULL filters
if fd.get("filter_nulls", True):
self.add_null_filters()
d = super().query_obj()
gb: List[str] = []
for key in self.spatial_control_keys:
self.process_spatial_query_obj(key, gb)
if fd.get("dimension"):
gb += [fd["dimension"]]
if fd.get("js_columns"):
gb += fd.get("js_columns") or []
metrics = self.get_metrics()
gb = list(set(gb))
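        # If the layer has metrics, aggregate by grouping on the spatial/dimension/js
        # columns; otherwise fetch those columns unaggregated.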
if metrics:
d["groupby"] = gb
d["metrics"] = metrics
d["columns"] = []
first_metric = d["metrics"][0]
d["orderby"] = [(first_metric, not fd.get("order_desc", True))]
else:
d["columns"] = gb
return d
def get_js_columns(self, d: Dict[str, Any]) -> Dict[str, Any]:
cols = self.form_data.get("js_columns") or []
return {col: d.get(col) for col in cols}
def get_data(self, df: pd.DataFrame) -> VizData:
if df.empty:
return None
# Processing spatial info
for key in self.spatial_control_keys:
df = self.process_spatial_data_obj(key, df)
features = []
for d in df.to_dict(orient="records"):
feature = self.get_properties(d)
extra_props = self.get_js_columns(d)
if extra_props:
feature["extraProps"] = extra_props
features.append(feature)
return {
"features": features,
"mapboxApiKey": config["MAPBOX_API_KEY"],
"metricLabels": self.metric_labels,
}
def get_properties(self, d: Dict[str, Any]) -> Dict[str, Any]:
raise NotImplementedError()
class DeckScatterViz(BaseDeckGLViz):
"""deck.gl's ScatterLayer"""
viz_type = "deck_scatter"
verbose_name = _("Deck.gl - Scatter plot")
spatial_control_keys = ["spatial"]
is_timeseries = True
def query_obj(self) -> QueryObjectDict:
fd = self.form_data
self.is_timeseries = bool(fd.get("time_grain_sqla") or fd.get("granularity"))
self.point_radius_fixed = fd.get("point_radius_fixed") or {
"type": "fix",
"value": 500,
}
return super().query_obj()
def get_metrics(self) -> List[str]:
self.metric = None
if self.point_radius_fixed.get("type") == "metric":
self.metric = self.point_radius_fixed["value"]
return [self.metric]
return []
def get_properties(self, d: Dict[str, Any]) -> Dict[str, Any]:
return {
"metric": d.get(self.metric_label) if self.metric_label else None,
"radius": self.fixed_value
if self.fixed_value
else d.get(self.metric_label)
if self.metric_label
else None,
"cat_color": d.get(self.dim) if self.dim else None,
"position": d.get("spatial"),
DTTM_ALIAS: d.get(DTTM_ALIAS),
}
def get_data(self, df: pd.DataFrame) -> VizData:
fd = self.form_data
self.metric_label = utils.get_metric_name(self.metric) if self.metric else None
self.point_radius_fixed = fd.get("point_radius_fixed")
self.fixed_value = None
self.dim = self.form_data.get("dimension")
if self.point_radius_fixed and self.point_radius_fixed.get("type") != "metric":
self.fixed_value = self.point_radius_fixed.get("value")
return super().get_data(df)
class DeckScreengrid(BaseDeckGLViz):
"""deck.gl's ScreenGridLayer"""
viz_type = "deck_screengrid"
verbose_name = _("Deck.gl - Screen Grid")
spatial_control_keys = ["spatial"]
is_timeseries = True
def query_obj(self) -> QueryObjectDict:
fd = self.form_data
self.is_timeseries = bool(fd.get("time_grain_sqla") or fd.get("granularity"))
return super().query_obj()
def get_properties(self, d: Dict[str, Any]) -> Dict[str, Any]:
return {
"position": d.get("spatial"),
"weight": (d.get(self.metric_label) if self.metric_label else None) or 1,
"__timestamp": d.get(DTTM_ALIAS) or d.get("__time"),
}
def get_data(self, df: pd.DataFrame) -> VizData:
self.metric_label = utils.get_metric_name(self.metric) if self.metric else None
return super().get_data(df)
class DeckGrid(BaseDeckGLViz):
"""deck.gl's DeckLayer"""
viz_type = "deck_grid"
verbose_name = _("Deck.gl - 3D Grid")
spatial_control_keys = ["spatial"]
def get_properties(self, d: Dict[str, Any]) -> Dict[str, Any]:
return {
"position": d.get("spatial"),
"weight": (d.get(self.metric_label) if self.metric_label else None) or 1,
}
def get_data(self, df: pd.DataFrame) -> VizData:
self.metric_label = utils.get_metric_name(self.metric) if self.metric else None
return super().get_data(df)
def geohash_to_json(geohash_code: str) -> List[List[float]]:
p = geohash.bbox(geohash_code)
return [
[p.get("w"), p.get("n")],
[p.get("e"), p.get("n")],
[p.get("e"), p.get("s")],
[p.get("w"), p.get("s")],
[p.get("w"), p.get("n")],
]
class DeckPathViz(BaseDeckGLViz):
"""deck.gl's PathLayer"""
viz_type = "deck_path"
verbose_name = _("Deck.gl - Paths")
deck_viz_key = "path"
is_timeseries = True
deser_map = {
"json": json.loads,
"polyline": polyline.decode,
"geohash": geohash_to_json,
}
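    # Decoders for the supported line encodings: JSON arrays, Google-encoded polylines,
    # and geohashes.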
def query_obj(self) -> QueryObjectDict:
fd = self.form_data
self.is_timeseries = bool(fd.get("time_grain_sqla") or fd.get("granularity"))
d = super().query_obj()
self.metric = fd.get("metric")
line_col = fd.get("line_column")
if d["metrics"]:
self.has_metrics = True
d["groupby"].append(line_col)
else:
self.has_metrics = False
d["columns"].append(line_col)
return d
def get_properties(self, d: Dict[str, Any]) -> Dict[str, Any]:
fd = self.form_data
line_type = fd["line_type"]
deser = self.deser_map[line_type]
line_column = fd["line_column"]
path = deser(d[line_column])
if fd.get("reverse_long_lat"):
path = [(o[1], o[0]) for o in path]
d[self.deck_viz_key] = path
if line_type != "geohash":
del d[line_column]
d["__timestamp"] = d.get(DTTM_ALIAS) or d.get("__time")
return d
def get_data(self, df: pd.DataFrame) -> VizData:
self.metric_label = utils.get_metric_name(self.metric) if self.metric else None
return super().get_data(df)
class DeckPolygon(DeckPathViz):
"""deck.gl's Polygon Layer"""
viz_type = "deck_polygon"
deck_viz_key = "polygon"
verbose_name = _("Deck.gl - Polygon")
def query_obj(self) -> QueryObjectDict:
fd = self.form_data
self.elevation = fd.get("point_radius_fixed") or {"type": "fix", "value": 500}
return super().query_obj()
def get_metrics(self) -> List[str]:
metrics = [self.form_data.get("metric")]
if self.elevation.get("type") == "metric":
metrics.append(self.elevation.get("value"))
return [metric for metric in metrics if metric]
def get_properties(self, d: Dict[str, Any]) -> Dict[str, Any]:
super().get_properties(d)
fd = self.form_data
elevation = fd["point_radius_fixed"]["value"]
type_ = fd["point_radius_fixed"]["type"]
d["elevation"] = (
d.get(utils.get_metric_name(elevation)) if type_ == "metric" else elevation
)
return d
class DeckHex(BaseDeckGLViz):
"""deck.gl's DeckLayer"""
viz_type = "deck_hex"
verbose_name = _("Deck.gl - 3D HEX")
spatial_control_keys = ["spatial"]
def get_properties(self, d: Dict[str, Any]) -> Dict[str, Any]:
return {
"position": d.get("spatial"),
"weight": (d.get(self.metric_label) if self.metric_label else None) or 1,
}
def get_data(self, df: pd.DataFrame) -> VizData:
self.metric_label = utils.get_metric_name(self.metric) if self.metric else None
        return super().get_data(df)
class DeckGeoJson(BaseDeckGLViz):
"""deck.gl's GeoJSONLayer"""
viz_type = "deck_geojson"
verbose_name = _("Deck.gl - GeoJSON")
def query_obj(self) -> QueryObjectDict:
d = super().query_obj()
d["columns"] += [self.form_data.get("geojson")]
d["metrics"] = []
d["groupby"] = []
return d
def get_properties(self, d: Dict[str, Any]) -> Dict[str, Any]:
geojson = d[self.form_data["geojson"]]
return json.loads(geojson)
class DeckArc(BaseDeckGLViz):
"""deck.gl's Arc Layer"""
viz_type = "deck_arc"
verbose_name = _("Deck.gl - Arc")
spatial_control_keys = ["start_spatial", "end_spatial"]
is_timeseries = True
def query_obj(self) -> QueryObjectDict:
fd = self.form_data
self.is_timeseries = bool(fd.get("time_grain_sqla") or fd.get("granularity"))
return super().query_obj()
def get_properties(self, d: Dict[str, Any]) -> Dict[str, Any]:
dim = self.form_data.get("dimension")
return {
"sourcePosition": d.get("start_spatial"),
"targetPosition": d.get("end_spatial"),
"cat_color": d.get(dim) if dim else None,
DTTM_ALIAS: d.get(DTTM_ALIAS),
}
def get_data(self, df: pd.DataFrame) -> VizData:
if df.empty:
return None
d = super().get_data(df)
return {
"features": d["features"], # type: ignore
"mapboxApiKey": config["MAPBOX_API_KEY"],
}
class EventFlowViz(BaseViz):
"""A visualization to explore patterns in event sequences"""
viz_type = "event_flow"
verbose_name = _("Event flow")
credits = 'from <a href="https://github.com/williaster/data-ui">@data-ui</a>'
is_timeseries = True
def query_obj(self) -> QueryObjectDict:
query = super().query_obj()
form_data = self.form_data
event_key = form_data["all_columns_x"]
entity_key = form_data["entity"]
meta_keys = [
col
for col in form_data["all_columns"] or []
if col != event_key and col != entity_key
]
query["columns"] = [event_key, entity_key] + meta_keys
if form_data["order_by_entity"]:
query["orderby"] = [(entity_key, True)]
return query
def get_data(self, df: pd.DataFrame) -> VizData:
return df.to_dict(orient="records")
class PairedTTestViz(BaseViz):
"""A table displaying paired t-test values"""
viz_type = "paired_ttest"
verbose_name = _("Time Series - Paired t-test")
sort_series = False
is_timeseries = True
def query_obj(self) -> QueryObjectDict:
d = super().query_obj()
metrics = self.form_data.get("metrics")
sort_by = self.form_data.get("timeseries_limit_metric")
if sort_by:
sort_by_label = utils.get_metric_name(sort_by)
if sort_by_label not in d["metrics"]:
d["metrics"].append(sort_by)
if self.form_data.get("order_desc"):
d["orderby"] = [(sort_by, not self.form_data.get("order_desc", True))]
return d
def get_data(self, df: pd.DataFrame) -> VizData:
"""
Transform received data frame into an object of the form:
{
'metric1': [
{
groups: ('groupA', ... ),
values: [ {x, y}, ... ],
}, ...
], ...
}
"""
if df.empty:
return None
fd = self.form_data
groups = fd.get("groupby")
metrics = self.metric_labels
df = df.pivot_table(index=DTTM_ALIAS, columns=groups, values=metrics)
cols = []
# Be rid of falsey keys
for col in df.columns:
if col == "":
cols.append("N/A")
elif col is None:
cols.append("NULL")
else:
cols.append(col)
df.columns = cols
data: Dict[str, List[Dict[str, Any]]] = {}
series = df.to_dict("series")
for nameSet in df.columns:
# If no groups are defined, nameSet will be the metric name
hasGroup = not isinstance(nameSet, str)
Y = series[nameSet]
d = {
"group": nameSet[1:] if hasGroup else "All",
"values": [{"x": t, "y": Y[t] if t in Y else None} for t in df.index],
}
key = nameSet[0] if hasGroup else nameSet
if key in data:
data[key].append(d)
else:
data[key] = [d]
return data
class RoseViz(NVD3TimeSeriesViz):
viz_type = "rose"
verbose_name = _("Time Series - Nightingale Rose Chart")
sort_series = False
is_timeseries = True
def query_obj(self) -> QueryObjectDict:
d = super().query_obj()
metrics = self.form_data.get("metrics")
sort_by = self.form_data.get("timeseries_limit_metric")
if sort_by:
sort_by_label = utils.get_metric_name(sort_by)
if sort_by_label not in d["metrics"]:
d["metrics"].append(sort_by)
if self.form_data.get("order_desc"):
d["orderby"] = [(sort_by, not self.form_data.get("order_desc", True))]
return d
def get_data(self, df: pd.DataFrame) -> VizData:
if df.empty:
return None
data = super().get_data(df)
result: Dict[str, List[Dict[str, str]]] = {}
for datum in data: # type: ignore
key = datum["key"]
for val in datum["values"]:
timestamp = val["x"].value
if not result.get(timestamp):
result[timestamp] = []
value = 0 if math.isnan(val["y"]) else val["y"]
result[timestamp].append(
{
"key": key,
"value": value,
"name": ", ".join(key) if isinstance(key, list) else key,
"time": val["x"],
}
)
return result
class PartitionViz(NVD3TimeSeriesViz):
"""
A hierarchical data visualization with support for time series.
"""
viz_type = "partition"
verbose_name = _("Partition Diagram")
def query_obj(self) -> QueryObjectDict:
query_obj = super().query_obj()
time_op = self.form_data.get("time_series_option", "not_time")
# Return time series data if the user specifies so
query_obj["is_timeseries"] = time_op != "not_time"
sort_by = self.form_data.get("timeseries_limit_metric")
if sort_by:
sort_by_label = utils.get_metric_name(sort_by)
if sort_by_label not in query_obj["metrics"]:
query_obj["metrics"].append(sort_by)
query_obj["orderby"] = [
(sort_by, not self.form_data.get("order_desc", True))
]
return query_obj
def levels_for(
self, time_op: str, groups: List[str], df: pd.DataFrame
) -> Dict[int, pd.Series]:
"""
Compute the partition at each `level` from the dataframe.
"""
levels = {}
for i in range(0, len(groups) + 1):
agg_df = df.groupby(groups[:i]) if i else df
levels[i] = (
agg_df.mean()
if time_op == "agg_mean"
else agg_df.sum(numeric_only=True)
)
return levels
def levels_for_diff(
self, time_op: str, groups: List[str], df: pd.DataFrame
) -> Dict[int, pd.DataFrame]:
# Obtain a unique list of the time grains
times = list(set(df[DTTM_ALIAS]))
times.sort()
until = times[len(times) - 1]
since = times[0]
# Function describing how to calculate the difference
func = {
"point_diff": [pd.Series.sub, lambda a, b, fill_value: a - b],
"point_factor": [pd.Series.div, lambda a, b, fill_value: a / float(b)],
"point_percent": [
lambda a, b, fill_value=0: a.div(b, fill_value=fill_value) - 1,
lambda a, b, fill_value: a / float(b) - 1,
],
}[time_op]
agg_df = df.groupby(DTTM_ALIAS).sum()
levels = {
0: pd.Series(
{
m: func[1](agg_df[m][until], agg_df[m][since], 0)
for m in agg_df.columns
}
)
}
for i in range(1, len(groups) + 1):
agg_df = df.groupby([DTTM_ALIAS] + groups[:i]).sum()
levels[i] = pd.DataFrame(
{
m: func[0](agg_df[m][until], agg_df[m][since], fill_value=0)
for m in agg_df.columns
}
)
return levels
def levels_for_time(
self, groups: List[str], df: pd.DataFrame
) -> Dict[int, VizData]:
procs = {}
for i in range(0, len(groups) + 1):
self.form_data["groupby"] = groups[:i]
            df_drop = df.drop(groups[i:], axis=1)
procs[i] = self.process_data(df_drop, aggregate=True)
self.form_data["groupby"] = groups
return procs
def nest_values(
self,
levels: Dict[int, pd.DataFrame],
level: int = 0,
metric: Optional[str] = None,
dims: Optional[List[str]] = None,
) -> List[Dict[str, Any]]:
"""
Nest values at each level on the back-end with
access and setting, instead of summing from the bottom.
"""
if dims is None:
dims = []
if not level:
return [
{
"name": m,
"val": levels[0][m],
"children": self.nest_values(levels, 1, m),
}
for m in levels[0].index
]
if level == 1:
metric_level = levels[1][metric]
return [
{
"name": i,
"val": metric_level[i],
"children": self.nest_values(levels, 2, metric, [i]),
}
for i in metric_level.index
]
if level >= len(levels):
return []
dim_level = levels[level][metric][[dims[0]]]
return [
{
"name": i,
"val": dim_level[i],
"children": self.nest_values(levels, level + 1, metric, dims + [i]),
}
for i in dim_level.index
]
def nest_procs(
self,
procs: Dict[int, pd.DataFrame],
level: int = -1,
dims: Optional[Tuple[str, ...]] = None,
time: Any = None,
) -> List[Dict[str, Any]]:
if dims is None:
dims = ()
if level == -1:
return [
{"name": m, "children": self.nest_procs(procs, 0, (m,))}
for m in procs[0].columns
]
if not level:
return [
{
"name": t,
"val": procs[0][dims[0]][t],
"children": self.nest_procs(procs, 1, dims, t),
}
for t in procs[0].index
]
if level >= len(procs):
return []
return [
{
"name": i,
"val": procs[level][dims][i][time],
"children": self.nest_procs(procs, level + 1, dims + (i,), time),
}
for i in procs[level][dims].columns
]
def get_data(self, df: pd.DataFrame) -> VizData:
if df.empty:
return None
fd = self.form_data
groups = fd.get("groupby", [])
time_op = fd.get("time_series_option", "not_time")
if not len(groups):
raise ValueError("Please choose at least one groupby")
if time_op == "not_time":
levels = self.levels_for("agg_sum", groups, df)
elif time_op in ["agg_sum", "agg_mean"]:
levels = self.levels_for(time_op, groups, df)
elif time_op in ["point_diff", "point_factor", "point_percent"]:
levels = self.levels_for_diff(time_op, groups, df)
elif time_op == "adv_anal":
procs = self.levels_for_time(groups, df)
return self.nest_procs(procs)
else:
levels = self.levels_for("agg_sum", [DTTM_ALIAS] + groups, df)
return self.nest_values(levels)
def get_subclasses(cls: Type[BaseViz]) -> Set[Type[BaseViz]]:
return set(cls.__subclasses__()).union(
[sc for c in cls.__subclasses__() for sc in get_subclasses(c)]
)
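# Registry mapping each viz_type string to its class, built from every BaseViz subclass
# that is not on the denylist.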
viz_types = {
o.viz_type: o
for o in get_subclasses(BaseViz)
if o.viz_type not in config["VIZ_TYPE_DENYLIST"]
}
|
py | b4038b2007e9f52dc46df8d0a803b794bf28cee9 | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
import sys
from collections import defaultdict
from .deprecation import ImplicitDeprecated, resolve_deprecate_info
from .preview import ImplicitPreviewItem, resolve_preview_info
from .experimental import ImplicitExperimentalItem, resolve_experimental_info
from .util import CLIError, CtxTypeError, CommandResultItem, todict
from .parser import CLICommandParser
from .commands import CLICommandsLoader
from .events import (EVENT_INVOKER_PRE_CMD_TBL_CREATE, EVENT_INVOKER_POST_CMD_TBL_CREATE,
EVENT_INVOKER_CMD_TBL_LOADED, EVENT_INVOKER_PRE_PARSE_ARGS,
EVENT_INVOKER_POST_PARSE_ARGS, EVENT_INVOKER_TRANSFORM_RESULT,
EVENT_INVOKER_FILTER_RESULT)
from .help import CLIHelp
class CommandInvoker(object):
def __init__(self,
cli_ctx=None,
parser_cls=CLICommandParser,
commands_loader_cls=CLICommandsLoader,
help_cls=CLIHelp,
initial_data=None):
""" Manages a single invocation of the CLI (i.e. running a command)
:param cli_ctx: CLI Context
:type cli_ctx: knack.cli.CLI
:param parser_cls: A class to handle command parsing
:type parser_cls: knack.parser.CLICommandParser
:param commands_loader_cls: A class to handle loading commands
:type commands_loader_cls: knack.commands.CLICommandsLoader
:param help_cls: A class to handle help
:type help_cls: knack.help.CLIHelp
:param initial_data: The initial in-memory collection for this command invocation
:type initial_data: dict
"""
from .cli import CLI
if cli_ctx is not None and not isinstance(cli_ctx, CLI):
raise CtxTypeError(cli_ctx)
self.cli_ctx = cli_ctx
        # In-memory collection of key-value data for this invocation. This does not persist between invocations.
self.data = initial_data or defaultdict(lambda: None)
self.data['command'] = 'unknown'
self._global_parser = parser_cls.create_global_parser(cli_ctx=self.cli_ctx)
self.help = help_cls(cli_ctx=self.cli_ctx)
self.parser = parser_cls(cli_ctx=self.cli_ctx, cli_help=self.help,
prog=self.cli_ctx.name, parents=[self._global_parser])
self.commands_loader = commands_loader_cls(cli_ctx=self.cli_ctx)
def _filter_params(self, args): # pylint: disable=no-self-use
# Consider - we are using any args that start with an underscore (_) as 'private'
# arguments and remove them from the arguments that we pass to the actual function.
params = {key: value
for key, value in args.__dict__.items()
if not key.startswith('_')}
params.pop('func', None)
params.pop('command', None)
return params
def _rudimentary_get_command(self, args):
""" Rudimentary parsing to get the command """
nouns = []
command_names = self.commands_loader.command_table.keys()
for arg in args:
if arg and arg[0] != '-':
nouns.append(arg)
else:
break
def _find_args(args):
search = ' '.join(args).lower()
return next((x for x in command_names if x.startswith(search)), False)
# since the command name may be immediately followed by a positional arg, strip those off
while nouns and not _find_args(nouns):
del nouns[-1]
# ensure the command string is case-insensitive
for i in range(len(nouns)):
args[i] = args[i].lower()
return ' '.join(nouns)
def _validate_cmd_level(self, ns, cmd_validator): # pylint: disable=no-self-use
if cmd_validator:
cmd_validator(ns)
try:
delattr(ns, '_command_validator')
except AttributeError:
pass
def _validate_arg_level(self, ns, **_): # pylint: disable=no-self-use
for validator in getattr(ns, '_argument_validators', []):
validator(ns)
try:
delattr(ns, '_argument_validators')
except AttributeError:
pass
def _validation(self, parsed_ns):
try:
cmd_validator = getattr(parsed_ns, '_command_validator', None)
if cmd_validator:
self._validate_cmd_level(parsed_ns, cmd_validator)
else:
self._validate_arg_level(parsed_ns)
except CLIError:
raise
except Exception: # pylint: disable=broad-except
err = sys.exc_info()[1]
getattr(parsed_ns, '_parser', self.parser).validation_error(str(err))
# pylint: disable=too-many-statements
def execute(self, args):
""" Executes the command invocation
:param args: The command arguments for this invocation
:type args: list
:return: The command result
:rtype: knack.util.CommandResultItem
"""
self.cli_ctx.raise_event(EVENT_INVOKER_PRE_CMD_TBL_CREATE, args=args)
cmd_tbl = self.commands_loader.load_command_table(args)
command = self._rudimentary_get_command(args)
self.cli_ctx.invocation.data['command_string'] = command
self.commands_loader.load_arguments(command)
self.cli_ctx.raise_event(EVENT_INVOKER_POST_CMD_TBL_CREATE, cmd_tbl=cmd_tbl)
self.parser.load_command_table(self.commands_loader)
self.cli_ctx.raise_event(EVENT_INVOKER_CMD_TBL_LOADED, parser=self.parser)
        arg_check = [a for a in args if a not in ['--verbose', '--debug', '--only-show-errors']]
if not arg_check:
self.cli_ctx.completion.enable_autocomplete(self.parser)
subparser = self.parser.subparsers[tuple()]
self.help.show_welcome(subparser)
return CommandResultItem(None, exit_code=0)
if args[0].lower() == 'help':
args[0] = '--help'
self.cli_ctx.completion.enable_autocomplete(self.parser)
self.cli_ctx.raise_event(EVENT_INVOKER_PRE_PARSE_ARGS, args=args)
parsed_args = self.parser.parse_args(args)
self.cli_ctx.raise_event(EVENT_INVOKER_POST_PARSE_ARGS, command=parsed_args.command, args=parsed_args)
self._validation(parsed_args)
# save the command name (leaf in the tree)
self.data['command'] = parsed_args.command
cmd = parsed_args.func
if hasattr(parsed_args, 'cmd'):
parsed_args.cmd = cmd
deprecations = getattr(parsed_args, '_argument_deprecations', [])
if cmd.deprecate_info:
deprecations.append(cmd.deprecate_info)
previews = getattr(parsed_args, '_argument_previews', [])
if cmd.preview_info:
previews.append(cmd.preview_info)
experimentals = getattr(parsed_args, '_argument_experimentals', [])
if cmd.experimental_info:
experimentals.append(cmd.experimental_info)
params = self._filter_params(parsed_args)
# search for implicit deprecation
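        # walk up the command's parent groups, dropping one trailing token at a time,
        # until a deprecation is found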
path_comps = cmd.name.split()[:-1]
implicit_deprecate_info = None
while path_comps and not implicit_deprecate_info:
implicit_deprecate_info = resolve_deprecate_info(self.cli_ctx, ' '.join(path_comps))
del path_comps[-1]
if implicit_deprecate_info:
deprecate_kwargs = implicit_deprecate_info.__dict__.copy()
deprecate_kwargs['object_type'] = 'command'
del deprecate_kwargs['_get_tag']
del deprecate_kwargs['_get_message']
deprecations.append(ImplicitDeprecated(cli_ctx=self.cli_ctx, **deprecate_kwargs))
# search for implicit preview
path_comps = cmd.name.split()[:-1]
implicit_preview_info = None
while path_comps and not implicit_preview_info:
implicit_preview_info = resolve_preview_info(self.cli_ctx, ' '.join(path_comps))
del path_comps[-1]
if implicit_preview_info:
preview_kwargs = implicit_preview_info.__dict__.copy()
preview_kwargs['object_type'] = 'command'
previews.append(ImplicitPreviewItem(cli_ctx=self.cli_ctx, **preview_kwargs))
# search for implicit experimental
path_comps = cmd.name.split()[:-1]
implicit_experimental_info = None
while path_comps and not implicit_experimental_info:
implicit_experimental_info = resolve_experimental_info(self.cli_ctx, ' '.join(path_comps))
del path_comps[-1]
if implicit_experimental_info:
experimental_kwargs = implicit_experimental_info.__dict__.copy()
experimental_kwargs['object_type'] = 'command'
experimentals.append(ImplicitExperimentalItem(cli_ctx=self.cli_ctx, **experimental_kwargs))
if not self.cli_ctx.only_show_errors:
for d in deprecations:
print(d.message, file=sys.stderr)
for p in previews:
print(p.message, file=sys.stderr)
for p in experimentals:
print(p.message, file=sys.stderr)
cmd_result = parsed_args.func(params)
cmd_result = todict(cmd_result)
event_data = {'result': cmd_result}
self.cli_ctx.raise_event(EVENT_INVOKER_TRANSFORM_RESULT, event_data=event_data)
self.cli_ctx.raise_event(EVENT_INVOKER_FILTER_RESULT, event_data=event_data)
return CommandResultItem(event_data['result'],
exit_code=0,
table_transformer=cmd_tbl[parsed_args.command].table_transformer,
is_query_active=self.data['query_active'],
raw_result=cmd_result)
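# Minimal usage sketch: in knack, an execute() like the one above is normally
# driven through the CLI front door rather than called directly. The command
# tokens below ('hello world --name example') are purely illustrative.
def _example_invoke(cli):
    # `cli` is assumed to be an already configured knack CLI instance whose
    # invocation class provides the execute() method defined above.
    return cli.invoke(['hello', 'world', '--name', 'example'])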
|
py | b4038c09cd47ac4f2a352579ffa0e421687bb68a | from __future__ import annotations
import uuid
from typing import List
from flask_babel import format_date
from labster.domain.models.profiles import Profile
from labster.domain.models.util import parse_date
from labster.types import JSONDict
class Field:
required = False
visible = True
hidden = False
editable = True
scalar = True
note = ""
specs: List[List[str]] = []
def __init__(self, name, label, **kw):
self.name = name
self.label = label
for k, v in kw.items():
setattr(self, k, v)
def to_dict(self) -> JSONDict:
return {
"name": self.name,
"type": self.__class__.__name__,
"scalar": self.scalar,
"label": self.label,
"required": self.required,
"visible": self.visible,
"hidden": self.hidden,
"editable": self.editable,
"note": self.note,
"specs": self.specs,
}
def get_display_value(self, demande) -> str:
"""Return the name of this field for display in the form."""
if self.name == "porteur":
            # get a Profile object, so that we get the full_name below
# and not just its uid.
value = demande.porteur
else:
value = demande.data.get(self.name, "")
if value in (None, "None"):
value = ""
elif value is False:
value = "Non"
elif value is True:
value = "Oui"
elif isinstance(value, Profile):
value = value.full_name
return str(value)
class StringField(Field):
pass
class DateField(Field):
def get_display_value(self, demande):
value = demande.data.get(self.name, "")
date_value = parse_date(value)
if not value:
return ""
return format_date(date_value, format="medium")
class IntegerField(Field):
pass
class EmailField(Field):
pass
class BooleanField(Field):
pass
class Boolean2Field(Field):
pass
class TriStateField(Field):
pass
# def make_choices(l: List[str]):
# return [{"value": x, "label": x} for x in l]
class Select2Field(Field):
choices: List[str] = []
def to_dict(self) -> JSONDict:
d = Field.to_dict(self)
if callable(self.choices):
choices = self.choices()
else:
choices = self.choices
d["choices"] = choices
return d
# if choices and isinstance(choices[0], str):
# d["choices"] = make_choices(choices)
# else:
# d["choices"] = choices
# return d
class MultipleSelect2Field(Field):
choices: List[str] = []
def to_dict(self) -> JSONDict:
d = Field.to_dict(self)
if callable(self.choices):
choices = self.choices()
else:
choices = self.choices
d["choices"] = choices
return d
class TextAreaField(Field):
pass
class HTML(Field):
editable = False
def __init__(self, text, name=""):
if not name:
name = "html-" + uuid.uuid4().hex
super().__init__(name, text)
class ListField(Field):
scalar = False
class ListePartenaires(ListField):
specs = [
["nom_partenaire", "Nom du partenaire"],
["prenom_nom_contact", "Contact"],
["mail_contact", "Email"],
["telephone_contact", "Téléphone"],
]
class ListePartenairesContactes(ListField):
specs = [
["contact", "Contact"],
["nom_partenaire", "Nom du partenaire"],
]
class ListeDivulgationsPassees(ListField):
specs = [
["type_divulgation", "Type de divulgation"],
["titre", "Titre"],
["date_lieu", "Date et lieu"],
]
class ListeDivulgationsFutures(ListField):
specs = [
["type_divulgation", "Type de divulgation"],
["date", "Date envisagée"],
]
class ListeContrats(ListField):
specs = [
["contrat", "Contrat/Partenariat de recherche"],
["date_signature", "Date de signature du contrat"],
["reference", "Référence du contrat"],
]
class ListeMateriels(ListField):
specs = [
["materiel", "Matériel"],
]
class ListeAutresDeclarations(ListField):
specs = [
["type_protection", "Type de protection"],
["organisme", "Organisme ayant fait le dépôt"],
["exploitation", "Exploitation industrielle"],
]
class ListeLicencesExistantes(ListField):
specs = [
["type_licence", "Type de la licence"],
["nom_version_licence", "Nom et version de la licence"],
]
class FieldSet:
def __init__(self, name, label, fields):
self.name = name
self.label = label
self.fields = fields
self.visible = True
self.hidden = False
def to_dict(self) -> JSONDict:
return {
"name": self.name,
"type": [self.__class__.__name__],
"label": self.label,
"fields": [field.name for field in self.fields],
"visible": self.visible,
"hidden": self.hidden,
}
def __repr__(self):
return f"<FieldSet name={self.name} visible={self.visible}>"
|
py | b4038c24f527988e674b09ad059f11b590ab8b18 | # -*- coding: utf-8 -*-
import unittest
import datetime
from pyboleto.bank.bancodobrasil import BoletoBB
from .testutils import BoletoTestCase
class TestBancoBrasil(BoletoTestCase):
def setUp(self):
self.dados = []
for i in range(3):
d = BoletoBB(7, 1)
d.carteira = '18'
d.data_documento = datetime.date(2011, 3, 8)
d.data_vencimento = datetime.date(2011, 3, 8)
d.data_processamento = datetime.date(2012, 7, 4)
d.valor_documento = 2952.95
d.agencia = '9999'
d.conta = '99999'
d.convenio = '7777777'
d.nosso_numero = str(87654 + i)
d.numero_documento = str(87654 + i)
self.dados.append(d)
def test_linha_digitavel(self):
self.assertEqual(self.dados[0].linha_digitavel,
'00190.00009 07777.777009 00087.654182 6 49000000295295'
)
def test_codigo_de_barras(self):
self.assertEqual(self.dados[0].barcode,
'00196490000002952950000007777777000008765418'
)
suite = unittest.TestLoader().loadTestsFromTestCase(TestBancoBrasil)
if __name__ == '__main__':
unittest.main()
|
py | b4038c84fe643918f605e61011938e2768e6d6e0 | from six.moves.urllib.parse import parse_qs, urlparse
from mock import patch
from django.test import SimpleTestCase
from corehq.apps.sms.models import SMS
from corehq.messaging.smsbackends.unicel.models import SQLUnicelBackend
class TestUnicelSend(SimpleTestCase):
@classmethod
def setUpClass(cls):
super(TestUnicelSend, cls).setUpClass()
cls.backend = SQLUnicelBackend()
def test_sending_ascii(self):
self._test_unicel_send('ascii', 'ascii')
def test_sending_utf8(self):
self._test_unicel_send('útf-8', '00FA00740066002D0038')
def _test_unicel_send(self, text, expected_msg):
message = SMS(text=text, phone_number='+15555555555')
with patch('corehq.messaging.smsbackends.unicel.models.urlopen') as patch_urlopen:
self.backend.send(message)
self.assertEqual(len(patch_urlopen.call_args_list), 1)
called_args = patch_urlopen.call_args_list[0]
url, = called_args[0]
parsed_url = urlparse(url)
url_params = parse_qs(parsed_url.query)
self.assertEqual(url_params['msg'], [expected_msg])
|
py | b4038e073bf7726ae0858655b580be80f9598e88 | from flask_wtf import FlaskForm
from wtforms import StringField, SubmitField, PasswordField
from wtforms.validators import DataRequired, URL
from flask_ckeditor import CKEditorField
##WTForm
class CreatePostForm(FlaskForm):
title = StringField("Blog Post Title", validators=[DataRequired()])
subtitle = StringField("Subtitle", validators=[DataRequired()])
img_url = StringField("Blog Image URL", validators=[DataRequired(), URL()])
body = CKEditorField("Blog Content", validators=[DataRequired()])
submit = SubmitField("Submit Post")
class RegisterForm(FlaskForm):
email = StringField('E-mail', validators=[DataRequired()])
login = StringField('Login', validators=[DataRequired()])
password = PasswordField('Password', validators=[DataRequired()])
submit = SubmitField('Register')
class LoginForm(FlaskForm):
email = StringField('E-mail', validators=[DataRequired()])
password = PasswordField('Password', validators=[DataRequired()])
submit = SubmitField('Let me in')
class CommentForm(FlaskForm):
body = CKEditorField('Comment', validators=[DataRequired()])
submit = SubmitField('comment')
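# Minimal usage sketch (assumptions: an existing Flask app with a secret key,
# an active request context and a "login.html" template; none of these are
# defined in this module).
def example_login_view():
    from flask import render_template

    form = LoginForm()
    if form.validate_on_submit():
        return "Logged in as {}".format(form.email.data)
    return render_template("login.html", form=form)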
|
py | b4038e4e5c571c40839b280a1d8aff7c31af145e | # -*- coding: UTF-8 -*-
# File: utils.py
import os
import sys
from contextlib import contextmanager
import inspect
from datetime import datetime, timedelta
from tqdm import tqdm
import numpy as np
__all__ = ['change_env',
'get_rng',
'fix_rng_seed',
'get_tqdm',
'execute_only_once',
'humanize_time_delta'
]
def humanize_time_delta(sec):
"""Humanize timedelta given in seconds
Args:
sec (float): time difference in seconds.
Examples:
Several time differences as a human readable string
.. code-block:: python
        print(humanize_time_delta(1)) # 1 second
        print(humanize_time_delta(60 + 1)) # 1 minute 1 second
        print(humanize_time_delta(87.6)) # 1 minute 27 seconds
        print(humanize_time_delta(0.01)) # 0.01 seconds
        print(humanize_time_delta(60 * 60 + 1)) # 1 hour 0 minutes 1 second
        print(humanize_time_delta(60 * 60 * 24 + 1)) # 1 day 0 hours 0 minutes 1 second
        print(humanize_time_delta(60 * 60 * 24 + 60 * 2 + 60*60*9+ 3)) # 1 day 9 hours 2 minutes 3 seconds
Returns:
time difference as a readable string
"""
time = datetime(2000, 1, 1) + timedelta(seconds=int(sec))
units = ['day', 'hour', 'minute', 'second']
vals = [time.day - 1, time.hour, time.minute, time.second]
if sec < 60:
vals[-1] = sec
def _format(v, u):
return "{} {}{}".format(v, u, "s" if v > 1 else "")
required = False
ans = []
for v, u in zip(vals, units):
if not required:
if v > 0:
required = True
ans.append(_format(v, u))
else:
ans.append(_format(v, u))
return " ".join(ans)
@contextmanager
def change_env(name, val):
"""
Args:
name(str), val(str):
Returns:
a context where the environment variable ``name`` being set to
``val``. It will be set back after the context exits.
"""
oldval = os.environ.get(name, None)
os.environ[name] = val
yield
if oldval is None:
del os.environ[name]
else:
os.environ[name] = oldval
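# Minimal usage sketch for change_env: the variable is overridden inside the
# context and restored on exit. The variable name is an arbitrary example and
# is assumed not to be set beforehand.
def _example_change_env():
    with change_env('TENSORPACK_EXAMPLE_FLAG', '1'):
        assert os.environ['TENSORPACK_EXAMPLE_FLAG'] == '1'
    assert 'TENSORPACK_EXAMPLE_FLAG' not in os.environ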
_RNG_SEED = None
def fix_rng_seed(seed):
"""
Call this function at the beginning of program to fix rng seed within tensorpack.
Args:
seed (int):
Note:
See https://github.com/ppwwyyxx/tensorpack/issues/196.
Examples:
Fix random seed in both tensorpack and tensorflow.
.. code-block:: python
import tensorpack.utils.utils as utils
seed = 42
utils.fix_rng_seed(seed)
        tensorflow.set_random_seed(seed)
# run trainer
"""
global _RNG_SEED
_RNG_SEED = int(seed)
def get_rng(obj=None):
"""
Get a good RNG seeded with time, pid and the object.
Args:
obj: some object to use to generate random seed.
Returns:
np.random.RandomState: the RNG.
"""
seed = (id(obj) + os.getpid() +
int(datetime.now().strftime("%Y%m%d%H%M%S%f"))) % 4294967295
if _RNG_SEED is not None:
seed = _RNG_SEED
return np.random.RandomState(seed)
_EXECUTE_HISTORY = set()
def execute_only_once():
"""
    Each call site of this function is guaranteed to return True the
first time and False afterwards.
Returns:
bool: whether this is the first time this function gets called from this line of code.
Example:
.. code-block:: python
if execute_only_once():
# do something only once
"""
f = inspect.currentframe().f_back
ident = (f.f_code.co_filename, f.f_lineno)
if ident in _EXECUTE_HISTORY:
return False
_EXECUTE_HISTORY.add(ident)
return True
def get_tqdm_kwargs(**kwargs):
"""
Return default arguments to be used with tqdm.
Args:
kwargs: extra arguments to be used.
Returns:
dict:
"""
default = dict(
smoothing=0.5,
dynamic_ncols=True,
ascii=True,
bar_format='{l_bar}{bar}|{n_fmt}/{total_fmt}[{elapsed}<{remaining},{rate_noinv_fmt}]'
)
f = kwargs.get('file', sys.stderr)
isatty = f.isatty()
# NOTE when run under mpirun/slurm, isatty is always False
# Jupyter notebook should be recognized as tty.
# Wait for https://github.com/ipython/ipykernel/issues/268
try:
from ipykernel import iostream
if isinstance(f, iostream.OutStream):
isatty = True
except ImportError:
pass
if isatty:
default['mininterval'] = 0.5
else:
# If not a tty, don't refresh progress bar that often
default['mininterval'] = 180
default.update(kwargs)
return default
def get_tqdm(**kwargs):
""" Similar to :func:`get_tqdm_kwargs`,
but returns the tqdm object directly. """
return tqdm(**get_tqdm_kwargs(**kwargs))
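# Minimal usage sketch: get_tqdm() builds a tqdm bar with the defaults above;
# the total of 100 steps is an arbitrary example.
def _example_progress():
    with get_tqdm(total=100) as pbar:
        for _ in range(100):
            pbar.update(1)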
|
py | b4038e7b87d44fd49949081832aeb351c28cdf9b | import matplotlib as mpl
mpl.use("Agg")
import matplotlib.pyplot as plt # noqa
import matplotlib.gridspec as gridspec # noqa
import numpy as np # noqa
from edflow.util import walk # noqa
def flow2hsv(flow):
"""Given a Flowmap of shape ``[W, H, 2]`` calculates an hsv image,
showing the relative magnitude and direction of the optical flow.
Parameters
---------
flow : np.array
Optical flow with shape ``[W, H, 2]``.
Returns
-------
np.array
Containing the hsv data.
"""
# prepare array - value is always at max
hsv = np.zeros((flow.shape[0], flow.shape[1], 3), dtype=np.uint8)
hsv[:, :, 0] = 255
hsv[:, :, 2] = 255
# magnitude and angle
mag, ang = cart2polar(flow[..., 0], flow[..., 1])
# make it colorful
hsv[..., 0] = ang * 180 / np.pi
normalizer = mpl.colors.Normalize(mag.min(), mag.max())
hsv[..., 1] = np.int32(normalizer(mag) * 255)
return hsv
def cart2polar(x, y):
"""
Takes two array as x and y coordinates and returns the magnitude and angle.
"""
r = np.sqrt(x ** 2 + y ** 2)
phi = np.arctan2(x, y)
return r, phi
def hsv2rgb(hsv):
"""color space conversion hsv -> rgb. simple wrapper for nice name."""
rgb = mpl.colors.hsv_to_rgb(hsv)
return rgb
def flow2rgb(flow):
"""converts a flow field to an rgb color image.
Parameters
---------
flow : np.array
optical flow with shape ``[W, H, 2]``.
Returns
-------
np.array
Containing the rgb data. Color indicates orientation,
intensity indicates magnitude.
"""
return hsv2rgb(flow2hsv(flow))
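# Minimal usage sketch: build a synthetic [H, W, 2] flow field and turn it
# into an RGB visualisation; the 32x32 size is an arbitrary example.
def _example_flow_visualisation():
    h, w = 32, 32
    u, v = np.meshgrid(np.linspace(0.0, 1.0, w), np.linspace(0.0, 1.0, h))
    flow = np.dstack([u, v])
    return flow2rgb(flow)  # array of shape [32, 32, 3]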
def get_support(image):
"""
    .. warning:: This function makes a lot of assumptions that need not be met!
Assuming that there are three categories of images and that the image_array
has been properly constructed, this function will estimate the support of
the given :attr:`image`.
Parameters
---------
image : np.ndarray
Some properly constructed image like array. No
assumptions need to be made about the shape of the image, we
        simply assume each value is some color value.
Returns
-------
str
The support. Either '0->1', '-1->1' or '0->255'
"""
if image.min() < 0:
return "-1->1"
elif image.max() > 1:
return "0->255"
else:
return "0->1"
VALID_SUPPORTS = ["0->1", "-1->1", "0->255"]
def sup_str_to_num(support_str):
"""Converts a support string into usable numbers."""
mn = -1.0 if support_str == "-1->1" else 0.0
mx = 255.0 if support_str == "0->255" else 1.0
return mn, mx
def adjust_support(image, future_support, current_support=None, clip=False):
"""Will adjust the support of all color values in :attr:`image`.
Parameters
---------
image : np.ndarray
Array containing color values. Make sure this is
properly constructed.
future_support : str
The support this array is supposed to have after
the transformation. Must be one of '-1->1', '0->1', or '0->255'.
current_support : str
        The support of the colors currently in
:attr:`image`. If not given it will be estimated by
:func:`get_support`.
clip : bool
By default the return values in image are simply coming
from a linear transform, thus the actual support might be larger
than the requested interval. If set to ``True`` the returned
array will be cliped to ``future_support``.
Returns
-------
same type as image
The given :attr:`image` with transformed support.
"""
if current_support is None:
current_support = get_support(image)
else:
assert current_support in VALID_SUPPORTS
cur_min, cur_max = sup_str_to_num(current_support)
fut_min, fut_max = sup_str_to_num(future_support)
# To [0, 1]
image = image.astype(float)
image -= cur_min
image /= cur_max - cur_min
# To [fut_min, fut_max]
image *= fut_max - fut_min
image += fut_min
if clip:
image = clip_to_support(image, future_support)
if future_support == "0->255":
image = image.astype(np.uint8)
return image
def clip_to_support(image, supp_str):
vmin, vmax = sup_str_to_num(supp_str)
return np.clip(image, vmin, vmax)
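# Minimal usage sketch for adjust_support: map a uint8 image from [0, 255]
# to [-1, 1] and back again. The random image is an arbitrary stand-in.
def _example_adjust_support():
    image = np.random.randint(0, 256, size=(8, 8, 3), dtype=np.uint8)
    centered = adjust_support(image, "-1->1", current_support="0->255")
    restored = adjust_support(centered, "0->255", current_support="-1->1", clip=True)
    return centered, restored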
def add_im_info(image, ax):
"""Adds some interesting facts about the image."""
shape = "x".join([str(s) for s in image.shape])
mn = image.min()
mx = image.max()
supp = get_support(image)
info_str = "shape: {}\nsupport: {} (min={}, max={})"
info_str = info_str.format(shape, supp, mn, mx)
ax.text(0, 0, info_str)
def im_fn(key, im, ax):
"""Plot an image. Used by :func:`plot_datum`."""
if im.shape[-1] == 1:
im = np.squeeze(im)
add_im_info(im, ax)
ax.imshow(adjust_support(im, "0->1"))
ax.set_ylabel(key, rotation=0)
def heatmap_fn(key, im, ax):
"""Assumes that heatmap shape is [H, W, N]. Used by
:func:`plot_datum`."""
im = np.mean(im, axis=-1)
im_fn(key, im, ax)
def keypoints_fn(key, keypoints, ax):
"""
Plots a list of keypoints as a dot plot.
"""
add_im_info(keypoints, ax)
x = keypoints[:, 0]
y = keypoints[:, 1]
ax.plot(x, y, "go", markersize=1)
ax.set_ylabel(key)
def flow_fn(key, im, ax):
"""Plot an flow. Used by :func:`plot_datum`."""
im = flow2rgb(im)
im_fn(key, im, ax)
def other_fn(key, obj, ax):
"""Print some text about the object. Used by :func:`plot_datum`."""
text = "{}: {} - {}".format(key, type(obj), obj)
ax.axis("off")
# ax.imshow(np.ones([10, 100]))
ax.text(0, 0, text)
PLOT_FUNCTIONS = {
"image": im_fn,
"heat": heatmap_fn,
"keypoints": keypoints_fn,
"flow": flow_fn,
"other": other_fn,
}
def default_heuristic(key, obj):
"""Determines the kind of an object. Used by :func:`plot_datum`."""
if isinstance(obj, np.ndarray):
if len(obj.shape) > 3 or len(obj.shape) < 2:
            # This is not an image -> maybe implement a sequence fn later
return "other"
else:
if obj.shape[-1] in [3, 4]:
return "image"
elif obj.shape[-1] == 2:
if len(obj.shape) <= 2:
return "keypoints"
else:
return "flow"
else:
return "heat"
return "other"
def plot_datum(
nested_thing,
savename="datum.png",
heuristics=default_heuristic,
plt_functions=PLOT_FUNCTIONS,
):
"""Plots all data in the nested_thing as best as can.
If heuristics is given, this determines how each leaf datum is converted
to something plottable.
Parameters
---------
nested_thing : dict or list
Some nested object.
savename : str
``Path/to/the/plot.png``.
heuristics : Callable
If given this should produce a string specifying
the kind of data of the leaf. If ``None`` determinde automatically.
See :func:`default_heuristic`.
plt_functions : dict of Callables
Maps a ``kind`` to a function which
can plot it. Each callable must be able to receive a the key, the
leaf object and the Axes to plot it in.
"""
class Plotter(object):
def __init__(self, kind_fn, savename):
self.kind_fn = kind_fn
self.savename = savename
self.buffer = []
def __call__(self, key, obj):
kind = self.kind_fn(key, obj)
self.buffer += [[kind, key, obj]]
def plot(self):
n_pl = len(self.buffer)
f = plt.figure(figsize=(5, 2 * n_pl))
gs = gridspec.GridSpec(n_pl, 1)
for i, [kind, key, obj] in enumerate(self.buffer):
ax = f.add_subplot(gs[i])
plt_functions[kind](key, obj, ax)
f.savefig(self.savename)
def __str__(self):
self.plot()
return "Saved Plot at {}".format(self.savename)
P = Plotter(heuristics, savename)
walk(nested_thing, P, pass_key=True)
print(P)
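# Minimal usage sketch: plot a small nested example datum to a png file.
# The keys and the output filename are arbitrary examples.
def _example_plot_datum():
    datum = {
        "image": np.random.rand(32, 32, 3),
        "keypoints": np.random.rand(5, 2),
        "label": 3,
    }
    plot_datum(datum, savename="example_datum.png")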
|
py | b4038f1b354a4f896f0dc61c79bf54b2bda8e476 | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
__author__ = 'andyguo'
import nuke
def callback():
node = nuke.thisNode()
knob = nuke.thisKnob()
if knob.name() == 'dayu_write_auto_name':
_auto_rename(node, knob)
def _auto_rename(node, knob):
def get_current_nk_filename():
nk_file = None
try:
nk_file = nuke.scriptName()
except RuntimeError as e:
nuke.alert(e.message)
return nk_file
ext_value = node['dayu_write_ext_list'].value()
user_sub_level = node['dayu_write_user_sub_level'].value()
current_nk_filename = get_current_nk_filename()
if current_nk_filename:
render_filename = _generate_filename(current_nk_filename, ext_value, user_sub_level)
node['file'].setValue(render_filename)
node['label'].setValue(user_sub_level)
def _generate_filename(current_nk_filename, ext_value, user_sub_level):
import os
basename = os.path.basename(current_nk_filename)
components = basename.split('.')[0].split('_')
render_root_path = os.environ.get('DAYU_NUKE_RENDER_PATH', '~')
render_file_path = '{root}' \
'{sep}' \
'{sub_level}' \
'{filename}'.format(root=render_root_path,
sep=os.path.sep,
sub_level=user_sub_level + os.path.sep if user_sub_level else '',
filename='_'.join(components) + '.%04d' + ext_value)
return render_file_path
|
py | b4038f1f2dc4af64b066e1218a9eaf0817f0669f | import autoaim
import math
import matplotlib.pyplot as plt
import numpy as np
import torch
import torch.nn as nn
from torch.autograd import Variable
import sys
import time
# Devices
if torch.cuda.is_available():
device = torch.device('cuda')
print('Device: GPU.')
else:
device = torch.device('cpu')
print('Device: CPU.')
# device = torch.device('cpu')
# Dataset
def preprocess(t, h):
# shuffling
r = torch.randperm(t.size(0))
t = t[r, :]
# GIVE ME MORE!!
_ = t[:, :-1]
t = torch.cat((_, t[:, -1:]), 1)
return t
def load(filename):
header, data = autoaim.helpers.read_csv(filename)
data = torch.Tensor(data).to(device)
data = preprocess(data,header)
x = data[:, :-1]
y = data[:, -1:]
return x, y, header
x_train, y_train, header = load('test_lamp_train.csv')
x_test, y_test, _ = load('test_lamp_test.csv')
train_dataset_size = x_train.size(0)
test_dataset_size = x_test.size(0)
input_size = x_train.size(1)
output_size = 1
print('====== Input ======')
print('train_dataset_size: {}'.format(train_dataset_size))
print('test_dataset_size: {}'.format(test_dataset_size))
print('input_size: {}'.format(input_size))
# Model
class Model(torch.nn.Module):
def __init__(self):
super(Model, self).__init__()
self.linear = torch.nn.Linear(input_size, output_size)
self.sigmoid = torch.nn.Sigmoid()
def forward(self, x):
y_pred = self.sigmoid(self.linear(x))
return y_pred
# Training loop
@autoaim.helpers.time_this
def train(learning_rate, epoch_num):
# Loss and optimizer
criterion = nn.BCELoss()
optimizer = torch.optim.SGD(model.parameters(), lr=learning_rate)
# Train loop
print('====== Config ======')
print('learning_rate: {}'.format(learning_rate))
print('epoch_num: {}'.format(epoch_num))
for epoch in range(epoch_num):
# Forward pass
y_pred = model(x_train)
loss = criterion(y_pred, y_train)
# Backward and optimize
optimizer.zero_grad()
loss.backward()
optimizer.step()
if epoch == 0 or (epoch+1) % (epoch_num/10) == 0:
y_pred = model(x_test)
loss_test = criterion(y_pred, y_test)
print("Epoch: [{!s:6}/{!s:6}], Loss: {:.2f}, Test loss: {:.2f}"
.format(epoch+1, epoch_num, loss, loss_test))
def analyse(x_anls, y_anls, threshold):
# Predict
y_pred = model(x_anls)
# Convert to numpy array
x_anls, y_anls, y_pred = (t.numpy() for t in [x_anls, y_anls, y_pred])
# Sort
_1, _2 = np.where(y_anls == 1)[0], np.where(y_anls == 0)[0]
x_anls, y_anls, y_pred = (np.concatenate(
(t[_1, :], t[_2, :])) for t in (x_anls, y_anls, y_pred))
# Distribution
print('Data Distribution')
x = np.arange(0, x_anls.shape[0], dtype=int)
# x_anls = np.arange(0, 40, dtype=int)
plt.plot(x, y_pred[x, :], 'bo', label='Predict')
plt.plot(x, y_anls[x, :], 'ro', label='Data')
plt.legend()
plt.show()
# ROC
print('ROC')
num_positive = len(np.where(y_anls == 1)[0])
num_negative = len(np.where(y_anls == 0)[0])
_ = np.where(y_pred >= threshold)[0]
num_true_positive = len(np.where(y_anls[_, :] == 1)[0])
num_false_positive = len(np.where(y_anls[_, :] == 0)[0])
_ = np.where(y_pred < threshold)[0]
num_false_negative = len(np.where(y_anls[_, :] == 1)[0])
num_true_negative = len(np.where(y_anls[_, :] == 0)[0])
print('true positive: {}'.format(num_true_positive))
print('false positive: {}'.format(num_false_positive))
print('true negative: {}'.format(num_true_negative))
print('false negative: {}\n'.format(num_false_negative))
# Weight
x = np.linspace(0, 1)
w = [wi.data.cpu() for wi in model.parameters()]
w = torch.cat((w[0][0], w[1])).numpy()
print('Weight')
b = w[-1]
for i in range(input_size):
a = w[i]
y = a*x+b
plt.plot(x, (y-y.min())/(y.max()-y.min()), linestyle='-')
plt.plot(x_anls[:, i], y_pred, 'bo', label='Predict')
plt.plot(x_anls[:, i], y_anls, 'ro', label='Data')
plt.legend()
plt.show()
_1, _2 = i % (len(header) - 1), int((i+1)/len(header)+1)
print('w[{}] {} #{}: {}'.format(i, header[_1], _2, w[i]))
# Save
def save(filename):
dataloader = autoaim.DataLoader()
autoaim.helpers.new_csv(filename, autoaim.aimmat.enabled_props)
w = [wi.data.cpu() for wi in model.parameters()]
w = torch.cat((w[0][0], w[1])).numpy()
autoaim.helpers.append_csv(filename, w)
def test(n):
# CPU
start_time = time.time()
a = torch.ones(n, n)
for _ in range(1000):
a += a
elapsed_time = time.time() - start_time
print('CPU time = ', elapsed_time)
# GPU
start_time = time.time()
b = torch.ones(n, n).cuda()
for _ in range(1000):
b += b
elapsed_time = time.time() - start_time
print('GPU time = ', elapsed_time)
if __name__ == '__main__':
test(2048)
model = Model().to(device)
train(0.01, 10000)
with torch.no_grad():
# x_test, y_test,*_ = load('test.csv', 0)
save('weight.csv')
# analyse(x_test, y_test, 0.5)
|
py | b4038f57267a864e6fd1128647c9e5d1f0293bf4 | import os
BASE = os.getcwd()
RESULTS_BASE = os.getcwd() + "/results/"
DATA_BASE = "/data/"
MODEL_CHECKPOINTS = RESULTS_BASE + "/model_checkpoints/"
MODEL = RESULTS_BASE + "/model/"
SENTIMENT140_DATASET = DATA_BASE +"sentiment140/training.1600000.processed.noemoticon.csv" ##BASE +"\\Datasets\Sentiment140_twitter\\training.1600000.processed.noemoticon.csv"
SENTIMENT140_DATASET_PARSED = DATA_BASE +"sentiment140/training_parsed.csv"
SENTIMENT140_DATASET_PARSED_TSV = (DATA_BASE +"sentiment140/training_parsed.tsv", DATA_BASE +"sentiment140/test_parsed.tsv") #lazy_loading only
SENTIMENT140_URL = "https://nyc3.digitaloceanspaces.com/ml-files-distro/v1/sentiment-analysis-is-bad/data/training.1600000.processed.noemoticon.csv.zip"
IMDB_DATASET = DATA_BASE + "aclImdb/aclImdb.csv"
IMDB_DATASET_Parsed = DATA_BASE + "aclImdb/aclImdb_parsed.csv"
TRAINING_RESULT_CSV = (RESULTS_BASE, "result.csv") #path, filename |
py | b4038f983dc9a13a811ee4dbf393534bff423069 | """Object representation of a request.
The ``Request`` class can be used to hold a request's information.
"""
from dataclasses import dataclass
from typing import Any
@dataclass
class Request:
"""A dataclass representation of a request.
Holds the information of a request in an object for easy access.
Args:
data (dict[str, Any]): the data passed with the request.
protocol (str): the protocol used to send the request.
type (str): the type of the request.
Attributes:
data (dict[str, Any]): the complete request.
protocol (str): the protocol used to send the request.
type (str): the request type, for example: request.
Example:
Creating a request::
>>> request = Request(
>>> protocol="http",
>>> type="request",
>>> data={"body": b"", "more_body": False}
>>> )
"""
data: dict[str, Any]
protocol: str
type: str
|
py | b403907c6cb1339a5f8a37c8dc6ca0a5c19f1eb2 | from flask_sqlalchemy import SQLAlchemy
db = SQLAlchemy()
def app_init(app):
db.init_app(app)
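# Minimal usage sketch (assumed application-factory pattern): create a Flask
# app, configure a database URI and initialise the shared `db` object.
# The SQLite URI is an arbitrary example.
def create_example_app():
    from flask import Flask

    app = Flask(__name__)
    app.config["SQLALCHEMY_DATABASE_URI"] = "sqlite:///example.db"
    app_init(app)
    return app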
|
py | b40390872a67473951b1ed6aafa821fbd02a6e50 | # Example of the 'list' data structure
# Define a list with 4 elements
c = [9.5,8.7,6.5,10]
print(c)
# Get a particular element (here the second-to-last and the last):
print(c[2],type(c[2]))
print(c[3],type(c[3]))
# we can also count from the end:
print(c[-1],type(c[-1]))
# Append an element (at the end)
c.append(9.8)
print(c)
# Insert an element at a particular position
print("Index 2 before:",c[2])
c.insert(1,2.5)
print("Index 2 after:",c[2])
print(c) # Elements to the right move to a higher index
# Change a particular element
c[1] = 8.8
print(c,"After changing c[1]")
# Remove a particular element (here index 1)
c.pop(1)
print(c)
# Get the size of the list
print("The list has %s elements" % len(c))
# Remove all elements from a list (without deleting the object)
c.clear()
print(c)
# NOTE: lists are pointers (references)!
x = [1,2,3,4]
y = x
z = x.copy()
x[0] = 5
print(y)
print(z)
print("-------")
x = [1,2,3,4] # x=[1,2,3,4]
y = x #copy x to y=> y=[1,2,3,4]
x[3]=99 # change the last value of x => x=[1,2,3,99]
# HOWEVER: now y is also y=[1,2,3,99]
print(y)
print("----- La manera correcta de hacerlo -----")
x = [1,2,3,4]
y = x.copy()
x[3]=99 # change the last value of x => x=[1,2,3,99]
# we have: x=[1,2,3,99] and y=[1,2,3,4]
print(x,y)
## MULTIDIMENSIONAL LISTS
x = [[1,2,3,4],[5,6,7,8,0]]
print(x)
print(x[0][3])
# FOR LOOP THROUGH INDEXES
print("Loops (version 1):")
x = [1,2,3,4,5,6,7]
for i in range(0,len(x)):
print(x[i])
print("Loops (version 2):")
for i in x:
print(i)
|
py | b40390cb89635d5837d0cb04416fef5d7c0133d7 | #!/usr/bin/env python3
# Copyright (c) 2015-2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Utilities for doing coverage analysis on the RPC interface.
Provides a way to track which RPC commands are exercised during
testing.
"""
import os
REFERENCE_FILENAME = 'rpc_interface.txt'
class AuthServiceProxyWrapper():
"""
An object that wraps AuthServiceProxy to record specific RPC calls.
"""
def __init__(self, auth_service_proxy_instance, coverage_logfile=None):
"""
Kwargs:
auth_service_proxy_instance (AuthServiceProxy): the instance
being wrapped.
coverage_logfile (str): if specified, write each service_name
out to a file when called.
"""
self.auth_service_proxy_instance = auth_service_proxy_instance
self.coverage_logfile = coverage_logfile
def __getattr__(self, name):
return_val = getattr(self.auth_service_proxy_instance, name)
if not isinstance(return_val, type(self.auth_service_proxy_instance)):
# If proxy getattr returned an unwrapped value, do the same here.
return return_val
return AuthServiceProxyWrapper(return_val, self.coverage_logfile)
def __call__(self, *args, **kwargs):
"""
Delegates to AuthServiceProxy, then writes the particular RPC method
called to a file.
"""
return_val = self.auth_service_proxy_instance.__call__(*args, **kwargs)
self._log_call()
return return_val
def _log_call(self):
rpc_method = self.auth_service_proxy_instance._service_name
if self.coverage_logfile:
with open(self.coverage_logfile, 'a+', encoding='utf8') as f:
f.write("%s\n" % rpc_method)
def __truediv__(self, relative_uri):
return AuthServiceProxyWrapper(self.auth_service_proxy_instance / relative_uri,
self.coverage_logfile)
def get_request(self, *args, **kwargs):
self._log_call()
return self.auth_service_proxy_instance.get_request(*args)
def get_filename(dirname, n_node):
"""
Get a filename unique to the test process ID and node.
This file will contain a list of RPC commands covered.
"""
pid = str(os.getpid())
return os.path.join(
dirname, "coverage.pid%s.node%s.txt" % (pid, str(n_node)))
def write_all_rpc_commands(dirname, node):
"""
Write out a list of all RPC functions available in `plutus-cli` for
coverage comparison. This will only happen once per coverage
directory.
Args:
dirname (str): temporary test dir
node (AuthServiceProxy): client
Returns:
bool. if the RPC interface file was written.
"""
filename = os.path.join(dirname, REFERENCE_FILENAME)
if os.path.isfile(filename):
return False
help_output = node.help().split('\n')
commands = set()
for line in help_output:
line = line.strip()
# Ignore blanks and headers
if line and not line.startswith('='):
commands.add("%s\n" % line.split()[0])
with open(filename, 'w', encoding='utf8') as f:
f.writelines(list(commands))
return True
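# Minimal usage sketch: wrap a node's RPC proxy so every call is logged to a
# per-node coverage file. `dirname` and `node` are assumed to come from the
# test framework's setup, as in write_all_rpc_commands() above.
def wrap_node_for_coverage(node, dirname, n_node):
    logfile = get_filename(dirname, n_node)
    return AuthServiceProxyWrapper(node, coverage_logfile=logfile)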
|
py | b40391284f8edabe428604bee88a7288d2436c8c | import tensorflow as tf
import tensorflow.keras as keras
import matplotlib.pyplot as plt
import numpy as np
# This function trains a model whose goal is to classify 28x28 pixel images as specific digits.
def train():
# Load the dataset
mnist = tf.keras.datasets.mnist
(x_train, y_train), (x_test, y_test) = mnist.load_data()
    # Normalize the pixel values from the 0-255 range to 0-1
x_train = tf.keras.utils.normalize(x_train, axis=1)
x_test = tf.keras.utils.normalize(x_test, axis=1)
    model = tf.keras.models.Sequential() # This creates the neural network. Sequential because it's feed-forward.
model.add(tf.keras.layers.Flatten()) # This turns the 28x28 picture into a 784x1
# Dense layers are used to make fully connected layers.
# The activation function is Rectified Linear. {0 if x < 0: x if x > 0}
model.add(tf.keras.layers.Dense(128, activation=tf.nn.relu))
model.add(tf.keras.layers.Dense(128, activation=tf.nn.relu))
# Softmax activation function shows the probability of selecting the item.
model.add(tf.keras.layers.Dense(10, activation=tf.nn.softmax))
    # Compile the model with its optimizer, loss function and metrics
model.compile(optimizer='adam',
loss='sparse_categorical_crossentropy',
metrics=['accuracy'])
# Run the model for the given number of epochs
model.fit(x_train, y_train, epochs=3)
# Save the model for future use
model.save('num_classification.model')
# Return the test sets for future use
return x_test, y_test
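# Minimal usage sketch: reload the saved model and check it against the test
# split returned by train(). This assumes train() has been run so that
# 'num_classification.model' exists on disk.
def evaluate():
    x_test, y_test = train()
    model = tf.keras.models.load_model('num_classification.model')
    loss, accuracy = model.evaluate(x_test, y_test)
    print('Test loss:', loss, 'Test accuracy:', accuracy)
    prediction = model.predict(x_test[:1])
    print('Predicted digit for first test image:', np.argmax(prediction[0]))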
|
py | b4039141fbe443fcbe11c6e171ab062fdc3bf07b | # MIT License
#
# Copyright (c) 2019 SSL-Roots
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#!/usr/bin/env python
#encoding: utf8
import rospy, unittest, rostest
import rosnode
import time
class WorldObserverTest(unittest.TestCase):
def test_node_exist(self):
nodes = rosnode.get_node_names()
self.assertIn('/robot_0/observer', nodes, 'node does not exist')
if __name__ == '__main__':
    time.sleep(3) # wait for the node under test to start up
rospy.init_node('test_world_observer')
rostest.rosrun('world_observer', 'test_world_observer', WorldObserverTest)
|
py | b4039199f52065d70e3efb2501e3345c594eb0e7 | # -*- test-case-name: twisted.test.test_paths -*-
# Copyright (c) 2001-2008 Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Object-oriented filesystem path representation.
"""
import os
import errno
import random
import sha
import base64
from os.path import isabs, exists, normpath, abspath, splitext
from os.path import basename, dirname
from os.path import join as joinpath
from os import sep as slash
from os import listdir, utime, stat
from stat import S_ISREG, S_ISDIR
# Please keep this as light as possible on other Twisted imports; many, many
# things import this module, and it would be good if it could easily be
# modified for inclusion in the standard library. --glyph
from twisted.python.runtime import platform
from twisted.python.win32 import ERROR_FILE_NOT_FOUND, ERROR_PATH_NOT_FOUND
from twisted.python.win32 import ERROR_INVALID_NAME, ERROR_DIRECTORY
from twisted.python.win32 import WindowsError
def _stub_islink(path):
"""
Always return 'false' if the operating system does not support symlinks.
@param path: a path string.
@type path: L{str}
@return: false
"""
return False
def _stub_urandom(n):
"""
Provide random data in versions of Python prior to 2.4. This is an
effectively compatible replacement for 'os.urandom'.
@type n: L{int}
@param n: the number of bytes of data to return
@return: C{n} bytes of random data.
@rtype: str
"""
randomData = [random.randrange(256) for n in xrange(n)]
return ''.join(map(chr, randomData))
def _stub_armor(s):
"""
ASCII-armor for random data. This uses a hex encoding, although we will
prefer url-safe base64 encoding for features in this module if it is
available.
"""
return s.encode('hex')
islink = getattr(os.path, 'islink', _stub_islink)
randomBytes = getattr(os, 'urandom', _stub_urandom)
armor = getattr(base64, 'urlsafe_b64encode', _stub_armor)
class InsecurePath(Exception):
"""
Error that is raised when the path provided to FilePath is invalid.
"""
class LinkError(Exception):
"""
An error with symlinks - either that there are cyclical symlinks or that
symlink are not supported on this platform.
"""
class UnlistableError(OSError):
"""
An exception which is used to distinguish between errors which mean 'this
is not a directory you can list' and other, more catastrophic errors.
This error will try to look as much like the original error as possible,
while still being catchable as an independent type.
@ivar originalException: the actual original exception instance, either an
L{OSError} or a L{WindowsError}.
"""
def __init__(self, originalException):
"""
Create an UnlistableError exception.
@param originalException: an instance of OSError.
"""
self.__dict__.update(originalException.__dict__)
self.originalException = originalException
class _WindowsUnlistableError(UnlistableError, WindowsError):
"""
This exception is raised on Windows, for compatibility with previous
releases of FilePath where unportable programs may have done "except
WindowsError:" around a call to children().
It is private because all application code may portably catch
L{UnlistableError} instead.
"""
def _secureEnoughString():
"""
Create a pseudorandom, 16-character string for use in secure filenames.
"""
return armor(sha.new(randomBytes(64)).digest())[:16]
class _PathHelper:
"""
Abstract helper class also used by ZipPath; implements certain utility
methods.
"""
def getContent(self):
return self.open().read()
def children(self):
"""
        List the children of this path object.
@raise OSError: If an error occurs while listing the directory. If the
error is 'serious', meaning that the operation failed due to an access
violation, exhaustion of some kind of resource (file descriptors or
memory), OSError or a platform-specific variant will be raised.
@raise UnlistableError: If the inability to list the directory is due
to this path not existing or not being a directory, the more specific
OSError subclass L{UnlistableError} is raised instead.
@return: an iterable of all currently-existing children of this object
accessible with L{_PathHelper.child}.
"""
try:
subnames = self.listdir()
except WindowsError, winErrObj:
# WindowsError is an OSError subclass, so if not for this clause
# the OSError clause below would be handling these. Windows error
# codes aren't the same as POSIX error codes, so we need to handle
# them differently.
# Under Python 2.5 on Windows, WindowsError has a winerror
# attribute and an errno attribute. The winerror attribute is
# bound to the Windows error code while the errno attribute is
# bound to a translation of that code to a perhaps equivalent POSIX
# error number.
# Under Python 2.4 on Windows, WindowsError only has an errno
# attribute. It is bound to the Windows error code.
# For simplicity of code and to keep the number of paths through
# this suite minimal, we grab the Windows error code under either
# version.
# Furthermore, attempting to use os.listdir on a non-existent path
# in Python 2.4 will result in a Windows error code of
# ERROR_PATH_NOT_FOUND. However, in Python 2.5,
# ERROR_FILE_NOT_FOUND results instead. -exarkun
winerror = getattr(winErrObj, 'winerror', winErrObj.errno)
if winerror not in (ERROR_PATH_NOT_FOUND,
ERROR_FILE_NOT_FOUND,
ERROR_INVALID_NAME,
ERROR_DIRECTORY):
raise
raise _WindowsUnlistableError(winErrObj)
except OSError, ose:
if ose.errno not in (errno.ENOENT, errno.ENOTDIR):
# Other possible errors here, according to linux manpages:
# EACCES, EMIFLE, ENFILE, ENOMEM. None of these seem like the
# sort of thing which should be handled normally. -glyph
raise
raise UnlistableError(ose)
return map(self.child, subnames)
def walk(self, descend=None):
"""
Yield myself, then each of my children, and each of those children's
children in turn. The optional argument C{descend} is a predicate that
takes a FilePath, and determines whether or not that FilePath is
traversed/descended into. It will be called with each path for which
C{isdir} returns C{True}. If C{descend} is not specified, all
directories will be traversed (including symbolic links which refer to
directories).
@param descend: A one-argument callable that will return True for
FilePaths that should be traversed, False otherwise.
@return: a generator yielding FilePath-like objects.
"""
yield self
if self.isdir() and (descend is None or descend(self)):
for c in self.children():
for subc in c.walk(descend):
if os.path.realpath(self.path).startswith(
os.path.realpath(subc.path)):
raise LinkError("Cycle in file graph.")
yield subc
def sibling(self, path):
return self.parent().child(path)
def segmentsFrom(self, ancestor):
"""
Return a list of segments between a child and its ancestor.
For example, in the case of a path X representing /a/b/c/d and a path Y
representing /a/b, C{Y.segmentsFrom(X)} will return C{['c',
'd']}.
@param ancestor: an instance of the same class as self, ostensibly an
ancestor of self.
@raise: ValueError if the 'ancestor' parameter is not actually an
ancestor, i.e. a path for /x/y/z is passed as an ancestor for /a/b/c/d.
@return: a list of strs
"""
# this might be an unnecessarily inefficient implementation but it will
        # work on win32 and for zipfiles; later I will determine if the
        # obvious fast implementation does the right thing too
f = self
p = f.parent()
segments = []
while f != ancestor and p != f:
segments[0:0] = [f.basename()]
f = p
p = p.parent()
if f == ancestor and segments:
return segments
raise ValueError("%r not parent of %r" % (ancestor, self))
# new in 8.0
def __hash__(self):
"""
Hash the same as another FilePath with the same path as mine.
"""
return hash((self.__class__, self.path))
# pending deprecation in 8.0
def getmtime(self):
"""
Deprecated. Use getModificationTime instead.
"""
return int(self.getModificationTime())
def getatime(self):
"""
Deprecated. Use getAccessTime instead.
"""
return int(self.getAccessTime())
def getctime(self):
"""
Deprecated. Use getStatusChangeTime instead.
"""
return int(self.getStatusChangeTime())
class FilePath(_PathHelper):
"""
I am a path on the filesystem that only permits 'downwards' access.
Instantiate me with a pathname (for example,
FilePath('/home/myuser/public_html')) and I will attempt to only provide
access to files which reside inside that path. I may be a path to a file,
a directory, or a file which does not exist.
The correct way to use me is to instantiate me, and then do ALL filesystem
access through me. In other words, do not import the 'os' module; if you
need to open a file, call my 'open' method. If you need to list a
directory, call my 'path' method.
Even if you pass me a relative path, I will convert that to an absolute
path internally.
Note: although time-related methods do return floating-point results, they
may still be only second resolution depending on the platform and the last
value passed to L{os.stat_float_times}. If you want greater-than-second
precision, call C{os.stat_float_times(True)}, or use Python 2.5.
Greater-than-second precision is only available in Windows on Python2.5 and
later.
@type alwaysCreate: C{bool}
@ivar alwaysCreate: When opening this file, only succeed if the file does not
already exist.
"""
statinfo = None
path = None
def __init__(self, path, alwaysCreate=False):
self.path = abspath(path)
self.alwaysCreate = alwaysCreate
def __getstate__(self):
d = self.__dict__.copy()
if d.has_key('statinfo'):
del d['statinfo']
return d
def child(self, path):
if platform.isWindows() and path.count(":"):
# Catch paths like C:blah that don't have a slash
raise InsecurePath("%r contains a colon." % (path,))
norm = normpath(path)
if slash in norm:
raise InsecurePath("%r contains one or more directory separators" % (path,))
newpath = abspath(joinpath(self.path, norm))
if not newpath.startswith(self.path):
raise InsecurePath("%r is not a child of %s" % (newpath, self.path))
return self.clonePath(newpath)
def preauthChild(self, path):
"""
Use me if `path' might have slashes in it, but you know they're safe.
(NOT slashes at the beginning. It still needs to be a _child_).
"""
newpath = abspath(joinpath(self.path, normpath(path)))
if not newpath.startswith(self.path):
raise InsecurePath("%s is not a child of %s" % (newpath, self.path))
return self.clonePath(newpath)
def childSearchPreauth(self, *paths):
"""Return my first existing child with a name in 'paths'.
paths is expected to be a list of *pre-secured* path fragments; in most
cases this will be specified by a system administrator and not an
arbitrary user.
If no appropriately-named children exist, this will return None.
"""
p = self.path
for child in paths:
jp = joinpath(p, child)
if exists(jp):
return self.clonePath(jp)
def siblingExtensionSearch(self, *exts):
"""Attempt to return a path with my name, given multiple possible
extensions.
Each extension in exts will be tested and the first path which exists
will be returned. If no path exists, None will be returned. If '' is
in exts, then if the file referred to by this path exists, 'self' will
be returned.
The extension '*' has a magic meaning, which means "any path that
begins with self.path+'.' is acceptable".
"""
p = self.path
for ext in exts:
if not ext and self.exists():
return self
if ext == '*':
basedot = basename(p)+'.'
for fn in listdir(dirname(p)):
if fn.startswith(basedot):
return self.clonePath(joinpath(dirname(p), fn))
p2 = p + ext
if exists(p2):
return self.clonePath(p2)
def realpath(self):
"""
Returns the absolute target as a FilePath if self is a link, self
otherwise. The absolute link is the ultimate file or directory the
link refers to (for instance, if the link refers to another link, and
another...). If the filesystem does not support symlinks, or
if the link is cyclical, raises a LinkError.
Behaves like L{os.path.realpath} in that it does not resolve link
names in the middle (ex. /x/y/z, y is a link to w - realpath on z
will return /x/y/z, not /x/w/z).
@return: FilePath of the target path
@raises LinkError: if links are not supported or links are cyclical.
"""
if self.islink():
result = os.path.realpath(self.path)
if result == self.path:
raise LinkError("Cyclical link - will loop forever")
return self.clonePath(result)
return self
def siblingExtension(self, ext):
return self.clonePath(self.path+ext)
def linkTo(self, linkFilePath):
"""
Creates a symlink to self to at the path in the L{FilePath}
C{linkFilePath}. Only works on posix systems due to its dependence on
C{os.symlink}. Propagates C{OSError}s up from C{os.symlink} if
C{linkFilePath.parent()} does not exist, or C{linkFilePath} already
exists.
@param linkFilePath: a FilePath representing the link to be created
@type linkFilePath: L{FilePath}
"""
os.symlink(self.path, linkFilePath.path)
def open(self, mode='r'):
if self.alwaysCreate:
assert 'a' not in mode, "Appending not supported when alwaysCreate == True"
return self.create()
return open(self.path, mode+'b')
# stat methods below
def restat(self, reraise=True):
"""
Re-calculate cached effects of 'stat'. To refresh information on this path
after you know the filesystem may have changed, call this method.
@param reraise: a boolean. If true, re-raise exceptions from
L{os.stat}; otherwise, mark this path as not existing, and remove any
cached stat information.
"""
try:
self.statinfo = stat(self.path)
except OSError:
self.statinfo = 0
if reraise:
raise
def chmod(self, mode):
"""
Changes the permissions on self, if possible. Propagates errors from
C{os.chmod} up.
@param mode: integer representing the new permissions desired (same as
the command line chmod)
@type mode: C{int}
"""
os.chmod(self.path, mode)
def getsize(self):
st = self.statinfo
if not st:
self.restat()
st = self.statinfo
return st.st_size
def getModificationTime(self):
"""
Retrieve the time of last access from this file.
@return: a number of seconds from the epoch.
@rtype: float
"""
st = self.statinfo
if not st:
self.restat()
st = self.statinfo
return float(st.st_mtime)
def getStatusChangeTime(self):
"""
Retrieve the time of the last status change for this file.
@return: a number of seconds from the epoch.
@rtype: float
"""
st = self.statinfo
if not st:
self.restat()
st = self.statinfo
return float(st.st_ctime)
def getAccessTime(self):
"""
Retrieve the time that this file was last accessed.
@return: a number of seconds from the epoch.
@rtype: float
"""
st = self.statinfo
if not st:
self.restat()
st = self.statinfo
return float(st.st_atime)
def exists(self):
"""
Check if the C{path} exists.
@return: C{True} if the stats of C{path} can be retrieved successfully,
C{False} in the other cases.
@rtype: C{bool}
"""
if self.statinfo:
return True
else:
self.restat(False)
if self.statinfo:
return True
else:
return False
def isdir(self):
st = self.statinfo
if not st:
self.restat(False)
st = self.statinfo
if not st:
return False
return S_ISDIR(st.st_mode)
def isfile(self):
st = self.statinfo
if not st:
self.restat(False)
st = self.statinfo
if not st:
return False
return S_ISREG(st.st_mode)
def islink(self):
# We can't use cached stat results here, because that is the stat of
# the destination - (see #1773) which in *every case* but this one is
# the right thing to use. We could call lstat here and use that, but
# it seems unlikely we'd actually save any work that way. -glyph
return islink(self.path)
def isabs(self):
return isabs(self.path)
def listdir(self):
return listdir(self.path)
def splitext(self):
return splitext(self.path)
def __repr__(self):
return 'FilePath(%r)' % (self.path,)
def touch(self):
try:
self.open('a').close()
except IOError:
pass
utime(self.path, None)
def remove(self):
"""
Removes the file or directory that is represented by self. If
C{self.path} is a directory, recursively remove all its children
before removing the directory. If it's a file or link, just delete
it.
"""
if self.isdir() and not self.islink():
for child in self.children():
child.remove()
os.rmdir(self.path)
else:
os.remove(self.path)
self.restat(False)
def makedirs(self):
"""
Create all directories not yet existing in C{path} segments, using
C{os.makedirs}.
"""
return os.makedirs(self.path)
def globChildren(self, pattern):
"""
Assuming I am representing a directory, return a list of
FilePaths representing my children that match the given
pattern.
"""
import glob
path = self.path[-1] == '/' and self.path + pattern or slash.join([self.path, pattern])
return map(self.clonePath, glob.glob(path))
def basename(self):
return basename(self.path)
def dirname(self):
return dirname(self.path)
def parent(self):
return self.clonePath(self.dirname())
def setContent(self, content, ext='.new'):
sib = self.siblingExtension(ext)
f = sib.open('w')
f.write(content)
f.close()
if platform.isWindows() and exists(self.path):
os.unlink(self.path)
os.rename(sib.path, self.path)
# new in 2.2.0
def __cmp__(self, other):
if not isinstance(other, FilePath):
return NotImplemented
return cmp(self.path, other.path)
def createDirectory(self):
os.mkdir(self.path)
def requireCreate(self, val=1):
self.alwaysCreate = val
def create(self):
"""Exclusively create a file, only if this file previously did not exist.
"""
fdint = os.open(self.path, (os.O_EXCL |
os.O_CREAT |
os.O_RDWR))
# XXX TODO: 'name' attribute of returned files is not mutable or
        # settable via fdopen, so this file is slightly less functional than the
# one returned from 'open' by default. send a patch to Python...
return os.fdopen(fdint, 'w+b')
def temporarySibling(self):
"""
Create a path naming a temporary sibling of this path in a secure fashion.
"""
sib = self.sibling(_secureEnoughString() + self.basename())
sib.requireCreate()
return sib
_chunkSize = 2 ** 2 ** 2 ** 2
def copyTo(self, destination, followLinks=True):
"""
Copies self to destination.
If self is a directory, this method copies its children (but not
itself) recursively to destination - if destination does not exist as a
directory, this method creates it. If destination is a file, an
IOError will be raised.
If self is a file, this method copies it to destination. If
destination is a file, this method overwrites it. If destination is a
directory, an IOError will be raised.
If self is a link (and followLinks is False), self will be copied
over as a new symlink with the same target as returned by os.readlink.
That means that if it is absolute, both the old and new symlink will
link to the same thing. If it's relative, then perhaps not (and
it's also possible that this relative link will be broken).
File/directory permissions and ownership will NOT be copied over.
If followLinks is True, symlinks are followed so that they're treated
as their targets. In other words, if self is a link, the link's target
will be copied. If destination is a link, self will be copied to the
destination's target (the actual destination will be destination's
target). Symlinks under self (if self is a directory) will be
followed and its target's children be copied recursively.
If followLinks is False, symlinks will be copied over as symlinks.
@param destination: the destination (a FilePath) to which self
should be copied
@param followLinks: whether symlinks in self should be treated as links
or as their targets
"""
if self.islink() and not followLinks:
os.symlink(os.readlink(self.path), destination.path)
return
# XXX TODO: *thorough* audit and documentation of the exact desired
# semantics of this code. Right now the behavior of existent
# destination symlinks is convenient, and quite possibly correct, but
# its security properties need to be explained.
if self.isdir():
if not destination.exists():
destination.createDirectory()
for child in self.children():
destChild = destination.child(child.basename())
child.copyTo(destChild, followLinks)
elif self.isfile():
writefile = destination.open('w')
readfile = self.open()
while 1:
# XXX TODO: optionally use os.open, os.read and O_DIRECT and
# use os.fstatvfs to determine chunk sizes and make
# *****sure**** copy is page-atomic; the following is good
# enough for 99.9% of everybody and won't take a week to audit
# though.
chunk = readfile.read(self._chunkSize)
writefile.write(chunk)
if len(chunk) < self._chunkSize:
break
writefile.close()
readfile.close()
else:
# If you see the following message because you want to copy
# symlinks, fifos, block devices, character devices, or unix
# sockets, please feel free to add support to do sensible things in
# reaction to those types!
raise NotImplementedError(
"Only copying of files and directories supported")
def moveTo(self, destination, followLinks=True):
"""
Move self to destination - basically renaming self to whatever
destination is named. If destination is an already-existing directory,
moves all children to destination if destination is empty. If
destination is a non-empty directory, or destination is a file, an
OSError will be raised.
If moving between filesystems, self needs to be copied, and everything
that applies to copyTo applies to moveTo.
@param destination: the destination (a FilePath) to which self
should be copied
@param followLinks: whether symlinks in self should be treated as links
or as their targets (only applicable when moving between
filesystems)
"""
try:
os.rename(self.path, destination.path)
self.restat(False)
except OSError, ose:
if ose.errno == errno.EXDEV:
# man 2 rename, ubuntu linux 5.10 "breezy":
# oldpath and newpath are not on the same mounted filesystem.
# (Linux permits a filesystem to be mounted at multiple
# points, but rename(2) does not work across different mount
# points, even if the same filesystem is mounted on both.)
# that means it's time to copy trees of directories!
secsib = destination.temporarySibling()
self.copyTo(secsib, followLinks) # slow
secsib.moveTo(destination, followLinks) # visible
# done creating new stuff. let's clean me up.
mysecsib = self.temporarySibling()
self.moveTo(mysecsib, followLinks) # visible
mysecsib.remove() # slow
else:
raise
FilePath.clonePath = FilePath
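# Minimal usage sketch: create a child file under a base directory, write to
# it and read it back. The directory name is an arbitrary example.
def _exampleUsage():
    base = FilePath("/tmp/filepath-example")
    if not base.exists():
        base.makedirs()
    child = base.child("greeting.txt")
    child.setContent("hello world")
    return child.getContent()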
|
py | b403932ba10d576791a359f682826a0ae72e1b78 | #!/usr/bin/env python
import asyncio
from app import beer_bot
async def main():
beer_bot.run()
if __name__ == "__main__":
asyncio.run(main())
|
py | b403934f3760fddc931595bb4d785eb85f39ff93 | #!/usr/bin/python
# -*- coding: UTF-8 -*-
#
# Created by wuxinwang
#
import h5py
import numpy as np
import pandas as pd
from .utils import uv2sd
def read_windadata(file_path):
"""
    This function is used to read wind speed and wind direction from a winddata file
    in HDF format.
    :param file_path: the path of the winddata file, which has the suffix '.hdf'
    :return: The data and date returned from this function are used to create
    LITTLE_R-format data for WRFDA
"""
# ===========================================================================
# Read HDF5 file.
f = h5py.File(file_path, "r") # mode = {'w', 'r', 'a'}
lat = np.array(f['latitude'][:]).astype(np.float).flatten()
lon = np.array(f['longitude'][:]).astype(np.float).flatten()
date = str.split(f.filename, '/')[-1][4:-4]
uwnd = np.array(f['uwnd'][:]).astype(np.float).flatten()
vwnd = np.array(f['vwnd'][:]).astype(np.float).flatten()
wspd, wdir = uv2sd(uwnd, vwnd)
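    # Replace entries derived from the raw -9999 missing-value sentinel with
    # -888888.0, the missing-data flag expected by the LITTLE_R/WRFDA format.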
for i in range(len(uwnd)):
if uwnd[i] == -9999 or vwnd[i] == -9999:
wspd[i] = -888888.0
wdir[i] = -888888.0
data = {
'lat': lat,
'lon': lon,
'wind_speed': wspd,
'wind_dir': wdir
}
df = pd.DataFrame(data)
# Save and exit the file
f.close()
return df, date |
py | b4039355e6d3e9c4a7725562bb17939169a341a2 | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Union
from .. import utilities, tables
class HostedPublicVirtualInterface(pulumi.CustomResource):
address_family: pulumi.Output[str]
"""
    The address family for the BGP peer. `ipv4` or `ipv6`.
"""
amazon_address: pulumi.Output[str]
"""
The IPv4 CIDR address to use to send traffic to Amazon. Required for IPv4 BGP peers.
"""
amazon_side_asn: pulumi.Output[str]
arn: pulumi.Output[str]
"""
The ARN of the virtual interface.
"""
aws_device: pulumi.Output[str]
"""
The Direct Connect endpoint on which the virtual interface terminates.
"""
bgp_asn: pulumi.Output[float]
"""
The autonomous system (AS) number for Border Gateway Protocol (BGP) configuration.
"""
bgp_auth_key: pulumi.Output[str]
"""
The authentication key for BGP configuration.
"""
connection_id: pulumi.Output[str]
"""
The ID of the Direct Connect connection (or LAG) on which to create the virtual interface.
"""
customer_address: pulumi.Output[str]
"""
The IPv4 CIDR destination address to which Amazon should send traffic. Required for IPv4 BGP peers.
"""
name: pulumi.Output[str]
"""
The name for the virtual interface.
"""
owner_account_id: pulumi.Output[str]
"""
The AWS account that will own the new virtual interface.
"""
route_filter_prefixes: pulumi.Output[list]
"""
A list of routes to be advertised to the AWS network in this region.
"""
vlan: pulumi.Output[float]
"""
The VLAN ID.
"""
def __init__(__self__, resource_name, opts=None, address_family=None, amazon_address=None, bgp_asn=None, bgp_auth_key=None, connection_id=None, customer_address=None, name=None, owner_account_id=None, route_filter_prefixes=None, vlan=None, __props__=None, __name__=None, __opts__=None):
"""
Provides a Direct Connect hosted public virtual interface resource. This resource represents the allocator's side of the hosted virtual interface.
A hosted virtual interface is a virtual interface that is owned by another AWS account.
## Example Usage
```python
import pulumi
import pulumi_aws as aws
foo = aws.directconnect.HostedPublicVirtualInterface("foo",
address_family="ipv4",
amazon_address="175.45.176.2/30",
bgp_asn=65352,
connection_id="dxcon-zzzzzzzz",
customer_address="175.45.176.1/30",
route_filter_prefixes=[
"210.52.109.0/24",
"175.45.176.0/22",
],
vlan=4094)
```
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[str] address_family: The address family for the BGP peer. `ipv4` or `ipv6`.
:param pulumi.Input[str] amazon_address: The IPv4 CIDR address to use to send traffic to Amazon. Required for IPv4 BGP peers.
:param pulumi.Input[float] bgp_asn: The autonomous system (AS) number for Border Gateway Protocol (BGP) configuration.
:param pulumi.Input[str] bgp_auth_key: The authentication key for BGP configuration.
:param pulumi.Input[str] connection_id: The ID of the Direct Connect connection (or LAG) on which to create the virtual interface.
:param pulumi.Input[str] customer_address: The IPv4 CIDR destination address to which Amazon should send traffic. Required for IPv4 BGP peers.
:param pulumi.Input[str] name: The name for the virtual interface.
:param pulumi.Input[str] owner_account_id: The AWS account that will own the new virtual interface.
:param pulumi.Input[list] route_filter_prefixes: A list of routes to be advertised to the AWS network in this region.
:param pulumi.Input[float] vlan: The VLAN ID.
"""
if __name__ is not None:
warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
resource_name = __name__
if __opts__ is not None:
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = dict()
if address_family is None:
raise TypeError("Missing required property 'address_family'")
__props__['address_family'] = address_family
__props__['amazon_address'] = amazon_address
if bgp_asn is None:
raise TypeError("Missing required property 'bgp_asn'")
__props__['bgp_asn'] = bgp_asn
__props__['bgp_auth_key'] = bgp_auth_key
if connection_id is None:
raise TypeError("Missing required property 'connection_id'")
__props__['connection_id'] = connection_id
__props__['customer_address'] = customer_address
__props__['name'] = name
if owner_account_id is None:
raise TypeError("Missing required property 'owner_account_id'")
__props__['owner_account_id'] = owner_account_id
if route_filter_prefixes is None:
raise TypeError("Missing required property 'route_filter_prefixes'")
__props__['route_filter_prefixes'] = route_filter_prefixes
if vlan is None:
raise TypeError("Missing required property 'vlan'")
__props__['vlan'] = vlan
__props__['amazon_side_asn'] = None
__props__['arn'] = None
__props__['aws_device'] = None
super(HostedPublicVirtualInterface, __self__).__init__(
'aws:directconnect/hostedPublicVirtualInterface:HostedPublicVirtualInterface',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name, id, opts=None, address_family=None, amazon_address=None, amazon_side_asn=None, arn=None, aws_device=None, bgp_asn=None, bgp_auth_key=None, connection_id=None, customer_address=None, name=None, owner_account_id=None, route_filter_prefixes=None, vlan=None):
"""
Get an existing HostedPublicVirtualInterface resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param str id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[str] address_family: The address family for the BGP peer. `ipv4` or `ipv6`.
:param pulumi.Input[str] amazon_address: The IPv4 CIDR address to use to send traffic to Amazon. Required for IPv4 BGP peers.
:param pulumi.Input[str] arn: The ARN of the virtual interface.
:param pulumi.Input[str] aws_device: The Direct Connect endpoint on which the virtual interface terminates.
:param pulumi.Input[float] bgp_asn: The autonomous system (AS) number for Border Gateway Protocol (BGP) configuration.
:param pulumi.Input[str] bgp_auth_key: The authentication key for BGP configuration.
:param pulumi.Input[str] connection_id: The ID of the Direct Connect connection (or LAG) on which to create the virtual interface.
:param pulumi.Input[str] customer_address: The IPv4 CIDR destination address to which Amazon should send traffic. Required for IPv4 BGP peers.
:param pulumi.Input[str] name: The name for the virtual interface.
:param pulumi.Input[str] owner_account_id: The AWS account that will own the new virtual interface.
:param pulumi.Input[list] route_filter_prefixes: A list of routes to be advertised to the AWS network in this region.
:param pulumi.Input[float] vlan: The VLAN ID.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = dict()
__props__["address_family"] = address_family
__props__["amazon_address"] = amazon_address
__props__["amazon_side_asn"] = amazon_side_asn
__props__["arn"] = arn
__props__["aws_device"] = aws_device
__props__["bgp_asn"] = bgp_asn
__props__["bgp_auth_key"] = bgp_auth_key
__props__["connection_id"] = connection_id
__props__["customer_address"] = customer_address
__props__["name"] = name
__props__["owner_account_id"] = owner_account_id
__props__["route_filter_prefixes"] = route_filter_prefixes
__props__["vlan"] = vlan
return HostedPublicVirtualInterface(resource_name, opts=opts, __props__=__props__)
def translate_output_property(self, prop):
return tables._CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
def translate_input_property(self, prop):
return tables._SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
|
py | b40393b675a0a6d9bde25e78021584a9df3ac15b | #! /usr/bin/env python
# this import must comes first to make sure we use the non-display backend
import matplotlib
matplotlib.use('Agg')
# add parent folder to search path, to enable import of core modules like settings
import os,sys,inspect
currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
parentdir = os.path.dirname(currentdir)
sys.path.insert(0,parentdir)
import argparse
#import ipdb as pdb
import cPickle as pickle
import settings
from caffevis.caffevis_helper import set_mean
from siamese_helper import SiameseHelper
from jby_misc import WithTimer
from max_tracker import output_max_patches
from find_max_acts import load_max_tracker_from_file
from settings_misc import load_network
def main():
parser = argparse.ArgumentParser(description='Loads a pickled NetMaxTracker and outputs one or more of {the patches of the image, a deconv patch, a backprop patch} associated with the maxes.')
parser.add_argument('--N', type = int, default = 9, help = 'Note and save top N activations.')
parser.add_argument('--gpu', action = 'store_true', default=settings.caffevis_mode_gpu, help = 'Use gpu.')
parser.add_argument('--do-maxes', action = 'store_true', default=settings.max_tracker_do_maxes, help = 'Output max patches.')
parser.add_argument('--do-deconv', action = 'store_true', default=settings.max_tracker_do_deconv, help = 'Output deconv patches.')
parser.add_argument('--do-deconv-norm', action = 'store_true', default=settings.max_tracker_do_deconv_norm, help = 'Output deconv-norm patches.')
parser.add_argument('--do-backprop', action = 'store_true', default=settings.max_tracker_do_backprop, help = 'Output backprop patches.')
parser.add_argument('--do-backprop-norm', action = 'store_true', default=settings.max_tracker_do_backprop_norm, help = 'Output backprop-norm patches.')
parser.add_argument('--do-info', action = 'store_true', default=settings.max_tracker_do_info, help = 'Output info file containing max filenames and labels.')
parser.add_argument('--idx-begin', type = int, default = None, help = 'Start at this unit (default: all units).')
parser.add_argument('--idx-end', type = int, default = None, help = 'End at this unit (default: all units).')
parser.add_argument('--nmt_pkl', type = str, default = os.path.join(settings.caffevis_outputs_dir, 'find_max_acts_output.pickled'), help = 'Which pickled NetMaxTracker to load.')
parser.add_argument('--net_prototxt', type = str, default = settings.caffevis_deploy_prototxt, help = 'network prototxt to load')
parser.add_argument('--net_weights', type = str, default = settings.caffevis_network_weights, help = 'network weights to load')
parser.add_argument('--datadir', type = str, default = settings.static_files_dir, help = 'directory to look for files in')
parser.add_argument('--filelist', type = str, default = settings.static_files_input_file, help = 'List of image files to consider, one per line. Must be the same filelist used to produce the NetMaxTracker!')
parser.add_argument('--outdir', type = str, default = settings.caffevis_outputs_dir, help = 'Which output directory to use. Files are output into outdir/layer/unit_%%04d/{maxes,deconv,backprop}_%%03d.png')
parser.add_argument('--search-min', action='store_true', default=False, help='Should we also search for minimal activations?')
args = parser.parse_args()
settings.caffevis_deploy_prototxt = args.net_prototxt
settings.caffevis_network_weights = args.net_weights
net, data_mean = load_network(settings)
# validate batch size
if settings.is_siamese and settings._calculated_siamese_network_format == 'siamese_batch_pair':
# currently, no batch support for siamese_batch_pair networks
# it can be added by simply handle the batch indexes properly, but it should be thoroughly tested
assert (settings.max_tracker_batch_size == 1)
# set network batch size
current_input_shape = net.blobs[net.inputs[0]].shape
current_input_shape[0] = settings.max_tracker_batch_size
net.blobs[net.inputs[0]].reshape(*current_input_shape)
net.reshape()
assert args.do_maxes or args.do_deconv or args.do_deconv_norm or args.do_backprop or args.do_backprop_norm or args.do_info, 'Specify at least one do_* option to output.'
siamese_helper = SiameseHelper(settings.layers_list)
nmt = load_max_tracker_from_file(args.nmt_pkl)
for layer_name in settings.layers_to_output_in_offline_scripts:
print 'Started work on layer %s' % (layer_name)
normalized_layer_name = siamese_helper.normalize_layer_name_for_max_tracker(layer_name)
mt = nmt.max_trackers[normalized_layer_name]
        # Honour --idx-begin/--idx-end when given; otherwise cover all units.
        idx_begin = args.idx_begin if args.idx_begin is not None else 0
        idx_end = args.idx_end if args.idx_end is not None else mt.max_vals.shape[0]
with WithTimer('Saved %d images per unit for %s units %d:%d.' % (args.N, normalized_layer_name, idx_begin, idx_end)):
output_max_patches(settings, mt, net, normalized_layer_name, idx_begin, idx_end,
args.N, args.datadir, args.filelist, args.outdir, False,
(args.do_maxes, args.do_deconv, args.do_deconv_norm, args.do_backprop, args.do_backprop_norm, args.do_info))
if args.search_min:
output_max_patches(settings, mt, net, normalized_layer_name, idx_begin, idx_end,
args.N, args.datadir, args.filelist, args.outdir, True,
(args.do_maxes, args.do_deconv, args.do_deconv_norm, args.do_backprop, args.do_backprop_norm, args.do_info))
if __name__ == '__main__':
main()
|
py | b40394c823072ad1863b0662dbf25e23c0a4ff7b | import requests
import base64
from constants import URL, IMAGE_EXTENSION
"""
http://<URL>/image (POST)
"""
def add_image(image_id):
url = URL + 'image'
json_file = dict()
json_file['id'] = image_id
    # Read the image and base64-encode it for the JSON payload.
    with open(image_id + IMAGE_EXTENSION, 'rb') as image:
        image_read = image.read()
    # base64.encodebytes replaces the deprecated encodestring (removed in
    # Python 3.9) and produces the same newline-wrapped base64 output.
    image_64_encode = base64.encodebytes(image_read).decode("utf-8")
    json_file['image_data'] = image_64_encode
    response = requests.post(url=url, json=json_file)
    print(f"URL: {url} - Response: {response.content} - Status Code: {response.status_code}")
if __name__ == '__main__':
image_id = "example"
add_image(image_id)
|
py | b4039550962d2e12fd535c17a8f3d2e215204c36 | # Copyright (c) 2020 - present Vitor Oriel <https://github.com/VitorOriel>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
APP_VERSION = {
'MAJOR_VERSION': 3,
"MINOR_VERSION": 12,
"PATCH": 0
}
def version():
    # Build the dotted version string from the components above.
    return "{0}.{1}.{2}".format(APP_VERSION['MAJOR_VERSION'],
                                APP_VERSION['MINOR_VERSION'],
                                APP_VERSION['PATCH']) |
py | b403960abab0011fdf01f4778133c52409c3b762 | from __future__ import absolute_import, division, print_function
import functools
from unittest import TestCase
import pytest
import torch
import torch.nn as nn
from torch.autograd import Variable
import pyro
import pyro.distributions as dist
import pyro.poutine as poutine
from pyro.distributions import Bernoulli, Normal
from pyro.util import NonlocalExit, all_escape, discrete_escape, ng_ones, ng_zeros
from six.moves.queue import Queue
from tests.common import assert_equal
def eq(x, y, prec=1e-10):
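    # Whole-tensor closeness check: true when the L2 distance between x and y
    # is below prec.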
return (torch.norm(x - y).data[0] < prec)
# XXX name is a bit silly
class NormalNormalNormalPoutineTestCase(TestCase):
def setUp(self):
pyro.clear_param_store()
def model():
latent1 = pyro.sample("latent1",
Normal(Variable(torch.zeros(2)),
Variable(torch.ones(2))))
latent2 = pyro.sample("latent2",
Normal(latent1,
5 * Variable(torch.ones(2))))
x_dist = Normal(latent2, Variable(torch.ones(2)))
pyro.observe("obs", x_dist, Variable(torch.ones(2)))
return latent1
def guide():
mu1 = pyro.param("mu1", Variable(torch.randn(2), requires_grad=True))
sigma1 = pyro.param("sigma1", Variable(torch.ones(2), requires_grad=True))
pyro.sample("latent1", Normal(mu1, sigma1))
mu2 = pyro.param("mu2", Variable(torch.randn(2), requires_grad=True))
sigma2 = pyro.param("sigma2", Variable(torch.ones(2), requires_grad=True))
latent2 = pyro.sample("latent2", Normal(mu2, sigma2))
return latent2
self.model = model
self.guide = guide
self.model_sites = ["latent1", "latent2",
"obs",
"_INPUT", "_RETURN"]
self.guide_sites = ["latent1", "latent2",
"mu1", "sigma1",
"mu2", "sigma2",
"_INPUT", "_RETURN"]
self.full_sample_sites = {"latent1": "latent1", "latent2": "latent2"}
self.partial_sample_sites = {"latent1": "latent1"}
class TracePoutineTests(NormalNormalNormalPoutineTestCase):
def test_trace_full(self):
guide_trace = poutine.trace(self.guide).get_trace()
model_trace = poutine.trace(self.model).get_trace()
for name in model_trace.nodes.keys():
assert name in self.model_sites
for name in guide_trace.nodes.keys():
assert name in self.guide_sites
assert guide_trace.nodes[name]["type"] in \
("args", "return", "sample", "param")
if guide_trace.nodes[name]["type"] == "sample":
assert not guide_trace.nodes[name]["is_observed"]
def test_trace_return(self):
model_trace = poutine.trace(self.model).get_trace()
assert_equal(model_trace.nodes["latent1"]["value"],
model_trace.nodes["_RETURN"]["value"])
class ReplayPoutineTests(NormalNormalNormalPoutineTestCase):
def test_replay_full(self):
guide_trace = poutine.trace(self.guide).get_trace()
model_trace = poutine.trace(poutine.replay(self.model, guide_trace)).get_trace()
for name in self.full_sample_sites.keys():
assert_equal(model_trace.nodes[name]["value"],
guide_trace.nodes[name]["value"])
def test_replay_partial(self):
guide_trace = poutine.trace(self.guide).get_trace()
model_trace = poutine.trace(poutine.replay(self.model,
guide_trace,
sites=self.partial_sample_sites)).get_trace()
for name in self.full_sample_sites.keys():
if name in self.partial_sample_sites:
assert_equal(model_trace.nodes[name]["value"],
guide_trace.nodes[name]["value"])
else:
assert not eq(model_trace.nodes[name]["value"],
guide_trace.nodes[name]["value"])
def test_replay_full_repeat(self):
model_trace = poutine.trace(self.model).get_trace()
ftr = poutine.trace(poutine.replay(self.model, model_trace))
tr11 = ftr.get_trace()
tr12 = ftr.get_trace()
tr2 = poutine.trace(poutine.replay(self.model, model_trace)).get_trace()
for name in self.full_sample_sites.keys():
assert_equal(tr11.nodes[name]["value"], tr12.nodes[name]["value"])
assert_equal(tr11.nodes[name]["value"], tr2.nodes[name]["value"])
assert_equal(model_trace.nodes[name]["value"], tr11.nodes[name]["value"])
assert_equal(model_trace.nodes[name]["value"], tr2.nodes[name]["value"])
class BlockPoutineTests(NormalNormalNormalPoutineTestCase):
def test_block_full(self):
model_trace = poutine.trace(poutine.block(self.model)).get_trace()
guide_trace = poutine.trace(poutine.block(self.guide)).get_trace()
for name in model_trace.nodes.keys():
assert model_trace.nodes[name]["type"] in ("args", "return")
for name in guide_trace.nodes.keys():
assert guide_trace.nodes[name]["type"] in ("args", "return")
def test_block_full_hide(self):
model_trace = poutine.trace(poutine.block(self.model,
hide=self.model_sites)).get_trace()
guide_trace = poutine.trace(poutine.block(self.guide,
hide=self.guide_sites)).get_trace()
for name in model_trace.nodes.keys():
assert model_trace.nodes[name]["type"] in ("args", "return")
for name in guide_trace.nodes.keys():
assert guide_trace.nodes[name]["type"] in ("args", "return")
def test_block_full_expose(self):
model_trace = poutine.trace(poutine.block(self.model,
expose=self.model_sites)).get_trace()
guide_trace = poutine.trace(poutine.block(self.guide,
expose=self.guide_sites)).get_trace()
for name in self.model_sites:
assert name in model_trace
for name in self.guide_sites:
assert name in guide_trace
def test_block_full_hide_expose(self):
try:
poutine.block(self.model,
hide=self.partial_sample_sites.keys(),
expose=self.partial_sample_sites.keys())()
assert False
except AssertionError:
assert True
def test_block_partial_hide(self):
model_trace = poutine.trace(
poutine.block(self.model, hide=self.partial_sample_sites.keys())).get_trace()
guide_trace = poutine.trace(
poutine.block(self.guide, hide=self.partial_sample_sites.keys())).get_trace()
for name in self.full_sample_sites.keys():
if name in self.partial_sample_sites:
assert name not in model_trace
assert name not in guide_trace
else:
assert name in model_trace
assert name in guide_trace
def test_block_partial_expose(self):
model_trace = poutine.trace(
poutine.block(self.model, expose=self.partial_sample_sites.keys())).get_trace()
guide_trace = poutine.trace(
poutine.block(self.guide, expose=self.partial_sample_sites.keys())).get_trace()
for name in self.full_sample_sites.keys():
if name in self.partial_sample_sites:
assert name in model_trace
assert name in guide_trace
else:
assert name not in model_trace
assert name not in guide_trace
class QueuePoutineDiscreteTest(TestCase):
def setUp(self):
# simple Gaussian-mixture HMM
def model():
ps = pyro.param("ps", Variable(torch.Tensor([[0.8], [0.3]])))
mu = pyro.param("mu", Variable(torch.Tensor([[-0.1], [0.9]])))
sigma = Variable(torch.ones(1, 1))
latents = [Variable(torch.ones(1))]
observes = []
for t in range(3):
latents.append(
pyro.sample("latent_{}".format(str(t)),
Bernoulli(ps[latents[-1][0].long().data])))
observes.append(
pyro.observe("observe_{}".format(str(t)),
Normal(mu[latents[-1][0].long().data], sigma),
pyro.ones(1)))
return latents
self.sites = ["observe_{}".format(str(t)) for t in range(3)] + \
["latent_{}".format(str(t)) for t in range(3)] + \
["_INPUT", "_RETURN"]
self.model = model
self.queue = Queue()
self.queue.put(poutine.Trace())
def test_queue_single(self):
f = poutine.trace(poutine.queue(self.model, queue=self.queue))
tr = f.get_trace()
for name in self.sites:
assert name in tr
def test_queue_enumerate(self):
f = poutine.trace(poutine.queue(self.model, queue=self.queue))
trs = []
while not self.queue.empty():
trs.append(f.get_trace())
assert len(trs) == 2 ** 3
true_latents = set()
for i1 in range(2):
for i2 in range(2):
for i3 in range(2):
true_latents.add((i1, i2, i3))
tr_latents = []
for tr in trs:
tr_latents.append(tuple([int(tr.nodes[name]["value"].view(-1).data[0]) for name in tr
if tr.nodes[name]["type"] == "sample" and
not tr.nodes[name]["is_observed"]]))
assert true_latents == set(tr_latents)
def test_queue_max_tries(self):
f = poutine.queue(self.model, queue=self.queue, max_tries=3)
with pytest.raises(ValueError):
f()
class Model(nn.Module):
def __init__(self):
super(Model, self).__init__()
self.fc = nn.Linear(2, 1)
def forward(self, x):
return self.fc(x)
class LiftPoutineTests(TestCase):
def setUp(self):
pyro.clear_param_store()
def mu1_prior(tensor, *args, **kwargs):
flat_tensor = tensor.view(-1)
m = Variable(torch.zeros(flat_tensor.size(0)))
s = Variable(torch.ones(flat_tensor.size(0)))
return Normal(m, s).sample().view(tensor.size())
def sigma1_prior(tensor, *args, **kwargs):
flat_tensor = tensor.view(-1)
m = Variable(torch.zeros(flat_tensor.size(0)))
s = Variable(torch.ones(flat_tensor.size(0)))
return Normal(m, s).sample().view(tensor.size())
def mu2_prior(tensor, *args, **kwargs):
flat_tensor = tensor.view(-1)
m = Variable(torch.zeros(flat_tensor.size(0)))
return Bernoulli(m).sample().view(tensor.size())
def sigma2_prior(tensor, *args, **kwargs):
return sigma1_prior(tensor)
def bias_prior(tensor, *args, **kwargs):
return mu2_prior(tensor)
def weight_prior(tensor, *args, **kwargs):
return sigma1_prior(tensor)
def stoch_fn(tensor, *args, **kwargs):
mu = Variable(torch.zeros(tensor.size()))
sigma = Variable(torch.ones(tensor.size()))
return pyro.sample("sample", Normal(mu, sigma))
def guide():
mu1 = pyro.param("mu1", Variable(torch.randn(2), requires_grad=True))
sigma1 = pyro.param("sigma1", Variable(torch.ones(2), requires_grad=True))
pyro.sample("latent1", Normal(mu1, sigma1))
mu2 = pyro.param("mu2", Variable(torch.randn(2), requires_grad=True))
sigma2 = pyro.param("sigma2", Variable(torch.ones(2), requires_grad=True))
latent2 = pyro.sample("latent2", Normal(mu2, sigma2))
return latent2
self.model = Model()
self.guide = guide
self.prior = mu1_prior
self.prior_dict = {"mu1": mu1_prior, "sigma1": sigma1_prior, "mu2": mu2_prior, "sigma2": sigma2_prior}
self.partial_dict = {"mu1": mu1_prior, "sigma1": sigma1_prior}
self.nn_prior = {"fc.bias": bias_prior, "fc.weight": weight_prior}
self.fn = stoch_fn
self.data = Variable(torch.randn(2, 2))
def test_splice(self):
tr = poutine.trace(self.guide).get_trace()
lifted_tr = poutine.trace(poutine.lift(self.guide, prior=self.prior)).get_trace()
for name in tr.nodes.keys():
if name in ('mu1', 'mu2', 'sigma1', 'sigma2'):
assert name not in lifted_tr
else:
assert name in lifted_tr
def test_prior_dict(self):
tr = poutine.trace(self.guide).get_trace()
lifted_tr = poutine.trace(poutine.lift(self.guide, prior=self.prior_dict)).get_trace()
for name in tr.nodes.keys():
assert name in lifted_tr
if name in {'sigma1', 'mu1', 'sigma2', 'mu2'}:
assert name + "_prior" == lifted_tr.nodes[name]['fn'].__name__
if tr.nodes[name]["type"] == "param":
assert lifted_tr.nodes[name]["type"] == "sample"
assert not lifted_tr.nodes[name]["is_observed"]
def test_unlifted_param(self):
tr = poutine.trace(self.guide).get_trace()
lifted_tr = poutine.trace(poutine.lift(self.guide, prior=self.partial_dict)).get_trace()
for name in tr.nodes.keys():
assert name in lifted_tr
if name in ('sigma1', 'mu1'):
assert name + "_prior" == lifted_tr.nodes[name]['fn'].__name__
assert lifted_tr.nodes[name]["type"] == "sample"
assert not lifted_tr.nodes[name]["is_observed"]
if name in ('sigma2', 'mu2'):
assert lifted_tr.nodes[name]["type"] == "param"
def test_random_module(self):
pyro.clear_param_store()
lifted_tr = poutine.trace(pyro.random_module("name", self.model, prior=self.prior)).get_trace()
for name in lifted_tr.nodes.keys():
if lifted_tr.nodes[name]["type"] == "param":
assert lifted_tr.nodes[name]["type"] == "sample"
assert not lifted_tr.nodes[name]["is_observed"]
def test_random_module_prior_dict(self):
pyro.clear_param_store()
lifted_nn = pyro.random_module("name", self.model, prior=self.nn_prior)
lifted_tr = poutine.trace(lifted_nn).get_trace()
for key_name in lifted_tr.nodes.keys():
name = pyro.params.user_param_name(key_name)
            if name in {'fc.weight', 'fc.bias'}:
dist_name = name[3:]
assert dist_name + "_prior" == lifted_tr.nodes[key_name]['fn'].__name__
assert lifted_tr.nodes[key_name]["type"] == "sample"
assert not lifted_tr.nodes[key_name]["is_observed"]
class QueuePoutineMixedTest(TestCase):
def setUp(self):
# Simple model with 1 continuous + 1 discrete + 1 continuous variable.
def model():
p = Variable(torch.Tensor([0.5]))
mu = Variable(torch.zeros(1))
sigma = Variable(torch.ones(1))
x = pyro.sample("x", Normal(mu, sigma)) # Before the discrete variable.
y = pyro.sample("y", Bernoulli(p))
z = pyro.sample("z", Normal(mu, sigma)) # After the discrete variable.
return dict(x=x, y=y, z=z)
self.sites = ["x", "y", "z", "_INPUT", "_RETURN"]
self.model = model
self.queue = Queue()
self.queue.put(poutine.Trace())
def test_queue_single(self):
f = poutine.trace(poutine.queue(self.model, queue=self.queue))
tr = f.get_trace()
for name in self.sites:
assert name in tr
def test_queue_enumerate(self):
f = poutine.trace(poutine.queue(self.model, queue=self.queue))
trs = []
while not self.queue.empty():
trs.append(f.get_trace())
assert len(trs) == 2
values = [
{name: tr.nodes[name]['value'].view(-1).data[0] for name in tr.nodes.keys()
if tr.nodes[name]['type'] == 'sample'}
for tr in trs
]
expected_ys = set([0, 1])
actual_ys = set([value["y"] for value in values])
assert actual_ys == expected_ys
        # Check that x was sampled the same on all paths.
assert values[0]["x"] == values[1]["x"]
        # Check that z was sampled differently on each path.
assert values[0]["z"] != values[1]["z"] # Almost surely true.
class IndirectLambdaPoutineTests(TestCase):
def setUp(self):
def model(batch_size_outer=2, batch_size_inner=2):
mu_latent = pyro.sample("mu_latent", dist.normal, ng_zeros(1), ng_ones(1))
def outer(i, x):
pyro.map_data("map_inner_%d" % i, x, lambda _i, _x:
inner(i, _i, _x), batch_size=batch_size_inner)
def inner(i, _i, _x):
pyro.sample("z_%d_%d" % (i, _i), dist.normal, mu_latent + _x, ng_ones(1))
pyro.map_data("map_outer", [[ng_ones(1)] * 2] * 2, lambda i, x:
outer(i, x), batch_size=batch_size_outer)
return mu_latent
self.model = model
self.expected_nodes = set(["z_0_0", "z_0_1", "z_1_0", "z_1_1", "mu_latent",
"_INPUT", "_RETURN"])
self.expected_edges = set([
("mu_latent", "z_0_0"), ("mu_latent", "z_0_1"),
("mu_latent", "z_1_0"), ("mu_latent", "z_1_1"),
])
def test_graph_structure(self):
tracegraph = poutine.trace(self.model, graph_type="dense").get_trace()
# Ignore structure on map_* nodes.
actual_nodes = set(n for n in tracegraph.nodes() if not n.startswith("map_"))
actual_edges = set((n1, n2) for n1, n2 in tracegraph.edges
if not n1.startswith("map_") if not n2.startswith("map_"))
assert actual_nodes == self.expected_nodes
assert actual_edges == self.expected_edges
def test_scale_factors(self):
def _test_scale_factor(batch_size_outer, batch_size_inner, expected):
trace = poutine.trace(self.model, graph_type="dense").get_trace(batch_size_outer=batch_size_outer,
batch_size_inner=batch_size_inner)
scale_factors = []
for node in ['z_0_0', 'z_0_1', 'z_1_0', 'z_1_1']:
if node in trace:
scale_factors.append(trace.nodes[node]['scale'])
assert scale_factors == expected
_test_scale_factor(1, 1, [4.0])
_test_scale_factor(2, 2, [1.0] * 4)
_test_scale_factor(1, 2, [2.0] * 2)
_test_scale_factor(2, 1, [2.0] * 2)
class ConditionPoutineTests(NormalNormalNormalPoutineTestCase):
def test_condition(self):
data = {"latent2": Variable(torch.randn(2))}
tr2 = poutine.trace(poutine.condition(self.model, data=data)).get_trace()
assert "latent2" in tr2
assert tr2.nodes["latent2"]["type"] == "sample" and \
tr2.nodes["latent2"]["is_observed"]
assert tr2.nodes["latent2"]["value"] is data["latent2"]
def test_do(self):
data = {"latent2": Variable(torch.randn(2))}
tr3 = poutine.trace(poutine.do(self.model, data=data)).get_trace()
assert "latent2" not in tr3
def test_trace_data(self):
tr1 = poutine.trace(
poutine.block(self.model, expose_types=["sample"])).get_trace()
tr2 = poutine.trace(
poutine.condition(self.model, data=tr1)).get_trace()
assert tr2.nodes["latent2"]["type"] == "sample" and \
tr2.nodes["latent2"]["is_observed"]
assert tr2.nodes["latent2"]["value"] is tr1.nodes["latent2"]["value"]
def test_stack_overwrite_failure(self):
data1 = {"latent2": Variable(torch.randn(2))}
data2 = {"latent2": Variable(torch.randn(2))}
cm = poutine.condition(poutine.condition(self.model, data=data1),
data=data2)
with pytest.raises(AssertionError):
cm()
def test_stack_success(self):
data1 = {"latent1": Variable(torch.randn(2))}
data2 = {"latent2": Variable(torch.randn(2))}
tr = poutine.trace(
poutine.condition(poutine.condition(self.model, data=data1),
data=data2)).get_trace()
assert tr.nodes["latent1"]["type"] == "sample" and \
tr.nodes["latent1"]["is_observed"]
assert tr.nodes["latent1"]["value"] is data1["latent1"]
assert tr.nodes["latent2"]["type"] == "sample" and \
tr.nodes["latent2"]["is_observed"]
assert tr.nodes["latent2"]["value"] is data2["latent2"]
def test_do_propagation(self):
pyro.clear_param_store()
def model():
z = pyro.sample("z", Normal(10.0 * ng_ones(1), 0.0001 * ng_ones(1)))
latent_prob = torch.exp(z) / (torch.exp(z) + ng_ones(1))
flip = pyro.sample("flip", Bernoulli(latent_prob))
return flip
sample_from_model = model()
z_data = {"z": -10.0 * ng_ones(1)}
# under model flip = 1 with high probability; so do indirect DO surgery to make flip = 0
sample_from_do_model = poutine.trace(poutine.do(model, data=z_data))()
assert eq(sample_from_model, ng_ones(1))
assert eq(sample_from_do_model, ng_zeros(1))
class EscapePoutineTests(TestCase):
def setUp(self):
# Simple model with 1 continuous + 1 discrete + 1 continuous variable.
def model():
p = Variable(torch.Tensor([0.5]))
mu = Variable(torch.zeros(1))
sigma = Variable(torch.ones(1))
x = pyro.sample("x", Normal(mu, sigma)) # Before the discrete variable.
y = pyro.sample("y", Bernoulli(p))
z = pyro.sample("z", Normal(mu, sigma)) # After the discrete variable.
return dict(x=x, y=y, z=z)
self.sites = ["x", "y", "z", "_INPUT", "_RETURN"]
self.model = model
def test_discrete_escape(self):
try:
poutine.escape(self.model, functools.partial(discrete_escape,
poutine.Trace()))()
assert False
except NonlocalExit as e:
assert e.site["name"] == "y"
def test_all_escape(self):
try:
poutine.escape(self.model, functools.partial(all_escape,
poutine.Trace()))()
assert False
except NonlocalExit as e:
assert e.site["name"] == "x"
def test_trace_compose(self):
tm = poutine.trace(self.model)
try:
poutine.escape(tm, functools.partial(all_escape, poutine.Trace()))()
assert False
except NonlocalExit:
assert "x" in tm.trace
try:
tem = poutine.trace(
poutine.escape(self.model, functools.partial(all_escape,
poutine.Trace())))
tem()
assert False
except NonlocalExit:
assert "x" not in tem.trace
|
py | b403964716cd84b7774a6416196a32bac18ad3f7 | from selenium import webdriver # $ pip install selenium
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
import os
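# Pop a desktop notification via AppleScript's `display notification`
# (macOS-only; relies on the osascript command-line tool).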
def notify(title, text):
os.system("""
osascript -e 'display notification "{}" with title "{}"'
""".format(text, title))
def removeWeirdChars(s):
    # Keep only printable, non-space ASCII characters (codes 33-126); whitespace,
    # control characters and anything non-ASCII is stripped out.
    return ''.join(c for c in s if 33 <= ord(c) <= 126)
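# Scrape `url` with headless Chrome, wait for an element with class
# 'latest-version' to appear, and return the text of its first <a> link.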
def getLatestVersion(url):
options = webdriver.ChromeOptions()
options.add_argument('--headless')
options.add_argument('--disable-notifications')
options.add_argument("--mute-audio")
# https://sites.google.com/a/chromium.org/chromedriver/downloads
browser = webdriver.Chrome(options=options)
browser.get(url)
WebDriverWait(browser, 10).until(EC.presence_of_element_located((By.CLASS_NAME, 'latest-version')))
latestDiv = browser.find_element(by=By.CLASS_NAME, value='latest-version')
latestLink = latestDiv.find_element(by=By.TAG_NAME, value='a')
version = latestLink.get_attribute("innerHTML")
browser.quit()
    return removeWeirdChars(version)
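# Compare the scraped latest version against `lastKnownVersion` and raise a
# desktop notification either way.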
def hasNewer(lastKnownVersion, url):
latest = getLatestVersion(url).strip()
if latest != lastKnownVersion:
notify("Version Watcher", "New version available! Latest is " + latest)
else:
notify("Version Watcher", "No new version available. Latest is " + latest)
if __name__ == "__main__":
hasNewer("2.13.2", "https://search.maven.org/artifact/com.fasterxml.jackson.core/jackson-databind")
|
py | b4039686129fcaa57ac2b130537b485c020bc3f9 | import pathlib
from arcade import load_texture
from themes.current_theme import *
assets_path = pathlib.Path(__file__).resolve().parent / "custom_1"
filenames = [
"floor.png",
"player.png",
"orc.png",
"troll.png",
"wall.png",
"red_potion.png",
"scroll.png",
"dead_body.png",
"stairs_down.png",
]
# Load the textures our sprites use on game start-up.
textures = [load_texture(str(assets_path / filename)) for filename in filenames]
|
py | b403985452a5d05b95ec4bff7034f3306e305c48 | # -*- coding: utf-8 -*-
from django.conf.urls import url
from . import views
urlpatterns = [
url(
regex=r'^$',
view=views.DashboardOverview.as_view(),
name='overview'
)
]
|
py | b40398dd24c37f0ef63a5b43b2fc7278627485ec | """
Define DataStats class for managing statistical data.
"""
import numpy as np
class CachedAttribute(object):
'''
Computes attribute value and caches it in instance.
Not actually used in code at present, but could be useful for later refactoring.
Author: Denis Otkidach
Example of use:
class MyClass(object):
def myMethod(self):
# ...
myMethod = CachedAttribute(myMethod)
Use "del inst.myMethod" to clear cache.
'''
def __init__(self, method, name=None):
self.method = method
self.name = name or method.__name__
def __get__(self, inst, cls):
if inst is None:
return self
result = self.method(inst)
setattr(inst, self.name, result)
return result
class DataStats(object):
"""Manages statistical information on causal model.
Allows for specification of statistics "by hand" when sample data is not available.
"""
_PREC = 1e-9
def __init__(self, var_names=None, raw_data=None):
"""Create DataStats object. Each column of raw_data corresponds to a variable named in var_names.
TODO 3: replace this with a more standard data structure (named numpy array? pandas?)
:param var_names: names of data variables
:param raw_data: sample data
"""
self.var_names = var_names
if raw_data is not None:
self.raw_data = raw_data
def get_corr_mat(self):
"""Get correlation matrix, either by returning cached value or calculating from raw data.
:return: correlation matrix
"""
if not hasattr(self,'corr_mat'): # if corr_mat doesn't exist, calculate it
# by default, numpy.corrcoef assumes each *row* corresponds to a variable
# for our *column* variables, need to set rowvar = False
if hasattr(self,'raw_data'):
self.corr_mat = np.corrcoef(self.raw_data,rowvar=False)
else:
                raise Exception('Raw data not present, cannot compute correlation matrix.')
return self.corr_mat
def set_corr_mat(self, corr_mat):
"""Set correlation matrix by hand. Use when raw data is not present.
:param corr_mat: correlation matrix
"""
if not hasattr(self,'raw_data'):
self.corr_mat = corr_mat
else:
            raise Exception('Raw data present! Compute correlation matrix using get_corr_mat().')
def get_prec_mat(self, var_set_key):
"""Get precision matrix for a desired conditioning set.
The precision matrix is an intermediate variable for calculating partial correlation.
:param var_set_key: set of conditioning variables
:return: precision matrix for conditioning variables specified in var_set_key
"""
# initialize dictionary of precision matrices
if not hasattr(self,'prec_mat'):
self.prec_mat = dict()
# if the relevant precision matrix is not in the dictionary, calculate it
if var_set_key not in self.prec_mat.keys():
corr_mat = self.get_corr_mat()
var_list = list(var_set_key)
var_corr = corr_mat[np.ix_(var_list,var_list)]
self.prec_mat[var_set_key] = np.linalg.pinv(var_corr)
return self.prec_mat[var_set_key]
def get_part_corr(self, pair, cond_set):
"""Get partial correlation and corresponding Fisher's z for a pair of variables, conditioned on a set of other variables.
:param pair: variables to correlate
:param cond_set: conditioning variables
:return: part_corr = partial correlation, fisher_z = Fisher's z statistic
"""
# load the relevant precision matrix
var_set = list(pair)+list(cond_set)
var_set_keys = tuple(sorted(var_set))
prec_mat = self.get_prec_mat(var_set_keys)
# compute partial correlation
var_range = range(len(var_set_keys))
xi = [ind for ind in var_range if var_set_keys[ind]==pair[0]][0]
yi = [ind for ind in var_range if var_set_keys[ind]==pair[1]][0]
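        # Partial correlation from the precision (inverse correlation) matrix P:
        # rho_{xy|Z} = -P_xy / sqrt(P_xx * P_yy)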
part_corr = -prec_mat[xi,yi]/np.sqrt(prec_mat[xi,xi]*prec_mat[yi,yi])
# compute Fisher z statistic for comparison to standard normal distribution
nsamp = self.get_nsamp()
ncond = len(cond_set)
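        # Fisher's z: sqrt(nsamp - ncond - 3) * arctanh(r); under the null of zero
        # partial correlation this is approximately standard normal.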
with np.errstate(divide='ignore'):
if abs(part_corr) > 1-self._PREC:
part_corr = 1-2*self._PREC
fisher_z = np.sqrt(nsamp-ncond-3)*np.arctanh(part_corr)
return part_corr,fisher_z
def get_nsamp(self):
"""Get the number of samples for computation of Fisher's z.
The number of samples is computed from the sample data, if available.
:return: number of samples
"""
if hasattr(self,'raw_data'):
nsamp = np.shape(self.raw_data)[0]
elif hasattr(self,'nsamp'):
nsamp = self.nsamp
else:
            raise Exception('Number of samples is unknown and cannot be computed from data.')
return nsamp
def set_nsamp(self,nsamp):
"""Set the number of samples.
Use only in the absence of actual sample data.
:param nsamp: number of samples
"""
if not hasattr(self,'raw_data'):
self.nsamp = nsamp
else:
            raise Exception('Raw data present! Compute number of samples using get_nsamp().')
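# A minimal usage sketch on synthetic data; the variable names and coefficients
# below are illustrative assumptions, not part of the original module.
if __name__ == '__main__':
    rng = np.random.RandomState(0)
    x = rng.randn(500)
    y = 0.8 * x + 0.2 * rng.randn(500)
    z = 0.5 * y + 0.5 * rng.randn(500)
    stats = DataStats(var_names=['x', 'y', 'z'],
                      raw_data=np.column_stack([x, y, z]))
    # Partial correlation of columns 0 (x) and 2 (z) given column 1 (y).
    part_corr, fisher_z = stats.get_part_corr((0, 2), (1,))
    print('partial corr: %.3f, Fisher z: %.3f' % (part_corr, fisher_z))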
|
py | b4039959990cde92baa720e5aa6b02bf49f14ba4 | # Copyright (c) OpenMMLab. All rights reserved.
import copy
from abc import ABCMeta, abstractmethod
from typing import List
import mmcv
import numpy as np
from torch.utils.data import Dataset
from mmcls.core.evaluation import precision_recall_f1, support
from mmcls.models.losses import accuracy
from .pipelines import Compose
class BaseDataset(Dataset, metaclass=ABCMeta):
"""Base dataset.
Args:
data_prefix (str): the prefix of data path
pipeline (list): a list of dict, where each element represents
            an operation defined in `mmcls.datasets.pipelines`
ann_file (str | None): the annotation file. When ann_file is str,
the subclass is expected to read from the ann_file. When ann_file
is None, the subclass is expected to read according to data_prefix
test_mode (bool): in train mode or test mode
"""
CLASSES = None
def __init__(self,
data_prefix,
pipeline,
classes=None,
ann_file=None,
test_mode=False):
super(BaseDataset, self).__init__()
self.ann_file = ann_file
self.data_prefix = data_prefix
self.test_mode = test_mode
self.pipeline = Compose(pipeline)
self.CLASSES = self.get_classes(classes)
self.data_infos = self.load_annotations()
@abstractmethod
def load_annotations(self):
pass
@property
def class_to_idx(self):
"""Map mapping class name to class index.
Returns:
dict: mapping from class name to class index.
"""
return {_class: i for i, _class in enumerate(self.CLASSES)}
def get_gt_labels(self):
"""Get all ground-truth labels (categories).
Returns:
np.ndarray: categories for all images.
"""
gt_labels = np.array([data['gt_label'] for data in self.data_infos])
return gt_labels
def get_cat_ids(self, idx: int) -> List[int]:
"""Get category id by index.
Args:
idx (int): Index of data.
Returns:
cat_ids (List[int]): Image category of specified index.
"""
return [int(self.data_infos[idx]['gt_label'])]
def prepare_data(self, idx):
results = copy.deepcopy(self.data_infos[idx])
return self.pipeline(results)
def __len__(self):
return len(self.data_infos)
def __getitem__(self, idx):
return self.prepare_data(idx)
@classmethod
def get_classes(cls, classes=None):
"""Get class names of current dataset.
Args:
classes (Sequence[str] | str | None): If classes is None, use
default CLASSES defined by builtin dataset. If classes is a
string, take it as a file name. The file contains the name of
classes where each line contains one class name. If classes is
a tuple or list, override the CLASSES defined by the dataset.
Returns:
tuple[str] or list[str]: Names of categories of the dataset.
"""
if classes is None:
return cls.CLASSES
if isinstance(classes, str):
# take it as a file path
class_names = mmcv.list_from_file(classes)
elif isinstance(classes, (tuple, list)):
class_names = classes
else:
raise ValueError(f'Unsupported type {type(classes)} of classes.')
return class_names
def evaluate(self,
results,
metric='accuracy',
metric_options=None,
indices=None,
logger=None):
"""Evaluate the dataset.
Args:
results (list): Testing results of the dataset.
metric (str | list[str]): Metrics to be evaluated.
Default value is `accuracy`.
metric_options (dict, optional): Options for calculating metrics.
Allowed keys are 'topk', 'thrs' and 'average_mode'.
Defaults to None.
indices (list, optional): The indices of samples corresponding to
the results. Defaults to None.
logger (logging.Logger | str, optional): Logger used for printing
related information during evaluation. Defaults to None.
Returns:
dict: evaluation results
"""
if metric_options is None:
metric_options = {'topk': (1, 5)}
if isinstance(metric, str):
metrics = [metric]
else:
metrics = metric
allowed_metrics = [
'accuracy', 'precision', 'recall', 'f1_score', 'support'
]
eval_results = {}
results = np.vstack(results)
gt_labels = self.get_gt_labels()
if indices is not None:
gt_labels = gt_labels[indices]
num_imgs = len(results)
assert len(gt_labels) == num_imgs, 'dataset testing results should '\
'be of the same length as gt_labels.'
invalid_metrics = set(metrics) - set(allowed_metrics)
if len(invalid_metrics) != 0:
raise ValueError(f'metric {invalid_metrics} is not supported.')
        # hardcoded: always report top-1 accuracy only, ignoring any 'topk'
        # passed in metric_options
        topk = 1
thrs = metric_options.get('thrs')
average_mode = metric_options.get('average_mode', 'macro')
if 'accuracy' in metrics:
if thrs is not None:
acc = accuracy(results, gt_labels, topk=topk, thrs=thrs)
else:
acc = accuracy(results, gt_labels, topk=topk)
if isinstance(topk, tuple):
eval_results_ = {
f'accuracy_top-{k}': a
for k, a in zip(topk, acc)
}
else:
eval_results_ = {'accuracy': acc}
if isinstance(thrs, tuple):
for key, values in eval_results_.items():
eval_results.update({
f'{key}_thr_{thr:.2f}': value.item()
for thr, value in zip(thrs, values)
})
else:
eval_results.update(
{k: v.item()
for k, v in eval_results_.items()})
if 'support' in metrics:
support_value = support(
results, gt_labels, average_mode=average_mode)
eval_results['support'] = support_value
precision_recall_f1_keys = ['precision', 'recall', 'f1_score']
if len(set(metrics) & set(precision_recall_f1_keys)) != 0:
if thrs is not None:
precision_recall_f1_values = precision_recall_f1(
results, gt_labels, average_mode=average_mode, thrs=thrs)
else:
precision_recall_f1_values = precision_recall_f1(
results, gt_labels, average_mode=average_mode)
for key, values in zip(precision_recall_f1_keys,
precision_recall_f1_values):
if key in metrics:
if isinstance(thrs, tuple):
eval_results.update({
f'{key}_thr_{thr:.2f}': value
for thr, value in zip(thrs, values)
})
else:
eval_results[key] = values
return eval_results
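# A minimal sketch of a concrete subclass, illustrating the contract that
# load_annotations() is expected to meet: a list of dicts carrying at least
# 'gt_label' (read by get_gt_labels()/get_cat_ids()) plus whatever keys the
# pipeline transforms need. The class, classes tuple and filenames below are
# illustrative assumptions, not part of mmcls.
class _ToyDataset(BaseDataset):
    CLASSES = ('cat', 'dog')
    def load_annotations(self):
        return [
            dict(img_info=dict(filename='a.jpg'), gt_label=np.int64(0)),
            dict(img_info=dict(filename='b.jpg'), gt_label=np.int64(1)),
        ]
# e.g. _ToyDataset(data_prefix='data/', pipeline=[]) yields two labelled samples.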
|
py | b4039a8443ed2863f422d307944870beadd4fb39 | # pylint: disable=no-member
import graphene
from mongoengine import DoesNotExist
from .domain.user import User
from .domain.healthrecord import HealthRecord
from .domain.patient import Patient
from .domain.management import Management
from .domain.announcement import Announcement
from .domain.medication import Medication
from .domain.invoice import Invoice
from .domain.registration import Registration
from .sub_graphql.patient_graphql import PatientMeta, PatientsMeta
from .sub_graphql.healthrecord_graphql import HealthRecordMeta, HealthRecordsMeta
from .sub_graphql.user_graphql import UserMeta
from .sub_graphql.announcement_graphql import AnnouncementMeta, AnnouncementsMeta
from .sub_graphql.management_graphql import ManagementMeta
from .sub_graphql.medication_graphql import MedicationMeta, MedicationsMeta
from .sub_graphql.invoice_graphql import InvoiceMeta, InvoicesMeta
from .sub_graphql.registration_graphql import RegistrationMeta
class Result(graphene.ObjectType):
ok = graphene.Boolean()
class Query(graphene.ObjectType):
user = graphene.Field(UserMeta, email=graphene.String(), id=graphene.String())
users = graphene.List(UserMeta)
login = graphene.Field(
Result, email=graphene.String(required=True), password=graphene.String(required=True)
)
health_record = graphene.Field(HealthRecordMeta, id=graphene.Int(required=True))
health_records = graphene.Field(
HealthRecordsMeta, offset=graphene.Int(), count=graphene.Int(), patient_id=graphene.Int()
)
patient = graphene.Field(PatientMeta, id=graphene.Int(), identifier=graphene.String())
patients = graphene.Field(PatientsMeta, offset=graphene.Int(), count=graphene.Int())
announcement = graphene.Field(AnnouncementMeta, id=graphene.String(required=True))
announcements = graphene.Field(AnnouncementsMeta, offset=graphene.Int(), count=graphene.Int())
management = graphene.Field(ManagementMeta)
medication = graphene.Field(
MedicationMeta, id=graphene.Int(required=True), name=graphene.String()
)
medications = graphene.Field(MedicationsMeta, offset=graphene.Int(), count=graphene.Int())
invoice = graphene.Field(InvoiceMeta, id=graphene.Int(required=True))
invoices = graphene.Field(InvoicesMeta, offset=graphene.Int(), count=graphene.Int())
registration = graphene.Field(RegistrationMeta, id=graphene.String())
registrations = graphene.List(
RegistrationMeta, identifier=graphene.String(), registration_date=graphene.String()
)
latest_order = graphene.Int(registration_date=graphene.String())
def resolve_user(self, info, email=None, id=None):
try:
if id != None:
return User(id=id).get()
elif email != None:
return User(email=email).get()
else:
return None
except DoesNotExist:
return None
def resolve_users(self, info):
return User.get_all()
def resolve_login(self, info, email, password):
try:
user = User(email=email)
return Result(ok=user.check_password(password))
except Exception:
return Result(ok=False)
def resolve_health_record(self, info, id):
try:
hr = HealthRecord(id=id)
return hr
except Exception:
return None
def resolve_health_records(self, info, offset=0, count=20, patient_id=None):
try:
hrs = None
if patient_id == None:
hrs = HealthRecord.get_all(offset, count)
else:
hrs = HealthRecord.query_patient(patient_id, offset, count)
return hrs
except Exception:
return None
def resolve_patient(self, info, id=None, identifier=None):
try:
patient = None
if id != None:
patient = Patient(id=id)
elif identifier != None:
patient = Patient(identifier=identifier)
else:
raise AttributeError("Id or Identifier must have one.")
return patient
except Exception:
return None
def resolve_patients(self, info, offset=0, count=20):
try:
patients = Patient.get_all(offset=offset, count=count)
return patients
except Exception:
return None
def resolve_announcement(self, info, id):
try:
announcement = Announcement(id=id)
return announcement.get()
except Exception:
return None
def resolve_announcements(self, info, offset=0, count=20):
try:
announcements = Announcement.get_all(offset=offset, count=count)
return announcements
except Exception:
return None
def resolve_management(self, info):
management = Management()
return management.get()
def resolve_medication(self, info, id):
try:
medication = Medication(id=id)
return medication
except Exception:
return None
def resolve_medications(self, info, offset=0, count=20):
try:
medications = Medication.get_all(offset=offset, count=count)
return medications
except Exception:
return None
def resolve_invoice(self, info, id):
try:
invoice = Invoice(id=id)
return invoice
except Exception:
return None
def resolve_invoices(self, info, offset=0, count=20, patient_id=None):
try:
invoices = None
if patient_id == None:
invoices = Invoice.get_all(offset=offset, count=count)
else:
invoices = Invoice.query_patient(patient_id, offset, count)
return invoices
except Exception:
return None
def resolve_registration(self, info, id=None):
try:
if id != None:
return Registration(id=id).get()
else:
return None
except DoesNotExist:
return None
def resolve_registrations(self, info, identifier=None, registration_date=None):
try:
if identifier != str(None):
return Registration(identifier=identifier).get_result()
elif registration_date != str(None):
return Registration(registration_date=registration_date).get_result()
else:
return Registration.get_all()
except DoesNotExist:
return None
def resolve_latest_order(self, info, registration_date=None):
try:
if registration_date != None:
                return Registration(registration_date=registration_date).get_latest_order()
else:
return None
except DoesNotExist:
return None
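# A minimal sketch of wiring this Query into an executable schema; the field
# names inside the example query string are assumptions about the imported
# *Meta types, shown for illustration only.
schema = graphene.Schema(query=Query)
# e.g. result = schema.execute('{ users { id } }')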
|
py | b4039af4fc984d30f540dc0921fd2c39b349b64a | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
default_prefix = "STM"
known_chains = {
"HIVE": {
"chain_id": "0" * int(256 / 4),
"min_version": '0.23.0',
"prefix": "STM",
"chain_assets": [
{"asset": "@@000000013", "symbol": "HBD", "precision": 3, "id": 0},
{"asset": "@@000000021", "symbol": "HIVE", "precision": 3, "id": 1},
{"asset": "@@000000037", "symbol": "VESTS", "precision": 6, "id": 2}
],
},
"STEEMAPPBASE": {
"chain_id": "0" * int(256 / 4),
"min_version": '0.19.10',
"prefix": "STM",
"chain_assets": [
{"asset": "@@000000013", "symbol": "SBD", "precision": 3, "id": 0},
{"asset": "@@000000021", "symbol": "STEEM", "precision": 3, "id": 1},
{"asset": "@@000000037", "symbol": "VESTS", "precision": 6, "id": 2}
],
},
"STEEM": {
"chain_id": "0" * int(256 / 4),
"min_version": '0.19.5',
"prefix": "STM",
"chain_assets": [
{"asset": "SBD", "symbol": "SBD", "precision": 3, "id": 0},
{"asset": "STEEM", "symbol": "STEEM", "precision": 3, "id": 1},
{"asset": "VESTS", "symbol": "VESTS", "precision": 6, "id": 2}
],
},
"STEEMZERO": {
"chain_id": "0" * int(256 / 4),
"min_version": '0.0.0',
"prefix": "STM",
"chain_assets": [
{"asset": "SBD", "symbol": "SBD", "precision": 3, "id": 0},
{"asset": "STEEM", "symbol": "STEEM", "precision": 3, "id": 1},
{"asset": "VESTS", "symbol": "VESTS", "precision": 6, "id": 2}
],
},
"TESTNET": {
"chain_id": "79276aea5d4877d9a25892eaa01b0adf019d3e5cb12a97478df3298ccdd01673",
"min_version": '0.20.0',
"prefix": "STX",
"chain_assets": [
{"asset": "@@000000013", "symbol": "SBD", "precision": 3, "id": 0},
{"asset": "@@000000021", "symbol": "STEEM", "precision": 3, "id": 1},
{"asset": "@@000000037", "symbol": "VESTS", "precision": 6, "id": 2}
],
},
"TESTDEV": {
"chain_id":
"18dcf0a285365fc58b71f18b3d3fec954aa0c141c44e4e5cb4cf777b9eab274e",
"min_version": '0.20.0',
"prefix": "TST",
"chain_assets": [
{"asset": "@@000000013", "symbol": "TBD", "precision": 3, "id": 0},
{"asset": "@@000000021", "symbol": "TESTS", "precision": 3, "id": 1},
{"asset": "@@000000037", "symbol": "VESTS", "precision": 6, "id": 2}
],
},
"GOLOS": {
"chain_id": "782a3039b478c839e4cb0c941ff4eaeb7df40bdd68bd441afd444b9da763de12",
"min_version": '0.0.0',
"prefix": "GLS",
"chain_assets": [
{"asset": "SBD", "symbol": "GBG", "precision": 3, "id": 0},
{"asset": "STEEM", "symbol": "GOLOS", "precision": 3, "id": 1},
{"asset": "VESTS", "symbol": "GESTS", "precision": 6, "id": 2}
],
},
"VIT": {
"chain_id": "73f14dd4b7b07a8663be9d84300de0f65ef2ee7e27aae32bbe911c548c08f000",
"min_version": "0.0.0",
"prefix": "VIT",
"chain_assets": [
{"asset": "STEEM", "symbol": "VIT", "precision": 3, "id": 1},
{"asset": "VESTS", "symbol": "VESTS", "precision": 6, "id": 2}
],
},
"WEKU": {
"chain_id": "b24e09256ee14bab6d58bfa3a4e47b0474a73ef4d6c47eeea007848195fa085e",
"min_version": "0.19.3",
"prefix": "WKA",
"chain_assets": [
{"asset": "SBD", "symbol": "WKD", "precision": 3, "id": 0},
{"asset": "STEEM", "symbol": "WEKU", "precision": 3, "id": 1},
{"asset": "VESTS", "symbol": "VESTS", "precision": 6, "id": 2}
],
},
"EFTGAPPBASE": {
"chain_id": "1c15984beb16945c01cb9bc3d654b0417c650461dfe535018fe03a4fc5a36864",
"min_version": "0.19.12",
"prefix": "EUR",
"chain_assets": [
{"asset": "@@000000013", "symbol": "EUR", "precision": 3, "id": 0},
{"asset": "@@000000021", "symbol": "EFTG", "precision": 3, "id": 1},
{"asset": "@@000000037", "symbol": "VESTS", "precision": 6, "id": 2}
],
},
"EFTG": {
"chain_id": "1c15984beb16945c01cb9bc3d654b0417c650461dfe535018fe03a4fc5a36864",
"min_version": "0.19.6",
"prefix": "EUR",
"chain_assets": [
{"asset": "SBD", "symbol": "EUR", "precision": 3, "id": 0},
{"asset": "STEEM", "symbol": "EFTG", "precision": 3, "id": 1},
{"asset": "VESTS", "symbol": "VESTS", "precision": 6, "id": 2}
],
},
"PULSAR": {
"chain_id": "07c687c01f134adaf217a9b9367d1cef679c3c020167fdd25ee8c403f687528e",
"min_version": "0.101.0",
"prefix": "EUR",
"chain_assets": [
{"asset": "@@000000013", "symbol": "EUR", "precision": 3, "id": 0},
{"asset": "@@000000021", "symbol": "PULSE", "precision": 3, "id": 1},
{"asset": "@@000000037", "symbol": "VESTS", "precision": 6, "id": 2}
],
},
"WLS": {
"chain_id": "de999ada2ff7ed3d3d580381f229b40b5a0261aec48eb830e540080817b72866",
"min_version": "0.0.0",
"prefix": "WLS",
"chain_assets": [
{"asset": "STEEM", "symbol": "WLS", "precision": 3, "id": 1},
{"asset": "VESTS", "symbol": "VESTS", "precision": 6, "id": 2}
],
},
}
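# Editor's note: the helper below is an illustrative sketch added for clarity
# and is not part of the original module; the name get_asset_by_symbol is
# hypothetical and relies only on the known_chains layout defined above.
def get_asset_by_symbol(chain_name, symbol, chains=known_chains):
    """Return the asset entry for `symbol` on `chain_name`, or None."""
    chain = chains.get(chain_name)
    if chain is None:
        return None
    for asset in chain["chain_assets"]:
        if asset["symbol"] == symbol:
            return asset
    return None
# Example: get_asset_by_symbol("HIVE", "VESTS")
# -> {"asset": "@@000000037", "symbol": "VESTS", "precision": 6, "id": 2}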
|
py | b4039d6b701619f53ba3f8ebbcc15b6cf60fbcec | # WARNING: Do not edit by hand, this file was generated by Crank:
#
# https://github.com/gocardless/crank
#
from . import base_service
from .. import resources
from ..paginator import Paginator
from .. import errors
class BillingRequestTemplatesService(base_service.BaseService):
"""Service class that provides access to the billing_request_templates
endpoints of the GoCardless Pro API.
"""
RESOURCE_CLASS = resources.BillingRequestTemplate
RESOURCE_NAME = 'billing_request_templates'
def list(self,params=None, headers=None):
"""List Billing Request Templates.
Returns a [cursor-paginated](#api-usage-cursor-pagination) list of your
Billing Request Templates.
Args:
params (dict, optional): Query string parameters.
Returns:
ListResponse of BillingRequestTemplate instances
"""
path = '/billing_request_templates'
response = self._perform_request('GET', path, params, headers,
retry_failures=True)
return self._resource_for(response)
def all(self, params=None):
if params is None:
params = {}
return Paginator(self, params)
def get(self,identity,params=None, headers=None):
"""Get a single Billing Request Template.
Fetches a Billing Request Template
Args:
identity (string): Unique identifier, beginning with "BRT".
params (dict, optional): Query string parameters.
Returns:
BillingRequestTemplate
"""
path = self._sub_url_params('/billing_request_templates/:identity', {
'identity': identity,
})
response = self._perform_request('GET', path, params, headers,
retry_failures=True)
return self._resource_for(response)
def create(self,params=None, headers=None):
"""Create a Billing Request Template.
Args:
params (dict, optional): Request body.
Returns:
BillingRequestTemplate
"""
path = '/billing_request_templates'
if params is not None:
params = {self._envelope_key(): params}
try:
response = self._perform_request('POST', path, params, headers,
retry_failures=True)
except errors.IdempotentCreationConflictError as err:
if self.raise_on_idempotency_conflict:
raise err
return self.get(identity=err.conflicting_resource_id,
params=params,
headers=headers)
return self._resource_for(response)
def update(self,identity,params=None, headers=None):
"""Update a Billing Request Template.
Updates a Billing Request Template, which will affect all future
Billing Requests created by this template.
Args:
              identity (string): Unique identifier, beginning with "BRT".
params (dict, optional): Request body.
Returns:
BillingRequestTemplate
"""
path = self._sub_url_params('/billing_request_templates/:identity', {
'identity': identity,
})
if params is not None:
params = {self._envelope_key(): params}
response = self._perform_request('PUT', path, params, headers,
retry_failures=True)
return self._resource_for(response)
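# Editor's note: hedged usage sketch, not part of the generated service code.
# It assumes the usual gocardless_pro.Client entry point and a sandbox access
# token; the identifier "BRT123" and the update body are placeholders.
#
#   import gocardless_pro
#   client = gocardless_pro.Client(access_token="<token>", environment="sandbox")
#   templates = client.billing_request_templates.list().records
#   template = client.billing_request_templates.get("BRT123")
#   client.billing_request_templates.update(template.id, params={"name": "Gold plan"})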
|
py | b4039fc7c73aeb99d02d5c4f97c85ca5b1bc9e95 | # -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import os
import platform
import numpy as np
import pytest
from utils import opr_test
import megengine.functional as F
from megengine import tensor
from megengine.core._trace_option import use_symbolic_shape
from megengine.core.tensor.utils import astensor1d
from megengine.distributed.helper import get_device_count_by_fork
def test_eye():
dtype = np.float32
cases = [{"input": [10, 20]}, {"input": [30]}]
for case in cases:
np.testing.assert_allclose(
F.eye(case["input"], dtype=dtype).numpy(),
np.eye(*case["input"]).astype(dtype),
)
np.testing.assert_allclose(
F.eye(*case["input"], dtype=dtype).numpy(),
np.eye(*case["input"]).astype(dtype),
)
np.testing.assert_allclose(
F.eye(tensor(case["input"]), dtype=dtype).numpy(),
np.eye(*case["input"]).astype(dtype),
)
def test_concat():
def get_data_shape(length: int):
return (length, 2, 3)
data1 = np.random.random(get_data_shape(5)).astype("float32")
data2 = np.random.random(get_data_shape(6)).astype("float32")
data3 = np.random.random(get_data_shape(7)).astype("float32")
def run(data1, data2):
return F.concat([data1, data2])
cases = [{"input": [data1, data2]}, {"input": [data1, data3]}]
opr_test(cases, run, ref_fn=lambda x, y: np.concatenate([x, y]))
def test_concat_device():
data1 = tensor(np.random.random((3, 2, 2)).astype("float32"), device="cpu0")
data2 = tensor(np.random.random((2, 2, 2)).astype("float32"), device="cpu1")
out = F.concat([data1, data2], device="cpu0")
assert str(out.device).split(":")[0] == "cpu0"
def test_stack():
data1 = np.random.random((3, 2, 2)).astype("float32")
data2 = np.random.random((3, 2, 2)).astype("float32")
data3 = np.random.random((3, 2, 2)).astype("float32")
cases = [{"input": [data1, data2]}, {"input": [data1, data3]}]
for ai in range(3):
def run(data1, data2):
return F.stack([data1, data2], axis=ai)
opr_test(cases, run, ref_fn=lambda x, y: np.stack([x, y], axis=ai))
def test_split():
data = np.random.random((2, 3, 4, 5)).astype(np.float32)
inp = tensor(data)
mge_out0 = F.split(inp, 2, axis=3)
mge_out1 = F.split(inp, [3], axis=3)
np_out = np.split(data, [3, 5], axis=3)
assert len(mge_out0) == 2
assert len(mge_out1) == 2
np.testing.assert_equal(mge_out0[0].numpy(), np_out[0])
np.testing.assert_equal(mge_out1[0].numpy(), np_out[0])
np.testing.assert_equal(mge_out0[1].numpy(), np_out[1])
np.testing.assert_equal(mge_out1[1].numpy(), np_out[1])
try:
F.split(inp, 4)
assert False
except ValueError as e:
pass
try:
F.split(inp, [3, 3, 5], axis=3)
assert False
except ValueError as e:
assert str(e) == "Invalid nsplits_or_secions: [3, 3, 5]"
def test_reshape():
x = np.arange(6, dtype="float32")
xx = tensor(x)
y = x.reshape(1, 2, 3)
for shape in [
(1, 2, 3),
(1, -1, 3),
(1, tensor(-1), 3),
np.array([1, -1, 3], dtype="int32"),
tensor([1, -1, 3]),
]:
yy = F.reshape(xx, shape)
np.testing.assert_equal(yy.numpy(), y)
def test_squeeze():
x = np.arange(6, dtype="float32").reshape(1, 2, 3, 1)
xx = tensor(x)
for axis in [None, 3, -4, (3, -4)]:
y = np.squeeze(x, axis)
yy = F.squeeze(xx, axis)
np.testing.assert_equal(y, yy.numpy())
def test_expand_dims():
x = np.arange(6, dtype="float32").reshape(2, 3)
xx = tensor(x)
for axis in [2, -3, (3, -4), (1, -4)]:
y = np.expand_dims(x, axis)
yy = F.expand_dims(xx, axis)
np.testing.assert_equal(y, yy.numpy())
def test_elemwise_dtype_promotion():
x = np.random.rand(2, 3).astype("float32")
y = np.random.rand(1, 3).astype("float16")
xx = tensor(x)
yy = tensor(y)
z = xx * yy
np.testing.assert_equal(z.numpy(), x * y)
z = xx + y
np.testing.assert_equal(z.numpy(), x + y)
z = x - yy
np.testing.assert_equal(z.numpy(), x - y)
def test_linspace():
cases = [
{"input": [1, 9, 9]},
{"input": [3, 10, 8]},
]
opr_test(
cases,
F.linspace,
ref_fn=lambda start, end, step: np.linspace(start, end, step, dtype=np.float32),
)
cases = [
{"input": [9, 1, 9]},
{"input": [10, 3, 8]},
]
opr_test(
cases,
F.linspace,
ref_fn=lambda start, end, step: np.linspace(start, end, step, dtype=np.float32),
)
cases = [
{"input": [1, tensor(9), 9]},
{"input": [tensor(1), 9, tensor(9)]},
]
opr_test(
cases,
F.linspace,
ref_fn=lambda start, end, step: np.linspace(1, 9, 9, dtype=np.float32),
)
def test_arange():
cases = [
{"input": [1, 9, 1]},
{"input": [2, 10, 2]},
]
opr_test(
cases,
F.arange,
ref_fn=lambda start, end, step: np.arange(start, end, step, dtype=np.float32),
)
cases = [
{"input": [9, 1, -1]},
{"input": [10, 2, -2]},
]
opr_test(
cases,
F.arange,
ref_fn=lambda start, end, step: np.arange(start, end, step, dtype=np.float32),
)
cases = [
{"input": [9.3, 1.2, -0.5]},
{"input": [10.3, 2.1, -1.7]},
]
opr_test(
cases,
F.arange,
ref_fn=lambda start, end, step: np.arange(start, end, step, dtype=np.float32),
)
def test_round():
data1_shape = (15,)
data2_shape = (25,)
data1 = np.random.random(data1_shape).astype(np.float32)
data2 = np.random.random(data2_shape).astype(np.float32)
cases = [{"input": data1}, {"input": data2}]
opr_test(cases, F.round, ref_fn=np.round)
def test_flatten():
data0_shape = (2, 3, 4, 5)
data1_shape = (4, 5, 6, 7)
data0 = np.random.random(data0_shape).astype(np.float32)
data1 = np.random.random(data1_shape).astype(np.float32)
def compare_fn(x, y):
assert x.shape[0] == y
output0 = (2 * 3 * 4 * 5,)
output1 = (4 * 5 * 6 * 7,)
cases = [
{"input": data0, "output": output0},
{"input": data1, "output": output1},
]
opr_test(cases, F.flatten, compare_fn=compare_fn)
output0 = (2, 3 * 4 * 5)
output1 = (4, 5 * 6 * 7)
cases = [
{"input": data0, "output": output0},
{"input": data1, "output": output1},
]
opr_test(cases, F.flatten, compare_fn=compare_fn, start_axis=1)
output0 = (2, 3, 4 * 5)
output1 = (4, 5, 6 * 7)
cases = [
{"input": data0, "output": output0},
{"input": data1, "output": output1},
]
opr_test(cases, F.flatten, compare_fn=compare_fn, start_axis=2)
output0 = (2, 3 * 4, 5)
output1 = (4, 5 * 6, 7)
cases = [
{"input": data0, "output": output0},
{"input": data1, "output": output1},
]
opr_test(cases, F.flatten, compare_fn=compare_fn, start_axis=1, end_axis=2)
def test_broadcast():
input1_shape = (20, 30)
output1_shape = (30, 20, 30)
data1 = np.random.random(input1_shape).astype(np.float32)
input2_shape = (10, 1)
output2_shape = (20, 10, 20)
data2 = np.random.random(input2_shape).astype(np.float32)
def compare_fn(x, y):
assert x.shape[0] == y
cases = [
{"input": [data1, output1_shape], "output": output1_shape},
{"input": [data2, output2_shape], "output": output2_shape},
]
opr_test(cases, F.broadcast_to, compare_fn=compare_fn)
x = F.ones((2, 1, 3))
with pytest.raises(RuntimeError):
F.broadcast_to(x, (2, 3, 4))
with pytest.raises(RuntimeError):
F.broadcast_to(x, (4, 1, 3))
with pytest.raises(RuntimeError):
F.broadcast_to(x, (1, 3))
def test_utils_astensor1d():
reference = tensor(0)
# literal
x = [1, 2, 3]
for dtype in [None, "float32"]:
xx = astensor1d(x, reference, dtype=dtype)
assert type(xx) is tensor
np.testing.assert_equal(xx.numpy(), x)
# numpy array
x = np.asarray([1, 2, 3], dtype="int32")
for dtype in [None, "float32"]:
xx = astensor1d(x, reference, dtype=dtype)
assert type(xx) is tensor
np.testing.assert_equal(xx.numpy(), x.astype(dtype) if dtype else x)
# tensor
x = tensor([1, 2, 3], dtype="int32")
for dtype in [None, "float32"]:
xx = astensor1d(x, reference, dtype=dtype)
assert type(xx) is tensor
np.testing.assert_equal(xx.numpy(), x.numpy())
# mixed
x = [1, tensor(2), 3]
for dtype in [None, "float32"]:
xx = astensor1d(x, reference, dtype=dtype)
assert type(xx) is tensor
np.testing.assert_equal(xx.numpy(), [1, 2, 3])
def test_device():
x = tensor([1, 2, 3], dtype="float32")
y1 = F.eye(x.shape, dtype="float32")
y2 = F.eye(x.shape, dtype="float32", device=None)
np.testing.assert_almost_equal(y1.numpy(), y2.numpy())
y3 = F.eye(x.shape, dtype="float32", device="xpux")
y4 = F.eye(x.shape, dtype="float32", device=x.device)
np.testing.assert_almost_equal(y3.numpy(), y4.numpy())
y5 = F.full((3, 2), 4, device=x.device)
y6 = F.full((3, 2), 4, device="xpux")
np.testing.assert_almost_equal(y5.numpy(), y6.numpy())
def test_identity():
x = tensor(np.random.random((5, 10)).astype(np.float32))
y = F.copy(x)
np.testing.assert_equal(y.numpy(), x)
def copy_test(dst, src):
data = np.random.random((2, 3)).astype(np.float32)
x = tensor(data, device=src)
y = F.copy(x, dst)
assert np.allclose(data, y.numpy())
z = x.to(dst)
assert np.allclose(data, z.numpy())
@pytest.mark.require_ngpu(1)
def test_copy_h2d():
copy_test("cpu0", "gpu0")
@pytest.mark.require_ngpu(1)
def test_copy_d2h():
copy_test("gpu0", "cpu0")
@pytest.mark.require_ngpu(2)
def test_copy_d2d():
copy_test("gpu0", "gpu1")
copy_test("gpu0:0", "gpu0:1")
|
py | b4039fce3067350087ab91cdc62f106c7825a548 | # Copyright (c) 2009-2010 Mitch Garnaat http://garnaat.org/
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
"""
Represents a Subnet
"""
from boto.ec2.ec2object import TaggedEC2Object
class Subnet(TaggedEC2Object):
def __init__(self, connection=None):
super(Subnet, self).__init__(connection)
self.id = None
self.vpc_id = None
self.state = None
self.cidr_block = None
self.available_ip_address_count = 0
self.availability_zone = None
def __repr__(self):
return 'Subnet:%s' % self.id
def endElement(self, name, value, connection):
if name == 'subnetId':
self.id = value
elif name == 'vpcId':
self.vpc_id = value
elif name == 'state':
self.state = value
elif name == 'cidrBlock':
self.cidr_block = value
elif name == 'availableIpAddressCount':
self.available_ip_address_count = int(value)
elif name == 'availabilityZone':
self.availability_zone = value
else:
setattr(self, name, value)
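# Editor's note: hedged sketch, not part of boto's original file. Subnet
# instances are normally returned by a VPC connection query rather than built
# directly; the region name and filter value below are placeholders.
#
#   import boto.vpc
#   conn = boto.vpc.connect_to_region("us-east-1")
#   for subnet in conn.get_all_subnets(filters={"vpc-id": "vpc-12345678"}):
#       print(subnet.id, subnet.cidr_block, subnet.available_ip_address_count)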
|
py | b4039ff9e49dbd76587ffa2d75cb3153e0388a91 | #!/usr/bin/env python
"""
Utilities for downloading data from the Stanford LMRD, tokenizing, creating vocabulary, encoding and decoding sentences.
modified copy of https://github.com/tensorflow/models/blob/master/tutorials/rnn/translate/data_utils.py
__author__ = "Valentin Lievin, DTU, Denmark"
__copyright__ = "Copyright 2017, Valentin Lievin"
__credits__ = ["Valentin Lievin"]
__license__ = "GPL"
__version__ = "1.0.1"
__maintainer__ = "Valentin Lievin"
__email__ = "[email protected]"
__status__ = "Development"
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import gzip
import os
import re
import tarfile
import sys
from tqdm import tqdm
import spacy
from bs4 import BeautifulSoup
from nltk.tokenize import sent_tokenize
from six.moves import urllib
from tensorflow.python.platform import gfile
import tensorflow as tf
# Special vocabulary symbols - we always put them at the start.
_PAD = b"_PAD"
_GO = b"_GO"
_EOS = b"_EOS"
_UNK = b"_UNK"
_START_VOCAB = [_PAD, _GO, _EOS, _UNK]
PAD_ID = 0
GO_ID = 1
EOS_ID = 2
UNK_ID = 3
TEST_SET_LENGTH = 5000
# Regular expressions used to tokenize.
_WORD_SPLIT = re.compile(b"([.,!?\"':;)(])")
_DIGIT_RE = re.compile(br"\d")
# URLs for WMT data.
_DATA_URL_ = "http://ai.stanford.edu/~amaas/data/sentiment/aclImdb_v1.tar.gz"
_DATA_DIR_ = 'data_LMR/'
_SENTENCES_DIR = _DATA_DIR_+'sentences/'
_TEST_SENTENCES_DIR = _DATA_DIR_+'test_sentences/'
_TRAIN_DIRS_ = [_DATA_DIR_+ 'aclImdb/train/neg/', _DATA_DIR_ + 'aclImdb/train/pos/']
_TEST_DIRS_ = [_DATA_DIR_+ 'aclImdb/test/neg/', _DATA_DIR_ + 'aclImdb/test/pos/']
_VOCAB_DIR_ = _DATA_DIR_+'vocab.dat'
nlp = spacy.load('en')
character_pattern = re.compile('([^\s\w\'\.\!\,\?]|_)+')
special_character_pattern = re.compile(r"([\'\.\!\,\?])")
def to_unicode(text, encoding='utf8', errors='strict'):
"""Convert a string (bytestring in `encoding` or unicode), to unicode."""
if isinstance(text, unicode):
return text
return unicode(text, encoding, errors=errors)
def cleanHTML(raw_html):
"""
Remove HTML tags
"""
return BeautifulSoup(raw_html,"lxml").text
def sentence_tokenizer(text):
"""
split a text into a list of sentences
Args:
text: input text
Return:
list of sentences
"""
return sent_tokenize(text)
def character_tokenizer(sentence):
"""
character tokenizer
Remove non alphanumeric characters, lowercase and split
Args:
sentence: String. input to be processed
Return:
a list of characters
"""
# remove non alphanumeric characters
sentence = character_pattern.sub('', sentence)
# add spaces before and after special characters
sentence = special_character_pattern.sub(" \\1 ", sentence)
    #remove redundant spaces
sentence = re.sub(' +',' ',sentence)
# replace spaces with "_"
sentence = sentence.replace(' ', '_')
sentence= sentence[:len(sentence)-1]
# remove last space
return list(sentence.lower())
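# Editor's note: illustrative trace added for clarity, not in the original file.
# Spaces become "_", punctuation is padded with spaces, and the trailing
# character produced by that padding is dropped by the final slice, e.g.
#
#   character_tokenizer("Hi, you!")
#   -> ['h', 'i', '_', ',', '_', 'y', 'o', 'u', '_', '!']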
def maybe_download(directory, filename, url):
"""Download filename from url unless it's already in directory."""
if not os.path.exists(directory):
print("Creating directory %s" % directory)
os.mkdir(directory)
filepath = os.path.join(directory, filename)
if not os.path.exists(filepath):
print("Downloading %s to %s" % (url, filepath))
filepath, _ = urllib.request.urlretrieve(url, filepath)
statinfo = os.stat(filepath)
print("Successfully downloaded", filename, statinfo.st_size, "bytes")
return filepath
def gunzip_file(gz_path, new_path):
"""Unzips from gz_path into new_path."""
print("Unpacking %s to %s" % (gz_path, new_path))
with gzip.open(gz_path, "rb") as gz_file:
with open(new_path, "wb") as new_file:
for line in gz_file:
new_file.write(line)
def getData(directory):
"""Download the data unless it's already there"""
train_path = directory
corpus_file = maybe_download(directory, "LMRD.tar.gz",
_DATA_URL_)
if not os.path.isdir(_TRAIN_DIRS_[0]):
print("Extracting tar file %s" % corpus_file)
with tarfile.open(corpus_file, "r") as corpus_tar:
corpus_tar.extractall(directory)
else:
print("Data already downloaded.")
def create_vocabulary(vocabulary_path, data_paths, max_vocabulary_size,
tokenizer=None, normalize_digits=True):
"""Create vocabulary file (if it does not exist yet) from data file.
    Data files are supposed to be a list of files within the given list of directories. Each sentence is
tokenized and digits are normalized (if normalize_digits is set).
Vocabulary contains the most-frequent tokens up to max_vocabulary_size.
    We write it to vocabulary_path in a one-token-per-line format, so that later
    the token in the first line gets id=0, the token in the second line gets id=1, and so on.
Args:
vocabulary_path: path where the vocabulary will be created.
data_path: data file that will be used to create vocabulary.
max_vocabulary_size: limit on the size of the created vocabulary.
tokenizer: a function to use to tokenize each data sentence;
if None, basic_tokenizer will be used.
normalize_digits: Boolean; if true, all digits are replaced by 0s.
"""
if not gfile.Exists(vocabulary_path):
vocab = {}
files = []
for d in data_paths:
files += [d+f for f in os.listdir(d) ]
for one_file in tqdm(files):
with gfile.GFile(one_file, mode="rb") as f:
review = f.read()
tokens = tokenizer(review) if tokenizer else character_tokenizer(review)
for w in tokens:
word = _DIGIT_RE.sub(b"0", w) if normalize_digits else w
if word in vocab:
vocab[word] += 1
else:
vocab[word] = 1
vocab_list = _START_VOCAB + sorted(vocab, key=vocab.get, reverse=True)
if len(vocab_list) > max_vocabulary_size:
vocab_list = vocab_list[:max_vocabulary_size]
with gfile.GFile(vocabulary_path, mode="wb") as vocab_file:
for w in vocab_list:
vocab_file.write(w + b"\n")
else:
print("Vocabulary already created.")
def initialize_vocabulary(vocabulary_path):
"""Initialize vocabulary from file.
We assume the vocabulary is stored one-item-per-line, so a file:
dog
cat
will result in a vocabulary {"dog": 0, "cat": 1}, and this function will
also return the reversed-vocabulary ["dog", "cat"].
Args:
vocabulary_path: path to the file containing the vocabulary.
Returns:
a pair: the vocabulary (a dictionary mapping string to integers), and
the reversed vocabulary (a list, which reverses the vocabulary mapping).
Raises:
ValueError: if the provided vocabulary_path does not exist.
"""
if gfile.Exists(vocabulary_path):
rev_vocab = []
with gfile.GFile(vocabulary_path, mode="rb") as f:
rev_vocab.extend(f.readlines())
rev_vocab = [tf.compat.as_bytes(line.strip()) for line in rev_vocab]
vocab = dict([(x, y) for (y, x) in enumerate(rev_vocab)])
return vocab, rev_vocab
else:
raise ValueError("Vocabulary file %s not found.", vocabulary_path)
def sentence_to_token_ids(sentence, vocabulary,
tokenizer=None, normalize_digits=True):
"""Convert a string to list of integers representing token-ids.
For example, a sentence "I have a dog" may become tokenized into
["I", "have", "a", "dog"] and with vocabulary {"I": 1, "have": 2,
"a": 4, "dog": 7"} this function will return [1, 2, 4, 7].
Args:
sentence: the sentence in bytes format to convert to token-ids.
vocabulary: a dictionary mapping tokens to integers.
tokenizer: a function to use to tokenize each sentence;
if None, basic_tokenizer will be used.
normalize_digits: Boolean; if true, all digits are replaced by 0s.
Returns:
a list of integers, the token-ids for the sentence.
"""
if tokenizer:
words = tokenizer(sentence)
else:
words = character_tokenizer(sentence)
if not normalize_digits:
return [vocabulary.get(w, UNK_ID) for w in words]
# Normalize digits by 0 before looking words up in the vocabulary.
output = [GO_ID]
output += [vocabulary.get(_DIGIT_RE.sub(b"0", w), UNK_ID) for w in words]
output += [EOS_ID]
return output
def data_to_token_ids(data_paths, target_path, vocabulary_path,
tokenizer=None, normalize_digits=True):
"""Tokenize data file and turn into token-ids using given vocabulary file.
This function loads data line-by-line from data_path, calls the above
sentence_to_token_ids, and saves the result to target_path.
Sentiment scores are added using the file names ([[id]_[rating].txt])
See comment for sentence_to_token_ids on the details of token-ids format.
Args:
data_path: path to the data file in one-sentence-per-line format.
target_path: path where the file with token-ids will be created.
vocabulary_path: path to the vocabulary file.
tokenizer: a function to use to tokenize each sentence;
if None, basic_tokenizer will be used.
normalize_digits: Boolean; if true, all digits are replaced by 0s.
"""
if not os.path.exists(target_path):
os.makedirs(target_path)
if not gfile.Exists(target_path+"sentences.txt"):
print("Tokenizing data in %s" % data_paths)
vocab, _ = initialize_vocabulary(vocabulary_path)
files = []
for d in data_paths:
files += [d+f for f in os.listdir(d) ]
with gfile.GFile(target_path+"sentences.txt" , mode="w") as tokens_file:
with gfile.GFile(target_path+"sentiments.txt" , mode="w") as sentiments_files:
for one_file in tqdm(files):
with gfile.GFile(one_file, mode="rb") as f:
rating = one_file.split('/')[-1].split('.')[0].split('_')[-1]
review = cleanHTML( f.read() )
for sentence in sentence_tokenizer(review):
if len(sentence) > 3:
while sentence[0] == " ":
if len(sentence) > 2:
sentence = sentence[1:]
token_ids = sentence_to_token_ids(tf.compat.as_bytes(sentence), vocab,
tokenizer, normalize_digits)
tokens_file.write(str(rating) + '|' + " ".join([str(tok) for tok in token_ids]) + "\n")
#sentiments_files.write( str(rating) + "\n")
def moveLinesFromFileToFile(source_file_path, target_file_path, lines_to_keep):
"""
    Copy some lines from a file and append them to another file. Lines
    copied from the source file are deleted from the source. The parameter
lines_to_keep indicates the number of lines to keep in the source file
Args:
source_file_path: file to copy from
target_file_path: file to copy to
        lines_to_keep: number of lines to keep in the source file
"""
#num_lines = sum(1 for line in tf.gfile.GFile(source_file, mode="r"))
saved_lines = []
with tf.gfile.GFile(source_file_path, mode="r") as source_file:
with tf.gfile.GFile(target_file_path, mode="a") as target_file:
source = source_file.readline()
counter = 0
while source:
if counter < lines_to_keep:
saved_lines.append(source)
else:
target_file.write(source)
counter += 1
source = source_file.readline()
#delete target and rewrite lines to keep
os.remove(source_file_path)
with gfile.GFile(source_file_path, mode="w") as source_file:
for row in saved_lines:
source_file.write(row)
def prepare_data(vocabulary_size):
"""
Download the Large Movie Review Dataset, create the vocabulary
and convert every sentence in the dataset into list of ids
Args:
vocabulary_size: maximum number words in the vocabulary
"""
print("Downloading data from " + _DATA_DIR_ +"..")
getData(_DATA_DIR_)
print("Creating Vocabulary..")
create_vocabulary( _VOCAB_DIR_, _TRAIN_DIRS_, vocabulary_size )
print("Converting sentences to sequences of ids..")
data_to_token_ids( _TRAIN_DIRS_ , _SENTENCES_DIR, _VOCAB_DIR_ )
data_to_token_ids( _TEST_DIRS_ , _TEST_SENTENCES_DIR, _VOCAB_DIR_ )
print("Moving some line from test set to train set..")
moveLinesFromFileToFile(_TEST_SENTENCES_DIR+"sentences.txt", _SENTENCES_DIR+"sentences.txt", TEST_SET_LENGTH)
def read_data(max_size=None, max_sentence_size=None, min_sentence_size=10, test=False):
"""Read data from source.
Args:
max_size: maximum number of lines to read, all other will be ignored;
if 0 or None, data files will be read completely (no limit).
max_sentence_size: maximum size of sentences
min_sentence_size: minimum sentence length
        test (boolean): whether to read the test split instead of the training split
Returns:
        sentences, ratings: token-id sequences and the matching review ratings
"""
sentences = []
ratings = []
PATH = _SENTENCES_DIR
if test:
PATH = _TEST_SENTENCES_DIR
with tf.gfile.GFile(PATH +'sentences.txt', mode="r") as source_file:
source = source_file.readline()
counter = 0
while source and (not max_size or counter < max_size):
rating = int(source.split('|')[0])
source_ids = [int(x) for x in source.split('|')[1].split()]
if len(source_ids) < max_sentence_size and len(source_ids) > min_sentence_size:
sentences.append(source_ids)
ratings.append(rating)
counter += 1
if counter % 10000 == 0 and counter != 0:
print(" reading data line %d" % counter)
sys.stdout.flush()
source = source_file.readline()
return sentences,ratings
class EncoderDecoder:
"""
A class to encode text to a sequence of ids or to decode a sequence of ids to text
"""
def __init__(self):
"""
Load vocabulary
"""
self.vocab,self.rev_vocab = initialize_vocabulary(_VOCAB_DIR_)
def encode(self, sentence):
"""
Encode a sentence to a sequence of ids
"""
return sentence_to_token_ids(sentence, self.vocab)
def encodeForTraining(self,sentence):
"""
Encode a sentence at the character and word level and return training parameters
input:
Sentence (String): input sentence
Returns:
seq_ids: list of ids
seq_len : length of the sentence
words_endings: list of indexes corresponding to the end of the words
            seq_words_len: length of the sentence in words
"""
seq_ids = self.encode(sentence)
seq_len = len(seq_ids)
space_symbol = self.encode("I am")[1]
word_delimiters = [ EOS_ID, GO_ID, space_symbol ]
words_endings = [i for i, j in enumerate(seq_ids) if j in word_delimiters]
words_endings = [ [0,x] for x in words_endings ]
seq_words_len = len(words_endings)
return seq_ids,seq_len,words_endings,seq_words_len
def decode(self, seq):
"""
Decode a sequence of ids to a sentence
"""
return [ self.rev_vocab[int(el)] for el in seq ]
def prettyDecode(self,seq):
"""
decode and return a nicely formatted string
"""
s = "".join(self.decode(seq))
s = s.replace("_GO", "" )
s = s.replace("_EOS", "" )
s = s.replace("_PAD", "" )
s = s.replace("_", " " )
s = s.replace(" ,", "," )
s = s.replace(" .", "." )
s = s.replace(" !", "!" )
s = s.replace(" ?", "?" )
s = s.replace(" '", "'" )
for u in ['.','?','!']:
if u in s:
s = s.split(u)[0]+u
return s
def vocabularySize(self):
"""
return the number of unique symbols in the vocabulary (useful for oneHot encoding)
"""
return len(self.vocab.keys())
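# Editor's note: hedged usage sketch, not part of the original file. It assumes
# prepare_data() has already written the vocabulary file at _VOCAB_DIR_;
# otherwise EncoderDecoder() raises ValueError when loading the vocabulary.
#
#   enc_dec = EncoderDecoder()
#   ids = enc_dec.encode("What a great movie!")
#   print(enc_dec.prettyDecode(ids))
#   seq_ids, seq_len, word_ends, n_words = enc_dec.encodeForTraining("What a great movie!")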
|
py | b403a213ed153d1e6f306e338231c251f8002d04 | # Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# Copyright 2011 Justin Santa Barbara
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Generic Node base class for all workers that run on hosts."""
import inspect
import os
import random
import socket
from oslo_config import cfg
from oslo_log import log
from oslo_service import service
from oslo_service import wsgi
from oslo_service import loopingcall
from oslo_utils import importutils
import oslo_messaging as messaging
from dolphin import context
from dolphin import exception
from dolphin import rpc
from dolphin import coordination
from dolphin.alert_manager import constants
LOG = log.getLogger(__name__)
service_opts = [
cfg.BoolOpt('periodic_enable',
default=True,
help='If enable periodic task.'),
cfg.IntOpt('periodic_interval',
default=60,
help='Seconds between running periodic tasks.'),
cfg.IntOpt('periodic_fuzzy_delay',
default=60,
help='Range of seconds to randomly delay when starting the '
'periodic task scheduler to reduce stampeding. '
'(Disable by setting to 0)'),
cfg.HostAddressOpt('dolphin_listen',
default="::",
help='IP address for Dolphin API to listen '
'on.'),
cfg.PortOpt('dolphin_listen_port',
default=8190,
help='Port for Dolphin API to listen on.'),
cfg.IntOpt('dolphin_workers',
default=1,
help='Number of workers for Dolphin API service.'),
cfg.BoolOpt('dolphin_use_ssl',
default=False,
help='Wraps the socket in a SSL context if True is set. '
'A certificate file and key file must be specified.'),
cfg.HostAddressOpt('trap_receiver_address',
default=constants.DEF_TRAP_RECV_ADDR,
help='IP address at which trap receiver listens.'),
cfg.PortOpt('trap_receiver_port',
default=constants.DEF_TRAP_RECV_PORT,
help='Port at which trap receiver listens.'),
cfg.StrOpt('snmp_mib_path',
default=constants.SNMP_MIB_PATH,
help='Path at which mib files to be loaded are placed.'),
]
CONF = cfg.CONF
CONF.register_opts(service_opts)
class Service(service.Service):
"""Service object for binaries running on hosts.
A service takes a manager and enables rpc by listening to queues based
on topic. It also periodically runs tasks on the manager and reports
    its state to the database services table.
"""
def __init__(self, host, binary, topic, manager, periodic_enable=None,
periodic_interval=None, periodic_fuzzy_delay=None,
service_name=None, coordination=False, *args, **kwargs):
super(Service, self).__init__()
if not rpc.initialized():
rpc.init(CONF)
self.host = host
self.binary = binary
self.topic = topic
self.manager_class_name = manager
manager_class = importutils.import_class(self.manager_class_name)
self.manager = manager_class(host=self.host,
service_name=service_name,
*args, **kwargs)
self.periodic_enable = periodic_enable
self.periodic_interval = periodic_interval
self.periodic_fuzzy_delay = periodic_fuzzy_delay
self.saved_args, self.saved_kwargs = args, kwargs
self.timers = []
self.coordinator = coordination
def start(self):
if self.coordinator:
coordination.LOCK_COORDINATOR.start()
LOG.info('Starting %(topic)s node.', {'topic': self.topic})
LOG.debug("Creating RPC server for service %s.", self.topic)
target = messaging.Target(topic=self.topic, server=self.host)
endpoints = [self.manager]
endpoints.extend(self.manager.additional_endpoints)
self.rpcserver = rpc.get_server(target, endpoints)
self.rpcserver.start()
self.manager.init_host()
if self.periodic_interval:
if self.periodic_fuzzy_delay:
initial_delay = random.randint(0, self.periodic_fuzzy_delay)
else:
initial_delay = None
periodic = loopingcall.FixedIntervalLoopingCall(
self.periodic_tasks)
periodic.start(interval=self.periodic_interval,
initial_delay=initial_delay)
self.timers.append(periodic)
def __getattr__(self, key):
manager = self.__dict__.get('manager', None)
return getattr(manager, key)
@classmethod
def create(cls, host=None, binary=None, topic=None, manager=None,
periodic_enable=None, periodic_interval=None,
periodic_fuzzy_delay=None, service_name=None,
coordination=False):
"""Instantiates class and passes back application object.
:param host: defaults to CONF.host
:param binary: defaults to basename of executable
:param topic: defaults to bin_name - 'dolphin-' part
:param manager: defaults to CONF.<topic>_manager
:param periodic_enable: defaults to CONF.periodic_enable
:param periodic_interval: defaults to CONF.periodic_interval
:param periodic_fuzzy_delay: defaults to CONF.periodic_fuzzy_delay
"""
if not host:
host = CONF.host
if not binary:
binary = os.path.basename(inspect.stack()[-1][1])
if not topic:
topic = binary
if not manager:
subtopic = topic.rpartition('dolphin-')[2]
manager = CONF.get('%s_manager' % subtopic, None)
if periodic_enable is None:
periodic_enable = CONF.periodic_enable
if periodic_interval is None:
periodic_interval = CONF.periodic_interval
if periodic_fuzzy_delay is None:
periodic_fuzzy_delay = CONF.periodic_fuzzy_delay
service_obj = cls(host, binary, topic, manager,
periodic_enable=periodic_enable,
periodic_interval=periodic_interval,
periodic_fuzzy_delay=periodic_fuzzy_delay,
service_name=service_name,
coordination=coordination)
return service_obj
def kill(self):
"""Destroy the service object in the datastore."""
self.stop()
def stop(self):
# Try to shut the connection down, but if we get any sort of
# errors, go ahead and ignore them.. as we're shutting down anyway
try:
self.rpcserver.stop()
except Exception:
pass
for x in self.timers:
try:
x.stop()
except Exception:
pass
if self.coordinator:
try:
coordination.LOCK_COORDINATOR.stop()
except Exception:
LOG.exception("Unable to stop the Tooz Locking "
"Coordinator.")
self.timers = []
super(Service, self).stop()
def wait(self):
for x in self.timers:
try:
x.wait()
except Exception:
pass
def periodic_tasks(self, raise_on_error=False):
"""Tasks to be run at a periodic interval."""
ctxt = context.get_admin_context()
self.manager.periodic_tasks(ctxt, raise_on_error=raise_on_error)
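# Editor's note: hedged sketch, not part of the original module. A binary entry
# point typically builds and launches the service roughly like this; the
# 'dolphin-task' binary name is a placeholder.
#
#   server = Service.create(binary='dolphin-task', coordination=True)
#   launcher = process_launcher()
#   launcher.launch_service(server)
#   launcher.wait()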
class AlertMngrService(service.Service):
"""Service object for triggering trap receiver functionalities.
"""
def __init__(self, trap_receiver_address=None,
trap_receiver_port=None, snmp_mib_path=None, trap_receiver_class=None):
super(AlertMngrService, self).__init__()
if not trap_receiver_address:
trap_receiver_address = CONF.trap_receiver_address
if not trap_receiver_port:
trap_receiver_port = CONF.trap_receiver_port
if not snmp_mib_path:
snmp_mib_path = CONF.snmp_mib_path
if not trap_receiver_class:
trap_receiver_class = CONF.trap_receiver_class
manager_class = importutils.import_class(trap_receiver_class)
self.manager = manager_class(trap_receiver_address,
trap_receiver_port, snmp_mib_path)
def start(self):
"""Trigger trap receiver creation"""
try:
self.manager.start()
except Exception:
LOG.exception("Failed to start alert manager service.")
def kill(self):
"""Destroy the service object in the datastore."""
self.stop()
def stop(self):
"""Calls the shutdown flow of the service."""
self.manager.stop()
class WSGIService(service.ServiceBase):
"""Provides ability to launch API from a 'paste' configuration."""
def __init__(self, name, loader=None):
"""Initialize, but do not start the WSGI server.
:param name: The name of the WSGI server given to the loader.
:param loader: Loads the WSGI application using the given name.
:returns: None
"""
self.name = name
self.manager = self._get_manager()
self.loader = loader or wsgi.Loader(CONF)
if not rpc.initialized():
rpc.init(CONF)
self.app = self.loader.load_app(name)
self.host = getattr(CONF, '%s_listen' % name, "0.0.0.0")
self.port = getattr(CONF, '%s_listen_port' % name, 0)
self.workers = getattr(CONF, '%s_workers' % name, None)
self.use_ssl = getattr(CONF, '%s_use_ssl' % name, False)
if self.workers is not None and self.workers < 1:
LOG.warning(
"Value of config option %(name)s_workers must be integer "
"greater than 1. Input value ignored.", {'name': name})
# Reset workers to default
self.workers = None
self.server = wsgi.Server(
CONF,
name,
self.app,
host=self.host,
port=self.port,
use_ssl=self.use_ssl
)
def _get_manager(self):
"""Initialize a Manager object appropriate for this service.
Use the service name to look up a Manager subclass from the
configuration and initialize an instance. If no class name
is configured, just return None.
:returns: a Manager instance, or None.
"""
fl = '%s_manager' % self.name
if fl not in CONF:
return None
manager_class_name = CONF.get(fl, None)
if not manager_class_name:
return None
manager_class = importutils.import_class(manager_class_name)
return manager_class()
def start(self):
"""Start serving this service using loaded configuration.
Also, retrieve updated port number in case '0' was passed in, which
indicates a random port should be used.
:returns: None
"""
if self.manager:
self.manager.init_host()
self.server.start()
self.port = self.server.port
def stop(self):
"""Stop serving this API.
:returns: None
"""
self.server.stop()
def wait(self):
"""Wait for the service to stop serving this API.
:returns: None
"""
self.server.wait()
def reset(self):
"""Reset server greenpool size to default.
:returns: None
"""
self.server.reset()
def process_launcher():
# return service.ProcessLauncher(CONF, restart_method='mutate')
return service.ServiceLauncher(CONF, restart_method='reload')
# # NOTE(vish): the global launcher is to maintain the existing
# # functionality of calling service.serve +
# # service.wait
# _launcher = None
#
#
# def serve(server, workers=None):
# global _launcher
# if _launcher:
# raise RuntimeError('serve() can only be called once')
# _launcher = service.launch(CONF, server, workers=workers,
# restart_method='mutate')
#
#
# def wait():
# CONF.log_opt_values(LOG, log.DEBUG)
# try:
# _launcher.wait()
# except KeyboardInterrupt:
# _launcher.stop()
# rpc.cleanup()
|
py | b403a2c5f296ce20326e2c1e3f4e695efd8db82a | import numpy as np
import emcee
from matplotlib import pyplot as plt
import corner
import sys
import os
import time
from pc_path import definir_path
path_git, path_datos_global = definir_path()
os.chdir(path_git)
sys.path.append('./Software/Funcionales/')
from funciones_analisis_cadenas import graficar_cadenas,graficar_contornos,graficar_taus_vs_n
#%%
os.chdir(path_git+'/Software/Estadística/Resultados_simulaciones')
with np.load('valores_medios_HS_AGN_5params_nuisance.npz') as data:
sol = data['sol']
#%%
os.chdir(path_datos_global+'/Resultados_cadenas')
filename = "sample_HS_AGN_5params_nuisance.h5"
reader = emcee.backends.HDFBackend(filename)
# Some values
tau = reader.get_autocorr_time()
burnin = int(2 * np.max(tau))
thin = int(0.5 * np.min(tau))
samples = reader.get_chain(discard=burnin, flat=True, thin=thin)
print(tau)
#%%
%matplotlib qt5
graficar_cadenas(reader,
labels = ['omega_m','b','beta','gamma','delta'])
#%%
#burnin=1500
#thin=50
graficar_contornos(reader,params_truths=sol,discard=burnin,thin=thin,
labels = ['omega_m','b','beta','gamma','delta'])
#%%
#Note: this always reports convergence, even when the chain has not converged
#plt.figure()
#graficar_taus_vs_n(reader,num_param=0,threshold=1000)
#graficar_taus_vs_n(reader,num_param=1,threshold=1000)
#%% Print the values!
#thin=1
from IPython.display import display, Math
samples = reader.get_chain(discard=burnin, flat=True, thin=thin)
labels = ['omega_m','b','beta','gamma','delta']
len_chain,nwalkers,ndim=reader.get_chain().shape
print(len_chain)
for i in range(ndim):
mcmc = np.percentile(samples[:, i], [16, 50, 84])
    mcmc[1]=sol[i] # Mati's correction: use mu (the best-fit value) instead of the 50th percentile
q = np.diff(mcmc)
txt = "\mathrm{{{3}}} = {0:.3f}_{{-{1:.3f}}}^{{+{2:.3f}}}"
txt = txt.format(mcmc[1], q[0], q[1], labels[i])
display(Math(txt))
#%%
betas_2_unflitered = samples[:, 2]
gammas_unflitered = samples[:, 3]
#burnin = 1500
#thin = 15
def filtrar_puntos(sample, burnin=0,thin=1):
sample_2 = sample[burnin:]
sample_3 =[]
for i in range(len(sample_2)):
if (i%thin==0):
sample_3.append(sample_2[i])
return np.array(sample_3)
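# Editor's note: illustrative check, not in the original script. filtrar_puntos
# drops the first `burnin` samples and then keeps every `thin`-th one, e.g.
#
#   filtrar_puntos(np.arange(10), burnin=2, thin=3)  # -> array([2, 5, 8])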
betas_2 = filtrar_puntos(betas_2_unflitered, burnin=burnin,thin=thin)
gammas = filtrar_puntos(gammas_unflitered, burnin=burnin,thin=thin)
len(betas_2),len(gammas)
#%%
betas = betas_2 + (gammas-1) * (np.log10(4*np.pi) - 2 * np.log10(70))
np.mean(betas)
np.std(betas)
beta_posta = np.random.normal(7.735,0.244,10**7)
plt.close()
plt.figure()
plt.title('Hu-Sawicki')
plt.xlabel(r'$\beta$')
plt.hist(betas,density=True,bins=round(np.sqrt(len(betas))),label=r'$\beta_{propagacion}$')
plt.hist(beta_posta,density=True,bins=round(np.sqrt(len(beta_posta))),label=r'$\beta_{paper}$')
plt.grid(True)
plt.legend()
plt.savefig( '/home/matias/propagacion_beta_HS.png')
#%%
mcmc = np.percentile(betas, [16, 50, 84]) # Consistent at 1 sigma :)
q = np.diff(mcmc)
txt = "\mathrm{{{3}}} = {0:.3f}_{{-{1:.3f}}}^{{+{2:.3f}}}"
txt = txt.format(mcmc[1], q[0], q[1], r'\beta')
display(Math(txt))
txt = "\mathrm{{{2}}} = {0:.3f}\pm{{{1:.3f}}}"
txt = txt.format(np.mean(beta_posta), np.std(beta_posta), r'\beta')
display(Math(txt))
#%%
gamma_posta = np.random.normal(0.648,0.007,10**7)
plt.close()
plt.figure()
plt.title('Hu-Sawicki')
plt.xlabel(r'$\gamma$')
plt.hist(gammas,density=True,bins=round(np.sqrt(len(gammas))),label=r'$\gamma_{cadenas}$')
plt.hist(gamma_posta,density=True,bins=round(np.sqrt(len(gamma_posta))),label=r'$\gamma_{paper}$')
plt.grid(True)
plt.legend()
plt.savefig( '/home/matias/propagacion_gamma_HS.png')
mcmc = np.percentile(gammas, [16, 50, 84]) # Consistent at 1 sigma :)
q = np.diff(mcmc)
txt = "\mathrm{{{3}}} = {0:.3f}_{{-{1:.3f}}}^{{+{2:.3f}}}"
txt = txt.format(mcmc[1], q[0], q[1], r'\gamma')
display(Math(txt))
txt = "\mathrm{{{2}}} = {0:.3f}\pm{{{1:.3f}}}"
txt = txt.format(np.mean(gamma_posta), np.std(gamma_posta), r'\gamma')
display(Math(txt))
|
py | b403a32c22411d9547afd8cc39a89c4fb345577f | import datetime
import logging
import typing as t
import dateutil.parser
import flask
from apd.sensors import cli
from apd.sensors.base import HistoricalSensor
from apd.sensors.exceptions import DataCollectionError
from .base import require_api_key
version = flask.Blueprint(__name__, __name__)
logger = logging.getLogger(__name__)
@version.route("/sensors/")
@version.route("/sensors/<sensor_id>")
@require_api_key
def sensor_values(sensor_id=None) -> t.Tuple[t.Dict[str, t.Any], int, t.Dict[str, str]]:
headers = {"Content-Security-Policy": "default-src 'none'"}
sensors = []
errors = []
for sensor in cli.get_sensors():
now = datetime.datetime.now()
if sensor_id and sensor_id != sensor.name:
continue
try:
try:
value = sensor.value()
except Exception as err:
if isinstance(err, DataCollectionError):
# We allow data collection errors
message = str(err)
else:
# Other errors shouldn't be published, but should be logged
# Don't refuse to service the request in this case
message = "Unhandled error"
logger.error(f"Unhandled error while handling {sensor.name}")
error = {
"id": sensor.name,
"title": sensor.title,
"collected_at": now.isoformat(),
"error": message,
}
errors.append(error)
continue
sensor_data = {
"id": sensor.name,
"title": sensor.title,
"value": sensor.to_json_compatible(value),
"human_readable": sensor.format(value),
"collected_at": now.isoformat(),
}
sensors.append(sensor_data)
except NotImplementedError:
pass
data = {"sensors": sensors, "errors": errors}
return data, 200, headers
@version.route("/info/sensors")
@require_api_key
def sensor_types(sensor_id=None) -> t.Tuple[t.Dict[str, t.Any], int, t.Dict[str, str]]:
headers = {"Content-Security-Policy": "default-src 'none'"}
known_sensors = {sensor.name: sensor.title for sensor in cli.get_sensors()}
return known_sensors, 200, headers
@version.route("/sensors/<sensor_id>/historical")
@version.route("/sensors/<sensor_id>/historical/<start>")
@version.route("/sensors/<sensor_id>/historical/<start>/<end>")
@version.route("/historical")
@version.route("/historical/<start>")
@version.route("/historical/<start>/<end>")
@require_api_key
def historical_values(
start: str = None, end: str = None, sensor_id: str = None,
) -> t.Tuple[t.Dict[str, t.Any], int, t.Dict[str, str]]:
sensors = []
known_sensors = {sensor.name: sensor for sensor in cli.get_sensors()}
if sensor_id and sensor_id in known_sensors:
known_sensors = {sensor_id: known_sensors[sensor_id]}
headers = {"Content-Security-Policy": "default-src 'none'"}
if start:
start_dt = dateutil.parser.parse(start)
else:
start_dt = dateutil.parser.parse("1900-01-01")
if end:
end_dt = dateutil.parser.parse(end)
else:
end_dt = datetime.datetime.now()
try:
from apd.sensors.database import sensor_values as sensor_values_table
from apd.sensors.wsgi import db
session = db.session
except (ImportError, AttributeError):
session = None
else:
db_session = db.session
query = db_session.query(sensor_values_table)
query = query.filter(sensor_values_table.c.collected_at >= start_dt)
query = query.filter(sensor_values_table.c.collected_at <= end_dt)
for data in query:
if data.sensor_name not in known_sensors:
continue
sensor = known_sensors[data.sensor_name]
sensor_data = {
"id": sensor.name,
"title": sensor.title,
"value": data.data,
"human_readable": sensor.format(sensor.from_json_compatible(data.data)),
"collected_at": data.collected_at.isoformat(),
}
sensors.append(sensor_data)
for sensor in known_sensors.values():
if isinstance(sensor, HistoricalSensor):
for date, value in sensor.historical(start_dt, end_dt):
sensor_data = {
"id": sensor.name,
"title": sensor.title,
"value": value,
"human_readable": sensor.format(sensor.from_json_compatible(value)),
"collected_at": date.isoformat(),
}
sensors.append(sensor_data)
data = {"sensors": sensors}
try:
return data, 200, headers
finally:
if session is not None:
session.close()
@version.route("/deployment_id")
def deployment_id() -> t.Tuple[t.Dict[str, t.Any], int, t.Dict[str, str]]:
headers = {"Content-Security-Policy": "default-src 'none'"}
data = {"deployment_id": flask.current_app.config["APD_SENSORS_DEPLOYMENT_ID"]}
return data, 200, headers
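# Editor's note: hedged sketch, not part of the original module. A host app
# would mount this blueprint roughly as below; the url_prefix and deployment id
# shown here are assumptions for illustration only.
#
#   app = flask.Flask(__name__)
#   app.config["APD_SENSORS_DEPLOYMENT_ID"] = "dev-deployment"
#   app.register_blueprint(version, url_prefix="/v/3.0")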
|
py | b403a3fb932232baca72a6ed65673ae5c30d2458 | # -*- coding: utf-8 -*-
from cms.toolbar.base import Toolbar
from cms.toolbar.constants import LEFT, RIGHT
from cms.toolbar.items import (Anchor, Switcher, TemplateHTML, ListItem, List,
GetButton)
from cms.utils.moderator import page_moderator_state, I_APPROVE
from django import forms
from django.conf import settings
from django.contrib.auth import authenticate, login, logout
from django.core.urlresolvers import reverse
from django.http import HttpResponseRedirect
from django.utils.translation import ugettext_lazy as _
import urllib
def _get_page_admin_url(context, toolbar, **kwargs):
return reverse('admin:cms_page_change', args=(toolbar.request.current_page.pk,))
def _get_page_history_url(context, toolbar, **kwargs):
return reverse('admin:cms_page_history', args=(toolbar.request.current_page.pk,))
def _get_add_child_url(context, toolbar, **kwargs):
data = {
'position': 'last-child',
'target': toolbar.request.current_page.pk,
}
args = urllib.urlencode(data)
return '%s?%s' % (reverse('admin:cms_page_add'), args)
def _get_add_sibling_url(context, toolbar, **kwargs):
data = {
'position': 'last-child',
}
if toolbar.request.current_page.parent_id:
data['target'] = toolbar.request.current_page.parent_id
args = urllib.urlencode(data)
return '%s?%s' % (reverse('admin:cms_page_add'), args)
def _get_delete_url(context, toolbar, **kwargs):
return reverse('admin:cms_page_delete', args=(toolbar.request.current_page.pk,))
def _get_approve_url(context, toolbar, **kwargs):
return reverse('admin:cms_page_approve_page', args=(toolbar.request.current_page.pk,))
def _get_publish_url(context, toolbar, **kwargs):
return reverse('admin:cms_page_publish_page', args=(toolbar.request.current_page.pk,))
class CMSToolbarLoginForm(forms.Form):
cms_username = forms.CharField()
cms_password = forms.CharField()
class CMSToolbar(Toolbar):
"""
The default CMS Toolbar
"""
def __init__(self, request):
super(CMSToolbar, self).__init__(request)
self.init()
def init(self):
self.is_staff = self.request.user.is_staff
self.can_change = (self.request.current_page and
self.request.current_page.has_change_permission(self.request))
self.edit_mode_switcher = Switcher(LEFT, 'editmode', 'edit', 'edit-off',
_('Edit mode'))
self.edit_mode = self.is_staff and self.edit_mode_switcher.get_state(self.request)
def get_items(self, context, **kwargs):
"""
Get the CMS items on the toolbar
"""
items = [
Anchor(LEFT, 'logo', _('django CMS'), 'https://www.django-cms.org'),
]
self.page_states = []
if self.is_staff:
items.append(
self.edit_mode_switcher
)
if self.request.current_page:
states = self.request.current_page.last_page_states()
has_states = states.exists()
self.page_states = states
if has_states:
items.append(
TemplateHTML(LEFT, 'status',
'cms/toolbar/items/status.html')
)
# publish button
if self.edit_mode and settings.CMS_MODERATOR:
moderator_state = page_moderator_state(self.request, self.request.current_page)
should_approve = moderator_state['state'] >= I_APPROVE
has_perms = self.request.current_page.has_moderate_permission(self.request)
if should_approve and has_perms:
label = moderator_state['label']
urlgetter = _get_approve_url
elif has_perms:
label = _("Publish")
urlgetter = _get_publish_url
else:
urlgetter = _get_approve_url
label = _("Request Approval")
items.append(
GetButton(RIGHT, 'moderator', label, urlgetter)
)
# The 'templates' Menu
items.append(self.get_template_menu(context, self.can_change, self.is_staff))
# The 'page' Menu
items.append(self.get_page_menu(context, self.can_change, self.is_staff))
# The 'Admin' Menu
items.append(self.get_admin_menu(context, self.can_change, self.is_staff))
items.append(
GetButton(RIGHT, 'logout', _('Logout'), '?cms-toolbar-logout',
'cms/images/toolbar/icons/icon_lock.png')
)
elif not self.request.user.is_authenticated():
items.append(
TemplateHTML(LEFT, 'login', 'cms/toolbar/items/login.html')
)
else:
items.append(
GetButton(RIGHT, 'logout', _('Logout'), '?cms-toolbar-logout',
'cms/images/toolbar/icons/icon_lock.png')
)
return items
def get_template_menu(self, context, can_change, is_staff):
menu_items = []
url = reverse('admin:cms_page_change_template', args=(self.request.current_page.pk,))
for path, name in settings.CMS_TEMPLATES:
args = urllib.urlencode({'template': path})
css = 'template'
if self.request.current_page.get_template() == path:
css += ' active'
menu_items.append(
ListItem(css, name, '%s?%s' % (url, args), 'POST'),
)
return List(RIGHT, 'templates', _('Template'),
'', items=menu_items)
def get_page_menu(self, context, can_change, is_staff):
"""
Builds the 'page menu'
"""
menu_items = [
ListItem('overview', _('Move/add Pages'),
reverse('admin:cms_page_changelist'),
icon='cms/images/toolbar/icons/icon_sitemap.png'),
]
menu_items.append(
ListItem('addchild', _('Add child page'),
_get_add_child_url,
icon='cms/images/toolbar/icons/icon_child.png')
)
menu_items.append(
ListItem('addsibling', _('Add sibling page'),
_get_add_sibling_url,
icon='cms/images/toolbar/icons/icon_sibling.png')
)
menu_items.append(
ListItem('delete', _('Delete Page'), _get_delete_url,
icon='cms/images/toolbar/icons/icon_delete.png')
)
return List(RIGHT, 'page', _('Page'),
'cms/images/toolbar/icons/icon_page.png', items=menu_items)
def get_admin_menu(self, context, can_change, is_staff):
"""
Builds the 'admin menu' (the one with the cogwheel)
"""
admin_items = [
ListItem('admin', _('Site Administration'),
reverse('admin:index'),
icon='cms/images/toolbar/icons/icon_admin.png'),
]
if can_change:
admin_items.append(
ListItem('settings', _('Page Settings'),
_get_page_admin_url,
icon='cms/images/toolbar/icons/icon_page.png')
)
if 'reversion' in settings.INSTALLED_APPS:
admin_items.append(
ListItem('history', _('View History'),
_get_page_history_url,
icon='cms/images/toolbar/icons/icon_history.png')
)
return List(RIGHT, 'admin', _('Admin'), 'cms/images/toolbar/icons/icon_admin.png',
items=admin_items)
def request_hook(self):
if self.request.method != 'POST':
return self._request_hook_get()
else:
return self._request_hook_post()
def _request_hook_get(self):
if 'cms-toolbar-logout' in self.request.GET:
logout(self.request)
return HttpResponseRedirect(self.request.path)
def _request_hook_post(self):
# login hook
login_form = CMSToolbarLoginForm(self.request.POST)
if login_form.is_valid():
username = login_form.cleaned_data['cms_username']
password = login_form.cleaned_data['cms_password']
user = authenticate(username=username, password=password)
if user:
login(self.request, user)
self.init()
|
py | b403a4f1fd45b114b831039da2c5e739dccc4c64 | #!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "wurstfabrik.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
|
py | b403a5bad1baa7803f772c04303176ba0f235d43 | ###############################################################################
#
# Custom - A class for writing the Excel XLSX Custom Property file.
#
# Copyright 2013-2018, John McNamara, [email protected]
#
# Package imports.
from . import xmlwriter
class Custom(xmlwriter.XMLwriter):
"""
A class for writing the Excel XLSX Custom Workbook Property file.
"""
###########################################################################
#
# Public API.
#
###########################################################################
def __init__(self):
"""
Constructor.
"""
super(Custom, self).__init__()
self.properties = []
self.pid = 1
def _set_properties(self, properties):
# Set the document properties.
self.properties = properties
###########################################################################
#
# Private API.
#
###########################################################################
def _assemble_xml_file(self):
# Assemble and write the XML file.
# Write the XML declaration.
self._xml_declaration()
self._write_properties()
self._xml_end_tag('Properties')
# Close the file.
self._xml_close()
###########################################################################
#
# XML methods.
#
###########################################################################
def _write_properties(self):
# Write the <Properties> element.
schema = 'http://schemas.openxmlformats.org/officeDocument/2006/'
xmlns = schema + 'custom-properties'
xmlns_vt = schema + 'docPropsVTypes'
attributes = [
('xmlns', xmlns),
('xmlns:vt', xmlns_vt),
]
self._xml_start_tag('Properties', attributes)
for custom_property in self.properties:
# Write the property element.
self._write_property(custom_property)
def _write_property(self, custom_property):
# Write the <property> element.
fmtid = '{D5CDD505-2E9C-101B-9397-08002B2CF9AE}'
name, value, property_type = custom_property
self.pid += 1
attributes = [
('fmtid', fmtid),
('pid', self.pid),
('name', name),
]
self._xml_start_tag('property', attributes)
if property_type == 'number_int':
# Write the vt:i4 element.
self._write_vt_i4(value)
elif property_type == 'number':
# Write the vt:r8 element.
self._write_vt_r8(value)
elif property_type == 'date':
# Write the vt:filetime element.
self._write_vt_filetime(value)
elif property_type == 'bool':
# Write the vt:bool element.
self._write_vt_bool(value)
else:
# Write the vt:lpwstr element.
self._write_vt_lpwstr(value)
self._xml_end_tag('property')
def _write_vt_lpwstr(self, value):
# Write the <vt:lpwstr> element.
self._xml_data_element('vt:lpwstr', value)
def _write_vt_filetime(self, value):
# Write the <vt:filetime> element.
self._xml_data_element('vt:filetime', value)
def _write_vt_i4(self, value):
# Write the <vt:i4> element.
self._xml_data_element('vt:i4', value)
def _write_vt_r8(self, value):
# Write the <vt:r8> element.
self._xml_data_element('vt:r8', value)
def _write_vt_bool(self, value):
# Write the <vt:bool> element.
if value:
value = 'true'
else:
value = 'false'
self._xml_data_element('vt:bool', value)
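# Editor's note: hedged usage sketch, not part of the original file. This class
# is normally driven through the public Workbook API; the calls below assume
# xlsxwriter's documented set_custom_property() method and its type inference.
#
#   import xlsxwriter
#   workbook = xlsxwriter.Workbook("custom_props.xlsx")
#   workbook.set_custom_property("Checked by", "Jane")       # -> vt:lpwstr
#   workbook.set_custom_property("Document number", 12345)   # -> vt:i4
#   workbook.set_custom_property("Reference", 1.2345)        # -> vt:r8
#   workbook.set_custom_property("Has review", True)         # -> vt:bool
#   workbook.close()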
|
py | b403a60ebf6dd605a4952e120a644bcd9cf15576 | """Command-line interface and corresponding API for CNVkit."""
# NB: argparse CLI definitions and API functions are interwoven:
# "_cmd_*" handles I/O and arguments processing for the command
# "do_*" runs the command's functionality as an API
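# Illustrative sketch of that convention (function and argument names below are
# hypothetical, not part of CNVkit):
#
#     def do_frobnicate(cnarr, threshold=0.5):
#         """Pure API: operates on in-memory objects and returns a result."""
#         ...
#
#     def _cmd_frobnicate(args):
#         """CLI wrapper: reads input files, calls do_frobnicate, writes output."""
#         result = do_frobnicate(read_cna(args.filename), args.threshold)
#         tabio.write(result, args.output)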
import argparse
import logging
import os
import sys
# Filter spurious Cython warnings re: numpy
# https://github.com/numpy/numpy/pull/432
import warnings
warnings.filterwarnings('ignore', message="numpy.dtype size changed")
warnings.filterwarnings('ignore', message="numpy.ufunc size changed")
# Choose a safe plotting backend supported by the current platform
import matplotlib
if not os.environ.get('DISPLAY'):
# If running headless, use a GUI-less backend
matplotlib.use('Agg')
elif sys.platform == 'darwin':
# Prevent crash on OS X
# https://github.com/MTG/sms-tools/issues/36
matplotlib.use('TkAgg')
from matplotlib import pyplot
from matplotlib.backends.backend_pdf import PdfPages
pyplot.ioff()
import pandas as pd
from skgenome import tabio, GenomicArray as _GA
from skgenome.rangelabel import to_label
from . import (access, antitarget, autobin, batch, bintest, call, core,
coverage, diagram, export, fix, heatmap, import_rna, importers,
metrics, parallel, reference, reports, scatter, segmentation,
segmetrics, target)
from .cmdutil import (load_het_snps, read_cna, verify_sample_sex,
write_tsv, write_text, write_dataframe)
from ._version import __version__
__all__ = []
def public(fn):
__all__.append(fn.__name__)
return fn
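# e.g. `do_target = public(target.do_target)` below both binds the API function
# at module level and records "do_target" in __all__, so it is exported by
# star-imports of this module.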
AP = argparse.ArgumentParser(
description="CNVkit, a command-line toolkit for copy number analysis.",
epilog="See the online manual for details: https://cnvkit.readthedocs.io")
AP_subparsers = AP.add_subparsers(
help="Sub-commands (use with -h for more info)")
# _____________________________________________________________________________
# Core pipeline
# batch -----------------------------------------------------------------------
def _cmd_batch(args):
"""Run the complete CNVkit pipeline on one or more BAM files."""
logging.info("CNVkit %s", __version__)
# Validate/restrict options, beyond what argparse mutual exclusion can do
bad_args_msg = ""
if args.reference:
bad_flags = [flag
for is_used, flag in (
(args.normal is not None, '-n/--normal'),
(args.fasta, '-f/--fasta'),
(args.targets, '-t/--targets'),
(args.antitargets, '-a/--antitargets'),
(args.access, '-g/--access'),
(args.annotate, '--annotate'),
(args.short_names, '--short-names'),
(args.target_avg_size, '--target-avg-size'),
(args.antitarget_avg_size, '--antitarget-avg-size'),
(args.antitarget_min_size, '--antitarget-min-size'),
) if is_used]
if bad_flags:
bad_args_msg = ("If -r/--reference is given, options to construct "
"a new reference (%s) should not be used."
% ", ".join(bad_flags))
elif args.normal is None:
bad_args_msg = ("Option -n/--normal must be given to build a new "
"reference if -r/--reference is not used.")
elif args.seq_method in ('hybrid', 'amplicon') and not args.targets:
        bad_args_msg = ("For the %r sequencing method, option -t/--targets "
                        "(at least) must be given to build a new reference if "
                        "-r/--reference is not used." % args.seq_method)
if bad_args_msg:
sys.exit(bad_args_msg + "\n(See: cnvkit.py batch -h)")
# Ensure sample IDs are unique to avoid overwriting outputs
seen_sids = {}
for fname in (args.bam_files or []) + (args.normal or []):
sid = core.fbase(fname)
if sid in seen_sids:
sys.exit("Duplicate sample ID %r (from %s and %s)"
% (sid, fname, seen_sids[sid]))
seen_sids[sid] = fname
if args.processes < 1:
import multiprocessing
args.processes = multiprocessing.cpu_count()
if not args.reference:
# Build a copy number reference; update (anti)targets upon request
args.reference, args.targets, args.antitargets = batch.batch_make_reference(
args.normal, args.targets, args.antitargets, args.male_reference,
args.fasta, args.annotate, args.short_names, args.target_avg_size,
args.access, args.antitarget_avg_size, args.antitarget_min_size,
args.output_reference, args.output_dir, args.processes,
args.count_reads, args.seq_method, args.cluster)
elif args.targets is None and args.antitargets is None:
# Extract (anti)target BEDs from the given, existing CN reference
ref_arr = read_cna(args.reference)
targets, antitargets = reference.reference2regions(ref_arr)
ref_pfx = os.path.join(args.output_dir, core.fbase(args.reference))
args.targets = ref_pfx + '.target-tmp.bed'
args.antitargets = ref_pfx + '.antitarget-tmp.bed'
tabio.write(targets, args.targets, 'bed4')
tabio.write(antitargets, args.antitargets, 'bed4')
if args.bam_files:
if args.processes == 1:
procs_per_bam = 1
logging.info("Running %d samples in serial", len(args.bam_files))
else:
procs_per_bam = max(1, args.processes // len(args.bam_files))
logging.info("Running %d samples in %d processes "
"(that's %d processes per bam)",
len(args.bam_files), args.processes, procs_per_bam)
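            # e.g. (hypothetical values) with -p 8 and 3 BAM files:
            # procs_per_bam = max(1, 8 // 3) = 2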
with parallel.pick_pool(args.processes) as pool:
for bam in args.bam_files:
pool.submit(batch.batch_run_sample,
bam, args.targets, args.antitargets, args.reference,
args.output_dir, args.male_reference, args.scatter,
args.diagram, args.rscript_path, args.count_reads,
args.drop_low_coverage, args.seq_method, args.segment_method, procs_per_bam,
args.cluster, args.fasta)
else:
logging.info("No tumor/test samples (but %d normal/control samples) "
"specified on the command line.",
len(args.normal))
P_batch = AP_subparsers.add_parser('batch', help=_cmd_batch.__doc__)
P_batch.add_argument('bam_files', nargs='*',
help="Mapped sequence reads (.bam)")
P_batch.add_argument('-m', '--seq-method', '--method',
choices=('hybrid', 'amplicon', 'wgs'), default='hybrid',
help="""Sequencing assay type: hybridization capture ('hybrid'),
targeted amplicon sequencing ('amplicon'), or whole genome
sequencing ('wgs'). Determines whether and how to use antitarget
bins. [Default: %(default)s]""")
P_batch.add_argument('--segment-method',
choices=segmentation.SEGMENT_METHODS,
default='cbs',
    help="""Method used in the 'segment' step. [Default: %(default)s]""")
P_batch.add_argument('-y', '--male-reference', '--haploid-x-reference',
action='store_true',
help="""Use or assume a male reference (i.e. female samples will have +1
log-CNR of chrX; otherwise male samples would have -1 chrX).""")
P_batch.add_argument('-c', '--count-reads', action='store_true',
help="""Get read depths by counting read midpoints within each bin.
(An alternative algorithm).""")
P_batch.add_argument("--drop-low-coverage", action='store_true',
help="""Drop very-low-coverage bins before segmentation to avoid
false-positive deletions in poor-quality tumor samples.""")
P_batch.add_argument('-p', '--processes',
nargs='?', type=int, const=0, default=1,
    help="""Number of subprocesses used to run each of the BAM files in
parallel. Without an argument, use the maximum number of
available CPUs. [Default: process each BAM in serial]""")
P_batch.add_argument("--rscript-path", metavar="PATH", default="Rscript",
    help="""Path to the Rscript executable to use for running R code.
Use this option to specify a non-default R installation.
[Default: %(default)s]""")
# Reference-building options
P_batch_newref = P_batch.add_argument_group(
"To construct a new copy number reference")
P_batch_newref.add_argument('-n', '--normal', nargs='*', metavar="FILES",
help="""Normal samples (.bam) used to construct the pooled, paired, or
flat reference. If this option is used but no filenames are
given, a "flat" reference will be built. Otherwise, all
filenames following this option will be used.""")
P_batch_newref.add_argument('-f', '--fasta', metavar="FILENAME",
help="Reference genome, FASTA format (e.g. UCSC hg19.fa)")
P_batch_newref.add_argument('-t', '--targets', metavar="FILENAME",
help="Target intervals (.bed or .list)")
P_batch_newref.add_argument('-a', '--antitargets', metavar="FILENAME",
help="Antitarget intervals (.bed or .list)")
# For pre-processing targets
P_batch_newref.add_argument('--annotate', metavar="FILENAME",
help="""Use gene models from this file to assign names to the target
regions. Format: UCSC refFlat.txt or ensFlat.txt file
(preferred), or BED, interval list, GFF, or similar.""")
P_batch_newref.add_argument('--short-names', action='store_true',
help="Reduce multi-accession bait labels to be short and consistent.")
P_batch_newref.add_argument('--target-avg-size', type=int,
help="Average size of split target bins (results are approximate).")
# For antitargets:
P_batch_newref.add_argument('-g', '--access', metavar="FILENAME",
help="""Regions of accessible sequence on chromosomes (.bed), as
output by the 'access' command.""")
P_batch_newref.add_argument('--antitarget-avg-size', type=int,
help="Average size of antitarget bins (results are approximate).")
P_batch_newref.add_argument('--antitarget-min-size', type=int,
help="Minimum size of antitarget bins (smaller regions are dropped).")
P_batch_newref.add_argument('--output-reference', metavar="FILENAME",
help="""Output filename/path for the new reference file being created.
(If given, ignores the -o/--output-dir option and will write the
file to the given path. Otherwise, \"reference.cnn\" will be
created in the current directory or specified output directory.)
""")
P_batch_newref.add_argument('--cluster',
action='store_true',
help="""Calculate and use cluster-specific summary stats in the
reference pool to normalize samples.""")
P_batch_oldref = P_batch.add_argument_group("To reuse an existing reference")
P_batch_oldref.add_argument('-r', '--reference', #required=True,
help="Copy number reference file (.cnn).")
# Reporting options
P_batch_report = P_batch.add_argument_group("Output options")
P_batch_report.add_argument('-d', '--output-dir',
metavar="DIRECTORY", default='.',
help="Output directory.")
P_batch_report.add_argument('--scatter', action='store_true',
help="Create a whole-genome copy ratio profile as a PDF scatter plot.")
P_batch_report.add_argument('--diagram', action='store_true',
help="Create an ideogram of copy ratios on chromosomes as a PDF.")
P_batch.set_defaults(func=_cmd_batch)
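# Example command line for this sub-command (file names are illustrative):
#   cnvkit.py batch Tumor1.bam Tumor2.bam -n Normal1.bam Normal2.bam \
#       -t baits.bed -f hg19.fa --output-dir results/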
# target ----------------------------------------------------------------------
do_target = public(target.do_target)
def _cmd_target(args):
"""Transform bait intervals into targets more suitable for CNVkit."""
regions = tabio.read_auto(args.interval)
regions = target.do_target(regions, args.annotate, args.short_names,
args.split, args.avg_size)
tabio.write(regions, args.output, "bed4")
P_target = AP_subparsers.add_parser('target', help=_cmd_target.__doc__)
P_target.add_argument('interval',
help="""BED or interval file listing the targeted regions.""")
P_target.add_argument('--annotate',
help="""Use gene models from this file to assign names to the target
regions. Format: UCSC refFlat.txt or ensFlat.txt file
(preferred), or BED, interval list, GFF, or similar.""")
P_target.add_argument('--short-names', action='store_true',
help="Reduce multi-accession bait labels to be short and consistent.")
P_target.add_argument('--split', action='store_true',
help="Split large tiled intervals into smaller, consecutive targets.")
# Exons: [114--188==203==292--21750], mean=353 -> outlier=359, extreme=515
# NV2: [65--181==190==239--12630], mean=264 -> outlier=277, extreme=364
# Default avg_size chosen s.t. minimum bin size after split is ~= median
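# (200 / .75 ~= 267 bp)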
P_target.add_argument('-a', '--avg-size', type=int, default=200 / .75,
help="""Average size of split target bins (results are approximate).
[Default: %(default)s]""")
P_target.add_argument('-o', '--output', metavar="FILENAME",
help="""Output file name.""")
P_target.set_defaults(func=_cmd_target)
# access ----------------------------------------------------------------------
do_access = public(access.do_access)
def _cmd_access(args):
"""List the locations of accessible sequence regions in a FASTA file."""
access_arr = access.do_access(args.fa_fname, args.exclude,
args.min_gap_size)
tabio.write(access_arr, args.output, "bed3")
P_access = AP_subparsers.add_parser('access', help=_cmd_access.__doc__)
P_access.add_argument("fa_fname",
help="Genome FASTA file name")
P_access.add_argument("-s", "--min-gap-size", type=int, default=5000,
help="""Minimum gap size between accessible sequence
regions. Regions separated by less than this distance will
be joined together. [Default: %(default)s]""")
P_access.add_argument("-x", "--exclude", action="append", default=[],
help="""Additional regions to exclude, in BED format. Can be
used multiple times.""")
P_access.add_argument("-o", "--output", metavar="FILENAME",
type=argparse.FileType('w'), default=sys.stdout,
help="Output file name")
P_access.set_defaults(func=_cmd_access)
# antitarget ------------------------------------------------------------------
do_antitarget = public(antitarget.do_antitarget)
def _cmd_antitarget(args):
"""Derive off-target ("antitarget") bins from target regions."""
targets = tabio.read_auto(args.targets)
access = tabio.read_auto(args.access) if args.access else None
out_arr = antitarget.do_antitarget(targets, access, args.avg_size,
args.min_size)
if not args.output:
        base, ext = args.targets.rsplit('.', 1)
args.output = base + '.antitarget.' + ext
tabio.write(out_arr, args.output, "bed4")
P_anti = AP_subparsers.add_parser('antitarget', help=_cmd_antitarget.__doc__)
P_anti.add_argument('targets',
help="""BED or interval file listing the targeted regions.""")
P_anti.add_argument('-g', '--access', metavar="FILENAME",
help="""Regions of accessible sequence on chromosomes (.bed), as
            output by the 'access' command.""")
P_anti.add_argument('-a', '--avg-size', type=int, default=150000,
help="""Average size of antitarget bins (results are approximate).
[Default: %(default)s]""")
P_anti.add_argument('-m', '--min-size', type=int,
help="""Minimum size of antitarget bins (smaller regions are dropped).
[Default: 1/16 avg size, calculated]""")
P_anti.add_argument('-o', '--output', metavar="FILENAME",
help="""Output file name.""")
P_anti.set_defaults(func=_cmd_antitarget)
# autobin ---------------------------------------------------------------------
do_autobin = public(autobin.do_autobin)
def _cmd_autobin(args):
"""Quickly calculate reasonable bin sizes from BAM read counts."""
if args.method in ('hybrid', 'amplicon') and not args.targets:
        raise RuntimeError("Sequencing method %r requires targets (-t)"
                           % args.method)
if args.method == 'wgs':
if not args.access:
raise RuntimeError("Sequencing method 'wgs' requires accessible "
"regions (-g)")
if args.targets:
logging.warning("Targets will be ignored: %s", args.targets)
if args.method == 'amplicon' and args.access:
logging.warning("Sequencing-accessible regions will be ignored: %s",
args.access)
def read_regions(bed_fname):
if bed_fname:
regions = tabio.read_auto(bed_fname)
if len(regions):
return regions
else:
logging.warning("No regions to estimate depth from %s",
regions.meta.get('filename', ''))
tgt_arr = read_regions(args.targets)
access_arr = read_regions(args.access)
bam_fname = autobin.midsize_file(args.bams)
fields = autobin.do_autobin(bam_fname, args.method, tgt_arr, access_arr,
args.bp_per_bin, args.target_min_size,
args.target_max_size, args.antitarget_min_size,
args.antitarget_max_size, args.fasta)
(_tgt_depth, tgt_bin_size), (_anti_depth, anti_bin_size) = fields
# Create & write BED files
target_out_arr = target.do_target(access_arr if args.method == 'wgs'
else tgt_arr,
args.annotate, args.short_names,
do_split=True, avg_size=tgt_bin_size)
tgt_name_base = tgt_arr.sample_id if tgt_arr else core.fbase(bam_fname)
target_bed = args.target_output_bed or tgt_name_base + '.target.bed'
tabio.write(target_out_arr, target_bed, "bed4")
if args.method == "hybrid" and anti_bin_size:
# Build antitarget BED from the given targets
anti_arr = antitarget.do_antitarget(target_out_arr,
access=access_arr,
avg_bin_size=anti_bin_size,
min_bin_size=args.antitarget_min_size)
else:
# No antitargets for wgs, amplicon
anti_arr = _GA([])
antitarget_bed = args.antitarget_output_bed or tgt_name_base + '.antitarget.bed'
tabio.write(anti_arr, antitarget_bed, "bed4")
# Print depths & bin sizes as a table on stdout
labels = ("Target", "Antitarget")
width = max(map(len, labels)) + 1
print(" " * width, "Depth", "Bin size", sep='\t')
for label, (depth, binsize) in zip(labels, fields):
if depth is not None:
print((label + ":").ljust(width),
format(depth, ".3f"),
binsize,
sep='\t')
P_autobin = AP_subparsers.add_parser('autobin', help=_cmd_autobin.__doc__)
P_autobin.add_argument('bams', nargs='+',
help="""Sample BAM file(s) to test for target coverage""")
P_autobin.add_argument('-f', '--fasta', metavar="FILENAME",
help="Reference genome, FASTA format (e.g. UCSC hg19.fa)")
P_autobin.add_argument('-m', '--method',
choices=('hybrid', 'amplicon', 'wgs'), default='hybrid',
help="""Sequencing protocol: hybridization capture ('hybrid'), targeted
amplicon sequencing ('amplicon'), or whole genome sequencing
('wgs'). Determines whether and how to use antitarget bins.
[Default: %(default)s]""")
P_autobin.add_argument('-g', '--access', metavar="FILENAME",
help="""Sequencing-accessible genomic regions, or exons to use as
possible targets (e.g. output of refFlat2bed.py)""")
P_autobin.add_argument('-t', '--targets',
help="""Potentially targeted genomic regions, e.g. all possible exons
for the reference genome. Format: BED, interval list, etc.""")
P_autobin.add_argument('-b', '--bp-per-bin',
type=float, default=100000.,
help="""Desired average number of sequencing read bases mapped to each
bin. [Default: %(default)s]""")
P_autobin.add_argument('--target-max-size', metavar="BASES",
type=int, default=20000,
help="Maximum size of target bins. [Default: %(default)s]")
P_autobin.add_argument('--target-min-size', metavar="BASES",
type=int, default=20,
help="Minimum size of target bins. [Default: %(default)s]")
P_autobin.add_argument('--antitarget-max-size', metavar="BASES",
type=int, default=500000,
help="Maximum size of antitarget bins. [Default: %(default)s]")
P_autobin.add_argument('--antitarget-min-size', metavar="BASES",
type=int, default=500,
help="Minimum size of antitarget bins. [Default: %(default)s]")
P_autobin.add_argument(
'--annotate', metavar='FILENAME',
help="""Use gene models from this file to assign names to the target regions. Format: UCSC refFlat.txt or
ensFlat.txt file (preferred), or BED, interval list, GFF, or similar."""
)
P_autobin.add_argument(
'--short-names', action='store_true',
help='Reduce multi-accession bait labels to be short and consistent.'
)
P_autobin.add_argument(
'--target-output-bed', metavar='FILENAME',
help='Filename for target BED output. If not specified, constructed from the input file basename.'
)
P_autobin.add_argument(
'--antitarget-output-bed', metavar='FILENAME',
help='Filename for antitarget BED output. If not specified, constructed from the input file basename.'
)
# Option: --dry-run to not write BED files?
P_autobin.set_defaults(func=_cmd_autobin)
# coverage --------------------------------------------------------------------
do_coverage = public(coverage.do_coverage)
def _cmd_coverage(args):
"""Calculate coverage in the given regions from BAM read depths."""
pset = coverage.do_coverage(args.interval, args.bam_file, args.count,
args.min_mapq, args.processes, args.fasta)
if not args.output:
# Create an informative but unique name for the coverage output file
bambase = core.fbase(args.bam_file)
bedbase = core.fbase(args.interval)
tgtbase = ('antitargetcoverage'
if 'anti' in bedbase.lower()
else 'targetcoverage')
args.output = '%s.%s.cnn' % (bambase, tgtbase)
if os.path.exists(args.output):
args.output = '%s.%s.cnn' % (bambase, bedbase)
core.ensure_path(args.output)
tabio.write(pset, args.output)
P_coverage = AP_subparsers.add_parser('coverage', help=_cmd_coverage.__doc__)
P_coverage.add_argument('bam_file', help="Mapped sequence reads (.bam)")
P_coverage.add_argument('interval', help="Intervals (.bed or .list)")
P_coverage.add_argument('-f', '--fasta', metavar="FILENAME",
help="Reference genome, FASTA format (e.g. UCSC hg19.fa)")
P_coverage.add_argument('-c', '--count', action='store_true',
help="""Get read depths by counting read midpoints within each bin.
(An alternative algorithm).""")
P_coverage.add_argument('-q', '--min-mapq', type=int, default=0,
help="""Minimum mapping quality score (phred scale 0-60) to count a read
for coverage depth. [Default: %(default)s]""")
P_coverage.add_argument('-o', '--output', metavar="FILENAME",
help="""Output file name.""")
P_coverage.add_argument('-p', '--processes',
nargs='?', type=int, const=0, default=1,
help="""Number of subprocesses to calculate coverage in parallel.
Without an argument, use the maximum number of available CPUs.
[Default: use 1 process]""")
P_coverage.set_defaults(func=_cmd_coverage)
# reference -------------------------------------------------------------------
do_reference = public(reference.do_reference)
do_reference_flat = public(reference.do_reference_flat)
def _cmd_reference(args):
"""Compile a coverage reference from the given files (normal samples)."""
usage_err_msg = ("Give .cnn samples OR targets and (optionally) antitargets.")
if args.targets:
        # Flat reference
assert not args.references, usage_err_msg
ref_probes = reference.do_reference_flat(args.targets, args.antitargets,
args.fasta,
args.male_reference)
elif args.references:
# Pooled reference
assert not args.targets and not args.antitargets, usage_err_msg
filenames = []
for path in args.references:
if os.path.isdir(path):
filenames.extend(os.path.join(path, f) for f in os.listdir(path)
if f.endswith('targetcoverage.cnn'))
else:
filenames.append(path)
targets = [f for f in filenames if 'antitarget' not in f]
antitargets = [f for f in filenames if 'antitarget' in f]
logging.info("Number of target and antitarget files: %d, %d",
len(targets), len(antitargets))
female_samples = ((args.sample_sex.lower() not in ['y', 'm', 'male'])
if args.sample_sex else None)
ref_probes = reference.do_reference(targets, antitargets, args.fasta,
args.male_reference, female_samples,
args.do_gc, args.do_edge,
args.do_rmask, args.cluster,
args.min_cluster_size)
else:
raise ValueError(usage_err_msg)
ref_fname = args.output or "cnv_reference.cnn"
core.ensure_path(ref_fname)
tabio.write(ref_probes, ref_fname)
P_reference = AP_subparsers.add_parser('reference', help=_cmd_reference.__doc__)
P_reference.add_argument('references', nargs='*',
help="""Normal-sample target or antitarget .cnn files, or the
directory that contains them.""")
P_reference.add_argument('-f', '--fasta',
help="Reference genome, FASTA format (e.g. UCSC hg19.fa)")
P_reference.add_argument('-o', '--output', metavar="FILENAME",
help="Output file name.")
P_reference.add_argument('-c', '--cluster',
action='store_true',
help="""Calculate and store summary stats for clustered subsets of the
normal samples with similar coverage profiles.""")
P_reference.add_argument('--min-cluster-size',
metavar="NUM",
type=int,
default=4,
help="""Minimum cluster size to keep in reference profiles.""")
P_reference.add_argument('-x', '--sample-sex', '-g', '--gender',
dest='sample_sex',
choices=('m', 'y', 'male', 'Male', 'f', 'x', 'female', 'Female'),
help="""Specify the chromosomal sex of all given samples as male or
female. (Default: guess each sample from coverage of X and Y
chromosomes).""")
P_reference.add_argument('-y', '--male-reference', '--haploid-x-reference',
action='store_true',
help="""Create a male reference: shift female samples' chrX
log-coverage by -1, so the reference chrX average is -1.
Otherwise, shift male samples' chrX by +1, so the reference chrX
average is 0.""")
P_reference_flat = P_reference.add_argument_group(
"To construct a generic, \"flat\" copy number reference with neutral "
"expected coverage")
P_reference_flat.add_argument('-t', '--targets',
help="Target intervals (.bed or .list)")
P_reference_flat.add_argument('-a', '--antitargets',
help="Antitarget intervals (.bed or .list)")
P_reference_bias = P_reference.add_argument_group(
"To disable specific automatic bias corrections")
P_reference_bias.add_argument('--no-gc', dest='do_gc', action='store_false',
help="Skip GC correction.")
P_reference_bias.add_argument('--no-edge', dest='do_edge', action='store_false',
help="Skip edge-effect correction.")
P_reference_bias.add_argument('--no-rmask', dest='do_rmask', action='store_false',
help="Skip RepeatMasker correction.")
P_reference.set_defaults(func=_cmd_reference)
# fix -------------------------------------------------------------------------
do_fix = public(fix.do_fix)
def _cmd_fix(args):
"""Combine target and antitarget coverages and correct for biases.
Adjust raw coverage data according to the given reference, correct potential
biases and re-center.
"""
# Verify that target and antitarget are from the same sample
tgt_raw = read_cna(args.target, sample_id=args.sample_id)
anti_raw = read_cna(args.antitarget, sample_id=args.sample_id)
if len(anti_raw) and tgt_raw.sample_id != anti_raw.sample_id:
        raise ValueError("Sample IDs do not match: "
"'%s' (target) vs. '%s' (antitarget)"
% (tgt_raw.sample_id, anti_raw.sample_id))
target_table = fix.do_fix(tgt_raw, anti_raw, read_cna(args.reference),
args.do_gc, args.do_edge, args.do_rmask,
args.cluster)
tabio.write(target_table, args.output or tgt_raw.sample_id + '.cnr')
P_fix = AP_subparsers.add_parser('fix', help=_cmd_fix.__doc__)
P_fix.add_argument('target',
help="Target coverage file (.targetcoverage.cnn).")
P_fix.add_argument('antitarget',
help="Antitarget coverage file (.antitargetcoverage.cnn).")
P_fix.add_argument('reference',
help="Reference coverage (.cnn).")
P_fix.add_argument('-c', '--cluster',
action='store_true',
help="""Compare and use cluster-specific values present in the
reference profile. (Requires that the reference profile
was built with the --cluster option.)""")
P_fix.add_argument('-i', '--sample-id',
help="Sample ID for target/antitarget files. Otherwise inferred from file names.")
# P_fix.add_argument('--do-gc', action='store_true', default=True,
# help="Do GC correction.")
# P_fix.add_argument('--do-edge', action='store_true',
# help="Do edge-effect correction.")
# P_fix.add_argument('--do-size', action='store_true',
# help="Do interval-size correction.")
P_fix.add_argument('--no-gc', dest='do_gc', action='store_false',
help="Skip GC correction.")
P_fix.add_argument('--no-edge', dest='do_edge', action='store_false',
help="Skip edge-effect correction.")
P_fix.add_argument('--no-rmask', dest='do_rmask', action='store_false',
help="Skip RepeatMasker correction.")
P_fix.add_argument('-o', '--output', metavar="FILENAME",
help="Output file name.")
P_fix.set_defaults(func=_cmd_fix)
# segment ---------------------------------------------------------------------
do_segmentation = public(segmentation.do_segmentation)
def _cmd_segment(args):
"""Infer copy number segments from the given coverage table."""
cnarr = read_cna(args.filename)
variants = load_het_snps(args.vcf, args.sample_id, args.normal_id,
args.min_variant_depth, args.zygosity_freq)
results = segmentation.do_segmentation(cnarr, args.method, args.threshold,
variants=variants,
skip_low=args.drop_low_coverage,
skip_outliers=args.drop_outliers,
save_dataframe=bool(args.dataframe),
rscript_path=args.rscript_path,
processes=args.processes,
smooth_cbs=args.smooth_cbs)
if args.dataframe:
segments, dframe = results
with open(args.dataframe, 'w') as handle:
handle.write(dframe)
logging.info("Wrote %s", args.dataframe)
else:
segments = results
tabio.write(segments, args.output or segments.sample_id + '.cns')
P_segment = AP_subparsers.add_parser('segment', help=_cmd_segment.__doc__)
P_segment.add_argument('filename',
help="Bin-level log2 ratios (.cnr file), as produced by 'fix'.")
P_segment.add_argument('-o', '--output', metavar="FILENAME",
help="Output table file name (CNR-like table of segments, .cns).")
P_segment.add_argument('-d', '--dataframe',
help="""File name to save the raw R dataframe emitted by CBS or
Fused Lasso. (Useful for debugging.)""")
P_segment.add_argument('-m', '--method',
choices=segmentation.SEGMENT_METHODS,
default='cbs',
help="""Segmentation method (see docs), or 'none' for chromosome
arm-level averages as segments. [Default: %(default)s]""")
P_segment.add_argument('-t', '--threshold', type=float,
help="""Significance threshold (p-value or FDR, depending on method) to
accept breakpoints during segmentation.
For HMM methods, this is the smoothing window size.""")
P_segment.add_argument("--drop-low-coverage", action='store_true',
help="""Drop very-low-coverage bins before segmentation to avoid
false-positive deletions in poor-quality tumor samples.""")
P_segment.add_argument("--drop-outliers", metavar="FACTOR",
type=float, default=10,
help="""Drop outlier bins more than this many multiples of the 95th
quantile away from the average within a rolling window.
Set to 0 for no outlier filtering.
[Default: %(default)g]""")
P_segment.add_argument("--rscript-path", metavar="PATH", default="Rscript",
    help="""Path to the Rscript executable to use for running R code.
Use this option to specify a non-default R installation.
[Default: %(default)s]""")
P_segment.add_argument('-p', '--processes',
nargs='?', type=int, const=0, default=1,
help="""Number of subprocesses to segment in parallel.
Give 0 or a negative value to use the maximum number
of available CPUs. [Default: use 1 process]""")
P_segment.add_argument('--smooth-cbs', action='store_true',
help="""Perform an additional smoothing before CBS segmentation,
which in some cases may increase the sensitivity.
Used only for CBS method.""")
P_segment_vcf = P_segment.add_argument_group(
"To additionally segment SNP b-allele frequencies")
P_segment_vcf.add_argument('-v', '--vcf', metavar="FILENAME",
help="""VCF file name containing variants for segmentation by allele
frequencies.""")
P_segment_vcf.add_argument('-i', '--sample-id',
help="""Specify the name of the sample in the VCF (-v/--vcf) to use for
b-allele frequency extraction and as the default plot title.""")
P_segment_vcf.add_argument('-n', '--normal-id',
help="""Corresponding normal sample ID in the input VCF (-v/--vcf).
This sample is used to select only germline SNVs to plot
b-allele frequencies.""")
P_segment_vcf.add_argument('--min-variant-depth', type=int, default=20,
help="""Minimum read depth for a SNV to be displayed in the b-allele
frequency plot. [Default: %(default)s]""")
P_segment_vcf.add_argument('-z', '--zygosity-freq',
metavar='ALT_FREQ', nargs='?', type=float, const=0.25,
help="""Ignore VCF's genotypes (GT field) and instead infer zygosity
from allele frequencies. [Default if used without a number:
%(const)s]""")
P_segment.set_defaults(func=_cmd_segment)
# call ------------------------------------------------------------------------
do_call = public(call.do_call)
def _cmd_call(args):
"""Call copy number variants from segmented log2 ratios."""
if args.purity and not 0.0 < args.purity <= 1.0:
raise RuntimeError("Purity must be between 0 and 1.")
cnarr = read_cna(args.filename)
if args.center_at:
logging.info("Shifting log2 ratios by %f", -args.center_at)
cnarr['log2'] -= args.center_at
elif args.center:
cnarr.center_all(args.center, skip_low=args.drop_low_coverage,
verbose=True)
varr = load_het_snps(args.vcf, args.sample_id, args.normal_id,
args.min_variant_depth, args.zygosity_freq)
is_sample_female = (verify_sample_sex(cnarr, args.sample_sex,
args.male_reference)
if args.purity and args.purity < 1.0
else None)
cnarr = call.do_call(cnarr, varr, args.method, args.ploidy, args.purity,
args.male_reference, is_sample_female, args.filters,
args.thresholds)
tabio.write(cnarr, args.output or cnarr.sample_id + '.call.cns')
def csvstring(text):
return tuple(map(float, text.split(",")))
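# e.g. csvstring("-1.1,-0.25,0.2,0.7") -> (-1.1, -0.25, 0.2, 0.7)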
P_call = AP_subparsers.add_parser('call', help=_cmd_call.__doc__)
P_call.add_argument('filename',
help="Copy ratios (.cnr or .cns).")
P_call.add_argument("--center", nargs='?', const='median',
choices=('mean', 'median', 'mode', 'biweight'),
help="""Re-center the log2 ratio values using this estimator of the
center or average value. ('median' if no argument given.)""")
P_call.add_argument("--center-at", type=float,
help="""Subtract a constant number from all log2 ratios. For "manual"
re-centering, in case the --center option gives unsatisfactory
            results.""")
P_call.add_argument('--filter', action='append', default=[], dest='filters',
choices=('ampdel', 'cn', 'ci', 'sem', # 'bic'
),
help="""Merge segments flagged by the specified filter(s) with the
adjacent segment(s).""")
P_call.add_argument('-m', '--method',
choices=('threshold', 'clonal', 'none'), default='threshold',
help="""Calling method. [Default: %(default)s]""")
P_call.add_argument('-t', '--thresholds',
type=csvstring, default="-1.1,-0.25,0.2,0.7",
help="""Hard thresholds for calling each integer copy number, separated
by commas. Use the '=' sign on the command line, e.g.: -t=-1,0,1
[Default: %(default)s]""")
P_call.add_argument("--ploidy", type=int, default=2,
help="Ploidy of the sample cells. [Default: %(default)d]")
P_call.add_argument("--purity", type=float,
help="Estimated tumor cell fraction, a.k.a. purity or cellularity.")
P_call.add_argument("--drop-low-coverage", action='store_true',
help="""Drop very-low-coverage bins before segmentation to avoid
false-positive deletions in poor-quality tumor samples.""")
P_call.add_argument('-x', '--sample-sex', '-g', '--gender', dest='sample_sex',
choices=('m', 'y', 'male', 'Male', 'f', 'x', 'female', 'Female'),
help="""Specify the sample's chromosomal sex as male or female.
(Otherwise guessed from X and Y coverage).""")
P_call.add_argument('-y', '--male-reference', '--haploid-x-reference',
action='store_true',
help="""Was a male reference used? If so, expect half ploidy on
chrX and chrY; otherwise, only chrY has half ploidy. In CNVkit,
if a male reference was used, the "neutral" copy number (ploidy)
of chrX is 1; chrY is haploid for either reference sex.""")
P_call.add_argument('-o', '--output', metavar="FILENAME",
help="Output table file name (CNR-like table of segments, .cns).")
P_call_vcf = P_call.add_argument_group(
"To additionally process SNP b-allele frequencies for allelic copy number")
P_call_vcf.add_argument('-v', '--vcf', metavar="FILENAME",
help="""VCF file name containing variants for calculation of b-allele
frequencies.""")
P_call_vcf.add_argument('-i', '--sample-id',
help="""Name of the sample in the VCF (-v/--vcf) to use for b-allele
frequency extraction.""")
P_call_vcf.add_argument('-n', '--normal-id',
help="""Corresponding normal sample ID in the input VCF (-v/--vcf).
This sample is used to select only germline SNVs to calculate
b-allele frequencies.""")
P_call_vcf.add_argument('--min-variant-depth', type=int, default=20,
help="""Minimum read depth for a SNV to be used in the b-allele
frequency calculation. [Default: %(default)s]""")
P_call_vcf.add_argument('-z', '--zygosity-freq',
metavar='ALT_FREQ', nargs='?', type=float, const=0.25,
help="""Ignore VCF's genotypes (GT field) and instead infer zygosity
from allele frequencies. [Default if used without a number:
%(const)s]""")
P_call.set_defaults(func=_cmd_call)
# _____________________________________________________________________________
# Plots and graphics
# diagram ---------------------------------------------------------------------
def _cmd_diagram(args):
"""Draw copy number (log2 coverages, segments) on chromosomes as a diagram.
If both the raw probes and segments are given, show them side-by-side on
each chromosome (segments on the left side, probes on the right side).
"""
if not args.filename and not args.segment:
raise ValueError("Must specify a filename as an argument or with "
"the '-s' option, or both. You did neither.")
cnarr = read_cna(args.filename) if args.filename else None
segarr = read_cna(args.segment) if args.segment else None
if args.adjust_xy:
is_sample_female = verify_sample_sex(cnarr or segarr, args.sample_sex,
args.male_reference)
if cnarr:
cnarr = cnarr.shift_xx(args.male_reference, is_sample_female)
if segarr:
segarr = segarr.shift_xx(args.male_reference, is_sample_female)
outfname = diagram.create_diagram(cnarr, segarr, args.threshold,
args.min_probes, args.output,
args.chromosome, args.title,
args.show_labels)
logging.info("Wrote %s", outfname)
P_diagram = AP_subparsers.add_parser('diagram', help=_cmd_diagram.__doc__)
P_diagram.add_argument('filename', nargs='?',
help="""Processed coverage data file (*.cnr), the output of the
'fix' sub-command.""")
P_diagram.add_argument('-s', '--segment',
help="Segmentation calls (.cns), the output of the 'segment' command.")
P_diagram.add_argument('-c', '--chromosome',
help="""Chromosome to display, e.g. 'chr1'
(no chromosomal range allowed)""")
P_diagram.add_argument('-t', '--threshold', type=float, default=0.5,
help="""Copy number change threshold to label genes.
[Default: %(default)s]""")
P_diagram.add_argument('-m', '--min-probes', type=int, default=3,
help="""Minimum number of covered probes to label a gene.
[Default: %(default)d]""")
P_diagram.add_argument('-y', '--male-reference', '--haploid-x-reference',
action='store_true',
help="""Assume inputs were normalized to a male reference
(i.e. female samples will have +1 log-CNR of chrX;
otherwise male samples would have -1 chrX).""")
P_diagram.add_argument('-x', '--sample-sex', '-g', '--gender',
dest='sample_sex',
choices=('m', 'y', 'male', 'Male', 'f', 'x', 'female', 'Female'),
help="""Specify the sample's chromosomal sex as male or female.
(Otherwise guessed from X and Y coverage).""")
P_diagram.add_argument('--no-shift-xy', dest='adjust_xy', action='store_false',
help="Don't adjust the X and Y chromosomes according to sample sex.")
P_diagram.add_argument('-o', '--output', metavar="FILENAME",
help="Output PDF file name.")
P_diagram_aes = P_diagram.add_argument_group("Plot aesthetics")
P_diagram_aes.add_argument('--title',
help="Plot title. [Default: sample ID, from filename or -i]")
P_diagram_aes.add_argument('--no-gene-labels',
dest='show_labels', action='store_false',
    help="""Disable gene_name labels on the plot
            (useful when many CNVs are called).""")
P_diagram.set_defaults(func=_cmd_diagram)
# scatter ---------------------------------------------------------------------
do_scatter = public(scatter.do_scatter)
def _cmd_scatter(args):
"""Plot probe log2 coverages and segmentation calls together."""
cnarr = read_cna(args.filename, sample_id=args.sample_id
) if args.filename else None
segarr = read_cna(args.segment, sample_id=args.sample_id
) if args.segment else None
varr = load_het_snps(args.vcf, args.sample_id, args.normal_id,
args.min_variant_depth, args.zygosity_freq)
scatter_opts = {k: v for k, v in (
("do_trend", args.trend),
("by_bin", args.by_bin),
("window_width", args.width),
("y_min", args.y_min),
("y_max", args.y_max),
("fig_size", args.fig_size),
("antitarget_marker", args.antitarget_marker),
("segment_color", args.segment_color),
) if v is not None}
if args.range_list:
with PdfPages(args.output) as pdf_out:
for region in tabio.read_auto(args.range_list).coords():
try:
if args.title is not None:
scatter_opts["title"] = "%s %s" % (args.title,
region.chromosome)
scatter.do_scatter(cnarr, segarr, varr, show_range=region,
**scatter_opts)
except ValueError as exc:
# Probably no bins in the selected region
logging.warning("Not plotting region %r: %s",
to_label(region), exc)
pdf_out.savefig()
pyplot.close()
else:
if args.title is not None:
scatter_opts["title"] = args.title
scatter.do_scatter(cnarr, segarr, varr, args.chromosome, args.gene,
**scatter_opts)
if args.output:
oformat = os.path.splitext(args.output)[-1].replace(".", "")
pyplot.savefig(args.output, format=oformat, bbox_inches="tight")
logging.info("Wrote %s", args.output)
else:
pyplot.show()
P_scatter = AP_subparsers.add_parser('scatter', help=_cmd_scatter.__doc__)
P_scatter.add_argument('filename', nargs="?",
help="""Processed bin-level copy ratios (*.cnr), the output
of the 'fix' sub-command.""")
P_scatter.add_argument('-s', '--segment', metavar="FILENAME",
help="Segmentation calls (.cns), the output of the 'segment' command.")
P_scatter.add_argument('-c', '--chromosome', metavar="RANGE",
help="""Chromosome or chromosomal range, e.g. 'chr1' or
'chr1:2333000-2444000', to display. If a range is given,
all targeted genes in this range will be shown, unless
-g/--gene is also given.""")
P_scatter.add_argument('-g', '--gene',
help="Name of gene or genes (comma-separated) to display.")
P_scatter.add_argument('-l', '--range-list',
help="""File listing the chromosomal ranges to display, as BED, interval
list or 'chr:start-end' text. Creates focal plots similar to
-c/--chromosome for each listed region, combined into a
multi-page PDF. The output filename must also be
specified (-o/--output).""")
P_scatter.add_argument('-w', '--width', type=float, default=1e6,
help="""Width of margin to show around the selected gene(s) (-g/--gene)
or small chromosomal region (-c/--chromosome).
[Default: %(default)d]""")
P_scatter.add_argument('-o', '--output', metavar="FILENAME",
help="Output PDF file name.")
P_scatter_aes = P_scatter.add_argument_group("Plot aesthetics")
P_scatter_aes.add_argument('-a', '--antitarget-marker',
metavar='CHARACTER', dest='antitarget_marker', default=None,
help="""Plot antitargets using this symbol when plotting in a selected
chromosomal region (-g/--gene or -c/--chromosome).
[Default: same as targets]""")
P_scatter_aes.add_argument("--by-bin", action="store_true",
help="""Plot data x-coordinates by bin indices instead of genomic
coordinates. All bins will be shown with equal width, no blank
regions will be shown, and x-axis values indicate bin number
(within chromosome) instead of genomic position.""")
P_scatter_aes.add_argument('--segment-color', default=scatter.SEG_COLOR,
help="""Plot segment lines in this color. Value can be any string
accepted by matplotlib, e.g. 'red' or '#CC0000'.""")
P_scatter_aes.add_argument('--title',
help="Plot title. [Default: sample ID, from filename or -i]")
P_scatter_aes.add_argument('-t', '--trend', action='store_true',
help="Draw a smoothed local trendline on the scatter plot.")
P_scatter_aes.add_argument('--y-max', type=float, help="y-axis upper limit.")
P_scatter_aes.add_argument('--y-min', type=float, help="y-axis lower limit.")
P_scatter_aes.add_argument('--fig-size', nargs=2,
metavar=('WIDTH','HEIGHT'), type=float,
help="""Width and height of the plot in inches.
[Default: Pre-defined in Matplotlib 'rcParams' variable
(most of the time: '6.4 4.8')]""")
P_scatter_vcf = P_scatter.add_argument_group(
"To plot SNP b-allele frequencies")
P_scatter_vcf.add_argument('-v', '--vcf', metavar="FILENAME",
help="""VCF file name containing variants to plot for SNV b-allele
frequencies.""")
P_scatter_vcf.add_argument('-i', '--sample-id',
help="""Name of the sample in the VCF to use for b-allele frequency
extraction and as the default plot title.""")
P_scatter_vcf.add_argument('-n', '--normal-id',
help="""Corresponding normal sample ID in the input VCF. This sample is
used to select only germline SNVs to plot.""")
P_scatter_vcf.add_argument('-m', '--min-variant-depth', type=int, default=20,
help="""Minimum read depth for a SNV to be used in the b-allele
frequency calculation. [Default: %(default)s]""")
P_scatter_vcf.add_argument('-z', '--zygosity-freq',
metavar='ALT_FREQ', nargs='?', type=float, const=0.25,
help="""Ignore VCF's genotypes (GT field) and instead infer zygosity
from allele frequencies. [Default if used without a number:
%(const)s]""")
P_scatter.set_defaults(func=_cmd_scatter)
# heatmap ---------------------------------------------------------------------
do_heatmap = public(heatmap.do_heatmap)
def _cmd_heatmap(args):
"""Plot copy number for multiple samples as a heatmap."""
cnarrs = []
for fname in args.filenames:
cnarr = read_cna(fname)
if args.adjust_xy:
is_sample_female = verify_sample_sex(cnarr, args.sample_sex,
args.male_reference)
cnarr = cnarr.shift_xx(args.male_reference, is_sample_female)
cnarrs.append(cnarr)
heatmap.do_heatmap(cnarrs, args.chromosome, args.desaturate, args.by_bin,
args.delim_sampl, args.vertical)
if args.output:
oformat = os.path.splitext(args.output)[-1].replace(".", "")
pyplot.savefig(args.output, format=oformat, bbox_inches="tight")
logging.info("Wrote %s", args.output)
else:
pyplot.show()
P_heatmap = AP_subparsers.add_parser('heatmap', help=_cmd_heatmap.__doc__)
P_heatmap.add_argument('filenames', nargs='+',
help="Sample coverages as raw probes (.cnr) or segments (.cns).")
P_heatmap.add_argument('-b', '--by-bin', action="store_true",
help="""Plot data x-coordinates by bin indices instead of genomic
coordinates. All bins will be shown with equal width, no blank
regions will be shown, and x-axis values indicate bin number
(within chromosome) instead of genomic position.""")
P_heatmap.add_argument('-c', '--chromosome',
help="""Chromosome (e.g. 'chr1') or chromosomal range (e.g.
'chr1:2333000-2444000') to display. If a range is given,
all targeted genes in this range will be shown, unless
'--gene'/'-g' is already given.""")
# P_heatmap.add_argument('-g', '--gene',
# help="Name of gene to display.")
P_heatmap.add_argument('-d', '--desaturate', action='store_true',
help="Tweak color saturation to focus on significant changes.")
P_heatmap.add_argument('-v', '--vertical', action='store_true',
help="Plot heatmap with samples as X-axis (instead of Y-axis).")
P_heatmap.add_argument('--delimit-samples',
action='store_true', dest='delim_sampl',
    help="Add a horizontal delimiting line between each sample.")
P_heatmap.add_argument('-y', '--male-reference', '--haploid-x-reference',
action='store_true',
help="""Assume inputs were normalized to a male reference
(i.e. female samples will have +1 log-CNR of chrX;
otherwise male samples would have -1 chrX).""")
P_heatmap.add_argument('-x', '--sample-sex', '-g', '--gender',
dest='sample_sex',
choices=('m', 'y', 'male', 'Male', 'f', 'x', 'female', 'Female'),
help="""Specify the chromosomal sex of all given samples as male or
female. (Default: guess each sample from coverage of X and Y
chromosomes).""")
P_heatmap.add_argument('--no-shift-xy', dest='adjust_xy', action='store_false',
help="Don't adjust the X and Y chromosomes according to sample sex.")
P_heatmap.add_argument('-o', '--output', metavar="FILENAME",
help="Output PDF file name.")
P_heatmap.set_defaults(func=_cmd_heatmap)
# _____________________________________________________________________________
# Tabular outputs
# breaks ----------------------------------------------------------------------
do_breaks = public(reports.do_breaks)
def _cmd_breaks(args):
"""List the targeted genes in which a copy number breakpoint occurs."""
cnarr = read_cna(args.filename)
segarr = read_cna(args.segment)
bpoints = do_breaks(cnarr, segarr, args.min_probes)
logging.info("Found %d gene breakpoints", len(bpoints))
write_dataframe(args.output, bpoints)
P_breaks = AP_subparsers.add_parser('breaks', help=_cmd_breaks.__doc__)
P_breaks.add_argument('filename',
help="""Processed sample coverage data file (*.cnr), the output
of the 'fix' sub-command.""")
P_breaks.add_argument('segment',
    help="Segmentation calls (.cns), the output of the 'segment' command.")
P_breaks.add_argument('-m', '--min-probes', type=int, default=1,
help="""Minimum number of within-gene probes on both sides of a
breakpoint to report it. [Default: %(default)d]""")
P_breaks.add_argument('-o', '--output', metavar="FILENAME",
help="Output table file name.")
P_breaks.set_defaults(func=_cmd_breaks)
# genemetrics/gainloss --------------------------------------------------------
do_genemetrics = public(reports.do_genemetrics)
def _cmd_genemetrics(args):
"""Identify targeted genes with copy number gain or loss."""
cnarr = read_cna(args.filename)
segarr = read_cna(args.segment) if args.segment else None
is_sample_female = verify_sample_sex(cnarr, args.sample_sex,
args.male_reference)
# TODO use the stats args
table = do_genemetrics(cnarr, segarr, args.threshold, args.min_probes,
args.drop_low_coverage, args.male_reference,
is_sample_female)
logging.info("Found %d gene-level gains and losses", len(table))
write_dataframe(args.output, table)
P_genemetrics = AP_subparsers.add_parser('genemetrics',
help=_cmd_genemetrics.__doc__)
P_genemetrics.add_argument('filename',
help="""Processed sample coverage data file (*.cnr), the output
of the 'fix' sub-command.""")
P_genemetrics.add_argument('-s', '--segment',
    help="Segmentation calls (.cns), the output of the 'segment' command.")
P_genemetrics.add_argument('-t', '--threshold', type=float, default=0.2,
help="""Copy number change threshold to report a gene gain/loss.
[Default: %(default)s]""")
P_genemetrics.add_argument('-m', '--min-probes', type=int, default=3,
help="""Minimum number of covered probes to report a gain/loss.
[Default: %(default)d]""")
P_genemetrics.add_argument("--drop-low-coverage", action='store_true',
help="""Drop very-low-coverage bins before segmentation to avoid
false-positive deletions in poor-quality tumor samples.""")
P_genemetrics.add_argument('-y', '--male-reference', '--haploid-x-reference',
action='store_true',
help="""Assume inputs were normalized to a male reference
(i.e. female samples will have +1 log-coverage of chrX;
otherwise male samples would have -1 chrX).""")
P_genemetrics.add_argument('-x', '--sample-sex', '-g', '--gender',
dest='sample_sex',
choices=('m', 'y', 'male', 'Male', 'f', 'x', 'female', 'Female'),
help="""Specify the sample's chromosomal sex as male or female.
(Otherwise guessed from X and Y coverage).""")
P_genemetrics.add_argument('-o', '--output', metavar="FILENAME",
help="Output table file name.")
P_genemetrics_stats = P_genemetrics.add_argument_group(
"Statistics available")
# Location statistics
P_genemetrics_stats.add_argument('--mean',
action='append_const', dest='location_stats', const='mean',
help="Mean log2-ratio (unweighted).")
P_genemetrics_stats.add_argument('--median',
action='append_const', dest='location_stats', const='median',
help="Median.")
P_genemetrics_stats.add_argument('--mode',
action='append_const', dest='location_stats', const='mode',
help="Mode (i.e. peak density of log2 ratios).")
P_genemetrics_stats.add_argument('--ttest',
action='append_const', dest='location_stats', const='p_ttest',
help="One-sample t-test of bin log2 ratios versus 0.0.")
# Dispersion statistics
P_genemetrics_stats.add_argument('--stdev',
action='append_const', dest='spread_stats', const='stdev',
help="Standard deviation.")
P_genemetrics_stats.add_argument('--sem',
action='append_const', dest='spread_stats', const='sem',
help="Standard error of the mean.")
P_genemetrics_stats.add_argument('--mad',
action='append_const', dest='spread_stats', const='mad',
help="Median absolute deviation (standardized).")
P_genemetrics_stats.add_argument('--mse',
action='append_const', dest='spread_stats', const='mse',
help="Mean squared error.")
P_genemetrics_stats.add_argument('--iqr',
action='append_const', dest='spread_stats', const='iqr',
help="Inter-quartile range.")
P_genemetrics_stats.add_argument('--bivar',
action='append_const', dest='spread_stats', const='bivar',
help="Tukey's biweight midvariance.")
# Interval statistics
P_genemetrics_stats.add_argument('--ci',
action='append_const', dest='interval_stats', const='ci',
help="Confidence interval (by bootstrap).")
P_genemetrics_stats.add_argument('--pi',
action='append_const', dest='interval_stats', const='pi',
help="Prediction interval.")
P_genemetrics_stats.add_argument('-a', '--alpha', type=float, default=.05,
help="""Level to estimate confidence and prediction intervals;
use with --ci and --pi. [Default: %(default)s]""")
P_genemetrics_stats.add_argument('-b', '--bootstrap', type=int, default=100,
help="""Number of bootstrap iterations to estimate confidence interval;
use with --ci. [Default: %(default)d]""")
P_genemetrics_stats.set_defaults(location_stats=[], spread_stats=[],
interval_stats=[])
P_genemetrics.set_defaults(func=_cmd_genemetrics)
# Shims
AP_subparsers._name_parser_map['gainloss'] = P_genemetrics
do_gainloss = public(do_genemetrics)
# sex/gender ------------------------------------------------------------------
def _cmd_sex(args):
"""Guess samples' sex from the relative coverage of chromosomes X and Y."""
cnarrs = map(read_cna, args.filenames)
table = do_sex(cnarrs, args.male_reference)
write_dataframe(args.output, table, header=True)
@public
def do_sex(cnarrs, is_male_reference):
"""Guess samples' sex from the relative coverage of chromosomes X and Y."""
def strsign(num):
if num > 0:
return "+%.3g" % num
return "%.3g" % num
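    # e.g. strsign(0.571) -> "+0.571"; strsign(-1.2) -> "-1.2"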
def guess_and_format(cna):
is_xy, stats = cna.compare_sex_chromosomes(is_male_reference)
return (cna.meta["filename"] or cna.sample_id,
"Male" if is_xy else "Female",
strsign(stats['chrx_ratio']) if stats else "NA",
strsign(stats['chry_ratio']) if stats else "NA")
rows = (guess_and_format(cna) for cna in cnarrs)
columns = ["sample", "sex", "X_logratio", "Y_logratio"]
return pd.DataFrame.from_records(rows, columns=columns)
P_sex = AP_subparsers.add_parser('sex', help=_cmd_sex.__doc__)
P_sex.add_argument('filenames', nargs='+',
help="Copy number or copy ratio files (*.cnn, *.cnr).")
P_sex.add_argument('-y', '--male-reference', '--haploid-x-reference',
action='store_true',
help="""Assume inputs were normalized to a male reference
(i.e. female samples will have +1 log-coverage of chrX;
otherwise male samples would have -1 chrX).""")
P_sex.add_argument('-o', '--output', metavar="FILENAME",
help="Output table file name.")
P_sex.set_defaults(func=_cmd_sex)
# Shims
AP_subparsers._name_parser_map['gender'] = P_sex
do_gender = public(do_sex)
# metrics ---------------------------------------------------------------------
do_metrics = public(metrics.do_metrics)
def _cmd_metrics(args):
"""Compute coverage deviations and other metrics for self-evaluation."""
if (len(args.cnarrays) > 1 and
args.segments and len(args.segments) > 1 and
len(args.cnarrays) != len(args.segments)):
        raise ValueError("The number of coverage and segment filenames must "
                         "be equal if more than one segment file is given.")
cnarrs = map(read_cna, args.cnarrays)
if args.segments:
args.segments = map(read_cna, args.segments)
table = metrics.do_metrics(cnarrs, args.segments, args.drop_low_coverage)
write_dataframe(args.output, table)
P_metrics = AP_subparsers.add_parser('metrics', help=_cmd_metrics.__doc__)
P_metrics.add_argument('cnarrays', nargs='+',
help="""One or more bin-level coverage data files (*.cnn, *.cnr).""")
P_metrics.add_argument('-s', '--segments', nargs='+',
help="""One or more segmentation data files (*.cns, output of the
'segment' command). If more than one file is given, the number
must match the coverage data files, in which case the input
files will be paired together in the given order. Otherwise, the
same segments will be used for all coverage files.""")
P_metrics.add_argument("--drop-low-coverage", action='store_true',
help="""Drop very-low-coverage bins before calculations to reduce
negative "fat tail" of bin log2 values in poor-quality
tumor samples.""")
P_metrics.add_argument('-o', '--output', metavar="FILENAME",
help="Output table file name.")
P_metrics.set_defaults(func=_cmd_metrics)
# segmetrics ------------------------------------------------------------------
do_segmetrics = public(segmetrics.do_segmetrics)
def _cmd_segmetrics(args):
"""Compute segment-level metrics from bin-level log2 ratios."""
if not 0.0 < args.alpha <= 1.0:
raise RuntimeError("alpha must be between 0 and 1.")
if not any((args.location_stats, args.spread_stats, args.interval_stats)):
logging.info("No stats specified")
return
# Calculate all metrics
cnarr = read_cna(args.cnarray)
if args.drop_low_coverage:
cnarr = cnarr.drop_low_coverage()
segarr = read_cna(args.segments)
segarr = do_segmetrics(cnarr, segarr, args.location_stats,
args.spread_stats, args.interval_stats,
args.alpha, args.bootstrap, args.smooth_bootstrap)
tabio.write(segarr, args.output or segarr.sample_id + ".segmetrics.cns")
P_segmetrics = AP_subparsers.add_parser('segmetrics', help=_cmd_segmetrics.__doc__)
P_segmetrics.add_argument('cnarray',
help="""Bin-level copy ratio data file (*.cnn, *.cnr).""")
P_segmetrics.add_argument('-s', '--segments', required=True,
help="Segmentation data file (*.cns, output of the 'segment' command).")
P_segmetrics.add_argument("--drop-low-coverage", action='store_true',
help="""Drop very-low-coverage bins before calculations to avoid
negative bias in poor-quality tumor samples.""")
P_segmetrics.add_argument('-o', '--output', metavar="FILENAME",
help="Output table file name.")
P_segmetrics_stats = P_segmetrics.add_argument_group(
"Statistics available")
# Location statistics
P_segmetrics_stats.add_argument('--mean',
action='append_const', dest='location_stats', const='mean',
help="Mean log2 ratio (unweighted).")
P_segmetrics_stats.add_argument('--median',
action='append_const', dest='location_stats', const='median',
help="Median.")
P_segmetrics_stats.add_argument('--mode',
action='append_const', dest='location_stats', const='mode',
help="Mode (i.e. peak density of bin log2 ratios).")
P_segmetrics_stats.add_argument('--t-test',
action='append_const', dest='location_stats', const='p_ttest',
help="One-sample t-test of bin log2 ratios versus 0.0.")
# Dispersion statistics
P_segmetrics_stats.add_argument('--stdev',
action='append_const', dest='spread_stats', const='stdev',
help="Standard deviation.")
P_segmetrics_stats.add_argument('--sem',
action='append_const', dest='spread_stats', const='sem',
help="Standard error of the mean.")
P_segmetrics_stats.add_argument('--mad',
action='append_const', dest='spread_stats', const='mad',
help="Median absolute deviation (standardized).")
P_segmetrics_stats.add_argument('--mse',
action='append_const', dest='spread_stats', const='mse',
help="Mean squared error.")
P_segmetrics_stats.add_argument('--iqr',
action='append_const', dest='spread_stats', const='iqr',
help="Inter-quartile range.")
P_segmetrics_stats.add_argument('--bivar',
action='append_const', dest='spread_stats', const='bivar',
help="Tukey's biweight midvariance.")
# Interval statistics
P_segmetrics_stats.add_argument('--ci',
action='append_const', dest='interval_stats', const='ci',
help="Confidence interval (by bootstrap).")
P_segmetrics_stats.add_argument('--pi',
action='append_const', dest='interval_stats', const='pi',
help="Prediction interval.")
P_segmetrics_stats.add_argument('-a', '--alpha',
type=float, default=.05,
help="""Level to estimate confidence and prediction intervals;
use with --ci and --pi. [Default: %(default)s]""")
P_segmetrics_stats.add_argument('-b', '--bootstrap',
type=int, default=100,
help="""Number of bootstrap iterations to estimate confidence interval;
use with --ci. [Default: %(default)d]""")
P_segmetrics_stats.add_argument('--smooth-bootstrap',
action='store_true',
help="""Apply Gaussian noise to bootstrap samples, a.k.a. smoothed
bootstrap, to estimate confidence interval; use with --ci.
""")
P_segmetrics_stats.set_defaults(location_stats=[], spread_stats=[],
interval_stats=[])
P_segmetrics.set_defaults(func=_cmd_segmetrics)
# bintest -----------------------------------------------------------------------
do_bintest = public(bintest.do_bintest)
def _cmd_bintest(args):
"""Test for single-bin copy number alterations."""
cnarr = read_cna(args.cnarray)
segments = read_cna(args.segment) if args.segment else None
sig = do_bintest(cnarr, segments, args.alpha, args.target)
tabio.write(sig, args.output or sys.stdout)
P_bintest = AP_subparsers.add_parser('bintest', help=_cmd_bintest.__doc__)
P_bintest.add_argument('cnarray',
help="Bin-level log2 ratios (.cnr file), as produced by 'fix'.")
P_bintest.add_argument('-s', '--segment', metavar="FILENAME",
help="""Segmentation calls (.cns), the output of the
'segment' command).""")
P_bintest.add_argument("-a", "--alpha", type=float, default=0.005,
help="Significance threhold. [Default: %(default)s]")
P_bintest.add_argument("-t", "--target", action="store_true",
help="Test target bins only; ignore off-target bins.")
P_bintest.add_argument("-o", "--output",
help="Output filename.")
P_bintest.set_defaults(func=_cmd_bintest)
# _____________________________________________________________________________
# Other I/O and compatibility
# import-picard ---------------------------------------------------------------
def _cmd_import_picard(args):
"""Convert Picard CalculateHsMetrics tabular output to CNVkit .cnn files.
The input file is generated by the PER_TARGET_COVERAGE option in the
CalculateHsMetrics script in Picard tools.
If 'antitarget' is in the input filename, the generated output filename will
have the suffix '.antitargetcoverage.cnn', otherwise '.targetcoverage.cnn'.
"""
for fname in args.targets:
if not os.path.isfile(fname):
# Legacy usage: previously accepted directory as an argument
raise ValueError("Not a file: %s" % fname)
garr = importers.do_import_picard(fname)
outfname = ("{}.{}targetcoverage.cnn"
.format(garr.sample_id,
'anti' if 'antitarget' in fname else ''))
if args.output_dir:
if not os.path.isdir(args.output_dir):
os.mkdir(args.output_dir)
logging.info("Created directory %s", args.output_dir)
outfname = os.path.join(args.output_dir, outfname)
tabio.write(garr, outfname)
P_import_picard = AP_subparsers.add_parser('import-picard',
help=_cmd_import_picard.__doc__)
P_import_picard.add_argument('targets', nargs='+',
help="""Sample coverage .csv files (target and antitarget).""")
P_import_picard.add_argument('-d', '--output-dir',
metavar="DIRECTORY", default='.',
help="Output directory name.")
P_import_picard.set_defaults(func=_cmd_import_picard)
# import-seg ------------------------------------------------------------------
def _cmd_import_seg(args):
"""Convert a SEG file to CNVkit .cns files."""
from .cnary import CopyNumArray as _CNA
if args.chromosomes:
if args.chromosomes == 'human':
chrom_names = {'23': 'X', '24': 'Y', '25': 'M'}
else:
chrom_names = dict(kv.split(':')
for kv in args.chromosomes.split(','))
else:
chrom_names = args.chromosomes
for sid, segtable in tabio.seg.parse_seg(args.segfile, chrom_names,
args.prefix, args.from_log10):
segarr = _CNA(segtable, {"sample_id": sid})
tabio.write(segarr, os.path.join(args.output_dir, sid + '.cns'))
P_import_seg = AP_subparsers.add_parser('import-seg',
help=_cmd_import_seg.__doc__)
P_import_seg.add_argument('segfile',
help="""Input file in SEG format. May contain multiple samples.""")
P_import_seg.add_argument('-c', '--chromosomes',
help="""Mapping of chromosome indexes to names. Syntax:
"from1:to1,from2:to2". Or use "human" for the preset:
"23:X,24:Y,25:M".""")
P_import_seg.add_argument('-p', '--prefix',
help="""Prefix to add to chromosome names (e.g 'chr' to rename '8' in
the SEG file to 'chr8' in the output).""")
P_import_seg.add_argument('--from-log10', action='store_true',
help="Convert base-10 logarithm values in the input to base-2 logs.")
P_import_seg.add_argument('-d', '--output-dir',
metavar="DIRECTORY", default='.',
help="Output directory name.")
P_import_seg.set_defaults(func=_cmd_import_seg)
# import-theta ---------------------------------------------------------------
do_import_theta = public(importers.do_import_theta)
def _cmd_import_theta(args):
"""Convert THetA output to a BED-like, CNVkit-like tabular format.
Equivalently, use the THetA results file to convert CNVkit .cns segments to
integer copy number calls.
"""
tumor_segs = read_cna(args.tumor_cns)
for i, new_cns in enumerate(do_import_theta(tumor_segs, args.theta_results,
args.ploidy)):
tabio.write(new_cns,
os.path.join(args.output_dir,
"%s-%d.cns" % (tumor_segs.sample_id, i + 1)))
P_import_theta = AP_subparsers.add_parser('import-theta',
help=_cmd_import_theta.__doc__)
P_import_theta.add_argument("tumor_cns")
P_import_theta.add_argument("theta_results")
P_import_theta.add_argument("--ploidy", type=int, default=2,
help="Ploidy of normal cells. [Default: %(default)d]")
P_import_theta.add_argument('-d', '--output-dir',
metavar="DIRECTORY", default='.',
help="Output directory name.")
P_import_theta.set_defaults(func=_cmd_import_theta)
# import-rna ------------------------------------------------------------------
do_import_rna = public(import_rna.do_import_rna)
def _cmd_import_rna(args):
"""Convert a cohort of per-gene log2 ratios to CNVkit .cnr format."""
all_data, cnrs = import_rna.do_import_rna(
args.gene_counts, args.format, args.gene_resource, args.correlations,
args.normal, args.do_gc, args.do_txlen, args.max_log2)
logging.info("Writing output files")
if args.output:
all_data.to_csv(args.output, sep='\t', index=True)
logging.info("Wrote %s with %d rows", args.output, len(all_data))
else:
        logging.info(all_data.describe())
for cnr in cnrs:
outfname = os.path.join(args.output_dir, cnr.sample_id + ".cnr")
tabio.write(cnr, outfname, 'tab')
P_import_rna = AP_subparsers.add_parser('import-rna',
help=_cmd_import_rna.__doc__)
P_import_rna.add_argument('gene_counts',
nargs='+', metavar="FILES",
help="""Tabular files with Ensembl gene ID and number of reads mapped to
each gene, from RSEM or another transcript quantifier.""")
P_import_rna.add_argument('-f', '--format',
choices=('rsem', 'counts'), default='counts', metavar='NAME',
help="""Input format name: 'rsem' for RSEM gene-level read counts
(*_rsem.genes.results), or 'counts' for generic 2-column gene
IDs and their read counts (e.g. TCGA level 2 RNA expression).
""")
P_import_rna.add_argument('-g', '--gene-resource',
metavar="FILE", required=True,
help="Location of gene info table from Ensembl BioMart.")
P_import_rna.add_argument('-c', '--correlations', metavar="FILE",
help="""Correlation of each gene's copy number with
expression. Output of cnv_expression_correlate.py.""")
P_import_rna.add_argument('--max-log2',
metavar="FLOAT", default=3.0, type=float,
help="""Maximum log2 ratio in output. Observed values above this limit
will be replaced with this value. [Default: %(default)s]""")
P_import_rna.add_argument('-n', '--normal', nargs='+', default=[],
help="""Normal samples (same format as `gene_counts`) to be used as a
            control when normalizing and re-centering gene read depth
ratios. All filenames following this option will be used.""")
P_import_rna.add_argument('-d', '--output-dir',
default='.', metavar="PATH",
help="""Directory to write a CNVkit .cnr file for each input
sample. [Default: %(default)s]""")
P_import_rna.add_argument('-o', '--output', metavar="FILE",
help="Output file name (summary table).")
P_import_rna_bias = P_import_rna.add_argument_group(
"To disable specific automatic bias corrections")
P_import_rna_bias.add_argument('--no-gc', dest='do_gc', action='store_false',
help="Skip GC correction.")
P_import_rna_bias.add_argument('--no-txlen', dest='do_txlen', action='store_false',
help="Skip transcript length correction.")
P_import_rna.set_defaults(func=_cmd_import_rna)
# export ----------------------------------------------------------------------
P_export = AP_subparsers.add_parser('export',
help="""Convert CNVkit output files to another format.""")
P_export_subparsers = P_export.add_subparsers(
help="Export formats (use with -h for more info).")
# BED special case: multiple samples' segments, like SEG
def _cmd_export_bed(args):
"""Convert segments to BED format.
Input is a segmentation file (.cns) where, preferably, log2 ratios have
already been adjusted to integer absolute values using the 'call' command.
"""
bed_tables = []
for segfname in args.segments:
segments = read_cna(segfname)
# ENH: args.sample_sex as a comma-separated list
is_sample_female = verify_sample_sex(segments, args.sample_sex,
args.male_reference)
if args.sample_id:
label = args.sample_id
elif args.label_genes:
label = None
else:
label = segments.sample_id
tbl = export.export_bed(segments, args.ploidy,
args.male_reference, is_sample_female,
label, args.show)
bed_tables.append(tbl)
table = pd.concat(bed_tables)
write_dataframe(args.output, table, header=False)
P_export_bed = P_export_subparsers.add_parser('bed',
help=_cmd_export_bed.__doc__)
P_export_bed.add_argument('segments', nargs='+',
help="""Segmented copy ratio data files (*.cns), the output of the
'segment' or 'call' sub-commands.""")
P_export_bed.add_argument("-i", "--sample-id", metavar="LABEL",
help="""Identifier to write in the 4th column of the BED file.
[Default: use the sample ID, taken from the file name]""")
P_export_bed.add_argument('--label-genes', action='store_true',
help="""Show gene names in the 4th column of the BED file.
            (This is a bad idea if more than one input file is given.)""")
P_export_bed.add_argument("--ploidy", type=int, default=2,
help="Ploidy of the sample cells. [Default: %(default)d]")
P_export_bed.add_argument('-x', '--sample-sex', '-g', '--gender',
dest='sample_sex',
choices=('m', 'y', 'male', 'Male', 'f', 'x', 'female', 'Female'),
help="""Specify the sample's chromosomal sex as male or female.
(Otherwise guessed from X and Y coverage).""")
P_export_bed.add_argument("--show",
choices=('ploidy', 'variant', 'all'), default="ploidy",
help="""Which segmented regions to show:
'all' = all segment regions;
'variant' = CNA regions with non-neutral copy number;
'ploidy' = CNA regions with non-default ploidy.
[Default: %(default)s]""")
P_export_bed.add_argument('-y', '--male-reference', '--haploid-x-reference',
action='store_true',
help="""Was a male reference used? If so, expect half ploidy on
chrX and chrY; otherwise, only chrY has half ploidy. In CNVkit,
if a male reference was used, the "neutral" copy number (ploidy)
of chrX is 1; chrY is haploid for either reference sex.""")
P_export_bed.add_argument('-o', '--output', metavar="FILENAME",
help="Output file name.")
P_export_bed.set_defaults(func=_cmd_export_bed)
# SEG special case: segment coords don't match across samples
def _cmd_export_seg(args):
"""Convert segments to SEG format.
Compatible with IGV and GenePattern.
"""
table = export.export_seg(args.filenames, chrom_ids=args.enumerate_chroms)
write_dataframe(args.output, table)
P_export_seg = P_export_subparsers.add_parser('seg',
help=_cmd_export_seg.__doc__)
P_export_seg.add_argument('filenames', nargs='+',
help="""Segmented copy ratio data file(s) (*.cns), the output of the
'segment' sub-command.""")
P_export_seg.add_argument('--enumerate-chroms', action='store_true',
help="""Replace chromosome names with sequential integer IDs.""")
P_export_seg.add_argument('-o', '--output', metavar="FILENAME",
help="Output file name.")
P_export_seg.set_defaults(func=_cmd_export_seg)
# VCF special case: only 1 sample, for now
def _cmd_export_vcf(args):
"""Convert segments to VCF format.
Input is a segmentation file (.cns) where, preferably, log2 ratios have
already been adjusted to integer absolute values using the 'call' command.
"""
segarr = read_cna(args.segments)
cnarr = read_cna(args.cnr) if args.cnr else None
is_sample_female = verify_sample_sex(segarr, args.sample_sex,
args.male_reference)
header, body = export.export_vcf(segarr, args.ploidy, args.male_reference,
is_sample_female, args.sample_id, cnarr)
write_text(args.output, header, body)
P_export_vcf = P_export_subparsers.add_parser('vcf',
help=_cmd_export_vcf.__doc__)
P_export_vcf.add_argument('segments', #nargs='1',
help="""Segmented copy ratio data file (*.cns), the output of the
'segment' or 'call' sub-commands.""")
# ENH?: Incorporate left/right CI into .cns via 'segment' or 'segmetrics',
# potentially calculated another way besides adjacent bin boundaries
P_export_vcf.add_argument("--cnr",
help="""Bin-level copy ratios (*.cnr). Used to indicate fuzzy boundaries
for segments in the output VCF via the CIPOS and CIEND tags.""")
P_export_vcf.add_argument("-i", "--sample-id", metavar="LABEL",
help="""Sample name to write in the genotype field of the output VCF file.
[Default: use the sample ID, taken from the file name]""")
P_export_vcf.add_argument("--ploidy", type=int, default=2,
help="Ploidy of the sample cells. [Default: %(default)d]")
P_export_vcf.add_argument('-x', '--sample-sex', '-g', '--gender',
dest='sample_sex',
choices=('m', 'y', 'male', 'Male', 'f', 'x', 'female', 'Female'),
help="""Specify the sample's chromosomal sex as male or female.
(Otherwise guessed from X and Y coverage).""")
P_export_vcf.add_argument('-y', '--male-reference', '--haploid-x-reference',
action='store_true',
help="""Was a male reference used? If so, expect half ploidy on
chrX and chrY; otherwise, only chrY has half ploidy. In CNVkit,
if a male reference was used, the "neutral" copy number (ploidy)
of chrX is 1; chrY is haploid for either reference sex.""")
P_export_vcf.add_argument('-o', '--output', metavar="FILENAME",
help="Output file name.")
P_export_vcf.set_defaults(func=_cmd_export_vcf)
# THetA special case: takes tumor .cns and normal .cnr or reference.cnn
def _cmd_export_theta(args):
"""Convert segments to THetA2 input file format (*.input)."""
tumor_cn = read_cna(args.tumor_segment)
normal_cn = read_cna(args.reference) if args.reference else None
table = export.export_theta(tumor_cn, normal_cn)
if not args.output:
args.output = tumor_cn.sample_id + ".interval_count"
table.to_csv(args.output, sep='\t', index=False)
logging.info("Wrote %s", args.output)
if args.vcf:
variants = load_het_snps(args.vcf,
args.sample_id, # or tumor_cn.sample_id,
args.normal_id, args.min_variant_depth,
args.zygosity_freq)
if not len(variants):
raise ValueError("VCF contains no usable SNV records")
try:
tumor_snps, normal_snps = export.export_theta_snps(variants)
except ValueError:
raise ValueError("VCF does not contain any tumor/normal paired "
"samples")
for title, table in [("tumor", tumor_snps), ("normal", normal_snps)]:
out_fname = "{}.{}.snp_formatted.txt".format(tumor_cn.sample_id, title)
table.to_csv(out_fname, sep='\t', index=False)
logging.info("Wrote %s", out_fname)
P_export_theta = P_export_subparsers.add_parser('theta',
help=_cmd_export_theta.__doc__)
P_export_theta.add_argument('tumor_segment',
help="""Tumor-sample segmentation file from CNVkit (.cns).""")
P_export_theta.add_argument('-r', '--reference',
help="""Reference copy number profile (.cnn), or normal-sample bin-level
log2 copy ratios (.cnr). Use if the tumor_segment input file
does not contain a "weight" column.""")
P_export_theta.add_argument('-o', '--output', metavar="FILENAME",
help="Output file name.")
P_extheta_vcf = P_export_theta.add_argument_group(
"To also output tables of SNP b-allele frequencies for THetA2")
P_extheta_vcf.add_argument('-v', '--vcf',
help="""VCF file containing SNVs observed in both the tumor and normal
samples. Tumor sample ID should match the `tumor_segment`
filename or be specified with -i/--sample-id.""")
P_extheta_vcf.add_argument('-i', '--sample-id',
help="""Specify the name of the tumor sample in the VCF (given with
            -v/--vcf). [Default: taken from the tumor_segment file name]""")
P_extheta_vcf.add_argument('-n', '--normal-id',
help="Corresponding normal sample ID in the input VCF.")
P_extheta_vcf.add_argument('-m', '--min-variant-depth', type=int, default=20,
help="""Minimum read depth for a SNP in the VCF to be counted.
[Default: %(default)s]""")
P_extheta_vcf.add_argument('-z', '--zygosity-freq',
metavar='ALT_FREQ', nargs='?', type=float, const=0.25,
help="""Ignore VCF's genotypes (GT field) and instead infer zygosity
from allele frequencies. [Default if used without a number:
%(const)s]""")
P_export_theta.set_defaults(func=_cmd_export_theta)
# Nexus "basic" special case: can only represent 1 sample
def _cmd_export_nb(args):
"""Convert bin-level log2 ratios to Nexus Copy Number "basic" format."""
cnarr = read_cna(args.filename)
table = export.export_nexus_basic(cnarr)
write_dataframe(args.output, table)
P_export_nb = P_export_subparsers.add_parser('nexus-basic',
help=_cmd_export_nb.__doc__)
P_export_nb.add_argument('filename',
help="""Log2 copy ratio data file (*.cnr), the output of the 'fix'
sub-command.""")
P_export_nb.add_argument('-o', '--output', metavar="FILENAME",
help="Output file name.")
P_export_nb.set_defaults(func=_cmd_export_nb)
# Nexus "Custom-OGT" special case: can only represent 1 sample
def _cmd_export_nbo(args):
"""Convert log2 ratios and b-allele freqs to Nexus "Custom-OGT" format."""
cnarr = read_cna(args.filename)
varr = load_het_snps(args.vcf, args.sample_id, args.normal_id,
args.min_variant_depth, args.zygosity_freq)
table = export.export_nexus_ogt(cnarr, varr, args.min_weight)
write_dataframe(args.output, table)
P_export_nbo = P_export_subparsers.add_parser('nexus-ogt',
help=_cmd_export_nbo.__doc__)
P_export_nbo.add_argument('filename',
help="""Log2 copy ratio data file (*.cnr), the output of the 'fix'
sub-command.""")
P_export_nbo.add_argument('vcf',
help="""VCF of SNVs for the same sample, to calculate b-allele
frequencies.""")
P_export_nbo.add_argument('-i', '--sample-id',
help="""Specify the name of the sample in the VCF to use to extract
b-allele frequencies.""")
P_export_nbo.add_argument('-n', '--normal-id',
help="Corresponding normal sample ID in the input VCF.")
P_export_nbo.add_argument('-m', '--min-variant-depth', type=int, default=20,
help="""Minimum read depth for a SNV to be included in the b-allele
frequency calculation. [Default: %(default)s]""")
P_export_nbo.add_argument('-z', '--zygosity-freq',
metavar='ALT_FREQ', nargs='?', type=float, const=0.25,
help="""Ignore VCF's genotypes (GT field) and instead infer zygosity
from allele frequencies. [Default if used without a number:
%(const)s]""")
P_export_nbo.add_argument('-w', '--min-weight', type=float, default=0.0,
help="""Minimum weight (between 0 and 1) for a bin to be included in
the output. [Default: %(default)s]""")
P_export_nbo.add_argument('-o', '--output', metavar="FILENAME",
help="Output file name.")
P_export_nbo.set_defaults(func=_cmd_export_nbo)
# All else: export any number of .cnr or .cns files
def _cmd_export_cdt(args):
"""Convert log2 ratios to CDT format. Compatible with Java TreeView."""
sample_ids = list(map(core.fbase, args.filenames))
table = export.merge_samples(args.filenames)
formatter = export.EXPORT_FORMATS['cdt']
outheader, outrows = formatter(sample_ids, table)
write_tsv(args.output, outrows, colnames=outheader)
P_export_cdt = P_export_subparsers.add_parser('cdt',
help=_cmd_export_cdt.__doc__)
P_export_cdt.add_argument('filenames', nargs='+',
help="""Log2 copy ratio data file(s) (*.cnr), the output of the
'fix' sub-command.""")
P_export_cdt.add_argument('-o', '--output', metavar="FILENAME",
help="Output file name.")
P_export_cdt.set_defaults(func=_cmd_export_cdt)
def _cmd_export_jtv(args):
"""Convert log2 ratios to Java TreeView's native format."""
sample_ids = list(map(core.fbase, args.filenames))
table = export.merge_samples(args.filenames)
formatter = export.EXPORT_FORMATS['jtv']
outheader, outrows = formatter(sample_ids, table)
write_tsv(args.output, outrows, colnames=outheader)
P_export_jtv = P_export_subparsers.add_parser('jtv',
help=_cmd_export_jtv.__doc__)
P_export_jtv.add_argument('filenames', nargs='+',
help="""Log2 copy ratio data file(s) (*.cnr), the output of the
'fix' sub-command.""")
P_export_jtv.add_argument('-o', '--output', metavar="FILENAME",
help="Output file name.")
P_export_jtv.set_defaults(func=_cmd_export_jtv)
def _cmd_export_gistic(args):
    """Convert log2 copy ratios to a GISTIC-compatible table."""
formatter = export.EXPORT_FORMATS['gistic']
outdf = formatter(args.filenames)
write_dataframe(args.output, outdf)
P_export_gistic = P_export_subparsers.add_parser('gistic',
help=_cmd_export_gistic.__doc__)
P_export_gistic.add_argument('filenames', nargs='+',
help="""Log2 copy ratio data file(s) (*.cnr), the output of the
'fix' sub-command.""")
P_export_gistic.add_argument('-o', '--output', metavar="FILENAME",
help="Output file name.")
P_export_gistic.set_defaults(func=_cmd_export_gistic)
# version ---------------------------------------------------------------------
def print_version(_args):
"""Display this program's version."""
print(__version__)
P_version = AP_subparsers.add_parser('version', help=print_version.__doc__)
P_version.set_defaults(func=print_version)
# _____________________________________________________________________________
# Shim for command-line execution
def parse_args(args=None):
"""Parse the command line."""
return AP.parse_args(args=args)
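# Hedged usage sketch (not part of the original module): every sub-parser above
# registers its handler via set_defaults(func=...), so a typical entry point
# dispatches the chosen sub-command like this:
#
#     args = parse_args()
#     args.func(args)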
|
py | b403a612b5202e0a94c3edc2f78493a3533b8fd2 |
from .models import *
from .summarizer import *
from .question_generation.pipelines import pipeline
from random import randint
nlp = pipeline("question-generation")
def make_questions(snippet):
'''
:param snippet: A snippet object which will have Topic, Content
Using the content of the snippet, generate some questions
:return: The list of Question Objects generated for snippet
'''
snippet_text = snippet.content
snippet_title = snippet.title
print(snippet_text)
json_result = nlp(snippet_text)
print(json_result)
# Make 4 lists 1.questions 2.options 3.answers 4.Time using snippet_text
# See Following Example
qno = [] # ['Q1', 'Q2']
questions = [] # ['XYZ', 'PQR']
options = [] # ['1,2,3,4', '1,2,3,4']
answers = [] # ['1', '4']
time = [] # [10, 12]
for idx, qa_pair in enumerate(json_result):
qno.append("Q" + str(idx + 1))
questions.append(qa_pair["question"])
wrong_options = ['Wrong1', 'Wrong2', 'Wrong3', 'Wrong4']
        # Put the correct answer into a random slot of a fresh copy of the
        # placeholder options, then store what parse_question() below expects:
        # a comma-separated option string and the correct option's content.
        correct_idx = randint(0, 3)
        opt_temp = list(wrong_options)
        opt_temp[correct_idx] = qa_pair["answer"]
        options.append(','.join(opt_temp))
        answers.append(opt_temp[correct_idx])
time.append(20)
questions_object_list = []
for x in range(len(questions)):
new_question = Question.objects.create(content=questions[x], options=options[x], max_time=time[x], correct_option=answers[x])
new_question.save()
questions_object_list.append(new_question)
return questions_object_list
def parse_question(question):
    """Convert a Question object into a dict of its content, time limit and options."""
question_dict = {'maxTimeSec': question.max_time, 'content': question.content}
options_list = []
options_split_list = question.options.split(',')
for option in options_split_list:
option_dict = {'correct': False, 'content': option}
if option == question.correct_option:
option_dict['correct'] = True
options_list.append(option_dict)
question_dict['options'] = options_list
return question_dict
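# Illustrative example (hypothetical values, not from the original code): for a
# Question with max_time=20, options stored as "Paris,London,Rome,Madrid" and
# correct_option "Paris", parse_question() returns:
#
#     {'maxTimeSec': 20,
#      'content': 'Which city ...?',
#      'options': [{'correct': True, 'content': 'Paris'},
#                  {'correct': False, 'content': 'London'}, ...]}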
|
py | b403a67f22fd5582dd5f9db18341dd4ab8c5b638 | import json
morphobr_to_bosque = {
'A': ['Cat=ADJ'],
'ADV': ['Cat=ADV'],
'N': ['Cat=NOUN'],
'V': ['Cat=VERB'],
'F': ['Gender=Fem'],
'M': ['Gender=Masc'],
'SG': ['Number=Sing'],
'PL': ['Number=Plur'],
'NEG': ['Polarity=Neg'],
'SUPER': ['Degree=Abs'],
'DIM': ['Degree=Dim'],
'AUG': ['Degree=Aug'],
'1': ['Person=1'],
'2': ['Person=2'],
'3': ['Person=3'],
'INF': ['VerbForm=Inf'],
'GRD': ['VerbForm=Ger'],
'PTPST': ['VerbForm=Part','Tense=Past'],
'PRS': ['Mood=Ind','Tense=Pres'],
'IMPF': ['Mood=Ind','Tense=Imp'],
'PRF': ['Mood=Ind','Tense=Past'],
'FUT': ['Mood=Ind','Tense=Fut'],
'PQP': ['Mood=Ind','Tense=Pqp'],
'SBJR': ['Mood=Sub','Tense=Pres'],
'SBJP': ['Mood=Sub','Tense=Imp'],
'SBJF': ['Mood=Sub','Tense=Fut'],
'IMP': ['Mood=Imp'],
'COND': ['Mood=Cod']
}
with open('morphobr_to_bosque.json', 'w') as f:
json.dump(morphobr_to_bosque, f)
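# Hedged usage sketch: read the mapping back and expand a MorphoBr tag into its
# Bosque/UD feature list. The tag chosen below is just one key from the
# dictionary above.
with open('morphobr_to_bosque.json') as f:
    tag_map = json.load(f)
print(tag_map['PTPST'])  # ['VerbForm=Part', 'Tense=Past']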
|
py | b403a739bd2290ff04ac7544cd31d834eb7b344e | # Copyright 2015 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import pprint
from oslo_versionedobjects import fixture
from neutron import objects
from neutron.objects import base
from neutron.tests import base as test_base
# NOTE: The hashes in this list should only be changed if they come with a
# corresponding version bump in the affected objects. Please keep the list in
# alphabetic order.
object_data = {
'AddressScope': '1.0-dd0dfdb67775892d3adc090e28e43bd8',
'Agent': '1.1-64b670752d57b3c7602cb136e0338507',
'AllowedAddressPair': '1.0-9f9186b6f952fbf31d257b0458b852c0',
'AutoAllocatedTopology': '1.0-74642e58c53bf3610dc224c59f81b242',
'DefaultSecurityGroup': '1.0-971520cb2e0ec06d747885a0cf78347f',
'DistributedPortBinding': '1.0-39c0d17b281991dcb66716fee5a8bef2',
'DNSNameServer': '1.0-bf87a85327e2d812d1666ede99d9918b',
'ExternalNetwork': '1.0-53d885e033cb931f9bb3bdd6bbe3f0ce',
'DVRMacAddress': '1.0-d3c61a8338d20da74db2364d4d6554f2',
'ExtraDhcpOpt': '1.0-632f689cbeb36328995a7aed1d0a78d3',
'FlatAllocation': '1.0-bf666f24f4642b047eeca62311fbcb41',
'Flavor': '1.0-82194de5c9aafce08e8527bb7977f5c6',
'FlavorServiceProfileBinding': '1.0-a2c8731e16cefdac4571f80abf1f8930',
'FloatingIP': '1.0-0205cc99ec79e8089d641ed1b565ddae',
'FloatingIPDNS': '1.0-ee3db848500fa1825235f701828c06d5',
'GeneveAllocation': '1.0-d5f76e8eac60a778914d61dd8e23e90f',
'GeneveEndpoint': '1.0-040f026996b5952e2ae4ccd40ac61ca6',
'GreAllocation': '1.0-9ee1bbc4d999bea84c99425484b11ac5',
'GreEndpoint': '1.0-040f026996b5952e2ae4ccd40ac61ca6',
'IPAllocation': '1.0-47251b4c6d45c3b5feb0297fe5c461f2',
'IPAllocationPool': '1.0-371016a6480ed0b4299319cb46d9215d',
'IpamAllocation': '1.0-ace65431abd0a7be84cc4a5f32d034a3',
'IpamAllocationPool': '1.0-c4fa1460ed1b176022ede7af7d1510d5',
'IpamSubnet': '1.0-713de401682a70f34891e13af645fa08',
'L3HARouterAgentPortBinding': '1.0-d1d7ee13f35d56d7e225def980612ee5',
'L3HARouterNetwork': '1.0-87acea732853f699580179a94d2baf91',
'L3HARouterVRIdAllocation': '1.0-37502aebdbeadc4f9e3bd5e9da714ab9',
'MeteringLabel': '1.0-cc4b620a3425222447cbe459f62de533',
'MeteringLabelRule': '1.0-b5c5717e7bab8d1af1623156012a5842',
'Log': '1.0-6391351c0f34ed34375a19202f361d24',
'Network': '1.0-f2f6308f79731a767b92b26b0f4f3849',
'NetworkDhcpAgentBinding': '1.0-6eeceb5fb4335cd65a305016deb41c68',
'NetworkDNSDomain': '1.0-420db7910294608534c1e2e30d6d8319',
'NetworkPortSecurity': '1.0-b30802391a87945ee9c07582b4ff95e3',
'NetworkRBAC': '1.2-192845c5ed0718e1c54fac36936fcd7d',
'NetworkSegment': '1.0-57b7f2960971e3b95ded20cbc59244a8',
'NetworkSegmentRange': '1.0-bdec1fffc9058ea676089b1f2f2b3cf3',
'Port': '1.4-1b6183bccfc2cd210919a1a72faefce1',
'PortBinding': '1.0-3306deeaa6deb01e33af06777d48d578',
'PortBindingLevel': '1.1-50d47f63218f87581b6cd9a62db574e5',
'PortDataPlaneStatus': '1.0-25be74bda46c749653a10357676c0ab2',
'PortDNS': '1.1-c5ca2dc172bdd5fafee3fc986d1d7023',
'PortForwarding': '1.1-db61273978c497239be5389a8aeb1c61',
'PortSecurity': '1.0-b30802391a87945ee9c07582b4ff95e3',
'PortUplinkStatusPropagation': '1.0-3cfb3f7da716ca9687e4f04ca72b081d',
'ProviderResourceAssociation': '1.0-05ab2d5a3017e5ce9dd381328f285f34',
'ProvisioningBlock': '1.0-c19d6d05bfa8143533471c1296066125',
'QosBandwidthLimitRule': '1.3-51b662b12a8d1dfa89288d826c6d26d3',
'QosDscpMarkingRule': '1.3-0313c6554b34fd10c753cb63d638256c',
'QosMinimumBandwidthRule': '1.3-314c3419f4799067cc31cc319080adff',
'QosPolicyRBAC': '1.1-192845c5ed0718e1c54fac36936fcd7d',
'QosRuleType': '1.3-7286188edeb3a0386f9cf7979b9700fc',
'QosRuleTypeDriver': '1.0-7d8cb9f0ef661ac03700eae97118e3db',
'QosPolicy': '1.8-4adb0cde3102c10d8970ec9487fd7fe7',
'QosPolicyDefault': '1.0-59e5060eedb1f06dd0935a244d27d11c',
'QosPolicyFloatingIPBinding': '1.0-5625df4205a18778cd6aa40f99be024e',
'QosPolicyRouterGatewayIPBinding': '1.0-da064fbfe5ee18c950b905b483bf59e3',
'QosPolicyNetworkBinding': '1.0-df53a1e0f675aab8d27a1ccfed38dc42',
'QosPolicyPortBinding': '1.0-66cb364ac99aa64523ade07f9f868ea6',
'Quota': '1.0-6bb6a0f1bd5d66a2134ffa1a61873097',
'QuotaUsage': '1.0-6fbf820368681aac7c5d664662605cf9',
'Reservation': '1.0-49929fef8e82051660342eed51b48f2a',
'ResourceDelta': '1.0-a980b37e0a52618b5af8db29af18be76',
'Route': '1.0-a9883a63b416126f9e345523ec09483b',
'Router': '1.0-adb984d9b73aa11566d40abbeb790df1',
'RouterExtraAttributes': '1.0-ef8d61ae2864f0ec9af0ab7939cab318',
'RouterL3AgentBinding': '1.0-c5ba6c95e3a4c1236a55f490cd67da82',
'RouterPort': '1.0-c8c8f499bcdd59186fcd83f323106908',
'RouterRoute': '1.0-07fc5337c801fb8c6ccfbcc5afb45907',
'SecurityGroup': '1.1-f712265418f154f7c080e02857ffe2ef',
'SecurityGroupPortBinding': '1.0-6879d5c0af80396ef5a72934b6a6ef20',
'SecurityGroupRBAC': '1.0-192845c5ed0718e1c54fac36936fcd7d',
'SecurityGroupRule': '1.0-e9b8dace9d48b936c62ad40fe1f339d5',
'SegmentHostMapping': '1.0-521597cf82ead26217c3bd10738f00f0',
'ServiceProfile': '1.0-9beafc9e7d081b8258f3c5cb66ac5eed',
'StandardAttribute': '1.0-617d4f46524c4ce734a6fc1cc0ac6a0b',
'Subnet': '1.0-927155c1fdd5a615cbcb981dda97bce4',
'SubnetPool': '1.0-a0e03895d1a6e7b9d4ab7b0ca13c3867',
'SubnetPoolPrefix': '1.0-13c15144135eb869faa4a76dc3ee3b6c',
'SubnetServiceType': '1.0-05ae4cdb2a9026a697b143926a1add8c',
'SubPort': '1.0-72c8471068db1f0491b5480fe49b52bb',
'Tag': '1.0-1a0d20379920ffa3cebfd3e016d2f7a0',
'Trunk': '1.1-aa3922b39e37fbb89886c2ee8715cf49',
'VlanAllocation': '1.0-72636c1b7d5c8eef987bd09666e64f3e',
'VxlanAllocation': '1.0-934638cd32d00f81d6fbf93c8eb5755a',
'VxlanEndpoint': '1.0-40522eafdcf838758711dfa886cbdb2e',
}
class TestObjectVersions(test_base.BaseTestCase):
def setUp(self):
super(TestObjectVersions, self).setUp()
# NOTE(ihrachys): seed registry with all objects under neutron.objects
# before validating the hashes
objects.register_objects()
def test_versions(self):
checker = fixture.ObjectVersionChecker(
base.NeutronObjectRegistry.obj_classes())
fingerprints = checker.get_hashes()
if os.getenv('GENERATE_HASHES'):
with open('object_hashes.txt', 'w') as hashes_file:
hashes_file.write(pprint.pformat(fingerprints))
expected, actual = checker.test_hashes(object_data)
self.assertEqual(expected, actual,
'Some objects have changed; please make sure the '
'versions have been bumped, and then update their '
'hashes in the object_data map in this test module.')
|
py | b403a8357cfdd95d0b4287500928896f473aaf16 | """
Sponge Knowledge Base
Action metadata dynamic types
"""
class DynamicResultAction(Action):
def onConfigure(self):
self.withArg(StringType("type")).withResult(DynamicType())
def onCall(self, type):
if type == "string":
return DynamicValue("text", StringType())
elif type == "boolean":
return DynamicValue(True, BooleanType())
else:
return None
class TypeResultAction(Action):
def onConfigure(self):
self.withArg(StringType("type")).withResult(TypeType())
def onCall(self, type):
if type == "string":
return StringType()
elif type == "boolean":
return BooleanType()
else:
return None |
bzl | b403a904ed77fe5a7bdd4b5dc19ccf90e08eb203 | load(
"@io_bazel_rules_docker//repositories:repositories.bzl",
container_repositories = "repositories",
)
load(
"@io_bazel_rules_docker//container:container.bzl",
"container_pull",
)
load(":containers.bzl", "container_digests")
def _remote_config_workspace():
container_repositories()
container_pull(
name = "centos6",
registry = "gcr.io",
repository = "tensorflow-testing/nosla-centos6",
digest = container_digests["centos6"],
)
container_pull(
name = "ubuntu16.04",
registry = "gcr.io",
repository = "tensorflow-testing/nosla-ubuntu16.04",
digest = container_digests["ubuntu16.04"],
)
container_pull(
name = "cuda10.0-cudnn7-ubuntu14.04",
registry = "gcr.io",
repository = "tensorflow-testing/nosla-cuda10.0-cudnn7-ubuntu14.04",
digest = container_digests["cuda10.0-cudnn7-ubuntu14.04"],
)
container_pull(
name = "cuda10.0-cudnn7-centos6",
registry = "gcr.io",
repository = "tensorflow-testing/nosla-cuda10.0-cudnn7-centos6",
digest = container_digests["cuda10.0-cudnn7-centos6"],
)
container_pull(
name = "cuda10.0-cudnn7-ubuntu16.04-manylinux2010",
registry = "gcr.io",
repository = "tensorflow-testing/nosla-cuda10.0-cudnn7-ubuntu16.04-manylinux2010",
digest = container_digests["cuda10.0-cudnn7-ubuntu16.04-manylinux2010"],
)
remote_config_workspace = _remote_config_workspace
|
py | b403a9b7c849d1a2f19f8cb37b693789fdf164cc | #!/usr/bin/env python
"""
This script squashes a PR tagged with the "typo" label into a single, dedicated
"squash PR".
"""
import subprocess
import sys
import os
def get_authors_and_emails_from_pr():
"""
Return all contributing authors and their emails for the PR on current branch.
This includes co-authors, meaning that if two authors are credited for a
single commit, which is possible with GitHub, then both will get credited.
"""
# Get a list of all authors involved in the pull request (including co-authors).
authors = subprocess.check_output(
[
"gh",
"pr",
"view",
os.environ["PR_NUMBER"],
"--json",
"commits",
"--jq",
".[][].authors.[].name",
],
text=True,
).splitlines()
# Get a list of emails of the aforementioned authors.
emails = subprocess.check_output(
[
"gh",
"pr",
"view",
os.environ["PR_NUMBER"],
"--json",
"commits",
"--jq",
".[][].authors.[].email",
],
text=True,
).splitlines()
authors_and_emails = [(author, mail) for author, mail in zip(authors, emails)]
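    # e.g. [("Jane Doe", "jane@example.com"), ("Co Author", "co@example.com")]
    # (illustrative values only)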
return authors_and_emails
def rebase_onto_pr():
"""
Rebase current branch onto the PR.
"""
# Check out the pull request.
subprocess.call(["gh", "pr", "checkout", os.environ["PR_NUMBER"]])
rebase_onto_master()
# Change back to the original branch.
subprocess.call(["git", "switch", "-"])
# Rebase onto the pull request, aka include the commits in the pull request
# in the current branch. Abort with error message if rebase fails.
try:
subprocess.check_call(["git", "rebase", "-"])
except subprocess.CalledProcessError:
subprocess.call(["git", "rebase", "--abort"])
squash_url = subprocess.check_output(
["gh", "pr", "view", "--json", "url", "--jq", ".url"], text=True
).strip()
subprocess.call(
[
"gh",
"pr",
"comment",
os.environ["PR_NUMBER"],
"--body",
f"Your edit conflicts with an already scheduled fix \
({squash_url}). Please check that batch PR whether your fix is \
already included; if not, then please wait until the batch PR \
is merged and then rebase your PR on top of master.",
]
)
sys.exit(
f"\n\nERROR: Your edit conflicts with an already scheduled fix \
{squash_url} \n\n"
)
def rebase_onto_master():
"""
Rebase current branch onto the master i.e. make sure current branch is up
to date. Abort on error.
"""
default_branch = f"{os.environ['GITHUB_BASE_REF']}"
subprocess.check_call(["git", "rebase", default_branch])
def squash_all_commits(message_body_before):
"""
Squash all commits on the PR into a single commit. Credit all authors by
name and email.
"""
default_branch = f"{os.environ['GITHUB_BASE_REF']}"
subprocess.call(["git", "reset", "--soft", default_branch])
authors_and_emails = get_authors_and_emails_from_pr()
commit_message_coauthors = (
"\n"
+ "\n".join([f"Co-authored-by: {i[0]} <{i[1]}>" for i in authors_and_emails])
+ "\n"
+ message_body_before
)
subprocess.call(
["git", "commit", "-m", "chore: typo fixes", "-m", commit_message_coauthors]
)
def force_push(branch):
"""
Like the name implies, force push <branch>.
"""
gh_actor = os.environ["GITHUB_ACTOR"]
gh_token = os.environ["GITHUB_TOKEN"]
gh_repo = os.environ["GITHUB_REPOSITORY"]
subprocess.call(
[
"git",
"push",
"--force",
f"https://{gh_actor}:{gh_token}@github.com/{gh_repo}",
branch,
]
)
def checkout_branch(branch):
"""
Create and checkout <branch>. Check if branch exists on remote, if so then
sync local branch to remote.
Return True if remote branch exists, else False.
"""
# FIXME I'm not sure why the local branch isn't tracking the remote branch
# automatically. This works but I'm pretty sure it can be done in a more
# "elegant" fashion
show_ref_output = subprocess.check_output(["git", "show-ref"], text=True).strip()
if branch in show_ref_output:
subprocess.call(["git", "checkout", "-b", branch, f"origin/{branch}"])
return True
subprocess.call(["git", "checkout", "-b", branch])
return False
def get_all_pr_urls(pr_branch_exists):
"""
Return a list of URLs for the pull requests with the typo fixes. If a
squash branch exists then extract the URLs from the body text.
"""
all_pr_urls = ""
if pr_branch_exists:
all_pr_urls += subprocess.check_output(
["gh", "pr", "view", "--json", "body", "--jq", ".body"], text=True
)
all_pr_urls += subprocess.check_output(
["gh", "pr", "view", os.environ["PR_NUMBER"], "--json", "url", "--jq", ".url"],
text=True,
).strip()
return all_pr_urls
def main():
pr_branch = "marvim/squash-typos"
pr_branch_exists = checkout_branch(pr_branch)
rebase_onto_master()
force_push(pr_branch)
message_body_before = "\n".join(
subprocess.check_output(
["git", "log", "--format=%B", "-n1", pr_branch], text=True
).splitlines()[2:]
)
rebase_onto_pr()
force_push(pr_branch)
subprocess.call(
[
"gh",
"pr",
"create",
"--fill",
"--head",
pr_branch,
"--title",
"chore: typo fixes (automated)",
],
text=True,
)
squash_all_commits(message_body_before)
force_push(pr_branch)
all_pr_urls = get_all_pr_urls(pr_branch_exists)
subprocess.call(["gh", "pr", "edit", "--add-label", "typo", "--body", all_pr_urls])
subprocess.call(["gh", "pr", "close", os.environ["PR_NUMBER"]])
squash_url = subprocess.check_output(
["gh", "pr", "view", "--json", "url", "--jq", ".url"], text=True
).strip()
subprocess.call(
[
"gh",
"pr",
"comment",
os.environ["PR_NUMBER"],
"--body",
f"Thank you for your contribution! We collect all typo fixes \
into a single pull request and merge it once it gets big enough: \
{squash_url}",
]
)
if __name__ == "__main__":
main()
|
py | b403aa4f0d8dd29729672d22d6af1f132eea73be | # ----------------------------------------------------------------------
# managedobjectselector profile
# ----------------------------------------------------------------------
# Copyright (C) 2007-2019 The NOC Project
# See LICENSE for details
# ----------------------------------------------------------------------
# NOC modules
from noc.core.migration.base import BaseMigration
from noc.core.model.fields import DocumentReferenceField
class Migration(BaseMigration):
def migrate(self):
# Get profile record mappings
pcoll = self.mongo_db["noc.profiles"]
pmap = {} # name -> id
for d in pcoll.find({}, {"_id": 1, "name": 1}):
pmap[d["name"]] = str(d["_id"])
# UPDATE profiles
s_profiles = list(
self.db.execute("SELECT DISTINCT filter_profile FROM sa_managedobjectselector")
)
for (p,) in s_profiles:
if not p:
continue
elif p not in pmap:
                # Already converted (see the 0150_managed_object_profile migration); skip
continue
self.db.execute(
"""
UPDATE sa_managedobjectselector
SET filter_profile = %s
WHERE filter_profile = %s
""",
[pmap[p], p],
)
# Alter .filter_profile column
self.db.execute(
"""
ALTER TABLE sa_managedobjectselector
ALTER filter_profile TYPE CHAR(24) USING SUBSTRING(\"filter_profile\", 1, 24)
"""
)
# Create .filter_vendor field
self.db.add_column(
"sa_managedobjectselector",
"filter_vendor",
DocumentReferenceField("inv.Vendor", null=True, blank=True),
)
# Create .filter_platform field
self.db.add_column(
"sa_managedobjectselector",
"filter_platform",
DocumentReferenceField("inv.Vendor", null=True, blank=True),
)
# Create .filter_version field
self.db.add_column(
"sa_managedobjectselector",
"filter_version",
DocumentReferenceField("inv.Firmware", null=True, blank=True),
)
# Create .filter_tt_system field
self.db.add_column(
"sa_managedobjectselector",
"filter_tt_system",
DocumentReferenceField("inv.Firmware", null=True, blank=True),
)
|
py | b403aad93dd51a1919f262a32e2e532c14d158c0 | discourse_indicators = {
"major_claim": [
"from my point of view",
"in my opinion",
"my opinion",
"i strongly believe that",
"i think that",
"i firmly believe that",
"my view",
"my belief"
],
"claim": [
"accordingly",
"as a result",
"consequently",
"conclude that",
"clearly",
"demonstrates that",
"entails",
"follows that",
"hence",
"implies",
"in short",
"in conclusion",
"indicates that",
"it follows that",
"it is highly probable that",
"it should be clear that",
"it should be clear",
"points to the conclusions",
"proves that",
"shows that",
"so",
"suggests that",
"the point I’m trying to make",
"therefore",
"thus",
"to sum up",
"we may deduce"
],
"premise": [
"assuming that",
"as",
"as indicated by",
"as shown",
"besides",
"because",
"deduced",
"derived from",
"due to",
"for",
"for example",
"for instance",
"for the reason that",
"furthermore",
"given that",
"in addition"
"in light of",
"in that",
"in view of",
"indicated by",
"is supported by",
"may be inferred",
"moreover",
"researchers found that",
"this can be seen from",
"since",
"since the evidence is",
"what’s more",
"whereas",
],
"conflict": [
"however",
"but",
"though",
"except",
"not",
"never",
"no",
"whereas",
"nonetheless",
"yet",
"despite"
],
"support": [
"because",
"therefore",
"after",
"for",
"since",
"when",
"assuming",
"so",
"accordingly",
"thus",
"hence",
"then",
"consequently",
],
"forward":
[
"As a result", "As the consequence", "Because", "Clearly", "Consequently", "Considering this subject",
"Furthermore", "Hence", "leading to the consequence", "so", "So", "taking account on this fact",
"That is the reason why", "The reason is that", "Therefore", "therefore", "This means that",
"This shows that", "This will result", "Thus", "thus", "Thus", "it is clearly seen that",
"Thus, it is seen", "Thus, the example shows"
],
"backward": [
"Additionally", "As a matter of fact", "because", "Besides", "due to", "Finally", "First of all", "Firstly",
"for example", "For example",
"For instance", "for instance", "Further-more", "has proved it", "In addition", "In addition to this",
"In the first place",
"is due to the fact that", "It should also be noted", "Moreover", "On one hand", "On the one hand",
"On the other hand", "One of the main reasons", "Secondly", "Similarly", "since", "Since", "So", "The reason",
"To begin with", "To offer an instance", "What is more"
],
"first_person": [
"I",
"me",
"my",
"mine",
"myself"
],
"obligation": [
"must",
"need",
"have to"
"imperative",
"duty"
],
"recommendation": [
"should",
"ought to",
"had better",
"need to"
],
"possible": [
"can",
"could",
"perhaps"
],
"option": [
"may",
"might"
],
"intention": [
"will",
"shall"
],
"rebuttal": [
"Admittedly",
"although",
"Although",
"besides these advantages",
"but",
"But",
"Even though",
"even though",
"However",
"Otherwise"
],
"thesis":
[
"All in all",
"All things considered",
"As far as I am concerned",
"Based on some reasons",
"by analyzing both the views",
"considering both the previous fact",
"Finally",
"For the reasons mentioned above",
"From explanation above",
"From this point of view",
"I agree that",
"I agree with",
"I agree with the statement that",
"I believe",
"I believe that",
"I do not agree with this statement",
"I firmly believe that",
"I highly advocate that",
"I highly recommend",
"I strongly believe that",
"I think that",
"I think the view is",
"I totally agree",
"I totally agree to this opinion",
"I would have to argue that",
"I would reaffirm my position that",
"In conclusion",
"in conclusion",
"in my opinion",
"In my opinion",
"In my personal point of view",
"in my point of view",
"In my point of view",
"In summary",
"In the light of the facts outlined above",
"it can be said that",
"it is clear that",
"it seems to me that",
"my deep conviction",
"My sentiments",
"Overall",
"Personally",
"the above explanations and example shows that",
"This, however",
"To conclude",
"To my way of thinking",
"To sum up",
"Ultimately",
]
}
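# Hedged usage sketch (the helper name and the matching strategy are
# illustrative, not part of the original module): report which indicator
# categories a sentence contains.
def find_indicators(sentence):
    lowered = sentence.lower()
    return {category: [marker for marker in markers if marker.lower() in lowered]
            for category, markers in discourse_indicators.items()}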
|
py | b403ac4bf39081e353c90f46b65c727c8d489858 | from datetime import datetime, timedelta
from decimal import Decimal
from django.contrib import messages
from django.core import urlresolvers
from django.http import HttpResponse, HttpResponseRedirect
from django.shortcuts import render_to_response
from django.template import RequestContext
from django.utils.translation import ugettext_lazy as _
from livesettings.functions import config_get_group
from payment import active_gateways
from payment.forms import PaymentMethodForm, CustomChargeForm
from payment.views import contact
from satchmo_store.shop.models import Order, OrderItem, OrderPayment
from satchmo_utils.dynamic import lookup_url
from satchmo_utils.views import bad_or_missing
import logging
log = logging.getLogger('payment.views.balance')
def balance_remaining_order(request, order_id=None):
"""Load the order into the session, so we can charge the remaining amount"""
# this will create an "OrderCart" - a fake cart to allow us to check out
request.session['cart'] = 'order'
request.session['orderID'] = order_id
return balance_remaining(request)
def balance_remaining(request):
"""Allow the user to pay the remaining balance."""
order = None
orderid = request.session.get('orderID')
if orderid:
try:
order = Order.objects.get(pk=orderid)
except Order.DoesNotExist:
# TODO: verify user against current user
pass
if not order:
url = urlresolvers.reverse('satchmo_checkout-step1')
return HttpResponseRedirect(url)
if request.method == "POST":
new_data = request.POST.copy()
form = PaymentMethodForm(data=new_data, order=order)
if form.is_valid():
data = form.cleaned_data
modulename = data['paymentmethod']
if not modulename.startswith('PAYMENT_'):
modulename = 'PAYMENT_' + modulename
paymentmodule = config_get_group(modulename)
url = lookup_url(paymentmodule, 'satchmo_checkout-step2')
return HttpResponseRedirect(url)
else:
form = PaymentMethodForm(order=order)
ctx = RequestContext(request, {'form' : form,
'order' : order,
'paymentmethod_ct': len(active_gateways())
})
return render_to_response('shop/checkout/balance_remaining.html',
context_instance=ctx)
def charge_remaining(request, orderitem_id):
"""Given an orderitem_id, this returns a confirmation form."""
try:
orderitem = OrderItem.objects.get(pk = orderitem_id)
except OrderItem.DoesNotExist:
return bad_or_missing(request, _("The orderitem you have requested doesn't exist, or you don't have access to it."))
amount = orderitem.product.customproduct.full_price
data = {
'orderitem' : orderitem_id,
'amount' : amount,
}
form = CustomChargeForm(data)
ctx = RequestContext(request, {'form' : form})
return render_to_response('payment/admin/charge_remaining_confirm.html',
context_instance=ctx)
def charge_remaining_post(request):
if not request.method == 'POST':
return bad_or_missing(request, _("No form found in request."))
data = request.POST.copy()
form = CustomChargeForm(data)
if form.is_valid():
data = form.cleaned_data
try:
orderitem = OrderItem.objects.get(pk = data['orderitem'])
except OrderItem.DoesNotExist:
return bad_or_missing(request, _("The orderitem you have requested doesn't exist, or you don't have access to it."))
price = data['amount']
line_price = price*orderitem.quantity
orderitem.unit_price = price
orderitem.line_item_price = line_price
orderitem.save()
#print "Orderitem price now: %s" % orderitem.line_item_price
order = orderitem.order
if not order.shipping_cost:
order.shipping_cost = Decimal("0.00")
if data['shipping']:
order.shipping_cost += data['shipping']
order.recalculate_total()
messages.add_message(request, messages.INFO, 'Charged for custom product and recalculated totals.')
notes = data['notes']
if not notes:
notes = 'Updated total price'
order.add_status(notes=notes)
return HttpResponseRedirect('/admin/shop/order/%i' % order.id)
else:
ctx = RequestContext(request, {'form': form})
return render_to_response('admin/charge_remaining_confirm.html',
context_instance=ctx)
|
py | b403acb8d48a6fb1220dda8a0d461e9b66b7c709 | import logging
from dateutil import parser as dateparser
from newsplease.config import CrawlerConfig
# NewsArticle is used by convert_to_class() below (assumed to live in
# newsplease.NewsArticle, following the news-please package layout).
from newsplease.NewsArticle import NewsArticle
class ExtractedInformationStorage(object):
"""
Provides basic functionality for Storages
"""
log = None
def __init__(self):
self.log = logging.getLogger(__name__)
self.log.addHandler(logging.NullHandler())
self.cfg = CrawlerConfig.get_instance()
@staticmethod
def ensure_str(text):
if isinstance(text, str):
return text
else:
return text.decode('utf-8')
@staticmethod
def extract_relevant_info(item):
"""
extracts from an item only fields that we want to output as extracted information
:rtype: object
:param item:
:return:
"""
article = {
'authors': item['article_author'],
'date_download': item['download_date'],
'date_modify': item['modified_date'],
'date_publish': item['article_publish_date'],
'description': item['article_description'],
'filename': item['filename'],
'image_url': item['article_image'],
'language': item['article_language'],
'localpath': item['local_path'],
'title': item['article_title'],
'title_page': ExtractedInformationStorage.ensure_str(item['html_title']),
'title_rss': ExtractedInformationStorage.ensure_str(item['rss_title']),
'source_domain': ExtractedInformationStorage.ensure_str(item['source_domain']),
'maintext': item['article_text'],
'url': item['url']
}
# clean values
for key in article:
value = article[key]
if isinstance(value, str) and not value:
article[key] = None
return article
@staticmethod
def values_changed(olditem, newitem):
ignored_keys = {'date_download', 'date_modify', 'version', 'ancestor', 'descendant', 'db_id'}
for key, value in newitem.items():
if key not in ignored_keys and olditem.get(key) != value:
return True
return False
@staticmethod
def datestring_to_date(text):
if text:
return dateparser.parse(text)
else:
return None
@staticmethod
def convert_to_class(item):
news_article = NewsArticle()
news_article.authors = item['authors']
news_article.date_download = ExtractedInformationStorage.datestring_to_date(item['date_download'])
news_article.date_modify = ExtractedInformationStorage.datestring_to_date(item['date_modify'])
news_article.date_publish = ExtractedInformationStorage.datestring_to_date(item['date_publish'])
news_article.description = item['description']
news_article.filename = item['filename']
news_article.image_url = item['image_url']
news_article.language = item['language']
news_article.localpath = item['localpath']
news_article.title = item['title']
news_article.title_page = item['title_page']
news_article.title_rss = item['title_rss']
news_article.source_domain = item['source_domain']
news_article.maintext = item['maintext']
news_article.url = item['url']
return news_article |
py | b403accf520db0ffe8b2abba3561adfa6e816875 | from scipy import signal
import numpy as np
import matplotlib.pyplot as plt
x = np.linspace(-25, 25, 500)
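# signal.sawtooth(..., width=0.5) gives a symmetric triangle wave in [-1, 1]; the
# line below scales it to a peak-to-peak amplitude of 20 (factor 10), shifts its
# phase by 2, sets the period to 40 (argument pi*x/20), and offsets it down by 7.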
triangle = 10 * signal.sawtooth(40 * np.pi * 1/800 * x + 2, 0.5) - 7
plt.plot(x, triangle)
plt.show()
"""
def triangle2(length, amplitude):
section = length // 4
x = np.linspace(0, amplitude, section+1)
mx = -x
return np.r_[x, x[-2::-1], mx[1:], mx[-2:0:-1]]
plt.plot(triangle2(2,3))
plt.show()
"""
#plt.plot(x, triangle2(3, 3))
|
py | b403ad2c99298d48de7bdc0fb16f366fc1d0e5a5 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.mgmt.core.exceptions import ARMErrorFormat
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class AlertsOperations(object):
"""AlertsOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.databoxedge.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def list_by_data_box_edge_device(
self,
device_name, # type: str
resource_group_name, # type: str
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.AlertList"]
"""Gets all the alerts for a Data Box Edge/Data Box Gateway device.
:param device_name: The device name.
:type device_name: str
:param resource_group_name: The resource group name.
:type resource_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either AlertList or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.databoxedge.models.AlertList]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.AlertList"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-07-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_by_data_box_edge_device.metadata['url'] # type: ignore
path_format_arguments = {
'deviceName': self._serialize.url("device_name", device_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('AlertList', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list_by_data_box_edge_device.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DataBoxEdge/dataBoxEdgeDevices/{deviceName}/alerts'} # type: ignore
def get(
self,
device_name, # type: str
name, # type: str
resource_group_name, # type: str
**kwargs # type: Any
):
# type: (...) -> "_models.Alert"
"""Gets an alert by name.
Gets an alert by name.
:param device_name: The device name.
:type device_name: str
:param name: The alert name.
:type name: str
:param resource_group_name: The resource group name.
:type resource_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: Alert, or the result of cls(response)
:rtype: ~azure.mgmt.databoxedge.models.Alert
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.Alert"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-07-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'deviceName': self._serialize.url("device_name", device_name, 'str'),
'name': self._serialize.url("name", name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('Alert', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DataBoxEdge/dataBoxEdgeDevices/{deviceName}/alerts/{name}'} # type: ignore
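# Illustrative usage sketch (not part of the generated code): callers normally reach
# this operations group through the generated management client, which attaches it
# as an attribute (by convention, ``client.alerts``). The client class name and the
# credential setup below are assumptions for illustration, not taken from this file.
#
#     from azure.identity import DefaultAzureCredential
#     from azure.mgmt.databoxedge import DataBoxEdgeManagementClient
#
#     client = DataBoxEdgeManagementClient(DefaultAzureCredential(), "<subscription-id>")
#     for alert in client.alerts.list_by_data_box_edge_device("<device-name>", "<resource-group>"):
#         print(alert.name)
#     one_alert = client.alerts.get("<device-name>", "<alert-name>", "<resource-group>")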
|
py | b403adb52cf6a953e5b807eb0e0a79077c8f70e1 | # Copyright © 2019 Province of British Columbia
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Google Storage token tests."""
from ppr_api.callback.document_storage.storage_service import GoogleStorageService
TEST_DOC_NAME = 'financing-statements_100348B.pdf'
TEST_DATAFILE = 'tests/unit/callback/financing-statements_100348B.pdf'
TEST_SAVE_DOC_NAME = 'search-results-report-200000008.pdf'
def test_cs_get_document(session):
"""Assert that getting a document from google cloud storage works as expected."""
raw_data = GoogleStorageService.get_document(TEST_DOC_NAME)
assert raw_data
assert len(raw_data) > 0
with open(TEST_DATAFILE, "wb") as pdf_file:
pdf_file.write(raw_data)
pdf_file.close()
def test_cs_save_document(session):
"""Assert that saving a document to google cloud storage works as expected."""
raw_data = None
    with open(TEST_DATAFILE, 'rb') as data_file:
        raw_data = data_file.read()
response = GoogleStorageService.save_document(TEST_SAVE_DOC_NAME, raw_data)
print(response)
assert response
assert response['name'] == TEST_SAVE_DOC_NAME
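# Minimal usage sketch of the flow these tests exercise, using only the two class
# methods shown above; the document names are placeholders, and the bucket and
# credential configuration are assumed to come from the service's own settings:
#
#     pdf_bytes = GoogleStorageService.get_document('some-report.pdf')
#     GoogleStorageService.save_document('copy-of-some-report.pdf', pdf_bytes)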
|
py | b403adfc9ae1c7eaf4db19f574bf4b5dc8d76423 | from rest_framework.viewsets import ModelViewSet
from address.api.serializers import StateSerializer, ReadCitySerializer, WriteCitySerializer
from address.models import State, City
from lib.api.permissions import IsAdminOrReadOnly
class StateViewSet(ModelViewSet):
queryset = State.objects.all()
serializer_class = StateSerializer
permission_classes = [IsAdminOrReadOnly]
lookup_field = 'slug'
class CityViewSet(ModelViewSet):
queryset = City.objects.all()
permission_classes = [IsAdminOrReadOnly]
lookup_field = 'slug'
filterset_fields = ['state__name']
def get_serializer_class(self):
if self.action in ['create', 'update', 'partial_update']:
return WriteCitySerializer
return ReadCitySerializer
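# Hedged wiring sketch (not from this file): these viewsets are typically exposed
# through a DRF router in the project's URL configuration. The route prefixes below
# are assumptions for illustration only.
#
#     from rest_framework.routers import DefaultRouter
#
#     router = DefaultRouter()
#     router.register('states', StateViewSet)
#     router.register('cities', CityViewSet)
#     urlpatterns = router.urls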
|
py | b403ae2d8cf6f9b9299d6e504dcaa0a843c382ec | from time import time
import math
def main():
start = time()
    count = 1  # prime() below returns False for 2 (its trial loop includes 2 itself), so count 2 up front
for i in range(2, 1000001):
if prime(i):
if circPrime(i):
print(i)
count += 1
print(count)
print(time() - start)
def circPrime(n):
stri = str(n)
perms = []
#create the permutations
for i in range(0, len(stri)):
perms.append(stri[1:] + stri[0])
stri = stri[1:] + stri[0]
#check them all for prime
for each in perms:
if not prime(int(each)):
return False
return True
def prime(n):
for i in range(2, int(math.ceil( n ** 0.5)) + 1):
if n % i == 0:
return False
return True
if __name__ == '__main__':
main()
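# Worked example of the rotation check above: circPrime(197) is True because every
# rotation (197, 971, 719) passes prime(); circPrime(19) is False because the
# rotation 91 = 7 * 13 fails it. Run over the full range, the loop reports 55
# circular primes below one million (2 is included via the initial count of 1).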
|
py | b403af8ce7ff9f8797ff697cb7ec66c7c38ded37 | import pika
import json
from config import LOG_PREFIX, CLASSIFICATION, RECOGNITION, \
MULTI_RECOGNITION, PREDICT_MODEL_QUEUE, HOST, \
MODELS_DIR, DATASETS_DIR
from core.predict_yolov5 import predict_yolov5
from pathlib import Path
conn_parameters = pika.ConnectionParameters(host=HOST,
connection_attempts=20,
retry_delay=5)
connection = pika.BlockingConnection(conn_parameters)
channel = connection.channel()
channel.queue_declare(queue=PREDICT_MODEL_QUEUE, durable=True)
def raise_not_implemented(task):
    raise RuntimeError(f"{task} model is not implemented")
def process_dataset_items(dataset_items):
return [{ **item,
"imageUrl": str(Path(DATASETS_DIR, item["imageUrl"])) }
for item in dataset_items]
def on_message(ch, method, props, body):
prediction_request = json.loads(body.decode("utf-8"))
print(LOG_PREFIX, "Got prediction request", prediction_request)
markup_type = prediction_request["markupType"]
dataset_items = prediction_request["datasetItems"]
markup_id = prediction_request["markupId"]
model_id = prediction_request["modelId"]
weights_path = prediction_request["weightsPath"]
dataset_items = process_dataset_items(dataset_items)
weights_path = str(Path(MODELS_DIR, weights_path))
try:
if markup_type == MULTI_RECOGNITION:
predictions = predict_yolov5(dataset_items, weights_path)
        elif markup_type == CLASSIFICATION:
            raise_not_implemented(CLASSIFICATION)
        elif markup_type == RECOGNITION:
            raise_not_implemented(RECOGNITION)
else:
raise RuntimeError(f"Unknown markup type: {markup_type}")
except Exception as e:
print(e)
else:
message_body = { "markupId": markup_id,
"modelId": model_id,
"markupType": markup_type,
"items": predictions }
ch.basic_publish(exchange='',
routing_key=props.reply_to,
properties=pika.BasicProperties(correlation_id=props.correlation_id),
body=json.dumps(message_body))
ch.basic_ack(delivery_tag=method.delivery_tag)
channel.basic_qos(prefetch_count=1)
channel.basic_consume(queue=PREDICT_MODEL_QUEUE, on_message_callback=on_message)
print(LOG_PREFIX, "Awaiting RPC requests")
channel.start_consuming()
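# Shape of the RPC request this worker expects, inferred from the keys read in
# on_message above; the values are illustrative placeholders. A caller publishes
# JSON like this to PREDICT_MODEL_QUEUE with reply_to and correlation_id set, and
# receives the prediction payload back on its reply queue:
#
#     {
#         "markupType": "<one of CLASSIFICATION / RECOGNITION / MULTI_RECOGNITION>",
#         "markupId": "markup-1",
#         "modelId": "model-1",
#         "weightsPath": "relative/path/under/MODELS_DIR/best.pt",
#         "datasetItems": [{"imageUrl": "relative/path/under/DATASETS_DIR/image.jpg"}]
#     }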
|
py | b403afd9b74fcb0bc7050912a955549ec2adaa2c | # Generated by Django 3.1.7 on 2021-03-30 09:12
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('app', '0006_auto_20210330_1040'),
]
operations = [
migrations.AlterField(
model_name='profile',
name='profile_picture',
field=models.ImageField(default='default_yqsmou.jpg', upload_to='images/'),
),
]
|
py | b403b10306b00edc26a04045b47432833e91a324 | import requests
import json
import os
token = ""
key = ""
after = ""
data = ""
dataforall = ""
name = ""
URL = 'https://openapi.band.us/v2/band/posts'
for _ in range(0, 800):
if after == "":
params = {'access_token': token, 'band_key': key}
response = requests.get(URL, params=params)
        for post in response.json()['result_data']['items']:
            if post['author']['name'] == name:
                dataforall += post['content']
                dataforall += "\n"
                data = post['content']
                with open(os.getcwd() + "/txt/" + post['post_key'] + ".txt", 'w', encoding='utf-8') as f:
                    f.write(data)
after = response.json()['result_data']['paging']['next_params']['after']
dataforall+="\n\n\n\n-----------------------------"
else:
params = {'access_token': token, 'band_key': key, 'after': after}
response = requests.get(URL, params=params)
        for post in response.json()['result_data']['items']:
            if post['author']['name'] == name:
                dataforall += post['content']
                dataforall += "\n"
                data = post['content']
                with open(os.getcwd() + "/txt/" + post['post_key'] + ".txt", 'w', encoding='utf-8') as f:
                    f.write(data)
after = response.json()['result_data']['paging']['next_params']['after']
print(response.json()['result_data']['paging']['next_params'])
dataforall += "\n\n\n\n-----------------------------"
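# Pagination note: the Band Open API hands back result_data.paging.next_params.after
# as an opaque cursor, and the loop above feeds it into the next request's 'after'
# parameter. The loop runs a fixed number of iterations rather than detecting the
# last page, so it may raise a KeyError once next_params is no longer present.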
# params = {'access_token': token, 'band_key': key}
# response = requests.get(URL, params=params)
# json_str = json.dumps(response.json(), ensure_ascii=False, indent=4)
# with open("last"+".json", 'w',encoding='utf-8') as f:
# f.write(json_str)
with open("full contents"+".txt", 'w',encoding='utf-8') as f:
f.write(dataforall) |
py | b403b1679310f40a45a0d35779b597a227d76de5 | from random import randint
import pygame
from pygame.locals import KEYDOWN, QUIT, K_ESCAPE, K_UP, K_DOWN, K_LEFT, K_RIGHT
pygame.init()
screen = pygame.display.set_mode((400, 300))
clock = pygame.time.Clock()
borders = [pygame.Rect(0, 0, 2, 300), pygame.Rect(0, 0, 400, 2),
pygame.Rect(398, 0, 2, 300), pygame.Rect(0, 298, 400, 2)]
pellets = [pygame.Rect(randint(10, 380), randint(10, 280), 5, 5) for _ in range(4)]
mybox = pygame.Rect(200, 150, 10, 10) # start in middle of the screen
dx, dy = 0, 1 # start direction: down
delay = 0 # start moving right away
while True:
clock.tick(50) # frames per second
for event in pygame.event.get(): # inputs
if event.type == QUIT:
exit()
if event.type == KEYDOWN:
key = event.key
if key == K_ESCAPE:
exit()
elif key == K_UP:
dx, dy = 0, -1
elif key == K_DOWN:
dx, dy = 0, 1
elif key == K_LEFT:
dx, dy = -1, 0
elif key == K_RIGHT:
dx, dy = 1, 0
delay -= 4
if mybox.collidelist(borders) != -1: # game over
mybox = pygame.Rect(200, 150, 10, 10) # back to middle of the screen
delay = 0 # move right away
if delay <= 0:
mybox.move_ip(dx, dy) # update position
delay = (mybox.width - 10) / 2 # number of pellets eaten so far
indices = mybox.collidelistall(pellets)
for p_index in indices: # ate a pellet: grow, and replace a pellet
pellets[p_index] = pygame.Rect(randint(10, 380), randint(10, 280), 5, 5)
mybox.size = mybox.width * 1.2, mybox.height * 1.2
screen.fill((0, 0, 64)) # dark blue
pygame.draw.rect(screen, (0, 191, 255), mybox) # Deep Sky Blue
[pygame.draw.rect(screen, (255, 192, 203), p) for p in pellets] # pink
[pygame.draw.rect(screen, (0, 191, 255), b) for b in borders] # deep sky blue
pygame.display.update()
|
py | b403b17927cbe9ce43122e089994ba917a32bec8 | import datetime
from molo.yourtips.models import (
YourTip, YourTipsEntry, YourTipsArticlePage
)
from molo.yourtips.tests.base import BaseYourTipsTestCase
class TestWagtailAdminActions(BaseYourTipsTestCase):
def test_export_entry_csv(self):
self.client.login(
username=self.superuser_name,
password=self.superuser_password
)
tip_page = YourTip(
title='Test Tip',
description='This is the description',
slug='test-tip')
self.tip_index.add_child(instance=tip_page)
tip_page.save_revision().publish()
YourTipsEntry.objects.create(
optional_name='Test',
tip_text='test body',
allow_share_on_social_media=True,
)
response = self.client.post('/admin/yourtips/yourtipsentry/')
date = str(datetime.datetime.now().date())
expected_output = (
'Content-Length: 132\r\n'
'Content-Language: en\r\n'
'Content-Disposition: attachment;'
' filename=yourtips_entries.csv\r\n'
'Vary: Accept-Language, Cookie\r\n'
'Cache-Control: no-cache, no-store, private, max-age=0'
'\r\nX-Frame-Options: SAMEORIGIN\r\n'
'Content-Type: csv\r\n\r\n'
'id,submission_date,optional_name,user,tip_text,'
'allow_share_on_social_media,converted_article_page\r\n'
'1,' + date + ',Test,,test body,1,\r\n')
        self.assertEqual(str(response), expected_output)
response = self.client.post(
'/admin/yourtips/yourtipsentry/?drf__submission_date__gte=' +
date + '&drf__submission_date__lte=' + date
)
expected_output = (
'Content-Length: 132\r\n'
'Content-Language: en\r\n'
'Content-Disposition: attachment; '
'filename=yourtips_entries.csv\r\n'
'Vary: Accept-Language, Cookie\r\n'
'Cache-Control: no-cache, no-store, private, max-age=0\r\n'
'X-Frame-Options: SAMEORIGIN\r\n'
'Content-Type: csv\r\n\r\n'
'id,submission_date,optional_name,user,tip_text,'
'allow_share_on_social_media,converted_article_page\r\n'
'1,' + date + ',Test,,test body,1,\r\n')
        self.assertEqual(str(response), expected_output)
response = self.client.post(
'/admin/yourtips/yourtipsentry/?drf__submission_date__gte='
'2017-01-01&drf__submission_date__lte=2017-01-01'
)
expected_output = (
'Content-Length: 99\r\n'
'Content-Language: en\r\n'
'Content-Disposition: attachment; '
'filename=yourtips_entries.csv\r\n'
'Vary: Accept-Language, Cookie\r\n'
'Cache-Control: no-cache, no-store, private, max-age=0\r\n'
'X-Frame-Options: SAMEORIGIN\r\nContent-Type: csv\r\n\r\n'
'id,submission_date,optional_name,user,tip_text,'
'allow_share_on_social_media,converted_article_page\r\n')
        self.assertEqual(str(response), expected_output)
def test_export_article_page_csv(self):
self.client.login(
username=self.superuser_name,
password=self.superuser_password
)
tip_page = YourTip(
title='Test Tip',
description='This is the description',
slug='test-tip')
self.tip_index.add_child(instance=tip_page)
tip_page.save_revision().publish()
entry = YourTipsEntry.objects.create(
optional_name='Test',
tip_text='test body',
allow_share_on_social_media=True,
)
self.client.get(
'/django-admin/yourtips/yourtipsentry/%d/convert/' % entry.id
)
article = YourTipsArticlePage.objects.get(title='Tip-%s' % entry.id)
article.save_revision().publish()
response = self.client.post('/admin/yourtips/yourtipsarticlepage/')
expected_output = (
'Tip-1,1,test body,Test')
self.assertContains(response, expected_output)
response = self.client.post(
'/admin/yourtips/yourtipsarticlepage/?'
'drf__latest_revision_created_at__gte=2017-01-01'
'&drf__latest_revision_created_at__lte=2017-01-01'
)
expected_output = (
'Tip-1,1,test body,Test')
self.assertNotContains(response, expected_output)
|
py | b403b217e8e3f83cc542bd24d25ad69c91c3cc79 | # Generated by Django 2.1.7 on 2019-04-24 20:31
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('contribution', '0003_comment_is_special'),
]
operations = [
migrations.AlterField(
model_name='comment',
name='is_special',
field=models.BooleanField(default=False, verbose_name='is_special'),
),
]
|
py | b403b28fbe9d0424c884617519892c81f9fe039a | """Cheshire3 wrappers for Lucene vector space model."""
__all__ = ['normalizer', 'tokenizer', 'workflow', 'wrapper', 'indexStore']
# Initialize JVM ___ONCE___
try:
import lucene
except ImportError:
pass
else:
vm = lucene.initVM(lucene.CLASSPATH)
import cheshire3.lucene.normalizer
import cheshire3.lucene.tokenizer
import cheshire3.lucene.workflow
import cheshire3.lucene.wrapper
import cheshire3.lucene.indexStore
|
py | b403b3311fed3d2c6283996637dc3f53c04961ae | import re
def validate_user_funai(username, domain):
    """Normalize ``username`` onto the configured ``domain`` suffix.
    If ``username`` already looks like ``local@host``, only the portion before
    the final ``@`` is kept before appending ``domain``; otherwise ``domain``
    is appended to ``username`` unchanged.
    """
    regex = re.compile(r'([^\s]+)@[^\s]+')
    matched = regex.match(username)
    if matched:
        username = "%s%s" % (matched.group(1), domain)
    else:
        username = "%s%s" % (username, domain)
return username |