import logging
import six
from eventemitter import EventEmitter
from dota2.enums import EDOTAGCMsg, DOTAChatChannelType_t
from dota2.protobufs.dota_gcmessages_client_chat_pb2 import CMsgDOTAJoinChatChannelResponse,\
CMsgDOTAChatChannelFullUpdate,\
CMsgDOTAOtherJoinedChatChannel,\
CMsgDOTAOtherLeftChatChannel,\
CMsgDOTAChatChannelMemberUpdate
class ChatBase(object):
def __init__(self):
super(ChatBase, self).__init__()
name = "%s.channels" % self.__class__.__name__
self.channels = ChannelManager(self, name)
class ChannelManager(EventEmitter):
EVENT_JOINED_CHANNEL = 'channel_joined'
"""When the client join a channel.
:param channel: channel instance
:type channel: :class:`ChatChannel`
"""
EVENT_LEFT_CHANNEL = 'channel_left'
"""When the client leaves a channel.
:param channel: channel instance
:type channel: :class:`ChatChannel`
"""
EVENT_MESSAGE = 'message'
"""On a new channel message
:param channel: channel instance
:type channel: :class:`ChatChannel`
:param message: message data
:type message: `CMsgDOTAChatMessage <https://github.com/ValvePython/dota2/blob/6cb1008f3070e008e9bed9521fad8d1438123aa1/protobufs/dota_gcmessages_client_chat.proto#L86-L122>`_
"""
EVENT_CHANNEL_MEMBERS_UPDATE = 'members_update'
"""When users join/leave a channel
:param channel: channel instance
:type channel: :class:`ChatChannel`
:param joined: list of members who joined
:type joined: list
:param left: list of members who left
:type left: list
"""
def emit(self, event, *args):
if event is not None:
self._LOG.debug("Emit event: %s" % repr(event))
EventEmitter.emit(self, event, *args)
def __init__(self, dota_client, logger_name):
super(ChannelManager, self).__init__()
self._LOG = logging.getLogger(logger_name if logger_name else self.__class__.__name__)
self._dota = dota_client
self._channels = {}
self._channels_by_name = {}
# register our handlers
self._dota.on('notready', self._cleanup)
self._dota.on(EDOTAGCMsg.EMsgGCJoinChatChannelResponse, self._handle_join_response)
self._dota.on(EDOTAGCMsg.EMsgGCChatMessage, self._handle_message)
self._dota.on(EDOTAGCMsg.EMsgGCOtherJoinedChannel, self._handle_members_update)
self._dota.on(EDOTAGCMsg.EMsgGCOtherLeftChannel, self._handle_members_update)
self._dota.on(EDOTAGCMsg.EMsgDOTAChatChannelMemberUpdate, self._handle_members_update)
def __repr__(self):
return "<ChannelManager(): %d channels>" % (
len(self),
)
def _cleanup(self):
self._channels.clear()
self._channels_by_name.clear()
    def _remove_channel(self, channel_id):
        channel = self._channels.pop(channel_id, None)
        if channel is not None:
            self._channels_by_name.pop((channel.name, channel.type), None)
def __contains__(self, key):
return (key in self._channels) or (key in self._channels_by_name)
def __getitem__(self, key):
if isinstance(key, tuple):
return self._channels_by_name[key]
else:
return self._channels[key]
def __len__(self):
return len(self._channels)
def __iter__(self):
return six.itervalues(self._channels)
def _handle_join_response(self, message):
key = (message.channel_name, message.channel_type)
self.emit(('join_result',) + key, message.result)
if message.result == message.JOIN_SUCCESS:
if message.channel_id in self:
channel = self[message.channel_id]
else:
channel = ChatChannel(self, message)
self._channels[channel.id] = channel
self._channels_by_name[key] = channel
self.emit(self.EVENT_JOINED_CHANNEL, channel)
def _handle_message(self, message):
if message.channel_id in self:
self.emit(self.EVENT_MESSAGE, self[message.channel_id], message)
def _handle_members_update(self, message):
if message.channel_id in self:
channel = self[message.channel_id]
joined = []
left = []
if isinstance(message, CMsgDOTAOtherLeftChatChannel):
left.append(message.steam_id or message.channel_user_id)
elif isinstance(message, CMsgDOTAOtherJoinedChatChannel):
joined.append(message.steam_id or message.channel_user_id)
elif isinstance(message, CMsgDOTAChatChannelMemberUpdate):
left = list(message.left_steam_ids)
joined = list(map(lambda x: x.steam_id or x.channel_user_id, message.joined_members))
elif isinstance(message, CMsgDOTAChatChannelFullUpdate):
pass
channel._process_members_from_proto(message)
if joined or left:
self.emit(self.EVENT_CHANNEL_MEMBERS_UPDATE, channel, joined, left)
def join_channel(self, channel_name, channel_type=DOTAChatChannelType_t.DOTAChannelType_Custom):
"""Join a chat channel
:param channel_name: channel name
:type channel_name: str
:param channel_type: channel type
:type channel_type: :class:`.DOTAChatChannelType_t`
:return: join result
:rtype: int
Response event: :attr:`EVENT_JOINED_CHANNEL`
"""
if self._dota.verbose_debug:
self._LOG.debug("Request to join chat channel: %s", channel_name)
self._dota.send(EDOTAGCMsg.EMsgGCJoinChatChannel, {
"channel_name": channel_name,
"channel_type": channel_type
})
resp = self.wait_event(('join_result', channel_name, channel_type), timeout=25)
if resp:
return resp[0]
else:
return None
def join_lobby_channel(self):
"""
Join the lobby channel if the client is in a lobby.
Response event: :attr:`EVENT_JOINED_CHANNEL`
"""
if self._dota.lobby:
key = "Lobby_%s" % self._dota.lobby.lobby_id, DOTAChatChannelType_t.DOTAChannelType_Lobby
return self.join_channel(*key)
@property
def lobby(self):
"""References lobby channel if client has joined it
:return: channel instance
:rtype: :class:`.ChatChannel`
"""
if self._dota.lobby:
key = "Lobby_%s" % self._dota.lobby.lobby_id, DOTAChatChannelType_t.DOTAChannelType_Lobby
return self._channels_by_name.get(key, None)
def join_party_channel(self):
"""
        Join the party channel if the client is in a party.
Response event: :attr:`EVENT_JOINED_CHANNEL`
"""
if self._dota.party:
key = "Party_%s" % self._dota.party.party_id, DOTAChatChannelType_t.DOTAChannelType_Party
return self.join_channel(*key)
@property
def party(self):
"""References party channel if client has joined it
:return: channel instance
:rtype: :class:`.ChatChannel`
"""
if self._dota.party:
key = "Party_%s" % self._dota.party.party_id, DOTAChatChannelType_t.DOTAChannelType_Party
return self._channels_by_name.get(key, None)
def get_channel_list(self):
"""
Requests a list of chat channels from the GC.
:return: List of chat channels
:rtype: `CMsgDOTAChatGetUserListResponse <https://github.com/ValvePython/dota2/blob/6cb1008f3070e008e9bed9521fad8d1438123aa1/protobufs/dota_gcmessages_client_chat.proto#L210-L220>`_, ``None``
"""
if self._dota.verbose_debug:
self._LOG.debug("Requesting channel list from GC.")
jobid = self._dota.send_job(EDOTAGCMsg.EMsgGCRequestChatChannelList, {})
resp = self._dota.wait_msg(jobid, timeout=25)
return resp
def leave_channel(self, channel_id):
if channel_id in self:
channel = self[channel_id]
if self._dota.verbose_debug:
self._LOG.debug("Leaving chat channel: %s", repr(channel))
self._dota.send(EDOTAGCMsg.EMsgGCLeaveChatChannel, {
"channel_id": channel_id
})
self._remove_channel(channel_id)
self.emit(self.EVENT_LEFT_CHANNEL, channel)
class ChatChannel(object):
def __init__(self, channel_manager, join_data):
self._manager = channel_manager
self._dota = self._manager._dota
self._LOG = self._manager._LOG
self.members = {}
self.id = join_data.channel_id
self.name = join_data.channel_name
self.type = join_data.channel_type
self.user_id = join_data.channel_user_id
self.max_members = join_data.max_members
self._process_members_from_proto(join_data)
def __repr__(self):
return "<ChatChannel(%s, %s, %s)>" % (
self.id,
repr(self.name),
self.type,
)
def _process_members_from_proto(self, data):
if isinstance(data, CMsgDOTAOtherLeftChatChannel):
self.members.pop(data.steam_id or data.channel_user_id, None)
return
elif isinstance(data, CMsgDOTAOtherJoinedChatChannel):
members = [data]
elif isinstance(data, CMsgDOTAJoinChatChannelResponse):
members = data.members
elif isinstance(data, CMsgDOTAChatChannelMemberUpdate):
for steam_id in data.left_steam_ids:
self.members.pop(steam_id, None)
members = data.joined_members
for member in members:
self.members[member.steam_id or member.channel_user_id] = (
member.persona_name,
member.status
)
def leave(self):
"""Leave channel"""
self._manager.leave_channel(self.id)
def send(self, message):
"""Send a message to the channel
:param message: message text
:type message: str
"""
self._dota.send(EDOTAGCMsg.EMsgGCChatMessage, {
"channel_id": self.id,
"text": message
})
def share_lobby(self):
"""Share current lobby to the channel"""
if self._dota.lobby:
self._dota.send(EDOTAGCMsg.EMsgGCChatMessage, {
"channel_id": self.id,
"share_lobby_id": self._dota.lobby.lobby_id,
"share_lobby_passkey": self._dota.lobby.pass_key
})
def flip_coin(self):
"""Flip a coin"""
self._dota.send(EDOTAGCMsg.EMsgGCChatMessage, {
"channel_id": self.id,
"coin_flip": True
})
def roll_dice(self, rollmin=1, rollmax=100):
"""Roll a dice
:param rollmin: dice min value
:type rollmin: int
:param rollmax: dice max value
:type rollmax: int
"""
self._dota.send(EDOTAGCMsg.EMsgGCChatMessage, {
"channel_id": self.id,
"dice_roll": {
"roll_min": dmin,
"roll_max": dmax
}
})
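# Usage sketch (illustrative): `dota` stands for an already connected and
# ready dota2.Dota2Client instance, and the channel name is a placeholder.
#
#   @dota.channels.on(ChannelManager.EVENT_MESSAGE)
#   def on_message(channel, message):
#       print(channel.name, message.text)
#
#   dota.channels.join_channel('my_channel')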
# -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (c) 2011, <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
##############################################################################
#
# Beware s/he who enters: uncommented, non unit-tested,
# don't-fix-it-if-it-ain't-broken kind of threaded code ahead.
#
import logging
import threading
import time
from ant.core.constants import *
import ant.core.message
from ant.core.exceptions import MessageError
MAX_ACK_QUEUE = 25
MAX_MSG_QUEUE = 25
logger = logging.getLogger(__name__)
def ProcessBuffer(buffer_):
messages = []
while True:
# hf = Message()
try:
# msg = hf.get_handler(buffer_)
msg = ant.core.message.get_proper_message(buffer_)
if not msg:
break
buffer_ = buffer_[len(msg.get_payload()) + 4:]
messages.append(msg)
except MessageError as e:
# print("MessageError for: {}".format(hf))
if e.internal == "CHECKSUM":
                buffer_ = buffer_[buffer_[1] + 4:]  # indexing bytes yields an int on Python 3
else:
logger.debug(e)
break
return (buffer_, messages,)
class EventPumper(object):
def __init__(self):
self._forced_buffer = []
def force_buffer(self, byte_data=b''):
self._forced_buffer.append(byte_data)
def pump(self, evm):
evm.pump_lock.acquire()
evm.pump = True
evm.pump_lock.release()
go = True
buffer_ = b''
while go:
evm.running_lock.acquire()
if not evm.running:
go = False
evm.running_lock.release()
if len(self._forced_buffer):
buffer_ += self._forced_buffer[0]
self._forced_buffer = [] if len(self._forced_buffer) == 1 else self._forced_buffer[1:]
else:
buffer_ += evm.driver.read(30)
if len(buffer_) == 0:
continue
buffer_, messages = ProcessBuffer(buffer_)
evm.callbacks_lock.acquire()
# print("acquired callbacks_lock. messages: {}".format(messages))
for message in messages:
for callback in evm.callbacks:
# try:
callback.process(message)
# except Exception as e:
# pass
evm.callbacks_lock.release()
time.sleep(0.002)
evm.pump_lock.acquire()
evm.pump = False
evm.pump_lock.release()
class EventCallback(object):
def process(self, msg):
pass
class AckCallback(EventCallback):
def __init__(self, evm):
self.evm = evm
def process(self, msg):
if isinstance(msg, ant.core.message.ChannelEventMessage):
self.evm.ack_lock.acquire()
self.evm.ack.append(msg)
if len(self.evm.ack) > MAX_ACK_QUEUE:
self.evm.ack = self.evm.ack[-MAX_ACK_QUEUE:]
self.evm.ack_lock.release()
class MsgCallback(EventCallback):
def __init__(self, evm):
self.evm = evm
def process(self, msg):
self.evm.msg_lock.acquire()
self.evm.msg.append(msg)
if len(self.evm.msg) > MAX_MSG_QUEUE:
self.evm.msg = self.evm.msg[-MAX_MSG_QUEUE:]
self.evm.msg_lock.release()
class EventMachine(object):
callbacks_lock = threading.Lock()
running_lock = threading.Lock()
pump_lock = threading.Lock()
ack_lock = threading.Lock()
msg_lock = threading.Lock()
def __init__(self, driver):
self.driver = driver
self.callbacks = []
self.running = False
self.pump = False
self.ack = []
self.msg = []
self.registerCallback(AckCallback(self))
self.registerCallback(MsgCallback(self))
self.event_thread = None
self.event_pumper = EventPumper()
def registerCallback(self, callback):
self.callbacks_lock.acquire()
if callback not in self.callbacks:
self.callbacks.append(callback)
self.callbacks_lock.release()
def removeCallback(self, callback):
self.callbacks_lock.acquire()
if callback in self.callbacks:
self.callbacks.remove(callback)
self.callbacks_lock.release()
def waitForAck(self, msg):
while True:
self.ack_lock.acquire()
for emsg in self.ack:
# print("Original msg id:{:02X} received msg id:{:02X}".format(msg.msg_id,
# emsg.getMessageID()))
if msg.msg_id != emsg.getMessageID():
continue
self.ack.remove(emsg)
self.ack_lock.release()
return emsg.getMessageCode()
self.ack_lock.release()
time.sleep(0.002)
def waitForMessage(self, class_):
while True:
self.msg_lock.acquire()
for emsg in self.msg:
if not isinstance(emsg, class_):
continue
self.msg.remove(emsg)
self.msg_lock.release()
return emsg
self.msg_lock.release()
time.sleep(0.002)
def start(self, driver=None):
self.running_lock.acquire()
if self.running:
self.running_lock.release()
return
self.running = True
if driver is not None:
self.driver = driver
self.event_thread = threading.Thread(target=self.event_pumper.pump, args=(self,))
self.event_thread.start()
while True:
self.pump_lock.acquire()
if self.pump:
self.pump_lock.release()
break
self.pump_lock.release()
time.sleep(0.001)
self.running_lock.release()
def stop(self):
self.running_lock.acquire()
if not self.running:
self.running_lock.release()
return
self.running = False
self.running_lock.release()
while True:
self.pump_lock.acquire()
if not self.pump:
self.pump_lock.release()
break
self.pump_lock.release()
time.sleep(0.001)
self.event_thread.join()
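# Smoke-test sketch (illustrative): a stub driver whose read() yields no
# traffic lets the machine be started and stopped without ANT hardware.
if __name__ == '__main__':
    class _StubDriver(object):
        def read(self, count):
            time.sleep(0.01)  # pretend to block on a USB read
            return b''        # no incoming data; the pump simply idles

    evm = EventMachine(_StubDriver())
    evm.start()
    time.sleep(0.05)
    evm.stop()
    print("event machine started and stopped cleanly")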
# Copyright (c) 2021-2022, NVIDIA CORPORATION & AFFILIATES
#
# SPDX-License-Identifier: BSD-3-Clause
import numpy as np
import cupy as cp
import cuquantum
from cuquantum import custatevec as cusv
nIndexBits = 3
nSvSize = (1 << nIndexBits)
nBasisBits = 3
basisBits = np.asarray([0, 1, 2], dtype=np.int32)
# In a real application, a random number in the range [0, 1) would be used.
randnum = 0.2
h_sv = np.asarray([0.0+0.0j, 0.0+0.1j, 0.3+0.4j, 0.1+0.2j,
0.2+0.2j, 0.3+0.3j, 0.1+0.1j, 0.4+0.5j], dtype=np.complex64)
d_sv = cp.asarray(h_sv)
expected_sv = np.asarray([0.0+0.0j, 0.0+0.0j, 0.0+0.0j, 0.2+0.4j,
0.0+0.0j, 0.6+0.6j, 0.2+0.2j, 0.0+0.0j], dtype=np.complex64)
expected_parity = 0
###################################################################################
# cuStateVec handle initialization
handle = cusv.create()
# measurement on z basis
parity = cusv.measure_on_z_basis(
handle, d_sv.data.ptr, cuquantum.cudaDataType.CUDA_C_32F, nIndexBits,
basisBits.ctypes.data, nBasisBits, randnum, cusv.Collapse.NORMALIZE_AND_ZERO)
# destroy handle
cusv.destroy(handle)
# check result
if not cp.allclose(expected_sv, d_sv):
raise ValueError("results mismatch")
if expected_parity != parity:
raise ValueError("results mismatch")
print("test passed")
import numpy as np
import pandas as pd
class Location:
"""
Component to initialize boid location and velocity, and update on each time step.
    Includes a hacky way of bouncing boids off the walls.
"""
configuration_defaults = {
'location': {
'max_velocity': 20,
'width': 1000, # Width of our field
'height': 1000, # Height of our field
}
}
def setup(self, builder):
self.width = builder.configuration.location.width
self.height = builder.configuration.location.height
self.max_velocity = builder.configuration.location.max_velocity
columns_created = ['x', 'vx', 'y', 'vy']
builder.population.initializes_simulants(self.on_create_simulants, columns_created)
builder.event.register_listener('time_step', self.on_time_step)
self.population_view = builder.population.get_view(columns_created)
def on_create_simulants(self, pop_data):
count = len(pop_data.index)
        # Scatter uniformly across the field with random velocities
        # (the commented-out expressions cluster boids in the center instead)
new_population = pd.DataFrame({
'x': np.random.uniform(0, self.width, count), # self.width * (0.4 + 0.2 * np.random.random(count)),
'y': np.random.uniform(0, self.height, count), # self.height * (0.4 + 0.2 * np.random.random(count)),
'vx': self.max_velocity * np.random.randn(count), # -0.5 + np.random.random(count),
'vy': self.max_velocity * np.random.randn(count), # -0.5 + np.random.random(count),
}, index=pop_data.index)
self.population_view.update(new_population)
def on_time_step(self, event):
pop = self.population_view.get(event.index)
# Limit velocity
pop.loc[pop.vx > self.max_velocity, 'vx'] = self.max_velocity
pop.loc[pop.vx < -self.max_velocity, 'vx'] = -self.max_velocity
pop.loc[pop.vy > self.max_velocity, 'vy'] = self.max_velocity
pop.loc[pop.vy < -self.max_velocity, 'vy'] = -self.max_velocity
pop['x'] = pop.apply(lambda row: self.move_boid(row.x, row.vx, self.width), axis=1)
pop['vx'] = pop.apply(lambda row: self.avoid_wall(row.x, row.vx, self.width), axis=1)
pop['y'] = pop.apply(lambda row: self.move_boid(row.y, row.vy, self.height), axis=1)
pop['vy'] = pop.apply(lambda row: self.avoid_wall(row.y, row.vy, self.height), axis=1)
self.population_view.update(pop)
def move_boid(self, position, velocity, limit):
if in_boundary(position, velocity, limit):
return position + velocity
else:
return position
def avoid_wall(self, position, velocity, limit):
if in_boundary(position, velocity, limit):
return velocity
else:
return velocity * -1
def in_boundary(position, velocity, limit):
return (position + velocity < limit) & (position + velocity > 0)
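# Standalone check of the wall logic (illustrative): move_boid/avoid_wall only
# use their arguments, so they can be exercised without the framework.
if __name__ == '__main__':
    loc = Location()
    assert loc.move_boid(position=995, velocity=20, limit=1000) == 995   # would leave -> hold position
    assert loc.avoid_wall(position=995, velocity=20, limit=1000) == -20  # bounce: flip velocity
    assert loc.move_boid(position=500, velocity=20, limit=1000) == 520   # free movement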
# -*- coding: utf-8 -*-
from __future__ import (absolute_import, division, print_function,
unicode_literals) # Avoid breaking Python 3
import uuid
from sqlalchemy import orm
from sqlalchemy.ext.mutable import Mutable
from sqlalchemy.types import TypeDecorator, TEXT
from pyramid_sqlalchemy import (
BaseObject,
metadata,
Session,
)
from ptah.util import json
def get_base():
"""Return the central SQLAlchemy declarative base."""
return BaseObject
def get_session():
"""Return the central SQLAlchemy contextual session.
To customize the kinds of sessions this contextual session creates, call
its ``configure`` method::
ptah.get_session().configure(...)
But if you do this, be careful about the 'ext' arg. If you pass it, the
ZopeTransactionExtension will be disabled and you won't be able to use this
contextual session with transaction managers. To keep the extension active
you'll have to re-add it as an argument. The extension is accessible under
the semi-private variable ``_zte``. Here's an example of adding your own
extensions without disabling the ZTE::
ptah.get_session().configure(ext=[ptah._zte, ...])
"""
return Session
class QueryFreezer(object):
""" A facade for sqla.Session.query which caches internal query structure.
:param builder: anonymous function containing SQLAlchemy query
.. code-block:: python
_sql_parent = ptah.QueryFreezer(
lambda: Session.query(Content)
.filter(Content.__uri__ == sqla.sql.bindparam('parent')))
"""
def __init__(self, builder):
self.id = uuid.uuid4().int
self.builder = builder
def reset(self):
pass
def iter(self, **params):
sa = get_session()
try:
data = sa.__ptah_cache__
except AttributeError:
sa.__ptah_cache__ = data = {}
query = data.get(self.id, None)
if query is None:
query = self.builder()
data[self.id] = query
return query.params(**params).with_session(sa())
def one(self, **params):
ret = list(self.iter(**params))
        count = len(ret)
        if count == 1:
            return ret[0]
        elif count == 0:
            raise orm.exc.NoResultFound("No row was found for one()")
        else:
            raise orm.exc.MultipleResultsFound(
                "Multiple rows were found for one()")
def first(self, **params):
ret = list(self.iter(**params))[0:1]
if len(ret) > 0:
return ret[0]
else:
return None
def all(self, **params):
return list(self.iter(**params))
def set_jsontype_serializer(serializer):
JsonType.serializer = serializer
class JsonType(TypeDecorator):
"""Represents an immutable structure as a json-encoded string."""
impl = TEXT
serializer = json
def __init__(self, serializer=None, *args, **kw):
if serializer is not None:
self.serializer = serializer
super(JsonType, self).__init__(*args, **kw)
def process_bind_param(self, value, dialect):
if value is not None:
value = self.serializer.dumps(value)
return value
def process_result_value(self, value, dialect):
if value is not None:
value = self.serializer.loads(value)
return value
class MutationList(Mutable, list):
@classmethod
def coerce(cls, key, value):
if not isinstance(value, MutationList):
if isinstance(value, list):
return MutationList(value)
return Mutable.coerce(key, value) # pragma: no cover
else:
return value # pragma: no cover
def append(self, value):
list.append(self, value)
self.changed()
def __setitem__(self, key, value):
list.__setitem__(self, key, value)
self.changed()
def __delitem__(self, key):
list.__delitem__(self, key)
self.changed()
class MutationDict(Mutable, dict):
@classmethod
def coerce(cls, key, value):
if not isinstance(value, MutationDict):
if isinstance(value, dict):
return MutationDict(value)
return Mutable.coerce(key, value) # pragma: no cover
else:
return value # pragma: no cover
def __setitem__(self, key, value):
dict.__setitem__(self, key, value)
self.changed()
def __delitem__(self, key):
dict.__delitem__(self, key)
self.changed()
def JsonDictType(serializer=None):
"""
function which returns a SQLA Column Type suitable to store a Json dict.
:returns: ptah.sqla.MutationDict
"""
return MutationDict.as_mutable(JsonType(serializer=serializer))
def JsonListType(serializer=None):
"""
function which returns a SQLA Column Type suitable to store a Json array.
:returns: ptah.sqla.MutationList
"""
return MutationList.as_mutable(JsonType(serializer=serializer))
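# Usage sketch (illustrative; the model below is hypothetical): a column typed
# with JsonDictType() tracks in-place mutations, so `record.data['key'] = 1`
# marks the row dirty without reassigning the whole attribute.
import sqlalchemy as sqla

class ExampleSettings(get_base()):
    __tablename__ = 'example_settings'
    id = sqla.Column(sqla.Integer, primary_key=True)
    data = sqla.Column(JsonDictType())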
# -*- coding: utf-8 -*-
import asyncio
from irc3 import IrcBot
from irc3.utils import parse_config
def main():
from roboto import model, http, loop, disc, config, commands
# Parse & load config file
config.update(parse_config('bot', "config.ini"))
# Connect & Init DB
model.init_db(config)
# Load IRC Bot
irc_bot = IrcBot.from_config(config, loop=loop)
irc_bot.run(forever=False)
# HTTP Server for playlist
http.setup()
# Start background task queue processor
asyncio.ensure_future(commands.dispatcher.task_consumer(), loop=loop)
# Start discord client
disc.dc.run(config.get("discord_token"))
if __name__ == "__main__":
main()
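# A minimal config.ini sketch (illustrative; the [bot] keys follow irc3's
# conventions and the values are placeholders; only 'discord_token' is read
# explicitly by this module):
#
#   [bot]
#   nick = roboto
#   host = irc.libera.chat
#   port = 6697
#   ssl = true
#   discord_token = <token>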
from main.switcher import Switcher
sw = Switcher()
sw.run()
#!/usr/bin/env python
"""Brown Dwarf Mass calculator.
Uses stellar parameter databases to find host star parameters. The
magnitude of the low mass companion from the provided flux ratio and the
corresponding mass is looked up in the Baraffe evolutionary models.
Inputs
------
Star name: str
Stellar identification number. eg. HD30501
flux_ratio: float
Flux ratio between host and companion.
age: float
Stellar Age. (Closest model is used)
"""
import argparse
import sys
from typing import List, Optional
from astropy.constants import M_jup, M_sun
from baraffe_tables.calculations import calculate_companion_magnitude, absolute_magnitude
from baraffe_tables.db_queries import get_stellar_params
from baraffe_tables.table_search import magnitude_table_search
def _parser() -> object:
"""Take care of all the argparse stuff.
:returns: the args
"""
parser = argparse.ArgumentParser(
description='Determine mass of stellar companion from a flux ratio')
parser.add_argument('star_name', help='Name of host star.', type=str)
parser.add_argument('flux_ratio', type=float,
help='Flux ratio between host and companion (F_companion/F_host)')
parser.add_argument('stellar_age', help='Star age (Gyr)', type=float)
parser.add_argument("-b", "--bands", choices=["All", "J", "H", "K"], default=["K"],
help='Magnitude bands for the flux ratio value', nargs="+", type=str)
parser.add_argument('-m', '--model', choices=['03', '15', '2003', '2015'],
help='Baraffe model to use [2003, 2015]',
default='2003', type=str)
parser.add_argument("-f", "--full_table", default=False, action="store_true",
help="Print full table.")
parser.add_argument("-s", "--star_pars", default=False, action="store_true",
help="Print star parameters for paper.")
parser.add_argument("--age_interp", default=False, action="store_true",
help="Interpolate age between tables, instead of closest age only.")
return parser.parse_args()
def main(star_name: str, flux_ratio: float, stellar_age: float,
bands: Optional[List[str]] = None, model: str = "2003",
star_pars: bool = False, full_table: bool = False, age_interp: bool = False) -> int:
"""Compute companion mass from flux ratio value.
Parameters
----------
star_name: str
Stellar identification number. eg. HD30501
flux_ratio: float
Flux ratio for the system (F_companion/F_host).
stellar_age: float
Age of star/system (Gyr).
bands: str
Wavelength band to use. (optional)
    model: str (optional)
Year of Baraffe model to use [2003 (default), 2015].
full_table: bool
Print all parameters in table.
star_pars: bool
Print star parameters also.
age_interp: bool
Interpolate tables across age. Default=False.
"""
    Jup_sol_mass = (M_sun / M_jup).value  # number of Jupiter masses in one solar mass
if (bands is None) or ("All" in bands):
bands = ["H", "J", "K"]
# Obtain Stellar parameters from astroquery
    star_params = get_stellar_params(star_name)  # returns an astroquery result table
for band in bands:
print("{0!s} band\n------".format(band))
mag_label = "FLUX_{0!s}".format(band)
# Convert stellar apparent mag to absolute magnitude.
apparent_mag = star_params[mag_label]
parallax = star_params['PLX_VALUE']
if parallax.unit != "mas":
raise ValueError("Parallax unit not correct")
absolute_mag = absolute_magnitude(parallax.data[0], apparent_mag.data[0])
# Calculate Absolute companion magnitude for this flux ratio
companion_mag = calculate_companion_magnitude(absolute_mag, flux_ratio)
print("Magnitude calculation for companion M{0} = {1}".format(band, companion_mag))
# Find companion parameters that match these magnitudes
companion_params = magnitude_table_search(companion_mag, stellar_age,
band=band, model=model, age_interp=age_interp)
# Print flux ratios using a generator
print("Estimated Companion Mass from {0} band flux ratio".format(band.upper()))
print("M/M_S = {0} (M_star)".format(companion_params["M/Ms"]) +
" = {} (M_Jup)".format(Jup_sol_mass * companion_params["M/Ms"]) +
", Temp = {} K".format(companion_params["Teff"]))
if full_table:
print("Companion parameters:")
print(companion_params)
if star_pars:
print("\nStellar parameters:")
star_params.pprint(show_unit=True)
return 0
if __name__ == '__main__':
args = vars(_parser())
opts = {k: args[k] for k in args}
sys.exit(main(**opts))
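# Example invocation (illustrative; the script name and the flux-ratio/age
# values are placeholders, HD30501 is the example star from the docstring):
#
#   python mass_from_flux_ratio.py HD30501 0.001 5.0 -b K -m 2003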
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Examples for the NURBS-Python Package
Released under MIT License
Developed by <NAME> (c) 2018
"""
from geomdl.shapes import curve2d
from geomdl import operations
from geomdl import multi
from geomdl.visualization import VisMPL
# Generate a NURBS full circle from 7 control points
circle = curve2d.full_circle2(radius=5.0)
circle.sample_size = 75
# Render the circle and the control points polygon
vis_config = VisMPL.VisConfig(ctrlpts=True, figure_size=[9, 8])
vis_comp = VisMPL.VisCurve2D(config=vis_config)
circle.vis = vis_comp
circle.render()
# Decompose the circle into Bezier curve segments
segments = operations.decompose_curve(circle)
bezier_segments = multi.CurveContainer(segments)
# Set sample size (delta)
bezier_segments.sample_size = 25
# Render the Bezier curve segments and their control points polygons
vis_config = VisMPL.VisConfig(ctrlpts=True, figure_size=[9, 8])
vis_comp = VisMPL.VisCurve2D(config=vis_config)
bezier_segments.vis = vis_comp
bezier_segments.render()
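# Optional check (illustrative): report how many Bezier arcs the decomposition
# produced; each arc is a separate curve object with its own control points.
print("Number of Bezier segments:", len(segments))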
# Good to have something here to put a breakpoint
pass
import torch
import numpy as np
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.utils.weight_norm as wn
import torch.distributions as D
# Basic Layers
# -------------------------------------------------------------------------------------------------------
# taken from https://github.com/jzbontar/pixelcnn-pytorch
class MaskedConv2d(nn.Conv2d):
def __init__(self, mask_type, *args, **kwargs):
super(MaskedConv2d, self).__init__(*args, **kwargs)
assert mask_type in {'A', 'B'}
self.register_buffer('mask', self.weight.data.clone())
_, _, kH, kW = self.weight.size()
self.mask.fill_(1)
self.mask[:, :, kH // 2, kW // 2 + (mask_type == 'B'):] = 0
self.mask[:, :, kH // 2 + 1:] = 0
def forward(self, x):
self.weight.data *= self.mask
return super(MaskedConv2d, self).forward(x)
class ARMultiConv2d(nn.Module):
def __init__(self, n_h, n_out, args, nl=F.elu):
super(ARMultiConv2d, self).__init__()
self.nl = nl
convs, out_convs = [], []
for i, size in enumerate(n_h):
convs += [MaskedConv2d('A' if i == 0 else 'B', args.z_size if i == 0 else args.h_size, args.h_size, 3, 1, 1)]
for i, size in enumerate(n_out):
out_convs += [MaskedConv2d('B', args.h_size, args.z_size, 3, 1, 1)]
self.convs = nn.ModuleList(convs)
self.out_convs = nn.ModuleList(out_convs)
def forward(self, x, context):
for i, conv_layer in enumerate(self.convs):
x = conv_layer(x)
if i == 0:
x += context
x = self.nl(x)
return [conv_layer(x) for conv_layer in self.out_convs]
# IAF building block
# -------------------------------------------------------------------------------------------------------
class IAFLayer(nn.Module):
def __init__(self, args, downsample):
super(IAFLayer, self).__init__()
n_in = args.h_size
n_out = args.h_size * 2 + args.z_size * 2
self.z_size = args.z_size
self.h_size = args.h_size
self.iaf = args.iaf
self.ds = downsample
self.args = args
if downsample:
stride, padding, filter_size = 2, 1, 4
self.down_conv_b = wn(nn.ConvTranspose2d(args.h_size + args.z_size, args.h_size, 4, 2, 1))
else:
stride, padding, filter_size = 1, 1, 3
self.down_conv_b = wn(nn.Conv2d(args.h_size + args.z_size, args.h_size, 3, 1, 1))
# create modules for UP pass:
self.up_conv_a = wn(nn.Conv2d(n_in, n_out, filter_size, stride, padding))
self.up_conv_b = wn(nn.Conv2d(args.h_size, args.h_size, 3, 1, 1))
# create modules for DOWN pass:
self.down_conv_a = wn(nn.Conv2d(n_in, 4 * self.z_size + 2 * self.h_size, 3, 1, 1))
if args.iaf:
self.down_ar_conv = ARMultiConv2d([args.h_size] * 2, [args.z_size] * 2, args)
def up(self, input):
x = F.elu(input)
out_conv = self.up_conv_a(x)
self.qz_mean, self.qz_logsd, self.up_context, h = out_conv.split([self.z_size] * 2 + [self.h_size] * 2, 1)
h = F.elu(h)
h = self.up_conv_b(h)
if self.ds:
            input = F.interpolate(input, scale_factor=0.5)  # F.upsample is deprecated
return input + 0.1 * h
def down(self, input, sample=False):
x = F.elu(input)
x = self.down_conv_a(x)
pz_mean, pz_logsd, rz_mean, rz_logsd, down_context, h_det = x.split([self.z_size] * 4 + [self.h_size] * 2, 1)
prior = D.Normal(pz_mean, torch.exp(2 * pz_logsd))
if sample:
z = prior.rsample()
kl = kl_obj = torch.zeros(input.size(0)).to(input.device)
else:
posterior = D.Normal(rz_mean + self.qz_mean, torch.exp(rz_logsd + self.qz_logsd))
z = posterior.rsample()
logqs = posterior.log_prob(z)
context = self.up_context + down_context
if self.iaf:
x = self.down_ar_conv(z, context)
arw_mean, arw_logsd = x[0] * 0.1, x[1] * 0.1
z = (z - arw_mean) / torch.exp(arw_logsd)
            # the log-density at the new point is the old one plus the
            # log-determinant of the transformation
logq = logqs
logqs += arw_logsd
logps = prior.log_prob(z)
kl = logqs - logps
# free bits (doing as in the original repo, even if weird)
kl_obj = kl.sum(dim=(-2, -1)).mean(dim=0, keepdim=True)
kl_obj = kl_obj.clamp(min=self.args.free_bits)
kl_obj = kl_obj.expand(kl.size(0), -1)
kl_obj = kl_obj.sum(dim=1)
# sum over all the dimensions, but the batch
kl = kl.sum(dim=(1,2,3))
h = torch.cat((z, h_det), 1)
h = F.elu(h)
if self.ds:
            input = F.interpolate(input, scale_factor=2.)  # F.upsample is deprecated
h = self.down_conv_b(h)
return input + 0.1 * h, kl, kl_obj
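# Minimal shape check (illustrative; the `args` fields mirror exactly what
# IAFLayer reads, and the hyper-parameter values are placeholders):
if __name__ == '__main__':
    from types import SimpleNamespace
    args = SimpleNamespace(h_size=32, z_size=8, iaf=True, free_bits=0.1)
    layer = IAFLayer(args, downsample=False)
    h = torch.randn(4, args.h_size, 8, 8)     # (batch, h_size, H, W)
    h = layer.up(h)                           # bottom-up pass caches posterior stats
    out, kl, kl_obj = layer.down(h)           # stochastic top-down pass
    print(out.shape, kl.shape, kl_obj.shape)  # (4, 32, 8, 8), (4,), (4,)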
# -*- coding: utf-8 -*-
from cparser import *
from collections import OrderedDict
from collections import namedtuple
import sys
###############################################################################
# #
# TABLES, SEMANTIC ANALYSIS #
# #
###############################################################################
# scope type for asm_code generation
STRUCTTYPE = 1
COMMONTYPE = 2
class ScopedSymbolTable(object):
"""scoped symbol table for look up"""
# Tables.vars
# Tables.structs
Tables = namedtuple('Tables', ['vars', 'structs'])
def __init__(self, name, scope_type=None, enclosing_scope=None):
self.name = name
self.type = scope_type if scope_type is not None else COMMONTYPE
self._symbols = self.Tables(OrderedDict(), OrderedDict())
# parent scope, append itself
self.enclosing_scope = enclosing_scope
if enclosing_scope is not None:
self.enclosing_scope.children.append(self)
# the child scopes of current scope
self.children = []
@property
def symbols(self):
return self._symbols
# insert symbols
def insert(self, name, value):
"""add symbol with the given value"""
if name in self._symbols.vars:
            if self._symbols.vars[name].type.get_string() != value.type.get_string():
                comment = f"Variable '{name}' was already declared as " \
                          f"'{self._symbols.vars[name].type.get_string()}' ({value.line})."
                raise CompilerError(comment)
            else:
                comment = f"Variable '{name}' was already declared in line " \
                          f"{self._symbols.vars[name].line} ({value.line})."
                raise CompilerError(comment)
self._symbols.vars[name] = value
def insert_struct(self, name, value):
"""add symbol with the given value"""
if name not in self._symbols.structs:
self._symbols.structs[name] = value
else:
symbol = self.lookup_struct(name)
if symbol.exprs.is_null():
# replace the null member one
self._symbols.structs[name] = value
else:
comment = f"Redeclare the struct"
issue_collector.add(WarningIssue(comment))
def lookup(self, name, recursively_search=True):
# 'symbol' is either an instance of the Symbol class or None
symbol = self._symbols.vars.get(name)
if symbol is not None:
return symbol
# if we do not want search recursively, like searching member inside struct scope
if not recursively_search:
return None
# recursively go up the chain and lookup the name
if self.enclosing_scope is not None:
return self.enclosing_scope.lookup(name)
else:
return None
def lookup_struct(self, name):
symbol = self._symbols.structs.get(name)
# for the embedded struct declaration, find the upper one
if symbol is not None and not symbol.exprs.is_null():
return symbol
# recursively go up the chain and lookup the name
if self.enclosing_scope is not None:
return self.enclosing_scope.lookup_struct(name)
else:
return None
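# Scope-chain behaviour in brief (illustrative; real symbol values are
# Declaration AST nodes rather than the bare names shown here):
#
#   entry = ScopedSymbolTable('entry')
#   block = ScopedSymbolTable('block', enclosing_scope=entry)
#   entry.insert('x', x_decl)
#   block.lookup('x')                             # -> x_decl, found via the parent scope
#   block.lookup('x', recursively_search=False)   # -> None, member-style local lookup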
class SymbolTableVisitor(NodeVisitor):
def __init__(self):
self.current_scope = None
self.enclosing_scope = None
self.recursively_search = True
def push_table(self, node, name, scope_type=None):
"""Pushes a new symbol table onto the visitor's symbol table stack"""
self.current_scope = ScopedSymbolTable(
name=name,
scope_type=scope_type,
enclosing_scope=self.current_scope # None
)
# every node has its own symbol table, easy for code generation
node.scope_symbol = self.current_scope
def pop_table(self):
# return to parent scope
self.current_scope = self.current_scope.enclosing_scope
def visit_ast(self, node):
        # nothing to do
pass
def visit_id(self, node):
symbol = self.current_scope.lookup(node.expr, self.recursively_search)
if symbol is not None:
# used in the type check
# obtain the type of the Id node
node.symbol = symbol
node.symbol.is_used = True
if not isinstance(node.symbol.type, FunctionType):
node.set_lvalue()
# in StructOp to search the member inside the struct scope
base_type = node.symbol.get_base_type()
if isinstance(base_type, StructType):
if base_type.exprs.is_null():
struct_symbol = self.current_scope.lookup_struct(base_type.name)
if struct_symbol is not None:
base_type.exprs = struct_symbol.exprs
node.symbol.scope_symbol = struct_symbol.scope_symbol
else:
comment = f"Struct '{base_type.name}' not found({node.line})"
raise CompilerError(comment)
else:
comment = f"Identifier '{node.expr}' not found({node.line})"
raise CompilerError(comment)
def visit_unary_op(self, node):
"""Pointer, AddrOf, and Negative"""
node.expr.visit(self)
node.symbol = node.expr.symbol
def visit_function_op(self, node):
"""FunctionOp"""
node.expr.visit(self)
node.args.visit(self)
def visit_array_op(self, node):
node.expr.visit(self)
node.symbol = node.expr.symbol
        # if we enter a new area, we need to change the current scope
super_recursive = self.recursively_search
self.recursively_search = True
self.current_scope = self.enclosing_scope
node.index.visit(self)
self.recursively_search = super_recursive
def visit_struct_op(self, node):
node.parent.visit(self)
        # member lookup is special: search only inside the struct's own scope
self.recursively_search = False
self.current_scope = node.parent.symbol.scope_symbol
node.expr.visit(self)
node.symbol = node.expr.symbol
self.recursively_search = True
self.current_scope = self.enclosing_scope
def visit_bin_op(self, node):
node.left.visit(self)
node.right.visit(self)
def visit_node_list(self, node):
self.visit_list(node.nodes)
def visit_translation_unit(self, node):
"""the entry of the file"""
self.current_scope = ScopedSymbolTable(
name="entry",
enclosing_scope=self.current_scope # None
)
self.visit_node_list(node)
node.scope_symbol = self.current_scope
def add_symbol(self, node):
# for struct Point* s; the type is PointerType(StructType)
base_type = node.get_base_type()
if not node.name:
self.current_scope.insert_struct(base_type.name, base_type)
else:
self.current_scope.insert(node.name, node)
if isinstance(base_type, StructType):
self.current_scope.insert_struct(base_type.name, base_type)
def visit_declaration(self, node):
self.add_symbol(node)
# the members inside struct are a new scope
base_type = node.get_base_type()
if node.name is not None and isinstance(base_type, StructType):
if isinstance(node.type, StructType) or isinstance(node.type, ArrayType):
struct_str = 'struct_' + node.name
self.push_table(node, struct_str, STRUCTTYPE)
struct_symbol = self.current_scope.lookup_struct(base_type.name)
if struct_symbol is None:
comment = f"Struct '{base_type.name}' not found({node.line})"
raise CompilerError(comment)
struct_symbol.scope_symbol = self.current_scope
if base_type.exprs.is_null():
base_type.exprs = struct_symbol.exprs
base_type.exprs.visit(self)
self.pop_table()
if node.has_initialized():
init_node = self.assign_init_variables(node, node.init_value)
node.add_init_value(init_node)
# now visit its initial values
node.init_value.visit(self)
def assign_init_variables(self, decl_node, init_values, scope_name=None):
# set the init_node to the node
nodes = NodeList()
if isinstance(decl_node.type, ArrayType):
array_index = 0
for child_node in init_values.nodes:
sub_node = ArrayOp(Id(decl_node.name, lineno=decl_node.line), Const(array_index, BaseType('int')),
lineno=decl_node.line)
if scope_name is not None:
sub_node = StructOp(scope_name, op='.', right=sub_node, lineno=decl_node.line)
child_init_node = BinOp(left=sub_node, op='=', right=child_node, lineno=decl_node.line)
nodes.add(child_init_node)
array_index += 1
elif isinstance(decl_node.type, StructType):
for index in range(len(init_values.nodes)):
init_value = init_values.nodes[index]
variable = decl_node.type.exprs.nodes[index]
init_nodes = self.assign_init_variables(variable, init_value, Id(decl_node.name))
for child_init_node in init_nodes.nodes:
if scope_name is not None:
child_init_node = StructOp(scope_name, op='.', right=child_init_node)
nodes.add(child_init_node)
else:
sub_node = Id(decl_node.name, lineno=decl_node.line)
if scope_name is not None:
init_node = BinOp(StructOp(scope_name, op='.', right=sub_node), op='=', right=init_values,
lineno=decl_node.line)
else:
init_node = BinOp(left=sub_node, op='=', right=init_values, lineno=decl_node.line)
nodes.add(init_node)
return nodes
def visit_function_type(self, node):
node.parms.visit(self)
def visit_parameter_list(self, node):
"""Assign a number to each parameter. This will later be
useful for the code generation phase."""
parms_index = 0
for parm in node.nodes:
parm.visit(self)
parm.parms_index = parms_index
parms_index += 1
def visit_function_defn(self, node):
self.add_symbol(node)
self.push_table(node, node.name)
if not node.type.is_null():
# insert parameters into current symbols
node.type.visit(self)
# insert local variables into children
node.body.visit(self)
self.pop_table()
def visit_compound_statement(self, node):
# because compound statement will use BEGIN and END to create a scope
self.push_table(node, "compound statements")
self.enclosing_scope = self.current_scope
node.declarations.visit(self)
node.statements.visit(self)
self.pop_table()
def visit_expression_statement(self, node):
node.expr.visit(self)
def visit_if_statement(self, node):
node.expr.visit(self)
node.then_stmt.visit(self)
node.else_stmt.visit(self)
def visit_return_statement(self, node):
node.expr.visit(self)
def visit_for_loop(self, node):
node.begin_stmt.visit(self)
node.expr.visit(self)
node.end_stmt.visit(self)
node.stmt.visit(self)
###############################################################################
# #
# FLOW CONTROL #
# #
###############################################################################
class FlowControlVisitor(NodeVisitor):
"""Performs flow control checking on the AST. This makes sure
that functions return properly through all branches, that
break/continue statements are only present within loops, and so
forth."""
def __init__(self):
self.in_loop = False
self.returns = []
self.cur_loop = []
self.curr_func_name = ''
self.curr_return_line = sys.maxsize
def visit_empty_node(self, node):
node.has_return = False
def visit_statements_list(self, node):
node.has_return = False
for stmt in node.nodes:
if node.has_return:
comment = f"Statements starting at line{stmt.line} is unreachable in '{self.curr_func_name}' "
issue_collector.add(WarningIssue(comment))
                # mark it so we can skip code generation for the unreachable part
stmt.set_to_ignore()
stmt.visit(self)
if stmt.has_return:
node.has_return = True
# obtain the statement with the smallest line, actually the first time
if stmt.line < self.curr_return_line:
self.curr_return_line = stmt.line
def visit_declaration_list(self, node):
for child_node in node.nodes:
if child_node.is_used and child_node.line > self.curr_return_line:
child_node.is_used = False
def visit_translation_unit(self, node):
self.visit_list(node.nodes)
def visit_for_loop(self, node):
"""whether it is already in a loop"""
node.in_func_name = self.curr_func_name
self.cur_loop.append(self.in_loop)
self.in_loop = True
node.stmt.visit(self)
node.has_return = node.stmt.has_return
self.in_loop = self.cur_loop.pop()
def visit_break_statement(self, node):
if not self.in_loop:
comment = f"Break statement outside of loop({node.line})."
raise CompilerError(comment)
def visit_continue_statement(self, node):
if not self.in_loop:
comment = f"Continue statement outside of loop({node.line})."
raise CompilerError(comment)
def visit_if_statement(self, node):
node.in_func_name = self.curr_func_name
node.expr.visit(self)
node.then_stmt.visit(self)
node.else_stmt.visit(self)
if node.then_stmt.has_return and node.else_stmt.has_return:
node.has_return = True
elif (node.expr.is_const() and int(node.expr.expr) > 0) and node.then_stmt.has_return:
            # 'if (1) { return ...; }' always returns, so treat it as a return
node.has_return = True
def visit_function_defn(self, node):
self.curr_func_name = node.name
self.in_loop = False
node.body.visit(self)
if not node.return_type.get_outer_string() == 'void' and not node.body.has_return:
comment = f"Function '{self.curr_func_name}' doesn't return through all branches."
raise CompilerError(comment)
elif node.return_type.get_outer_string() == 'void' and node.body.has_return:
comment = f"Function '{self.curr_func_name}' return values while it is void."
raise CompilerError(comment)
        # record whether the function contains unreachable (ignored) parts
if len(self.returns) > 1:
node.has_ignore_parts = True
self.returns[-1].is_final_one = True
def visit_return_statement(self, node):
node.has_return = True
node.in_func_name = self.curr_func_name
self.returns.append(node)
def visit_compound_statement(self, node):
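        # visit statements before declarations so curr_return_line is known
        # when declarations placed after a return are marked unused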
self.curr_return_line = sys.maxsize
node.statements.visit(self)
node.has_return = node.statements.has_return
node.declarations.visit(self)
###############################################################################
# #
# TYPE CHECKING #
# #
###############################################################################
class TypeCheckVisitor(NodeVisitor):
"""Visitor that performs type checking on the AST, attaching a
Type object subclass to every eligible node and making sure these
types don't conflict."""
def __init__(self):
self.curr_func = None
@staticmethod
def process_condition(expr):
"""Does simple type checking for an expression."""
if expr.type.get_outer_string() not in ('int', 'char'):
            if expr.type.get_outer_string() == 'pointer':
                # pointer comparisons are allowed as conditions
                pass
else:
comment = f"Conditional expression is '{expr.type.get_outer_string()}', "\
f"which doesn't evaluate to an int/char."
raise CompilerError(comment)
@staticmethod
def coerce_const(var, _type):
"""If the given typed terminal is a constant, coerces it to
the given type."""
if var.is_const() and _type.get_string() in ('int', 'char'):
var.type = _type
def coerce_consts(self, lhs, rhs):
"""Looks at two typed terminals to see if one of them
is a constant integral. If it is, then coerce it to
the type of the other terminal."""
if lhs.is_const():
self.coerce_const(lhs, rhs.type)
elif rhs.is_const():
self.coerce_const(rhs, lhs.type)
@staticmethod
def compare_types(name_str, from_type, to_type, lineno):
"""Compares the two types to see if it's possible to perform a
binary operation on them."""
conflict = 0
from_str = from_type.get_string()
to_str = to_type.get_string()
if from_str != to_str:
if from_str == 'char':
if to_str == 'int':
pass
else:
conflict = 2
elif from_str == 'int':
if to_str == 'char':
conflict = 1
else:
sub_from_str = from_type.get_outer_string()
sub_to_str = to_type.get_outer_string()
# allow cast
if sub_from_str != sub_to_str:
conflict = 2
if conflict == 1:
comment = f"{name_str}: Conversion from '{from_str}' to '{to_str}' may result in data loss({lineno})."
issue_collector.add(WarningIssue(comment))
elif conflict == 2:
comment = f"{name_str}: Cannot convert from '{from_str}' to '{to_str}'({lineno})."
raise CompilerError(comment)
def visit_ast(self, node):
pass
def visit_empty_node(self, node):
node.type = BaseType('void')
def visit_id(self, node):
node.type = node.symbol.type
def visit_negative(self, node):
node.expr.visit(self)
node.type = node.expr.type
def visit_addr_of(self, node):
node.expr.visit(self)
if not node.expr.is_lvalue():
comment = f"Address-of (&) target has no address!({node.line})"
raise CompilerError(comment)
else:
node.expr.set_oaddr()
node.type = PointerType(node.expr.type)
def visit_pointer(self, node):
node.expr.visit(self)
if node.expr.type.get_outer_string() == 'pointer':
            # dereference one level: the result type is the pointer's child type
node.type = node.expr.type.child
node.set_lvalue()
else:
comment = f"Pointer dereference (*) target is not a pointer!({node.line})"
raise CompilerError(comment)
def visit_struct_op(self, node):
        # the struct-op node takes the member's type, so resolve the parent first
        node.parent.visit(self)
        # check that the access operator matches the parent's type
        if isinstance(node.parent.type, PointerType) and not node.op == '->':
            comment = f"Access the pointed-to struct member using '->'({node.line})"
            issue_collector.add(ErrorIssue(comment))
        elif isinstance(node.parent.type, StructType) and not node.op == '.':
            comment = f"Access the struct member using '.'({node.line})"
            issue_collector.add(ErrorIssue(comment))
node.expr.visit(self)
node.type = node.expr.type
# lvalue control
if node.expr.is_lvalue():
node.set_lvalue()
def visit_bin_op(self, node):
node.left.visit(self)
node.right.visit(self)
if node.op == '=':
if not node.left.is_lvalue():
comment = f"'{node.left.expr}' is an invalid lvalue: not an address!"
raise CompilerError(comment)
if isinstance(node.left, Pointer):
node.left.set_oaddr()
self.coerce_const(node.right, node.left.type)
self.compare_types("Assignment", node.right.type, node.left.type, node.line)
node.type = node.left.type
else:
            # for other binary operators, coerce constants and check the operand types match
self.coerce_consts(node.left, node.right)
self.compare_types("BinOp '%s'" % node.op, node.right.type, node.left.type, node.line)
node.type = node.left.type
def visit_node_list(self, node):
self.visit_list(node.nodes)
def visit_compound_statement(self, node):
# since we need to check there are initial values set
node.declarations.visit(self)
node.statements.visit(self)
def visit_declaration(self, node):
# 1. determine the lvalue
node.set_lvalue()
# 2. prevent void a;
if node.type.get_outer_string() == 'void':
comment = f"Cannot declare '{node.name}' with void type({node.line})"
raise CompilerError(comment)
if node.has_initialized():
node.init_value.visit(self)
def visit_array_op(self, node):
node.expr.visit(self)
node.index.visit(self)
if node.index.type.get_outer_string() not in ('int', 'char'):
comment = f"Array index is not an int or char!({node.line})"
raise CompilerError(comment)
elif node.expr.type.get_outer_string() != 'pointer':
comment = f"Array expression is not a pointer!({node.line})"
raise CompilerError(comment)
else:
node.type = node.expr.type.child
            # mark it as a valid lvalue
node.set_lvalue()
def visit_function_op(self, node):
node.expr.visit(self)
if not isinstance(node.expr.type, FunctionType):
comment = f"Target of function expression is not a function!({node.line})"
raise CompilerError(comment)
node.type = node.expr.symbol.type.get_return_type()
node.args.visit(self)
parms = node.expr.symbol.type.parms
num_args = len(node.args.nodes)
num_parms = len(parms.nodes)
if num_args > num_parms:
comment = f"Too many arguments passed to function.({node.line})"
raise CompilerError(comment)
elif num_args < num_parms:
comment = f"Too few arguments passed to function.({node.line})"
raise CompilerError(comment)
for arg, parm in zip(node.args.nodes, parms.nodes):
self.coerce_const(arg, parm.type)
self.compare_types("Function call argument", arg.type, parm.type, node.line)
def visit_function_defn(self, node):
self.curr_func = node
node.body.visit(self)
def visit_return_statement(self, node):
node.expr.visit(self)
return_type = self.curr_func.return_type
self.coerce_const(node.expr, return_type)
self.compare_types("Return expression", node.expr.type, return_type, node.line)
node.expr.coerce_to_type = return_type
def visit_if_statement(self, node):
node.expr.visit(self)
# process_condition is necessary
self.process_condition(node.expr)
node.then_stmt.visit(self)
node.else_stmt.visit(self)
def visit_for_loop(self, node):
node.begin_stmt.visit(self)
node.end_stmt.visit(self)
node.expr.visit(self)
# support for(;;)
if not node.expr.is_null():
self.process_condition(node.expr)
node.stmt.visit(self)
def visit_expression_statement(self, node):
node.expr.visit(self)
###############################################################################
# #
# SYNTAX CHECKING #
# #
###############################################################################
class Syntax:
def __init__(self, ast_tree):
self.ast_tree = ast_tree
# syntax check
def check(self):
# 1. scope check
scope_check = SymbolTableVisitor()
with compiler_error_protect():
scope_check.visit(self.ast_tree)
if not issue_collector.ok():
return None
# 2. flow check
flow_check = FlowControlVisitor()
with compiler_error_protect():
flow_check.visit(self.ast_tree)
if not issue_collector.ok():
return None
# 3. type check
type_check = TypeCheckVisitor()
with compiler_error_protect():
type_check.visit(self.ast_tree)
if not issue_collector.ok():
return None
| [
"collections.OrderedDict",
"collections.namedtuple"
] | [((728, 769), 'collections.namedtuple', 'namedtuple', (['"""Tables"""', "['vars', 'structs']"], {}), "('Tables', ['vars', 'structs'])\n", (738, 769), False, 'from collections import namedtuple\n'), ((974, 987), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (985, 987), False, 'from collections import OrderedDict\n'), ((989, 1002), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (1000, 1002), False, 'from collections import OrderedDict\n')] |
import mysql.connector
from datetime import datetime
TAG = {}
def tag_id(tag, c):
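    """Return the id for *tag*, inserting a new tagInfo row on first use
    (a simple memoised get-or-create backed by the TAG cache)."""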
try:
return TAG[tag]
except KeyError:
c.execute("INSERT INTO tagInfo (tagName) VALUE (%s)", (tag,))
new_tag_id = c.lastrowid
TAG[tag] = new_tag_id
return new_tag_id
def main():
wav_folder = "ogg/"
db_name = "cocktail"
with open("wav_info.txt", "r") as info:
lines = info.readlines()
cnx = mysql.connector.connect(user="root",
password="<PASSWORD>", host="localhost", database=db_name)
c = cnx.cursor()
add_file = ("INSERT INTO fileInfo "
"(fileName, file, fileSizeKb, fileLenSec) "
"VALUES "
"(%s, %s, %s, %s)")
add_snippet = ("INSERT INTO snippetInfo "
"(fileID, sizeKb, startTime, lenSec, creationDate, lastModifiedDate, userId) "
"VALUES "
"(%s, %s, %s, %s, %s, %s, %s)")
add_tag = ("INSERT INTO bridgeSnippetTagTable "
"(snippetID, tagID) "
"VALUES "
"(%s, %s)")
i = 0
for line in lines:
file_name, env, _, sex, age, text, t, file_type, sample_rate, channels, encoding = line.split("\t")
print(file_name, sex, age, t)
with open(wav_folder + file_name, "rb") as f:
data = f.read()
c.execute(add_file, (
file_name[:-4],
data,
len(data),
t)
)
file_id = c.lastrowid
c.execute(add_snippet, (
file_id,
len(data),
0,
t,
datetime.now(),
datetime.now(),
1)
)
snippet_id = c.lastrowid
for tag in [sex, age]:
c.execute(add_tag, (
snippet_id,
tag_id(tag, c))
)
cnx.commit()
cnx.commit()
c.close()
cnx.close()
if __name__ == "__main__":
main()
| [
"datetime.datetime.now"
] | [((1668, 1682), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (1680, 1682), False, 'from datetime import datetime\n'), ((1700, 1714), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (1712, 1714), False, 'from datetime import datetime\n')] |
r"""
This checks basic RNN features with CUDA.
For example, it will raise an exception if some required components are missing.
"""
import torch
import torch.nn as nn
device = torch.device("cuda:0")
rnn = nn.RNN(10, 20, 2).to(device)
inputs = torch.randn(5, 3, 10).to(device)
h0 = torch.randn(2, 3, 20).to(device)
output, hn = rnn(inputs, h0)
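# With the shapes above, output has shape (seq_len=5, batch=3, hidden=20)
# and hn has shape (num_layers=2, batch=3, hidden=20).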
| [
"torch.randn",
"torch.nn.RNN",
"torch.device"
] | [((181, 203), 'torch.device', 'torch.device', (['"""cuda:0"""'], {}), "('cuda:0')\n", (193, 203), False, 'import torch\n'), ((210, 227), 'torch.nn.RNN', 'nn.RNN', (['(10)', '(20)', '(2)'], {}), '(10, 20, 2)\n', (216, 227), True, 'import torch.nn as nn\n'), ((248, 269), 'torch.randn', 'torch.randn', (['(5)', '(3)', '(10)'], {}), '(5, 3, 10)\n', (259, 269), False, 'import torch\n'), ((286, 307), 'torch.randn', 'torch.randn', (['(2)', '(3)', '(20)'], {}), '(2, 3, 20)\n', (297, 307), False, 'import torch\n')] |
from datetime import datetime
from yahoofinancials import YahooFinancials
from ewah.constants import EWAHConstants as EC
from ewah.operators.base import EWAHBaseOperator
from ewah.ewah_utils.airflow_utils import datetime_utcnow_with_tz
class EWAHFXOperator(EWAHBaseOperator):
_NAMES = ["fx"]
_ACCEPTED_EXTRACT_STRATEGIES = {
EC.ES_FULL_REFRESH: True,
EC.ES_INCREMENTAL: True,
}
def __init__(
self,
currency_pair, # iterable of length 2
frequency="daily", # daily, weekly, or monthly
*args,
**kwargs
):
        if frequency not in ("daily", "weekly", "monthly"):
raise Exception("Frequency must be one of: daily, weekly, monthly")
        if len(currency_pair) != 2:
raise Exception(
"currency_pair must be iterable of length 2 "
+ "containing the currency pair."
)
# No connection id required for this operator
if kwargs.get("source_conn_id"):
self.log.info(
"source_conn_id is not required for operator! Ignoring argument."
)
kwargs["source_conn_id"] = None
# If incremental, the primary key column is 'date'
if kwargs.get("update_on_columns"):
self.log.info(
"update_on_columns is fixed for this operator. Using the default."
)
kwargs["update_on_columns"] = ["date"]
self.currency_pair = currency_pair
self.frequency = frequency
super().__init__(*args, **kwargs)
def ewah_execute(self, context):
data_from = self.data_from or context["dag"].start_date
data_until = self.data_until or datetime_utcnow_with_tz()
format_str = "%Y-%m-%d"
currency_str = "{0}{1}=X".format(*self.currency_pair)
data = YahooFinancials([currency_str]).get_historical_price_data(
data_from.strftime(format_str),
data_until.strftime(format_str),
self.frequency,
)
self.upload_data(data[currency_str]["prices"])
| [
"ewah.ewah_utils.airflow_utils.datetime_utcnow_with_tz",
"yahoofinancials.YahooFinancials"
] | [((1720, 1745), 'ewah.ewah_utils.airflow_utils.datetime_utcnow_with_tz', 'datetime_utcnow_with_tz', ([], {}), '()\n', (1743, 1745), False, 'from ewah.ewah_utils.airflow_utils import datetime_utcnow_with_tz\n'), ((1856, 1887), 'yahoofinancials.YahooFinancials', 'YahooFinancials', (['[currency_str]'], {}), '([currency_str])\n', (1871, 1887), False, 'from yahoofinancials import YahooFinancials\n')] |
import io
import os
import warnings
warnings.filterwarnings("ignore", "(?s).*MATPLOTLIBDATA.*", category=UserWarning)
import logging
from typing import List
import matplotlib.pyplot as plt
from . import MasterSizerReport as msreport
logger = logging.getLogger(__name__)
class MultipleFilesReport:
# constructor
def __init__(
self,
files_mem: List[io.BytesIO],
files: List[str],
meanType: msreport.DiameterMeanType,
logScale: bool,
number_of_zero_first: int,
number_of_zero_last: int,
custom_plot_dict: dict,
show_labels: bool,
):
self.__files_mem: List[io.BytesIO] = files_mem
self.__files: List[str] = files
self.__number_of_files: int = len(self.__files)
self.__meanType: msreport.DiameterMeanType = meanType
self.__log_scale: bool = logScale
self.__number_of_zero_first: int = number_of_zero_first
self.__number_of_zero_last: int = number_of_zero_last
self.__reporters: List[msreport.MasterSizerReport] = []
self.__labels: List[str] = []
self.__custom_plot_kwargs: dict = custom_plot_dict
self.__show_labels: bool = show_labels
self.__create_reporters()
# public methods
def sizeDistributionPlot(self, output_path: str) -> plt.figure:
# plot
logger.info("sizeDistributionPlot called")
fig, ax = plt.subplots()
ax.set_ylabel("volume fraction (dX) [-]")
ax.grid()
ax.tick_params(axis="y", which="both")
if self.__log_scale:
ax.set_xlabel("log scale - diameter [$\mu m$]")
msreport.MasterSizerReport.formatLogScaleXaxis(ax)
else:
ax.set_xlabel("diameter [$\mu m$]")
for reporter, labelName in zip(self.__reporters, self.__labels):
logger.info('Adding curve of file "{}"'.format(reporter.getInputFile()))
ax.plot(
reporter.getXmeanValues(),
reporter.getYvalues(),
linestyle="--",
# marker="o",
label=labelName,
**self.__custom_plot_kwargs,
)
if self.__show_labels:
ax.legend()
filename = os.path.join(output_path + ".svg")
plt.savefig(filename, dpi=1200)
        logger.info('Saved multiple curves to "{}"'.format(filename))
return fig
# end of plot
def frequencyPlot(self, output_path: str) -> None:
# plot
logger.info("frequencyPlot called")
fig, ax = plt.subplots()
ax.set_ylabel("cumulative distribution (X) [-]")
ax.grid()
ax.tick_params(axis="y", which="both")
if self.__log_scale:
ax.set_xlabel("log scale - diameter [$\mu m$]")
msreport.MasterSizerReport.formatLogScaleXaxis(ax)
else:
ax.set_xlabel("diameter [$\mu m$]")
for reporter, labelName in zip(self.__reporters, self.__labels):
logger.info('Adding curve of file "{}"'.format(reporter.getInputFile()))
ax.plot(
reporter.getXmeanValues(),
reporter.getCumulativeYvalues(),
linestyle="--",
# marker="o",
label=labelName,
)
if self.__show_labels:
ax.legend()
filename = os.path.join(output_path + ".svg")
plt.savefig(filename, dpi=1200)
        logger.info('Saved multiple curves to "{}"'.format(filename))
# end of plot
def setLabels(self, labels: List[str]) -> None:
assert len(labels) == len(self.__reporters)
self.__labels = labels
return
# private methods
def __create_reporters(self) -> None:
for f_mem, f in zip(self.__files_mem, self.__files):
logger.info('Setting up file "{}"'.format(f))
reporter = msreport.MasterSizerReport()
logger.info('Created reporter object for file "{}"'.format(f))
reporter.setXPSfile(f_mem, f)
reporter.setDiameterMeanType(self.__meanType)
reporter.cutFirstZeroPoints(self.__number_of_zero_first, tol=1e-8)
reporter.cutLastZeroPoints(self.__number_of_zero_last, tol=1e-8)
reporter.setLogScale(logscale=self.__log_scale)
reporter.evaluateData()
self.__reporters.append(reporter)
self.__labels.append(reporter.getInputFile())
logger.info('Reporter object for file "{}" setted up'.format(f))
def __check_all_files_exist(self) -> List[str]:
doesnt_exist: List[str] = []
for f in self.__files:
if not os.path.isfile(f):
doesnt_exist.append(f)
return doesnt_exist
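# Minimal usage sketch (not part of the original module; argument values are
# illustrative and the input files are assumed to exist):
#   report = MultipleFilesReport(files_mem, files, meanType, logScale=True,
#                                number_of_zero_first=0, number_of_zero_last=0,
#                                custom_plot_dict={}, show_labels=True)
#   report.setLabels(["run A", "run B"])
#   report.sizeDistributionPlot("dist")  # writes dist.svg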
| [
"logging.getLogger",
"warnings.filterwarnings",
"matplotlib.pyplot.savefig",
"os.path.join",
"os.path.isfile",
"matplotlib.pyplot.subplots"
] | [((37, 123), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""', '"""(?s).*MATPLOTLIBDATA.*"""'], {'category': 'UserWarning'}), "('ignore', '(?s).*MATPLOTLIBDATA.*', category=\n UserWarning)\n", (60, 123), False, 'import warnings\n'), ((246, 273), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (263, 273), False, 'import logging\n'), ((1420, 1434), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (1432, 1434), True, 'import matplotlib.pyplot as plt\n'), ((2259, 2293), 'os.path.join', 'os.path.join', (["(output_path + '.svg')"], {}), "(output_path + '.svg')\n", (2271, 2293), False, 'import os\n'), ((2302, 2333), 'matplotlib.pyplot.savefig', 'plt.savefig', (['filename'], {'dpi': '(1200)'}), '(filename, dpi=1200)\n', (2313, 2333), True, 'import matplotlib.pyplot as plt\n'), ((2581, 2595), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (2593, 2595), True, 'import matplotlib.pyplot as plt\n'), ((3392, 3426), 'os.path.join', 'os.path.join', (["(output_path + '.svg')"], {}), "(output_path + '.svg')\n", (3404, 3426), False, 'import os\n'), ((3435, 3466), 'matplotlib.pyplot.savefig', 'plt.savefig', (['filename'], {'dpi': '(1200)'}), '(filename, dpi=1200)\n', (3446, 3466), True, 'import matplotlib.pyplot as plt\n'), ((4701, 4718), 'os.path.isfile', 'os.path.isfile', (['f'], {}), '(f)\n', (4715, 4718), False, 'import os\n')] |
"""
Abstractions for our instance of a dynamo db table that lives in the global context for each request.
NOTE: It is assumed that this table is pre-made and the table name (and url) are set in the current_app
config object at initialization (with test handled via conftest)
"""
import os
from flask import current_app, g
import boto3
from core.helpers import metrics_collector
# from botocore.exceptions import ClientError
@metrics_collector
def set_dynamo_table(db=None):
"""
place a dynamo table in the global env for this request
NOTE: test env will pass a mocked dynamo
"""
if 'table' not in g: # check is here as test env will stub before the request cycle begins
        if db is None:
current_app.logger.info('setting dynamodb table in the global env')
db = boto3.resource('dynamodb', current_app.config['REGION'], endpoint_url=current_app.config['DB_URL'])
g.table = db.Table(current_app.config['TABLE_NAME'])
@metrics_collector
def get_listings():
"""
Fetch and return all DB listings
:return: dict
TODO: paging? offsets? can we use a list of hashes to filter?
"""
response = g.table.scan()
    if 'Items' in response:  # note the capitalised 'Items' key
current_app.logger.info('returning db query results')
return response['Items']
else:
current_app.logger.info('no items returned in db query')
return {}
@metrics_collector
def get_listing(hash):
"""
Given a listing hash fetch a single listing from dynamo
"""
return g.table.get_item(
Key={
'listing_hash': hash # should already be in the correct format here
}
)
| [
"flask.current_app.logger.info",
"boto3.resource",
"flask.g.table.scan",
"flask.g.table.get_item"
] | [((1165, 1179), 'flask.g.table.scan', 'g.table.scan', ([], {}), '()\n', (1177, 1179), False, 'from flask import current_app, g\n'), ((1554, 1598), 'flask.g.table.get_item', 'g.table.get_item', ([], {'Key': "{'listing_hash': hash}"}), "(Key={'listing_hash': hash})\n", (1570, 1598), False, 'from flask import current_app, g\n'), ((1244, 1297), 'flask.current_app.logger.info', 'current_app.logger.info', (['"""returning db query results"""'], {}), "('returning db query results')\n", (1267, 1297), False, 'from flask import current_app, g\n'), ((1349, 1405), 'flask.current_app.logger.info', 'current_app.logger.info', (['"""no items returned in db query"""'], {}), "('no items returned in db query')\n", (1372, 1405), False, 'from flask import current_app, g\n'), ((726, 793), 'flask.current_app.logger.info', 'current_app.logger.info', (['"""setting dynamodb table in the global env"""'], {}), "('setting dynamodb table in the global env')\n", (749, 793), False, 'from flask import current_app, g\n'), ((811, 915), 'boto3.resource', 'boto3.resource', (['"""dynamodb"""', "current_app.config['REGION']"], {'endpoint_url': "current_app.config['DB_URL']"}), "('dynamodb', current_app.config['REGION'], endpoint_url=\n current_app.config['DB_URL'])\n", (825, 915), False, 'import boto3\n')] |
from db import get_connection, get_from_datamaster
requirements = []
def build():
with get_connection() as con:
cur = con.cursor()
cur.execute("DROP TABLE IF EXISTS TraitTypes")
cur.execute("CREATE TABLE TraitTypes("
"Id INTEGER PRIMARY KEY AUTOINCREMENT, "
"TraitTypeName TEXT)")
trait_types = set([row["TraitTypeName"]
for row in get_from_datamaster('EquipTraits.csv')])
for trait_type in trait_types:
cur.execute("INSERT INTO TraitTypes ("
"TraitTypeName) "
"VALUES (\"{}\")".format(trait_type))
| [
"db.get_connection",
"db.get_from_datamaster"
] | [((94, 110), 'db.get_connection', 'get_connection', ([], {}), '()\n', (108, 110), False, 'from db import get_connection, get_from_datamaster\n'), ((439, 477), 'db.get_from_datamaster', 'get_from_datamaster', (['"""EquipTraits.csv"""'], {}), "('EquipTraits.csv')\n", (458, 477), False, 'from db import get_connection, get_from_datamaster\n')] |
# import dependencies needed to query a SQLite database with SQLAlchemy and return output to Flask as JSON
import numpy as np
import datetime as dt
import sqlalchemy
from sqlalchemy.ext.automap import automap_base
from sqlalchemy.orm import Session
from sqlalchemy import create_engine, func
from flask import Flask, jsonify
# create engine linked to the sqlite file/database
engine = create_engine("sqlite:////Users/danvaldes/Desktop/bootcamp/sqlalchemy-challenge/hawaii.sqlite")
# reflect an existing database into a new model
Base = automap_base()
# reflect the tables into classes
Base.prepare(engine, reflect=True)
# Save references to each table
Measurement = Base.classes.measurement
Station = Base.classes.station
session = Session(engine)
# FLASK APP INITIALIZED
app = Flask(__name__)
@app.route("/")
def Home():
    return (
f"<h3>Available Routes:<hr></h3>"
f"/api/v1.0/precipitation</br>"
f"/api/v1.0/stations</br>"
f"/api/v1.0/tobs</br>"
f"/api/v1.0/start_date</br>"
f"/api/v1.0/start_date/end_date</br>"
)
@app.route("/api/v1.0/precipitation")
def precipitation():
# INITIATE A SESSION, CREATE QUERY, CLOSE SESSION
session = Session(engine)
year_ago = dt.date(2017, 8, 23) - dt.timedelta(days=365)
results = session.query(Measurement.date, Measurement.prcp).\
filter(Measurement.date >= year_ago).\
order_by(Measurement.date).all()
session.close()
prcp_dict = dict(results)
return jsonify(prcp_dict)
@app.route("/api/v1.0/stations")
def stations():
# INITIATE A SESSION, CREATE QUERY TO RETURN A LIST OF STATIONS, CLOSE SESSION
session = Session(engine)
sel = [Measurement.station, Station.name, func.count(Measurement.station)]
# SESSION.QUERY METHOD RETURNS A LIST OF TUPLES
results = session.query(*sel).filter(Measurement.station == Station.station).\
group_by(Measurement.station).\
order_by(func.count(Measurement.station).desc()).all()
session.close()
# LIST(NP.RAVLE(LIST_NAME)) UNRAVELS THE LIST
# station_list = list(np.ravel(results))
return jsonify(results)
@app.route("/api/v1.0/tobs")
def tobs():
# INITIATE A SESSION, CREATE QUERY, CLOSE SESSION
session = Session(engine)
year_ago = dt.date(2017, 8, 23) - dt.timedelta(days=365)
datapoints = session.query(Measurement.tobs).\
filter(Measurement.date >= year_ago).\
filter(Measurement.station == 'USC00519281').all()
session.close()
return jsonify(datapoints)
# DEFINE THE CALC_TEMPS FUNCTION
def calc_temps(start_date, end_date):
    my_data_returned = session.query(func.min(Measurement.tobs), func.avg(Measurement.tobs), func.max(Measurement.tobs)).\
filter(Measurement.date >= start_date).filter(Measurement.date <= end_date).all()
return my_data_returned
@app.route("/api/v1.0/<start>")
def start(start):
session = Session(engine)
    date_to_pass = start
# INLINE FUNCTION TO CALCULATE MIN, AVG, AND MAX. CAN'T CALL CALC_TEMPS BECAUSE IT REQUIRES TWO PARAMS
last_date = session.query(func.min(Measurement.tobs), func.avg(Measurement.tobs), func.max(Measurement.tobs)).\
filter(Measurement.date==date_to_pass).all()
my_date=list(np.ravel(last_date))
session.close()
return jsonify(my_date)
@app.route("/api/v1.0/<start_date>/<end_date>")
def start_end(start_date=None, end_date=None):
temps = calc_temps(start_date, end_date)
return jsonify(temps)
if __name__ == "__main__":
app.run(debug=True)
| [
"sqlalchemy.func.count",
"sqlalchemy.func.min",
"flask.Flask",
"sqlalchemy.ext.automap.automap_base",
"sqlalchemy.create_engine",
"sqlalchemy.orm.Session",
"sqlalchemy.func.max",
"datetime.date",
"sqlalchemy.func.avg",
"numpy.ravel",
"datetime.timedelta",
"flask.jsonify"
] | [((389, 494), 'sqlalchemy.create_engine', 'create_engine', (['"""sqlite:////Users/danvaldes/Desktop/bootcamp/sqlalchemy-challenge/hawaii.sqlite"""'], {}), "(\n    'sqlite:////Users/danvaldes/Desktop/bootcamp/sqlalchemy-challenge/hawaii.sqlite'\n    )\n", (402, 494), False, 'from sqlalchemy import create_engine, func\n'), ((541, 555), 'sqlalchemy.ext.automap.automap_base', 'automap_base', ([], {}), '()\n', (553, 555), False, 'from sqlalchemy.ext.automap import automap_base\n'), ((740, 755), 'sqlalchemy.orm.Session', 'Session', (['engine'], {}), '(engine)\n', (747, 755), False, 'from sqlalchemy.orm import Session\n'), ((787, 802), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (792, 802), False, 'from flask import Flask, jsonify\n'), ((1231, 1246), 'sqlalchemy.orm.Session', 'Session', (['engine'], {}), '(engine)\n', (1238, 1246), False, 'from sqlalchemy.orm import Session\n'), ((1548, 1566), 'flask.jsonify', 'jsonify', (['prcp_dict'], {}), '(prcp_dict)\n', (1555, 1566), False, 'from flask import Flask, jsonify\n'), ((1724, 1739), 'sqlalchemy.orm.Session', 'Session', (['engine'], {}), '(engine)\n', (1731, 1739), False, 'from sqlalchemy.orm import Session\n'), ((2216, 2232), 'flask.jsonify', 'jsonify', (['results'], {}), '(results)\n', (2223, 2232), False, 'from flask import Flask, jsonify\n'), ((2353, 2368), 'sqlalchemy.orm.Session', 'Session', (['engine'], {}), '(engine)\n', (2360, 2368), False, 'from sqlalchemy.orm import Session\n'), ((2638, 2657), 'flask.jsonify', 'jsonify', (['datapoints'], {}), '(datapoints)\n', (2645, 2657), False, 'from flask import Flask, jsonify\n'), ((3075, 3090), 'sqlalchemy.orm.Session', 'Session', (['engine'], {}), '(engine)\n', (3082, 3090), False, 'from sqlalchemy.orm import Session\n'), ((3494, 3510), 'flask.jsonify', 'jsonify', (['my_date'], {}), '(my_date)\n', (3501, 3510), False, 'from flask import Flask, jsonify\n'), ((3683, 3697), 'flask.jsonify', 'jsonify', (['temps'], {}), '(temps)\n', (3690, 3697), False, 'from flask import Flask, jsonify\n'), ((1267, 1287), 'datetime.date', 'dt.date', (['(2017)', '(8)', '(23)'], {}), '(2017, 8, 23)\n', (1274, 1287), True, 'import datetime as dt\n'), ((1290, 1312), 'datetime.timedelta', 'dt.timedelta', ([], {'days': '(365)'}), '(days=365)\n', (1302, 1312), True, 'import datetime as dt\n'), ((1791, 1822), 'sqlalchemy.func.count', 'func.count', (['Measurement.station'], {}), '(Measurement.station)\n', (1801, 1822), False, 'from sqlalchemy import create_engine, func\n'), ((2389, 2409), 'datetime.date', 'dt.date', (['(2017)', '(8)', '(23)'], {}), '(2017, 8, 23)\n', (2396, 2409), True, 'import datetime as dt\n'), ((2412, 2434), 'datetime.timedelta', 'dt.timedelta', ([], {'days': '(365)'}), '(days=365)\n', (2424, 2434), True, 'import datetime as dt\n'), ((3433, 3452), 'numpy.ravel', 'np.ravel', (['last_date'], {}), '(last_date)\n', (3441, 3452), True, 'import numpy as np\n'), ((2025, 2056), 'sqlalchemy.func.count', 'func.count', (['Measurement.station'], {}), '(Measurement.station)\n', (2035, 2056), False, 'from sqlalchemy import create_engine, func\n'), ((3264, 3290), 'sqlalchemy.func.min', 'func.min', (['Measurement.tobs'], {}), '(Measurement.tobs)\n', (3272, 3290), False, 'from sqlalchemy import create_engine, func\n'), ((3292, 3318), 'sqlalchemy.func.avg', 'func.avg', (['Measurement.tobs'], {}), '(Measurement.tobs)\n', (3300, 3318), False, 'from sqlalchemy import create_engine, func\n'), ((3320, 3346), 'sqlalchemy.func.max', 'func.max', (['Measurement.tobs'], {}), '(Measurement.tobs)\n', (3328, 3346), False, 'from sqlalchemy import create_engine, func\n'), ((2780, 2806), 'sqlalchemy.func.min', 'func.min', (['Measurement.tobs'], {}), '(Measurement.tobs)\n', (2788, 2806), False, 'from sqlalchemy import create_engine, func\n'), ((2808, 2834), 'sqlalchemy.func.avg', 'func.avg', (['Measurement.tobs'], {}), '(Measurement.tobs)\n', (2816, 2834), False, 'from sqlalchemy import create_engine, func\n'), ((2836, 2862), 'sqlalchemy.func.max', 'func.max', (['Measurement.tobs'], {}), '(Measurement.tobs)\n', (2844, 2862), False, 'from sqlalchemy import create_engine, func\n')] |
import datetime
import json
import responses
from django.test import TestCase, RequestFactory
from django.contrib.auth.models import User
from django.db.models.signals import post_save
from rest_framework import status
from rest_framework.test import APIClient
from rest_framework.authtoken.models import Token
from rest_hooks.models import model_saved
from hellomama_registration import utils
from registrations.models import (
Source, Registration, SubscriptionRequest, registration_post_save,
fire_created_metric, fire_unique_operator_metric, fire_message_type_metric,
fire_receiver_type_metric, fire_source_metric, fire_language_metric,
fire_state_metric, fire_role_metric)
from .models import (
Change, change_post_save, fire_language_change_metric,
fire_baby_change_metric, fire_loss_change_metric,
fire_message_change_metric)
from .tasks import implement_action
def override_get_today():
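    """Pin 'today' to a fixed date so date-dependent logic is deterministic in tests."""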
return datetime.datetime.strptime("20150817", "%Y%m%d")
class APITestCase(TestCase):
def setUp(self):
self.adminclient = APIClient()
self.normalclient = APIClient()
self.otherclient = APIClient()
utils.get_today = override_get_today
class AuthenticatedAPITestCase(APITestCase):
def _replace_post_save_hooks_change(self):
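        """Detach Change post_save receivers so test fixtures can be
        created without firing hooks or metrics."""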
def has_listeners():
return post_save.has_listeners(Change)
assert has_listeners(), (
"Change model has no post_save listeners. Make sure"
" helpers cleaned up properly in earlier tests.")
post_save.disconnect(receiver=change_post_save,
sender=Change)
post_save.disconnect(receiver=fire_language_change_metric,
sender=Change)
post_save.disconnect(receiver=fire_baby_change_metric,
sender=Change)
post_save.disconnect(receiver=fire_loss_change_metric,
sender=Change)
post_save.disconnect(receiver=fire_message_change_metric,
sender=Change)
post_save.disconnect(receiver=model_saved,
dispatch_uid='instance-saved-hook')
assert not has_listeners(), (
"Change model still has post_save listeners. Make sure"
" helpers cleaned up properly in earlier tests.")
def _restore_post_save_hooks_change(self):
def has_listeners():
return post_save.has_listeners(Change)
assert not has_listeners(), (
"Change model still has post_save listeners. Make sure"
" helpers removed them properly in earlier tests.")
post_save.connect(receiver=change_post_save,
sender=Change)
post_save.connect(receiver=fire_language_change_metric,
sender=Change)
post_save.connect(receiver=fire_baby_change_metric,
sender=Change)
post_save.connect(receiver=fire_loss_change_metric,
sender=Change)
post_save.connect(receiver=fire_message_change_metric,
sender=Change)
post_save.connect(receiver=model_saved,
dispatch_uid='instance-saved-hook')
def _replace_post_save_hooks_registration(self):
def has_listeners():
return post_save.has_listeners(Registration)
assert has_listeners(), (
"Registration model has no post_save listeners. Make sure"
" helpers cleaned up properly in earlier tests.")
post_save.disconnect(receiver=registration_post_save,
sender=Registration)
post_save.disconnect(receiver=fire_created_metric,
sender=Registration)
post_save.disconnect(receiver=fire_source_metric,
sender=Registration)
post_save.disconnect(receiver=fire_unique_operator_metric,
sender=Registration)
post_save.disconnect(receiver=fire_message_type_metric,
sender=Registration)
post_save.disconnect(receiver=fire_receiver_type_metric,
sender=Registration)
post_save.disconnect(receiver=fire_language_metric,
sender=Registration)
post_save.disconnect(receiver=fire_state_metric,
sender=Registration)
post_save.disconnect(receiver=fire_role_metric,
sender=Registration)
post_save.disconnect(receiver=model_saved,
dispatch_uid='instance-saved-hook')
assert not has_listeners(), (
"Registration model still has post_save listeners. Make sure"
" helpers cleaned up properly in earlier tests.")
def _restore_post_save_hooks_registration(self):
def has_listeners():
return post_save.has_listeners(Registration)
post_save.connect(receiver=registration_post_save,
sender=Registration)
post_save.connect(receiver=fire_created_metric,
sender=Registration)
post_save.connect(receiver=fire_source_metric,
sender=Registration)
post_save.connect(receiver=fire_unique_operator_metric,
sender=Registration)
post_save.connect(receiver=fire_language_metric,
sender=Registration)
post_save.connect(receiver=fire_state_metric,
sender=Registration)
post_save.connect(receiver=fire_role_metric,
sender=Registration)
post_save.connect(receiver=model_saved,
dispatch_uid='instance-saved-hook')
def make_source_adminuser(self):
data = {
"name": "test_ussd_source_adminuser",
"authority": "hw_full",
"user": User.objects.get(username='testadminuser')
}
return Source.objects.create(**data)
def make_source_normaluser(self):
data = {
"name": "test_voice_source_normaluser",
"authority": "patient",
"user": User.objects.get(username='testnormaluser')
}
return Source.objects.create(**data)
def make_change_adminuser(self):
data = {
"mother_id": "846877e6-afaa-43de-acb1-09f61ad4de99",
"action": "change_language",
"data": {"test_adminuser_change": "test_adminuser_changed"},
"source": self.make_source_adminuser()
}
return Change.objects.create(**data)
def make_change_normaluser(self):
data = {
"mother_id": "846877e6-afaa-43de-acb1-09f61ad4de99",
"action": "change_language",
"data": {"test_normaluser_change": "test_normaluser_changed"},
"source": self.make_source_normaluser()
}
return Change.objects.create(**data)
def make_registration_mother_only(self):
data = {
"stage": "prebirth",
"mother_id": "<PASSWORD>-<PASSWORD>-acb1-<PASSWORD>",
"data": {
"receiver_id": "846877e6-afaa-43de-acb1-09f61ad4de99",
"operator_id": "nurse000-6a07-4377-a4f6-c0485ccba234",
"language": "eng_NG",
"msg_type": "text",
"gravida": "1",
"last_period_date": "20150202",
"msg_receiver": "mother_only",
# data added during validation
"reg_type": "hw_pre",
"preg_week": "15"
},
"source": self.make_source_adminuser()
}
return Registration.objects.create(**data)
def make_registration_friend_only(self):
data = {
"stage": "prebirth",
"mother_id": "<PASSWORD>-af<PASSWORD>-acb1-<PASSWORD>",
"data": {
"receiver_id": "629eaf3c-04e5-4404-8a27-3ab3b811326a",
"operator_id": "nurse000-6a07-4377-a4f6-c0485ccba234",
"language": "pcm_NG",
"msg_type": "text",
"gravida": "2",
"last_period_date": "20150302",
"msg_receiver": "friend_only",
# data added during validation
"reg_type": "hw_pre",
"preg_week": "11",
},
"source": self.make_source_adminuser()
}
return Registration.objects.create(**data)
def setUp(self):
super(AuthenticatedAPITestCase, self).setUp()
self._replace_post_save_hooks_change()
self._replace_post_save_hooks_registration()
# Normal User setup
self.normalusername = 'testnormaluser'
self.normalpassword = '<PASSWORD>'
self.normaluser = User.objects.create_user(
self.normalusername,
'<EMAIL>',
self.normalpassword)
normaltoken = Token.objects.create(user=self.normaluser)
self.normaltoken = normaltoken.key
self.normalclient.credentials(
HTTP_AUTHORIZATION='Token ' + self.normaltoken)
# Admin User setup
self.adminusername = 'testadminuser'
self.adminpassword = '<PASSWORD>'
self.adminuser = User.objects.create_superuser(
self.adminusername,
'<EMAIL>',
self.adminpassword)
admintoken = Token.objects.create(user=self.adminuser)
self.admintoken = admintoken.key
self.adminclient.credentials(
HTTP_AUTHORIZATION='Token ' + self.admintoken)
def tearDown(self):
self._restore_post_save_hooks_change()
self._restore_post_save_hooks_registration()
class TestLogin(AuthenticatedAPITestCase):
def test_login_normaluser(self):
""" Test that normaluser can login successfully
"""
# Setup
post_auth = {"username": "testnormaluser",
"password": "<PASSWORD>"}
# Execute
request = self.client.post(
'/api/token-auth/', post_auth)
token = request.data.get('token', None)
# Check
self.assertIsNotNone(
token, "Could not receive authentication token on login post.")
self.assertEqual(
request.status_code, 200,
"Status code on /api/token-auth was %s (should be 200)."
% request.status_code)
def test_login_adminuser(self):
""" Test that adminuser can login successfully
"""
# Setup
post_auth = {"username": "testadminuser",
"password": "<PASSWORD>"}
# Execute
request = self.client.post(
'/api/token-auth/', post_auth)
token = request.data.get('token', None)
# Check
self.assertIsNotNone(
token, "Could not receive authentication token on login post.")
self.assertEqual(
request.status_code, 200,
"Status code on /api/token-auth was %s (should be 200)."
% request.status_code)
def test_login_adminuser_wrong_password(self):
""" Test that adminuser cannot log in with wrong password
"""
# Setup
post_auth = {"username": "testadminuser",
"password": "<PASSWORD>"}
# Execute
request = self.client.post(
'/api/token-auth/', post_auth)
token = request.data.get('token', None)
# Check
self.assertIsNone(
token, "Could not receive authentication token on login post.")
self.assertEqual(request.status_code, status.HTTP_400_BAD_REQUEST)
def test_login_otheruser(self):
""" Test that an unknown user cannot log in
"""
# Setup
post_auth = {"username": "testotheruser",
"password": "<PASSWORD>"}
# Execute
request = self.otherclient.post(
'/api/token-auth/', post_auth)
token = request.data.get('token', None)
# Check
self.assertIsNone(
token, "Could not receive authentication token on login post.")
self.assertEqual(request.status_code, status.HTTP_400_BAD_REQUEST)
class TestChangeAPI(AuthenticatedAPITestCase):
def test_get_change_adminuser(self):
# Setup
change = self.make_change_adminuser()
# Execute
response = self.adminclient.get(
'/api/v1/change/%s/' % change.id,
content_type='application/json')
# Check
# Currently only posts are allowed
self.assertEqual(response.status_code,
status.HTTP_405_METHOD_NOT_ALLOWED)
def test_get_change_normaluser(self):
# Setup
change = self.make_change_normaluser()
# Execute
response = self.normalclient.get(
'/api/v1/change/%s/' % change.id,
content_type='application/json')
# Check
# Currently only posts are allowed
self.assertEqual(response.status_code,
status.HTTP_405_METHOD_NOT_ALLOWED)
def test_create_change_adminuser(self):
# Setup
self.make_source_adminuser()
post_data = {
"mother_id": "846877e6-afaa-43de-acb1-09f61ad4de99",
"action": "change_language",
"data": {"test_key1": "test_value1"}
}
# Execute
response = self.adminclient.post('/api/v1/change/',
json.dumps(post_data),
content_type='application/json')
# Check
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
d = Change.objects.last()
self.assertEqual(d.source.name, 'test_ussd_source_adminuser')
self.assertEqual(d.action, 'change_language')
self.assertEqual(d.validated, False)
self.assertEqual(d.data, {"test_key1": "test_value1"})
self.assertEqual(d.created_by, self.adminuser)
def test_create_change_normaluser(self):
# Setup
self.make_source_normaluser()
post_data = {
"mother_id": "846877e6-afaa-43de-acb1-09f61ad4de99",
"action": "change_language",
"data": {"test_key1": "test_value1"}
}
# Execute
response = self.normalclient.post('/api/v1/change/',
json.dumps(post_data),
content_type='application/json')
# Check
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
d = Change.objects.last()
self.assertEqual(d.source.name, 'test_voice_source_normaluser')
self.assertEqual(d.action, 'change_language')
self.assertEqual(d.validated, False)
self.assertEqual(d.data, {"test_key1": "test_value1"})
def test_create_change_set_readonly_field(self):
# Setup
self.make_source_adminuser()
post_data = {
"mother_id": "846877e6-afaa-43de-acb1-09f61ad4de99",
"action": "change_language",
"data": {"test_key1": "test_value1"},
"validated": True
}
# Execute
response = self.adminclient.post('/api/v1/change/',
json.dumps(post_data),
content_type='application/json')
# Check
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
d = Change.objects.last()
self.assertEqual(d.source.name, 'test_ussd_source_adminuser')
self.assertEqual(d.action, 'change_language')
self.assertEqual(d.validated, False) # Should ignore True post_data
self.assertEqual(d.data, {"test_key1": "test_value1"})
class TestChangeListAPI(AuthenticatedAPITestCase):
def test_list_changes(self):
# Setup
change1 = self.make_change_adminuser()
change2 = self.make_change_normaluser()
change3 = self.make_change_normaluser()
# Execute
response = self.adminclient.get(
'/api/v1/changes/',
content_type='application/json')
# Check
self.assertEqual(response.status_code, status.HTTP_200_OK)
body = response.json()
self.assertEqual(len(body["results"]), 2)
self.assertEqual(body["results"][0]["id"], str(change3.id))
self.assertEqual(body["results"][1]["id"], str(change2.id))
self.assertIsNone(body["previous"])
self.assertIsNotNone(body["next"])
# Check pagination
body = self.adminclient.get(body["next"]).json()
self.assertEqual(len(body["results"]), 1)
self.assertEqual(body["results"][0]["id"], str(change1.id))
self.assertIsNotNone(body["previous"])
self.assertIsNone(body["next"])
body = self.adminclient.get(body["previous"]).json()
self.assertEqual(len(body["results"]), 2)
self.assertEqual(body["results"][0]["id"], str(change3.id))
self.assertEqual(body["results"][1]["id"], str(change2.id))
self.assertIsNone(body["previous"])
self.assertIsNotNone(body["next"])
def test_list_changes_filtered(self):
# Setup
self.make_change_adminuser()
change2 = self.make_change_normaluser()
# Execute
response = self.adminclient.get(
'/api/v1/changes/?source=%s' % change2.source.id,
content_type='application/json')
# Check
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(len(response.data["results"]), 1)
result = response.data["results"][0]
self.assertEqual(result["id"], str(change2.id))
class TestRegistrationCreation(AuthenticatedAPITestCase):
def test_make_registration_mother_only(self):
# Setup
# Execute
self.make_registration_mother_only()
# Test
d = Registration.objects.last()
self.assertEqual(d.mother_id, "846877e6-afaa-43de-acb1-09f61ad4de99")
self.assertEqual(d.data["msg_receiver"], "mother_only")
class TestChangeMessaging(AuthenticatedAPITestCase):
@responses.activate
def test_prebirth_text_to_audio_week28_new_short_name(self):
# Setup
# make change object
change_data = {
"mother_id": "846877e6-afaa-43de-acb1-09f61ad4de99",
"action": "change_messaging",
"data": {
"new_short_name": "prebirth.mother.audio.10_42.tue_thu.9_11",
"new_language": "ibo_NG"
},
"source": self.make_source_adminuser()
}
change = Change.objects.create(**change_data)
# mock get subscription request
subscription_id = "07f4d95c-ad78-4bf1-8779-c47b428e89d0"
query_string = '?active=True&identity=%s' % change_data["mother_id"]
responses.add(
responses.GET,
'http://localhost:8005/api/v1/subscriptions/%s' % query_string,
json={
"next": None,
"previous": None,
"results": [{
"id": subscription_id,
"identity": change_data["mother_id"],
"active": True,
"lang": "eng_NG",
"next_sequence_number": 54,
"messageset": 1,
"schedule": 1
}],
},
status=200, content_type='application/json',
match_querystring=True
)
# mock current messageset lookup
responses.add(
responses.GET,
'http://localhost:8005/api/v1/messageset/1/',
json={
"id": 1,
"short_name": 'prebirth.mother.text.10_42',
"default_schedule": 1
},
status=200, content_type='application/json',
match_querystring=True
)
# mock schedule 1 lookup
responses.add(
responses.GET,
'http://localhost:8005/api/v1/schedule/1/',
json={"id": 1, "day_of_week": "1,3,5"},
status=200, content_type='application/json',
)
# mock patch subscription request
responses.add(
responses.PATCH,
'http://localhost:8005/api/v1/subscriptions/%s/' % subscription_id,
json={"active": False},
status=200, content_type='application/json',
)
# mock messageset via shortname lookup
query_string = '?short_name=prebirth.mother.audio.10_42.tue_thu.9_11'
responses.add(
responses.GET,
'http://localhost:8005/api/v1/messageset/%s' % query_string,
json={
"next": None,
"previous": None,
"results": [{
"id": 4,
"short_name": 'prebirth.mother.audio.10_42.tue_thu.9_11',
"default_schedule": 6
}]
},
status=200, content_type='application/json',
match_querystring=True
)
# mock schedule 6 lookup
responses.add(
responses.GET,
'http://localhost:8005/api/v1/schedule/6/',
json={"id": 6, "day_of_week": "2,4"},
status=200, content_type='application/json',
)
# Execute
result = implement_action.apply_async(args=[change.id])
# Check
self.assertEqual(result.get(), "Change messaging completed")
d = SubscriptionRequest.objects.last()
self.assertEqual(d.identity, "846877e6-afaa-43de-acb1-09f61ad4de99")
self.assertEqual(d.messageset, 4)
self.assertEqual(d.next_sequence_number, 36) # week 28 - 18*2
self.assertEqual(d.lang, "ibo_NG")
self.assertEqual(d.schedule, 6)
@responses.activate
def test_prebirth_text_to_text_new_short_name(self):
# Setup
# make change object
change_data = {
"mother_id": "846877e6-afaa-43de-acb1-09f61ad4de99",
"action": "change_messaging",
"data": {
"new_short_name": "prebirth.mother.text.0_9",
"new_language": "ibo_NG"
},
"source": self.make_source_adminuser()
}
change = Change.objects.create(**change_data)
# mock get subscription request
subscription_id = "07f4d95c-ad78-4bf1-8779-c47b428e89d0"
query_string = '?active=True&identity=%s' % change_data["mother_id"]
responses.add(
responses.GET,
'http://localhost:8005/api/v1/subscriptions/%s' % query_string,
json={
"next": None,
"previous": None,
"results": [{
"id": subscription_id,
"identity": change_data["mother_id"],
"active": True,
"lang": "eng_NG",
"next_sequence_number": 10,
"messageset": 1,
"schedule": 1
}],
},
status=200, content_type='application/json',
match_querystring=True
)
# mock current messageset lookup
responses.add(
responses.GET,
'http://localhost:8005/api/v1/messageset/1/',
json={
"id": 1,
"short_name": 'prebirth.mother.text.10_42',
"default_schedule": 1
},
status=200, content_type='application/json',
match_querystring=True
)
# mock schedule 1 lookup
responses.add(
responses.GET,
'http://localhost:8005/api/v1/schedule/1/',
json={"id": 1, "day_of_week": "1,3,5"},
status=200, content_type='application/json',
)
# mock patch subscription request
responses.add(
responses.PATCH,
'http://localhost:8005/api/v1/subscriptions/%s/' % subscription_id,
json={"active": False},
status=200, content_type='application/json',
)
# mock messageset via shortname lookup
query_string = '?short_name=prebirth.mother.text.0_9'
responses.add(
responses.GET,
'http://localhost:8005/api/v1/messageset/%s' % query_string,
json={
"next": None,
"previous": None,
"results": [{
"id": 4,
"short_name": 'prebirth.mother.text.0_9',
"default_schedule": 1
}]
},
status=200, content_type='application/json',
match_querystring=True
)
# Execute
result = implement_action.apply_async(args=[change.id])
# Check
self.assertEqual(result.get(), "Change messaging completed")
d = SubscriptionRequest.objects.last()
self.assertEqual(d.identity, "846877e6-afaa-43de-acb1-09f61ad4de99")
self.assertEqual(d.messageset, 4)
self.assertEqual(d.next_sequence_number, 10)
self.assertEqual(d.lang, "ibo_NG")
self.assertEqual(d.schedule, 1)
@responses.activate
def test_prebirth_text_to_audio_week28(self):
# Setup
# make change object
change_data = {
"mother_id": "846877e6-afaa-43de-acb1-09f61ad4de99",
"action": "change_messaging",
"data": {
"msg_type": "audio",
"voice_days": "tue_thu",
"voice_times": "9_11"
},
"source": self.make_source_adminuser()
}
change = Change.objects.create(**change_data)
# mock get subscription request
subscription_id = "07f4d95c-ad78-4bf1-8779-c47b428e89d0"
query_string = '?active=True&identity=%s' % change_data["mother_id"]
responses.add(
responses.GET,
'http://localhost:8005/api/v1/subscriptions/%s' % query_string,
json={
"next": None,
"previous": None,
"results": [{
"id": subscription_id,
"identity": change_data["mother_id"],
"active": True,
"lang": "eng_NG",
"next_sequence_number": 54,
"messageset": 1,
"schedule": 1
}],
},
status=200, content_type='application/json',
match_querystring=True
)
# mock current messageset lookup
responses.add(
responses.GET,
'http://localhost:8005/api/v1/messageset/1/',
json={
"id": 1,
"short_name": 'prebirth.mother.text.10_42',
"default_schedule": 1
},
status=200, content_type='application/json',
match_querystring=True
)
# mock schedule 1 lookup
responses.add(
responses.GET,
'http://localhost:8005/api/v1/schedule/1/',
json={"id": 1, "day_of_week": "1,3,5"},
status=200, content_type='application/json',
)
# mock patch subscription request
responses.add(
responses.PATCH,
'http://localhost:8005/api/v1/subscriptions/%s/' % subscription_id,
json={"active": False},
status=200, content_type='application/json',
)
# mock messageset via shortname lookup
query_string = '?short_name=prebirth.mother.audio.10_42.tue_thu.9_11'
responses.add(
responses.GET,
'http://localhost:8005/api/v1/messageset/%s' % query_string,
json={
"next": None,
"previous": None,
"results": [{
"id": 4,
"short_name": 'prebirth.mother.audio.10_42.tue_thu.9_11',
"default_schedule": 6
}]
},
status=200, content_type='application/json',
match_querystring=True
)
# mock schedule 6 lookup
responses.add(
responses.GET,
'http://localhost:8005/api/v1/schedule/6/',
json={"id": 6, "day_of_week": "2,4"},
status=200, content_type='application/json',
)
# Execute
result = implement_action.apply_async(args=[change.id])
# Check
self.assertEqual(result.get(), "Change messaging completed")
d = SubscriptionRequest.objects.last()
self.assertEqual(d.identity, "846877e6-afaa-43de-acb1-09f61ad4de99")
self.assertEqual(d.messageset, 4)
self.assertEqual(d.next_sequence_number, 36) # week 28 - 18*2
self.assertEqual(d.lang, "eng_NG")
self.assertEqual(d.schedule, 6)
@responses.activate
def test_postbirth_text_to_audio_week12(self):
# Setup
# make change object
change_data = {
"mother_id": "846877e6-afaa-43de-acb1-09f61ad4de99",
"action": "change_messaging",
"data": {
"msg_type": "audio",
"voice_days": "mon_wed",
"voice_times": "9_11"
},
"source": self.make_source_adminuser()
}
change = Change.objects.create(**change_data)
# mock get subscription request
subscription_id = "07f4d95c-ad78-4bf1-8779-c47b428e89d0"
query_string = '?active=True&identity=%s' % change_data["mother_id"]
responses.add(
responses.GET,
'http://localhost:8005/api/v1/subscriptions/%s' % query_string,
json={
"next": None,
"previous": None,
"results": [{
"id": subscription_id,
"identity": change_data["mother_id"],
"active": True,
"lang": "eng_NG",
"next_sequence_number": 36,
"messageset": 7,
"schedule": 1
}],
},
status=200, content_type='application/json',
match_querystring=True
)
# mock current messageset lookup
responses.add(
responses.GET,
'http://localhost:8005/api/v1/messageset/7/',
json={
"id": 7,
"short_name": 'postbirth.mother.text.0_12',
"default_schedule": 1
},
status=200, content_type='application/json',
match_querystring=True
)
# mock schedule 1 lookup
responses.add(
responses.GET,
'http://localhost:8005/api/v1/schedule/1/',
json={"id": 1, "day_of_week": "1,3,5"},
status=200, content_type='application/json',
)
# mock patch subscription request
responses.add(
responses.PATCH,
'http://localhost:8005/api/v1/subscriptions/%s/' % subscription_id,
json={"active": False},
status=200, content_type='application/json',
)
# mock new messageset via shortname lookup
query_string = '?short_name=postbirth.mother.audio.0_12.mon_wed.9_11'
responses.add(
responses.GET,
'http://localhost:8005/api/v1/messageset/%s' % query_string,
json={
"next": None,
"previous": None,
"results": [{
"id": 9,
"short_name": 'postbirth.mother.audio.0_12.mon_wed.9_11',
"default_schedule": 4
}]
},
status=200, content_type='application/json',
match_querystring=True
)
# mock schedule 4 lookup
responses.add(
responses.GET,
'http://localhost:8005/api/v1/schedule/4/',
json={"id": 4, "day_of_week": "1,3"},
status=200, content_type='application/json',
)
# Execute
result = implement_action.apply_async(args=[change.id])
# Check
self.assertEqual(result.get(), "Change messaging completed")
d = SubscriptionRequest.objects.last()
self.assertEqual(d.messageset, 9)
self.assertEqual(d.next_sequence_number, 24) # week 12 - 12*2
self.assertEqual(d.schedule, 4)
@responses.activate
def test_postbirth_text_to_audio_week13(self):
# Setup
# make change object
change_data = {
"mother_id": "846877e6-afaa-43de-acb1-09f61ad4de99",
"action": "change_messaging",
"data": {
"msg_type": "audio",
"voice_days": "mon_wed",
"voice_times": "9_11"
},
"source": self.make_source_adminuser()
}
change = Change.objects.create(**change_data)
# mock get subscription request
subscription_id = "07f4d95c-ad78-4bf1-8779-c47b428e89d0"
query_string = '?active=True&identity=%s' % change_data["mother_id"]
responses.add(
responses.GET,
'http://localhost:8005/api/v1/subscriptions/%s' % query_string,
json={
"next": None,
"previous": None,
"results": [{
"id": subscription_id,
"identity": change_data["mother_id"],
"active": True,
"lang": "eng_NG",
"next_sequence_number": 2,
"messageset": 8,
"schedule": 2
}],
},
status=200, content_type='application/json',
match_querystring=True
)
# mock current messageset lookup
responses.add(
responses.GET,
'http://localhost:8005/api/v1/messageset/8/',
json={
"id": 8,
"short_name": 'postbirth.mother.text.13_52',
"default_schedule": 2
},
status=200, content_type='application/json',
match_querystring=True
)
# mock schedule 2 lookup
responses.add(
responses.GET,
'http://localhost:8005/api/v1/schedule/2/',
json={"id": 2, "day_of_week": "2,4"},
status=200, content_type='application/json',
)
# mock patch subscription request
responses.add(
responses.PATCH,
'http://localhost:8005/api/v1/subscriptions/%s/' % subscription_id,
json={"active": False},
status=200, content_type='application/json',
)
# mock new messageset via shortname lookup
query_string = '?short_name=postbirth.mother.audio.13_52.mon_wed.9_11'
responses.add(
responses.GET,
'http://localhost:8005/api/v1/messageset/%s' % query_string,
json={
"next": None,
"previous": None,
"results": [{
"id": 13,
"short_name": 'postbirth.mother.audio.13_52.mon_wed.9_11',
"default_schedule": 8
}]
},
status=200, content_type='application/json',
match_querystring=True
)
# mock schedule 8 lookup
responses.add(
responses.GET,
'http://localhost:8005/api/v1/schedule/8/',
json={"id": 8, "day_of_week": "3"},
status=200, content_type='application/json',
)
# Execute
result = implement_action.apply_async(args=[change.id])
# Check
self.assertEqual(result.get(), "Change messaging completed")
d = SubscriptionRequest.objects.last()
self.assertEqual(d.messageset, 13)
        # week 13: 1 week into the set * 1 msg per week
        self.assertEqual(d.next_sequence_number, 1)
self.assertEqual(d.schedule, 8)
@responses.activate
def test_postbirth_text_to_audio_week14(self):
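        """
        Changing messaging from text to audio for a mother in week 14 of
        the postbirth.mother.text.13_52 set should deactivate that
        subscription and create a SubscriptionRequest for
        postbirth.mother.audio.13_52.mon_wed.9_11 at the recalculated
        sequence number.
        """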
# Setup
# make change object
change_data = {
"mother_id": "846877e6-afaa-43de-acb1-09f61ad4de99",
"action": "change_messaging",
"data": {
"msg_type": "audio",
"voice_days": "mon_wed",
"voice_times": "9_11"
},
"source": self.make_source_adminuser()
}
change = Change.objects.create(**change_data)
# mock get subscription request
subscription_id = "07f4d95c-ad78-4bf1-8779-c47b428e89d0"
query_string = '?active=True&identity=%s' % change_data["mother_id"]
responses.add(
responses.GET,
'http://localhost:8005/api/v1/subscriptions/%s' % query_string,
json={
"next": None,
"previous": None,
"results": [{
"id": subscription_id,
"identity": change_data["mother_id"],
"active": True,
"lang": "eng_NG",
"next_sequence_number": 4,
"messageset": 8,
"schedule": 2
}],
},
status=200, content_type='application/json',
match_querystring=True
)
# mock current messageset lookup
responses.add(
responses.GET,
'http://localhost:8005/api/v1/messageset/8/',
json={
"id": 8,
"short_name": 'postbirth.mother.text.13_52',
"default_schedule": 2
},
status=200, content_type='application/json',
match_querystring=True
)
# mock schedule 2 lookup
responses.add(
responses.GET,
'http://localhost:8005/api/v1/schedule/2/',
json={"id": 2, "day_of_week": "2,4"},
status=200, content_type='application/json',
)
# mock patch subscription request
responses.add(
responses.PATCH,
'http://localhost:8005/api/v1/subscriptions/%s/' % subscription_id,
json={"active": False},
status=200, content_type='application/json',
)
# mock new messageset via shortname lookup
query_string = '?short_name=postbirth.mother.audio.13_52.mon_wed.9_11'
responses.add(
responses.GET,
'http://localhost:8005/api/v1/messageset/%s' % query_string,
json={
"next": None,
"previous": None,
"results": [{
"id": 13,
"short_name": 'postbirth.mother.audio.13_52.mon_wed.9_11',
"default_schedule": 8
}]
},
status=200, content_type='application/json',
match_querystring=True
)
# mock schedule 8 lookup
responses.add(
responses.GET,
'http://localhost:8005/api/v1/schedule/8/',
json={"id": 8, "day_of_week": "3"},
status=200, content_type='application/json',
)
# Execute
result = implement_action.apply_async(args=[change.id])
# Check
self.assertEqual(result.get(), "Change messaging completed")
d = SubscriptionRequest.objects.last()
self.assertEqual(d.messageset, 13)
        # week 14: 2 weeks into the set * 1 msg per week
        self.assertEqual(d.next_sequence_number, 2)
self.assertEqual(d.schedule, 8)
@responses.activate
def test_miscarriage_text_to_audio_week1(self):
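        """
        Changing messaging from text to audio for a mother in week 1 of
        the miscarriage.mother.text.0_2 set should deactivate that
        subscription and create a SubscriptionRequest for
        miscarriage.mother.audio.0_2.mon_wed.9_11.
        """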
# Setup
# make change object
change_data = {
"mother_id": "846877e6-afaa-43de-acb1-09f61ad4de99",
"action": "change_messaging",
"data": {
"msg_type": "audio",
"voice_days": "mon_wed",
"voice_times": "9_11"
},
"source": self.make_source_adminuser()
}
change = Change.objects.create(**change_data)
# mock get current subscription request
subscription_id = "07f4d95c-ad78-4bf1-8779-c47b428e89d0"
query_string = '?active=True&identity=%s' % change_data["mother_id"]
responses.add(
responses.GET,
'http://localhost:8005/api/v1/subscriptions/%s' % query_string,
json={
"next": None,
"previous": None,
"results": [{
"id": subscription_id,
"identity": change_data["mother_id"],
"active": True,
"lang": "eng_NG",
"next_sequence_number": 1,
"messageset": 18,
"schedule": 1
}],
},
status=200, content_type='application/json',
match_querystring=True
)
# mock current messageset lookup
responses.add(
responses.GET,
'http://localhost:8005/api/v1/messageset/18/',
json={
"id": 18,
"short_name": 'miscarriage.mother.text.0_2',
"default_schedule": 1
},
status=200, content_type='application/json',
match_querystring=True
)
# mock schedule 1 lookup
responses.add(
responses.GET,
'http://localhost:8005/api/v1/schedule/1/',
json={"id": 1, "day_of_week": "1,3,5"},
status=200, content_type='application/json',
)
# mock patch subscription request
responses.add(
responses.PATCH,
'http://localhost:8005/api/v1/subscriptions/%s/' % subscription_id,
json={"active": False},
status=200, content_type='application/json',
)
# mock new messageset via shortname lookup
query_string = '?short_name=miscarriage.mother.audio.0_2.mon_wed.9_11'
responses.add(
responses.GET,
'http://localhost:8005/api/v1/messageset/%s' % query_string,
json={
"next": None,
"previous": None,
"results": [{
"id": 19,
"short_name": 'miscarriage.mother.audio.0_2.mon_wed.9_11',
"default_schedule": 4
}]
},
status=200, content_type='application/json',
match_querystring=True
)
# mock schedule 4 lookup
responses.add(
responses.GET,
'http://localhost:8005/api/v1/schedule/4/',
json={"id": 4, "day_of_week": "1,3"},
status=200, content_type='application/json',
)
# Execute
result = implement_action.apply_async(args=[change.id])
# Check
self.assertEqual(result.get(), "Change messaging completed")
d = SubscriptionRequest.objects.last()
self.assertEqual(d.messageset, 19)
self.assertEqual(d.next_sequence_number, 1)
self.assertEqual(d.schedule, 4)
@responses.activate
def test_miscarriage_text_to_audio_week2(self):
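        """
        Changing messaging from text to audio for a mother in week 2 of
        the miscarriage.mother.text.0_2 set should deactivate that
        subscription and create a SubscriptionRequest for
        miscarriage.mother.audio.0_2.mon_wed.9_11 at the recalculated
        sequence number.
        """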
# Setup
# make change object
change_data = {
"mother_id": "846877e6-afaa-43de-acb1-09f61ad4de99",
"action": "change_messaging",
"data": {
"msg_type": "audio",
"voice_days": "mon_wed",
"voice_times": "9_11"
},
"source": self.make_source_adminuser()
}
change = Change.objects.create(**change_data)
# mock get current subscription request
subscription_id = "07f4d95c-ad78-4bf1-8779-c47b428e89d0"
query_string = '?active=True&identity=%s' % change_data["mother_id"]
responses.add(
responses.GET,
'http://localhost:8005/api/v1/subscriptions/%s' % query_string,
json={
"next": None,
"previous": None,
"results": [{
"id": subscription_id,
"identity": change_data["mother_id"],
"active": True,
"lang": "eng_NG",
"next_sequence_number": 3,
"messageset": 18,
"schedule": 1
}],
},
status=200, content_type='application/json',
match_querystring=True
)
# mock current messageset lookup
responses.add(
responses.GET,
'http://localhost:8005/api/v1/messageset/18/',
json={
"id": 18,
"short_name": 'miscarriage.mother.text.0_2',
"default_schedule": 1
},
status=200, content_type='application/json',
match_querystring=True
)
# mock schedule 1 lookup
responses.add(
responses.GET,
'http://localhost:8005/api/v1/schedule/1/',
json={"id": 1, "day_of_week": "1,3,5"},
status=200, content_type='application/json',
)
# mock patch subscription request
responses.add(
responses.PATCH,
'http://localhost:8005/api/v1/subscriptions/%s/' % subscription_id,
json={"active": False},
status=200, content_type='application/json',
)
# mock new messageset via shortname lookup
query_string = '?short_name=miscarriage.mother.audio.0_2.mon_wed.9_11'
responses.add(
responses.GET,
'http://localhost:8005/api/v1/messageset/%s' % query_string,
json={
"next": None,
"previous": None,
"results": [{
"id": 19,
"short_name": 'miscarriage.mother.audio.0_2.mon_wed.9_11',
"default_schedule": 4
}]
},
status=200, content_type='application/json',
match_querystring=True
)
# mock schedule 4 lookup
responses.add(
responses.GET,
'http://localhost:8005/api/v1/schedule/4/',
json={"id": 4, "day_of_week": "1,3"},
status=200, content_type='application/json',
)
# Execute
result = implement_action.apply_async(args=[change.id])
# Check
self.assertEqual(result.get(), "Change messaging completed")
d = SubscriptionRequest.objects.last()
self.assertEqual(d.messageset, 19)
self.assertEqual(d.next_sequence_number, 2)
self.assertEqual(d.schedule, 4)
@responses.activate
def test_prebirth_audio_to_text_week28(self):
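        """
        Changing messaging from audio to text for a mother in week 28 of
        the prebirth.mother.audio.10_42.mon_wed.9_11 set should deactivate
        that subscription and create a SubscriptionRequest for
        prebirth.mother.text.10_42 at the recalculated sequence number.
        """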
# Setup
# make change object
change_data = {
"mother_id": "846877e6-afaa-43de-acb1-09f61ad4de99",
"action": "change_messaging",
"data": {
"msg_type": "text",
"voice_days": None,
"voice_times": None
},
"source": self.make_source_adminuser()
}
change = Change.objects.create(**change_data)
# mock get subscription request
subscription_id = "07f4d95c-ad78-4bf1-8779-c47b428e89d0"
query_string = '?active=True&identity=%s' % change_data["mother_id"]
responses.add(
responses.GET,
'http://localhost:8005/api/v1/subscriptions/%s' % query_string,
json={
"next": None,
"previous": None,
"results": [{
"id": subscription_id,
"identity": change_data["mother_id"],
"active": True,
"lang": "eng_NG",
"next_sequence_number": 36,
"messageset": 2,
"schedule": 4
}],
},
status=200, content_type='application/json',
match_querystring=True
)
# mock current messageset lookup
responses.add(
responses.GET,
'http://localhost:8005/api/v1/messageset/2/',
json={
"id": 2,
"short_name": 'prebirth.mother.audio.10_42.mon_wed.9_11',
"default_schedule": 4
},
status=200, content_type='application/json',
match_querystring=True
)
# mock schedule 4 lookup
responses.add(
responses.GET,
'http://localhost:8005/api/v1/schedule/4/',
json={"id": 4, "day_of_week": "1,3"},
status=200, content_type='application/json',
)
# mock patch subscription request
responses.add(
responses.PATCH,
'http://localhost:8005/api/v1/subscriptions/%s/' % subscription_id,
json={"active": False},
status=200, content_type='application/json',
)
# mock new messageset via shortname lookup
query_string = '?short_name=prebirth.mother.text.10_42'
responses.add(
responses.GET,
'http://localhost:8005/api/v1/messageset/%s' % query_string,
json={
"next": None,
"previous": None,
"results": [{
"id": 1,
"short_name": 'prebirth.mother.text.10_42',
"default_schedule": 1
}]
},
status=200, content_type='application/json',
match_querystring=True
)
# mock schedule 1 lookup
responses.add(
responses.GET,
'http://localhost:8005/api/v1/schedule/1/',
json={"id": 1, "day_of_week": "1,3,5"},
status=200, content_type='application/json',
)
# Execute
result = implement_action.apply_async(args=[change.id])
# Check
self.assertEqual(result.get(), "Change messaging completed")
d = SubscriptionRequest.objects.last()
self.assertEqual(d.messageset, 1)
        # week 28: 18 weeks into the set * 3 msgs per week
        self.assertEqual(d.next_sequence_number, 54)
self.assertEqual(d.schedule, 1)
@responses.activate
def test_postbirth_audio_to_text_week12(self):
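        """
        Changing messaging from audio to text for a mother in week 12 of
        the postbirth.mother.audio.0_12.mon_wed.9_11 set should deactivate
        that subscription and create a SubscriptionRequest for
        postbirth.mother.text.0_12 at the recalculated sequence number.
        """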
# Setup
# make change object
change_data = {
"mother_id": "846877e6-afaa-43de-acb1-09f61ad4de99",
"action": "change_messaging",
"data": {
"msg_type": "text",
"voice_days": None,
"voice_times": None
},
"source": self.make_source_adminuser()
}
change = Change.objects.create(**change_data)
# mock get subscription request
subscription_id = "07f4d95c-ad78-4bf1-8779-c47b428e89d0"
query_string = '?active=True&identity=%s' % change_data["mother_id"]
responses.add(
responses.GET,
'http://localhost:8005/api/v1/subscriptions/%s' % query_string,
json={
"next": None,
"previous": None,
"results": [{
"id": subscription_id,
"identity": change_data["mother_id"],
"active": True,
"lang": "eng_NG",
"next_sequence_number": 24,
"messageset": 9,
"schedule": 4
}],
},
status=200, content_type='application/json',
match_querystring=True
)
# mock current messageset lookup
responses.add(
responses.GET,
'http://localhost:8005/api/v1/messageset/9/',
json={
"id": 9,
"short_name": 'postbirth.mother.audio.0_12.mon_wed.9_11',
"default_schedule": 4
},
status=200, content_type='application/json',
match_querystring=True
)
# mock schedule 4 lookup
responses.add(
responses.GET,
'http://localhost:8005/api/v1/schedule/4/',
json={"id": 4, "day_of_week": "1,3"},
status=200, content_type='application/json',
)
# mock patch subscription request
responses.add(
responses.PATCH,
'http://localhost:8005/api/v1/subscriptions/%s/' % subscription_id,
json={"active": False},
status=200, content_type='application/json',
)
# mock new messageset via shortname lookup
query_string = '?short_name=postbirth.mother.text.0_12'
responses.add(
responses.GET,
'http://localhost:8005/api/v1/messageset/%s' % query_string,
json={
"next": None,
"previous": None,
"results": [{
"id": 7,
"short_name": 'postbirth.mother.text.0_12',
"default_schedule": 1
}]
},
status=200, content_type='application/json',
match_querystring=True
)
# mock schedule 1 lookup
responses.add(
responses.GET,
'http://localhost:8005/api/v1/schedule/1/',
json={"id": 1, "day_of_week": "1,3,5"},
status=200, content_type='application/json',
)
# Execute
result = implement_action.apply_async(args=[change.id])
# Check
self.assertEqual(result.get(), "Change messaging completed")
d = SubscriptionRequest.objects.last()
self.assertEqual(d.messageset, 7)
        # week 12: 12 weeks into the set * 3 msgs per week
        self.assertEqual(d.next_sequence_number, 36)
self.assertEqual(d.schedule, 1)
@responses.activate
def test_postbirth_audio_to_text_week13(self):
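        """
        Changing messaging from audio to text for a mother in week 13 of
        the postbirth.mother.audio.13_52.mon_wed.9_11 set should deactivate
        that subscription and create a SubscriptionRequest for
        postbirth.mother.text.13_52 at the recalculated sequence number.
        """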
# Setup
# make change object
change_data = {
"mother_id": "846877e6-afaa-43de-acb1-09f61ad4de99",
"action": "change_messaging",
"data": {
"msg_type": "text",
"voice_days": None,
"voice_times": None
},
"source": self.make_source_adminuser()
}
change = Change.objects.create(**change_data)
# mock get subscription request
subscription_id = "07f4d95c-ad78-4bf1-8779-c47b428e89d0"
query_string = '?active=True&identity=%s' % change_data["mother_id"]
responses.add(
responses.GET,
'http://localhost:8005/api/v1/subscriptions/%s' % query_string,
json={
"next": None,
"previous": None,
"results": [{
"id": subscription_id,
"identity": change_data["mother_id"],
"active": True,
"lang": "eng_NG",
"next_sequence_number": 1,
"messageset": 13,
"schedule": 8
}],
},
status=200, content_type='application/json',
match_querystring=True
)
# mock current messageset lookup
responses.add(
responses.GET,
'http://localhost:8005/api/v1/messageset/13/',
json={
"id": 13,
"short_name": 'postbirth.mother.audio.13_52.mon_wed.9_11',
"default_schedule": 8
},
status=200, content_type='application/json',
match_querystring=True
)
# mock schedule 8 lookup
responses.add(
responses.GET,
'http://localhost:8005/api/v1/schedule/8/',
json={"id": 8, "day_of_week": "3"},
status=200, content_type='application/json',
)
# mock patch subscription request
responses.add(
responses.PATCH,
'http://localhost:8005/api/v1/subscriptions/%s/' % subscription_id,
json={"active": False},
status=200, content_type='application/json',
)
# mock new messageset via shortname lookup
query_string = '?short_name=postbirth.mother.text.13_52'
responses.add(
responses.GET,
'http://localhost:8005/api/v1/messageset/%s' % query_string,
json={
"next": None,
"previous": None,
"results": [{
"id": 8,
"short_name": 'postbirth.mother.text.13_52',
"default_schedule": 2
}]
},
status=200, content_type='application/json',
match_querystring=True
)
# mock schedule 2 lookup
responses.add(
responses.GET,
'http://localhost:8005/api/v1/schedule/2/',
json={"id": 2, "day_of_week": "2,4"},
status=200, content_type='application/json',
)
# Execute
result = implement_action.apply_async(args=[change.id])
# Check
self.assertEqual(result.get(), "Change messaging completed")
d = SubscriptionRequest.objects.last()
self.assertEqual(d.messageset, 8)
        # week 13: 1 week into the set * 2 msgs per week
        self.assertEqual(d.next_sequence_number, 2)
self.assertEqual(d.schedule, 2)
@responses.activate
def test_miscarriage_audio_to_text_week2(self):
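        """
        Changing messaging from audio to text for a mother in week 2 of
        the miscarriage.mother.audio.0_2.mon_wed.9_11 set should deactivate
        that subscription and create a SubscriptionRequest for
        miscarriage.mother.text.0_2.
        """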
# Setup
# make change object
change_data = {
"mother_id": "846877e6-afaa-43de-acb1-09f61ad4de99",
"action": "change_messaging",
"data": {
"msg_type": "text",
"voice_days": None,
"voice_times": None
},
"source": self.make_source_adminuser()
}
change = Change.objects.create(**change_data)
# mock get current subscription request
subscription_id = "07f4d95c-ad78-4bf1-8779-c47b428e89d0"
query_string = '?active=True&identity=%s' % change_data["mother_id"]
responses.add(
responses.GET,
'http://localhost:8005/api/v1/subscriptions/%s' % query_string,
json={
"next": None,
"previous": None,
"results": [{
"id": subscription_id,
"identity": change_data["mother_id"],
"active": True,
"lang": "eng_NG",
"next_sequence_number": 4,
"messageset": 19,
"schedule": 4
}],
},
status=200, content_type='application/json',
match_querystring=True
)
# mock current messageset lookup
responses.add(
responses.GET,
'http://localhost:8005/api/v1/messageset/19/',
json={
"id": 19,
"short_name": 'miscarriage.mother.audio.0_2.mon_wed.9_11',
"default_schedule": 4
},
status=200, content_type='application/json',
match_querystring=True
)
# mock schedule 4 lookup
responses.add(
responses.GET,
'http://localhost:8005/api/v1/schedule/4/',
json={"id": 4, "day_of_week": "1,3"},
status=200, content_type='application/json',
)
# mock patch subscription request
responses.add(
responses.PATCH,
'http://localhost:8005/api/v1/subscriptions/%s/' % subscription_id,
json={"active": False},
status=200, content_type='application/json',
)
# mock new messageset via shortname lookup
query_string = '?short_name=miscarriage.mother.text.0_2'
responses.add(
responses.GET,
'http://localhost:8005/api/v1/messageset/%s' % query_string,
json={
"next": None,
"previous": None,
"results": [{
"id": 18,
"short_name": 'miscarriage.mother.text.0_2',
"default_schedule": 1
}]
},
status=200, content_type='application/json',
match_querystring=True
)
# mock schedule 1 lookup
responses.add(
responses.GET,
'http://localhost:8005/api/v1/schedule/1/',
json={"id": 1, "day_of_week": "1,3,5"},
status=200, content_type='application/json',
)
# Execute
result = implement_action.apply_async(args=[change.id])
# Check
self.assertEqual(result.get(), "Change messaging completed")
d = SubscriptionRequest.objects.last()
self.assertEqual(d.messageset, 18)
self.assertEqual(d.next_sequence_number, 6)
self.assertEqual(d.schedule, 1)
class TestChangeBaby(AuthenticatedAPITestCase):
@responses.activate
def test_change_baby_multiple_registrations(self):
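        """
        A change_baby action should complete and create a postbirth
        SubscriptionRequest for the mother even when she has more than
        one registration.
        """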
# Setup
# make registration
self.make_registration_mother_only()
self.make_registration_mother_only()
# make change object
change_data = {
"mother_id": "846877e6-afaa-43de-acb1-09f61ad4de99",
"action": "change_baby",
"data": {},
"source": self.make_source_adminuser()
}
change = Change.objects.create(**change_data)
# mock get subscription request
subscription_id = "07f4d95c-ad78-4bf1-8779-c47b428e89d0"
query_string = '?active=True&identity=%s' % change_data["mother_id"]
responses.add(
responses.GET,
'http://localhost:8005/api/v1/subscriptions/%s' % query_string,
json={
"next": None,
"previous": None,
"results": [{
"id": subscription_id,
"identity": change_data["mother_id"],
"active": True,
"lang": "eng_NG"
}],
},
status=200, content_type='application/json',
match_querystring=True
)
# mock patch subscription request
responses.add(
responses.PATCH,
'http://localhost:8005/api/v1/subscriptions/%s/' % subscription_id,
json={"active": False},
status=200, content_type='application/json',
)
# mock identity lookup
responses.add(
responses.GET,
'http://localhost:8001/api/v1/identities/%s/' % change_data[
"mother_id"],
json={
"id": change_data["mother_id"],
"version": 1,
"details": {
"default_addr_type": "msisdn",
"addresses": {
"msisdn": {
"+2345059992222": {}
}
},
"receiver_role": "mother",
"linked_to": None,
"preferred_msg_type": "audio",
"preferred_msg_days": "mon_wed",
"preferred_msg_times": "9_11",
"preferred_language": "hau_NG"
},
"created_at": "2015-07-10T06:13:29.693272Z",
"updated_at": "2015-07-10T06:13:29.693298Z"
},
status=200, content_type='application/json',
)
# mock mother messageset lookup
query_string = '?short_name=postbirth.mother.audio.0_12.mon_wed.9_11'
responses.add(
responses.GET,
'http://localhost:8005/api/v1/messageset/%s' % query_string,
json={
"next": None,
"previous": None,
"results": [{
"id": 2,
"short_name": 'postbirth.mother.audio.0_12.mon_wed.9_11',
"default_schedule": 4
}]
},
status=200, content_type='application/json',
match_querystring=True
)
# mock mother schedule lookup
responses.add(
responses.GET,
'http://localhost:8005/api/v1/schedule/4/',
json={"id": 4, "day_of_week": "1,3"},
status=200, content_type='application/json',
)
# Execute
result = implement_action.apply_async(args=[change.id])
# Check
self.assertEqual(result.get(), "Change baby completed")
d = SubscriptionRequest.objects.last()
self.assertEqual(d.identity, "846877e6-afaa-43de-acb1-09f61ad4de99")
self.assertEqual(d.messageset, 2)
self.assertEqual(d.next_sequence_number, 1)
self.assertEqual(d.lang, "hau_NG")
self.assertEqual(d.schedule, 4)
@responses.activate
def test_mother_only_change_baby(self):
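        """
        A change_baby action for a mother-only registration should
        deactivate the current subscription and create a postbirth audio
        SubscriptionRequest based on the identity's message preferences.
        """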
# Setup
# make registration
self.make_registration_mother_only()
# make change object
change_data = {
"mother_id": "846877e6-afaa-43de-acb1-09f61ad4de99",
"action": "change_baby",
"data": {},
"source": self.make_source_adminuser()
}
change = Change.objects.create(**change_data)
# mock get subscription request
subscription_id = "07f4d95c-ad78-4bf1-8779-c47b428e89d0"
query_string = '?active=True&identity=%s' % change_data["mother_id"]
responses.add(
responses.GET,
'http://localhost:8005/api/v1/subscriptions/%s' % query_string,
json={
"next": None,
"previous": None,
"results": [{
"id": subscription_id,
"identity": change_data["mother_id"],
"active": True,
"lang": "eng_NG"
}],
},
status=200, content_type='application/json',
match_querystring=True
)
# mock patch subscription request
responses.add(
responses.PATCH,
'http://localhost:8005/api/v1/subscriptions/%s/' % subscription_id,
json={"active": False},
status=200, content_type='application/json',
)
# mock identity lookup
responses.add(
responses.GET,
'http://localhost:8001/api/v1/identities/%s/' % change_data[
"mother_id"],
json={
"id": change_data["mother_id"],
"version": 1,
"details": {
"default_addr_type": "msisdn",
"addresses": {
"msisdn": {
"+2345059992222": {}
}
},
"receiver_role": "mother",
"linked_to": None,
"preferred_msg_type": "audio",
"preferred_msg_days": "mon_wed",
"preferred_msg_times": "9_11",
"preferred_language": "hau_NG"
},
"created_at": "2015-07-10T06:13:29.693272Z",
"updated_at": "2015-07-10T06:13:29.693298Z"
},
status=200, content_type='application/json',
)
# mock mother messageset lookup
query_string = '?short_name=postbirth.mother.audio.0_12.mon_wed.9_11'
responses.add(
responses.GET,
'http://localhost:8005/api/v1/messageset/%s' % query_string,
json={
"next": None,
"previous": None,
"results": [{
"id": 2,
"short_name": 'postbirth.mother.audio.0_12.mon_wed.9_11',
"default_schedule": 4
}]
},
status=200, content_type='application/json',
match_querystring=True
)
# mock mother schedule lookup
responses.add(
responses.GET,
'http://localhost:8005/api/v1/schedule/4/',
json={"id": 4, "day_of_week": "1,3"},
status=200, content_type='application/json',
)
# Execute
result = implement_action.apply_async(args=[change.id])
# Check
self.assertEqual(result.get(), "Change baby completed")
d = SubscriptionRequest.objects.last()
self.assertEqual(d.identity, "846877e6-afaa-43de-acb1-09f61ad4de99")
self.assertEqual(d.messageset, 2)
self.assertEqual(d.next_sequence_number, 1)
self.assertEqual(d.lang, "hau_NG")
self.assertEqual(d.schedule, 4)
@responses.activate
def test_friend_only_change_baby(self):
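        """
        A change_baby action for a friend-only registration should create
        postbirth SubscriptionRequests for both the mother and the linked
        household identity.
        """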
# Setup
# make registration
self.make_registration_friend_only()
# make change object
change_data = {
"mother_id": "846877e6-afaa-43de-acb1-09f61ad4de99",
"action": "change_baby",
"data": {},
"source": self.make_source_adminuser()
}
change = Change.objects.create(**change_data)
# mock get subscription request
subscription_id = "07f4d95c-ad78-4bf1-8779-c47b428e89d0"
query_string = '?active=True&identity=%s' % change_data["mother_id"]
responses.add(
responses.GET,
'http://localhost:8005/api/v1/subscriptions/%s' % query_string,
json={
"next": None,
"previous": None,
"results": [{
"id": subscription_id,
"identity": change_data["mother_id"],
"active": True,
"lang": "eng_NG"
}],
},
status=200, content_type='application/json',
match_querystring=True
)
# mock patch subscription request
responses.add(
responses.PATCH,
'http://localhost:8005/api/v1/subscriptions/%s/' % subscription_id,
json={"active": False},
status=200, content_type='application/json',
)
# mock mother identity lookup
responses.add(
responses.GET,
'http://localhost:8001/api/v1/identities/%s/' % change_data[
"mother_id"],
json={
"id": change_data["mother_id"],
"version": 1,
"details": {
"default_addr_type": "msisdn",
"addresses": {
"msisdn": {
"+2345059992222": {}
}
},
"receiver_role": "mother",
"linked_to": "629eaf3c-04e5-4404-8a27-3ab3b811326a",
"preferred_msg_type": "audio",
"preferred_msg_days": "mon_wed",
"preferred_msg_times": "9_11",
"preferred_language": "hau_NG"
},
"created_at": "2015-07-10T06:13:29.693272Z",
"updated_at": "2015-07-10T06:13:29.693298Z"
},
status=200, content_type='application/json',
)
# mock mother messageset lookup
query_string = '?short_name=postbirth.mother.audio.0_12.mon_wed.9_11'
responses.add(
responses.GET,
'http://localhost:8005/api/v1/messageset/%s' % query_string,
json={
"next": None,
"previous": None,
"results": [{
"id": 2,
"short_name": 'postbirth.mother.audio.0_12.mon_wed.9_11',
"default_schedule": 4
}]
},
status=200, content_type='application/json',
match_querystring=True
)
# mock household messageset lookup
query_string = '?short_name=postbirth.household.audio.0_52.fri.9_11'
responses.add(
responses.GET,
'http://localhost:8005/api/v1/messageset/%s' % query_string,
json={
"next": None,
"previous": None,
"results": [{
"id": 17,
"short_name": 'postbirth.household.audio.0_52.fri.9_11',
"default_schedule": 3
}]
},
status=200, content_type='application/json',
match_querystring=True
)
# mock mother schedule lookup
responses.add(
responses.GET,
'http://localhost:8005/api/v1/schedule/4/',
json={"id": 4, "day_of_week": "1,3"},
status=200, content_type='application/json',
)
# mock household schedule lookup
responses.add(
responses.GET,
'http://localhost:8005/api/v1/schedule/3/',
json={"id": 3, "day_of_week": "5"},
status=200, content_type='application/json',
)
# Execute
result = implement_action.apply_async(args=[change.id])
# Check
self.assertEqual(result.get(), "Change baby completed")
d_mom = SubscriptionRequest.objects.filter(
identity=change_data["mother_id"])[0]
self.assertEqual(d_mom.identity,
"846877e6-afaa-43de-acb1-09f61ad4de99")
self.assertEqual(d_mom.messageset, 2)
self.assertEqual(d_mom.next_sequence_number, 1)
self.assertEqual(d_mom.lang, "hau_NG")
self.assertEqual(d_mom.schedule, 4)
d_hh = SubscriptionRequest.objects.filter(
identity="629eaf3c-04e5-4404-8a27-3ab3b811326a")[0]
self.assertEqual(d_hh.identity, "629eaf3c-04e5-4404-8a27-3ab3b811326a")
self.assertEqual(d_hh.messageset, 17)
self.assertEqual(d_hh.next_sequence_number, 1)
self.assertEqual(d_hh.lang, "hau_NG")
self.assertEqual(d_hh.schedule, 3)
@responses.activate
def test_mother_only_change_baby_text(self):
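        """
        A change_baby action for a mother whose preferred message type is
        text should create a SubscriptionRequest for the postbirth text
        message set.
        """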
# Setup
# make registration
self.make_registration_mother_only()
# make change object
change_data = {
"mother_id": "846877e6-afaa-43de-acb1-09f61ad4de99",
"action": "change_baby",
"data": {},
"source": self.make_source_adminuser()
}
change = Change.objects.create(**change_data)
# mock get subscription request
subscription_id = "07f4d95c-ad78-4bf1-8779-c47b428e89d0"
query_string = '?active=True&identity=%s' % change_data["mother_id"]
responses.add(
responses.GET,
'http://localhost:8005/api/v1/subscriptions/%s' % query_string,
json={
"next": None,
"previous": None,
"results": [{
"id": subscription_id,
"identity": change_data["mother_id"],
"active": True,
"lang": "eng_NG"
}],
},
status=200, content_type='application/json',
match_querystring=True
)
# mock patch subscription request
responses.add(
responses.PATCH,
'http://localhost:8005/api/v1/subscriptions/%s/' % subscription_id,
json={"active": False},
status=200, content_type='application/json',
)
# mock identity lookup
responses.add(
responses.GET,
'http://localhost:8001/api/v1/identities/%s/' % change_data[
"mother_id"],
json={
"id": change_data["mother_id"],
"version": 1,
"details": {
"default_addr_type": "msisdn",
"addresses": {
"msisdn": {
"+2345059992222": {}
}
},
"receiver_role": "mother",
"linked_to": None,
"preferred_msg_type": "text",
"preferred_language": "hau_NG"
},
"created_at": "2015-07-10T06:13:29.693272Z",
"updated_at": "2015-07-10T06:13:29.693298Z"
},
status=200, content_type='application/json',
)
# mock mother messageset lookup
query_string = '?short_name=postbirth.mother.text.0_12'
responses.add(
responses.GET,
'http://localhost:8005/api/v1/messageset/%s' % query_string,
json={
"next": None,
"previous": None,
"results": [{
"id": 7,
"short_name": 'postbirth.mother.text.0_12',
"default_schedule": 1
}]
},
status=200, content_type='application/json',
match_querystring=True
)
# mock mother schedule lookup
responses.add(
responses.GET,
'http://localhost:8005/api/v1/schedule/1/',
json={"id": 1, "day_of_week": "1,3"},
status=200, content_type='application/json',
)
# Execute
result = implement_action.apply_async(args=[change.id])
# Check
self.assertEqual(result.get(), "Change baby completed")
d = SubscriptionRequest.objects.last()
self.assertEqual(d.identity, "846877e6-<PASSWORD>-<PASSWORD>-acb1-0<PASSWORD>")
self.assertEqual(d.messageset, 7)
self.assertEqual(d.next_sequence_number, 1)
self.assertEqual(d.lang, "hau_NG")
self.assertEqual(d.schedule, 1)
class TestChangeLanguage(AuthenticatedAPITestCase):
@responses.activate
def test_mother_only_change_language(self):
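        """
        A change_language action for a mother-only registration should
        patch the mother's active subscription with the new language.
        """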
# Setup
# make registration
self.make_registration_mother_only()
# make change object
change_data = {
"mother_id": "8<PASSWORD>-af<PASSWORD>-<PASSWORD>",
"action": "change_language",
"data": {
"household_id": None,
"new_language": "pcm_NG"
},
"source": self.make_source_adminuser()
}
change = Change.objects.create(**change_data)
# mock get subscription request
subscription_id = "07f4d95c-ad78-4bf1-8779-c47b428e89d0"
query_string = '?active=True&identity=%s' % change_data["mother_id"]
responses.add(
responses.GET,
'http://localhost:8005/api/v1/subscriptions/%s' % query_string,
json={
"next": None,
"previous": None,
"results": [{
"id": subscription_id,
"identity": change_data["mother_id"],
"active": True,
"lang": "eng_NG"
}],
},
status=200, content_type='application/json',
match_querystring=True
)
# mock patch subscription request
responses.add(
responses.PATCH,
'http://localhost:8005/api/v1/subscriptions/%s/' % subscription_id,
json={"lang": "pcm_NG"},
status=200, content_type='application/json',
)
# Execute
result = implement_action.apply_async(args=[change.id])
# Check
self.assertEqual(result.get(), "Change language completed")
        self.assertEqual(len(responses.calls), 2)
@responses.activate
def test_friend_only_change_language(self):
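        """
        A change_language action that includes a household_id should patch
        both the mother's and the household's active subscriptions with
        the new language.
        """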
# Setup
# make registration
self.make_registration_friend_only()
# make change object
change_data = {
"mother_id": "846877e6-afaa-43de-acb1-09f61ad4de99",
"action": "change_language",
"data": {
"household_id": "629eaf3c-04e5-4404-8a27-3ab3b811326a",
"new_language": "pcm_NG"
},
"source": self.make_source_adminuser()
}
change = Change.objects.create(**change_data)
# mock mother get subscription request
subscription_id = "07f4d95c-ad78-4bf1-8779-c47b428e89d0"
query_string = '?active=True&identity=%s' % change_data["mother_id"]
responses.add(
responses.GET,
'http://localhost:8005/api/v1/subscriptions/%s' % query_string,
json={
"next": None,
"previous": None,
"results": [{
"id": subscription_id,
"identity": change_data["mother_id"],
"active": True,
"lang": "eng_NG"
}],
},
status=200, content_type='application/json',
match_querystring=True
)
# mock mother patch subscription request
responses.add(
responses.PATCH,
'http://localhost:8005/api/v1/subscriptions/%s/' % subscription_id,
json={"lang": "pcm_NG"},
status=200, content_type='application/json',
)
# mock household get subscription request
subscription_id = "ece53dbd-962f-4b9a-8546-759b059a2ae1"
query_string = '?active=True&identity=%s' % change_data["data"][
"household_id"]
responses.add(
responses.GET,
'http://localhost:8005/api/v1/subscriptions/%s' % query_string,
json={
"next": None,
"previous": None,
"results": [{
"id": subscription_id,
"identity": change_data["data"]["household_id"],
"active": True,
"lang": "eng_NG"
}],
},
status=200, content_type='application/json',
match_querystring=True
)
# mock mother patch subscription request
responses.add(
responses.PATCH,
'http://localhost:8005/api/v1/subscriptions/%s/' % subscription_id,
json={"lang": "pcm_NG"},
status=200, content_type='application/json',
)
# Execute
result = implement_action.apply_async(args=[change.id])
# Check
self.assertEqual(result.get(), "Change language completed")
        self.assertEqual(len(responses.calls), 4)
class TestChangeUnsubscribeHousehold(AuthenticatedAPITestCase):
@responses.activate
def test_unsubscribe_household(self):
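        """
        An unsubscribe_household_only action should deactivate the
        household's active subscription and leave the mother's intact.
        """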
# Setup
# make registration
self.make_registration_friend_only()
# make change object
change_data = {
"mother_id": "846877e6-afaa-43de-acb1-09f61ad4de99",
"action": "unsubscribe_household_only",
"data": {
"household_id": "629eaf3c-04e5-4404-8a27-3ab3b811326a",
"reason": "miscarriage"
},
"source": self.make_source_adminuser()
}
change = Change.objects.create(**change_data)
# mock get subscription request
subscription_id = "07f4d95c-ad78-4bf1-8779-c47b428e89d0"
query_string = '?active=True&identity=%s' % change_data["data"][
"household_id"]
responses.add(
responses.GET,
'http://localhost:8005/api/v1/subscriptions/%s' % query_string,
json={
"next": None,
"previous": None,
"results": [{
"id": subscription_id,
"identity": change_data["data"]["household_id"],
"active": True,
"lang": "eng_NG"
}],
},
status=200, content_type='application/json',
match_querystring=True
)
# mock patch subscription request
responses.add(
responses.PATCH,
'http://localhost:8005/api/v1/subscriptions/%s/' % subscription_id,
json={"active": False},
status=200, content_type='application/json',
)
# Execute
result = implement_action.apply_async(args=[change.id])
# Check
self.assertEqual(result.get(), "Unsubscribe household completed")
        self.assertEqual(len(responses.calls), 2)
class TestChangeUnsubscribeMother(AuthenticatedAPITestCase):
@responses.activate
def test_unsubscribe_mother(self):
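        """
        An unsubscribe_mother_only action should deactivate the mother's
        active subscription and leave the household's intact.
        """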
# Setup
# make registration
self.make_registration_friend_only()
# make change object
change_data = {
"mother_id": "846877e6-afaa-43de-acb1-09f61ad4de99",
"action": "unsubscribe_mother_only",
"data": {
"reason": "miscarriage"
},
"source": self.make_source_adminuser()
}
change = Change.objects.create(**change_data)
# mock get subscription request
subscription_id = "07f4d95c-ad78-4bf1-8779-c47b428e89d0"
query_string = '?active=True&identity=%s' % change_data["mother_id"]
responses.add(
responses.GET,
'http://localhost:8005/api/v1/subscriptions/%s' % query_string,
json={
"next": None,
"previous": None,
"results": [{
"id": subscription_id,
"identity": change_data["mother_id"],
"active": True,
"lang": "eng_NG"
}],
},
status=200, content_type='application/json',
match_querystring=True
)
# mock patch subscription request
responses.add(
responses.PATCH,
'http://localhost:8005/api/v1/subscriptions/%s/' % subscription_id,
json={"active": False},
status=200, content_type='application/json',
)
# Execute
result = implement_action.apply_async(args=[change.id])
# Check
self.assertEqual(result.get(), "Unsubscribe mother completed")
        self.assertEqual(len(responses.calls), 2)
class TestChangeLoss(AuthenticatedAPITestCase):
@responses.activate
def test_change_loss_multiple_registrations(self):
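        """
        A change_loss action should complete and create a miscarriage
        SubscriptionRequest for the mother even when she has more than
        one registration.
        """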
# Setup
# make registration
self.make_registration_mother_only()
self.make_registration_mother_only()
# make change object
change_data = {
"mother_id": "846877e6-afaa-43de-acb1-09f61ad4de99",
"action": "change_loss",
"data": {"reason": "miscarriage"},
"source": self.make_source_adminuser()
}
change = Change.objects.create(**change_data)
# mock get subscription request
subscription_id = "07f4d95c-ad78-4bf1-8779-c47b428e89d0"
query_string = '?active=True&identity=%s' % change_data["mother_id"]
responses.add(
responses.GET,
'http://localhost:8005/api/v1/subscriptions/%s' % query_string,
json={
"next": None,
"previous": None,
"results": [{
"id": subscription_id,
"identity": change_data["mother_id"],
"active": True,
"lang": "eng_NG"
}],
},
status=200, content_type='application/json',
match_querystring=True
)
# mock patch subscription request
responses.add(
responses.PATCH,
'http://localhost:8005/api/v1/subscriptions/%s/' % subscription_id,
json={"active": False},
status=200, content_type='application/json',
)
# mock identity lookup
responses.add(
responses.GET,
'http://localhost:8001/api/v1/identities/%s/' % change_data[
"mother_id"],
json={
"id": change_data["mother_id"],
"version": 1,
"details": {
"default_addr_type": "msisdn",
"addresses": {
"msisdn": {
"+2345059992222": {}
}
},
"receiver_role": "mother",
"linked_to": None,
"preferred_msg_type": "audio",
"preferred_msg_days": "mon_wed",
"preferred_msg_times": "9_11",
"preferred_language": "hau_NG"
},
"created_at": "2015-07-10T06:13:29.693272Z",
"updated_at": "2015-07-10T06:13:29.693298Z"
},
status=200, content_type='application/json',
)
# mock mother messageset lookup
query_string = '?short_name=miscarriage.mother.audio.0_2.mon_wed.9_11'
responses.add(
responses.GET,
'http://localhost:8005/api/v1/messageset/%s' % query_string,
json={
"next": None,
"previous": None,
"results": [{
"id": 19,
"short_name": 'miscarriage.mother.audio.0_2.mon_wed.9_11',
"default_schedule": 4
}]
},
status=200, content_type='application/json',
match_querystring=True
)
# mock mother schedule lookup
responses.add(
responses.GET,
'http://localhost:8005/api/v1/schedule/4/',
json={"id": 4, "day_of_week": "1,3"},
status=200, content_type='application/json',
)
# Execute
result = implement_action.apply_async(args=[change.id])
# Check
self.assertEqual(result.get(), "Change loss completed")
d = SubscriptionRequest.objects.last()
self.assertEqual(d.identity, "846877e6-afaa-43de-acb1-09f61ad4de99")
self.assertEqual(d.messageset, 19)
self.assertEqual(d.next_sequence_number, 1)
self.assertEqual(d.lang, "hau_NG")
self.assertEqual(d.schedule, 4)
@responses.activate
def test_mother_only_change_loss(self):
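        """
        A change_loss action for a mother-only registration should
        deactivate the current subscription and create a miscarriage audio
        SubscriptionRequest based on the identity's message preferences.
        """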
# Setup
# make registration
self.make_registration_mother_only()
# make change object
change_data = {
"mother_id": "846877e6-afaa-43de-acb1-09f61ad4de99",
"action": "change_loss",
"data": {"reason": "miscarriage"},
"source": self.make_source_adminuser()
}
change = Change.objects.create(**change_data)
# mock get subscription request
subscription_id = "07f4d95c-ad78-4bf1-8779-c47b428e89d0"
query_string = '?active=True&identity=%s' % change_data["mother_id"]
responses.add(
responses.GET,
'http://localhost:8005/api/v1/subscriptions/%s' % query_string,
json={
"next": None,
"previous": None,
"results": [{
"id": subscription_id,
"identity": change_data["mother_id"],
"active": True,
"lang": "eng_NG"
}],
},
status=200, content_type='application/json',
match_querystring=True
)
# mock patch subscription request
responses.add(
responses.PATCH,
'http://localhost:8005/api/v1/subscriptions/%s/' % subscription_id,
json={"active": False},
status=200, content_type='application/json',
)
# mock identity lookup
responses.add(
responses.GET,
'http://localhost:8001/api/v1/identities/%s/' % change_data[
"mother_id"],
json={
"id": change_data["mother_id"],
"version": 1,
"details": {
"default_addr_type": "msisdn",
"addresses": {
"msisdn": {
"+2345059992222": {}
}
},
"receiver_role": "mother",
"linked_to": None,
"preferred_msg_type": "audio",
"preferred_msg_days": "mon_wed",
"preferred_msg_times": "9_11",
"preferred_language": "hau_NG"
},
"created_at": "2015-07-10T06:13:29.693272Z",
"updated_at": "2015-07-10T06:13:29.693298Z"
},
status=200, content_type='application/json',
)
# mock mother messageset lookup
query_string = '?short_name=miscarriage.mother.audio.0_2.mon_wed.9_11'
responses.add(
responses.GET,
'http://localhost:8005/api/v1/messageset/%s' % query_string,
json={
"next": None,
"previous": None,
"results": [{
"id": 19,
"short_name": 'miscarriage.mother.audio.0_2.mon_wed.9_11',
"default_schedule": 4
}]
},
status=200, content_type='application/json',
match_querystring=True
)
# mock mother schedule lookup
responses.add(
responses.GET,
'http://localhost:8005/api/v1/schedule/4/',
json={"id": 4, "day_of_week": "1,3"},
status=200, content_type='application/json',
)
# Execute
result = implement_action.apply_async(args=[change.id])
# Check
self.assertEqual(result.get(), "Change loss completed")
d = SubscriptionRequest.objects.last()
self.assertEqual(d.identity, "846877e6-afaa-43de-acb1-09f61ad4de99")
self.assertEqual(d.messageset, 19)
self.assertEqual(d.next_sequence_number, 1)
self.assertEqual(d.lang, "hau_NG")
self.assertEqual(d.schedule, 4)
@responses.activate
def test_friend_only_change_loss(self):
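        """
        A change_loss action for a friend-only registration should
        deactivate both the mother's and the linked household's
        subscriptions and subscribe the mother to the miscarriage audio
        set.
        """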
# Setup
# make registration
self.make_registration_friend_only()
# make change object
change_data = {
"mother_id": "846877e6-afaa-43de-acb1-09f61ad4de99",
"action": "change_loss",
"data": {"reason": "miscarriage"},
"source": self.make_source_adminuser()
}
change = Change.objects.create(**change_data)
# mock mother get subscription request
subscription_id = "07f4d95c-ad78-4bf1-8779-c47b428e89d0"
query_string = '?active=True&identity=%s' % change_data["mother_id"]
responses.add(
responses.GET,
'http://localhost:8005/api/v1/subscriptions/%s' % query_string,
json={
"next": None,
"previous": None,
"results": [{
"id": subscription_id,
"identity": change_data["mother_id"],
"active": True,
"lang": "eng_NG"
}],
},
status=200, content_type='application/json',
match_querystring=True
)
# mock mother patch subscription request
responses.add(
responses.PATCH,
'http://localhost:8005/api/v1/subscriptions/%s/' % subscription_id,
json={"active": False},
status=200, content_type='application/json',
)
# mock mother identity lookup
responses.add(
responses.GET,
'http://localhost:8001/api/v1/identities/%s/' % change_data[
"mother_id"],
json={
"id": change_data["mother_id"],
"version": 1,
"details": {
"default_addr_type": "msisdn",
"addresses": {
"msisdn": {
"+2345059992222": {}
}
},
"receiver_role": "mother",
"linked_to": "629eaf3c-04e5-4404-8a27-3ab3b811326a",
"preferred_msg_type": "audio",
"preferred_msg_days": "mon_wed",
"preferred_msg_times": "9_11",
"preferred_language": "hau_NG"
},
"created_at": "2015-07-10T06:13:29.693272Z",
"updated_at": "2015-07-10T06:13:29.693298Z"
},
status=200, content_type='application/json',
)
# mock mother messageset lookup
query_string = '?short_name=miscarriage.mother.audio.0_2.mon_wed.9_11'
responses.add(
responses.GET,
'http://localhost:8005/api/v1/messageset/%s' % query_string,
json={
"next": None,
"previous": None,
"results": [{
"id": 19,
"short_name": 'miscarriage.mother.audio.0_2.mon_wed.9_11',
"default_schedule": 4
}]
},
status=200, content_type='application/json',
match_querystring=True
)
# mock mother schedule lookup
responses.add(
responses.GET,
'http://localhost:8005/api/v1/schedule/4/',
json={"id": 4, "day_of_week": "1,3"},
status=200, content_type='application/json',
)
# mock friend get subscription request
subscription_id = "ece53dbd-962f-4b9a-8546-759b059a2ae1"
query_string = '?active=True&identity=%s' % (
"629eaf3c-04e5-4404-8a27-3ab3b811326a")
responses.add(
responses.GET,
'http://localhost:8005/api/v1/subscriptions/%s' % query_string,
json={
"next": None,
"previous": None,
"results": [{
"id": subscription_id,
"identity": "629eaf3c-04e5-4404-8a27-3ab3b811326a",
"active": True,
"lang": "eng_NG"
}],
},
status=200, content_type='application/json',
match_querystring=True
)
# mock household patch subscription request
responses.add(
responses.PATCH,
'http://localhost:8005/api/v1/subscriptions/%s/' % subscription_id,
json={"active": False},
status=200, content_type='application/json',
)
# Execute
result = implement_action.apply_async(args=[change.id])
# Check
self.assertEqual(result.get(), "Change loss completed")
d = SubscriptionRequest.objects.last()
self.assertEqual(d.identity, "846877e6-afaa-43de-acb1-09f61ad4de99")
self.assertEqual(d.messageset, 19)
self.assertEqual(d.next_sequence_number, 1)
self.assertEqual(d.lang, "hau_NG")
self.assertEqual(d.schedule, 4)
@responses.activate
def test_mother_only_change_loss_text(self):
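        """
        A change_loss action for a mother whose preferred message type is
        text should create a SubscriptionRequest for the miscarriage text
        message set.
        """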
# Setup
# make registration
self.make_registration_mother_only()
# make change object
change_data = {
"mother_id": "846877e6-afaa-43de-acb1-09f61ad4de99",
"action": "change_loss",
"data": {"reason": "miscarriage"},
"source": self.make_source_adminuser()
}
change = Change.objects.create(**change_data)
# mock get subscription request
subscription_id = "07f4d95c-ad78-4bf1-8779-c47b428e89d0"
query_string = '?active=True&identity=%s' % change_data["mother_id"]
responses.add(
responses.GET,
'http://localhost:8005/api/v1/subscriptions/%s' % query_string,
json={
"next": None,
"previous": None,
"results": [{
"id": subscription_id,
"identity": change_data["mother_id"],
"active": True,
"lang": "eng_NG"
}],
},
status=200, content_type='application/json',
match_querystring=True
)
# mock patch subscription request
responses.add(
responses.PATCH,
'http://localhost:8005/api/v1/subscriptions/%s/' % subscription_id,
json={"active": False},
status=200, content_type='application/json',
)
# mock identity lookup
responses.add(
responses.GET,
'http://localhost:8001/api/v1/identities/%s/' % change_data[
"mother_id"],
json={
"id": change_data["mother_id"],
"version": 1,
"details": {
"default_addr_type": "msisdn",
"addresses": {
"msisdn": {
"+2345059992222": {}
}
},
"receiver_role": "mother",
"linked_to": None,
"preferred_msg_type": "text",
"preferred_language": "hau_NG"
},
"created_at": "2015-07-10T06:13:29.693272Z",
"updated_at": "2015-07-10T06:13:29.693298Z"
},
status=200, content_type='application/json',
)
# mock mother messageset lookup
query_string = '?short_name=miscarriage.mother.text.0_2'
responses.add(
responses.GET,
'http://localhost:8005/api/v1/messageset/%s' % query_string,
json={
"next": None,
"previous": None,
"results": [{
"id": 18,
"short_name": 'miscarriage.mother.text.0_2',
"default_schedule": 1
}]
},
status=200, content_type='application/json',
match_querystring=True
)
# mock mother schedule lookup
responses.add(
responses.GET,
'http://localhost:8005/api/v1/schedule/1/',
json={"id": 1, "day_of_week": "0,2"},
status=200, content_type='application/json',
)
# Execute
result = implement_action.apply_async(args=[change.id])
# Check
self.assertEqual(result.get(), "Change loss completed")
d = SubscriptionRequest.objects.last()
self.assertEqual(d.identity, "846877e6-afaa-43de-acb1-09f61ad4de99")
self.assertEqual(d.messageset, 18)
self.assertEqual(d.next_sequence_number, 1)
self.assertEqual(d.lang, "hau_NG")
self.assertEqual(d.schedule, 1)
class TestMetrics(AuthenticatedAPITestCase):
@responses.activate
def test_language_change_metric(self):
"""
When a new change is created, a sum metric should be fired if it is a
language change
"""
# deactivate Testsession for this test
self.session = None
# add metric post response
responses.add(responses.POST,
"http://metrics-url/metrics/",
json={"foo": "bar"},
status=200, content_type='application/json')
post_save.connect(fire_language_change_metric, sender=Change)
self.make_change_normaluser()
[last_call1, last_call2] = responses.calls
self.assertEqual(json.loads(last_call1.request.body), {
"registrations.change.language.sum": 1.0
})
self.assertEqual(json.loads(last_call2.request.body), {
"registrations.change.language.total.last": 1.0
})
post_save.disconnect(fire_language_change_metric, sender=Change)
@responses.activate
def test_baby_change_metric(self):
"""
When a new change is created, a sum metric should be fired if it is a
pregnancy to baby change
"""
# deactivate Testsession for this test
self.session = None
# add metric post response
responses.add(responses.POST,
"http://metrics-url/metrics/",
json={"foo": "bar"},
status=200, content_type='application/json')
post_save.connect(fire_baby_change_metric, sender=Change)
change_data = {
"mother_id": "846877e6-afaa-43de-acb1-09f61ad4de99",
"action": "change_baby",
"data": {},
"source": self.make_source_adminuser()
}
Change.objects.create(**change_data)
[last_call1, last_call2] = responses.calls
self.assertEqual(json.loads(last_call1.request.body), {
"registrations.change.pregnant_to_baby.sum": 1.0
})
self.assertEqual(json.loads(last_call2.request.body), {
"registrations.change.pregnant_to_baby.total.last": 1.0
})
post_save.disconnect(fire_baby_change_metric, sender=Change)
@responses.activate
def test_loss_change_metric(self):
"""
When a new change is created, a sum metric should be fired if it is a
pregnancy to loss change
"""
# deactivate Testsession for this test
self.session = None
# add metric post response
responses.add(responses.POST,
"http://metrics-url/metrics/",
json={"foo": "bar"},
status=200, content_type='application/json')
post_save.connect(fire_loss_change_metric, sender=Change)
change_data = {
"mother_id": "846877e6-afaa-43de-acb1-09f61ad4de99",
"action": "change_loss",
"data": {},
"source": self.make_source_adminuser()
}
Change.objects.create(**change_data)
[last_call1, last_call2] = responses.calls
self.assertEqual(json.loads(last_call1.request.body), {
"registrations.change.pregnant_to_loss.sum": 1.0
})
self.assertEqual(json.loads(last_call2.request.body), {
"registrations.change.pregnant_to_loss.total.last": 1.0
})
post_save.disconnect(fire_loss_change_metric, sender=Change)
@responses.activate
def test_message_change_metric(self):
"""
When a new change is created, a sum metric should be fired if it is a
messaging change
"""
# deactivate Testsession for this test
self.session = None
# add metric post response
responses.add(responses.POST,
"http://metrics-url/metrics/",
json={"foo": "bar"},
status=200, content_type='application/json')
post_save.connect(fire_message_change_metric, sender=Change)
change_data = {
"mother_id": "846877e6-afaa-43de-acb1-09f61ad4de99",
"action": "change_messaging",
"data": {},
"source": self.make_source_adminuser()
}
Change.objects.create(**change_data)
[last_call1, last_call2] = responses.calls
self.assertEqual(json.loads(last_call1.request.body), {
"registrations.change.messaging.sum": 1.0
})
self.assertEqual(json.loads(last_call2.request.body), {
"registrations.change.messaging.total.last": 1.0
})
post_save.disconnect(fire_message_change_metric, sender=Change)
class IdentityStoreOptoutViewTest(AuthenticatedAPITestCase):
"""
Tests related to the optout identity store view.
"""
url = '/optout/'
def setUp(self):
self.factory = RequestFactory()
super(IdentityStoreOptoutViewTest, self).setUp()
def optout_search_callback(self, request):
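        """
        Mock callback for the identity store optout search endpoint,
        returning two miscarriage optouts made via ussd_public.
        """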
headers = {'Content-Type': "application/json"}
resp = {
"next": None,
"previous": None,
"results": [{
'identity': "846877e6-afaa-43de-acb1-09f61ad4de99",
'details': {
'name': "testing",
'addresses': {
'msisdn': {
'+1234': {}
},
},
'language': "eng_NG",
},
'optout_type': "forget",
'optout_reason': "miscarriage",
'optout_source': "ussd_public",
}, {
'identity': "846877e6-afaa-43de-1111-09f61ad4de99",
'details': {
'name': "testing",
'addresses': {
'msisdn': {
'+1234': {}
},
},
'language': "eng_NG",
},
'optout_type': "forget",
'optout_reason': "miscarriage",
'optout_source': "ussd_public",
}]
}
return (200, headers, json.dumps(resp))
def optout_search_callback_other(self, request):
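        """
        Mock callback for the identity store optout search endpoint,
        returning a single optout with reason "other" made via ivr_public.
        """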
headers = {'Content-Type': "application/json"}
resp = {
"next": None,
"previous": None,
"results": [{
'identity': "629eaf3c-04e5-1111-8a27-3ab3b811326a",
'details': {
'name': "testing",
'addresses': {
'msisdn': {
'+1234': {}
},
},
'language': "eng_NG",
},
'optout_type': "forget",
'optout_reason': "other",
'optout_source': "ivr_public",
}]
}
return (200, headers, json.dumps(resp))
@responses.activate
def test_identity_optout_valid(self):
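        """
        A valid optout for a mother-only registration should fire sum and
        last metrics for the receiver type, reason, message type and
        source.
        """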
self.make_registration_mother_only()
registration = self.make_registration_mother_only()
registration.mother_id = '846877e6-afaa-43de-1111-09f61ad4de99'
registration.save()
responses.add(responses.POST,
"http://metrics-url/metrics/",
json={"foo": "bar"},
status=200, content_type='application/json')
url = 'http://localhost:8001/api/v1/optouts/search/?' \
'reason=miscarriage'
responses.add_callback(
responses.GET, url, callback=self.optout_search_callback,
match_querystring=True, content_type="application/json")
url = 'http://localhost:8001/api/v1/optouts/search/'
responses.add_callback(
responses.GET, url, callback=self.optout_search_callback,
match_querystring=True, content_type="application/json")
url = 'http://localhost:8001/api/v1/optouts/search/?' \
'request_source=ussd_public'
responses.add_callback(
responses.GET, url, callback=self.optout_search_callback,
match_querystring=True, content_type="application/json")
request = {
'identity': "846877e6-afaa-43de-acb1-09f61ad4de99",
'details': {
'name': "testing",
'addresses': {
'msisdn': {
'+1234': {}
},
},
'language': "eng_NG",
},
'optout_type': "forget",
'optout_reason': "miscarriage",
'optout_source': "ussd_public",
}
response = self.adminclient.post('/api/v1/optout/',
json.dumps(request),
content_type='application/json')
self.assertEqual(response.status_code, 200)
self.assertEqual(len(responses.calls), 12)
self.assertEqual(json.loads(responses.calls[0].request.body), {
"optout.receiver_type.mother_only.sum": 1.0
})
self.assertEqual(json.loads(responses.calls[2].request.body), {
"optout.receiver_type.mother_only.total.last": 2.0
})
self.assertEqual(json.loads(responses.calls[3].request.body), {
"optout.reason.miscarriage.sum": 1.0
})
self.assertEqual(json.loads(responses.calls[5].request.body), {
"optout.reason.miscarriage.total.last": 2.0
})
self.assertEqual(json.loads(responses.calls[6].request.body), {
"optout.msg_type.text.sum": 1.0
})
self.assertEqual(json.loads(responses.calls[8].request.body), {
"optout.msg_type.text.total.last": 2.0
})
self.assertEqual(json.loads(responses.calls[9].request.body), {
"optout.source.ussd.sum": 1.0
})
self.assertEqual(json.loads(responses.calls[11].request.body), {
"optout.source.ussd.total.last": 2.0
})
@responses.activate
def test_identity_optout_friend_only(self):
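        """
        A valid optout for a friend-only registration receiving audio
        messages via IVR should fire the matching receiver type, reason,
        message type and source metrics.
        """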
friend_registration = self.make_registration_friend_only()
friend_registration.data['receiver_id'] = '629eaf3c-04e5-1111-8a27-3ab3b811326a' # noqa
friend_registration.data['msg_type'] = 'audio'
friend_registration.mother_id = '846877e6-afaa-1111-1111-09f61ad4de99'
friend_registration.save()
responses.add(responses.POST,
"http://metrics-url/metrics/",
json={"foo": "bar"},
status=200, content_type='application/json')
url = 'http://localhost:8001/api/v1/optouts/search/?' \
'reason=other'
responses.add_callback(
responses.GET, url, callback=self.optout_search_callback_other,
match_querystring=True, content_type="application/json")
url = 'http://localhost:8001/api/v1/optouts/search/'
responses.add_callback(
responses.GET, url, callback=self.optout_search_callback_other,
match_querystring=True, content_type="application/json")
url = 'http://localhost:8001/api/v1/optouts/search/?' \
'request_source=ivr_public'
responses.add_callback(
responses.GET, url, callback=self.optout_search_callback_other,
match_querystring=True, content_type="application/json")
request = {
'identity': "629eaf3c-04e5-1111-8a27-3ab3b811326a",
'details': {
'name': "testing",
'addresses': {
'msisdn': {
'+1234': {}
},
},
'language': "eng_NG",
},
'optout_type': "forget",
'optout_reason': "other",
'optout_source': "ivr_public",
}
response = self.adminclient.post('/api/v1/optout/',
json.dumps(request),
content_type='application/json')
self.assertEqual(response.status_code, 200)
self.assertEqual(len(responses.calls), 12)
self.assertEqual(json.loads(responses.calls[0].request.body), {
"optout.receiver_type.friend_only.sum": 1.0
})
self.assertEqual(json.loads(responses.calls[2].request.body), {
"optout.receiver_type.friend_only.total.last": 1.0
})
self.assertEqual(json.loads(responses.calls[3].request.body), {
"optout.reason.other.sum": 1.0
})
self.assertEqual(json.loads(responses.calls[5].request.body), {
"optout.reason.other.total.last": 1.0
})
self.assertEqual(json.loads(responses.calls[6].request.body), {
"optout.msg_type.audio.sum": 1.0
})
self.assertEqual(json.loads(responses.calls[8].request.body), {
"optout.msg_type.audio.total.last": 1.0
})
self.assertEqual(json.loads(responses.calls[9].request.body), {
"optout.source.ivr.sum": 1.0
})
self.assertEqual(json.loads(responses.calls[11].request.body), {
"optout.source.ivr.total.last": 1.0
})
@responses.activate
def test_identity_optout_invalid(self):
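        """
        An optout without an identity should return a 400 response and
        fire no metrics.
        """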
self.make_registration_mother_only()
request = {
'details': {
'name': "testing",
'addresses': {
'msisdn': {
'+1234': {}
},
},
'language': "eng_NG",
},
'optout_type': "forget",
'optout_reason': "miscarriage",
}
response = self.adminclient.post('/api/v1/optout/',
json.dumps(request),
content_type='application/json')
self.assertEqual(response.status_code, 400)
self.assertEqual(utils.json_decode(response.content),
{'reason':
'"identity", "optout_reason" and "optout_source" '
'must be specified.'})
self.assertEqual(len(responses.calls), 0)
class AdminViewsTest(AuthenticatedAPITestCase):
"""
Tests related to the optout control interface view.
"""

    def add_messageset_language_callback(self):
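        # Maps messageset id -> languages offered; used when validating
        # language-change requests.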
responses.add(
responses.GET,
'http://localhost:8005/api/v1/messageset_languages/',
json={
"2": ["afr_ZA", "eng_ZA"],
"4": ["afr_ZA", "eng_ZA", "zul_ZA"]
},
status=200,
content_type='application/json')

    def add_messageset_via_short_name(self, short_name, id=13, schedule=8):
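        # Mocks the messageset lookup by short_name, with configurable id and
        # schedule so tests can line it up with the language callback above.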
query_string = '?short_name=%s' % short_name
responses.add(
responses.GET,
'http://localhost:8005/api/v1/messageset/%s' % query_string,
json={
"next": None,
"previous": None,
"results": [{
"id": id,
"short_name": short_name,
"default_schedule": schedule
}]
},
status=200, content_type='application/json',
match_querystring=True
)

    def test_ci_optout_invalid(self):
request = {}
self.make_source_adminuser()
response = self.adminclient.post('/api/v1/optout_admin/',
json.dumps(request),
content_type='application/json')
self.assertEqual(response.status_code, 400)
self.assertEqual(utils.json_decode(response.content),
{"mother_id": ["This field is required."]})
self.assertEqual(len(responses.calls), 0)

    def test_ci_optout(self):
request = {
"mother_id": "mother-id-123"
}
self.make_source_adminuser()
response = self.adminclient.post('/api/v1/optout_admin/',
json.dumps(request),
content_type='application/json')
self.assertEqual(response.status_code, 201)
change = Change.objects.last()
self.assertEqual(change.mother_id, "mother-id-123")
self.assertEqual(change.action, "unsubscribe_mother_only")
self.assertEqual(change.source.name, "test_ussd_source_adminuser")

    def test_ci_optout_no_source_username(self):
request = {
"mother_id": "mother-id-123"
}
user = User.objects.get(username="testnormaluser")
response = self.normalclient.post('/api/v1/optout_admin/',
json.dumps(request),
content_type='application/json')
self.assertEqual(response.status_code, 201)
change = Change.objects.last()
self.assertEqual(change.mother_id, "mother-id-123")
self.assertEqual(change.action, "unsubscribe_mother_only")
source = Source.objects.last()
self.assertEqual(source.name, user.username)
self.assertEqual(source.user, user)
self.assertEqual(source.authority, "advisor")

    def test_ci_optout_no_source(self):
request = {
"mother_id": "mother-id-123"
}
user = User.objects.get(username="testnormaluser")
user.first_name = "John"
user.last_name = "Doe"
user.save()
response = self.normalclient.post('/api/v1/optout_admin/',
json.dumps(request),
content_type='application/json')
self.assertEqual(response.status_code, 201)
change = Change.objects.last()
self.assertEqual(change.mother_id, "mother-id-123")
self.assertEqual(change.action, "unsubscribe_mother_only")
source = Source.objects.last()
self.assertEqual(source.name, user.get_full_name())
self.assertEqual(source.user, user)
self.assertEqual(source.authority, "advisor")

    def test_ci_change_no_identity(self):
request = {}
self.make_source_adminuser()
response = self.adminclient.post('/api/v1/change_admin/',
json.dumps(request),
content_type='application/json')
self.assertEqual(response.status_code, 400)
self.assertEqual(utils.json_decode(response.content),
{"mother_id": ["This field is required."]})
self.assertEqual(len(responses.calls), 0)

    def test_ci_change_invalid(self):
request = {
"mother_id": "846877e6-afaa-43de-acb1-09f61ad4de99"
}
self.make_source_adminuser()
response = self.adminclient.post('/api/v1/change_admin/',
json.dumps(request),
content_type='application/json')
self.assertEqual(response.status_code, 400)
self.assertEqual(
utils.json_decode(response.content),
{"non_field_errors": ["One of these fields must be populated: messageset, language"]}) # noqa

    @responses.activate
def test_ci_change_language(self):
request = {
"mother_id": "846877e6-afaa-43de-acb1-09f61ad4de99",
"language": "eng_ZA"
}
self.add_messageset_language_callback()
# mock get subscription request
subscription_id = "846877e6-afaa-43de-acb1-09f61ad4de99"
query_string = '?active=True&identity=%s' % request["mother_id"]
responses.add(
responses.GET,
'http://localhost:8005/api/v1/subscriptions/%s' % query_string,
json={
"next": None,
"previous": None,
"results": [{
"id": subscription_id,
"identity": request["mother_id"],
"active": True,
"lang": "eng_NG",
"next_sequence_number": 36,
"messageset": 2,
"schedule": 1
}],
},
status=200, content_type='application/json',
match_querystring=True
)
self.make_source_adminuser()
response = self.adminclient.post('/api/v1/change_admin/',
json.dumps(request),
content_type='application/json')
self.assertEqual(response.status_code, 201)
change = Change.objects.last()
self.assertEqual(change.mother_id,
"846877e6-afaa-43de-acb1-09f61ad4de99")
self.assertEqual(change.action, "change_language")
self.assertEqual(change.data, {"new_language": "eng_ZA"})

    def test_ci_change_messaging(self):
request = {
"mother_id": "846877e6-afaa-43de-acb1-09f61ad4de99",
"messageset": "messageset_one"
}
self.make_source_adminuser()
response = self.adminclient.post('/api/v1/change_admin/',
json.dumps(request),
content_type='application/json')
self.assertEqual(response.status_code, 201)
change = Change.objects.last()
self.assertEqual(change.mother_id,
"846877e6-afaa-43de-acb1-09f61ad4de99")
self.assertEqual(change.action, "change_messaging")
self.assertEqual(change.data, {"new_short_name": "messageset_one"})

    @responses.activate
def test_ci_change_language_and_messaging(self):
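        # Supplying both fields should collapse into a single change_messaging
        # Change carrying the new short name and the new language.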
identity = "846877e6-afaa-43de-acb1-09f61ad4de99"
request = {
"mother_id": identity,
"messageset": "messageset_one",
"language": "eng_ZA"
}
self.make_source_adminuser()
self.add_messageset_language_callback()
self.add_messageset_via_short_name("messageset_one", 2)
response = self.adminclient.post('/api/v1/change_admin/',
json.dumps(request),
content_type='application/json')
self.assertEqual(response.status_code, 201)
changes = Change.objects.filter(mother_id=identity)
self.assertEqual(changes.count(), 1)
self.assertEqual(changes[0].action, "change_messaging")
self.assertEqual(changes[0].data, {
"new_short_name": "messageset_one",
"new_language": "eng_ZA"
})

    @responses.activate
def test_ci_change_language_and_messaging_invalid(self):
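        # zul_ZA is not offered for messageset id 2 (see the language
        # callback), so the combined request must fail validation.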
identity = "846877e6-afaa-43de-acb1-09f61ad4de99"
request = {
"mother_id": identity,
"messageset": "messageset_one",
"language": "zul_ZA"
}
self.make_source_adminuser()
self.add_messageset_language_callback()
self.add_messageset_via_short_name("messageset_one", 2)
response = self.adminclient.post('/api/v1/change_admin/',
json.dumps(request),
content_type='application/json')
self.assertEqual(response.status_code, 400)


class AddChangeViewsTest(AuthenticatedAPITestCase):
"""
Tests related to the adding of changes view.
"""

    def mock_identity_lookup(self, msisdn, identity_id, details=None):
        # Avoid a mutable default argument being shared across calls.
        details = details if details is not None else {}
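        # Mocks the identity store search by (URL-encoded) msisdn.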
responses.add(
responses.GET,
'http://localhost:8001/api/v1/identities/search/?details__addresses__msisdn=%s' % msisdn, # noqa
json={
"next": None, "previous": None,
"results": [{
"id": identity_id,
"details": details
}]
},
status=200, content_type='application/json',
match_querystring=True
)

    def mock_identity_optout(self):
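        # Mocks the identity store's opt-out endpoint.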
responses.add(responses.POST,
"http://localhost:8001/api/v1/optout/",
json={"foo": "bar"},
status=200, content_type='application/json')

    @responses.activate
def test_add_change_language(self):
# Setup
self.make_source_adminuser()
mother_id = "4038a518-2940-4b15-9c5c-2b7b123b8735"
self.mock_identity_lookup("%2B2347031221927", mother_id)
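        # The view normalises the local "07031221927" to "+2347031221927",
        # hence the URL-encoded "%2B234..." prefix in the mock.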
post_data = {
"msisdn": "07031221927",
"action": "change_language",
"data": {"new_language": "english"}
}
# Execute
response = self.adminclient.post('/api/v1/addchange/',
json.dumps(post_data),
content_type='application/json')
# Check
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
d = Change.objects.last()
self.assertEqual(d.source.name, 'test_ussd_source_adminuser')
self.assertEqual(d.action, 'change_language')
self.assertEqual(d.validated, False)
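        # The human-readable "english" is stored as the internal "eng_NG" code.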
self.assertEqual(d.data, {"new_language": "eng_NG"})
self.assertEqual(d.created_by, self.adminuser)
self.assertEqual(len(responses.calls), 1)

    @responses.activate
def test_add_change_messaging(self):
# Setup
self.make_source_adminuser()
mother_id = "4038a518-2940-4b15-9c5c-2b7b123b8735"
self.mock_identity_lookup("%2B2347031221927", mother_id)
post_data = {
"msisdn": "07031221927",
"action": "change_messaging",
"data": {
"voice_days": "tuesday_and_thursday",
"voice_times": "2-5pm",
"msg_type": "voice"
}
}
# Execute
response = self.adminclient.post('/api/v1/addchange/',
json.dumps(post_data),
content_type='application/json')
# Check
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
d = Change.objects.last()
self.assertEqual(d.source.name, 'test_ussd_source_adminuser')
self.assertEqual(d.action, 'change_messaging')
self.assertEqual(d.validated, False)
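        # Voice day/time labels are normalised to their short internal codes.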
self.assertEqual(d.data, {
"voice_days": "tue_thu",
"voice_times": "2_5",
"msg_type": "audio"
})
self.assertEqual(d.created_by, self.adminuser)
self.assertEqual(len(responses.calls), 1)

    @responses.activate
def test_add_change_unsubscribe(self):
# Setup
self.make_source_adminuser()
mother_id = "4038a518-2940-4b15-9c5c-2b7b123b8735"
self.mock_identity_lookup("%2B2347031221927", mother_id)
self.mock_identity_optout()
post_data = {
"msisdn": "07031221927",
"action": "unsubscribe_mother_only",
"data": {"reason": "miscarriage"}
}
# Execute
response = self.adminclient.post('/api/v1/addchange/',
json.dumps(post_data),
content_type='application/json')
# Check
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
d = Change.objects.last()
self.assertEqual(d.source.name, 'test_ussd_source_adminuser')
self.assertEqual(d.action, 'unsubscribe_mother_only')
self.assertEqual(d.validated, False)
self.assertEqual(d.data, {"reason": "miscarriage"})
self.assertEqual(d.created_by, self.adminuser)
self.assertEqual(len(responses.calls), 2)

    @responses.activate
def test_add_change_unsubscribe_household(self):
# Setup
self.make_source_adminuser()
mother_id = "4038a518-2940-4b15-9c5c-2b7b123b8735"
household_id = "4038a518-2940-4b15-9c5c-9ix9cvx09cv8"
self.mock_identity_lookup("%2B2347031221927", mother_id)
self.mock_identity_lookup("%2B2347031221928", household_id)
self.mock_identity_optout()
post_data = {
"msisdn": "07031221927",
"action": "unsubscribe_household_only",
"data": {
"reason": "not_useful",
"household_msisdn": "07031221928"
}
}
# Execute
response = self.adminclient.post('/api/v1/addchange/',
json.dumps(post_data),
content_type='application/json')
# Check
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
d = Change.objects.last()
self.assertEqual(d.source.name, 'test_ussd_source_adminuser')
self.assertEqual(d.action, 'unsubscribe_household_only')
self.assertEqual(d.validated, False)
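        # The household msisdn is resolved to its identity and stored as
        # household_id alongside the reason.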
self.assertEqual(d.data["reason"], "not_useful")
self.assertEqual(d.data["household_id"], household_id)
self.assertEqual(d.created_by, self.adminuser)
self.assertEqual(len(responses.calls), 3)

    @responses.activate
def test_add_change_unsubscribe_household_no_msisdn(self):
# Setup
self.make_source_adminuser()
mother_id = "4038a518-2940-4b15-9c5c-2b7b123b8735"
household_id = "4038a518-2940-4b15-9c5c-9ix9cvx09cv8"
details = {
'linked_to': mother_id
}
self.mock_identity_lookup("%2B2347031221928", household_id, details)
self.mock_identity_optout()
post_data = {
"action": "unsubscribe_household_only",
"data": {
"reason": "not_useful",
"household_msisdn": "07031221928"
}
}
# Execute
response = self.adminclient.post('/api/v1/addchange/',
json.dumps(post_data),
content_type='application/json')
# Check
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
d = Change.objects.last()
self.assertEqual(d.source.name, 'test_ussd_source_adminuser')
self.assertEqual(d.action, 'unsubscribe_household_only')
self.assertEqual(d.validated, False)
self.assertEqual(d.data["reason"], "not_useful")
self.assertEqual(d.data["household_id"], household_id)
self.assertEqual(d.mother_id, mother_id)
self.assertEqual(d.created_by, self.adminuser)
self.assertEqual(len(responses.calls), 2)

    @responses.activate
def test_add_change_missing_field(self):
# Setup
post_data = {
"msisdn": "07031221927",
"data": {"test_key1": "test_value1"}
}
# Execute
response = self.adminclient.post('/api/v1/addchange/',
json.dumps(post_data),
content_type='application/json')
# Check
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
self.assertEqual(
utils.json_decode(response.content),
{"action": ["This field is required."]})
self.assertEqual(len(responses.calls), 0)

    @responses.activate
def test_add_change_no_source(self):
# Setup
mother_id = "4038a518-2940-4b15-9c5c-2b7b123b8735"
self.mock_identity_lookup("%2B2347031221927", mother_id)
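        # The lookup is mocked but should never be reached: source resolution
        # fails first, hence the zero call count below.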
post_data = {
"msisdn": "07031221927",
"action": "change_language",
"data": {"test_key1": "test_value1"}
}
# Execute
response = self.adminclient.post('/api/v1/addchange/',
json.dumps(post_data),
content_type='application/json')
# Check
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
self.assertEqual(
utils.json_decode(response.content),
"Source not found for user.")
self.assertEqual(len(responses.calls), 0)

    @responses.activate
def test_add_change_invalid_field(self):
# Setup
post_data = {
"msisdn": "07031221927",
"action": "change_everything",
"data": {"test_key1": "test_value1"}
}
# Execute
response = self.adminclient.post('/api/v1/addchange/',
json.dumps(post_data),
content_type='application/json')
# Check
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
self.assertEqual(
utils.json_decode(response.content),
{'action': ['"change_everything" is not a valid choice.']})
self.assertEqual(len(responses.calls), 0)
content_type='application/json')\n", (52736, 52899), False, 'import responses\n'), ((53073, 53374), 'responses.add', 'responses.add', (['responses.GET', "('http://localhost:8005/api/v1/messageset/%s' % query_string)"], {'json': "{'next': None, 'previous': None, 'results': [{'id': 7, 'short_name':\n 'postbirth.mother.text.0_12', 'default_schedule': 1}]}", 'status': '(200)', 'content_type': '"""application/json"""', 'match_querystring': '(True)'}), "(responses.GET, 'http://localhost:8005/api/v1/messageset/%s' %\n query_string, json={'next': None, 'previous': None, 'results': [{'id': \n 7, 'short_name': 'postbirth.mother.text.0_12', 'default_schedule': 1}]},\n status=200, content_type='application/json', match_querystring=True)\n", (53086, 53374), False, 'import responses\n'), ((53613, 53779), 'responses.add', 'responses.add', (['responses.GET', '"""http://localhost:8005/api/v1/schedule/1/"""'], {'json': "{'id': 1, 'day_of_week': '1,3,5'}", 'status': '(200)', 'content_type': '"""application/json"""'}), "(responses.GET, 'http://localhost:8005/api/v1/schedule/1/',\n json={'id': 1, 'day_of_week': '1,3,5'}, status=200, content_type=\n 'application/json')\n", (53626, 53779), False, 'import responses\n'), ((54011, 54045), 'registrations.models.SubscriptionRequest.objects.last', 'SubscriptionRequest.objects.last', ([], {}), '()\n', (54043, 54045), False, 'from registrations.models import Source, Registration, SubscriptionRequest, registration_post_save, fire_created_metric, fire_unique_operator_metric, fire_message_type_metric, fire_receiver_type_metric, fire_source_metric, fire_language_metric, fire_state_metric, fire_role_metric\n'), ((54901, 55294), 'responses.add', 'responses.add', (['responses.GET', "('http://localhost:8005/api/v1/subscriptions/%s' % query_string)"], {'json': "{'next': None, 'previous': None, 'results': [{'id': subscription_id,\n 'identity': change_data['mother_id'], 'active': True, 'lang': 'eng_NG',\n 'next_sequence_number': 1, 'messageset': 13, 'schedule': 8}]}", 'status': '(200)', 'content_type': '"""application/json"""', 'match_querystring': '(True)'}), "(responses.GET, \n 'http://localhost:8005/api/v1/subscriptions/%s' % query_string, json={\n 'next': None, 'previous': None, 'results': [{'id': subscription_id,\n 'identity': change_data['mother_id'], 'active': True, 'lang': 'eng_NG',\n 'next_sequence_number': 1, 'messageset': 13, 'schedule': 8}]}, status=\n 200, content_type='application/json', match_querystring=True)\n", (54914, 55294), False, 'import responses\n'), ((55612, 55867), 'responses.add', 'responses.add', (['responses.GET', '"""http://localhost:8005/api/v1/messageset/13/"""'], {'json': "{'id': 13, 'short_name': 'postbirth.mother.audio.13_52.mon_wed.9_11',\n 'default_schedule': 8}", 'status': '(200)', 'content_type': '"""application/json"""', 'match_querystring': '(True)'}), "(responses.GET, 'http://localhost:8005/api/v1/messageset/13/',\n json={'id': 13, 'short_name':\n 'postbirth.mother.audio.13_52.mon_wed.9_11', 'default_schedule': 8},\n status=200, content_type='application/json', match_querystring=True)\n", (55625, 55867), False, 'import responses\n'), ((56029, 56191), 'responses.add', 'responses.add', (['responses.GET', '"""http://localhost:8005/api/v1/schedule/8/"""'], {'json': "{'id': 8, 'day_of_week': '3'}", 'status': '(200)', 'content_type': '"""application/json"""'}), "(responses.GET, 'http://localhost:8005/api/v1/schedule/8/',\n json={'id': 8, 'day_of_week': '3'}, status=200, content_type=\n 'application/json')\n", (56042, 56191), False, 'import 
responses\n'), ((56292, 56468), 'responses.add', 'responses.add', (['responses.PATCH', "('http://localhost:8005/api/v1/subscriptions/%s/' % subscription_id)"], {'json': "{'active': False}", 'status': '(200)', 'content_type': '"""application/json"""'}), "(responses.PATCH, \n 'http://localhost:8005/api/v1/subscriptions/%s/' % subscription_id,\n json={'active': False}, status=200, content_type='application/json')\n", (56305, 56468), False, 'import responses\n'), ((56643, 56946), 'responses.add', 'responses.add', (['responses.GET', "('http://localhost:8005/api/v1/messageset/%s' % query_string)"], {'json': "{'next': None, 'previous': None, 'results': [{'id': 8, 'short_name':\n 'postbirth.mother.text.13_52', 'default_schedule': 2}]}", 'status': '(200)', 'content_type': '"""application/json"""', 'match_querystring': '(True)'}), "(responses.GET, 'http://localhost:8005/api/v1/messageset/%s' %\n query_string, json={'next': None, 'previous': None, 'results': [{'id': \n 8, 'short_name': 'postbirth.mother.text.13_52', 'default_schedule': 2}]\n }, status=200, content_type='application/json', match_querystring=True)\n", (56656, 56946), False, 'import responses\n'), ((57184, 57348), 'responses.add', 'responses.add', (['responses.GET', '"""http://localhost:8005/api/v1/schedule/2/"""'], {'json': "{'id': 2, 'day_of_week': '2,4'}", 'status': '(200)', 'content_type': '"""application/json"""'}), "(responses.GET, 'http://localhost:8005/api/v1/schedule/2/',\n json={'id': 2, 'day_of_week': '2,4'}, status=200, content_type=\n 'application/json')\n", (57197, 57348), False, 'import responses\n'), ((57580, 57614), 'registrations.models.SubscriptionRequest.objects.last', 'SubscriptionRequest.objects.last', ([], {}), '()\n', (57612, 57614), False, 'from registrations.models import Source, Registration, SubscriptionRequest, registration_post_save, fire_created_metric, fire_unique_operator_metric, fire_message_type_metric, fire_receiver_type_metric, fire_source_metric, fire_language_metric, fire_state_metric, fire_role_metric\n'), ((58477, 58870), 'responses.add', 'responses.add', (['responses.GET', "('http://localhost:8005/api/v1/subscriptions/%s' % query_string)"], {'json': "{'next': None, 'previous': None, 'results': [{'id': subscription_id,\n 'identity': change_data['mother_id'], 'active': True, 'lang': 'eng_NG',\n 'next_sequence_number': 4, 'messageset': 19, 'schedule': 4}]}", 'status': '(200)', 'content_type': '"""application/json"""', 'match_querystring': '(True)'}), "(responses.GET, \n 'http://localhost:8005/api/v1/subscriptions/%s' % query_string, json={\n 'next': None, 'previous': None, 'results': [{'id': subscription_id,\n 'identity': change_data['mother_id'], 'active': True, 'lang': 'eng_NG',\n 'next_sequence_number': 4, 'messageset': 19, 'schedule': 4}]}, status=\n 200, content_type='application/json', match_querystring=True)\n", (58490, 58870), False, 'import responses\n'), ((59188, 59443), 'responses.add', 'responses.add', (['responses.GET', '"""http://localhost:8005/api/v1/messageset/19/"""'], {'json': "{'id': 19, 'short_name': 'miscarriage.mother.audio.0_2.mon_wed.9_11',\n 'default_schedule': 4}", 'status': '(200)', 'content_type': '"""application/json"""', 'match_querystring': '(True)'}), "(responses.GET, 'http://localhost:8005/api/v1/messageset/19/',\n json={'id': 19, 'short_name':\n 'miscarriage.mother.audio.0_2.mon_wed.9_11', 'default_schedule': 4},\n status=200, content_type='application/json', match_querystring=True)\n", (59201, 59443), False, 'import responses\n'), ((59605, 59769), 'responses.add', 
'responses.add', (['responses.GET', '"""http://localhost:8005/api/v1/schedule/4/"""'], {'json': "{'id': 4, 'day_of_week': '1,3'}", 'status': '(200)', 'content_type': '"""application/json"""'}), "(responses.GET, 'http://localhost:8005/api/v1/schedule/4/',\n json={'id': 4, 'day_of_week': '1,3'}, status=200, content_type=\n 'application/json')\n", (59618, 59769), False, 'import responses\n'), ((59870, 60046), 'responses.add', 'responses.add', (['responses.PATCH', "('http://localhost:8005/api/v1/subscriptions/%s/' % subscription_id)"], {'json': "{'active': False}", 'status': '(200)', 'content_type': '"""application/json"""'}), "(responses.PATCH, \n 'http://localhost:8005/api/v1/subscriptions/%s/' % subscription_id,\n json={'active': False}, status=200, content_type='application/json')\n", (59883, 60046), False, 'import responses\n'), ((60221, 60525), 'responses.add', 'responses.add', (['responses.GET', "('http://localhost:8005/api/v1/messageset/%s' % query_string)"], {'json': "{'next': None, 'previous': None, 'results': [{'id': 18, 'short_name':\n 'miscarriage.mother.text.0_2', 'default_schedule': 1}]}", 'status': '(200)', 'content_type': '"""application/json"""', 'match_querystring': '(True)'}), "(responses.GET, 'http://localhost:8005/api/v1/messageset/%s' %\n query_string, json={'next': None, 'previous': None, 'results': [{'id': \n 18, 'short_name': 'miscarriage.mother.text.0_2', 'default_schedule': 1}\n ]}, status=200, content_type='application/json', match_querystring=True)\n", (60234, 60525), False, 'import responses\n'), ((60763, 60929), 'responses.add', 'responses.add', (['responses.GET', '"""http://localhost:8005/api/v1/schedule/1/"""'], {'json': "{'id': 1, 'day_of_week': '1,3,5'}", 'status': '(200)', 'content_type': '"""application/json"""'}), "(responses.GET, 'http://localhost:8005/api/v1/schedule/1/',\n json={'id': 1, 'day_of_week': '1,3,5'}, status=200, content_type=\n 'application/json')\n", (60776, 60929), False, 'import responses\n'), ((61161, 61195), 'registrations.models.SubscriptionRequest.objects.last', 'SubscriptionRequest.objects.last', ([], {}), '()\n', (61193, 61195), False, 'from registrations.models import Source, Registration, SubscriptionRequest, registration_post_save, fire_created_metric, fire_unique_operator_metric, fire_message_type_metric, fire_receiver_type_metric, fire_source_metric, fire_language_metric, fire_state_metric, fire_role_metric\n'), ((62079, 62408), 'responses.add', 'responses.add', (['responses.GET', "('http://localhost:8005/api/v1/subscriptions/%s' % query_string)"], {'json': "{'next': None, 'previous': None, 'results': [{'id': subscription_id,\n 'identity': change_data['mother_id'], 'active': True, 'lang': 'eng_NG'}]}", 'status': '(200)', 'content_type': '"""application/json"""', 'match_querystring': '(True)'}), "(responses.GET, \n 'http://localhost:8005/api/v1/subscriptions/%s' % query_string, json={\n 'next': None, 'previous': None, 'results': [{'id': subscription_id,\n 'identity': change_data['mother_id'], 'active': True, 'lang': 'eng_NG'}\n ]}, status=200, content_type='application/json', match_querystring=True)\n", (62092, 62408), False, 'import responses\n'), ((62671, 62847), 'responses.add', 'responses.add', (['responses.PATCH', "('http://localhost:8005/api/v1/subscriptions/%s/' % subscription_id)"], {'json': "{'active': False}", 'status': '(200)', 'content_type': '"""application/json"""'}), "(responses.PATCH, \n 'http://localhost:8005/api/v1/subscriptions/%s/' % subscription_id,\n json={'active': False}, status=200, 
content_type='application/json')\n", (62684, 62847), False, 'import responses\n'), ((62937, 63527), 'responses.add', 'responses.add', (['responses.GET', "('http://localhost:8001/api/v1/identities/%s/' % change_data['mother_id'])"], {'json': "{'id': change_data['mother_id'], 'version': 1, 'details': {\n 'default_addr_type': 'msisdn', 'addresses': {'msisdn': {\n '+2345059992222': {}}}, 'receiver_role': 'mother', 'linked_to': None,\n 'preferred_msg_type': 'audio', 'preferred_msg_days': 'mon_wed',\n 'preferred_msg_times': '9_11', 'preferred_language': 'hau_NG'},\n 'created_at': '2015-07-10T06:13:29.693272Z', 'updated_at':\n '2015-07-10T06:13:29.693298Z'}", 'status': '(200)', 'content_type': '"""application/json"""'}), "(responses.GET, 'http://localhost:8001/api/v1/identities/%s/' %\n change_data['mother_id'], json={'id': change_data['mother_id'],\n 'version': 1, 'details': {'default_addr_type': 'msisdn', 'addresses': {\n 'msisdn': {'+2345059992222': {}}}, 'receiver_role': 'mother',\n 'linked_to': None, 'preferred_msg_type': 'audio', 'preferred_msg_days':\n 'mon_wed', 'preferred_msg_times': '9_11', 'preferred_language':\n 'hau_NG'}, 'created_at': '2015-07-10T06:13:29.693272Z', 'updated_at':\n '2015-07-10T06:13:29.693298Z'}, status=200, content_type='application/json'\n )\n", (62950, 63527), False, 'import responses\n'), ((64068, 64387), 'responses.add', 'responses.add', (['responses.GET', "('http://localhost:8005/api/v1/messageset/%s' % query_string)"], {'json': "{'next': None, 'previous': None, 'results': [{'id': 2, 'short_name':\n 'postbirth.mother.audio.0_12.mon_wed.9_11', 'default_schedule': 4}]}", 'status': '(200)', 'content_type': '"""application/json"""', 'match_querystring': '(True)'}), "(responses.GET, 'http://localhost:8005/api/v1/messageset/%s' %\n query_string, json={'next': None, 'previous': None, 'results': [{'id': \n 2, 'short_name': 'postbirth.mother.audio.0_12.mon_wed.9_11',\n 'default_schedule': 4}]}, status=200, content_type='application/json',\n match_querystring=True)\n", (64081, 64387), False, 'import responses\n'), ((64627, 64791), 'responses.add', 'responses.add', (['responses.GET', '"""http://localhost:8005/api/v1/schedule/4/"""'], {'json': "{'id': 4, 'day_of_week': '1,3'}", 'status': '(200)', 'content_type': '"""application/json"""'}), "(responses.GET, 'http://localhost:8005/api/v1/schedule/4/',\n json={'id': 4, 'day_of_week': '1,3'}, status=200, content_type=\n 'application/json')\n", (64640, 64791), False, 'import responses\n'), ((65018, 65052), 'registrations.models.SubscriptionRequest.objects.last', 'SubscriptionRequest.objects.last', ([], {}), '()\n', (65050, 65052), False, 'from registrations.models import Source, Registration, SubscriptionRequest, registration_post_save, fire_created_metric, fire_unique_operator_metric, fire_message_type_metric, fire_receiver_type_metric, fire_source_metric, fire_language_metric, fire_state_metric, fire_role_metric\n'), ((65949, 66278), 'responses.add', 'responses.add', (['responses.GET', "('http://localhost:8005/api/v1/subscriptions/%s' % query_string)"], {'json': "{'next': None, 'previous': None, 'results': [{'id': subscription_id,\n 'identity': change_data['mother_id'], 'active': True, 'lang': 'eng_NG'}]}", 'status': '(200)', 'content_type': '"""application/json"""', 'match_querystring': '(True)'}), "(responses.GET, \n 'http://localhost:8005/api/v1/subscriptions/%s' % query_string, json={\n 'next': None, 'previous': None, 'results': [{'id': subscription_id,\n 'identity': change_data['mother_id'], 'active': True, 'lang': 
'eng_NG'}\n ]}, status=200, content_type='application/json', match_querystring=True)\n", (65962, 66278), False, 'import responses\n'), ((66541, 66717), 'responses.add', 'responses.add', (['responses.PATCH', "('http://localhost:8005/api/v1/subscriptions/%s/' % subscription_id)"], {'json': "{'active': False}", 'status': '(200)', 'content_type': '"""application/json"""'}), "(responses.PATCH, \n 'http://localhost:8005/api/v1/subscriptions/%s/' % subscription_id,\n json={'active': False}, status=200, content_type='application/json')\n", (66554, 66717), False, 'import responses\n'), ((66807, 67397), 'responses.add', 'responses.add', (['responses.GET', "('http://localhost:8001/api/v1/identities/%s/' % change_data['mother_id'])"], {'json': "{'id': change_data['mother_id'], 'version': 1, 'details': {\n 'default_addr_type': 'msisdn', 'addresses': {'msisdn': {\n '+2345059992222': {}}}, 'receiver_role': 'mother', 'linked_to': None,\n 'preferred_msg_type': 'audio', 'preferred_msg_days': 'mon_wed',\n 'preferred_msg_times': '9_11', 'preferred_language': 'hau_NG'},\n 'created_at': '2015-07-10T06:13:29.693272Z', 'updated_at':\n '2015-07-10T06:13:29.693298Z'}", 'status': '(200)', 'content_type': '"""application/json"""'}), "(responses.GET, 'http://localhost:8001/api/v1/identities/%s/' %\n change_data['mother_id'], json={'id': change_data['mother_id'],\n 'version': 1, 'details': {'default_addr_type': 'msisdn', 'addresses': {\n 'msisdn': {'+2345059992222': {}}}, 'receiver_role': 'mother',\n 'linked_to': None, 'preferred_msg_type': 'audio', 'preferred_msg_days':\n 'mon_wed', 'preferred_msg_times': '9_11', 'preferred_language':\n 'hau_NG'}, 'created_at': '2015-07-10T06:13:29.693272Z', 'updated_at':\n '2015-07-10T06:13:29.693298Z'}, status=200, content_type='application/json'\n )\n", (66820, 67397), False, 'import responses\n'), ((67938, 68257), 'responses.add', 'responses.add', (['responses.GET', "('http://localhost:8005/api/v1/messageset/%s' % query_string)"], {'json': "{'next': None, 'previous': None, 'results': [{'id': 2, 'short_name':\n 'postbirth.mother.audio.0_12.mon_wed.9_11', 'default_schedule': 4}]}", 'status': '(200)', 'content_type': '"""application/json"""', 'match_querystring': '(True)'}), "(responses.GET, 'http://localhost:8005/api/v1/messageset/%s' %\n query_string, json={'next': None, 'previous': None, 'results': [{'id': \n 2, 'short_name': 'postbirth.mother.audio.0_12.mon_wed.9_11',\n 'default_schedule': 4}]}, status=200, content_type='application/json',\n match_querystring=True)\n", (67951, 68257), False, 'import responses\n'), ((68497, 68661), 'responses.add', 'responses.add', (['responses.GET', '"""http://localhost:8005/api/v1/schedule/4/"""'], {'json': "{'id': 4, 'day_of_week': '1,3'}", 'status': '(200)', 'content_type': '"""application/json"""'}), "(responses.GET, 'http://localhost:8005/api/v1/schedule/4/',\n json={'id': 4, 'day_of_week': '1,3'}, status=200, content_type=\n 'application/json')\n", (68510, 68661), False, 'import responses\n'), ((68888, 68922), 'registrations.models.SubscriptionRequest.objects.last', 'SubscriptionRequest.objects.last', ([], {}), '()\n', (68920, 68922), False, 'from registrations.models import Source, Registration, SubscriptionRequest, registration_post_save, fire_created_metric, fire_unique_operator_metric, fire_message_type_metric, fire_receiver_type_metric, fire_source_metric, fire_language_metric, fire_state_metric, fire_role_metric\n'), ((69819, 70148), 'responses.add', 'responses.add', (['responses.GET', "('http://localhost:8005/api/v1/subscriptions/%s' 
% query_string)"], {'json': "{'next': None, 'previous': None, 'results': [{'id': subscription_id,\n 'identity': change_data['mother_id'], 'active': True, 'lang': 'eng_NG'}]}", 'status': '(200)', 'content_type': '"""application/json"""', 'match_querystring': '(True)'}), "(responses.GET, \n 'http://localhost:8005/api/v1/subscriptions/%s' % query_string, json={\n 'next': None, 'previous': None, 'results': [{'id': subscription_id,\n 'identity': change_data['mother_id'], 'active': True, 'lang': 'eng_NG'}\n ]}, status=200, content_type='application/json', match_querystring=True)\n", (69832, 70148), False, 'import responses\n'), ((70411, 70587), 'responses.add', 'responses.add', (['responses.PATCH', "('http://localhost:8005/api/v1/subscriptions/%s/' % subscription_id)"], {'json': "{'active': False}", 'status': '(200)', 'content_type': '"""application/json"""'}), "(responses.PATCH, \n 'http://localhost:8005/api/v1/subscriptions/%s/' % subscription_id,\n json={'active': False}, status=200, content_type='application/json')\n", (70424, 70587), False, 'import responses\n'), ((70684, 71312), 'responses.add', 'responses.add', (['responses.GET', "('http://localhost:8001/api/v1/identities/%s/' % change_data['mother_id'])"], {'json': "{'id': change_data['mother_id'], 'version': 1, 'details': {\n 'default_addr_type': 'msisdn', 'addresses': {'msisdn': {\n '+2345059992222': {}}}, 'receiver_role': 'mother', 'linked_to':\n '629eaf3c-04e5-4404-8a27-3ab3b811326a', 'preferred_msg_type': 'audio',\n 'preferred_msg_days': 'mon_wed', 'preferred_msg_times': '9_11',\n 'preferred_language': 'hau_NG'}, 'created_at':\n '2015-07-10T06:13:29.693272Z', 'updated_at': '2015-07-10T06:13:29.693298Z'}", 'status': '(200)', 'content_type': '"""application/json"""'}), "(responses.GET, 'http://localhost:8001/api/v1/identities/%s/' %\n change_data['mother_id'], json={'id': change_data['mother_id'],\n 'version': 1, 'details': {'default_addr_type': 'msisdn', 'addresses': {\n 'msisdn': {'+2345059992222': {}}}, 'receiver_role': 'mother',\n 'linked_to': '629eaf3c-04e5-4404-8a27-3ab3b811326a',\n 'preferred_msg_type': 'audio', 'preferred_msg_days': 'mon_wed',\n 'preferred_msg_times': '9_11', 'preferred_language': 'hau_NG'},\n 'created_at': '2015-07-10T06:13:29.693272Z', 'updated_at':\n '2015-07-10T06:13:29.693298Z'}, status=200, content_type='application/json'\n )\n", (70697, 71312), False, 'import responses\n'), ((71849, 72168), 'responses.add', 'responses.add', (['responses.GET', "('http://localhost:8005/api/v1/messageset/%s' % query_string)"], {'json': "{'next': None, 'previous': None, 'results': [{'id': 2, 'short_name':\n 'postbirth.mother.audio.0_12.mon_wed.9_11', 'default_schedule': 4}]}", 'status': '(200)', 'content_type': '"""application/json"""', 'match_querystring': '(True)'}), "(responses.GET, 'http://localhost:8005/api/v1/messageset/%s' %\n query_string, json={'next': None, 'previous': None, 'results': [{'id': \n 2, 'short_name': 'postbirth.mother.audio.0_12.mon_wed.9_11',\n 'default_schedule': 4}]}, status=200, content_type='application/json',\n match_querystring=True)\n", (71862, 72168), False, 'import responses\n'), ((72490, 72809), 'responses.add', 'responses.add', (['responses.GET', "('http://localhost:8005/api/v1/messageset/%s' % query_string)"], {'json': "{'next': None, 'previous': None, 'results': [{'id': 17, 'short_name':\n 'postbirth.household.audio.0_52.fri.9_11', 'default_schedule': 3}]}", 'status': '(200)', 'content_type': '"""application/json"""', 'match_querystring': '(True)'}), "(responses.GET, 
'http://localhost:8005/api/v1/messageset/%s' %\n query_string, json={'next': None, 'previous': None, 'results': [{'id': \n 17, 'short_name': 'postbirth.household.audio.0_52.fri.9_11',\n 'default_schedule': 3}]}, status=200, content_type='application/json',\n match_querystring=True)\n", (72503, 72809), False, 'import responses\n'), ((73049, 73213), 'responses.add', 'responses.add', (['responses.GET', '"""http://localhost:8005/api/v1/schedule/4/"""'], {'json': "{'id': 4, 'day_of_week': '1,3'}", 'status': '(200)', 'content_type': '"""application/json"""'}), "(responses.GET, 'http://localhost:8005/api/v1/schedule/4/',\n json={'id': 4, 'day_of_week': '1,3'}, status=200, content_type=\n 'application/json')\n", (73062, 73213), False, 'import responses\n'), ((73313, 73475), 'responses.add', 'responses.add', (['responses.GET', '"""http://localhost:8005/api/v1/schedule/3/"""'], {'json': "{'id': 3, 'day_of_week': '5'}", 'status': '(200)', 'content_type': '"""application/json"""'}), "(responses.GET, 'http://localhost:8005/api/v1/schedule/3/',\n json={'id': 3, 'day_of_week': '5'}, status=200, content_type=\n 'application/json')\n", (73326, 73475), False, 'import responses\n'), ((75124, 75453), 'responses.add', 'responses.add', (['responses.GET', "('http://localhost:8005/api/v1/subscriptions/%s' % query_string)"], {'json': "{'next': None, 'previous': None, 'results': [{'id': subscription_id,\n 'identity': change_data['mother_id'], 'active': True, 'lang': 'eng_NG'}]}", 'status': '(200)', 'content_type': '"""application/json"""', 'match_querystring': '(True)'}), "(responses.GET, \n 'http://localhost:8005/api/v1/subscriptions/%s' % query_string, json={\n 'next': None, 'previous': None, 'results': [{'id': subscription_id,\n 'identity': change_data['mother_id'], 'active': True, 'lang': 'eng_NG'}\n ]}, status=200, content_type='application/json', match_querystring=True)\n", (75137, 75453), False, 'import responses\n'), ((75716, 75892), 'responses.add', 'responses.add', (['responses.PATCH', "('http://localhost:8005/api/v1/subscriptions/%s/' % subscription_id)"], {'json': "{'active': False}", 'status': '(200)', 'content_type': '"""application/json"""'}), "(responses.PATCH, \n 'http://localhost:8005/api/v1/subscriptions/%s/' % subscription_id,\n json={'active': False}, status=200, content_type='application/json')\n", (75729, 75892), False, 'import responses\n'), ((75982, 76503), 'responses.add', 'responses.add', (['responses.GET', "('http://localhost:8001/api/v1/identities/%s/' % change_data['mother_id'])"], {'json': "{'id': change_data['mother_id'], 'version': 1, 'details': {\n 'default_addr_type': 'msisdn', 'addresses': {'msisdn': {\n '+2345059992222': {}}}, 'receiver_role': 'mother', 'linked_to': None,\n 'preferred_msg_type': 'text', 'preferred_language': 'hau_NG'},\n 'created_at': '2015-07-10T06:13:29.693272Z', 'updated_at':\n '2015-07-10T06:13:29.693298Z'}", 'status': '(200)', 'content_type': '"""application/json"""'}), "(responses.GET, 'http://localhost:8001/api/v1/identities/%s/' %\n change_data['mother_id'], json={'id': change_data['mother_id'],\n 'version': 1, 'details': {'default_addr_type': 'msisdn', 'addresses': {\n 'msisdn': {'+2345059992222': {}}}, 'receiver_role': 'mother',\n 'linked_to': None, 'preferred_msg_type': 'text', 'preferred_language':\n 'hau_NG'}, 'created_at': '2015-07-10T06:13:29.693272Z', 'updated_at':\n '2015-07-10T06:13:29.693298Z'}, status=200, content_type='application/json'\n )\n", (75995, 76503), False, 'import responses\n'), ((76994, 77295), 'responses.add', 'responses.add', 
(['responses.GET', "('http://localhost:8005/api/v1/messageset/%s' % query_string)"], {'json': "{'next': None, 'previous': None, 'results': [{'id': 7, 'short_name':\n 'postbirth.mother.text.0_12', 'default_schedule': 1}]}", 'status': '(200)', 'content_type': '"""application/json"""', 'match_querystring': '(True)'}), "(responses.GET, 'http://localhost:8005/api/v1/messageset/%s' %\n query_string, json={'next': None, 'previous': None, 'results': [{'id': \n 7, 'short_name': 'postbirth.mother.text.0_12', 'default_schedule': 1}]},\n status=200, content_type='application/json', match_querystring=True)\n", (77007, 77295), False, 'import responses\n'), ((77539, 77703), 'responses.add', 'responses.add', (['responses.GET', '"""http://localhost:8005/api/v1/schedule/1/"""'], {'json': "{'id': 1, 'day_of_week': '1,3'}", 'status': '(200)', 'content_type': '"""application/json"""'}), "(responses.GET, 'http://localhost:8005/api/v1/schedule/1/',\n json={'id': 1, 'day_of_week': '1,3'}, status=200, content_type=\n 'application/json')\n", (77552, 77703), False, 'import responses\n'), ((77930, 77964), 'registrations.models.SubscriptionRequest.objects.last', 'SubscriptionRequest.objects.last', ([], {}), '()\n', (77962, 77964), False, 'from registrations.models import Source, Registration, SubscriptionRequest, registration_post_save, fire_created_metric, fire_unique_operator_metric, fire_message_type_metric, fire_receiver_type_metric, fire_source_metric, fire_language_metric, fire_state_metric, fire_role_metric\n'), ((79025, 79354), 'responses.add', 'responses.add', (['responses.GET', "('http://localhost:8005/api/v1/subscriptions/%s' % query_string)"], {'json': "{'next': None, 'previous': None, 'results': [{'id': subscription_id,\n 'identity': change_data['mother_id'], 'active': True, 'lang': 'eng_NG'}]}", 'status': '(200)', 'content_type': '"""application/json"""', 'match_querystring': '(True)'}), "(responses.GET, \n 'http://localhost:8005/api/v1/subscriptions/%s' % query_string, json={\n 'next': None, 'previous': None, 'results': [{'id': subscription_id,\n 'identity': change_data['mother_id'], 'active': True, 'lang': 'eng_NG'}\n ]}, status=200, content_type='application/json', match_querystring=True)\n", (79038, 79354), False, 'import responses\n'), ((79617, 79794), 'responses.add', 'responses.add', (['responses.PATCH', "('http://localhost:8005/api/v1/subscriptions/%s/' % subscription_id)"], {'json': "{'lang': 'pcm_NG'}", 'status': '(200)', 'content_type': '"""application/json"""'}), "(responses.PATCH, \n 'http://localhost:8005/api/v1/subscriptions/%s/' % subscription_id,\n json={'lang': 'pcm_NG'}, status=200, content_type='application/json')\n", (79630, 79794), False, 'import responses\n'), ((80837, 81166), 'responses.add', 'responses.add', (['responses.GET', "('http://localhost:8005/api/v1/subscriptions/%s' % query_string)"], {'json': "{'next': None, 'previous': None, 'results': [{'id': subscription_id,\n 'identity': change_data['mother_id'], 'active': True, 'lang': 'eng_NG'}]}", 'status': '(200)', 'content_type': '"""application/json"""', 'match_querystring': '(True)'}), "(responses.GET, \n 'http://localhost:8005/api/v1/subscriptions/%s' % query_string, json={\n 'next': None, 'previous': None, 'results': [{'id': subscription_id,\n 'identity': change_data['mother_id'], 'active': True, 'lang': 'eng_NG'}\n ]}, status=200, content_type='application/json', match_querystring=True)\n", (80850, 81166), False, 'import responses\n'), ((81436, 81613), 'responses.add', 'responses.add', (['responses.PATCH', 
"('http://localhost:8005/api/v1/subscriptions/%s/' % subscription_id)"], {'json': "{'lang': 'pcm_NG'}", 'status': '(200)', 'content_type': '"""application/json"""'}), "(responses.PATCH, \n 'http://localhost:8005/api/v1/subscriptions/%s/' % subscription_id,\n json={'lang': 'pcm_NG'}, status=200, content_type='application/json')\n", (81449, 81613), False, 'import responses\n'), ((81888, 82231), 'responses.add', 'responses.add', (['responses.GET', "('http://localhost:8005/api/v1/subscriptions/%s' % query_string)"], {'json': "{'next': None, 'previous': None, 'results': [{'id': subscription_id,\n 'identity': change_data['data']['household_id'], 'active': True, 'lang':\n 'eng_NG'}]}", 'status': '(200)', 'content_type': '"""application/json"""', 'match_querystring': '(True)'}), "(responses.GET, \n 'http://localhost:8005/api/v1/subscriptions/%s' % query_string, json={\n 'next': None, 'previous': None, 'results': [{'id': subscription_id,\n 'identity': change_data['data']['household_id'], 'active': True, 'lang':\n 'eng_NG'}]}, status=200, content_type='application/json',\n match_querystring=True)\n", (81901, 82231), False, 'import responses\n'), ((82498, 82675), 'responses.add', 'responses.add', (['responses.PATCH', "('http://localhost:8005/api/v1/subscriptions/%s/' % subscription_id)"], {'json': "{'lang': 'pcm_NG'}", 'status': '(200)', 'content_type': '"""application/json"""'}), "(responses.PATCH, \n 'http://localhost:8005/api/v1/subscriptions/%s/' % subscription_id,\n json={'lang': 'pcm_NG'}, status=200, content_type='application/json')\n", (82511, 82675), False, 'import responses\n'), ((83805, 84148), 'responses.add', 'responses.add', (['responses.GET', "('http://localhost:8005/api/v1/subscriptions/%s' % query_string)"], {'json': "{'next': None, 'previous': None, 'results': [{'id': subscription_id,\n 'identity': change_data['data']['household_id'], 'active': True, 'lang':\n 'eng_NG'}]}", 'status': '(200)', 'content_type': '"""application/json"""', 'match_querystring': '(True)'}), "(responses.GET, \n 'http://localhost:8005/api/v1/subscriptions/%s' % query_string, json={\n 'next': None, 'previous': None, 'results': [{'id': subscription_id,\n 'identity': change_data['data']['household_id'], 'active': True, 'lang':\n 'eng_NG'}]}, status=200, content_type='application/json',\n match_querystring=True)\n", (83818, 84148), False, 'import responses\n'), ((84408, 84584), 'responses.add', 'responses.add', (['responses.PATCH', "('http://localhost:8005/api/v1/subscriptions/%s/' % subscription_id)"], {'json': "{'active': False}", 'status': '(200)', 'content_type': '"""application/json"""'}), "(responses.PATCH, \n 'http://localhost:8005/api/v1/subscriptions/%s/' % subscription_id,\n json={'active': False}, status=200, content_type='application/json')\n", (84421, 84584), False, 'import responses\n'), ((85615, 85944), 'responses.add', 'responses.add', (['responses.GET', "('http://localhost:8005/api/v1/subscriptions/%s' % query_string)"], {'json': "{'next': None, 'previous': None, 'results': [{'id': subscription_id,\n 'identity': change_data['mother_id'], 'active': True, 'lang': 'eng_NG'}]}", 'status': '(200)', 'content_type': '"""application/json"""', 'match_querystring': '(True)'}), "(responses.GET, \n 'http://localhost:8005/api/v1/subscriptions/%s' % query_string, json={\n 'next': None, 'previous': None, 'results': [{'id': subscription_id,\n 'identity': change_data['mother_id'], 'active': True, 'lang': 'eng_NG'}\n ]}, status=200, content_type='application/json', match_querystring=True)\n", (85628, 85944), 
False, 'import responses\n'), ((86207, 86383), 'responses.add', 'responses.add', (['responses.PATCH', "('http://localhost:8005/api/v1/subscriptions/%s/' % subscription_id)"], {'json': "{'active': False}", 'status': '(200)', 'content_type': '"""application/json"""'}), "(responses.PATCH, \n 'http://localhost:8005/api/v1/subscriptions/%s/' % subscription_id,\n json={'active': False}, status=200, content_type='application/json')\n", (86220, 86383), False, 'import responses\n'), ((87417, 87746), 'responses.add', 'responses.add', (['responses.GET', "('http://localhost:8005/api/v1/subscriptions/%s' % query_string)"], {'json': "{'next': None, 'previous': None, 'results': [{'id': subscription_id,\n 'identity': change_data['mother_id'], 'active': True, 'lang': 'eng_NG'}]}", 'status': '(200)', 'content_type': '"""application/json"""', 'match_querystring': '(True)'}), "(responses.GET, \n 'http://localhost:8005/api/v1/subscriptions/%s' % query_string, json={\n 'next': None, 'previous': None, 'results': [{'id': subscription_id,\n 'identity': change_data['mother_id'], 'active': True, 'lang': 'eng_NG'}\n ]}, status=200, content_type='application/json', match_querystring=True)\n", (87430, 87746), False, 'import responses\n'), ((88009, 88185), 'responses.add', 'responses.add', (['responses.PATCH', "('http://localhost:8005/api/v1/subscriptions/%s/' % subscription_id)"], {'json': "{'active': False}", 'status': '(200)', 'content_type': '"""application/json"""'}), "(responses.PATCH, \n 'http://localhost:8005/api/v1/subscriptions/%s/' % subscription_id,\n json={'active': False}, status=200, content_type='application/json')\n", (88022, 88185), False, 'import responses\n'), ((88275, 88865), 'responses.add', 'responses.add', (['responses.GET', "('http://localhost:8001/api/v1/identities/%s/' % change_data['mother_id'])"], {'json': "{'id': change_data['mother_id'], 'version': 1, 'details': {\n 'default_addr_type': 'msisdn', 'addresses': {'msisdn': {\n '+2345059992222': {}}}, 'receiver_role': 'mother', 'linked_to': None,\n 'preferred_msg_type': 'audio', 'preferred_msg_days': 'mon_wed',\n 'preferred_msg_times': '9_11', 'preferred_language': 'hau_NG'},\n 'created_at': '2015-07-10T06:13:29.693272Z', 'updated_at':\n '2015-07-10T06:13:29.693298Z'}", 'status': '(200)', 'content_type': '"""application/json"""'}), "(responses.GET, 'http://localhost:8001/api/v1/identities/%s/' %\n change_data['mother_id'], json={'id': change_data['mother_id'],\n 'version': 1, 'details': {'default_addr_type': 'msisdn', 'addresses': {\n 'msisdn': {'+2345059992222': {}}}, 'receiver_role': 'mother',\n 'linked_to': None, 'preferred_msg_type': 'audio', 'preferred_msg_days':\n 'mon_wed', 'preferred_msg_times': '9_11', 'preferred_language':\n 'hau_NG'}, 'created_at': '2015-07-10T06:13:29.693272Z', 'updated_at':\n '2015-07-10T06:13:29.693298Z'}, status=200, content_type='application/json'\n )\n", (88288, 88865), False, 'import responses\n'), ((89407, 89728), 'responses.add', 'responses.add', (['responses.GET', "('http://localhost:8005/api/v1/messageset/%s' % query_string)"], {'json': "{'next': None, 'previous': None, 'results': [{'id': 19, 'short_name':\n 'miscarriage.mother.audio.0_2.mon_wed.9_11', 'default_schedule': 4}]}", 'status': '(200)', 'content_type': '"""application/json"""', 'match_querystring': '(True)'}), "(responses.GET, 'http://localhost:8005/api/v1/messageset/%s' %\n query_string, json={'next': None, 'previous': None, 'results': [{'id': \n 19, 'short_name': 'miscarriage.mother.audio.0_2.mon_wed.9_11',\n 'default_schedule': 4}]}, 
status=200, content_type='application/json',\n match_querystring=True)\n", (89420, 89728), False, 'import responses\n'), ((89968, 90132), 'responses.add', 'responses.add', (['responses.GET', '"""http://localhost:8005/api/v1/schedule/4/"""'], {'json': "{'id': 4, 'day_of_week': '1,3'}", 'status': '(200)', 'content_type': '"""application/json"""'}), "(responses.GET, 'http://localhost:8005/api/v1/schedule/4/',\n json={'id': 4, 'day_of_week': '1,3'}, status=200, content_type=\n 'application/json')\n", (89981, 90132), False, 'import responses\n'), ((90359, 90393), 'registrations.models.SubscriptionRequest.objects.last', 'SubscriptionRequest.objects.last', ([], {}), '()\n', (90391, 90393), False, 'from registrations.models import Source, Registration, SubscriptionRequest, registration_post_save, fire_created_metric, fire_unique_operator_metric, fire_message_type_metric, fire_receiver_type_metric, fire_source_metric, fire_language_metric, fire_state_metric, fire_role_metric\n'), ((91314, 91643), 'responses.add', 'responses.add', (['responses.GET', "('http://localhost:8005/api/v1/subscriptions/%s' % query_string)"], {'json': "{'next': None, 'previous': None, 'results': [{'id': subscription_id,\n 'identity': change_data['mother_id'], 'active': True, 'lang': 'eng_NG'}]}", 'status': '(200)', 'content_type': '"""application/json"""', 'match_querystring': '(True)'}), "(responses.GET, \n 'http://localhost:8005/api/v1/subscriptions/%s' % query_string, json={\n 'next': None, 'previous': None, 'results': [{'id': subscription_id,\n 'identity': change_data['mother_id'], 'active': True, 'lang': 'eng_NG'}\n ]}, status=200, content_type='application/json', match_querystring=True)\n", (91327, 91643), False, 'import responses\n'), ((91906, 92082), 'responses.add', 'responses.add', (['responses.PATCH', "('http://localhost:8005/api/v1/subscriptions/%s/' % subscription_id)"], {'json': "{'active': False}", 'status': '(200)', 'content_type': '"""application/json"""'}), "(responses.PATCH, \n 'http://localhost:8005/api/v1/subscriptions/%s/' % subscription_id,\n json={'active': False}, status=200, content_type='application/json')\n", (91919, 92082), False, 'import responses\n'), ((92172, 92762), 'responses.add', 'responses.add', (['responses.GET', "('http://localhost:8001/api/v1/identities/%s/' % change_data['mother_id'])"], {'json': "{'id': change_data['mother_id'], 'version': 1, 'details': {\n 'default_addr_type': 'msisdn', 'addresses': {'msisdn': {\n '+2345059992222': {}}}, 'receiver_role': 'mother', 'linked_to': None,\n 'preferred_msg_type': 'audio', 'preferred_msg_days': 'mon_wed',\n 'preferred_msg_times': '9_11', 'preferred_language': 'hau_NG'},\n 'created_at': '2015-07-10T06:13:29.693272Z', 'updated_at':\n '2015-07-10T06:13:29.693298Z'}", 'status': '(200)', 'content_type': '"""application/json"""'}), "(responses.GET, 'http://localhost:8001/api/v1/identities/%s/' %\n change_data['mother_id'], json={'id': change_data['mother_id'],\n 'version': 1, 'details': {'default_addr_type': 'msisdn', 'addresses': {\n 'msisdn': {'+2345059992222': {}}}, 'receiver_role': 'mother',\n 'linked_to': None, 'preferred_msg_type': 'audio', 'preferred_msg_days':\n 'mon_wed', 'preferred_msg_times': '9_11', 'preferred_language':\n 'hau_NG'}, 'created_at': '2015-07-10T06:13:29.693272Z', 'updated_at':\n '2015-07-10T06:13:29.693298Z'}, status=200, content_type='application/json'\n )\n", (92185, 92762), False, 'import responses\n'), ((93304, 93625), 'responses.add', 'responses.add', (['responses.GET', 
"('http://localhost:8005/api/v1/messageset/%s' % query_string)"], {'json': "{'next': None, 'previous': None, 'results': [{'id': 19, 'short_name':\n 'miscarriage.mother.audio.0_2.mon_wed.9_11', 'default_schedule': 4}]}", 'status': '(200)', 'content_type': '"""application/json"""', 'match_querystring': '(True)'}), "(responses.GET, 'http://localhost:8005/api/v1/messageset/%s' %\n query_string, json={'next': None, 'previous': None, 'results': [{'id': \n 19, 'short_name': 'miscarriage.mother.audio.0_2.mon_wed.9_11',\n 'default_schedule': 4}]}, status=200, content_type='application/json',\n match_querystring=True)\n", (93317, 93625), False, 'import responses\n'), ((93865, 94029), 'responses.add', 'responses.add', (['responses.GET', '"""http://localhost:8005/api/v1/schedule/4/"""'], {'json': "{'id': 4, 'day_of_week': '1,3'}", 'status': '(200)', 'content_type': '"""application/json"""'}), "(responses.GET, 'http://localhost:8005/api/v1/schedule/4/',\n json={'id': 4, 'day_of_week': '1,3'}, status=200, content_type=\n 'application/json')\n", (93878, 94029), False, 'import responses\n'), ((94256, 94290), 'registrations.models.SubscriptionRequest.objects.last', 'SubscriptionRequest.objects.last', ([], {}), '()\n', (94288, 94290), False, 'from registrations.models import Source, Registration, SubscriptionRequest, registration_post_save, fire_created_metric, fire_unique_operator_metric, fire_message_type_metric, fire_receiver_type_metric, fire_source_metric, fire_language_metric, fire_state_metric, fire_role_metric\n'), ((95218, 95547), 'responses.add', 'responses.add', (['responses.GET', "('http://localhost:8005/api/v1/subscriptions/%s' % query_string)"], {'json': "{'next': None, 'previous': None, 'results': [{'id': subscription_id,\n 'identity': change_data['mother_id'], 'active': True, 'lang': 'eng_NG'}]}", 'status': '(200)', 'content_type': '"""application/json"""', 'match_querystring': '(True)'}), "(responses.GET, \n 'http://localhost:8005/api/v1/subscriptions/%s' % query_string, json={\n 'next': None, 'previous': None, 'results': [{'id': subscription_id,\n 'identity': change_data['mother_id'], 'active': True, 'lang': 'eng_NG'}\n ]}, status=200, content_type='application/json', match_querystring=True)\n", (95231, 95547), False, 'import responses\n'), ((95817, 95993), 'responses.add', 'responses.add', (['responses.PATCH', "('http://localhost:8005/api/v1/subscriptions/%s/' % subscription_id)"], {'json': "{'active': False}", 'status': '(200)', 'content_type': '"""application/json"""'}), "(responses.PATCH, \n 'http://localhost:8005/api/v1/subscriptions/%s/' % subscription_id,\n json={'active': False}, status=200, content_type='application/json')\n", (95830, 95993), False, 'import responses\n'), ((96090, 96718), 'responses.add', 'responses.add', (['responses.GET', "('http://localhost:8001/api/v1/identities/%s/' % change_data['mother_id'])"], {'json': "{'id': change_data['mother_id'], 'version': 1, 'details': {\n 'default_addr_type': 'msisdn', 'addresses': {'msisdn': {\n '+2345059992222': {}}}, 'receiver_role': 'mother', 'linked_to':\n '629eaf3c-04e5-4404-8a27-3ab3b811326a', 'preferred_msg_type': 'audio',\n 'preferred_msg_days': 'mon_wed', 'preferred_msg_times': '9_11',\n 'preferred_language': 'hau_NG'}, 'created_at':\n '2015-07-10T06:13:29.693272Z', 'updated_at': '2015-07-10T06:13:29.693298Z'}", 'status': '(200)', 'content_type': '"""application/json"""'}), "(responses.GET, 'http://localhost:8001/api/v1/identities/%s/' %\n change_data['mother_id'], json={'id': change_data['mother_id'],\n 'version': 1, 
'details': {'default_addr_type': 'msisdn', 'addresses': {\n 'msisdn': {'+2345059992222': {}}}, 'receiver_role': 'mother',\n 'linked_to': '629eaf3c-04e5-4404-8a27-3ab3b811326a',\n 'preferred_msg_type': 'audio', 'preferred_msg_days': 'mon_wed',\n 'preferred_msg_times': '9_11', 'preferred_language': 'hau_NG'},\n 'created_at': '2015-07-10T06:13:29.693272Z', 'updated_at':\n '2015-07-10T06:13:29.693298Z'}, status=200, content_type='application/json'\n )\n", (96103, 96718), False, 'import responses\n'), ((97256, 97577), 'responses.add', 'responses.add', (['responses.GET', "('http://localhost:8005/api/v1/messageset/%s' % query_string)"], {'json': "{'next': None, 'previous': None, 'results': [{'id': 19, 'short_name':\n 'miscarriage.mother.audio.0_2.mon_wed.9_11', 'default_schedule': 4}]}", 'status': '(200)', 'content_type': '"""application/json"""', 'match_querystring': '(True)'}), "(responses.GET, 'http://localhost:8005/api/v1/messageset/%s' %\n query_string, json={'next': None, 'previous': None, 'results': [{'id': \n 19, 'short_name': 'miscarriage.mother.audio.0_2.mon_wed.9_11',\n 'default_schedule': 4}]}, status=200, content_type='application/json',\n match_querystring=True)\n", (97269, 97577), False, 'import responses\n'), ((97817, 97981), 'responses.add', 'responses.add', (['responses.GET', '"""http://localhost:8005/api/v1/schedule/4/"""'], {'json': "{'id': 4, 'day_of_week': '1,3'}", 'status': '(200)', 'content_type': '"""application/json"""'}), "(responses.GET, 'http://localhost:8005/api/v1/schedule/4/',\n json={'id': 4, 'day_of_week': '1,3'}, status=200, content_type=\n 'application/json')\n", (97830, 97981), False, 'import responses\n'), ((98258, 98604), 'responses.add', 'responses.add', (['responses.GET', "('http://localhost:8005/api/v1/subscriptions/%s' % query_string)"], {'json': "{'next': None, 'previous': None, 'results': [{'id': subscription_id,\n 'identity': '629eaf3c-04e5-4404-8a27-3ab3b811326a', 'active': True,\n 'lang': 'eng_NG'}]}", 'status': '(200)', 'content_type': '"""application/json"""', 'match_querystring': '(True)'}), "(responses.GET, \n 'http://localhost:8005/api/v1/subscriptions/%s' % query_string, json={\n 'next': None, 'previous': None, 'results': [{'id': subscription_id,\n 'identity': '629eaf3c-04e5-4404-8a27-3ab3b811326a', 'active': True,\n 'lang': 'eng_NG'}]}, status=200, content_type='application/json',\n match_querystring=True)\n", (98271, 98604), False, 'import responses\n'), ((98874, 99050), 'responses.add', 'responses.add', (['responses.PATCH', "('http://localhost:8005/api/v1/subscriptions/%s/' % subscription_id)"], {'json': "{'active': False}", 'status': '(200)', 'content_type': '"""application/json"""'}), "(responses.PATCH, \n 'http://localhost:8005/api/v1/subscriptions/%s/' % subscription_id,\n json={'active': False}, status=200, content_type='application/json')\n", (98887, 99050), False, 'import responses\n'), ((99277, 99311), 'registrations.models.SubscriptionRequest.objects.last', 'SubscriptionRequest.objects.last', ([], {}), '()\n', (99309, 99311), False, 'from registrations.models import Source, Registration, SubscriptionRequest, registration_post_save, fire_created_metric, fire_unique_operator_metric, fire_message_type_metric, fire_receiver_type_metric, fire_source_metric, fire_language_metric, fire_state_metric, fire_role_metric\n'), ((100237, 100566), 'responses.add', 'responses.add', (['responses.GET', "('http://localhost:8005/api/v1/subscriptions/%s' % query_string)"], {'json': "{'next': None, 'previous': None, 'results': [{'id': subscription_id,\n 
'identity': change_data['mother_id'], 'active': True, 'lang': 'eng_NG'}]}", 'status': '(200)', 'content_type': '"""application/json"""', 'match_querystring': '(True)'}), "(responses.GET, \n 'http://localhost:8005/api/v1/subscriptions/%s' % query_string, json={\n 'next': None, 'previous': None, 'results': [{'id': subscription_id,\n 'identity': change_data['mother_id'], 'active': True, 'lang': 'eng_NG'}\n ]}, status=200, content_type='application/json', match_querystring=True)\n", (100250, 100566), False, 'import responses\n'), ((100829, 101005), 'responses.add', 'responses.add', (['responses.PATCH', "('http://localhost:8005/api/v1/subscriptions/%s/' % subscription_id)"], {'json': "{'active': False}", 'status': '(200)', 'content_type': '"""application/json"""'}), "(responses.PATCH, \n 'http://localhost:8005/api/v1/subscriptions/%s/' % subscription_id,\n json={'active': False}, status=200, content_type='application/json')\n", (100842, 101005), False, 'import responses\n'), ((101095, 101616), 'responses.add', 'responses.add', (['responses.GET', "('http://localhost:8001/api/v1/identities/%s/' % change_data['mother_id'])"], {'json': "{'id': change_data['mother_id'], 'version': 1, 'details': {\n 'default_addr_type': 'msisdn', 'addresses': {'msisdn': {\n '+2345059992222': {}}}, 'receiver_role': 'mother', 'linked_to': None,\n 'preferred_msg_type': 'text', 'preferred_language': 'hau_NG'},\n 'created_at': '2015-07-10T06:13:29.693272Z', 'updated_at':\n '2015-07-10T06:13:29.693298Z'}", 'status': '(200)', 'content_type': '"""application/json"""'}), "(responses.GET, 'http://localhost:8001/api/v1/identities/%s/' %\n change_data['mother_id'], json={'id': change_data['mother_id'],\n 'version': 1, 'details': {'default_addr_type': 'msisdn', 'addresses': {\n 'msisdn': {'+2345059992222': {}}}, 'receiver_role': 'mother',\n 'linked_to': None, 'preferred_msg_type': 'text', 'preferred_language':\n 'hau_NG'}, 'created_at': '2015-07-10T06:13:29.693272Z', 'updated_at':\n '2015-07-10T06:13:29.693298Z'}, status=200, content_type='application/json'\n )\n", (101108, 101616), False, 'import responses\n'), ((102108, 102412), 'responses.add', 'responses.add', (['responses.GET', "('http://localhost:8005/api/v1/messageset/%s' % query_string)"], {'json': "{'next': None, 'previous': None, 'results': [{'id': 18, 'short_name':\n 'miscarriage.mother.text.0_2', 'default_schedule': 1}]}", 'status': '(200)', 'content_type': '"""application/json"""', 'match_querystring': '(True)'}), "(responses.GET, 'http://localhost:8005/api/v1/messageset/%s' %\n query_string, json={'next': None, 'previous': None, 'results': [{'id': \n 18, 'short_name': 'miscarriage.mother.text.0_2', 'default_schedule': 1}\n ]}, status=200, content_type='application/json', match_querystring=True)\n", (102121, 102412), False, 'import responses\n'), ((102655, 102819), 'responses.add', 'responses.add', (['responses.GET', '"""http://localhost:8005/api/v1/schedule/1/"""'], {'json': "{'id': 1, 'day_of_week': '0,2'}", 'status': '(200)', 'content_type': '"""application/json"""'}), "(responses.GET, 'http://localhost:8005/api/v1/schedule/1/',\n json={'id': 1, 'day_of_week': '0,2'}, status=200, content_type=\n 'application/json')\n", (102668, 102819), False, 'import responses\n'), ((103046, 103080), 'registrations.models.SubscriptionRequest.objects.last', 'SubscriptionRequest.objects.last', ([], {}), '()\n', (103078, 103080), False, 'from registrations.models import Source, Registration, SubscriptionRequest, registration_post_save, fire_created_metric, 
fire_unique_operator_metric, fire_message_type_metric, fire_receiver_type_metric, fire_source_metric, fire_language_metric, fire_state_metric, fire_role_metric\n'), ((103695, 103825), 'responses.add', 'responses.add', (['responses.POST', '"""http://metrics-url/metrics/"""'], {'json': "{'foo': 'bar'}", 'status': '(200)', 'content_type': '"""application/json"""'}), "(responses.POST, 'http://metrics-url/metrics/', json={'foo':\n 'bar'}, status=200, content_type='application/json')\n", (103708, 103825), False, 'import responses\n'), ((103896, 103957), 'django.db.models.signals.post_save.connect', 'post_save.connect', (['fire_language_change_metric'], {'sender': 'Change'}), '(fire_language_change_metric, sender=Change)\n', (103913, 103957), False, 'from django.db.models.signals import post_save\n'), ((104322, 104386), 'django.db.models.signals.post_save.disconnect', 'post_save.disconnect', (['fire_language_change_metric'], {'sender': 'Change'}), '(fire_language_change_metric, sender=Change)\n', (104342, 104386), False, 'from django.db.models.signals import post_save\n'), ((104704, 104834), 'responses.add', 'responses.add', (['responses.POST', '"""http://metrics-url/metrics/"""'], {'json': "{'foo': 'bar'}", 'status': '(200)', 'content_type': '"""application/json"""'}), "(responses.POST, 'http://metrics-url/metrics/', json={'foo':\n 'bar'}, status=200, content_type='application/json')\n", (104717, 104834), False, 'import responses\n'), ((104905, 104962), 'django.db.models.signals.post_save.connect', 'post_save.connect', (['fire_baby_change_metric'], {'sender': 'Change'}), '(fire_baby_change_metric, sender=Change)\n', (104922, 104962), False, 'from django.db.models.signals import post_save\n'), ((105561, 105621), 'django.db.models.signals.post_save.disconnect', 'post_save.disconnect', (['fire_baby_change_metric'], {'sender': 'Change'}), '(fire_baby_change_metric, sender=Change)\n', (105581, 105621), False, 'from django.db.models.signals import post_save\n'), ((105939, 106069), 'responses.add', 'responses.add', (['responses.POST', '"""http://metrics-url/metrics/"""'], {'json': "{'foo': 'bar'}", 'status': '(200)', 'content_type': '"""application/json"""'}), "(responses.POST, 'http://metrics-url/metrics/', json={'foo':\n 'bar'}, status=200, content_type='application/json')\n", (105952, 106069), False, 'import responses\n'), ((106140, 106197), 'django.db.models.signals.post_save.connect', 'post_save.connect', (['fire_loss_change_metric'], {'sender': 'Change'}), '(fire_loss_change_metric, sender=Change)\n', (106157, 106197), False, 'from django.db.models.signals import post_save\n'), ((106796, 106856), 'django.db.models.signals.post_save.disconnect', 'post_save.disconnect', (['fire_loss_change_metric'], {'sender': 'Change'}), '(fire_loss_change_metric, sender=Change)\n', (106816, 106856), False, 'from django.db.models.signals import post_save\n'), ((107169, 107299), 'responses.add', 'responses.add', (['responses.POST', '"""http://metrics-url/metrics/"""'], {'json': "{'foo': 'bar'}", 'status': '(200)', 'content_type': '"""application/json"""'}), "(responses.POST, 'http://metrics-url/metrics/', json={'foo':\n 'bar'}, status=200, content_type='application/json')\n", (107182, 107299), False, 'import responses\n'), ((107370, 107430), 'django.db.models.signals.post_save.connect', 'post_save.connect', (['fire_message_change_metric'], {'sender': 'Change'}), '(fire_message_change_metric, sender=Change)\n', (107387, 107430), False, 'from django.db.models.signals import post_save\n'), ((108020, 108083), 
'django.db.models.signals.post_save.disconnect', 'post_save.disconnect', (['fire_message_change_metric'], {'sender': 'Change'}), '(fire_message_change_metric, sender=Change)\n', (108040, 108083), False, 'from django.db.models.signals import post_save\n'), ((108282, 108298), 'django.test.RequestFactory', 'RequestFactory', ([], {}), '()\n', (108296, 108298), False, 'from django.test import TestCase, RequestFactory\n'), ((110689, 110819), 'responses.add', 'responses.add', (['responses.POST', '"""http://metrics-url/metrics/"""'], {'json': "{'foo': 'bar'}", 'status': '(200)', 'content_type': '"""application/json"""'}), "(responses.POST, 'http://metrics-url/metrics/', json={'foo':\n 'bar'}, status=200, content_type='application/json')\n", (110702, 110819), False, 'import responses\n'), ((110990, 111137), 'responses.add_callback', 'responses.add_callback', (['responses.GET', 'url'], {'callback': 'self.optout_search_callback', 'match_querystring': '(True)', 'content_type': '"""application/json"""'}), "(responses.GET, url, callback=self.\n optout_search_callback, match_querystring=True, content_type=\n 'application/json')\n", (111012, 111137), False, 'import responses\n'), ((111223, 111370), 'responses.add_callback', 'responses.add_callback', (['responses.GET', 'url'], {'callback': 'self.optout_search_callback', 'match_querystring': '(True)', 'content_type': '"""application/json"""'}), "(responses.GET, url, callback=self.\n optout_search_callback, match_querystring=True, content_type=\n 'application/json')\n", (111245, 111370), False, 'import responses\n'), ((111502, 111649), 'responses.add_callback', 'responses.add_callback', (['responses.GET', 'url'], {'callback': 'self.optout_search_callback', 'match_querystring': '(True)', 'content_type': '"""application/json"""'}), "(responses.GET, url, callback=self.\n optout_search_callback, match_querystring=True, content_type=\n 'application/json')\n", (111524, 111649), False, 'import responses\n'), ((113931, 114061), 'responses.add', 'responses.add', (['responses.POST', '"""http://metrics-url/metrics/"""'], {'json': "{'foo': 'bar'}", 'status': '(200)', 'content_type': '"""application/json"""'}), "(responses.POST, 'http://metrics-url/metrics/', json={'foo':\n 'bar'}, status=200, content_type='application/json')\n", (113944, 114061), False, 'import responses\n'), ((114226, 114379), 'responses.add_callback', 'responses.add_callback', (['responses.GET', 'url'], {'callback': 'self.optout_search_callback_other', 'match_querystring': '(True)', 'content_type': '"""application/json"""'}), "(responses.GET, url, callback=self.\n optout_search_callback_other, match_querystring=True, content_type=\n 'application/json')\n", (114248, 114379), False, 'import responses\n'), ((114465, 114618), 'responses.add_callback', 'responses.add_callback', (['responses.GET', 'url'], {'callback': 'self.optout_search_callback_other', 'match_querystring': '(True)', 'content_type': '"""application/json"""'}), "(responses.GET, url, callback=self.\n optout_search_callback_other, match_querystring=True, content_type=\n 'application/json')\n", (114487, 114618), False, 'import responses\n'), ((114749, 114902), 'responses.add_callback', 'responses.add_callback', (['responses.GET', 'url'], {'callback': 'self.optout_search_callback_other', 'match_querystring': '(True)', 'content_type': '"""application/json"""'}), "(responses.GET, url, callback=self.\n optout_search_callback_other, match_querystring=True, content_type=\n 'application/json')\n", (114771, 114902), False, 'import responses\n'), 
((117933, 118144), 'responses.add', 'responses.add', (['responses.GET', '"""http://localhost:8005/api/v1/messageset_languages/"""'], {'json': "{'2': ['afr_ZA', 'eng_ZA'], '4': ['afr_ZA', 'eng_ZA', 'zul_ZA']}", 'status': '(200)', 'content_type': '"""application/json"""'}), "(responses.GET,\n 'http://localhost:8005/api/v1/messageset_languages/', json={'2': [\n 'afr_ZA', 'eng_ZA'], '4': ['afr_ZA', 'eng_ZA', 'zul_ZA']}, status=200,\n content_type='application/json')\n", (117946, 118144), False, 'import responses\n'), ((118377, 118668), 'responses.add', 'responses.add', (['responses.GET', "('http://localhost:8005/api/v1/messageset/%s' % query_string)"], {'json': "{'next': None, 'previous': None, 'results': [{'id': id, 'short_name':\n short_name, 'default_schedule': schedule}]}", 'status': '(200)', 'content_type': '"""application/json"""', 'match_querystring': '(True)'}), "(responses.GET, 'http://localhost:8005/api/v1/messageset/%s' %\n query_string, json={'next': None, 'previous': None, 'results': [{'id':\n id, 'short_name': short_name, 'default_schedule': schedule}]}, status=\n 200, content_type='application/json', match_querystring=True)\n", (118390, 118668), False, 'import responses\n'), ((120173, 120216), 'django.contrib.auth.models.User.objects.get', 'User.objects.get', ([], {'username': '"""testnormaluser"""'}), "(username='testnormaluser')\n", (120189, 120216), False, 'from django.contrib.auth.models import User\n'), ((120660, 120681), 'registrations.models.Source.objects.last', 'Source.objects.last', ([], {}), '()\n', (120679, 120681), False, 'from registrations.models import Source, Registration, SubscriptionRequest, registration_post_save, fire_created_metric, fire_unique_operator_metric, fire_message_type_metric, fire_receiver_type_metric, fire_source_metric, fire_language_metric, fire_state_metric, fire_role_metric\n'), ((120961, 121004), 'django.contrib.auth.models.User.objects.get', 'User.objects.get', ([], {'username': '"""testnormaluser"""'}), "(username='testnormaluser')\n", (120977, 121004), False, 'from django.contrib.auth.models import User\n'), ((121532, 121553), 'registrations.models.Source.objects.last', 'Source.objects.last', ([], {}), '()\n', (121551, 121553), False, 'from registrations.models import Source, Registration, SubscriptionRequest, registration_post_save, fire_created_metric, fire_unique_operator_metric, fire_message_type_metric, fire_receiver_type_metric, fire_source_metric, fire_language_metric, fire_state_metric, fire_role_metric\n'), ((123286, 123675), 'responses.add', 'responses.add', (['responses.GET', "('http://localhost:8005/api/v1/subscriptions/%s' % query_string)"], {'json': "{'next': None, 'previous': None, 'results': [{'id': subscription_id,\n 'identity': request['mother_id'], 'active': True, 'lang': 'eng_NG',\n 'next_sequence_number': 36, 'messageset': 2, 'schedule': 1}]}", 'status': '(200)', 'content_type': '"""application/json"""', 'match_querystring': '(True)'}), "(responses.GET, \n 'http://localhost:8005/api/v1/subscriptions/%s' % query_string, json={\n 'next': None, 'previous': None, 'results': [{'id': subscription_id,\n 'identity': request['mother_id'], 'active': True, 'lang': 'eng_NG',\n 'next_sequence_number': 36, 'messageset': 2, 'schedule': 1}]}, status=\n 200, content_type='application/json', match_querystring=True)\n", (123299, 123675), False, 'import responses\n'), ((127151, 127450), 'responses.add', 'responses.add', (['responses.GET', "('http://localhost:8001/api/v1/identities/search/?details__addresses__msisdn=%s'\n % msisdn)"], 
{'json': "{'next': None, 'previous': None, 'results': [{'id': identity_id, 'details':\n details}]}", 'status': '(200)', 'content_type': '"""application/json"""', 'match_querystring': '(True)'}), "(responses.GET, \n 'http://localhost:8001/api/v1/identities/search/?details__addresses__msisdn=%s'\n % msisdn, json={'next': None, 'previous': None, 'results': [{'id':\n identity_id, 'details': details}]}, status=200, content_type=\n 'application/json', match_querystring=True)\n", (127164, 127450), False, 'import responses\n'), ((127659, 127799), 'responses.add', 'responses.add', (['responses.POST', '"""http://localhost:8001/api/v1/optout/"""'], {'json': "{'foo': 'bar'}", 'status': '(200)', 'content_type': '"""application/json"""'}), "(responses.POST, 'http://localhost:8001/api/v1/optout/', json=\n {'foo': 'bar'}, status=200, content_type='application/json')\n", (127672, 127799), False, 'import responses\n'), ((1347, 1378), 'django.db.models.signals.post_save.has_listeners', 'post_save.has_listeners', (['Change'], {}), '(Change)\n', (1370, 1378), False, 'from django.db.models.signals import post_save\n'), ((2455, 2486), 'django.db.models.signals.post_save.has_listeners', 'post_save.has_listeners', (['Change'], {}), '(Change)\n', (2478, 2486), False, 'from django.db.models.signals import post_save\n'), ((3374, 3411), 'django.db.models.signals.post_save.has_listeners', 'post_save.has_listeners', (['Registration'], {}), '(Registration)\n', (3397, 3411), False, 'from django.db.models.signals import post_save\n'), ((4969, 5006), 'django.db.models.signals.post_save.has_listeners', 'post_save.has_listeners', (['Registration'], {}), '(Registration)\n', (4992, 5006), False, 'from django.db.models.signals import post_save\n'), ((6005, 6047), 'django.contrib.auth.models.User.objects.get', 'User.objects.get', ([], {'username': '"""testadminuser"""'}), "(username='testadminuser')\n", (6021, 6047), False, 'from django.contrib.auth.models import User\n'), ((6267, 6310), 'django.contrib.auth.models.User.objects.get', 'User.objects.get', ([], {'username': '"""testnormaluser"""'}), "(username='testnormaluser')\n", (6283, 6310), False, 'from django.contrib.auth.models import User\n'), ((13616, 13637), 'json.dumps', 'json.dumps', (['post_data'], {}), '(post_data)\n', (13626, 13637), False, 'import json\n'), ((14531, 14552), 'json.dumps', 'json.dumps', (['post_data'], {}), '(post_data)\n', (14541, 14552), False, 'import json\n'), ((15430, 15451), 'json.dumps', 'json.dumps', (['post_data'], {}), '(post_data)\n', (15440, 15451), False, 'import json\n'), ((73706, 73775), 'registrations.models.SubscriptionRequest.objects.filter', 'SubscriptionRequest.objects.filter', ([], {'identity': "change_data['mother_id']"}), "(identity=change_data['mother_id'])\n", (73740, 73775), False, 'from registrations.models import Source, Registration, SubscriptionRequest, registration_post_save, fire_created_metric, fire_unique_operator_metric, fire_message_type_metric, fire_receiver_type_metric, fire_source_metric, fire_language_metric, fire_state_metric, fire_role_metric\n'), ((74107, 74195), 'registrations.models.SubscriptionRequest.objects.filter', 'SubscriptionRequest.objects.filter', ([], {'identity': '"""629eaf3c-04e5-4404-8a27-3ab3b811326a"""'}), "(identity=\n '629eaf3c-04e5-4404-8a27-3ab3b811326a')\n", (74141, 74195), False, 'from registrations.models import Source, Registration, SubscriptionRequest, registration_post_save, fire_created_metric, fire_unique_operator_metric, fire_message_type_metric, fire_receiver_type_metric, 
fire_source_metric, fire_language_metric, fire_state_metric, fire_role_metric\n'), ((104074, 104109), 'json.loads', 'json.loads', (['last_call1.request.body'], {}), '(last_call1.request.body)\n', (104084, 104109), False, 'import json\n'), ((104203, 104238), 'json.loads', 'json.loads', (['last_call2.request.body'], {}), '(last_call2.request.body)\n', (104213, 104238), False, 'import json\n'), ((105297, 105332), 'json.loads', 'json.loads', (['last_call1.request.body'], {}), '(last_call1.request.body)\n', (105307, 105332), False, 'import json\n'), ((105434, 105469), 'json.loads', 'json.loads', (['last_call2.request.body'], {}), '(last_call2.request.body)\n', (105444, 105469), False, 'import json\n'), ((106532, 106567), 'json.loads', 'json.loads', (['last_call1.request.body'], {}), '(last_call1.request.body)\n', (106542, 106567), False, 'import json\n'), ((106669, 106704), 'json.loads', 'json.loads', (['last_call2.request.body'], {}), '(last_call2.request.body)\n', (106679, 106704), False, 'import json\n'), ((107770, 107805), 'json.loads', 'json.loads', (['last_call1.request.body'], {}), '(last_call1.request.body)\n', (107780, 107805), False, 'import json\n'), ((107900, 107935), 'json.loads', 'json.loads', (['last_call2.request.body'], {}), '(last_call2.request.body)\n', (107910, 107935), False, 'import json\n'), ((109620, 109636), 'json.dumps', 'json.dumps', (['resp'], {}), '(resp)\n', (109630, 109636), False, 'import json\n'), ((110389, 110405), 'json.dumps', 'json.dumps', (['resp'], {}), '(resp)\n', (110399, 110405), False, 'import json\n'), ((112240, 112259), 'json.dumps', 'json.dumps', (['request'], {}), '(request)\n', (112250, 112259), False, 'import json\n'), ((112465, 112508), 'json.loads', 'json.loads', (['responses.calls[0].request.body'], {}), '(responses.calls[0].request.body)\n', (112475, 112508), False, 'import json\n'), ((112604, 112647), 'json.loads', 'json.loads', (['responses.calls[2].request.body'], {}), '(responses.calls[2].request.body)\n', (112614, 112647), False, 'import json\n'), ((112750, 112793), 'json.loads', 'json.loads', (['responses.calls[3].request.body'], {}), '(responses.calls[3].request.body)\n', (112760, 112793), False, 'import json\n'), ((112882, 112925), 'json.loads', 'json.loads', (['responses.calls[5].request.body'], {}), '(responses.calls[5].request.body)\n', (112892, 112925), False, 'import json\n'), ((113021, 113064), 'json.loads', 'json.loads', (['responses.calls[6].request.body'], {}), '(responses.calls[6].request.body)\n', (113031, 113064), False, 'import json\n'), ((113148, 113191), 'json.loads', 'json.loads', (['responses.calls[8].request.body'], {}), '(responses.calls[8].request.body)\n', (113158, 113191), False, 'import json\n'), ((113282, 113325), 'json.loads', 'json.loads', (['responses.calls[9].request.body'], {}), '(responses.calls[9].request.body)\n', (113292, 113325), False, 'import json\n'), ((113407, 113451), 'json.loads', 'json.loads', (['responses.calls[11].request.body'], {}), '(responses.calls[11].request.body)\n', (113417, 113451), False, 'import json\n'), ((115486, 115505), 'json.dumps', 'json.dumps', (['request'], {}), '(request)\n', (115496, 115505), False, 'import json\n'), ((115711, 115754), 'json.loads', 'json.loads', (['responses.calls[0].request.body'], {}), '(responses.calls[0].request.body)\n', (115721, 115754), False, 'import json\n'), ((115850, 115893), 'json.loads', 'json.loads', (['responses.calls[2].request.body'], {}), '(responses.calls[2].request.body)\n', (115860, 115893), False, 'import json\n'), ((115996, 
116039), 'json.loads', 'json.loads', (['responses.calls[3].request.body'], {}), '(responses.calls[3].request.body)\n', (116006, 116039), False, 'import json\n'), ((116122, 116165), 'json.loads', 'json.loads', (['responses.calls[5].request.body'], {}), '(responses.calls[5].request.body)\n', (116132, 116165), False, 'import json\n'), ((116255, 116298), 'json.loads', 'json.loads', (['responses.calls[6].request.body'], {}), '(responses.calls[6].request.body)\n', (116265, 116298), False, 'import json\n'), ((116383, 116426), 'json.loads', 'json.loads', (['responses.calls[8].request.body'], {}), '(responses.calls[8].request.body)\n', (116393, 116426), False, 'import json\n'), ((116518, 116561), 'json.loads', 'json.loads', (['responses.calls[9].request.body'], {}), '(responses.calls[9].request.body)\n', (116528, 116561), False, 'import json\n'), ((116642, 116686), 'json.loads', 'json.loads', (['responses.calls[11].request.body'], {}), '(responses.calls[11].request.body)\n', (116652, 116686), False, 'import json\n'), ((117331, 117350), 'json.dumps', 'json.dumps', (['request'], {}), '(request)\n', (117341, 117350), False, 'import json\n'), ((117504, 117539), 'hellomama_registration.utils.json_decode', 'utils.json_decode', (['response.content'], {}), '(response.content)\n', (117521, 117539), False, 'from hellomama_registration import utils\n'), ((119071, 119090), 'json.dumps', 'json.dumps', (['request'], {}), '(request)\n', (119081, 119090), False, 'import json\n'), ((119244, 119279), 'hellomama_registration.utils.json_decode', 'utils.json_decode', (['response.content'], {}), '(response.content)\n', (119261, 119279), False, 'from hellomama_registration import utils\n'), ((119647, 119666), 'json.dumps', 'json.dumps', (['request'], {}), '(request)\n', (119657, 119666), False, 'import json\n'), ((120327, 120346), 'json.dumps', 'json.dumps', (['request'], {}), '(request)\n', (120337, 120346), False, 'import json\n'), ((121199, 121218), 'json.dumps', 'json.dumps', (['request'], {}), '(request)\n', (121209, 121218), False, 'import json\n'), ((121921, 121940), 'json.dumps', 'json.dumps', (['request'], {}), '(request)\n', (121931, 121940), False, 'import json\n'), ((122094, 122129), 'hellomama_registration.utils.json_decode', 'utils.json_decode', (['response.content'], {}), '(response.content)\n', (122111, 122129), False, 'from hellomama_registration import utils\n'), ((122528, 122547), 'json.dumps', 'json.dumps', (['request'], {}), '(request)\n', (122538, 122547), False, 'import json\n'), ((122714, 122749), 'hellomama_registration.utils.json_decode', 'utils.json_decode', (['response.content'], {}), '(response.content)\n', (122731, 122749), False, 'from hellomama_registration import utils\n'), ((124089, 124108), 'json.dumps', 'json.dumps', (['request'], {}), '(request)\n', (124099, 124108), False, 'import json\n'), ((124833, 124852), 'json.dumps', 'json.dumps', (['request'], {}), '(request)\n', (124843, 124852), False, 'import json\n'), ((125802, 125821), 'json.dumps', 'json.dumps', (['request'], {}), '(request)\n', (125812, 125821), False, 'import json\n'), ((126807, 126826), 'json.dumps', 'json.dumps', (['request'], {}), '(request)\n', (126817, 126826), False, 'import json\n'), ((128384, 128405), 'json.dumps', 'json.dumps', (['post_data'], {}), '(post_data)\n', (128394, 128405), False, 'import json\n'), ((129585, 129606), 'json.dumps', 'json.dumps', (['post_data'], {}), '(post_data)\n', (129595, 129606), False, 'import json\n'), ((130799, 130820), 'json.dumps', 'json.dumps', (['post_data'], {}), 
'(post_data)\n', (130809, 130820), False, 'import json\n'), ((132155, 132176), 'json.dumps', 'json.dumps', (['post_data'], {}), '(post_data)\n', (132165, 132176), False, 'import json\n'), ((133557, 133578), 'json.dumps', 'json.dumps', (['post_data'], {}), '(post_data)\n', (133567, 133578), False, 'import json\n'), ((134559, 134580), 'json.dumps', 'json.dumps', (['post_data'], {}), '(post_data)\n', (134569, 134580), False, 'import json\n'), ((134786, 134821), 'hellomama_registration.utils.json_decode', 'utils.json_decode', (['response.content'], {}), '(response.content)\n', (134803, 134821), False, 'from hellomama_registration import utils\n'), ((135415, 135436), 'json.dumps', 'json.dumps', (['post_data'], {}), '(post_data)\n', (135425, 135436), False, 'import json\n'), ((135642, 135677), 'hellomama_registration.utils.json_decode', 'utils.json_decode', (['response.content'], {}), '(response.content)\n', (135659, 135677), False, 'from hellomama_registration import utils\n'), ((136140, 136161), 'json.dumps', 'json.dumps', (['post_data'], {}), '(post_data)\n', (136150, 136161), False, 'import json\n'), ((136367, 136402), 'hellomama_registration.utils.json_decode', 'utils.json_decode', (['response.content'], {}), '(response.content)\n', (136384, 136402), False, 'from hellomama_registration import utils\n')] |
import datetime
import os
import sys
from typing import Iterator, List
from tqdm import tqdm
from discopy_data.data.doc import Document
from discopy_data.data.sentence import Sentence, DepRel
from discopy_data.data.token import Token
def load_parser(use_gpu=False):
    """Initialise a trankit English pipeline, keeping its banner output off stdout."""
    import trankit
    tmp_stdout = sys.stdout
    sys.stdout = sys.stderr  # trankit prints status messages while loading models
    parser = trankit.Pipeline('english', cache_dir=os.path.expanduser('~/.trankit/'), gpu=use_gpu)
    parser.tokenize("Init")  # warm-up call so the pipeline is fully initialised
    sys.stdout = tmp_stdout
    return parser
def get_tokenized_sentences(parser, text):
parsed = parser.tokenize(text)
token_offset = 0
sents = []
for sent_i, sent in enumerate(parsed['sentences']):
words = [
Token(token_offset + w_i, sent_i, w_i, t['dspan'][0], t['dspan'][1], t['text'])
for w_i, t in enumerate(sent['tokens'])
]
token_offset += len(words)
sents.append(Sentence(words).to_json())
return sents
def get_parsed_sentences(parser, text):
parsed = parser(text)
token_offset = 0
sents = []
for sent_i, sent in enumerate(parsed['sentences']):
words = [
Token(token_offset + w_i, sent_i, w_i, t['dspan'][0], t['dspan'][1], t['text'],
upos=t['upos'], xpos=t['xpos'], lemma=t['lemma'])
for w_i, t in enumerate(sent['tokens'])
]
dependencies = [
DepRel(rel=t['deprel'].lower(),
head=words[int(t['head']) - 1] if t['deprel'].lower() != 'root' else None,
dep=words[dep]
) for dep, t in enumerate(sent['tokens'])
]
token_offset += len(words)
sents.append(Sentence(words, dependencies=dependencies).to_json())
return sents
def load_texts(texts: List[str], tokenize_only=False) -> Iterator[Document]:
parser = load_parser()
for text_i, text in tqdm(enumerate(texts)):
sentences = get_tokenized_sentences(parser, text) if tokenize_only else get_parsed_sentences(parser, text)
yield Document.from_json({
'docID': hash(text),
'meta': {
'fileID': f'raw_{text_i:05}',
'corpus': 'raw',
'created': datetime.datetime.now().isoformat(),
},
'text': text,
'sentences': sentences
}, load_relations=False)
def load_texts_fast(texts: List[str], tokenize_only=True) -> Iterator[Document]:
from nltk.tokenize import sent_tokenize
from nltk.tokenize import TreebankWordTokenizer
for text_i, text in tqdm(enumerate(texts)):
        # compute absolute end offsets of each sentence in the raw text so that
        # inter-sentence whitespace cannot shift the boundaries
        sentence_ends = []
        pos = 0
        for s in sent_tokenize(text):
            pos = text.find(s, pos) + len(s)
            sentence_ends.append(pos)
        sents = []
        sent_i = 0
        w_i = 0
        words = []
        for tok_i, (tok_start, tok_end) in enumerate(TreebankWordTokenizer().span_tokenize(text)):
            form = text[tok_start:tok_end]
            words.append(Token(tok_i, sent_i, w_i, tok_start, tok_end, form))
            w_i += 1
            if tok_end >= sentence_ends[sent_i]:
                sents.append(Sentence(words).to_json())
                sent_i += 1
                if sent_i >= len(sentence_ends):
                    break
                words = []  # start a fresh token buffer for the next sentence
                w_i = 0
yield Document.from_json({
'docID': hash(text),
'meta': {
'fileID': f'raw_{text_i:05}',
'corpus': 'raw',
'created': datetime.datetime.now().isoformat(),
},
'text': text,
'sentences': sents
}, load_relations=False)
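# Usage sketch (assumption, not part of the original module): both loaders are
# generators, so iterate them lazily or materialise them with list(). The fast
# path needs nltk's 'punkt' data; the full path needs a trankit model download.
#   docs = list(load_texts_fast(["One sentence. Another sentence. A third one."]))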
| [
"discopy_data.data.sentence.Sentence",
"discopy_data.data.token.Token",
"datetime.datetime.now",
"nltk.tokenize.sent_tokenize",
"nltk.tokenize.TreebankWordTokenizer",
"os.path.expanduser"
] | [((386, 419), 'os.path.expanduser', 'os.path.expanduser', (['"""~/.trankit/"""'], {}), "('~/.trankit/')\n", (404, 419), False, 'import os\n'), ((710, 789), 'discopy_data.data.token.Token', 'Token', (['(token_offset + w_i)', 'sent_i', 'w_i', "t['dspan'][0]", "t['dspan'][1]", "t['text']"], {}), "(token_offset + w_i, sent_i, w_i, t['dspan'][0], t['dspan'][1], t['text'])\n", (715, 789), False, 'from discopy_data.data.token import Token\n'), ((1142, 1276), 'discopy_data.data.token.Token', 'Token', (['(token_offset + w_i)', 'sent_i', 'w_i', "t['dspan'][0]", "t['dspan'][1]", "t['text']"], {'upos': "t['upos']", 'xpos': "t['xpos']", 'lemma': "t['lemma']"}), "(token_offset + w_i, sent_i, w_i, t['dspan'][0], t['dspan'][1], t[\n 'text'], upos=t['upos'], xpos=t['xpos'], lemma=t['lemma'])\n", (1147, 1276), False, 'from discopy_data.data.token import Token\n'), ((2621, 2640), 'nltk.tokenize.sent_tokenize', 'sent_tokenize', (['text'], {}), '(text)\n', (2634, 2640), False, 'from nltk.tokenize import sent_tokenize\n'), ((2906, 2957), 'discopy_data.data.token.Token', 'Token', (['tok_i', 'sent_i', 'w_i', 'tok_start', 'tok_end', 'form'], {}), '(tok_i, sent_i, w_i, tok_start, tok_end, form)\n', (2911, 2957), False, 'from discopy_data.data.token import Token\n'), ((908, 923), 'discopy_data.data.sentence.Sentence', 'Sentence', (['words'], {}), '(words)\n', (916, 923), False, 'from discopy_data.data.sentence import Sentence, DepRel\n'), ((1676, 1718), 'discopy_data.data.sentence.Sentence', 'Sentence', (['words'], {'dependencies': 'dependencies'}), '(words, dependencies=dependencies)\n', (1684, 1718), False, 'from discopy_data.data.sentence import Sentence, DepRel\n'), ((2792, 2815), 'nltk.tokenize.TreebankWordTokenizer', 'TreebankWordTokenizer', ([], {}), '()\n', (2813, 2815), False, 'from nltk.tokenize import TreebankWordTokenizer\n'), ((3076, 3091), 'discopy_data.data.sentence.Sentence', 'Sentence', (['words'], {}), '(words)\n', (3084, 3091), False, 'from discopy_data.data.sentence import Sentence, DepRel\n'), ((2208, 2231), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (2229, 2231), False, 'import datetime\n'), ((3484, 3507), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (3505, 3507), False, 'import datetime\n')] |
'''
Created by trangvu on 1/10/19
'''
import argparse
import logging
import os
import re
import json
import ast
from nltk.tokenize import sent_tokenize
logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
datefmt='%m/%d/%Y %H:%M:%S',
level=logging.INFO)
logger = logging.getLogger(__name__)
def main():
parser = argparse.ArgumentParser()
## Required parameters
parser.add_argument("--input_dir",
default=None,
type=str,
required=True,
help="The input data dir. Should contain the .tsv files (or other data files) for the task.")
parser.add_argument("--prefix_name",
type=str,
default="wiki",
help="Prefix name")
parser.add_argument("--output_dir",
default=None,
type=str,
required=True,
help="The output directory where the model predictions and checkpoints will be written.")
args = parser.parse_args()
rx = r'{}_([0-9]+)'.format(re.escape(args.prefix_name))
partitions = [re.findall(rx, f)[0] for f in os.listdir(args.input_dir) if re.match(rx, f)]
for idx in partitions:
discard = 0
select = 0
with open("{}/{}_{}".format(args.output_dir, args.prefix_name, idx), 'w') as fout:
with open("{}/{}_{}".format(args.input_dir, args.prefix_name, idx), 'r') as fin:
for line in fin:
doc = json.loads(line.strip())
txt = doc['text']
# txt = ast.literal_eval(doc['text'])
#cleanup text: remove links
uri=r'<?\w+:\/?\/?[^\s]+>?'
clean_txt = re.sub(uri, ' ', txt)
tempstyles=r'<\/?templatestyles[^>]*>'
ref = r'<\/?ref[^>]*>'
clean_txt = re.sub(tempstyles, ' ', clean_txt)
clean_txt = re.sub(ref, ' ', clean_txt)
docs = clean_txt.split('\n')
for doc in docs:
sentences = sent_tokenize(doc)
if len(sentences) < 3:
discard += 1
else:
select += 1
for sent in sentences:
sent = sent.strip()
if len(sent) > 0:
fout.write("{}\n".format(sent))
fout.write("\n")
if __name__ == "__main__":
main()
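# Illustrative effect of the clean-up regexes above: in a line such as
#   'See <https://example.org/page> and <ref name="a">a note</ref>.',
# the URL and both <ref>...</ref> tags are replaced by single spaces while the
# note text itself is kept for sentence splitting.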
| [
"logging.basicConfig",
"logging.getLogger",
"re.escape",
"os.listdir",
"argparse.ArgumentParser",
"re.match",
"nltk.tokenize.sent_tokenize",
"re.sub",
"re.findall"
] | [((153, 296), 'logging.basicConfig', 'logging.basicConfig', ([], {'format': '"""%(asctime)s - %(levelname)s - %(name)s - %(message)s"""', 'datefmt': '"""%m/%d/%Y %H:%M:%S"""', 'level': 'logging.INFO'}), "(format=\n '%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt=\n '%m/%d/%Y %H:%M:%S', level=logging.INFO)\n", (172, 296), False, 'import logging\n'), ((336, 363), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (353, 363), False, 'import logging\n'), ((390, 415), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (413, 415), False, 'import argparse\n'), ((1199, 1226), 're.escape', 're.escape', (['args.prefix_name'], {}), '(args.prefix_name)\n', (1208, 1226), False, 'import re\n'), ((1246, 1263), 're.findall', 're.findall', (['rx', 'f'], {}), '(rx, f)\n', (1256, 1263), False, 'import re\n'), ((1276, 1302), 'os.listdir', 'os.listdir', (['args.input_dir'], {}), '(args.input_dir)\n', (1286, 1302), False, 'import os\n'), ((1306, 1321), 're.match', 're.match', (['rx', 'f'], {}), '(rx, f)\n', (1314, 1321), False, 'import re\n'), ((1881, 1902), 're.sub', 're.sub', (['uri', '""" """', 'txt'], {}), "(uri, ' ', txt)\n", (1887, 1902), False, 'import re\n'), ((2037, 2071), 're.sub', 're.sub', (['tempstyles', '""" """', 'clean_txt'], {}), "(tempstyles, ' ', clean_txt)\n", (2043, 2071), False, 'import re\n'), ((2104, 2131), 're.sub', 're.sub', (['ref', '""" """', 'clean_txt'], {}), "(ref, ' ', clean_txt)\n", (2110, 2131), False, 'import re\n'), ((2254, 2272), 'nltk.tokenize.sent_tokenize', 'sent_tokenize', (['doc'], {}), '(doc)\n', (2267, 2272), False, 'from nltk.tokenize import sent_tokenize\n')] |
import numpy as np
import pandas as pd
import logging
import cerf.package_data as pkg
def generate_random_lmp_dataframe(n_zones=57, low_value=10, mid_value=300, high_value=500, n_samples=5000):
"""Generate a random dataframe of hourly 8760 LMP values per lmp zone. Let high value LMPs only be used
for 15 percent of the data.
:param n_zones: Number of zones to process
:param low_value: Desired minimum value of MWh
:param mid_value: Desired mid value of MWh to split the 85-15 split to
:param high_value: Desired max value of MWh
:param n_samples: Number of intervals to split the min, max choices by
:return: Data frame of LMPs per zone
"""
# initialize a dictionary with the hour count for the number of hours in a year
d = {'hour': list(range(1, 8761, 1))}
# create an array with n_samples covering an equal space from low to mid values
array_1 = np.linspace(low_value, mid_value, n_samples)
# create an array with n_samples covering an equal space from mid to low values
array_2 = np.linspace(mid_value, high_value, n_samples)
# let only 15 percent of values come from high cost values
threshold = 8760 - (8760 * 0.15)
# create an LMP array for each zone
for i in range(n_zones):
# construct a list of random LMP values
l = []
for j in range(8760):
if j < threshold:
l.append(np.random.choice(array_1))
else:
l.append(np.random.choice(array_2))
# shuffle the list
np.random.shuffle(l)
# assign to dict
d[i] = l
# convert to data frame
return pd.DataFrame(d)
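# Usage sketch (illustrative):
#   lmps = generate_random_lmp_dataframe(n_zones=3)
#   lmps.shape  # -> (8760, 4): the 'hour' column plus one column per zone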
class LocationalMarginalPricing:
"""Create a 3D array of locational marginal pricing per technology by capacity factor.
Locational Marginal Pricing (LMP) represents the cost of making and delivering electricity
over an interconnected network of service nodes. LMPs are delivered on an hourly basis
(8760 hours for the year) and help us to understand aspects of generation and congestion
costs relative to the supply and demand of electricity when considering existing transmission
    infrastructure. LMPs are also driven by factors such as the cost of fuel, which cerf also
    takes into account when calculating a power plant's :ref:`Net Operating Value`. When working
with a scenario-driven grid operations model to evaluate the future evolution of the electricity
system, **cerf** can ingest LMPs, return the sited generation per service area for the time
step, and then continue this iteration through all future years to provide a harmonized view
    of how the electricity system may respond to stressors in the future.
:param lmp_zone_dict: A dictionary containing lmp related settings from the config file
:type lmp_zone_dict: dict
:param technology_dict: A dictionary containing technology related settings from the config file
:type technology_dict: dict
:param technology_order: A list of technologies in the order by which they should be processed
    :type technology_order: list
    :param zones_arr: An array containing the lmp zones per grid cell
    :type zones_arr: numpy.ndarray
"""
def __init__(self, lmp_zone_dict, technology_dict, technology_order, zones_arr):
# dictionary containing lmp zones information
self.lmp_zone_dict = lmp_zone_dict
# dictionary containing technology specific information
self.technology_dict = technology_dict
# order of technologies to process
self.technology_order = technology_order
# array containing the lmp zones per grid cell
self.zones_arr = zones_arr
@staticmethod
def get_cf_bin(capacity_factor):
"""Get the correct start and through index values to average over for calculating LMP."""
if capacity_factor == 1.0:
start_index = 0
through_index = 8760
elif capacity_factor >= 0.5:
start_index = int(np.ceil(8760 * (1 - capacity_factor)))
through_index = 8760
elif capacity_factor == 0.0:
msg = f"The capacity factor provided `{capacity_factor}` is outside the bounds of 0.0 through 1.0"
raise ValueError(msg)
else:
start_index = 0
through_index = int(np.ceil(8760 * capacity_factor))
return start_index, through_index
def get_lmp(self):
"""Create LMP array for the current technology.
:return: 3D numpy array of LMP where [tech_id, x, y]
"""
# number of technologies
n_technologies = len(self.technology_dict)
lmp_arr = np.zeros(shape=(n_technologies, self.zones_arr.shape[0], self.zones_arr.shape[1]))
# get the LMP file for the technology from the configuration file
lmp_file = self.lmp_zone_dict.get('lmp_hourly_data_file', None)
# use illustrative default if none provided
if lmp_file is None:
# default illustrative LMP file
lmp_file = pkg.get_sample_lmp_file()
logging.info(f"Using LMP from default illustrative package data: {lmp_file}")
else:
logging.info(f"Using LMP file: {lmp_file}")
lmp_df = pd.read_csv(lmp_file)
# drop the hour field
lmp_df.drop('hour', axis=1, inplace=True)
for index, i in enumerate(self.technology_order):
# assign the correct LMP based on the capacity factor of the technology
start_index, through_index = self.get_cf_bin(self.technology_dict[i]['capacity_factor'])
# sort by descending lmp for each zone
for j in lmp_df.columns:
lmp_df[j] = lmp_df[j].sort_values(ascending=False).values
# create a dictionary of LMP values for each power zone based on tech capacity factor
lmp_dict = lmp_df.iloc[start_index:through_index].mean(axis=0).to_dict()
lmp_dict = {int(k): lmp_dict[k] for k in lmp_dict.keys()}
# add in no data
lmp_dict[self.lmp_zone_dict['lmp_zone_raster_nodata_value']] = np.nan
# create LMP array for the current technology
lmp_arr[index, :, :] = np.vectorize(lmp_dict.get)(self.zones_arr)
return lmp_arr
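# Worked example for get_cf_bin (illustrative): a capacity factor of 0.85 gives
#   start_index = ceil(8760 * (1 - 0.85)) = 1314, through_index = 8760,
# so that technology's LMP per zone is the mean of the 7,446 cheapest hourly
# prices once each zone's prices are sorted in descending order.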
| [
"numpy.ceil",
"pandas.read_csv",
"numpy.random.choice",
"cerf.package_data.get_sample_lmp_file",
"numpy.linspace",
"numpy.zeros",
"numpy.vectorize",
"pandas.DataFrame",
"logging.info",
"numpy.random.shuffle"
] | [((1028, 1072), 'numpy.linspace', 'np.linspace', (['low_value', 'mid_value', 'n_samples'], {}), '(low_value, mid_value, n_samples)\n', (1039, 1072), True, 'import numpy as np\n'), ((1172, 1217), 'numpy.linspace', 'np.linspace', (['mid_value', 'high_value', 'n_samples'], {}), '(mid_value, high_value, n_samples)\n', (1183, 1217), True, 'import numpy as np\n'), ((1777, 1792), 'pandas.DataFrame', 'pd.DataFrame', (['d'], {}), '(d)\n', (1789, 1792), True, 'import pandas as pd\n'), ((1673, 1693), 'numpy.random.shuffle', 'np.random.shuffle', (['l'], {}), '(l)\n', (1690, 1693), True, 'import numpy as np\n'), ((5004, 5091), 'numpy.zeros', 'np.zeros', ([], {'shape': '(n_technologies, self.zones_arr.shape[0], self.zones_arr.shape[1])'}), '(shape=(n_technologies, self.zones_arr.shape[0], self.zones_arr.\n shape[1]))\n', (5012, 5091), True, 'import numpy as np\n'), ((5591, 5612), 'pandas.read_csv', 'pd.read_csv', (['lmp_file'], {}), '(lmp_file)\n', (5602, 5612), True, 'import pandas as pd\n'), ((5384, 5409), 'cerf.package_data.get_sample_lmp_file', 'pkg.get_sample_lmp_file', ([], {}), '()\n', (5407, 5409), True, 'import cerf.package_data as pkg\n'), ((5422, 5500), 'logging.info', 'logging.info', (['f"""Using LMP from default illustrative package data: {lmp_file}"""'], {}), "(f'Using LMP from default illustrative package data: {lmp_file}')\n", (5434, 5500), False, 'import logging\n'), ((5528, 5572), 'logging.info', 'logging.info', (['f"""Using LMP file: {lmp_file}"""'], {}), "(f'Using LMP file: {lmp_file}')\n", (5540, 5572), False, 'import logging\n'), ((6562, 6588), 'numpy.vectorize', 'np.vectorize', (['lmp_dict.get'], {}), '(lmp_dict.get)\n', (6574, 6588), True, 'import numpy as np\n'), ((1539, 1564), 'numpy.random.choice', 'np.random.choice', (['array_1'], {}), '(array_1)\n', (1555, 1564), True, 'import numpy as np\n'), ((1610, 1635), 'numpy.random.choice', 'np.random.choice', (['array_2'], {}), '(array_2)\n', (1626, 1635), True, 'import numpy as np\n'), ((4320, 4357), 'numpy.ceil', 'np.ceil', (['(8760 * (1 - capacity_factor))'], {}), '(8760 * (1 - capacity_factor))\n', (4327, 4357), True, 'import numpy as np\n'), ((4650, 4681), 'numpy.ceil', 'np.ceil', (['(8760 * capacity_factor)'], {}), '(8760 * capacity_factor)\n', (4657, 4681), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
#!/usr/bin/env python
import argparse
import asyncore
import email.parser
import logging
import os
import smtpd
import sys
sys.path.insert(0, os.path.join(os.path.dirname(__file__), "www"))
import gevent
import gevent.monkey
gevent.monkey.patch_select()
from flask_app import messages
parser = argparse.ArgumentParser(usage="%(prog)s [options] args...")
parser.add_argument('-v', action='append_const', const=1, dest='verbosity', default=[],
help="Be more verbose. Can be specified multiple times to increase verbosity further")
parser.add_argument("-p", "--port", default=25, type=int)
_parse_email_str = email.parser.Parser().parsestr
def main(args):
server = SMTPServer(("0.0.0.0", args.port), None)
try:
asyncore.loop()
except KeyboardInterrupt:
logging.info('Finished')
return 0
class SMTPServer(smtpd.SMTPServer, object):
def process_message(self, peer, mailfrom, rcpttos, data):
subject = _parse_email_str(data)['subject']
logging.debug("Got message: %s", dict(to=rcpttos, sender=mailfrom, subject=subject, body=data))
messages.process_message(peer, mailfrom, rcpttos, data)
################################## Boilerplate ################################
def _configure_logging(args):
verbosity_level = len(args.verbosity)
if verbosity_level == 0:
level = 'WARNING'
elif verbosity_level == 1:
level = 'INFO'
else:
level = 'DEBUG'
logging.basicConfig(
stream=sys.stderr,
level=level,
format='%(asctime)s -- %(message)s'
)
#### For use with entry_points/console_scripts
def main_entry_point():
args = parser.parse_args()
_configure_logging(args)
sys.exit(main(args))
if __name__ == '__main__':
main_entry_point()
################################################################################
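# Manual test (assumption): with the server started on a free port, e.g. `-p 2525`,
# a message can be pushed through it with the standard-library client:
#
#   import smtplib
#   with smtplib.SMTP('localhost', 2525) as client:
#       client.sendmail('me@example.com', ['you@example.com'],
#                       'Subject: hello\n\nbody text')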
| [
"logging.basicConfig",
"argparse.ArgumentParser",
"gevent.monkey.patch_select",
"asyncore.loop",
"os.path.dirname",
"flask_app.messages.process_message",
"logging.info"
] | [((252, 280), 'gevent.monkey.patch_select', 'gevent.monkey.patch_select', ([], {}), '()\n', (278, 280), False, 'import gevent\n'), ((323, 382), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'usage': '"""%(prog)s [options] args..."""'}), "(usage='%(prog)s [options] args...')\n", (346, 382), False, 'import argparse\n'), ((1494, 1587), 'logging.basicConfig', 'logging.basicConfig', ([], {'stream': 'sys.stderr', 'level': 'level', 'format': '"""%(asctime)s -- %(message)s"""'}), "(stream=sys.stderr, level=level, format=\n '%(asctime)s -- %(message)s')\n", (1513, 1587), False, 'import logging\n'), ((181, 206), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (196, 206), False, 'import os\n'), ((775, 790), 'asyncore.loop', 'asyncore.loop', ([], {}), '()\n', (788, 790), False, 'import asyncore\n'), ((1138, 1193), 'flask_app.messages.process_message', 'messages.process_message', (['peer', 'mailfrom', 'rcpttos', 'data'], {}), '(peer, mailfrom, rcpttos, data)\n', (1162, 1193), False, 'from flask_app import messages\n'), ((829, 853), 'logging.info', 'logging.info', (['"""Finished"""'], {}), "('Finished')\n", (841, 853), False, 'import logging\n')] |
"""URLs for authentication module."""
from django.urls import path

from cpovc_auth import views

# This should contain urls related to auth app ONLY
urlpatterns = [
    path('', views.home),
    path('register/', views.register),
    path('ping/', views.user_ping, name='user_ping'),
    path('roles/', views.roles_home, name='roles_home'),
    path('roles/edit/<int:user_id>/', views.roles_edit,
         name='roles_edit'),
]
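# Example (illustrative): the named routes above resolve with django.urls.reverse,
# e.g. reverse('roles_edit', kwargs={'user_id': 7}) -> '.../roles/edit/7/'.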
| [
"django.urls.path"
] | [((164, 182), 'django.urls.path', 'path', (['"""^$"""', '"""home"""'], {}), "('^$', 'home')\n", (168, 182), False, 'from django.urls import path\n'), ((189, 220), 'django.urls.path', 'path', (['"""^register/$"""', '"""register"""'], {}), "('^register/$', 'register')\n", (193, 220), False, 'from django.urls import path\n'), ((227, 273), 'django.urls.path', 'path', (['"""^ping/$"""', '"""user_ping"""'], {'name': '"""user_ping"""'}), "('^ping/$', 'user_ping', name='user_ping')\n", (231, 273), False, 'from django.urls import path\n'), ((280, 329), 'django.urls.path', 'path', (['"""^roles/$"""', '"""roles_home"""'], {'name': '"""roles_home"""'}), "('^roles/$', 'roles_home', name='roles_home')\n", (284, 329), False, 'from django.urls import path\n'), ((336, 408), 'django.urls.path', 'path', (['"""^roles/edit/(?P<user_id>\\\\d+)/$"""', '"""roles_edit"""'], {'name': '"""roles_edit"""'}), "('^roles/edit/(?P<user_id>\\\\d+)/$', 'roles_edit', name='roles_edit')\n", (340, 408), False, 'from django.urls import path\n')] |
from typing import List
from sqlalchemy.orm import Session
import models
def get_restaurant(db: Session, id: int):
return db.query(models.Restaurant).filter(models.Restaurant.id == id).first()
def get_restaurants(db: Session):
return db.query(models.Restaurant).all()
def get_identifiers(db: Session, restaurant_id: int):
return db.query(models.Identifier).filter(models.Identifier.restaurant_id == restaurant_id).all()
def get_menu_items(db: Session, menu_item_ids: List[int]):
return db.query(models.MenuItem).filter(models.MenuItem.id.in_(menu_item_ids)).all()
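# Usage sketch (illustrative; `SessionLocal` is assumed from the app's database setup):
#   db = SessionLocal()
#   get_restaurant(db, 1)       # -> one Restaurant or None
#   get_menu_items(db, [1, 2])  # -> MenuItem rows whose ids are in the list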
def reset_db(db: Session):
# clear db (everything cascade deletes on restaurants)
restaurants = get_restaurants(db)
for restaurant in restaurants:
db.delete(restaurant)
db.commit()
dunkin = models.Restaurant(
id = 1,
name="Dunkin Donuts",
img=r"https://upload.wikimedia.org/wikipedia/en/thumb/b/b8/Dunkin%27_Donuts_logo.svg/1200px-Dunkin%27_Donuts_logo.svg.png",
menu_items=[
models.MenuItem(
name="Coffee",
desc="Our famous Hot Coffee is made from high-quality 100% Arabica beans and is freshly ground and brewed continually throughout the day.",
price=1.99,
img=r"https://www.dunkindonuts.com/content/dam/dd/img/menu-redesign/espresso-coffee/pdpespressocoffee/Coffee_570x570.png",
identifiers=[
models.Identifier(
restaurant_id=1,
identifier="coffee"
),
models.Identifier(
restaurant_id=1,
identifier="cup of joe"
)
]
),
models.MenuItem(
name="Americano",
desc="Our Hot Americano puts the oh! in Americano by combining two shots of Dunkin’s 100% Rainforest Alliance Certified™ espresso with hot water creating a rich, robust drink.",
price=3.99,
img=r"https://www.dunkindonuts.com/content/dam/dd/img/menu-redesign/espresso-coffee/pdpespressocoffee/Americano_570x570.png",
identifiers=[
models.Identifier(
restaurant_id=1,
identifier="americano"
),
]
),
models.MenuItem(
name="Latte",
desc="Made with steamed, frothy milk, blended with our rich, freshly ground and brewed espresso. Our Latte has a layer of foam and is the perfect balance of creamy and smooth to get you goin'.",
img=r"https://www.dunkindonuts.com/content/dam/dd/img/menu-redesign/espresso-coffee/pdpespressocoffee/Latte_570x570.png",
price=3.99,
identifiers=[
models.Identifier(
restaurant_id=1,
identifier="latte"
),
]
)
]
)
mcdonalds = models.Restaurant(
id=2,
name="McDonalds",
img=r"https://upload.wikimedia.org/wikipedia/commons/thumb/3/36/McDonald%27s_Golden_Arches.svg/2339px-McDonald%27s_Golden_Arches.svg.png",
menu_items=[
models.MenuItem(
name="Big Mac",
desc=r"Mouthwatering perfection starts with two 100% pure beef patties and Big Mac® sauce sandwiched between a sesame seed bun. It’s topped off with pickles, crisp shredded lettuce, finely chopped onion and American cheese for a 100% beef burger with a taste like no other.",
price=7.89,
img=r"https://www.mcdonalds.com/is/image/content/dam/usa/nfl/nutrition/items/hero/desktop/t-mcdonalds-Big-Mac.jpg?$Product_Desktop$",
identifiers=[
models.Identifier(
restaurant_id=2,
identifier="big mac"
),
models.Identifier(
restaurant_id=2,
identifier="number one"
)
]
),
models.MenuItem(
name="Quarter Pounder",
desc=r"Each Quarter Pounder® with Cheese burger features a ¼ lb.* of 100% fresh beef that’s hot, deliciously juicy and cooked when you order. It’s seasoned with just a pinch of salt and pepper, sizzled on a flat iron grill, then topped with slivered onions, tangy pickles and two slices of melty American cheese on a sesame seed bun.",
price=6.29,
img=r"https://www.mcdonalds.com/is/image/content/dam/usa/nfl/nutrition/items/hero/desktop/t-mcdonalds-Quarter-Pounder-with-Cheese.jpg?$Product_Desktop$",
identifiers=[
models.Identifier(
restaurant_id=2,
identifier="quarter pounder"
),
models.Identifier(
restaurant_id=2,
identifier="qp"
),
models.Identifier(
restaurant_id=2,
identifier="number two"
)
]
),
],
)
    panera = models.Restaurant(
id=3,
name='Panera Bread',
img=r"https://www.panerabread.com/content/dam/panerabread/menu-omni/integrated-web/branding/panera-bread-logo-no-mother-bread.svg",
menu_items =[
models.MenuItem(
name="Chipotle Chicken Avocado Melt",
desc=r"Smoked, pulled chicken raised without antibiotics, smoked Gouda, fresh avocado and cilantro, zesty sweet Peppadew™ peppers and chipotle sauce on Black Pepper Focaccia.",
price=9.99,
img=r"https://www.panerabread.com/content/dam/panerabread/menu-omni/integrated-web/grid/rect/chipotle-chicken-avocado-sandwich-sku-test-whole.jpg.transform/rect-grid-image/image.20211207.jpg",
identifiers=[
models.Identifier(
restaurant_id=3,
identifier='chipotle chicken avocado'
),
models.Identifier(
restaurant_id=3,
identifier='chicken chipotle avocado'
)
]
),
models.MenuItem(
name="Hazelnut Coffee",
desc=r"Smooth and sweet with a buttery toasted hazelnut flavor. 100% Arabica coffee balanced with the flavor of buttery, toasted hazelnuts.",
price=2.39,
img=r"https://www.panerabread.com/content/dam/panerabread/menu-omni/integrated-web/grid/rect/hot-coffee-medium.jpg.transform/rect-grid-image/image.20211207.jpg",
identifiers=[
models.Identifier(
restaurant_id=3,
identifier='hazelnut coffee'
),
models.Identifier(
restaurant_id=3,
identifier='hazelnut'
),
models.Identifier(
restaurant_id=3,
identifier='hot hazelnut'
),
models.Identifier(
restaurant_id=3,
identifier='coffee'
)
]
),
models.MenuItem(
name="BBQ Chicken Salad",
desc=r"Chicken raised without antibiotics, romaine, black bean and corn salsa and BBQ ranch dressing topped with frizzled onions and apple cider vinegar BBQ sauce.",
price=10.29,
img=r"https://www.panerabread.com/content/dam/panerabread/menu-omni/integrated-web/grid/rect/bbq-chicken-salad-whole.jpg.transform/rect-grid-image/image.20211207.jpg",
identifiers=[
models.Identifier(
restaurant_id=3,
identifier='barbecue chicken salad'
),
models.Identifier(
restaurant_id=3,
identifier='BBQ chicken salad'
),
models.Identifier(
restaurant_id=3,
identifier='chicken salad with barbecue'
),
models.Identifier(
restaurant_id=3,
identifier='barbecue salad'
)
]
),
models.MenuItem(
name="Toasted Steak & White Cheddar",
desc=r"Grass fed beef, aged white cheddar, pickled red onions and horseradish sauce on Artisan Ciabatta.",
price=11.19,
img=r"https://www.panerabread.com/content/dam/panerabread/menu-omni/integrated-web/grid/rect/steak-and-white-cheddar-panini-on-artisan-ciabatta-whole.jpg.transform/rect-grid-image/image.20211207.jpg",
identifiers=[
models.Identifier(
restaurant_id=3,
identifier='steak and white cheddar'
),
models.Identifier(
restaurant_id=3,
identifier='steak and cheddar sandwich'
),
models.Identifier(
restaurant_id=3,
identifier='steak and white cheddar sandwich'
),
models.Identifier(
restaurant_id=3,
identifier='steak and cheese melt'
),
models.Identifier(
restaurant_id=3,
identifier='steak'
)
]
),
models.MenuItem(
name="Soda",
desc=r"Soda. Customize to choose flavor. Nutrition reflects beverage with no ice.",
price=2.59,
img=r"https://www.panerabread.com/content/dam/panerabread/menu-omni/integrated-web/grid/rect/custom-soda-flavor.jpg.transform/rect-grid-image/image.20211207.jpg",
identifiers=[
models.Identifier(
restaurant_id=3,
identifier='coke'
),
models.Identifier(
restaurant_id=3,
identifier='sprite'
),
models.Identifier(
restaurant_id=3,
identifier='ginger ale'
),
models.Identifier(
restaurant_id=3,
identifier='lemonade'
),
]
)
]
)
db.add(dunkin)
db.add(mcdonalds)
    db.add(panera)
    db.commit()
| [
"models.MenuItem.id.in_",
"models.Identifier"
] | [((540, 577), 'models.MenuItem.id.in_', 'models.MenuItem.id.in_', (['menu_item_ids'], {}), '(menu_item_ids)\n', (562, 577), False, 'import models\n'), ((1457, 1512), 'models.Identifier', 'models.Identifier', ([], {'restaurant_id': '(1)', 'identifier': '"""coffee"""'}), "(restaurant_id=1, identifier='coffee')\n", (1474, 1512), False, 'import models\n'), ((1604, 1663), 'models.Identifier', 'models.Identifier', ([], {'restaurant_id': '(1)', 'identifier': '"""cup of joe"""'}), "(restaurant_id=1, identifier='cup of joe')\n", (1621, 1663), False, 'import models\n'), ((2244, 2302), 'models.Identifier', 'models.Identifier', ([], {'restaurant_id': '(1)', 'identifier': '"""americano"""'}), "(restaurant_id=1, identifier='americano')\n", (2261, 2302), False, 'import models\n'), ((2893, 2947), 'models.Identifier', 'models.Identifier', ([], {'restaurant_id': '(1)', 'identifier': '"""latte"""'}), "(restaurant_id=1, identifier='latte')\n", (2910, 2947), False, 'import models\n'), ((3891, 3947), 'models.Identifier', 'models.Identifier', ([], {'restaurant_id': '(2)', 'identifier': '"""big mac"""'}), "(restaurant_id=2, identifier='big mac')\n", (3908, 3947), False, 'import models\n'), ((4039, 4098), 'models.Identifier', 'models.Identifier', ([], {'restaurant_id': '(2)', 'identifier': '"""number one"""'}), "(restaurant_id=2, identifier='number one')\n", (4056, 4098), False, 'import models\n'), ((4871, 4935), 'models.Identifier', 'models.Identifier', ([], {'restaurant_id': '(2)', 'identifier': '"""quarter pounder"""'}), "(restaurant_id=2, identifier='quarter pounder')\n", (4888, 4935), False, 'import models\n'), ((5027, 5078), 'models.Identifier', 'models.Identifier', ([], {'restaurant_id': '(2)', 'identifier': '"""qp"""'}), "(restaurant_id=2, identifier='qp')\n", (5044, 5078), False, 'import models\n'), ((5170, 5229), 'models.Identifier', 'models.Identifier', ([], {'restaurant_id': '(2)', 'identifier': '"""number two"""'}), "(restaurant_id=2, identifier='number two')\n", (5187, 5229), False, 'import models\n'), ((6150, 6223), 'models.Identifier', 'models.Identifier', ([], {'restaurant_id': '(3)', 'identifier': '"""chipotle chicken avocado"""'}), "(restaurant_id=3, identifier='chipotle chicken avocado')\n", (6167, 6223), False, 'import models\n'), ((6315, 6388), 'models.Identifier', 'models.Identifier', ([], {'restaurant_id': '(3)', 'identifier': '"""chicken chipotle avocado"""'}), "(restaurant_id=3, identifier='chicken chipotle avocado')\n", (6332, 6388), False, 'import models\n'), ((6975, 7039), 'models.Identifier', 'models.Identifier', ([], {'restaurant_id': '(3)', 'identifier': '"""hazelnut coffee"""'}), "(restaurant_id=3, identifier='hazelnut coffee')\n", (6992, 7039), False, 'import models\n'), ((7131, 7188), 'models.Identifier', 'models.Identifier', ([], {'restaurant_id': '(3)', 'identifier': '"""hazelnut"""'}), "(restaurant_id=3, identifier='hazelnut')\n", (7148, 7188), False, 'import models\n'), ((7280, 7341), 'models.Identifier', 'models.Identifier', ([], {'restaurant_id': '(3)', 'identifier': '"""hot hazelnut"""'}), "(restaurant_id=3, identifier='hot hazelnut')\n", (7297, 7341), False, 'import models\n'), ((7433, 7488), 'models.Identifier', 'models.Identifier', ([], {'restaurant_id': '(3)', 'identifier': '"""coffee"""'}), "(restaurant_id=3, identifier='coffee')\n", (7450, 7488), False, 'import models\n'), ((8108, 8179), 'models.Identifier', 'models.Identifier', ([], {'restaurant_id': '(3)', 'identifier': '"""barbecue chicken salad"""'}), "(restaurant_id=3, identifier='barbecue chicken 
salad')\n", (8125, 8179), False, 'import models\n'), ((8271, 8337), 'models.Identifier', 'models.Identifier', ([], {'restaurant_id': '(3)', 'identifier': '"""BBQ chicken salad"""'}), "(restaurant_id=3, identifier='BBQ chicken salad')\n", (8288, 8337), False, 'import models\n'), ((8429, 8505), 'models.Identifier', 'models.Identifier', ([], {'restaurant_id': '(3)', 'identifier': '"""chicken salad with barbecue"""'}), "(restaurant_id=3, identifier='chicken salad with barbecue')\n", (8446, 8505), False, 'import models\n'), ((8597, 8660), 'models.Identifier', 'models.Identifier', ([], {'restaurant_id': '(3)', 'identifier': '"""barbecue salad"""'}), "(restaurant_id=3, identifier='barbecue salad')\n", (8614, 8660), False, 'import models\n'), ((9266, 9338), 'models.Identifier', 'models.Identifier', ([], {'restaurant_id': '(3)', 'identifier': '"""steak and white cheddar"""'}), "(restaurant_id=3, identifier='steak and white cheddar')\n", (9283, 9338), False, 'import models\n'), ((9430, 9505), 'models.Identifier', 'models.Identifier', ([], {'restaurant_id': '(3)', 'identifier': '"""steak and cheddar sandwich"""'}), "(restaurant_id=3, identifier='steak and cheddar sandwich')\n", (9447, 9505), False, 'import models\n'), ((9597, 9683), 'models.Identifier', 'models.Identifier', ([], {'restaurant_id': '(3)', 'identifier': '"""steak and white cheddar sandwich"""'}), "(restaurant_id=3, identifier=\n 'steak and white cheddar sandwich')\n", (9614, 9683), False, 'import models\n'), ((9770, 9840), 'models.Identifier', 'models.Identifier', ([], {'restaurant_id': '(3)', 'identifier': '"""steak and cheese melt"""'}), "(restaurant_id=3, identifier='steak and cheese melt')\n", (9787, 9840), False, 'import models\n'), ((9932, 9986), 'models.Identifier', 'models.Identifier', ([], {'restaurant_id': '(3)', 'identifier': '"""steak"""'}), "(restaurant_id=3, identifier='steak')\n", (9949, 9986), False, 'import models\n'), ((10505, 10558), 'models.Identifier', 'models.Identifier', ([], {'restaurant_id': '(3)', 'identifier': '"""coke"""'}), "(restaurant_id=3, identifier='coke')\n", (10522, 10558), False, 'import models\n'), ((10650, 10705), 'models.Identifier', 'models.Identifier', ([], {'restaurant_id': '(3)', 'identifier': '"""sprite"""'}), "(restaurant_id=3, identifier='sprite')\n", (10667, 10705), False, 'import models\n'), ((10797, 10856), 'models.Identifier', 'models.Identifier', ([], {'restaurant_id': '(3)', 'identifier': '"""ginger ale"""'}), "(restaurant_id=3, identifier='ginger ale')\n", (10814, 10856), False, 'import models\n'), ((10948, 11005), 'models.Identifier', 'models.Identifier', ([], {'restaurant_id': '(3)', 'identifier': '"""lemonade"""'}), "(restaurant_id=3, identifier='lemonade')\n", (10965, 11005), False, 'import models\n')] |
from django.test import SimpleTestCase
from django.urls import reverse, resolve
from applications.patient_panel.views import home, offices
class TestPatientHomeUrls(SimpleTestCase):
def test_home_url_resolves(self):
url = reverse('patient_panel:home')
        self.assertEqual(resolve(url).func.view_class, home.PatientHome)
class TestPatientOfficesUrls(SimpleTestCase):
def test_offices_list_url_resolves(self):
url = reverse('patient_panel:offices')
        self.assertEqual(resolve(url).func.view_class, offices.OfficesListView)
| [
"django.urls.resolve",
"django.urls.reverse"
] | [((238, 267), 'django.urls.reverse', 'reverse', (['"""patient_panel:home"""'], {}), "('patient_panel:home')\n", (245, 267), False, 'from django.urls import reverse, resolve\n'), ((451, 483), 'django.urls.reverse', 'reverse', (['"""patient_panel:offices"""'], {}), "('patient_panel:offices')\n", (458, 483), False, 'from django.urls import reverse, resolve\n'), ((294, 306), 'django.urls.resolve', 'resolve', (['url'], {}), '(url)\n', (301, 306), False, 'from django.urls import reverse, resolve\n'), ((510, 522), 'django.urls.resolve', 'resolve', (['url'], {}), '(url)\n', (517, 522), False, 'from django.urls import reverse, resolve\n')] |
import random
import string
import math
import warnings
import copy
import os
import shutil
class Character:
def __init__(self, name=None, gender=None, stat_generation=None, stat_info=None, batch=None, tags=None):
#stats
self.stats = {
'luck' : 1,
'skill' : 1,
'endurance' : 1,
'agility' : 1,
'strength' : 1
}
#stats - random
if stat_generation == 'random':
self.stats = self.generate_stats_with_random()
#stats - distributed
elif stat_generation == 'distributed':
self.stats = self.generate_stats_with_distributed(stat_info)
#stats - triangular
elif stat_generation == 'triangular' or stat_generation == 'random triangular':
self.stats = self.generate_stats_with_random_triangular(stat_info)
#stats - list
elif stat_generation == 'list':
self.stats = self.generate_skills_from_list(stat_info)
#stats - default
else:
if stat_info == None:
default_stat_value = 1
warnings.warn(f'Insufficient stats info. Using default of {default_stat_value}!')
self.stats = self.default_stats(default_stat_value)
else:
self.stats = self.default_stats(stat_info)
#gender
if gender == None:
self.gender = self.generate_gender()
else:
self.gender = gender
#name
if name == None:
self.name = self.generate_name()
else:
self.name = str(name)
#image
self.image = f'{self.name}.png'
#colors
self.colors = self.generate_colors()
#tags
if tags != None:
self.tags = ','.join(tags)
else:
self.tags=None
#batch
if batch == None:
self.creator = 1234.5
else:
self.creator = batch
def generate_name(self):
"""Generates a name for the character"""
name = ''.join([random.choice('BCDGHJKLMNPRSTVWYZ'),
random.choice('aeiou'),
random.choice('bcfhjklmnprstvwxyz'),
random.choice('aiouy'),
random.choice('abcdefghijklmnopqrstuvwxyz')])
name_len = random.choices([2,3,4,5],weights=(1,4,5,2))[0]
name = name[0:name_len]
return name
def generate_gender(self, male_chance=None, female_chance=None, other_chance=None):
"""Generates gender for character"""
        m = 33.33 if male_chance is None else male_chance
        f = 33.33 if female_chance is None else female_chance
        o = 33.33 if other_chance is None else other_chance
gender = random.choices([0.0,1.0,2.0],weights=(m,f,o))[0]
return gender
def generate_stats_with_distributed(self, points):
"""Generates stats by distributing points"""
if points == None:
warnings.warn(f'No point pool allocated!')
return dict.fromkeys(self.stats, 1)
stat_max = 45
if points > stat_max:
warnings.warn(f'{points} is greater than maximum allowed {stat_max} points per character!')
return dict.fromkeys(self.stats, 10)
stats = copy.copy(self.stats)
options = list(stats.keys())
for x in range(0,points):
#grab a random stat
stat = random.choice(options)
#allocate a point
stats[stat] += 1.0
#remove the stat from next round if it is maxed
if stats[stat] == 10.0:
options.remove(stat)
return stats
def default_stats(self, default_stat):
return dict.fromkeys(self.stats, float(default_stat))
def generate_skills_from_list(self, skill_list):
if skill_list == None or len(skill_list) != 5:
default_skill_list = [1.0,1.0,1.0,1.0,1.0]
warnings.warn(f'{skill_list} is an invalid skill_list! Defaulting to {default_skill_list}')
skill_list = default_skill_list
stats = {}
index = 0
for k in self.stats.keys():
stats[k] = float(skill_list[index])
index += 1
return stats
def generate_stats_with_random(self):
"""Generates stats randomly"""
stats = {}
for k in self.stats.keys():
stats[k] = float(random.randint(1,10))
return stats
def generate_stats_with_random_triangular(self, mode):
"""Generates stats randomly waited around the given mode"""
if mode == None:
warnings.warn(f'No mode defined! Defaulting to 5')
mode = 5
elif mode > 10:
            warnings.warn(f'mode entered for random triangular stat distribution {mode} is higher than maximum of 10!')
mode = 10
stats = {}
for k in self.stats.keys():
stats[k] = float(round(random.triangular(1,10,mode)))
return stats
def generate_colors(self):
"""Randomly generate hue, saturation, and color values"""
colors = {
'colorr' : float(round(random.randint(0,255))),
'colorg' : float(round(random.randint(0,255))),
'colorb' : float(round(random.randint(0,255)))
}
return colors
def create_character_folder(self, tags=None):
"""creates character folder for use in Ultimate Arena"""
#characters directory
if not os.path.exists("characters"):
try:
os.makedirs("characters")
except FileExistsError:
                warnings.warn("characters directory created during script runtime!")
pass
#individual character directory
break_loop = False
while break_loop == False:
char_dir = os.path.join('characters',self.name)
if not os.path.exists(char_dir):
break_loop = True
try:
os.makedirs(char_dir)
except FileExistsError:
                    warnings.warn("character directory created during script runtime!")
pass
else:
filename = os.path.join('characters',self.name, f'{self.name}.ini')
with open (filename, 'r') as char_file:
#check if we just made the conflicting character
if str(f'creator="{self.creator}"') in char_file.read():
#change the name and continue
self.name = self.generate_name()
self.image = f'{self.name}.png'
else:
overwrite_permission = input(f'{self.name} already exists. Would you like to overwrite them?[Y/n]')
if overwrite_permission != 'Y':
return True
else:
break_loop = True
filename = os.path.join('characters',self.name, f'{self.name}.ini')
with open (filename, 'w+') as char_file:
#header
char_file.write('[character]\n')
#creator
char_file.write(f'creator="{self.creator}"\n')
#tags
if self.tags != None:
tag_string = self.tags
char_file.write(f'tags="{tag_string}"\n')
#name
char_file.write(f'name="{self.name}"\n')
#pic
char_file.write(f'image="{self.image}"\n')
#gender
gen_string = str(self.gender)#.ljust(8,"0")
char_file.write(f'gender="{gen_string}"\n')
#stats
for stat_name, stat_val in self.stats.items():
stat_string = str(stat_val)#.ljust(8,"0")
char_file.write(f'{stat_name}="{stat_string}"\n')
#colors
for color_name, color_val in self.colors.items():
col_string = str(color_val)#.ljust(8,"0")
char_file.write(f'{color_name}="{col_string}"\n')
#copy image
image_file = os.path.join('characters',self.name, f'{self.name}.png')
shutil.copy('default.png',image_file)
#print(self.name)
return True
| [
"os.path.exists",
"random.choice",
"os.makedirs",
"random.triangular",
"os.path.join",
"random.choices",
"shutil.copy",
"warnings.warn",
"copy.copy",
"random.randint"
] | [((3428, 3449), 'copy.copy', 'copy.copy', (['self.stats'], {}), '(self.stats)\n', (3437, 3449), False, 'import copy\n'), ((7317, 7374), 'os.path.join', 'os.path.join', (['"""characters"""', 'self.name', 'f"""{self.name}.ini"""'], {}), "('characters', self.name, f'{self.name}.ini')\n", (7329, 7374), False, 'import os\n'), ((8467, 8524), 'os.path.join', 'os.path.join', (['"""characters"""', 'self.name', 'f"""{self.name}.png"""'], {}), "('characters', self.name, f'{self.name}.png')\n", (8479, 8524), False, 'import os\n'), ((8533, 8571), 'shutil.copy', 'shutil.copy', (['"""default.png"""', 'image_file'], {}), "('default.png', image_file)\n", (8544, 8571), False, 'import shutil\n'), ((2450, 2500), 'random.choices', 'random.choices', (['[2, 3, 4, 5]'], {'weights': '(1, 4, 5, 2)'}), '([2, 3, 4, 5], weights=(1, 4, 5, 2))\n', (2464, 2500), False, 'import random\n'), ((2883, 2933), 'random.choices', 'random.choices', (['[0.0, 1.0, 2.0]'], {'weights': '(m, f, o)'}), '([0.0, 1.0, 2.0], weights=(m, f, o))\n', (2897, 2933), False, 'import random\n'), ((3110, 3152), 'warnings.warn', 'warnings.warn', (['f"""No point pool allocated!"""'], {}), "(f'No point pool allocated!')\n", (3123, 3152), False, 'import warnings\n'), ((3269, 3370), 'warnings.warn', 'warnings.warn', (['f"""{points} is greater than maximum allowed {stat_max} points per character!"""'], {}), "(\n    f'{points} is greater than maximum allowed {stat_max} points per character!'\n    )\n", (3282, 3370), False, 'import warnings\n'), ((3576, 3598), 'random.choice', 'random.choice', (['options'], {}), '(options)\n', (3589, 3598), False, 'import random\n'), ((4112, 4213), 'warnings.warn', 'warnings.warn', (['f"""{skill_list} is an invalid skill_list! Defaulting to {default_skill_list}"""'], {}), "(\n    f'{skill_list} is an invalid skill_list! Defaulting to {default_skill_list}'\n    )\n", (4125, 4213), False, 'import warnings\n'), ((4838, 4888), 'warnings.warn', 'warnings.warn', (['f"""No mode defined! Defaulting to 5"""'], {}), "(f'No mode defined! Defaulting to 5')\n", (4851, 4888), False, 'import warnings\n'), ((5776, 5804), 'os.path.exists', 'os.path.exists', (['"""characters"""'], {}), "('characters')\n", (5790, 5804), False, 'import os\n'), ((6142, 6179), 'os.path.join', 'os.path.join', (['"""characters"""', 'self.name'], {}), "('characters', self.name)\n", (6154, 6179), False, 'import os\n'), ((2162, 2197), 'random.choice', 'random.choice', (['"""BCDGHJKLMNPRSTVWYZ"""'], {}), "('BCDGHJKLMNPRSTVWYZ')\n", (2175, 2197), False, 'import random\n'), ((2224, 2246), 'random.choice', 'random.choice', (['"""aeiou"""'], {}), "('aeiou')\n", (2237, 2246), False, 'import random\n'), ((2273, 2308), 'random.choice', 'random.choice', (['"""bcfhjklmnprstvwxyz"""'], {}), "('bcfhjklmnprstvwxyz')\n", (2286, 2308), False, 'import random\n'), ((2335, 2357), 'random.choice', 'random.choice', (['"""aiouy"""'], {}), "('aiouy')\n", (2348, 2357), False, 'import random\n'), ((2384, 2427), 'random.choice', 'random.choice', (['"""abcdefghijklmnopqrstuvwxyz"""'], {}), "('abcdefghijklmnopqrstuvwxyz')\n", (2397, 2427), False, 'import random\n'), ((4606, 4627), 'random.randint', 'random.randint', (['(1)', '(10)'], {}), '(1, 10)\n', (4620, 4627), False, 'import random\n'), ((5841, 5866), 'os.makedirs', 'os.makedirs', (['"""characters"""'], {}), "('characters')\n", (5852, 5866), False, 'import os\n'), ((6199, 6223), 'os.path.exists', 'os.path.exists', (['char_dir'], {}), '(char_dir)\n', (6213, 6223), False, 'import os\n'), ((6527, 6584), 'os.path.join', 'os.path.join', (['"""characters"""', 'self.name', 'f"""{self.name}.ini"""'], {}), "('characters', self.name, f'{self.name}.ini')\n", (6539, 6584), False, 'import os\n'), ((5200, 5230), 'random.triangular', 'random.triangular', (['(1)', '(10)', 'mode'], {}), '(1, 10, mode)\n', (5217, 5230), False, 'import random\n'), ((5424, 5446), 'random.randint', 'random.randint', (['(0)', '(255)'], {}), '(0, 255)\n', (5438, 5446), False, 'import random\n'), ((5485, 5507), 'random.randint', 'random.randint', (['(0)', '(255)'], {}), '(0, 255)\n', (5499, 5507), False, 'import random\n'), ((5546, 5568), 'random.randint', 'random.randint', (['(0)', '(255)'], {}), '(0, 255)\n', (5560, 5568), False, 'import random\n'), ((6303, 6324), 'os.makedirs', 'os.makedirs', (['char_dir'], {}), '(char_dir)\n', (6314, 6324), False, 'import os\n'), ((1167, 1253), 'warnings.warn', 'warnings.warn', (['f"""Insufficient stats info. Using default of {default_stat_value}!"""'], {}), "(\n    f'Insufficient stats info. Using default of {default_stat_value}!')\n", (1180, 1253), False, 'import warnings\n')] |
import numpy as np
from numpy import asarray
from PIL import Image, ImageOps
from src.utils.image_operations import get_horizontal_slope, get_vertical_slope, prepare_slope_array
from src.data.constants import path
selector = "armchair_at_beach"
img = Image.open(path[selector])
gs_img = ImageOps.grayscale(img)
gs_img.show()
gs_array = asarray(gs_img)
hor_slope = np.zeros([gs_array.shape[0], gs_array.shape[1]])
ver_slope = np.zeros([gs_array.shape[0], gs_array.shape[1]])
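# estimate the intensity gradient at every pixel in both directions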
for i in range(gs_array.shape[0]):
for j in range(gs_array.shape[1]):
hor_slope[i][j] = get_horizontal_slope(gs_array, i, j)
ver_slope[i][j] = get_vertical_slope(gs_array, i, j)
total_slope = prepare_slope_array(hor_slope + ver_slope)
image_total_slope = Image.fromarray(total_slope)
image_total_slope.show()
| [
"src.utils.image_operations.get_vertical_slope",
"PIL.Image.fromarray",
"PIL.Image.open",
"numpy.asarray",
"PIL.ImageOps.grayscale",
"src.utils.image_operations.get_horizontal_slope",
"numpy.zeros",
"src.utils.image_operations.prepare_slope_array"
] | [((254, 280), 'PIL.Image.open', 'Image.open', (['path[selector]'], {}), '(path[selector])\n', (264, 280), False, 'from PIL import Image, ImageOps\n'), ((291, 314), 'PIL.ImageOps.grayscale', 'ImageOps.grayscale', (['img'], {}), '(img)\n', (309, 314), False, 'from PIL import Image, ImageOps\n'), ((341, 356), 'numpy.asarray', 'asarray', (['gs_img'], {}), '(gs_img)\n', (348, 356), False, 'from numpy import asarray\n'), ((370, 418), 'numpy.zeros', 'np.zeros', (['[gs_array.shape[0], gs_array.shape[1]]'], {}), '([gs_array.shape[0], gs_array.shape[1]])\n', (378, 418), True, 'import numpy as np\n'), ((431, 479), 'numpy.zeros', 'np.zeros', (['[gs_array.shape[0], gs_array.shape[1]]'], {}), '([gs_array.shape[0], gs_array.shape[1]])\n', (439, 479), True, 'import numpy as np\n'), ((694, 736), 'src.utils.image_operations.prepare_slope_array', 'prepare_slope_array', (['(hor_slope + ver_slope)'], {}), '(hor_slope + ver_slope)\n', (713, 736), False, 'from src.utils.image_operations import get_horizontal_slope, get_vertical_slope, prepare_slope_array\n'), ((757, 785), 'PIL.Image.fromarray', 'Image.fromarray', (['total_slope'], {}), '(total_slope)\n', (772, 785), False, 'from PIL import Image, ImageOps\n'), ((581, 617), 'src.utils.image_operations.get_horizontal_slope', 'get_horizontal_slope', (['gs_array', 'i', 'j'], {}), '(gs_array, i, j)\n', (601, 617), False, 'from src.utils.image_operations import get_horizontal_slope, get_vertical_slope, prepare_slope_array\n'), ((644, 678), 'src.utils.image_operations.get_vertical_slope', 'get_vertical_slope', (['gs_array', 'i', 'j'], {}), '(gs_array, i, j)\n', (662, 678), False, 'from src.utils.image_operations import get_horizontal_slope, get_vertical_slope, prepare_slope_array\n')] |
"""
Django settings for BlogApp project.
Generated by 'django-admin startproject' using Django 3.2.8.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.2/ref/settings/
"""
from pathlib import Path
import environ
env = environ.Env()
# reading .env file
environ.Env.read_env()
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = env("DJANGO_SECRET_KEY")
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = env.bool("DJANGO_DEBUG")  # bool() of a non-empty string is always True, so "False" would still enable debug
ALLOWED_HOSTS = ['*']
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.sites',
'rest_framework',
'rest_framework.authtoken',
'allauth',
'allauth.account',
'allauth.socialaccount',
'allauth.socialaccount.providers.github',
'rest_auth',
'rest_auth.registration',
'corsheaders',
'mdeditor',
    'webpack_loader', # not sure whether this is required
'django_filters',
    'taggit', # tagging system
    'drf_yasg', # API docs
'channels',
'users.apps.UsersConfig',
'blog.apps.BlogConfig',
'comments.apps.CommentsConfig',
'backend_res.apps.BackendResConfig',
'chat.apps.ChatConfig',
'links.apps.LinksConfig'
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'corsheaders.middleware.CorsMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'BlogApp.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [BASE_DIR / 'templates'],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'BlogApp.wsgi.application'
ASGI_APPLICATION = 'BlogApp.asgi.application'
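# Django Channels message layer backed by a local Redis instance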
CHANNEL_LAYERS = {
'default': {
'BACKEND': 'channels_redis.core.RedisChannelLayer',
'CONFIG': {
"hosts": [('127.0.0.1', 6379)],
},
},
}
# Database
# https://docs.djangoproject.com/en/3.2/ref/settings/#databases
# DATABASES = {
# 'default': {
# 'ENGINE': 'django.db.backends.sqlite3',
# 'NAME': BASE_DIR / 'db.sqlite3',
# }
# }
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql',
'NAME': env("D_NAME"),
'USER': env("D_USER"),
'PASSWORD': env("D_PASSWORD"),
'HOST': env("D_HOST"),
'PORT': env("D_PORT"),
}
}
# Password validation
# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME':
'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME':
'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME':
'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME':
'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.2/topics/i18n/
LANGUAGE_CODE = 'zh-hans'
TIME_ZONE = 'Asia/Shanghai'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.2/howto/static-files/
STATIC_URL = '/static/'
MEDIA_ROOT = BASE_DIR / 'upload'
MEDIA_URL = '/media/'
STATICFILES_DIRS = [BASE_DIR.joinpath("static")]
STATIC_ROOT = BASE_DIR.joinpath('staticfiles')
# Default primary key field type
# https://docs.djangoproject.com/en/3.2/ref/settings/#default-auto-field
DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
AUTH_USER_MODEL = 'users.userModel'
SITE_ID = 1
CRISPY_ALLOWED_TEMPLATE_PACKS = "bootstrap5"
CRISPY_TEMPLATE_PACK = "bootstrap5"
ACCOUNT_EMAIL_VERIFICATION = 'none'
ACCOUNT_EMAIL_REQUIRED = True
LOGIN_URL = "accounts/login"
LOGIN_REDIRECT_URL = '/'
LOGOUT_REDIRECT_URL = '/'
Rate = '20/min' if DEBUG is False else None
REST_FRAMEWORK = {
'DATETIME_FORMAT': "%Y/%m/%d %H:%M:%S",
'DEFAULT_PAGINATION_CLASS': 'blog.pagination.CustomPageNumber',
#'PAGE_SIZE': 20,
# 'EXCEPTION_HANDLER': 'blog.exception.exception_handler'
'DEFAULT_THROTTLE_CLASSES': ['rest_framework.throttling.UserRateThrottle'],
'DEFAULT_THROTTLE_RATES': {
'user': Rate,
}
}
""" WEBPACK_LOADER = {
'DEFAULT': {
'CACHE': not DEBUG,
'BUNDLE_DIR_NAME': 'dist/',
'STATS_FILE': str(BASE_DIR.joinpath('frontend', 'webpack-stats.json')),
'POLL_INTERVAL': 0.1,
'IGNORE': [r'.+\.hot-update.js', r'.+\.map'],
}
} """
AUTHENTICATION_BACKENDS = [
# Needed to login by username in Django admin, regardless of `allauth`
'django.contrib.auth.backends.ModelBackend',
# `allauth` specific authentication methods, such as login by e-mail
'allauth.account.auth_backends.AuthenticationBackend',
]
CORS_ALLOWED_ORIGINS = [
"https://domain.com", "https://api.domain.com", "http://localhost:8080",
"http://127.0.0.1:8080"
]
CORS_ORIGIN_WHITELIST = ('localhost:8080', '127.0.0.1:8080')
CSRF_COOKIE_HTTPONLY = False
GITHUB_CLIENT_ID = env("GITHUB_CLIENT_ID")
GITHUB_CLIENT_SECRET = env("GITHUB_CLIENT_SECRET")
# mdeditor
X_FRAME_OPTIONS = 'SAMEORIGIN'
# custom pagenation size
PAGE_SIZE = env("PAGE_SIZE")
# rest_auth password change
OLD_PASSWORD_FIELD_ENABLED = True
LOGOUT_ON_PASSWORD_CHANGE = False
# switch to SMTP before going live
# EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
# default cache configuration; consider switching to Redis in production
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
}
}
EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
EMAIL_HOST = 'smtp.126.com'
EMAIL_PORT = env("EMAIL_PORT")
EMAIL_HOST_USER = env("EMAIL_HOST_USER")
EMAIL_HOST_PASSWORD = env("EMAIL_HOST_PASSWORD")
EMAIL_SUBJECT_PREFIX = '[HERSCHEL的博客] '
EMAIL_USE_SSL = True
DEFAULT_FROM_EMAIL = env("DEFAULT_FROM_EMAIL") | [
"environ.Env",
"environ.Env.read_env",
"pathlib.Path"
] | [((355, 368), 'environ.Env', 'environ.Env', ([], {}), '()\n', (366, 368), False, 'import environ\n'), ((389, 411), 'environ.Env.read_env', 'environ.Env.read_env', ([], {}), '()\n', (409, 411), False, 'import environ\n'), ((488, 502), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (492, 502), False, 'from pathlib import Path\n')] |
import os
import random
import gym
import numpy as np
from collections import deque
from keras.models import Sequential
from keras.layers import Dense
from keras.optimizers import Adam
from time import sleep
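#Q-learning agent with experience replay: a small MLP approximates the action-value function Q(s, a)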
class Agent:
def __init__(self):
        self.memory = deque(maxlen=2000) #bounded replay buffer so old transitions age out (capacity is an arbitrary choice)
self.epsilon = 1.0 #exploration rate
self.model = self.__model()
def __model(self):
features = 4
learning_rate = 0.01
model = Sequential()
model.add(Dense(8, input_dim=features, activation='tanh'))
model.add(Dense(16, activation='tanh'))
model.add(Dense(2, activation='linear'))
#the loss function will be MSE between the action-value Q
model.compile(loss='mse', optimizer=Adam(lr=learning_rate, decay=0.01))
return model
def remember(self, state, action, reward, next_state, done):
#store in memory the different states, actions, rewards...
self.memory.append( (state, action, reward, next_state, done) )
def replay(self):
#fit model from memory
gamma = 1.0 #importance of the next reward
max_batch_size = 64
#take care the memory could be big, so using minibatch
minibatch = random.sample(self.memory, min(max_batch_size, len(self.memory)))
list_x_batch, list_y_batch = [], []
for state, action, reward, next_state, done in minibatch:
target = self.model.predict(state)[0]
if not done: #calculate discounted reward
action_values = self.model.predict(next_state)[0]
#following the formula of action-value expectation
reward = reward + gamma * np.amax(action_values)
#customize the obtained reward with the calculated
target[action] = reward
#append
list_x_batch.append(state)
list_y_batch.append(target)
#train the model
x_batch = np.vstack(list_x_batch)
y_batch = np.vstack(list_y_batch)
self.model.fit(x_batch, y_batch, verbose=0)
#decrease exploration rate
if self.epsilon > 0.01:
self.epsilon *= 0.997
def act(self, state):
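        #epsilon-greedy: with probability epsilon take a random action (explore)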
if self.epsilon > np.random.rand():
return random.randint(0,1)
#predict the action to do
action_values = self.model.predict(state)[0]
return np.argmax(action_values)
if __name__ == "__main__":
backupfile = 'weights.hdf5'
trained = False
env = gym.make('CartPole-v1')
agent = Agent()
if os.path.isfile(backupfile):
print("Already trained. Recovering weights from backup")
agent.model.load_weights(backupfile)
trained = True
total_wins = 0
for episode in range(1000):
state = env.reset()
#row vector
state = state.reshape(1, -1)
for step in range(1, 700):
#env.render()
#perform the action
# 0->left, 1->right
action = agent.act(state)
#print("action: {}".format(action))
next_state, reward, done, info = env.step(action)
#row vector
next_state = next_state.reshape(1, -1)
#save the current observation
agent.remember(state, action, reward, next_state, done)
#update state
state = next_state
#evaluate
if done:
#solved when reward >= 195 before 100 episodes
if step > 195:
solved = 'SOLVED'
total_wins += 1
else:
solved = ''
total_wins = 0
print("Episode: {} Step: {} Epsilon: {:.3f} {}".format(episode, step, agent.epsilon, solved))
break
#at the end of episode, train the model
if not trained:
agent.replay()
#end game when 100 wins in a row
if total_wins == 100:
print("You win!!")
agent.model.save_weights(backupfile)
break
#before exit
sleep(2)
| [
"keras.optimizers.Adam",
"random.randint",
"numpy.random.rand",
"numpy.argmax",
"time.sleep",
"keras.models.Sequential",
"os.path.isfile",
"numpy.vstack",
"keras.layers.Dense",
"numpy.amax",
"gym.make"
] | [((2491, 2514), 'gym.make', 'gym.make', (['"""CartPole-v1"""'], {}), "('CartPole-v1')\n", (2499, 2514), False, 'import gym\n'), ((2543, 2569), 'os.path.isfile', 'os.path.isfile', (['backupfile'], {}), '(backupfile)\n', (2557, 2569), False, 'import os\n'), ((4094, 4102), 'time.sleep', 'sleep', (['(2)'], {}), '(2)\n', (4099, 4102), False, 'from time import sleep\n'), ((445, 457), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (455, 457), False, 'from keras.models import Sequential\n'), ((1938, 1961), 'numpy.vstack', 'np.vstack', (['list_x_batch'], {}), '(list_x_batch)\n', (1947, 1961), True, 'import numpy as np\n'), ((1980, 2003), 'numpy.vstack', 'np.vstack', (['list_y_batch'], {}), '(list_y_batch)\n', (1989, 2003), True, 'import numpy as np\n'), ((2373, 2397), 'numpy.argmax', 'np.argmax', (['action_values'], {}), '(action_values)\n', (2382, 2397), True, 'import numpy as np\n'), ((476, 523), 'keras.layers.Dense', 'Dense', (['(8)'], {'input_dim': 'features', 'activation': '"""tanh"""'}), "(8, input_dim=features, activation='tanh')\n", (481, 523), False, 'from keras.layers import Dense\n'), ((543, 571), 'keras.layers.Dense', 'Dense', (['(16)'], {'activation': '"""tanh"""'}), "(16, activation='tanh')\n", (548, 571), False, 'from keras.layers import Dense\n'), ((591, 620), 'keras.layers.Dense', 'Dense', (['(2)'], {'activation': '"""linear"""'}), "(2, activation='linear')\n", (596, 620), False, 'from keras.layers import Dense\n'), ((2212, 2228), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (2226, 2228), True, 'import numpy as np\n'), ((2249, 2269), 'random.randint', 'random.randint', (['(0)', '(1)'], {}), '(0, 1)\n', (2263, 2269), False, 'import random\n'), ((733, 767), 'keras.optimizers.Adam', 'Adam', ([], {'lr': 'learning_rate', 'decay': '(0.01)'}), '(lr=learning_rate, decay=0.01)\n', (737, 767), False, 'from keras.optimizers import Adam\n'), ((1671, 1693), 'numpy.amax', 'np.amax', (['action_values'], {}), '(action_values)\n', (1678, 1693), True, 'import numpy as np\n')] |
import torch
import torch.nn as nn
#from base import BaseModel
import torchvision.models as models
from collections import namedtuple
from .base_models.resnet import *
class Resnet50Model(nn.Module):
def __init__(self, num_classes=28):
super(Resnet50Model, self).__init__()
ResNetConfig = namedtuple('ResNetConfig', ['block', 'n_blocks', 'channels'])
resnet50_config = ResNetConfig(block = Bottleneck,
n_blocks = [3, 4, 6, 3],
channels = [64, 128, 256, 512])
self.resnet = ResNet(resnet50_config, num_classes)
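        # rebuild conv1 to accept 4 input channels; the extra channel's filters are
        # initialized as the average of the first and third input-channel filters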
w = self.resnet.conv1.weight
self.resnet.conv1 = nn.Conv2d(4, 64, kernel_size=7, stride=2, padding=3, bias=False)
self.resnet.conv1.weight = nn.Parameter(torch.cat((w, 0.5 * (w[:, :1, :, :] + w[:, 2:, :, :])), dim=1))
self.resnet.fc = nn.Sequential(
nn.BatchNorm1d(512 * 4),
nn.Dropout(0.5),
nn.Linear(512 * 4, num_classes),
)
def forward(self, x):
return self.resnet(x)
| [
"torch.nn.Dropout",
"collections.namedtuple",
"torch.nn.Conv2d",
"torch.nn.BatchNorm1d",
"torch.nn.Linear",
"torch.cat"
] | [((311, 372), 'collections.namedtuple', 'namedtuple', (['"""ResNetConfig"""', "['block', 'n_blocks', 'channels']"], {}), "('ResNetConfig', ['block', 'n_blocks', 'channels'])\n", (321, 372), False, 'from collections import namedtuple\n'), ((676, 740), 'torch.nn.Conv2d', 'nn.Conv2d', (['(4)', '(64)'], {'kernel_size': '(7)', 'stride': '(2)', 'padding': '(3)', 'bias': '(False)'}), '(4, 64, kernel_size=7, stride=2, padding=3, bias=False)\n', (685, 740), True, 'import torch.nn as nn\n'), ((789, 851), 'torch.cat', 'torch.cat', (['(w, 0.5 * (w[:, :1, :, :] + w[:, 2:, :, :]))'], {'dim': '(1)'}), '((w, 0.5 * (w[:, :1, :, :] + w[:, 2:, :, :])), dim=1)\n', (798, 851), False, 'import torch\n'), ((905, 928), 'torch.nn.BatchNorm1d', 'nn.BatchNorm1d', (['(512 * 4)'], {}), '(512 * 4)\n', (919, 928), True, 'import torch.nn as nn\n'), ((942, 957), 'torch.nn.Dropout', 'nn.Dropout', (['(0.5)'], {}), '(0.5)\n', (952, 957), True, 'import torch.nn as nn\n'), ((971, 1002), 'torch.nn.Linear', 'nn.Linear', (['(512 * 4)', 'num_classes'], {}), '(512 * 4, num_classes)\n', (980, 1002), True, 'import torch.nn as nn\n')] |
# coding: utf-8
"""
EVE Swagger Interface
An OpenAPI for EVE Online # noqa: E501
OpenAPI spec version: 0.8.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class GetCharactersCharacterIdOk(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'name': 'str',
'description': 'str',
'corporation_id': 'int',
'alliance_id': 'int',
'birthday': 'datetime',
'gender': 'str',
'race_id': 'int',
'bloodline_id': 'int',
'ancestry_id': 'int',
'security_status': 'float',
'faction_id': 'int'
}
attribute_map = {
'name': 'name',
'description': 'description',
'corporation_id': 'corporation_id',
'alliance_id': 'alliance_id',
'birthday': 'birthday',
'gender': 'gender',
'race_id': 'race_id',
'bloodline_id': 'bloodline_id',
'ancestry_id': 'ancestry_id',
'security_status': 'security_status',
'faction_id': 'faction_id'
}
def __init__(self, name=None, description=None, corporation_id=None, alliance_id=None, birthday=None, gender=None, race_id=None, bloodline_id=None, ancestry_id=None, security_status=None, faction_id=None): # noqa: E501
"""GetCharactersCharacterIdOk - a model defined in Swagger""" # noqa: E501
self._name = None
self._description = None
self._corporation_id = None
self._alliance_id = None
self._birthday = None
self._gender = None
self._race_id = None
self._bloodline_id = None
self._ancestry_id = None
self._security_status = None
self._faction_id = None
self.discriminator = None
self.name = name
if description is not None:
self.description = description
self.corporation_id = corporation_id
if alliance_id is not None:
self.alliance_id = alliance_id
self.birthday = birthday
self.gender = gender
self.race_id = race_id
self.bloodline_id = bloodline_id
if ancestry_id is not None:
self.ancestry_id = ancestry_id
if security_status is not None:
self.security_status = security_status
if faction_id is not None:
self.faction_id = faction_id
@property
def name(self):
"""Gets the name of this GetCharactersCharacterIdOk. # noqa: E501
name string # noqa: E501
:return: The name of this GetCharactersCharacterIdOk. # noqa: E501
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""Sets the name of this GetCharactersCharacterIdOk.
name string # noqa: E501
:param name: The name of this GetCharactersCharacterIdOk. # noqa: E501
:type: str
"""
if name is None:
raise ValueError("Invalid value for `name`, must not be `None`") # noqa: E501
self._name = name
@property
def description(self):
"""Gets the description of this GetCharactersCharacterIdOk. # noqa: E501
description string # noqa: E501
:return: The description of this GetCharactersCharacterIdOk. # noqa: E501
:rtype: str
"""
return self._description
@description.setter
def description(self, description):
"""Sets the description of this GetCharactersCharacterIdOk.
description string # noqa: E501
:param description: The description of this GetCharactersCharacterIdOk. # noqa: E501
:type: str
"""
self._description = description
@property
def corporation_id(self):
"""Gets the corporation_id of this GetCharactersCharacterIdOk. # noqa: E501
The character's corporation ID # noqa: E501
:return: The corporation_id of this GetCharactersCharacterIdOk. # noqa: E501
:rtype: int
"""
return self._corporation_id
@corporation_id.setter
def corporation_id(self, corporation_id):
"""Sets the corporation_id of this GetCharactersCharacterIdOk.
The character's corporation ID # noqa: E501
:param corporation_id: The corporation_id of this GetCharactersCharacterIdOk. # noqa: E501
:type: int
"""
if corporation_id is None:
raise ValueError("Invalid value for `corporation_id`, must not be `None`") # noqa: E501
self._corporation_id = corporation_id
@property
def alliance_id(self):
"""Gets the alliance_id of this GetCharactersCharacterIdOk. # noqa: E501
The character's alliance ID # noqa: E501
:return: The alliance_id of this GetCharactersCharacterIdOk. # noqa: E501
:rtype: int
"""
return self._alliance_id
@alliance_id.setter
def alliance_id(self, alliance_id):
"""Sets the alliance_id of this GetCharactersCharacterIdOk.
The character's alliance ID # noqa: E501
:param alliance_id: The alliance_id of this GetCharactersCharacterIdOk. # noqa: E501
:type: int
"""
self._alliance_id = alliance_id
@property
def birthday(self):
"""Gets the birthday of this GetCharactersCharacterIdOk. # noqa: E501
Creation date of the character # noqa: E501
:return: The birthday of this GetCharactersCharacterIdOk. # noqa: E501
:rtype: datetime
"""
return self._birthday
@birthday.setter
def birthday(self, birthday):
"""Sets the birthday of this GetCharactersCharacterIdOk.
Creation date of the character # noqa: E501
:param birthday: The birthday of this GetCharactersCharacterIdOk. # noqa: E501
:type: datetime
"""
if birthday is None:
raise ValueError("Invalid value for `birthday`, must not be `None`") # noqa: E501
self._birthday = birthday
@property
def gender(self):
"""Gets the gender of this GetCharactersCharacterIdOk. # noqa: E501
gender string # noqa: E501
:return: The gender of this GetCharactersCharacterIdOk. # noqa: E501
:rtype: str
"""
return self._gender
@gender.setter
def gender(self, gender):
"""Sets the gender of this GetCharactersCharacterIdOk.
gender string # noqa: E501
:param gender: The gender of this GetCharactersCharacterIdOk. # noqa: E501
:type: str
"""
if gender is None:
raise ValueError("Invalid value for `gender`, must not be `None`") # noqa: E501
allowed_values = ["female", "male"] # noqa: E501
if gender not in allowed_values:
raise ValueError(
"Invalid value for `gender` ({0}), must be one of {1}" # noqa: E501
.format(gender, allowed_values)
)
self._gender = gender
@property
def race_id(self):
"""Gets the race_id of this GetCharactersCharacterIdOk. # noqa: E501
race_id integer # noqa: E501
:return: The race_id of this GetCharactersCharacterIdOk. # noqa: E501
:rtype: int
"""
return self._race_id
@race_id.setter
def race_id(self, race_id):
"""Sets the race_id of this GetCharactersCharacterIdOk.
race_id integer # noqa: E501
:param race_id: The race_id of this GetCharactersCharacterIdOk. # noqa: E501
:type: int
"""
if race_id is None:
raise ValueError("Invalid value for `race_id`, must not be `None`") # noqa: E501
self._race_id = race_id
@property
def bloodline_id(self):
"""Gets the bloodline_id of this GetCharactersCharacterIdOk. # noqa: E501
bloodline_id integer # noqa: E501
:return: The bloodline_id of this GetCharactersCharacterIdOk. # noqa: E501
:rtype: int
"""
return self._bloodline_id
@bloodline_id.setter
def bloodline_id(self, bloodline_id):
"""Sets the bloodline_id of this GetCharactersCharacterIdOk.
bloodline_id integer # noqa: E501
:param bloodline_id: The bloodline_id of this GetCharactersCharacterIdOk. # noqa: E501
:type: int
"""
if bloodline_id is None:
raise ValueError("Invalid value for `bloodline_id`, must not be `None`") # noqa: E501
self._bloodline_id = bloodline_id
@property
def ancestry_id(self):
"""Gets the ancestry_id of this GetCharactersCharacterIdOk. # noqa: E501
ancestry_id integer # noqa: E501
:return: The ancestry_id of this GetCharactersCharacterIdOk. # noqa: E501
:rtype: int
"""
return self._ancestry_id
@ancestry_id.setter
def ancestry_id(self, ancestry_id):
"""Sets the ancestry_id of this GetCharactersCharacterIdOk.
ancestry_id integer # noqa: E501
:param ancestry_id: The ancestry_id of this GetCharactersCharacterIdOk. # noqa: E501
:type: int
"""
self._ancestry_id = ancestry_id
@property
def security_status(self):
"""Gets the security_status of this GetCharactersCharacterIdOk. # noqa: E501
security_status number # noqa: E501
:return: The security_status of this GetCharactersCharacterIdOk. # noqa: E501
:rtype: float
"""
return self._security_status
@security_status.setter
def security_status(self, security_status):
"""Sets the security_status of this GetCharactersCharacterIdOk.
security_status number # noqa: E501
:param security_status: The security_status of this GetCharactersCharacterIdOk. # noqa: E501
:type: float
"""
if security_status is not None and security_status > 10: # noqa: E501
raise ValueError("Invalid value for `security_status`, must be a value less than or equal to `10`") # noqa: E501
if security_status is not None and security_status < -10: # noqa: E501
raise ValueError("Invalid value for `security_status`, must be a value greater than or equal to `-10`") # noqa: E501
self._security_status = security_status
@property
def faction_id(self):
"""Gets the faction_id of this GetCharactersCharacterIdOk. # noqa: E501
ID of the faction the character is fighting for, if the character is enlisted in Factional Warfare # noqa: E501
:return: The faction_id of this GetCharactersCharacterIdOk. # noqa: E501
:rtype: int
"""
return self._faction_id
@faction_id.setter
def faction_id(self, faction_id):
"""Sets the faction_id of this GetCharactersCharacterIdOk.
ID of the faction the character is fighting for, if the character is enlisted in Factional Warfare # noqa: E501
:param faction_id: The faction_id of this GetCharactersCharacterIdOk. # noqa: E501
:type: int
"""
self._faction_id = faction_id
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, GetCharactersCharacterIdOk):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| [
"six.iteritems"
] | [((11643, 11676), 'six.iteritems', 'six.iteritems', (['self.swagger_types'], {}), '(self.swagger_types)\n', (11656, 11676), False, 'import six\n')] |
# -*- coding: utf-8 -*-
"""
jinja.contrib._djangosupport
~~~~~~~~~~~~~~~~~~~~~~~~~~~
:copyright: 2007 by <NAME>, <NAME>, <NAME>.
:license: BSD, see LICENSE for more details.
"""
from _djangosupport import *
def setup_django_module():
"""
create a new Jinja module for django.
"""
import new
import sys
from django import contrib
from jinja.contrib import _djangosupport
module = contrib.jinja = sys.modules['django.contrib.jinja'] = \
new.module('django.contrib.jinja')
module.__doc__ = _djangosupport.__doc__
module.__all__ = _djangosupport.__all__
get_name = globals().get
for name in _djangosupport.__all__:
setattr(module, name, get_name(name))
def configure(*args, **kwargs):
import warnings
warnings.warn("Magical configuration has been Deprecated. Please use jinja.contrib.djangosupport for imports.", DeprecationWarning)
setup_django_module() | [
"warnings.warn",
"new.module"
] | [((508, 542), 'new.module', 'new.module', (['"""django.contrib.jinja"""'], {}), "('django.contrib.jinja')\n", (518, 542), False, 'import new\n'), ((804, 945), 'warnings.warn', 'warnings.warn', (['"""Magical configuration has been Deprecated. Please use jinja.contrib.djangosupport for imports."""', 'DeprecationWarning'], {}), "(\n 'Magical configuration has been Deprecated. Please use jinja.contrib.djangosupport for imports.'\n , DeprecationWarning)\n", (817, 945), False, 'import warnings\n')] |
from ctypes import c_uint32
from sys import argv
import re
code_map = {
'ADD' : '000_000',
'SUB' : '000_001',
'AND' : '000_010',
'OR' : '000_011',
'SLT' : '000_100',
'MUL' : '000_101',
'HLT' : '011_111',
'LW' : '001_000',
'SW' : '001_001',
'ADDI' : '001_010',
'SUBI' : '001_011',
'SLTI' : '001_100',
'BNEQZ': '001_101',
'BEQZ' : '001_110',
}
type_map = {
'ADD' : 'REG_TYPE',
'SUB' : 'REG_TYPE',
'AND' : 'REG_TYPE',
'OR' : 'REG_TYPE',
'SLT' : 'REG_TYPE',
'MUL' : 'REG_TYPE',
'HLT' : 'HLT_TYPE',
'LW' : 'BASE_TYPE',
'SW' : 'BASE_TYPE',
'ADDI' : 'IMM_TYPE',
'SUBI' : 'IMM_TYPE',
'SLTI' : 'IMM_TYPE',
'BNEQZ': 'JMP_TYPE',
'BEQZ' : 'JMP_TYPE',
}
label_map = {}
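# label name -> absolute instruction index, filled in during the first pass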
def bin_str(int_str):
return format(c_uint32(int(int_str)).value, 'b').zfill(32)
def op_code(input_code):
return code_map[input_code.upper()].replace('_', '')
def op_type(input_code):
return type_map[input_code.upper()]
def remove_R(reg_str):
return reg_str.upper().replace('R', '')
def reg_type_func(input_list, line_no, abs_line_no):
assert len(input_list) == 4, "4 args needed for REG_TYPE at line: {0}".format(line_no)
op = input_list[0]
oc = op_code(op)
reg_list = list(map(remove_R, input_list[1:]))
(rd, rs, rt) = list(map(bin_str, reg_list))
return oc + rs[-5:] + rt[-5:] + rd[-5:] + '00000000000'
def imm_type_func(input_list, line_no, abs_line_no):
assert len(input_list) == 4, "4 args needed for IMM_TYPE at line: {0}".format(line_no)
op = input_list[0]
oc = op_code(op)
reg_list = list(map(remove_R, input_list[1:]))
(rt, rs, imm) = list(map(bin_str, reg_list))
return oc + rs[-5:] + rt[-5:] + imm[-16:]
def base_type_func(input_list, line_no, abs_line_no):
assert len(input_list) == 3, "3 args needed for BASE_TYPE at line: {0}".format(line_no)
op = input_list[0]
oc = op_code(op)
reg_list = list(map(remove_R, input_list[1:]))
(rt, source) = reg_list
assert '(' in source, "missing '(' at line: {0}".format(line_no)
assert ')' in source, "missing ')' at line: {0}".format(line_no)
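    #rewrite "imm(rs)" as "imm rs" so it can be split on whitespace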
for r in (('(', ' '), (')', '')):
source = source.replace(*r)
(imm, rs) = source.split()
reg_list = [rt, imm, rs]
(rt, imm, rs) = list(map(bin_str, reg_list))
return oc + rs[-5:] + rt[-5:] + imm[-16:]
def hlt_type_func(input_list, line_no, abs_line_no):
assert len(input_list) == 1, "1 arg needed for HLT_TYPE at line: {0}".format(line_no)
op = input_list[0]
oc = op_code(op)
return oc + '00000000000000000000000000'
def jmp_type_func(input_list, line_no, abs_line_no):
assert len(input_list) == 3, "3 args needed for JMP_TYPE at line: {0}".format(line_no)
op = input_list[0]
oc = op_code(op)
rs = remove_R(input_list[1])
label = input_list[2]
assert label in label_map, "undefined label: {0} at line: {1}".format(label, line_no)
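    #branch offset is relative to the instruction after the branch, hence the -1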
imm = str(label_map[label] - abs_line_no - 1)
return oc + bin_str(rs)[-5:] + '00000' + bin_str(imm)[-16:]
def op_type_func(input_type):
func_map = {
'REG_TYPE' : reg_type_func,
'IMM_TYPE' : imm_type_func,
'BASE_TYPE': base_type_func,
'HLT_TYPE' : hlt_type_func,
'JMP_TYPE' : jmp_type_func,
}
return func_map[input_type]
with open(argv[1]) as file:
lines = [(line_no + 1, line.strip()) for line_no, line in enumerate(file)]
code_lines = list(filter(lambda elm:
not (elm[1].startswith('#') or len(elm[1]) == 0),
lines))
line_dict = {}
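# first pass: strip comments and labels, recording each label's instruction address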
for abs_line_no, elm in enumerate(code_lines):
(line_no, line) = elm
clean_line = line.split('#')[0].strip()
label = ''
if ':' in clean_line:
split_res = clean_line.split(':')
assert len(split_res) == 2, "only one colon allowed at line: {0}".format(line_no)
(label, clean_line) = list(map(lambda s: s.strip(), split_res))
assert label not in label_map, "duplicated label: {0} at line: {1}".format(label, line_no)
label_map[label] = abs_line_no
line_dict[abs_line_no] = (line_no, clean_line)
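# second pass: encode each instruction as a 32-bit word now that all labels are known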
for abs_line_no, elm in line_dict.items():
(line_no, clean_line) = elm
# print(line_no, abs_line_no, clean_line)
parts = clean_line.split(',')
    op_parts = re.split(r'\s+', parts[0])
op = op_parts[0]
assert op in code_map, "unknown code op: {0} at line: {1}".format(op, line_no)
assert op in type_map, "unknown type op: {0} at line: {1}".format(op, line_no)
opt = op_type(op)
    line_parts = list(map(lambda s: re.sub(r'\s+', '', s), op_parts + parts[1:]))
inst = op_type_func(opt)(line_parts, line_no, abs_line_no)
print(inst, '\t// ', abs_line_no + 1, ':\t', lines[line_no - 1][1])
# print(hex(int(inst, 2)), '\t// ', line_no, ':\t', lines[line_no - 1][1])
| [
"re.sub",
"re.split"
] | [((4444, 4470), 're.split', 're.split', (['"""\\\\s+"""', 'parts[0]'], {}), "('\\\\s+', parts[0])\n", (4452, 4470), False, 'import re\n'), ((4735, 4756), 're.sub', 're.sub', (['"""\\\\s+"""', '""""""', 's'], {}), "('\\\\s+', '', s)\n", (4741, 4756), False, 'import re\n')] |
#===============================================================================
# Imports
#===============================================================================
import os
import re
from os.path import (
join,
abspath,
dirname,
normpath,
)
#===============================================================================
# Helper Methods
#===============================================================================
def get_base_dir(path):
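    # return the parent directory of an absolute path, normalized to end with '/'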
p = path
pc = p.count('/')
assert p and p[0] == '/' and pc >= 1
if p == '/' or pc == 1 or (pc == 2 and p[-1] == '/'):
return '/'
assert pc >= 2
return dirname(p[:-1] if p[-1] == '/' else p) + '/'
def reduce_path(p):
assert p and p[0] == '/'
r = list()
end = p.rfind('/')
while end != -1:
r.append(p[:end+1])
end = p.rfind('/', 0, end)
return r
def join_path(*args):
return abspath(normpath(join(*args)))
def format_path(path, is_dir=None):
"""
>>> format_path('src', True)
'/src/'
>>> format_path('src', False)
'/src'
>>> format_path('src/foo', True)
'/src/foo/'
>>> format_path('///src///foo///mexico.txt//', False)
'/src/foo/mexico.txt'
>>> format_path('///src///foo///mexico.txt//')
'/src/foo/mexico.txt/'
>>> format_path('///src///foo///mexico.txt')
'/src/foo/mexico.txt'
>>> format_path(r'\\the\\quick\\brown\\fox.txt', False)
'/\\\\the\\\\quick\\\\brown\\\\fox.txt'
>>> format_path('/')
'/'
>>> format_path('/', True)
'/'
>>> format_path('/', False)
Traceback (most recent call last):
...
AssertionError
>>> format_path('/a')
'/a'
>>> format_path('/ab')
'/ab'
>>> format_path(None)
Traceback (most recent call last):
...
AssertionError
>>> format_path('//')
Traceback (most recent call last):
...
AssertionError
>>> format_path('/', True)
'/'
# On Unix, '\' is a legitimate file name. Trying to wrangle the right
# escapes when testing '/' and '\' combinations is an absolute 'mare;
# so we use ord() instead to compare numerical values of characters.
>>> _w = lambda p: [ ord(c) for c in p ]
>>> b = chr(92) # forward slash
>>> f = chr(47) # backslash
>>> foo = [102, 111, 111] # ord repr for 'foo'
>>> b2 = b*2
>>> _w(format_path('/'+b))
[47, 92]
>>> _w(format_path('/'+b2))
[47, 92, 92]
>>> _w(format_path('/'+b2, is_dir=False))
[47, 92, 92]
>>> _w(format_path('/'+b2, is_dir=True))
[47, 92, 92, 47]
>>> _w(format_path(b2*2))
[47, 92, 92, 92, 92]
>>> _w(format_path(b2*2, is_dir=True))
[47, 92, 92, 92, 92, 47]
>>> _w(format_path('/foo/'+b))
[47, 102, 111, 111, 47, 92]
>>> _w(format_path('/foo/'+b, is_dir=False))
[47, 102, 111, 111, 47, 92]
>>> _w(format_path('/foo/'+b, is_dir=True))
[47, 102, 111, 111, 47, 92, 47]
"""
assert (
path and
path not in ('//', '///') and
is_dir in (True, False, None)
)
if path == '/':
assert is_dir in (True, None)
return '/'
p = path
while True:
if re.search('//', p):
p = p.replace('//', '/')
else:
break
if p == '/':
assert is_dir in (True, None)
return '/'
if p[0] != '/':
p = '/' + p
if is_dir is True:
if p[-1] != '/':
p += '/'
elif is_dir is False:
if p[-1] == '/':
p = p[:-1]
return p
def format_dir(path):
return format_path(path, is_dir=True)
def format_file(path):
return format_path(path, is_dir=False)
def assert_no_file_dir_clash(paths):
"""
>>> assert_no_file_dir_clash('lskdjf')
Traceback (most recent call last):
...
AssertionError
>>> assert_no_file_dir_clash(False)
Traceback (most recent call last):
...
AssertionError
>>> assert_no_file_dir_clash(['/src/', '/src/'])
Traceback (most recent call last):
...
AssertionError
>>> assert_no_file_dir_clash(['/src', '/src/'])
Traceback (most recent call last):
...
AssertionError
>>> assert_no_file_dir_clash(['/sr', '/src/', '/srcb/'])
>>>
"""
    assert paths and not isinstance(paths, str) and hasattr(paths, '__iter__')
seen = set()
for p in paths:
assert not p in seen
seen.add(p)
assert all(
(p[:-1] if p[-1] == '/' else p + '/') not in seen
for p in paths
)
def get_root_path(paths):
"""
    Given a list of paths (directories or files), return their common root
    directory, or '/' when no deeper common root exists.
>>> get_root_path(['/src/', '/src/trunk/', '/src/trunk/test.txt'])
'/src/'
>>> get_root_path(['/src/', '/src/trk/', '/src/trk/test.txt', '/src/a'])
'/src/'
>>> get_root_path(['/', '/laksdjf', '/lkj'])
'/'
>>> get_root_path(['/'])
'/'
>>> get_root_path(['/a'])
'/'
>>>
>>> get_root_path(['/src/trunk/foo.txt', '/src/tas/2009.01.00/foo.txt'])
'/src/'
>>> get_root_path(['/src/branches/foo/'])
'/src/branches/foo/'
>>> get_root_path(['',])
Traceback (most recent call last):
...
AssertionError
>>> get_root_path(['lskdjf',])
Traceback (most recent call last):
...
AssertionError
>>> get_root_path(['src/trunk/',])
Traceback (most recent call last):
...
AssertionError
>>> get_root_path(['/src/trunk/', '/src/trunk'])
Traceback (most recent call last):
...
AssertionError
"""
assert (
hasattr(paths, '__iter__') and
#len(paths) >= 1 and
all(d and d[0] == '/' for d in paths)
)
#if len(paths) == 1 and paths[0] == '/':
# return '/'
def _parts(p):
parts = p.split('/')
return parts if p[-1] == '/' else parts[:-1]
paths = [ format_path(p) for p in paths ]
assert_no_file_dir_clash(paths)
common = _parts(paths[0])
for j in range(1, len(paths)):
parts = _parts(paths[j])
for i in range(len(common)):
if i == len(parts) or common[i] != parts[i]:
del common[i:]
break
if not common or (len(common) == 1 and common[0] == ''):
return '/'
return format_dir('/'.join(common))
def build_tree(tree, prefix=''):
jp = lambda k: join_path(prefix, k)
for (f, d) in ((jp(k), v) for (k, v) in tree.items()):
if not d:
try:
os.makedirs(f)
except OSError:
pass
else:
try:
os.makedirs(dirname(f))
except OSError:
pass
with open(f, 'w') as fp:
try:
fp.write(d)
except TypeError:
# Python 2.6 balks if this is a bytearray.
fp.write(str(d))
def extract_component_name(path):
"""
>>> extract_component_name('/foo/trunk/bar.txt')
'foo'
>>> extract_component_name('/foo/trunk/')
'foo'
>>> extract_component_name('/foo/branches/1.x/abcd.txt')
'foo'
>>> extract_component_name('/foo')
Traceback (most recent call last):
...
AssertionError
>>> extract_component_name('/foo/')
Traceback (most recent call last):
...
AssertionError
>>> extract_component_name('foo/')
Traceback (most recent call last):
...
AssertionError
>>> extract_component_name('/foo')
Traceback (most recent call last):
...
AssertionError
>>> extract_component_name('/foo/trunk')
Traceback (most recent call last):
...
AssertionError
>>> extract_component_name(None)
Traceback (most recent call last):
...
AssertionError
>>> extract_component_name('')
Traceback (most recent call last):
...
AssertionError
"""
assert path and path[0] == '/'
assert path.count('/') >= 3
return path[1:path.find('/', 2)]
#===============================================================================
# Path Matching
#===============================================================================
class PathMatcherConfig(object):
singular = tuple()
plural = tuple()
match = tuple()
ending = tuple()
class DefaultPathMatcherConfig(PathMatcherConfig):
singular = ('tag', 'branch', 'trunk')
plural = ('tags', 'branches', 'trunks')
match = ('tags', 'branches', 'trunk')
ending = ('([^/]+)/', '([^/]+)/', None)
class PathMatcher(object):
def __init__(self, config=None, *args, **kwds):
if not config:
config = DefaultPathMatcherConfig
self.__dict__.update(
(k, getattr(config, k)) for k in dir(config)
if not k.startswith('_')
)
self.singular_to_plural = dict()
self.plural_to_singular = dict()
self.plural_to_basedir = dict()
self.plural_to_hint = dict()
self.excluded_paths = set()
self.excluded_pattern = ''
self.excluded_regex = None
data = zip(self.singular, self.plural, self.match, self.ending)
for (singular, plural, match, ending) in data:
setattr(self, plural, [])
self.singular_to_plural[singular] = plural
self.plural_to_singular[plural] = singular
self.plural_to_basedir[plural] = []
self.plural_to_hint[plural] = []
p = '.+?%s/' % match
if ending:
p += ending
getattr(self, plural).append(p)
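        # dynamically attach is_/get_/find_ helpers (is_tag, get_branch_path, ...) for each kind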
functions = list()
for (s, p) in zip(self.singular, self.plural):
functions += [
(p, 'is_%s' % s, '_is_xxx'),
(p, 'get_%s' % s, '_get_xxx'),
(p, 'is_%s_path' % s, '_is_xxx_path'),
(p, 'get_%s_path' % s, '_get_xxx_path'),
(p, 'find_%s_paths' % s, '_find_xxx_paths'),
]
class _method_wrapper(object):
"""Helper class for wrapping our key (is|get|find)_xxx methods."""
def __init__(self, **kwds):
self.__dict__.update(**kwds)
def __call__(self, paths):
return getattr(self.s, self.f)(paths, self.p)
for (p, n, f) in functions:
self.__dict__.setdefault(f + '_methods', []).append(n)
setattr(self, n, _method_wrapper(s=self, p=p, n=n, f=f))
def add_path(self, path, singular):
"""
>>> pm = PathMatcher()
>>> pm.add_path('/head/', 'trunk')
>>> pm.get_root_details_tuple('/head/')
('/head/', 'trunk', 'head')
>>> pm.get_root_details_tuple('/head/head')
('/head/', 'trunk', 'head')
>>> pm.get_root_details_tuple('/head/src/trunk/branches/foo/')
('/head/', 'trunk', 'head')
>>> pm.get_root_details_tuple('/head/foo.txt')
('/head/', 'trunk', 'head')
>>> pm = PathMatcher()
>>> pm.add_path('/releng/10.1/', 'tag')
>>> pm.get_root_details_tuple('/releng/10.1/')
('/releng/10.1/', 'tag', '10.1')
>>> pm.is_tag('/releng/10.1/')
True
"""
assert path
assert path[0] == '/' and path[-1] == '/', path
assert len(path) >= 3, path
assert singular in ('trunk', 'tag', 'branch')
plural = self.singular_to_plural[singular]
assert plural in self.plural_to_hint
self.plural_to_hint[plural].append(path)
getattr(self, plural).append(path)
def add_trunk(self, path):
self.add_path(path, 'trunk')
def add_branch(self, path):
self.add_path(path, 'branch')
def add_tag(self, path):
self.add_path(path, 'tag')
def add_basedir(self, path, plural):
"""
>>> pm = PathMatcher()
>>> pm.add_branches_basedir('/stable/')
>>> pm.get_root_details_tuple('/stable/10/')
('/stable/10/', 'branch', '10')
>>> pm.get_root_details_tuple('/stable/stable/')
('/stable/stable/', 'branch', 'stable')
>>> pm.get_root_details_tuple('/stable/stable')
>>>
>>> pm.get_root_details_tuple('/stable/10/src/trunk/branches/foo/')
('/stable/10/', 'branch', '10')
>>> pm.get_root_details_tuple('/stable/10/foo.txt')
('/stable/10/', 'branch', '10')
>>> pm.is_branch('/stable/10/')
True
>>> pm.add_tags_basedir('/releng/')
>>> pm.get_root_details_tuple('/releng/10.1/')
('/releng/10.1/', 'tag', '10.1')
>>> pm.get_root_details_tuple('/releng/releng/')
('/releng/releng/', 'tag', 'releng')
>>> pm.get_root_details_tuple('/releng/releng')
>>>
>>> pm.get_root_details_tuple('/releng/10.1/src/trunk/tags/foo/')
('/releng/10.1/', 'tag', '10.1')
>>> pm.get_root_details_tuple('/releng/10.1/foo.txt')
('/releng/10.1/', 'tag', '10.1')
>>> pm.is_tag('/releng/10.1/')
True
"""
assert path
assert path[0] == '/' and path[-1] == '/', path
assert len(path) >= 3, path
assert plural in ('tags', 'branches'), plural
assert plural in self.plural_to_basedir, plural
self.plural_to_basedir[plural].append(path)
pattern = path + '([^/]+)/'
getattr(self, plural).append(pattern)
def add_branches_basedir(self, path):
self.add_basedir(path, 'branches')
def add_tags_basedir(self, path):
self.add_basedir(path, 'tags')
def add_exclusions(self, paths):
"""
>>> pm = PathMatcher()
>>> pm.get_root_dir('/src/trunk/')
'/src/trunk/'
>>> pm.get_root_dir('/src/trunk/foo.txt')
'/src/trunk/'
>>> pm.add_exclusions(['/src/trunk/'])
>>> pm.get_root_dir('/src/trunk/')
>>>
>>> pm.get_root_dir('/src/trunk/foo.txt')
>>>
>>> pm = PathMatcher()
>>> exclusions = ['/foo/trunk/', '/branches/1.0.x/']
>>> pm.add_exclusions(exclusions)
>>> pm.get_root_dir('/src/trunk/foo.txt')
'/src/trunk/'
>>> pm.get_root_dir('/foo/trunk/')
>>>
>>> pm.get_root_dir('/branches/1.0.x/')
>>>
>>> pm.get_root_dir('/branches/1.0.x/foo.txt')
>>>
"""
for path in paths:
self.excluded_paths.add(path)
pattern = '^(%s)' % '|'.join(self.excluded_paths)
self.excluded_pattern = pattern
self.excluded_regex = re.compile(pattern)
def is_excluded(self, path):
if self.excluded_paths and self.excluded_regex.match(path):
return True
else:
return False
def _get_xxx(self, path, xxx):
assert isinstance(path, str)
for pattern in getattr(self, xxx):
found = re.findall(pattern + '$', path)
if found and not self.is_excluded(path):
return found
def _is_xxx(self, path, xxx):
return bool(self._get_xxx(path, xxx))
def _get_xxx_path(self, path, xxx):
assert isinstance(path, str)
for pattern in getattr(self, xxx):
found = re.findall(pattern, path)
if found and not self.is_excluded(path):
return found
def _is_xxx_path(self, path, xxx):
return bool(self._get_xxx_path(path, xxx))
def _find_xxx_paths(self, paths, xxx):
assert isinstance(paths, (list, tuple))
f = dict()
for pattern in getattr(self, xxx):
for path in paths:
m = re.search(pattern, path)
if m and not self.is_excluded(path):
f.setdefault('/'.join(m.groups()), []).append(m.group(0))
return f
def is_unknown_orig(self, path):
"""
Returns true if all the is_xxx methods return false.
"""
return all(
not getattr(self, attr)(path)
for attr in dir(self)
if attr != 'is_unknown' and
attr.startswith('is_') and
not attr.endswith('_path')
)
def is_unknown(self, path):
"""
Returns true if all the is_xxx methods return false.
"""
return all(
not getattr(self, method_name)(path)
for method_name in self._is_xxx_methods
)
def get_root_dir(self, path):
"""
>>> pm = PathMatcher()
>>> pm.get_root_dir('/src/trunk/foo.txt')
'/src/trunk/'
>>> pm.get_root_dir('/src/trunk/')
'/src/trunk/'
>>> pm.get_root_dir('/src/trunk/foo/bar/mexico/tijuana.txt')
'/src/trunk/'
>>> pm.get_root_dir('/src/branches/GLB02030120-pricing/java/Foo.java')
'/src/branches/GLB02030120-pricing/'
>>> pm.get_root_dir('/src/joe.txt')
>>>
>>> pm.get_root_dir('/src/branches')
>>>
>>> pm.get_root_dir('/src/branches/')
>>>
>>> pm.get_root_dir('/src/tags')
>>>
>>> pm.get_root_dir('/src/tags/')
>>>
>>> pm.get_root_dir('/src/trunk')
>>>
>>> pm.get_root_dir('/src/trunk/trunk')
'/src/trunk/'
>>> pm.get_root_dir('/src/trunk/trunk/')
'/src/trunk/'
>>> pm.get_root_dir('/src/trunk/foobar/src/trunk/test.txt')
'/src/trunk/'
>>> pm.get_root_dir('/branches/foo/')
'/branches/foo/'
>>> pm.get_root_dir('/branches/trunk/')
'/branches/trunk/'
>>> pm.get_root_dir('/branches/trunk/foo')
'/branches/trunk/'
>>> pm.get_root_dir('/tags/trunk/')
'/tags/trunk/'
>>> pm.get_root_dir('/tags/trunk/foo')
'/tags/trunk/'
>>> pm.add_path('/branches/1.x/', 'branch')
>>> pm.get_root_dir('/branches/1.x/')
'/branches/1.x/'
>>>
"""
assert isinstance(path, str)
assert path[0] == '/' if path else True
root = None
min_root_length = None
for plural in self.plural:
patterns = [
'^(%s)(.*)$' % p.replace('(', '').replace(')', '')
for p in getattr(self, plural)
]
for pattern in patterns:
match = re.search(pattern, path)
if match and not self.is_excluded(path):
groups = match.groups()
assert len(groups) in (1, 2), groups
r = groups[0]
l = len(r)
if root is None:
root = r
min_root_length = l
else:
if r == root:
# If root hints are being used, we could get
# multiple hits for the same path, e.g.
# /branches/1.x/ would be hit by the normal
# /branches/ detection and the root hint logic if
# there was a /branches/1.x/ hint added.
continue
# The only way we could possibly have multiple matches
# with the exact same length for the root path is for
# paths like '/branches/trunk/' or '/tags/trunk/'.
if l == min_root_length:
assert r.endswith('trunk/'), (r, l, min_root_length)
elif l < min_root_length:
root = r
min_root_length = l
else:
assert l > min_root_length, (l, min_root_length)
return root
def get_root_details_tuple(self, path):
"""
>>> pm = PathMatcher()
>>> pm.get_root_details_tuple('/src/trunk/')
('/src/trunk/', 'trunk', 'trunk')
>>> pm.get_root_details_tuple('/src/trunk/trunk')
('/src/trunk/', 'trunk', 'trunk')
>>> pm.get_root_details_tuple('/src/trunk/branches/foo/')
('/src/trunk/', 'trunk', 'trunk')
>>> pm.get_root_details_tuple('/src/branches/GLB02051234/')
('/src/branches/GLB02051234/', 'branch', 'GLB02051234')
>>> pm.get_root_details_tuple('/src/branches/foo/tags')
('/src/branches/foo/', 'branch', 'foo')
>>> pm.get_root_details_tuple('/src/branches/foo/tags/1.0')
('/src/branches/foo/', 'branch', 'foo')
>>> pm.get_root_details_tuple('/src/branches/foo/tags/1.0/')
('/src/branches/foo/', 'branch', 'foo')
>>> pm.get_root_details_tuple('/branches/foo/')
('/branches/foo/', 'branch', 'foo')
>>> pm.get_root_details_tuple('/branches/trunk/')
('/branches/trunk/', 'branch', 'trunk')
>>> pm.get_root_details_tuple('/src/tags/2009.01.1/')
('/src/tags/2009.01.1/', 'tag', '2009.01.1')
>>> pm.get_root_details_tuple('/src/tags/2009.01.1/asldkjf/lkjd/')
('/src/tags/2009.01.1/', 'tag', '2009.01.1')
>>> pm.get_root_details_tuple('/src/tags/2009.01.1/lkjf/ljd/test.txt')
('/src/tags/2009.01.1/', 'tag', '2009.01.1')
>>> pm.get_root_details_tuple('/tags/1.0.0/')
('/tags/1.0.0/', 'tag', '1.0.0')
>>> pm.get_root_details_tuple('/tags/trunk/')
('/tags/trunk/', 'tag', 'trunk')
>>> pm.get_root_details_tuple('/')
('/', 'absolute', '/')
"""
if path == '/':
return ('/', 'absolute', '/')
        found = False
root_dir = self.get_root_dir(path)
if not root_dir:
return
root_type = None
root_name = None
for method_name in self._get_xxx_methods:
match = getattr(self, method_name)(root_dir)
if match:
assert not self.is_excluded(root_dir), (method_name, root_dir)
if found:
# Yet more hackery to support crap like /branches/trunk/
# and /tags/trunk/. We're relying on the fact that the
# 'get_trunks' method will be called last.
assert 'trunk' in method_name
continue
found = True
assert len(match) == 1
m = match[0]
if m == root_dir:
# This feels a bit hacky but eh... Basically, if our
# regex matched one root dir, then the thing we're
# matching didn't supply an ending regex, so we use the
# last directory name in the path as the root name. In
                    # practice, this is used for matching trunk paths and
# returning 'trunk' as the root_name, versus, say,
# returning 2009.01 if we matched against /tags/2009.01.
assert m[-1] == '/'
root_name = m[m[:-1].rfind('/')+1:-1]
else:
root_name = m
root_type = method_name.replace('get_', '')
if not found:
return None
else:
assert (
root_type in self.singular and
root_dir and root_name
)
return (root_dir, root_type, root_name)
# vim:set ts=8 sw=4 sts=4 tw=78 et:
| [
"os.makedirs",
"re.compile",
"os.path.join",
"os.path.dirname",
"re.findall",
"re.search"
] | [((655, 693), 'os.path.dirname', 'dirname', (["(p[:-1] if p[-1] == '/' else p)"], {}), "(p[:-1] if p[-1] == '/' else p)\n", (662, 693), False, 'from os.path import join, abspath, dirname, normpath\n'), ((3203, 3221), 're.search', 're.search', (['"""//"""', 'p'], {}), "('//', p)\n", (3212, 3221), False, 'import re\n'), ((14642, 14661), 're.compile', 're.compile', (['pattern'], {}), '(pattern)\n', (14652, 14661), False, 'import re\n'), ((936, 947), 'os.path.join', 'join', (['*args'], {}), '(*args)\n', (940, 947), False, 'from os.path import join, abspath, dirname, normpath\n'), ((14963, 14994), 're.findall', 're.findall', (["(pattern + '$')", 'path'], {}), "(pattern + '$', path)\n", (14973, 14994), False, 'import re\n'), ((15299, 15324), 're.findall', 're.findall', (['pattern', 'path'], {}), '(pattern, path)\n', (15309, 15324), False, 'import re\n'), ((6570, 6584), 'os.makedirs', 'os.makedirs', (['f'], {}), '(f)\n', (6581, 6584), False, 'import os\n'), ((15703, 15727), 're.search', 're.search', (['pattern', 'path'], {}), '(pattern, path)\n', (15712, 15727), False, 'import re\n'), ((18400, 18424), 're.search', 're.search', (['pattern', 'path'], {}), '(pattern, path)\n', (18409, 18424), False, 'import re\n'), ((6693, 6703), 'os.path.dirname', 'dirname', (['f'], {}), '(f)\n', (6700, 6703), False, 'from os.path import join, abspath, dirname, normpath\n')] |
import matplotlib.pyplot as plt
import numpy as np
import glob
import os
import re
ALGORITHMS = [
'BEST',
'DFS',
'BFS',
'ASTAR'
]
SOLUTION_FOLDER_NAME_FORMAT = 'data/%s_solutions'
ERR_NO_DATA_FORMAT = 'Please generate some data for %s before running this app!'
class MatrixSolution:
def __init__(self, file_name):
self.file_name = file_name
with open(self.file_name, 'r') as f:
try:
self._solution = f.readlines()[0]
except:
self._solution = 'NONE'
f.close()
def __str__(self):
return self._solution
@property
def count(self):
return len(str(self).split(','))
def create_graph():
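    # For each algorithm, plot the average number of nodes per solution
    # against the board dimension N parsed from each solution file name.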
linestyles = {
'BEST': '-',
'DFS': '--',
'BFS': '.-',
'ASTAR': '--'
}
fig = plt.figure()
for algorithm in ALGORITHMS:
algorithm_solutions_folder = SOLUTION_FOLDER_NAME_FORMAT % algorithm
if not os.path.exists(algorithm_solutions_folder) or not os.listdir(algorithm_solutions_folder):
print(ERR_NO_DATA_FORMAT % algorithm)
exit(1)
solution_data = dict()
files = glob.glob(algorithm_solutions_folder + "/matrix*.txt")
for file_name in files:
dim = re.search('\\d+', file_name).group(0)
solution_data.setdefault(str(dim), []).append(MatrixSolution(file_name))
x, solutions = zip(*sorted(solution_data.items()))
y = list(np.average(list(res.count for res in results)) for results in solutions)
        plt.plot(x, y, linestyles[algorithm], label=algorithm, linewidth=4.0)
plt.title("Benchmark results")
plt.xlabel("N")
    plt.ylabel("avg. Nodes in solution")
plt.legend()
plt.savefig('result1.png')
if __name__ == '__main__':
create_graph()
| [
"os.path.exists",
"os.listdir",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.figure",
"glob.glob",
"matplotlib.pyplot.title",
"matplotlib.pyplot.legend",
"re.search"
] | [((817, 829), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (827, 829), True, 'import matplotlib.pyplot as plt\n'), ((1647, 1677), 'matplotlib.pyplot.title', 'plt.title', (['"""Benchmark results"""'], {}), "('Benchmark results')\n", (1656, 1677), True, 'import matplotlib.pyplot as plt\n'), ((1682, 1697), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""N"""'], {}), "('N')\n", (1692, 1697), True, 'import matplotlib.pyplot as plt\n'), ((1702, 1738), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""avg. Nodes in solution"""'], {}), "('avg. Nodes in solution')\n", (1712, 1738), True, 'import matplotlib.pyplot as plt\n'), ((1744, 1756), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (1754, 1756), True, 'import matplotlib.pyplot as plt\n'), ((1761, 1787), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""result1.png"""'], {}), "('result1.png')\n", (1772, 1787), True, 'import matplotlib.pyplot as plt\n'), ((1187, 1241), 'glob.glob', 'glob.glob', (["(algorithm_solutions_folder + '/matrix*.txt')"], {}), "(algorithm_solutions_folder + '/matrix*.txt')\n", (1196, 1241), False, 'import glob, re\n'), ((1572, 1641), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y', 'linestyles[algorithm]'], {'label': 'algorithm', 'linewidth': '(4.0)'}), '(x, y, linestyles[algorithm], label=algorithm, linewidth=4.0)\n', (1580, 1641), True, 'import matplotlib.pyplot as plt\n'), ((955, 997), 'os.path.exists', 'os.path.exists', (['algorithm_solutions_folder'], {}), '(algorithm_solutions_folder)\n', (969, 997), False, 'import os\n'), ((1005, 1043), 'os.listdir', 'os.listdir', (['algorithm_solutions_folder'], {}), '(algorithm_solutions_folder)\n', (1015, 1043), False, 'import os\n'), ((1292, 1320), 're.search', 're.search', (['"""\\\\d+"""', 'file_name'], {}), "('\\\\d+', file_name)\n", (1301, 1320), False, 'import glob, re\n')] |
from datetime import date
import factory
from factory import fuzzy
from django_cookie_app import models
class BasePerfumFactory(factory.django.DjangoModelFactory):
"""
BasePerfumFactory class
"""
date = fuzzy.FuzzyDate(date(2000, 1, 1))
total_ball = fuzzy.FuzzyInteger(0, 42)
number_time_fill = fuzzy.FuzzyInteger(0, 42)
percentage = fuzzy.FuzzyFloat(0.5, 42.7)
ball_bought = fuzzy.FuzzyInteger(0, 42)
class ChocoOrangeFactory(BasePerfumFactory):
"""
ChocoOrangeFactory class
"""
perfume = factory.Faker('name')
class Meta:
model = models.ChocoOrange
class MintChocoFactory(BasePerfumFactory):
"""
MintChocoFactory class
"""
perfume = factory.Faker('name')
class Meta:
model = models.MintChoco
class SyrupFactory(BasePerfumFactory):
"""
SyrupFactory class
"""
perfume = factory.Faker('name')
class Meta:
model = models.Syrup
class VanillaStrawberryChocolateFactory(BasePerfumFactory):
"""
VanillaStrawberryChocolateFactory class
"""
perfume = factory.Faker('name')
class Meta:
model = models.VanillaStrawberryChocolate
class RaspberryWhiteChocolateFactory(BasePerfumFactory):
"""
RaspberryWhiteChocolateFactory class
"""
perfume = factory.Faker('name')
class Meta:
model = models.RaspberryWhiteChocolate
class OrderFactory(factory.django.DjangoModelFactory):
"""
OrderFactory class
"""
price = fuzzy.FuzzyInteger(0, 42)
count_ball = fuzzy.FuzzyInteger(0, 42)
code = factory.Faker('name')
date = fuzzy.FuzzyDate(date(2000, 1, 1))
choco_oran = factory.SubFactory(ChocoOrangeFactory)
mint_choco = factory.SubFactory(MintChocoFactory)
syrup = factory.SubFactory(SyrupFactory)
vanilla = factory.SubFactory(VanillaStrawberryChocolateFactory)
raspberry = factory.SubFactory(RaspberryWhiteChocolateFactory)
class Meta:
model = models.Order
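# Hedged usage sketch (assumes a configured Django test database):
#   order = OrderFactory()  # creates and saves an Order, building the five
#                           # related perfume rows via the SubFactory fields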
| [
"factory.SubFactory",
"factory.fuzzy.FuzzyFloat",
"factory.fuzzy.FuzzyInteger",
"factory.Faker",
"datetime.date"
] | [((277, 302), 'factory.fuzzy.FuzzyInteger', 'fuzzy.FuzzyInteger', (['(0)', '(42)'], {}), '(0, 42)\n', (295, 302), False, 'from factory import fuzzy\n'), ((326, 351), 'factory.fuzzy.FuzzyInteger', 'fuzzy.FuzzyInteger', (['(0)', '(42)'], {}), '(0, 42)\n', (344, 351), False, 'from factory import fuzzy\n'), ((369, 396), 'factory.fuzzy.FuzzyFloat', 'fuzzy.FuzzyFloat', (['(0.5)', '(42.7)'], {}), '(0.5, 42.7)\n', (385, 396), False, 'from factory import fuzzy\n'), ((415, 440), 'factory.fuzzy.FuzzyInteger', 'fuzzy.FuzzyInteger', (['(0)', '(42)'], {}), '(0, 42)\n', (433, 440), False, 'from factory import fuzzy\n'), ((551, 572), 'factory.Faker', 'factory.Faker', (['"""name"""'], {}), "('name')\n", (564, 572), False, 'import factory\n'), ((731, 752), 'factory.Faker', 'factory.Faker', (['"""name"""'], {}), "('name')\n", (744, 752), False, 'import factory\n'), ((901, 922), 'factory.Faker', 'factory.Faker', (['"""name"""'], {}), "('name')\n", (914, 922), False, 'import factory\n'), ((1109, 1130), 'factory.Faker', 'factory.Faker', (['"""name"""'], {}), "('name')\n", (1122, 1130), False, 'import factory\n'), ((1332, 1353), 'factory.Faker', 'factory.Faker', (['"""name"""'], {}), "('name')\n", (1345, 1353), False, 'import factory\n'), ((1530, 1555), 'factory.fuzzy.FuzzyInteger', 'fuzzy.FuzzyInteger', (['(0)', '(42)'], {}), '(0, 42)\n', (1548, 1555), False, 'from factory import fuzzy\n'), ((1573, 1598), 'factory.fuzzy.FuzzyInteger', 'fuzzy.FuzzyInteger', (['(0)', '(42)'], {}), '(0, 42)\n', (1591, 1598), False, 'from factory import fuzzy\n'), ((1610, 1631), 'factory.Faker', 'factory.Faker', (['"""name"""'], {}), "('name')\n", (1623, 1631), False, 'import factory\n'), ((1694, 1732), 'factory.SubFactory', 'factory.SubFactory', (['ChocoOrangeFactory'], {}), '(ChocoOrangeFactory)\n', (1712, 1732), False, 'import factory\n'), ((1750, 1786), 'factory.SubFactory', 'factory.SubFactory', (['MintChocoFactory'], {}), '(MintChocoFactory)\n', (1768, 1786), False, 'import factory\n'), ((1799, 1831), 'factory.SubFactory', 'factory.SubFactory', (['SyrupFactory'], {}), '(SyrupFactory)\n', (1817, 1831), False, 'import factory\n'), ((1846, 1899), 'factory.SubFactory', 'factory.SubFactory', (['VanillaStrawberryChocolateFactory'], {}), '(VanillaStrawberryChocolateFactory)\n', (1864, 1899), False, 'import factory\n'), ((1916, 1966), 'factory.SubFactory', 'factory.SubFactory', (['RaspberryWhiteChocolateFactory'], {}), '(RaspberryWhiteChocolateFactory)\n', (1934, 1966), False, 'import factory\n'), ((242, 258), 'datetime.date', 'date', (['(2000)', '(1)', '(1)'], {}), '(2000, 1, 1)\n', (246, 258), False, 'from datetime import date\n'), ((1659, 1675), 'datetime.date', 'date', (['(2000)', '(1)', '(1)'], {}), '(2000, 1, 1)\n', (1663, 1675), False, 'from datetime import date\n')] |
import os
import sys
import mock
from nose.tools import with_setup, raises, ok_, eq_
from stve.application import StveTestRunner
from stve.workspace import Workspace
from stve.exception import *
class TestStveTestRunner(object):
@classmethod
def setup(cls):
cls.runner = StveTestRunner()
cls.root = os.path.normpath(os.path.join(os.path.dirname(__file__)))
cls.script_path = os.path.join(cls.root, "data")
cls.workspace = Workspace(os.path.join(cls.root, "workspace"))
cls.report_path = cls.workspace.mkdir("report")
@classmethod
def teardown(cls):
cls.workspace.rmdir("")
@with_setup(setup, teardown)
def test_stvetestrunner_execute_success_01(self):
with mock.patch('sys.argv', ['stvetestrunner.py', 'notdefine.py']):
self.runner.execute("success.py", self.script_path, v=0)
@with_setup(setup, teardown)
def test_stvetestrunner_execute_success_02(self):
with mock.patch('sys.argv', ['stvetestrunner.py', 'notdefine.py']):
self.runner.execute("failed.py", self.script_path, v=0)
@with_setup(setup, teardown)
def test_stvetestrunner_execute_success_03(self):
with mock.patch('sys.argv', ['stvetestrunner.py', 'notdefine.py']):
self.runner.execute("notdefine.py", self.script_path, v=0)
@with_setup(setup, teardown)
def test_stvetestrunner_execute_success_04(self):
self.runner.execute("notdefine", self.script_path)
@with_setup(setup, teardown)
@raises(TestRunnerError)
def test_stvetestrunner_execute_failed_01(self):
self.runner.execute("notexists.py", self.script_path)
@with_setup(setup, teardown)
@raises(TestRunnerError)
def test_stvetestrunner_execute_failed_02(self):
self.runner.execute("success.py", self.workspace.mkdir("script"))
@with_setup(setup, teardown)
@raises(TestRunnerError)
def test_stvetestrunner_execute_failed_03(self):
with mock.patch('sys.argv', ['stvetestrunner.py', 'notdefine.py']):
self.runner.execute("not.pydefine", self.script_path, v=0)
@with_setup(setup, teardown)
def test_stvetestrunner_execute_with_report_success_01(self):
with mock.patch('sys.argv', ['stvetestrunner.py', 'notdefine.py']):
self.runner.execute_with_report(
"success.py", self.script_path, self.report_path)
ok_(len(os.listdir(self.report_path)) > 0)
@with_setup(setup, teardown)
def test_stvetestrunner_execute_with_report_success_02(self):
with mock.patch('sys.argv', ['stvetestrunner.py', 'notdefine.py']):
self.runner.execute_with_report(
"failed.py", self.script_path, self.report_path)
ok_(len(os.listdir(self.report_path)) > 0)
@with_setup(setup, teardown)
def test_stvetestrunner_execute_with_report_success_03(self):
with mock.patch('sys.argv', ['stvetestrunner.py', 'notdefine.py']):
self.runner.execute_with_report(
"notdefine.py", self.script_path, self.report_path)
ok_(len(os.listdir(self.report_path)) == 0)
@with_setup(setup, teardown)
def test_stvetestrunner_execute_with_report_success_04(self):
with mock.patch('sys.argv', ['stvetestrunner.py', 'notdefine.py']):
self.runner.execute_with_report(
"notdefine", self.script_path, self.report_path)
ok_(len(os.listdir(self.report_path)) == 0)
@with_setup(setup, teardown)
@raises(TestRunnerError)
def test_stvetestrunner_execute_with_report_failed_01(self):
with mock.patch('sys.argv', ['stvetestrunner.py', 'notdefine.py']):
self.runner.execute_with_report(
"notexists.py", self.script_path, self.report_path)
@with_setup(setup, teardown)
@raises(TestRunnerError)
def test_stvetestrunner_execute_with_report_failed_02(self):
with mock.patch('sys.argv', ['stvetestrunner.py', 'notdefine.py']):
self.runner.execute_with_report(
"success.py", self.workspace.mkdir("script"), self.report_path)
@with_setup(setup, teardown)
@raises(TestRunnerError)
def test_stvetestrunner_execute_with_report_failed_03(self):
with mock.patch('sys.argv', ['stvetestrunner.py', 'notdefine.py']):
self.runner.execute_with_report(
"success.py", self.script_path, os.path.join(self.workspace.root(), "hoge"))
@with_setup(setup, teardown)
@raises(TestRunnerError)
def test_stvetestrunner_execute_with_report_failed_04(self):
with mock.patch('sys.argv', ['stvetestrunner.py', 'notdefine.py']):
self.runner.execute_with_report(
"not.pydefine", self.script_path, self.report_path)
| [
"mock.patch",
"nose.tools.with_setup",
"os.listdir",
"stve.application.StveTestRunner",
"os.path.join",
"os.path.dirname",
"nose.tools.raises"
] | [((647, 674), 'nose.tools.with_setup', 'with_setup', (['setup', 'teardown'], {}), '(setup, teardown)\n', (657, 674), False, 'from nose.tools import with_setup, raises, ok_, eq_\n'), ((880, 907), 'nose.tools.with_setup', 'with_setup', (['setup', 'teardown'], {}), '(setup, teardown)\n', (890, 907), False, 'from nose.tools import with_setup, raises, ok_, eq_\n'), ((1112, 1139), 'nose.tools.with_setup', 'with_setup', (['setup', 'teardown'], {}), '(setup, teardown)\n', (1122, 1139), False, 'from nose.tools import with_setup, raises, ok_, eq_\n'), ((1347, 1374), 'nose.tools.with_setup', 'with_setup', (['setup', 'teardown'], {}), '(setup, teardown)\n', (1357, 1374), False, 'from nose.tools import with_setup, raises, ok_, eq_\n'), ((1494, 1521), 'nose.tools.with_setup', 'with_setup', (['setup', 'teardown'], {}), '(setup, teardown)\n', (1504, 1521), False, 'from nose.tools import with_setup, raises, ok_, eq_\n'), ((1527, 1550), 'nose.tools.raises', 'raises', (['TestRunnerError'], {}), '(TestRunnerError)\n', (1533, 1550), False, 'from nose.tools import with_setup, raises, ok_, eq_\n'), ((1672, 1699), 'nose.tools.with_setup', 'with_setup', (['setup', 'teardown'], {}), '(setup, teardown)\n', (1682, 1699), False, 'from nose.tools import with_setup, raises, ok_, eq_\n'), ((1705, 1728), 'nose.tools.raises', 'raises', (['TestRunnerError'], {}), '(TestRunnerError)\n', (1711, 1728), False, 'from nose.tools import with_setup, raises, ok_, eq_\n'), ((1862, 1889), 'nose.tools.with_setup', 'with_setup', (['setup', 'teardown'], {}), '(setup, teardown)\n', (1872, 1889), False, 'from nose.tools import with_setup, raises, ok_, eq_\n'), ((1895, 1918), 'nose.tools.raises', 'raises', (['TestRunnerError'], {}), '(TestRunnerError)\n', (1901, 1918), False, 'from nose.tools import with_setup, raises, ok_, eq_\n'), ((2125, 2152), 'nose.tools.with_setup', 'with_setup', (['setup', 'teardown'], {}), '(setup, teardown)\n', (2135, 2152), False, 'from nose.tools import with_setup, raises, ok_, eq_\n'), ((2467, 2494), 'nose.tools.with_setup', 'with_setup', (['setup', 'teardown'], {}), '(setup, teardown)\n', (2477, 2494), False, 'from nose.tools import with_setup, raises, ok_, eq_\n'), ((2808, 2835), 'nose.tools.with_setup', 'with_setup', (['setup', 'teardown'], {}), '(setup, teardown)\n', (2818, 2835), False, 'from nose.tools import with_setup, raises, ok_, eq_\n'), ((3153, 3180), 'nose.tools.with_setup', 'with_setup', (['setup', 'teardown'], {}), '(setup, teardown)\n', (3163, 3180), False, 'from nose.tools import with_setup, raises, ok_, eq_\n'), ((3495, 3522), 'nose.tools.with_setup', 'with_setup', (['setup', 'teardown'], {}), '(setup, teardown)\n', (3505, 3522), False, 'from nose.tools import with_setup, raises, ok_, eq_\n'), ((3528, 3551), 'nose.tools.raises', 'raises', (['TestRunnerError'], {}), '(TestRunnerError)\n', (3534, 3551), False, 'from nose.tools import with_setup, raises, ok_, eq_\n'), ((3812, 3839), 'nose.tools.with_setup', 'with_setup', (['setup', 'teardown'], {}), '(setup, teardown)\n', (3822, 3839), False, 'from nose.tools import with_setup, raises, ok_, eq_\n'), ((3845, 3868), 'nose.tools.raises', 'raises', (['TestRunnerError'], {}), '(TestRunnerError)\n', (3851, 3868), False, 'from nose.tools import with_setup, raises, ok_, eq_\n'), ((4141, 4168), 'nose.tools.with_setup', 'with_setup', (['setup', 'teardown'], {}), '(setup, teardown)\n', (4151, 4168), False, 'from nose.tools import with_setup, raises, ok_, eq_\n'), ((4174, 4197), 'nose.tools.raises', 'raises', (['TestRunnerError'], {}), '(TestRunnerError)\n', (4180, 4197), False, 'from nose.tools import with_setup, raises, ok_, eq_\n'), ((4483, 4510), 'nose.tools.with_setup', 'with_setup', (['setup', 'teardown'], {}), '(setup, teardown)\n', (4493, 4510), False, 'from nose.tools import with_setup, raises, ok_, eq_\n'), ((4516, 4539), 'nose.tools.raises', 'raises', (['TestRunnerError'], {}), '(TestRunnerError)\n', (4522, 4539), False, 'from nose.tools import with_setup, raises, ok_, eq_\n'), ((290, 306), 'stve.application.StveTestRunner', 'StveTestRunner', ([], {}), '()\n', (304, 306), False, 'from stve.application import StveTestRunner\n'), ((410, 440), 'os.path.join', 'os.path.join', (['cls.root', '"""data"""'], {}), "(cls.root, 'data')\n", (422, 440), False, 'import os\n'), ((475, 510), 'os.path.join', 'os.path.join', (['cls.root', '"""workspace"""'], {}), "(cls.root, 'workspace')\n", (487, 510), False, 'import os\n'), ((742, 803), 'mock.patch', 'mock.patch', (['"""sys.argv"""', "['stvetestrunner.py', 'notdefine.py']"], {}), "('sys.argv', ['stvetestrunner.py', 'notdefine.py'])\n", (752, 803), False, 'import mock\n'), ((975, 1036), 'mock.patch', 'mock.patch', (['"""sys.argv"""', "['stvetestrunner.py', 'notdefine.py']"], {}), "('sys.argv', ['stvetestrunner.py', 'notdefine.py'])\n", (985, 1036), False, 'import mock\n'), ((1207, 1268), 'mock.patch', 'mock.patch', (['"""sys.argv"""', "['stvetestrunner.py', 'notdefine.py']"], {}), "('sys.argv', ['stvetestrunner.py', 'notdefine.py'])\n", (1217, 1268), False, 'import mock\n'), ((1985, 2046), 'mock.patch', 'mock.patch', (['"""sys.argv"""', "['stvetestrunner.py', 'notdefine.py']"], {}), "('sys.argv', ['stvetestrunner.py', 'notdefine.py'])\n", (1995, 2046), False, 'import mock\n'), ((2232, 2293), 'mock.patch', 'mock.patch', (['"""sys.argv"""', "['stvetestrunner.py', 'notdefine.py']"], {}), "('sys.argv', ['stvetestrunner.py', 'notdefine.py'])\n", (2242, 2293), False, 'import mock\n'), ((2574, 2635), 'mock.patch', 'mock.patch', (['"""sys.argv"""', "['stvetestrunner.py', 'notdefine.py']"], {}), "('sys.argv', ['stvetestrunner.py', 'notdefine.py'])\n", (2584, 2635), False, 'import mock\n'), ((2915, 2976), 'mock.patch', 'mock.patch', (['"""sys.argv"""', "['stvetestrunner.py', 'notdefine.py']"], {}), "('sys.argv', ['stvetestrunner.py', 'notdefine.py'])\n", (2925, 2976), False, 'import mock\n'), ((3260, 3321), 'mock.patch', 'mock.patch', (['"""sys.argv"""', "['stvetestrunner.py', 'notdefine.py']"], {}), "('sys.argv', ['stvetestrunner.py', 'notdefine.py'])\n", (3270, 3321), False, 'import mock\n'), ((3630, 3691), 'mock.patch', 'mock.patch', (['"""sys.argv"""', "['stvetestrunner.py', 'notdefine.py']"], {}), "('sys.argv', ['stvetestrunner.py', 'notdefine.py'])\n", (3640, 3691), False, 'import mock\n'), ((3947, 4008), 'mock.patch', 'mock.patch', (['"""sys.argv"""', "['stvetestrunner.py', 'notdefine.py']"], {}), "('sys.argv', ['stvetestrunner.py', 'notdefine.py'])\n", (3957, 4008), False, 'import mock\n'), ((4276, 4337), 'mock.patch', 'mock.patch', (['"""sys.argv"""', "['stvetestrunner.py', 'notdefine.py']"], {}), "('sys.argv', ['stvetestrunner.py', 'notdefine.py'])\n", (4286, 4337), False, 'import mock\n'), ((4618, 4679), 'mock.patch', 'mock.patch', (['"""sys.argv"""', "['stvetestrunner.py', 'notdefine.py']"], {}), "('sys.argv', ['stvetestrunner.py', 'notdefine.py'])\n", (4628, 4679), False, 'import mock\n'), ((356, 381), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (371, 381), False, 'import os\n'), ((2426, 2454), 'os.listdir', 'os.listdir', (['self.report_path'], {}), '(self.report_path)\n', (2436, 2454), False, 'import os\n'), ((2767, 2795), 'os.listdir', 'os.listdir', (['self.report_path'], {}), '(self.report_path)\n', (2777, 2795), False, 'import os\n'), ((3111, 3139), 'os.listdir', 'os.listdir', (['self.report_path'], {}), '(self.report_path)\n', (3121, 3139), False, 'import os\n'), ((3453, 3481), 'os.listdir', 'os.listdir', (['self.report_path'], {}), '(self.report_path)\n', (3463, 3481), False, 'import os\n')] |
import numpy as np
from scipy.signal import savgol_filter
from lagom.core.transform import BaseTransform
class SmoothFilter(BaseTransform):
r"""Smooth a sequence of noisy data points by applying `Savitzky–Golay filter`_. It uses least
squares to fit a polynomial with a small sliding window and use this polynomial to estimate
the point in the center of the sliding window.
    This is useful when a curve is highly noisy; smoothing it out leads to better visualization quality.
.. _Savitzky–Golay filter:
https://en.wikipedia.org/wiki/Savitzky%E2%80%93Golay_filter
Example::
>>> import matplotlib.pyplot as plt
>>> x = np.linspace(0, 4*2*np.pi, num=100)
>>> y = x*(np.sin(x) + np.random.random(100)*4)
>>> smooth = SmoothFilter()
>>> y2 = smooth(y, window_length=31, polyorder=10)
>>> plt.plot(x, y)
>>> plt.plot(x, y2, 'red')
"""
def __call__(self, x, **kwargs):
r"""Smooth the curve.
Args:
x (object): one-dimensional vector of scalar data points of a curve.
**kwargs: keyword arguments to specify Savitzky–Golay filter from scipy.
The required keys are ``[window_length, polyorder]``.
Returns
-------
out : ndarray
smoothed curve data
"""
        assert not np.isscalar(x), 'scalar values are not supported!'
assert 'window_length' in kwargs, 'kwargs must contain window_length'
assert 'polyorder' in kwargs, 'kwargs must contain polyorder'
# Convert input to ndarray
x = self.to_numpy(x, np.float32)
assert x.ndim == 1, 'only a single vector of scalar values is supported'
# Smooth the curve
out = savgol_filter(x, **kwargs)
out = out.astype(np.float32)
return out
| [
"numpy.isscalar",
"scipy.signal.savgol_filter"
] | [((1843, 1869), 'scipy.signal.savgol_filter', 'savgol_filter', (['x'], {}), '(x, **kwargs)\n', (1856, 1869), False, 'from scipy.signal import savgol_filter\n'), ((1420, 1434), 'numpy.isscalar', 'np.isscalar', (['x'], {}), '(x)\n', (1431, 1434), True, 'import numpy as np\n')] |
# coding: utf-8
"""
TensorFlow target formatters.
"""
__all__ = ["TFConstantGraphFormatter"]
import os
from law.target.formatter import Formatter
from law.target.file import get_path
class TFConstantGraphFormatter(Formatter):
name = "tf_const_graph"
@classmethod
def import_tf(cls):
import tensorflow as tf
# keep a reference to the v1 API as long as v2 provides compatibility
tf1 = None
if tf.__version__.startswith("1."):
tf1 = tf
elif getattr(tf, "compat", None) and getattr(tf.compat, "v1"):
tf1 = tf.compat.v1
return tf, tf1
@classmethod
def accepts(cls, path, mode):
return get_path(path).endswith((".pb", ".pbtxt", ".pb.txt"))
@classmethod
def load(cls, path, create_session=None, as_text=None):
"""
Reads a saved TensorFlow graph from *path* and returns it. When *create_session* is *True*,
a session object (compatible with the v1 API) is created and returned as the second value of
a 2-tuple. The default value of *create_session* is *True* when TensorFlow v1 is detected,
and *False* otherwise. When *as_text* is *True*, or *None* and the file extension is
``".pbtxt"`` or ``".pb.txt"``, the content of the file at *path* is expected to be a
human-readable text file. Otherwise, it is expected to be a binary protobuf file. Example:
.. code-block:: python
graph = TFConstantGraphFormatter.load("path/to/model.pb", create_session=False)
graph, session = TFConstantGraphFormatter.load("path/to/model.pb", create_session=True)
"""
tf, tf1 = cls.import_tf()
path = get_path(path)
# default create_session value
if create_session is None:
create_session = tf1 is not None
# default as_text value
if as_text is None:
as_text = path.endswith((".pbtxt", ".pb.txt"))
graph = tf.Graph()
with graph.as_default():
graph_def = graph.as_graph_def()
if as_text:
# use a simple pb reader to load the file into graph_def
from google.protobuf import text_format
with open(path, "r") as f:
text_format.Merge(f.read(), graph_def)
else:
# use the gfile api depending on the TF version
if tf1:
from tensorflow.python.platform import gfile
with gfile.FastGFile(path, "rb") as f:
graph_def.ParseFromString(f.read())
else:
with tf.io.gfile.GFile(path, "rb") as f:
graph_def.ParseFromString(f.read())
# import the graph_def (pb object) into the actual graph
tf.import_graph_def(graph_def, name="")
if create_session:
if not tf1:
raise NotImplementedError("the v1 compatibility layer of TensorFlow v2 is missing, "
"but required by when create_session is True")
session = tf1.Session(graph=graph)
return graph, session
else:
return graph
@classmethod
def dump(cls, path, session, output_names, *args, **kwargs):
"""
Takes a TensorFlow *session* object (compatible with the v1 API), converts its contained
graph into a simpler version with variables translated into constant tensors, and saves it
        to a protobuf file at *path*. *output_names* must be a list of names of output tensors to
save. In turn, TensorFlow internally determines which subgraph(s) to convert and save. All
*args* and *kwargs* are forwarded to :py:func:`tf.compat.v1.train.write_graph`.
.. note::
When used with TensorFlow v2, this function requires the v1 API compatibility layer.
When :py:attr:`tf.compat.v1` is not available, a *NotImplementedError* is raised.
"""
_, tf1 = cls.import_tf()
# complain when the v1 compatibility layer is not existing
if not tf1:
raise NotImplementedError("the v1 compatibility layer of TensorFlow v2 is missing, but "
"required")
# convert the graph
constant_graph = tf1.graph_util.convert_variables_to_constants(session,
session.graph.as_graph_def(), output_names)
# default as_text value
kwargs.setdefault("as_text", path.endswith((".pbtxt", ".pb.txt")))
# write the graph
graph_dir, graph_name = os.path.split(get_path(path))
return tf1.train.write_graph(constant_graph, graph_dir, graph_name, *args, **kwargs)
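# Hedged usage sketch (names are illustrative, not from the law source):
# assuming a tf.compat.v1 session whose graph ends in a tensor named "output",
# the two formatters pair up roughly like this:
#
#   TFConstantGraphFormatter.dump("model.pb", session, ["output"])
#   graph = TFConstantGraphFormatter.load("model.pb", create_session=False)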
| [
"tensorflow.Graph",
"tensorflow.io.gfile.GFile",
"law.target.file.get_path",
"tensorflow.python.platform.gfile.FastGFile",
"tensorflow.import_graph_def",
"tensorflow.__version__.startswith"
] | [((446, 477), 'tensorflow.__version__.startswith', 'tf.__version__.startswith', (['"""1."""'], {}), "('1.')\n", (471, 477), True, 'import tensorflow as tf\n'), ((1709, 1723), 'law.target.file.get_path', 'get_path', (['path'], {}), '(path)\n', (1717, 1723), False, 'from law.target.file import get_path\n'), ((1981, 1991), 'tensorflow.Graph', 'tf.Graph', ([], {}), '()\n', (1989, 1991), True, 'import tensorflow as tf\n'), ((2842, 2881), 'tensorflow.import_graph_def', 'tf.import_graph_def', (['graph_def'], {'name': '""""""'}), "(graph_def, name='')\n", (2861, 2881), True, 'import tensorflow as tf\n'), ((4617, 4631), 'law.target.file.get_path', 'get_path', (['path'], {}), '(path)\n', (4625, 4631), False, 'from law.target.file import get_path\n'), ((693, 707), 'law.target.file.get_path', 'get_path', (['path'], {}), '(path)\n', (701, 707), False, 'from law.target.file import get_path\n'), ((2523, 2550), 'tensorflow.python.platform.gfile.FastGFile', 'gfile.FastGFile', (['path', '"""rb"""'], {}), "(path, 'rb')\n", (2538, 2550), False, 'from tensorflow.python.platform import gfile\n'), ((2664, 2693), 'tensorflow.io.gfile.GFile', 'tf.io.gfile.GFile', (['path', '"""rb"""'], {}), "(path, 'rb')\n", (2681, 2693), True, 'import tensorflow as tf\n')] |
"""
Problem Statement:
- In this problem, you have to implement the sort_list() function
which will sort the elements of a list of strings.
Input:
- A list of strings.
Output:
- Returns the input list in a sorted state.
"""
from Trie import Trie
# Create Trie => trie = Trie()
# TrieNode => {children, is_end_word, char,
# mark_as_leaf(), unmark_as_leaf()}
# get_root => trie.get_root()
# Insert a Word => trie.insert(key)
# Search a Word => trie.search(key) return true or false
# Delete a Word => trie.delete(key)
# Recursive Function to generate all words in alphabetic order
def sort_list(arr):
trie = Trie()
for s in arr:
trie.insert(s)
return list(find(trie.root, ''))
def find(root, path):
path += root.char
if root.is_end_word:
yield path
for child in root.children:
if child:
yield from find(child, path)
if __name__ == '__main__':
keys = ["the", "a", "there", "answer", "any", "by", "bye", "their", "abc"]
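    # With an a-z indexed children list in TrieNode, the words print in
    # alphabetical order: ['a', 'abc', 'answer', 'any', 'by', 'bye', 'the',
    # 'their', 'there'].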
print(sort_list(keys))
| [
"Trie.Trie"
] | [((619, 625), 'Trie.Trie', 'Trie', ([], {}), '()\n', (623, 625), False, 'from Trie import Trie\n')] |
import pandas as pd
def load_data():
data = pd.read_csv("hn_stories.csv")
data.columns = ['submission_time', 'upvotes', 'url', 'headline']
return data
if __name__=="__main__":
hacker_news = load_data()
print(hacker_news.head())
| [
"pandas.read_csv"
] | [((49, 78), 'pandas.read_csv', 'pd.read_csv', (['"""hn_stories.csv"""'], {}), "('hn_stories.csv')\n", (60, 78), True, 'import pandas as pd\n')] |
from typing import List
from collections import defaultdict
class Solution:
def ladderLength(self, beginWord: str, endWord: str, wordList: List[str]) -> int:
if endWord not in wordList or not endWord or not beginWord or not wordList:
return 0
L=len(beginWord)
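        # Bucket every word under each of its wildcard forms, e.g. "hot" is
        # filed under "*ot", "h*t" and "ho*", so one-letter neighbours share a bucket.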
all_combo_dict=defaultdict(list)
for word in wordList:
for i in range(L):
all_combo_dict[word[:i]+"*"+word[i+1:]].append(word)
queue=[(beginWord,1)]
visited={beginWord:True}
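        # BFS over words; level counts the words on the path so far, beginWord included.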
while queue:
current_word,level=queue.pop(0)
for i in range(L):
intermediate_word=current_word[:i]+ "*" + current_word[i+1:]
for word in all_combo_dict[intermediate_word]:
if word==endWord:
return level+1
if word not in visited:
visited[word]=True
queue.append((word, level + 1))
all_combo_dict[intermediate_word] = []
        return 0
| [
"collections.defaultdict"
] | [((309, 326), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (320, 326), False, 'from collections import defaultdict\n')] |
from __future__ import absolute_import, division, print_function, unicode_literals
import torch
from tests.utils import jitVsGlow
import unittest
class TestNumToTensor(unittest.TestCase):
def test_view(self):
"""Test of the PyTorch NumToTensor on Glow."""
def test_f(a):
a = a.size(0)
b = a
return b
x = torch.randn(4)
jitVsGlow(test_f, x, expected_fused_ops={"prim::NumToTensor"})
| [
"torch.randn",
"tests.utils.jitVsGlow"
] | [((373, 387), 'torch.randn', 'torch.randn', (['(4)'], {}), '(4)\n', (384, 387), False, 'import torch\n'), ((397, 459), 'tests.utils.jitVsGlow', 'jitVsGlow', (['test_f', 'x'], {'expected_fused_ops': "{'prim::NumToTensor'}"}), "(test_f, x, expected_fused_ops={'prim::NumToTensor'})\n", (406, 459), False, 'from tests.utils import jitVsGlow\n')] |
from django.urls import path
from demoapp import views
urlpatterns = [
path('', views.index, name='index'),
path('author/<int:pk>',
views.AuthorDetailView.as_view(), name='author-detail'),
path('author/create/', views.AuthorCreate.as_view(), name='author-create'),
path(
'author/<int:pk>/update/',
views.AuthorUpdate.as_view(),
name='author-update'
),
path('author/<int:pk>/delete/', views.AuthorDelete.as_view(),
name='author-delete'),
path('authors/', views.AuthorListView.as_view(), name='authors'),
path('books/', views.BookListView.as_view(), name='books'),
path('book/<int:pk>', views.BookDetailView.as_view(), name='book-detail'),
path('book/<uuid:pk>/renew/', views.renew_book_librarian,
name='renew-book-librarian'),
path('book/create/', views.BookCreate.as_view(), name='book-create'),
path('book/<int:pk>/update/', views.BookUpdate.as_view(),
name='book-update'),
path('book/<int:pk>/delete/', views.BookDelete.as_view(),
name='book-delete'),
path(r'borrowed/', views.LoanedBooksAllListView.as_view(),
name='all-borrowed'),
path('genres/', views.GenresListView.as_view(), name='genres'),
path('genre/create/', views.GenreCreate.as_view(), name='genre-create'),
path('genre/<int:pk>', views.GenreView.as_view(), name='genre-detail'),
path('languages/', views.LanguagesListView.as_view(), name='languages'),
path(
'language/create/',
views.LanguageCreate.as_view(),
name='language-create'
),
path(
'language/<int:pk>',
views.LanguageView.as_view(),
name='language-detail'
),
path('mybooks/', views.LoanedBooksByUserListView.as_view(),
name='my-borrowed'),
]
| [
"demoapp.views.LanguageView.as_view",
"demoapp.views.AuthorListView.as_view",
"demoapp.views.AuthorDetailView.as_view",
"demoapp.views.LoanedBooksAllListView.as_view",
"django.urls.path",
"demoapp.views.LanguageCreate.as_view",
"demoapp.views.GenresListView.as_view",
"demoapp.views.BookCreate.as_view",
"demoapp.views.LoanedBooksByUserListView.as_view",
"demoapp.views.BookDetailView.as_view",
"demoapp.views.AuthorCreate.as_view",
"demoapp.views.BookDelete.as_view",
"demoapp.views.AuthorDelete.as_view",
"demoapp.views.BookListView.as_view",
"demoapp.views.BookUpdate.as_view",
"demoapp.views.GenreView.as_view",
"demoapp.views.LanguagesListView.as_view",
"demoapp.views.GenreCreate.as_view",
"demoapp.views.AuthorUpdate.as_view"
] | [((77, 112), 'django.urls.path', 'path', (['""""""', 'views.index'], {'name': '"""index"""'}), "('', views.index, name='index')\n", (81, 112), False, 'from django.urls import path\n'), ((722, 813), 'django.urls.path', 'path', (['"""book/<uuid:pk>/renew/"""', 'views.renew_book_librarian'], {'name': '"""renew-book-librarian"""'}), "('book/<uuid:pk>/renew/', views.renew_book_librarian, name=\n 'renew-book-librarian')\n", (726, 813), False, 'from django.urls import path\n'), ((151, 183), 'demoapp.views.AuthorDetailView.as_view', 'views.AuthorDetailView.as_view', ([], {}), '()\n', (181, 183), False, 'from demoapp import views\n'), ((235, 263), 'demoapp.views.AuthorCreate.as_view', 'views.AuthorCreate.as_view', ([], {}), '()\n', (261, 263), False, 'from demoapp import views\n'), ((341, 369), 'demoapp.views.AuthorUpdate.as_view', 'views.AuthorUpdate.as_view', ([], {}), '()\n', (367, 369), False, 'from demoapp import views\n'), ((443, 471), 'demoapp.views.AuthorDelete.as_view', 'views.AuthorDelete.as_view', ([], {}), '()\n', (469, 471), False, 'from demoapp import views\n'), ((526, 556), 'demoapp.views.AuthorListView.as_view', 'views.AuthorListView.as_view', ([], {}), '()\n', (554, 556), False, 'from demoapp import views\n'), ((594, 622), 'demoapp.views.BookListView.as_view', 'views.BookListView.as_view', ([], {}), '()\n', (620, 622), False, 'from demoapp import views\n'), ((665, 695), 'demoapp.views.BookDetailView.as_view', 'views.BookDetailView.as_view', ([], {}), '()\n', (693, 695), False, 'from demoapp import views\n'), ((844, 870), 'demoapp.views.BookCreate.as_view', 'views.BookCreate.as_view', ([], {}), '()\n', (868, 870), False, 'from demoapp import views\n'), ((927, 953), 'demoapp.views.BookUpdate.as_view', 'views.BookUpdate.as_view', ([], {}), '()\n', (951, 953), False, 'from demoapp import views\n'), ((1019, 1045), 'demoapp.views.BookDelete.as_view', 'views.BookDelete.as_view', ([], {}), '()\n', (1043, 1045), False, 'from demoapp import views\n'), ((1100, 1138), 'demoapp.views.LoanedBooksAllListView.as_view', 'views.LoanedBooksAllListView.as_view', ([], {}), '()\n', (1136, 1138), False, 'from demoapp import views\n'), ((1191, 1221), 'demoapp.views.GenresListView.as_view', 'views.GenresListView.as_view', ([], {}), '()\n', (1219, 1221), False, 'from demoapp import views\n'), ((1265, 1292), 'demoapp.views.GenreCreate.as_view', 'views.GenreCreate.as_view', ([], {}), '()\n', (1290, 1292), False, 'from demoapp import views\n'), ((1343, 1368), 'demoapp.views.GenreView.as_view', 'views.GenreView.as_view', ([], {}), '()\n', (1366, 1368), False, 'from demoapp import views\n'), ((1415, 1448), 'demoapp.views.LanguagesListView.as_view', 'views.LanguagesListView.as_view', ([], {}), '()\n', (1446, 1448), False, 'from demoapp import views\n'), ((1515, 1545), 'demoapp.views.LanguageCreate.as_view', 'views.LanguageCreate.as_view', ([], {}), '()\n', (1543, 1545), False, 'from demoapp import views\n'), ((1632, 1660), 'demoapp.views.LanguageView.as_view', 'views.LanguageView.as_view', ([], {}), '()\n', (1658, 1660), False, 'from demoapp import views\n'), ((1721, 1762), 'demoapp.views.LoanedBooksByUserListView.as_view', 'views.LoanedBooksByUserListView.as_view', ([], {}), '()\n', (1760, 1762), False, 'from demoapp import views\n')] |
# -*- coding: utf-8 -*-
import scrapy
from tourney_scrape.items import myDatabase
import re
class TourneySpider(scrapy.Spider):
name = "tourney"
    allowed_domains = ["databasesports.com"]
start_urls = []
for i in range(1975,2014,1):
start_urls.append('http://www.databasesports.com/ncaab/tourney.htm?yr=%d' % i)
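    # chunks() splits a list into consecutive size-n pieces; parse() uses it
    # below to pair up the team names and scores scraped from each bracket table.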
def chunks(self,l, n):
n = max(1, n)
return [l[i:i + n] for i in range(0, len(l), n)]
def parse(self, response):
for i in response.xpath("//tr[@class='region']/ancestor::table|//td[@class='region']/ancestor::table"):
for j in range(1, 5, 1):
out = i.xpath(".//tr[@class='tourney']/td[@valign='middle'][%d]/a/text()" % j).extract()
out=self.chunks(out,2)
outscore = i.xpath(".//tr[@class='tourney']/td[@valign='middle'][%d]" % j).extract()
outscore = re.findall(r'teamid.*?/a>.*?(\d{1,3})',str(outscore))
outscore=self.chunks(outscore,2)
for idx,m in enumerate(out):
if len(m)>1:
scrap_record = myDatabase()
scrap_record['year']=response.url[len(response.url)-4:len(response.url)]
scrap_record['round'] = j
scrap_record['team1'] = m[0]
scrap_record['team2'] = m[1]
scrap_record['team1score']=outscore[idx][0]
scrap_record['team2score']=outscore[idx][1]
# out=list(chain.from_iterable(out))
# scrap_record['matchups']
scrap_record['region'] = i.xpath("./tr[@class='region']/td/text()|.//td[@class='region']/text()").extract()
yield scrap_record
| [
"tourney_scrape.items.myDatabase"
] | [((1114, 1126), 'tourney_scrape.items.myDatabase', 'myDatabase', ([], {}), '()\n', (1124, 1126), False, 'from tourney_scrape.items import myDatabase\n')] |
# Copyright 2020 Catalyst Cloud
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from trove.guestagent.datastore.mariadb import service
from trove.guestagent.datastore.mysql_common import manager
from trove.guestagent.datastore.mysql_common import service as mysql_service
class Manager(manager.MySqlManager):
def __init__(self):
status = mysql_service.BaseMySqlAppStatus(self.docker_client)
app = service.MariaDBApp(status, self.docker_client)
adm = service.MariaDBAdmin(app)
super(Manager, self).__init__(app, status, adm)
def get_start_db_params(self, data_dir):
"""Get parameters for starting database.
Cinder volume initialization(after formatted) may leave a lost+found
folder.
"""
return (f'--ignore-db-dir=lost+found --ignore-db-dir=conf.d '
f'--datadir={data_dir}')
| [
"trove.guestagent.datastore.mysql_common.service.BaseMySqlAppStatus",
"trove.guestagent.datastore.mariadb.service.MariaDBAdmin",
"trove.guestagent.datastore.mariadb.service.MariaDBApp"
] | [((877, 929), 'trove.guestagent.datastore.mysql_common.service.BaseMySqlAppStatus', 'mysql_service.BaseMySqlAppStatus', (['self.docker_client'], {}), '(self.docker_client)\n', (909, 929), True, 'from trove.guestagent.datastore.mysql_common import service as mysql_service\n'), ((944, 990), 'trove.guestagent.datastore.mariadb.service.MariaDBApp', 'service.MariaDBApp', (['status', 'self.docker_client'], {}), '(status, self.docker_client)\n', (962, 990), False, 'from trove.guestagent.datastore.mariadb import service\n'), ((1005, 1030), 'trove.guestagent.datastore.mariadb.service.MariaDBAdmin', 'service.MariaDBAdmin', (['app'], {}), '(app)\n', (1025, 1030), False, 'from trove.guestagent.datastore.mariadb import service\n')] |
import inspect
import json
import os
import re
from urllib.parse import quote
from urllib.request import urlopen
import pandas as pd
import param
from .configuration import DEFAULTS
class TutorialData(param.Parameterized):
label = param.String(allow_None=True)
raw = param.Boolean()
verbose = param.Boolean()
return_meta = param.Boolean()
use_cache = param.Boolean()
_source = None
_base_url = None
_data_url = None
_description = None
def __init__(self, **kwds):
super().__init__(**kwds)
self._cache_dir = DEFAULTS["cache_kwds"]["directory"]
self._remove_href = re.compile(r"<(a|/a).*?>")
os.makedirs(self._cache_dir, exist_ok=True)
self._init_owid()
@property
def _cache_path(self):
cache_file = f"{self.label}.pkl"
return os.path.join(self._cache_dir, cache_file)
@property
def _dataset_options(self):
options = set([])
for method in dir(self):
if method.startswith("_load_") and "owid" not in method:
options.add(method.replace("_load_", ""))
return list(options) + list(self._owid_labels_df.columns)
@staticmethod
def _specify_cache(cache_path, **kwds):
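        # Encode the request kwargs into the cache file name so that different
        # keyword combinations get their own cache entries.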
if kwds:
cache_ext = "_".join(
f"{key}={val}".replace(os.sep, "") for key, val in kwds.items()
)
cache_path = f"{os.path.splitext(cache_path)[0]}_{cache_ext}.pkl"
return cache_path
def _cache_dataset(self, df, cache_path=None, **kwds):
if cache_path is None:
cache_path = self._cache_path
cache_path = self._specify_cache(cache_path, **kwds)
df.to_pickle(cache_path)
def _read_cache(self, cache_path=None, **kwds):
if not self.use_cache:
return None
if cache_path is None:
cache_path = self._cache_path
cache_path = self._specify_cache(cache_path, **kwds)
try:
return pd.read_pickle(cache_path)
except Exception:
if os.path.exists(cache_path):
os.remove(cache_path)
return None
@staticmethod
def _snake_urlify(s):
# Replace all hyphens with underscore
s = s.replace(" - ", "_").replace("-", "_")
        # Remove all non-word characters (everything except numbers, letters and underscores)
s = re.sub(r"[^\w\s]", "", s)
# Replace all runs of whitespace with a underscore
s = re.sub(r"\s+", "_", s)
return s.lower()
def _init_owid(self):
cache_path = os.path.join(self._cache_dir, "owid_labels.pkl")
self._owid_labels_df = self._read_cache(cache_path=cache_path)
if self._owid_labels_df is not None:
return
owid_api_url = (
"https://api.github.com/"
"repos/owid/owid-datasets/"
"git/trees/master?recursive=1"
)
with urlopen(owid_api_url) as f:
sources = json.loads(f.read().decode("utf-8"))
owid_labels = {}
owid_raw_url = "https://raw.githubusercontent.com/owid/owid-datasets/master/"
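        # Walk the repository tree once, recording the data (.csv) and
        # metadata (.json) URL for every dataset label.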
for source_tree in sources["tree"]:
path = source_tree["path"]
if ".csv" not in path and ".json" not in path:
continue
label = "owid_" + self._snake_urlify(path.split("/")[-2].strip())
if label not in owid_labels:
owid_labels[label] = {}
url = f"{owid_raw_url}/{quote(path)}"
if ".csv" in path:
owid_labels[label]["data"] = url
elif ".json" in path:
owid_labels[label]["meta"] = url
self._owid_labels_df = pd.DataFrame(owid_labels)
self._cache_dataset(self._owid_labels_df, cache_path=cache_path)
def _load_owid(self, **kwds):
self._data_url = self._owid_labels_df[self.label]["data"]
meta_url = self._owid_labels_df[self.label]["meta"]
with urlopen(meta_url) as response:
meta = json.loads(response.read().decode())
self.label = meta["title"]
self._source = (
" & ".join(source["dataPublishedBy"] for source in meta["sources"])
+ " curated by Our World in Data (OWID)"
)
self._base_url = (
" & ".join(source["link"] for source in meta["sources"])
+ " through https://github.com/owid/owid-datasets"
)
self._description = re.sub(self._remove_href, "", meta["description"])
df = self._read_cache(**kwds)
if df is None:
df = pd.read_csv(self._data_url, **kwds)
self._cache_dataset(df, **kwds)
if self.raw:
return df
df.columns = [self._snake_urlify(col) for col in df.columns]
return df
def _load_annual_co2(self, **kwds):
self._source = "NOAA ESRL"
self._base_url = "https://www.esrl.noaa.gov/"
self._data_url = (
"https://www.esrl.noaa.gov/"
"gmd/webdata/ccgg/trends/co2/co2_annmean_mlo.txt"
)
self._description = (
"The carbon dioxide data on Mauna Loa constitute the longest record "
"of direct measurements of CO2 in the atmosphere. They were started "
"by <NAME> of the Scripps Institution of Oceanography in "
"March of 1958 at a facility of the National Oceanic and Atmospheric "
"Administration [Keeling, 1976]. NOAA started its own CO2 measurements "
"in May of 1974, and they have run in parallel with those made by "
"Scripps since then [Thoning, 1989]."
)
df = self._read_cache(**kwds)
if df is None:
base_kwds = dict(
header=None,
comment="#",
sep="\s+", # noqa
names=["year", "co2_ppm", "uncertainty"],
)
base_kwds.update(kwds)
df = pd.read_csv(self._data_url, **base_kwds)
self._cache_dataset(df, **kwds)
return df
def _load_tc_tracks(self, **kwds):
self._source = "IBTrACS v04 - USA"
self._base_url = "https://www.ncdc.noaa.gov/ibtracs/"
self._data_url = (
"https://www.ncei.noaa.gov/data/"
"international-best-track-archive-for-climate-stewardship-ibtracs/"
"v04r00/access/csv/ibtracs.last3years.list.v04r00.csv"
)
self._description = (
"The intent of the IBTrACS project is to overcome data availability "
"issues. This was achieved by working directly with all the Regional "
"Specialized Meteorological Centers and other international centers "
"and individuals to create a global best track dataset, merging storm "
"information from multiple centers into one product and archiving "
"the data for public use."
)
df = self._read_cache(**kwds)
if df is None:
base_kwds = dict(keep_default_na=False)
base_kwds.update(kwds)
df = pd.read_csv(self._data_url, **base_kwds)
self._cache_dataset(df, **kwds)
if self.raw:
return df
cols = [
"BASIN",
"NAME",
"LAT",
"LON",
"ISO_TIME",
"USA_WIND",
"USA_PRES",
"USA_SSHS",
"USA_RMW",
]
df = df[cols]
df.columns = df.columns.str.lower()
df = df.iloc[1:]
df = df.set_index("iso_time")
df.index = pd.to_datetime(df.index)
numeric_cols = ["lat", "lon", "usa_rmw", "usa_pres", "usa_sshs", "usa_rmw"]
df[numeric_cols] = df[numeric_cols].apply(pd.to_numeric, errors="coerce")
return df
def _load_covid19_us_cases(self, **kwds):
self._source = "JHU CSSE COVID-19"
self._base_url = "https://github.com/CSSEGISandData/COVID-19"
self._data_url = (
"https://github.com/CSSEGISandData/COVID-19/raw/master/"
"csse_covid_19_data/csse_covid_19_time_series/"
"time_series_covid19_confirmed_US.csv"
)
df = self._read_cache(**kwds)
if df is None:
df = pd.read_csv(self._data_url, **kwds)
self._cache_dataset(df, **kwds)
if self.raw:
return df
df = df.drop(
["UID", "iso2", "iso3", "code3", "FIPS", "Admin2", "Country_Region"],
axis=1,
)
df.columns = df.columns.str.lower().str.rstrip("_")
df = df.melt(
id_vars=["lat", "long", "combined_key", "province_state"],
var_name="date",
value_name="cases",
)
df["date"] = pd.to_datetime(df["date"])
return df
def _load_covid19_global_cases(self, **kwds):
self._source = "JHU CSSE COVID-19"
self._base_url = "https://github.com/CSSEGISandData/COVID-19"
self._data_url = (
"https://github.com/CSSEGISandData/COVID-19/raw/master/"
"csse_covid_19_data/csse_covid_19_time_series/"
"time_series_covid19_confirmed_global.csv"
)
self._description = (
"This is the data repository for the 2019 Novel Coronavirus "
"Visual Dashboard operated by the Johns Hopkins University Center "
"for Systems Science and Engineering (JHU CSSE). Also, Supported "
"by ESRI Living Atlas Team and the Johns Hopkins University "
"Applied Physics Lab (JHU APL)."
)
df = self._read_cache(**kwds)
if df is None:
df = pd.read_csv(self._data_url, **kwds)
self._cache_dataset(df, **kwds)
if self.raw:
return df
df.columns = df.columns.str.lower().str.rstrip("_")
df = df.melt(
id_vars=["province/state", "country/region", "lat", "long"],
var_name="date",
value_name="cases",
)
df.columns = df.columns.str.replace("/", "_")
df["date"] = pd.to_datetime(df["date"])
return df
def _load_covid19_population(self, **kwds):
self._source = "JHU CSSE COVID-19"
self._base_url = "https://github.com/CSSEGISandData/COVID-19"
self._data_url = (
"https://raw.githubusercontent.com/"
"CSSEGISandData/COVID-19/master/"
"csse_covid_19_data/UID_ISO_FIPS_LookUp_Table.csv"
)
self._description = (
"This is the data repository for the 2019 Novel Coronavirus "
"Visual Dashboard operated by the Johns Hopkins University Center "
"for Systems Science and Engineering (JHU CSSE). Also, Supported "
"by ESRI Living Atlas Team and the Johns Hopkins University "
"Applied Physics Lab (JHU APL)."
)
df = self._read_cache(**kwds)
if df is None:
df = pd.read_csv(self._data_url, **kwds)
self._cache_dataset(df, **kwds)
if self.raw:
return df
df.columns = df.columns.str.lower().str.rstrip("_")
return df
def _load_gapminder_life_expectancy(self, **kwds):
self._source = "World Bank Gapminder"
self._base_url = (
"https://github.com/open-numbers/ddf--gapminder--systema_globalis"
)
self._data_url = (
"https://raw.githubusercontent.com/open-numbers/"
"ddf--gapminder--systema_globalis/master/"
"countries-etc-datapoints/ddf--datapoints--"
"life_expectancy_years--by--geo--time.csv"
)
self._description = (
"This is the main dataset used in tools on the official Gapminder "
"website. It contains local & global statistics combined from "
"hundreds of sources."
)
df = self._read_cache(**kwds)
if df is None:
df = pd.read_csv(self._data_url, **kwds)
self._cache_dataset(df, **kwds)
if self.raw:
return df
df = df.rename(columns={"life_expectancy_years": "life_expectancy"})
return df
def _load_gapminder_income(self, **kwds):
self._source = "World Bank Gapminder"
self._base_url = (
"https://github.com/open-numbers/ddf--gapminder--systema_globalis"
)
self._data_url = (
"https://raw.githubusercontent.com/open-numbers/"
"ddf--gapminder--systema_globalis/master/"
"countries-etc-datapoints/ddf--datapoints--"
"income_per_person_gdppercapita_ppp_inflation_adjusted"
"--by--geo--time.csv"
)
self._description = (
"This is the main dataset used in tools on the official Gapminder "
"website. It contains local & global statistics combined from "
"hundreds of sources."
)
df = self._read_cache(**kwds)
if df is None:
df = pd.read_csv(self._data_url, **kwds)
self._cache_dataset(df, **kwds)
if self.raw:
return df
df = df.rename(
columns={
"income_per_person_gdppercapita_ppp_inflation_adjusted": "income" # noqa
}
)
return df
def _load_gapminder_population(self, **kwds):
self._source = "World Bank Gapminder"
self._base_url = (
"https://github.com/open-numbers/ddf--gapminder--systema_globalis"
)
self._data_url = (
"https://raw.githubusercontent.com/open-numbers/"
"ddf--gapminder--systema_globalis/master/"
"countries-etc-datapoints/ddf--datapoints--"
"population_total--by--geo--time.csv"
)
self._description = (
"This is the main dataset used in tools on the official Gapminder "
"website. It contains local & global statistics combined from "
"hundreds of sources."
)
df = self._read_cache(**kwds)
if df is None:
df = pd.read_csv(self._data_url, **kwds)
self._cache_dataset(df, **kwds)
if self.raw:
return df
df = df.rename(columns={"population_total": "population"})
return df
def _load_gapminder_country(self, **kwds):
self._source = "World Bank Gapminder"
self._base_url = (
"https://github.com/open-numbers/ddf--gapminder--systema_globalis"
)
self._data_url = (
"https://raw.githubusercontent.com/open-numbers/"
"ddf--gapminder--systema_globalis/master/"
"ddf--entities--geo--country.csv"
)
self._description = (
"This is the main dataset used in tools on the official Gapminder "
"website. It contains local & global statistics combined from "
"hundreds of sources."
)
df = self._read_cache(**kwds)
if df is None:
df = pd.read_csv(self._data_url, **kwds)
self._cache_dataset(df, **kwds)
if self.raw:
return df
df = df[["country", "name", "world_6region"]].rename(
columns={"world_6region": "region"}
)
df["region"] = df["region"].str.replace("_", " ").str.title()
return df
def _load_iem_asos(
self,
ini="2020-01-01",
end="2020-01-03",
stn="CMI",
tz="utc",
data="all",
latlon="no",
elev="no",
**kwds,
):
stn = stn.upper()
if isinstance(data, list):
data = ",".join(data)
tzs = {
"utc": "Etc/UTC",
"akst": "America/Anchorage",
"wst": "America/Los_Angeles",
"mst": "America/Denver",
"cst": "America/Chicago",
"est": "America/New_York",
}
tz = tzs.get(tz, tz)
if tz not in tzs.values():
raise ValueError(f"tz must be one of the following: {tzs}")
ini_dt = pd.to_datetime(ini)
end_dt = pd.to_datetime(end)
self._source = "Iowa Environment Mesonet ASOS"
self._base_url = "https://mesonet.agron.iastate.edu/ASOS/"
self._data_url = (
f"https://mesonet.agron.iastate.edu/cgi-bin/request/asos.py?"
f"station={stn}&data={data}&latlon={latlon}&elev={elev}&"
f"year1={ini_dt:%Y}&month1={ini_dt:%m}&day1={ini_dt:%d}&"
f"year2={end_dt:%Y}&month2={end_dt:%m}&day2={end_dt:%d}&"
f"tz={tz}&format=onlycomma&"
f"missing=empty&trace=empty&"
f"direct=no&report_type=1&report_type=2"
)
self._description = (
"The IEM maintains an ever growing archive of automated airport "
"weather observations from around the world! These observations "
"are typically called 'ASOS' or sometimes 'AWOS' sensors. "
"A more generic term may be METAR data, which is a term that "
"describes the format the data is transmitted as. If you don't "
"get data for a request, please feel free to contact us for help. "
"The IEM also has a one minute interval dataset for US ASOS (2000-) "
"and Iowa AWOS (1995-2011) sites. This archive simply provides the "
"as-is collection of historical observations, very little "
"quality control is done."
)
cache_kwds = kwds.copy()
cache_kwds.update(
ini=ini, end=end, stn=stn, tz=tz, data=data, latlon=latlon, elev=elev
)
df = self._read_cache(**cache_kwds)
if df is None:
df = pd.read_csv(self._data_url, **kwds)
self._cache_dataset(df, **cache_kwds)
if self.raw:
return df
df["valid"] = pd.to_datetime(df["valid"])
df = df.set_index("valid")
return df
def open_dataset(self, **kwds):
if self.label is None or self.label not in self._dataset_options:
self.list_datasets()
raise ValueError("Select a valid dataset listed above")
if self.label.startswith("owid_"):
data = getattr(self, "_load_owid")(**kwds)
else:
data = getattr(self, f"_load_{self.label}")(**kwds)
label = self.label.replace("_", " ").upper()
attr = f"{label}\n\nSource: {self._source}\n{self._base_url}"
if self.verbose:
attr = (
f"{attr}\n\nDescription: {self._description}\n\nData: {self._data_url}"
)
if self.return_meta:
meta = {}
meta["label"] = self.label
meta["source"] = self._source
meta["base_url"] = self._base_url
meta["description"] = self._description
meta["data_url"] = self._data_url
return data, meta
else:
print(attr)
return data
def list_datasets(self):
signatures = {}
for option in self._dataset_options:
if "owid" in option:
signatures[option] = {}
continue
signature = inspect.signature(getattr(self, f"_load_{option}"))
signatures[option] = {
k: v.default
for k, v in signature.parameters.items()
if v.default is not inspect.Parameter.empty
}
for key, val in signatures.items():
print(f"- {key}")
if val:
print(" adjustable keywords")
for k, v in val.items():
print(f" {k}: {v}")
def open_dataset(
label=None, raw=False, verbose=False, return_meta=False, use_cache=True, **kwds
):
return TutorialData(
label=label,
raw=raw,
verbose=verbose,
return_meta=return_meta,
use_cache=use_cache,
).open_dataset(**kwds)
def list_datasets():
return TutorialData().list_datasets()
| [
"pandas.read_pickle",
"os.path.exists",
"os.makedirs",
"re.compile",
"pandas.read_csv",
"param.Boolean",
"os.path.join",
"urllib.parse.quote",
"os.path.splitext",
"param.String",
"pandas.DataFrame",
"re.sub",
"urllib.request.urlopen",
"pandas.to_datetime",
"os.remove"
] | [((240, 269), 'param.String', 'param.String', ([], {'allow_None': '(True)'}), '(allow_None=True)\n', (252, 269), False, 'import param\n'), ((280, 295), 'param.Boolean', 'param.Boolean', ([], {}), '()\n', (293, 295), False, 'import param\n'), ((310, 325), 'param.Boolean', 'param.Boolean', ([], {}), '()\n', (323, 325), False, 'import param\n'), ((344, 359), 'param.Boolean', 'param.Boolean', ([], {}), '()\n', (357, 359), False, 'import param\n'), ((376, 391), 'param.Boolean', 'param.Boolean', ([], {}), '()\n', (389, 391), False, 'import param\n'), ((634, 659), 're.compile', 're.compile', (['"""<(a|/a).*?>"""'], {}), "('<(a|/a).*?>')\n", (644, 659), False, 'import re\n'), ((669, 712), 'os.makedirs', 'os.makedirs', (['self._cache_dir'], {'exist_ok': '(True)'}), '(self._cache_dir, exist_ok=True)\n', (680, 712), False, 'import os\n'), ((837, 878), 'os.path.join', 'os.path.join', (['self._cache_dir', 'cache_file'], {}), '(self._cache_dir, cache_file)\n', (849, 878), False, 'import os\n'), ((2384, 2410), 're.sub', 're.sub', (['"""[^\\\\w\\\\s]"""', '""""""', 's'], {}), "('[^\\\\w\\\\s]', '', s)\n", (2390, 2410), False, 'import re\n'), ((2481, 2503), 're.sub', 're.sub', (['"""\\\\s+"""', '"""_"""', 's'], {}), "('\\\\s+', '_', s)\n", (2487, 2503), False, 'import re\n'), ((2577, 2625), 'os.path.join', 'os.path.join', (['self._cache_dir', '"""owid_labels.pkl"""'], {}), "(self._cache_dir, 'owid_labels.pkl')\n", (2589, 2625), False, 'import os\n'), ((3703, 3728), 'pandas.DataFrame', 'pd.DataFrame', (['owid_labels'], {}), '(owid_labels)\n', (3715, 3728), True, 'import pandas as pd\n'), ((4463, 4513), 're.sub', 're.sub', (['self._remove_href', '""""""', "meta['description']"], {}), "(self._remove_href, '', meta['description'])\n", (4469, 4513), False, 'import re\n'), ((7595, 7619), 'pandas.to_datetime', 'pd.to_datetime', (['df.index'], {}), '(df.index)\n', (7609, 7619), True, 'import pandas as pd\n'), ((8764, 8790), 'pandas.to_datetime', 'pd.to_datetime', (["df['date']"], {}), "(df['date'])\n", (8778, 8790), True, 'import pandas as pd\n'), ((10091, 10117), 'pandas.to_datetime', 'pd.to_datetime', (["df['date']"], {}), "(df['date'])\n", (10105, 10117), True, 'import pandas as pd\n'), ((16078, 16097), 'pandas.to_datetime', 'pd.to_datetime', (['ini'], {}), '(ini)\n', (16092, 16097), True, 'import pandas as pd\n'), ((16115, 16134), 'pandas.to_datetime', 'pd.to_datetime', (['end'], {}), '(end)\n', (16129, 16134), True, 'import pandas as pd\n'), ((17879, 17906), 'pandas.to_datetime', 'pd.to_datetime', (["df['valid']"], {}), "(df['valid'])\n", (17893, 17906), True, 'import pandas as pd\n'), ((1994, 2020), 'pandas.read_pickle', 'pd.read_pickle', (['cache_path'], {}), '(cache_path)\n', (2008, 2020), True, 'import pandas as pd\n'), ((2931, 2952), 'urllib.request.urlopen', 'urlopen', (['owid_api_url'], {}), '(owid_api_url)\n', (2938, 2952), False, 'from urllib.request import urlopen\n'), ((3976, 3993), 'urllib.request.urlopen', 'urlopen', (['meta_url'], {}), '(meta_url)\n', (3983, 3993), False, 'from urllib.request import urlopen\n'), ((4593, 4628), 'pandas.read_csv', 'pd.read_csv', (['self._data_url'], {}), '(self._data_url, **kwds)\n', (4604, 4628), True, 'import pandas as pd\n'), ((5957, 5997), 'pandas.read_csv', 'pd.read_csv', (['self._data_url'], {}), '(self._data_url, **base_kwds)\n', (5968, 5997), True, 'import pandas as pd\n'), ((7092, 7132), 'pandas.read_csv', 'pd.read_csv', (['self._data_url'], {}), '(self._data_url, **base_kwds)\n', (7103, 7132), True, 'import pandas as pd\n'), ((8260, 8295), 'pandas.read_csv', 'pd.read_csv', (['self._data_url'], {}), '(self._data_url, **kwds)\n', (8271, 8295), True, 'import pandas as pd\n'), ((9665, 9700), 'pandas.read_csv', 'pd.read_csv', (['self._data_url'], {}), '(self._data_url, **kwds)\n', (9676, 9700), True, 'import pandas as pd\n'), ((10964, 10999), 'pandas.read_csv', 'pd.read_csv', (['self._data_url'], {}), '(self._data_url, **kwds)\n', (10975, 10999), True, 'import pandas as pd\n'), ((11961, 11996), 'pandas.read_csv', 'pd.read_csv', (['self._data_url'], {}), '(self._data_url, **kwds)\n', (11972, 11996), True, 'import pandas as pd\n'), ((13013, 13048), 'pandas.read_csv', 'pd.read_csv', (['self._data_url'], {}), '(self._data_url, **kwds)\n', (13024, 13048), True, 'import pandas as pd\n'), ((14100, 14135), 'pandas.read_csv', 'pd.read_csv', (['self._data_url'], {}), '(self._data_url, **kwds)\n', (14111, 14135), True, 'import pandas as pd\n'), ((15030, 15065), 'pandas.read_csv', 'pd.read_csv', (['self._data_url'], {}), '(self._data_url, **kwds)\n', (15041, 15065), True, 'import pandas as pd\n'), ((17726, 17761), 'pandas.read_csv', 'pd.read_csv', (['self._data_url'], {}), '(self._data_url, **kwds)\n', (17737, 17761), True, 'import pandas as pd\n'), ((2062, 2088), 'os.path.exists', 'os.path.exists', (['cache_path'], {}), '(cache_path)\n', (2076, 2088), False, 'import os\n'), ((2106, 2127), 'os.remove', 'os.remove', (['cache_path'], {}), '(cache_path)\n', (2115, 2127), False, 'import os\n'), ((3494, 3505), 'urllib.parse.quote', 'quote', (['path'], {}), '(path)\n', (3499, 3505), False, 'from urllib.parse import quote\n'), ((1414, 1442), 'os.path.splitext', 'os.path.splitext', (['cache_path'], {}), '(cache_path)\n', (1430, 1442), False, 'import os\n')] |
#!/usr/bin/env python
#coding:utf-8
import zipfile
import glob
import os
def ZipFile(path,zipname):
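    # Walk the tree under path, skip Unity .meta files, and pack everything else into a single deflated zip archive.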
allfiles=[]
for root, dirs, files in os.walk(path):
for filename in files:
#print(filename)
name, suf = os.path.splitext(filename)
            if suf != ".meta":
allfiles.append(os.path.join(root, filename))
f = zipfile.ZipFile(zipname, 'w', zipfile.ZIP_DEFLATED)
    startindex = path.rindex('/')
    print(startindex)
for file in allfiles:
print(file)
f.write(file,file[startindex:])
f.close()
print('zip success!!!')
def getFileSize(filePath,size=0):
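    # Recursively sum the size in bytes of every file below filePath.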
for root,dirs,files in os.walk(filePath):
for f in files:
size+=os.path.getsize(os.path.join(root,f))
return size
curpath=os.getcwd()
#path="E:\UnityGitWorkSpace\ARHomeV2\Assets\StreamingAssets\AssetBundles"
#zipname="E:\UnityGitWorkSpace\ARHomeV2\Assets\StreamingAssets\AssetBundles.zip"
path=curpath+"/Assets/StreamingAssets/AssetBundles"
zipname=curpath+"/Assets/StreamingAssets/AssetBundles.zip"
print(path)
ZipFile(path,zipname)
size = getFileSize(path)
print("zipsize:{0}".format(size))
#files=glob.glob(path)
#result = input("Please any key to continue:")
| [
"zipfile.ZipFile",
"os.path.join",
"os.path.splitext",
"os.getcwd",
"os.walk"
] | [((694, 705), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (703, 705), False, 'import os\n'), ((142, 155), 'os.walk', 'os.walk', (['path'], {}), '(path)\n', (149, 155), False, 'import os\n'), ((320, 371), 'zipfile.ZipFile', 'zipfile.ZipFile', (['zipname', '"""w"""', 'zipfile.ZIP_DEFLATED'], {}), "(zipname, 'w', zipfile.ZIP_DEFLATED)\n", (335, 371), False, 'import zipfile\n'), ((587, 604), 'os.walk', 'os.walk', (['filePath'], {}), '(filePath)\n', (594, 604), False, 'import os\n'), ((217, 243), 'os.path.splitext', 'os.path.splitext', (['filename'], {}), '(filename)\n', (233, 243), False, 'import os\n'), ((649, 670), 'os.path.join', 'os.path.join', (['root', 'f'], {}), '(root, f)\n', (661, 670), False, 'import os\n'), ((285, 313), 'os.path.join', 'os.path.join', (['root', 'filename'], {}), '(root, filename)\n', (297, 313), False, 'import os\n')] |
"""Python script to add vote information to the precinct shapefile."""
import geopandas as gpd
import pandas as pd
import numpy as np
# load the shapefile that has been nicely reprojected
shp = gpd.read_file('TECP_reprojected_mod.shp')
# load the dataframe with scraped vote data
df = pd.read_csv('ScrapedResults_PropB_perPrecinct.csv')
# add column for "for" votes
shp['For'] = np.nan
# set values where we have them
for i in shp['PCT']:
# see where we have data
if int(i) in list(df['Precinct']):
# get index in shp
shp_ind = np.where(shp['PCT']==i)[0][0]
# get index in df
df_ind = np.where(df['Precinct']==int(i))[0][0]
# get value from df using index
df_val = df['For'][df_ind]
# place value into correct location using shp index
        shp.loc[shp_ind, 'For'] = df_val
# write out the new shapefile with propB "for" vote info
shp.to_file('PropB_For.shp')
| [
"numpy.where",
"geopandas.read_file",
"pandas.read_csv"
] | [((195, 236), 'geopandas.read_file', 'gpd.read_file', (['"""TECP_reprojected_mod.shp"""'], {}), "('TECP_reprojected_mod.shp')\n", (208, 236), True, 'import geopandas as gpd\n'), ((287, 338), 'pandas.read_csv', 'pd.read_csv', (['"""ScrapedResults_PropB_perPrecinct.csv"""'], {}), "('ScrapedResults_PropB_perPrecinct.csv')\n", (298, 338), True, 'import pandas as pd\n'), ((556, 581), 'numpy.where', 'np.where', (["(shp['PCT'] == i)"], {}), "(shp['PCT'] == i)\n", (564, 581), True, 'import numpy as np\n')] |
import pandas as pd
import matplotlib.pyplot as plt
import sys
expname=sys.argv[1]
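# Each CSV holds the exported AVTT / routing-success curve of one routing strategy; raw values are plotted in light colors with a 10-episode rolling mean on top.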
df1=pd.read_csv(expname+'/run-AR_0_hop-tag-AVTT_.csv')
df2=pd.read_csv(expname+'/run-AR_1_hop-tag-AVTT_.csv')
df3=pd.read_csv(expname+'/run-AR_2_hop-tag-AVTT_.csv')
df4=pd.read_csv(expname+'/run-Q_routing-tag-AVTT_.csv')
fig, ax = plt.subplots()
ax.plot(df1.loc[:,'Value'],'lightblue')
ax.plot(df1.loc[:,'Value'].rolling(10).mean(),'b',label='AN(0hop)')
ax.plot(df2.loc[:,'Value'],'lightgreen')
ax.plot(df2.loc[:,'Value'].rolling(10).mean(),'g',label='AN(1hop)')
ax.plot(df3.loc[:,'Value'],'lightgrey')
ax.plot(df3.loc[:,'Value'].rolling(10).mean(),'k',label='AN(2hop)')
ax.plot(df4.loc[:,'Value'],'lightpink')
ax.plot(df4.loc[:,'Value'].rolling(10).mean(),'r',label='Q-routing')
ax.set_xlabel('episode number')
ax.set_ylabel('AVTT')
ax.set_title(expname+ ' Average Travel Time')
ax.legend()
plt.savefig(expname+"/"+expname+" Average Travel Time.png")
# plt.show()
fig, ax = plt.subplots()
ax.plot(df1.loc[700:,'Value'],'lightblue')
ax.plot(df1.loc[700:,'Value'].rolling(10).mean(),'b',label='AN(0hop)')
ax.plot(df2.loc[700:,'Value'],'lightgreen')
ax.plot(df2.loc[700:,'Value'].rolling(10).mean(),'g',label='AN(1hop)')
ax.plot(df3.loc[700:,'Value'],'lightgrey')
ax.plot(df3.loc[700:,'Value'].rolling(10).mean(),'k',label='AN(2hop)')
ax.plot(df4.loc[700:,'Value'],'lightpink')
ax.plot(df4.loc[700:,'Value'].rolling(10).mean(),'r',label='Q-routing')
ax.set_xlabel('episode number')
ax.set_ylabel('AVTT')
ax.set_title(expname+ ' Average Travel Time (Last 100 Episodes)')
ax.legend()
plt.savefig(expname+"/"+expname+" Last 100 Episodes.png")
df1=pd.read_csv(expname+'/run-AR_0_hop-tag-Routing Success_.csv')
df2=pd.read_csv(expname+'/run-AR_1_hop-tag-Routing Success_.csv')
df3=pd.read_csv(expname+'/run-AR_2_hop-tag-Routing Success_.csv')
df4=pd.read_csv(expname+'/run-Q_routing-tag-Routing Success_.csv')
fig, ax = plt.subplots()
ax.plot(df1.loc[:,'Value'],'lightblue')
ax.plot(df1.loc[:,'Value'].rolling(10).mean(),'b',label='AN(0hop)')
ax.plot(df2.loc[:,'Value'],'lightgreen')
ax.plot(df2.loc[:,'Value'].rolling(10).mean(),'g',label='AN(1hop)')
ax.plot(df3.loc[:,'Value'],'lightgrey')
ax.plot(df3.loc[:,'Value'].rolling(10).mean(),'k',label='AN(2hop)')
ax.plot(df4.loc[:,'Value'],'lightpink')
ax.plot(df4.loc[:,'Value'].rolling(10).mean(),'r',label='Q-routing')
ax.set_xlabel('episode number')
ax.set_ylabel('|Routing Success|')
ax.set_title(expname+ ' Routing Success')
ax.legend()
plt.savefig(expname+"/"+expname+" Routing Success.png")
# plt.show()
| [
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.subplots",
"pandas.read_csv"
] | [((88, 140), 'pandas.read_csv', 'pd.read_csv', (["(expname + '/run-AR_0_hop-tag-AVTT_.csv')"], {}), "(expname + '/run-AR_0_hop-tag-AVTT_.csv')\n", (99, 140), True, 'import pandas as pd\n'), ((143, 195), 'pandas.read_csv', 'pd.read_csv', (["(expname + '/run-AR_1_hop-tag-AVTT_.csv')"], {}), "(expname + '/run-AR_1_hop-tag-AVTT_.csv')\n", (154, 195), True, 'import pandas as pd\n'), ((198, 250), 'pandas.read_csv', 'pd.read_csv', (["(expname + '/run-AR_2_hop-tag-AVTT_.csv')"], {}), "(expname + '/run-AR_2_hop-tag-AVTT_.csv')\n", (209, 250), True, 'import pandas as pd\n'), ((253, 306), 'pandas.read_csv', 'pd.read_csv', (["(expname + '/run-Q_routing-tag-AVTT_.csv')"], {}), "(expname + '/run-Q_routing-tag-AVTT_.csv')\n", (264, 306), True, 'import pandas as pd\n'), ((316, 330), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (328, 330), True, 'import matplotlib.pyplot as plt\n'), ((882, 947), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(expname + '/' + expname + ' Average Travel Time.png')"], {}), "(expname + '/' + expname + ' Average Travel Time.png')\n", (893, 947), True, 'import matplotlib.pyplot as plt\n'), ((967, 981), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (979, 981), True, 'import matplotlib.pyplot as plt\n'), ((1577, 1640), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(expname + '/' + expname + ' Last 100 Episodes.png')"], {}), "(expname + '/' + expname + ' Last 100 Episodes.png')\n", (1588, 1640), True, 'import matplotlib.pyplot as plt\n'), ((1641, 1704), 'pandas.read_csv', 'pd.read_csv', (["(expname + '/run-AR_0_hop-tag-Routing Success_.csv')"], {}), "(expname + '/run-AR_0_hop-tag-Routing Success_.csv')\n", (1652, 1704), True, 'import pandas as pd\n'), ((1707, 1770), 'pandas.read_csv', 'pd.read_csv', (["(expname + '/run-AR_1_hop-tag-Routing Success_.csv')"], {}), "(expname + '/run-AR_1_hop-tag-Routing Success_.csv')\n", (1718, 1770), True, 'import pandas as pd\n'), ((1773, 1836), 'pandas.read_csv', 'pd.read_csv', (["(expname + '/run-AR_2_hop-tag-Routing Success_.csv')"], {}), "(expname + '/run-AR_2_hop-tag-Routing Success_.csv')\n", (1784, 1836), True, 'import pandas as pd\n'), ((1839, 1903), 'pandas.read_csv', 'pd.read_csv', (["(expname + '/run-Q_routing-tag-Routing Success_.csv')"], {}), "(expname + '/run-Q_routing-tag-Routing Success_.csv')\n", (1850, 1903), True, 'import pandas as pd\n'), ((1913, 1927), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (1925, 1927), True, 'import matplotlib.pyplot as plt\n'), ((2489, 2550), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(expname + '/' + expname + ' Routing Success.png')"], {}), "(expname + '/' + expname + ' Routing Success.png')\n", (2500, 2550), True, 'import matplotlib.pyplot as plt\n')] |
import paho.mqtt.client as mqtt
class Message:
def update(self, face_name):
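        # Publish the recognized face name to the CloudMQTT broker on the "face_name" topic.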
mqttc = mqtt.Client()
mqttc.username_pw_set("miwdmkxc", "7SUxR3vX0v4N")
mqttc.connect('hairdresser.cloudmqtt.com', 15912)
mqttc.publish("face_name", face_name)
| [
"paho.mqtt.client.Client"
] | [((117, 130), 'paho.mqtt.client.Client', 'mqtt.Client', ([], {}), '()\n', (128, 130), True, 'import paho.mqtt.client as mqtt\n')] |
# ==================================================================================================
# Copyright 2012 Twitter, Inc.
# --------------------------------------------------------------------------------------------------
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this work except in compliance with the License.
# You may obtain a copy of the License in the LICENSE file, or at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==================================================================================================
import os
import errno
from zipfile import ZIP_DEFLATED
from twitter.common.collections import OrderedSet
from twitter.common.contextutil import open_zip
from twitter.common.dirutil import safe_mkdir
from twitter.pants import get_buildroot
from twitter.pants.fs import archive
from twitter.pants.java import Manifest
from twitter.pants.targets import JvmApp
from twitter.pants.tasks import TaskError
from twitter.pants.tasks.jvm_binary_task import JvmBinaryTask
class BundleCreate(JvmBinaryTask):
@classmethod
def setup_parser(cls, option_group, args, mkflag):
JvmBinaryTask.setup_parser(option_group, args, mkflag)
archive_flag = mkflag("archive")
option_group.add_option(archive_flag, dest="bundle_create_archive",
type="choice", choices=list(archive.TYPE_NAMES),
help="[%%default] Create an archive from the bundle. "
"Choose from %s" % sorted(archive.TYPE_NAMES))
option_group.add_option(mkflag("archive-prefix"), mkflag("archive-prefix", negate=True),
dest="bundle_create_prefix", default=False,
action="callback", callback=mkflag.set_bool,
help="[%%default] Used in conjunction with %s this packs the archive "
"with its basename as the path prefix." % archive_flag)
def __init__(self, context):
JvmBinaryTask.__init__(self, context)
self.outdir = (
context.options.jvm_binary_create_outdir
or context.config.get('bundle-create', 'outdir')
)
self.prefix = context.options.bundle_create_prefix
def fill_archiver_type():
self.archiver_type = context.options.bundle_create_archive
# If no option specified, check if anyone is requiring it
if not self.archiver_type:
for archive_type in archive.TYPE_NAMES:
if context.products.isrequired(archive_type):
self.archiver_type = archive_type
fill_archiver_type()
self.deployjar = context.options.jvm_binary_create_deployjar
if not self.deployjar:
self.context.products.require('jars', predicate=self.is_binary)
self.require_jar_dependencies()
def execute(self, targets):
def is_app(target):
return isinstance(target, JvmApp)
archiver = archive.archiver(self.archiver_type) if self.archiver_type else None
for app in filter(is_app, targets):
basedir = self.bundle(app)
if archiver:
archivemap = self.context.products.get(self.archiver_type)
archivepath = archiver.create(
basedir,
self.outdir,
app.basename,
prefix=app.basename if self.prefix else None
)
archivemap.add(app, self.outdir, [archivepath])
self.context.log.info('created %s' % os.path.relpath(archivepath, get_buildroot()))
def bundle(self, app):
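    # Assemble the bundle directory: symlink external jars into libs/ (unless a deploy jar is used), rewrite the binary jar's manifest Class-Path accordingly, and symlink the app's bundle files into place.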
bundledir = os.path.join(self.outdir, '%s-bundle' % app.basename)
self.context.log.info('creating %s' % os.path.relpath(bundledir, get_buildroot()))
safe_mkdir(bundledir, clean=True)
classpath = OrderedSet()
if not self.deployjar:
libdir = os.path.join(bundledir, 'libs')
os.mkdir(libdir)
for basedir, externaljar in self.list_jar_dependencies(app.binary):
src = os.path.join(basedir, externaljar)
link_name = os.path.join(libdir, externaljar)
try:
os.symlink(src, link_name)
except OSError as e:
if e.errno == errno.EEXIST:
raise TaskError('Trying to symlink %s to %s, but it is already symlinked to %s. ' %
(link_name, src, os.readlink(link_name)) +
'Does the bundled target depend on multiple jvm_binary targets?')
else:
raise
classpath.add(externaljar)
for basedir, jars in self.context.products.get('jars').get(app.binary).items():
if len(jars) != 1:
raise TaskError('Expected 1 mapped binary but found: %s' % jars)
binary = jars.pop()
binary_jar = os.path.join(basedir, binary)
bundle_jar = os.path.join(bundledir, binary)
if not classpath:
os.symlink(binary_jar, bundle_jar)
else:
with open_zip(binary_jar, 'r') as src:
with open_zip(bundle_jar, 'w', compression=ZIP_DEFLATED) as dest:
for item in src.infolist():
buffer = src.read(item.filename)
if Manifest.PATH == item.filename:
manifest = Manifest(buffer)
manifest.addentry(Manifest.CLASS_PATH,
' '.join(os.path.join('libs', jar) for jar in classpath))
buffer = manifest.contents()
dest.writestr(item, buffer)
for bundle in app.bundles:
for path, relpath in bundle.filemap.items():
bundlepath = os.path.join(bundledir, relpath)
safe_mkdir(os.path.dirname(bundlepath))
os.symlink(path, bundlepath)
return bundledir
| [
"twitter.pants.tasks.jvm_binary_task.JvmBinaryTask.setup_parser",
"os.readlink",
"twitter.pants.fs.archive.archiver",
"twitter.pants.tasks.jvm_binary_task.JvmBinaryTask.__init__",
"os.path.join",
"twitter.common.collections.OrderedSet",
"os.symlink",
"twitter.pants.get_buildroot",
"twitter.pants.java.Manifest",
"os.path.dirname",
"twitter.common.dirutil.safe_mkdir",
"os.mkdir",
"twitter.pants.tasks.TaskError",
"twitter.common.contextutil.open_zip"
] | [((1476, 1530), 'twitter.pants.tasks.jvm_binary_task.JvmBinaryTask.setup_parser', 'JvmBinaryTask.setup_parser', (['option_group', 'args', 'mkflag'], {}), '(option_group, args, mkflag)\n', (1502, 1530), False, 'from twitter.pants.tasks.jvm_binary_task import JvmBinaryTask\n'), ((2344, 2381), 'twitter.pants.tasks.jvm_binary_task.JvmBinaryTask.__init__', 'JvmBinaryTask.__init__', (['self', 'context'], {}), '(self, context)\n', (2366, 2381), False, 'from twitter.pants.tasks.jvm_binary_task import JvmBinaryTask\n'), ((3833, 3886), 'os.path.join', 'os.path.join', (['self.outdir', "('%s-bundle' % app.basename)"], {}), "(self.outdir, '%s-bundle' % app.basename)\n", (3845, 3886), False, 'import os\n'), ((3979, 4012), 'twitter.common.dirutil.safe_mkdir', 'safe_mkdir', (['bundledir'], {'clean': '(True)'}), '(bundledir, clean=True)\n', (3989, 4012), False, 'from twitter.common.dirutil import safe_mkdir\n'), ((4030, 4042), 'twitter.common.collections.OrderedSet', 'OrderedSet', ([], {}), '()\n', (4040, 4042), False, 'from twitter.common.collections import OrderedSet\n'), ((3245, 3281), 'twitter.pants.fs.archive.archiver', 'archive.archiver', (['self.archiver_type'], {}), '(self.archiver_type)\n', (3261, 3281), False, 'from twitter.pants.fs import archive\n'), ((4085, 4116), 'os.path.join', 'os.path.join', (['bundledir', '"""libs"""'], {}), "(bundledir, 'libs')\n", (4097, 4116), False, 'import os\n'), ((4123, 4139), 'os.mkdir', 'os.mkdir', (['libdir'], {}), '(libdir)\n', (4131, 4139), False, 'import os\n'), ((4994, 5023), 'os.path.join', 'os.path.join', (['basedir', 'binary'], {}), '(basedir, binary)\n', (5006, 5023), False, 'import os\n'), ((5043, 5074), 'os.path.join', 'os.path.join', (['bundledir', 'binary'], {}), '(bundledir, binary)\n', (5055, 5074), False, 'import os\n'), ((4229, 4263), 'os.path.join', 'os.path.join', (['basedir', 'externaljar'], {}), '(basedir, externaljar)\n', (4241, 4263), False, 'import os\n'), ((4284, 4317), 'os.path.join', 'os.path.join', (['libdir', 'externaljar'], {}), '(libdir, externaljar)\n', (4296, 4317), False, 'import os\n'), ((4889, 4947), 'twitter.pants.tasks.TaskError', 'TaskError', (["('Expected 1 mapped binary but found: %s' % jars)"], {}), "('Expected 1 mapped binary but found: %s' % jars)\n", (4898, 4947), False, 'from twitter.pants.tasks import TaskError\n'), ((5107, 5141), 'os.symlink', 'os.symlink', (['binary_jar', 'bundle_jar'], {}), '(binary_jar, bundle_jar)\n', (5117, 5141), False, 'import os\n'), ((5795, 5827), 'os.path.join', 'os.path.join', (['bundledir', 'relpath'], {}), '(bundledir, relpath)\n', (5807, 5827), False, 'import os\n'), ((5884, 5912), 'os.symlink', 'os.symlink', (['path', 'bundlepath'], {}), '(path, bundlepath)\n', (5894, 5912), False, 'import os\n'), ((3956, 3971), 'twitter.pants.get_buildroot', 'get_buildroot', ([], {}), '()\n', (3969, 3971), False, 'from twitter.pants import get_buildroot\n'), ((4341, 4367), 'os.symlink', 'os.symlink', (['src', 'link_name'], {}), '(src, link_name)\n', (4351, 4367), False, 'import os\n'), ((5167, 5192), 'twitter.common.contextutil.open_zip', 'open_zip', (['binary_jar', '"""r"""'], {}), "(binary_jar, 'r')\n", (5175, 5192), False, 'from twitter.common.contextutil import open_zip\n'), ((5847, 5874), 'os.path.dirname', 'os.path.dirname', (['bundlepath'], {}), '(bundlepath)\n', (5862, 5874), False, 'import os\n'), ((5216, 5267), 'twitter.common.contextutil.open_zip', 'open_zip', (['bundle_jar', '"""w"""'], {'compression': 'ZIP_DEFLATED'}), "(bundle_jar, 'w', compression=ZIP_DEFLATED)\n", (5224, 5267), False, 'from twitter.common.contextutil import open_zip\n'), ((3773, 3788), 'twitter.pants.get_buildroot', 'get_buildroot', ([], {}), '()\n', (3786, 3788), False, 'from twitter.pants import get_buildroot\n'), ((5440, 5456), 'twitter.pants.java.Manifest', 'Manifest', (['buffer'], {}), '(buffer)\n', (5448, 5456), False, 'from twitter.pants.java import Manifest\n'), ((4576, 4598), 'os.readlink', 'os.readlink', (['link_name'], {}), '(link_name)\n', (4587, 4598), False, 'import os\n'), ((5555, 5580), 'os.path.join', 'os.path.join', (['"""libs"""', 'jar'], {}), "('libs', jar)\n", (5567, 5580), False, 'import os\n')] |
import pandas as pd
import numpy as np
import hydra
from hydra.utils import instantiate, call
from omegaconf import DictConfig, OmegaConf
import logging
from typing import Dict
import plotnine as p9
from aim import Session
from pathlib import Path
import os
os.environ["AIM_UI_TELEMETRY_ENABLED"] = "0"
local_logger = logging.getLogger("experiment")
logging.getLogger("matplotlib").setLevel(logging.WARNING)
def steps_violin_plotter(df_ar, testbed, run=0):
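    # Overlay the (action, reward) pairs observed in a single run on violins of the testbed's estimated per-arm reward distribution.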
df_estimate = testbed.estimate_distribution(1000)
df_estimate = df_estimate.astype({"action": "int32"})
df_ar = df_ar.loc[df_ar["run"] == run]
df_ar = df_ar.astype({"action": "int32"})
p = (
p9.ggplot(
p9.aes(
x="reorder(factor(action), action)",
y="reward",
)
)
+ p9.ggtitle(f"Action - Rewards across {df_ar.shape[0]} steps")
+ p9.xlab("k-arm")
+ p9.ylab("Reward")
+ p9.geom_violin(df_estimate, fill="#d0d3d4")
+ p9.geom_jitter(df_ar, p9.aes(color="step"))
+ p9.theme(figure_size=(20, 9))
)
fig = p.draw()
return fig
def average_runs(df, group=[]):
"""Average all dataframe columns across runs
Attributes:
group (list): Additional list of columns to group by before taking the average
"""
return df.groupby(["step"] + group).mean().reset_index()
def optimal_action(df, group=[]):
"""Create new column "optimal_action_percent"
Attributes:
group (list):
Additional list of columns to group by before calculating percent optimal action
"""
df["optimal_action_true"] = np.where(df["action"] == df["optimal_action"], 1, 0)
df["optimal_action_percent"] = df["step"].map(
df.groupby(["step"])["optimal_action_true"].sum() / (df["run"].max() + 1)
)
return df
def write_scalars(df, session, column: str, tag: str, hp: dict):
"""Write scalars to local using aim
Return
Value of last step
"""
df = average_runs(df)
df.apply(
lambda x: session.track(
x[column],
epoch=int(x.step),
name=tag,
),
axis=1,
)
return df[column].iloc[-1]
@hydra.main(config_path="configs/bandits", config_name="defaults")
def main(cfg: DictConfig):
session = Session(
repo=(Path.home() / "projects/rlbook/experiments/outputs/bandit").as_posix(),
experiment=cfg.bandit["_target_"].split(".")[-1],
)
testbed = instantiate(cfg.testbed)
bandit = instantiate(cfg.bandit, Q_init=call(cfg.Q_init, testbed))
local_logger.info(f"Running bandit: {cfg.run}")
local_logger.debug(f"Testbed expected values: {testbed.expected_values}")
local_logger.debug(f"bandit config: {cfg['bandit']}")
local_logger.debug(f"run config: {cfg['run']}")
session.set_params(OmegaConf.to_container(cfg.run), "experiment")
bandit.run(testbed, **OmegaConf.to_container(cfg.run))
df_ar = bandit.output_df()
df_ar = optimal_action(df_ar)
local_logger.debug(f"\n{df_ar[['run', 'step', 'action', 'optimal_action', 'reward']].head(15)}")
bandit_type = cfg.bandit._target_.split(".")[-1]
Q_init = cfg.Q_init._target_.split(".")[-1]
task_name = f"{bandit_type} - " + ", ".join(
[
f"{k}: {OmegaConf.select(cfg, v).split('.')[-1]}"
if isinstance(OmegaConf.select(cfg, v), str)
else f"{k}: {OmegaConf.select(cfg, v)}"
for k, v in cfg.task_labels.items()
]
)
local_logger.debug(f"{task_name}")
hp_testbed = OmegaConf.to_container(cfg.testbed)
hp = OmegaConf.to_container(cfg.bandit)
hp["Q_init"] = cfg.Q_init._target_
hp["p_drift"] = hp_testbed["p_drift"]
session.set_params(hp, "hyperparameters")
# for i in range(min(3, cfg.run.n_runs)):
# fig = steps_violin_plotter(df_ar, testbed, run=i)
# writer.add_figure(f"run{i}", fig, global_step=cfg.run.steps)
final_avg_reward = write_scalars(df_ar, session, "reward", "average_reward", hp)
final_optimal_action = write_scalars(
df_ar, session, "optimal_action_percent", "optimal_action_percent", hp
)
final_metrics = {
"average_reward": final_avg_reward,
"optimal_action_percent": final_optimal_action,
}
session.set_params(final_metrics, "final_metrics")
local_logger.debug(f"final_metrics: {final_metrics}")
if __name__ == "__main__":
main()
| [
"logging.getLogger",
"omegaconf.OmegaConf.select",
"hydra.main",
"plotnine.ggtitle",
"numpy.where",
"hydra.utils.instantiate",
"pathlib.Path.home",
"plotnine.aes",
"omegaconf.OmegaConf.to_container",
"plotnine.ylab",
"hydra.utils.call",
"plotnine.xlab",
"plotnine.geom_violin",
"plotnine.theme"
] | [((320, 351), 'logging.getLogger', 'logging.getLogger', (['"""experiment"""'], {}), "('experiment')\n", (337, 351), False, 'import logging\n'), ((2226, 2291), 'hydra.main', 'hydra.main', ([], {'config_path': '"""configs/bandits"""', 'config_name': '"""defaults"""'}), "(config_path='configs/bandits', config_name='defaults')\n", (2236, 2291), False, 'import hydra\n'), ((1648, 1700), 'numpy.where', 'np.where', (["(df['action'] == df['optimal_action'])", '(1)', '(0)'], {}), "(df['action'] == df['optimal_action'], 1, 0)\n", (1656, 1700), True, 'import numpy as np\n'), ((2507, 2531), 'hydra.utils.instantiate', 'instantiate', (['cfg.testbed'], {}), '(cfg.testbed)\n', (2518, 2531), False, 'from hydra.utils import instantiate, call\n'), ((3594, 3629), 'omegaconf.OmegaConf.to_container', 'OmegaConf.to_container', (['cfg.testbed'], {}), '(cfg.testbed)\n', (3616, 3629), False, 'from omegaconf import DictConfig, OmegaConf\n'), ((3639, 3673), 'omegaconf.OmegaConf.to_container', 'OmegaConf.to_container', (['cfg.bandit'], {}), '(cfg.bandit)\n', (3661, 3673), False, 'from omegaconf import DictConfig, OmegaConf\n'), ((352, 383), 'logging.getLogger', 'logging.getLogger', (['"""matplotlib"""'], {}), "('matplotlib')\n", (369, 383), False, 'import logging\n'), ((1061, 1090), 'plotnine.theme', 'p9.theme', ([], {'figure_size': '(20, 9)'}), '(figure_size=(20, 9))\n', (1069, 1090), True, 'import plotnine as p9\n'), ((2867, 2898), 'omegaconf.OmegaConf.to_container', 'OmegaConf.to_container', (['cfg.run'], {}), '(cfg.run)\n', (2889, 2898), False, 'from omegaconf import DictConfig, OmegaConf\n'), ((2576, 2601), 'hydra.utils.call', 'call', (['cfg.Q_init', 'testbed'], {}), '(cfg.Q_init, testbed)\n', (2580, 2601), False, 'from hydra.utils import instantiate, call\n'), ((2940, 2971), 'omegaconf.OmegaConf.to_container', 'OmegaConf.to_container', (['cfg.run'], {}), '(cfg.run)\n', (2962, 2971), False, 'from omegaconf import DictConfig, OmegaConf\n'), ((953, 996), 'plotnine.geom_violin', 'p9.geom_violin', (['df_estimate'], {'fill': '"""#d0d3d4"""'}), "(df_estimate, fill='#d0d3d4')\n", (967, 996), True, 'import plotnine as p9\n'), ((1029, 1049), 'plotnine.aes', 'p9.aes', ([], {'color': '"""step"""'}), "(color='step')\n", (1035, 1049), True, 'import plotnine as p9\n'), ((925, 942), 'plotnine.ylab', 'p9.ylab', (['"""Reward"""'], {}), "('Reward')\n", (932, 942), True, 'import plotnine as p9\n'), ((898, 914), 'plotnine.xlab', 'p9.xlab', (['"""k-arm"""'], {}), "('k-arm')\n", (905, 914), True, 'import plotnine as p9\n'), ((2356, 2367), 'pathlib.Path.home', 'Path.home', ([], {}), '()\n', (2365, 2367), False, 'from pathlib import Path\n'), ((3390, 3414), 'omegaconf.OmegaConf.select', 'OmegaConf.select', (['cfg', 'v'], {}), '(cfg, v)\n', (3406, 3414), False, 'from omegaconf import DictConfig, OmegaConf\n'), ((826, 887), 'plotnine.ggtitle', 'p9.ggtitle', (['f"""Action - Rewards across {df_ar.shape[0]} steps"""'], {}), "(f'Action - Rewards across {df_ar.shape[0]} steps')\n", (836, 887), True, 'import plotnine as p9\n'), ((3446, 3470), 'omegaconf.OmegaConf.select', 'OmegaConf.select', (['cfg', 'v'], {}), '(cfg, v)\n', (3462, 3470), False, 'from omegaconf import DictConfig, OmegaConf\n'), ((703, 758), 'plotnine.aes', 'p9.aes', ([], {'x': '"""reorder(factor(action), action)"""', 'y': '"""reward"""'}), "(x='reorder(factor(action), action)', y='reward')\n", (709, 758), True, 'import plotnine as p9\n'), ((3322, 3346), 'omegaconf.OmegaConf.select', 'OmegaConf.select', (['cfg', 'v'], {}), '(cfg, v)\n', (3338, 3346), False, 'from omegaconf import DictConfig, OmegaConf\n')] |
#!/usr/bin/env python
"""
Tester module for Vkontakte downloader.
Works with Python 3.4
Uses download.py
Tests the main function calls,
like vk('users.get', 137589139)
"""
__author__ = '<NAME>'
__name__ = 'Tester module for Vkontakte downloader'
from prototype.vk.download import vk, getfriends
import time
import json
import io
# import sys
# import os
# import download
# sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
output = {'users.get': {37740532: vk('users.get', 37740532)}}
time.sleep(0.36)
output['friends.get'] = {37740532: vk('friends.get', 37740532)}
time.sleep(0.36)
output['groups.getById'] = {38463621: vk('groups.getById', 38463621)}
time.sleep(0.36)
output['wall.get'] = {37740532: vk('wall.get', 37740532)}
# print(vk('wall.get', 9393002)) Get an error, wall isn't accessible
time.sleep(0.36)
output['users.getSubscriptions'] = {37740532: vk('users.getSubscriptions', 37740532)}
time.sleep(0.36)
output['getfriends()'] = {37740532: getfriends(37740532)}
## output = {'execute': {37740532: vk('execute', 37740532)}} # Test for execute
with open('tester.json', 'w') as f:
json.dump(output, f, indent=4)
print('Hurray, something was dumped!')
| [
"json.dump",
"prototype.vk.download.getfriends",
"time.sleep",
"prototype.vk.download.vk"
] | [((500, 516), 'time.sleep', 'time.sleep', (['(0.36)'], {}), '(0.36)\n', (510, 516), False, 'import time\n'), ((581, 597), 'time.sleep', 'time.sleep', (['(0.36)'], {}), '(0.36)\n', (591, 597), False, 'import time\n'), ((668, 684), 'time.sleep', 'time.sleep', (['(0.36)'], {}), '(0.36)\n', (678, 684), False, 'import time\n'), ((812, 828), 'time.sleep', 'time.sleep', (['(0.36)'], {}), '(0.36)\n', (822, 828), False, 'import time\n'), ((915, 931), 'time.sleep', 'time.sleep', (['(0.36)'], {}), '(0.36)\n', (925, 931), False, 'import time\n'), ((552, 579), 'prototype.vk.download.vk', 'vk', (['"""friends.get"""', '(37740532)'], {}), "('friends.get', 37740532)\n", (554, 579), False, 'from prototype.vk.download import vk, getfriends\n'), ((636, 666), 'prototype.vk.download.vk', 'vk', (['"""groups.getById"""', '(38463621)'], {}), "('groups.getById', 38463621)\n", (638, 666), False, 'from prototype.vk.download import vk, getfriends\n'), ((717, 741), 'prototype.vk.download.vk', 'vk', (['"""wall.get"""', '(37740532)'], {}), "('wall.get', 37740532)\n", (719, 741), False, 'from prototype.vk.download import vk, getfriends\n'), ((875, 913), 'prototype.vk.download.vk', 'vk', (['"""users.getSubscriptions"""', '(37740532)'], {}), "('users.getSubscriptions', 37740532)\n", (877, 913), False, 'from prototype.vk.download import vk, getfriends\n'), ((968, 988), 'prototype.vk.download.getfriends', 'getfriends', (['(37740532)'], {}), '(37740532)\n', (978, 988), False, 'from prototype.vk.download import vk, getfriends\n'), ((1112, 1142), 'json.dump', 'json.dump', (['output', 'f'], {'indent': '(4)'}), '(output, f, indent=4)\n', (1121, 1142), False, 'import json\n'), ((472, 497), 'prototype.vk.download.vk', 'vk', (['"""users.get"""', '(37740532)'], {}), "('users.get', 37740532)\n", (474, 497), False, 'from prototype.vk.download import vk, getfriends\n')] |
from django.test import TestCase
from django.shortcuts import reverse
from rest_framework import status
from rest_framework.test import APITestCase
from buyer.factory import fake_buyer
from shop.factory import fake_painting
from cart.models import Cart, CartItem
class CartModelMethodTests(TestCase):
"""
Tests cart.Cart's custom methods.
"""
def test_cart_creation(self) -> None:
buyer = fake_buyer()
self.assertEqual(buyer.cart, Cart.objects.get())
def test_cart_add(self):
painting0, painting1 = fake_painting(), fake_painting()
buyer = fake_buyer()
# add new items
buyer.cart.add(painting0.id)
buyer.cart.add(painting1.id)
self.assertEqual(buyer.cart.items.get(painting=painting0).quantity, 1)
self.assertEqual(buyer.cart.items.get(painting=painting1).quantity, 1)
self.assertEqual(buyer.cart.items.count(), 2, msg='2 items not found in cart')
# add old item thus increasing count
buyer.cart.add(painting0.id)
self.assertEqual(buyer.cart.items.get(painting=painting0).quantity, 2)
class CartAPIViewTests(APITestCase):
""" Cohesive unit tests for views relating to the Cart model. """
@classmethod
def setUpTestData(cls):
cls.buyer = fake_buyer()
cls.painting0, cls.painting1 = fake_painting(), fake_painting()
def test_add_cart_item_view(self):
# noinspection PyUnresolvedReferences
self.client.credentials(HTTP_AUTHORIZATION='Token ' + self.buyer.auth_token.key)
response = self.client.post(
reverse('cart:add'), {'painting_id': self.painting0.id}
)
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
self.assertEqual(CartItem.objects.get().painting.id, self.painting0.id)
def test_add_cart_view_no_credentials(self):
response = self.client.post(
reverse('cart:add'), {'painting_id': self.painting1.id}
)
self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
def test_add_cart_no_painting_id(self):
# noinspection PyUnresolvedReferences
self.client.credentials(HTTP_AUTHORIZATION='Token ' + self.buyer.auth_token.key)
response = self.client.post(reverse('cart:add'))
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
def test_add_cart_invalid_painting_id(self):
# noinspection PyUnresolvedReferences
self.client.credentials(HTTP_AUTHORIZATION='Token ' + self.buyer.auth_token.key)
response = self.client.post(reverse('cart:add'), {'painting_id': 314931})
self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)
def test_remove_cart_item_view(self):
# noinspection PyUnresolvedReferences
self.client.credentials(HTTP_AUTHORIZATION='Token ' + self.buyer.auth_token.key)
cart_item = self.buyer.cart.add(self.painting1.id)
response = self.client.post(
reverse('cart:remove'), {'cart_item_id': cart_item.id}
)
self.assertEqual(response.status_code, status.HTTP_200_OK)
def test_decrement_cart_item_view(self):
# noinspection PyUnresolvedReferences
self.client.credentials(HTTP_AUTHORIZATION='Token ' + self.buyer.auth_token.key)
# Add items to increment quantity to 2.
self.buyer.cart.add(self.painting0.id)
cart_item = self.buyer.cart.add(self.painting0.id)
# Decrement once.
response = self.client.post(
reverse('cart:decrement'), {'cart_item_id': cart_item.id}
)
# Check value was decremented by one.
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(
CartItem.objects.values_list('quantity', flat=True).get(),
cart_item.quantity - 1,
)
# Decrement and thus delete cart item.
response = self.client.post(
reverse('cart:decrement'), {'cart_item_id': cart_item.id}
)
# Check cart item was deleted.
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(CartItem.objects.count(), 0)
def test_increment_cart_item_view(self):
"""
Similar to test_decrement_cart_item_view except instead of decrementing,
        it increases the quantity of the cart_item by 1.
"""
# noinspection PyUnresolvedReferences
self.client.credentials(HTTP_AUTHORIZATION='Token ' + self.buyer.auth_token.key)
# Add items to increment quantity to 2.
self.buyer.cart.add(self.painting0.id)
cart_item = self.buyer.cart.add(self.painting0.id)
# Increment once.
response = self.client.post(
reverse('cart:increment'), {'cart_item_id': cart_item.id}
)
# Check value was incremented by one.
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(
CartItem.objects.values_list('quantity', flat=True).get(),
cart_item.quantity + 1,
)
# Increment once again.
response = self.client.post(
reverse('cart:increment'), {'cart_item_id': cart_item.id}
)
# Check cart item's quantity.
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(CartItem.objects.values_list('quantity', flat=True).get(), 4)
| [
"cart.models.CartItem.objects.values_list",
"cart.models.CartItem.objects.get",
"shop.factory.fake_painting",
"buyer.factory.fake_buyer",
"django.shortcuts.reverse",
"cart.models.CartItem.objects.count",
"cart.models.Cart.objects.get"
] | [((418, 430), 'buyer.factory.fake_buyer', 'fake_buyer', ([], {}), '()\n', (428, 430), False, 'from buyer.factory import fake_buyer\n'), ((598, 610), 'buyer.factory.fake_buyer', 'fake_buyer', ([], {}), '()\n', (608, 610), False, 'from buyer.factory import fake_buyer\n'), ((1293, 1305), 'buyer.factory.fake_buyer', 'fake_buyer', ([], {}), '()\n', (1303, 1305), False, 'from buyer.factory import fake_buyer\n'), ((468, 486), 'cart.models.Cart.objects.get', 'Cart.objects.get', ([], {}), '()\n', (484, 486), False, 'from cart.models import Cart, CartItem\n'), ((549, 564), 'shop.factory.fake_painting', 'fake_painting', ([], {}), '()\n', (562, 564), False, 'from shop.factory import fake_painting\n'), ((566, 581), 'shop.factory.fake_painting', 'fake_painting', ([], {}), '()\n', (579, 581), False, 'from shop.factory import fake_painting\n'), ((1345, 1360), 'shop.factory.fake_painting', 'fake_painting', ([], {}), '()\n', (1358, 1360), False, 'from shop.factory import fake_painting\n'), ((1362, 1377), 'shop.factory.fake_painting', 'fake_painting', ([], {}), '()\n', (1375, 1377), False, 'from shop.factory import fake_painting\n'), ((1602, 1621), 'django.shortcuts.reverse', 'reverse', (['"""cart:add"""'], {}), "('cart:add')\n", (1609, 1621), False, 'from django.shortcuts import reverse\n'), ((1919, 1938), 'django.shortcuts.reverse', 'reverse', (['"""cart:add"""'], {}), "('cart:add')\n", (1926, 1938), False, 'from django.shortcuts import reverse\n'), ((2278, 2297), 'django.shortcuts.reverse', 'reverse', (['"""cart:add"""'], {}), "('cart:add')\n", (2285, 2297), False, 'from django.shortcuts import reverse\n'), ((2596, 2615), 'django.shortcuts.reverse', 'reverse', (['"""cart:add"""'], {}), "('cart:add')\n", (2603, 2615), False, 'from django.shortcuts import reverse\n'), ((3002, 3024), 'django.shortcuts.reverse', 'reverse', (['"""cart:remove"""'], {}), "('cart:remove')\n", (3009, 3024), False, 'from django.shortcuts import reverse\n'), ((3546, 3571), 'django.shortcuts.reverse', 'reverse', (['"""cart:decrement"""'], {}), "('cart:decrement')\n", (3553, 3571), False, 'from django.shortcuts import reverse\n'), ((3968, 3993), 'django.shortcuts.reverse', 'reverse', (['"""cart:decrement"""'], {}), "('cart:decrement')\n", (3975, 3993), False, 'from django.shortcuts import reverse\n'), ((4168, 4192), 'cart.models.CartItem.objects.count', 'CartItem.objects.count', ([], {}), '()\n', (4190, 4192), False, 'from cart.models import Cart, CartItem\n'), ((4770, 4795), 'django.shortcuts.reverse', 'reverse', (['"""cart:increment"""'], {}), "('cart:increment')\n", (4777, 4795), False, 'from django.shortcuts import reverse\n'), ((5177, 5202), 'django.shortcuts.reverse', 'reverse', (['"""cart:increment"""'], {}), "('cart:increment')\n", (5184, 5202), False, 'from django.shortcuts import reverse\n'), ((1765, 1787), 'cart.models.CartItem.objects.get', 'CartItem.objects.get', ([], {}), '()\n', (1785, 1787), False, 'from cart.models import Cart, CartItem\n'), ((3766, 3817), 'cart.models.CartItem.objects.values_list', 'CartItem.objects.values_list', (['"""quantity"""'], {'flat': '(True)'}), "('quantity', flat=True)\n", (3794, 3817), False, 'from cart.models import Cart, CartItem\n'), ((4990, 5041), 'cart.models.CartItem.objects.values_list', 'CartItem.objects.values_list', (['"""quantity"""'], {'flat': '(True)'}), "('quantity', flat=True)\n", (5018, 5041), False, 'from cart.models import Cart, CartItem\n'), ((5376, 5427), 'cart.models.CartItem.objects.values_list', 'CartItem.objects.values_list', (['"""quantity"""'], {'flat': '(True)'}), "('quantity', flat=True)\n", (5404, 5427), False, 'from cart.models import Cart, CartItem\n')] |
# Import Person_detection_tan
from Person_detection_tan import tf, pd, tqdm, os, np, argparse, IMG_HEIGHT, IMG_WIDTH, str2bool, preprocess_raise_img_vector, load_model, image_features_extract_model, image_flatten
import testdata as testdata
import logging
from pathlib import Path
# import cv2
from skimage.filters import threshold_otsu, threshold_multiotsu
from matplotlib import image
import matplotlib.pyplot as plt
import matplotlib.ticker as plticker
import matplotlib.patches as patches
try:
from PIL import Image
except ImportError:
import Image
# Set Constants
FILENAME = 'RAISE_all.csv'
RAISE_DIR = '/original/RAISE/'
# Variables
global MODEL_DIR
# global CHECKPOINT_DIR
global FIRST_ONLY
global TOTAL_IMAGES
MODEL_DIR = os.path.dirname(os.path.abspath(__file__))
MODEL_DIR = '/scratch/projekt1/demo/lr-0.000001_bs-4_ep-160_ti-8155_nm-True_is-1_us-Tan_kr-0.01/tmp/'
# CHECKPOINT_DIR = os.path.dirname(os.path.abspath(__file__))
# PLOT_DIR = '/scratch/projekt1/submitSkript/plots/'
PLOT_DIR = 'plots/'
plot_path = ''
# Set up logging
# local_logger = logging.getLogger('Localization')
# local_logger.basicConfig(level=logging.INFO, format='%(asctime)s: %(message)s', datefmt='%m-%d %H:%M', filename='output_loc.log', filemode='w')
# logging.basicConfig(level=logging.INFO, format='%(asctime)s: %(message)s', datefmt='%m-%d %H:%M', filename='/scratch/projekt1/Source/localization.log', filemode='w')
# console = logging.StreamHandler()
# console.setLevel(logging.INFO)
# formatter = logging.Formatter('%(asctime)s: %(message)s')
# console.setFormatter(formatter)
# logging.getLogger().addHandler(console)
# local_logger = logging.getLogger()
local_logger = logging.getLogger('localization')
for hdlr in local_logger.handlers[:]: # remove all old handlers
local_logger.removeHandler(hdlr)
def preprocess_raise_test_path(total_images = 0):
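    # Collect absolute paths of the RAISE test .TIF images listed in the CSV index.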
all_img_path_vector = []
raise_db = pd.read_csv(RAISE_DIR + FILENAME)
image_paths = raise_db.File
total_img = image_paths.shape[0]
if total_images > 0:
total_img = total_images
if FIRST_ONLY:
total_img = 50
for row in tqdm(range(total_img)):
for root, dirs, files in os.walk(RAISE_DIR + 'RaiseTest'):
for file in files:
if file.endswith(str(image_paths.loc[row]) + '.TIF'):
all_img_path_vector.append(root+'/'+str(file))
return all_img_path_vector
def preprocess_raise_test_binary(total_images = 0):
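    # Build one-hot labels from the CSV keywords: [1, 0] if an image is tagged 'people', else [0, 1].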
all_img_binary_vector = []
raise_db = pd.read_csv(RAISE_DIR + FILENAME)
keywords = raise_db.Keywords
    image_paths = raise_db.File
total_img = image_paths.shape[0]
if total_images > 0:
total_img = total_images
if FIRST_ONLY:
total_img = 50
for row in tqdm(range(total_img)):
for root, dirs, files in os.walk(RAISE_DIR + 'RaiseTest'):
for file in files:
if file.endswith(str(image_paths.loc[row]) + '.TIF'):
if('people' in str(keywords.loc[row]) ):
all_img_binary_vector.append([1, 0])
else:
all_img_binary_vector.append([0, 1])
return all_img_binary_vector
# Load model
# def load_model(model_dir = MODEL_DIR, checkpoint_dir = CHECKPOINT_DIR):
# # # Load existing model
# # model = tf.keras.models.load_model(MODEL_PATH + model_name, compile=False)
# model = tf.keras.models.load_model(model_dir)
# latest = tf.train.latest_checkpoint(checkpoint_dir)
# model.load_weights(latest)
# # # Print summary
# local_logger.info(model.summary())
# return model
def get_weights(model):
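    # Return a single column of the final dense layer's kernel: w1 (class index 0) or w2 (class index 1), selected via WEIGHT_MATRIX.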
# local_logger.info(model.trainable_variables)
# trainable_variables = tf.Variable(model.trainable_variables)
# model_weights = trainable_variables.eval()
# local_logger.info('Layer weight: ')
# local_logger.info('Trainable variables: ' + repr(model.trainable_variables))
# local_logger.info('[0]: ' + repr(model.trainable_variables[0]))
# local_logger.info('[1]: ' + repr(model.trainable_variables[1]))
model_weights = np.asarray(model.trainable_variables[0].numpy())
# local_logger.info('Model weights: ' + repr(model_weights))
# local_logger.info(model_weights.shape)
model_weights_2 = model_weights[:,1]
# local_logger.info('w2: ' + repr(model_weights_2))
# local_logger.info(model_weights_2.shape)
model_weights_1 = model_weights[:,0]
# local_logger.info('w1: ' + repr(model_weights_1))
# local_logger.info(model_weights_1.shape)
if WEIGHT_MATRIX == 'w1':
# local_logger.info('weight w1')
return model_weights_1
else:
# local_logger.info('weight w2')
return model_weights_2
# Get flatten layer output
def get_flat_img(img):
if (FIRST_ONLY):
layer_last_model = tf.keras.Model(model.inputs, model.layers[-1].output)
layer_last_out = layer_last_model(img, training=False)
local_logger.info('Layer [-1]: ' + repr(layer_last_out))
local_logger.info('Layer [-1] shape: ' + repr(layer_last_out.numpy().shape))
flatten_layer_model = tf.keras.Model(model.inputs, model.layers[-2].output)
flatten_layer_out = flatten_layer_model(img, training=False)
# local_logger.info('Layer [-2]: ' + repr(flatten_layer_out))
# local_logger.info('Layer [-2] shape: ' + repr(flatten_layer_out.numpy().shape))
if (FIRST_ONLY):
layer_first_out = image_features_extract_model(img, training=False)
local_logger.info('Layer [-3]: ' + repr(layer_first_out))
local_logger.info('Layer [-3] shape: ' + repr(layer_first_out.numpy().shape))
flat_img = flatten_layer_out.numpy()[0]
# features_extract = image_features_extract_model(img)
# flat_img = image_flatten(features_extract).numpy()[0]
# local_logger.info(flat_img)
return flat_img
# # Calculate the weight matrix to locate people
def get_2d_sum_mat(weight_matrix, layer_matrix):
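    # CAM-style localization map: weight each of the 2048 feature channels by its class weight, reshape the flat product back to the 9x15 grid, and sum over channels to get a 2D activation map.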
# local_logger.info('Weight matrix shape: ' + repr(weight_matrix.shape))
# local_logger.info('Layer matrix shape: ' + repr(layer_matrix.shape))
flat_product_mat = [a*b for a, b in zip(weight_matrix , layer_matrix)]
flat_product_mat = np.asarray(flat_product_mat)
# cubic_product_mat = flat_product_mat.reshape((9, 15, 2048))
cubic_product_mat = flat_product_mat.reshape(9, 15, 2048)
sum_mat = np.sum(cubic_product_mat, axis=2)
if (FIRST_ONLY):
# For debugging only
cubic_layer_matrix = layer_matrix.reshape(9, 15, 2048)
local_logger.info('Cubic layer after reshape: ' + repr(cubic_layer_matrix))
local_logger.info('Cubic layer shape: ' + repr(cubic_layer_matrix.shape))
local_logger.info('Flat product shape: ' + repr(flat_product_mat.shape))
local_logger.info('Cubic product: ' + repr(cubic_product_mat))
local_logger.info('Cubic product shape: ' + repr(cubic_product_mat.shape))
local_logger.info('Result matrix: ' + repr(sum_mat))
local_logger.info('Result matrix shape: ' + repr(sum_mat.shape))
return sum_mat
# Detect people in one image
def detect_people(img, img_path):
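    # Classify the image first; only if the people class wins, build the channel-weighted activation map, threshold it with multi-Otsu, and save the annotated plot. Returns 1 for a positive, 0 otherwise.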
# local_logger.info(img.shape)
# First, predict if image has people
prediction = model(img, training=False).numpy()
# local_logger.info(prediction)
# Second, if image has peple then do the localisization
if prediction[0][0] > prediction[0][1]:
flat_img = get_flat_img(img)
sum_mat = get_2d_sum_mat(model_weights, flat_img)
# replace all negative values with 0
no_neg_sum_mat = sum_mat.copy()
no_neg_sum_mat[no_neg_sum_mat < 0] = 0
# Using Otsu threshold to locate people
# sum_mat = sum_mat.astype('float32')
# max = np.max(sum_mat)
# min = np.min(sum_mat)
# raw_th = max - 0.4*(max - min)
# otsu_th, otsu_mat = cv2.threshold(sum_mat, raw_th, max, cv2.THRESH_TOZERO+cv2.THRESH_OTSU)
# local_logger.info(max, min, otsu_th)
# draw_img_plot(img_path, otsu_mat, otsu_th, PLOT_DIR)
# thresh = threshold_otsu(sum_mat)
thresh_arr = threshold_multiotsu(no_neg_sum_mat, classes=3)
thresh = thresh_arr[-1]
# local_logger.info('Otsu threshold value: ' + repr(thresh))
# Considering threshold_multiotsu for better localization?
draw_img_plot(img_path, sum_mat, thresh, plot_path)
return 1
else:
return 0
# Get 1st image containing people
def get_first_detection(path_vector, class_vector, all_img_name_vector):
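    # Iterate over the images in order and stop at the first one classified as containing people.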
# img_pos = -1
# for index in range(len(class_vector)):
# if class_vector[index] == [1,0]:
# img_pos = index
# break
# img = all_img_name_vector[img_pos]
# return detect_people(img, path_vector[img_pos])
total_imgs = len(path_vector)
positive = 0
for i in tqdm(range(total_imgs)):
img_path = path_vector[i]
img = all_img_name_vector[i]
positive = detect_people(img, img_path)
if positive == 1:
break
return positive
# Detect people in all test dataset
def detect_people_all(path_vector, class_vector, all_img_name_vector):
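    # Run detection over the whole test set and count the images classified as containing people.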
total_imgs = len(path_vector)
total_positives = 0
for i in tqdm(range(total_imgs)):
img_path = path_vector[i]
# local_logger.info(img_path)
img = all_img_name_vector[i]
pos = detect_people(img, img_path)
total_positives += pos
return total_positives
# # Draw image and plot
def draw_img_plot(img_path, result, thres = 0.4, plot_dir = PLOT_DIR):
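    # Overlay a 40-pixel grid on the 600x360 resized image and annotate each cell with its activation value; cells at or above the threshold are highlighted with a translucent green rectangle and red text.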
my_dpi=100.
temp_img = Image.open(img_path)
temp_img = temp_img.resize((600, 360))
# temp_img = np.asarray(temp_img)
# local_logger.info(temp_img.shape)
img_filename = os.path.basename(img_path)
# local_logger.info(img_filename)
fig = plt.figure(figsize=(15, 9),dpi=my_dpi)
ax=fig.add_subplot(111)
# Remove whitespace from around the image
# fig.subplots_adjust(left=0,right=1,bottom=0,top=1)
# Set the gridding interval: here we use the major tick interval
myInterval=40.
loc = plticker.MultipleLocator(base=myInterval)
ax.xaxis.set_major_locator(loc)
ax.yaxis.set_major_locator(loc)
# Add the grid
ax.set_xticks(np.arange(0, 15*myInterval, myInterval))
ax.grid(which='major', axis='both', linestyle='-')
# Add the image
ax.imshow(temp_img)
# fig.savefig(PLOT_DIR + '_img.jpg', dpi=my_dpi)
for (i, j), z in np.ndenumerate(result):
row = (i + 0.5)*myInterval
col = (j + 0.5)*myInterval
if z < thres:
text_color = 'w'
else:
text_color = 'r'
rect = patches.Rectangle((j*myInterval,i*myInterval),myInterval,myInterval,linewidth=2,edgecolor='r',facecolor=(0,1,0,0.3))
ax.add_patch(rect)
ax.text(col, row, '{:0.4f}'.format(z), ha='center', va='center', color=text_color)
# plt.show()
fig.savefig(plot_dir + img_filename + '_plot.jpg', dpi=my_dpi)
plt.close("all")
if __name__ == "__main__":
PARSER = argparse.ArgumentParser()
# Adding arguments for parser
PARSER.add_argument('--model_dir', type=str, default=MODEL_DIR, help='Model and checkpoint dir')
# PARSER.add_argument('--checkpoint', type=str, default=os.path.dirname(os.path.abspath(__file__)), help='Checkpoint dir')
PARSER.add_argument('--weight_matrix', type=str, default='w1', help='Which weight matrix to use: w1 or w2')
PARSER.add_argument('--first_only', type=str2bool, default=False, help='Get first result only')
PARSER.add_argument('--total_images', type=int, default=150, help='Defining size of loaded dataset')
PARSER.add_argument('--plot_dir', type=str, default=PLOT_DIR, help='Plot dir')
args = PARSER.parse_args()
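    # Promote each CLI argument to an upper-case module-level global (e.g. --plot_dir -> PLOT_DIR)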
for name, value in args._get_kwargs():
variable_name = name.upper()
exec(variable_name + " = value")
# if name=='model_dir':
# MODEL_DIR=value
# continue
# if name=='checkpoint':
# CHECKPOINT_DIR=value
# continue
# if name=='first_only':
# FIRST_ONLY=value
# continue
# if name=='total_images':
# TOTAL_IMAGES=value
# continue
# if name=='plot_dir':
# PLOT_DIR=value
# continue
# Create plots directory
    plot_parent_dir = MODEL_DIR.rstrip('/').replace('/tmp', '/')
plot_path = plot_parent_dir + PLOT_DIR
Path(plot_path).mkdir(parents=True, exist_ok=True)
# Logging
local_logger.setLevel(logging.DEBUG)
# create file handler which logs even debug messages
if FIRST_ONLY:
fh = logging.FileHandler(plot_parent_dir + 'localization_first.log')
else:
fh = logging.FileHandler(plot_parent_dir + 'localization.log')
fh.setLevel(logging.DEBUG)
# create console handler with a higher log level
ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)
# create formatter and add it to the handlers
formatter = logging.Formatter('%(asctime)s - %(message)s', datefmt='%Y-%m-%d %H:%M:%S')
ch.setFormatter(formatter)
fh.setFormatter(formatter)
# add the handlers to logger
local_logger.addHandler(ch)
local_logger.addHandler(fh)
# local_logger.info(FIRST_ONLY)
# local_logger.info(TOTAL_IMAGES)
# local_logger.info(MODEL_DIR)
# Get model weights
    model = load_model(model_dir=MODEL_DIR, checkpoint_dir=MODEL_DIR)
model_weights = get_weights(model)
# Get test dataset vector
local_logger.info('Processing dataset')
total_images = TOTAL_IMAGES
    start_index = 300  # for demo purposes
# path_vector = preprocess_raise_test_path(total_images)
# class_vector = preprocess_raise_test_binary(total_images)
# all_img_name_vector = preprocess_raise_img_vector(path_vector)
path_vector = testdata.path_vector[start_index:total_images+start_index]
class_vector = testdata.class_vector[start_index:total_images+start_index]
image_vector = []
for i in tqdm(range(len(path_vector))):
img_path = path_vector[i]
img = image.imread(img_path)
img = tf.image.resize(img, (IMG_HEIGHT, IMG_WIDTH))
img = tf.expand_dims(img, axis=0)
img = tf.cast(img, tf.float32)
img = tf.keras.applications.inception_v3.preprocess_input(img)
# local_logger.info(img.shape)
image_vector.append(img)
# image_vector = np.asarray(image_vector)
# local_logger.info(len(path_vector))
# local_logger.info(len(class_vector))
# local_logger.info(image_vector.shape)
    if FIRST_ONLY:
        first = get_first_detection(path_vector, class_vector, image_vector)
        local_logger.info('Localization is complete')
        local_logger.info('First positive found: ' + repr(first))
    else:
        total_pos = detect_people_all(path_vector, class_vector, image_vector)
        local_logger.info('Localization is complete')
        local_logger.info('Number of true positives: ' + repr(class_vector.count([1, 0])))
        local_logger.info('Number of predicted positives: ' + repr(total_pos))
"logging.getLogger",
"logging.StreamHandler",
"Person_detection_tan.tf.image.resize",
"Person_detection_tan.os.walk",
"Person_detection_tan.load_model",
"matplotlib.image.imread",
"Person_detection_tan.argparse.ArgumentParser",
"Person_detection_tan.pd.read_csv",
"Person_detection_tan.np.arange",
"Person_detection_tan.image_features_extract_model",
"Person_detection_tan.tf.keras.Model",
"pathlib.Path",
"matplotlib.pyplot.close",
"Person_detection_tan.os.path.basename",
"logging.FileHandler",
"skimage.filters.threshold_multiotsu",
"Person_detection_tan.tf.keras.applications.inception_v3.preprocess_input",
"Person_detection_tan.np.asarray",
"Person_detection_tan.np.sum",
"Image.open",
"matplotlib.patches.Rectangle",
"Person_detection_tan.tf.expand_dims",
"matplotlib.ticker.MultipleLocator",
"Person_detection_tan.np.ndenumerate",
"logging.Formatter",
"Person_detection_tan.tf.cast",
"matplotlib.pyplot.figure",
"Person_detection_tan.os.path.abspath"
] | [((1727, 1760), 'logging.getLogger', 'logging.getLogger', (['"""localization"""'], {}), "('localization')\n", (1744, 1760), False, 'import logging\n'), ((791, 816), 'Person_detection_tan.os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (806, 816), False, 'from Person_detection_tan import tf, pd, tqdm, os, np, argparse, IMG_HEIGHT, IMG_WIDTH, str2bool, preprocess_raise_img_vector, load_model, image_features_extract_model, image_flatten\n'), ((1966, 1999), 'Person_detection_tan.pd.read_csv', 'pd.read_csv', (['(RAISE_DIR + FILENAME)'], {}), '(RAISE_DIR + FILENAME)\n', (1977, 1999), False, 'from Person_detection_tan import tf, pd, tqdm, os, np, argparse, IMG_HEIGHT, IMG_WIDTH, str2bool, preprocess_raise_img_vector, load_model, image_features_extract_model, image_flatten\n'), ((2604, 2637), 'Person_detection_tan.pd.read_csv', 'pd.read_csv', (['(RAISE_DIR + FILENAME)'], {}), '(RAISE_DIR + FILENAME)\n', (2615, 2637), False, 'from Person_detection_tan import tf, pd, tqdm, os, np, argparse, IMG_HEIGHT, IMG_WIDTH, str2bool, preprocess_raise_img_vector, load_model, image_features_extract_model, image_flatten\n'), ((5352, 5405), 'Person_detection_tan.tf.keras.Model', 'tf.keras.Model', (['model.inputs', 'model.layers[-2].output'], {}), '(model.inputs, model.layers[-2].output)\n', (5366, 5405), False, 'from Person_detection_tan import tf, pd, tqdm, os, np, argparse, IMG_HEIGHT, IMG_WIDTH, str2bool, preprocess_raise_img_vector, load_model, image_features_extract_model, image_flatten\n'), ((6471, 6499), 'Person_detection_tan.np.asarray', 'np.asarray', (['flat_product_mat'], {}), '(flat_product_mat)\n', (6481, 6499), False, 'from Person_detection_tan import tf, pd, tqdm, os, np, argparse, IMG_HEIGHT, IMG_WIDTH, str2bool, preprocess_raise_img_vector, load_model, image_features_extract_model, image_flatten\n'), ((6647, 6680), 'Person_detection_tan.np.sum', 'np.sum', (['cubic_product_mat'], {'axis': '(2)'}), '(cubic_product_mat, axis=2)\n', (6653, 6680), False, 'from Person_detection_tan import tf, pd, tqdm, os, np, argparse, IMG_HEIGHT, IMG_WIDTH, str2bool, preprocess_raise_img_vector, load_model, image_features_extract_model, image_flatten\n'), ((9979, 9999), 'Image.open', 'Image.open', (['img_path'], {}), '(img_path)\n', (9989, 9999), False, 'import Image\n'), ((10146, 10172), 'Person_detection_tan.os.path.basename', 'os.path.basename', (['img_path'], {}), '(img_path)\n', (10162, 10172), False, 'from Person_detection_tan import tf, pd, tqdm, os, np, argparse, IMG_HEIGHT, IMG_WIDTH, str2bool, preprocess_raise_img_vector, load_model, image_features_extract_model, image_flatten\n'), ((10225, 10264), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(15, 9)', 'dpi': 'my_dpi'}), '(figsize=(15, 9), dpi=my_dpi)\n', (10235, 10264), True, 'import matplotlib.pyplot as plt\n'), ((10507, 10548), 'matplotlib.ticker.MultipleLocator', 'plticker.MultipleLocator', ([], {'base': 'myInterval'}), '(base=myInterval)\n', (10531, 10548), True, 'import matplotlib.ticker as plticker\n'), ((10891, 10913), 'Person_detection_tan.np.ndenumerate', 'np.ndenumerate', (['result'], {}), '(result)\n', (10905, 10913), False, 'from Person_detection_tan import tf, pd, tqdm, os, np, argparse, IMG_HEIGHT, IMG_WIDTH, str2bool, preprocess_raise_img_vector, load_model, image_features_extract_model, image_flatten\n'), ((11439, 11455), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (11448, 11455), True, 'import matplotlib.pyplot as plt\n'), ((11508, 11533), 
'Person_detection_tan.argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (11531, 11533), False, 'from Person_detection_tan import tf, pd, tqdm, os, np, argparse, IMG_HEIGHT, IMG_WIDTH, str2bool, preprocess_raise_img_vector, load_model, image_features_extract_model, image_flatten\n'), ((13414, 13437), 'logging.StreamHandler', 'logging.StreamHandler', ([], {}), '()\n', (13435, 13437), False, 'import logging\n'), ((13538, 13613), 'logging.Formatter', 'logging.Formatter', (['"""%(asctime)s - %(message)s"""'], {'datefmt': '"""%Y-%m-%d %H:%M:%S"""'}), "('%(asctime)s - %(message)s', datefmt='%Y-%m-%d %H:%M:%S')\n", (13555, 13613), False, 'import logging\n'), ((13934, 13991), 'Person_detection_tan.load_model', 'load_model', ([], {'model_dir': 'MODEL_DIR', 'checkpoint_dir': 'MODEL_DIR'}), '(model_dir=MODEL_DIR, checkpoint_dir=MODEL_DIR)\n', (13944, 13991), False, 'from Person_detection_tan import tf, pd, tqdm, os, np, argparse, IMG_HEIGHT, IMG_WIDTH, str2bool, preprocess_raise_img_vector, load_model, image_features_extract_model, image_flatten\n'), ((2259, 2291), 'Person_detection_tan.os.walk', 'os.walk', (["(RAISE_DIR + 'RaiseTest')"], {}), "(RAISE_DIR + 'RaiseTest')\n", (2266, 2291), False, 'from Person_detection_tan import tf, pd, tqdm, os, np, argparse, IMG_HEIGHT, IMG_WIDTH, str2bool, preprocess_raise_img_vector, load_model, image_features_extract_model, image_flatten\n'), ((2970, 3002), 'Person_detection_tan.os.walk', 'os.walk', (["(RAISE_DIR + 'RaiseTest')"], {}), "(RAISE_DIR + 'RaiseTest')\n", (2977, 3002), False, 'from Person_detection_tan import tf, pd, tqdm, os, np, argparse, IMG_HEIGHT, IMG_WIDTH, str2bool, preprocess_raise_img_vector, load_model, image_features_extract_model, image_flatten\n'), ((5053, 5106), 'Person_detection_tan.tf.keras.Model', 'tf.keras.Model', (['model.inputs', 'model.layers[-1].output'], {}), '(model.inputs, model.layers[-1].output)\n', (5067, 5106), False, 'from Person_detection_tan import tf, pd, tqdm, os, np, argparse, IMG_HEIGHT, IMG_WIDTH, str2bool, preprocess_raise_img_vector, load_model, image_features_extract_model, image_flatten\n'), ((5677, 5726), 'Person_detection_tan.image_features_extract_model', 'image_features_extract_model', (['img'], {'training': '(False)'}), '(img, training=False)\n', (5705, 5726), False, 'from Person_detection_tan import tf, pd, tqdm, os, np, argparse, IMG_HEIGHT, IMG_WIDTH, str2bool, preprocess_raise_img_vector, load_model, image_features_extract_model, image_flatten\n'), ((8430, 8476), 'skimage.filters.threshold_multiotsu', 'threshold_multiotsu', (['no_neg_sum_mat'], {'classes': '(3)'}), '(no_neg_sum_mat, classes=3)\n', (8449, 8476), False, 'from skimage.filters import threshold_otsu, threshold_multiotsu\n'), ((10664, 10705), 'Person_detection_tan.np.arange', 'np.arange', (['(0)', '(15 * myInterval)', 'myInterval'], {}), '(0, 15 * myInterval, myInterval)\n', (10673, 10705), False, 'from Person_detection_tan import tf, pd, tqdm, os, np, argparse, IMG_HEIGHT, IMG_WIDTH, str2bool, preprocess_raise_img_vector, load_model, image_features_extract_model, image_flatten\n'), ((13171, 13234), 'logging.FileHandler', 'logging.FileHandler', (["(plot_parent_dir + 'localization_first.log')"], {}), "(plot_parent_dir + 'localization_first.log')\n", (13190, 13234), False, 'import logging\n'), ((13260, 13317), 'logging.FileHandler', 'logging.FileHandler', (["(plot_parent_dir + 'localization.log')"], {}), "(plot_parent_dir + 'localization.log')\n", (13279, 13317), False, 'import logging\n'), ((14661, 14683), 
'matplotlib.image.imread', 'image.imread', (['img_path'], {}), '(img_path)\n', (14673, 14683), False, 'from matplotlib import image\n'), ((14699, 14744), 'Person_detection_tan.tf.image.resize', 'tf.image.resize', (['img', '(IMG_HEIGHT, IMG_WIDTH)'], {}), '(img, (IMG_HEIGHT, IMG_WIDTH))\n', (14714, 14744), False, 'from Person_detection_tan import tf, pd, tqdm, os, np, argparse, IMG_HEIGHT, IMG_WIDTH, str2bool, preprocess_raise_img_vector, load_model, image_features_extract_model, image_flatten\n'), ((14760, 14787), 'Person_detection_tan.tf.expand_dims', 'tf.expand_dims', (['img'], {'axis': '(0)'}), '(img, axis=0)\n', (14774, 14787), False, 'from Person_detection_tan import tf, pd, tqdm, os, np, argparse, IMG_HEIGHT, IMG_WIDTH, str2bool, preprocess_raise_img_vector, load_model, image_features_extract_model, image_flatten\n'), ((14803, 14827), 'Person_detection_tan.tf.cast', 'tf.cast', (['img', 'tf.float32'], {}), '(img, tf.float32)\n', (14810, 14827), False, 'from Person_detection_tan import tf, pd, tqdm, os, np, argparse, IMG_HEIGHT, IMG_WIDTH, str2bool, preprocess_raise_img_vector, load_model, image_features_extract_model, image_flatten\n'), ((14843, 14899), 'Person_detection_tan.tf.keras.applications.inception_v3.preprocess_input', 'tf.keras.applications.inception_v3.preprocess_input', (['img'], {}), '(img)\n', (14894, 14899), False, 'from Person_detection_tan import tf, pd, tqdm, os, np, argparse, IMG_HEIGHT, IMG_WIDTH, str2bool, preprocess_raise_img_vector, load_model, image_features_extract_model, image_flatten\n'), ((11105, 11238), 'matplotlib.patches.Rectangle', 'patches.Rectangle', (['(j * myInterval, i * myInterval)', 'myInterval', 'myInterval'], {'linewidth': '(2)', 'edgecolor': '"""r"""', 'facecolor': '(0, 1, 0, 0.3)'}), "((j * myInterval, i * myInterval), myInterval, myInterval,\n linewidth=2, edgecolor='r', facecolor=(0, 1, 0, 0.3))\n", (11122, 11238), True, 'import matplotlib.patches as patches\n'), ((12969, 12984), 'pathlib.Path', 'Path', (['plot_path'], {}), '(plot_path)\n', (12973, 12984), False, 'from pathlib import Path\n')] |
from keras.models import Model, save_model
from keras.layers import Input, Dense, Dropout, LSTM
from keras.optimizers import Adam
import h5py
import time
from keras.utils.io_utils import HDF5Matrix
from models.my_callbacks import *
USE_TITANX = True
def createModel(input_sequence_dim, audio_vector_dim):
# input_sequence_dim: tuple of dimensions e.g (1,4096)
# audio_vector_dim: int of dimension e.g 18
timesteps, features = input_sequence_dim
input_sequences = Input(shape=(timesteps, features)) # (1,4096)
# Note that LSTM expects input shape: (nb_samples, timesteps, feature_dim)
x = LSTM(256, dropout=0.2, return_sequences=True, name='LSTM_layer1')(input_sequences)
x = Dropout(0.2)(x)
x = LSTM(256, dropout=0.2, name='LSTM_layer2')(x)
network_output = Dense(audio_vector_dim, name='regression_out')(x)
model = Model(inputs=input_sequences, outputs=network_output)
# Use the Adam optimizer for gradient descent
    adam = Adam(lr=0.5e-6)
    # Pass the optimizer instance (the string 'adam' would ignore the custom learning rate)
    model.compile(loss='mean_squared_error', optimizer=adam)
print(model.summary())
return model
# Testing if the model compiles
# model = createModel((1,4096), 18)
print(">>> STARTING TIME:", str(time.strftime("%m-%d_%H-%M-%S")))
#############
### READING THE DATASET
# Define the external SSD where the dataset resides
if USE_TITANX:
data_dir = '/home/zanoi/ZANOI/auditory_hallucinations_data/'
else:
data_dir = '/Volumes/SAMSUNG_SSD_256GB/ADV_CV/data/'
data_file = data_dir + 'TopAngleFC1_dataX_dataY.h5'
with h5py.File(data_file, 'r') as hf:
print("Reading data from file..")
dataX_sample = hf['dataX_train'][0]
dataY_sample = hf['dataY_train'][0]
print("dataX_sample.shape:", dataX_sample.shape)
print("dataY_sample.shape:", dataY_sample.shape)
dataX_train = hf['dataX_train'] # Adding the [:] actually loads it into memory
dataY_train = hf['dataY_train']
dataX_test = hf['dataX_test']
dataY_test = hf['dataY_test']
print("dataX_train.shape:", dataX_train.shape)
print("dataY_train.shape:", dataY_train.shape)
print("dataX_test.shape:", dataX_test.shape)
print("dataY_test.shape:", dataY_test.shape)
    timesteps, features = dataX_sample.shape  # each sample is (1, 4096)
    audio_vector_dim = dataY_sample.shape[0]
# Load data into HDF5Matrix object, which reads the file from disk and does not put it into RAM
dataX_train = HDF5Matrix(data_file, 'dataX_train')
dataY_train = HDF5Matrix(data_file, 'dataY_train')
dataX_test = HDF5Matrix(data_file, 'dataX_test')
dataY_test = HDF5Matrix(data_file, 'dataY_test')
#############
### BUILD THE MODEL
model = createModel((timesteps, features),audio_vector_dim)
#############
# Uses a special callback class from models.my_callbacks
testSeqCallback = predictSeqCallback()
# Put these in a callback list
callbacks_list = [testSeqCallback]
# This function actually starts the training
#model.fit(dataX, dataY, epochs=500, batch_size=256, callbacks=callbacks_list, verbose=2)
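# Note: if fit() raises an indexing error with HDF5Matrix inputs, pass shuffle='batch' (HDF5 reads require sorted indices)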
model.fit(dataX_train, dataY_train, epochs=500, batch_size=10000, validation_data=[dataX_test,dataY_test], verbose=1, callbacks=callbacks_list)
print ("Saving trained model...")
model_prefix = 'FC_LSTM_TopAngleFC1_v2'
model_path = "../trained_models/" + model_prefix + ".h5"
save_model(model, model_path, overwrite=True) # saves weights, network topology and optimizer state (if any)
print ("--- {EVERYTHING COMPLETE HOMIEEEEEEEEE} ---")
| [
"keras.optimizers.Adam",
"time.strftime",
"keras.utils.io_utils.HDF5Matrix",
"h5py.File",
"keras.layers.LSTM",
"keras.layers.Input",
"keras.models.Model",
"keras.models.save_model",
"keras.layers.Dense",
"keras.layers.Dropout"
] | [((2579, 2615), 'keras.utils.io_utils.HDF5Matrix', 'HDF5Matrix', (['data_file', '"""dataX_train"""'], {}), "(data_file, 'dataX_train')\n", (2589, 2615), False, 'from keras.utils.io_utils import HDF5Matrix\n'), ((2630, 2666), 'keras.utils.io_utils.HDF5Matrix', 'HDF5Matrix', (['data_file', '"""dataY_train"""'], {}), "(data_file, 'dataY_train')\n", (2640, 2666), False, 'from keras.utils.io_utils import HDF5Matrix\n'), ((2680, 2715), 'keras.utils.io_utils.HDF5Matrix', 'HDF5Matrix', (['data_file', '"""dataX_test"""'], {}), "(data_file, 'dataX_test')\n", (2690, 2715), False, 'from keras.utils.io_utils import HDF5Matrix\n'), ((2729, 2764), 'keras.utils.io_utils.HDF5Matrix', 'HDF5Matrix', (['data_file', '"""dataY_test"""'], {}), "(data_file, 'dataY_test')\n", (2739, 2764), False, 'from keras.utils.io_utils import HDF5Matrix\n'), ((3481, 3526), 'keras.models.save_model', 'save_model', (['model', 'model_path'], {'overwrite': '(True)'}), '(model, model_path, overwrite=True)\n', (3491, 3526), False, 'from keras.models import save_model\n'), ((575, 609), 'keras.layers.Input', 'Input', ([], {'shape': '(timesteps, features)'}), '(shape=(timesteps, features))\n', (580, 609), False, 'from keras.layers import Input, Dense\n'), ((955, 1008), 'keras.models.Model', 'Model', ([], {'inputs': 'input_sequences', 'outputs': 'network_output'}), '(inputs=input_sequences, outputs=network_output)\n', (960, 1008), False, 'from keras.models import Model\n'), ((1071, 1085), 'keras.optimizers.Adam', 'Adam', ([], {'lr': '(5e-07)'}), '(lr=5e-07)\n', (1075, 1085), False, 'from keras.optimizers import SGD, Adam\n'), ((1713, 1738), 'h5py.File', 'h5py.File', (['data_file', '"""r"""'], {}), "(data_file, 'r')\n", (1722, 1738), False, 'import h5py\n'), ((710, 775), 'keras.layers.LSTM', 'LSTM', (['(256)'], {'dropout': '(0.2)', 'return_sequences': '(True)', 'name': '"""LSTM_layer1"""'}), "(256, dropout=0.2, return_sequences=True, name='LSTM_layer1')\n", (714, 775), False, 'from keras.layers import LSTM\n'), ((801, 813), 'keras.layers.Dropout', 'Dropout', (['(0.2)'], {}), '(0.2)\n', (808, 813), False, 'from keras.layers import Dense, Dropout\n'), ((825, 867), 'keras.layers.LSTM', 'LSTM', (['(256)'], {'dropout': '(0.2)', 'name': '"""LSTM_layer2"""'}), "(256, dropout=0.2, name='LSTM_layer2')\n", (829, 867), False, 'from keras.layers import LSTM\n'), ((892, 938), 'keras.layers.Dense', 'Dense', (['audio_vector_dim'], {'name': '"""regression_out"""'}), "(audio_vector_dim, name='regression_out')\n", (897, 938), False, 'from keras.layers import Dense, Dropout\n'), ((1384, 1415), 'time.strftime', 'time.strftime', (['"""%m-%d_%H-%M-%S"""'], {}), "('%m-%d_%H-%M-%S')\n", (1397, 1415), False, 'import time\n')] |
# ---------------------------------------------------------------------
# Huawei.VRP.get_metrics
# ---------------------------------------------------------------------
# Copyright (C) 2007-2020 The NOC Project
# See LICENSE for details
# ---------------------------------------------------------------------
# NOC modules
from noc.sa.profiles.Generic.get_metrics import Script as GetMetricsScript, metrics
from .oidrules.slot import SlotRule
from .oidrules.sslot import SSlotRule
from noc.core.mib import mib
class Script(GetMetricsScript):
name = "Huawei.VRP.get_metrics"
OID_RULES = [SlotRule, SSlotRule]
@metrics(
["Interface | Status | Duplex"],
has_capability="DB | Interfaces",
matcher="is_cx200X",
volatile=False,
access="S",
)
def get_duplex_interface_metrics(self, metrics):
if_map = {
m.ifindex: m.labels
for m in metrics
if m.ifindex and m.metric == "Interface | Status | Duplex"
}
for oid, duplex in self.snmp.getnext(mib["EtherLike-MIB::dot3StatsDuplexStatus"]):
_, ifindex = oid.rsplit(".", 1)
if int(ifindex) not in if_map:
continue
self.set_metric(id=("Interface | Status | Duplex", if_map[int(ifindex)]), value=duplex)
@metrics(
["Subscribers | Summary"],
has_capability="BRAS | PPPoE",
volatile=False,
access="S", # not CLI version
)
def get_subscribers_metrics(self, metrics):
if "Slot | Member Ids" in self.capabilities:
hwSlotIndex = self.capabilities["Slot | Member Ids"].split(" | ")
for si in hwSlotIndex:
for mi in [0, 1]:
v = self.snmp.get(f"1.3.6.1.4.1.2011.5.2.1.33.1.8.{si}.{mi}")
if v:
self.set_metric(
id=("Subscribers | Summary", None),
labels=("noc::chassis::0", f"noc::slot::{si}", f"noc::module::{mi}"),
value=int(v),
multi=True,
)
v = self.snmp.get("1.3.6.1.4.1.2011.5.2.1.14.1.2.0")
if v:
self.set_metric(
id=("Subscribers | Summary", None),
labels=[],
value=int(v),
multi=True,
)
@metrics(
[
"Interface | CBQOS | Drops | In | Delta",
"Interface | CBQOS | Drops | Out | Delta",
"Interface | CBQOS | Octets | In | Delta",
"Interface | CBQOS | Octets | Out | Delta",
"Interface | CBQOS | Packets | In | Delta",
"Interface | CBQOS | Packets | Out | Delta",
],
volatile=False,
access="S", # CLI version
)
def get_interface_cbqos_metrics_snmp(self, metrics):
"""
Use available SNMP Table for collecting value
:param metrics:
:return:
"""
if self.has_capability("Huawei | OID | hwCBQoSPolicyStatisticsClassifierTable"):
self.get_interface_cbqos_metrics_policy_snmp(metrics)
elif self.has_capability("Huawei | OID | hwCBQoSClassifierStatisticsTable"):
self.get_interface_cbqos_metrics_classifier_snmp(metrics)
def get_interface_cbqos_metrics_classifier_snmp(self, metrics):
self.logger.debug("Use hwCBQoSClassifierStatisticsTable for collected metrics")
ifaces = {m.ifindex: m.labels for m in metrics if m.ifindex}
direction_map = {1: "In", 2: "Out"}
class_map = {}
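        # Map each classifier's OID suffix to its human-readable class name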
for oid, name in self.snmp.getnext(mib["HUAWEI-CBQOS-MIB::hwCBQoSClassifierName"]):
class_map[oid.rsplit(".", 1)[-1]] = name
for index, packets, bytes, discards in self.snmp.get_tables(
[
mib["HUAWEI-CBQOS-MIB::hwCBQoSClassifierMatchedPackets"],
mib["HUAWEI-CBQOS-MIB::hwCBQoSClassifierMatchedBytes"],
mib["HUAWEI-CBQOS-MIB::hwCBQoSClassifierMatchedDropPackets"],
]
):
            ifindex, direction, ifvlanid1, ifvlanid2, classifier = index.split(".")
            # SNMP index parts come back as strings; convert so the int-keyed lookups below match
            ifindex, direction = int(ifindex), int(direction)
            if ifindex not in ifaces:
continue
ts = self.get_ts()
for metric, value in [
(f"Interface | CBQOS | Drops | {direction_map[direction]} | Delta", discards),
(f"Interface | CBQOS | Octets | {direction_map[direction]} | Delta", bytes),
# (f"Interface | CBQOS | Octets | {direction_map[direction]}", bytes),
(f"Interface | CBQOS | Packets | {direction_map[direction]} | Delta", packets),
# (f"Interface | CBQOS | Packets | {direction_map[direction]}", packets),
]:
scale = 1
self.set_metric(
id=(metric, ifaces[ifindex]),
metric=metric,
value=float(value),
ts=ts,
labels=ifaces[ifindex] + [f"noc::traffic_class::{class_map[classifier]}"],
multi=True,
type="delta" if metric.endswith("Delta") else "gauge",
scale=scale,
)
def get_interface_cbqos_metrics_policy_snmp(self, metrics):
self.logger.debug("Use hwCBQoSPolicyStatisticsClassifierTable for collected metrics")
ifaces = {m.ifindex: m.labels for m in metrics if m.ifindex}
direction_map = {"1": "In", "2": "Out"}
for index, packets, bytes, discards in self.snmp.get_tables(
[
mib["HUAWEI-CBQOS-MIB::hwCBQoSPolicyStatClassifierMatchedPassPackets"],
mib["HUAWEI-CBQOS-MIB::hwCBQoSPolicyStatClassifierMatchedPassBytes"],
mib["HUAWEI-CBQOS-MIB::hwCBQoSPolicyStatClassifierMatchedDropPackets"],
]
):
ifindex, ifvlanid1, direction, classifier = index.split(".", 3)
ifindex = int(ifindex)
if not ifindex or ifindex not in ifaces:
self.logger.info("Interface Vlan %s not collected", ifvlanid1)
# Interface vlan
continue
traffic_class = "".join(chr(int(c)) for c in classifier.split(".")[1:])
ts = self.get_ts()
for metric, value in [
(f"Interface | CBQOS | Drops | {direction_map[direction]} | Delta", discards),
(f"Interface | CBQOS | Octets | {direction_map[direction]} | Delta", bytes),
# (f"Interface | CBQOS | Load | {direction_map[direction]}", bytes),
(f"Interface | CBQOS | Packets | {direction_map[direction]} | Delta", packets),
# (f"Interface | CBQOS | Packets | {direction_map[direction]}", packets),
]:
mtype, scale = "gauge", 1
if metric.endswith("Delta"):
mtype = "delta"
self.set_metric(
id=(metric, ifaces[ifindex]),
metric=metric,
value=float(value),
ts=ts,
labels=ifaces[ifindex] + [f"noc::traffic_class::{traffic_class}"],
multi=True,
type=mtype,
scale=scale,
)
# @metrics(
# ["Interface | Errors | CRC", "Interface | Errors | Frame"],
# has_capability="DB | Interfaces",
# volatile=False,
# access="C", # CLI version
# )
# def get_vrp_interface_metrics(self, metrics):
# v = self.cli("display interface")
# ifdata = self.profile.parse_ifaces(v)
# for iface, data in ifdata.items():
# iface = self.profile.convert_interface_name(iface)
# ipath = ["", "", "", iface]
# if "CRC" in data:
# self.set_metric(id=("Interface | Errors | CRC", ipath), value=int(data["CRC"]))
# if "Frames" in data:
# self.set_metric(id=("Interface | Errors | Frame", ipath), value=int(data["Frames"]))
| [
"noc.sa.profiles.Generic.get_metrics.metrics"
] | [((626, 753), 'noc.sa.profiles.Generic.get_metrics.metrics', 'metrics', (["['Interface | Status | Duplex']"], {'has_capability': '"""DB | Interfaces"""', 'matcher': '"""is_cx200X"""', 'volatile': '(False)', 'access': '"""S"""'}), "(['Interface | Status | Duplex'], has_capability='DB | Interfaces',\n matcher='is_cx200X', volatile=False, access='S')\n", (633, 753), False, 'from noc.sa.profiles.Generic.get_metrics import Script as GetMetricsScript, metrics\n'), ((1320, 1418), 'noc.sa.profiles.Generic.get_metrics.metrics', 'metrics', (["['Subscribers | Summary']"], {'has_capability': '"""BRAS | PPPoE"""', 'volatile': '(False)', 'access': '"""S"""'}), "(['Subscribers | Summary'], has_capability='BRAS | PPPoE', volatile=\n False, access='S')\n", (1327, 1418), False, 'from noc.sa.profiles.Generic.get_metrics import Script as GetMetricsScript, metrics\n'), ((2400, 2718), 'noc.sa.profiles.Generic.get_metrics.metrics', 'metrics', (["['Interface | CBQOS | Drops | In | Delta',\n 'Interface | CBQOS | Drops | Out | Delta',\n 'Interface | CBQOS | Octets | In | Delta',\n 'Interface | CBQOS | Octets | Out | Delta',\n 'Interface | CBQOS | Packets | In | Delta',\n 'Interface | CBQOS | Packets | Out | Delta']"], {'volatile': '(False)', 'access': '"""S"""'}), "(['Interface | CBQOS | Drops | In | Delta',\n 'Interface | CBQOS | Drops | Out | Delta',\n 'Interface | CBQOS | Octets | In | Delta',\n 'Interface | CBQOS | Octets | Out | Delta',\n 'Interface | CBQOS | Packets | In | Delta',\n 'Interface | CBQOS | Packets | Out | Delta'], volatile=False, access='S')\n", (2407, 2718), False, 'from noc.sa.profiles.Generic.get_metrics import Script as GetMetricsScript, metrics\n')] |
"""Module for the passage entity.
Available functions:
- current_passage_offset: Get the offset of the current passage in a scroll based on the time in seconds.
"""
import math
def current_passage_offset(scroll_size, passage_size, reading_every, seconds):
"""Get the offset of the current passage in a scroll. We assume that the scroll has been being
read since the beginning of time, where a new passage of `passage_size` characters is read every
`reading_every` seconds. We assume that after final passage in the scroll is read, the scroll is
started from the beginning, and so on. `seconds` represents the number of seconds since the
beginning of time. `scroll_size` represents the total number of characters in the scroll.
# From a 270-char scroll, we're reading 40-char passages every 4 seconds. Since we are at 0
# seconds, our offset is 0 -- the offset of the first passage.
>>> current_passage_offset(scroll_size=270, passage_size=40, reading_every=4, seconds=0)
0
# A second elapses. Since a full 4 second window has not yet elapsed, we are still at offset 0.
>>> current_passage_offset(scroll_size=270, passage_size=40, reading_every=4, seconds=1)
0
# Four seconds have elapsed. We are now reading from the second passage, at character 40.
>>> current_passage_offset(scroll_size=270, passage_size=40, reading_every=4, seconds=4)
40
# Five more windows (20 seconds) elapse. We are five passages (200 characters) further into the
# text. Note that the final passage in this case would not be a full length passage.
>>> current_passage_offset(scroll_size=270, passage_size=40, reading_every=4, seconds=24)
240
# Another four seconds elapse. We are back at the beginning of the scroll!
>>> current_passage_offset(scroll_size=270, passage_size=40, reading_every=4, seconds=28)
0
"""
num_passages_in_scroll = math.ceil(scroll_size / passage_size)
windows_elapsed = seconds // reading_every
current_passage_number = int(windows_elapsed % num_passages_in_scroll)
return current_passage_number * passage_size
| [
"math.ceil"
] | [((1918, 1955), 'math.ceil', 'math.ceil', (['(scroll_size / passage_size)'], {}), '(scroll_size / passage_size)\n', (1927, 1955), False, 'import math\n')] |
"""@package etddf
Shares the N most recent measurements of an agent into the buffer
"""
__author__ = "<NAME>"
__copyright__ = "Copyright 2020, COHRINT Lab"
__email__ = "<EMAIL>"
__status__ = "Development"
__license__ = "MIT"
__maintainer__ = "<NAME>"
from copy import deepcopy
from etddf.etfilter import ETFilter, ETFilter_Main
from etddf.ros2python import get_internal_meas_from_ros_meas
from etddf_minau.msg import Measurement
import time
from pdb import set_trace as st
import numpy as np
import scipy
import scipy.optimize
import rospy
# Just used for debugging
# class Measurement:
# def __init__(self, meas_type, stamp, src_asset, measured_asset, data, variance, global_pose):
# self.meas_type = meas_type
# self.stamp = stamp
# self.src_asset = src_asset
# self.measured_asset = measured_asset
# self.data = data
# self.variance = variance
# self.global_pose = global_pose
class MostRecent:
"""Windowed Communication Event Triggered Communication
    Provides a buffer that can be pulled and received from another asset. Simply shares the N most recent measurements taken by this agent.
"""
def __init__(self, num_ownship_states, x0, P0, buffer_capacity, meas_space_table, delta_codebook_table, delta_multipliers, asset2id, my_name, default_meas_variance):
"""Constructor
Arguments:
num_ownship_states {int} -- Number of ownship states for each asset
x0 {np.ndarray} -- initial states
P0 {np.ndarray} -- initial uncertainty
buffer_capacity {int} -- capacity of measurement buffer
meas_space_table {dict} -- Hash that stores how much buffer space a measurement takes up. Str (meas type) -> int (buffer space)
delta_codebook_table {dict} -- Hash that stores delta trigger for each measurement type. Str(meas type) -> float (delta trigger)
delta_multipliers {list} -- List of delta trigger multipliers
asset2id {dict} -- Hash to get the id number of an asset from the string name
            my_name {str} -- Name to look up in asset2id for the current asset's ID#
default_meas_variance {dict} -- Hash to get measurement variance
"""
self.meas_ledger = []
self.asset2id = asset2id
self.my_name = my_name
self.default_meas_variance = default_meas_variance
self.filter = ETFilter(asset2id[my_name], num_ownship_states, 3, x0, P0, True)
# Remember for instantiating new LedgerFilters
self.num_ownship_states = num_ownship_states
self.buffer_capacity = buffer_capacity
self.meas_space_table = meas_space_table
self.last_update_time = None
def add_meas(self, ros_meas, common=False):
"""Adds a measurement to filter
Arguments:
ros_meas {etddf.Measurement.msg} -- Measurement taken
Keyword Arguments:
delta_multiplier {int} -- not used (left to keep consistent interface)
force_fuse {bool} -- not used
"""
src_id = self.asset2id[ros_meas.src_asset]
if ros_meas.measured_asset in self.asset2id.keys():
measured_id = self.asset2id[ros_meas.measured_asset]
elif ros_meas.measured_asset == "":
measured_id = -1 #self.asset2id["surface"]
else:
rospy.logerr("ETDDF doesn't recognize: " + ros_meas.measured_asset + " ... ignoring")
return
meas = get_internal_meas_from_ros_meas(ros_meas, src_id, measured_id)
self.filter.add_meas(meas)
self.meas_ledger.append(ros_meas)
@staticmethod
def run_covariance_intersection(xa, Pa, xb, Pb):
"""Runs covariance intersection on the two estimates A and B
Arguments:
xa {np.ndarray} -- mean of A
Pa {np.ndarray} -- covariance of A
xb {np.ndarray} -- mean of B
Pb {np.ndarray} -- covariance of B
Returns:
c_bar {np.ndarray} -- intersected estimate
Pcc {np.ndarray} -- intersected covariance
"""
Pa_inv = np.linalg.inv(Pa)
Pb_inv = np.linalg.inv(Pb)
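        # Choose the omega in [0, 1] that minimizes the trace of the fused covariance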
fxn = lambda omega: np.trace(np.linalg.inv(omega*Pa_inv + (1-omega)*Pb_inv))
omega_optimal = scipy.optimize.minimize_scalar(fxn, bounds=(0,1), method="bounded").x
# print("Omega: {}".format(omega_optimal)) # We'd expect a value of 1
Pcc = np.linalg.inv(omega_optimal*Pa_inv + (1-omega_optimal)*Pb_inv)
c_bar = Pcc.dot( omega_optimal*Pa_inv.dot(xa) + (1-omega_optimal)*Pb_inv.dot(xb))
jump = max( [np.linalg.norm(c_bar - xa), np.linalg.norm(c_bar - xb)] )
if jump > 10: # Think this is due to a floating point error in the inversion
print("!!!!!!!!!!! BIG JUMP!!!!!!!")
print(xa)
print(xb)
print(c_bar)
print(omega_optimal)
print(Pa)
print(Pb)
print(Pcc)
return c_bar.reshape(-1,1), Pcc
def psci(self, x_prior, P_prior, c_bar, Pcc):
""" Partial State Update all other states of the filter using the result of CI
Arguments:
x_prior {np.ndarray} -- This filter's prior estimate (over common states)
P_prior {np.ndarray} -- This filter's prior covariance
c_bar {np.ndarray} -- intersected estimate
Pcc {np.ndarray} -- intersected covariance
Returns:
None
Updates self.main_filter.filter.x_hat and P, the delta tier's primary estimate
"""
# Full state estimates
x = self.filter.x_hat
P = self.filter.P
D_inv = np.linalg.inv(Pcc) - np.linalg.inv(P_prior)
D_inv_d = np.dot( np.linalg.inv(Pcc), c_bar) - np.dot( np.linalg.inv(P_prior), x_prior)
my_id = self.asset2id[self.my_name]
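        # Locate this asset's block within the joint state vector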
begin_ind = my_id*self.num_ownship_states
end_ind = (my_id+1)*self.num_ownship_states
info_vector = np.zeros( x.shape )
info_vector[begin_ind:end_ind] = D_inv_d
info_matrix = np.zeros( P.shape )
info_matrix[begin_ind:end_ind, begin_ind:end_ind] = D_inv
posterior_cov = np.linalg.inv( np.linalg.inv( P ) + info_matrix )
tmp = np.dot(np.linalg.inv( P ), x) + info_vector
posterior_state = np.dot( posterior_cov, tmp )
self.filter.x_hat = posterior_state
self.filter.P = posterior_cov
def intersect(self, x, P):
"""Runs covariance intersection with main filter's estimate
Arguments:
x {np.ndarray} -- other filter's mean
P {np.ndarray} -- other filter's covariance
Returns:
c_bar {np.ndarray} -- intersected estimate
Pcc {np.ndarray} -- intersected covariance
"""
my_id = self.asset2id[self.my_name]
# Slice out overlapping states in main filter
begin_ind = my_id*self.num_ownship_states
end_ind = (my_id+1)*self.num_ownship_states
x_prior = self.filter.x_hat[begin_ind:end_ind].reshape(-1,1)
P_prior = self.filter.P[begin_ind:end_ind,begin_ind:end_ind]
P_prior = P_prior.reshape(self.num_ownship_states, self.num_ownship_states)
c_bar, Pcc = MostRecent.run_covariance_intersection(x, P, x_prior, P_prior)
# Update main filter states
if Pcc.shape != self.filter.P.shape:
self.psci(x_prior, P_prior, c_bar, Pcc)
# self.filter.x_hat[begin_ind:end_ind] = c_bar
# self.filter.P[begin_ind:end_ind,begin_ind:end_ind] = Pcc
else:
self.filter.x_hat = c_bar
self.filter.P = Pcc
return c_bar, Pcc
def _add_variances(self, buffer):
for msg in buffer:
if "_burst" in msg.meas_type:
meas_type = msg.meas_type.split("_burst")[0]
else:
meas_type = msg.meas_type
msg.variance = self.default_meas_variance[meas_type] * 2.0
return buffer
    def catch_up(self, index, buffer=None):
        pass
def receive_buffer(self, buffer, mult, src_asset):
"""Updates estimate based on buffer
Arguments:
delta_multiplier {float} -- multiplier to scale et_delta's with
shared_buffer {list} -- buffer shared from another asset
Returns:
int -- implicit measurement count in shared_buffer
int -- explicit measurement count in this shared_buffer
"""
buffer = self._add_variances(buffer)
# buffer = self._add_etdeltas(buffer, delta_multiplier)
for meas in buffer: # Fuse all of the measurements now
self.add_meas(meas)
return 0, len(buffer)
def pull_buffer(self):
"""Pulls all measurements that'll fit
Returns:
multiplier {float} -- the delta multiplier that was chosen
buffer {list} -- the buffer of ros measurements
"""
buffer = []
cost = 0
ind = -1
while abs(ind) <= len(self.meas_ledger):
new_meas = self.meas_ledger[ind]
space = self.meas_space_table[new_meas.meas_type]
if cost + space <= self.buffer_capacity:
if "sonar_z" not in new_meas.meas_type and "modem" not in new_meas.meas_type and "gps" not in new_meas.meas_type:
buffer.append(new_meas)
cost += space
else:
break
ind -= 1
self.meas_ledger = []
return 1, buffer
def update(self, update_time, u, Q, nav_mean, nav_cov):
"""Execute Prediction & Correction Step in filter
Arguments:
update_time {time} -- Update time to record on the ledger update times
u {np.ndarray} -- control input (num_ownship_states / 2, 1)
Q {np.ndarray} -- motion/process noise (nstates, nstates)
nav filter mean
nav filter covariance
"""
if self.last_update_time is not None:
time_delta = (update_time - self.last_update_time).to_sec()
self.filter.predict(u, Q, time_delta, use_control_input=False)
# Run correction step on filter
self.filter.correct()
# Intersect
c_bar, Pcc = None, None
if nav_mean is not None and nav_cov is not None:
# print("***************************8 Intersecting **********************************")
c_bar, Pcc = self.intersect(nav_mean, nav_cov)
self.last_update_time = update_time
return c_bar, Pcc
def get_asset_estimate(self, asset_name):
"""Gets main filter's estimate of an asset
Arguments:
asset_name {str} -- Name of asset
Returns
np.ndarray -- Mean estimate of asset (num_ownship_states, 1)
np.ndarray -- Covariance of estimate of asset (num_ownship_states, num_ownship_states)
"""
asset_id = self.asset2id[asset_name]
begin_ind = asset_id*self.num_ownship_states
end_ind = (asset_id+1)*self.num_ownship_states
asset_mean = self.filter.x_hat[begin_ind:end_ind,0]
asset_unc = self.filter.P[begin_ind:end_ind,begin_ind:end_ind]
return deepcopy(asset_mean), deepcopy(asset_unc)
def debug_print_buffers(self):
return self.meas_ledger
if __name__ == "__main__":
# TODO to run these tests, uncomment Measurement class above
# Test plumbing
test_buffer_pull = True
test_catch_up = not test_buffer_pull
import numpy as np
    x0 = np.zeros((6, 1))
    P = np.ones((6, 6)) * 10
    meas_space_table = {"depth": 2, "dvl_x": 2, "dvl_y": 2, "bookend": 1, "bookstart": 1, "final_time": 0}
    delta_codebook_table = {"depth": 1.0, "dvl_x": 1, "dvl_y": 1}
    default_meas_variance = {"depth": 0.1, "dvl_x": 0.1, "dvl_y": 0.1}
    asset2id = {"my_name": 0}
    buffer_cap = 10
    dt = MostRecent(6, x0, P, buffer_cap, meas_space_table, delta_codebook_table,
                    [0.5, 1.5], asset2id, "my_name", default_meas_variance)
    Q = np.eye(6)
    Q[3:, 3:] = np.zeros(Q[3:, 3:].shape)
    u = np.array([[0.1, 0.1, -0.1]]).T
    t1 = rospy.Time.from_sec(time.time())
    z = Measurement("depth", t1, "my_name", "", -1, 0.1, [])
    dvl_x = Measurement("dvl_x", t1, "my_name", "", 1, 0.1, [])
    dvl_y = Measurement("dvl_y", t1, "my_name", "", 1, 0.1, [])
    dt.add_meas(z)
    dt.add_meas(dvl_x)
    dt.add_meas(dvl_y)
    # MostRecent exposes update() (predict + correct) rather than separate calls
    dt.update(t1, u, Q, None, None)
    t2 = rospy.Time.from_sec(time.time())
    z.stamp = t2
    dvl_x.stamp = t2
    dvl_y.stamp = t2
    dvl_x.data = 2
    dvl_y.data = 2
    dt.add_meas(z)
    dt.add_meas(dvl_x)
    dt.add_meas(dvl_y)
    dt.update(t2, u, Q, None, None)
    print(dt.get_asset_estimate("my_name"))
##### Test Buffer Pulling #####
if test_buffer_pull:
mult, buffer = dt.pull_buffer()
assert len(buffer) == 5
strbuffer = [x.meas_type for x in buffer]
print(strbuffer)
print("Should be empty:")
print(dt.debug_print_buffers())
##### Test catching up #####
if test_catch_up:
print("{:.20f}".format(t1))
print("{:.20f}".format(t2))
mult, buffer = dt.pull_buffer()
buf_contents = [x.meas_type for x in buffer]
print(buf_contents)
        dt2 = MostRecent(6, x0, P, buffer_cap, meas_space_table, delta_codebook_table,
                         [0.5, 1.5], asset2id, "my_name", default_meas_variance)
        dt2.update(t1, u, Q, None, None)
        print("catch up")
        dt2.catch_up(mult, buffer)
        dt2.update(t2, u, Q, None, None)
        print(dt2.get_asset_estimate("my_name"))
"rospy.logerr",
"numpy.eye",
"copy.deepcopy",
"numpy.ones",
"numpy.linalg.norm",
"etddf.etfilter.ETFilter",
"numpy.array",
"numpy.zeros",
"numpy.linalg.inv",
"etddf.ros2python.get_internal_meas_from_ros_meas",
"numpy.dot",
"scipy.optimize.minimize_scalar",
"etddf_minau.msg.Measurement",
"time.time"
] | [((11607, 11623), 'numpy.zeros', 'np.zeros', (['(6, 1)'], {}), '((6, 1))\n', (11615, 11623), True, 'import numpy as np\n'), ((11988, 11997), 'numpy.eye', 'np.eye', (['(6)'], {}), '(6)\n', (11994, 11997), True, 'import numpy as np\n'), ((12010, 12035), 'numpy.zeros', 'np.zeros', (['Q[3:, 3:].shape'], {}), '(Q[3:, 3:].shape)\n', (12018, 12035), True, 'import numpy as np\n'), ((12081, 12092), 'time.time', 'time.time', ([], {}), '()\n', (12090, 12092), False, 'import time\n'), ((12101, 12153), 'etddf_minau.msg.Measurement', 'Measurement', (['"""depth"""', 't1', '"""my_name"""', '""""""', '(-1)', '(0.1)', '[]'], {}), "('depth', t1, 'my_name', '', -1, 0.1, [])\n", (12112, 12153), False, 'from etddf_minau.msg import Measurement\n'), ((12165, 12216), 'etddf_minau.msg.Measurement', 'Measurement', (['"""dvl_x"""', 't1', '"""my_name"""', '""""""', '(1)', '(0.1)', '[]'], {}), "('dvl_x', t1, 'my_name', '', 1, 0.1, [])\n", (12176, 12216), False, 'from etddf_minau.msg import Measurement\n'), ((12228, 12279), 'etddf_minau.msg.Measurement', 'Measurement', (['"""dvl_y"""', 't1', '"""my_name"""', '""""""', '(1)', '(0.1)', '[]'], {}), "('dvl_y', t1, 'my_name', '', 1, 0.1, [])\n", (12239, 12279), False, 'from etddf_minau.msg import Measurement\n'), ((12394, 12405), 'time.time', 'time.time', ([], {}), '()\n', (12403, 12405), False, 'import time\n'), ((2411, 2475), 'etddf.etfilter.ETFilter', 'ETFilter', (['asset2id[my_name]', 'num_ownship_states', '(3)', 'x0', 'P0', '(True)'], {}), '(asset2id[my_name], num_ownship_states, 3, x0, P0, True)\n', (2419, 2475), False, 'from etddf.etfilter import ETFilter, ETFilter_Main\n'), ((3479, 3541), 'etddf.ros2python.get_internal_meas_from_ros_meas', 'get_internal_meas_from_ros_meas', (['ros_meas', 'src_id', 'measured_id'], {}), '(ros_meas, src_id, measured_id)\n', (3510, 3541), False, 'from etddf.ros2python import get_internal_meas_from_ros_meas\n'), ((4121, 4138), 'numpy.linalg.inv', 'np.linalg.inv', (['Pa'], {}), '(Pa)\n', (4134, 4138), True, 'import numpy as np\n'), ((4156, 4173), 'numpy.linalg.inv', 'np.linalg.inv', (['Pb'], {}), '(Pb)\n', (4169, 4173), True, 'import numpy as np\n'), ((4448, 4516), 'numpy.linalg.inv', 'np.linalg.inv', (['(omega_optimal * Pa_inv + (1 - omega_optimal) * Pb_inv)'], {}), '(omega_optimal * Pa_inv + (1 - omega_optimal) * Pb_inv)\n', (4461, 4516), True, 'import numpy as np\n'), ((6008, 6025), 'numpy.zeros', 'np.zeros', (['x.shape'], {}), '(x.shape)\n', (6016, 6025), True, 'import numpy as np\n'), ((6100, 6117), 'numpy.zeros', 'np.zeros', (['P.shape'], {}), '(P.shape)\n', (6108, 6117), True, 'import numpy as np\n'), ((6345, 6371), 'numpy.dot', 'np.dot', (['posterior_cov', 'tmp'], {}), '(posterior_cov, tmp)\n', (6351, 6371), True, 'import numpy as np\n'), ((11631, 11646), 'numpy.ones', 'np.ones', (['(6, 6)'], {}), '((6, 6))\n', (11638, 11646), True, 'import numpy as np\n'), ((12043, 12071), 'numpy.array', 'np.array', (['[[0.1, 0.1, -0.1]]'], {}), '([[0.1, 0.1, -0.1]])\n', (12051, 12071), True, 'import numpy as np\n'), ((4284, 4352), 'scipy.optimize.minimize_scalar', 'scipy.optimize.minimize_scalar', (['fxn'], {'bounds': '(0, 1)', 'method': '"""bounded"""'}), "(fxn, bounds=(0, 1), method='bounded')\n", (4314, 4352), False, 'import scipy\n'), ((5690, 5708), 'numpy.linalg.inv', 'np.linalg.inv', (['Pcc'], {}), '(Pcc)\n', (5703, 5708), True, 'import numpy as np\n'), ((5711, 5733), 'numpy.linalg.inv', 'np.linalg.inv', (['P_prior'], {}), '(P_prior)\n', (5724, 5733), True, 'import numpy as np\n'), ((11280, 11300), 'copy.deepcopy', 'deepcopy', 
(['asset_mean'], {}), '(asset_mean)\n', (11288, 11300), False, 'from copy import deepcopy\n'), ((11302, 11321), 'copy.deepcopy', 'deepcopy', (['asset_unc'], {}), '(asset_unc)\n', (11310, 11321), False, 'from copy import deepcopy\n'), ((3359, 3448), 'rospy.logerr', 'rospy.logerr', (['("ETDDF doesn\'t recognize: " + ros_meas.measured_asset + \' ... ignoring\')'], {}), '("ETDDF doesn\'t recognize: " + ros_meas.measured_asset +\n \' ... ignoring\')\n', (3371, 3448), False, 'import rospy\n'), ((4212, 4264), 'numpy.linalg.inv', 'np.linalg.inv', (['(omega * Pa_inv + (1 - omega) * Pb_inv)'], {}), '(omega * Pa_inv + (1 - omega) * Pb_inv)\n', (4225, 4264), True, 'import numpy as np\n'), ((4623, 4649), 'numpy.linalg.norm', 'np.linalg.norm', (['(c_bar - xa)'], {}), '(c_bar - xa)\n', (4637, 4649), True, 'import numpy as np\n'), ((4651, 4677), 'numpy.linalg.norm', 'np.linalg.norm', (['(c_bar - xb)'], {}), '(c_bar - xb)\n', (4665, 4677), True, 'import numpy as np\n'), ((5760, 5778), 'numpy.linalg.inv', 'np.linalg.inv', (['Pcc'], {}), '(Pcc)\n', (5773, 5778), True, 'import numpy as np\n'), ((5797, 5819), 'numpy.linalg.inv', 'np.linalg.inv', (['P_prior'], {}), '(P_prior)\n', (5810, 5819), True, 'import numpy as np\n'), ((6226, 6242), 'numpy.linalg.inv', 'np.linalg.inv', (['P'], {}), '(P)\n', (6239, 6242), True, 'import numpy as np\n'), ((6282, 6298), 'numpy.linalg.inv', 'np.linalg.inv', (['P'], {}), '(P)\n', (6295, 6298), True, 'import numpy as np\n')] |
from typing import Mapping
import pulumi_aws as aws
from infra import config
import pulumi
def get_s3_url(obj: aws.s3.BucketObject) -> pulumi.Output[str]:
def _inner(inputs: Mapping[str, str]) -> str:
if config.LOCAL_GRAPL:
return f"http://{config.HOST_IP_IN_NOMAD}:4566/{inputs['bucket']}/{inputs['key']}"
return f"https://{inputs['bucket']}.s3.amazonaws.com/{inputs['key']}"
return pulumi.Output.all(bucket=obj.bucket, key=obj.key).apply(_inner)
| [
"pulumi.Output.all"
] | [((425, 474), 'pulumi.Output.all', 'pulumi.Output.all', ([], {'bucket': 'obj.bucket', 'key': 'obj.key'}), '(bucket=obj.bucket, key=obj.key)\n', (442, 474), False, 'import pulumi\n')] |
import os
import sys
import requests
import re
import pandas as pd
from save_to_xlsx import append_df_to_excel
# APPLICATION INFO
client_id = "ppYCMnYAz3em2lZ4Oisn"
client_secret = "bUstOMZXpg"
# CONSTS
baseURL = "https://openapi.naver.com/v1/search/local.json"
headers = {"X-Naver-Client-Id": client_id,
"X-Naver-Client-Secret": client_secret}
filename = 'naver_data.xlsx'
sheet_name = 'Data'
keywords = pd.read_csv('keyword.csv')
writer = pd.ExcelWriter(filename, engine='openpyxl')
locations = ['서울', '인천', '경기', '충청', '대전', '대구', '광주', '전라도', '제주', '강원도']
# DELETE FILE
try:
os.remove(filename)
except OSError:
pass
# CREATE REQUEST
for _, row in keywords.iterrows():
keyword = row['keyword']
print(f'## KEYWORD: {keyword}')
for location in locations:
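        # Page through results 30 at a time; 34 pages covers roughly the first 1,000 items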
for i in range(34):
params = {"query": f'{location} {keyword}',
"display": 30, "start": i * 30 + 1}
res = requests.get(baseURL, params=params, headers=headers)
if res.status_code == 200:
data = res.json()
items = data['items']
items_len = len(items)
print(f' count: {i}, results: {items_len}')
                for item in items:
                    # str.strip() returns a new string, so reassign the stripped result
                    item['title'] = re.sub('(<b>|</b>)', ' ', item['title']).strip()
                    if item['description']:
                        item['description'] = re.sub(
                            '(<b>|</b>)', ' ', item['description']).strip()
# convert items to dataframe
df = pd.DataFrame(items)
append_df_to_excel(
filename, df, sheet_name=sheet_name, index=False)
if items_len < 30:
break
| [
"pandas.read_csv",
"requests.get",
"save_to_xlsx.append_df_to_excel",
"pandas.DataFrame",
"re.sub",
"pandas.ExcelWriter",
"os.remove"
] | [((420, 446), 'pandas.read_csv', 'pd.read_csv', (['"""keyword.csv"""'], {}), "('keyword.csv')\n", (431, 446), True, 'import pandas as pd\n'), ((456, 499), 'pandas.ExcelWriter', 'pd.ExcelWriter', (['filename'], {'engine': '"""openpyxl"""'}), "(filename, engine='openpyxl')\n", (470, 499), True, 'import pandas as pd\n'), ((599, 618), 'os.remove', 'os.remove', (['filename'], {}), '(filename)\n', (608, 618), False, 'import os\n'), ((954, 1007), 'requests.get', 'requests.get', (['baseURL'], {'params': 'params', 'headers': 'headers'}), '(baseURL, params=params, headers=headers)\n', (966, 1007), False, 'import requests\n'), ((1821, 1840), 'pandas.DataFrame', 'pd.DataFrame', (['items'], {}), '(items)\n', (1833, 1840), True, 'import pandas as pd\n'), ((1857, 1925), 'save_to_xlsx.append_df_to_excel', 'append_df_to_excel', (['filename', 'df'], {'sheet_name': 'sheet_name', 'index': '(False)'}), '(filename, df, sheet_name=sheet_name, index=False)\n', (1875, 1925), False, 'from save_to_xlsx import append_df_to_excel\n'), ((1285, 1325), 're.sub', 're.sub', (['"""(<b>|</b>)"""', '""" """', "item['title']"], {}), "('(<b>|</b>)', ' ', item['title'])\n", (1291, 1325), False, 'import re\n'), ((1526, 1572), 're.sub', 're.sub', (['"""(<b>|</b>)"""', '""" """', "item['description']"], {}), "('(<b>|</b>)', ' ', item['description'])\n", (1532, 1572), False, 'import re\n')] |
import os
import matplotlib.pyplot as plt
import tensorflow as tf
import numpy as np
from jamo import hangul_to_jamo
from models.text2mel import Text2Mel
from util.hparams import *
from util.plot_alignment import plot_alignment
from util.text import sequence_to_text, text_to_sequence
sentences = [
'정말로 사랑한담 기다려주세요'
]
checkpoint_dir = './checkpoint/1'
save_dir = './output'
os.makedirs(save_dir, exist_ok=True)
def test_step(text, idx):
seq = text_to_sequence(text)
enc_input = np.asarray([seq], dtype=np.int32)
dec_input = np.zeros((1, max_iter, mel_dim), dtype=np.float32)
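    # Autoregressive decoding: each predicted mel frame is fed back as the next decoder input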
for i in range(1, max_iter+1):
mel_out, alignment = model(enc_input, dec_input)
if i < max_iter:
dec_input[:, i, :] = mel_out[:, i - 1, :]
pred = np.reshape(np.asarray(mel_out), [-1, mel_dim])
alignment = np.squeeze(alignment, axis=0)
np.save(os.path.join(save_dir, 'mel-{}'.format(idx)), pred, allow_pickle=False)
input_seq = sequence_to_text(seq)
alignment_dir = os.path.join(save_dir, 'align-{}.png'.format(idx))
plot_alignment(alignment, alignment_dir, input_seq)
model = Text2Mel()
checkpoint = tf.train.Checkpoint(model=model)
checkpoint.restore(tf.train.latest_checkpoint(checkpoint_dir)).expect_partial()
for i, text in enumerate(sentences):
jamo = ''.join(list(hangul_to_jamo(text)))
test_step(jamo, i)
| [
"tensorflow.train.Checkpoint",
"util.text.text_to_sequence",
"os.makedirs",
"models.text2mel.Text2Mel",
"numpy.asarray",
"numpy.squeeze",
"jamo.hangul_to_jamo",
"numpy.zeros",
"util.plot_alignment.plot_alignment",
"tensorflow.train.latest_checkpoint",
"util.text.sequence_to_text"
] | [((380, 416), 'os.makedirs', 'os.makedirs', (['save_dir'], {'exist_ok': '(True)'}), '(save_dir, exist_ok=True)\n', (391, 416), False, 'import os\n'), ((1133, 1143), 'models.text2mel.Text2Mel', 'Text2Mel', ([], {}), '()\n', (1141, 1143), False, 'from models.text2mel import Text2Mel\n'), ((1157, 1189), 'tensorflow.train.Checkpoint', 'tf.train.Checkpoint', ([], {'model': 'model'}), '(model=model)\n', (1176, 1189), True, 'import tensorflow as tf\n'), ((455, 477), 'util.text.text_to_sequence', 'text_to_sequence', (['text'], {}), '(text)\n', (471, 477), False, 'from util.text import sequence_to_text, text_to_sequence\n'), ((494, 527), 'numpy.asarray', 'np.asarray', (['[seq]'], {'dtype': 'np.int32'}), '([seq], dtype=np.int32)\n', (504, 527), True, 'import numpy as np\n'), ((544, 594), 'numpy.zeros', 'np.zeros', (['(1, max_iter, mel_dim)'], {'dtype': 'np.float32'}), '((1, max_iter, mel_dim), dtype=np.float32)\n', (552, 594), True, 'import numpy as np\n'), ((842, 871), 'numpy.squeeze', 'np.squeeze', (['alignment'], {'axis': '(0)'}), '(alignment, axis=0)\n', (852, 871), True, 'import numpy as np\n'), ((974, 995), 'util.text.sequence_to_text', 'sequence_to_text', (['seq'], {}), '(seq)\n', (990, 995), False, 'from util.text import sequence_to_text, text_to_sequence\n'), ((1071, 1122), 'util.plot_alignment.plot_alignment', 'plot_alignment', (['alignment', 'alignment_dir', 'input_seq'], {}), '(alignment, alignment_dir, input_seq)\n', (1085, 1122), False, 'from util.plot_alignment import plot_alignment\n'), ((790, 809), 'numpy.asarray', 'np.asarray', (['mel_out'], {}), '(mel_out)\n', (800, 809), True, 'import numpy as np\n'), ((1209, 1251), 'tensorflow.train.latest_checkpoint', 'tf.train.latest_checkpoint', (['checkpoint_dir'], {}), '(checkpoint_dir)\n', (1235, 1251), True, 'import tensorflow as tf\n'), ((1332, 1352), 'jamo.hangul_to_jamo', 'hangul_to_jamo', (['text'], {}), '(text)\n', (1346, 1352), False, 'from jamo import hangul_to_jamo\n')] |
#!/usr/bin/env python
import os
import glob
from time import sleep
os.system('modprobe w1-gpio')
os.system('modprobe w1-therm')
base_dir = '/sys/bus/w1/devices/'
device_folder = glob.glob(base_dir + '28*')[0]
device_file = device_folder + '/w1_slave'
# get raw data
def read_temp_raw():
f = open(device_file, 'r')
lines = f.readlines()
f.close()
return lines
def read_temp():
lines = read_temp_raw()
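    # The first line of w1_slave ends in 'YES' once the sensor's CRC check passes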
while lines[0].strip()[-3:] != 'YES':
sleep(0.2)
lines = read_temp_raw()
equal_pos = lines[1].find('t=')
if equal_pos != -1:
temp_string = lines[1][equal_pos + 2:]
temp_c = float(temp_string) / 1000.0
return temp_c
try:
while True:
print(read_temp())
sleep(1)
except KeyboardInterrupt:
print('stop')
pass
| [
"os.system",
"time.sleep",
"glob.glob"
] | [((69, 98), 'os.system', 'os.system', (['"""modprobe w1-gpio"""'], {}), "('modprobe w1-gpio')\n", (78, 98), False, 'import os\n'), ((99, 129), 'os.system', 'os.system', (['"""modprobe w1-therm"""'], {}), "('modprobe w1-therm')\n", (108, 129), False, 'import os\n'), ((181, 208), 'glob.glob', 'glob.glob', (["(base_dir + '28*')"], {}), "(base_dir + '28*')\n", (190, 208), False, 'import glob\n'), ((475, 485), 'time.sleep', 'sleep', (['(0.2)'], {}), '(0.2)\n', (480, 485), False, 'from time import sleep\n'), ((749, 757), 'time.sleep', 'sleep', (['(1)'], {}), '(1)\n', (754, 757), False, 'from time import sleep\n')] |
#!/usr/bin/env python
# Copyright (C) 2014 <NAME>. All rights reserved.
import unittest
from libgsync.sync.file import SyncFile
class TestSyncFile(unittest.TestCase):
def test_SyncFile_relative_to(self):
f = SyncFile("/gsync_unittest")
self.assertEqual(
f.relative_to("/gsync_unittest/open_for_read.txt"),
"open_for_read.txt"
)
| [
"libgsync.sync.file.SyncFile"
] | [((224, 251), 'libgsync.sync.file.SyncFile', 'SyncFile', (['"""/gsync_unittest"""'], {}), "('/gsync_unittest')\n", (232, 251), False, 'from libgsync.sync.file import SyncFile\n')] |
import pyfiglet
import termcolor
import wikipedia
import gtts
import playsound
import os
import datetime
import newsapi
icon_text = pyfiglet.figlet_format('WikiTalk')
welcome_screen = termcolor.colored(icon_text, color='blue')
print(welcome_screen)
def raw_audio(greating_text):
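    # Speak the text via gTTS through a temporary mp3; returns False on failure (None on success)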
try:
audio = gtts.gTTS(text=greating_text, lang='en', slow=False)
audio.save('greatings.mp3')
playsound.playsound('greatings.mp3')
os.remove('greatings.mp3')
except BaseException:
return False
def greatings(text):
x = raw_audio(text)
while True:
if x != False:
break
else:
x = raw_audio(text)
return x
current = datetime.datetime.now()
# check if it is the first time of the user
try:
with open('welcome.txt', 'r') as f:
        name = f.read().strip()
except BaseException:
with open('welcome.txt', 'w') as f2:
welcome_text = "Hi there.welcome to WikiTalk.I am Wiki,your personal voice assistant.It seems we've met for the first time.That's amazing!!Ummm!!What should I call you?"
greatings(welcome_text)
name = input('Your name:')
f2.write(name)
thanks_text = f'Humm...{name}..sounds cool.So,{name},let me officially greet you.'
greatings(thanks_text)
with open('welcome.txt', 'r') as f2:
        name = f2.read().strip()
if int(current.hour) <= 12:
text = f'Hi {name}.Good morning.How can I help you?I can tell you todays breaking news.Or if you want,I can search for any information on web..'
greatings(text)
elif int(current.hour) <= 15:
text = f'Hi {name}.Good noon.How can I help you?I can tell you todays breaking news.Or if you want,I can search for any information on web..'
greatings(text)
elif int(current.hour) <= 17:
text = f'Hi {name}.Good afternoon.How can I help you?I can tell you todays breaking news.Or if you want,I can search for any information on web..'
greatings(text)
else:
text = f'Hi {name}.Good evening.How can I help you?I can tell you todays breaking news.Or if you want,I can search for any information on web..'
greatings(text)
while True:
def wikisearch():
try:
ask_for_search_type = input('News/Information: ')
            if ask_for_search_type[0].lower() == 'n' or 'news' in ask_for_search_type.lower():
welcome_to_newsworld = "Okay.Please let me know what kinds of news you are looking for."
greatings(welcome_to_newsworld)
my_api_key = '38df851456f641ea9df315b23bc19ffa'
news_client = newsapi.NewsApiClient(api_key=my_api_key)
user_input = input('News topic: ')
news = news_client.get_everything(
q=user_input, language='en', page_size=100)['articles']
news_ammount = len(news)
news_find_time = "Give me few seconds.I am searching through more than 30 world's famous newspapers about your news topic"
greatings(news_find_time)
if news_ammount == 0:
text = f"Umm!It seems there is no news today about your topic.Make sure you enter a right topic.And just enter the topic name and nothing else."
greatings(text)
elif news_ammount <= 5:
greatings('Yep.I am done.Here it is')
j = 1
for x in news:
title = x['title']
description = x['description']
final_news = title + description
n_count = f'News:{j}'
greatings(n_count)
greatings(final_news)
j += 1
else:
user_demand = f"Yeo.I am done.i have found {news_ammount} news about your topic.How many news you want to listen?"
greatings(user_demand)
news_number = input('Amount of news: ')
i = 0
j = 1
for x in news:
if i < int(news_number):
title = x['title']
description = x['description']
final_news = title + description
n_count = f"News{j}"
greatings(n_count)
greatings(final_news)
i += 1
j += 1
else:
greatings('Okay.What kind of information you are looking for?')
search = input('Search:')
search_result = wikipedia.search(search)
greatings(
'Give me few seconds.I am colllecting information on your topic.')
search_summary = wikipedia.summary(search_result)
x = f"Yep.I am done.Here it is.{search_summary}"
greatings(x)
except BaseException:
return False
while True:
s_res = wikisearch()
if s_res != False:
break
else:
text = "Umm!...I'm sorry.I couldn't get you.Can you please try again??"
greatings(text)
greatings('Do you wanna search something else?')
search_again = input('Yes/No:')
if search_again[0].lower() == 'n':
greatings('Thanks for using me.See you later.')
break
else:
greatings('Sure.What I will search now?')
| [
"wikipedia.search",
"termcolor.colored",
"pyfiglet.figlet_format",
"playsound.playsound",
"datetime.datetime.now",
"gtts.gTTS",
"newsapi.NewsApiClient",
"wikipedia.summary",
"os.remove"
] | [((133, 167), 'pyfiglet.figlet_format', 'pyfiglet.figlet_format', (['"""WikiTalk"""'], {}), "('WikiTalk')\n", (155, 167), False, 'import pyfiglet\n'), ((185, 227), 'termcolor.colored', 'termcolor.colored', (['icon_text'], {'color': '"""blue"""'}), "(icon_text, color='blue')\n", (202, 227), False, 'import termcolor\n'), ((699, 722), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (720, 722), False, 'import datetime\n'), ((308, 360), 'gtts.gTTS', 'gtts.gTTS', ([], {'text': 'greating_text', 'lang': '"""en"""', 'slow': '(False)'}), "(text=greating_text, lang='en', slow=False)\n", (317, 360), False, 'import gtts\n'), ((405, 441), 'playsound.playsound', 'playsound.playsound', (['"""greatings.mp3"""'], {}), "('greatings.mp3')\n", (424, 441), False, 'import playsound\n'), ((450, 476), 'os.remove', 'os.remove', (['"""greatings.mp3"""'], {}), "('greatings.mp3')\n", (459, 476), False, 'import os\n'), ((2602, 2643), 'newsapi.NewsApiClient', 'newsapi.NewsApiClient', ([], {'api_key': 'my_api_key'}), '(api_key=my_api_key)\n', (2623, 2643), False, 'import newsapi\n'), ((4719, 4743), 'wikipedia.search', 'wikipedia.search', (['search'], {}), '(search)\n', (4735, 4743), False, 'import wikipedia\n'), ((4891, 4923), 'wikipedia.summary', 'wikipedia.summary', (['search_result'], {}), '(search_result)\n', (4908, 4923), False, 'import wikipedia\n')] |
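The raw_audio/greatings pair above is an ad-hoc retry loop: raw_audio returns False on any exception (and None on success), and the caller keeps re-trying until it sees something other than False. A sketch of the same idea with an explicit attempt limit; max_attempts is an assumption, the original retries forever:

def retry(func, *args, max_attempts=3):
    # call func until it stops signalling failure (False); None counts as success
    for _ in range(max_attempts):
        result = func(*args)
        if result is not False:
            return result
    return False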
from gettweets import get_tweets
import uvicorn
from fastapi import FastAPI
from pydantic import BaseModel
from classifier import predict_sentiment
# Initiate app instance
app = FastAPI(title='Twitter Sentiment API',
version='1.0',
description='A REST API to extract tweets, and run sentiment analysis on it.')
# Design the incoming feature data
class SearchData(BaseModel):
keywords: str
    exclude_words: str
start_date: str
end_date: str
num_tweets: int
# Api root or home endpoint
@app.get('/')
def getTweet(keywords: str,
exclude_words: str,
start_date: str,
end_date: str,
num_tweets: int):
"""
This endpoint serves the predictions based on the values received from a user and the saved model.
Parameters:
-----------
keywords : str
strings of words to search for.
exclude_words : str
strings of words that shouldn't be included in results.
start_date : datetime
Date from which tweets are to be scraped
end_date : datetime
Date until which tweets are to scraped
num_tweets : int
Number of tweets to scrap daily.
"""
df = get_tweets(keywords,
exclude_words,
start_date,
end_date,
num_tweets)
df['processed_text'], df['sentiment'] = predict_sentiment(list(df['text']))
df['sentiment'] = df['sentiment'].map({1: 'positive', -1: 'negative'})
tweets = []
for i, row in df.iterrows():
tweets.append(dict(datetime=row['datetime'], tweet_url=row['tweet_url'], username=row['username'],
text=row['text'], char_length=row['char_length'], word_length=row['word_length'],
likes=row['likes'], retweets=row['retweets'], processed_text=row['processed_text'],
sentiment=row['sentiment']))
return {'tweets': tweets}
if __name__ == "__main__":
# Run app with uvicorn with port and host specified. Host needed for docker port mapping.
uvicorn.run(app, port=8000, host="127.0.0.1")
| [
"uvicorn.run",
"fastapi.FastAPI",
"gettweets.get_tweets"
] | [((180, 317), 'fastapi.FastAPI', 'FastAPI', ([], {'title': '"""Twitter Sentiment API"""', 'version': '"""1.0"""', 'description': '"""A REST API to extract tweets, and run sentiment analysis on it."""'}), "(title='Twitter Sentiment API', version='1.0', description=\n 'A REST API to extract tweets, and run sentiment analysis on it.')\n", (187, 317), False, 'from fastapi import FastAPI\n'), ((1229, 1298), 'gettweets.get_tweets', 'get_tweets', (['keywords', 'exclude_words', 'start_date', 'end_date', 'num_tweets'], {}), '(keywords, exclude_words, start_date, end_date, num_tweets)\n', (1239, 1298), False, 'from gettweets import get_tweets\n'), ((2126, 2171), 'uvicorn.run', 'uvicorn.run', (['app'], {'port': '(8000)', 'host': '"""127.0.0.1"""'}), "(app, port=8000, host='127.0.0.1')\n", (2137, 2171), False, 'import uvicorn\n')] |
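Note that SearchData above is declared but never bound to the route; the GET endpoint reads plain query parameters instead. If the model were meant to carry the payload, one conventional (hypothetical, not the author's) wiring is a POST route that takes it as the request body:

from fastapi import FastAPI
from pydantic import BaseModel

app = FastAPI()

class SearchData(BaseModel):
    keywords: str
    exclude_words: str
    start_date: str
    end_date: str
    num_tweets: int

@app.post('/search')
def search(payload: SearchData):
    # FastAPI validates the JSON body against SearchData before the handler runs
    return {'received': payload.dict()}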
"""empty message
Revision ID: <KEY>
Revises:
Create Date: 2021-10-21 13:42:25.857939
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '<KEY>'
down_revision = None
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('town',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('name', sa.String(length=80), nullable=False),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('name')
)
op.create_table('names',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('name', sa.String(length=80), nullable=False),
sa.Column('town_id', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['town_id'], ['town.id'], ),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('name')
)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_table('names')
op.drop_table('town')
# ### end Alembic commands ###
| [
"sqlalchemy.ForeignKeyConstraint",
"alembic.op.drop_table",
"sqlalchemy.PrimaryKeyConstraint",
"sqlalchemy.Integer",
"sqlalchemy.UniqueConstraint",
"sqlalchemy.String"
] | [((1007, 1029), 'alembic.op.drop_table', 'op.drop_table', (['"""names"""'], {}), "('names')\n", (1020, 1029), False, 'from alembic import op\n'), ((1034, 1055), 'alembic.op.drop_table', 'op.drop_table', (['"""town"""'], {}), "('town')\n", (1047, 1055), False, 'from alembic import op\n'), ((488, 517), 'sqlalchemy.PrimaryKeyConstraint', 'sa.PrimaryKeyConstraint', (['"""id"""'], {}), "('id')\n", (511, 517), True, 'import sqlalchemy as sa\n'), ((523, 550), 'sqlalchemy.UniqueConstraint', 'sa.UniqueConstraint', (['"""name"""'], {}), "('name')\n", (542, 550), True, 'import sqlalchemy as sa\n'), ((757, 806), 'sqlalchemy.ForeignKeyConstraint', 'sa.ForeignKeyConstraint', (["['town_id']", "['town.id']"], {}), "(['town_id'], ['town.id'])\n", (780, 806), True, 'import sqlalchemy as sa\n'), ((814, 843), 'sqlalchemy.PrimaryKeyConstraint', 'sa.PrimaryKeyConstraint', (['"""id"""'], {}), "('id')\n", (837, 843), True, 'import sqlalchemy as sa\n'), ((849, 876), 'sqlalchemy.UniqueConstraint', 'sa.UniqueConstraint', (['"""name"""'], {}), "('name')\n", (868, 876), True, 'import sqlalchemy as sa\n'), ((392, 404), 'sqlalchemy.Integer', 'sa.Integer', ([], {}), '()\n', (402, 404), True, 'import sqlalchemy as sa\n'), ((445, 465), 'sqlalchemy.String', 'sa.String', ([], {'length': '(80)'}), '(length=80)\n', (454, 465), True, 'import sqlalchemy as sa\n'), ((606, 618), 'sqlalchemy.Integer', 'sa.Integer', ([], {}), '()\n', (616, 618), True, 'import sqlalchemy as sa\n'), ((659, 679), 'sqlalchemy.String', 'sa.String', ([], {'length': '(80)'}), '(length=80)\n', (668, 679), True, 'import sqlalchemy as sa\n'), ((723, 735), 'sqlalchemy.Integer', 'sa.Integer', ([], {}), '()\n', (733, 735), True, 'import sqlalchemy as sa\n')] |
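Two details worth noting about the migration above: downgrade() must drop names before town because names carries a foreign key into town, and the revision is applied or reverted with Alembic's standard commands, alembic upgrade head and alembic downgrade -1, run from the directory containing alembic.ini.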
# Parse spreadsheets and its sheets ===========================================
import pandas as pd
import os
# add your folder path
sheets_folder = r''
for path, subdirs, files in os.walk(sheets_folder):
for filename in files:
print('\n [] File:', filename, '===============')
if filename.endswith('.xlsx'):
            excel = pd.ExcelFile(os.path.join(path, filename))
print('Number of sheets:', len(excel.sheet_names))
print('Sheet names:', excel.sheet_names)
for sheet in excel.sheet_names:
df = excel.parse(sheet)
print('Sheet:', sheet, ' with the columns:', list(df.columns))
| [
"pandas.ExcelFile",
"os.walk"
] | [((183, 205), 'os.walk', 'os.walk', (['sheets_folder'], {}), '(sheets_folder)\n', (190, 205), False, 'import os\n'), ((351, 387), 'pandas.ExcelFile', 'pd.ExcelFile', (["(path + '\\\\' + filename)"], {}), "(path + '\\\\' + filename)\n", (363, 387), True, 'import pandas as pd\n')] |
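As an aside on the pd.ExcelFile loop above: pandas can also load every sheet in one call; sheet_name=None returns a dict mapping sheet names to DataFrames. A sketch equivalent to the per-sheet parse:

import pandas as pd

def describe_workbook(path):
    # sheet_name=None loads every sheet into a {name: DataFrame} dict
    sheets = pd.read_excel(path, sheet_name=None)
    for name, df in sheets.items():
        print('Sheet:', name, ' with the columns:', list(df.columns))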
import matplotlib.pyplot as plt
import numpy as np
# Link - https://towardsdatascience.com/linear-regression-using-gradient-descent-97a6c8700931
# refer the github link too
#dataset from geeksforgeek
input_data=[0, 1, 2, 3, 4, 5, 6, 7, 8]
output_data=[1, 3, 2, 5, 7, 8, 8, 9, 10]
x=np.array(input_data)
y=np.array(output_data) #creating np arrays
t0=1
t1=1
alpha=0.01
m=float(len(x))
epochs=1000
for i in range(epochs):
predicted_result=t0+(t1*x)
    # dC/dt0 and dC/dt1 for the MSE cost; the factor is -2/m, not 1/(-2m)
    diff0=(-2/m)*sum(y-predicted_result)
    diff1=(-2/m)*sum((y-predicted_result)*x)
t0=t0-alpha*diff0
t1=t1-alpha*diff1
print(t0,t1)
final_fitted_data=t0+(t1*x)
print(t0+(t1*9))
plt.scatter(x,y)
plt.plot([min(x),max(x)],[min(final_fitted_data),max(final_fitted_data)],color='red')
plt.show()
| [
"numpy.array",
"matplotlib.pyplot.scatter",
"matplotlib.pyplot.show"
] | [((284, 304), 'numpy.array', 'np.array', (['input_data'], {}), '(input_data)\n', (292, 304), True, 'import numpy as np\n'), ((307, 328), 'numpy.array', 'np.array', (['output_data'], {}), '(output_data)\n', (315, 328), True, 'import numpy as np\n'), ((675, 692), 'matplotlib.pyplot.scatter', 'plt.scatter', (['x', 'y'], {}), '(x, y)\n', (686, 692), True, 'import matplotlib.pyplot as plt\n'), ((778, 788), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (786, 788), True, 'import matplotlib.pyplot as plt\n')] |
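Simple linear regression also has a closed-form least-squares solution, which makes a handy convergence check for the loop above: np.polyfit(x, y, 1) returns (slope, intercept), and after enough epochs (t1, t0) should approach it. A sketch:

import numpy as np

x = np.array([0, 1, 2, 3, 4, 5, 6, 7, 8])
y = np.array([1, 3, 2, 5, 7, 8, 8, 9, 10])
slope, intercept = np.polyfit(x, y, 1)  # exact least-squares fit
print('target slope/intercept:', slope, intercept)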
#!/usr/bin/env python
""" A ham radio keyer for raspberry pi"""
import RPi.GPIO as GPIO
import time
from cmd import Cmd
# Set defaults here
CALL = "w1aw"
RST = "r 5nn tu"
default_WPM = 25
BCM_pin = 24
def dit():
tx(DitLength)
time.sleep(DitLength)
def dah():
tx(DahLength)
time.sleep(DitLength)
def space():
time.sleep(CharSpaceExtra)
def word():
time.sleep(WordSpaceExtra)
def tx(keylength):
""" Keys the TX """
# set the output to Key Down...
GPIO.output(BCM_pin, True)
time.sleep(keylength)
# clear the output ...
GPIO.output(BCM_pin, False)
return
def send(code):
for element in morse[code]:
if element == ".":
dit()
elif element == "-":
dah()
def lookup(message):
sendspace = True
for char in message:
if char == "<":
sendspace = False
continue
if char == ">":
sendspace = True
continue
        if char == " ":
            sendspace = True
            word()
elif char not in morse.keys():
print("")
print("unknown char: %s" % char)
pass
else:
send(char)
if sendspace:
space()
print("sent message: '%s'" % message)
class MyPrompt(Cmd):
def emptyline(self):
pass
def do_PROSIGNS(self, args):
"""Anything enclosed in angle brackets will have no separating space: <bt>"""
pass
def do_CALL(self, args):
"""Passing arguments sets the CALL message.
With no arguments, the CALL message is sent.
'CALL ?' displays the current CALL message."""
global CALL
if not args:
lookup(CALL.lower())
elif args == "?":
print("current CALL message: '%s'" % CALL)
else:
print("Setting CALL to '%s'" % args.lower())
CALL = args.lower()
def do_RST(self, args):
"""Passing arguments sets the RST message.
With no arguments, the RST message is sent.
'RST ?' displays the current RST message."""
global RST
if not args:
lookup(RST.lower())
elif args == "?":
print("current RST message: '%s'" % RST)
else:
print("Setting RST to '%s'" % args.lower())
RST = args.lower()
def do_EOF(self, args):
"""Quits the program. Type 'EOF' or press 'Cntrl-D'"""
print("Quitting.")
raise SystemExit
def default(self, line):
lookup(line.lower())
def do_WPM(self, args):
"""Sets WPM to argument passed. No argument will set it to the default."""
global WPM
global DitLength
global DahLength
if not args:
WPM = default_WPM
else:
if args.isdigit():
WPM = int(args)
else:
                print("Invalid value for WPM: %s. WPM must be an integer." % args)
prompt.prompt = str(WPM) + ' WPM> '
DitLength = float(60 / float(WPM * 50))
DahLength = 3 * DitLength
# Initialize GPIO
GPIO.setmode(GPIO.BCM)
GPIO.setwarnings(False)
GPIO.setup(BCM_pin, GPIO.OUT)
GPIO.output(BCM_pin, GPIO.LOW)
WPM = default_WPM
DitLength = float(60 / float(WPM * 50))
DahLength = 3 * DitLength
# Dits and Dahs will have one ditlength space after each already
# CharSpace and WordSpace should be 3 and 7, so here is the extra
CharSpaceExtra = 2 * DitLength
WordSpaceExtra = 6 * DitLength
morse = {
"a": ".-",
"b": "-...",
"c": "-.-.",
"d": "-..",
"e": ".",
"f": "..-.",
"g": "--.",
"h": "....",
"i": "..",
"j": ".---",
"k": "-.-",
"l": ".-..",
"m": "--",
"n": "-.",
"o": "---",
"p": ".--.",
"q": "--.-",
"r": ".-.",
"s": "...",
"t": "-",
"u": "..-",
"v": "...-",
"w": ".--",
"x": "-..-",
"y": "-.--",
"z": "--..",
"0": "-----",
"1": ".----",
"2": "..---",
"3": "...--",
"4": "....-",
"5": ".....",
"6": "-....",
"7": "--...",
"8": "---..",
"9": "----.",
"?": "..--..",
".": ".-.-.-",
",": "--..--",
"/": "-..-."
}
if __name__ == '__main__':
prompt = MyPrompt()
prompt.prompt = str(WPM) + ' WPM> '
prompt.cmdloop('Starting keyer. Type "help" for help...')
| [
"RPi.GPIO.setup",
"RPi.GPIO.output",
"RPi.GPIO.setwarnings",
"time.sleep",
"RPi.GPIO.setmode"
] | [((3123, 3145), 'RPi.GPIO.setmode', 'GPIO.setmode', (['GPIO.BCM'], {}), '(GPIO.BCM)\n', (3135, 3145), True, 'import RPi.GPIO as GPIO\n'), ((3146, 3169), 'RPi.GPIO.setwarnings', 'GPIO.setwarnings', (['(False)'], {}), '(False)\n', (3162, 3169), True, 'import RPi.GPIO as GPIO\n'), ((3170, 3199), 'RPi.GPIO.setup', 'GPIO.setup', (['BCM_pin', 'GPIO.OUT'], {}), '(BCM_pin, GPIO.OUT)\n', (3180, 3199), True, 'import RPi.GPIO as GPIO\n'), ((3200, 3230), 'RPi.GPIO.output', 'GPIO.output', (['BCM_pin', 'GPIO.LOW'], {}), '(BCM_pin, GPIO.LOW)\n', (3211, 3230), True, 'import RPi.GPIO as GPIO\n'), ((239, 260), 'time.sleep', 'time.sleep', (['DitLength'], {}), '(DitLength)\n', (249, 260), False, 'import time\n'), ((296, 317), 'time.sleep', 'time.sleep', (['DitLength'], {}), '(DitLength)\n', (306, 317), False, 'import time\n'), ((337, 363), 'time.sleep', 'time.sleep', (['CharSpaceExtra'], {}), '(CharSpaceExtra)\n', (347, 363), False, 'import time\n'), ((382, 408), 'time.sleep', 'time.sleep', (['WordSpaceExtra'], {}), '(WordSpaceExtra)\n', (392, 408), False, 'import time\n'), ((494, 520), 'RPi.GPIO.output', 'GPIO.output', (['BCM_pin', '(True)'], {}), '(BCM_pin, True)\n', (505, 520), True, 'import RPi.GPIO as GPIO\n'), ((525, 546), 'time.sleep', 'time.sleep', (['keylength'], {}), '(keylength)\n', (535, 546), False, 'import time\n'), ((578, 605), 'RPi.GPIO.output', 'GPIO.output', (['BCM_pin', '(False)'], {}), '(BCM_pin, False)\n', (589, 605), True, 'import RPi.GPIO as GPIO\n')] |
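The timing constants above follow the standard 'PARIS' convention: the reference word PARIS occupies exactly 50 dit units, so at W words per minute one dit lasts 60/(50*W) seconds (1.2/W) and a dah is three dits. A quick check of a few speeds:

for wpm in (5, 25, 60):
    dit = 60.0 / (wpm * 50)  # seconds per dit under the PARIS convention
    print(wpm, 'WPM:', round(dit * 1000), 'ms dit,', round(3 * dit * 1000), 'ms dah')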
from conans import AutoToolsBuildEnvironment, ConanFile, tools
import os
import platform
class FfmpegConan(ConanFile):
name = 'ffmpeg'
source_version = '4.4'
package_version = '3'
version = '%s-%s' % (source_version, package_version)
build_requires = (
'llvm/5.0.2-1@vuo+conan+llvm/stable',
'macos-sdk/11.0-0@vuo+conan+macos-sdk/stable',
'vuoutils/1.2@vuo+conan+vuoutils/stable',
)
requires = 'openssl/1.1.1h-0@vuo+conan+openssl/stable'
settings = 'os', 'compiler', 'build_type', 'arch'
url = 'http://www.ffmpeg.org/'
license = 'http://www.ffmpeg.org/legal.html'
description = 'A cross-platform library for recording, converting, and streaming audio and video'
source_dir = 'ffmpeg-%s' % source_version
build_x86_dir = '_build_x86'
build_arm_dir = '_build_arm'
install_x86_dir = '_install_x86'
install_arm_dir = '_install_arm'
install_universal_dir = '_install_universal_dir'
libs = {
'avcodec': 58,
'avdevice': 58,
'avfilter': 7,
'avformat': 58,
'avutil': 56,
'swresample': 3,
'swscale': 5,
}
exports_sources = '*.patch'
def requirements(self):
if platform.system() == 'Linux':
self.requires('patchelf/0.10pre-1@vuo/stable')
elif platform.system() != 'Darwin':
raise Exception('Unknown platform "%s"' % platform.system())
def source(self):
tools.get('http://www.ffmpeg.org/releases/ffmpeg-%s.tar.bz2' % self.source_version,
sha256='42093549751b582cf0f338a21a3664f52e0a9fbe0d238d3c992005e493607d0e')
# On both Linux and macOS, tell ./configure to check for the presence of OPENSSL_init_ssl
# (instead of the removed-in-openssl-1.1 SSL_library_init).
if platform.system() == 'Linux':
# Tell ./configure that it needs the dynamic linker in order to link with OpenSSL.
# (`autotools.libs.append('dl')` doesn't work because it appears before the OpenSSL libraries on the ./configure command line.)
tools.replace_in_file('%s/configure' % self.source_dir,
' check_lib openssl openssl/ssl.h SSL_library_init -lssl -lcrypto ||',
' check_lib openssl openssl/ssl.h OPENSSL_init_ssl -lssl -lcrypto -ldl -lpthread ||')
else:
tools.replace_in_file('%s/configure' % self.source_dir,
' check_lib openssl openssl/ssl.h SSL_library_init -lssl -lcrypto ||',
' check_lib openssl openssl/ssl.h OPENSSL_init_ssl -lssl -lcrypto ||')
self.run('mv %s/LICENSE.md %s/%s.txt' % (self.source_dir, self.source_dir, self.name))
def build(self):
import VuoUtils
autotools = AutoToolsBuildEnvironment(self)
# The LLVM/Clang libs get automatically added by the `requires` line,
# but this package doesn't need to link with them.
autotools.libs = []
autotools.flags.append('-I%s/include' % self.deps_cpp_info['openssl'].rootpath)
autotools.link_flags.append('-L%s/lib' % self.deps_cpp_info['openssl'].rootpath)
if platform.system() == 'Darwin':
# autotools.flags.append('-Oz') # Superseded by `--enable-small` below.
autotools.flags.append('-isysroot %s' % self.deps_cpp_info['macos-sdk'].rootpath)
autotools.flags.append('-mmacosx-version-min=10.11')
autotools.link_flags.append('-isysroot %s' % self.deps_cpp_info['macos-sdk'].rootpath)
autotools.link_flags.append('-Wl,-macos_version_min,10.11')
autotools.link_flags.append('-Wl,-headerpad_max_install_names')
elif platform.system() == 'Linux':
autotools.flags.append('-O4')
common_configure_args = [
'--disable-programs',
'--disable-doc',
'--enable-shared',
'--disable-stripping', # Keep symbols during development; remove them during the final VuoPackageEditor/VuoPackageSDK step.
'--disable-static',
'--enable-pthreads',
'--disable-debug',
'--enable-demuxer=mpegts',
'--enable-demuxer=mpegtsraw',
'--disable-bsfs',
'--disable-devices',
'--enable-openssl',
'--enable-small', # Reduces library size by about 25%.
# '--enable-lto', # No effect on library size.
# '--disable-runtime-cpudetect', # No effect on library size.
# Disable unneeded features; reduces library size by about 20%.
'--disable-muxers',
'--disable-devices',
'--disable-filters',
'--disable-bzlib',
'--disable-iconv',
'--disable-encoders',
# Only enable the encoders/muxer needed for RTMP.
'--enable-encoder=h264_videotoolbox',
'--enable-encoder=aac_at',
'--enable-muxer=flv',
# Use AVFoundation's hardware-accelerated H.264 decoder instead.
'--disable-decoder=h264',
]
env_vars = {
'CC' : self.deps_cpp_info['llvm'].rootpath + '/bin/clang',
'CXX': self.deps_cpp_info['llvm'].rootpath + '/bin/clang++',
}
with tools.environment_append(env_vars):
build_root = os.getcwd()
self.output.info("=== Build for x86_64 ===")
tools.mkdir(self.build_x86_dir)
with tools.chdir(self.build_x86_dir):
autotools.flags.append('-arch x86_64')
autotools.link_flags.append('-arch x86_64')
autotools.configure(configure_dir='../%s' % self.source_dir,
build=False,
host=False,
args=common_configure_args + [
'--prefix=%s/%s' % (build_root, self.install_x86_dir),
'--enable-x86asm'])
autotools.make(args=['--quiet'])
autotools.make(target='install', args=['--quiet'])
with tools.chdir('%s/lib' % self.install_x86_dir):
VuoUtils.fixLibs(self.libs, self.deps_cpp_info)
self.output.info("=== Build for arm64 ===")
tools.mkdir(self.build_arm_dir)
with tools.chdir(self.build_arm_dir):
autotools.flags.remove('-arch x86_64')
autotools.flags.remove('-mmacosx-version-min=10.11')
autotools.flags.append('-arch arm64')
autotools.flags.append('-target arm64-apple-macosx11.0.0')
autotools.link_flags.remove('-arch x86_64')
autotools.link_flags.remove('-Wl,-macos_version_min,10.11')
autotools.link_flags.append('-arch arm64')
autotools.link_flags.append('-target arm64-apple-macosx11.0.0')
autotools.configure(configure_dir='../%s' % self.source_dir,
build=False,
host=False,
args=common_configure_args + [
'--prefix=%s/%s' % (build_root, self.install_arm_dir),
'--enable-cross-compile',
'--disable-asm',
'--target-os=darwin',
'--arch=arm64'])
autotools.make(args=['--quiet'])
autotools.make(target='install', args=['--quiet'])
with tools.chdir('%s/lib' % self.install_arm_dir):
VuoUtils.fixLibs(self.libs, self.deps_cpp_info)
def package(self):
import VuoUtils
tools.mkdir(self.install_universal_dir)
with tools.chdir(self.install_universal_dir):
for f in self.libs:
                self.run('lipo -create ../%s/lib/lib%s.dylib ../%s/lib/lib%s.dylib -output lib%s.dylib' % (self.install_x86_dir, f, self.install_arm_dir, f, f))
self.copy('*.h', src='%s/include' % self.install_x86_dir, dst='include')
if platform.system() == 'Darwin':
libext = 'dylib'
elif platform.system() == 'Linux':
libext = 'so'
else:
raise Exception('Unknown platform "%s"' % platform.system())
for f in list(self.libs.keys()):
self.copy('lib%s.%s' % (f, libext), src=self.install_universal_dir, dst='lib')
self.copy('%s.txt' % self.name, src=self.source_dir, dst='license')
def package_info(self):
self.cpp_info.libs = list(self.libs.keys())
| [
"conans.tools.replace_in_file",
"VuoUtils.fixLibs",
"os.getcwd",
"conans.tools.chdir",
"conans.tools.mkdir",
"conans.tools.get",
"conans.AutoToolsBuildEnvironment",
"platform.system",
"conans.tools.environment_append"
] | [((1464, 1632), 'conans.tools.get', 'tools.get', (["('http://www.ffmpeg.org/releases/ffmpeg-%s.tar.bz2' % self.source_version)"], {'sha256': '"""42093549751b582cf0f338a21a3664f52e0a9fbe0d238d3c992005e493607d0e"""'}), "('http://www.ffmpeg.org/releases/ffmpeg-%s.tar.bz2' % self.\n source_version, sha256=\n '42093549751b582cf0f338a21a3664f52e0a9fbe0d238d3c992005e493607d0e')\n", (1473, 1632), False, 'from conans import AutoToolsBuildEnvironment, ConanFile, tools\n'), ((2952, 2983), 'conans.AutoToolsBuildEnvironment', 'AutoToolsBuildEnvironment', (['self'], {}), '(self)\n', (2977, 2983), False, 'from conans import AutoToolsBuildEnvironment, ConanFile, tools\n'), ((8000, 8039), 'conans.tools.mkdir', 'tools.mkdir', (['self.install_universal_dir'], {}), '(self.install_universal_dir)\n', (8011, 8039), False, 'from conans import AutoToolsBuildEnvironment, ConanFile, tools\n'), ((1227, 1244), 'platform.system', 'platform.system', ([], {}), '()\n', (1242, 1244), False, 'import platform\n'), ((1819, 1836), 'platform.system', 'platform.system', ([], {}), '()\n', (1834, 1836), False, 'import platform\n'), ((2096, 2386), 'conans.tools.replace_in_file', 'tools.replace_in_file', (["('%s/configure' % self.source_dir)", '""" check_lib openssl openssl/ssl.h SSL_library_init -lssl -lcrypto ||"""', '""" check_lib openssl openssl/ssl.h OPENSSL_init_ssl -lssl -lcrypto -ldl -lpthread ||"""'], {}), "('%s/configure' % self.source_dir,\n ' check_lib openssl openssl/ssl.h SSL_library_init -lssl -lcrypto ||'\n ,\n ' check_lib openssl openssl/ssl.h OPENSSL_init_ssl -lssl -lcrypto -ldl -lpthread ||'\n )\n", (2117, 2386), False, 'from conans import AutoToolsBuildEnvironment, ConanFile, tools\n'), ((2463, 2738), 'conans.tools.replace_in_file', 'tools.replace_in_file', (["('%s/configure' % self.source_dir)", '""" check_lib openssl openssl/ssl.h SSL_library_init -lssl -lcrypto ||"""', '""" check_lib openssl openssl/ssl.h OPENSSL_init_ssl -lssl -lcrypto ||"""'], {}), "('%s/configure' % self.source_dir,\n ' check_lib openssl openssl/ssl.h SSL_library_init -lssl -lcrypto ||'\n ,\n ' check_lib openssl openssl/ssl.h OPENSSL_init_ssl -lssl -lcrypto ||'\n )\n", (2484, 2738), False, 'from conans import AutoToolsBuildEnvironment, ConanFile, tools\n'), ((3340, 3357), 'platform.system', 'platform.system', ([], {}), '()\n', (3355, 3357), False, 'import platform\n'), ((5460, 5494), 'conans.tools.environment_append', 'tools.environment_append', (['env_vars'], {}), '(env_vars)\n', (5484, 5494), False, 'from conans import AutoToolsBuildEnvironment, ConanFile, tools\n'), ((5521, 5532), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (5530, 5532), False, 'import os\n'), ((5603, 5634), 'conans.tools.mkdir', 'tools.mkdir', (['self.build_x86_dir'], {}), '(self.build_x86_dir)\n', (5614, 5634), False, 'from conans import AutoToolsBuildEnvironment, ConanFile, tools\n'), ((6510, 6541), 'conans.tools.mkdir', 'tools.mkdir', (['self.build_arm_dir'], {}), '(self.build_arm_dir)\n', (6521, 6541), False, 'from conans import AutoToolsBuildEnvironment, ConanFile, tools\n'), ((8053, 8092), 'conans.tools.chdir', 'tools.chdir', (['self.install_universal_dir'], {}), '(self.install_universal_dir)\n', (8064, 8092), False, 'from conans import AutoToolsBuildEnvironment, ConanFile, tools\n'), ((8382, 8399), 'platform.system', 'platform.system', ([], {}), '()\n', (8397, 8399), False, 'import platform\n'), ((1329, 1346), 'platform.system', 'platform.system', ([], {}), '()\n', (1344, 1346), False, 'import platform\n'), ((3875, 3892), 'platform.system', 'platform.system', ([], {}), '()\n', (3890, 3892), False, 'import platform\n'), ((5652, 5683), 'conans.tools.chdir', 'tools.chdir', (['self.build_x86_dir'], {}), '(self.build_x86_dir)\n', (5663, 5683), False, 'from conans import AutoToolsBuildEnvironment, ConanFile, tools\n'), ((6331, 6375), 'conans.tools.chdir', 'tools.chdir', (["('%s/lib' % self.install_x86_dir)"], {}), "('%s/lib' % self.install_x86_dir)\n", (6342, 6375), False, 'from conans import AutoToolsBuildEnvironment, ConanFile, tools\n'), ((6393, 6440), 'VuoUtils.fixLibs', 'VuoUtils.fixLibs', (['self.libs', 'self.deps_cpp_info'], {}), '(self.libs, self.deps_cpp_info)\n', (6409, 6440), False, 'import VuoUtils\n'), ((6559, 6590), 'conans.tools.chdir', 'tools.chdir', (['self.build_arm_dir'], {}), '(self.build_arm_dir)\n', (6570, 6590), False, 'from conans import AutoToolsBuildEnvironment, ConanFile, tools\n'), ((7833, 7877), 'conans.tools.chdir', 'tools.chdir', (["('%s/lib' % self.install_arm_dir)"], {}), "('%s/lib' % self.install_arm_dir)\n", (7844, 7877), False, 'from conans import AutoToolsBuildEnvironment, ConanFile, tools\n'), ((7895, 7942), 'VuoUtils.fixLibs', 'VuoUtils.fixLibs', (['self.libs', 'self.deps_cpp_info'], {}), '(self.libs, self.deps_cpp_info)\n', (7911, 7942), False, 'import VuoUtils\n'), ((8455, 8472), 'platform.system', 'platform.system', ([], {}), '()\n', (8470, 8472), False, 'import platform\n'), ((1414, 1431), 'platform.system', 'platform.system', ([], {}), '()\n', (1429, 1431), False, 'import platform\n'), ((8579, 8596), 'platform.system', 'platform.system', ([], {}), '()\n', (8594, 8596), False, 'import platform\n')]
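The package() step above stitches the per-architecture builds into universal (fat) dylibs with lipo -create; the merge can be verified with lipo -archs, which should list both slices. A standalone sketch with hypothetical paths (not part of the recipe):

import subprocess

# merge two single-architecture builds into one fat dylib (paths are hypothetical)
subprocess.run(['lipo', '-create', 'x86/libavcodec.dylib', 'arm/libavcodec.dylib',
                '-output', 'libavcodec.dylib'], check=True)
# confirm both slices made it in; prints e.g. "x86_64 arm64"
subprocess.run(['lipo', '-archs', 'libavcodec.dylib'], check=True)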
"""
@author: <NAME>
@contact: <EMAIL>
"""
from abc import ABCMeta, abstractmethod
from collections import defaultdict
from fastx_barber.const import FastxFormats, FlagData, FlagStatsType, QFLAG_START
from fastx_barber.match import ANPMatch
from fastx_barber.seqio import SimpleFastxRecord
import logging
import os
import pandas as pd # type: ignore
import regex as re # type: ignore
from rich.progress import track # type: ignore
from typing import Any, Dict, List, Match, Optional, Pattern, Tuple, Type, Union
class FlagStats(object):
__stats: FlagStatsType
_flags_for_stats: Optional[List[str]] = None
def __init__(self, flags_for_stats: Optional[List[str]] = None):
super(FlagStats, self).__init__()
self.__stats = defaultdict(lambda: defaultdict(lambda: 0))
self._flags_for_stats = flags_for_stats
def update(self, flags: Dict[str, FlagData]) -> None:
if self._flags_for_stats is None:
return
for flag_name, flag_data in flags.items():
if flag_name in self._flags_for_stats:
self.__stats[flag_name][flag_data[0]] += 1
def __getitem__(self, key):
return self.__stats[key]
def __setitem__(self, key, value):
self.__stats[key] = value
def keys(self):
return self.__stats.keys()
def values(self):
return self.__stats.values()
def items(self):
return self.__stats.items()
def get_dataframe(self, flag_name: str) -> pd.DataFrame:
stats = self.__stats[flag_name]
df = pd.DataFrame()
df["value"] = list(stats.keys())
df["counts"] = list(stats.values())
df["perc"] = round(df["counts"] / df["counts"].sum() * 100, 2)
df.sort_values("counts", ascending=False, ignore_index=True, inplace=True)
return df
def export(self, output_path: str, verbose: bool = True) -> None:
output_dir = os.path.dirname(output_path)
basename = os.path.basename(output_path)
if basename.endswith(".gz"):
basename = basename.split(".gz")[0]
basename = os.path.splitext(basename)[0]
if verbose:
flag_keys = track(self.keys(), description="Exporting flagstats")
else:
flag_keys = self.keys()
for flag_name in list(flag_keys):
self.get_dataframe(flag_name).to_csv(
os.path.join(output_dir, f"{basename}.{flag_name}.stats.tsv"),
sep="\t",
index=False,
)
class ABCFlagBase(metaclass=ABCMeta):
"""Class with basic flag-related variables
Extends:
metaclass=ABCMeta
Variables:
_flag_delim {str} -- flag delimiter
_comment_space {str} -- fastx comment separator
"""
_flag_delim: str = "~"
_comment_space: str = " "
def __init__(self):
super(ABCFlagBase, self).__init__()
@property
def flag_delim(self):
return self._flag_delim
@flag_delim.setter
def flag_delim(self, flag_delim: str):
assert 1 == len(flag_delim)
self._flag_delim = flag_delim
@property
def comment_space(self):
        return self._comment_space
@comment_space.setter
def comment_space(self, comment_space: str):
assert 1 == len(comment_space)
self._comment_space = comment_space
class ABCFlagExtractor(ABCFlagBase):
"""Flag extractor abstract base class
Extends:
ABCFlagBase
Variables:
_selected_flags {Optional[List[str]]} -- flags to extract
_flags_for_stats {Optional[List[str]]} -- list of flags for stats calculation
_flagstats {FlagStats} -- to contain flagstats generated by update_stats
"""
_selected_flags: Optional[List[str]] = None
_flagstats: FlagStats
def __init__(
self,
selected_flags: Optional[List[str]] = None,
flags_for_stats: Optional[List[str]] = None,
):
self._selected_flags = selected_flags
self._flagstats = FlagStats(flags_for_stats)
@property
def flagstats(self):
return self._flagstats
@abstractmethod
def extract_selected(
self, record: Any, match: Union[ANPMatch, Match, None]
) -> Dict[str, FlagData]:
"""Extract selected flags
Flags are selected according to self._selected_flags
Decorators:
abstractmethod
Arguments:
record {Any} -- record from where to extract flags
match {Match} -- results of matching the record to a flag pattern
Returns:
Dict[str, FlagData] -- a dictionary with flag name as key and data as value
"""
pass
@abstractmethod
def extract_all(
self, record: Any, match: Union[ANPMatch, Match, None]
) -> Dict[str, FlagData]:
"""Extract all flags
Decorators:
abstractmethod
Arguments:
record {Any} -- record from where to extract flags
match {Match} -- results of matching the record to a flag pattern
Returns:
Dict[str, FlagData] -- a dictionary with flag name as key and data as value
"""
pass
@abstractmethod
def update(self, record: Any, flag_data: Dict[str, FlagData]) -> Any:
"""Update record
Decorators:
abstractmethod
Arguments:
record {Any} -- record to update based on flags
flag_data {Dict[str, FlagData]} -- a dictionary with flag name as key
and data as value
Returns:
Any -- updated record.
"""
pass
def update_stats(self, flags: Dict[str, FlagData]) -> None:
self._flagstats.update(flags)
def apply_selection(self, flag_data: Dict[str, FlagData]) -> Dict[str, FlagData]:
"""Subselects provided flags.
According to self._selected_flags
Decorators:
abstractmethod
Arguments:
flag_data {Dict[str, FlagData]} -- a dictionary with flag name as key
and data as value
"""
if self._selected_flags is None:
return flag_data
else:
selected_flag_data = {}
for name in self._selected_flags:
if name in flag_data.keys():
selected_flag_data[name] = flag_data[name]
return selected_flag_data
class FastaFlagExtractor(ABCFlagExtractor):
def __init__(
self,
selected_flags: Optional[List[str]] = None,
flags_for_stats: Optional[List[str]] = None,
):
super(FastaFlagExtractor, self).__init__(selected_flags, flags_for_stats)
def extract_selected(
self, record: SimpleFastxRecord, match: Union[ANPMatch, Match, None]
) -> Dict[str, FlagData]:
assert match is not None
flag_data: Dict[str, FlagData] = {}
flag_data_all = self.extract_all(record, match)
if self._selected_flags is not None:
for flag, data in flag_data_all.items():
if flag in self._selected_flags:
flag_data[flag] = data
return flag_data
def extract_all(
self, record: SimpleFastxRecord, match: Union[ANPMatch, Match, None]
) -> Dict[str, FlagData]:
if match is None:
return {}
flag_data: Dict[str, FlagData] = {}
for gid in range(len(match.groups())):
flag = self.__extract_single_flag(match, gid)
flag_data.update([flag])
return flag_data
def __extract_single_flag(
self,
match: Union[ANPMatch, Match],
gid: int,
flag: Optional[Tuple[str, str]] = None,
) -> Tuple[str, FlagData]:
if flag is None:
flag = list(match.groupdict().items())[gid]
return (flag[0], (flag[1], match.start(gid + 1), match.end(gid + 1)))
def update(
self, record: SimpleFastxRecord, flag_data: Dict[str, FlagData]
) -> SimpleFastxRecord:
name, seq, _ = record
name_bits = name.split(self._comment_space)
for name, (flag, start, end) in flag_data.items():
name_bits[0] += f"{self._flag_delim}{self._flag_delim}{name}"
name_bits[0] += f"{self._flag_delim}{flag}"
        name = self._comment_space.join(name_bits)
return (name, seq, None)
class FastqFlagExtractor(FastaFlagExtractor):
extract_qual_flags: bool = True
def __init__(
self,
selected_flags: Optional[List[str]] = None,
flags_for_stats: Optional[List[str]] = None,
):
super(FastqFlagExtractor, self).__init__(selected_flags, flags_for_stats)
def extract_selected(
self, record: SimpleFastxRecord, match: Union[ANPMatch, Match, None]
) -> Dict[str, FlagData]:
assert match is not None
name, seq, qual = record
assert qual is not None
flag_data = super(FastqFlagExtractor, self).extract_selected(record, match)
if self.extract_qual_flags:
flag_data = self.__add_qual_flags(flag_data, qual)
return flag_data
def extract_all(
self, record: SimpleFastxRecord, match: Union[ANPMatch, Match, None]
) -> Dict[str, FlagData]:
assert match is not None
name, seq, qual = record
assert qual is not None
flag_data = super(FastqFlagExtractor, self).extract_all(record, match)
if self.extract_qual_flags:
flag_data = self.__add_qual_flags(flag_data, qual)
return flag_data
def __add_qual_flags(
self, flag_data: Dict[str, FlagData], qual: str
) -> Dict[str, FlagData]:
for name, (_, start, end) in list(flag_data.items()):
flag = (f"{QFLAG_START}{name}", (qual[slice(start, end)], start, end))
flag_data.update([flag])
return flag_data
def update(
self, record: SimpleFastxRecord, flag_data: Dict[str, FlagData]
) -> SimpleFastxRecord:
_, _, qual = record
name, seq, _ = super(FastqFlagExtractor, self).update(record, flag_data)
return (name, seq, qual)
def apply_selection(self, flag_data: Dict[str, FlagData]) -> Dict[str, FlagData]:
if self._selected_flags is None:
return flag_data
else:
selected_flag_data = super(FastqFlagExtractor, self).apply_selection(
flag_data
)
for name in self._selected_flags:
name = f"{QFLAG_START}{name}"
if name in flag_data.keys():
selected_flag_data[name] = flag_data[name]
return selected_flag_data
def get_fastx_flag_extractor(fmt: FastxFormats) -> Type[ABCFlagExtractor]:
"""Retrieves appropriate flag extractor class."""
if FastxFormats.FASTA == fmt:
return FastaFlagExtractor
elif FastxFormats.FASTQ == fmt:
return FastqFlagExtractor
else:
return ABCFlagExtractor
class ABCFlagReader(ABCFlagBase):
def __init__(self):
super(ABCFlagReader, self).__init__()
@abstractmethod
def read(self, record: Any) -> Optional[Dict[str, FlagData]]:
pass
class FastxFlagReader(ABCFlagReader):
_flagstats: FlagStats
def __init__(self, flags_for_stats: Optional[List[str]] = None):
super(FastxFlagReader, self).__init__()
self._flagstats = FlagStats(flags_for_stats)
@property
def flagstats(self):
return self._flagstats
def read(self, record: SimpleFastxRecord) -> Optional[Dict[str, FlagData]]:
header = record[0]
if self._comment_space in header:
header = header.split(self._comment_space)[0]
double_delim = f"{self._flag_delim}{self._flag_delim}"
if double_delim not in header:
return None
flag_data: Dict[str, FlagData] = {}
for flag in header.split(double_delim)[1:]:
if self._flag_delim not in flag:
continue
name, value = flag.split(self._flag_delim)[:2]
flag_data.update([(name, (value, -1, -1))])
self._flagstats.update(flag_data)
return flag_data
class FlagRegexes(object):
_flag_regex: Dict[str, str]
_flag_regex_compiled: Dict[str, Pattern]
def __init__(self, pattern_list: List[str]):
super(FlagRegexes, self).__init__()
self._flag_regex = {}
self.__init(pattern_list)
self._flag_regex_compiled = {}
self.__compile()
def __init(self, pattern_list: List[str]) -> Dict[str, str]:
self._flag_regex = {}
for pattern in pattern_list:
if "," not in pattern:
continue
exploded = pattern.split(",")
self._flag_regex[exploded[0]] = ",".join(exploded[1:])
return self._flag_regex
def __compile(self) -> None:
for name, regex in self._flag_regex.items():
self._flag_regex_compiled[name] = re.compile(regex)
def log(self) -> None:
logging.info("[bold underline red]Flag regex[/]")
for name, regex in self._flag_regex.items():
logging.info(f"{name}-regex\t{regex}")
def match(self, flags: Dict[str, FlagData]) -> bool:
for name, regex in self._flag_regex_compiled.items():
if name not in flags:
return False
match = re.match(regex, flags[name][0])
if match is None:
return False
return True
| [
"os.path.splitext",
"regex.match",
"os.path.join",
"os.path.dirname",
"collections.defaultdict",
"os.path.basename",
"pandas.DataFrame",
"logging.info",
"regex.compile"
] | [((1556, 1570), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (1568, 1570), True, 'import pandas as pd\n'), ((1920, 1948), 'os.path.dirname', 'os.path.dirname', (['output_path'], {}), '(output_path)\n', (1935, 1948), False, 'import os\n'), ((1968, 1997), 'os.path.basename', 'os.path.basename', (['output_path'], {}), '(output_path)\n', (1984, 1997), False, 'import os\n'), ((13062, 13111), 'logging.info', 'logging.info', (['"""[bold underline red]Flag regex[/]"""'], {}), "('[bold underline red]Flag regex[/]')\n", (13074, 13111), False, 'import logging\n'), ((2102, 2128), 'os.path.splitext', 'os.path.splitext', (['basename'], {}), '(basename)\n', (2118, 2128), False, 'import os\n'), ((13008, 13025), 'regex.compile', 're.compile', (['regex'], {}), '(regex)\n', (13018, 13025), True, 'import regex as re\n'), ((13177, 13215), 'logging.info', 'logging.info', (['f"""{name}-regex\t{regex}"""'], {}), "(f'{name}-regex\\t{regex}')\n", (13189, 13215), False, 'import logging\n'), ((13419, 13450), 'regex.match', 're.match', (['regex', 'flags[name][0]'], {}), '(regex, flags[name][0])\n', (13427, 13450), True, 'import regex as re\n'), ((774, 797), 'collections.defaultdict', 'defaultdict', (['(lambda : 0)'], {}), '(lambda : 0)\n', (785, 797), False, 'from collections import defaultdict\n'), ((2389, 2450), 'os.path.join', 'os.path.join', (['output_dir', 'f"""{basename}.{flag_name}.stats.tsv"""'], {}), "(output_dir, f'{basename}.{flag_name}.stats.tsv')\n", (2401, 2450), False, 'import os\n')] |
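The extractor/reader pair above round-trips flags through the read name: update() appends '~~name~value' tokens before the comment space, and read() splits on the doubled delimiter. A small sketch of that wire format; the flag names and values below are made up:

FLAG_DELIM = '~'

def encode(read_name, flags):
    # append '~~name~value' for each flag, mirroring update() above
    for name, value in flags.items():
        read_name += f'{FLAG_DELIM}{FLAG_DELIM}{name}{FLAG_DELIM}{value}'
    return read_name

def decode(header):
    # mirror read(): split on the doubled delimiter, then on the single one
    flags = {}
    for token in header.split(FLAG_DELIM * 2)[1:]:
        name, value = token.split(FLAG_DELIM)[:2]
        flags[name] = value
    return flags

header = encode('read1', {'umi': 'ACGT', 'bc': 'TTAA'})  # -> 'read1~~umi~ACGT~~bc~TTAA'
assert decode(header) == {'umi': 'ACGT', 'bc': 'TTAA'}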
from functools import wraps
from flask import redirect, url_for, flash
from flask_login import current_user
from winejournal.data_models.timestamp import TimeStampMixin
from winejournal.extensions import db
class Region(db.Model, TimeStampMixin):
__tablename__ = 'regions'
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(80), nullable=False)
description = db.Column(db.Text)
image = db.Column(db.String(250))
parent_id = db.Column(db.Integer)
country = db.Column(db.String(20), index=True)
state = db.Column(db.String(20), index=True)
owner = db.Column(db.Integer, db.ForeignKey('users.id'))
wine = db.relationship('Wine', backref=db.backref('wine_region',
lazy=True))
@property
def serialize(self):
return {
'id': self.id,
'name': self.name,
'description': self.description,
'image': self.image,
'parent_id': self.parent_id,
'country': self.country,
'state': self.state,
'created_on': self.created_on,
'updated_on': self.updated_on
}
def region_owner_required(f):
"""
Ensure a user is admin or the region owner,
if not redirect them to the regions list page page.
:return: Function
"""
@wraps(f)
def decorated_function(*args, **kwargs):
if current_user.is_admin():
return f(*args, **kwargs)
else:
region_id = kwargs['region_id']
cat = db.session.query(Region).get(region_id)
owner_id = cat.owner
if current_user.id != owner_id:
flash('You must be the owner to access that page')
return redirect(url_for('regions.list_regions'))
return f(*args, **kwargs)
return decorated_function
| [
"winejournal.extensions.db.backref",
"flask_login.current_user.is_admin",
"winejournal.extensions.db.String",
"flask.flash",
"winejournal.extensions.db.ForeignKey",
"functools.wraps",
"flask.url_for",
"winejournal.extensions.db.session.query",
"winejournal.extensions.db.Column"
] | [((291, 330), 'winejournal.extensions.db.Column', 'db.Column', (['db.Integer'], {'primary_key': '(True)'}), '(db.Integer, primary_key=True)\n', (300, 330), False, 'from winejournal.extensions import db\n'), ((401, 419), 'winejournal.extensions.db.Column', 'db.Column', (['db.Text'], {}), '(db.Text)\n', (410, 419), False, 'from winejournal.extensions import db\n'), ((474, 495), 'winejournal.extensions.db.Column', 'db.Column', (['db.Integer'], {}), '(db.Integer)\n', (483, 495), False, 'from winejournal.extensions import db\n'), ((1373, 1381), 'functools.wraps', 'wraps', (['f'], {}), '(f)\n', (1378, 1381), False, 'from functools import wraps\n'), ((352, 365), 'winejournal.extensions.db.String', 'db.String', (['(80)'], {}), '(80)\n', (361, 365), False, 'from winejournal.extensions import db\n'), ((442, 456), 'winejournal.extensions.db.String', 'db.String', (['(250)'], {}), '(250)\n', (451, 456), False, 'from winejournal.extensions import db\n'), ((520, 533), 'winejournal.extensions.db.String', 'db.String', (['(20)'], {}), '(20)\n', (529, 533), False, 'from winejournal.extensions import db\n'), ((569, 582), 'winejournal.extensions.db.String', 'db.String', (['(20)'], {}), '(20)\n', (578, 582), False, 'from winejournal.extensions import db\n'), ((630, 655), 'winejournal.extensions.db.ForeignKey', 'db.ForeignKey', (['"""users.id"""'], {}), "('users.id')\n", (643, 655), False, 'from winejournal.extensions import db\n'), ((1438, 1461), 'flask_login.current_user.is_admin', 'current_user.is_admin', ([], {}), '()\n', (1459, 1461), False, 'from flask_login import current_user\n'), ((701, 737), 'winejournal.extensions.db.backref', 'db.backref', (['"""wine_region"""'], {'lazy': '(True)'}), "('wine_region', lazy=True)\n", (711, 737), False, 'from winejournal.extensions import db\n'), ((1710, 1760), 'flask.flash', 'flash', (['"""You must be the owner to access that page"""'], {}), "('You must be the owner to access that page')\n", (1715, 1760), False, 'from flask import redirect, url_for, flash\n'), ((1577, 1601), 'winejournal.extensions.db.session.query', 'db.session.query', (['Region'], {}), '(Region)\n', (1593, 1601), False, 'from winejournal.extensions import db\n'), ((1793, 1824), 'flask.url_for', 'url_for', (['"""regions.list_regions"""'], {}), "('regions.list_regions')\n", (1800, 1824), False, 'from flask import redirect, url_for, flash\n')] |
import matplotlib.pyplot as plt
import seaborn as sns
def plot_metric_vs_missing_fraction(data, y, ylabel, ci, fpath, fname):
plt.figure(figsize=(8, 6))
ax = sns.lineplot(
data=data,
x="Missing Fraction",
y=y,
hue="Imputer",
style="Imputer",
ci=ci
)
ax.set(ylabel=ylabel)
ax.set_xticks(sorted(data["Missing Fraction"].unique()))
plt.legend(bbox_to_anchor=(1, 1), loc="upper left") # place legend in top right corner
fpath.mkdir(parents=True, exist_ok=True)
plt.savefig(fpath/fname)
def plot_rank_vs_fraction_by_type(data, ci, fpath, fname):
sns.relplot(
data=data,
x="Missing Fraction",
y="Imputation Rank",
hue="Imputer",
style="Imputer",
col="Missing Type",
row="metric",
kind="line",
height=5,
ci=ci,
col_order=["MCAR", "MAR", "MNAR"]
)
# ax.set_xticks(sorted(results["Missing Fraction"].unique()))
plt.gcf().subplots_adjust(bottom=0.15, left=0.05) # avoid x/ylabel cutoff in SVG export
fpath.mkdir(parents=True, exist_ok=True)
plt.savefig(fpath/fname)
def draw_cat_box_plot(data, y, ylim, fpath, fname, col_order=["MCAR", "MAR", "MNAR"], hue_order=None, row_order=None):
g = sns.catplot(
x="Missing Fraction",
y=y,
hue="Imputation Method",
col="Missing Type",
row="metric",
data=data,
kind="box",
height=4,
col_order=col_order,
row_order=row_order,
hue_order=hue_order,
margin_titles=True
)
g.set_titles(row_template="{row_name}", col_template="{col_name}", size=18).set(ylim=ylim)
plt.tight_layout(rect=(0, 0, 0.85, 1))
fpath.mkdir(parents=True, exist_ok=True)
plt.savefig(fpath/fname)
| [
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.gcf",
"seaborn.catplot",
"seaborn.lineplot",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.tight_layout",
"seaborn.relplot"
] | [((132, 158), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(8, 6)'}), '(figsize=(8, 6))\n', (142, 158), True, 'import matplotlib.pyplot as plt\n'), ((168, 262), 'seaborn.lineplot', 'sns.lineplot', ([], {'data': 'data', 'x': '"""Missing Fraction"""', 'y': 'y', 'hue': '"""Imputer"""', 'style': '"""Imputer"""', 'ci': 'ci'}), "(data=data, x='Missing Fraction', y=y, hue='Imputer', style=\n 'Imputer', ci=ci)\n", (180, 262), True, 'import seaborn as sns\n'), ((403, 454), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'bbox_to_anchor': '(1, 1)', 'loc': '"""upper left"""'}), "(bbox_to_anchor=(1, 1), loc='upper left')\n", (413, 454), True, 'import matplotlib.pyplot as plt\n'), ((540, 566), 'matplotlib.pyplot.savefig', 'plt.savefig', (['(fpath / fname)'], {}), '(fpath / fname)\n', (551, 566), True, 'import matplotlib.pyplot as plt\n'), ((630, 836), 'seaborn.relplot', 'sns.relplot', ([], {'data': 'data', 'x': '"""Missing Fraction"""', 'y': '"""Imputation Rank"""', 'hue': '"""Imputer"""', 'style': '"""Imputer"""', 'col': '"""Missing Type"""', 'row': '"""metric"""', 'kind': '"""line"""', 'height': '(5)', 'ci': 'ci', 'col_order': "['MCAR', 'MAR', 'MNAR']"}), "(data=data, x='Missing Fraction', y='Imputation Rank', hue=\n 'Imputer', style='Imputer', col='Missing Type', row='metric', kind=\n 'line', height=5, ci=ci, col_order=['MCAR', 'MAR', 'MNAR'])\n", (641, 836), True, 'import seaborn as sns\n'), ((1129, 1155), 'matplotlib.pyplot.savefig', 'plt.savefig', (['(fpath / fname)'], {}), '(fpath / fname)\n', (1140, 1155), True, 'import matplotlib.pyplot as plt\n'), ((1283, 1509), 'seaborn.catplot', 'sns.catplot', ([], {'x': '"""Missing Fraction"""', 'y': 'y', 'hue': '"""Imputation Method"""', 'col': '"""Missing Type"""', 'row': '"""metric"""', 'data': 'data', 'kind': '"""box"""', 'height': '(4)', 'col_order': 'col_order', 'row_order': 'row_order', 'hue_order': 'hue_order', 'margin_titles': '(True)'}), "(x='Missing Fraction', y=y, hue='Imputation Method', col=\n 'Missing Type', row='metric', data=data, kind='box', height=4,\n col_order=col_order, row_order=row_order, hue_order=hue_order,\n margin_titles=True)\n", (1294, 1509), True, 'import seaborn as sns\n'), ((1699, 1737), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {'rect': '(0, 0, 0.85, 1)'}), '(rect=(0, 0, 0.85, 1))\n', (1715, 1737), True, 'import matplotlib.pyplot as plt\n'), ((1787, 1813), 'matplotlib.pyplot.savefig', 'plt.savefig', (['(fpath / fname)'], {}), '(fpath / fname)\n', (1798, 1813), True, 'import matplotlib.pyplot as plt\n'), ((991, 1000), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (998, 1000), True, 'import matplotlib.pyplot as plt\n')] |
import numpy as np
import matplotlib.pyplot as plt
import pandas
import torch
from torch.distributions import Normal
import pk_example
out_ACE = pandas.read_csv('outputs/pk_SIG_tuning.csv', na_values='Inf')
out_ACE = out_ACE.fillna(np.inf)
##################
## EXECUTION TIMES
##################
time_pairs = out_ACE.groupby(['mc_size_gp'])['times']
mc_size, times = [x for x in zip(*time_pairs)]
plt.boxplot(times, whis=(0,100))
locs, _ = plt.xticks()
plt.xlabel("Monte Carlo sample size")
plt.ylabel("Run time (seconds)")
plt.xticks(ticks=locs, labels=mc_size)
plt.tight_layout()
plt.savefig('plots/pk_tuning_times.pdf')
#####################
## OUTPUT DESIGN PLOTS
#####################
##plt.ion() # Uncomment to see all exploratory plots
colnames = ['design_' + str(i+1) for i in range(15)]
ace_grouped = out_ACE.groupby(['mc_size_gp'])
for pars, dat in ace_grouped:
gp = pars
plt.figure()
ace_designs = dat[colnames].to_numpy()
ace_designs = np.sort(ace_designs, 1)
design_index = [i+1 for i in range(15)]
design_index = np.tile(design_index, ace_designs.shape[0])
plt.scatter(design_index, ace_designs, color='b', alpha=0.05)
plt.ylim([0.,24.])
plt.xlabel('Observation index')
plt.ylabel('Observation time')
plt.title('GP {}'.format(gp))
plt.tight_layout()
################################
## CODE TO ESTIMATE EXPECTED SIG
################################
fim = pk_example.PK_FIM(nsamples=100, multi=True)
def pk_mean(theta, design):
design = design.unsqueeze(1)
theta1 = theta[:, 0:1].unsqueeze(0)
theta2 = theta[:, 1:2].unsqueeze(0)
theta3 = theta[:, 2:3].unsqueeze(0)
x = 400. * theta2 * (torch.exp(-theta1*design) - torch.exp(-theta2*design)) / (theta3*(theta2-theta1))
return x
def estimate_SIG(design, n_outer=1000, n_inner=1000):
torch.manual_seed(0) ## Should reduce variability
if len(design.shape) == 1:
design = design.unsqueeze(0)
n_designs = design.shape[0]
noise_dist = Normal(loc=0., scale=0.1)
thetas_outer = fim.prior.sample((n_outer,))
thetas_inner = fim.prior.sample((n_inner,))
y_outer = pk_mean(thetas_outer, design) + noise_dist.sample((1, n_outer, 15))
## Same noise reused for all designs to reduce variability
x_inner = pk_mean(thetas_inner, design)
##y_outer and x_inner dimensions represent counts of:
##design, x or y replication, observation
## A simple but inefficient approach is:
## Outer loop over designs
## Middle loop over ys
## Inner loop over thetas
## Evaluate likelihood f(y | theta; design)
## Take mean of likelihoods to get evidence estimate
## Get SIG estimate: entropy minus mean of log evidences
## Return vector of SIG estimates
## Whole calculation could be parallelised, but I don't have enough memory
## So I iterate over designs
def est_SIG(design_count):
temp = y_outer[design_count,:,:].unsqueeze(1) \
- x_inner[design_count,:,:].unsqueeze(0)
## temp[i,j,k] is y_outer[design_count,i,k] - x_inner[design_count,j,k]
temp = noise_dist.log_prob(temp)
## temp[i,j,k] is log density of observing y_outer[design_count,i,k]
## given x_inner[design_count,j,k]
temp = torch.sum(temp, dim=2)
## temp[i,j] is log density of observing y_outer[design_count,i,:]
## given thetas_inner[j,:]
        temp = torch.logsumexp(temp, dim=1) - np.log(n_inner)
## temp[i] is log mean density of observing y_outer[design_count,i,:]
## i.e. log evidence estimate
return 15. * noise_dist.entropy() - torch.mean(temp)
## SIG estimate
return np.array([est_SIG(i).item() for i in range(n_designs)])
################################
## ESTIMATE EXPECTED SIG
################################
sig = []
for _, dat in ace_grouped:
ace_designs = dat[colnames].to_numpy()
ace_designs = torch.tensor(ace_designs)
ace_SIG = estimate_SIG(ace_designs)
sig += [ace_SIG]
plt.figure()
plt.boxplot(sig, whis=(0,100))
locs, _ = plt.xticks()
plt.xlabel("Monte Carlo sample size")
plt.ylabel("SIG objective estimate")
plt.xticks(ticks=locs, labels=mc_size)
plt.tight_layout()
plt.savefig('plots/pk_tuning_SIGs.pdf')
| [
"matplotlib.pyplot.boxplot",
"pandas.read_csv",
"matplotlib.pyplot.ylabel",
"numpy.log",
"torch.exp",
"torch.sum",
"torch.mean",
"matplotlib.pyplot.xlabel",
"numpy.sort",
"matplotlib.pyplot.scatter",
"matplotlib.pyplot.ylim",
"numpy.tile",
"matplotlib.pyplot.savefig",
"torch.distributions.Normal",
"matplotlib.pyplot.xticks",
"torch.manual_seed",
"torch.tensor",
"pk_example.PK_FIM",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.tight_layout",
"torch.logsumexp"
] | [((146, 207), 'pandas.read_csv', 'pandas.read_csv', (['"""outputs/pk_SIG_tuning.csv"""'], {'na_values': '"""Inf"""'}), "('outputs/pk_SIG_tuning.csv', na_values='Inf')\n", (161, 207), False, 'import pandas\n'), ((401, 434), 'matplotlib.pyplot.boxplot', 'plt.boxplot', (['times'], {'whis': '(0, 100)'}), '(times, whis=(0, 100))\n', (412, 434), True, 'import matplotlib.pyplot as plt\n'), ((444, 456), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {}), '()\n', (454, 456), True, 'import matplotlib.pyplot as plt\n'), ((457, 494), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Monte Carlo sample size"""'], {}), "('Monte Carlo sample size')\n", (467, 494), True, 'import matplotlib.pyplot as plt\n'), ((495, 527), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Run time (seconds)"""'], {}), "('Run time (seconds)')\n", (505, 527), True, 'import matplotlib.pyplot as plt\n'), ((528, 566), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'ticks': 'locs', 'labels': 'mc_size'}), '(ticks=locs, labels=mc_size)\n', (538, 566), True, 'import matplotlib.pyplot as plt\n'), ((567, 585), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (583, 585), True, 'import matplotlib.pyplot as plt\n'), ((586, 626), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""plots/pk_tuning_times.pdf"""'], {}), "('plots/pk_tuning_times.pdf')\n", (597, 626), True, 'import matplotlib.pyplot as plt\n'), ((1427, 1470), 'pk_example.PK_FIM', 'pk_example.PK_FIM', ([], {'nsamples': '(100)', 'multi': '(True)'}), '(nsamples=100, multi=True)\n', (1444, 1470), False, 'import pk_example\n'), ((3919, 3931), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (3929, 3931), True, 'import matplotlib.pyplot as plt\n'), ((3932, 3963), 'matplotlib.pyplot.boxplot', 'plt.boxplot', (['sig'], {'whis': '(0, 100)'}), '(sig, whis=(0, 100))\n', (3943, 3963), True, 'import matplotlib.pyplot as plt\n'), ((3973, 3985), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {}), '()\n', (3983, 3985), True, 'import matplotlib.pyplot as plt\n'), ((3986, 4023), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Monte Carlo sample size"""'], {}), "('Monte Carlo sample size')\n", (3996, 4023), True, 'import matplotlib.pyplot as plt\n'), ((4024, 4060), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""SIG objective estimate"""'], {}), "('SIG objective estimate')\n", (4034, 4060), True, 'import matplotlib.pyplot as plt\n'), ((4061, 4099), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'ticks': 'locs', 'labels': 'mc_size'}), '(ticks=locs, labels=mc_size)\n', (4071, 4099), True, 'import matplotlib.pyplot as plt\n'), ((4100, 4118), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (4116, 4118), True, 'import matplotlib.pyplot as plt\n'), ((4119, 4158), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""plots/pk_tuning_SIGs.pdf"""'], {}), "('plots/pk_tuning_SIGs.pdf')\n", (4130, 4158), True, 'import matplotlib.pyplot as plt\n'), ((898, 910), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (908, 910), True, 'import matplotlib.pyplot as plt\n'), ((972, 995), 'numpy.sort', 'np.sort', (['ace_designs', '(1)'], {}), '(ace_designs, 1)\n', (979, 995), True, 'import numpy as np\n'), ((1059, 1102), 'numpy.tile', 'np.tile', (['design_index', 'ace_designs.shape[0]'], {}), '(design_index, ace_designs.shape[0])\n', (1066, 1102), True, 'import numpy as np\n'), ((1107, 1168), 'matplotlib.pyplot.scatter', 'plt.scatter', (['design_index', 'ace_designs'], {'color': '"""b"""', 'alpha': '(0.05)'}), "plt.scatter(design_index, ace_designs, color='b', alpha=0.05)\n", (1118, 1168), True, 'import matplotlib.pyplot as plt\n'), ((1173, 1194), 'matplotlib.pyplot.ylim', 'plt.ylim', (['[0.0, 24.0]'], {}), '([0.0, 24.0])\n', (1181, 1194), True, 'import matplotlib.pyplot as plt\n'), ((1196, 1227), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Observation index"""'], {}), "('Observation index')\n", (1206, 1227), True, 'import matplotlib.pyplot as plt\n'), ((1232, 1262), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Observation time"""'], {}), "('Observation time')\n", (1242, 1262), True, 'import matplotlib.pyplot as plt\n'), ((1301, 1319), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (1317, 1319), True, 'import matplotlib.pyplot as plt\n'), ((1818, 1838), 'torch.manual_seed', 'torch.manual_seed', (['(0)'], {}), '(0)\n', (1835, 1838), False, 'import torch\n'), ((1981, 2007), 'torch.distributions.Normal', 'Normal', ([], {'loc': '(0.0)', 'scale': '(0.1)'}), '(loc=0.0, scale=0.1)\n', (1987, 2007), False, 'from torch.distributions import Normal\n'), ((3831, 3856), 'torch.tensor', 'torch.tensor', (['ace_designs'], {}), '(ace_designs)\n', (3843, 3856), False, 'import torch\n'), ((3204, 3226), 'torch.sum', 'torch.sum', (['temp'], {'dim': '(2)'}), '(temp, dim=2)\n', (3213, 3226), False, 'import torch\n'), ((3343, 3371), 'torch.logsumexp', 'torch.logsumexp', (['temp'], {'dim': '(1)'}), '(temp, dim=1)\n', (3358, 3371), False, 'import torch\n'), ((3374, 3389), 'numpy.log', 'np.log', (['n_outer'], {}), '(n_outer)\n', (3380, 3389), True, 'import numpy as np\n'), ((3539, 3555), 'torch.mean', 'torch.mean', (['temp'], {}), '(temp)\n', (3549, 3555), False, 'import torch\n'), ((1668, 1695), 'torch.exp', 'torch.exp', (['(-theta1 * design)'], {}), '(-theta1 * design)\n', (1677, 1695), False, 'import torch\n'), ((1696, 1723), 'torch.exp', 'torch.exp', (['(-theta2 * design)'], {}), '(-theta2 * design)\n', (1705, 1723), False, 'import torch\n')]
# -*- coding: utf-8 -*-
"""
Created on Sat Jul 25 16:38:52 2020
@author: kim3
"""
from gpiozero import LED, Button
from playsound import playsound
from threading import Thread
import RPi.GPIO as gpio
import random
import time
import datetime
import sys
collisionbut = Button(2) # Pin 3
fuck = Button(3) # Pin 5
led = LED(17) # Pin 11
# "power" led
led.on()
# prepare gpio for motors
# Ref: https://maker.pro/raspberry-pi/tutorial/how-to-control-a-dc-motor-with-an-l298-controller-and-raspberry-pi
mode = gpio.getmode()
gpio.cleanup()
lfor = 27 # Pin 13
rfor = 22 # Pin 15
lbak = 23 # Pin 16
rbak = 24 # Pin 18
gpio.setmode(gpio.BCM)  # the pin numbers above are BCM channels (physical pins noted in the comments)
gpio.setup(lfor, gpio.OUT)
gpio.setup(rfor, gpio.OUT)
gpio.setup(lbak, gpio.OUT)
gpio.setup(rbak, gpio.OUT)
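# Drive scheme assumed from the L298 reference above: each side has a forward
# and a backward input, and raising both inputs on one side brakes that motor,
# so the movement helpers below release their pins once a move completes.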
# initialize and logic for fuck button
def fuckbut():
    if fuck.is_pressed:
        print('button triggered. halting execution')
        gpio.cleanup()
        sys.exit()
    return True
# Function to head left
def lefton(maxrt):
    time.sleep(500 / 1000)
    print('turning left')
    stop = datetime.datetime.now() + maxrt
    # Turn Mykul left: left side backwards, right side forwards
    gpio.output(lbak, gpio.HIGH)
    gpio.output(rfor, gpio.HIGH)
    while datetime.datetime.now() < stop:
        pass
    # Release the pins so the motors stop at the end of the turn
    gpio.output(lbak, gpio.LOW)
    gpio.output(rfor, gpio.LOW)
# Function to head right
def righton(maxrt):
    time.sleep(500 / 1000)
    print('turning right')
    stop = datetime.datetime.now() + maxrt
    # Turn Mykul right: left side forwards, right side backwards
    gpio.output(lfor, gpio.HIGH)
    gpio.output(rbak, gpio.HIGH)
    while datetime.datetime.now() < stop:
        pass
    # Release the pins so the motors stop at the end of the turn
    gpio.output(lfor, gpio.LOW)
    gpio.output(rbak, gpio.LOW)
# Function to back dat ass up
def backwards(maxrt):
    time.sleep(500 / 1000)
    print('going backwards')
    stop = datetime.datetime.now() + maxrt
    # Drive both sides backwards
    gpio.output(lbak, gpio.HIGH)
    gpio.output(rbak, gpio.HIGH)
    while datetime.datetime.now() < stop:
        pass
    # Release the pins so the motors stop
    gpio.output(lbak, gpio.LOW)
    gpio.output(rbak, gpio.LOW)
# Function to move the robot forwards
# Move forwards until some event happens - this function doesn't handle collision logic
def forwards():
    time.sleep(500 / 1000)
    print('going forwards')
    gpio.output(lfor, gpio.HIGH)
    gpio.output(rfor, gpio.HIGH)
# Collision logic function
def collision():
# STOP all motors
gpio.output(lfor, gpio.LOW)
gpio.output(rfor, gpio.LOW)
# Ran into something - Mykul is in pain and suffering just like me
print('ah fuck i ran into something')
owwie()
    # Back up from the obstacle
print('gotta back up')
backwards(datetime.timedelta(seconds=0.5))
print('hold on, turning')
    actionList = [lefton, righton]
    random.choice(actionList)(datetime.timedelta(seconds=0.2))
# Function to play audio when Mykul bumps into something
def owwie():
owList = ['./audio/ow1.mp3','./audio/ow2.mp3','./audio/ow3.mp3','./audio/ow4.mp3','./audio/ow5.mp3','./audio/ow6.mp3','./audio/ow7.mp3','./audio/ow8.mp3']
playsound(random.choice(owList))
time.sleep(1)
def main():
runtime = True
fuckbutstat = fuckbut()
while (runtime == True and fuckbutstat == True):
fuckbutstat = fuckbut()
forwards()
if collisionbut.is_pressed:
collision()
if __name__ == "__main__":
main()
| [
"RPi.GPIO.cleanup",
"random.choice",
"RPi.GPIO.setup",
"RPi.GPIO.output",
"gpiozero.Button",
"gpiozero.LED",
"time.sleep",
"datetime.datetime.now",
"sys.exit",
"datetime.timedelta",
"RPi.GPIO.getmode",
"RPi.GPIO.setmode"
] | [((272, 281), 'gpiozero.Button', 'Button', (['(2)'], {}), '(2)\n', (278, 281), False, 'from gpiozero import LED, Button\n'), ((297, 306), 'gpiozero.Button', 'Button', (['(3)'], {}), '(3)\n', (303, 306), False, 'from gpiozero import LED, Button\n'), ((321, 328), 'gpiozero.LED', 'LED', (['(17)'], {}), '(17)\n', (324, 328), False, 'from gpiozero import LED, Button\n'), ((510, 524), 'RPi.GPIO.getmode', 'gpio.getmode', ([], {}), '()\n', (522, 524), True, 'import RPi.GPIO as gpio\n'), ((525, 539), 'RPi.GPIO.cleanup', 'gpio.cleanup', ([], {}), '()\n', (537, 539), True, 'import RPi.GPIO as gpio\n'), ((616, 640), 'RPi.GPIO.setmode', 'gpio.setmode', (['gpio.BOARD'], {}), '(gpio.BOARD)\n', (628, 640), True, 'import RPi.GPIO as gpio\n'), ((641, 667), 'RPi.GPIO.setup', 'gpio.setup', (['lfor', 'gpio.OUT'], {}), '(lfor, gpio.OUT)\n', (651, 667), True, 'import RPi.GPIO as gpio\n'), ((668, 694), 'RPi.GPIO.setup', 'gpio.setup', (['rfor', 'gpio.OUT'], {}), '(rfor, gpio.OUT)\n', (678, 694), True, 'import RPi.GPIO as gpio\n'), ((695, 721), 'RPi.GPIO.setup', 'gpio.setup', (['lbak', 'gpio.OUT'], {}), '(lbak, gpio.OUT)\n', (705, 721), True, 'import RPi.GPIO as gpio\n'), ((722, 748), 'RPi.GPIO.setup', 'gpio.setup', (['rbak', 'gpio.OUT'], {}), '(rbak, gpio.OUT)\n', (732, 748), True, 'import RPi.GPIO as gpio\n'), ((1023, 1045), 'time.sleep', 'time.sleep', (['(500 / 1000)'], {}), '(500 / 1000)\n', (1033, 1045), False, 'import time\n'), ((1403, 1425), 'time.sleep', 'time.sleep', (['(500 / 1000)'], {}), '(500 / 1000)\n', (1413, 1425), False, 'import time\n'), ((1795, 1817), 'time.sleep', 'time.sleep', (['(500 / 1000)'], {}), '(500 / 1000)\n', (1805, 1817), False, 'import time\n'), ((2294, 2316), 'time.sleep', 'time.sleep', (['(500 / 1000)'], {}), '(500 / 1000)\n', (2304, 2316), False, 'import time\n'), ((2532, 2559), 'RPi.GPIO.output', 'gpio.output', (['lfor', 'gpio.LOW'], {}), '(lfor, gpio.LOW)\n', (2543, 2559), True, 'import RPi.GPIO as gpio\n'), ((2564, 2591), 'RPi.GPIO.output', 'gpio.output', (['rfor', 'gpio.LOW'], {}), '(rfor, gpio.LOW)\n', (2575, 2591), True, 'import RPi.GPIO as gpio\n'), ((2953, 2978), 'random.choice', 'random.choice', (['actionList'], {}), '(actionList)\n', (2966, 2978), False, 'import random\n'), ((3250, 3263), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (3260, 3263), False, 'import time\n'), ((890, 904), 'RPi.GPIO.cleanup', 'gpio.cleanup', ([], {}), '()\n', (902, 904), True, 'import RPi.GPIO as gpio\n'), ((934, 944), 'sys.exit', 'sys.exit', ([], {}), '()\n', (942, 944), False, 'import sys\n'), ((1083, 1106), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (1104, 1106), False, 'import datetime\n'), ((1125, 1148), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (1146, 1148), False, 'import datetime\n'), ((1464, 1487), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (1485, 1487), False, 'import datetime\n'), ((1507, 1530), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (1528, 1530), False, 'import datetime\n'), ((1858, 1881), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (1879, 1881), False, 'import datetime\n'), ((1900, 1923), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (1921, 1923), False, 'import datetime\n'), ((2781, 2812), 'datetime.timedelta', 'datetime.timedelta', ([], {'seconds': '(0.5)'}), '(seconds=0.5)\n', (2799, 2812), False, 'import datetime\n'), ((3223, 3244), 'random.choice', 'random.choice', (['owList'], {}), '(owList)\n', (3236, 3244), False, 
'import random\n'), ((2365, 2393), 'RPi.GPIO.output', 'gpio.output', (['lfor', 'gpio.HIGH'], {}), '(lfor, gpio.HIGH)\n', (2376, 2393), True, 'import RPi.GPIO as gpio\n'), ((2423, 2451), 'RPi.GPIO.output', 'gpio.output', (['rfor', 'gpio.HIGH'], {}), '(rfor, gpio.HIGH)\n', (2434, 2451), True, 'import RPi.GPIO as gpio\n'), ((1253, 1281), 'RPi.GPIO.output', 'gpio.output', (['lbak', 'gpio.HIGH'], {}), '(lbak, gpio.HIGH)\n', (1264, 1281), True, 'import RPi.GPIO as gpio\n'), ((1315, 1343), 'RPi.GPIO.output', 'gpio.output', (['rfor', 'gpio.HIGH'], {}), '(rfor, gpio.HIGH)\n', (1326, 1343), True, 'import RPi.GPIO as gpio\n'), ((1638, 1666), 'RPi.GPIO.output', 'gpio.output', (['lfor', 'gpio.HIGH'], {}), '(lfor, gpio.HIGH)\n', (1649, 1666), True, 'import RPi.GPIO as gpio\n'), ((1700, 1728), 'RPi.GPIO.output', 'gpio.output', (['rbak', 'gpio.HIGH'], {}), '(rbak, gpio.HIGH)\n', (1711, 1728), True, 'import RPi.GPIO as gpio\n'), ((2047, 2075), 'RPi.GPIO.output', 'gpio.output', (['lbak', 'gpio.HIGH'], {}), '(lbak, gpio.HIGH)\n', (2058, 2075), True, 'import RPi.GPIO as gpio\n'), ((2109, 2137), 'RPi.GPIO.output', 'gpio.output', (['rbak', 'gpio.HIGH'], {}), '(rbak, gpio.HIGH)\n', (2120, 2137), True, 'import RPi.GPIO as gpio\n')] |
from django.shortcuts import render, redirect
from users.forms import RegistrationForm, ProfileRegisterForm
from django.contrib import messages
# Create your views here.
def register(request):
if request.method == 'POST':
form = RegistrationForm(request.POST)
p_reg_form = ProfileRegisterForm(request.POST)
if form.is_valid() and p_reg_form.is_valid():
user = form.save()
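            # refresh so the related profile (assumed to be created by a
            # post_save signal on User) is loaded as user.userprofile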
user.refresh_from_db()
p_reg_form = ProfileRegisterForm(request.POST, instance=user.userprofile)
p_reg_form.full_clean()
p_reg_form.save()
username = form.cleaned_data.get('username')
messages.success(request, f'Account created for username {username}!')
return redirect('login')
else:
form = RegistrationForm()
p_reg_form = ProfileRegisterForm()
    return render(request, 'users/register.html', {'form': form, 'p_reg_form': p_reg_form})
| [
"django.shortcuts.render",
"users.forms.RegistrationForm",
"django.shortcuts.redirect",
"users.forms.ProfileRegisterForm",
"django.contrib.messages.success"
] | [((787, 872), 'django.shortcuts.render', 'render', (['request', '"""users/register.html"""', "{'form': form, 'p_reg_form': p_reg_form}"], {}), "(request, 'users/register.html', {'form': form, 'p_reg_form': p_reg_form}\n )\n", (793, 872), False, 'from django.shortcuts import render, redirect\n'), ((242, 272), 'users.forms.RegistrationForm', 'RegistrationForm', (['request.POST'], {}), '(request.POST)\n', (258, 272), False, 'from users.forms import RegistrationForm, ProfileRegisterForm\n'), ((289, 322), 'users.forms.ProfileRegisterForm', 'ProfileRegisterForm', (['request.POST'], {}), '(request.POST)\n', (308, 322), False, 'from users.forms import RegistrationForm, ProfileRegisterForm\n'), ((721, 739), 'users.forms.RegistrationForm', 'RegistrationForm', ([], {}), '()\n', (737, 739), False, 'from users.forms import RegistrationForm, ProfileRegisterForm\n'), ((756, 777), 'users.forms.ProfileRegisterForm', 'ProfileRegisterForm', ([], {}), '()\n', (775, 777), False, 'from users.forms import RegistrationForm, ProfileRegisterForm\n'), ((439, 499), 'users.forms.ProfileRegisterForm', 'ProfileRegisterForm', (['request.POST'], {'instance': 'user.userprofile'}), '(request.POST, instance=user.userprofile)\n', (458, 499), False, 'from users.forms import RegistrationForm, ProfileRegisterForm\n'), ((603, 673), 'django.contrib.messages.success', 'messages.success', (['request', 'f"""Account created for username {username}!"""'], {}), "(request, f'Account created for username {username}!')\n", (619, 673), False, 'from django.contrib import messages\n'), ((685, 702), 'django.shortcuts.redirect', 'redirect', (['"""login"""'], {}), "('login')\n", (693, 702), False, 'from django.shortcuts import render, redirect\n')] |
## simple example python program
## counts number of lines in file
## single argument is path to file
## reports input file path and number of lines to stdout
import sys
if len(sys.argv) != 2:
sys.stderr.write(f"Usage: python {sys.argv[0]} file_in.txt\n")
sys.exit(3)
file_in = sys.argv[1]
print(f"python file_in: '{file_in}'")
nlines = 0
try:
with open(file_in, 'r') as fh_in:
for line in fh_in:
nlines += 1
except Exception as e:
sys.stderr.write(f"error reading '{file_in}': {e}\n")
sys.exit(11)
print(f"python nlines: {nlines}")
sys.exit(0)
| [
"sys.stderr.write",
"sys.exit"
] | [((585, 596), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (593, 596), False, 'import sys\n'), ((205, 267), 'sys.stderr.write', 'sys.stderr.write', (['f"""Usage: python {sys.argv[0]} file_in.txt\n"""'], {}), "(f'Usage: python {sys.argv[0]} file_in.txt\\n')\n", (221, 267), False, 'import sys\n'), ((272, 283), 'sys.exit', 'sys.exit', (['(3)'], {}), '(3)\n', (280, 283), False, 'import sys\n'), ((479, 532), 'sys.stderr.write', 'sys.stderr.write', (['f"""error reading \'{file_in}\': {e}\n"""'], {}), '(f"error reading \'{file_in}\': {e}\\n")\n', (495, 532), False, 'import sys\n'), ((537, 549), 'sys.exit', 'sys.exit', (['(11)'], {}), '(11)\n', (545, 549), False, 'import sys\n')] |
import codecs
import json
import os
import random
import asyncio
import re
from cloudbot import hook
from cloudbot.util import textgen
nick_re = re.compile(r"^[A-Za-z0-9_|.\-\]\[\{\}]*$", re.I)
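# i.e. letters, digits, and the nick punctuation _ | . - [ ] { }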
def is_valid(target):
""" Checks if a string is a valid IRC nick. """
if nick_re.match(target):
return True
else:
return False
def is_self(conn, target):
""" Checks if a string is "****self" or contains conn.name. """
if re.search("(^..?.?.?self|{})".format(re.escape(conn.nick)), target, re.I):
return True
else:
return False
@hook.on_start()
def load_attacks(bot):
"""
:type bot: cloudbot.bot.CloudBot
"""
global larts, flirts, kills, slaps
with codecs.open(os.path.join(bot.data_dir, "larts.txt"), encoding="utf-8") as f:
larts = [line.strip() for line in f.readlines() if not line.startswith("//")]
with codecs.open(os.path.join(bot.data_dir, "flirts.txt"), encoding="utf-8") as f:
flirts = [line.strip() for line in f.readlines() if not line.startswith("//")]
with codecs.open(os.path.join(bot.data_dir, "kills.json"), encoding="utf-8") as f:
kills = json.load(f)
with codecs.open(os.path.join(bot.data_dir, "slaps.json"), encoding="utf-8") as f:
slaps = json.load(f)
@asyncio.coroutine
@hook.command
def lart(text, conn, nick, action):
"""<user> - LARTs <user>"""
target = text.strip()
if not is_valid(target):
return "I can't attack that."
if is_self(conn, target):
# user is trying to make the bot attack itself!
target = nick
phrase = random.choice(larts)
# act out the message
action(phrase.format(user=target))
@asyncio.coroutine
@hook.command
def flirt(text, conn, nick, message):
"""<user> - flirts with <user>"""
target = text.strip()
if not is_valid(target):
return "I can't attack that."
if is_self(conn, target):
# user is trying to make the bot attack itself!
target = nick
message('{}, {}'.format(target, random.choice(flirts)))
@asyncio.coroutine
@hook.command
def kill(text, conn, nick, action):
"""<user> - kills <user>"""
target = text.strip()
if not is_valid(target):
return "I can't attack that."
if is_self(conn, target):
# user is trying to make the bot attack itself!
target = nick
generator = textgen.TextGenerator(kills["templates"], kills["parts"], variables={"user": target})
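    # TextGenerator is assumed to pick a random template from kills.json and
    # fill in its parts, substituting the {user} variable with the target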
# act out the message
action(generator.generate_string())
@asyncio.coroutine
@hook.command
def slap(text, action, nick, conn):
"""<user> -- Makes the bot slap <user>."""
target = text.strip()
if not is_valid(target):
return "I can't attack that."
if is_self(conn, target):
# user is trying to make the bot attack itself!
target = nick
variables = {
"user": target
}
generator = textgen.TextGenerator(slaps["templates"], slaps["parts"], variables=variables)
# act out the message
action(generator.generate_string())
| [
"re.escape",
"random.choice",
"cloudbot.util.textgen.TextGenerator",
"re.compile",
"os.path.join",
"cloudbot.hook.on_start",
"json.load"
] | [((147, 199), 're.compile', 're.compile', (['"""^[A-Za-z0-9_|.\\\\-\\\\]\\\\[\\\\{\\\\}]*$"""', 're.I'], {}), "('^[A-Za-z0-9_|.\\\\-\\\\]\\\\[\\\\{\\\\}]*$', re.I)\n", (157, 199), False, 'import re\n'), ((585, 600), 'cloudbot.hook.on_start', 'hook.on_start', ([], {}), '()\n', (598, 600), False, 'from cloudbot import hook\n'), ((1618, 1638), 'random.choice', 'random.choice', (['larts'], {}), '(larts)\n', (1631, 1638), False, 'import random\n'), ((2403, 2492), 'cloudbot.util.textgen.TextGenerator', 'textgen.TextGenerator', (["kills['templates']", "kills['parts']"], {'variables': "{'user': target}"}), "(kills['templates'], kills['parts'], variables={'user':\n target})\n", (2424, 2492), False, 'from cloudbot.util import textgen\n'), ((2941, 3019), 'cloudbot.util.textgen.TextGenerator', 'textgen.TextGenerator', (["slaps['templates']", "slaps['parts']"], {'variables': 'variables'}), "(slaps['templates'], slaps['parts'], variables=variables)\n", (2962, 3019), False, 'from cloudbot.util import textgen\n'), ((1168, 1180), 'json.load', 'json.load', (['f'], {}), '(f)\n', (1177, 1180), False, 'import json\n'), ((1285, 1297), 'json.load', 'json.load', (['f'], {}), '(f)\n', (1294, 1297), False, 'import json\n'), ((493, 513), 're.escape', 're.escape', (['conn.nick'], {}), '(conn.nick)\n', (502, 513), False, 'import re\n'), ((738, 777), 'os.path.join', 'os.path.join', (['bot.data_dir', '"""larts.txt"""'], {}), "(bot.data_dir, 'larts.txt')\n", (750, 777), False, 'import os\n'), ((911, 951), 'os.path.join', 'os.path.join', (['bot.data_dir', '"""flirts.txt"""'], {}), "(bot.data_dir, 'flirts.txt')\n", (923, 951), False, 'import os\n'), ((1086, 1126), 'os.path.join', 'os.path.join', (['bot.data_dir', '"""kills.json"""'], {}), "(bot.data_dir, 'kills.json')\n", (1098, 1126), False, 'import os\n'), ((1203, 1243), 'os.path.join', 'os.path.join', (['bot.data_dir', '"""slaps.json"""'], {}), "(bot.data_dir, 'slaps.json')\n", (1215, 1243), False, 'import os\n'), ((2056, 2077), 'random.choice', 'random.choice', (['flirts'], {}), '(flirts)\n', (2069, 2077), False, 'import random\n')] |
#!/usr/bin/env python3
PKG = 'lg_common'
NAME = 'test_combine_viewport_geometries'
import rospy
import unittest
from lg_common import ManagedWindow
from lg_common.helpers import combine_viewport_geometries
class TestCombineViewportGeometries(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def _assert_equal_geometries(self, a, b):
self.assertEqual(a.x, b.x)
self.assertEqual(a.y, b.y)
self.assertEqual(a.width, b.width)
self.assertEqual(a.height, b.height)
def test_combine_all_geometries(self):
geometry_names = ['touchscreen', 'left_one', 'center', 'right_one']
combined_geometry = combine_viewport_geometries(geometry_names)
expected_geometry = ManagedWindow.lookup_viewport_geometry('expected_all')
self._assert_equal_geometries(expected_geometry, combined_geometry)
def test_combine_portrait_geometries(self):
geometry_names = ['left_one', 'center', 'right_one']
combined_geometry = combine_viewport_geometries(geometry_names)
expected_geometry = ManagedWindow.lookup_viewport_geometry('expected_portraits')
self._assert_equal_geometries(expected_geometry, combined_geometry)
if __name__ == '__main__':
import rostest
rospy.init_node(NAME)
rostest.rosrun(PKG, NAME, TestCombineViewportGeometries)
# vim: tabstop=8 expandtab shiftwidth=4 softtabstop=4
| [
"lg_common.helpers.combine_viewport_geometries",
"rospy.init_node",
"rostest.rosrun",
"lg_common.ManagedWindow.lookup_viewport_geometry"
] | [((1296, 1317), 'rospy.init_node', 'rospy.init_node', (['NAME'], {}), '(NAME)\n', (1311, 1317), False, 'import rospy\n'), ((1322, 1378), 'rostest.rosrun', 'rostest.rosrun', (['PKG', 'NAME', 'TestCombineViewportGeometries'], {}), '(PKG, NAME, TestCombineViewportGeometries)\n', (1336, 1378), False, 'import rostest\n'), ((690, 733), 'lg_common.helpers.combine_viewport_geometries', 'combine_viewport_geometries', (['geometry_names'], {}), '(geometry_names)\n', (717, 733), False, 'from lg_common.helpers import combine_viewport_geometries\n'), ((763, 817), 'lg_common.ManagedWindow.lookup_viewport_geometry', 'ManagedWindow.lookup_viewport_geometry', (['"""expected_all"""'], {}), "('expected_all')\n", (801, 817), False, 'from lg_common import ManagedWindow\n'), ((1033, 1076), 'lg_common.helpers.combine_viewport_geometries', 'combine_viewport_geometries', (['geometry_names'], {}), '(geometry_names)\n', (1060, 1076), False, 'from lg_common.helpers import combine_viewport_geometries\n'), ((1106, 1166), 'lg_common.ManagedWindow.lookup_viewport_geometry', 'ManagedWindow.lookup_viewport_geometry', (['"""expected_portraits"""'], {}), "('expected_portraits')\n", (1144, 1166), False, 'from lg_common import ManagedWindow\n')] |
import logging
# Create the logger
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
# Create the handlers
# Console handler
consoleHandler = logging.StreamHandler()
consoleHandler.setLevel(logging.DEBUG)
# File handler
fileHandler = logging.FileHandler('log.log', mode='w', encoding='UTF-8')
fileHandler.setLevel(logging.NOTSET)
# Formatter
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
consoleHandler.setFormatter(formatter)
fileHandler.setFormatter(formatter)
# Attach the handlers to the logger
logger.addHandler(consoleHandler)
logger.addHandler(fileHandler)
# Emit log messages
print('-------------- logging demo --------------')
logger.debug('debug message')
logger.info('info message')
logger.warning('warn message')
logger.error('error message')
logger.critical('critical message')
logger.debug('%s is a custom message' % 'this stuff')
| [
"logging.getLogger",
"logging.Formatter",
"logging.StreamHandler",
"logging.FileHandler"
] | [((36, 55), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (53, 55), False, 'import logging\n'), ((130, 153), 'logging.StreamHandler', 'logging.StreamHandler', ([], {}), '()\n', (151, 153), False, 'import logging\n'), ((220, 278), 'logging.FileHandler', 'logging.FileHandler', (['"""log.log"""'], {'mode': '"""w"""', 'encoding': '"""UTF-8"""'}), "('log.log', mode='w', encoding='UTF-8')\n", (239, 278), False, 'import logging\n'), ((341, 414), 'logging.Formatter', 'logging.Formatter', (['"""%(asctime)s - %(name)s - %(levelname)s - %(message)s"""'], {}), "('%(asctime)s - %(name)s - %(levelname)s - %(message)s')\n", (358, 414), False, 'import logging\n')] |
from __future__ import annotations
import sys
from modules.command.definition import (
Command,
Feature,
default_cases_for_gen,
default_cases_for_test,
default_prefix,
default_show_progress,
default_suffix,
default_time_limit,
default_is_verification
)
from modules.utility.colorizer import Color, colorize
from modules.utility.exit_failure import exit_failure
from modules.utility.printer import error, usage, warning
def parse_command(args: list[str]) -> Command:
result = Command()
skip = 0
if len(args) < 2:
error('The command line arguments are not sufficient.')
usage()
exit_failure()
if args[1] == 'gen':
result.feature = Feature.GENERATE
skip = 2
elif args[1] == 'test':
if len(args) == 2:
error('You need to provide program(s) to test.')
usage()
exit_failure()
elif (len(args) == 3) or args[3] in ('-i', '--input', '-p', '--prefix', '-s', '--suffix', '-c', '--cases',
'-t', '--time-limit', '-n', '--no-progress-bar', '--unit-test'):
result.feature = Feature.TEST_SINGLE
result.program_1 = args[2]
skip = 3
else:
result.feature = Feature.TEST_DOUBLE
result.program_1 = args[2]
result.program_2 = args[3]
skip = 4
else:
error('Only {} or {} is accepted as the first command line argument.'.format(
colorize(Color.CODE, 'gen'),
colorize(Color.CODE, 'test')
))
usage()
exit_failure()
for i in range(len(args)):
if skip > 0:
skip -= 1
continue
if args[i] in ('-i', '--input'):
if hasattr(result, 'source'):
                error('More than one input file ({} or {}) was provided.'.format(
colorize(Color.CODE, '-i'),
colorize(Color.CODE, '--input')
))
usage()
exit_failure()
elif i == len(args) - 1:
error('The input file is missing.')
usage()
exit_failure()
else:
try:
result.source = open(args[i + 1], encoding='utf-8')
except:
error(f'Failed to open {colorize(Color.CODE, args[i + 1])}.')
exit_failure()
skip = 1
elif args[i] in ('-p', '--prefix'):
if hasattr(result, 'prefix'):
                error('More than one prefix ({} or {}) was provided.'.format(
colorize(Color.CODE, '-p'),
colorize(Color.CODE, '--prefix')
))
usage()
exit_failure()
elif i == len(args) - 1:
error('The prefix is missing.')
usage()
exit_failure()
else:
result.prefix = args[i + 1]
skip = 1
elif args[i] in ('-s', '--suffix'):
if hasattr(result, 'suffix'):
                error('More than one suffix ({} or {}) was provided.'.format(
colorize(Color.CODE, '-s'),
colorize(Color.CODE, '--suffix')
))
usage()
exit_failure()
elif i == len(args) - 1:
error('The suffix is missing.')
usage()
exit_failure()
else:
result.suffix = args[i + 1]
skip = 1
elif args[i] in ('-c', '--cases'):
if hasattr(result, 'cases'):
                error('The number of test cases ({} or {}) was provided more than once.'.format(
colorize(Color.CODE, '-c'),
colorize(Color.CODE, '--cases')
))
usage()
exit_failure()
elif i == len(args) - 1:
error('The number of test cases is missing.')
usage()
exit_failure()
else:
try:
result.cases = int(args[i + 1])
except:
error(f'The number of test cases (= {colorize(Color.CODE, args[i + 1])})'
' does not seem to be an integer.')
usage()
exit_failure()
skip = 1
elif args[i] in ('-t', '--time-limit'):
if hasattr(result, 'time_limit'):
                error('More than one time limit ({} or {}) was provided.'.format(
colorize(Color.CODE, '-t'),
colorize(Color.CODE, '--time-limit')
))
usage()
exit_failure()
elif i == len(args) - 1:
error('The time limit is missing.')
usage()
exit_failure()
else:
try:
result.time_limit = float(args[i + 1])
except:
error(f'The time limit (= {colorize(Color.CODE, args[i + 1])}) does not seem to be a number.')
usage()
exit_failure()
if result.feature == Feature.GENERATE:
warning('The argument {} is ignored since it is unnecessary for {} command.'.format(
colorize(Color.CODE, f'{args[i]} {args[i + 1]}'),
colorize(Color.CODE, 'gen')
))
skip = 1
elif args[i] in ('-n', '--no-progress-bar'):
if hasattr(result, 'show_progress'):
warning('{} or {} are provided more than once.'.format(
colorize(Color.CODE, '-n'),
colorize(Color.CODE, '--no-progress-bar')
))
result.show_progress = False
elif args[i] == '--verify':
if hasattr(result, 'is_verification'):
warning(f'{colorize(Color.CODE, "--verify")} is provided more than once.')
result.is_verification = True
else:
error(f'The argument {colorize(Color.CODE, args[i])} is unknown.')
usage()
exit_failure()
# default settings
if not hasattr(result, 'source'):
result.source = sys.stdin
if not hasattr(result, 'prefix'):
result.prefix = default_prefix
if not hasattr(result, 'suffix'):
result.suffix = default_suffix
if not hasattr(result, 'cases'):
result.cases = (default_cases_for_gen if (result.feature == Feature.GENERATE) else default_cases_for_test)
if not hasattr(result, 'time_limit'):
result.time_limit = default_time_limit
if not hasattr(result, 'show_progress'):
result.show_progress = default_show_progress
if not hasattr(result, 'is_verification'):
result.is_verification = default_is_verification
return result
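# Hypothetical example: parse_command(['main.py', 'test', './fast', './brute',
# '-i', 'cases.txt', '-c', '100', '-t', '2.0']) yields Feature.TEST_DOUBLE with
# cases=100, a 2.0-second time limit, and cases.txt opened as the input source.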
| [
"modules.utility.exit_failure.exit_failure",
"modules.utility.printer.error",
"modules.utility.printer.usage",
"modules.command.definition.Command",
"modules.utility.colorizer.colorize"
] | [((519, 528), 'modules.command.definition.Command', 'Command', ([], {}), '()\n', (526, 528), False, 'from modules.command.definition import Command, Feature, default_cases_for_gen, default_cases_for_test, default_prefix, default_show_progress, default_suffix, default_time_limit, default_is_verification\n'), ((573, 628), 'modules.utility.printer.error', 'error', (['"""The command line arguments are not sufficient."""'], {}), "('The command line arguments are not sufficient.')\n", (578, 628), False, 'from modules.utility.printer import error, usage, warning\n'), ((637, 644), 'modules.utility.printer.usage', 'usage', ([], {}), '()\n', (642, 644), False, 'from modules.utility.printer import error, usage, warning\n'), ((653, 667), 'modules.utility.exit_failure.exit_failure', 'exit_failure', ([], {}), '()\n', (665, 667), False, 'from modules.utility.exit_failure import exit_failure\n'), ((1609, 1616), 'modules.utility.printer.usage', 'usage', ([], {}), '()\n', (1614, 1616), False, 'from modules.utility.printer import error, usage, warning\n'), ((1625, 1639), 'modules.utility.exit_failure.exit_failure', 'exit_failure', ([], {}), '()\n', (1637, 1639), False, 'from modules.utility.exit_failure import exit_failure\n'), ((820, 868), 'modules.utility.printer.error', 'error', (['"""You need to provide program(s) to test."""'], {}), "('You need to provide program(s) to test.')\n", (825, 868), False, 'from modules.utility.printer import error, usage, warning\n'), ((881, 888), 'modules.utility.printer.usage', 'usage', ([], {}), '()\n', (886, 888), False, 'from modules.utility.printer import error, usage, warning\n'), ((901, 915), 'modules.utility.exit_failure.exit_failure', 'exit_failure', ([], {}), '()\n', (913, 915), False, 'from modules.utility.exit_failure import exit_failure\n'), ((2038, 2045), 'modules.utility.printer.usage', 'usage', ([], {}), '()\n', (2043, 2045), False, 'from modules.utility.printer import error, usage, warning\n'), ((2062, 2076), 'modules.utility.exit_failure.exit_failure', 'exit_failure', ([], {}), '()\n', (2074, 2076), False, 'from modules.utility.exit_failure import exit_failure\n'), ((1520, 1547), 'modules.utility.colorizer.colorize', 'colorize', (['Color.CODE', '"""gen"""'], {}), "(Color.CODE, 'gen')\n", (1528, 1547), False, 'from modules.utility.colorizer import Color, colorize\n'), ((1561, 1589), 'modules.utility.colorizer.colorize', 'colorize', (['Color.CODE', '"""test"""'], {}), "(Color.CODE, 'test')\n", (1569, 1589), False, 'from modules.utility.colorizer import Color, colorize\n'), ((2130, 2165), 'modules.utility.printer.error', 'error', (['"""The input file is missing."""'], {}), "('The input file is missing.')\n", (2135, 2165), False, 'from modules.utility.printer import error, usage, warning\n'), ((2182, 2189), 'modules.utility.printer.usage', 'usage', ([], {}), '()\n', (2187, 2189), False, 'from modules.utility.printer import error, usage, warning\n'), ((2206, 2220), 'modules.utility.exit_failure.exit_failure', 'exit_failure', ([], {}), '()\n', (2218, 2220), False, 'from modules.utility.exit_failure import exit_failure\n'), ((2798, 2805), 'modules.utility.printer.usage', 'usage', ([], {}), '()\n', (2803, 2805), False, 'from modules.utility.printer import error, usage, warning\n'), ((2822, 2836), 'modules.utility.exit_failure.exit_failure', 'exit_failure', ([], {}), '()\n', (2834, 2836), False, 'from modules.utility.exit_failure import exit_failure\n'), ((1923, 1949), 'modules.utility.colorizer.colorize', 'colorize', (['Color.CODE', '"""-i"""'], {}), 
"(Color.CODE, '-i')\n", (1931, 1949), False, 'from modules.utility.colorizer import Color, colorize\n'), ((1971, 2002), 'modules.utility.colorizer.colorize', 'colorize', (['Color.CODE', '"""--input"""'], {}), "(Color.CODE, '--input')\n", (1979, 2002), False, 'from modules.utility.colorizer import Color, colorize\n'), ((2890, 2921), 'modules.utility.printer.error', 'error', (['"""The prefix is missing."""'], {}), "('The prefix is missing.')\n", (2895, 2921), False, 'from modules.utility.printer import error, usage, warning\n'), ((2938, 2945), 'modules.utility.printer.usage', 'usage', ([], {}), '()\n', (2943, 2945), False, 'from modules.utility.printer import error, usage, warning\n'), ((2962, 2976), 'modules.utility.exit_failure.exit_failure', 'exit_failure', ([], {}), '()\n', (2974, 2976), False, 'from modules.utility.exit_failure import exit_failure\n'), ((3364, 3371), 'modules.utility.printer.usage', 'usage', ([], {}), '()\n', (3369, 3371), False, 'from modules.utility.printer import error, usage, warning\n'), ((3388, 3402), 'modules.utility.exit_failure.exit_failure', 'exit_failure', ([], {}), '()\n', (3400, 3402), False, 'from modules.utility.exit_failure import exit_failure\n'), ((2458, 2472), 'modules.utility.exit_failure.exit_failure', 'exit_failure', ([], {}), '()\n', (2470, 2472), False, 'from modules.utility.exit_failure import exit_failure\n'), ((2682, 2708), 'modules.utility.colorizer.colorize', 'colorize', (['Color.CODE', '"""-p"""'], {}), "(Color.CODE, '-p')\n", (2690, 2708), False, 'from modules.utility.colorizer import Color, colorize\n'), ((2730, 2762), 'modules.utility.colorizer.colorize', 'colorize', (['Color.CODE', '"""--prefix"""'], {}), "(Color.CODE, '--prefix')\n", (2738, 2762), False, 'from modules.utility.colorizer import Color, colorize\n'), ((3456, 3487), 'modules.utility.printer.error', 'error', (['"""The suffix is missing."""'], {}), "('The suffix is missing.')\n", (3461, 3487), False, 'from modules.utility.printer import error, usage, warning\n'), ((3504, 3511), 'modules.utility.printer.usage', 'usage', ([], {}), '()\n', (3509, 3511), False, 'from modules.utility.printer import error, usage, warning\n'), ((3528, 3542), 'modules.utility.exit_failure.exit_failure', 'exit_failure', ([], {}), '()\n', (3540, 3542), False, 'from modules.utility.exit_failure import exit_failure\n'), ((3940, 3947), 'modules.utility.printer.usage', 'usage', ([], {}), '()\n', (3945, 3947), False, 'from modules.utility.printer import error, usage, warning\n'), ((3964, 3978), 'modules.utility.exit_failure.exit_failure', 'exit_failure', ([], {}), '()\n', (3976, 3978), False, 'from modules.utility.exit_failure import exit_failure\n'), ((3248, 3274), 'modules.utility.colorizer.colorize', 'colorize', (['Color.CODE', '"""-s"""'], {}), "(Color.CODE, '-s')\n", (3256, 3274), False, 'from modules.utility.colorizer import Color, colorize\n'), ((3296, 3328), 'modules.utility.colorizer.colorize', 'colorize', (['Color.CODE', '"""--suffix"""'], {}), "(Color.CODE, '--suffix')\n", (3304, 3328), False, 'from modules.utility.colorizer import Color, colorize\n'), ((4032, 4077), 'modules.utility.printer.error', 'error', (['"""The number of test cases is missing."""'], {}), "('The number of test cases is missing.')\n", (4037, 4077), False, 'from modules.utility.printer import error, usage, warning\n'), ((4094, 4101), 'modules.utility.printer.usage', 'usage', ([], {}), '()\n', (4099, 4101), False, 'from modules.utility.printer import error, usage, warning\n'), ((4118, 4132), 
'modules.utility.exit_failure.exit_failure', 'exit_failure', ([], {}), '()\n', (4130, 4132), False, 'from modules.utility.exit_failure import exit_failure\n'), ((4807, 4814), 'modules.utility.printer.usage', 'usage', ([], {}), '()\n', (4812, 4814), False, 'from modules.utility.printer import error, usage, warning\n'), ((4831, 4845), 'modules.utility.exit_failure.exit_failure', 'exit_failure', ([], {}), '()\n', (4843, 4845), False, 'from modules.utility.exit_failure import exit_failure\n'), ((3825, 3851), 'modules.utility.colorizer.colorize', 'colorize', (['Color.CODE', '"""-c"""'], {}), "(Color.CODE, '-c')\n", (3833, 3851), False, 'from modules.utility.colorizer import Color, colorize\n'), ((3873, 3904), 'modules.utility.colorizer.colorize', 'colorize', (['Color.CODE', '"""--cases"""'], {}), "(Color.CODE, '--cases')\n", (3881, 3904), False, 'from modules.utility.colorizer import Color, colorize\n'), ((4899, 4934), 'modules.utility.printer.error', 'error', (['"""The time limit is missing."""'], {}), "('The time limit is missing.')\n", (4904, 4934), False, 'from modules.utility.printer import error, usage, warning\n'), ((4951, 4958), 'modules.utility.printer.usage', 'usage', ([], {}), '()\n', (4956, 4958), False, 'from modules.utility.printer import error, usage, warning\n'), ((4975, 4989), 'modules.utility.exit_failure.exit_failure', 'exit_failure', ([], {}), '()\n', (4987, 4989), False, 'from modules.utility.exit_failure import exit_failure\n'), ((6275, 6282), 'modules.utility.printer.usage', 'usage', ([], {}), '()\n', (6280, 6282), False, 'from modules.utility.printer import error, usage, warning\n'), ((6295, 6309), 'modules.utility.exit_failure.exit_failure', 'exit_failure', ([], {}), '()\n', (6307, 6309), False, 'from modules.utility.exit_failure import exit_failure\n'), ((2400, 2433), 'modules.utility.colorizer.colorize', 'colorize', (['Color.CODE', 'args[i + 1]'], {}), '(Color.CODE, args[i + 1])\n', (2408, 2433), False, 'from modules.utility.colorizer import Color, colorize\n'), ((4424, 4431), 'modules.utility.printer.usage', 'usage', ([], {}), '()\n', (4429, 4431), False, 'from modules.utility.printer import error, usage, warning\n'), ((4452, 4466), 'modules.utility.exit_failure.exit_failure', 'exit_failure', ([], {}), '()\n', (4464, 4466), False, 'from modules.utility.exit_failure import exit_failure\n'), ((4687, 4713), 'modules.utility.colorizer.colorize', 'colorize', (['Color.CODE', '"""-t"""'], {}), "(Color.CODE, '-t')\n", (4695, 4713), False, 'from modules.utility.colorizer import Color, colorize\n'), ((4735, 4771), 'modules.utility.colorizer.colorize', 'colorize', (['Color.CODE', '"""--time-limit"""'], {}), "(Color.CODE, '--time-limit')\n", (4743, 4771), False, 'from modules.utility.colorizer import Color, colorize\n'), ((5463, 5511), 'modules.utility.colorizer.colorize', 'colorize', (['Color.CODE', 'f"""{args[i]} {args[i + 1]}"""'], {}), "(Color.CODE, f'{args[i]} {args[i + 1]}')\n", (5471, 5511), False, 'from modules.utility.colorizer import Color, colorize\n'), ((5533, 5560), 'modules.utility.colorizer.colorize', 'colorize', (['Color.CODE', '"""gen"""'], {}), "(Color.CODE, 'gen')\n", (5541, 5560), False, 'from modules.utility.colorizer import Color, colorize\n'), ((5247, 5254), 'modules.utility.printer.usage', 'usage', ([], {}), '()\n', (5252, 5254), False, 'from modules.utility.printer import error, usage, warning\n'), ((5275, 5289), 'modules.utility.exit_failure.exit_failure', 'exit_failure', ([], {}), '()\n', (5287, 5289), False, 'from modules.utility.exit_failure import 
exit_failure\n'), ((5797, 5823), 'modules.utility.colorizer.colorize', 'colorize', (['Color.CODE', '"""-n"""'], {}), "(Color.CODE, '-n')\n", (5805, 5823), False, 'from modules.utility.colorizer import Color, colorize\n'), ((5845, 5886), 'modules.utility.colorizer.colorize', 'colorize', (['Color.CODE', '"""--no-progress-bar"""'], {}), "(Color.CODE, '--no-progress-bar')\n", (5853, 5886), False, 'from modules.utility.colorizer import Color, colorize\n'), ((6218, 6247), 'modules.utility.colorizer.colorize', 'colorize', (['Color.CODE', 'args[i]'], {}), '(Color.CODE, args[i])\n', (6226, 6247), False, 'from modules.utility.colorizer import Color, colorize\n'), ((4305, 4338), 'modules.utility.colorizer.colorize', 'colorize', (['Color.CODE', 'args[i + 1]'], {}), '(Color.CODE, args[i + 1])\n', (4313, 4338), False, 'from modules.utility.colorizer import Color, colorize\n'), ((6062, 6094), 'modules.utility.colorizer.colorize', 'colorize', (['Color.CODE', '"""--verify"""'], {}), "(Color.CODE, '--verify')\n", (6070, 6094), False, 'from modules.utility.colorizer import Color, colorize\n'), ((5159, 5192), 'modules.utility.colorizer.colorize', 'colorize', (['Color.CODE', 'args[i + 1]'], {}), '(Color.CODE, args[i + 1])\n', (5167, 5192), False, 'from modules.utility.colorizer import Color, colorize\n')] |
# List (and optionally move) audio files that are in a directory but not in the specified database(s),
# given a species name.
import argparse
import inspect
import os
import sys
# this is necessary before importing from a peer directory
currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
parentdir = os.path.dirname(currentdir)
sys.path.insert(0, parentdir)
from core import constants
from core import database
from core import util
# command-line arguments
parser = argparse.ArgumentParser()
parser.add_argument('-f1', type=str, default='training', help='Database name #1.')
parser.add_argument('-f2', type=str, default='validation', help='Database name #2.')
parser.add_argument('-s', type=str, default='', help='Species name.')
parser.add_argument('-i', type=str, default='', help='Input directory.')
parser.add_argument('-o', type=str, default='', help='If specified, move the unused files to this directory.')
args = parser.parse_args()
db_names = [args.f1]
if len(args.f2) > 0:
db_names.append(args.f2)
species_name = args.s
input_dir = args.i
output_dir = args.o
if len(output_dir) > 0 and not os.path.exists(output_dir):
os.makedirs(output_dir)
# create a list of file names in the input directory (without the '.mp3' extension)
raw_list = util.get_audio_files(input_dir)
base_to_file_dict = {}
input_dir_list = []
for file_path in raw_list:
file_name = os.path.basename(file_path)
base_name, ext = os.path.splitext(file_name)
input_dir_list.append(base_name)
base_to_file_dict[base_name] = file_name
# create a list of file names in the database(s) (also without the '.mp3' extension)
db_dict = {}
for db_name in db_names:
db = database.Database(f'../data/{db_name}.db')
results = db.get_recordings_by_subcategory_name(species_name)
for result in results:
_, file_name, _ = result
base_name, ext = os.path.splitext(file_name)
db_dict[base_name] = 1
# list files that are in the input directory but not the database(s)
sep = os.path.sep
for base_name in input_dir_list:
if base_name not in db_dict.keys():
if len(output_dir) > 0:
cmd = f'move "{input_dir}{sep}{base_to_file_dict[base_name]}" "{output_dir}{sep}{base_to_file_dict[base_name]}"'
print(cmd)
os.system(cmd)
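            # shutil.move would be a more portable alternative to shelling
            # out to the OS-specific "move" command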
else:
print(base_name)
| [
"os.path.exists",
"sys.path.insert",
"core.database.Database",
"core.util.get_audio_files",
"argparse.ArgumentParser",
"os.makedirs",
"inspect.currentframe",
"os.path.splitext",
"os.path.dirname",
"os.path.basename",
"os.system"
] | [((339, 366), 'os.path.dirname', 'os.path.dirname', (['currentdir'], {}), '(currentdir)\n', (354, 366), False, 'import os\n'), ((367, 396), 'sys.path.insert', 'sys.path.insert', (['(0)', 'parentdir'], {}), '(0, parentdir)\n', (382, 396), False, 'import sys\n'), ((509, 534), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (532, 534), False, 'import argparse\n'), ((1303, 1334), 'core.util.get_audio_files', 'util.get_audio_files', (['input_dir'], {}), '(input_dir)\n', (1323, 1334), False, 'from core import util\n'), ((1183, 1206), 'os.makedirs', 'os.makedirs', (['output_dir'], {}), '(output_dir)\n', (1194, 1206), False, 'import os\n'), ((1421, 1448), 'os.path.basename', 'os.path.basename', (['file_path'], {}), '(file_path)\n', (1437, 1448), False, 'import os\n'), ((1470, 1497), 'os.path.splitext', 'os.path.splitext', (['file_name'], {}), '(file_name)\n', (1486, 1497), False, 'import os\n'), ((1713, 1755), 'core.database.Database', 'database.Database', (['f"""../data/{db_name}.db"""'], {}), "(f'../data/{db_name}.db')\n", (1730, 1755), False, 'from core import database\n'), ((1151, 1177), 'os.path.exists', 'os.path.exists', (['output_dir'], {}), '(output_dir)\n', (1165, 1177), False, 'import os\n'), ((1907, 1934), 'os.path.splitext', 'os.path.splitext', (['file_name'], {}), '(file_name)\n', (1923, 1934), False, 'import os\n'), ((301, 323), 'inspect.currentframe', 'inspect.currentframe', ([], {}), '()\n', (321, 323), False, 'import inspect\n'), ((2319, 2333), 'os.system', 'os.system', (['cmd'], {}), '(cmd)\n', (2328, 2333), False, 'import os\n')] |
from django.test import TestCase, Client
from django.urls import reverse
from .models import LectureCategory
def create_lecture_category(name, description, parent):
lecture_category = LectureCategory(name=name, description=description, parent=parent)
lecture_category.save()
return lecture_category
class LectureCategoryListViewTests(TestCase):
def test_zero_lecture_category(self):
client = Client()
response = client.get(reverse('lecture:lecture_category_list'))
self.assertQuerysetEqual(
response.context['lecture_category_dict_list'],
[]
)
def test_one_lecture_category(self):
create_lecture_category("総合教育部", "主に1年生が受ける授業の科目区分", None)
client = Client()
response = client.get(reverse('lecture:lecture_category_list'))
self.assertQuerysetEqual(
map(lambda x: x['content'], response.context['lecture_category_dict_list']),
['<LectureCategory: 総合教育部>']
)
self.assertEqual(
list(map(lambda x: x['leaf_product_count'], response.context['lecture_category_dict_list'])),
[0]
)
def test_two_lecture_category(self):
parent = create_lecture_category("総合教育部", "主に1年生が受ける授業の科目区分", None)
create_lecture_category("一般教育演習(フレッシュマンセミナー)", "フレッシュマンセミナー", parent=parent)
client = Client()
response = client.get(reverse('lecture:lecture_category_list'))
self.assertQuerysetEqual(
map(lambda x: x['content'], response.context['lecture_category_dict_list']),
['<LectureCategory: 総合教育部>']
)
self.assertEqual(
list(map(lambda x: x['leaf_product_count'], response.context['lecture_category_dict_list'])),
[0]
)
def test_three_lecture_category(self):
parent = create_lecture_category("総合教育部", "主に1年生が受ける授業の科目区分", None)
create_lecture_category("一般教育演習(フレッシュマンセミナー)", "フレッシュマンセミナー", parent=parent)
create_lecture_category("該当なし", "どの科目区分とも関連していないもの", None)
client = Client()
response = client.get(reverse('lecture:lecture_category_list'))
self.assertQuerysetEqual(
map(lambda x: x['content'], response.context['lecture_category_dict_list']),
['<LectureCategory: 総合教育部>', '<LectureCategory: 該当なし>']
)
self.assertEqual(
list(map(lambda x: x['leaf_product_count'], response.context['lecture_category_dict_list'])),
[0, 0]
)
class LectureCategoryDetailsViewTests(TestCase):
def test_one_lecture_category(self):
create_lecture_category("総合教育部", "主に1年生が受ける授業の科目区分", None)
client = Client()
lecture_category = LectureCategory.objects.get(pk=1)
response = client.get(reverse('lecture:lecture_category_details',
kwargs={'pk': lecture_category.pk}))
self.assertQuerysetEqual(
[response.context['lecture_category']],
['<LectureCategory: 総合教育部>']
)
self.assertQuerysetEqual(
response.context['lecture_category_parent_chain'],
['<LectureCategory: 総合教育部>']
)
self.assertQuerysetEqual(
response.context['child_lecture_categories'],
[]
)
self.assertQuerysetEqual(
response.context['lecture_category_products'],
[]
)
def test_two_lecture_category(self):
parent = create_lecture_category("総合教育部", "主に1年生が受ける授業の科目区分", None)
create_lecture_category("一般教育演習(フレッシュマンセミナー)", "フレッシュマンセミナー", parent=parent)
client = Client()
response = client.get(reverse('lecture:lecture_category_details',
kwargs={'pk': parent.pk}))
self.assertQuerysetEqual(
[response.context['lecture_category']],
['<LectureCategory: 総合教育部>']
)
self.assertQuerysetEqual(
response.context['lecture_category_parent_chain'],
['<LectureCategory: 総合教育部>']
)
self.assertQuerysetEqual(
response.context['child_lecture_categories'],
['<LectureCategory: 一般教育演習(フレッシュマンセミナー)>']
)
self.assertQuerysetEqual(
response.context['lecture_category_products'],
[]
)
def test_three_lecture_category(self):
parent = create_lecture_category("総合教育部", "主に1年生が受ける授業の科目区分", None)
create_lecture_category("一般教育演習(フレッシュマンセミナー)", "フレッシュマンセミナー", parent=parent)
create_lecture_category("共通科目", "環境と人間・健康と社会・人間と文化など", parent=parent)
client = Client()
response = client.get(reverse('lecture:lecture_category_details',
kwargs={'pk': parent.pk}))
self.assertQuerysetEqual(
[response.context['lecture_category']],
['<LectureCategory: 総合教育部>']
)
self.assertQuerysetEqual(
response.context['lecture_category_parent_chain'],
['<LectureCategory: 総合教育部>']
)
self.assertQuerysetEqual(
response.context['child_lecture_categories'],
['<LectureCategory: 一般教育演習(フレッシュマンセミナー)>', '<LectureCategory: 共通科目>']
)
self.assertQuerysetEqual(
response.context['lecture_category_products'],
[]
)
| [
"django.urls.reverse",
"django.test.Client"
] | [((420, 428), 'django.test.Client', 'Client', ([], {}), '()\n', (426, 428), False, 'from django.test import TestCase, Client\n'), ((746, 754), 'django.test.Client', 'Client', ([], {}), '()\n', (752, 754), False, 'from django.test import TestCase, Client\n'), ((1379, 1387), 'django.test.Client', 'Client', ([], {}), '()\n', (1385, 1387), False, 'from django.test import TestCase, Client\n'), ((2081, 2089), 'django.test.Client', 'Client', ([], {}), '()\n', (2087, 2089), False, 'from django.test import TestCase, Client\n'), ((2701, 2709), 'django.test.Client', 'Client', ([], {}), '()\n', (2707, 2709), False, 'from django.test import TestCase, Client\n'), ((3660, 3668), 'django.test.Client', 'Client', ([], {}), '()\n', (3666, 3668), False, 'from django.test import TestCase, Client\n'), ((4668, 4676), 'django.test.Client', 'Client', ([], {}), '()\n', (4674, 4676), False, 'from django.test import TestCase, Client\n'), ((459, 499), 'django.urls.reverse', 'reverse', (['"""lecture:lecture_category_list"""'], {}), "('lecture:lecture_category_list')\n", (466, 499), False, 'from django.urls import reverse\n'), ((785, 825), 'django.urls.reverse', 'reverse', (['"""lecture:lecture_category_list"""'], {}), "('lecture:lecture_category_list')\n", (792, 825), False, 'from django.urls import reverse\n'), ((1418, 1458), 'django.urls.reverse', 'reverse', (['"""lecture:lecture_category_list"""'], {}), "('lecture:lecture_category_list')\n", (1425, 1458), False, 'from django.urls import reverse\n'), ((2120, 2160), 'django.urls.reverse', 'reverse', (['"""lecture:lecture_category_list"""'], {}), "('lecture:lecture_category_list')\n", (2127, 2160), False, 'from django.urls import reverse\n'), ((2801, 2880), 'django.urls.reverse', 'reverse', (['"""lecture:lecture_category_details"""'], {'kwargs': "{'pk': lecture_category.pk}"}), "('lecture:lecture_category_details', kwargs={'pk': lecture_category.pk})\n", (2808, 2880), False, 'from django.urls import reverse\n'), ((3699, 3768), 'django.urls.reverse', 'reverse', (['"""lecture:lecture_category_details"""'], {'kwargs': "{'pk': parent.pk}"}), "('lecture:lecture_category_details', kwargs={'pk': parent.pk})\n", (3706, 3768), False, 'from django.urls import reverse\n'), ((4707, 4776), 'django.urls.reverse', 'reverse', (['"""lecture:lecture_category_details"""'], {'kwargs': "{'pk': parent.pk}"}), "('lecture:lecture_category_details', kwargs={'pk': parent.pk})\n", (4714, 4776), False, 'from django.urls import reverse\n')] |
import random
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
import torchvision.models as models
import numpy as np
from model.utils.config import cfg
from model.rpn.rpn import _RPN
from model.roi_layers import ROIAlign, ROIPool
from model.rpn.proposal_target_layer import _ProposalTargetLayer
from model.utils.net_utils import _smooth_l1_loss,grad_reverse
class _fasterRCNN(nn.Module):
""" faster RCNN """
def __init__(self, classes, class_agnostic,lc,gc):
super(_fasterRCNN, self).__init__()
self.classes = classes
self.n_classes = len(classes)
self.class_agnostic = class_agnostic
# loss
self.RCNN_loss_cls = 0
self.RCNN_loss_bbox = 0
self.lc = lc
self.gc = gc
# define rpn
self.RCNN_rpn = _RPN(self.dout_base_model)
self.RCNN_proposal_target = _ProposalTargetLayer(self.n_classes)
self.RCNN_roi_pool = ROIPool((cfg.POOLING_SIZE, cfg.POOLING_SIZE), 1.0/16.0)
self.RCNN_roi_align = ROIAlign((cfg.POOLING_SIZE, cfg.POOLING_SIZE), 1.0/16.0, 0)
self.grid_size = cfg.POOLING_SIZE * 2 if cfg.CROP_RESIZE_WITH_MAX_POOL else cfg.POOLING_SIZE
def forward(self, im_data, im_info, gt_boxes, num_boxes,target=False,eta=1.0):
outputs = dict()
batch_size = im_data.size(0)
im_info = im_info.data
gt_boxes = gt_boxes.data
num_boxes = num_boxes.data
# feed image data to base model to obtain base feature map
base_feat1 = self.RCNN_base1(im_data)
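        # grad_reverse is assumed to be a gradient-reversal layer: identity on
        # the forward pass, gradients scaled by -eta on the backward pass, so
        # the backbone trains adversarially against the domain classifiers
        # (netD_pixel at the local/pixel level, netD at the global/image level).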
if self.lc:
d_pixel, _ = self.netD_pixel(grad_reverse(base_feat1, lambd=eta))
#print(d_pixel)
if not target:
_, feat_pixel = self.netD_pixel(base_feat1.detach())
else:
d_pixel = self.netD_pixel(grad_reverse(base_feat1, lambd=eta))
base_feat = self.RCNN_base2(base_feat1)
if self.gc:
domain_p, _ = self.netD(grad_reverse(base_feat, lambd=eta))
if target:
return d_pixel,domain_p#, diff
_,feat = self.netD(base_feat.detach())
else:
domain_p = self.netD(grad_reverse(base_feat, lambd=eta))
if target:
return d_pixel,domain_p#,diff
        # feed base feature map to RPN to obtain rois
rois, rpn_loss_cls, rpn_loss_bbox = self.RCNN_rpn(base_feat, im_info, gt_boxes, num_boxes)
        # if it is the training phase, then use ground truth bboxes for refining
if self.training:
roi_data = self.RCNN_proposal_target(rois, gt_boxes, num_boxes)
rois, rois_label, rois_target, rois_inside_ws, rois_outside_ws, weights = roi_data
rois_label = rois_label.view(-1).long()
rois_target = rois_target.view(-1, rois_target.size(2))
rois_inside_ws = rois_inside_ws.view(-1, rois_inside_ws.size(2))
rois_outside_ws = rois_outside_ws.view(-1, rois_outside_ws.size(2))
else:
rois_label = None
rois_target = None
rois_inside_ws = None
rois_outside_ws = None
rpn_loss_cls = 0
rpn_loss_bbox = 0
if cfg.POOLING_MODE == 'align':
pooled_feat = self.RCNN_roi_align(base_feat, rois.view(-1, 5))
elif cfg.POOLING_MODE == 'pool':
pooled_feat = self.RCNN_roi_pool(base_feat, rois.view(-1,5))
# feed pooled features to top model
pooled_feat = self._head_to_tail(pooled_feat)
#feat_pixel = torch.zeros(feat_pixel.size()).cuda()
if self.lc:
feat_pixel = feat_pixel.view(1, -1).repeat(pooled_feat.size(0), 1)
pooled_feat = torch.cat((feat_pixel, pooled_feat), 1)
if self.gc:
feat = feat.view(1, -1).repeat(pooled_feat.size(0), 1)
pooled_feat = torch.cat((feat, pooled_feat), 1)
        # compute bbox offset
bbox_pred = self.RCNN_bbox_pred(pooled_feat)
if self.training and not self.class_agnostic:
bbox_pred_view = bbox_pred.view(bbox_pred.size(0), int(bbox_pred.size(1) / 4), 4)
bbox_pred_select = torch.gather(bbox_pred_view, 1, rois_label.view(rois_label.size(0), 1, 1).expand(rois_label.size(0), 1, 4))
bbox_pred = bbox_pred_select.squeeze(1)
# compute object classification probability
cls_score = self.RCNN_cls_score(pooled_feat)
cls_prob = F.softmax(cls_score, 1)
RCNN_loss_cls = 0
RCNN_loss_bbox = 0
if self.training:
# classification loss
RCNN_loss_cls = F.cross_entropy(cls_score, rois_label)
# bounding box regression L1 loss
RCNN_loss_bbox = _smooth_l1_loss(bbox_pred, rois_target, rois_inside_ws, rois_outside_ws)
cls_prob = cls_prob.view(batch_size, rois.size(1), -1)
bbox_pred = bbox_pred.view(batch_size, rois.size(1), -1)
outputs['base_feat'] = base_feat
outputs['predict'] = [rois, cls_prob, bbox_pred]
outputs['loss'] = [rpn_loss_cls, rpn_loss_bbox, RCNN_loss_cls, RCNN_loss_bbox]
outputs['rois_label'] = rois_label
outputs['d_loss'] = [d_pixel, domain_p]
return outputs
def _init_weights(self):
def normal_init(m, mean, stddev, truncated=False):
"""
            weight initializer: truncated normal and random normal.
"""
# x is a parameter
if truncated:
m.weight.data.normal_().fmod_(2).mul_(stddev).add_(mean) # not a perfect approximation
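                # normal_() draws standard normals and fmod_(2) folds them into
                # (-2, 2) before scaling, hence the approximation caveat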
else:
m.weight.data.normal_(mean, stddev)
m.bias.data.zero_()
normal_init(self.RCNN_rpn.RPN_Conv, 0, 0.01, cfg.TRAIN.TRUNCATED)
normal_init(self.RCNN_rpn.RPN_cls_score, 0, 0.01, cfg.TRAIN.TRUNCATED)
normal_init(self.RCNN_rpn.RPN_bbox_pred, 0, 0.01, cfg.TRAIN.TRUNCATED)
normal_init(self.RCNN_cls_score, 0, 0.01, cfg.TRAIN.TRUNCATED)
normal_init(self.RCNN_bbox_pred, 0, 0.001, cfg.TRAIN.TRUNCATED)
def create_architecture(self):
self._init_modules()
self._init_weights()
| [
"model.roi_layers.ROIPool",
"model.rpn.rpn._RPN",
"model.utils.net_utils.grad_reverse",
"model.utils.net_utils._smooth_l1_loss",
"model.rpn.proposal_target_layer._ProposalTargetLayer",
"torch.nn.functional.cross_entropy",
"model.roi_layers.ROIAlign",
"torch.nn.functional.softmax",
"torch.cat"
] | [((883, 909), 'model.rpn.rpn._RPN', '_RPN', (['self.dout_base_model'], {}), '(self.dout_base_model)\n', (887, 909), False, 'from model.rpn.rpn import _RPN\n'), ((946, 982), 'model.rpn.proposal_target_layer._ProposalTargetLayer', '_ProposalTargetLayer', (['self.n_classes'], {}), '(self.n_classes)\n', (966, 982), False, 'from model.rpn.proposal_target_layer import _ProposalTargetLayer\n'), ((1012, 1069), 'model.roi_layers.ROIPool', 'ROIPool', (['(cfg.POOLING_SIZE, cfg.POOLING_SIZE)', '(1.0 / 16.0)'], {}), '((cfg.POOLING_SIZE, cfg.POOLING_SIZE), 1.0 / 16.0)\n', (1019, 1069), False, 'from model.roi_layers import ROIAlign, ROIPool\n'), ((1098, 1159), 'model.roi_layers.ROIAlign', 'ROIAlign', (['(cfg.POOLING_SIZE, cfg.POOLING_SIZE)', '(1.0 / 16.0)', '(0)'], {}), '((cfg.POOLING_SIZE, cfg.POOLING_SIZE), 1.0 / 16.0, 0)\n', (1106, 1159), False, 'from model.roi_layers import ROIAlign, ROIPool\n'), ((4559, 4582), 'torch.nn.functional.softmax', 'F.softmax', (['cls_score', '(1)'], {}), '(cls_score, 1)\n', (4568, 4582), True, 'import torch.nn.functional as F\n'), ((3790, 3829), 'torch.cat', 'torch.cat', (['(feat_pixel, pooled_feat)', '(1)'], {}), '((feat_pixel, pooled_feat), 1)\n', (3799, 3829), False, 'import torch\n'), ((3943, 3976), 'torch.cat', 'torch.cat', (['(feat, pooled_feat)', '(1)'], {}), '((feat, pooled_feat), 1)\n', (3952, 3976), False, 'import torch\n'), ((4726, 4764), 'torch.nn.functional.cross_entropy', 'F.cross_entropy', (['cls_score', 'rois_label'], {}), '(cls_score, rois_label)\n', (4741, 4764), True, 'import torch.nn.functional as F\n'), ((4841, 4913), 'model.utils.net_utils._smooth_l1_loss', '_smooth_l1_loss', (['bbox_pred', 'rois_target', 'rois_inside_ws', 'rois_outside_ws'], {}), '(bbox_pred, rois_target, rois_inside_ws, rois_outside_ws)\n', (4856, 4913), False, 'from model.utils.net_utils import _smooth_l1_loss, grad_reverse\n'), ((1681, 1716), 'model.utils.net_utils.grad_reverse', 'grad_reverse', (['base_feat1'], {'lambd': 'eta'}), '(base_feat1, lambd=eta)\n', (1693, 1716), False, 'from model.utils.net_utils import _smooth_l1_loss, grad_reverse\n'), ((1894, 1929), 'model.utils.net_utils.grad_reverse', 'grad_reverse', (['base_feat1'], {'lambd': 'eta'}), '(base_feat1, lambd=eta)\n', (1906, 1929), False, 'from model.utils.net_utils import _smooth_l1_loss, grad_reverse\n'), ((2035, 2069), 'model.utils.net_utils.grad_reverse', 'grad_reverse', (['base_feat'], {'lambd': 'eta'}), '(base_feat, lambd=eta)\n', (2047, 2069), False, 'from model.utils.net_utils import _smooth_l1_loss, grad_reverse\n'), ((2239, 2273), 'model.utils.net_utils.grad_reverse', 'grad_reverse', (['base_feat'], {'lambd': 'eta'}), '(base_feat, lambd=eta)\n', (2251, 2273), False, 'from model.utils.net_utils import _smooth_l1_loss, grad_reverse\n')] |
#!/usr/bin/python3
from kafka import KafkaConsumer
from kafka import TopicPartition
from configparser import ConfigParser
from EventLog import EventLog
import psycopg2
import datetime
selection = True
while(selection):
inputSel = input(
"""Please\n
select 1 if you wish to read/write events from the sample topic\n
select 2 if you wish to read/write events from the eventlog topic\nSelection : """)
    # compare inputSel against both values; `inputSel == "1" or "2"` is always truthy
    if inputSel in ("1", "2"):
        selection = False
EXERCISE_TOPIC = 'exercise_topic'
EVENT_TOPIC = 'event_topic'
consumer = KafkaConsumer(bootstrap_servers='kafka-7ab3a9e-justinraj1984-b417.<EMAIL>.com:14211',
security_protocol='SSL',
ssl_cafile='ca.pem',
ssl_certfile='access_cert.pem',
ssl_keyfile='key.pem',
consumer_timeout_ms=10000)
if inputSel == "1":
# Open connection to PostgreSQL
parser = ConfigParser()
parser.read('database.ini')
db_connection_values = {}
params = parser.items('postgresql_exercise')
for param in params:
db_connection_values[param[0]] = param[1]
# Connect to PostgreSQL
print('Connecting to the PostgreSQL database')
conn = None
conn = psycopg2.connect(**db_connection_values)
# Read all messages from exercise_topic
consumer.assign([TopicPartition(EXERCISE_TOPIC, 0)])
consumer.seek_to_beginning(TopicPartition(EXERCISE_TOPIC, 0))
# create a cursor
try:
cur = conn.cursor()
# cur.execute('SELECT version()')
sql_str = """
CREATE TABLE exercise_table (
my_values VARCHAR(255)
)
"""
# Code commented after table is created
# cur.execute(sql_str)
for msg in consumer:
print(msg)
print("%s:%d:%d: key=%s value=%s" % (msg.topic,
msg.partition, msg.offset, msg.key, msg.value))
            msg_value = msg.value.decode("utf-8")
            print("Message retrieved from topic : " + str(msg_value))
sql = "INSERT INTO exercise_table(my_values) VALUES('" + \
msg_value+"');"
print(sql)
cur.execute(sql)
conn.commit()
cur.execute("SELECT my_values FROM exercise_table")
db_result = cur.fetchone()
print('Result :' + str(db_result))
cur.close()
except (Exception, psycopg2.DatabaseError) as error:
print("Error: "+str(error))
finally:
if conn is not None:
conn.close()
if inputSel == "2":
# Open connection to PostgreSQL
parser = ConfigParser()
parser.read('database.ini')
db_connection_values = {}
params = parser.items('postgresql_eventlog')
for param in params:
db_connection_values[param[0]] = param[1]
# Connect to PostgreSQL
print('Connecting to the PostgreSQL database')
conn = None
conn = psycopg2.connect(**db_connection_values)
# Read all messages from event_topic
consumer.assign([TopicPartition(EVENT_TOPIC, 0)])
consumer.seek_to_beginning(TopicPartition(EVENT_TOPIC, 0))
# create a cursor
try:
cur = conn.cursor()
# cur.execute('SELECT version()')
sql_str = """
CREATE TABLE message_table (
client_process VARCHAR(255),
log_message VARCHAR(255),
log_date VARCHAR(255)
)
"""
# Code commented after table is created
# cur.execute(sql_str)
for msg in consumer:
print(msg)
print("%s:%d:%d: key=%s value=%s" % (msg.topic,
msg.partition, msg.offset, msg.key, msg.value))
# msg_value = EventLog()
msg_value = msg.value.decode("utf8")
x = msg_value.split(";")
print("Message retrieved from topic :")
print(x)
            if len(x) == 3:
                # parameterized queries avoid quoting/injection problems in the fields
                sql = "INSERT INTO message_table(client_process, log_message, log_date) VALUES(%s, %s, %s);"
                cur.execute(sql, (x[0], x[1], x[2]))
                conn.commit()
            elif len(x) == 1:
                sql = "INSERT INTO message_table(log_message, log_date) VALUES(%s, %s);"
                cur.execute(sql, (x[0], str(datetime.datetime.now())))
                conn.commit()
else:
sql = 'SELECT version()'
cur.execute(sql)
print('Result start:')
cur.execute(
"SELECT client_process, log_message, log_date FROM message_table")
db_result = cur.fetchone()
print('Result :' + str(db_result))
cur.close()
except (Exception, psycopg2.DatabaseError) as error:
print("Error: "+str(error))
finally:
if conn is not None:
conn.close()
| [
"psycopg2.connect",
"configparser.ConfigParser",
"kafka.KafkaConsumer",
"kafka.TopicPartition",
"datetime.datetime.now"
] | [((561, 788), 'kafka.KafkaConsumer', 'KafkaConsumer', ([], {'bootstrap_servers': '"""kafka-7ab3a9e-justinraj1984-b417.<EMAIL>.com:14211"""', 'security_protocol': '"""SSL"""', 'ssl_cafile': '"""ca.pem"""', 'ssl_certfile': '"""access_cert.pem"""', 'ssl_keyfile': '"""key.pem"""', 'consumer_timeout_ms': '(10000)'}), "(bootstrap_servers=\n 'kafka-7ab3a9e-justinraj1984-b417.<EMAIL>.com:14211', security_protocol\n ='SSL', ssl_cafile='ca.pem', ssl_certfile='access_cert.pem',\n ssl_keyfile='key.pem', consumer_timeout_ms=10000)\n", (574, 788), False, 'from kafka import KafkaConsumer\n'), ((971, 985), 'configparser.ConfigParser', 'ConfigParser', ([], {}), '()\n', (983, 985), False, 'from configparser import ConfigParser\n'), ((1278, 1318), 'psycopg2.connect', 'psycopg2.connect', ([], {}), '(**db_connection_values)\n', (1294, 1318), False, 'import psycopg2\n'), ((2793, 2807), 'configparser.ConfigParser', 'ConfigParser', ([], {}), '()\n', (2805, 2807), False, 'from configparser import ConfigParser\n'), ((3100, 3140), 'psycopg2.connect', 'psycopg2.connect', ([], {}), '(**db_connection_values)\n', (3116, 3140), False, 'import psycopg2\n'), ((1451, 1484), 'kafka.TopicPartition', 'TopicPartition', (['EXERCISE_TOPIC', '(0)'], {}), '(EXERCISE_TOPIC, 0)\n', (1465, 1484), False, 'from kafka import TopicPartition\n'), ((3267, 3297), 'kafka.TopicPartition', 'TopicPartition', (['EVENT_TOPIC', '(0)'], {}), '(EVENT_TOPIC, 0)\n', (3281, 3297), False, 'from kafka import TopicPartition\n'), ((1384, 1417), 'kafka.TopicPartition', 'TopicPartition', (['EXERCISE_TOPIC', '(0)'], {}), '(EXERCISE_TOPIC, 0)\n', (1398, 1417), False, 'from kafka import TopicPartition\n'), ((3203, 3233), 'kafka.TopicPartition', 'TopicPartition', (['EVENT_TOPIC', '(0)'], {}), '(EVENT_TOPIC, 0)\n', (3217, 3233), False, 'from kafka import TopicPartition\n'), ((4559, 4582), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (4580, 4582), False, 'import datetime\n')] |
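The assign/seek calls above implement a "replay the whole partition" read. A condensed sketch of that pattern with kafka-python follows; the broker address and topic are placeholders, and the SSL options from the script are omitted for brevity.

from kafka import KafkaConsumer, TopicPartition

consumer = KafkaConsumer(bootstrap_servers='localhost:9092',  # placeholder broker
                        consumer_timeout_ms=10000)
tp = TopicPartition('exercise_topic', 0)
consumer.assign([tp])           # manual assignment, no consumer group
consumer.seek_to_beginning(tp)  # rewind to offset 0 before reading
for msg in consumer:            # iteration ends after consumer_timeout_ms of silence
    print(msg.offset, msg.value.decode('utf-8'))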
from InstaTweet import InstaTweet
import json
"""
This example creates a template profile, which can later be loaded, saved under a new name, and further modified.
Profile attributes can be set at the time of object initialization or later on via InstaTweet.attribute = value
"""
session_id = 'string' # The sessionid cookie is obtained by logging into Instagram from browser
twitter_keys = { # You must have Twitter API keys with access to Standard v1.1 endpoints
'Consumer Key': 'string',
'Consumer Secret': 'string',
'Access Token': 'string',
'Token Secret': 'string'
}
it = InstaTweet(session_id=session_id, twitter_keys=twitter_keys)
it.save_profile('My Template')
print('Profile Settings:', json.dumps(it.config, indent=4), sep='\n')
def create_template(template_name, session_id=session_id, twitter_keys=twitter_keys):
"""Function to Create a Template Profile"""
it = InstaTweet(session_id=session_id, twitter_keys=twitter_keys)
it.save_profile(template_name)
print('Profile Settings:', json.dumps(it.config, indent=4), sep='\n')
| [
"InstaTweet.InstaTweet",
"json.dumps"
] | [((618, 678), 'InstaTweet.InstaTweet', 'InstaTweet', ([], {'session_id': 'session_id', 'twitter_keys': 'twitter_keys'}), '(session_id=session_id, twitter_keys=twitter_keys)\n', (628, 678), False, 'from InstaTweet import InstaTweet\n'), ((738, 769), 'json.dumps', 'json.dumps', (['it.config'], {'indent': '(4)'}), '(it.config, indent=4)\n', (748, 769), False, 'import json\n'), ((926, 986), 'InstaTweet.InstaTweet', 'InstaTweet', ([], {'session_id': 'session_id', 'twitter_keys': 'twitter_keys'}), '(session_id=session_id, twitter_keys=twitter_keys)\n', (936, 986), False, 'from InstaTweet import InstaTweet\n'), ((1053, 1084), 'json.dumps', 'json.dumps', (['it.config'], {'indent': '(4)'}), '(it.config, indent=4)\n', (1063, 1084), False, 'import json\n')] |
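A short usage sketch of the create_template helper defined above; the template names and the alternate session id are made up.

create_template('My Backup Template')
create_template('Burner Template', session_id='another-session-cookie')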
import discord
from discord.ext import commands
class Help(commands.Cog):
def __init__(self, bot):
self.bot = bot
@commands.command(name='help', aliases=['h'])
async def _help(self, ctx, *, command: str=None):
"""Get help on a specified cog or command.
Don't put any arguments to get a list of available commands."""
pref = '```\n'
postf = f'Get info on a command group, category or just a command with @{self.bot.user.name}#{self.bot.user.discriminator} help <Category>/<Command>/<Command group> or *help <Category>/<Command>/<Command group>'
result = ''
postfix = '\n```'
paginator = commands.Paginator()
if not command:
li = [cog for cog in self.bot.cogs]
for smth in li:
if smth != 'Help':
s = list(self.bot.cogs[smth].get_commands())
if s:
paginator.add_line(f"{s[0].cog_name}:")
for c in s:
if not c.hidden:
paginator.add_line(f' {c.name} - {c.short_doc}')
paginator.add_line(postf)
for page in paginator.pages:
await ctx.send(page)
else:
if command not in self.bot.all_commands:
if command not in self.bot.cogs:
cmd = self.bot.get_command(command.replace('*', '').replace(self.bot.user.mention, ''))
if cmd:
paginator.add_line(f"{ctx.prefix.replace(self.bot.user.mention, f'@{self.bot.user.name}#{self.bot.user.discriminator} ')}{cmd.signature}\n\n {cmd.help}")
for page in paginator.pages:
await ctx.send(page)
else:
result = 'That command/category/command group does not exist!'
await ctx.send(result)
else:
                # `command` is a cog name (a str) here, so look up the Cog object before listing its commands
                the_cog = list(self.bot.cogs[command].get_commands())
paginator.add_line(f"{the_cog[0].cog_name}:")
for cmd in the_cog:
if not cmd.hidden:
paginator.add_line(f' {cmd.name} - {cmd.help}')
paginator.add_line(postf)
for page in paginator.pages:
await ctx.send(page)
else:
cmd = self.bot.get_command(command.replace('*', '').replace(self.bot.user.mention, ''))
result += f"{ctx.prefix.replace(self.bot.user.mention, f'@{self.bot.user.name}#{self.bot.user.discriminator} ')}{cmd.signature}\n\nCog: {cmd.cog_name}\n\n {cmd.help}"
await ctx.send(f"{pref}{result}{postfix}")
def setup(bot):
bot.add_cog(Help(bot))
| [
"discord.ext.commands.Paginator",
"discord.ext.commands.command"
] | [((133, 177), 'discord.ext.commands.command', 'commands.command', ([], {'name': '"""help"""', 'aliases': "['h']"}), "(name='help', aliases=['h'])\n", (149, 177), False, 'from discord.ext import commands\n'), ((664, 684), 'discord.ext.commands.Paginator', 'commands.Paginator', ([], {}), '()\n', (682, 684), False, 'from discord.ext import commands\n')] |
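The help command above leans on commands.Paginator to keep each reply under Discord's message size limit. A minimal stand-alone sketch of that behaviour (runnable without a bot); the line contents are placeholders.

from discord.ext import commands

paginator = commands.Paginator()  # wraps each page in code-block fences, max_size=2000 by default
for i in range(200):
    paginator.add_line(f"command_{i} - placeholder help text")
for page in paginator.pages:  # the accumulated lines are split into pages that fit the limit
    print(len(page))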
from django.db import models
class Tag(models.Model):
name = models.CharField(max_length=30)
def __str__(self):
return self.name
class Project(models.Model):
name = models.CharField(max_length=100)
description = models.CharField(max_length=200)
link = models.CharField(max_length=250)
image = models.ImageField(upload_to='images')
tags = models.ManyToManyField(Tag)
def __str__(self):
return self.name | [
"django.db.models.ImageField",
"django.db.models.ManyToManyField",
"django.db.models.CharField"
] | [((67, 98), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(30)'}), '(max_length=30)\n', (83, 98), False, 'from django.db import models\n'), ((194, 226), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(100)'}), '(max_length=100)\n', (210, 226), False, 'from django.db import models\n'), ((245, 277), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(200)'}), '(max_length=200)\n', (261, 277), False, 'from django.db import models\n'), ((289, 321), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(250)'}), '(max_length=250)\n', (305, 321), False, 'from django.db import models\n'), ((334, 371), 'django.db.models.ImageField', 'models.ImageField', ([], {'upload_to': '"""images"""'}), "(upload_to='images')\n", (351, 371), False, 'from django.db import models\n'), ((383, 410), 'django.db.models.ManyToManyField', 'models.ManyToManyField', (['Tag'], {}), '(Tag)\n', (405, 410), False, 'from django.db import models\n')] |
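A hedged sketch of how the ManyToManyField above is typically queried; it assumes a configured Django project (run inside `python manage.py shell`), and the app label `portfolio` is a guess, not taken from the source.

from portfolio.models import Project, Tag   # app label is an assumption

django_tag, _ = Tag.objects.get_or_create(name='django')
project = Project.objects.first()          # assumes at least one Project row exists
project.tags.add(django_tag)                 # attach via the M2M manager
print(Project.objects.filter(tags__name='django'))  # query across the relation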
from rcmh import config,group,chain,expert
grupo_inicial = expert.construct_expert_extended_group()
cadena = chain.Chain(type='poisson',group=grupo_inicial)
cadena.walks(1000)
| [
"rcmh.chain.Chain",
"rcmh.expert.construct_expert_extended_group"
] | [((61, 101), 'rcmh.expert.construct_expert_extended_group', 'expert.construct_expert_extended_group', ([], {}), '()\n', (99, 101), False, 'from rcmh import config, group, chain, expert\n'), ((111, 159), 'rcmh.chain.Chain', 'chain.Chain', ([], {'type': '"""poisson"""', 'group': 'grupo_inicial'}), "(type='poisson', group=grupo_inicial)\n", (122, 159), False, 'from rcmh import config, group, chain, expert\n')] |
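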
#!/usr/bin/env python
# vim: set expandtab tabstop=4 shiftwidth=4:
# Borderlands 3 Data Processing Scripts
# Copyright (C) 2019-2020 <NAME>
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the development team nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL CJ KUCERA BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import os
import itertools
paks = {}
for filename in os.listdir('.'):
if filename.startswith('pak-'):
paks[filename] = set()
for pakname in os.listdir(filename):
if pakname.endswith('.pak'):
paks[filename].add(pakname)
# Now find duplicates.
for ((pakdir1, paks1), (pakdir2, paks2)) in itertools.combinations(paks.items(), 2):
combined = paks1 & paks2
if len(combined) > 0:
print('{} and {} share these:'.format(pakdir1, pakdir2))
print(combined)
print('')
| [
"os.listdir"
] | [((1676, 1691), 'os.listdir', 'os.listdir', (['"""."""'], {}), "('.')\n", (1686, 1691), False, 'import os\n'), ((1783, 1803), 'os.listdir', 'os.listdir', (['filename'], {}), '(filename)\n', (1793, 1803), False, 'import os\n')] |
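The pairwise duplicate check above in miniature, without touching the filesystem; the directory names and pak names are made up.

import itertools

paks = {
    'pak-base': {'pakchunk0.pak', 'pakchunk1.pak'},
    'pak-dlc1': {'pakchunk1.pak', 'pakchunk9.pak'},
    'pak-dlc2': {'pakchunk9.pak'},
}
# every unordered pair of directories is compared exactly once
for (dir1, names1), (dir2, names2) in itertools.combinations(paks.items(), 2):
    shared = names1 & names2
    if shared:
        print('{} and {} share these:'.format(dir1, dir2))
        print(shared)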
import os
import time
import unittest
from specter.specter import SpecterError, TimeoutError
from .util import SpecterTestCase, StaticSpecterTestCase
from .bottle import redirect, static_file
class TestSimple(StaticSpecterTestCase):
STATIC_FILE = 'simple.html'
def test_simple_and_sleep(self):
self.open('/')
self.assert_in('This is an index page', self.s.content)
def test_wait_for(self):
self.open('/')
self.s.wait_for(lambda: 'This is an index page' in self.s.content)
def test_wait_for_text(self):
self.open('/')
self.s.wait_for_text('This is an index page')
def test_url(self):
self.open('/')
self.assert_equal(self.s.url,
self.baseUrl + '/'
)
def test_title(self):
self.open('/')
self.assert_equal(self.s.title, 'This is a title')
def test_bad_open(self):
with self.assert_raises(SpecterError):
self.s.open('/foobar', method="BAD")
def test_wait_timeout(self):
with self.assert_raises(TimeoutError):
self.s.wait_for(lambda: False, timeout=0.01)
def test_sleep(self):
start = time.time()
self.s.sleep(0.05)
end = time.time()
diff = (end - start) - 0.05
self.assert_true(diff < 0.02)
def test_default_viewport(self):
self.assert_equal(self.s.viewport_size, (800, 600))
def test_timeout(self):
self.assert_equal(self.s.page.main_frame.timeout, 90)
# TODO: should test this is being used?
self.s.page.main_frame.timeout = 120
self.assert_equal(self.s.page.main_frame.timeout, 120)
if __name__ == "__main__":
unittest.main()
| [
"unittest.main",
"time.time"
] | [((1725, 1740), 'unittest.main', 'unittest.main', ([], {}), '()\n', (1738, 1740), False, 'import unittest\n'), ((1206, 1217), 'time.time', 'time.time', ([], {}), '()\n', (1215, 1217), False, 'import time\n'), ((1259, 1270), 'time.time', 'time.time', ([], {}), '()\n', (1268, 1270), False, 'import time\n')] |
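The wait_for/timeout tests above exercise a polling loop. A generic sketch of that pattern using only the standard library; it stands in for specter's actual implementation, which is not shown here.

import time

class TimeoutError(Exception):  # local stand-in for specter.specter.TimeoutError
    pass

def wait_for(predicate, timeout=5.0, interval=0.01):
    # poll the predicate until it is truthy or the deadline passes
    deadline = time.monotonic() + timeout
    while time.monotonic() < deadline:
        if predicate():
            return
        time.sleep(interval)
    raise TimeoutError('condition not met within %.2fs' % timeout)

start = time.monotonic()
wait_for(lambda: time.monotonic() - start > 0.05)  # succeeds after ~50 ms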
import numpy as np
import os
from ColorMoment import ColorMoment
import scipy.io as sio
def VFExtractor(present_of_indexing_base,new_product_image_path,feature_path,indexing_path,database_image_path):
# Read all images
image_files = os.listdir(new_product_image_path)
# Extract visual features
number_of_images = len(image_files)
    number_of_visual_features = 225  # here we use a 225-D color histogram solely as an example
VF_of_new_products = np.zeros((number_of_images,number_of_visual_features))
if present_of_indexing_base: #get the number of current products to save the new products to database
VF_database = sio.loadmat(indexing_path + 'VF_database.mat')
VF_database = VF_database['VF_database']
index_of_image_in_database, _ = VF_database.shape # get number of current products
else:
index_of_image_in_database = 0
for i in range(0, number_of_images):
print('Processing image %d' %i)
# Visual feature extraction
VF_of_new_products[i, :] = ColorMoment(new_product_image_path + image_files[i])
#rename and move to database
index_of_image_in_database += 1
os.rename(new_product_image_path + image_files[i], database_image_path + str(index_of_image_in_database) + ".jpg")
# Do normalization
#Get the max, min of new products
normalization_of_new_products = np.zeros((2, number_of_visual_features))
normalization_of_new_products[0, :] = np.min(VF_of_new_products, axis=0)
normalization_of_new_products[1, :] = np.max(VF_of_new_products, axis=0)
#a,b,c are variables for normalization
a = np.zeros((1, number_of_visual_features))
b = np.zeros((1, number_of_visual_features))
c = np.zeros((1, number_of_visual_features))
if present_of_indexing_base: #if there exists an indexing base
        # load related data from the indexing base in case the visual features need updating
normalization = sio.loadmat(indexing_path + 'normalization.mat')
normalization = normalization['normalization']
#check whether existing max, min bounds of features should change for new products
min_new = np.minimum(normalization_of_new_products[0, :], normalization[0, :])
max_new = np.maximum(normalization_of_new_products[1, :], normalization[1, :])
if sum((normalization[0, :]-min_new)+(max_new-normalization[1, :])) > 0: #if changed
Wv = sio.loadmat(indexing_path + 'Wv.mat')
Wv = Wv['Wv']
            # update the visual feature values of products and the cluster weights in the existing indexing base
            # according to equations (12)-(13) in my paper "Online Multimodal Co-indexing and Retrieval of Weakly Labeled Web Image Collections",
            # which take the form a*(b*x+c); we avoid writing it as a division so that identical initial max/min values cannot cause a divide by zero
a[0,:] = 1 / (max_new - min_new)
b[0,:] = normalization[1, :] - normalization[0, :]
c[0,:] = (normalization[0, :] - min_new)
m1 = Wv.shape[0]
m2 = VF_database.shape[0]
            normalizer_a = np.concatenate([a,a], 1)  # note that the dimensionality of Wv and VF_database is doubled in GHF-ART by complement coding
normalizer_a1 = np.repeat(normalizer_a, m1, axis=0) #for updating cluster weights
normalizer_a2 = np.repeat(normalizer_a, m2, axis=0) #for updating product features
normalizer_b = np.concatenate([b, b], 1)
normalizer_b1 = np.repeat(normalizer_b, m1, axis=0)
normalizer_b2 = np.repeat(normalizer_b, m2, axis=0)
normalizer_c = np.concatenate([c, c], 1)
normalizer_c1 = np.repeat(normalizer_c, m1, axis=0)
normalizer_c2 = np.repeat(normalizer_c, m2, axis=0)
#save the updated data to indexing base
Wv_new = normalizer_a1 * (normalizer_b1 * Wv + normalizer_c1)
VF_database_new = normalizer_a2 * (normalizer_b2 * VF_database + normalizer_c2)
sio.savemat(indexing_path + 'Wv.mat', {'Wv': Wv_new})
sio.savemat(indexing_path + 'VF_database.mat', {'VF_database': VF_database_new})
normalization_new = np.array([min_new,max_new])
sio.savemat(indexing_path + 'normalization.mat', {'normalization': normalization_new})
#normalizing visual features of new products with existing normalization records in indexing base
a[0,:] = max_new - min_new
b[0,:] = min_new
normalizer_a = np.repeat(a, number_of_images, axis=0)
normalizer_b = np.repeat(b, number_of_images, axis=0)
VF_of_new_products = (VF_of_new_products - normalizer_b) / normalizer_a
#save visual features of new products for indexing procedures
sio.savemat(feature_path + 'VF_of_new_products.mat', {'VF_of_new_products': VF_of_new_products})
else: #if no change in max min boundaries
a[0,:] = normalization[1, :] - normalization[0, :]
b[0,:] = normalization[0,:]
normalizer_a = np.repeat(a, number_of_images, axis=0)
normalizer_b = np.repeat(b, number_of_images, axis=0)
                # note that we do the following to avoid the case where the initial max and min are the same;
                # any value for such features is fine (see the update equations above) and we use 0 by convention
index = np.where( (normalization[1, :] - normalization[0, :]) > 0)
VF_of_new_products[:,index] = (VF_of_new_products[:,index] - normalizer_b[:,index]) / normalizer_a[:,index]
index = np.where((normalization[1, :] - normalization[0, :]) == 0)
VF_of_new_products[:,index] = 0
# save visual features of new products for indexing procedures
sio.savemat(feature_path + 'VF_of_new_products.mat', {'VF_of_new_products': VF_of_new_products})
else: #if no existing indexing base, use the max,min of new products for normalization
a[0,:] = normalization_of_new_products[1,:] - normalization_of_new_products[0,:]
b[0,:] = normalization_of_new_products[0,:]
normalizer_a = np.repeat(a, number_of_images, axis=0)
normalizer_b = np.repeat(b, number_of_images, axis=0)
index = np.where(normalization_of_new_products[1,:] - normalization_of_new_products[0,:] > 0)
VF_of_new_products[:,index] = (VF_of_new_products[:,index] - normalizer_b[:,index]) / normalizer_a[:,index]
index = np.where(normalization_of_new_products[1,:] - normalization_of_new_products[0,:] == 0)
VF_of_new_products[:,index] = 0
# save normalization and visual features of new products for indexing procedures
sio.savemat(indexing_path + 'normalization.mat', {'normalization': normalization_of_new_products})
sio.savemat(feature_path + 'VF_of_new_products.mat', {'VF_of_new_products': VF_of_new_products})
return VF_of_new_products | [
"os.listdir",
"numpy.repeat",
"numpy.minimum",
"scipy.io.savemat",
"numpy.where",
"scipy.io.loadmat",
"numpy.max",
"numpy.array",
"numpy.zeros",
"numpy.concatenate",
"numpy.min",
"numpy.maximum",
"ColorMoment.ColorMoment"
] | [((255, 289), 'os.listdir', 'os.listdir', (['new_product_image_path'], {}), '(new_product_image_path)\n', (265, 289), False, 'import os\n'), ((487, 542), 'numpy.zeros', 'np.zeros', (['(number_of_images, number_of_visual_features)'], {}), '((number_of_images, number_of_visual_features))\n', (495, 542), True, 'import numpy as np\n'), ((1443, 1483), 'numpy.zeros', 'np.zeros', (['(2, number_of_visual_features)'], {}), '((2, number_of_visual_features))\n', (1451, 1483), True, 'import numpy as np\n'), ((1527, 1561), 'numpy.min', 'np.min', (['VF_of_new_products'], {'axis': '(0)'}), '(VF_of_new_products, axis=0)\n', (1533, 1561), True, 'import numpy as np\n'), ((1605, 1639), 'numpy.max', 'np.max', (['VF_of_new_products'], {'axis': '(0)'}), '(VF_of_new_products, axis=0)\n', (1611, 1639), True, 'import numpy as np\n'), ((1695, 1735), 'numpy.zeros', 'np.zeros', (['(1, number_of_visual_features)'], {}), '((1, number_of_visual_features))\n', (1703, 1735), True, 'import numpy as np\n'), ((1745, 1785), 'numpy.zeros', 'np.zeros', (['(1, number_of_visual_features)'], {}), '((1, number_of_visual_features))\n', (1753, 1785), True, 'import numpy as np\n'), ((1795, 1835), 'numpy.zeros', 'np.zeros', (['(1, number_of_visual_features)'], {}), '((1, number_of_visual_features))\n', (1803, 1835), True, 'import numpy as np\n'), ((674, 720), 'scipy.io.loadmat', 'sio.loadmat', (["(indexing_path + 'VF_database.mat')"], {}), "(indexing_path + 'VF_database.mat')\n", (685, 720), True, 'import scipy.io as sio\n'), ((1077, 1129), 'ColorMoment.ColorMoment', 'ColorMoment', (['(new_product_image_path + image_files[i])'], {}), '(new_product_image_path + image_files[i])\n', (1088, 1129), False, 'from ColorMoment import ColorMoment\n'), ((2016, 2064), 'scipy.io.loadmat', 'sio.loadmat', (["(indexing_path + 'normalization.mat')"], {}), "(indexing_path + 'normalization.mat')\n", (2027, 2064), True, 'import scipy.io as sio\n'), ((2231, 2299), 'numpy.minimum', 'np.minimum', (['normalization_of_new_products[0, :]', 'normalization[0, :]'], {}), '(normalization_of_new_products[0, :], normalization[0, :])\n', (2241, 2299), True, 'import numpy as np\n'), ((2318, 2386), 'numpy.maximum', 'np.maximum', (['normalization_of_new_products[1, :]', 'normalization[1, :]'], {}), '(normalization_of_new_products[1, :], normalization[1, :])\n', (2328, 2386), True, 'import numpy as np\n'), ((6260, 6298), 'numpy.repeat', 'np.repeat', (['a', 'number_of_images'], {'axis': '(0)'}), '(a, number_of_images, axis=0)\n', (6269, 6298), True, 'import numpy as np\n'), ((6323, 6361), 'numpy.repeat', 'np.repeat', (['b', 'number_of_images'], {'axis': '(0)'}), '(b, number_of_images, axis=0)\n', (6332, 6361), True, 'import numpy as np\n'), ((6381, 6472), 'numpy.where', 'np.where', (['(normalization_of_new_products[1, :] - normalization_of_new_products[0, :] > 0)'], {}), '(normalization_of_new_products[1, :] -\n normalization_of_new_products[0, :] > 0)\n', (6389, 6472), True, 'import numpy as np\n'), ((6601, 6693), 'numpy.where', 'np.where', (['(normalization_of_new_products[1, :] - normalization_of_new_products[0, :] == 0\n )'], {}), '(normalization_of_new_products[1, :] -\n normalization_of_new_products[0, :] == 0)\n', (6609, 6693), True, 'import numpy as np\n'), ((6830, 6932), 'scipy.io.savemat', 'sio.savemat', (["(indexing_path + 'normalization.mat')", "{'normalization': normalization_of_new_products}"], {}), "(indexing_path + 'normalization.mat', {'normalization':\n normalization_of_new_products})\n", (6841, 6932), True, 'import scipy.io as sio\n'), ((6938, 7038), 'scipy.io.savemat', 'sio.savemat', (["(feature_path + 'VF_of_new_products.mat')", "{'VF_of_new_products': VF_of_new_products}"], {}), "(feature_path + 'VF_of_new_products.mat', {'VF_of_new_products':\n VF_of_new_products})\n", (6949, 7038), True, 'import scipy.io as sio\n'), ((2501, 2538), 'scipy.io.loadmat', 'sio.loadmat', (["(indexing_path + 'Wv.mat')"], {}), "(indexing_path + 'Wv.mat')\n", (2512, 2538), True, 'import scipy.io as sio\n'), ((3183, 3208), 'numpy.concatenate', 'np.concatenate', (['[a, a]', '(1)'], {}), '([a, a], 1)\n', (3197, 3208), True, 'import numpy as np\n'), ((3331, 3366), 'numpy.repeat', 'np.repeat', (['normalizer_a', 'm1'], {'axis': '(0)'}), '(normalizer_a, m1, axis=0)\n', (3340, 3366), True, 'import numpy as np\n'), ((3425, 3460), 'numpy.repeat', 'np.repeat', (['normalizer_a', 'm2'], {'axis': '(0)'}), '(normalizer_a, m2, axis=0)\n', (3434, 3460), True, 'import numpy as np\n'), ((3519, 3544), 'numpy.concatenate', 'np.concatenate', (['[b, b]', '(1)'], {}), '([b, b], 1)\n', (3533, 3544), True, 'import numpy as np\n'), ((3573, 3608), 'numpy.repeat', 'np.repeat', (['normalizer_b', 'm1'], {'axis': '(0)'}), '(normalizer_b, m1, axis=0)\n', (3582, 3608), True, 'import numpy as np\n'), ((3637, 3672), 'numpy.repeat', 'np.repeat', (['normalizer_b', 'm2'], {'axis': '(0)'}), '(normalizer_b, m2, axis=0)\n', (3646, 3672), True, 'import numpy as np\n'), ((3700, 3725), 'numpy.concatenate', 'np.concatenate', (['[c, c]', '(1)'], {}), '([c, c], 1)\n', (3714, 3725), True, 'import numpy as np\n'), ((3754, 3789), 'numpy.repeat', 'np.repeat', (['normalizer_c', 'm1'], {'axis': '(0)'}), '(normalizer_c, m1, axis=0)\n', (3763, 3789), True, 'import numpy as np\n'), ((3818, 3853), 'numpy.repeat', 'np.repeat', (['normalizer_c', 'm2'], {'axis': '(0)'}), '(normalizer_c, m2, axis=0)\n', (3827, 3853), True, 'import numpy as np\n'), ((4086, 4139), 'scipy.io.savemat', 'sio.savemat', (["(indexing_path + 'Wv.mat')", "{'Wv': Wv_new}"], {}), "(indexing_path + 'Wv.mat', {'Wv': Wv_new})\n", (4097, 4139), True, 'import scipy.io as sio\n'), ((4152, 4237), 'scipy.io.savemat', 'sio.savemat', (["(indexing_path + 'VF_database.mat')", "{'VF_database': VF_database_new}"], {}), "(indexing_path + 'VF_database.mat', {'VF_database': VF_database_new}\n )\n", (4163, 4237), True, 'import scipy.io as sio\n'), ((4267, 4295), 'numpy.array', 'np.array', (['[min_new, max_new]'], {}), '([min_new, max_new])\n', (4275, 4295), True, 'import numpy as np\n'), ((4307, 4397), 'scipy.io.savemat', 'sio.savemat', (["(indexing_path + 'normalization.mat')", "{'normalization': normalization_new}"], {}), "(indexing_path + 'normalization.mat', {'normalization':\n normalization_new})\n", (4318, 4397), True, 'import scipy.io as sio\n'), ((4601, 4639), 'numpy.repeat', 'np.repeat', (['a', 'number_of_images'], {'axis': '(0)'}), '(a, number_of_images, axis=0)\n', (4610, 4639), True, 'import numpy as np\n'), ((4667, 4705), 'numpy.repeat', 'np.repeat', (['b', 'number_of_images'], {'axis': '(0)'}), '(b, number_of_images, axis=0)\n', (4676, 4705), True, 'import numpy as np\n'), ((4880, 4980), 'scipy.io.savemat', 'sio.savemat', (["(feature_path + 'VF_of_new_products.mat')", "{'VF_of_new_products': VF_of_new_products}"], {}), "(feature_path + 'VF_of_new_products.mat', {'VF_of_new_products':\n VF_of_new_products})\n", (4891, 4980), True, 'import scipy.io as sio\n'), ((5159, 5197), 'numpy.repeat', 'np.repeat', (['a', 'number_of_images'], {'axis': '(0)'}), '(a, number_of_images, axis=0)\n', (5168, 5197), True, 'import numpy as np\n'), ((5225, 5263), 'numpy.repeat', 'np.repeat', (['b', 'number_of_images'], {'axis': '(0)'}), '(b, number_of_images, axis=0)\n', (5234, 5263), True, 'import numpy as np\n'), ((5509, 5564), 'numpy.where', 'np.where', (['(normalization[1, :] - normalization[0, :] > 0)'], {}), '(normalization[1, :] - normalization[0, :] > 0)\n', (5517, 5564), True, 'import numpy as np\n'), ((5708, 5764), 'numpy.where', 'np.where', (['(normalization[1, :] - normalization[0, :] == 0)'], {}), '(normalization[1, :] - normalization[0, :] == 0)\n', (5716, 5764), True, 'import numpy as np\n'), ((5900, 6000), 'scipy.io.savemat', 'sio.savemat', (["(feature_path + 'VF_of_new_products.mat')", "{'VF_of_new_products': VF_of_new_products}"], {}), "(feature_path + 'VF_of_new_products.mat', {'VF_of_new_products':\n VF_of_new_products})\n", (5911, 6000), True, 'import scipy.io as sio\n')]
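A tiny numeric check (with made-up values) of the a*(b*x + c) rescaling used above: features normalized against old [min, max] bounds can be mapped onto new bounds without revisiting the raw data.

import numpy as np

raw = np.array([3.0, 5.0, 9.0])                  # made-up raw feature values
old_min, old_max = 3.0, 9.0
x = (raw - old_min) / (old_max - old_min)       # values stored in the indexing base

new_min, new_max = 1.0, 11.0                    # bounds widened by new products
a = 1.0 / (new_max - new_min)
b = old_max - old_min
c = old_min - new_min
renormalized = a * (b * x + c)                  # the a*(b*x+c) update form

direct = (raw - new_min) / (new_max - new_min)  # renormalizing from scratch
print(np.allclose(renormalized, direct))         # True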
import click
import subprocess
import os
import time
from chass.preprocessing import preprocessing
from chass.changed_variables import changed_variables
from chass.identify_variables import identify_variables
from chass.variable import funcvar
from chass.get_line_number import get_value_at_line
from chass.locate_loops import locate_loops
from chass.locate_case import locate_cases
from chass.locate_commands import locate_commands
from chass.identify_functions import identify_functions
from chass.calculate_expr import calculate_expr
from chass.function_handle import function_handle
from chass.sedcommand import sedcommand
from chass.forloop import forloop
from chass.whiloop import whiloop
from chass.untiloop import untiloop
from chass.get_file_paths import get_path
from chass.if_else import if_else
from chass.case_foo import edit_case
from chass.locate_ifs import locate_ifs
from chass.locate_function_calls import locate_function_calls
from chass.remove_cp import remove_cp
@click.command()
@click.option('--variable', '-v', multiple=True, help='Execute this specific variable')
@click.option('--line', '-l', type=int, help='Get value at a particular line')
@click.option('--code', '-c', type=int, help='Get code for a particular line')
@click.option('--codeline', nargs=2, type=int, help='Get a section of your code')
@click.option('--breakpoints', nargs=2, type=int, help="set breakpoint")
@click.option('--function','-f', help="Debug only a function by providing its name")
@click.option('--printall','-p', is_flag=True, help="Prints all the changed variables' values at every line in one go")
@click.option('--output','-o', is_flag=True, help="Shows the actual output of the file")
@click.option('--loops','-r', is_flag=True, help="Debug Only loops")
@click.option('--cond','-i', is_flag=True, help="Debug only conditional statements")
@click.option('--sed','-s', is_flag=True, help="Shows the output of all the sed commands")
@click.argument('file', type=click.Path())
def cli(file, variable, line, code, codeline, breakpoints, function, printall, output, loops, cond, sed):
"""A user friendly CLI Debugging application exclusively for Bash Scripts"""
# delete all pre-existing .txt files
try :
os.system("rm *.txt >/dev/null 2>&1")
except :
pass
#delete all pre-existing .sh files
try :
os.system("rm *.sh >/dev/null 2>&1")
except :
pass
#create a copy of original file for furthur processing
preprocessing(file)
subprocess.call("chmod 777 ./copy.sh", shell=True)
# take parameter from user if required
input_parameters = []
click.echo("Provide input parameters and press ENTER : ")
for argument in input().split(" ") :
input_parameters.append(argument)
new_file = "copy.sh"
f = "copy.sh"
variables_info = identify_variables(new_file)
if_statements = locate_ifs("copy.sh")
for_loops = []
while_loops = []
until_loops = []
locate_loops(new_file,for_loops,while_loops,until_loops)
variables_info = identify_variables(new_file)
remove_cp("copy.sh")
temp = open("original_output.txt","w")
temp.flush()
subprocess.Popen(["bash","copy3.sh"]+input_parameters,stdout=temp,stderr=subprocess.STDOUT)
time.sleep(0.5)
if output :
click.echo("The output of the file is : ")
click.echo(open("original_output.txt",'r').read())
commands = locate_commands("copy.sh")
new_file = open("copy.sh")
sedcommand(f,commands,input_parameters)
case_statements = locate_cases("copy.sh")
functions = identify_functions(f)
for (a, b, c) in variables_info :
funcvar(f, a, b, input_parameters, case_statements, functions)
for (a,b,c) in functions:
function_handle(f,a,b,c,variables_info,case_statements,input_parameters)
time.sleep(0.5)
cnt_for_loops = 0
cnt_while_loops = 0
cnt_until_loops = 0
for (a,b) in for_loops:
forloop(f,a,b,cnt_for_loops,variables_info,input_parameters)
cnt_for_loops=cnt_for_loops+1
for (a,b) in while_loops:
whiloop(f,a,b,cnt_while_loops,variables_info,input_parameters)
cnt_while_loops += 1
for (a,b) in until_loops:
untiloop(f,a,b,cnt_until_loops,variables_info,input_parameters)
cnt_until_loops += 1
time.sleep(0.2)
if_else("copy.sh",for_loops,functions,while_loops,until_loops,if_statements,input_parameters)
edit_case("copy.sh",case_statements,for_loops,functions,while_loops,until_loops,input_parameters)
time.sleep(1)
#count total lines
num_lines=0
new_file = open("copy.sh","r")
for li in new_file:
num_lines+=1
new_file.close()
get_path(file,num_lines,commands)
#only loops
if loops:
new_file = open("copy.sh")
i = -1
changed_variables_info = {}
for (a,b,c) in variables_info:
changed_variables_info[a] = ''
while i<(num_lines-1):
i+=1
b = False
loop_number = -1
#for
for iterator in for_loops:
loop_number += 1
if iterator[0] == i:
b = True
click.echo("For loop starting from line " +str(iterator[0]+1))
for_loop_file = "forloop"+str(loop_number)+".txt"
line_number = -1
for line in open(for_loop_file, "r"):
line_number += 1
if (line_number % len(variables_info) == 0):
click.echo("press ENTER to continue")
var = input()
click.echo(
"Iteration "+str(1+int(line_number/len(variables_info)))+" : ")
if var == '':
mod = line_number % len(variables_info)
if line != changed_variables_info[variables_info[mod][0]] and line != '\n':
changed_variables_info[variables_info[mod][0]] = line
click.echo(variables_info[mod][0]+" : "+line)
elif var == "quit":
break
else:
click.echo("Command not found.")
i -= 1
break
if iterator[0] < i and i < iterator[1]:
b = True
if b:
break
if b:
continue
loop_number=-1
#while
for iterator in while_loops:
loop_number += 1
if iterator[0] == i:
b = True
click.echo("While loop starting from line " +str(iterator[0]+1))
while_loop_file = "whiloop"+str(loop_number)+".txt"
line_number = -1
for line in open(while_loop_file, "r"):
line_number += 1
if (line_number % len(variables_info) == 0):
click.echo("press ENTER to continue")
var = input()
click.echo(
"Iteration "+str(1+int(line_number/len(variables_info)))+" : ")
if var == '':
mod = line_number % len(variables_info)
if line != changed_variables_info[variables_info[mod][0]] and line != '\n':
changed_variables_info[variables_info[mod][0]] = line
click.echo(variables_info[mod][0]+" : "+line)
elif var == "quit":
break
else:
click.echo("Command not found.")
i -= 1
continue
if iterator[0] < i and i < iterator[1]:
b = True
if b:
break
if b:
continue
loop_number = -1
#until
for iterator in until_loops:
loop_number += 1
if iterator[0] == i:
b = True
click.echo("Until loop starting from line " +str(iterator[0]+1))
until_loop_file = "untiloop"+str(loop_number)+".txt"
line_number = -1
for line in open(until_loop_file, "r"):
line_number += 1
if (line_number % len(variables_info) == 0):
click.echo("press ENTER to continue")
var = input()
click.echo("Iteration "+str(1+int(line_number/len(variables_info)))+" : ")
if var == '':
mod = line_number % len(variables_info)
if line != changed_variables_info[variables_info[mod][0]] and line != '\n':
changed_variables_info[variables_info[mod][0]] = line
click.echo(variables_info[mod][0]+" : "+line)
elif var == "quit":
break
else:
click.echo("Command not found.")
i -= 1
continue
if iterator[0] < i and i < iterator[1]:
b = True
if b:
break
if b:
continue
# conditional statements
elif cond :
changed_variables_info = {}
for (a,b,c) in variables_info :
changed_variables_info[a] = ''
for ntuple in if_statements :
i = ntuple[0]
check = False
for iterator in for_loops :
if iterator[0]<i and i<iterator[1] :
check = True
break
for iterator in while_loops :
if iterator[0]<i and i<iterator[1] :
check = True
break
for iterator in until_loops :
if iterator[0]<i and i<iterator[1] :
check = True
break
for iterator in functions :
if iterator[1]<i and i<iterator[2] :
check = True
break
if check :
continue
else :
while i<=ntuple[-1] :
if_file = "ifrand"+str(i)+".txt"
click.echo("press ENTER to continue")
var = input()
if i in ntuple and i!=ntuple[-1] :
if var=='' :
click.echo("line : "+str(i+1))
click.echo("The condition for the if/elif statement is : ")
subprocess.call("head -"+str(i+1)+" copy.sh | tail -1", shell=True)
click.echo("The values of the variables used in the condition are : ")
click.echo(open(if_file,"r").read())
elif var=="c" :
subprocess.call("head -"+str(i+1)+" copy.sh | tail -1", shell=True)
i -= 1
else :
click.echo("Command not found.")
i -= 1
else :
if var=='' :
changed_variables_list = changed_variables(f, i)
click.echo("line : "+str(i+1))
#check if variable is changed or not
if changed_variables_list :
for j in range(len(changed_variables_list)) :
changed_variables_info[changed_variables_list[j][0]] = changed_variables_list[j][1]
click.echo(changed_variables_list[j][0] + " : " + changed_variables_list[j][1])
else:
click.echo("No variable change!")
elif var=="c" :
subprocess.call("head -"+str(i+1)+" copy.sh | tail -1", shell=True)
i -= 1
else :
click.echo("Command not found.")
i -= 1
i += 1
elif sed :
changed_variables_info = {}
for (a,b,c) in variables_info :
changed_variables_info[a] = ''
if not line :
sed_count = 0
for iterator in commands :
if iterator[1]=="sed" :
i = iterator[0]
sedfile = open("sedfile"+str(sed_count)+".txt","r+")
sed_count += 1
click.echo("press ENTER to continue")
var = input()
if var=='' :
click.echo("line : "+str(i+1))
out = sedfile.read()
if out!='' :
click.echo("The output of the sed file is : ")
click.echo(out)
else :
changed_variables_list = changed_variables(f, i)
#check if variable is changed or not
if changed_variables_list :
for j in range(len(changed_variables_list)) :
changed_variables_info[changed_variables_list[j][0]] = changed_variables_list[j][1]
click.echo(changed_variables_list[j][0] + " : " + changed_variables_list[j][1])
else:
click.echo("No variable change!")
else :
click.echo("Command not found.")
else :
sed_count = -1
b = False
for iterator in commands :
if iterator[1]=="sed" :
sed_count += 1
if iterator[0]==(line-1) :
b = True
sedfile = open("sedfile"+str(sed_count)+".txt","r+")
out = sedfile.read()
if out!='' :
click.echo("The output of the sed file is : ")
click.echo(out)
else :
changed_variables_list = changed_variables(f, line-1)
#check if variable is changed or not
if changed_variables_list :
for j in range(len(changed_variables_list)) :
changed_variables_info[changed_variables_list[j][0]] = changed_variables_list[j][1]
click.echo(changed_variables_list[j][0] + " : " + changed_variables_list[j][1])
else:
click.echo("No variable change!")
if not b :
click.echo("No sed command found on given line number.")
elif function :
check = False
starting_index = 0
ending_index = 0
for (a,b,c) in functions :
if function == a :
starting_index = b
ending_index = c
check = True
break
if not check :
click.echo("No such function is defined.")
else :
function_arguments = []
click.echo("Provide input arguments for the function and press ENTER : ")
for argument in input().split(" ") :
function_arguments.append(argument)
function_calls_list = locate_function_calls("copy.sh",function)
click.echo("The line numbers where the function was called are : ")
for d in function_calls_list :
if d!=starting_index :
print(d+1,end=" ")
click.echo()
i = starting_index
sed_count = 0
line_number = -1
changed_variables_info = {}
for (a,b,c) in variables_info :
changed_variables_info[a] = ''
while i<(ending_index-1) :
i += 1
b = False
loop_number = -1
for iterator in for_loops :
loop_number += 1
if iterator[0]<=i and i<iterator[1] :
b = True
if b:
break
if b:
continue
loop_number = -1
for iterator in while_loops :
loop_number += 1
if iterator[0]<=i and i<iterator[1] :
b = True
if b:
break
if b:
continue
loop_number = -1
for iterator in until_loops :
loop_number += 1
if iterator[0]<=i and i<iterator[1] :
b = True
if b:
break
if b:
continue
for iterator in if_statements :
if iterator[0]<=i and i<iterator[-1] :
b = True
break
if b :
continue
for iterator in case_statements :
if iterator[0]<=i and i<iterator[-1]:
b = True
break
if b :
continue
for iterator in commands :
if iterator[0]==i and iterator[1]=="sed" :
sedfile = open("sedfile"+str(sed_count)+".txt","r+")
sed_count += 1
b = True
click.echo("press ENTER to continue")
var = input()
click.echo("line : "+str(i+1))
if var=='' :
out = sedfile.read()
if out!='' :
click.echo("The output of the sed file is : ")
click.echo(out)
else :
changed_variables_list = changed_variables(f, i)
#check if variable is changed or not
if changed_variables_list :
for j in range(len(changed_variables_list)) :
changed_variables_info[changed_variables_list[j][0]] = changed_variables_list[j][1]
click.echo(changed_variables_list[j][0] + " : " + changed_variables_list[j][1])
else:
click.echo("No variable change!")
elif var=='c' :
subprocess.call("head -"+str(i+1)+" copy.sh | tail -1", shell=True)
i -= 1
else :
click.echo("Command not found.")
i -= 1
break
if b :
continue
function_file = str(function)+"_handle.txt"
click.echo("press ENTER to continue")
var = input()
if var=='' :
click.echo("line "+str(i+1)+" : ")
count = 0
temp = 0
for line in open(function_file,"r") :
if temp==len(variables_info) :
break
line_number += 1
mod = line_number%len(variables_info)
if line!=changed_variables_info[variables_info[mod][0]] and line!='\n':
changed_variables_info[variables_info[mod][0]] = line
click.echo(variables_info[mod][0]+" : "+line)
count += 1
temp += 1
if count==0 :
click.echo("No variable change.")
elif var=="c" :
subprocess.call("head -"+str(i+1)+" copy.sh | tail -1", shell=True)
i -= 1
elif var=="quit" :
break
elif var=="pwd" :
subprocess.call("head -"+str(i+1)+" pwd.txt | tail -1", shell=True)
i -= 1
else :
click.echo("Command not found.")
i -= 1
elif code :
subprocess.call("head -"+str(code)+" "+str(file)+" | tail -1", shell=True)
elif codeline :
for i in range(codeline[0], codeline[1]+1):
subprocess.call("head -"+str(i)+" "+str(file)+" | tail -1", shell=True)
#particular variable b/w given breakpoints
elif breakpoints and variable:
for i in range(breakpoints[0], breakpoints[1]+1):
for variable_name in variable :
value = get_value_at_line(variable_name, i-1)
#check variable scope
if i > num_lines or i <= 0:
click.echo("Line number out of file!")
elif value == "\n":
click.echo("Variable out of scope!")
else:
click.echo(variable_name + " at line number " + str(i) + ":" + str(value))
elif breakpoints:
new_file = open("copy.sh")
sed_count = 0
changed_variables_info = {}
for (a,b,c) in variables_info :
changed_variables_info[a] = ''
if len(breakpoints)==0 :
click.echo("Insufficient number of arguments for option.")
else :
i = breakpoints[0]-1
if len(breakpoints)==1 :
temp = num_lines-1
else :
temp = breakpoints[1]-1
while i<temp :
i += 1
b = False
loop_number = -1
for iterator in functions :
if iterator[1]==i :
i = iterator[2]
break
for iterator in for_loops :
loop_number += 1
if iterator[0]==i :
b = True
click.echo("For loop starting from line "+str(iterator[0]+1))
for_loop_file = "forloop"+str(loop_number)+".txt"
line_number = -1
for line in open(for_loop_file,"r") :
line_number += 1
if (line_number%len(variables_info)==0) :
click.echo("press ENTER to continue")
var = input()
click.echo("Iteration "+str(1+int(line_number/len(variables_info)))+" : ")
if var=='' :
mod = line_number%len(variables_info)
if line!=changed_variables_info[variables_info[mod][0]] and line!='\n':
changed_variables_info[variables_info[mod][0]] = line
click.echo(variables_info[mod][0]+" : "+line)
elif var=="quit" :
break
else :
click.echo("Command not found.")
i -= 1
break
if iterator[0]<i and i<iterator[1] :
b = True
if b:
break
if b:
continue
loop_number = -1
for iterator in while_loops :
loop_number += 1
if iterator[0]==i :
b = True
click.echo("While loop starting from line "+str(iterator[0]+1))
while_loop_file = "whiloop"+str(loop_number)+".txt"
line_number = -1
for line in open(while_loop_file,"r") :
line_number += 1
if (line_number%len(variables_info)==0) :
click.echo("press ENTER to continue")
var = input()
click.echo("Iteration "+str(1+int(line_number/len(variables_info)))+" : ")
if var=='' :
mod = line_number%len(variables_info)
if line!=changed_variables_info[variables_info[mod][0]] and line!='\n' :
changed_variables_info[variables_info[mod][0]] = line
click.echo(variables_info[mod][0]+" : "+line)
elif var=="quit" :
break
else :
click.echo("Command not found.")
i -= 1
continue
if iterator[0]<i and i<iterator[1] :
b = True
if b:
break
if b:
continue
loop_number = -1
for iterator in until_loops :
loop_number += 1
if iterator[0]==i :
b = True
click.echo("Until loop starting from line "+str(iterator[0]+1))
until_loop_file = "untiloop"+str(loop_number)+".txt"
line_number = -1
for line in open(until_loop_file,"r") :
line_number += 1
if (line_number%len(variables_info)==0) :
click.echo("press ENTER to continue")
var = input()
click.echo("Iteration "+str(1+int(line_number/len(variables_info)))+" : ")
if var=='' :
mod = line_number%len(variables_info)
if line!=changed_variables_info[variables_info[mod][0]] and line!='\n' :
changed_variables_info[variables_info[mod][0]] = line
click.echo(variables_info[mod][0]+" : "+line)
elif var=="quit" :
break
else :
click.echo("Command not found.")
i -= 1
continue
if iterator[0]<i and i<iterator[1] :
b = True
if b:
break
if b:
continue
for iterator in if_statements :
for a in range(len(iterator)-1) :
if iterator[a]==i :
if_file = "ifrand"+str(i)+".txt"
click.echo("press ENTER to continue")
var = input()
b = True
if var=='' :
click.echo("line : "+str(i+1))
click.echo("The condition for the if/elif statement is : ")
subprocess.call("head -"+str(i+1)+" copy.sh | tail -1", shell=True)
click.echo("The values of the variables used in the condition are : ")
click.echo(open(if_file,"r").read())
elif var=="c" :
subprocess.call("head -"+str(i+1)+" copy.sh | tail -1", shell=True)
i -= 1
else :
click.echo("Command not found.")
i -= 1
break
if b :
continue
for iterator in case_statements :
if iterator[0]==i :
case_file = "case_"+str(i)+"rand__namenotcommon.txt"
click.echo("press ENTER to continue")
var = input()
b = True
if var=='' :
click.echo("line : "+str(i+1))
click.echo("The value of the variable used in the condition is : ")
click.echo(open(case_file,"r").read())
elif var=="c" :
subprocess.call("head -"+str(i+1)+" copy.sh | tail -1", shell=True)
i -= 1
else :
click.echo("Command not found.")
i -= 1
break
if b :
continue
for iterator in commands :
if iterator[0]==i and iterator[1]=="sed" :
sedfile = open("sedfile"+str(sed_count)+".txt","r+")
sed_count += 1
b = True
click.echo("press ENTER to continue")
var = input()
if var=='' :
click.echo("line : "+str(i+1))
out = sedfile.read()
if out!='' :
click.echo("The output of the sed file is : ")
click.echo(out)
else :
changed_variables_list = changed_variables(f, i)
#check if variable is changed or not
if changed_variables_list :
for j in range(len(changed_variables_list)) :
changed_variables_info[changed_variables_list[j][0]] = changed_variables_list[j][1]
click.echo(changed_variables_list[j][0] + " : " + changed_variables_list[j][1])
else:
click.echo("No variable change!")
elif var=='c' :
subprocess.call("head -"+str(i+1)+" copy.sh | tail -1", shell=True)
i -= 1
else :
click.echo("Command not found.")
i -= 1
break
if b :
continue
click.echo("press ENTER to continue")
var = input()
if var=='' :
changed_variables_list = changed_variables(f, i)
click.echo("line : "+str(i+1))
#check if variable is changed or not
if changed_variables_list :
for j in range(len(changed_variables_list)) :
changed_variables_info[changed_variables_list[j][0]] = changed_variables_list[j][1]
click.echo(changed_variables_list[j][0] + " : " + changed_variables_list[j][1])
else:
click.echo("No variable change!")
elif var=="expr" :
click.echo("Provide the expression to be calculated : ")
expression = input()
output = calculate_expr(i,expression,variables_info)
# if the output is empty that means either the syntax is invalid or the variable is out of scope
if len(output)==0 :
click.echo("The expression could not be evaluated.")
else :
click.echo("The output of the above expression is : "+ output)
i -= 1
elif var=="c" :
subprocess.call("head -"+str(i+1)+" copy.sh | tail -1", shell=True)
i -= 1
elif var=="quit" :
break
elif var=="pwd" :
subprocess.call("head -"+str(i+1)+" pwd.txt | tail -1", shell=True)
i -= 1
else :
click.echo("Command not found.")
i -= 1
#if variable is provided
elif variable:
sed_count = 0
#check if variable(s) is/are present
for variable_name in variable :
check = False
for (a, b, c) in variables_info:
if variable_name==a:
check=True
break
else:
pass
if not check:
            # variable is not present
click.echo("Given variable not found!")
break
if not check :
pass
elif line :
for variable_name in variable :
value = get_value_at_line(variable_name, line-1)
#check variable scope
if line > num_lines or line <= 0 :
click.echo("Line number out of file!")
elif value == "\n":
click.echo("Variable "+variable_name+" out of scope!")
else:
click.echo(variable_name+" : "+value)
elif not printall :
changed_variables_info = {}
for (a,b,c) in variables_info :
changed_variables_info[a] = ''
i = -1
while i<(num_lines-1):
i += 1
b = False
loop_number = -1
#for
for iterator in for_loops:
loop_number += 1
if iterator[0] == i:
b = True
click.echo("For loop starting from line " +str(iterator[0]+1))
for_loop_file = "forloop"+str(loop_number)+".txt"
line_number = -1
for line in open(for_loop_file, "r"):
line_number += 1
if (line_number % len(variables_info) == 0):
click.echo("press ENTER to continue")
var = input()
click.echo(
"Iteration "+str(1+int(line_number/len(variables_info)))+" : ")
if var == '':
mod = line_number % len(variables_info)
if line != '\n' and ( variables_info[mod][0] in variable) :
changed_variables_info[variables_info[mod][0]] = line
click.echo(variables_info[mod][0]+" : "+line)
elif var == "quit":
break
else:
click.echo("Command not found.")
i -= 1
break
if iterator[0] < i and i < iterator[1]:
b = True
if b:
break
if b:
continue
loop_number = -1
#while
for iterator in while_loops:
loop_number += 1
if iterator[0] == i:
b = True
click.echo("While loop starting from line " +
str(iterator[0]+1))
while_loop_file = "whiloop"+str(loop_number)+".txt"
line_number = -1
for line in open(while_loop_file, "r"):
line_number += 1
if (line_number % len(variables_info) == 0):
click.echo("press ENTER to continue")
var = input()
click.echo(
"Iteration "+str(1+int(line_number/len(variables_info)))+" : ")
if var == '':
mod = line_number % len(variables_info)
if line != '\n' and ( variables_info[mod][0] in variable) :
changed_variables_info[variables_info[mod][0]] = line
click.echo(variables_info[mod][0]+" : "+line)
elif var == "quit":
break
else:
click.echo("Command not found.")
i -= 1
continue
if iterator[0] < i and i < iterator[1]:
b = True
if b:
break
if b:
continue
loop_number = -1
#until
for iterator in until_loops:
loop_number += 1
if iterator[0] == i:
b = True
click.echo("Until loop starting from line " +
str(iterator[0]+1))
until_loop_file = "untiloop"+str(loop_number)+".txt"
line_number = -1
for line in open(until_loop_file, "r"):
line_number += 1
if (line_number % len(variables_info) == 0):
click.echo("press ENTER to continue")
var = input()
click.echo(
"Iteration "+str(1+int(line_number/len(variables_info)))+" : ")
if var == '':
mod = line_number % len(variables_info)
if line != '\n' and ( variables_info[mod][0] in variable) :
changed_variables_info[variables_info[mod][0]] = line
click.echo(variables_info[mod][0]+" : "+line)
elif var == "quit":
break
else:
click.echo("Command not found.")
i -= 1
continue
if iterator[0] < i and i < iterator[1]:
b = True
if b:
break
if b :
continue
click.echo("press ENTER to continue")
var = input()
if var=='' :
click.echo("line : "+str(i+1))
for var_name in variable:
click.echo(var_name+" = "+get_value_at_line(var_name,i))
elif var=="c" :
subprocess.call("head -"+str(i+1)+" copy.sh | tail -1", shell=True)
i -= 1
elif var=="quit" :
break
elif var=="pwd" :
subprocess.call("head -"+str(i+1)+" pwd.txt | tail -1", shell=True)
i -= 1
else :
changed_variables_info = {}
for (a,b,c) in variables_info :
changed_variables_info[a] = ''
i = -1
while i<(num_lines-1):
i += 1
b = False
loop_number = -1
#for
for iterator in for_loops:
loop_number += 1
if iterator[0] == i:
b = True
click.echo("For loop starting from line " +str(iterator[0]+1))
for_loop_file = "forloop"+str(loop_number)+".txt"
line_number = -1
for line in open(for_loop_file, "r"):
line_number += 1
if (line_number % len(variables_info) == 0):
click.echo(
"Iteration "+str(1+int(line_number/len(variables_info)))+" : ")
mod = line_number % len(variables_info)
if line != '\n' and ( variables_info[mod][0] in variable) :
changed_variables_info[variables_info[mod][0]] = line
click.echo(variables_info[mod][0]+" : "+line)
break
if iterator[0] < i and i < iterator[1]:
b = True
if b:
break
if b:
continue
loop_number = -1
#while
for iterator in while_loops:
loop_number += 1
if iterator[0] == i:
b = True
click.echo("While loop starting from line " +
str(iterator[0]+1))
while_loop_file = "whiloop"+str(loop_number)+".txt"
line_number = -1
for line in open(while_loop_file, "r"):
line_number += 1
if (line_number % len(variables_info) == 0):
click.echo(
"Iteration "+str(1+int(line_number/len(variables_info)))+" : ")
mod = line_number % len(variables_info)
if line != '\n' and ( variables_info[mod][0] in variable) :
changed_variables_info[variables_info[mod][0]] = line
click.echo(variables_info[mod][0]+" : "+line)
continue
if iterator[0] < i and i < iterator[1]:
b = True
if b:
break
if b:
continue
loop_number = -1
#until
for iterator in until_loops:
loop_number += 1
if iterator[0] == i:
b = True
click.echo("Until loop starting from line " +
str(iterator[0]+1))
until_loop_file = "untiloop"+str(loop_number)+".txt"
line_number = -1
for line in open(until_loop_file, "r"):
line_number += 1
if (line_number % len(variables_info) == 0):
click.echo(
"Iteration "+str(1+int(line_number/len(variables_info)))+" : ")
mod = line_number % len(variables_info)
if line != '\n' and ( variables_info[mod][0] in variable) :
changed_variables_info[variables_info[mod][0]] = line
click.echo(variables_info[mod][0]+" : "+line)
continue
if iterator[0] < i and i < iterator[1]:
b = True
if b:
break
if b :
continue
click.echo("line : "+str(i+1))
for var_name in variable:
click.echo(var_name+" = "+get_value_at_line(var_name,i))
elif line :
for (a,b,c) in variables_info :
value = get_value_at_line(a,line-1)
if value!="\n" :
click.echo(a+" : "+value)
elif printall :
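        # --printall mode: run straight through the script once, printing
        # every variable change without pausing for input.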
new_file = open("copy.sh")
i = -1
sed_count = 0
changed_variables_info = {}
for (a,b,c) in variables_info :
changed_variables_info[a] = ''
while i<(num_lines-1) :
i += 1
b = False
loop_number = -1
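            # skip over function bodies by jumping from the line where a
            # function starts to the line where it ends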
for iterator in functions :
if iterator[1]==i :
i = iterator[2]
break
for iterator in for_loops :
loop_number += 1
if iterator[0]==i :
b = True
click.echo("For loop starting from line "+str(iterator[0]+1))
for_loop_file = "forloop"+str(loop_number)+".txt"
line_number = -1
for line in open(for_loop_file,"r") :
line_number += 1
if (line_number%len(variables_info)==0) :
click.echo("Iteration "+str(1+int(line_number/len(variables_info)))+" : ")
mod = line_number%len(variables_info)
if line!=changed_variables_info[variables_info[mod][0]] and line!='\n':
changed_variables_info[variables_info[mod][0]] = line
click.echo(variables_info[mod][0]+" : "+line)
break
if iterator[0]<i and i<iterator[1] :
b = True
if b:
break
if b:
continue
loop_number = -1
for iterator in while_loops :
loop_number += 1
if iterator[0]==i :
b = True
click.echo("While loop starting from line "+str(iterator[0]+1))
while_loop_file = "whiloop"+str(loop_number)+".txt"
line_number = -1
for line in open(while_loop_file,"r") :
line_number += 1
if (line_number%len(variables_info)==0) :
click.echo("Iteration "+str(1+int(line_number/len(variables_info)))+" : ")
mod = line_number%len(variables_info)
if line!=changed_variables_info[variables_info[mod][0]] and line!='\n' :
changed_variables_info[variables_info[mod][0]] = line
click.echo(variables_info[mod][0]+" : "+line)
continue
if iterator[0]<i and i<iterator[1] :
b = True
if b:
break
if b:
continue
loop_number = -1
for iterator in until_loops :
loop_number += 1
if iterator[0]==i :
b = True
click.echo("Until loop starting from line "+str(iterator[0]+1))
until_loop_file = "untiloop"+str(loop_number)+".txt"
line_number = -1
for line in open(until_loop_file,"r") :
line_number += 1
if (line_number%len(variables_info)==0) :
click.echo("Iteration "+str(1+int(line_number/len(variables_info)))+" : ")
mod = line_number%len(variables_info)
if line!=changed_variables_info[variables_info[mod][0]] and line!='\n' :
changed_variables_info[variables_info[mod][0]] = line
click.echo(variables_info[mod][0]+" : "+line)
continue
if iterator[0]<i and i<iterator[1] :
b = True
if b:
break
if b:
continue
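            # if/elif statements: echo the condition line itself, then the
            # recorded values of the variables it uses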
for iterator in if_statements :
for a in range(len(iterator)-1) :
if iterator[a]==i :
click.echo("line : "+str(i+1))
if_file = "ifrand"+str(i)+".txt"
b = True
click.echo("The condition for the if/elif statement is : ")
subprocess.call("head -"+str(i+1)+" copy.sh | tail -1", shell=True)
click.echo("The values of the variables used in the condition are : ")
click.echo(open(if_file,"r").read())
break
if b :
continue
for iterator in case_statements :
if iterator[0]==i :
click.echo("line : "+str(i+1))
case_file = "case_"+str(i)+"rand__namenotcommon.txt"
b = True
click.echo("The value of the variable used in the condition is : ")
click.echo(open(case_file,"r").read())
break
if b :
continue
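            # sed commands: show the captured output of each sed call
            # (sedfileN.txt); fall back to the variable diff when it is empty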
for iterator in commands :
if iterator[0]==i and iterator[1]=="sed" :
sedfile = open("sedfile"+str(sed_count)+".txt","r+")
sed_count += 1
b = True
click.echo("line : "+str(i+1))
out = sedfile.read()
if out!='' :
click.echo("The output of the sed file is : ")
click.echo(out)
else :
changed_variables_list = changed_variables(f, i)
                        # check whether any variable changed on this line
if changed_variables_list :
for j in range(len(changed_variables_list)) :
changed_variables_info[changed_variables_list[j][0]] = changed_variables_list[j][1]
click.echo(changed_variables_list[j][0] + " : " + changed_variables_list[j][1])
else:
click.echo("No variable change!")
break
if b :
continue
changed_variables_list = changed_variables(f, i)
click.echo("line : "+str(i+1))
                # check whether any variable changed on this line
if changed_variables_list :
for j in range(len(changed_variables_list)) :
changed_variables_info[changed_variables_list[j][0]] = changed_variables_list[j][1]
click.echo(changed_variables_list[j][0] + " : " + changed_variables_list[j][1])
else:
click.echo("No variable change!")
    # Default version: no flags given, step through the whole script interactively
else :
new_file = open("copy.sh")
i = -1
sed_count = 0
changed_variables_info = {}
for (a,b,c) in variables_info :
changed_variables_info[a] = ''
while i<(num_lines-1) :
i += 1
b = False
loop_number = -1
for iterator in functions :
if iterator[1]==i :
i = iterator[2]
break
for iterator in for_loops :
loop_number += 1
if iterator[0]==i :
b = True
click.echo("For loop starting from line "+str(iterator[0]+1))
for_loop_file = "forloop"+str(loop_number)+".txt"
line_number = -1
for line in open(for_loop_file,"r") :
line_number += 1
if (line_number%len(variables_info)==0) :
click.echo("press ENTER to continue")
var = input()
click.echo("Iteration "+str(1+int(line_number/len(variables_info)))+" : ")
if var=='' :
mod = line_number%len(variables_info)
if line!=changed_variables_info[variables_info[mod][0]] and line!='\n':
changed_variables_info[variables_info[mod][0]] = line
click.echo(variables_info[mod][0]+" : "+line)
elif var=="quit" :
break
else :
click.echo("Command not found.")
i -= 1
break
if iterator[0]<i and i<iterator[1] :
b = True
if b:
break
if b:
continue
loop_number = -1
for iterator in while_loops :
loop_number += 1
if iterator[0]==i :
b = True
click.echo("While loop starting from line "+str(iterator[0]+1))
while_loop_file = "whiloop"+str(loop_number)+".txt"
line_number = -1
for line in open(while_loop_file,"r") :
line_number += 1
if (line_number%len(variables_info)==0) :
click.echo("press ENTER to continue")
var = input()
click.echo("Iteration "+str(1+int(line_number/len(variables_info)))+" : ")
if var=='' :
mod = line_number%len(variables_info)
if line!=changed_variables_info[variables_info[mod][0]] and line!='\n' :
changed_variables_info[variables_info[mod][0]] = line
click.echo(variables_info[mod][0]+" : "+line)
elif var=="quit" :
break
else :
click.echo("Command not found.")
i -= 1
continue
if iterator[0]<i and i<iterator[1] :
b = True
if b:
break
if b:
continue
loop_number = -1
for iterator in until_loops :
loop_number += 1
if iterator[0]==i :
b = True
click.echo("Until loop starting from line "+str(iterator[0]+1))
until_loop_file = "untiloop"+str(loop_number)+".txt"
line_number = -1
for line in open(until_loop_file,"r") :
line_number += 1
if (line_number%len(variables_info)==0) :
click.echo("press ENTER to continue")
var = input()
click.echo("Iteration "+str(1+int(line_number/len(variables_info)))+" : ")
if var=='' :
mod = line_number%len(variables_info)
if line!=changed_variables_info[variables_info[mod][0]] and line!='\n' :
changed_variables_info[variables_info[mod][0]] = line
click.echo(variables_info[mod][0]+" : "+line)
elif var=="quit" :
break
else :
click.echo("Command not found.")
i -= 1
continue
if iterator[0]<i and i<iterator[1] :
b = True
if b:
break
if b:
continue
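            # interactive if/elif handling: on ENTER show the condition and
            # its variable values, on "c" show the raw source line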
for iterator in if_statements :
for a in range(len(iterator)-1) :
if iterator[a]==i :
if_file = "ifrand"+str(i)+".txt"
click.echo("press ENTER to continue")
var = input()
b = True
if var=='' :
click.echo("line : "+str(i+1))
click.echo("The condition for the if/elif statement is : ")
subprocess.call("head -"+str(i+1)+" copy.sh | tail -1", shell=True)
click.echo("The values of the variables used in the condition are : ")
click.echo(open(if_file,"r").read())
elif var=="c" :
subprocess.call("head -"+str(i+1)+" copy.sh | tail -1", shell=True)
i -= 1
else :
click.echo("Command not found.")
i -= 1
break
if b :
continue
for iterator in case_statements :
if iterator[0]==i :
case_file = "case_"+str(i)+"rand__namenotcommon.txt"
click.echo("press ENTER to continue")
var = input()
b = True
if var=='' :
click.echo("line : "+str(i+1))
click.echo("The value of the variable used in the condition is : ")
click.echo(open(case_file,"r").read())
elif var=="c" :
subprocess.call("head -"+str(i+1)+" copy.sh | tail -1", shell=True)
i -= 1
else :
click.echo("Command not found.")
i -= 1
break
if b :
continue
for iterator in commands :
if iterator[0]==i and iterator[1]=="sed" :
sedfile = open("sedfile"+str(sed_count)+".txt","r+")
sed_count += 1
b = True
click.echo("press ENTER to continue")
var = input()
if var=='' :
click.echo("line : "+str(i+1))
out = sedfile.read()
if out!='' :
click.echo("The output of the sed file is : ")
click.echo(out)
else :
changed_variables_list = changed_variables(f, i)
                            # check whether any variable changed on this line
if changed_variables_list :
for j in range(len(changed_variables_list)) :
changed_variables_info[changed_variables_list[j][0]] = changed_variables_list[j][1]
click.echo(changed_variables_list[j][0] + " : " + changed_variables_list[j][1])
else:
click.echo("No variable change!")
elif var=='c' :
subprocess.call("head -"+str(i+1)+" copy.sh | tail -1", shell=True)
i -= 1
else :
click.echo("Command not found.")
i -= 1
break
if b :
continue
click.echo("press ENTER to continue")
var = input()
if var=='' :
changed_variables_list = changed_variables(f, i)
click.echo("line : "+str(i+1))
                # check whether any variable changed on this line
if changed_variables_list :
for j in range(len(changed_variables_list)) :
changed_variables_info[changed_variables_list[j][0]] = changed_variables_list[j][1]
click.echo(changed_variables_list[j][0] + " : " + changed_variables_list[j][1])
else:
click.echo("No variable change!")
elif var=="expr" :
click.echo("Provide the expression to be calculated : ")
expression = input()
output = calculate_expr(i,expression,variables_info)
            # an empty output means the syntax was invalid or a variable is out of scope
if len(output)==0 :
click.echo("The expression could not be evaluated.")
else :
click.echo("The output of the above expression is : "+ output)
i -= 1
elif var=="c" :
subprocess.call("head -"+str(i+1)+" copy.sh | tail -1", shell=True)
i -= 1
elif var=="quit" :
break
elif var=="pwd" :
subprocess.call("head -"+str(i+1)+" pwd.txt | tail -1", shell=True)
i -= 1
else :
click.echo("Command not found.")
i -= 1
    # delete all temp files created during execution
subprocess.call("rm *.txt", shell=True)
subprocess.call("rm *.sh", shell=True) | [
"chass.changed_variables.changed_variables",
"time.sleep",
"click.echo",
"chass.identify_functions.identify_functions",
"chass.function_handle.function_handle",
"chass.preprocessing.preprocessing",
"chass.identify_variables.identify_variables",
"chass.get_file_paths.get_path",
"click.option",
"subprocess.Popen",
"chass.calculate_expr.calculate_expr",
"chass.get_line_number.get_value_at_line",
"chass.locate_function_calls.locate_function_calls",
"subprocess.call",
"click.command",
"chass.if_else.if_else",
"chass.locate_commands.locate_commands",
"chass.remove_cp.remove_cp",
"chass.untiloop.untiloop",
"chass.forloop.forloop",
"chass.locate_loops.locate_loops",
"chass.sedcommand.sedcommand",
"chass.case_foo.edit_case",
"chass.variable.funcvar",
"chass.whiloop.whiloop",
"chass.locate_case.locate_cases",
"click.Path",
"chass.locate_ifs.locate_ifs",
"os.system"
] | [((986, 1001), 'click.command', 'click.command', ([], {}), '()\n', (999, 1001), False, 'import click\n'), ((1003, 1094), 'click.option', 'click.option', (['"""--variable"""', '"""-v"""'], {'multiple': '(True)', 'help': '"""Execute this specific variable"""'}), "('--variable', '-v', multiple=True, help=\n 'Execute this specific variable')\n", (1015, 1094), False, 'import click\n'), ((1091, 1168), 'click.option', 'click.option', (['"""--line"""', '"""-l"""'], {'type': 'int', 'help': '"""Get value at a particular line"""'}), "('--line', '-l', type=int, help='Get value at a particular line')\n", (1103, 1168), False, 'import click\n'), ((1170, 1247), 'click.option', 'click.option', (['"""--code"""', '"""-c"""'], {'type': 'int', 'help': '"""Get code for a particular line"""'}), "('--code', '-c', type=int, help='Get code for a particular line')\n", (1182, 1247), False, 'import click\n'), ((1249, 1334), 'click.option', 'click.option', (['"""--codeline"""'], {'nargs': '(2)', 'type': 'int', 'help': '"""Get a section of your code"""'}), "('--codeline', nargs=2, type=int, help='Get a section of your code'\n )\n", (1261, 1334), False, 'import click\n'), ((1331, 1402), 'click.option', 'click.option', (['"""--breakpoints"""'], {'nargs': '(2)', 'type': 'int', 'help': '"""set breakpoint"""'}), "('--breakpoints', nargs=2, type=int, help='set breakpoint')\n", (1343, 1402), False, 'import click\n'), ((1404, 1493), 'click.option', 'click.option', (['"""--function"""', '"""-f"""'], {'help': '"""Debug only a function by providing its name"""'}), "('--function', '-f', help=\n 'Debug only a function by providing its name')\n", (1416, 1493), False, 'import click\n'), ((1489, 1613), 'click.option', 'click.option', (['"""--printall"""', '"""-p"""'], {'is_flag': '(True)', 'help': '"""Prints all the changed variables\' values at every line in one go"""'}), '(\'--printall\', \'-p\', is_flag=True, help=\n "Prints all the changed variables\' values at every line in one go")\n', (1501, 1613), False, 'import click\n'), ((1609, 1702), 'click.option', 'click.option', (['"""--output"""', '"""-o"""'], {'is_flag': '(True)', 'help': '"""Shows the actual output of the file"""'}), "('--output', '-o', is_flag=True, help=\n 'Shows the actual output of the file')\n", (1621, 1702), False, 'import click\n'), ((1698, 1766), 'click.option', 'click.option', (['"""--loops"""', '"""-r"""'], {'is_flag': '(True)', 'help': '"""Debug Only loops"""'}), "('--loops', '-r', is_flag=True, help='Debug Only loops')\n", (1710, 1766), False, 'import click\n'), ((1767, 1856), 'click.option', 'click.option', (['"""--cond"""', '"""-i"""'], {'is_flag': '(True)', 'help': '"""Debug only conditional statements"""'}), "('--cond', '-i', is_flag=True, help=\n 'Debug only conditional statements')\n", (1779, 1856), False, 'import click\n'), ((1852, 1947), 'click.option', 'click.option', (['"""--sed"""', '"""-s"""'], {'is_flag': '(True)', 'help': '"""Shows the output of all the sed commands"""'}), "('--sed', '-s', is_flag=True, help=\n 'Shows the output of all the sed commands')\n", (1864, 1947), False, 'import click\n'), ((2489, 2508), 'chass.preprocessing.preprocessing', 'preprocessing', (['file'], {}), '(file)\n', (2502, 2508), False, 'from chass.preprocessing import preprocessing\n'), ((2518, 2568), 'subprocess.call', 'subprocess.call', (['"""chmod 777 ./copy.sh"""'], {'shell': '(True)'}), "('chmod 777 ./copy.sh', shell=True)\n", (2533, 2568), False, 'import subprocess\n'), ((2647, 2704), 'click.echo', 'click.echo', (['"""Provide input parameters and press 
ENTER : """'], {}), "('Provide input parameters and press ENTER : ')\n", (2657, 2704), False, 'import click\n'), ((2859, 2887), 'chass.identify_variables.identify_variables', 'identify_variables', (['new_file'], {}), '(new_file)\n', (2877, 2887), False, 'from chass.identify_variables import identify_variables\n'), ((2909, 2930), 'chass.locate_ifs.locate_ifs', 'locate_ifs', (['"""copy.sh"""'], {}), "('copy.sh')\n", (2919, 2930), False, 'from chass.locate_ifs import locate_ifs\n'), ((2997, 3056), 'chass.locate_loops.locate_loops', 'locate_loops', (['new_file', 'for_loops', 'while_loops', 'until_loops'], {}), '(new_file, for_loops, while_loops, until_loops)\n', (3009, 3056), False, 'from chass.locate_loops import locate_loops\n'), ((3075, 3103), 'chass.identify_variables.identify_variables', 'identify_variables', (['new_file'], {}), '(new_file)\n', (3093, 3103), False, 'from chass.identify_variables import identify_variables\n'), ((3109, 3129), 'chass.remove_cp.remove_cp', 'remove_cp', (['"""copy.sh"""'], {}), "('copy.sh')\n", (3118, 3129), False, 'from chass.remove_cp import remove_cp\n'), ((3195, 3295), 'subprocess.Popen', 'subprocess.Popen', (["(['bash', 'copy3.sh'] + input_parameters)"], {'stdout': 'temp', 'stderr': 'subprocess.STDOUT'}), "(['bash', 'copy3.sh'] + input_parameters, stdout=temp,\n stderr=subprocess.STDOUT)\n", (3211, 3295), False, 'import subprocess\n'), ((3291, 3306), 'time.sleep', 'time.sleep', (['(0.5)'], {}), '(0.5)\n', (3301, 3306), False, 'import time\n'), ((3450, 3476), 'chass.locate_commands.locate_commands', 'locate_commands', (['"""copy.sh"""'], {}), "('copy.sh')\n", (3465, 3476), False, 'from chass.locate_commands import locate_commands\n'), ((3514, 3555), 'chass.sedcommand.sedcommand', 'sedcommand', (['f', 'commands', 'input_parameters'], {}), '(f, commands, input_parameters)\n', (3524, 3555), False, 'from chass.sedcommand import sedcommand\n'), ((3577, 3600), 'chass.locate_case.locate_cases', 'locate_cases', (['"""copy.sh"""'], {}), "('copy.sh')\n", (3589, 3600), False, 'from chass.locate_case import locate_cases\n'), ((3618, 3639), 'chass.identify_functions.identify_functions', 'identify_functions', (['f'], {}), '(f)\n', (3636, 3639), False, 'from chass.identify_functions import identify_functions\n'), ((3867, 3882), 'time.sleep', 'time.sleep', (['(0.5)'], {}), '(0.5)\n', (3877, 3882), False, 'import time\n'), ((4357, 4372), 'time.sleep', 'time.sleep', (['(0.2)'], {}), '(0.2)\n', (4367, 4372), False, 'import time\n'), ((4378, 4481), 'chass.if_else.if_else', 'if_else', (['"""copy.sh"""', 'for_loops', 'functions', 'while_loops', 'until_loops', 'if_statements', 'input_parameters'], {}), "('copy.sh', for_loops, functions, while_loops, until_loops,\n if_statements, input_parameters)\n", (4385, 4481), False, 'from chass.if_else import if_else\n'), ((4477, 4584), 'chass.case_foo.edit_case', 'edit_case', (['"""copy.sh"""', 'case_statements', 'for_loops', 'functions', 'while_loops', 'until_loops', 'input_parameters'], {}), "('copy.sh', case_statements, for_loops, functions, while_loops,\n until_loops, input_parameters)\n", (4486, 4584), False, 'from chass.case_foo import edit_case\n'), ((4580, 4593), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (4590, 4593), False, 'import time\n'), ((4740, 4775), 'chass.get_file_paths.get_path', 'get_path', (['file', 'num_lines', 'commands'], {}), '(file, num_lines, commands)\n', (4748, 4775), False, 'from chass.get_file_paths import get_path\n'), ((62037, 62076), 'subprocess.call', 'subprocess.call', (['"""rm *.txt"""'], 
{'shell': '(True)'}), "('rm *.txt', shell=True)\n", (62052, 62076), False, 'import subprocess\n'), ((62081, 62119), 'subprocess.call', 'subprocess.call', (['"""rm *.sh"""'], {'shell': '(True)'}), "('rm *.sh', shell=True)\n", (62096, 62119), False, 'import subprocess\n'), ((2233, 2270), 'os.system', 'os.system', (['"""rm *.txt >/dev/null 2>&1"""'], {}), "('rm *.txt >/dev/null 2>&1')\n", (2242, 2270), False, 'import os\n'), ((2360, 2396), 'os.system', 'os.system', (['"""rm *.sh >/dev/null 2>&1"""'], {}), "('rm *.sh >/dev/null 2>&1')\n", (2369, 2396), False, 'import os\n'), ((3332, 3374), 'click.echo', 'click.echo', (['"""The output of the file is : """'], {}), "('The output of the file is : ')\n", (3342, 3374), False, 'import click\n'), ((3687, 3749), 'chass.variable.funcvar', 'funcvar', (['f', 'a', 'b', 'input_parameters', 'case_statements', 'functions'], {}), '(f, a, b, input_parameters, case_statements, functions)\n', (3694, 3749), False, 'from chass.variable import funcvar\n'), ((3789, 3867), 'chass.function_handle.function_handle', 'function_handle', (['f', 'a', 'b', 'c', 'variables_info', 'case_statements', 'input_parameters'], {}), '(f, a, b, c, variables_info, case_statements, input_parameters)\n', (3804, 3867), False, 'from chass.function_handle import function_handle\n'), ((3991, 4056), 'chass.forloop.forloop', 'forloop', (['f', 'a', 'b', 'cnt_for_loops', 'variables_info', 'input_parameters'], {}), '(f, a, b, cnt_for_loops, variables_info, input_parameters)\n', (3998, 4056), False, 'from chass.forloop import forloop\n'), ((4129, 4196), 'chass.whiloop.whiloop', 'whiloop', (['f', 'a', 'b', 'cnt_while_loops', 'variables_info', 'input_parameters'], {}), '(f, a, b, cnt_while_loops, variables_info, input_parameters)\n', (4136, 4196), False, 'from chass.whiloop import whiloop\n'), ((4260, 4328), 'chass.untiloop.untiloop', 'untiloop', (['f', 'a', 'b', 'cnt_until_loops', 'variables_info', 'input_parameters'], {}), '(f, a, b, cnt_until_loops, variables_info, input_parameters)\n', (4268, 4328), False, 'from chass.untiloop import untiloop\n'), ((1971, 1983), 'click.Path', 'click.Path', ([], {}), '()\n', (1981, 1983), False, 'import click\n'), ((10787, 10824), 'click.echo', 'click.echo', (['"""press ENTER to continue"""'], {}), "('press ENTER to continue')\n", (10797, 10824), False, 'import click\n'), ((15454, 15510), 'click.echo', 'click.echo', (['"""No sed command found on given line number."""'], {}), "('No sed command found on given line number.')\n", (15464, 15510), False, 'import click\n'), ((15826, 15868), 'click.echo', 'click.echo', (['"""No such function is defined."""'], {}), "('No such function is defined.')\n", (15836, 15868), False, 'import click\n'), ((15932, 16005), 'click.echo', 'click.echo', (['"""Provide input arguments for the function and press ENTER : """'], {}), "('Provide input arguments for the function and press ENTER : ')\n", (15942, 16005), False, 'import click\n'), ((16141, 16183), 'chass.locate_function_calls.locate_function_calls', 'locate_function_calls', (['"""copy.sh"""', 'function'], {}), "('copy.sh', function)\n", (16162, 16183), False, 'from chass.locate_function_calls import locate_function_calls\n'), ((16195, 16262), 'click.echo', 'click.echo', (['"""The line numbers where the function was called are : """'], {}), "('The line numbers where the function was called are : ')\n", (16205, 16262), False, 'import click\n'), ((16396, 16408), 'click.echo', 'click.echo', ([], {}), '()\n', (16406, 16408), False, 'import click\n'), ((5622, 5659), 'click.echo', 
'click.echo', (['"""press ENTER to continue"""'], {}), "('press ENTER to continue')\n", (5632, 5659), False, 'import click\n'), ((7196, 7233), 'click.echo', 'click.echo', (['"""press ENTER to continue"""'], {}), "('press ENTER to continue')\n", (7206, 7233), False, 'import click\n'), ((8776, 8813), 'click.echo', 'click.echo', (['"""press ENTER to continue"""'], {}), "('press ENTER to continue')\n", (8786, 8813), False, 'import click\n'), ((13143, 13180), 'click.echo', 'click.echo', (['"""press ENTER to continue"""'], {}), "('press ENTER to continue')\n", (13153, 13180), False, 'import click\n'), ((19908, 19945), 'click.echo', 'click.echo', (['"""press ENTER to continue"""'], {}), "('press ENTER to continue')\n", (19918, 19945), False, 'import click\n'), ((6166, 6215), 'click.echo', 'click.echo', (["(variables_info[mod][0] + ' : ' + line)"], {}), "(variables_info[mod][0] + ' : ' + line)\n", (6176, 6215), False, 'import click\n'), ((6348, 6380), 'click.echo', 'click.echo', (['"""Command not found."""'], {}), "('Command not found.')\n", (6358, 6380), False, 'import click\n'), ((7740, 7789), 'click.echo', 'click.echo', (["(variables_info[mod][0] + ' : ' + line)"], {}), "(variables_info[mod][0] + ' : ' + line)\n", (7750, 7789), False, 'import click\n'), ((7922, 7954), 'click.echo', 'click.echo', (['"""Command not found."""'], {}), "('Command not found.')\n", (7932, 7954), False, 'import click\n'), ((9287, 9336), 'click.echo', 'click.echo', (["(variables_info[mod][0] + ' : ' + line)"], {}), "(variables_info[mod][0] + ' : ' + line)\n", (9297, 9336), False, 'import click\n'), ((9469, 9501), 'click.echo', 'click.echo', (['"""Command not found."""'], {}), "('Command not found.')\n", (9479, 9501), False, 'import click\n'), ((11038, 11097), 'click.echo', 'click.echo', (['"""The condition for the if/elif statement is : """'], {}), "('The condition for the if/elif statement is : ')\n", (11048, 11097), False, 'import click\n'), ((11222, 11292), 'click.echo', 'click.echo', (['"""The values of the variables used in the condition are : """'], {}), "('The values of the variables used in the condition are : ')\n", (11232, 11292), False, 'import click\n'), ((11773, 11796), 'chass.changed_variables.changed_variables', 'changed_variables', (['f', 'i'], {}), '(f, i)\n', (11790, 11796), False, 'from chass.changed_variables import changed_variables\n'), ((14198, 14230), 'click.echo', 'click.echo', (['"""Command not found."""'], {}), "('Command not found.')\n", (14208, 14230), False, 'import click\n'), ((11588, 11620), 'click.echo', 'click.echo', (['"""Command not found."""'], {}), "('Command not found.')\n", (11598, 11620), False, 'import click\n'), ((12358, 12391), 'click.echo', 'click.echo', (['"""No variable change!"""'], {}), "('No variable change!')\n", (12368, 12391), False, 'import click\n'), ((12622, 12654), 'click.echo', 'click.echo', (['"""Command not found."""'], {}), "('Command not found.')\n", (12632, 12654), False, 'import click\n'), ((13413, 13459), 'click.echo', 'click.echo', (['"""The output of the sed file is : """'], {}), "('The output of the sed file is : ')\n", (13423, 13459), False, 'import click\n'), ((13488, 13503), 'click.echo', 'click.echo', (['out'], {}), '(out)\n', (13498, 13503), False, 'import click\n'), ((13588, 13611), 'chass.changed_variables.changed_variables', 'changed_variables', (['f', 'i'], {}), '(f, i)\n', (13605, 13611), False, 'from chass.changed_variables import changed_variables\n'), ((14676, 14722), 'click.echo', 'click.echo', (['"""The output of the sed file is : """'], 
{}), "('The output of the sed file is : ')\n", (14686, 14722), False, 'import click\n'), ((14751, 14766), 'click.echo', 'click.echo', (['out'], {}), '(out)\n', (14761, 14766), False, 'import click\n'), ((14851, 14881), 'chass.changed_variables.changed_variables', 'changed_variables', (['f', '(line - 1)'], {}), '(f, line - 1)\n', (14868, 14881), False, 'from chass.changed_variables import changed_variables\n'), ((18387, 18424), 'click.echo', 'click.echo', (['"""press ENTER to continue"""'], {}), "('press ENTER to continue')\n", (18397, 18424), False, 'import click\n'), ((20755, 20788), 'click.echo', 'click.echo', (['"""No variable change."""'], {}), "('No variable change.')\n", (20765, 20788), False, 'import click\n'), ((12212, 12291), 'click.echo', 'click.echo', (["(changed_variables_list[j][0] + ' : ' + changed_variables_list[j][1])"], {}), "(changed_variables_list[j][0] + ' : ' + changed_variables_list[j][1])\n", (12222, 12291), False, 'import click\n'), ((14113, 14146), 'click.echo', 'click.echo', (['"""No variable change!"""'], {}), "('No variable change!')\n", (14123, 14146), False, 'import click\n'), ((15381, 15414), 'click.echo', 'click.echo', (['"""No variable change!"""'], {}), "('No variable change!')\n", (15391, 15414), False, 'import click\n'), ((20577, 20626), 'click.echo', 'click.echo', (["(variables_info[mod][0] + ' : ' + line)"], {}), "(variables_info[mod][0] + ' : ' + line)\n", (20587, 20626), False, 'import click\n'), ((21728, 21767), 'chass.get_line_number.get_value_at_line', 'get_value_at_line', (['variable_name', '(i - 1)'], {}), '(variable_name, i - 1)\n', (21745, 21767), False, 'from chass.get_line_number import get_value_at_line\n'), ((22362, 22420), 'click.echo', 'click.echo', (['"""Insufficient number of arguments for option."""'], {}), "('Insufficient number of arguments for option.')\n", (22372, 22420), False, 'import click\n'), ((13967, 14046), 'click.echo', 'click.echo', (["(changed_variables_list[j][0] + ' : ' + changed_variables_list[j][1])"], {}), "(changed_variables_list[j][0] + ' : ' + changed_variables_list[j][1])\n", (13977, 14046), False, 'import click\n'), ((15235, 15314), 'click.echo', 'click.echo', (["(changed_variables_list[j][0] + ' : ' + changed_variables_list[j][1])"], {}), "(changed_variables_list[j][0] + ' : ' + changed_variables_list[j][1])\n", (15245, 15314), False, 'import click\n'), ((18677, 18723), 'click.echo', 'click.echo', (['"""The output of the sed file is : """'], {}), "('The output of the sed file is : ')\n", (18687, 18723), False, 'import click\n'), ((18756, 18771), 'click.echo', 'click.echo', (['out'], {}), '(out)\n', (18766, 18771), False, 'import click\n'), ((18864, 18887), 'chass.changed_variables.changed_variables', 'changed_variables', (['f', 'i'], {}), '(f, i)\n', (18881, 18887), False, 'from chass.changed_variables import changed_variables\n'), ((19681, 19713), 'click.echo', 'click.echo', (['"""Command not found."""'], {}), "('Command not found.')\n", (19691, 19713), False, 'import click\n'), ((21193, 21225), 'click.echo', 'click.echo', (['"""Command not found."""'], {}), "('Command not found.')\n", (21203, 21225), False, 'import click\n'), ((21868, 21906), 'click.echo', 'click.echo', (['"""Line number out of file!"""'], {}), "('Line number out of file!')\n", (21878, 21906), False, 'import click\n'), ((31662, 31699), 'click.echo', 'click.echo', (['"""press ENTER to continue"""'], {}), "('press ENTER to continue')\n", (31672, 31699), False, 'import click\n'), ((19417, 19450), 'click.echo', 'click.echo', (['"""No variable 
change!"""'], {}), "('No variable change!')\n", (19427, 19450), False, 'import click\n'), ((21963, 21999), 'click.echo', 'click.echo', (['"""Variable out of scope!"""'], {}), "('Variable out of scope!')\n", (21973, 21999), False, 'import click\n'), ((31804, 31827), 'chass.changed_variables.changed_variables', 'changed_variables', (['f', 'i'], {}), '(f, i)\n', (31821, 31827), False, 'from chass.changed_variables import changed_variables\n'), ((33877, 33916), 'click.echo', 'click.echo', (['"""Given variable not found!"""'], {}), "('Given variable not found!')\n", (33887, 33916), False, 'import click\n'), ((44940, 44970), 'chass.get_line_number.get_value_at_line', 'get_value_at_line', (['a', '(line - 1)'], {}), '(a, line - 1)\n', (44957, 44970), False, 'from chass.get_line_number import get_value_at_line\n'), ((19263, 19342), 'click.echo', 'click.echo', (["(changed_variables_list[j][0] + ' : ' + changed_variables_list[j][1])"], {}), "(changed_variables_list[j][0] + ' : ' + changed_variables_list[j][1])\n", (19273, 19342), False, 'import click\n'), ((29170, 29207), 'click.echo', 'click.echo', (['"""press ENTER to continue"""'], {}), "('press ENTER to continue')\n", (29180, 29207), False, 'import click\n'), ((30198, 30235), 'click.echo', 'click.echo', (['"""press ENTER to continue"""'], {}), "('press ENTER to continue')\n", (30208, 30235), False, 'import click\n'), ((32325, 32358), 'click.echo', 'click.echo', (['"""No variable change!"""'], {}), "('No variable change!')\n", (32335, 32358), False, 'import click\n'), ((32415, 32471), 'click.echo', 'click.echo', (['"""Provide the expression to be calculated : """'], {}), "('Provide the expression to be calculated : ')\n", (32425, 32471), False, 'import click\n'), ((32542, 32587), 'chass.calculate_expr.calculate_expr', 'calculate_expr', (['i', 'expression', 'variables_info'], {}), '(i, expression, variables_info)\n', (32556, 32587), False, 'from chass.calculate_expr import calculate_expr\n'), ((34068, 34110), 'chass.get_line_number.get_value_at_line', 'get_value_at_line', (['variable_name', '(line - 1)'], {}), '(variable_name, line - 1)\n', (34085, 34110), False, 'from chass.get_line_number import get_value_at_line\n'), ((45013, 45042), 'click.echo', 'click.echo', (["(a + ' : ' + value)"], {}), "(a + ' : ' + value)\n", (45023, 45042), False, 'import click\n'), ((51236, 51259), 'chass.changed_variables.changed_variables', 'changed_variables', (['f', 'i'], {}), '(f, i)\n', (51253, 51259), False, 'from chass.changed_variables import changed_variables\n'), ((60341, 60378), 'click.echo', 'click.echo', (['"""press ENTER to continue"""'], {}), "('press ENTER to continue')\n", (60351, 60378), False, 'import click\n'), ((27985, 28022), 'click.echo', 'click.echo', (['"""press ENTER to continue"""'], {}), "('press ENTER to continue')\n", (27995, 28022), False, 'import click\n'), ((29403, 29470), 'click.echo', 'click.echo', (['"""The value of the variable used in the condition is : """'], {}), "('The value of the variable used in the condition is : ')\n", (29413, 29470), False, 'import click\n'), ((32195, 32274), 'click.echo', 'click.echo', (["(changed_variables_list[j][0] + ' : ' + changed_variables_list[j][1])"], {}), "(changed_variables_list[j][0] + ' : ' + changed_variables_list[j][1])\n", (32205, 32274), False, 'import click\n'), ((32768, 32820), 'click.echo', 'click.echo', (['"""The expression could not be evaluated."""'], {}), "('The expression could not be evaluated.')\n", (32778, 32820), False, 'import click\n'), ((32872, 32935), 'click.echo', 
'click.echo', (["('The output of the above expression is : ' + output)"], {}), "('The output of the above expression is : ' + output)\n", (32882, 32935), False, 'import click\n'), ((34218, 34256), 'click.echo', 'click.echo', (['"""Line number out of file!"""'], {}), "('Line number out of file!')\n", (34228, 34256), False, 'import click\n'), ((39867, 39904), 'click.echo', 'click.echo', (['"""press ENTER to continue"""'], {}), "('press ENTER to continue')\n", (39877, 39904), False, 'import click\n'), ((51705, 51738), 'click.echo', 'click.echo', (['"""No variable change!"""'], {}), "('No variable change!')\n", (51715, 51738), False, 'import click\n'), ((60471, 60494), 'chass.changed_variables.changed_variables', 'changed_variables', (['f', 'i'], {}), '(f, i)\n', (60488, 60494), False, 'from chass.changed_variables import changed_variables\n'), ((23431, 23468), 'click.echo', 'click.echo', (['"""press ENTER to continue"""'], {}), "('press ENTER to continue')\n", (23441, 23468), False, 'import click\n'), ((25070, 25107), 'click.echo', 'click.echo', (['"""press ENTER to continue"""'], {}), "('press ENTER to continue')\n", (25080, 25107), False, 'import click\n'), ((26713, 26750), 'click.echo', 'click.echo', (['"""press ENTER to continue"""'], {}), "('press ENTER to continue')\n", (26723, 26750), False, 'import click\n'), ((28238, 28297), 'click.echo', 'click.echo', (['"""The condition for the if/elif statement is : """'], {}), "('The condition for the if/elif statement is : ')\n", (28248, 28297), False, 'import click\n'), ((28430, 28500), 'click.echo', 'click.echo', (['"""The values of the variables used in the condition are : """'], {}), "('The values of the variables used in the condition are : ')\n", (28440, 28500), False, 'import click\n'), ((29768, 29800), 'click.echo', 'click.echo', (['"""Command not found."""'], {}), "('Command not found.')\n", (29778, 29800), False, 'import click\n'), ((30492, 30538), 'click.echo', 'click.echo', (['"""The output of the sed file is : """'], {}), "('The output of the sed file is : ')\n", (30502, 30538), False, 'import click\n'), ((30571, 30586), 'click.echo', 'click.echo', (['out'], {}), '(out)\n', (30581, 30586), False, 'import click\n'), ((30679, 30702), 'chass.changed_variables.changed_variables', 'changed_variables', (['f', 'i'], {}), '(f, i)\n', (30696, 30702), False, 'from chass.changed_variables import changed_variables\n'), ((31496, 31528), 'click.echo', 'click.echo', (['"""Command not found."""'], {}), "('Command not found.')\n", (31506, 31528), False, 'import click\n'), ((34313, 34371), 'click.echo', 'click.echo', (["('Variable ' + variable_name + ' out of scope!')"], {}), "('Variable ' + variable_name + ' out of scope!')\n", (34323, 34371), False, 'import click\n'), ((34410, 34451), 'click.echo', 'click.echo', (["(variable_name + ' : ' + value)"], {}), "(variable_name + ' : ' + value)\n", (34420, 34451), False, 'import click\n'), ((49853, 49920), 'click.echo', 'click.echo', (['"""The value of the variable used in the condition is : """'], {}), "('The value of the variable used in the condition is : ')\n", (49863, 49920), False, 'import click\n'), ((51591, 51670), 'click.echo', 'click.echo', (["(changed_variables_list[j][0] + ' : ' + changed_variables_list[j][1])"], {}), "(changed_variables_list[j][0] + ' : ' + changed_variables_list[j][1])\n", (51601, 51670), False, 'import click\n'), ((58037, 58074), 'click.echo', 'click.echo', (['"""press ENTER to continue"""'], {}), "('press ENTER to continue')\n", (58047, 58074), False, 'import click\n'), 
((58981, 59018), 'click.echo', 'click.echo', (['"""press ENTER to continue"""'], {}), "('press ENTER to continue')\n", (58991, 59018), False, 'import click\n'), ((60960, 60993), 'click.echo', 'click.echo', (['"""No variable change!"""'], {}), "('No variable change!')\n", (60970, 60993), False, 'import click\n'), ((61042, 61098), 'click.echo', 'click.echo', (['"""Provide the expression to be calculated : """'], {}), "('Provide the expression to be calculated : ')\n", (61052, 61098), False, 'import click\n'), ((61161, 61206), 'chass.calculate_expr.calculate_expr', 'calculate_expr', (['i', 'expression', 'variables_info'], {}), '(i, expression, variables_info)\n', (61175, 61206), False, 'from chass.calculate_expr import calculate_expr\n'), ((23964, 24013), 'click.echo', 'click.echo', (["(variables_info[mod][0] + ' : ' + line)"], {}), "(variables_info[mod][0] + ' : ' + line)\n", (23974, 24013), False, 'import click\n'), ((24163, 24195), 'click.echo', 'click.echo', (['"""Command not found."""'], {}), "('Command not found.')\n", (24173, 24195), False, 'import click\n'), ((25604, 25653), 'click.echo', 'click.echo', (["(variables_info[mod][0] + ' : ' + line)"], {}), "(variables_info[mod][0] + ' : ' + line)\n", (25614, 25653), False, 'import click\n'), ((25803, 25835), 'click.echo', 'click.echo', (['"""Command not found."""'], {}), "('Command not found.')\n", (25813, 25835), False, 'import click\n'), ((27247, 27296), 'click.echo', 'click.echo', (["(variables_info[mod][0] + ' : ' + line)"], {}), "(variables_info[mod][0] + ' : ' + line)\n", (27257, 27296), False, 'import click\n'), ((27446, 27478), 'click.echo', 'click.echo', (['"""Command not found."""'], {}), "('Command not found.')\n", (27456, 27478), False, 'import click\n'), ((28820, 28852), 'click.echo', 'click.echo', (['"""Command not found."""'], {}), "('Command not found.')\n", (28830, 28852), False, 'import click\n'), ((31232, 31265), 'click.echo', 'click.echo', (['"""No variable change!"""'], {}), "('No variable change!')\n", (31242, 31265), False, 'import click\n'), ((33366, 33398), 'click.echo', 'click.echo', (['"""Command not found."""'], {}), "('Command not found.')\n", (33376, 33398), False, 'import click\n'), ((49215, 49274), 'click.echo', 'click.echo', (['"""The condition for the if/elif statement is : """'], {}), "('The condition for the if/elif statement is : ')\n", (49225, 49274), False, 'import click\n'), ((49391, 49461), 'click.echo', 'click.echo', (['"""The values of the variables used in the condition are : """'], {}), "('The values of the variables used in the condition are : ')\n", (49401, 49461), False, 'import click\n'), ((50435, 50481), 'click.echo', 'click.echo', (['"""The output of the sed file is : """'], {}), "('The output of the sed file is : ')\n", (50445, 50481), False, 'import click\n'), ((50506, 50521), 'click.echo', 'click.echo', (['out'], {}), '(out)\n', (50516, 50521), False, 'import click\n'), ((50598, 50621), 'chass.changed_variables.changed_variables', 'changed_variables', (['f', 'i'], {}), '(f, i)\n', (50615, 50621), False, 'from chass.changed_variables import changed_variables\n'), ((56936, 56973), 'click.echo', 'click.echo', (['"""press ENTER to continue"""'], {}), "('press ENTER to continue')\n", (56946, 56973), False, 'import click\n'), ((58250, 58317), 'click.echo', 'click.echo', (['"""The value of the variable used in the condition is : """'], {}), "('The value of the variable used in the condition is : ')\n", (58260, 58317), False, 'import click\n'), ((60838, 60917), 'click.echo', 'click.echo', 
(["(changed_variables_list[j][0] + ' : ' + changed_variables_list[j][1])"], {}), "(changed_variables_list[j][0] + ' : ' + changed_variables_list[j][1])\n", (60848, 60917), False, 'import click\n'), ((61375, 61427), 'click.echo', 'click.echo', (['"""The expression could not be evaluated."""'], {}), "('The expression could not be evaluated.')\n", (61385, 61427), False, 'import click\n'), ((61471, 61534), 'click.echo', 'click.echo', (["('The output of the above expression is : ' + output)"], {}), "('The output of the above expression is : ' + output)\n", (61481, 61534), False, 'import click\n'), ((31078, 31157), 'click.echo', 'click.echo', (["(changed_variables_list[j][0] + ' : ' + changed_variables_list[j][1])"], {}), "(changed_variables_list[j][0] + ' : ' + changed_variables_list[j][1])\n", (31088, 31157), False, 'import click\n'), ((44832, 44862), 'chass.get_line_number.get_value_at_line', 'get_value_at_line', (['var_name', 'i'], {}), '(var_name, i)\n', (44849, 44862), False, 'from chass.get_line_number import get_value_at_line\n'), ((46357, 46406), 'click.echo', 'click.echo', (["(variables_info[mod][0] + ' : ' + line)"], {}), "(variables_info[mod][0] + ' : ' + line)\n", (46367, 46406), False, 'import click\n'), ((47508, 47557), 'click.echo', 'click.echo', (["(variables_info[mod][0] + ' : ' + line)"], {}), "(variables_info[mod][0] + ' : ' + line)\n", (47518, 47557), False, 'import click\n'), ((48662, 48711), 'click.echo', 'click.echo', (["(variables_info[mod][0] + ' : ' + line)"], {}), "(variables_info[mod][0] + ' : ' + line)\n", (48672, 48711), False, 'import click\n'), ((51095, 51128), 'click.echo', 'click.echo', (['"""No variable change!"""'], {}), "('No variable change!')\n", (51105, 51128), False, 'import click\n'), ((52726, 52763), 'click.echo', 'click.echo', (['"""press ENTER to continue"""'], {}), "('press ENTER to continue')\n", (52736, 52763), False, 'import click\n'), ((54241, 54278), 'click.echo', 'click.echo', (['"""press ENTER to continue"""'], {}), "('press ENTER to continue')\n", (54251, 54278), False, 'import click\n'), ((55760, 55797), 'click.echo', 'click.echo', (['"""press ENTER to continue"""'], {}), "('press ENTER to continue')\n", (55770, 55797), False, 'import click\n'), ((57169, 57228), 'click.echo', 'click.echo', (['"""The condition for the if/elif statement is : """'], {}), "('The condition for the if/elif statement is : ')\n", (57179, 57228), False, 'import click\n'), ((57353, 57423), 'click.echo', 'click.echo', (['"""The values of the variables used in the condition are : """'], {}), "('The values of the variables used in the condition are : ')\n", (57363, 57423), False, 'import click\n'), ((58591, 58623), 'click.echo', 'click.echo', (['"""Command not found."""'], {}), "('Command not found.')\n", (58601, 58623), False, 'import click\n'), ((59251, 59297), 'click.echo', 'click.echo', (['"""The output of the sed file is : """'], {}), "('The output of the sed file is : ')\n", (59261, 59297), False, 'import click\n'), ((59326, 59341), 'click.echo', 'click.echo', (['out'], {}), '(out)\n', (59336, 59341), False, 'import click\n'), ((59426, 59449), 'chass.changed_variables.changed_variables', 'changed_variables', (['f', 'i'], {}), '(f, i)\n', (59443, 59449), False, 'from chass.changed_variables import changed_variables\n'), ((60195, 60227), 'click.echo', 'click.echo', (['"""Command not found."""'], {}), "('Command not found.')\n", (60205, 60227), False, 'import click\n'), ((35333, 35370), 'click.echo', 'click.echo', (['"""press ENTER to continue"""'], {}), "('press 
ENTER to continue')\n", (35343, 35370), False, 'import click\n'), ((37058, 37095), 'click.echo', 'click.echo', (['"""press ENTER to continue"""'], {}), "('press ENTER to continue')\n", (37068, 37095), False, 'import click\n'), ((38787, 38824), 'click.echo', 'click.echo', (['"""press ENTER to continue"""'], {}), "('press ENTER to continue')\n", (38797, 38824), False, 'import click\n'), ((40111, 40141), 'chass.get_line_number.get_value_at_line', 'get_value_at_line', (['var_name', 'i'], {}), '(var_name, i)\n', (40128, 40141), False, 'from chass.get_line_number import get_value_at_line\n'), ((41759, 41808), 'click.echo', 'click.echo', (["(variables_info[mod][0] + ' : ' + line)"], {}), "(variables_info[mod][0] + ' : ' + line)\n", (41769, 41808), False, 'import click\n'), ((43086, 43135), 'click.echo', 'click.echo', (["(variables_info[mod][0] + ' : ' + line)"], {}), "(variables_info[mod][0] + ' : ' + line)\n", (43096, 43135), False, 'import click\n'), ((44417, 44466), 'click.echo', 'click.echo', (["(variables_info[mod][0] + ' : ' + line)"], {}), "(variables_info[mod][0] + ' : ' + line)\n", (44427, 44466), False, 'import click\n'), ((50957, 51036), 'click.echo', 'click.echo', (["(changed_variables_list[j][0] + ' : ' + changed_variables_list[j][1])"], {}), "(changed_variables_list[j][0] + ' : ' + changed_variables_list[j][1])\n", (50967, 51036), False, 'import click\n'), ((53231, 53280), 'click.echo', 'click.echo', (["(variables_info[mod][0] + ' : ' + line)"], {}), "(variables_info[mod][0] + ' : ' + line)\n", (53241, 53280), False, 'import click\n'), ((53414, 53446), 'click.echo', 'click.echo', (['"""Command not found."""'], {}), "('Command not found.')\n", (53424, 53446), False, 'import click\n'), ((54747, 54796), 'click.echo', 'click.echo', (["(variables_info[mod][0] + ' : ' + line)"], {}), "(variables_info[mod][0] + ' : ' + line)\n", (54757, 54796), False, 'import click\n'), ((54930, 54962), 'click.echo', 'click.echo', (['"""Command not found."""'], {}), "('Command not found.')\n", (54940, 54962), False, 'import click\n'), ((56266, 56315), 'click.echo', 'click.echo', (["(variables_info[mod][0] + ' : ' + line)"], {}), "(variables_info[mod][0] + ' : ' + line)\n", (56276, 56315), False, 'import click\n'), ((56449, 56481), 'click.echo', 'click.echo', (['"""Command not found."""'], {}), "('Command not found.')\n", (56459, 56481), False, 'import click\n'), ((57719, 57751), 'click.echo', 'click.echo', (['"""Command not found."""'], {}), "('Command not found.')\n", (57729, 57751), False, 'import click\n'), ((59951, 59984), 'click.echo', 'click.echo', (['"""No variable change!"""'], {}), "('No variable change!')\n", (59961, 59984), False, 'import click\n'), ((61921, 61953), 'click.echo', 'click.echo', (['"""Command not found."""'], {}), "('Command not found.')\n", (61931, 61953), False, 'import click\n'), ((35893, 35942), 'click.echo', 'click.echo', (["(variables_info[mod][0] + ' : ' + line)"], {}), "(variables_info[mod][0] + ' : ' + line)\n", (35903, 35942), False, 'import click\n'), ((36091, 36123), 'click.echo', 'click.echo', (['"""Command not found."""'], {}), "('Command not found.')\n", (36101, 36123), False, 'import click\n'), ((37618, 37667), 'click.echo', 'click.echo', (["(variables_info[mod][0] + ' : ' + line)"], {}), "(variables_info[mod][0] + ' : ' + line)\n", (37628, 37667), False, 'import click\n'), ((37816, 37848), 'click.echo', 'click.echo', (['"""Command not found."""'], {}), "('Command not found.')\n", (37826, 37848), False, 'import click\n'), ((39347, 39396), 'click.echo', 
'click.echo', (["(variables_info[mod][0] + ' : ' + line)"], {}), "(variables_info[mod][0] + ' : ' + line)\n", (39357, 39396), False, 'import click\n'), ((39545, 39577), 'click.echo', 'click.echo', (['"""Command not found."""'], {}), "('Command not found.')\n", (39555, 39577), False, 'import click\n'), ((59805, 59884), 'click.echo', 'click.echo', (["(changed_variables_list[j][0] + ' : ' + changed_variables_list[j][1])"], {}), "(changed_variables_list[j][0] + ' : ' + changed_variables_list[j][1])\n", (59815, 59884), False, 'import click\n')] |
# coding=utf-8
import tensorflow as tf
import os
import sys
def load_model(saver, sess, logdir):
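  # Restore the most recent checkpoint in `logdir` into `sess`.
  # Returns the saved global step on success, or None when no
  # checkpoint is found.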
tf.logging.info("Trying to restore saved checkpoints from {} ...".format(logdir))
ckpt = tf.train.get_checkpoint_state(logdir)
if ckpt:
tf.logging.info(" Checkpoint found: {}".format(ckpt.model_checkpoint_path))
global_step = int(ckpt.model_checkpoint_path
.split('/')[-1]
.split('-')[-1])
tf.logging.info(" Global step was: {}".format(global_step))
tf.logging.info(" Restoring...")
saver.restore(sess, ckpt.model_checkpoint_path)
tf.logging.info(" Done.")
return global_step
else:
tf.logging.warning(" No checkpoint found.")
return None
def save_model(saver, sess, logdir, step):
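  # Write a checkpoint for the current session under `logdir`,
  # creating the directory on first use; `step` is appended to the
  # checkpoint file name.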
model_name = "model.ckpt"
checkpoint_path = os.path.join(logdir, model_name)
tf.logging.info("Storing checkpoint to {} ...".format(logdir))
sys.stdout.flush()
if not os.path.exists(logdir):
os.makedirs(logdir)
saver.save(sess, checkpoint_path, global_step=step)
tf.logging.info(" Done.")
| [
"os.path.exists",
"tensorflow.logging.warning",
"os.makedirs",
"tensorflow.logging.info",
"os.path.join",
"tensorflow.train.get_checkpoint_state",
"sys.stdout.flush"
] | [((196, 233), 'tensorflow.train.get_checkpoint_state', 'tf.train.get_checkpoint_state', (['logdir'], {}), '(logdir)\n', (225, 233), True, 'import tensorflow as tf\n'), ((877, 909), 'os.path.join', 'os.path.join', (['logdir', 'model_name'], {}), '(logdir, model_name)\n', (889, 909), False, 'import os\n'), ((981, 999), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (997, 999), False, 'import sys\n'), ((1123, 1148), 'tensorflow.logging.info', 'tf.logging.info', (['""" Done."""'], {}), "(' Done.')\n", (1138, 1148), True, 'import tensorflow as tf\n'), ((547, 580), 'tensorflow.logging.info', 'tf.logging.info', (['""" Restoring..."""'], {}), "(' Restoring...')\n", (562, 580), True, 'import tensorflow as tf\n'), ((645, 670), 'tensorflow.logging.info', 'tf.logging.info', (['""" Done."""'], {}), "(' Done.')\n", (660, 670), True, 'import tensorflow as tf\n'), ((716, 759), 'tensorflow.logging.warning', 'tf.logging.warning', (['""" No checkpoint found."""'], {}), "(' No checkpoint found.')\n", (734, 759), True, 'import tensorflow as tf\n'), ((1011, 1033), 'os.path.exists', 'os.path.exists', (['logdir'], {}), '(logdir)\n', (1025, 1033), False, 'import os\n'), ((1043, 1062), 'os.makedirs', 'os.makedirs', (['logdir'], {}), '(logdir)\n', (1054, 1062), False, 'import os\n')] |
from django.db import models
from fantasy_gambling_league.users.models import User
class Season(models.Model):
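    # A season groups players under a commissioner and fixes the weekly
    # allowance; SET_NULL keeps the season if the commissioner is deleted.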
commissioner = models.ForeignKey(
User,
null=True,
on_delete=models.SET_NULL,
)
players = models.ManyToManyField(User, related_name='seasons')
name = models.CharField(max_length=255)
slug = models.SlugField(unique=True)
weekly_allowance = models.DecimalField(
default=100.0,
decimal_places=2,
max_digits=99
)
class Gameweek(models.Model):
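    # One numbered round of a Season, with a deadline and an optional
    # free-text spiel.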
season = models.ForeignKey(
Season,
on_delete=models.CASCADE,
)
number = models.IntegerField()
deadline = models.DateTimeField()
spiel = models.TextField(null=True, blank=True)
| [
"django.db.models.TextField",
"django.db.models.ForeignKey",
"django.db.models.IntegerField",
"django.db.models.ManyToManyField",
"django.db.models.SlugField",
"django.db.models.DateTimeField",
"django.db.models.DecimalField",
"django.db.models.CharField"
] | [((133, 194), 'django.db.models.ForeignKey', 'models.ForeignKey', (['User'], {'null': '(True)', 'on_delete': 'models.SET_NULL'}), '(User, null=True, on_delete=models.SET_NULL)\n', (150, 194), False, 'from django.db import models\n'), ((240, 292), 'django.db.models.ManyToManyField', 'models.ManyToManyField', (['User'], {'related_name': '"""seasons"""'}), "(User, related_name='seasons')\n", (262, 292), False, 'from django.db import models\n'), ((304, 336), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(255)'}), '(max_length=255)\n', (320, 336), False, 'from django.db import models\n'), ((348, 377), 'django.db.models.SlugField', 'models.SlugField', ([], {'unique': '(True)'}), '(unique=True)\n', (364, 377), False, 'from django.db import models\n'), ((401, 468), 'django.db.models.DecimalField', 'models.DecimalField', ([], {'default': '(100.0)', 'decimal_places': '(2)', 'max_digits': '(99)'}), '(default=100.0, decimal_places=2, max_digits=99)\n', (420, 468), False, 'from django.db import models\n'), ((543, 594), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Season'], {'on_delete': 'models.CASCADE'}), '(Season, on_delete=models.CASCADE)\n', (560, 594), False, 'from django.db import models\n'), ((631, 652), 'django.db.models.IntegerField', 'models.IntegerField', ([], {}), '()\n', (650, 652), False, 'from django.db import models\n'), ((668, 690), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {}), '()\n', (688, 690), False, 'from django.db import models\n'), ((703, 742), 'django.db.models.TextField', 'models.TextField', ([], {'null': '(True)', 'blank': '(True)'}), '(null=True, blank=True)\n', (719, 742), False, 'from django.db import models\n')] |
import re
def decompressLength(string, recursive):
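    # Decompressed length of `string` under (AxB) markers: the A characters
    # after a marker are repeated B times. With recursive=True, repeated
    # sections are themselves decompressed (part 2); otherwise their raw
    # length is counted (part 1).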
expansion = re.search(r"\((\d+x\d+)\)", string)
if expansion:
start, end = expansion.span()
a, b = map(int, string[start + 1 : end - 1].split("x"))
p1 = string[:start]
toRepeat = string[end : end + a]
p3 = string[end + a :]
if recursive:
return len(p1) + decompressLength(toRepeat, recursive) * b + decompressLength(p3, recursive)
else:
return len(p1) + len(toRepeat) * b + decompressLength(p3, recursive)
return len(string)
def part1(data):
return decompressLength(data, recursive=False)
def part2(data):
return decompressLength(data, recursive=True)
if __name__ == "__main__":
from aocd import get_data
data = get_data(year=2016, day=9)
print(part1(data))
print(part2(data))
| [
"aocd.get_data",
"re.search"
] | [((69, 107), 're.search', 're.search', (['"""\\\\((\\\\d+x\\\\d+)\\\\)"""', 'string'], {}), "('\\\\((\\\\d+x\\\\d+)\\\\)', string)\n", (78, 107), False, 'import re\n'), ((780, 806), 'aocd.get_data', 'get_data', ([], {'year': '(2016)', 'day': '(9)'}), '(year=2016, day=9)\n', (788, 806), False, 'from aocd import get_data\n')] |
"""
Number Reach
Version 1
This is a text-based game.
Players take turns building formulas from a shared
number pool, racing to reach the goal number.
"""
import random
# Define Constants
INIT_NUMBER_POOL = {0}
INIT_NUMBERS = (1, 9)
INIT_COUNT = 2
GOAL_RANGE = (100, 999)
MAX_ANSWER = GOAL_RANGE[1] * 2
VALID_OPERATORS = ['+', '-', '*', '/']
# Ask how many players at the beginning
# and initiate a list of scores
def input_player_count_initiate_scores():
player_count = 0
while player_count <= 0:
try:
player_count = int(input('How many players play this game? '))
        except ValueError:
print('Player Count must be an integer!')
return [0 for idx in range(player_count)]
# Function: Initiate the number pool
# with 0 and two other single-digit numbers
def initiate_number_pool():
num_pool = INIT_NUMBER_POOL.copy()
for counter in range(INIT_COUNT):
tmp_num = next(iter(num_pool))
while tmp_num in num_pool:
tmp_num = random.randint(INIT_NUMBERS[0], INIT_NUMBERS[1])
num_pool.add(tmp_num)
return num_pool
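# e.g. initiate_number_pool() might return {0, 4, 7}: always 0 plus
# INIT_COUNT distinct single-digit numbers.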
# Calculate the formula with first_num opr second_num
def calculate(first_num, opr, second_num):
if opr == '+':
return first_num + second_num
elif opr == '-':
return first_num - second_num
elif opr == '*':
return first_num * second_num
    elif opr == '/':
        if second_num == 0:
            return -1 * MAX_ANSWER  # invalid: division by zero would crash
        return first_num // second_num
else:
return -1 * MAX_ANSWER # Invalid Number
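# e.g. calculate(7, '*', 9) == 63, while calculate(3, '-', 9) == -6 and is
# rejected later because negative answers are treated as invalid.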
# Ask a player about his formula
# Validate the formula
# Check the answer of the formula
# return a tuple of (First_Number, Operator, Second_Number, Answer)
def ask_player_formula(player_idx, num_pool, num_goal):
is_valid_formula = False
while not is_valid_formula:
print('== Hello Player ' + str(player_idx + 1) + ' ==')
print('The goal is ' + str(num_goal))
print('We got a pool of ' + str(num_pool))
print('Please make up a formula.')
# Ask the first number
first_num = -1
while first_num < 0 or first_num not in num_pool:
try:
first_num = int(input('First Number is: '))
            except ValueError:
print('Your input is NOT a number! Please try again.')
if first_num not in num_pool:
print('Your input does NOT exist in the pool. Please try again.')
# Ask the operator
opr = ''
while opr not in VALID_OPERATORS:
opr = input('Operator: ')
if opr not in VALID_OPERATORS:
print('Please select a valid operator: ' + str(VALID_OPERATORS))
# Ask the second number
second_num = -1
while second_num < 0 or second_num not in num_pool:
try:
second_num = int(input('Second Number is: '))
            except ValueError:
print('Your input is NOT a number! Please try again.')
if second_num not in num_pool:
print('Your input does NOT exist in the pool. Please try again.')
answer = calculate(first_num, opr, second_num)
if answer < 0:
print('The answer is INVALID. Please try again!')
elif answer > MAX_ANSWER:
print('The answer is too BIG. Please try again!')
else:
# Valid formula and answer
is_valid_formula = True
return (first_num, opr, second_num, answer)
# Initiate Overall Variables
is_playing = True
player_index = 0
player_scores = input_player_count_initiate_scores()
while is_playing:
# 1. Initiate variables
number_goal = random.randint(GOAL_RANGE[0], GOAL_RANGE[1])
number_pool = initiate_number_pool()
last_answer = -1
while last_answer != number_goal:
# 2. Ask a player about his formula
# 3. Validate the formula
# 4. Check the answer of the formula
formula_tuple = ask_player_formula(player_index, number_pool, number_goal)
# 5. Check the answer with the goal
last_answer = formula_tuple[3]
if last_answer == number_goal:
# Win Message
            print('Congratulations! Player ' + str(player_index + 1) + ' wins.')
print('{first_n} {opr} {second_n} = {ans}'.format(
first_n = formula_tuple[0]
, opr = formula_tuple[1]
, second_n = formula_tuple[2]
, ans = last_answer
))
# Add 1 score to the player
player_scores[player_index] += 1
            print('Player ' + str(player_index + 1) + ' now has ' + str(player_scores[player_index]) + ' point(s).')
else:
# If not matched, add the answer to the pool
number_pool.add(last_answer)
# 5. Repeat with another player
print('Next turn...')
player_index = (player_index + 1) % len(player_scores)
# 6. Show Score Result
# and Ask players if they want to play again
print('== Score Result ==')
for plyr_idex, plyr_score in enumerate(player_scores):
print('Player ' + str(plyr_idex + 1) + ': ' + str(plyr_score))
play_again_reply = input('Do you want to play again? (Y/N) ')
if play_again_reply.upper() != 'Y':
is_playing = False
# End of the program
print('Goodbye :D') | [
"random.randint"
] | [((3565, 3609), 'random.randint', 'random.randint', (['GOAL_RANGE[0]', 'GOAL_RANGE[1]'], {}), '(GOAL_RANGE[0], GOAL_RANGE[1])\n', (3579, 3609), False, 'import random\n'), ((966, 1014), 'random.randint', 'random.randint', (['INIT_NUMBERS[0]', 'INIT_NUMBERS[1]'], {}), '(INIT_NUMBERS[0], INIT_NUMBERS[1])\n', (980, 1014), False, 'import random\n')] |