code (string, lengths 20 – 1.05M) | apis (sequence) | extract_api (string, lengths 75 – 5.24M)
---|---|---
# Generated by Django 2.2.5 on 2020-10-02 22:46
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('nsp_project_app', '0006_auto_20201003_0414'),
]
operations = [
migrations.DeleteModel(
name='Like',
),
]
| [
"django.db.migrations.DeleteModel"
] | [((235, 270), 'django.db.migrations.DeleteModel', 'migrations.DeleteModel', ([], {'name': '"""Like"""'}), "(name='Like')\n", (257, 270), False, 'from django.db import migrations\n')] |
#
# DAPLink Interface Firmware
# Copyright (c) 2016-2016, ARM Limited, All Rights Reserved
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import struct
import numbers
import time
import usb.util
class USBMsd(object):
"""Wrapper class for a MSD usb device"""
# Bulk only transport documented in
# "Universal Serial Bus Mass Storage Class"
# SCSI commands documented in "SCSI Commands Reference Manual" by Seagate
CLASS_MSD = 0x8
# Write 10
# Read 10
# Test unit ready
# Request Sense
# dCBWSignature
# dCBWTag
# dCBWDataTransferLength
# bmCBWFlags
# bCBWLUN
# bCBWCBLength
FMT_CBW = "<IIIBBB"
# dCSWSignature
# dCSWTag
# dCSWDataResidue
# bCSWStatus
FMT_CSW = "<IIIB"
CSW_STATUS_PASSED = 0
CSW_STATUS_FAILED = 1
CSW_STATUS_PHASE_ERROR = 2
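# Size note (comment added for clarity, not in the original source):
# struct.calcsize(FMT_CBW) is 15 bytes, so the CBW header plus the 16-byte
# padded CBWCB built in _msd_transfer() forms the 31-byte Command Block
# Wrapper required by the Bulk-Only Transport spec, while
# struct.calcsize(FMT_CSW) is 13 bytes, matching the 13-byte status read.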
class SCSIError(Exception):
def __init__(self, error):
Exception.__init__(self)
self.value = error
# Some SCSI commands
# Value Keil middleware define Seagate name
# 0x12 SCSI_INQUIRY INQUIRY
# 0x23 SCSI_READ_FORMAT_CAPACITIES Missing
# 0x25 SCSI_READ_CAPACITY READ CAPACITY (10)
# 0x28 SCSI_READ10 READ (10)
# 0x1A SCSI_MODE_SENSE6 MODE SENSE (6)
# 0x00 SCSI_TEST_UNIT_READY TEST UNIT READY
# 0x2A SCSI_WRITE10 WRITE (10)
# 0x03 SCSI_REQUEST_SENSE REQUEST SENSE
# 0x1E SCSI_MEDIA_REMOVAL Missing
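# Note (added for clarity): of the commands above, only READ (10), WRITE (10)
# and TEST UNIT READY are wrapped below as scsi_read10, scsi_write10 and
# scsi_test_unit_ready (opcodes 0x28, 0x2A and 0x00); the rest are listed
# for reference only.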
def __init__(self, device):
self._dev = device
self._if = None
self.ep_in = None
self.ep_out = None
self._locked = False
self._cbw_tag = 0
self.timeout = 60 * 1000
# delays are for testing only
self.delay_cbw_to_data = 0
self.delay_data_to_csw = 0
# Find interface
for interface in device.get_active_configuration():
if interface.bInterfaceClass == USBMsd.CLASS_MSD:
assert self._if is None
self._if = interface
assert self._if is not None
# Find endpoints
for endpoint in self._if:
if endpoint.bEndpointAddress & 0x80:
assert self.ep_in is None
self.ep_in = endpoint
else:
assert self.ep_out is None
self.ep_out = endpoint
assert self.ep_in is not None
assert self.ep_out is not None
def lock(self):
"""Acquire exclisive access to MSD"""
assert not self._locked
num = self._if.bInterfaceNumber
try:
if self._dev.is_kernel_driver_active(num):
self._dev.detach_kernel_driver(num)
except NotImplementedError:
pass
except usb.core.USBError:
pass
usb.util.claim_interface(self._dev, num)
self._locked = True
def unlock(self):
"""Release exclusive access to MSD"""
assert self._locked
num = self._if.bInterfaceNumber
usb.util.release_interface(self._dev, num)
try:
self._dev.attach_kernel_driver(num)
except NotImplementedError:
pass
except usb.core.USBError:
pass
self._locked = False
def scsi_read10(self, lba, block_count):
"""Send the SCSI read 10 command and return the data read"""
block_size = 512
cbwcb = bytearray(10)
cbwcb[0] = 0x28
cbwcb[2] = (lba >> (8 * 3)) & 0xFF
cbwcb[3] = (lba >> (8 * 2)) & 0xFF
cbwcb[4] = (lba >> (8 * 1)) & 0xFF
cbwcb[5] = (lba >> (8 * 0)) & 0xFF
cbwcb[7] = (block_count >> (8 * 1)) & 0xFF
cbwcb[8] = (block_count >> (8 * 0)) & 0xFF
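# CDB layout note (added for clarity): READ (10) carries the 32-bit LBA
# big-endian in CDB bytes 2-5 and the 16-bit transfer length, counted in
# blocks, in bytes 7-8; the shifts above assemble these fields by hand.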
ret, data = self._msd_transfer(cbwcb, 0, block_count * block_size)
if ret != self.CSW_STATUS_PASSED:
raise self.SCSIError(ret)
return data
def scsi_write10(self, lba, data):
"""Send the SCSI write 10 command"""
block_size = 512
assert len(data) % block_size == 0
block_count = (len(data) + (block_size - 1)) // block_size
cbwcb = bytearray(10)
cbwcb[0] = 0x2A
cbwcb[2] = (lba >> (8 * 3)) & 0xFF
cbwcb[3] = (lba >> (8 * 2)) & 0xFF
cbwcb[4] = (lba >> (8 * 1)) & 0xFF
cbwcb[5] = (lba >> (8 * 0)) & 0xFF
cbwcb[7] = (block_count >> (8 * 1)) & 0xFF
cbwcb[8] = (block_count >> (8 * 0)) & 0xFF
ret, _ = self._msd_transfer(cbwcb, 0, data)
if ret != self.CSW_STATUS_PASSED:
raise self.SCSIError(ret)
def scsi_test_unit_ready(self):
"""Send the SCSI test unit ready command and return status"""
cbwcb = bytearray(10)
cbwcb[0] = 0
ret, _ = self._msd_transfer(cbwcb, 0)
return ret
def _msd_transfer(self, cbwcb, lun, size_or_data=None):
"""Perform a bulk only transfer"""
assert self._locked
assert 1 <= len(cbwcb) <= 16
# Increment packet tag
transfer_tag = self._cbw_tag
self._cbw_tag = (self._cbw_tag + 1) & 0xFFFFFFFF
# None means data size of zero
if size_or_data is None:
size_or_data = 0
in_transfer = isinstance(size_or_data, numbers.Number)
transfer_size = (size_or_data if in_transfer else len(size_or_data))
assert in_transfer or len(size_or_data) > 0
# Phase - Command transport
cbw_signature = 0x43425355
cbw_tag = transfer_tag
cbw_data_transfer_length = transfer_size
cbw_flags = (1 << 7) if in_transfer else 0
cbw_lun = lun
cbw_length = len(cbwcb)
params = [cbw_signature, cbw_tag, cbw_data_transfer_length,
cbw_flags, cbw_lun, cbw_length]
cbw = struct.pack(self.FMT_CBW, *params)
pad_size = 16 - len(cbwcb)
payload = cbw + cbwcb + bytearray(pad_size)
self.ep_out.write(payload)
if self.delay_cbw_to_data != 0:
time.sleep(self.delay_cbw_to_data)
# Phase - Data Out or Data In (Optional)
data = None
if transfer_size > 0:
endpoint = self.ep_in if in_transfer else self.ep_out
try:
if in_transfer:
data = self.ep_in.read(transfer_size, self.timeout)
else:
self.ep_out.write(size_or_data, self.timeout)
except usb.core.USBError:
endpoint.clear_halt()
if self.delay_data_to_csw != 0:
time.sleep(self.delay_data_to_csw)
# Phase - Status Transport
csw = self.ep_in.read(13, self.timeout)
csw_signature, csw_tag, csw_data_residue, csw_status = \
struct.unpack(self.FMT_CSW, csw)
assert csw_signature == 0x53425355
assert csw_tag == transfer_tag
#TODO - check residue
return (csw_status, data)
class Struct(object):
"""Base class for a C structure"""
def __init__(self, name, structure, data):
field_list = [field[0] for field in structure]
fmt_list = [field[1] for field in structure]
format_str = "<" + "".join(fmt_list)
struct_size = struct.calcsize(format_str)
value_list = struct.unpack(format_str, data[:struct_size])
value_dict = {}
for field_name, value in zip(field_list, value_list):
value_dict[field_name] = value
self.name = name
self.format_str = format_str
self.field_list = field_list
self.value_dict = value_dict
self.size = struct_size
def __getitem__(self, key):
return self.value_dict[key]
def __setitem__(self, key, value):
self.value_dict[key] = value
def __str__(self):
desc = ""
desc += self.name + ":" + os.linesep
for field in self.field_list:
value = self.value_dict[field]
if isinstance(value, bytes):
value = list(bytearray(value))
desc += (" %s=%s" + os.linesep) % (field, value)
return desc
def pack(self):
"""Return a byte representation of this structure"""
value_list = []
for field in self.field_list:
value_list.append(self.value_dict[field])
return struct.pack(self.format_str, *value_list)
class MBR(Struct):
"""Wrapper class for a FAT MBR"""
STRUCTURE = (
("BS_jmpBoot", "3s"),
("BS_OEMName", "8s"),
("BPB_BytsPerSec", "H"),
("BPB_SecPerClus", "B"),
("BPB_RsvdSecCnt", "H"),
("BPB_NumFATs", "B"),
("BPB_RootEntCnt", "H"),
("BPB_TotSec16", "H"),
("BPB_Media", "B"),
("BPB_FATSz16", "H"),
("BPB_SecPerTrk", "H"),
("BPB_NumHeads", "H"),
("BPB_HiddSec", "L"),
("BPB_TotSec32", "L"),
)
def __init__(self, data, sector=None):
Struct.__init__(self, "MBR", self.STRUCTURE, data)
self.sector = sector
class DirectoryEntry(Struct):
"""Wrapper class for a FAT DirectoryEntry"""
STRUCTURE = (
("DIR_Name", "11s"),
("DIR_Attr", "B"),
("DIR_NTRes", "B"),
("DIR_CrtTimeTenth", "B"),
("DIR_CrtTime", "H"),
("DIR_CrtDate", "H"),
("DIR_LstAccDate", "H"),
("DIR_FstClusHI", "H"),
("DIR_WrtTime", "H"),
("DIR_WrtDate", "H"),
("DIR_FstClusLO", "H"),
("DIR_FileSize", "L"),
)
def __init__(self, data):
Struct.__init__(self, "DirectoryEntry", self.STRUCTURE, data)
class Directory(object):
"""Wrapper class for a FAT Directory"""
ENTRY_SIZE = 32
def __init__(self, entry_count, data, sector=None):
directory_list = []
for i in range(entry_count):
start = i * self.ENTRY_SIZE
dir_data = data[start:start + self.ENTRY_SIZE]
entry = DirectoryEntry(dir_data)
directory_list.append(entry)
self.directory_list = directory_list
self.sector = sector
def __iter__(self):
return iter(self.directory_list)
def __getitem__(self, key):
return self.directory_list[key]
def find_free_entry_index(self):
"""Find a free index in this Directory or return None"""
for idx, directory in enumerate(self.directory_list):
name_data = bytearray(directory["DIR_Name"])
if name_data[0] in (0x00, 0xE5):
return idx
return None
def pack(self):
"""Return a byte a Directory"""
data = bytearray()
for directory in self.directory_list:
data.extend(directory.pack())
return data
class Fat(object):
"""Wrapper class for a FAT filesystem on a SCSI device"""
SECTOR_SIZE = 512
CLUSTER_SIZE = 4 * 1024
def __init__(self, msd):
self.msd = msd
self.reload()
def reload(self):
"""Reload all internal data of this Fat filesystem"""
# Read MBR
mbr_data = self.msd.scsi_read10(0, 1)
mbr = MBR(mbr_data, 0)
# Read in the root directory
root_dir_sec = (mbr["BPB_RsvdSecCnt"] +
(mbr["BPB_NumFATs"] * mbr["BPB_FATSz16"]))
sec_count = (mbr["BPB_RootEntCnt"] * 32 + 512 - 1) // 512
root_dir_data = self.msd.scsi_read10(root_dir_sec, sec_count)
root_dir = Directory(mbr["BPB_RootEntCnt"], root_dir_data,
root_dir_sec)
self.mbr = mbr
self.root_dir = root_dir
| [
"struct.calcsize",
"struct.unpack",
"time.sleep",
"struct.pack"
] | [((6438, 6472), 'struct.pack', 'struct.pack', (['self.FMT_CBW', '*params'], {}), '(self.FMT_CBW, *params)\n', (6449, 6472), False, 'import struct\n'), ((7383, 7415), 'struct.unpack', 'struct.unpack', (['self.FMT_CSW', 'csw'], {}), '(self.FMT_CSW, csw)\n', (7396, 7415), False, 'import struct\n'), ((7848, 7875), 'struct.calcsize', 'struct.calcsize', (['format_str'], {}), '(format_str)\n', (7863, 7875), False, 'import struct\n'), ((7897, 7942), 'struct.unpack', 'struct.unpack', (['format_str', 'data[:struct_size]'], {}), '(format_str, data[:struct_size])\n', (7910, 7942), False, 'import struct\n'), ((8927, 8968), 'struct.pack', 'struct.pack', (['self.format_str', '*value_list'], {}), '(self.format_str, *value_list)\n', (8938, 8968), False, 'import struct\n'), ((6648, 6682), 'time.sleep', 'time.sleep', (['self.delay_cbw_to_data'], {}), '(self.delay_cbw_to_data)\n', (6658, 6682), False, 'import time\n'), ((7187, 7221), 'time.sleep', 'time.sleep', (['self.delay_data_to_csw'], {}), '(self.delay_data_to_csw)\n', (7197, 7221), False, 'import time\n')] |
"""
Fast operations on a matrix class
"""
import multiprocessing
from matrix import Matrix, MatrixFactory
from functools import partial
# Debugging
logger = multiprocessing.log_to_stderr()
logger.setLevel(multiprocessing.SUBDEBUG)
def calc_row(r, m1, m2):
""" Calculate a row 'r' of the multiplication output of matrices m1 and m2 """
d = {}
print('Calculating for',r)
for y in range(m2.m):
d[(r, y)] = sum(item[0]*item[1] for item in zip(m1.rows[r], m2[y]))
return d
def calc_row2(r, m1, m2):
""" Calculate a row 'r' of the multiplication output of matrices m1 and m2 """
d = []
print('Calculating for',r)
for y in range(m2.m):
# import pdb;pdb.set_trace()
# import forkedpdb;forkedpdb.ForkedPdb().set_trace()
d.append(sum(item[0]*item[1] for item in zip(m1.rows[r], m2[y])))
# d.append(sum(item[0]*item[1] for item in zip(m1.rows[r], m2[y])))
# import forkedpdb;forkedpdb.ForkablePdb().set_trace()
return d
class FastMatrixOps(object):
""" Fast operations on the matrix """
@classmethod
def multiply(self, m1, m2):
""" Concurrently multiply two matrices using multiprocessing """
matm, m2_n = m2.getRank()
# Number of columns of m1 == Number of rows of m2
if (m1.n != matm):
raise MatrixError("Matrices cannot be multiplied!")
m2_t = m2.getTranspose()
mulmat = Matrix(m1.m, m2_n)
# Matrix multiplication theorem
# C = A x B then
#
# k= m
# C(i,j) = Sigma A(i,k)*B(k,j)
# k = 1
#
# where m => number of rows of A
# If rank of A => (m, n)
# and rank of B => (n, p)
# Rank of C => (m, p)
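# Tiny worked example of the formula above (added for illustration):
#   A = [[1, 2], [3, 4]] and B = [[5, 6], [7, 8]] give
#   C(0,0) = 1*5 + 2*7 = 19, C(0,1) = 1*6 + 2*8 = 22,
#   C(1,0) = 3*5 + 4*7 = 43, C(1,1) = 3*6 + 4*8 = 50.
# calc_row computes one such row (fixed i, all j) against the transpose
# m2_t, so the rows can be handed out to worker processes independently.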
mul_partial = partial(calc_row, m1=m1, m2=m2_t)
pool = multiprocessing.Pool(2)
# Parallelize each row multiplication
for row_dict in pool.map(mul_partial, range(m1.m)):
for k,v in row_dict.items():
x, y = k
mulmat[x][y] = v
# print output
return mulmat
@classmethod
def multiply2(self, m1, m2):
""" Concurrently multiply two matrices using multiprocessing - version 2 """
matm, m2_n = m2.getRank()
# Number of columns of m1 == Number of rows of m2
if (m1.n != matm):
raise MatrixError("Matrices cannot be multiplied!")
m2_t = m2.getTranspose()
# Matrix multiplication theorem
# C = A x B then
#
# k= m
# C(i,j) = Sigma A(i,k)*B(k,j)
# k = 1
#
# where m => number of rows of A
# If rank of A => (m, n)
# and rank of B => (n, p)
# Rank of C => (m, p)
mul_partial = partial(calc_row2, m1=m1, m2=m2_t)
pool = multiprocessing.Pool(2)
# Parallelize each row multiplication
# data = pool.map(mul_partial, m1.m)
data = pool.map(mul_partial, range(m1.m))
# print data
# Build directly
mulmat = MatrixFactory.fromList(data)
return mulmat
if __name__ == "__main__":
# Make random matrix of rank (10, 10)
m1 = MatrixFactory.makeRandom(10, 10)
# Make second random matrix of rank (10, 10)
m2 = MatrixFactory.makeRandom(10, 10)
print('Calculating m1*m2 directly...')
m3 = m1*m2
fops = FastMatrixOps()
print('Calculating m1*m2 concurrently using dictionaries...')
m3_n = fops.multiply(m1, m2)
print('Asserting both are equal...')
print(m3 == m3_n)
assert(m3 == m3_n)
print('Calculating m1*m2 concurrently using direct data...')
m3_n = fops.multiply2(m1, m2)
print('Asserting both are equal...')
print(m3 == m3_n)
assert(m3 == m3_n)
| [
"matrix.MatrixFactory.makeRandom",
"multiprocessing.log_to_stderr",
"matrix.Matrix",
"functools.partial",
"multiprocessing.Pool",
"matrix.MatrixFactory.fromList"
] | [((161, 192), 'multiprocessing.log_to_stderr', 'multiprocessing.log_to_stderr', ([], {}), '()\n', (190, 192), False, 'import multiprocessing\n'), ((3276, 3308), 'matrix.MatrixFactory.makeRandom', 'MatrixFactory.makeRandom', (['(10)', '(10)'], {}), '(10, 10)\n', (3300, 3308), False, 'from matrix import Matrix, MatrixFactory\n'), ((3371, 3403), 'matrix.MatrixFactory.makeRandom', 'MatrixFactory.makeRandom', (['(10)', '(10)'], {}), '(10, 10)\n', (3395, 3403), False, 'from matrix import Matrix, MatrixFactory\n'), ((1457, 1475), 'matrix.Matrix', 'Matrix', (['m1.m', 'm2_n'], {}), '(m1.m, m2_n)\n', (1463, 1475), False, 'from matrix import Matrix, MatrixFactory\n'), ((1822, 1855), 'functools.partial', 'partial', (['calc_row'], {'m1': 'm1', 'm2': 'm2_t'}), '(calc_row, m1=m1, m2=m2_t)\n', (1829, 1855), False, 'from functools import partial\n'), ((1872, 1895), 'multiprocessing.Pool', 'multiprocessing.Pool', (['(2)'], {}), '(2)\n', (1892, 1895), False, 'import multiprocessing\n'), ((2848, 2882), 'functools.partial', 'partial', (['calc_row2'], {'m1': 'm1', 'm2': 'm2_t'}), '(calc_row2, m1=m1, m2=m2_t)\n', (2855, 2882), False, 'from functools import partial\n'), ((2899, 2922), 'multiprocessing.Pool', 'multiprocessing.Pool', (['(2)'], {}), '(2)\n', (2919, 2922), False, 'import multiprocessing\n'), ((3134, 3162), 'matrix.MatrixFactory.fromList', 'MatrixFactory.fromList', (['data'], {}), '(data)\n', (3156, 3162), False, 'from matrix import Matrix, MatrixFactory\n')] |
from PIL import Image
from toolz.curried import get
import numpy as np
import torch
from ignite.metrics import Metric
from horch.legacy.train.metrics import Average
class MeanIoU(Metric):
def __init__(self, num_classes, ignore_index=None):
self.num_classes = num_classes
self.ignore_index = ignore_index
super().__init__(self.output_transform)
def reset(self):
self.total_cm = np.zeros((self.num_classes, self.num_classes))
def update(self, output):
cm = output
self.total_cm += cm
def output_transform(self, output):
y_true, y_pred = get(["y_true", "y_pred"], output)
c = self.num_classes
if isinstance(y_true, Image.Image):
y_true = [np.array(img) for img in y_true]
elif torch.is_tensor(y_true):
y_true = y_true.cpu().byte().numpy()
y_pred = y_pred.argmax(dim=1)
y_pred = y_pred.cpu().byte().numpy()
y_pred = y_pred.reshape(-1)
y_true = y_true.reshape(-1)
if self.ignore_index is not None:
mask = np.not_equal(y_true, self.ignore_index)
y_pred = np.where(mask, y_pred, c)
y_true = np.where(mask, y_true, c)
current_cm = confusion_matrix(y_true, y_pred, self.num_classes + 1)[:c, :c]
return current_cm
def confusion_matrix(y_true, y_pred, num_classes):
c = num_classes
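# Comment added for clarity: each (true, pred) pair is encoded as the single
# integer true * c + pred in [0, c*c); np.bincount counts all pairs at once
# and the reshape yields a c x c matrix with true classes as rows and
# predicted classes as columns.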
return np.reshape(np.bincount(y_true * c + y_pred, minlength=c * c), (c, c))
class PixelAccuracy(Average):
def __init__(self, ignore_index=255):
self.ignore_index = ignore_index
super().__init__(self.output_transform)
def output_transform(self, output):
y_true, y_pred, batch_size = get(
["y_true", "y_pred", "batch_size"], output)
y_pred = y_pred.argmax(dim=1)
accs = []
for i in range(batch_size):
y = y_true[i]
p = y_pred[i]
tp = (y == p).sum()
if self.ignore_index is not None:
tp += (y == self.ignore_index).sum()
accs.append(tp.cpu().item() / np.prod(y.shape))
acc = np.mean(accs)
return acc, batch_size
class F1Score(Metric):
r"""
"""
def __init__(self, threshold=0.5, ignore_index=None, eps=1e-8, from_logits=True):
self.threshold = threshold
self.ignore_index = ignore_index
self.eps = eps
self.from_logits = from_logits
super().__init__(self.output_transform)
def reset(self):
self.tp = 0
self.fp = 0
self.fn = 0
def update(self, output):
tp, fp, fn = output
self.tp += tp
self.fp += fp
self.fn += fn
def compute(self):
p = self.tp / (self.tp + self.fp + self.eps)
r = self.tp / (self.tp + self.fn + self.eps)
f1 = 2 * p * r / (p + r + self.eps)
return f1
def output_transform(self, output):
y, p = get(["y_true", "y_pred"], output)
if p.ndim == 4:
if p.size(1) == 1:
p = p.squeeze(1)
if self.from_logits:
p = torch.sigmoid(p)
elif p.size(1) == 2:
if self.from_logits:
p = torch.softmax(p, dim=1)[:, 1, :, :]
elif p.ndim == 3:
if self.from_logits:
p = torch.sigmoid(p)
p = p > self.threshold
p = p.long()
y = y.long()
if self.ignore_index is None:
w = torch.ones_like(y)
else:
w = (y != self.ignore_index).long()
tp = torch.sum(p * y * w).item()
fp = torch.sum((1 - p) * y * w).item()
fn = torch.sum(p * (1 - y) * w).item()
return tp, fp, fn | [
"numpy.mean",
"torch.ones_like",
"numpy.prod",
"numpy.where",
"torch.sigmoid",
"numpy.not_equal",
"torch.is_tensor",
"numpy.zeros",
"numpy.array",
"torch.sum",
"torch.softmax",
"numpy.bincount",
"toolz.curried.get"
] | [((424, 468), 'numpy.zeros', 'np.zeros', (['self.num_classes', 'self.num_classes'], {}), '(self.num_classes, self.num_classes)\n', (432, 468), True, 'import numpy as np\n'), ((614, 647), 'toolz.curried.get', 'get', (["['y_true', 'y_pred']", 'output'], {}), "(['y_true', 'y_pred'], output)\n", (617, 647), False, 'from toolz.curried import get\n'), ((1423, 1472), 'numpy.bincount', 'np.bincount', (['(y_true * c + y_pred)'], {'minlength': '(c * c)'}), '(y_true * c + y_pred, minlength=c * c)\n', (1434, 1472), True, 'import numpy as np\n'), ((1724, 1771), 'toolz.curried.get', 'get', (["['y_true', 'y_pred', 'batch_size']", 'output'], {}), "(['y_true', 'y_pred', 'batch_size'], output)\n", (1727, 1771), False, 'from toolz.curried import get\n'), ((2136, 2149), 'numpy.mean', 'np.mean', (['accs'], {}), '(accs)\n', (2143, 2149), True, 'import numpy as np\n'), ((2952, 2985), 'toolz.curried.get', 'get', (["['y_true', 'y_pred']", 'output'], {}), "(['y_true', 'y_pred'], output)\n", (2955, 2985), False, 'from toolz.curried import get\n'), ((790, 813), 'torch.is_tensor', 'torch.is_tensor', (['y_true'], {}), '(y_true)\n', (805, 813), False, 'import torch\n'), ((1083, 1122), 'numpy.not_equal', 'np.not_equal', (['y_true', 'self.ignore_index'], {}), '(y_true, self.ignore_index)\n', (1095, 1122), True, 'import numpy as np\n'), ((1144, 1169), 'numpy.where', 'np.where', (['mask', 'y_pred', 'c'], {}), '(mask, y_pred, c)\n', (1152, 1169), True, 'import numpy as np\n'), ((1191, 1216), 'numpy.where', 'np.where', (['mask', 'y_true', 'c'], {}), '(mask, y_true, c)\n', (1199, 1216), True, 'import numpy as np\n'), ((3507, 3525), 'torch.ones_like', 'torch.ones_like', (['y'], {}), '(y)\n', (3522, 3525), False, 'import torch\n'), ((744, 757), 'numpy.array', 'np.array', (['img'], {}), '(img)\n', (752, 757), True, 'import numpy as np\n'), ((3601, 3621), 'torch.sum', 'torch.sum', (['(p * y * w)'], {}), '(p * y * w)\n', (3610, 3621), False, 'import torch\n'), ((3642, 3668), 'torch.sum', 'torch.sum', (['((1 - p) * y * w)'], {}), '((1 - p) * y * w)\n', (3651, 3668), False, 'import torch\n'), ((3689, 3715), 'torch.sum', 'torch.sum', (['(p * (1 - y) * w)'], {}), '(p * (1 - y) * w)\n', (3698, 3715), False, 'import torch\n'), ((2104, 2120), 'numpy.prod', 'np.prod', (['y.shape'], {}), '(y.shape)\n', (2111, 2120), True, 'import numpy as np\n'), ((3136, 3152), 'torch.sigmoid', 'torch.sigmoid', (['p'], {}), '(p)\n', (3149, 3152), False, 'import torch\n'), ((3362, 3378), 'torch.sigmoid', 'torch.sigmoid', (['p'], {}), '(p)\n', (3375, 3378), False, 'import torch\n'), ((3247, 3270), 'torch.softmax', 'torch.softmax', (['p'], {'dim': '(1)'}), '(p, dim=1)\n', (3260, 3270), False, 'import torch\n')] |
from django.contrib import admin
from .models import Profile
from django.contrib.auth.models import Group
admin.site.register(Profile)
# remove group
admin.site.unregister(Group)
| [
"django.contrib.admin.site.unregister",
"django.contrib.admin.site.register"
] | [((107, 135), 'django.contrib.admin.site.register', 'admin.site.register', (['Profile'], {}), '(Profile)\n', (126, 135), False, 'from django.contrib import admin\n'), ((152, 180), 'django.contrib.admin.site.unregister', 'admin.site.unregister', (['Group'], {}), '(Group)\n', (173, 180), False, 'from django.contrib import admin\n')] |
"""Console helper classes."""
import sys
from typing import Dict, Optional, Type, TypeVar
T = TypeVar("T")
class Console:
"""A helper class for console backends."""
def __init__(self, descriptor: str) -> None:
self._descriptor = descriptor
def _print(self, string: str) -> None:
"""
Wrapper around print.
:param string: String to print.
"""
print(string) # noqa: T001
def _input(self, prompt: str) -> str:
"""
Wrapper around input.
:param prompt: prompt for user.
:returns: response from user.
"""
return input(prompt)
def info(self, message: str) -> None:
"""
Print information to the user.
:param message: Message to print to the user.
"""
self._print(f"{self._descriptor}: {message}")
def read( # type: ignore
self,
prompt: str,
return_type: Optional[Type[T]] = str, # type: ignore
check_stdin: bool = True,
) -> T:
"""
Prompt the user for a value of type 'return_type'.
:param prompt: Prompt to display to the user.
:param return_type: type to cast the input as, defaults to str.
:param check_stdin: Check if stdin is available is a tty.
:returns: value of type 'return_type'.
"""
if check_stdin and return_type is bool and not sys.stdin.isatty():
return False # type: ignore
elif return_type is not None:
while True:
response = self._input(f"{self._descriptor}: {prompt}: ")
try:
# We have to ignore the types on this function unfortunately,
# as static type checking is not powerful enough to confirm
# that it is correct at runtime.
if return_type == bool:
return self._get_bool(response) # type: ignore
return return_type(response) # type: ignore
except ValueError:
self.info(f"Unable to construct a {return_type.__name__}"
f" from '{response}'")
else:
self._input(f"{self._descriptor}: {prompt}: ")
@staticmethod
def _get_bool(case: str) -> bool:
"""
Check if a string is a bool, if so return it.
:param case: string to check.
:return: boolean representation of case
:raises ValueError: case is not a bool.
"""
response_map: Dict[str, bool] = {
"true": True,
"yes": True,
"false": False,
"no": False,
}
normalised = case.lower().strip()
if normalised not in response_map:
raise ValueError()
else:
return response_map[normalised]
| [
"sys.stdin.isatty",
"typing.TypeVar"
] | [((95, 107), 'typing.TypeVar', 'TypeVar', (['"""T"""'], {}), "('T')\n", (102, 107), False, 'from typing import Dict, Optional, Type, TypeVar\n'), ((1423, 1441), 'sys.stdin.isatty', 'sys.stdin.isatty', ([], {}), '()\n', (1439, 1441), False, 'import sys\n')] |
from fastapi.testclient import TestClient
from url_shortener.backend.main import app
client = TestClient(app)
def test_create_shortlink():
response = client.post("/", json={"url": "testurl.com"})
response_json = response.json()
assert response.status_code == 200
assert "url" in response_json
assert "shortlink" in response_json
assert response_json["url"] == "testurl.com"
# not the most safe test
shortlink = response_json["shortlink"].split("/")[-1]
assert len(shortlink) == 7
assert shortlink.isalnum()
print(response_json["shortlink"])
# hacky check for insertion for now
assert Database.get_url(shortlink) == "testurl.com"
def test_get_full_url():
# hacky insert to database for now
Database.save_shortlink("testurl.com", "1234567")
response = client.get("/1234567")
assert response.status_code == 200
assert response.json() == {"url": "testurl.com"}
| [
"fastapi.testclient.TestClient"
] | [((96, 111), 'fastapi.testclient.TestClient', 'TestClient', (['app'], {}), '(app)\n', (106, 111), False, 'from fastapi.testclient import TestClient\n')] |
from abc import abstractmethod
import asyncio
from collections import OrderedDict
import logging
__all__ = [
'Container',
]
LOG = logging.getLogger(__name__)
class Container:
def __init__(self, config, worker, loop=None):
if loop is None:
loop = asyncio.get_event_loop()
self.loop = loop
super().__init__()
self.config = config
self.worker = worker
# Engines initialisation
self.engines = {}
# Services initialisation
self.services = {}
self.servers = OrderedDict()
self._stopping = False
@abstractmethod
async def make_servers(self, sockets):
"""Return handlers to serve data"""
@classmethod
def make_event_loop(cls, config):
"""To customize loop generation"""
return asyncio.new_event_loop()
async def start(self):
LOG.info('Starting application...')
def pre_stop(self):
if not self._stopping:
self._stopping = True
task = asyncio.ensure_future(self.stop(), loop=self.loop)
task.add_done_callback(self.post_stop)
else:
LOG.debug('Already stopping application, not doing anything')
async def stop(self):
LOG.info('Stopping application...')
def post_stop(self, future):
pass
| [
"logging.getLogger",
"collections.OrderedDict",
"asyncio.get_event_loop",
"asyncio.new_event_loop"
] | [((138, 165), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (155, 165), False, 'import logging\n'), ((559, 572), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (570, 572), False, 'from collections import OrderedDict\n'), ((826, 850), 'asyncio.new_event_loop', 'asyncio.new_event_loop', ([], {}), '()\n', (848, 850), False, 'import asyncio\n'), ((281, 305), 'asyncio.get_event_loop', 'asyncio.get_event_loop', ([], {}), '()\n', (303, 305), False, 'import asyncio\n')] |
import smart_imports
smart_imports.all()
class APIListRequestTests(utils_testcase.TestCase):
def setUp(self):
super(APIListRequestTests, self).setUp()
self.place_1, self.place_2, self.place_3 = game_logic.create_test_map()
def test_success(self):
data = self.check_ajax_ok(self.request_ajax_json(logic.api_list_url()))
self.assertEqual(data,
{'places': {
str(self.place_1.id): {'specialization': modifiers.CITY_MODIFIERS.NONE.value,
'frontier': False,
'name': '1x1-\u043d\u0441,\u0435\u0434,\u0438\u043c',
'position': {'y': 1, 'x': 1},
'id': self.place_1.id,
'size': 1},
str(self.place_3.id): {'specialization': modifiers.CITY_MODIFIERS.NONE.value,
'frontier': False,
'name': '1x10-\u043d\u0441,\u0435\u0434,\u0438\u043c',
'position': {'y': 3, 'x': 1},
'id': self.place_3.id,
'size': 3},
str(self.place_2.id): {'specialization': modifiers.CITY_MODIFIERS.NONE.value,
'frontier': False,
'name': '10x10-\u043d\u0441,\u0435\u0434,\u0438\u043c',
'position': {'y': 3, 'x': 3},
'id': self.place_2.id,
'size': 3}}})
class APIShowTests(utils_testcase.TestCase):
def setUp(self):
super(APIShowTests, self).setUp()
self.place_1, self.place_2, self.place_3 = game_logic.create_test_map()
def test_success(self):
self.check_ajax_ok(self.request_ajax_json(logic.api_show_url(self.place_2)))
class TestShowRequests(utils_testcase.TestCase):
def setUp(self):
super(TestShowRequests, self).setUp()
self.place_1, self.place_2, self.place_3 = game_logic.create_test_map()
self.account = self.accounts_factory.create_account()
def test_place_new_place_message(self):
self.assertTrue(self.place_1.is_new)
self.check_html_ok(self.request_html(utils_urls.url('game:places:show', self.place_1.id)), texts=['pgf-new-place-message'])
@mock.patch('the_tale.game.balance.constants.PLACE_NEW_PLACE_LIVETIME', 0)
def test_place_new_place_message__not_new(self):
self.check_html_ok(self.request_html(utils_urls.url('game:places:show', self.place_1.id)), texts=[('pgf-new-place-message', 0)])
@mock.patch('the_tale.game.places.objects.Place.is_frontier', True)
def test_place_frontier_message(self):
self.check_html_ok(self.request_html(utils_urls.url('game:places:show', self.place_1.id)), texts=['pgf-frontier-message'])
@mock.patch('the_tale.game.places.objects.Place.is_frontier', False)
def test_place_frontier_message__not_new(self):
self.check_html_ok(self.request_html(utils_urls.url('game:places:show', self.place_1.id)), texts=[('pgf-frontier-message', 0)])
def test_wrong_place_id(self):
self.check_html_ok(self.request_html(utils_urls.url('game:places:show', 'wrong_id')), texts=['pgf-error-place.wrong_format'])
def test_place_does_not_exist(self):
self.check_html_ok(self.request_html(utils_urls.url('game:places:show', 666)), texts=['pgf-error-place.wrong_value'])
def test__has_folclor(self):
blogs_helpers.prepair_forum()
blogs_helpers.create_post_for_meta_object(self.account, 'folclor-1-caption', 'folclor-1-text', meta_relations.Place.create_from_object(self.place_1))
blogs_helpers.create_post_for_meta_object(self.account, 'folclor-2-caption', 'folclor-2-text', meta_relations.Place.create_from_object(self.place_1))
self.check_html_ok(self.request_html(utils_urls.url('game:places:show', self.place_1.id)), texts=[('pgf-no-folclor', 0),
'folclor-1-caption',
'folclor-2-caption'])
def test__no_folclor(self):
self.check_html_ok(self.request_html(utils_urls.url('game:places:show', self.place_1.id)), texts=['pgf-no-folclor'])
| [
"smart_imports.all"
] | [((23, 42), 'smart_imports.all', 'smart_imports.all', ([], {}), '()\n', (40, 42), False, 'import smart_imports\n')] |
import tempfile
import numpy as np
import qpformat.core
def test_wrong_file_format():
tf = tempfile.mktemp(prefix="qpformat_test_")
with open(tf, "w") as fd:
fd.write("test")
try:
qpformat.core.guess_format(tf)
except qpformat.core.UnknownFileFormatError:
pass
else:
assert False, "Unknown file format was loaded!"
def test_load_with_bg():
data = np.ones((20, 20), dtype=float)
data *= np.linspace(-.1, 3, 20).reshape(-1, 1)
f_data = tempfile.mktemp(prefix="qpformat_test_", suffix=".npy")
np.save(f_data, data)
bg_data = np.ones((20, 20), dtype=float)
bg_data *= np.linspace(0, 1.1, 20).reshape(1, -1)
f_bg_data = tempfile.mktemp(prefix="qpformat_test_", suffix=".npy")
np.save(f_bg_data, bg_data)
ds = qpformat.core.load_data(path=f_data,
bg_data=f_bg_data,
as_type="float64")
qpi = ds.get_qpimage()
assert np.allclose(qpi.pha, data - bg_data, atol=1e-15, rtol=0)
if __name__ == "__main__":
# Run all tests
loc = locals()
for key in list(loc.keys()):
if key.startswith("test_") and hasattr(loc[key], "__call__"):
loc[key]()
| [
"numpy.allclose",
"numpy.ones",
"tempfile.mktemp",
"numpy.linspace",
"numpy.save"
] | [((98, 138), 'tempfile.mktemp', 'tempfile.mktemp', ([], {'prefix': '"""qpformat_test_"""'}), "(prefix='qpformat_test_')\n", (113, 138), False, 'import tempfile\n'), ((408, 438), 'numpy.ones', 'np.ones', (['(20, 20)'], {'dtype': 'float'}), '((20, 20), dtype=float)\n', (415, 438), True, 'import numpy as np\n'), ((503, 558), 'tempfile.mktemp', 'tempfile.mktemp', ([], {'prefix': '"""qpformat_test_"""', 'suffix': '""".npy"""'}), "(prefix='qpformat_test_', suffix='.npy')\n", (518, 558), False, 'import tempfile\n'), ((563, 584), 'numpy.save', 'np.save', (['f_data', 'data'], {}), '(f_data, data)\n', (570, 584), True, 'import numpy as np\n'), ((600, 630), 'numpy.ones', 'np.ones', (['(20, 20)'], {'dtype': 'float'}), '((20, 20), dtype=float)\n', (607, 630), True, 'import numpy as np\n'), ((701, 756), 'tempfile.mktemp', 'tempfile.mktemp', ([], {'prefix': '"""qpformat_test_"""', 'suffix': '""".npy"""'}), "(prefix='qpformat_test_', suffix='.npy')\n", (716, 756), False, 'import tempfile\n'), ((761, 788), 'numpy.save', 'np.save', (['f_bg_data', 'bg_data'], {}), '(f_bg_data, bg_data)\n', (768, 788), True, 'import numpy as np\n'), ((978, 1034), 'numpy.allclose', 'np.allclose', (['qpi.pha', '(data - bg_data)'], {'atol': '(1e-15)', 'rtol': '(0)'}), '(qpi.pha, data - bg_data, atol=1e-15, rtol=0)\n', (989, 1034), True, 'import numpy as np\n'), ((451, 475), 'numpy.linspace', 'np.linspace', (['(-0.1)', '(3)', '(20)'], {}), '(-0.1, 3, 20)\n', (462, 475), True, 'import numpy as np\n'), ((646, 669), 'numpy.linspace', 'np.linspace', (['(0)', '(1.1)', '(20)'], {}), '(0, 1.1, 20)\n', (657, 669), True, 'import numpy as np\n')] |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import deeptensor as dt
import torch
def accuracy(logits, labels, topk=(1,)):
with torch.no_grad():
maxk = max(topk)
batch_size = labels.size(0)
_, pred = logits.topk(maxk, 1, True, True)
pred = pred.t()
correct = pred.eq(labels.view(1, -1).expand_as(pred))
res = []
for k in topk:
correct_k = correct[:k].view(-1).float().sum(0, keepdim=True)
res.append(correct_k.mul_(1.0 / batch_size))
return res
| [
"torch.no_grad"
] | [((199, 214), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (212, 214), False, 'import torch\n')] |
__author__ = 'jparedes'
import numpy as np
import pandas as pd
from itertools import compress, chain
import scipy.sparse as sp
def trimf(x, params):
"""
Triangular fuzzy operation in a vector
:param x: Input column vector: array([[0.2],[0.9],[0.42],[0.74],[0.24],[0.28],[0.34]])
:param params: 3 points of the triangle shape: [0.1, 0.4, 0.7]
:return: Column vector: array([[0.], [0.], [0.8], [0.], [0.], [0.], [0.4]])
"""
a = params[0]
b = params[1]
c = params[2]
y = np.zeros(np.shape(x)) # Left and right shoulders (y = 0)
# Left slope
if a != b:
index = np.logical_and(a < x, x < b) # find(a < x & x < b)
y[index] = (x[index] - a) / (b - a)
# right slope
if b != c:
index = np.logical_and(b < x, x < c) # find(b < x & x < c)
y[index] = (c - x[index]) / (c - b)
# Center (y = 1)
index = x == b
y[index] = 1
return y
def trapmf(x, params):
"""
Trapezoidal fuzzy operation
:param x: Input column vector
:param params: 4 points which define the trapezoidal
:return: Output column vector
"""
a, b, c, d = params
y1 = np.zeros(np.shape(x))
y2 = np.zeros(np.shape(x))
# Compute y1
index = x >= b
if sum(index) != 0: # ~isempty(index)
y1[index] = 1.
index = x < a
if sum(index) != 0: # ~isempty(index):
y1[index] = 0.
index = np.logical_and(a <= x, x < b) # find(a <= x & x < b);
ind = np.logical_and(sum(index) != 0, a != b)
if ind: # ~isempty(index) & a ~= b,
y1[index] = (x[index] - a) / (b - a)
# Compute y2
index = x <= c
if sum(index) != 0:
y2[index] = 1.
index = x > d
if sum(index) != 0:
y2[index] = 0.
index = np.logical_and(c < x, x <= d) # find(c < x & x <= d)
ind = np.logical_and(sum(index) != 0, c != d)
if ind: # ~isempty(index) & c ~= d
y2[index] = (d - x[index]) / (d - c)
y = np.minimum(y1, y2)
return y
def triangle_mb(y, tipo, n):
"""
Application of 'n' triangular membership functions
:param y: Attribute
:param tipo: 'normal' or 'tukey'
:param n: number of triangular membership functions
:return:
"""
if tipo == 'tukey':
centro = np.percentile(y, np.linspace(0, 100, n).tolist()) # [0, 25, 50, 75, 100]
else: # 'normal'
ymin = min(y)
ymax = max(y)
centro = np.linspace(ymin, ymax, n)
ex_i = trapmf(y, [-np.inf, -np.inf, centro[0], centro[1]])
ex_d = trapmf(y, [centro[n - 2], centro[n - 1], np.inf, np.inf])
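# Note (added for clarity): the two outer membership functions are
# open-shouldered trapezoids, so samples below centro[0] or above
# centro[n-1] keep full membership in the outer sets; the n-2 inner
# functions built in the loop below are triangles centred on centro[1..n-2].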
# Fuzzy sets
muY = np.array(ex_i)
for i in range(n - 2):
aux = trimf(y, centro[i:i + n])
muY = np.append(muY, aux, 1)
muY = np.append(muY, ex_d, 1)
return muY
def gather_columnspremises_by_attribute(lista, sizes_attributes):
# lista: [0,1,2,3...,7]
# sizes_attributes: [3, 2, 3]
# output: [(0,1,2), (3,4), (5,6,7)]
new_lista = []
ref = 0
for i in sizes_attributes:
new_lista.append(tuple(lista[ref:ref+i]))
ref += i
return new_lista
class Fuzzification:
def __init__(self, X, categorical_list_bool):
self.X = X
self.uX = []
self.cat_list_bool = categorical_list_bool # [0, 1, 0]
self.num_prem_by_attribute = [0] # [3, 2, 3]
self.premises_attributes = [] # [(0,1,2),(3,4),(5,6,7)]
self.indexes_premises_contain_negation = []
self.ref_attributes = range(len(categorical_list_bool))
def build_uX(self, tipo, n):  # 'tipo' selects the triangular fuzzification: 'normal' or 'tukey'
# Calculate 'uX' and 'size of attributes'
list_uX = []
size_attr = []
MX = self.X
if sum(self.cat_list_bool) != 0:
for i in range(MX.shape[1]):
if self.cat_list_bool[i] == 1:
attribute = pd.DataFrame(MX[:, [i]].tolist(), dtype="category") # print attribute.describe()
aux = pd.get_dummies(attribute).values
if aux.shape[1] == 2: # new IF
aux = np.delete(aux, 1, axis=1)
size_attr.append(aux.shape[1])
else:
attribute = MX[:, [i]]
aux = triangle_mb(attribute, tipo, n)
size_attr.append(aux.shape[1])
list_uX.append(aux)
else:
for i in range(MX.shape[1]):
attribute = MX[:, [i]]
aux = triangle_mb(attribute, tipo, n)
list_uX.append(aux)
size_attr.append(aux.shape[1])
self.uX = np.hstack(list_uX)
self.num_prem_by_attribute = size_attr
number_columns = self.uX.shape[1]
self.premises_attributes = gather_columnspremises_by_attribute(range(number_columns), size_attr)
self.indexes_premises_contain_negation = number_columns * [0]
def add_negation(self):
num_attributes = len(self.cat_list_bool)
# ref_attributes = range(num_attributes)
num_col = sum(self.num_prem_by_attribute) # number of columns, individual premises
# attributes with more than 2 membership functions
attrib_more_2fp = [0 if i < 3 else 1 for i in self.num_prem_by_attribute]
index_premises_negation = [1 if (attrib_more_2fp[i] + 1 - self.cat_list_bool[i]) != 0
else 0 for i in range(len(self.cat_list_bool))]
attrib_survivors_negation = list(compress(range(num_attributes), index_premises_negation))
premises_attrib_neg = gather_columnspremises_by_attribute(range(num_col, 2*num_col),
list(pd.Series(self.num_prem_by_attribute)
[attrib_survivors_negation])) # Modified line
premises_survivors_negation = list(compress(premises_attrib_neg, list(pd.Series(self.num_prem_by_attribute)
[attrib_survivors_negation]))) # Modified line
prem = [] # total premises (with negation) by attribute
for i in range(num_attributes):
prem_attr_i = self.premises_attributes[i]
if i in attrib_survivors_negation:
aux_index = attrib_survivors_negation.index(i)
prem_attr_i += premises_survivors_negation[aux_index]
prem.append(prem_attr_i)
# self.uX = sp.csr_matrix(self.uX)
# self.uX = sp.hstack((self.uX, 1. - self.uX), format='csr')
prem_surv = pd.Series(self.premises_attributes)[attrib_survivors_negation] # New line
ind_neg = [i for sub in list(prem_surv) for i in sub] # New line
self.uX = np.concatenate((self.uX, 1. - self.uX[:, ind_neg]), axis=1) # Modified line
self.premises_attributes = prem[:]
self.num_prem_by_attribute = [len(i) for i in prem]  # mainly used later by the overlapping filter
self.indexes_premises_contain_negation = index_premises_negation
# self.ref_attributes = ref_attributes
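# Minimal usage sketch (added for illustration; assumes a small numeric
# dataset with no categorical attributes):
#   X = np.array([[0.1, 5.0], [0.4, 7.5], [0.9, 6.2]])
#   fz = Fuzzification(X, [0, 0])
#   fz.build_uX('normal', 3)   # 3 triangular membership functions per attribute
#   fz.add_negation()
#   print(fz.uX.shape)         # (3, 12): 6 premises plus their negations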
def main():
print ('Module 2 <<Fuzzification>>')
if __name__ == '__main__':
main() | [
"pandas.Series",
"numpy.minimum",
"numpy.logical_and",
"numpy.hstack",
"numpy.delete",
"numpy.append",
"numpy.array",
"numpy.linspace",
"numpy.concatenate",
"pandas.get_dummies",
"numpy.shape"
] | [((1411, 1440), 'numpy.logical_and', 'np.logical_and', (['(a <= x)', '(x < b)'], {}), '(a <= x, x < b)\n', (1425, 1440), True, 'import numpy as np\n'), ((1765, 1794), 'numpy.logical_and', 'np.logical_and', (['(c < x)', '(x <= d)'], {}), '(c < x, x <= d)\n', (1779, 1794), True, 'import numpy as np\n'), ((1963, 1981), 'numpy.minimum', 'np.minimum', (['y1', 'y2'], {}), '(y1, y2)\n', (1973, 1981), True, 'import numpy as np\n'), ((2612, 2626), 'numpy.array', 'np.array', (['ex_i'], {}), '(ex_i)\n', (2620, 2626), True, 'import numpy as np\n'), ((2743, 2766), 'numpy.append', 'np.append', (['muY', 'ex_d', '(1)'], {}), '(muY, ex_d, 1)\n', (2752, 2766), True, 'import numpy as np\n'), ((514, 525), 'numpy.shape', 'np.shape', (['x'], {}), '(x)\n', (522, 525), True, 'import numpy as np\n'), ((612, 640), 'numpy.logical_and', 'np.logical_and', (['(a < x)', '(x < b)'], {}), '(a < x, x < b)\n', (626, 640), True, 'import numpy as np\n'), ((758, 786), 'numpy.logical_and', 'np.logical_and', (['(b < x)', '(x < c)'], {}), '(b < x, x < c)\n', (772, 786), True, 'import numpy as np\n'), ((1165, 1176), 'numpy.shape', 'np.shape', (['x'], {}), '(x)\n', (1173, 1176), True, 'import numpy as np\n'), ((1196, 1207), 'numpy.shape', 'np.shape', (['x'], {}), '(x)\n', (1204, 1207), True, 'import numpy as np\n'), ((2424, 2450), 'numpy.linspace', 'np.linspace', (['ymin', 'ymax', 'n'], {}), '(ymin, ymax, n)\n', (2435, 2450), True, 'import numpy as np\n'), ((2709, 2731), 'numpy.append', 'np.append', (['muY', 'aux', '(1)'], {}), '(muY, aux, 1)\n', (2718, 2731), True, 'import numpy as np\n'), ((4704, 4722), 'numpy.hstack', 'np.hstack', (['list_uX'], {}), '(list_uX)\n', (4713, 4722), True, 'import numpy as np\n'), ((6862, 6922), 'numpy.concatenate', 'np.concatenate', (['(self.uX, 1.0 - self.uX[:, ind_neg])'], {'axis': '(1)'}), '((self.uX, 1.0 - self.uX[:, ind_neg]), axis=1)\n', (6876, 6922), True, 'import numpy as np\n'), ((6695, 6730), 'pandas.Series', 'pd.Series', (['self.premises_attributes'], {}), '(self.premises_attributes)\n', (6704, 6730), True, 'import pandas as pd\n'), ((2284, 2306), 'numpy.linspace', 'np.linspace', (['(0)', '(100)', 'n'], {}), '(0, 100, n)\n', (2295, 2306), True, 'import numpy as np\n'), ((5789, 5826), 'pandas.Series', 'pd.Series', (['self.num_prem_by_attribute'], {}), '(self.num_prem_by_attribute)\n', (5798, 5826), True, 'import pandas as pd\n'), ((4052, 4077), 'pandas.get_dummies', 'pd.get_dummies', (['attribute'], {}), '(attribute)\n', (4066, 4077), True, 'import pandas as pd\n'), ((4167, 4192), 'numpy.delete', 'np.delete', (['aux', '(1)'], {'axis': '(1)'}), '(aux, 1, axis=1)\n', (4176, 4192), True, 'import numpy as np\n'), ((6023, 6060), 'pandas.Series', 'pd.Series', (['self.num_prem_by_attribute'], {}), '(self.num_prem_by_attribute)\n', (6032, 6060), True, 'import pandas as pd\n')] |
from django.contrib import admin
from models import Moniton
class MonitonAdmin(admin.ModelAdmin):
pass
admin.site.register(Moniton, MonitonAdmin)
| [
"django.contrib.admin.site.register"
] | [((110, 152), 'django.contrib.admin.site.register', 'admin.site.register', (['Moniton', 'MonitonAdmin'], {}), '(Moniton, MonitonAdmin)\n', (129, 152), False, 'from django.contrib import admin\n')] |
def start_flask(file_path, port=8064):
import os, sys, json
from flask import Flask, request, jsonify
app = Flask('otil_server')
@app.route('/', methods=['POST'])
def handle():
data = request.get_data()
input_data = json.loads(data.decode('utf-8'))
model_path, py_file = os.path.split(file_path)
model = py_file.split('.py')[0]
sys.path.insert(0, model_path)
module = __import__(model)
result = module.apply(input_data)
return jsonify(result), 200
app.run(host='0.0.0.0', port=port)
if __name__ == '__main__':
import sys
print('sys.argv:', sys.argv)
_, port, file_path = sys.argv
start_flask(file_path, int(port))
| [
"sys.path.insert",
"flask.Flask",
"flask.request.get_data",
"os.path.split",
"flask.jsonify"
] | [((125, 145), 'flask.Flask', 'Flask', (['"""otil_server"""'], {}), "('otil_server')\n", (130, 145), False, 'from flask import Flask, request, jsonify\n'), ((222, 240), 'flask.request.get_data', 'request.get_data', ([], {}), '()\n', (238, 240), False, 'from flask import Flask, request, jsonify\n'), ((329, 353), 'os.path.split', 'os.path.split', (['file_path'], {}), '(file_path)\n', (342, 353), False, 'import os, sys, json\n'), ((404, 434), 'sys.path.insert', 'sys.path.insert', (['(0)', 'model_path'], {}), '(0, model_path)\n', (419, 434), False, 'import sys\n'), ((530, 545), 'flask.jsonify', 'jsonify', (['result'], {}), '(result)\n', (537, 545), False, 'from flask import Flask, request, jsonify\n')] |
"""A Graph G = (V,E) is a set of vertices/nodes (V) and a set of edges
(E included in V^2).
A Graph is a dictionary defining the fields:
- entity: 'graph'
- nbr_n: int: The number of Nodes.
- nbr_e: int: The number of Edges.
a node
,----------'-----------,
- nodes: ( ((int, ...), (int, ...)), ... )
'---.----' '---.----'
neighbors edges
- edges: ( (int, int), ... )
'---.----'
an edge
The 'nodes' key stores in cell i the neighbors and the edges of the ith
node.
The 'edges' key stores in cell i the ends of the ith edge.
We use tuples to have a fast access to the ngbrs/edges of a node,
because we assume that the graph will not change.
Optional fields are:
- dimns: int: The Geometrical dimension of the Graph
- coord: ((float, ...), ...): The coordinates of the Nodes
NB: these fields are initialized by the init_GraphGeometry function.
Example:
    Consider the Graph:    0     1
                        0 --- 1 --- 2
                             2|     |
                              3 ----'3
Then the corresponding dictionary is:
>>> graph_key = {
"entity": "graph",
"nbr_n": 4,
"nbr_e": 4,
"nodes": [
[ [1 ], [0 ] ], # node 0 has 1 neighbor along edge 0
[ [0,2,3], [0,1,2] ], # node 1 has 3 neighbors
[ [1,3 ], [1,3 ] ], # node 2 has 2 neighbors
[ [1,2 ], [2,3 ] ], # node 3 has 2 neighbors
],
"edges": [
[0,1],
[1,2],
[1,3],
[2,3],
],
}
"""
__author__ = "<NAME>"
__version__ = "1.0"
from crack.models.nweights import init_NWeights_from_args
from crack.models.eweights import init_EWeights_from_args
#################################
### Initialization (topology) ###
#################################
#****************#
# Init from file #
#****************#
def init_Graph_from_grf(models, records, filename,
key_topology="graph", key_nweights="nweights",
key_eweights="eweights"):
"""Build the Graph stored in a .grf file (format used by Scotch).
Can initialize the [NE]Weights if specified in [entities].
Arguments:
models: dict: The created Graph will be assigned to
[key_topology] in [models].
filename: Path to the .grf file.
Optional Arguments:
key_topology, key_nweights, key_eweights: str or None: keys under
which the Graph and, when present in the file, the Node and Edge
Weights are stored in [models]. Pass None for key_nweights or
key_eweights to skip loading those weights.
"""
with open(filename, "r") as f:
version = int(f.readline())
nbr_n, nbr_e = tuple(int(w) for w in f.readline().split())
base, fmt = tuple(w for w in f.readline().split())
# fmt = "ijk" where - i indicates if there are labels
# - j indicates if there are weights on edges
# - k indicates if there are weights on nodes
nbr_e = int(nbr_e/2)
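# Note (added for clarity): the .grf header counts arcs, i.e. every
# undirected edge is listed once per endpoint, hence the halving above;
# the "i < ngbr" tests below then materialise each edge only once.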
nodes = [[None, []] for i in range(nbr_n)]
edges = [None] * nbr_e
def read_ngbrs(i, ngbrs, ne):
nodes[i][0] = ngbrs
for ngbr in ngbrs:
if i < ngbr: # otherwise, already considered
edges[ne] = (i, ngbr)
nodes[ i][1].append(ne)
nodes[ngbr][1].append(ne)
ne += 1
return ne
def read_ngbrs_ewgts(i, ewgts, ngbrs, ews, ne):
nodes[i][0] = ngbrs
for ngbr, ew in zip(ngbrs, ews):
if i < ngbr: # otherwise, already considered
edges[ne] = (i, ngbr)
ewgts[ne] = [ew]
nodes[ i][1].append(ne)
nodes[ngbr][1].append(ne)
ne += 1
return ne
if fmt == "000": # No labels, no ewgts, no nwgts
nbr_c = 0
nwgts = None
ewgts = None
def init(i, line, ne):
words = tuple(int(w) for w in line.split())
# degree = words[0]
return read_ngbrs(i, words[1:], ne)
elif fmt == "001": # No labels, no ewgts, nwgts
nbr_c = 1
nwgts = [None] * nbr_n
ewgts = None
def init(i, line, ne):
words = tuple(int(w) for w in line.split())
nwgts[i] = (words[0],)
# degree = words[1]
return read_ngbrs(i, words[2:], ne)
elif fmt == "002": # No labels, no ewgts, nbr_c nwgts
nbr_c = int(f.readline())
nwgts = [None] * nbr_n
ewgts = None
def init(i, line, ne):
words = tuple(int(w) for w in line.split())
nwgts[i] = list(words[:nbr_c])
# degree = words[nbr_c]
ngbrs = list(words[nbr_c+1:])
return read_ngbrs(i, ngbrs, ne)
elif fmt == "100": # labels, no ewgts, no nwgts
nbr_c = 0
nwgts = None
ewgts = None
def init(_, line, ne):
words = tuple(int(w) for w in line.split())
i = words[0]
# degree = words[1]
return read_ngbrs(i, words[2:], ne)
elif fmt == "101": # labels, no ewgts, nwgts
nbr_c = 1
nwgts = [None] * nbr_n
ewgts = None
def init(_, line, ne):
words = tuple(int(w) for w in line.split())
i = words[0]
nwgts[i] = [words[1]]
# degree = words[2]
return read_ngbrs(i, words[3:], ne)
elif fmt == "102": # labels, no ewgts, nbr_c nwgts
nbr_c = int(f.readline())
nwgts = [None] * nbr_n
ewgts = None
def init(_, line, ne):
words = tuple(int(w) for w in line.split())
i = words[0]
nwgts[i] = list(words[1:nbr_c+1])
# degree = words[nbr_c+1]
return read_ngbrs(i, words[nbr_c+2:], ne)
elif fmt == "010": # No labels, ewgts, no nwgts
nbr_c = 0
nwgts = None
ewgts = [None] * nbr_e
def init(i, line, ne):
words = tuple(int(w) for w in line.split())
# degree = words[0]
ews, ngbrs = tuple(zip(*[(words[j],words[j+1]) for j in range(1, len(words), 2)]))
return read_ngbrs_ewgts(i, ewgts, ngbrs, ews, ne)
elif fmt == "110": # labels, ewgts, no nwgts
nbr_c = 0
nwgts = None
ewgts = [None] * nbr_e
def init(_, line, ne):
words = tuple(int(w) for w in line.split())
i = words[0]
# degree = words[1]
ews, ngbrs = tuple(zip(*[(words[j],words[j+1]) for j in range(2, len(words), 2)]))
return read_ngbrs_ewgts(i, ewgts, ngbrs, ews, ne)
elif fmt == "011": # No labels, ewgts, nwgts
nbr_c = 1
nwgts = [None] * nbr_n
ewgts = [None] * nbr_e
def init(i, line, ne):
words = tuple(int(w) for w in line.split())
nwgts[i] = [words[0]]
# degree = words[1]
ews, ngbrs = tuple(zip(*[(words[j],words[j+1]) for j in range(2, len(words), 2)]))
return read_ngbrs_ewgts(i, ewgts, ngbrs, ews, ne)
elif fmt == "012": # No labels, ewgts, nbr_c nwgts
nbr_c = int(f.readline())
nwgts = [None] * nbr_n
ewgts = [None] * nbr_e
def init(i, line, ne):
words = tuple(int(w) for w in line.split())
nwgts[i] = tuple(words[:nbr_c])
# degree = words[nbr_c]
ews, ngbrs = tuple(zip(*[(words[j],words[j+1]) for j in range(nbr_c+1, len(words), 2)]))
return read_ngbrs_ewgts(i, ewgts, ngbrs, ews, ne)
elif fmt == "111": # labels, ewgts, nwgts
nbr_c = 1
nwgts = [None] * nbr_n
ewgts = [None] * nbr_e
def init(_, line, ne):
words = tuple(int(w) for w in line.split())
i = words[0]
nwgts[i] = (words[1],)
# degree = words[2]
ews, ngbrs = tuple(zip(*[(words[j],words[j+1]) for j in range(3, len(words), 2)]))
return read_ngbrs_ewgts(i, ewgts, ngbrs, ews, ne)
elif fmt == "112": # labels, ewgts, nbr_c nwgts
nbr_c = int(f.readline())
nwgts = [None] * nbr_n
ewgts = [None] * nbr_e
def init(_, line, ne):
words = tuple(int(w) for w in line.split())
i = words[0]
nwgts[i] = tuple(words[1:nbr_c+1])
# degree = words[nbr_c+2]
ews, ngbrs = tuple(zip(*[(words[j],words[j+1]) for j in range(nbr_c+3, len(words), 2)]))
return read_ngbrs_ewgts(i, ewgts, ngbrs, ews, ne)
else:
raise ValueError("Wrong format in {}: got {}".format(filename, fmt))
ne = 0
for i, line in enumerate(f):
ne = init(i, line, ne)
assert ne == nbr_e
models[key_topology] = {
"entity": "graph",
"nbr_n": nbr_n,
"nbr_e": nbr_e,
"nodes": nodes,
"edges": edges,
}
if nwgts is not None and key_nweights is not None:
init_NWeights_from_args(models, records, nwgts, key_in=key_topology, key_out=key_nweights)
if ewgts is not None and key_eweights is not None:
init_EWeights_from_args(models, records, ewgts, key_in=key_topology, key_out=key_eweights)
models["key_lead"] = key_topology
def init_Graph_from_mtx(models, records, filename=None, key="graph"):
"""Build the Graph specified in a .mtx file (format used to store
matrices).
Arguments:
models: dict: The created Graph will be assigned to [key]
in [models].
filename: Path to the .grf file.
Structure of a .mtx file:
,--------------------------------------------------------------,
| % Header line |
| % Comment lines |
| #lines #columns #entries <-- for us: #nodes #nodes #edges |
| #in #out #value <-- for each edge, there is a line |
'--------------------------------------------------------------'
NB: - the ids in the mesh begin at 1...
- the edges are specified twice in the mesh
- see: http://math.nist.gov/MatrixMarket/formats.html
Arguments:
models: dict: The created Graph will be assigned to [key]
in [models].
filename: Path to the .mtx file.
"""
with open(filename, "r") as f:
line = f.readline()
fmt = line.split()[-1]
assert fmt in ["symmetric", "general"]
assert "array" not in line # specifier for dense matrices
while line[0] == "%":
line = f.readline()
nbr_n, nbr_n_, nbr_f = tuple(int(w) for w in line.split())
if nbr_n != nbr_n_:
crack_error(ValueError, "init_Graph_from_mtx",
"Non symmetric matrices: {} rows and {} columns.".format(nbr_n, nbr_n_))
nodes = [ [[], []] for _ in range(nbr_n)]
edges = []
nbr_e = 0
for line in f:
i, j = tuple(int(w)-1 for w in line.split()[:2])
if i > j:
# Update Nodes
# - Ngbrs
nodes[i][0].append(j)
nodes[j][0].append(i)
# - Edges
nodes[i][1].append(nbr_e)
nodes[j][1].append(nbr_e)
# Create Edge
edges.append([i, j])
nbr_e += 1
elif i == j:
nbr_f -= 1
if fmt == "general":
assert 2*nbr_e == nbr_f
else:
assert nbr_e == nbr_f
models[key] = {
"entity": "graph",
"nbr_n": nbr_n,
"nbr_e": nbr_e,
"nodes": nodes,
"edges": edges,
}
models["key_lead"] = key
#************************#
# Init from other models #
#************************#
def init_Graph_from_Hypergraph(models, algopt, stats):
"""Split the hyperedges into edges to form a Graph.
"""
# TODO
pass
def init_Graph_from_Mesh(models, records, key="graph", mesh_key="mesh", ngbr_dim="dual"):
"""Build a Graph depending on geometrical attributes of the Mesh.
Arguments:
models: dict: The created Graph will be assigned to [key]
in [models].
Optional Arguments:
mesh_key: str: The Mesh that will be used to create the Graph.
ngbr_dim: int [default: dim(Mesh)-1]: Cells that share an
object of exactly dimension [ngbr_dim] are considered
neighbors. Example; in 3D, if ngbr_dim = 2, two cells
that share a face are neighbors.
NB: when ngbr_dim == dim(Mesh)-1, we say that we build
the Dual Graph of the Mesh.
"""
nbr_n = models[mesh_key]["nbr_n"]
dimns = models[mesh_key]["dimns"]
elems = models[mesh_key]["elems"]
verts = models[mesh_key]["verts"]
nodes = [ ([], []) for _ in range(nbr_n) ]
edges = []
# Condition for an edge #
if ngbr_dim == "dual":
ngbr_dim = dimns - 1
if ngbr_dim == 0: # point --> Share 1 vertices
cond = (lambda l: l == 1)
elif ngbr_dim == 1: # segment --> Share 2 vertices
cond = (lambda l: l == 2)
elif ngbr_dim == 2: # face --> Share at least 3 vertices
cond = (lambda l: l > 2)
else:
raise ValueError("Impossible value for ngbr_dim (got {}).".format(ngbr_dim))
# First pass over the Vertices: record, for every Element, the Elements
# that share Vertices with it.
elems_com_verts = tuple([] for _ in range(nbr_n))
for v_elems in verts:
for elem in v_elems:
touched = list(v_elems)
touched.remove(elem)
elems_com_verts[elem].extend(touched)
# Find the neighbors of each Element and create the Edges.
nbr_e = 0
for elem, pot_ngbrs in enumerate(elems_com_verts):
ngbrs = set(ei for ei in pot_ngbrs
if (elem < ei
and cond(len([i for i in pot_ngbrs if i == ei]))
)
)
nodes[elem][0].extend(list(ngbrs))
edge_beg = nbr_e
for ngbr in ngbrs:
edges.append([elem, ngbr])
nodes[ngbr][0].append(elem)
nodes[ngbr][1].append(nbr_e)
nbr_e += 1
nodes[elem][1].extend(list(range(edge_beg, nbr_e)))
models[key] = {
"entity": "graph",
"nbr_n": nbr_n,
"nbr_e": nbr_e,
"nodes": nodes,
"edges": edges,
"dimns": models[mesh_key]["dimns"],
"coord": models[mesh_key]["coord"],
}
models["key_lead"] = key
#################################
### Initialization (geometry) ###
#################################
#****************#
# Init from file #
#****************#
def init_GraphGeom_from_mtx(models, records, filename):
"""Build the Graph Geometry (coordinates of the vertices)
specified by a coord.mtx file.
NB: There may be one coord per line, or all the coords for one
element on the same line.
"""
with open(filename, 'r') as f:
for line in f:
if line[0] != "%":
break
nbr_n, dimns = tuple(int(w) for w in line.split()[:2])
coord = []
line = f.readline()
words = line.split()
if len(words) == 1: # one coord per line
pt = [float(words[0])]
d = 1
for line in f:
pt.append(float(line))
d += 1
if d == dimns:
coord.append(tuple(pt))
d = 0
pt = []
else: # all coords for one node on one line
coord.append(tuple(float(w) for w in words))
for line in f:
coord.append(tuple(float(w) for w in line.split()))
if "graph" not in models:
models["graph"] = {
"nbr_n": nbr_n,
}
models["graph"]["dimns"] = dimns
models["graph"]["coord"] = tuple(coord)
def init_GraphGeom_from_xyz(models, records, filename):
with open(filename, "r") as f:
dimns = int(f.readline())
nbr_n = int(f.readline())
coord = [None] * nbr_n
for line in f:
words = line.split()
label = int(words[0])
coord[label] = tuple(float(w) for w in words[1:])
if "graph" not in models:
models["graph"] = {
"nbr_n": nbr_n,
}
models["graph"]["dimns"] = dimns
models["graph"]["coord"] = tuple(coord)
###################################
### Transformation & Properties ###
###################################
def coarsen_Graph(models, records, c_models, key_topo, aggregation):
    """Coarsen the Graph stored at [key_topo] in [models]: [aggregation]
    maps every fine node index to its coarse node index, and the coarsened
    Graph is stored under the same key in [c_models].
    """
nbr_n = models[key_topo]["nbr_n"]
edges = models[key_topo]["edges"]
nbr_n_ = max(aggregation) + 1
nodes_ = [ [[], []] for _ in range(nbr_n_) ]
edges_ = []
nbr_e_ = 0
for edge in edges:
i = aggregation[edge[0]]
j = aggregation[edge[1]]
if i != j and i not in nodes_[j][0]:
edges_.append([i, j])
nodes_[i][0].append(j)
nodes_[j][0].append(i)
nodes_[i][1].append(nbr_e_)
nodes_[j][1].append(nbr_e_)
nbr_e_ += 1
c_models[key_topo] = {
"entity": "graph",
"nbr_n" : nbr_n_,
"nbr_e" : nbr_e_,
"nodes" : nodes_,
"edges" : edges_,
}
if "dimns" in models[key_topo]:
c_models[key_topo]["dimns"] = models[key_topo]["dimns"]
c_models[key_topo]["coord"] = [0] * nbr_n_
for i, i_ in enumerate(aggregation):
c_models[key_topo]["coord"][i_] = models[key_topo]["coord"][i]
def check_Graph(models, records, print_models=True):
"""Properties checked:
(1) Each node's edge contains the node
(2) Each node's neighbour 'knows' node is one of its neighbors
(3) Each edge's node contains the edge
(4) Each edge has exactly 2 ends
(5) edge[0] < edge[1]
(6) The ngbrs and edges are ordered and correspond
(for node i, the jst edge links i to its jst neighbor)
"""
# TODO
pass
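# Illustrative sketch only (not part of the original module): one way the
# properties listed in check_Graph's docstring could be verified, assuming the
# nodes = [[ngbrs, edge_ids], ...] / edges = [[i, j], ...] layout built by the
# init_* functions above. The helper name below is hypothetical.
def _check_graph_sketch(graph):
    nodes = graph["nodes"]
    edges = graph["edges"]
    for ni, (ngbrs, eids) in enumerate(nodes):
        assert len(ngbrs) == len(eids)  # prerequisite for property (6)
        for ngbr, ei in zip(ngbrs, eids):
            assert ni in edges[ei]                # (1) node's edge contains the node
            assert ni in nodes[ngbr][0]           # (2) the neighbour knows the node
            assert ei in nodes[ngbr][1]           # (3) edge's node contains the edge
            assert set(edges[ei]) == {ni, ngbr}   # (6) jth edge links the node to its jth neighbour
    for edge in edges:
        assert len(edge) == 2       # (4) exactly 2 ends
        assert edge[0] < edge[1]    # (5) ordered ends, as documented above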
##############
### Record ###
##############
def Graph_to_grf(models, records, out_file,
nwgt_version=2, key_topology="graph",
key_nweights="nweights", key_eweights="eweights",
):
"""Register the Graph in a .grf file (format used by Scotch).
Keyword argument:
nwgt_version: 1 or 2: version of grf file to use.
"""
graph = models[key_topology]
nwgts = models.get(key_nweights, None)
ewgts = models.get(key_eweights, None)
nodes = graph["nodes"]
if nwgts is None:
nwgt_version = 1
# Format
fmt = [0] # fmt = "ijk" where - i indicates if there are labels
# - j indicates if there are weights on edges
# - k indicates if there are weights on nodes
fmt.append(int(ewgts is not None))
if nwgts is not None:
fmt.append(nwgt_version)
nbr_c = nwgts["nbr_c"]
nwgts = nwgts["weights"]
else:
fmt.append(0)
fmt = "".join([str(i) for i in fmt])
# Write the file
with open(out_file, "w") as f:
# Version line
f.write("0\n")
# Header line: nb_nodes nb_edges
f.write("{} {}\n".format(graph["nbr_n"], graph["nbr_e"] * 2))
f.write("{} {}\n".format(0, fmt))
# For nwgt_version_2: must write the number of criteria
if nwgt_version == 2:
f.write("{}\n".format(nbr_c))
# Write functions
# - Node weight
if nwgts is None:
def write_nwgt(*args):
pass
        elif nwgt_version == 1:
            def write_nwgt(f, nwgt):
                if nwgt == 0:
                    raise ValueError("scotch does not support null weights (got a null node weight)")
                f.write("{} ".format(nwgt))
else:
def write_nwgt(f, nwgt):
for wgt in nwgt:
f.write("{} ".format(wgt))
# - Node degree
def write_deg (f, ngbrs):
f.write("{} ".format(len(ngbrs)))
# - Ewgts and ngbrs
if ewgts is None:
            def write_ewgts_ngbrs(f, ngbrs, _):
for ngbr in ngbrs:
f.write("{} ".format(ngbr))
else:
ewgts = ewgts["weights"]
            def write_ewgts_ngbrs(f, ngbrs, edges):
for ngbr in ngbrs:
ei = next(e for e in edges if e in nodes[ngbr][1])
wgt = ewgts[ei][0]
if wgt == 0:
raise ValueError("edge {} has a null weight".format(ei))
f.write("{} {} ".format(wgt, ngbr))
# Write
for ni, (ngbrs,edges) in enumerate(nodes):
write_nwgt(f, nwgts[ni])
write_deg (f, ngbrs)
write_ewgts_ngbrs(f, ngbrs, edges)
f.write("\n")
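# Illustrative only (not part of the original module): for a 3-node triangle
# graph with no node or edge weights, the writer above produces a .grf file
# like this (trailing spaces omitted):
#
#   0
#   3 6
#   0 000
#   2 1 2
#   2 0 2
#   2 0 1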
def list_to_line(l):
return "{}\n".format(str(l)[1:-1].replace(", ", " "))
def Graph_to_mesh(models, filename, z=-1):
"""Register the Graph in a .mesh format.
NB: Graph Vertices become Mesh Vertices (not Elements).
Arguments:
z: int: -1: Use the original coordinate (None in 2D).
i >= 0: Use the ith node weight.
"""
n = models["n"]
e = models["e"]
edges = models["graph"]["edges"]
dimns = models["geometry"]["dimns" ]
ecoord = models["geometry"]["ecoord"]
if z >= 0:
dimns = 3
        nwgts = models.get("nweights", [(1,) for _ in range(n)])  # a list (not a generator) so it can be indexed below
nwmax = max(wgt[z] for wgt in nwgts)
coord = lambda i: list(ecoord[i]) + [nwgts[i][z]*5.0/nwmax] + [0]
else:
coord = lambda i: list(ecoord[i]) + [0]
with open(filename, "w") as f:
f.write("MeshVersionFormatted 2\n")
f.write("Dimension\n{}\n".format(dimns))
f.write("Vertices\n{}\n".format(n))
for i in range(n):
f.write(list_to_line(coord(i)))
f.write("Edges\n{}\n".format(e))
for j in range(e):
f.write(list_to_line([end + 1 for end in edges[j]] + [0]))
f.write("End\n")
def Graph_to_mgraph(models, filename):
"""Register the Graph in a .mgraph file (format used by MeTiS).
"""
grph = models["graph"]
nwgt = models.get("nweights")
hwgt = models.get("hweights")
ewgt = models.get("eweights")
n = models["n"]
e = models["e"]
nbr_c = models["c"]
nodes = grph["nodes"]
fmt = "{}{}{}".format(
0 if hwgt is None else 1, # vertex sizes (communication volume if the node is to transmit)
0 if nwgt is None else 1, # vertex weights
0 if ewgt is None else 1, # edge weights
)
# Define function that write a node data #
# Beware: indexes begin at 1
def write_node_size(f, node):
f.write("{}\t".format(hwgt[node][0]))
def write_node_nwgts(f, node):
for c in range(nbr_c):
f.write("{}\t".format(nwgt[node][c]))
def write_node_ngbrs_ewgts(f, node):
for ngbr in nodes[node][0]:
ei = next(e for e in nodes[node][1] if e in nodes[ngbr][1])
wgt = ewgt[ei][0]
if wgt == 0:
raise ValueError("edge {} has a null weight".format(ei))
f.write("{}\t{}\t".format(ngbr + 1, wgt))
f.write("\n")
def write_node_ngbrs(f, node):
for ngbr in nodes[node][0]:
f.write("{}\t".format(ngbr + 1))
f.write("\n")
# Define the write node function #
write_fcts = []
if hwgt is not None:
write_fcts.append(write_node_size)
if nwgt is not None:
write_fcts.append(write_node_nwgts)
if ewgt is None:
write_fcts.append(write_node_ngbrs)
else:
write_fcts.append(write_node_ngbrs_ewgts)
write_fcts = tuple(write_fcts)
def write_node(f, node):
for write_fct in write_fcts:
write_fct(f, node)
# Write the file #
with open(filename, "w") as f:
f.write("{}\t{}\t{}\t{}\n".format(n, e, fmt, nbr_c))
for i in range(n):
write_node(f, i)
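# Illustrative only (not part of the original module): for a 3-node path graph
# (edges 0-1 and 1-2), one weight criterion with every node weight equal to 1,
# and no vertex sizes or edge weights, the writer above produces
# (tab-separated, neighbour indices are 1-based):
#
#   3   2   010   1
#   1   2
#   1   1   3
#   1   2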
def Graph_to_u(models, filename):
"""Register the Graph in a .u file (format used by PaToH).
"""
# TODO
pass
####################
### Function IDs ###
####################
INIT_GRAPH_FCTS = {
"init_Graph_from_grf" : init_Graph_from_grf,
"init_Graph_from_Hypergraph": init_Graph_from_Hypergraph,
"init_Graph_from_Mesh" : init_Graph_from_Mesh,
"init_Graph_from_mtx" : init_Graph_from_mtx,
"init_Graph_from_u" : None, # TODO
}
INIT_GRAPHGEOM_FCTS = {
"init_GraphGeom_from_mtx": init_GraphGeom_from_mtx,
"init_GraphGeom_from_xyz": init_GraphGeom_from_xyz,
}
OUTPUT_GRAPH_FCTS = {
"Graph_to_grf": Graph_to_grf,
}
| [
"crack.models.eweights.init_EWeights_from_args",
"crack.models.nweights.init_NWeights_from_args"
] | [((9591, 9685), 'crack.models.nweights.init_NWeights_from_args', 'init_NWeights_from_args', (['models', 'records', 'nwgts'], {'key_in': 'key_topology', 'key_out': 'key_nweights'}), '(models, records, nwgts, key_in=key_topology,\n key_out=key_nweights)\n', (9614, 9685), False, 'from crack.models.nweights import init_NWeights_from_args\n'), ((9745, 9839), 'crack.models.eweights.init_EWeights_from_args', 'init_EWeights_from_args', (['models', 'records', 'ewgts'], {'key_in': 'key_topology', 'key_out': 'key_eweights'}), '(models, records, ewgts, key_in=key_topology,\n key_out=key_eweights)\n', (9768, 9839), False, 'from crack.models.eweights import init_EWeights_from_args\n')] |
import os
from typing import Optional, Tuple
import bs4
from bs4 import BeautifulSoup
try:
import xml.etree.cElementTree as ET
except ImportError:
import xml.etree.ElementTree as ET
from .article import (
Article,
ArticleComponentCheck
)
from .section_extr import *
class ArticleFunctions:
def __init__(self):
pass
@staticmethod
def article_construct_html_nature(soup: bs4.BeautifulSoup, doi: str):
article = Article()
article_component_check = ArticleComponentCheck()
article.doi = doi
article.publisher = 'nature'
# --- get title ---
head = soup.head
title = head.find_all('title')
title = title[0].text.split('|')[0].strip()
article.title = title
body = soup.body
sections = body.find_all('section')
# --- get abstract ---
abstract = None
abstract_idx = None
for i, section in enumerate(sections):
try:
if 'abs' in section['aria-labelledby'].lower() or section['data-title'] == 'Abstract':
abs_paras = section.find_all('p')
abstract = ''
for abs_para in abs_paras:
abstract += abs_para.text
abstract = format_text(abstract.strip())
abstract_idx = i
except KeyError:
pass
if not abstract:
# print('[Warning] No abstract is detected!')
article_component_check.abstract = False
article.abstract = abstract
# --- get sections ---
content_sections = list()
for i, section in enumerate(sections):
try:
if i == abstract_idx:
pass
else:
content_sections.append(section)
except KeyError:
pass
element_list = list()
for section in content_sections:
section_elements = html_section_extract_nature(section_root=section)
if section_elements:
element_list += section_elements
if not element_list:
# print('[Warning] No section is detected!')
article_component_check.sections = False
article.sections = element_list
return article, article_component_check
@staticmethod
def article_construct_html_wiley(soup: bs4.BeautifulSoup, doi: str):
article = Article()
article_component_check = ArticleComponentCheck()
article.doi = doi
article.publisher = 'wiley'
# --- get title ---
title = soup.find_all('title')
title = title[0].text.split(' - ')[0].strip()
article.title = title
body = soup.body
sections = body.find_all('section')
# --- get abstract ---
abstract = None
for section in sections:
try:
if 'article-section__abstract' in section['class']:
abs_paras = section.find_all('p')
abstract = ''
for abs_para in abs_paras:
abstract += abs_para.text
abstract = format_text(abstract.strip())
except KeyError:
pass
if not abstract:
# print('[Warning] Abstract is not found!')
article_component_check.abstract = False
article.abstract = abstract
# --- get sections ---
content_sections = None
for section in sections:
try:
if 'article-section__full' in section['class']:
content_sections = section
except KeyError:
pass
if content_sections:
element_list = html_section_extract_wiley(section_root=content_sections)
else:
# print(f"[Warning] No section is detected")
element_list = []
article_component_check.sections = False
article.sections = element_list
return article, article_component_check
@staticmethod
def article_construct_html_rsc(soup: bs4.BeautifulSoup, doi: str):
article = Article()
article_component_check = ArticleComponentCheck()
article.doi = doi
article.publisher = 'rsc'
# --- get title ---
head = soup.head
body = soup.body
title = body.find_all('h1')
if title:
title = format_text(title[0].text).strip()
else:
title = head.find_all('title')
title = '-'.join(title[0].text.split('-')[:-1]).strip()
article.title = title
paras = body.find_all('p')
# --- get abstract ---
abstract = ''
for section in paras:
try:
if 'abstract' in section['class']:
abstract = section.text
abstract = format_text(abstract.strip())
except KeyError:
pass
if not abstract:
article_component_check.abstract = False
article.abstract = abstract
# --- get sections ---
element_list = html_section_extract_rsc(section_root=body)
if '<abs>' in element_list:
element_list.remove('<abs>')
if not element_list:
# print('[Warning] No section is detected!')
article_component_check.sections = False
article.sections = element_list
return article, article_component_check
@staticmethod
def article_construct_html_springer(soup: bs4.BeautifulSoup, doi: str):
article = Article()
article_component_check = ArticleComponentCheck()
article.doi = doi
article.publisher = 'springer'
# --- get title ---
head = soup.head
title = head.find_all('title')
title = title[0].text.split('|')[0].strip()
article.title = title
body = soup.body
sections = body.find_all('section')
# --- get abstract ---
abstract = ''
abstract_idx = None
for i, section in enumerate(sections):
data_title = section.get('data-title', '')
if isinstance(data_title, str):
data_title = [data_title.lower()]
elif isinstance(data_title, list):
data_title = [t.lower() for t in data_title]
else:
data_title = []
class_element = section.get('class', '')
if isinstance(class_element, str):
class_element = [class_element.lower()]
elif isinstance(class_element, list):
class_element = [t.lower() for t in class_element]
else:
class_element = []
is_abs = False
for ele in data_title + class_element:
if 'abstract' in ele or 'summary' in ele:
is_abs = True
if is_abs:
abs_paras = section.find_all('p')
abstract = ''
for abs_para in abs_paras:
abstract += abs_para.text
abstract = format_text(abstract.strip())
abstract_idx = i
if not abstract:
# print('[Warning] Abstract is not found!')
article_component_check.abstract = False
article.abstract = abstract
# --- get sections ---
content_sections = list()
for i, section in enumerate(sections):
try:
if i != abstract_idx and not section.find_parent('section'):
content_sections.append(section)
except KeyError:
pass
element_list = list()
for section in content_sections:
section_elements = html_section_extract_springer(section_root=section)
if section_elements:
element_list += section_elements
if not element_list:
# print('[Warning] No section is detected!')
article_component_check.sections = False
article.sections = element_list
return article, article_component_check
@staticmethod
def article_construct_html_aip(soup: bs4.BeautifulSoup, doi: str):
article = Article()
article_component_check = ArticleComponentCheck()
article.doi = doi
article.publisher = 'aip'
# --- get title ---
head = soup.head
title = head.find_all('title')
title = title[0].text.split(':')[0].strip()
article.title = title
element_list = html_section_extract_aip(section_root=soup)
if not element_list:
# print('[Warning] No section is detected!')
article_component_check.sections = False
article.sections = element_list
return article, article_component_check
@staticmethod
def article_construct_html_acs(soup: bs4.BeautifulSoup, doi: str):
article = Article()
article_component_check = ArticleComponentCheck()
article.doi = doi
article.publisher = 'acs'
# --- get title ---
head = soup.head
title = head.find_all('title')
title = title[0].text.split(' | ')[0].strip()
article.title = title
# --- get abstract ---
body = soup.body
h2s = body.find_all('h2')
abs_h2 = None
for h2 in h2s:
h2_class = h2.get('class', [''])
if len(h2_class) == 0:
h2_class = ['']
if h2_class[0] == 'article_abstract-title':
abs_h2 = h2
if abs_h2 is not None:
abstract = abs_h2.nextSibling.text.strip()
else:
ps = body.find_all('p')
abs_p = None
for p in ps:
if p.get('class', [''])[0] == 'articleBody_abstractText':
abs_p = p
if not abs_p:
# print('[Warning] Abstract is not found!')
abstract = ''
article_component_check.abstract = False
else:
abstract = abs_p.text.strip()
abstract = format_text(abstract)
article.abstract = abstract
# --- get body sections ---
try:
content = soup.find_all('div', class_="article_content")[0]
except IndexError:
content = soup
element_list = html_section_extract_acs(section_root=content)
if not element_list:
# print('[Warning] No section is detected!')
article_component_check.sections = False
article.sections = element_list
return article, article_component_check
@staticmethod
def article_construct_html_elsevier(soup: bs4.BeautifulSoup, doi: str):
article = Article()
article_component_check = ArticleComponentCheck()
article.doi = doi
article.publisher = 'elsevier'
# --- get title ---
head = soup.head
title = head.find_all('title')
title = title[0].text.split(' - ')[0].strip()
article.title = title
# --- get abstract ---
body = soup.body
abs_divs = body.find_all('div', {"class": "Abstracts"})
if abs_divs:
abs_div = abs_divs[0]
else:
abs_div = []
abstract = list()
for div in abs_div:
for s in div.find_all('h2'):
s.extract()
div_class = div.get('class', '')
div_class = ' '.join(div_class) if isinstance(div_class, list) else div_class
if 'graphical' not in div_class and 'author-highlights' not in div_class:
abstract.append(format_text(div.text))
if not abstract:
abstract = ''
# print('[Warning] Abstract is not found!')
article_component_check.abstract = False
article.abstract = abstract
# --- get body sections ---
try:
article_block = body.find_all('article')[0]
except IndexError:
article_block = body
element_list = html_section_extract_elsevier(section_root=article_block)
if not element_list:
# print('[Warning] No section is detected!')
article_component_check.sections = False
article.sections = element_list
return article, article_component_check
@staticmethod
def article_construct_html_aaas(soup: bs4.BeautifulSoup, doi: str):
article = Article()
article_component_check = ArticleComponentCheck()
article.doi = doi
article.publisher = 'aaas'
# --- get title ---
head = soup.head
title = head.find_all('title')
title = title[0].text.split(' | ')[0].strip()
article.title = title
# --- get abstract ---
body = soup.body
h2s = body.find_all('h2')
abs_h2 = None
for h2 in h2s:
if h2.text.lower() == 'abstract':
abs_h2 = h2
break
if abs_h2:
abstract = format_text(abs_h2.nextSibling.text)
else:
abstract = None
if not abstract:
# print('[Warning] Abstract is not found!')
article_component_check.abstract = False
article.abstract = abstract
# --- get body sections ---
element_list = html_section_extract_aaas(section_root=body)
if not element_list:
# print('[Warning] No section is detected!')
article_component_check.sections = False
article.sections = element_list
return article, article_component_check
@staticmethod
def article_construct_xml_elsevier(root: ET.Element, doi: str):
article = Article()
article_component_check = ArticleComponentCheck()
article.doi = doi
article.publisher = 'elsevier'
ori_txt = root.findall(r'{http://www.elsevier.com/xml/svapi/article/dtd}originalText')[0]
doc = ori_txt.findall(r'{http://www.elsevier.com/xml/xocs/dtd}doc')[0]
# get title
title_element = list(doc.iter(tag=r'{http://www.elsevier.com/xml/common/dtd}title'))[-1]
iter_txt = list()
for txt in title_element.itertext():
txt_s = txt.strip()
if txt_s:
iter_txt.append(txt)
title = ''.join(iter_txt).strip()
article.title = title
# get abstract
abs_elements = list(doc.iter(tag=r'{http://www.elsevier.com/xml/common/dtd}abstract'))
abs_element = None
for abs_ele in abs_elements:
try:
if abs_ele.attrib['class'] == 'author':
abs_element = abs_ele
except KeyError:
# print('[ERROR] keyword "class" does not exist!')
pass
abs_paras = list()
try:
for abs_ele in (abs_element.iter(tag=r'{http://www.elsevier.com/xml/common/dtd}simple-para')):
abs_text = list()
for txt in abs_ele.itertext():
txt_s = txt.strip()
if txt_s:
abs_text.append(txt)
abs_paras.append(''.join(abs_text))
except Exception:
# print('[Warning] Abstract is not found!')
article_component_check.abstract = False
article.abstract = abs_paras
# get tables and figures
try:
table_elements = list(doc.iter(tag=r'{http://www.elsevier.com/xml/common/dtd}table'))
figure_elements = list(doc.iter(tag=r'{http://www.elsevier.com/xml/common/dtd}figure'))
section_list = []
for table_element in table_elements:
tbl = xml_table_extract_elsevier(table_element)
tbl_element = ArticleElement(type=ArticleElementType.TABLE, content=tbl)
section_list.append(tbl_element)
for figure_element in figure_elements:
fig = xml_figure_extract(figure_element)
if not fig.caption:
continue
fig_element = ArticleElement(type=ArticleElementType.FIGURE, content=fig)
section_list.append(fig_element)
except Exception:
section_list = []
# get article content
try:
sections_element = list(doc.iter(tag=r'{http://www.elsevier.com/xml/common/dtd}sections'))[-1]
section_list += xml_section_extract_elsevier(section_root=sections_element)
except Exception:
# print('[Warning] No section is detected!')
article_component_check.sections = False
new_section_list = list()
for i in range(len(section_list)):
if section_list[i].type == ArticleElementType.SECTION_ID:
continue
elif section_list[i].type == ArticleElementType.SECTION_TITLE:
if i > 0 and section_list[i - 1].type == ArticleElementType.SECTION_ID:
combined_section_title = section_list[i - 1].content + ' ' + section_list[i].content
new_section_list.append(
ArticleElement(type=ArticleElementType.SECTION_TITLE, content=combined_section_title)
)
else:
new_section_list.append(section_list[i])
else:
new_section_list.append(section_list[i])
article.sections = new_section_list
return article, article_component_check
@staticmethod
def article_construct_xml_acs(root: ET.Element, doi: str):
article = Article()
article_component_check = ArticleComponentCheck()
article.doi = doi
article.publisher = 'acs'
front = root.findall('front')[0]
title_text = list()
for element in front.iter(tag='article-title'):
title_text.append(element.text)
title = format_text(''.join(title_text))
article.title = title
abs_text = list()
for element in front.iter(tag='abstract'):
if not element.attrib:
for txt in element.itertext():
abs_text.append(txt)
abstract = format_text(''.join(abs_text))
if not abstract:
# print('[Warning] Abstract is not found!')
article_component_check.abstract = False
article.abstract = abstract
# get article content
body = root.findall('body')[0]
section_list = xml_section_extract_acs(body)
if not section_list:
# print('[Warning] No section is detected!')
article_component_check.sections = False
new_section_list = list()
for i in range(len(section_list)):
if section_list[i].type == ArticleElementType.SECTION_ID:
continue
elif section_list[i].type == ArticleElementType.SECTION_TITLE:
if i > 0 and section_list[i - 1].type == ArticleElementType.SECTION_ID:
combined_section_title = section_list[i - 1].content + ' ' + section_list[i].content
new_section_list.append(
ArticleElement(type=ArticleElementType.SECTION_TITLE,
content=combined_section_title)
)
else:
new_section_list.append(section_list[i])
else:
new_section_list.append(section_list[i])
article.sections = new_section_list
return article, article_component_check
def check_html_publisher(soup: bs4.BeautifulSoup):
publisher = None
try:
if soup.html.attrs['xmlns:rsc'] == 'urn:rsc.org':
publisher = 'rsc'
except KeyError:
pass
metas = soup.find_all('meta')
title = soup.find_all('title')
pub_web = None
if title and len(title) >= 1:
pub_web = title[0].text.strip().split(' - ')[-1]
for meta in metas:
try:
if meta['name'].lower() == 'dc.publisher' and \
meta['content'] == 'Springer':
publisher = 'springer'
break
elif meta['name'].lower() == 'dc.publisher' and \
meta['content'] == 'Nature Publishing Group':
publisher = 'nature'
break
elif meta['name'].lower() == 'citation_publisher' and \
'<NAME> & Sons, Ltd' in meta['content']:
publisher = 'wiley'
break
elif meta['name'].lower() == 'dc.publisher' and \
(meta['content'] == 'American Institute of PhysicsAIP' or ('AIP Publishing' in meta['content'])):
publisher = 'aip'
break
elif meta['name'].lower() == 'dc.publisher' and \
meta['content'].strip() == 'American Chemical Society':
publisher = 'acs'
break
elif meta['name'].lower() == 'dc.publisher' and \
meta['content'].strip() == 'The Royal Society of Chemistry':
publisher = 'rsc'
break
elif meta['name'].lower() == 'dc.publisher' and \
meta['content'].strip() == 'American Association for the Advancement of Science':
publisher = 'aaas'
break
elif meta['name'].lower() == 'dc.publisher' and \
meta['content'].strip() == 'World Scientific Publishing Company':
publisher = 'cjps'
elif meta['name'] == 'citation_springer_api_url':
publisher = 'springer'
break
except KeyError:
pass
    if not publisher and pub_web and pub_web.lower() == 'sciencedirect':
publisher = 'elsevier'
if not publisher:
raise ValueError('Publisher not found!')
return publisher
def check_xml_publisher(root: ET.Element):
publisher = None
tag = root.tag
if 'elsevier' in tag:
publisher = 'elsevier'
else:
for child in root.iter():
if child.tag == 'publisher-name':
text = child.text
text = format_text(text)
if text == 'American Chemical Society':
publisher = 'acs'
break
if not publisher:
raise ValueError('Publisher not found!')
return publisher
def search_html_doi_publisher(soup, publisher=None):
if not publisher:
publisher = check_html_publisher(soup)
if publisher == 'acs':
doi_sec = soup.find_all('div', {'class': 'article_header-doiurl'})
doi_url = doi_sec[0].text.strip().lower()
elif publisher == 'wiley':
doi_sec = soup.find_all('a', {'class': 'epub-doi'})
doi_url = doi_sec[0].text.strip().lower()
elif publisher == 'springer':
doi_spans = soup.find_all('span')
doi_sec = None
for span in doi_spans:
span_class = span.get("class", [''])
span_class = ' '.join(span_class) if isinstance(span_class, list) else span_class
if 'bibliographic-information__value' in span_class and 'doi.org' in span.text:
doi_sec = span
doi_url = doi_sec.text.strip().lower()
elif publisher == 'rsc':
doi_sec = soup.find_all('div', {'class': 'article_info'})
doi_url = doi_sec[0].a.text.strip().lower()
elif publisher == 'elsevier':
doi_sec = soup.find_all('a', {'class': 'doi'})
doi_url = doi_sec[0].text.strip().lower()
elif publisher == 'nature':
doi_link = soup.find_all('a', {'data-track-action': 'view doi'})[0]
doi_url = doi_link.text.strip().lower()
elif publisher == 'aip':
doi_sec = soup.find_all('div', {'class': 'publicationContentCitation'})
doi_url = doi_sec[0].text.strip().lower()
elif publisher == 'aaas':
doi_sec = soup.find_all('div', {'class': 'self-citation'})
doi_url = doi_sec[0].a.text.strip().split()[-1].strip().lower()
else:
raise ValueError('Unknown publisher')
doi_url_prefix = "https://doi.org/"
try:
doi = doi_url[doi_url.index(doi_url_prefix) + len(doi_url_prefix):].strip()
except ValueError:
doi = doi_url
return doi, publisher
def search_xml_doi_publisher(root, publisher=None):
if not publisher:
publisher = check_xml_publisher(root)
if publisher == 'elsevier':
doi_sec = list(root.iter('{http://www.elsevier.com/xml/xocs/dtd}doi'))
doi = doi_sec[0].text.strip().lower()
elif publisher == 'acs':
doi_sec = list(root.iter('article-id'))
doi = doi_sec[0].text.strip().lower()
else:
raise ValueError('Unknown publisher')
return doi, publisher
def parse_html(file_path: Optional[str] = None,
html_content: Optional[str] = None) -> Tuple[Article, ArticleComponentCheck]:
"""
Parse html files
Parameters
----------
file_path: File name
html_content: html content. Cannot pass values to both file_path and html_content
Returns
-------
article: Article, component check: ArticleComponentCheck
"""
assert (file_path is None) != (html_content is None)
if file_path is not None:
file_path = os.path.normpath(file_path)
with open(file_path, 'r', encoding='utf-8') as f:
contents = f.read()
else:
contents = html_content
soup = BeautifulSoup(contents, 'lxml')
# get publisher and doi
doi, publisher = search_html_doi_publisher(soup)
if publisher in ['elsevier', 'rsc']:
# allow illegal nested <p>
# soup = BeautifulSoup(contents, 'html.parser')
# allow nested <span>
soup = BeautifulSoup(contents, 'html5lib')
article_construct_func = getattr(ArticleFunctions, f'article_construct_html_{publisher}')
article, component_check = article_construct_func(soup=soup, doi=doi)
return article, component_check
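# Example usage (illustrative; the file path below is hypothetical):
#
#   article, checks = parse_html(file_path="papers/some_article.html")
#   print(article.doi, article.publisher, article.title)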
def parse_xml(file_path: str) -> Tuple[Article, ArticleComponentCheck]:
"""
Parse xml files
Parameters
----------
file_path: File name
Returns
-------
article: Article, component check: ArticleComponentCheck
"""
file_path = os.path.normpath(file_path)
tree = ET.parse(file_path)
root = tree.getroot()
# get the publisher
doi, publisher = search_xml_doi_publisher(root)
article_construct_func = getattr(ArticleFunctions, f'article_construct_xml_{publisher}')
article, component_check = article_construct_func(root=root, doi=doi)
return article, component_check
| [
"bs4.BeautifulSoup",
"os.path.normpath",
"xml.etree.ElementTree.parse"
] | [((25588, 25619), 'bs4.BeautifulSoup', 'BeautifulSoup', (['contents', '"""lxml"""'], {}), "(contents, 'lxml')\n", (25601, 25619), False, 'from bs4 import BeautifulSoup\n'), ((26390, 26417), 'os.path.normpath', 'os.path.normpath', (['file_path'], {}), '(file_path)\n', (26406, 26417), False, 'import os\n'), ((26430, 26449), 'xml.etree.ElementTree.parse', 'ET.parse', (['file_path'], {}), '(file_path)\n', (26438, 26449), True, 'import xml.etree.ElementTree as ET\n'), ((25416, 25443), 'os.path.normpath', 'os.path.normpath', (['file_path'], {}), '(file_path)\n', (25432, 25443), False, 'import os\n'), ((25880, 25915), 'bs4.BeautifulSoup', 'BeautifulSoup', (['contents', '"""html5lib"""'], {}), "(contents, 'html5lib')\n", (25893, 25915), False, 'from bs4 import BeautifulSoup\n')] |
r"""
ak135-f
=======
The ak135-f Earth model [Kennett1995]_ is a variant of the ak135 Earth model
with the density and Q model from [Montagner1996]_ added.
The ak135-f Earth model is a one-dimensional model representing average Earth properties as a
function of depth. The model includes the depth, density, seismic velocities and attenuation (Q)
on the boundaries of several Earth layers. It's available through IRIS Data Services Products
[IRIS2011]_ in a csv file (comma-separated values). The data is loaded into
:class:`pandas.DataFrame` objects.
"""
import rockhound as rh
import matplotlib.pyplot as plt
# Load ak135f into a DataFrame
ak135f = rh.fetch_ak135f()
# Plot density and velocities
fig, axes = plt.subplots(1, 2, figsize=(9, 5), sharey=True)
fig.suptitle("ak135-f")
ax = axes[0]
ak135f.plot("density", "depth", legend=False, ax=ax)
ax.invert_yaxis()
ax.set_xlabel("Density [g/cm³]")
ax.set_ylabel("Depth [km]")
ax.grid()
ax = axes[1]
for velocity in ["Vp", "Vs"]:
ak135f.plot(velocity, "depth", legend=False, ax=ax, label=velocity)
ax.grid()
ax.legend()
ax.set_xlabel("Velocity [km/s]")
plt.show()
| [
"rockhound.fetch_ak135f",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.show"
] | [((654, 671), 'rockhound.fetch_ak135f', 'rh.fetch_ak135f', ([], {}), '()\n', (669, 671), True, 'import rockhound as rh\n'), ((715, 762), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(2)'], {'figsize': '(9, 5)', 'sharey': '(True)'}), '(1, 2, figsize=(9, 5), sharey=True)\n', (727, 762), True, 'import matplotlib.pyplot as plt\n'), ((1112, 1122), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1120, 1122), True, 'import matplotlib.pyplot as plt\n')] |
from doubly_linked_list import DoublyLinkedList
class Stack:
def __init__(self):
self.size = 0
self.storage = DoublyLinkedList()
def push(self, value):
pass
def pop(self):
pass
def len(self):
pass
| [
"doubly_linked_list.DoublyLinkedList"
] | [((132, 150), 'doubly_linked_list.DoublyLinkedList', 'DoublyLinkedList', ([], {}), '()\n', (148, 150), False, 'from doubly_linked_list import DoublyLinkedList\n')] |
# Micropython a9g example
# Source: https://github.com/pulkin/micropython
# Author: pulkin
# Demonstrates how to send and receive SMS
import cellular
import time
global flag
flag = 1
def sms_handler(evt):
global flag
if evt == cellular.SMS_SENT:
print("SMS sent")
elif evt == cellular.SMS_RECEIVED:
print("SMS received, attempting to read ...")
ls = cellular.SMS.list()
print(ls[-1])
flag = 0
cellular.on_sms(sms_handler)
cellular.SMS("8800", "asd").send()
print("Doing something important ...")
while flag:
time.sleep(1)
print("Done!")
| [
"cellular.SMS.list",
"cellular.on_sms",
"time.sleep",
"cellular.SMS"
] | [((449, 477), 'cellular.on_sms', 'cellular.on_sms', (['sms_handler'], {}), '(sms_handler)\n', (464, 477), False, 'import cellular\n'), ((569, 582), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (579, 582), False, 'import time\n'), ((478, 505), 'cellular.SMS', 'cellular.SMS', (['"""8800"""', '"""asd"""'], {}), "('8800', 'asd')\n", (490, 505), False, 'import cellular\n'), ((389, 408), 'cellular.SMS.list', 'cellular.SMS.list', ([], {}), '()\n', (406, 408), False, 'import cellular\n')] |
import pandas as pd
import matplotlib.pyplot as plt
df = pd.read_csv("output_n_perturbations.csv")
fig = plt.figure(figsize=(5,3))
# x = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
x = [1, 2, 3, 4, 5]
y1_cora = [0.36, 0.63, 0.78, 0.83, 0.84]
y2_cora = [0.40, 0.53, 0.64, 0.65, 0.62]
# y1_cora = [0.36, 0.63, 0.78, 0.83, 0.84, 0.84, 0.88, 0.91, 0.89, 0.89]
# y2_cora = [0.40, 0.53, 0.64, 0.65, 0.62, 0.64, 0.58, 0.66, 0.62, 0.63]
# y3_cora = [0.33, 0.64, 0.79, 0.82, 0.84, 0.87, 0.87, 0.89, 0.89, 0.90]
# y4_cora = [0.37, 0.49, 0.65, 0.64, 0.67, 0.66, 0.65, 0.68, 0.64, 0.65]
y1_citeseer = [0.41, 0.55, 0.65, 0.7, 0.79]
y2_citeseer = [0.41, 0.52, 0.56, 0.61, 0.65]
# y3_citeseer = []
# y4_citeseer = []
y1_pubmed = [0.35,0.53,0.67,0.74,0.84]
y2_pubmed = [0.4,0.49,0.58,0.65,0.73]
# y3_pubmed = []
# y4_pubmed = []
# ax = plt.subplot(1,3,1)
# ax.plot(x, y1_cora, '--', c='blue')
# ax.plot(x, y2_cora, c='blue')
# ax.set_title('Cora')
# ax.set_xticks(x)
# ax.set_xticklabels(x)
# ax.set_xlabel("# of allowed perturbations($\Delta$)")
# ax.set_ylabel("Success rate")
ax = plt.subplot(1,2, 1)
ax.plot(x, y1_citeseer, '--', c='blue')
ax.plot(x, y2_citeseer, c='blue')
ax.set_title('Citeseer')
ax.set_xticks(x)
ax.set_xticklabels(x)
ax.set_xlabel(r"# of allowed perturbations($\Delta$)")
ax.set_ylabel("Success rate")
ax = plt.subplot(1,2, 2)
ax.plot(x, y1_pubmed, '--', c='blue')
ax.plot(x, y2_pubmed, c='blue')
ax.set_title('PubMed')
ax.set_xticks(x)
ax.set_xticklabels(x)
ax.set_xlabel(r"# of allowed perturbations($\Delta$)")
ax.set_ylabel("Success rate")
# plt.plot(x, y3, '--', c='g')
# plt.plot(x, y4, c='g')
# plt.legend(['GCN-Nettack-A', 'LAT-GCN-Nettack-A', 'GCN-Nettack-AX', 'LAT-GCN-Nettack-AX'], fontsize=8)
# plt.title('Cora')
# plt.xlabel('# of allowed perturbations ($\Delta$)')
# plt.ylabel('Success rate')
fig.legend(['GCN-Nettack-A', 'LAT-GCN-Nettack-A'], ncol=2, loc=8)
plt.tight_layout()
plt.show()
| [
"pandas.read_csv",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.tight_layout",
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.show"
] | [((61, 102), 'pandas.read_csv', 'pd.read_csv', (['"""output_n_perturbations.csv"""'], {}), "('output_n_perturbations.csv')\n", (72, 102), True, 'import pandas as pd\n'), ((110, 136), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(5, 3)'}), '(figsize=(5, 3))\n', (120, 136), True, 'import matplotlib.pyplot as plt\n'), ((1090, 1110), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(2)', '(1)'], {}), '(1, 2, 1)\n', (1101, 1110), True, 'import matplotlib.pyplot as plt\n'), ((1347, 1367), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(2)', '(2)'], {}), '(1, 2, 2)\n', (1358, 1367), True, 'import matplotlib.pyplot as plt\n'), ((1930, 1948), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (1946, 1948), True, 'import matplotlib.pyplot as plt\n'), ((1950, 1960), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1958, 1960), True, 'import matplotlib.pyplot as plt\n')] |
import logging
class Reporter(object):
def __init__(self):
self.logger = logging.getLogger(__name__)
self.creates = 0
self.updates = 0
self.deletes = 0
self.skips = 0
def add(self, data):
self.creates += 1
def update(self, data):
self.updates += 1
def delete(self, data):
self.deletes += 1
def skip(self):
self.skips += 1
def close(self):
        self.logger.info("Adds: {}".format(self.creates))
        self.logger.info("Updates: {}".format(self.updates))
        self.logger.info("Skips: {}".format(self.skips))
        self.logger.info("Deletes: {}".format(self.deletes))
"logging.getLogger"
] | [((87, 114), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (104, 114), False, 'import logging\n')] |
from pytest import raises
from twill import parse
from .utils import execute_script
def test_variable_substitution():
fut = parse.variable_substitution
args = (dict(foo=7), dict(bar=13, baz='wall'))
assert fut("${foo} * ${bar} bottles on the ${baz}!",
*args) == "7 * 13 bottles on the wall!"
assert fut("${foo * bar}", *args) == str(7 * 13)
with raises(ZeroDivisionError):
fut("${1/0}", {}, {})
def test_variables_script(url):
execute_script('test_variables.twill', initial_url=url)
| [
"pytest.raises"
] | [((385, 410), 'pytest.raises', 'raises', (['ZeroDivisionError'], {}), '(ZeroDivisionError)\n', (391, 410), False, 'from pytest import raises\n')] |
import torch # used to extract data from .pt files
import numpy as np
from sklearn.metrics import confusion_matrix,accuracy_score
import seaborn as sn
import pandas as pd
import matplotlib.pyplot as plt
#Extract data from pt files
(x_train, y_train, x_test, y_test)=torch.load('mnist.pt')
#Convert X_train and Y_train into numpy array
X_train = np.array(x_train).reshape(1000,784)#reshaping into 1000 rows of 784 feature vector
Y_train = np.array(y_train).reshape(1000,1)#reshaping into 1000 rows
#Convert X_test and Y_test into numpy array
X_test = np.array(x_test).reshape(100,784)#reshaping X_test into rows of 784 feature vector
Y_test = np.array(y_test).reshape(100,1)#reshaping Y_test into rows of 100 rows
k = 5 # define k
temp = [] # array created to store euclidean distances of a test image to all train images, it refreshes after each test image
resultf = [] # array created to store the final predictions
for i in X_test:
for j in X_train:
temp.append(np.linalg.norm(i-j)) #appending euclidean dist to temp
sort = np.argsort(temp)
resulti = np.zeros(10) # creating a classification vector to count the nearest k neighbours
for p in range(k):
resulti[Y_train[sort[p]]]+=1 # counting the repitition of a class in k neighbours
resultf.append(np.argmax(resulti)) # appending the latest result to the final array
temp = []
# reshaping the final predictions into a 100*1 vector
resultf = np.array(resultf).reshape(100,1)
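# (Illustrative aside, not used above: the same neighbour search can be done
#  without the explicit Python loops by broadcasting the pairwise distances.)
# dists = np.linalg.norm(X_test[:, None, :] - X_train[None, :, :], axis=2)  # shape (100, 1000)
# nearest = np.argsort(dists, axis=1)[:, :k]  # indices of the k nearest training images per test image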
# creating confusion matrix using predictions and y_test
mat = confusion_matrix(Y_test,resultf)
# measuring accuracy of the predictions
acc = accuracy_score(Y_test,resultf)
print(acc)
#converting confusion matrix into a dataframe
df_cm = pd.DataFrame(mat, index = [i for i in "0123456789"],columns = [i for i in "0123456789"])
#plotting the confusion matrix using seaborn library
plt.figure(figsize = (10,7))
sn.heatmap(df_cm, annot=True)
plt.show()
| [
"matplotlib.pyplot.show",
"torch.load",
"numpy.argmax",
"seaborn.heatmap",
"numpy.argsort",
"numpy.array",
"matplotlib.pyplot.figure",
"numpy.zeros",
"numpy.linalg.norm",
"pandas.DataFrame",
"sklearn.metrics.accuracy_score",
"sklearn.metrics.confusion_matrix"
] | [((275, 297), 'torch.load', 'torch.load', (['"""mnist.pt"""'], {}), "('mnist.pt')\n", (285, 297), False, 'import torch\n'), ((1579, 1612), 'sklearn.metrics.confusion_matrix', 'confusion_matrix', (['Y_test', 'resultf'], {}), '(Y_test, resultf)\n', (1595, 1612), False, 'from sklearn.metrics import confusion_matrix, accuracy_score\n'), ((1660, 1691), 'sklearn.metrics.accuracy_score', 'accuracy_score', (['Y_test', 'resultf'], {}), '(Y_test, resultf)\n', (1674, 1691), False, 'from sklearn.metrics import confusion_matrix, accuracy_score\n'), ((1761, 1850), 'pandas.DataFrame', 'pd.DataFrame', (['mat'], {'index': "[i for i in '0123456789']", 'columns': "[i for i in '0123456789']"}), "(mat, index=[i for i in '0123456789'], columns=[i for i in\n '0123456789'])\n", (1773, 1850), True, 'import pandas as pd\n'), ((1907, 1934), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 7)'}), '(figsize=(10, 7))\n', (1917, 1934), True, 'import matplotlib.pyplot as plt\n'), ((1937, 1966), 'seaborn.heatmap', 'sn.heatmap', (['df_cm'], {'annot': '(True)'}), '(df_cm, annot=True)\n', (1947, 1966), True, 'import seaborn as sn\n'), ((1968, 1978), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1976, 1978), True, 'import matplotlib.pyplot as plt\n'), ((1076, 1092), 'numpy.argsort', 'np.argsort', (['temp'], {}), '(temp)\n', (1086, 1092), True, 'import numpy as np\n'), ((1108, 1120), 'numpy.zeros', 'np.zeros', (['(10)'], {}), '(10)\n', (1116, 1120), True, 'import numpy as np\n'), ((358, 375), 'numpy.array', 'np.array', (['x_train'], {}), '(x_train)\n', (366, 375), True, 'import numpy as np\n'), ((452, 469), 'numpy.array', 'np.array', (['y_train'], {}), '(y_train)\n', (460, 469), True, 'import numpy as np\n'), ((568, 584), 'numpy.array', 'np.array', (['x_test'], {}), '(x_test)\n', (576, 584), True, 'import numpy as np\n'), ((662, 678), 'numpy.array', 'np.array', (['y_test'], {}), '(y_test)\n', (670, 678), True, 'import numpy as np\n'), ((1325, 1343), 'numpy.argmax', 'np.argmax', (['resulti'], {}), '(resulti)\n', (1334, 1343), True, 'import numpy as np\n'), ((1477, 1494), 'numpy.array', 'np.array', (['resultf'], {}), '(resultf)\n', (1485, 1494), True, 'import numpy as np\n'), ((1009, 1030), 'numpy.linalg.norm', 'np.linalg.norm', (['(i - j)'], {}), '(i - j)\n', (1023, 1030), True, 'import numpy as np\n')] |
from config import const
from textwrap import TextWrapper
wrapper = TextWrapper(width=1000000000, expand_tabs=True, tabsize=const.TABSIZE, replace_whitespace=True,
drop_whitespace=False)
def tab_spaces(pos):
return const.TABSIZE - (pos % const.TABSIZE)
class VisualLine:
def __init__(self, text: str = ''):
self._visual_text = ''
self._logical_text = ''
self._tabs = []
if text:
self.set_text(text)
def set_text(self, text: str):
self._tabs = [i for i, c in enumerate(text) if c == '\t']
self._logical_text = text
self._visual_text = wrapper.fill(text)
extra = 0
for i in range(len(self._tabs)):
self._tabs[i] += extra
spaces = tab_spaces(self._tabs[i])
extra += spaces - 1
def insert(self, pos: int, text: str):
if pos < 0 or pos > self.get_logical_len():
return False
self.set_text(self._logical_text[0:pos] + text + self._logical_text[pos:])
return True
def append(self, text: str):
return self.insert(self.get_logical_len(), text)
def clip_coords(self, pos: int, n: int):
if pos < 0 or pos >= len(self._logical_text):
return pos, 0
right = pos + n
if right > len(self._logical_text):
right = len(self._logical_text)
return pos, right-pos
def erase(self, pos: int, n: int = 1):
if pos < 0 or pos >= len(self._logical_text):
return False
right = pos + n
if right > len(self._logical_text):
right = len(self._logical_text)
self.set_text(self._logical_text[0:pos] + self._logical_text[right:])
return True
def get_logical_len(self):
return len(self._logical_text)
def get_logical_text(self):
return self._logical_text
def get_visual_len(self):
return len(self._visual_text)
def get_visual_text(self):
return self._visual_text
def get_visual_index(self, pos: int):
if pos < 0:
return -1
for tab in self._tabs:
if pos <= tab:
break
pos += tab_spaces(tab) - 1
if pos > len(self._visual_text):
return -1
return pos
def get_logical_index(self, pos: int):
if pos > len(self._visual_text) or pos < 0:
return -1
for tab in reversed(self._tabs):
if pos > tab:
spaces = tab_spaces(tab)
nxt = tab + spaces
if pos >= nxt:
pos -= spaces - 1
else:
pos = tab
return pos
def split(self, pos: int):
if pos < 0 or pos > self.get_logical_len():
raise RuntimeError("Invalid line split")
if pos == self.get_logical_len():
return VisualLine()
text = self.get_logical_text()
next_line = VisualLine(text[pos:])
self.erase(pos, len(text) - pos)
return next_line
def extend(self, line):
if isinstance(line, VisualLine):
self.set_text(self._logical_text + line.get_logical_text())
elif isinstance(line, str):
self.set_text(self._logical_text + line)
else:
raise RuntimeError('Invalid type in VisualLine.extend')
def __repr__(self):
v = self._visual_text.replace(' ', '@')
return f'"{self._logical_text}" "{v}"'
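# Example (illustrative; assumes const.TABSIZE == 4):
#
#   line = VisualLine("a\tb")
#   line.get_visual_text()     # "a   b"  -- the tab expands to 3 spaces here
#   line.get_visual_index(2)   # 4: logical 'b' sits at visual column 4
#   line.get_logical_index(4)  # 2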
| [
"textwrap.TextWrapper"
] | [((69, 191), 'textwrap.TextWrapper', 'TextWrapper', ([], {'width': '(1000000000)', 'expand_tabs': '(True)', 'tabsize': 'const.TABSIZE', 'replace_whitespace': '(True)', 'drop_whitespace': '(False)'}), '(width=1000000000, expand_tabs=True, tabsize=const.TABSIZE,\n replace_whitespace=True, drop_whitespace=False)\n', (80, 191), False, 'from textwrap import TextWrapper\n')] |
# Generated by Django 3.0.14 on 2022-01-03 17:59
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('recipes', '0007_auto_20220102_1228'),
]
operations = [
migrations.AlterField(
model_name='recipe',
name='mainImage',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.PROTECT, to='recipes.RecipeImage'),
),
]
| [
"django.db.models.ForeignKey"
] | [((373, 476), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'null': '(True)', 'on_delete': 'django.db.models.deletion.PROTECT', 'to': '"""recipes.RecipeImage"""'}), "(null=True, on_delete=django.db.models.deletion.PROTECT,\n to='recipes.RecipeImage')\n", (390, 476), False, 'from django.db import migrations, models\n')] |
import torch
from torch import nn
from model_train import LSTMnet, features_input,hidden_dim,layer,n_class,LR,class_weight
from sklearn.metrics import confusion_matrix,classification_report
import matplotlib.pyplot as plt
import seaborn as sns
def test(X,Y,model):
dic_result={'0->0':0,'0->1':0,'0->2':0,'1->0':0,'1->1':0,'1->2':0,'2->0':0,'2->1':0,'2->2':0}
test_output = model(X)
predict_y = torch.max(test_output, 1)[1].numpy()
for i in range(len(Y[0])):
if Y[0][i] == 0:
if predict_y[i] == 1:
dic_result['0->1'] = dic_result['0->1']+1
elif predict_y[i] == 2:
dic_result['0->2'] = dic_result['0->2'] + 1
elif predict_y[i] == 0:
dic_result['0->0'] = dic_result['0->0'] + 1
elif Y[0][i] == 1:
if predict_y[i] == 1:
dic_result['1->1'] = dic_result['1->1']+1
elif predict_y[i] == 2:
dic_result['1->2'] = dic_result['1->2'] + 1
elif predict_y[i] == 0:
dic_result['1->0'] = dic_result['1->0'] + 1
elif Y[0][i] == 2:
if predict_y[i] == 1:
dic_result['2->1'] = dic_result['2->1']+1
elif predict_y[i] == 2:
dic_result['2->2'] = dic_result['2->2'] + 1
elif predict_y[i] == 0:
dic_result['2->0'] = dic_result['2->0'] + 1
correct = (predict_y == Y[0].numpy()).astype(int).sum()
totoal = Y[0].size(0)
accuracy = float(correct) / float(totoal)
print('test_dataset: accuracy:{:<4.2f} | correct:{:<2d} | totoal:{:<2d}'.format(accuracy,correct, totoal))
print(dic_result)
    row_sums = [dic_result['0->0'] + dic_result['0->1'] + dic_result['0->2'],
                dic_result['1->0'] + dic_result['1->1'] + dic_result['1->2'],
                dic_result['2->0'] + dic_result['2->1'] + dic_result['2->2']]
    matrices = torch.tensor(
        [[dic_result['0->0'] / row_sums[0], dic_result['0->1'] / row_sums[0], dic_result['0->2'] / row_sums[0]],
         [dic_result['1->0'] / row_sums[1], dic_result['1->1'] / row_sums[1], dic_result['1->2'] / row_sums[1]],
         [dic_result['2->0'] / row_sums[2], dic_result['2->1'] / row_sums[2], dic_result['2->2'] / row_sums[2]]])
print(matrices)
print(classification_report(Y[0], predict_y))
confu_matrix = confusion_matrix(Y[0],predict_y)
print(confu_matrix)
# plt.matshow(confu_matrix)
# plt.colorbar()
# plt.show()
f,ax=plt.subplots(figsize=(3,3))
sns.heatmap(confu_matrix,annot=True,linewidth=0.5,linecolor="red",fmt=".0f",ax=ax)
plt.xlabel("y_pred")
plt.ylabel("y_true")
plt.show()
if __name__ =='__main__':
model_load = torch.load('../model/model.pkl')
X_test_1 = torch.load('../data/test_X.pkl')
Y_test_1 = torch.load('../data/test_Y.pkl')
X_test_2 = torch.load('../data/test_X_2.pkl')
Y_test_2 = torch.load('../data/test_Y_2.pkl')
X_test_3 = torch.load('../data/test_X_3.pkl')
Y_test_3 = torch.load('../data/test_Y_3.pkl')
X_test_4 = torch.load('../data/test_X_4.pkl')
Y_test_4 = torch.load('../data/test_Y_4.pkl')
X_test_5 = torch.load('../data/test_X_5.pkl')
Y_test_5 = torch.load('../data/test_Y_5.pkl')
# print("test_1")
# test(X_test,Y_test,model_load)
# print("test_2")
# test(X_test_2,Y_test_2,model_load)
# print("test_3")
# test(X_test_3,Y_test_3,model_load)
# print("test_4")
# test(X_test_4,Y_test_4,model_load)
# print("test_5")
# test(X_test_5,Y_test_5,model_load)
X_test = torch.cat((X_test_1,X_test_2,X_test_3,X_test_4,X_test_5),0)
Y_test = torch.cat([Y_test_1, Y_test_2, Y_test_3, Y_test_4, Y_test_5],1)
test(X_test, Y_test, model_load)
| [
"matplotlib.pyplot.show",
"matplotlib.pyplot.ylabel",
"sklearn.metrics.classification_report",
"matplotlib.pyplot.xlabel",
"torch.load",
"torch.max",
"seaborn.heatmap",
"torch.tensor",
"matplotlib.pyplot.subplots",
"torch.cat",
"sklearn.metrics.confusion_matrix"
] | [((1904, 2199), 'torch.tensor', 'torch.tensor', (["[[dic_result['0->0'] / sum[0], dic_result['0->1'] / sum[0], dic_result[\n '0->2'] / sum[0]], [dic_result['1->0'] / sum[1], dic_result['1->1'] /\n sum[1], dic_result['1->2'] / sum[1]], [dic_result['2->0'] / sum[2], \n dic_result['2->1'] / sum[2], dic_result['2->2'] / sum[2]]]"], {}), "([[dic_result['0->0'] / sum[0], dic_result['0->1'] / sum[0], \n dic_result['0->2'] / sum[0]], [dic_result['1->0'] / sum[1], dic_result[\n '1->1'] / sum[1], dic_result['1->2'] / sum[1]], [dic_result['2->0'] /\n sum[2], dic_result['2->1'] / sum[2], dic_result['2->2'] / sum[2]]])\n", (1916, 2199), False, 'import torch\n'), ((2270, 2303), 'sklearn.metrics.confusion_matrix', 'confusion_matrix', (['Y[0]', 'predict_y'], {}), '(Y[0], predict_y)\n', (2286, 2303), False, 'from sklearn.metrics import confusion_matrix, classification_report\n'), ((2411, 2439), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(3, 3)'}), '(figsize=(3, 3))\n', (2423, 2439), True, 'import matplotlib.pyplot as plt\n'), ((2444, 2536), 'seaborn.heatmap', 'sns.heatmap', (['confu_matrix'], {'annot': '(True)', 'linewidth': '(0.5)', 'linecolor': '"""red"""', 'fmt': '""".0f"""', 'ax': 'ax'}), "(confu_matrix, annot=True, linewidth=0.5, linecolor='red', fmt=\n '.0f', ax=ax)\n", (2455, 2536), True, 'import seaborn as sns\n'), ((2532, 2552), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""y_pred"""'], {}), "('y_pred')\n", (2542, 2552), True, 'import matplotlib.pyplot as plt\n'), ((2558, 2578), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""y_true"""'], {}), "('y_true')\n", (2568, 2578), True, 'import matplotlib.pyplot as plt\n'), ((2584, 2594), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2592, 2594), True, 'import matplotlib.pyplot as plt\n'), ((2644, 2676), 'torch.load', 'torch.load', (['"""../model/model.pkl"""'], {}), "('../model/model.pkl')\n", (2654, 2676), False, 'import torch\n'), ((2693, 2725), 'torch.load', 'torch.load', (['"""../data/test_X.pkl"""'], {}), "('../data/test_X.pkl')\n", (2703, 2725), False, 'import torch\n'), ((2742, 2774), 'torch.load', 'torch.load', (['"""../data/test_Y.pkl"""'], {}), "('../data/test_Y.pkl')\n", (2752, 2774), False, 'import torch\n'), ((2791, 2825), 'torch.load', 'torch.load', (['"""../data/test_X_2.pkl"""'], {}), "('../data/test_X_2.pkl')\n", (2801, 2825), False, 'import torch\n'), ((2842, 2876), 'torch.load', 'torch.load', (['"""../data/test_Y_2.pkl"""'], {}), "('../data/test_Y_2.pkl')\n", (2852, 2876), False, 'import torch\n'), ((2893, 2927), 'torch.load', 'torch.load', (['"""../data/test_X_3.pkl"""'], {}), "('../data/test_X_3.pkl')\n", (2903, 2927), False, 'import torch\n'), ((2944, 2978), 'torch.load', 'torch.load', (['"""../data/test_Y_3.pkl"""'], {}), "('../data/test_Y_3.pkl')\n", (2954, 2978), False, 'import torch\n'), ((2995, 3029), 'torch.load', 'torch.load', (['"""../data/test_X_4.pkl"""'], {}), "('../data/test_X_4.pkl')\n", (3005, 3029), False, 'import torch\n'), ((3046, 3080), 'torch.load', 'torch.load', (['"""../data/test_Y_4.pkl"""'], {}), "('../data/test_Y_4.pkl')\n", (3056, 3080), False, 'import torch\n'), ((3097, 3131), 'torch.load', 'torch.load', (['"""../data/test_X_5.pkl"""'], {}), "('../data/test_X_5.pkl')\n", (3107, 3131), False, 'import torch\n'), ((3148, 3182), 'torch.load', 'torch.load', (['"""../data/test_Y_5.pkl"""'], {}), "('../data/test_Y_5.pkl')\n", (3158, 3182), False, 'import torch\n'), ((3518, 3582), 'torch.cat', 'torch.cat', (['(X_test_1, X_test_2, X_test_3, X_test_4, X_test_5)', '(0)'], {}), 
'((X_test_1, X_test_2, X_test_3, X_test_4, X_test_5), 0)\n', (3527, 3582), False, 'import torch\n'), ((3592, 3656), 'torch.cat', 'torch.cat', (['[Y_test_1, Y_test_2, Y_test_3, Y_test_4, Y_test_5]', '(1)'], {}), '([Y_test_1, Y_test_2, Y_test_3, Y_test_4, Y_test_5], 1)\n', (3601, 3656), False, 'import torch\n'), ((2210, 2248), 'sklearn.metrics.classification_report', 'classification_report', (['Y[0]', 'predict_y'], {}), '(Y[0], predict_y)\n', (2231, 2248), False, 'from sklearn.metrics import confusion_matrix, classification_report\n'), ((417, 442), 'torch.max', 'torch.max', (['test_output', '(1)'], {}), '(test_output, 1)\n', (426, 442), False, 'import torch\n')] |
import torch
from .gsp import gft, similarity, degree
from .gsp import laplacian as laplacian_matrix
def eigenvalues(train, metric, nNeighbor):
shiftOperator = shift_operator(train, removeSelfConnections=True, laplacian=True, nNeighbor=nNeighbor)
shiftOperator = (shiftOperator + shiftOperator.T)/2
w, v = gft(shiftOperator)
return w
def knn_without_sym(graph, nNeighbor, setting):
if setting == 'column':
graph = graph.T
nNeighbor = min(graph.shape[1], nNeighbor)
    kBiggest = torch.argsort(graph, 1)[:, int(-nNeighbor)]  # argsort each row; index of the nNeighbor-th largest entry per row
# Store the weigths of the kth closest neighbour of each row of graph
thresholds = graph[torch.arange(graph.shape[0]), kBiggest].reshape(-1,1)
# Create adjacency_matrix
adj = (graph >= thresholds) * 1.0
# Weighted adjacency matrix
adj = adj.type(torch.float32)
adj = adj * graph
if setting == 'column':
adj = adj.T
return adj
# Shift operator (nearest neighbors kept in row or column)
def shift_operator(datapoints, removeSelfConnections=False, laplacian=False, nNeighbor=None, setting='row'):
nPoint = datapoints.shape[0]
shiftOperator = similarity(datapoints, datapoints, "cosine")
if removeSelfConnections:
for i in range(nPoint):
shiftOperator[i, i] = 0
if nNeighbor:
shiftOperator = knn_without_sym(shiftOperator, nNeighbor, setting)
if laplacian:
shiftOperator = (shiftOperator + shiftOperator.T)/2
shiftOperator = laplacian_matrix(shiftOperator, "combinatorial")
return shiftOperator
def diffused(labels, graph, alpha, kappa):
D = degree(graph)
D = D ** (-1/2)
D = torch.diag(D)
graph = torch.mm(torch.mm(D, graph), D)
graph = alpha * torch.eye(graph.shape[0]) + graph
filters = graph.clone()
for loop in range(kappa):
filters = torch.matmul(filters, graph)
propagatedSignal = torch.matmul(filters, labels)
return propagatedSignal
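# Note (illustrative, not part of the original module): writing W for the
# weighted adjacency, D for its degree matrix and
# S = alpha * I + D^(-1/2) W D^(-1/2), the function above returns
# S^(kappa+1) @ labels, i.e. the label signal propagated through kappa+1
# applications of the normalized, self-loop-weighted graph filter.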
| [
"torch.eye",
"torch.mm",
"torch.argsort",
"torch.matmul",
"torch.diag",
"torch.arange"
] | [((1694, 1707), 'torch.diag', 'torch.diag', (['D'], {}), '(D)\n', (1704, 1707), False, 'import torch\n'), ((1944, 1973), 'torch.matmul', 'torch.matmul', (['filters', 'labels'], {}), '(filters, labels)\n', (1956, 1973), False, 'import torch\n'), ((515, 538), 'torch.argsort', 'torch.argsort', (['graph', '(1)'], {}), '(graph, 1)\n', (528, 538), False, 'import torch\n'), ((1729, 1747), 'torch.mm', 'torch.mm', (['D', 'graph'], {}), '(D, graph)\n', (1737, 1747), False, 'import torch\n'), ((1887, 1915), 'torch.matmul', 'torch.matmul', (['filters', 'graph'], {}), '(filters, graph)\n', (1899, 1915), False, 'import torch\n'), ((1777, 1802), 'torch.eye', 'torch.eye', (['graph.shape[0]'], {}), '(graph.shape[0])\n', (1786, 1802), False, 'import torch\n'), ((688, 716), 'torch.arange', 'torch.arange', (['graph.shape[0]'], {}), '(graph.shape[0])\n', (700, 716), False, 'import torch\n')] |
from django.shortcuts import render
from molo.profiles.admin import FrontendUsersModelAdmin, UserProfileModelAdmin
from molo.profiles.models import (
UserProfilesSettings, UserProfile, SecurityAnswer)
from wagtail.contrib.modeladmin.options import modeladmin_register
from wagtail.admin.site_summary import SummaryItem
from wagtail.core import hooks
class ProfileWarningMessage(SummaryItem):
order = 100
template = 'admin/profile_warning_message.html'
@hooks.register('construct_homepage_panels')
def profile_warning_message(request, panels):
profile_settings = UserProfilesSettings.for_site(request.site)
if not profile_settings.country_code and \
profile_settings.show_mobile_number_field:
        panels[:] = [ProfileWarningMessage(request)]
modeladmin_register(FrontendUsersModelAdmin)
modeladmin_register(UserProfileModelAdmin)
class AccessErrorMessage(SummaryItem):
order = 100
template = 'wagtail/access_error_message.html'
@hooks.register('construct_homepage_panels')
def add_access_error_message_panel(request, panels):
if UserProfile.objects.filter(user=request.user).exists() and \
not request.user.is_superuser:
if not request.user.profile.admin_sites.filter(
pk=request.site.pk).exists():
panels[:] = [AccessErrorMessage(request)]
@hooks.register('before_delete_page')
def before_delete_security_question(request, page):
if SecurityAnswer.objects.filter(question_id=page.id):
return render(
request, 'admin/security_question_delete_warrning.html', {
'page': page,
'parent_id': page.get_parent().id
})
| [
"molo.profiles.models.UserProfilesSettings.for_site",
"wagtail.contrib.modeladmin.options.modeladmin_register",
"molo.profiles.models.SecurityAnswer.objects.filter",
"wagtail.core.hooks.register",
"molo.profiles.models.UserProfile.objects.filter"
] | [((470, 513), 'wagtail.core.hooks.register', 'hooks.register', (['"""construct_homepage_panels"""'], {}), "('construct_homepage_panels')\n", (484, 513), False, 'from wagtail.core import hooks\n'), ((785, 829), 'wagtail.contrib.modeladmin.options.modeladmin_register', 'modeladmin_register', (['FrontendUsersModelAdmin'], {}), '(FrontendUsersModelAdmin)\n', (804, 829), False, 'from wagtail.contrib.modeladmin.options import modeladmin_register\n'), ((830, 872), 'wagtail.contrib.modeladmin.options.modeladmin_register', 'modeladmin_register', (['UserProfileModelAdmin'], {}), '(UserProfileModelAdmin)\n', (849, 872), False, 'from wagtail.contrib.modeladmin.options import modeladmin_register\n'), ((984, 1027), 'wagtail.core.hooks.register', 'hooks.register', (['"""construct_homepage_panels"""'], {}), "('construct_homepage_panels')\n", (998, 1027), False, 'from wagtail.core import hooks\n'), ((1351, 1387), 'wagtail.core.hooks.register', 'hooks.register', (['"""before_delete_page"""'], {}), "('before_delete_page')\n", (1365, 1387), False, 'from wagtail.core import hooks\n'), ((583, 626), 'molo.profiles.models.UserProfilesSettings.for_site', 'UserProfilesSettings.for_site', (['request.site'], {}), '(request.site)\n', (612, 626), False, 'from molo.profiles.models import UserProfilesSettings, UserProfile, SecurityAnswer\n'), ((1447, 1497), 'molo.profiles.models.SecurityAnswer.objects.filter', 'SecurityAnswer.objects.filter', ([], {'question_id': 'page.id'}), '(question_id=page.id)\n', (1476, 1497), False, 'from molo.profiles.models import UserProfilesSettings, UserProfile, SecurityAnswer\n'), ((1088, 1133), 'molo.profiles.models.UserProfile.objects.filter', 'UserProfile.objects.filter', ([], {'user': 'request.user'}), '(user=request.user)\n', (1114, 1133), False, 'from molo.profiles.models import UserProfilesSettings, UserProfile, SecurityAnswer\n')] |
"""
A basic example of the use of ROCCH.
Class counts of covtype:
for val in np.unique(dataset.target):
print(val, sum(dataset.target==val))
1 211840
2 283301
3 35754
4 2747
5 9493
6 17367
7 20510
"""
import sys
import matplotlib.pyplot as plt
import numpy as np
from sklearn.datasets import fetch_covtype
from sklearn.metrics import roc_curve
from sklearn.tree import DecisionTreeClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.model_selection import train_test_split
from pycost import ROCCH
class_transform = {}
for i in (1, 3, 6):
class_transform[i] = 1
for i in (2, 4, 5, 7):
class_transform[i] = 2
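# With the per-class counts from the module docstring, this grouping yields
# 211840 + 35754 + 17367 = 264961 samples for binary class 1 and
# 283301 + 2747 + 9493 + 20510 = 316051 samples for class 2
# (roughly a 46%/54% split of the 581012 records).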
def fetch_covertype_binary():
covtype = fetch_covtype(shuffle=True)
# transform to binary
binary_target = np.array([class_transform[c] for c in covtype.target])
covtype.target = binary_target
return covtype
DATA_LIMIT = 100_000
def main(args):
    rocch = ROCCH()  # instantiated to showcase pycost's ROCCH, but not exercised further in this basic example
covtype = fetch_covertype_binary()
dtree = DecisionTreeClassifier(min_samples_leaf=4)
X, y = covtype.data[:DATA_LIMIT], covtype.target[:DATA_LIMIT]
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.25)
dtree.fit(X_train, y_train)
y_scored = dtree.predict_proba(X_test)[:,0]
(fpr, tpr, thresholds) = roc_curve(y_test, y_scored, pos_label=1)
plt.plot(fpr, tpr, 'k')
gnb = GaussianNB()
gnb.fit(X_train, y_train)
gnb_y_scored = gnb.predict_proba(X_test)[:,0]
(gnb_fpr, gnb_tpr, gnb_thresholds) = roc_curve(y_test, gnb_y_scored, pos_label=1)
plt.plot(gnb_fpr, gnb_tpr, 'r')
plt.show()
if __name__ == "__main__":
main( sys.argv[1:] )
| [
"sklearn.model_selection.train_test_split",
"sklearn.tree.DecisionTreeClassifier",
"pycost.ROCCH",
"matplotlib.pyplot.plot",
"numpy.array",
"sklearn.metrics.roc_curve",
"sklearn.naive_bayes.GaussianNB",
"sklearn.datasets.fetch_covtype",
"matplotlib.pyplot.show"
] | [((690, 717), 'sklearn.datasets.fetch_covtype', 'fetch_covtype', ([], {'shuffle': '(True)'}), '(shuffle=True)\n', (703, 717), False, 'from sklearn.datasets import fetch_covtype\n'), ((764, 818), 'numpy.array', 'np.array', (['[class_transform[c] for c in covtype.target]'], {}), '([class_transform[c] for c in covtype.target])\n', (772, 818), True, 'import numpy as np\n'), ((925, 932), 'pycost.ROCCH', 'ROCCH', ([], {}), '()\n', (930, 932), False, 'from pycost import ROCCH\n'), ((984, 1026), 'sklearn.tree.DecisionTreeClassifier', 'DecisionTreeClassifier', ([], {'min_samples_leaf': '(4)'}), '(min_samples_leaf=4)\n', (1006, 1026), False, 'from sklearn.tree import DecisionTreeClassifier\n'), ((1132, 1170), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X', 'y'], {'test_size': '(0.25)'}), '(X, y, test_size=0.25)\n', (1148, 1170), False, 'from sklearn.model_selection import train_test_split\n'), ((1280, 1320), 'sklearn.metrics.roc_curve', 'roc_curve', (['y_test', 'y_scored'], {'pos_label': '(1)'}), '(y_test, y_scored, pos_label=1)\n', (1289, 1320), False, 'from sklearn.metrics import roc_curve\n'), ((1325, 1348), 'matplotlib.pyplot.plot', 'plt.plot', (['fpr', 'tpr', '"""k"""'], {}), "(fpr, tpr, 'k')\n", (1333, 1348), True, 'import matplotlib.pyplot as plt\n'), ((1359, 1371), 'sklearn.naive_bayes.GaussianNB', 'GaussianNB', ([], {}), '()\n', (1369, 1371), False, 'from sklearn.naive_bayes import GaussianNB\n'), ((1493, 1537), 'sklearn.metrics.roc_curve', 'roc_curve', (['y_test', 'gnb_y_scored'], {'pos_label': '(1)'}), '(y_test, gnb_y_scored, pos_label=1)\n', (1502, 1537), False, 'from sklearn.metrics import roc_curve\n'), ((1542, 1573), 'matplotlib.pyplot.plot', 'plt.plot', (['gnb_fpr', 'gnb_tpr', '"""r"""'], {}), "(gnb_fpr, gnb_tpr, 'r')\n", (1550, 1573), True, 'import matplotlib.pyplot as plt\n'), ((1578, 1588), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1586, 1588), True, 'import matplotlib.pyplot as plt\n')] |
import torch
import numpy as np
from core import Value
def test_exp():
a = Value(4.0)
b = Value(2.0)
x = a
x = x.exp()
x.backward()
print(a.grad)
assert a.grad == np.exp(4.0)
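    # d/da exp(a) = exp(a), so the gradient at a = 4.0 must equal exp(4.0) ≈ 54.598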
# print("Testing Relu")
# test_relu()
# print("Relu Test Passed")
# print("Testing Sigmoid")
# test_sigmoid()
# print("Sigmoid Test Passed")
test_exp()
| [
"numpy.exp",
"core.Value"
] | [((80, 90), 'core.Value', 'Value', (['(4.0)'], {}), '(4.0)\n', (85, 90), False, 'from core import Value\n'), ((99, 109), 'core.Value', 'Value', (['(2.0)'], {}), '(2.0)\n', (104, 109), False, 'from core import Value\n'), ((197, 208), 'numpy.exp', 'np.exp', (['(4.0)'], {}), '(4.0)\n', (203, 208), True, 'import numpy as np\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
######################################################################
# Copyright (C) 2015 <NAME> #
# All rights reserved #
# Name: host.py
# Author: Canux <EMAIL> #
# Version: V1.0 #
# Time: Thu 20 Aug 2015 02:27:23 AM EDT
######################################################################
# Description:
######################################################################
from base import NagiosAuto
import os
class Host(NagiosAuto):
"""This class have three options to create host file in nagios.
You can specify the template you need.
If you create a lots of host file at one time, this is more effeciency.
"""
def __init__(self, *args, **kwargs):
super(Host, self).__init__(*args, **kwargs)
self.g_dir = self.args.path + "/hosts/"
self.host_conf = self.conf + "/host/"
self.area_conf = self.conf + "/area/"
self.area_list = ["as", "us", "eu"]
if self.__class__.__name__ == "Host":
self.logger.debug("==== END DEBUG ====")
def define_options(self):
"""Define some options used for create host."""
super(Host, self).define_options()
self.host_parser.add_argument("-t", "--types",
action="append",
dest="types",
required=False,
help="The host types, eg: ['ad', 'mii', \
'mii_win-primary', 'mii_win-bck', 'ijcore', \
'ijcore_win-primary', 'ijcore_win-bck']. \
Read template from types.cfg, \
read hostname and ip address from types.txt. \
Use [types@mode] for normal host. \
mode=0 use dns as address. \
mode=1 use ip as address. \
Use types@vcenter for mii and ijcore esx server. \
Use types@miisite for mii_win-primary database \
For ijcore the database is IJCORE. \
eg: -t 1234@0 -t 4567@1 -t mii@vcenter ijcore@vcenter\
-t mii_win-primary@mii_site -t ijcore_win-primary -t ad \
-t ijcore_win-bck -t mii_win-bck. \
If just remove servers, just put it in etc/host/host.txt.")
def get_area(self, hostname):
"""Get the area us/eu/as according to hostname."""
try:
locate = hostname[0:2].upper()
self.logger.debug("locate: {}".format(locate))
for area in self.area_list:
area_file = self.area_conf + area + ".txt"
self.logger.debug("area_file: {}".format(area_file))
f = open(area_file, "r")
lines = f.readlines()
for line in lines:
if locate in line:
self.logger.debug("area: {}".format(area))
return area
self.not_exist(locate)
except Exception as e:
self.error("get_area: %s" % e)
def get_vcenter(self, vcenter):
"""Get the vcenter for vmware."""
try:
vcenterfile = self.area_conf + "vmware.txt"
self.logger.debug("vcenterfile: {}".format(vcenterfile))
fr = open(vcenterfile, "r")
lines = fr.readlines()
for line in lines:
if vcenter.lower() in line:
vcenter = "".join(line.split())
self.logger.debug("vcenter: {}".format(vcenter))
return vcenter
self.not_exist("vcenter: %s" % vcenter)
self.error("Please specify a usefull vcenter.")
except Exception as e:
self.error("get_vcenter: %s" % e)
def get_types(self, types):
try:
if types.split("@")[0] in ["ad", "mii_win-primary", "mii_win-bck",
"ijcore_win-primary", "ijcore_win-bck"]:
types = types
mode = 1
elif types.split("@")[0] in ["mii", "ijcore"]:
types = types
mode = 0
else:
if len(types.split("@")) != 2:
self.error("Please specify address mode for normal host.")
else:
old_type = types
types = old_type.split("@")[0]
mode = old_type.split("@")[1]
return types, mode
except Exception as e:
self.error("get_types: %s" % e)
def write_one_host(self, hostfile, lines, vcenter,
area, mii_site, hostname, address, env):
"""Write to one host file."""
try:
fw = open(hostfile, "w")
for l in lines:
self.logger.debug("l: {}".format(l))
if "ohtpl_area_%s" in l:
fw.write(l % area)
elif "ohtpl_env_%s" in l:
if env:
fw.write(l % env)
elif "ohtpl_sys_vmware_%s_%s" in l:
l_vcenter = l.replace("ohtpl_sys_vmware_%s_%s",
str(vcenter))
fw.write(l_vcenter)
elif "host_name" in l:
fw.write(l % hostname)
elif "address" in l:
fw.write(l % address)
elif "_MII_SITEDATABASE" in l:
fw.write(l % mii_site)
elif "%s" not in l:
fw.write(l)
                # Any remaining %s placeholder is one we don't know how to fill.
                else:
                    self.error("write_one_host: unknown argument %s inside.")
except Exception as e:
self.error("write_one_host: %s" % e)
def create_host(self):
"""Get types from -t and read hostname and address and write to the hosts in nagios."""
try:
vcenter = ""
area = ""
mii_site = ""
env = ""
for loop in range(0, len(self.args.types)):
types = self.args.types[loop]
self.logger.debug("types: {}".format(types))
(types, mode) = self.get_types(types)
if types in ["ijcore_win-primary", "ijcore_win-bck"]:
mii_site = "IJCORE"
elif types.split("@")[0] in ["mii_win-primary",
"mii_win-bck"]:
if len(types.split("@")) != 2:
self.error("Please specify _MII_SITEDATABASE")
else:
mii_site = types.split("@")[1]
elif types.split("@")[0] in ["mii", "ijcore"]:
if len(types.split("@")) != 2:
self.error("Please specify vcenter for \
mii and ijcore.")
else:
vcenter = types.split("@")[1]
vcenter = self.get_vcenter(vcenter)
types = types.split("@")[0]
# Get the template file.
template = self.host_conf + types + ".cfg"
self.logger.debug("template: {}".format(template))
ftr = open(template, "r")
lines = ftr.readlines()
# Get the hostname and address file.
host = self.host_conf + types + ".txt"
self.logger.debug("host: {}".format(host))
des_host = self.host_conf + types + ".tmp"
self.logger.debug("des_host: {}".format(des_host))
self.delete_blank_line(host, des_host)
fhr = open(des_host, "r")
h_lines = fhr.readlines()
for line in h_lines:
hostname = line.split()[0].split(".")[0].strip().upper()
self.logger.debug("hostname: {}".format(hostname))
address = line.split()[int(mode)].strip().lower()
self.logger.debug("address: {}".format(address))
if len([i for i in line.split() if i]) == 3:
env = line.split()[2].strip().lower()
self.logger.debug("env: {}".format(env))
hostfile = self.g_dir + hostname + ".cfg"
self.logger.debug("hostfile: {}".format(hostfile))
area = self.get_area(hostname)
# Write to the host in nagios.
if os.path.isfile(hostfile):
self.already_exist("%s" % hostfile)
if self.args.force:
self.write_one_host(hostfile, lines, vcenter,
area, mii_site, hostname,
address, env)
else:
self.write_one_host(hostfile, lines, vcenter, area,
mii_site, hostname, address, env)
except Exception as e:
self.error("create_host: %s" % e)
def delete_host(self):
files = self.host_conf + "host.txt"
self.logger.debug("files: {}".format(files))
des_files = self.host_conf + "host.tmp"
self.logger.debug("des_files: {}".format(des_files))
self.delete_blank_line(files, des_files)
self.fr = open(des_files, "r")
self.lines = self.fr.readlines()
for line in self.lines:
self.logger.debug("line: {}".format(line))
hostname = line.split()[0].split(".")[0].strip().upper()
hostfile = self.g_dir + hostname + ".cfg"
self.logger.debug("hostfile: {}".format(hostfile))
if not os.path.isfile(hostfile):
self.not_exist("%s" % hostfile)
else:
try:
os.remove(hostfile)
except Exception as e:
self.error("remove_host: %s" % e)
| [
"os.path.isfile",
"os.remove"
] | [((10292, 10316), 'os.path.isfile', 'os.path.isfile', (['hostfile'], {}), '(hostfile)\n', (10306, 10316), False, 'import os\n'), ((9040, 9064), 'os.path.isfile', 'os.path.isfile', (['hostfile'], {}), '(hostfile)\n', (9054, 9064), False, 'import os\n'), ((10425, 10444), 'os.remove', 'os.remove', (['hostfile'], {}), '(hostfile)\n', (10434, 10444), False, 'import os\n')] |
"""Archive landing page."""
import datetime
from typing import Dict, Any, Tuple, List, no_type_check
from flask import Response, url_for
from arxiv import status
from arxiv.taxonomy.definitions import ARCHIVES, CATEGORIES, ARCHIVES_SUBSUMED
from browse.controllers.archive_page.by_month_form import ByMonthForm
from browse.controllers.years_operating import years_operating, stats_by_year
from browse.services.util.response_headers import abs_expires_header
def get_archive(archive_id: str) -> Tuple[Dict[str, Any], int, Dict[str, Any]]:
"""Gets archive page."""
data: Dict[str, Any] = {}
response_headers: Dict[str, Any] = {}
if archive_id == "list":
return archive_index(archive_id, status=status.HTTP_200_OK)
archive = ARCHIVES.get(archive_id, None)
if not archive:
cat_id = CATEGORIES.get(archive_id, {}).get("in_archive", None)
archive = ARCHIVES.get(cat_id, None)
if not archive:
return archive_index(archive_id,
status=status.HTTP_404_NOT_FOUND)
else:
archive_id = cat_id
_write_expires_header(response_headers)
subsumed_by = ARCHIVES_SUBSUMED.get(archive_id, None)
if subsumed_by:
data["subsumed_id"] = archive_id
data["subsumed_category"] = CATEGORIES.get(archive_id, {})
data["subsumed_by"] = subsumed_by
subsuming_category = CATEGORIES.get(subsumed_by, {})
data["subsuming_category"] = subsuming_category
archive_id = subsuming_category.get("in_archive", None)
archive = ARCHIVES.get(archive_id, None)
years = years_operating(archive)
data["years"] = years
data["months"] = MONTHS
data["days"] = DAYS
data["archive_id"] = archive_id
data["archive"] = archive
data["list_form"] = ByMonthForm(archive_id, archive, years)
data["stats_by_year"] = stats_by_year(archive_id, archive, years)
data["category_list"] = category_list(archive_id)
data["catchup_to"] = datetime.date.today() - datetime.timedelta(days=7)
data["template"] = "archive/single_archive.html"
return data, status.HTTP_200_OK, response_headers
def archive_index(archive_id: str, status: int) -> Tuple[Dict[str, Any], int, Dict[str, Any]]:
"""Landing page for when there is no archive specified."""
data: Dict[str, Any] = {}
data["bad_archive"] = archive_id
archives = [
(id, ARCHIVES[id]["name"])
for id in ARCHIVES.keys()
if id not in ARCHIVES_SUBSUMED and not id.startswith("test")
]
archives.sort(key=lambda tpl: tpl[0])
data["archives"] = archives
defunct = [
(id, ARCHIVES[id]["name"], ARCHIVES_SUBSUMED.get(id, ""))
for id in ARCHIVES.keys()
if "end_date" in ARCHIVES[id]
]
defunct.sort(key=lambda tpl: tpl[0])
data["defunct"] = defunct
data["template"] = "archive/archive_list_all.html"
return data, status, {}
def subsumed_msg(archive: Dict[str, str], subsumed_by: str) -> Dict[str, str]:
"""Adds information about subsuming categories and archives."""
sb = CATEGORIES.get(subsumed_by, {"name": "unknown category"})
sa = ARCHIVES.get(sb.get("in_archive", None), {"name": "unknown archive"})
return {"subsumed_by_cat": sb, "subsumed_by_arch": sa}
def category_list(archive_id: str) -> List[Dict[str, str]]:
"""Retunrs categories for archive."""
cats = []
for cat_id in CATEGORIES:
cat = CATEGORIES[cat_id]
if(cat.get("in_archive", "yuck") == archive_id
and cat.get("is_active", True)):
cats.append({"id": cat_id,
"name": cat.get("name", ""),
"description": cat.get("description", "")})
cats.sort(key=lambda x: x["name"])
return cats
def _write_expires_header(response_headers: Dict[str, Any]) -> None:
"""Writes an expires header for the response."""
response_headers["Expires"] = abs_expires_header()[1]
DAYS = ["{:0>2d}".format(i) for i in range(1, 32)]
MONTHS = [
("01", "01 (Jan)"),
("02", "02 (Feb)"),
("03", "03 (Mar)"),
("04", "04 (Apr)"),
("05", "05 (May)"),
("06", "06 (Jun)"),
("07", "07 (Jul)"),
("08", "08 (Aug)"),
("09", "09 (Sep)"),
("10", "10 (Oct)"),
("11", "11 (Nov)"),
("12", "12 (Dec)"),
]
| [
"arxiv.taxonomy.definitions.ARCHIVES.get",
"arxiv.taxonomy.definitions.ARCHIVES_SUBSUMED.get",
"arxiv.taxonomy.definitions.ARCHIVES.keys",
"datetime.timedelta",
"browse.controllers.years_operating.years_operating",
"browse.controllers.archive_page.by_month_form.ByMonthForm",
"arxiv.taxonomy.definitions.CATEGORIES.get",
"datetime.date.today",
"browse.services.util.response_headers.abs_expires_header",
"browse.controllers.years_operating.stats_by_year"
] | [((761, 791), 'arxiv.taxonomy.definitions.ARCHIVES.get', 'ARCHIVES.get', (['archive_id', 'None'], {}), '(archive_id, None)\n', (773, 791), False, 'from arxiv.taxonomy.definitions import ARCHIVES, CATEGORIES, ARCHIVES_SUBSUMED\n'), ((1175, 1214), 'arxiv.taxonomy.definitions.ARCHIVES_SUBSUMED.get', 'ARCHIVES_SUBSUMED.get', (['archive_id', 'None'], {}), '(archive_id, None)\n', (1196, 1214), False, 'from arxiv.taxonomy.definitions import ARCHIVES, CATEGORIES, ARCHIVES_SUBSUMED\n'), ((1628, 1652), 'browse.controllers.years_operating.years_operating', 'years_operating', (['archive'], {}), '(archive)\n', (1643, 1652), False, 'from browse.controllers.years_operating import years_operating, stats_by_year\n'), ((1827, 1866), 'browse.controllers.archive_page.by_month_form.ByMonthForm', 'ByMonthForm', (['archive_id', 'archive', 'years'], {}), '(archive_id, archive, years)\n', (1838, 1866), False, 'from browse.controllers.archive_page.by_month_form import ByMonthForm\n'), ((1895, 1936), 'browse.controllers.years_operating.stats_by_year', 'stats_by_year', (['archive_id', 'archive', 'years'], {}), '(archive_id, archive, years)\n', (1908, 1936), False, 'from browse.controllers.years_operating import years_operating, stats_by_year\n'), ((3122, 3179), 'arxiv.taxonomy.definitions.CATEGORIES.get', 'CATEGORIES.get', (['subsumed_by', "{'name': 'unknown category'}"], {}), "(subsumed_by, {'name': 'unknown category'})\n", (3136, 3179), False, 'from arxiv.taxonomy.definitions import ARCHIVES, CATEGORIES, ARCHIVES_SUBSUMED\n'), ((902, 928), 'arxiv.taxonomy.definitions.ARCHIVES.get', 'ARCHIVES.get', (['cat_id', 'None'], {}), '(cat_id, None)\n', (914, 928), False, 'from arxiv.taxonomy.definitions import ARCHIVES, CATEGORIES, ARCHIVES_SUBSUMED\n'), ((1312, 1342), 'arxiv.taxonomy.definitions.CATEGORIES.get', 'CATEGORIES.get', (['archive_id', '{}'], {}), '(archive_id, {})\n', (1326, 1342), False, 'from arxiv.taxonomy.definitions import ARCHIVES, CATEGORIES, ARCHIVES_SUBSUMED\n'), ((1414, 1445), 'arxiv.taxonomy.definitions.CATEGORIES.get', 'CATEGORIES.get', (['subsumed_by', '{}'], {}), '(subsumed_by, {})\n', (1428, 1445), False, 'from arxiv.taxonomy.definitions import ARCHIVES, CATEGORIES, ARCHIVES_SUBSUMED\n'), ((1584, 1614), 'arxiv.taxonomy.definitions.ARCHIVES.get', 'ARCHIVES.get', (['archive_id', 'None'], {}), '(archive_id, None)\n', (1596, 1614), False, 'from arxiv.taxonomy.definitions import ARCHIVES, CATEGORIES, ARCHIVES_SUBSUMED\n'), ((2017, 2038), 'datetime.date.today', 'datetime.date.today', ([], {}), '()\n', (2036, 2038), False, 'import datetime\n'), ((2041, 2067), 'datetime.timedelta', 'datetime.timedelta', ([], {'days': '(7)'}), '(days=7)\n', (2059, 2067), False, 'import datetime\n'), ((3975, 3995), 'browse.services.util.response_headers.abs_expires_header', 'abs_expires_header', ([], {}), '()\n', (3993, 3995), False, 'from browse.services.util.response_headers import abs_expires_header\n'), ((2478, 2493), 'arxiv.taxonomy.definitions.ARCHIVES.keys', 'ARCHIVES.keys', ([], {}), '()\n', (2491, 2493), False, 'from arxiv.taxonomy.definitions import ARCHIVES, CATEGORIES, ARCHIVES_SUBSUMED\n'), ((2695, 2724), 'arxiv.taxonomy.definitions.ARCHIVES_SUBSUMED.get', 'ARCHIVES_SUBSUMED.get', (['id', '""""""'], {}), "(id, '')\n", (2716, 2724), False, 'from arxiv.taxonomy.definitions import ARCHIVES, CATEGORIES, ARCHIVES_SUBSUMED\n'), ((2744, 2759), 'arxiv.taxonomy.definitions.ARCHIVES.keys', 'ARCHIVES.keys', ([], {}), '()\n', (2757, 2759), False, 'from arxiv.taxonomy.definitions import ARCHIVES, CATEGORIES, 
ARCHIVES_SUBSUMED\n'), ((829, 859), 'arxiv.taxonomy.definitions.CATEGORIES.get', 'CATEGORIES.get', (['archive_id', '{}'], {}), '(archive_id, {})\n', (843, 859), False, 'from arxiv.taxonomy.definitions import ARCHIVES, CATEGORIES, ARCHIVES_SUBSUMED\n')] |
import re
def is_isogram(string: str) -> bool:
string = re.sub(r"\W", "", string).lower()
return len(set(string)) == len(string)
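# e.g. is_isogram("Dermatoglyphics") -> True, is_isogram("moOse") -> False;
# non-word characters are stripped first, so is_isogram("six-year-old") -> True as well.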
| [
"re.sub"
] | [((62, 87), 're.sub', 're.sub', (['"""\\\\W"""', '""""""', 'string'], {}), "('\\\\W', '', string)\n", (68, 87), False, 'import re\n')] |
"""
Problem 17
If the numbers 1 to 5 are written out in words: one, two, three, four, five, then there are 3 + 3 + 5 + 4 + 4 = 19 letters used in total.
If all the numbers from 1 to 1000 (one thousand) inclusive were written out in words, how many letters would be used?
NOTE: Do not count spaces or hyphens. For example, 342 (three hundred and forty-two) contains 23 letters and 115
(one hundred and fifteen) contains 20 letters. The use of "and" when writing out numbers is in compliance with British usage.
"""
from helper import *
import inflect
def p17():
n = 1000
return len("".join(map(inflect.engine().number_to_words, range(1, n+1))).replace(' ', '').replace('-', ''))
if __name__ == "__main__":
print("Problem 17: %d" % p17())
| [
"inflect.engine"
] | [((604, 620), 'inflect.engine', 'inflect.engine', ([], {}), '()\n', (618, 620), False, 'import inflect\n')] |
# Copyright (C) 2021 Zurich Instruments
#
# This software may be modified and distributed under the terms
# of the MIT license. See the LICENSE file for details.
import json
import urllib
import jsonschema
from zhinst.toolkit.interface import LoggerModule
_logger = LoggerModule(__name__)
class CommandTable:
"""Implement a CommandTable representation.
The :class:`CommandTable` class implements the basic functionality
of the command table allowing the user to write and upload their
own command table.
"""
def __init__(self, parent, ct_schema_url: str, ct_node: str) -> None:
self._parent = parent
self._index = self._parent._index
self._device = self._parent._parent
self._ct_schema_url = ct_schema_url
self._node = ct_node
try:
request = urllib.request.Request(url=self._ct_schema_url)
with urllib.request.urlopen(request) as f:
self.ct_schema_dict = json.loads(f.read())
version = self.ct_schema_dict["definitions"]["header"]["properties"][
"version"
]["enum"]
self.ct_schema_version = version[len(version) - 1]
except Exception as ex:
self.ct_schema_dict = None
self.ct_schema_version = None
_logger.warning(
"The command table schema could not be downloaded from Zurich Instruments' server. "
"Therefore, command tables cannot be validated against the schema by zhinst-toolkit itself. "
"The automated check before upload is disabled."
f"{ex}"
)
def load(self, table, validate=None):
"""Load a given command table to the instrument"""
# Check if the input is a valid JSON
table_updated = self._to_dict(table)
if validate is None:
validate = self.ct_schema_dict is not None
if validate:
if not self.ct_schema_dict:
_logger.error(
"The command table schema is not available."
"The command table could not be validated.",
_logger.ExceptionTypes.ToolkitError,
)
self._validate(table_updated)
# Convert the json object
# Load the command table to the device
node = self._node + "/data"
self._device._set_vector(node, json.dumps(table_updated))
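    # Illustrative call patterns for load(); the table contents below are placeholders,
    # not schema-complete command tables:
    #   ct.load('{"header": {"version": "1.0.0"}, "table": []}')   # JSON string
    #   ct.load([{"index": 0}])                                     # bare list of entries
    #   ct.load({"header": {"version": "1.0.0"}, "table": []})      # dict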
def download(self):
"""Downloads a command table"""
node = self._node + "/data"
return self._device._get_vector(node)
def _validate(self, table):
"""Ensure command table is valid JSON and compliant with schema"""
# Validation only works if the command table is in dictionary
        # format (json object). Make the necessary conversion
jsonschema.validate(
table, schema=self.ct_schema_dict, cls=jsonschema.Draft4Validator
)
def _to_dict(self, table):
"""Check the input type and convert it to json object (dict)"""
if isinstance(table, str):
table_updated = json.loads(table)
elif isinstance(table, list):
table_updated = {
"$schema": self._ct_schema_url,
"table": table,
}
if self.ct_schema_version:
table_updated["header"] = ({"version": self.ct_schema_version},)
elif isinstance(table, dict):
table_updated = table
else:
_logger.error(
"The command table should be specified as either a string, or a list "
"of entries without a header, or a valid json as a dictionary.",
_logger.ExceptionTypes.ToolkitError,
)
return table_updated
| [
"json.loads",
"urllib.request.Request",
"json.dumps",
"jsonschema.validate",
"zhinst.toolkit.interface.LoggerModule",
"urllib.request.urlopen"
] | [((268, 290), 'zhinst.toolkit.interface.LoggerModule', 'LoggerModule', (['__name__'], {}), '(__name__)\n', (280, 290), False, 'from zhinst.toolkit.interface import LoggerModule\n'), ((2838, 2929), 'jsonschema.validate', 'jsonschema.validate', (['table'], {'schema': 'self.ct_schema_dict', 'cls': 'jsonschema.Draft4Validator'}), '(table, schema=self.ct_schema_dict, cls=jsonschema.\n Draft4Validator)\n', (2857, 2929), False, 'import jsonschema\n'), ((833, 880), 'urllib.request.Request', 'urllib.request.Request', ([], {'url': 'self._ct_schema_url'}), '(url=self._ct_schema_url)\n', (855, 880), False, 'import urllib\n'), ((2416, 2441), 'json.dumps', 'json.dumps', (['table_updated'], {}), '(table_updated)\n', (2426, 2441), False, 'import json\n'), ((3114, 3131), 'json.loads', 'json.loads', (['table'], {}), '(table)\n', (3124, 3131), False, 'import json\n'), ((898, 929), 'urllib.request.urlopen', 'urllib.request.urlopen', (['request'], {}), '(request)\n', (920, 929), False, 'import urllib\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# File: Ampel-alerts/ampel/alert/reject/FullAlertRegister.py
# License: BSD-3-Clause
# Author: <NAME> <<EMAIL>>
# Date: 14.05.2020
# Last Modified Date: 26.05.2020
# Last Modified By: <NAME> <<EMAIL>>
from time import time
from struct import pack
from typing import Literal, BinaryIO, ClassVar
from ampel.protocol.AmpelAlertProtocol import AmpelAlertProtocol
from ampel.alert.reject.BaseAlertRegister import BaseAlertRegister
class FullAlertRegister(BaseAlertRegister):
"""
Record: alert_id, stock_id, timestamp, filter_res
"""
__slots__: ClassVar[tuple[str, ...]] = '_write', # type: ignore
struct: Literal['<QQIB'] = '<QQIB' # type: ignore[assignment]
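    # '<QQIB' packs little-endian u64 alert_id + u64 stock_id + u32 unix timestamp +
    # u8 filter_res, i.e. a fixed 21-byte record per filed alert.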
def file(self, alert: AmpelAlertProtocol, filter_res: None | int = None) -> None:
self._write(pack('<QQIB', alert.id, alert.stock, int(time()), filter_res or 0))
@classmethod
def find_stock(cls, # type: ignore[override]
f: BinaryIO | str, stock_id: int | list[int], **kwargs
) -> None | list[tuple[int, ...]]:
return super().find_stock(f, stock_id=stock_id, offset_in_block=8, **kwargs)
| [
"time.time"
] | [((908, 914), 'time.time', 'time', ([], {}), '()\n', (912, 914), False, 'from time import time\n')] |
# MapNet tests using dummy data and hardmax (for discrete results)
import argparse, math
import torch as t
from mapnet import MapNet
from maze.mazes import extract_view
from transforms import Rigid2D
from utils import sub2ind, ind2sub
from overboard import tshow
import matplotlib.pyplot as plt
from matplotlib.collections import PatchCollection
def parse_map(input):
"""Convert map encoded as a string (characters: #*.) into a PyTorch array"""
input = input.split('\n')
out = t.zeros((3, len(input), len(input[0])))
for (h, line) in enumerate(input):
line = line.strip()
for (w, ch) in enumerate(line):
if ch == '#':
out[0, h, w] = 1
elif ch == '*':
out[1, h, w] = 1
else:
out[2, h, w] = 0 # 0.1
return out
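# For example, parse_map("#*\n..") returns a (3, 2, 2) tensor: channel 0 flags '#'
# cells, channel 1 flags '*' cells, and channel 2 stays zero for free cells.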
def show_result(map, obs, out):
"""Show MapNet result as figures"""
    if map is not None:
        # difference between one-hot features, result will be in {-1,0,1}
        tshow(map[0, :, :] - map[1, :, :], title='gt map')
tshow(obs[0, :, 0, :, :] - obs[0, :, 1, :, :], title='obs')
tshow(out['softmax_poses'][0, ...], title='softmax_poses')
tshow(out['maps'][0, :, 0, :, :] - out['maps'][0, :, 1, :, :], title='maps')
def visualize_poses(poses, obs, map_sz, title):
"""Visualize poses/trajectory, and superimpose observations at those poses"""
# obs.shape = (batch, time, channels, height, width)
view_range = (obs.shape[-1] - 1) // 2
plt.figure(title)
for step in range(len(poses)):
plt.subplot(int(math.ceil(len(poses) / 8)), min(8, len(poses)), step + 1)
pose = poses[step]
pose = Rigid2D(x=pose[0], y=pose[1], ang=pose[2] * math.pi / 2)
pose = pose.apply(t.tensor).apply(t.Tensor.float)
# plot observations (top-down view) as a set of rectangles (one per cell)
for channel in (0, 1):
# local coordinates of cells, with origin at center of observation
local_pos = t.nonzero(obs[0, step, channel, :, :]).float() - view_range
# transform to global coordinates using pose
local_pos = Rigid2D(x=local_pos[:, 1], y=local_pos[:, 0], ang=t.zeros(local_pos.shape[0]))
points = local_pos + pose
# plot cells: ground for channel 0, wall for channel 1
rects = [plt.Rectangle((x, y), 1.0, 1.0)
for (x, y) in zip(points.x.tolist(), points.y.tolist())]
plt.gca().add_collection(PatchCollection(rects, facecolor='yb'[channel], edgecolor=None, alpha=0.3))
# plot pose
plt.scatter(pose.x + .5, pose.y + .5, s=20, c='r', marker='o', edgecolors=None)
plt.plot([pose.x + .5, pose.x + .5 + math.cos(pose.ang)], [pose.y + .5, pose.y + .5 + math.sin(pose.ang)], 'r')
# axes config
plt.axis('square')
plt.xlim(0, map_sz)
plt.ylim(map_sz, 0) # flip vertical axis
plt.grid(True)
plt.gca().set_xticks(range(0, map_sz))
plt.gca().set_yticks(range(0, map_sz))
def visualization_test(vectorization=False):
"""Show observations only, for manual inspection"""
mapnet = MapNet(cnn=lambda x: x, embedding_size=3, map_size=5,
aggregator='avg', hardmax=True, improved_padding=True, debug_vectorization=vectorization)
# get local observations
obs1 = """.#.
.*#
..."""
obs2 = """.*#
.#.
.#."""
obs3 = """#..
*#.
..."""
# shape = (batch=1, time, channels=1, height, width)
obs = [parse_map(o) for o in (obs1, obs2, obs3)]
obs = t.stack(obs, dim=0).unsqueeze(dim=0)
# run mapnet
out = mapnet(obs, debug_output=True)
# show results
show_result(None, obs, out)
def full_test(exhaustive=True, flip=False, vectorization=False):
"""Test MapNet with toy observations"""
'''# map with L-shape, ambiguous correlation result in some edge cases
map = parse_map("""...
*..
##*""")'''
# unambiguous map with only 2 identifiable tiles (allows triangulation)
map = parse_map("""...
*..
..#""")
# enlarge map by 0-padding
pad = 3
map = t.nn.functional.pad(map, [pad] * 4, value=0)
if flip: # rotates the map 180 degrees
map = map.flip(dims=[1, 2])
if not exhaustive:
# hand-crafted sequence of poses (x, y, angle)
poses = [
(1 + 1, 1, 0 + 1), # center (or around it)
(0, 2, 2), # bottom-left
(2, 2, 0), # bottom-right
(2, 0, 1), # top-right
]
else:
# exhaustive test of all valid poses
poses = [(x, y, ang) for x in range(0, 3) for y in range(0, 3) for ang in range(4)]
# start around center, to build initial map
# poses.insert(0, (1, 1, 0))
poses.insert(0, (2, 1, 1))
if flip: # replace initial direction so it points the other way
poses[0] = (poses[0][0], poses[0][1], 2)
# account for map padding in pose coordinates
poses = [(x + pad, y + pad, ang) for (x, y, ang) in poses]
# get local observations
obs = [extract_view(map, x, y, ang, view_range=2) for (x, y, ang) in poses]
obs = t.stack(obs, dim=0)
# batch of size 2, same samples
obs = t.stack((obs, obs), dim=0)
# run mapnet
mapnet = MapNet(cnn=lambda i: i, embedding_size=3, map_size=map.shape[-1],
aggregator='avg', hardmax=True, improved_padding=True, debug_vectorization=vectorization)
out = mapnet(obs)
# show results
print(t.tensor(poses)[1:, :]) # (x, y, angle)
print((out['softmax_poses'] > 0.5).nonzero()[:, (4, 3, 2)])
show_result(map, obs, out)
if True: # not exhaustive:
visualize_poses(poses, obs, map_sz=map.shape[-1], title="Ground truth observations")
pred_poses = [out['softmax_poses'][0, step, :, :, :].nonzero()[0, :].flip(dims=(0,)).tolist()
for step in range(len(poses) - 1)]
pred_poses.insert(0, [1 + pad, 1 + pad, 0]) # insert map-agnostic starting pose (centered facing right)
visualize_poses(pred_poses, obs, map_sz=map.shape[-1], title="Observations registered wrt predicted poses")
# compare to ground truth
for (step, (x, y, ang)) in enumerate(poses[1:]):
# place the ground truth in the same coordinate-frame as the map, which is
# created considering that the first frame is at the center looking right.
# also move from/to discretized poses.
gt_pose = Rigid2D(*mapnet.undiscretize_pose(t.tensor(x), t.tensor(y), t.tensor(ang)))
initial_gt_pose = Rigid2D(*mapnet.undiscretize_pose(*[t.tensor(x) for x in poses[0]]))
(x, y, ang, invalid) = mapnet.discretize_pose(gt_pose - initial_gt_pose)
        assert 2 <= x <= map.shape[-1] - 2 and 2 <= y <= map.shape[-1] - 2 \
            and 0 <= ang < 4, "GT poses should not fall this far outside the map bounds"
# probability of each pose, shape = (orientations, height, width)
p = out['softmax_poses'][0, step, :, :, :]
assert p[ang, y, x].item() > 0.5 # peak at correct location
assert p.sum().item() < 1.5 # no other peak elsewhere
assert (p >= 0).all().item() # all positive
def discretize_test():
"""Test pose discretization/undiscretization"""
mapnet = MapNet(cnn=lambda i: i, embedding_size=3, map_size=7,
aggregator='avg', hardmax=True, improved_padding=True)
# test data: all positions and angles
(x, y, ang) = t.meshgrid(t.arange(7, dtype=t.float) - 3,
t.arange(7, dtype=t.float) - 3,
t.arange(4, dtype=t.float) * math.pi / 2)
poses = Rigid2D(x, y, ang)
poses = poses.apply(t.Tensor.flatten)
# discretize and undiscretize
(bin_x, bin_y, bin_ang, invalid) = mapnet.discretize_pose(poses)
(x, y, ang) = mapnet.undiscretize_pose(bin_x, bin_y, bin_ang)
assert (x - poses.x).abs().max().item() < 1e-4
assert (y - poses.y).abs().max().item() < 1e-4
assert (ang - poses.ang).abs().max().item() < 1e-4
assert invalid.sum().item() < 1e-4
# test flat indexes
shape = [mapnet.orientations, mapnet.map_size, mapnet.map_size]
bin_idx = sub2ind([bin_ang, bin_y, bin_x], shape, check_bounds=True)
(ang, y, x) = ind2sub(bin_idx, shape)
(x, y, ang) = mapnet.undiscretize_pose(x, y, ang)
assert (x - poses.x).abs().max().item() < 1e-4
assert (y - poses.y).abs().max().item() < 1e-4
assert (ang - poses.ang).abs().max().item() < 1e-4
assert invalid.sum().item() < 1e-4
def discretize_center_test():
"""Test pose discretization center (0,0 should correspond to center bin of map)"""
mapnet = MapNet(cnn=lambda i: i, embedding_size=3, map_size=7,
aggregator='avg', hardmax=True, improved_padding=True)
center = (mapnet.map_size - 1) // 2
# test data: positions and angles around center, excluding boundaries
pos_range = t.linspace(-0.5, 0.5, 20)[1:-1]
ang_range = t.linspace(-math.pi / 4, math.pi / 4, 20)[1:-1]
(x, y, ang) = t.meshgrid(pos_range, pos_range, ang_range)
poses = Rigid2D(x, y, ang).apply(t.Tensor.flatten)
# discretize those poses, they should all map to the center bin
(bin_x, bin_y, bin_ang, invalid) = mapnet.discretize_pose(poses)
assert ((bin_x == center).all() and (bin_y == center).all()
and (bin_ang == 0).all() and not invalid.any())
# discretize positions and angles just outside center
(xo, yo, ango) = t.meshgrid(t.tensor([-0.6, 0.6]),
t.tensor([-0.6, 0.6]),
t.tensor([-0.26 * math.pi, 0.26 * math.pi]))
poses = Rigid2D(xo, yo, ango).apply(t.Tensor.flatten)
(xo, yo, ango, invalid) = mapnet.discretize_pose(poses)
assert ((xo != center).all() and (yo != center).all() and
(ango != 0).all() and not invalid.any())
# undiscretize center bin
(xc, yc, angc) = mapnet.undiscretize_pose(t.tensor(center), t.tensor(center), t.tensor(0))
assert xc == 0 and yc == 0 and angc == 0
if __name__ == '__main__':
# parse command line args
parser = argparse.ArgumentParser()
parser.add_argument("--exhaustive", action="store_true", help="Check all positions and rotations")
parser.add_argument("--flip", action="store_true",
help="Test with 180-degrees-rotated world, also rotating initial pose 180 degrees")
parser.add_argument("--visualization-only", action="store_true",
help="Simpler test with visualization only (no checking)")
parser.add_argument("--discretize", action="store_true", help="Test discretization bounds")
args = parser.parse_args()
# run tests
if args.visualization_only:
visualization_test(vectorization=True)
elif args.discretize:
discretize_test()
discretize_center_test()
else:
full_test(exhaustive=args.exhaustive, flip=args.flip, vectorization=True)
print("Done.")
plt.show()
input() # keep tensor figures open until some input is given
| [
"utils.ind2sub",
"matplotlib.pyplot.grid",
"utils.sub2ind",
"math.cos",
"torch.nn.functional.pad",
"torch.arange",
"argparse.ArgumentParser",
"maze.mazes.extract_view",
"mapnet.MapNet",
"torch.meshgrid",
"matplotlib.pyplot.scatter",
"matplotlib.pyplot.Rectangle",
"matplotlib.pyplot.axis",
"matplotlib.pyplot.ylim",
"matplotlib.pyplot.gca",
"matplotlib.pyplot.xlim",
"overboard.tshow",
"matplotlib.pyplot.show",
"torch.stack",
"matplotlib.collections.PatchCollection",
"torch.nonzero",
"torch.tensor",
"matplotlib.pyplot.figure",
"transforms.Rigid2D",
"math.sin",
"torch.zeros",
"torch.linspace"
] | [((1083, 1142), 'overboard.tshow', 'tshow', (['(obs[0, :, 0, :, :] - obs[0, :, 1, :, :])'], {'title': '"""obs"""'}), "(obs[0, :, 0, :, :] - obs[0, :, 1, :, :], title='obs')\n", (1088, 1142), False, 'from overboard import tshow\n'), ((1147, 1205), 'overboard.tshow', 'tshow', (["out['softmax_poses'][0, ...]"], {'title': '"""softmax_poses"""'}), "(out['softmax_poses'][0, ...], title='softmax_poses')\n", (1152, 1205), False, 'from overboard import tshow\n'), ((1210, 1286), 'overboard.tshow', 'tshow', (["(out['maps'][0, :, 0, :, :] - out['maps'][0, :, 1, :, :])"], {'title': '"""maps"""'}), "(out['maps'][0, :, 0, :, :] - out['maps'][0, :, 1, :, :], title='maps')\n", (1215, 1286), False, 'from overboard import tshow\n'), ((1522, 1539), 'matplotlib.pyplot.figure', 'plt.figure', (['title'], {}), '(title)\n', (1532, 1539), True, 'import matplotlib.pyplot as plt\n'), ((3196, 3343), 'mapnet.MapNet', 'MapNet', ([], {'cnn': '(lambda x: x)', 'embedding_size': '(3)', 'map_size': '(5)', 'aggregator': '"""avg"""', 'hardmax': '(True)', 'improved_padding': '(True)', 'debug_vectorization': 'vectorization'}), "(cnn=lambda x: x, embedding_size=3, map_size=5, aggregator='avg',\n hardmax=True, improved_padding=True, debug_vectorization=vectorization)\n", (3202, 3343), False, 'from mapnet import MapNet\n'), ((4323, 4367), 'torch.nn.functional.pad', 't.nn.functional.pad', (['map', '([pad] * 4)'], {'value': '(0)'}), '(map, [pad] * 4, value=0)\n', (4342, 4367), True, 'import torch as t\n'), ((5350, 5369), 'torch.stack', 't.stack', (['obs'], {'dim': '(0)'}), '(obs, dim=0)\n', (5357, 5369), True, 'import torch as t\n'), ((5417, 5443), 'torch.stack', 't.stack', (['(obs, obs)'], {'dim': '(0)'}), '((obs, obs), dim=0)\n', (5424, 5443), True, 'import torch as t\n'), ((5475, 5638), 'mapnet.MapNet', 'MapNet', ([], {'cnn': '(lambda i: i)', 'embedding_size': '(3)', 'map_size': 'map.shape[-1]', 'aggregator': '"""avg"""', 'hardmax': '(True)', 'improved_padding': '(True)', 'debug_vectorization': 'vectorization'}), "(cnn=lambda i: i, embedding_size=3, map_size=map.shape[-1],\n aggregator='avg', hardmax=True, improved_padding=True,\n debug_vectorization=vectorization)\n", (5481, 5638), False, 'from mapnet import MapNet\n'), ((7497, 7609), 'mapnet.MapNet', 'MapNet', ([], {'cnn': '(lambda i: i)', 'embedding_size': '(3)', 'map_size': '(7)', 'aggregator': '"""avg"""', 'hardmax': '(True)', 'improved_padding': '(True)'}), "(cnn=lambda i: i, embedding_size=3, map_size=7, aggregator='avg',\n hardmax=True, improved_padding=True)\n", (7503, 7609), False, 'from mapnet import MapNet\n'), ((7874, 7892), 'transforms.Rigid2D', 'Rigid2D', (['x', 'y', 'ang'], {}), '(x, y, ang)\n', (7881, 7892), False, 'from transforms import Rigid2D\n'), ((8409, 8467), 'utils.sub2ind', 'sub2ind', (['[bin_ang, bin_y, bin_x]', 'shape'], {'check_bounds': '(True)'}), '([bin_ang, bin_y, bin_x], shape, check_bounds=True)\n', (8416, 8467), False, 'from utils import sub2ind, ind2sub\n'), ((8486, 8509), 'utils.ind2sub', 'ind2sub', (['bin_idx', 'shape'], {}), '(bin_idx, shape)\n', (8493, 8509), False, 'from utils import sub2ind, ind2sub\n'), ((8894, 9006), 'mapnet.MapNet', 'MapNet', ([], {'cnn': '(lambda i: i)', 'embedding_size': '(3)', 'map_size': '(7)', 'aggregator': '"""avg"""', 'hardmax': '(True)', 'improved_padding': '(True)'}), "(cnn=lambda i: i, embedding_size=3, map_size=7, aggregator='avg',\n hardmax=True, improved_padding=True)\n", (8900, 9006), False, 'from mapnet import MapNet\n'), ((9270, 9313), 'torch.meshgrid', 't.meshgrid', (['pos_range', 'pos_range', 
'ang_range'], {}), '(pos_range, pos_range, ang_range)\n', (9280, 9313), True, 'import torch as t\n'), ((10358, 10383), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (10381, 10383), False, 'import argparse, math\n'), ((11226, 11236), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (11234, 11236), True, 'import matplotlib.pyplot as plt\n'), ((931, 981), 'overboard.tshow', 'tshow', (['(map[0, :, :] - map[1, :, :])'], {'title': '"""gt map"""'}), "(map[0, :, :] - map[1, :, :], title='gt map')\n", (936, 981), False, 'from overboard import tshow\n'), ((1701, 1757), 'transforms.Rigid2D', 'Rigid2D', ([], {'x': 'pose[0]', 'y': 'pose[1]', 'ang': '(pose[2] * math.pi / 2)'}), '(x=pose[0], y=pose[1], ang=pose[2] * math.pi / 2)\n', (1708, 1757), False, 'from transforms import Rigid2D\n'), ((2634, 2720), 'matplotlib.pyplot.scatter', 'plt.scatter', (['(pose.x + 0.5)', '(pose.y + 0.5)'], {'s': '(20)', 'c': '"""r"""', 'marker': '"""o"""', 'edgecolors': 'None'}), "(pose.x + 0.5, pose.y + 0.5, s=20, c='r', marker='o', edgecolors\n =None)\n", (2645, 2720), True, 'import matplotlib.pyplot as plt\n'), ((2865, 2883), 'matplotlib.pyplot.axis', 'plt.axis', (['"""square"""'], {}), "('square')\n", (2873, 2883), True, 'import matplotlib.pyplot as plt\n'), ((2892, 2911), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(0)', 'map_sz'], {}), '(0, map_sz)\n', (2900, 2911), True, 'import matplotlib.pyplot as plt\n'), ((2920, 2939), 'matplotlib.pyplot.ylim', 'plt.ylim', (['map_sz', '(0)'], {}), '(map_sz, 0)\n', (2928, 2939), True, 'import matplotlib.pyplot as plt\n'), ((2971, 2985), 'matplotlib.pyplot.grid', 'plt.grid', (['(True)'], {}), '(True)\n', (2979, 2985), True, 'import matplotlib.pyplot as plt\n'), ((5271, 5313), 'maze.mazes.extract_view', 'extract_view', (['map', 'x', 'y', 'ang'], {'view_range': '(2)'}), '(map, x, y, ang, view_range=2)\n', (5283, 5313), False, 'from maze.mazes import extract_view\n'), ((9155, 9180), 'torch.linspace', 't.linspace', (['(-0.5)', '(0.5)', '(20)'], {}), '(-0.5, 0.5, 20)\n', (9165, 9180), True, 'import torch as t\n'), ((9203, 9244), 'torch.linspace', 't.linspace', (['(-math.pi / 4)', '(math.pi / 4)', '(20)'], {}), '(-math.pi / 4, math.pi / 4, 20)\n', (9213, 9244), True, 'import torch as t\n'), ((9724, 9745), 'torch.tensor', 't.tensor', (['[-0.6, 0.6]'], {}), '([-0.6, 0.6])\n', (9732, 9745), True, 'import torch as t\n'), ((9779, 9800), 'torch.tensor', 't.tensor', (['[-0.6, 0.6]'], {}), '([-0.6, 0.6])\n', (9787, 9800), True, 'import torch as t\n'), ((9834, 9877), 'torch.tensor', 't.tensor', (['[-0.26 * math.pi, 0.26 * math.pi]'], {}), '([-0.26 * math.pi, 0.26 * math.pi])\n', (9842, 9877), True, 'import torch as t\n'), ((10192, 10208), 'torch.tensor', 't.tensor', (['center'], {}), '(center)\n', (10200, 10208), True, 'import torch as t\n'), ((10210, 10226), 'torch.tensor', 't.tensor', (['center'], {}), '(center)\n', (10218, 10226), True, 'import torch as t\n'), ((10228, 10239), 'torch.tensor', 't.tensor', (['(0)'], {}), '(0)\n', (10236, 10239), True, 'import torch as t\n'), ((3684, 3703), 'torch.stack', 't.stack', (['obs'], {'dim': '(0)'}), '(obs, dim=0)\n', (3691, 3703), True, 'import torch as t\n'), ((5704, 5719), 'torch.tensor', 't.tensor', (['poses'], {}), '(poses)\n', (5712, 5719), True, 'import torch as t\n'), ((7698, 7724), 'torch.arange', 't.arange', (['(7)'], {'dtype': 't.float'}), '(7, dtype=t.float)\n', (7706, 7724), True, 'import torch as t\n'), ((7759, 7785), 'torch.arange', 't.arange', (['(7)'], {'dtype': 't.float'}), '(7, dtype=t.float)\n', (7767, 
7785), True, 'import torch as t\n'), ((9327, 9345), 'transforms.Rigid2D', 'Rigid2D', (['x', 'y', 'ang'], {}), '(x, y, ang)\n', (9334, 9345), False, 'from transforms import Rigid2D\n'), ((9892, 9913), 'transforms.Rigid2D', 'Rigid2D', (['xo', 'yo', 'ango'], {}), '(xo, yo, ango)\n', (9899, 9913), False, 'from transforms import Rigid2D\n'), ((2381, 2412), 'matplotlib.pyplot.Rectangle', 'plt.Rectangle', (['(x, y)', '(1.0)', '(1.0)'], {}), '((x, y), 1.0, 1.0)\n', (2394, 2412), True, 'import matplotlib.pyplot as plt\n'), ((2529, 2603), 'matplotlib.collections.PatchCollection', 'PatchCollection', (['rects'], {'facecolor': '"""yb"""[channel]', 'edgecolor': 'None', 'alpha': '(0.3)'}), "(rects, facecolor='yb'[channel], edgecolor=None, alpha=0.3)\n", (2544, 2603), False, 'from matplotlib.collections import PatchCollection\n'), ((2994, 3003), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (3001, 3003), True, 'import matplotlib.pyplot as plt\n'), ((3041, 3050), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (3048, 3050), True, 'import matplotlib.pyplot as plt\n'), ((7820, 7846), 'torch.arange', 't.arange', (['(4)'], {'dtype': 't.float'}), '(4, dtype=t.float)\n', (7828, 7846), True, 'import torch as t\n'), ((2225, 2252), 'torch.zeros', 't.zeros', (['local_pos.shape[0]'], {}), '(local_pos.shape[0])\n', (2232, 2252), True, 'import torch as t\n'), ((2504, 2513), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (2511, 2513), True, 'import matplotlib.pyplot as plt\n'), ((2759, 2777), 'math.cos', 'math.cos', (['pose.ang'], {}), '(pose.ang)\n', (2767, 2777), False, 'import argparse, math\n'), ((2808, 2826), 'math.sin', 'math.sin', (['pose.ang'], {}), '(pose.ang)\n', (2816, 2826), False, 'import argparse, math\n'), ((6705, 6716), 'torch.tensor', 't.tensor', (['x'], {}), '(x)\n', (6713, 6716), True, 'import torch as t\n'), ((6718, 6729), 'torch.tensor', 't.tensor', (['y'], {}), '(y)\n', (6726, 6729), True, 'import torch as t\n'), ((6731, 6744), 'torch.tensor', 't.tensor', (['ang'], {}), '(ang)\n', (6739, 6744), True, 'import torch as t\n'), ((2033, 2071), 'torch.nonzero', 't.nonzero', (['obs[0, step, channel, :, :]'], {}), '(obs[0, step, channel, :, :])\n', (2042, 2071), True, 'import torch as t\n'), ((6810, 6821), 'torch.tensor', 't.tensor', (['x'], {}), '(x)\n', (6818, 6821), True, 'import torch as t\n')] |
# Copyright 2010 <NAME> (<EMAIL>) Original model
# Copyright 2021 <NAME> (<EMAIL>) Current model
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Dudeney's Number problem in OR-tools CP-SAT Solver
From Pierre Schaus' blog post
"Dudeney number"
http://cp-is-fun.blogspot.com/2010/09/test-python.html
'''
I discovered yesterday Dudeney Numbers
A Dudeney Numbers is a positive integer that is a perfect cube such that the sum
of its decimal digits is equal to the cube root of the number. There are only six
Dudeney Numbers and those are very easy to find with CP.
I made my first experience with google cp solver to find these numbers (model below)
and must say that I found it very convenient to build CP models in python!
When you take a close look at the line:
solver.Add(sum([10**(n-i-1)*x[i] for i in range(n)]) == nb)
It is difficult to argue that it is very far from dedicated
optimization languages!
'''
Also see: http://en.wikipedia.org/wiki/Dudeney_number
This is a port of Pierre Schaus old CP model dudeney.py
This model was created by <NAME> (<EMAIL>)
Also see my other OR-tools models: http://www.hakank.org/or_tools/
"""
from __future__ import print_function
from ortools.sat.python import cp_model as cp
import math, sys
# from cp_sat_utils import *
class SolutionPrinter(cp.CpSolverSolutionCallback):
"""SolutionPrinter"""
def __init__(self, x, nb, s):
cp.CpSolverSolutionCallback.__init__(self)
self.__x = x
self.__nb = nb
self.__s = s
self.__solution_count = 0
def OnSolutionCallback(self):
self.__solution_count += 1
print("x:", [self.Value(v) for v in self.__x])
print("nb:", self.Value(self.__nb), "s:", self.Value(self.__s))
print()
def SolutionCount(self):
return self.__solution_count
def dudeney(n=6):
model = cp.CpModel()
x = [model.NewIntVar(0,9, 'x' + str(i)) for i in range(n)]
nb = model.NewIntVar(0, 10 ** n, 'nb') # Number
s = model.NewIntVar(1, 9 * n + 1, 's') # Digit sum
# This don't work since the current AddMultiplicationEquality
# don't handle more than two values.
# model.AddMultiplicationEquality(nb,[s,s,s])
# """
# Check failed: vars.size() == 2 (3 vs. 2) General int_prod not supported yet.
# *** Check failure stack trace: ***
# """
# Work-around:
s2 = model.NewIntVar(1, (9 * n + 1)**2, 's2')
model.AddMultiplicationEquality(s2,[s,s])
model.AddMultiplicationEquality(nb,[s,s2])
model.Add(sum([10 ** (n - i - 1) * x[i] for i in range(n)]) == nb)
model.Add(sum([x[i] for i in range(n)]) == s)
solver = cp.CpSolver()
solution_printer = SolutionPrinter(x, nb, s)
status = solver.SearchForAllSolutions(model,solution_printer)
if status != cp.OPTIMAL:
print("No solution!")
print()
print("NumSolutions:", solution_printer.SolutionCount())
print("NumConflicts:", solver.NumConflicts())
print("NumBranches:", solver.NumBranches())
print("WallTime:", solver.WallTime())
if __name__ == '__main__':
dudeney(6)
| [
"ortools.sat.python.cp_model.CpSolverSolutionCallback.__init__",
"ortools.sat.python.cp_model.CpSolver",
"ortools.sat.python.cp_model.CpModel"
] | [((2396, 2408), 'ortools.sat.python.cp_model.CpModel', 'cp.CpModel', ([], {}), '()\n', (2406, 2408), True, 'from ortools.sat.python import cp_model as cp\n'), ((3147, 3160), 'ortools.sat.python.cp_model.CpSolver', 'cp.CpSolver', ([], {}), '()\n', (3158, 3160), True, 'from ortools.sat.python import cp_model as cp\n'), ((1940, 1982), 'ortools.sat.python.cp_model.CpSolverSolutionCallback.__init__', 'cp.CpSolverSolutionCallback.__init__', (['self'], {}), '(self)\n', (1976, 1982), True, 'from ortools.sat.python import cp_model as cp\n')] |
# https://adventofcode.com/2021/day/4
from src.util.types import Data, Solution
class BingoBoard:
def __init__(self, numbers):
self.numbers = [int(n) for n in numbers.replace("\n", " ").split()]
self.marked = set()
def mark(self, n):
if n in self.numbers:
self.marked.add(n)
return self.check_win(n)
return False
def get_score(self):
return sum(set(self.numbers).difference(self.marked))
def check_win(self, n):
"""Check if n completes the row or column it's in."""
i = self.numbers.index(n)
r = i // 5 * 5
c = i % 5
return (
self.marked.issuperset(self.numbers[r:r+5])
or self.marked.issuperset(self.numbers[c::5])
)
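    # Index arithmetic example: the board is stored row-major, so n at flat index 7
    # sits in row 1, column 2, giving the row slice numbers[5:10] and the column
    # slice numbers[2::5] checked above.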
def prepare_data(data: str) -> tuple[list[int], list[BingoBoard]]:
data_numbers, *data_boards = data.split("\n\n")
numbers = [int(n) for n in data_numbers.split(",")]
boards = [BingoBoard(board) for board in data_boards]
return numbers, boards
def play(numbers: list[int], boards: list[BingoBoard]) -> tuple[int, int]:
winning_score = None
for n in numbers:
boards_to_remove = set()
for board in boards:
if board.mark(n):
boards_to_remove.add(board)
if winning_score is None:
winning_score = board.get_score() * n
elif len(boards) == 1:
losing_score = board.get_score() * n
return winning_score, losing_score
boards = [board for board in boards if board not in boards_to_remove]
raise RuntimeError("Should have returned a result.")
def solve(data: Data) -> Solution:
sample_data = prepare_data(data.samples[0])
sample_part_1, sample_part_2 = play(*sample_data)
challenge_data = prepare_data(data.input)
challenge_part_1, challenge_part_2 = play(*challenge_data)
return Solution(
samples_part_1=[sample_part_1],
samples_part_2=[sample_part_2],
part_1=challenge_part_1,
part_2=challenge_part_2
)
| [
"src.util.types.Solution"
] | [((1946, 2072), 'src.util.types.Solution', 'Solution', ([], {'samples_part_1': '[sample_part_1]', 'samples_part_2': '[sample_part_2]', 'part_1': 'challenge_part_1', 'part_2': 'challenge_part_2'}), '(samples_part_1=[sample_part_1], samples_part_2=[sample_part_2],\n part_1=challenge_part_1, part_2=challenge_part_2)\n', (1954, 2072), False, 'from src.util.types import Data, Solution\n')] |
#MIT License
#
#Copyright (c) 2020 <NAME>
#
#Permission is hereby granted, free of charge, to any person obtaining a copy
#of this software and associated documentation files (the "Software"), to deal
#in the Software without restriction, including without limitation the rights
#to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
#copies of the Software, and to permit persons to whom the Software is
#furnished to do so, subject to the following conditions:
#
#The above copyright notice and this permission notice shall be included in all
#copies or substantial portions of the Software.
#
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
#IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
#FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
#AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
#LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
#OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
#SOFTWARE.
import csv
import sys
## USAGE ##
# simple script that fixes chunks so that read names never occur in more than one chunk
# first argument is input chunk
# second argument is chunk number of input chunk
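# Example invocation (file names are hypothetical):
#   python split_line_fix.py reads_chunk.7.tsv 7
# which writes the rows carrying the chunk's first read name into split_line_fix.7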
file_name = str(sys.argv[1])
output_number = str(sys.argv[2])
with open('split_line_fix.'+output_number, 'w', newline = '') as confirmed:
w = csv.writer(confirmed, delimiter = '\t')
    if output_number == '1':  # argv values are strings; compare against '1', not the int 1
pass
else:
with open(file_name, newline = '') as file:
file_reader = csv.reader(file, delimiter = '\t')
for row in file_reader:
current_line = row
first_read = current_line[3]
while current_line[3] == first_read:
w.writerow(current_line)
current_line = next(file_reader)
                break
| [
"csv.writer",
"csv.reader"
] | [((1437, 1474), 'csv.writer', 'csv.writer', (['confirmed'], {'delimiter': '"""\t"""'}), "(confirmed, delimiter='\\t')\n", (1447, 1474), False, 'import csv\n'), ((1605, 1637), 'csv.reader', 'csv.reader', (['file'], {'delimiter': '"""\t"""'}), "(file, delimiter='\\t')\n", (1615, 1637), False, 'import csv\n')] |
# Generated by Django 2.1.15 on 2020-11-12 18:21
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('projects', '0002_auto_20201112_1810'),
]
operations = [
migrations.RenameField(
model_name='project',
old_name='picture',
new_name='project_picture',
),
]
| [
"django.db.migrations.RenameField"
] | [((229, 326), 'django.db.migrations.RenameField', 'migrations.RenameField', ([], {'model_name': '"""project"""', 'old_name': '"""picture"""', 'new_name': '"""project_picture"""'}), "(model_name='project', old_name='picture', new_name=\n 'project_picture')\n", (251, 326), False, 'from django.db import migrations\n')] |
# Generated by Django 2.2.1 on 2019-05-07 21:01
import django.utils.timezone
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('pretix_cliques', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='clique',
name='created',
field=models.DateTimeField(auto_now_add=True, default=django.utils.timezone.now),
preserve_default=False,
),
]
| [
"django.db.models.DateTimeField"
] | [((360, 434), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now_add': '(True)', 'default': 'django.utils.timezone.now'}), '(auto_now_add=True, default=django.utils.timezone.now)\n', (380, 434), False, 'from django.db import migrations, models\n')] |
import urllib.parse
from django.conf import settings
from django.contrib.auth import get_user_model
from django.test import TestCase
from tock.remote_user_auth import ACCOUNT_INACTIVE_MSG
User = get_user_model()
class ViewsTests(TestCase):
def test_logout_logs_user_out(self):
user = User.objects.create_user(
username='foo'
)
self.client.force_login(user)
# Make sure we actually did the above successfully
self.assertTrue(user.is_authenticated)
uaa_redirect_url = settings.UAA_LOGOUT_URL
uaa_redirect_url += '?'
uaa_redirect_url += urllib.parse.urlencode({
'redirect': 'http://testserver/logout',
'client_id': settings.UAA_CLIENT_ID,
})
self.assertFalse(self.client.session.is_empty())
response = self.client.get('/logout')
self.assertRedirects(
response,
uaa_redirect_url,
fetch_redirect_response=False
)
self.assertTrue(self.client.session.is_empty())
def test_inactive_user_denied(self):
"""Inactive users cannot access Tock"""
user = User.objects.create_user(username='foo', is_active=False)
self.client.force_login(user)
r = self.client.get('/')
self.assertContains(r, ACCOUNT_INACTIVE_MSG, status_code=403)
| [
"django.contrib.auth.get_user_model"
] | [((198, 214), 'django.contrib.auth.get_user_model', 'get_user_model', ([], {}), '()\n', (212, 214), False, 'from django.contrib.auth import get_user_model\n')] |
# -*- coding: utf-8 -*-
"""
Created on Fri May 06 14:34:21 2016
@author: <NAME>
"""
import numpy as np
from . import Layers as layers
from . import Utils as utils
class NeuralNetwork(object):
"""
Initializes a neural network.
Takes a dictionary of initialization options.
"""
def __init__(self, options):
self.input_dim = options['input_dim']
self.data_type = options.setdefault('data_type', np.float32)
self.init_scheme = options.setdefault('init_scheme', 'xavier')
self.layers = []
self.num_layers = 0
"""
Adds a layer to the neural network.
The layer must be of a valid type, and is associated with a dictionary.
If the layer has any special options or hyperparameters, these are indicated in the dictionary.
Otherwise, the dictionary is empty.
"""
def add_layer(self, layer_type, params):
if not self.layers:
in_dim = self.input_dim
else:
in_dim = self.layers[-1].out_dim
if 'weight_scale' in params:
weight_scale = params['weight_scale']
elif self.init_scheme == 'xavier':
weight_scale = 1./np.sqrt(in_dim)
if layer_type == 'SoftmaxLoss':
layer = layers.SoftmaxLossLayer(in_dim)
self.layers.append(layer)
elif layer_type == 'SVMLoss':
layer = layers.SVMLossLayer(in_dim)
self.layers.append(layer)
elif layer_type == 'Affine':
layer = layers.AffineLayer(in_dim, params['neurons'], weight_scale, self.data_type)
self.layers.append(layer)
self.num_layers += 1
elif layer_type == 'Batchnorm':
layer = layers.BatchnormLayer(in_dim, params['decay'], self.data_type)
self.layers.append(layer)
self.num_layers += 1
elif layer_type == 'Dropout':
if 'seed' in params:
layer = layers.DropoutLayer(in_dim, params['dropout_param'], seed=params['seed'])
else:
layer = layers.DropoutLayer(in_dim, params['dropout_param'])
self.layers.append(layer)
self.num_layers += 1
elif layer_type == 'PReLU':
layer = layers.PReLULayer(in_dim, self.data_type)
self.layers.append(layer)
self.num_layers += 1
elif layer_type == 'ReLU':
layer = layers.ReLULayer(in_dim)
self.layers.append(layer)
self.num_layers += 1
elif layer_type == 'Sigmoid':
layer = layers.SigmoidLayer(in_dim)
self.layers.append(layer)
self.num_layers += 1
elif layer_type == 'Tanh':
layer = layers.TanhLayer(in_dim)
self.layers.append(layer)
self.num_layers += 1
else:
raise InvalidLayerException('Invalid layer: ' + layer_type)
"""
Performs forward propagation on the network, pushing a tensor through each layer in sequence.
Does not perform final layer classification.
"""
def forward(self, X, train=False):
X = X.astype(self.data_type)
forward_tensor = X
for layer in self.layers:
if layer == self.layers[-1]:
return forward_tensor
            if (isinstance(layer, layers.DropoutLayer) or isinstance(layer, layers.BatchnormLayer)) and train:  # parentheses so train gates both layer types
forward_tensor = layer.forward_train(forward_tensor)
else:
forward_tensor = layer.forward(forward_tensor)
"""
Performs forward propagation, and performs final layer classification.
Returns an NxC matrix of class scores per given example.
"""
def classify(self, X):
X = X.astype(self.data_type)
scores = self.forward(X)
return self.layers[-1].evaluate(scores)
"""
Given a set of training examples and their corresponding scores, performs forward propagation and then
returns the final layer classifier loss and the derivative of that loss function.
"""
def loss(self, X, y, reg_param=0.0):
X = X.astype(self.data_type)
scores = self.forward(X, train=True)
loss, dx = self.layers[-1].loss(scores, y)
squared_sum = 0.0
for layer in self.layers:
if isinstance(layer, layers.AffineLayer):
squared_sum += np.sum(layer.W * layer.W)
loss += 0.5 * reg_param * squared_sum
return loss, dx
"""
Takes a set of training examples and corresponding scores.
Performs forward propagation, executes the final layer classifier loss function.
Then, performs backpropagation on the network and saves intermediate derivatives to the respective layers.
Returns the classifier loss and its derivative for progress reporting purposes.
"""
def backward(self, X, y, reg_param=0.0):
X = X.astype(self.data_type)
loss, dx = self.loss(X, y, reg_param)
for layer in reversed(self.layers):
if layer == self.layers[-1]:
continue
dx = layer.backward(dx)
if isinstance(layer, layers.AffineLayer):
layer.dW += reg_param * layer.W
return loss, dx
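# Minimal usage sketch (added, not part of the original module); the layer names follow
# the add_layer branches above and the hyperparameters are made up:
#   net = NeuralNetwork({'input_dim': 784})
#   net.add_layer('Affine', {'neurons': 100})
#   net.add_layer('ReLU', {})
#   net.add_layer('Affine', {'neurons': 10})
#   net.add_layer('SoftmaxLoss', {})
#   loss, dx = net.backward(X_batch, y_batch, reg_param=1e-4)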
class InvalidLayerException(Exception):
    pass
| [
"numpy.sum",
"numpy.sqrt"
] | [((4392, 4417), 'numpy.sum', 'np.sum', (['(layer.W * layer.W)'], {}), '(layer.W * layer.W)\n', (4398, 4417), True, 'import numpy as np\n'), ((1177, 1192), 'numpy.sqrt', 'np.sqrt', (['in_dim'], {}), '(in_dim)\n', (1184, 1192), True, 'import numpy as np\n')] |
import torch
import os
import os.path as osp
from collections import OrderedDict
import torch.nn as nn
import misc_utils as utils
def load_state_dict(module, state_dict):
"""Load state_dict to a module.
This method is modified from :meth:`torch.nn.Module.load_state_dict`.
    Unlike the original, a key mismatch does not raise: loading falls back to a
    partial, non-strict merge and the unmatched layer names are printed as warnings.
Args:
module (Module): Module that receives the state_dict.
state_dict (OrderedDict): Weights.
"""
try:
module.load_state_dict(state_dict)
except:
try:
model_dict = module.state_dict()
not_initialized = {k.split('.')[0] for k, v in state_dict.items() if k not in model_dict}
state_dict = {k: v for k, v in state_dict.items() if k in model_dict}
module.load_state_dict(state_dict)
utils.color_print('Warning: Pretrained network has excessive layers: ', 1, end='')
utils.color_print(str(sorted(not_initialized)), 1)
except:
utils.color_print('Warning: Pretrained network has fewer layers; The following are not initialized: ', 1, end='')
for k, v in state_dict.items():
if v.size() == model_dict[k].size():
model_dict[k] = v
not_initialized = set()
for k, v in model_dict.items():
if k not in state_dict or v.size() != state_dict[k].size():
not_initialized.add(k.split('.')[0])
utils.color_print(str(sorted(not_initialized)), 1)
module.load_state_dict(model_dict)
def load_checkpoint(load_dict,
filename,
map_location=None,
strict=False,
logger=None):
"""Load checkpoint from a file or URI.
Args:
        load_dict (dict): Mapping from checkpoint keys to the modules that
            should receive the corresponding state_dict.
filename (str): Either a filepath or URL or modelzoo://xxxxxxx.
map_location (str): Same as :func:`torch.load`.
strict (bool): Whether to allow different params for the model and
checkpoint.
logger (:mod:`logging.Logger` or None): The logger for error message.
Returns:
dict or OrderedDict: The loaded checkpoint.
"""
state_dict = torch.load(filename, map_location)
# get state_dict from checkpoint
if list(state_dict.keys())[0].startswith('module.'):
state_dict = {k[7:]: v for k, v in state_dict.items()}
# load state_dict
for key, model in load_dict.items():
if hasattr(model, 'module'):
load_state_dict(model.module, state_dict[key])
else:
load_state_dict(model, state_dict[key])
return state_dict
def save_checkpoint(save_dict, filename):
"""Save checkpoint to file.
    Each value in ``save_dict`` that exposes ``state_dict()`` is reduced to its
    state dict (unwrapping ``.module`` for wrapped models) before serialization.
Args:
save_dict (dict): string to module map.
filename (str): Checkpoint filename.
"""
os.makedirs(osp.dirname(filename), exist_ok=True)
for key in save_dict:
model = save_dict[key]
if isinstance(model, nn.Module):
if hasattr(model, 'module'):
save_dict[key] = model.module
if hasattr(save_dict[key], 'state_dict'):
save_dict[key] = save_dict[key].state_dict()
torch.save(save_dict, filename)
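# Minimal usage sketch (added, not part of the original module): the key 'model' and the
# target path below are hypothetical; they only show that save_checkpoint and
# load_checkpoint pair entries through the same string keys.
def _checkpoint_roundtrip_example():
    model = nn.Linear(4, 2)
    # save_checkpoint reduces the module to its state_dict under the key 'model'
    save_checkpoint({'model': model}, '/tmp/example_ckpt.pth')
    # load_checkpoint reads the file back and feeds state_dict['model'] into the module
    load_checkpoint({'model': model}, '/tmp/example_ckpt.pth', map_location='cpu')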
| [
"os.path.dirname",
"torch.load",
"misc_utils.color_print",
"torch.save"
] | [((2325, 2359), 'torch.load', 'torch.load', (['filename', 'map_location'], {}), '(filename, map_location)\n', (2335, 2359), False, 'import torch\n'), ((3396, 3427), 'torch.save', 'torch.save', (['save_dict', 'filename'], {}), '(save_dict, filename)\n', (3406, 3427), False, 'import torch\n'), ((3059, 3080), 'os.path.dirname', 'osp.dirname', (['filename'], {}), '(filename)\n', (3070, 3080), True, 'import os.path as osp\n'), ((905, 991), 'misc_utils.color_print', 'utils.color_print', (['"""Warning: Pretrained network has excessive layers: """', '(1)'], {'end': '""""""'}), "('Warning: Pretrained network has excessive layers: ', 1,\n end='')\n", (922, 991), True, 'import misc_utils as utils\n'), ((1079, 1202), 'misc_utils.color_print', 'utils.color_print', (['"""Warning: Pretrained network has fewer layers; The following are not initialized: """', '(1)'], {'end': '""""""'}), "(\n 'Warning: Pretrained network has fewer layers; The following are not initialized: '\n , 1, end='')\n", (1096, 1202), True, 'import misc_utils as utils\n')] |
import yaml
import json
from os import listdir
from os.path import isfile, join
"""
[{
towerName
kingdom
numberOfabilities
abilities: [
{
name
description
levels:[
{ cost }
]
},
{
name
description
levels: [
{ level, cost }
]
}
]
}]
---
name: ""
number_of_abilities: 0
abilities:
- name: ""
description: ""
number_of_levels: 0
levels:
- description: ""
cost: 0
- description: ""
cost: 0
- description: ""
cost: 0
- name: ""
description: ""
number_of_levels: 0
levels:
- description: ""
cost: 0
- description: ""
cost: 0
- description: ""
cost: 0
- name: ""
description: ""
number_of_levels: 0
levels:
- description: ""
cost: 0
- description: ""
cost: 0
- description: ""
cost: 0
"""
def get_abilities(path, kingdom):
stream = open(path, "r")
towers = []
kr_towers = yaml.load_all(stream, yaml.Loader)
for i, tower in enumerate(kr_towers):
print(i, tower["name"])
abilities = tower["abilities"]
cleaned_abilities = []
for j, ability in enumerate(abilities):
print("-", j, ability["name"])
levels = ability["levels"]
cleaned_levels = []
for k, ability_level in enumerate(levels):
print("--", k)
cleaned_levels.append({"cost": ability_level["cost"]})
cleaned_abilities.append(
{
"abilityName": ability["name"].lower(),
"description": ability["description"].lower(),
"levels": cleaned_levels,
}
)
tower_abilities = {
"towerName": tower["name"].lower(),
"kingdom": kingdom,
"abilities": cleaned_abilities,
}
towers.append(tower_abilities)
return towers
towers_kr = get_abilities("./data/raw/KR/abilities.yml", "kingdom rush")
towers_krf = get_abilities("./data/raw/KRF/abilities.yml", "kingdom rush: frontiers")
towers_kro = get_abilities("./data/raw/KRO/abilities.yml", "kingdom rush: origins")
towers_krv = get_abilities(
"./data/raw/KRV/abilities/abilities.yml", "kingdom rush: vengeance"
)
towers2d = [towers_kr, towers_krf, towers_kro, towers_krv]
towers = [item for sublist in towers2d for item in sublist]
for i, tower in enumerate(towers):
print(i, tower["towerName"])
data = {"data": towers}
with open("./data/generated/json/abilities.json", "w") as fout:
json_dumps_str = json.dumps(data, indent=4)
print(json_dumps_str, file=fout)
| [
"json.dumps",
"yaml.load_all"
] | [((1225, 1259), 'yaml.load_all', 'yaml.load_all', (['stream', 'yaml.Loader'], {}), '(stream, yaml.Loader)\n', (1238, 1259), False, 'import yaml\n'), ((2855, 2881), 'json.dumps', 'json.dumps', (['data'], {'indent': '(4)'}), '(data, indent=4)\n', (2865, 2881), False, 'import json\n')] |
# -*- coding: utf-8 -*-
# flake8: noqa
from qiniu import Auth, BucketManager
# Qiniu account Access Key and Secret Key
access_key = "<access_key>"
secret_key = "<secret_key>"
# Source bucket name
bucket_name = ""
# Source file name (key)
key = ""
# Destination bucket name
bucket_name_to = ""
# Destination file name (key)
key_to = "test7.txt"
q = Auth(access_key, secret_key)
bucket = BucketManager(q)
ret, info = bucket.move(bucket_name, key, bucket_name_to, key_to)
print(ret)
print(info)
| [
"qiniu.Auth",
"qiniu.BucketManager"
] | [((271, 299), 'qiniu.Auth', 'Auth', (['access_key', 'secret_key'], {}), '(access_key, secret_key)\n', (275, 299), False, 'from qiniu import Auth, BucketManager\n'), ((310, 326), 'qiniu.BucketManager', 'BucketManager', (['q'], {}), '(q)\n', (323, 326), False, 'from qiniu import Auth, BucketManager\n')] |
from impacket import tds
from impacket.tds import SQLErrorException, TDS_LOGINACK_TOKEN, TDS_ERROR_TOKEN, TDS_ENVCHANGE_TOKEN, TDS_INFO_TOKEN, \
TDS_ENVCHANGE_VARCHAR, TDS_ENVCHANGE_DATABASE, TDS_ENVCHANGE_LANGUAGE, TDS_ENVCHANGE_CHARSET, TDS_ENVCHANGE_PACKETSIZE
#We hook these functions in the tds library to use CME's logger instead of printing the output to stdout
#The whole tds library in impacket needs a good overhaul to preserve my sanity
def printRowsCME(self):
if self.lastError is True:
return
out = ''
self.processColMeta()
#self.printColumnsHeader()
for row in self.rows:
for col in self.colMeta:
if row[col['Name']] != 'NULL':
out += col['Format'] % row[col['Name']] + self.COL_SEPARATOR + '\n'
return out
def printRepliesCME(self):
for keys in self.replies.keys():
for i, key in enumerate(self.replies[keys]):
if key['TokenType'] == TDS_ERROR_TOKEN:
error = "ERROR(%s): Line %d: %s" % (key['ServerName'].decode('utf-16le'), key['LineNumber'], key['MsgText'].decode('utf-16le'))
self.lastError = SQLErrorException("ERROR: Line %d: %s" % (key['LineNumber'], key['MsgText'].decode('utf-16le')))
self._MSSQL__rowsPrinter.error(error)
elif key['TokenType'] == TDS_INFO_TOKEN:
self._MSSQL__rowsPrinter.info("INFO(%s): Line %d: %s" % (key['ServerName'].decode('utf-16le'), key['LineNumber'], key['MsgText'].decode('utf-16le')))
elif key['TokenType'] == TDS_LOGINACK_TOKEN:
self._MSSQL__rowsPrinter.info("ACK: Result: %s - %s (%d%d %d%d) " % (key['Interface'], key['ProgName'].decode('utf-16le'), key['MajorVer'], key['MinorVer'], key['BuildNumHi'], key['BuildNumLow']))
elif key['TokenType'] == TDS_ENVCHANGE_TOKEN:
if key['Type'] in (TDS_ENVCHANGE_DATABASE, TDS_ENVCHANGE_LANGUAGE, TDS_ENVCHANGE_CHARSET, TDS_ENVCHANGE_PACKETSIZE):
record = TDS_ENVCHANGE_VARCHAR(key['Data'])
if record['OldValue'] == '':
record['OldValue'] = 'None'.encode('utf-16le')
elif record['NewValue'] == '':
record['NewValue'] = 'None'.encode('utf-16le')
if key['Type'] == TDS_ENVCHANGE_DATABASE:
_type = 'DATABASE'
elif key['Type'] == TDS_ENVCHANGE_LANGUAGE:
_type = 'LANGUAGE'
elif key['Type'] == TDS_ENVCHANGE_CHARSET:
_type = 'CHARSET'
elif key['Type'] == TDS_ENVCHANGE_PACKETSIZE:
_type = 'PACKETSIZE'
else:
_type = "%d" % key['Type']
self._MSSQL__rowsPrinter.info("ENVCHANGE(%s): Old Value: %s, New Value: %s" % (_type,record['OldValue'].decode('utf-16le'), record['NewValue'].decode('utf-16le')))
tds.MSSQL.printReplies = printRepliesCME
tds.MSSQL.printRows = printRowsCME
| [
"impacket.tds.TDS_ENVCHANGE_VARCHAR"
] | [((2051, 2085), 'impacket.tds.TDS_ENVCHANGE_VARCHAR', 'TDS_ENVCHANGE_VARCHAR', (["key['Data']"], {}), "(key['Data'])\n", (2072, 2085), False, 'from impacket.tds import SQLErrorException, TDS_LOGINACK_TOKEN, TDS_ERROR_TOKEN, TDS_ENVCHANGE_TOKEN, TDS_INFO_TOKEN, TDS_ENVCHANGE_VARCHAR, TDS_ENVCHANGE_DATABASE, TDS_ENVCHANGE_LANGUAGE, TDS_ENVCHANGE_CHARSET, TDS_ENVCHANGE_PACKETSIZE\n')] |
import json
from collections import OrderedDict
from xml.etree import ElementTree as ET
def spark_node(node, in_list=False):
"""
"Sparkify" a single XML node.
:param node: The XML node to sparkify
:type node: ET.Element
:param in_list: Internal flag; used by this fn when sparkifying a list
:type in_list: bool
:return: list|dict
"""
kids = list(node)
if kids and all(c.tag == kids[0].tag for c in kids): # Homogeneous children; this must be an array
children = [
node for node in [spark_node(k, in_list=True) for k in kids]
if node is not None
]
# The following has not really been specified, but it prevents some mistransforms:
if not children:
return {}
if all(isinstance(j_kid, str) for j_kid in children):
# All of the children turned into bare strings:
return {node.tag: children} # Wrap them in an object
elif all(isinstance(j_kid, dict) and len(j_kid) == 1 for j_kid in children):
# The children got turned into 1-dicts?
n_kids = {}
for kid in children:
n_kids.update(kid)
return n_kids # Unravel them.
else: # Mixed bag? Return the list.
return children
if in_list and not kids and not node.items():
return node.text
dct = OrderedDict()
dct.xml_tag = node.tag
for kid in kids:
if list(kid): # Has children?
dct[kid.tag] = spark_node(kid)
elif kid.text:
dct[kid.tag] = kid.text
for attr, value in node.items():
dct["@" + attr] = value
return dct
def transform_xml_to_json(xml):
"""
Transform XML to JSON according to the Spark convention.
:param xml: XML string/bytes
:return: JSON string
"""
if isinstance(xml, str):
xml = xml.encode("UTF-8") # Here's hoping it's UTF-8
xml = ET.fromstring(xml)
dct = spark_node(xml)
return json.dumps(dct)
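# Hypothetical usage notes (added, not part of the original module); both examples were
# traced by hand through spark_node and only illustrate the "Spark" output shape.
# transform_xml_to_json('<items><item>a</item><item>b</item></items>')
#   -> '{"items": ["a", "b"]}'                               (homogeneous children collapse into a list)
# transform_xml_to_json('<job name="etl"><state>done</state><id>7</id></job>')
#   -> '{"state": "done", "id": "7", "@name": "etl"}'        (attributes become '@'-prefixed keys)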
| [
"collections.OrderedDict",
"json.dumps",
"xml.etree.ElementTree.fromstring"
] | [((1391, 1404), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (1402, 1404), False, 'from collections import OrderedDict\n'), ((1951, 1969), 'xml.etree.ElementTree.fromstring', 'ET.fromstring', (['xml'], {}), '(xml)\n', (1964, 1969), True, 'from xml.etree import ElementTree as ET\n'), ((2007, 2022), 'json.dumps', 'json.dumps', (['dct'], {}), '(dct)\n', (2017, 2022), False, 'import json\n')] |
#!/usr/bin/env/python
#-*- coding: utf-8 -*-
import numpy as np
import matplotlib.pyplot as plt
import csv
import argparse
import sys
# This allows arguments to be passed in from the terminal (i.e. when running python3 remover_puntos.py argv[0]....)
try:
parser=argparse.ArgumentParser();
parser.add_argument("porcentaje_rem", help="Coloque como primer argumento el porcentaje de puntos a remover", type=float);
parser.add_argument("tipo estrella: ",help="Coloque el tipo de estrella con el que se va a trabajar");
parser.add_argument("numero ID estrella: ", help="Coloque el número de la estrella a la que le va a remover puntos");
args=parser.parse_args();
porcentaje=float(sys.argv[1]);
tipo_estrella=sys.argv[2];
numero_estrella=sys.argv[3];
except:
e = sys.exc_info()[0];
print(e);
# end try
# Import the star ID numbers from the csv file:
ID_estrellas=np.loadtxt('numero_estrellas.csv',delimiter=',',dtype='str', skiprows=1);
vecCep=ID_estrellas[:,0];
vecRRLyr=ID_estrellas[:,1];
vecECL=ID_estrellas[:,2];
#-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
if tipo_estrella=='Cefeida' or tipo_estrella==0 or tipo_estrella=='0':
nombre_OGLE='OGLE-LMC-CEP-'
label_path='Datos/'+'1_Cefeidas'+'/I/'+nombre_OGLE;
#numero_estrella=vecCep;
elif tipo_estrella=='RR_Lyrae' or tipo_estrella==1 or tipo_estrella=='1':
nombre_OGLE='OGLE-LMC-RRLYR-'
label_path='Datos/'+'2_RR_Lyrae'+'/I/'+nombre_OGLE;
#numero_estrella=vecRRLyr;
else:
nombre_OGLE='OGLE-LMC-ECL-'
label_path='Datos/'+'3_BinariasEclipsantes'+'/I/'+nombre_OGLE;
#numero_estrella=vecECL;
# end if
extension='.dat';
#numero_estrella='02889';
elSeniorArchivo=label_path+numero_estrella+extension;
datos=np.genfromtxt(elSeniorArchivo,delimiter=' ');
N_datos=len(datos[:,0]);
N_remover=int(porcentaje*N_datos);
elegidos=np.random.choice(N_datos, size=N_remover,replace=False);
datos_nuevos=np.delete(datos,elegidos,0);
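# Example (added): with N_datos = 1000 and porcentaje = 0.1, N_remover = 100 distinct
# row indices are drawn without replacement and those rows are dropped along axis 0.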
nombre_archivo=nombre_OGLE+numero_estrella+extension;
with open(nombre_archivo, 'w', encoding='UTF8', newline='') as f:
writer=csv.writer(f, delimiter=' ');
writer.writerows(datos_nuevos);
# end with
| [
"argparse.ArgumentParser",
"numpy.random.choice",
"numpy.delete",
"csv.writer",
"sys.exc_info",
"numpy.loadtxt",
"numpy.genfromtxt"
] | [((890, 964), 'numpy.loadtxt', 'np.loadtxt', (['"""numero_estrellas.csv"""'], {'delimiter': '""","""', 'dtype': '"""str"""', 'skiprows': '(1)'}), "('numero_estrellas.csv', delimiter=',', dtype='str', skiprows=1)\n", (900, 964), True, 'import numpy as np\n'), ((1840, 1885), 'numpy.genfromtxt', 'np.genfromtxt', (['elSeniorArchivo'], {'delimiter': '""" """'}), "(elSeniorArchivo, delimiter=' ')\n", (1853, 1885), True, 'import numpy as np\n'), ((1955, 2011), 'numpy.random.choice', 'np.random.choice', (['N_datos'], {'size': 'N_remover', 'replace': '(False)'}), '(N_datos, size=N_remover, replace=False)\n', (1971, 2011), True, 'import numpy as np\n'), ((2025, 2054), 'numpy.delete', 'np.delete', (['datos', 'elegidos', '(0)'], {}), '(datos, elegidos, 0)\n', (2034, 2054), True, 'import numpy as np\n'), ((272, 297), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (295, 297), False, 'import argparse\n'), ((2183, 2211), 'csv.writer', 'csv.writer', (['f'], {'delimiter': '""" """'}), "(f, delimiter=' ')\n", (2193, 2211), False, 'import csv\n'), ((776, 790), 'sys.exc_info', 'sys.exc_info', ([], {}), '()\n', (788, 790), False, 'import sys\n')] |
from setuptools import setup, find_packages
setup(
name='dynibatch',
version='0.9',
url='https://bitbucket.org/jul_dyni/dynibatch',
author='DYNI team',
author_email='<EMAIL>',
license='MIT',
packages=find_packages()
)
| [
"setuptools.find_packages"
] | [((229, 244), 'setuptools.find_packages', 'find_packages', ([], {}), '()\n', (242, 244), False, 'from setuptools import setup, find_packages\n')] |
import uuid
from django.db import connection, models
from busshaming.enums import RouteMetric, MetricTimespan
UPSERT_ENTRY = '''
INSERT INTO busshaming_routeranking (id, route_id, date, timespan, metric, rank, display_rank, value)
VALUES (uuid_generate_v4(), %s, %s, %s, %s, %s, %s, %s)
ON CONFLICT (date, timespan, metric, rank)
DO UPDATE
SET route_id = EXCLUDED.route_id,
display_rank = EXCLUDED.display_rank,
value = EXCLUDED.value
'''
class RouteRankingManager(models.Manager):
def upsert(self, route_id, date, timespan, metric, rank, display_rank, value):
with connection.cursor() as cursor:
cursor.execute(UPSERT_ENTRY, (route_id, date, timespan, metric, rank, display_rank, value))
class RouteRanking(models.Model):
"""Denormalization of top N of each different kind of ranking."""
id = models.UUIDField(primary_key=True, default=uuid.uuid4)
route = models.ForeignKey('Route')
date = models.DateField(db_index=True)
timespan = models.PositiveSmallIntegerField(choices=MetricTimespan.choices())
metric = models.PositiveSmallIntegerField(choices=RouteMetric.choices())
rank = models.PositiveSmallIntegerField()
display_rank = models.PositiveSmallIntegerField()
value = models.FloatField()
class Meta:
index_together = (('date', 'timespan', 'metric'),)
unique_together = (('date', 'timespan', 'metric', 'rank'),)
| [
"django.db.models.FloatField",
"django.db.models.DateField",
"busshaming.enums.MetricTimespan.choices",
"django.db.models.ForeignKey",
"busshaming.enums.RouteMetric.choices",
"django.db.connection.cursor",
"django.db.models.PositiveSmallIntegerField",
"django.db.models.UUIDField"
] | [((842, 896), 'django.db.models.UUIDField', 'models.UUIDField', ([], {'primary_key': '(True)', 'default': 'uuid.uuid4'}), '(primary_key=True, default=uuid.uuid4)\n', (858, 896), False, 'from django.db import connection, models\n'), ((909, 935), 'django.db.models.ForeignKey', 'models.ForeignKey', (['"""Route"""'], {}), "('Route')\n", (926, 935), False, 'from django.db import connection, models\n'), ((947, 978), 'django.db.models.DateField', 'models.DateField', ([], {'db_index': '(True)'}), '(db_index=True)\n', (963, 978), False, 'from django.db import connection, models\n'), ((1149, 1183), 'django.db.models.PositiveSmallIntegerField', 'models.PositiveSmallIntegerField', ([], {}), '()\n', (1181, 1183), False, 'from django.db import connection, models\n'), ((1203, 1237), 'django.db.models.PositiveSmallIntegerField', 'models.PositiveSmallIntegerField', ([], {}), '()\n', (1235, 1237), False, 'from django.db import connection, models\n'), ((1250, 1269), 'django.db.models.FloatField', 'models.FloatField', ([], {}), '()\n', (1267, 1269), False, 'from django.db import connection, models\n'), ((592, 611), 'django.db.connection.cursor', 'connection.cursor', ([], {}), '()\n', (609, 611), False, 'from django.db import connection, models\n'), ((1035, 1059), 'busshaming.enums.MetricTimespan.choices', 'MetricTimespan.choices', ([], {}), '()\n', (1057, 1059), False, 'from busshaming.enums import RouteMetric, MetricTimespan\n'), ((1115, 1136), 'busshaming.enums.RouteMetric.choices', 'RouteMetric.choices', ([], {}), '()\n', (1134, 1136), False, 'from busshaming.enums import RouteMetric, MetricTimespan\n')] |
import pytest
from netCDF4 import Dataset
from globeplot.plotting import GlobePlot
def test_rendering():
ds = Dataset('./test/ssmi_f18_201605260450_s.nc')
lat = ds.variables['lat_l'][:]
lon = ds.variables['lon_l'][:]
values = ds.variables['tb37v'][:]
plot = GlobePlot(lats=lat, lons=lon, data=values)
plot.show(title='Tb37 SSMIS F18 2016-05-26 04:50', creator='Helge',
creator_addr='http://www.dmi.dk', code_link='http://www.dmi.dk')
| [
"globeplot.plotting.GlobePlot",
"netCDF4.Dataset"
] | [((117, 161), 'netCDF4.Dataset', 'Dataset', (['"""./test/ssmi_f18_201605260450_s.nc"""'], {}), "('./test/ssmi_f18_201605260450_s.nc')\n", (124, 161), False, 'from netCDF4 import Dataset\n'), ((282, 324), 'globeplot.plotting.GlobePlot', 'GlobePlot', ([], {'lats': 'lat', 'lons': 'lon', 'data': 'values'}), '(lats=lat, lons=lon, data=values)\n', (291, 324), False, 'from globeplot.plotting import GlobePlot\n')] |
import random, os, math
import numpy as np
import torch
import torch.distributed.rpc as rpc
import torch.nn.functional as F
def rpc_test(x):
print(f"rpc test successfully: {x}")
return 0
def call_method(method, rref, *args, **kwargs):
return method(rref.local_value(), *args, **kwargs)
def remote_method(method, rref, *args, **kwargs):
args = [method, rref] + list(args)
return rpc.rpc_sync(rref.owner(), call_method, args=args, kwargs=kwargs, timeout=0)
def remote_method_async(method, rref, *args, **kwargs):
args = [method, rref] + list(args)
return rpc.rpc_async(rref.owner(), call_method, args=args, kwargs=kwargs, timeout=0)
def remote_method_remote(method, rref, *args, **kwargs):
args = [method, rref] + list(args)
return rpc.remote(rref.owner(), call_method, args=args, kwargs=kwargs, timeout=0)
def get_batch(source, i, bptt):
seq_len = min(bptt, len(source) - 1 - i)
data = source[i:i+seq_len]
target = source[i+1:i+1+seq_len].view(-1)
return data, target
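# Worked example (added, not in the original file): for a tokenized source of length 6
# with i = 0 and bptt = 3, seq_len = 3, data = source[0:3] and target = source[1:4]
# flattened, i.e. the target sequence is the input shifted one step ahead.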
def get_accuracy(test_loader, model, device, rnn=False):
model.eval()
correct_sum = 0.0
val_loss = 0.0
# Use GPU to evaluate if possible
with torch.no_grad():
if rnn:
bptt = 35
ntokens = 33278
num_batches = 0
for i in range(0, test_loader.size(0) - 1, bptt):
num_batches += 1
inputs, target = get_batch(test_loader, i, bptt)
inputs = inputs.to(device)
target = target.to(device)
output = model(inputs)
output = output.view(-1, ntokens)
val_loss += len(inputs) * F.nll_loss(output, target).item()
val_loss /= (len(test_loader) - 1)
return math.exp(val_loss)
else:
for i, (data, target) in enumerate(test_loader):
data, target = data.to(device), target.to(device)
out = model(data)
pred = out.argmax(dim=1, keepdim=True)
pred, target = pred.to(device), target.to(device)
correct = pred.eq(target.view_as(pred)).sum().item()
correct_sum += correct
acc = correct_sum / len(test_loader.dataset)
return acc
class DataAggregation(object):
def __init__(self, rank):
self.rank = rank
self.weight = 0
self.data = {}
def __setitem__(self, key, value):
setattr(self, key, value)
def __getitem__(self, key):
return getattr(self, key)
def clear_data(self):
self.data = {}
class ExtraWorkLoader(object):
def __init__(self, dataloader):
self.dataloader = dataloader
self.dataiter = iter(dataloader)
def retrieve(self):
try:
data, target = next(self.dataiter)
except StopIteration:
self.dataiter = iter(self.dataloader)
data, target = next(self.dataiter)
return data, target
def adjust_learning_rate(base_lr, epoch):
return base_lr * 0.98 ** epoch
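# Example (added): with base_lr = 0.1 the rate decays geometrically per epoch,
# e.g. epoch 0 -> 0.1, epoch 1 -> 0.098, epoch 10 -> 0.1 * 0.98**10 ≈ 0.0817.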
def load_model(model_name, class_num):
if model_name == "mnistnet":
from Networks.MnistNet import MnistNet
return MnistNet(class_num=class_num)
if model_name == "resnet":
from Networks.Resnet import ResNet18
return ResNet18(class_num=class_num)
if model_name == "vgg":
from Networks.VGG import VGG
return VGG("VGG11star", class_num=class_num)
if model_name == "mlp":
from Networks.MLP import MLP
return MLP(784, 200, 10)
if model_name == "cnncifar":
from Networks.CNNCifar import CNNCifar
return CNNCifar(class_num)
if model_name == "transformer":
from Networks.Transformer import TransformerModel
ntokens = 33278
emsize = 200
nhead = 2
nhid = 200
nlayers = 2
dropout = 0.2
return TransformerModel(ntokens, emsize, nhead, nhid, nlayers, dropout)
def count_parameters(model):
total_params = 0
for name, parameter in model.named_parameters():
if not parameter.requires_grad: continue
param = parameter.numel()
total_params += param
print(f"Total Trainable Params: {total_params}")
if __name__ == "__main__":
from Networks.Transformer import TransformerModel
ntokens = 33278
emsize = 200
nhead = 2
nhid = 200
nlayers = 2
dropout = 0.2
    count_parameters(TransformerModel(ntokens, emsize, nhead, nhid, nlayers, dropout))
| [
"Networks.Transformer.TransformerModel",
"torch.nn.functional.nll_loss",
"Networks.VGG.VGG",
"Networks.Resnet.ResNet18",
"Networks.MLP.MLP",
"Networks.MnistNet.MnistNet",
"Networks.CNNCifar.CNNCifar",
"torch.no_grad",
"math.exp"
] | [((1196, 1211), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (1209, 1211), False, 'import torch\n'), ((3217, 3246), 'Networks.MnistNet.MnistNet', 'MnistNet', ([], {'class_num': 'class_num'}), '(class_num=class_num)\n', (3225, 3246), False, 'from Networks.MnistNet import MnistNet\n'), ((3338, 3367), 'Networks.Resnet.ResNet18', 'ResNet18', ([], {'class_num': 'class_num'}), '(class_num=class_num)\n', (3346, 3367), False, 'from Networks.Resnet import ResNet18\n'), ((3448, 3485), 'Networks.VGG.VGG', 'VGG', (['"""VGG11star"""'], {'class_num': 'class_num'}), "('VGG11star', class_num=class_num)\n", (3451, 3485), False, 'from Networks.VGG import VGG\n'), ((3566, 3583), 'Networks.MLP.MLP', 'MLP', (['(784)', '(200)', '(10)'], {}), '(784, 200, 10)\n', (3569, 3583), False, 'from Networks.MLP import MLP\n'), ((3679, 3698), 'Networks.CNNCifar.CNNCifar', 'CNNCifar', (['class_num'], {}), '(class_num)\n', (3687, 3698), False, 'from Networks.CNNCifar import CNNCifar\n'), ((3932, 3996), 'Networks.Transformer.TransformerModel', 'TransformerModel', (['ntokens', 'emsize', 'nhead', 'nhid', 'nlayers', 'dropout'], {}), '(ntokens, emsize, nhead, nhid, nlayers, dropout)\n', (3948, 3996), False, 'from Networks.Transformer import TransformerModel\n'), ((4472, 4536), 'Networks.Transformer.TransformerModel', 'TransformerModel', (['ntokens', 'emsize', 'nhead', 'nhid', 'nlayers', 'dropout'], {}), '(ntokens, emsize, nhead, nhid, nlayers, dropout)\n', (4488, 4536), False, 'from Networks.Transformer import TransformerModel\n'), ((1785, 1803), 'math.exp', 'math.exp', (['val_loss'], {}), '(val_loss)\n', (1793, 1803), False, 'import random, os, math\n'), ((1684, 1710), 'torch.nn.functional.nll_loss', 'F.nll_loss', (['output', 'target'], {}), '(output, target)\n', (1694, 1710), True, 'import torch.nn.functional as F\n')] |
import unittest
import time
import os
from datetime import datetime, timedelta
from unittest.mock import patch
from spaceone.core.unittest.result import print_data
from spaceone.core.unittest.runner import RichTestRunner
from spaceone.core import config
from spaceone.core.transaction import Transaction
from spaceone.core import utils
from spaceone.inventory.error import *
from spaceone.inventory.connector.storage_account import StorageAccountConnector
from spaceone.inventory.manager.storage_account_manager import StorageAccountManager
class TestVirtualNetworkManager(unittest.TestCase):
@classmethod
def setUpClass(cls):
config.init_conf(package='spaceone.inventory')
config_path = os.environ.get('TEST_CONFIG')
test_config = utils.load_yaml_from_file(config_path)
cls.schema = 'azure_client_secret'
cls.azure_credentials = test_config.get('AZURE_CREDENTIALS', {})
cls.storage_account_connector = StorageAccountConnector(transaction=Transaction(), config={}, secret_data=cls.azure_credentials)
cls.storage_account_manager = StorageAccountManager(Transaction())
super().setUpClass()
@classmethod
def tearDownClass(cls) -> None:
super().tearDownClass()
def test_collect_cloud_service(self, *args):
secret_data = self.azure_credentials
subscription_info = {
'subscription_id': '3ec64e1e-1ce8-4f2c-82a0-a7f6db0899ca',
'subscription_name': 'Azure subscription 1',
'tenant_id': '35f43e22-0c0b-4ff3-90aa-b2c04ef1054c'
}
params = {'options': {}, 'secret_data': secret_data, 'subscription_info': subscription_info, 'filter': {}}
application_gateways = self.storage_account_manager.collect_cloud_service(params)
for application_gateway in application_gateways:
print(application_gateway.to_primitive())
if __name__ == "__main__":
    unittest.main(testRunner=RichTestRunner)
| [
"spaceone.core.utils.load_yaml_from_file",
"spaceone.core.transaction.Transaction",
"os.environ.get",
"spaceone.core.config.init_conf",
"unittest.main"
] | [((1931, 1971), 'unittest.main', 'unittest.main', ([], {'testRunner': 'RichTestRunner'}), '(testRunner=RichTestRunner)\n', (1944, 1971), False, 'import unittest\n'), ((646, 692), 'spaceone.core.config.init_conf', 'config.init_conf', ([], {'package': '"""spaceone.inventory"""'}), "(package='spaceone.inventory')\n", (662, 692), False, 'from spaceone.core import config\n'), ((716, 745), 'os.environ.get', 'os.environ.get', (['"""TEST_CONFIG"""'], {}), "('TEST_CONFIG')\n", (730, 745), False, 'import os\n'), ((768, 806), 'spaceone.core.utils.load_yaml_from_file', 'utils.load_yaml_from_file', (['config_path'], {}), '(config_path)\n', (793, 806), False, 'from spaceone.core import utils\n'), ((1122, 1135), 'spaceone.core.transaction.Transaction', 'Transaction', ([], {}), '()\n', (1133, 1135), False, 'from spaceone.core.transaction import Transaction\n'), ((1001, 1014), 'spaceone.core.transaction.Transaction', 'Transaction', ([], {}), '()\n', (1012, 1014), False, 'from spaceone.core.transaction import Transaction\n')] |
import sqlite3
import os
class NewsDb:
def __init__(self, root):
path = os.path.join(root, 'news.db')
self.conn = sqlite3.connect(path)
self.create_table()
self.stmts = []
def __del__(self): self.conn.close()
def create_table(self):
cur = self.conn.cursor()
# cur.execute(self.__create_table_sql())
cur.executescript(self.__create_table_sql())
def __create_table_sql(self):
return '''
create table if not exists news(
id integer primary key,
published text,
url text,
title text,
                body text -- plain text of just the article body extracted from the URL
);
create index if not exists idx_news on
news(published desc, id desc, url, title);
create table if not exists sources(
id integer primary key,
                domain text, -- domain name of the URL
                name text, -- name of the news source
                created text -- registration timestamp (when the same domain appears more than once, show the newer entry)
);
create index if not exists idx_sources on
sources(domain, created desc, id desc, name);
'''
def __insert_sql(self):
return 'insert into news(published,url,title,body) values(?,?,?,?)'
def append_insert_stmt(self, published, url, title, body):
self.stmts.append((published, url, title, body))
# self.stmts.append(
# "insert into news(published,url,title,body) values("
# + "'" + published + "',"
# + "'" + url + "',"
# + "'" + title + "',"
# + "'" + body + "'"
# + ");");
def insert(self):
if 0 == len(self.stmts): return
try:
cur = self.conn.cursor()
cur.executemany(self.__insert_sql(), self.stmts)
self.conn.commit()
self.stmts.clear()
except:
import traceback
traceback.print_exc()
self.conn.rollback()
# finally: self.stmts.clear()
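# Minimal usage sketch (added, not part of the original module); the root directory and
# row values below are made up.
# db = NewsDb('/tmp')
# db.append_insert_stmt('2020-01-01T00:00:00', 'https://example.com/a', 'some title', 'body text')
# db.insert()   # executemany + commit; queued statements are cleared on success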
| [
"traceback.print_exc",
"os.path.join",
"sqlite3.connect"
] | [((85, 114), 'os.path.join', 'os.path.join', (['root', '"""news.db"""'], {}), "(root, 'news.db')\n", (97, 114), False, 'import os\n'), ((135, 156), 'sqlite3.connect', 'sqlite3.connect', (['path'], {}), '(path)\n', (150, 156), False, 'import sqlite3\n'), ((1784, 1805), 'traceback.print_exc', 'traceback.print_exc', ([], {}), '()\n', (1803, 1805), False, 'import traceback\n')] |
import json
import requests
import argparse
import configparser
import datetime
url = ''
TOKEN = ''
def read_config_file(config_file):
global url
global TOKEN
config = configparser.ConfigParser()
config.read(config_file)
url = config['URL']['post_entities_url']
TOKEN = config['TOKEN']['token']
def post_entities(input_file):
global url
now = datetime.datetime.now()
url = url + str(now.year) + '{num:02d}'.format(num=now.month) + '{num:02d}'.format(num=now.day)
print(url)
print(TOKEN)
headers = {
'Authorization': 'Bearer ' + TOKEN,
'Content-Type': 'application/json'
}
with open(input_file) as inf:
json_data = json.loads(inf.read())
for entity in json_data:
entities_data = {'doc': entity['entity'],
'id': entity['entity']}
try:
r = requests.post(url, data=json.dumps(entities_data), headers=headers)
print(r.content)
except Exception as ex:
print(ex)
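# Expected input file shape, inferred from the loop above (example values are made up):
# [
#     {"entity": "restaurant"},
#     {"entity": "cuisine"}
# ]
# Each entry is posted as {"doc": <entity>, "id": <entity>} to the dated entities URL.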
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description="Posts entities to Wit.ai",
usage="post_entities.py <config_file> <input_file> <app_name")
parser.add_argument('config_file', help='Config file')
parser.add_argument('input_file', help='Input file')
args = parser.parse_args()
read_config_file(args.config_file)
post_entities(input_file=args.input_file)
| [
"datetime.datetime.now",
"json.dumps",
"configparser.ConfigParser",
"argparse.ArgumentParser"
] | [((183, 210), 'configparser.ConfigParser', 'configparser.ConfigParser', ([], {}), '()\n', (208, 210), False, 'import configparser\n'), ((380, 403), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (401, 403), False, 'import datetime\n'), ((1074, 1205), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Posts entities to Wit.ai"""', 'usage': '"""post_entities.py <config_file> <input_file> <app_name"""'}), "(description='Posts entities to Wit.ai', usage=\n 'post_entities.py <config_file> <input_file> <app_name')\n", (1097, 1205), False, 'import argparse\n'), ((905, 930), 'json.dumps', 'json.dumps', (['entities_data'], {}), '(entities_data)\n', (915, 930), False, 'import json\n')] |
import logging
import pathlib
import os.path
from datetime import datetime
import DCMT4Wrapper
from DCMT4Lib.CommonTools.DataStructure import *
from DCMT4Lib.MT4Funs.MarketInfoFuns import MarketInfoFuns
from DCMT4Lib.MT4Funs.CheckupFuns import CheckupFuns
from DCMT4Lib.MT4Funs.PredefinedVariablesFuns import PredefinedVariablesFuns
from DCMT4Lib.MT4Funs.ChartOperationsFuns import ChartOperationsFuns
from DCMT4Lib.MT4Funs.TradeFuns import TradeFuns
from DCMT4Lib.MT4Funs.ConversionFuns import ConversionFuns
from DCMT4Lib.MT4Funs.AccountInformationFuns import AccountInformationFuns
from DCMT4Lib.MT4Funs.TechnicalIndicatorsFuns import TechnicalIndicatorsFuns
from DCMT4Lib.MT4Funs.DateAndTimeFuns import DateAndTimeFuns
from DCMT4Lib.MT4Funs.ObjectFuns import ObjectFuns
from DCMT4Lib.MT4Funs.TestFuns import TestFuns
from DCMT4Lib.CommonTools.CommonDefine import *
from DCMT4Lib.CommonTools.EnumDefine import *
from pathlib import Path
def PrepareLogger(isToFile, isToConsole, file_name_with_path):
# create formatter
formatter = logging.Formatter("%(asctime)s [%(levelname)s] [%(funcName)s:%(lineno)s:0x%(thread)X] - %(message)s")
logger = logging.getLogger("DCMT4Wrapper")
logger.setLevel(logging.DEBUG)
if isToFile:
# create file handler
now = datetime.now()
curTime = now.strftime("%Y%m%d_%H%M%S")
log_file_path = os.path.join(pathlib.Path(file_name_with_path).parent.resolve(), "Log_Trace_Msg_" + os.path.splitext(os.path.basename(file_name_with_path))[0] + "_PY")
Path(log_file_path).mkdir(parents=True, exist_ok=True)
log_file_name = os.path.join(log_file_path, os.path.basename(file_name_with_path) + "_" + curTime + ".log")
fh = logging.FileHandler(log_file_name)
fh.setLevel(logging.INFO)
fh.setFormatter(formatter)
# add the handlers to logger
logger.addHandler(fh)
if isToConsole:
# create console handler
ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)
ch.setFormatter(formatter)
# add the handlers to logger
logger.addHandler(ch)
return logger
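# Note (added): with isToFile=True, PrepareLogger writes into a "Log_Trace_Msg_<script>_PY"
# folder created next to file_name_with_path, using "<script file name>_<YYYYMMDD_HHMMSS>.log".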
class DCMT4WrapperBase(object):
def __init__(self, file_name_with_path):
self.__logger_base = PrepareLogger(True, False, file_name_with_path)
self.__fun_switcher = self.__Get_Fun_Switcher()
self.__wHD = None
self.__is_trace_msg = False
self.__market_info_funs = None
self.__checkup_funs = None
self.__predefined_variables_funs = None
self.__chart_operations_funs = None
self.__trade_funs = None
self.__conversion_funs = None
self.__technical_indicators_funs = None
self.__account_information_funs = None
self.__date_and_time_funs = None
self.__object_funs = None
self.__test_funs = None
self._param_keeper = ParamKeeper()
# override
def _OnInit(self):
raise NotImplementedError
# override
def _OnDeinit(self, reason):
raise NotImplementedError
# override
def _OnTick(self):
raise NotImplementedError
# override
def _OnTimer(self):
raise NotImplementedError
# override
def _OnTester(self):
raise NotImplementedError
# override
def _OnChartEvent(self, callerID, lparam, dparam, sparam):
raise NotImplementedError
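    # Subclasses are expected to override the _On* hooks above (_OnInit, _OnTick, etc.);
    # a strategy script would subclass DCMT4WrapperBase and call the protected wrappers
    # below (e.g. self._Bid(), self._OrderSend(...)) from inside those hooks.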
def _TestCommunication(self):
self.__test_funs.TestCommunication()
# Predefined Variables
def _Bars(self):
return self.__predefined_variables_funs.Bars()
def _Volume(self, index):
return self.__predefined_variables_funs.Volume(index)
def _Open(self, index):
return self.__predefined_variables_funs.Open(index)
def _Close(self, index):
return self.__predefined_variables_funs.Close(index)
def _Ask(self):
return self.__predefined_variables_funs.Ask()
def _Bid(self):
return self.__predefined_variables_funs.Bid()
def _Time(self, index):
return self.__predefined_variables_funs.Time(index)
def _Digits(self):
return self.__predefined_variables_funs.Digits()
def _High(self, index):
return self.__predefined_variables_funs.High(index)
def _Low(self, index):
return self.__predefined_variables_funs.Low(index)
def _Point(self):
return self.__predefined_variables_funs.Point()
# Date and Time
def _TimeCurrent(self, dt_struct = None):
return self.__date_and_time_funs.TimeCurrent(dt_struct)
def _TimeLocal(self, dt_struct = None):
return self.__date_and_time_funs.TimeLocal(dt_struct)
def _TimeGMT(self, dt_struct = None):
return self.__date_and_time_funs.TimeGMT(dt_struct)
def _TimeDaylightSavings(self):
return self.__date_and_time_funs.TimeDaylightSavings()
def _TimeGMTOffset(self):
return self.__date_and_time_funs.TimeGMTOffset()
def _TimeToStruct(self, date_time, dt_struct):
self.__date_and_time_funs.TimeToStruct(date_time, dt_struct)
def _StructToTime(self, dt_struct):
return self.__date_and_time_funs.StructToTime(dt_struct)
def _Day(self):
return self.__date_and_time_funs.Day()
def _DayOfWeek(self):
return self.__date_and_time_funs.DayOfWeek()
def _DayOfYear(self):
return self.__date_and_time_funs.DayOfYear()
def _Hour(self):
return self.__date_and_time_funs.Hour()
def _Minute(self):
return self.__date_and_time_funs.Minute()
def _Month(self):
return self.__date_and_time_funs.Month()
def _Seconds(self):
return self.__date_and_time_funs.Seconds()
def _Year(self):
return self.__date_and_time_funs.Year()
# chart operations
def _ChartApplyTemplate(self, chart_id, filename):
return self.__chart_operations_funs.ChartApplyTemplate(chart_id, filename)
def _ChartSaveTemplate(self, chart_id, filename):
return self.__chart_operations_funs.ChartSaveTemplate(chart_id, filename)
def _ChartWindowFind(self, chart_id, indicator_shortname):
return self.__chart_operations_funs.ChartWindowFind(chart_id, indicator_shortname)
def _ChartTimePriceToXY(self, chart_id, sub_window, time, price):
return self.__chart_operations_funs.ChartTimePriceToXY(chart_id, sub_window, time, price)
def _ChartXYToTimePrice(self, chart_id, x, y):
return self.__chart_operations_funs.ChartXYToTimePrice(chart_id, x, y)
def _ChartOpen(self, symbol, period):
return self.__chart_operations_funs.ChartOpen(symbol, period)
def _ChartFirst(self):
return self.__chart_operations_funs.ChartFirst()
def _ChartNext(self, chart_id):
return self.__chart_operations_funs.ChartNext(chart_id)
def _ChartClose(self, chart_id = 0):
return self.__chart_operations_funs.ChartClose(chart_id)
def _ChartSymbol(self, chart_id = 0):
return self.__chart_operations_funs.ChartSymbol(chart_id)
def _ChartPeriod(self, chart_id = 0):
return self.__chart_operations_funs.ChartPeriod(chart_id)
def _ChartRedraw(self, chart_id = 0):
return self.__chart_operations_funs.ChartRedraw(chart_id)
def _ChartSetDouble(self, chart_id, prop_id, value):
return self.__chart_operations_funs.ChartSetDouble(chart_id, prop_id, value)
def _ChartSetInteger(self, chart_id, prop_id, value, sub_window = -1):
return self.__chart_operations_funs.ChartSetInteger(chart_id, prop_id, value, sub_window)
def _ChartSetString(self, chart_id, prop_id, str_value):
return self.__chart_operations_funs.ChartSetString(chart_id, prop_id, str_value)
def _ChartGetDouble(self, chart_id, prop_id, sub_window = 0):
return self.__chart_operations_funs.ChartGetDouble(chart_id, prop_id, sub_window)
def _ChartGetInteger(self, chart_id, prop_id, sub_window = 0):
return self.__chart_operations_funs.ChartGetInteger(chart_id, prop_id, sub_window)
def _ChartGetString(self, chart_id, prop_id):
return self.__chart_operations_funs.ChartGetString(chart_id, prop_id)
def _ChartNavigate(self, chart_id, position, shift = 0):
return self.__chart_operations_funs.ChartNavigate(chart_id, position, shift)
def _ChartID(self):
return self.__chart_operations_funs.ChartID()
def _ChartIndicatorDelete(self, chart_id, sub_window, indicator_shortname):
return self.__chart_operations_funs.ChartIndicatorDelete(chart_id, sub_window, indicator_shortname)
def _ChartIndicatorName(self, chart_id, sub_window, index):
return self.__chart_operations_funs.ChartIndicatorName(chart_id, sub_window, index)
def _ChartIndicatorsTotal(self, chart_id, sub_window):
return self.__chart_operations_funs.ChartIndicatorsTotal(chart_id, sub_window)
def _ChartWindowOnDropped(self):
return self.__chart_operations_funs.ChartWindowOnDropped()
def _ChartPriceOnDropped(self):
return self.__chart_operations_funs.ChartPriceOnDropped()
def _ChartTimeOnDropped(self):
return self.__chart_operations_funs.ChartTimeOnDropped()
def _ChartXOnDropped(self):
return self.__chart_operations_funs.ChartXOnDropped()
def _ChartYOnDropped(self):
return self.__chart_operations_funs.ChartYOnDropped()
def _ChartSetSymbolPeriod(self, chart_id, symbol, period):
return self.__chart_operations_funs.ChartSetSymbolPeriod(chart_id, symbol, period)
def _ChartScreenShot(self, chart_id, filename, width, height, align_mode = ALIGN_Enum.ALIGN_RIGHT):
return self.__chart_operations_funs.ChartScreenShot(chart_id, filename, width, height, align_mode)
def _Period(self):
return self.__chart_operations_funs.Period()
def _Symbol(self):
return self.__chart_operations_funs.Symbol()
def _WindowBarsPerChart(self):
return self.__chart_operations_funs.WindowBarsPerChart()
def _WindowExpertName(self):
return self.__chart_operations_funs.WindowExpertName()
def _WindowFind(self, name):
return self.__chart_operations_funs.WindowFind(name)
def _WindowFirstVisibleBar(self):
return self.__chart_operations_funs.WindowFirstVisibleBar()
def _WindowHandle(self, symbol, timeframe):
return self.__chart_operations_funs.WindowHandle(symbol, timeframe)
def _WindowIsVisible(self, index):
return self.__chart_operations_funs.WindowIsVisible(index)
def _WindowOnDropped(self):
return self.__chart_operations_funs.WindowOnDropped()
def _WindowPriceMax(self, index = 0):
return self.__chart_operations_funs.WindowPriceMax(index)
def _WindowPriceMin(self, index = 0):
return self.__chart_operations_funs.WindowPriceMin(index)
def _WindowPriceOnDropped(self):
return self.__chart_operations_funs.WindowPriceOnDropped()
def _WindowRedraw(self):
return self.__chart_operations_funs.WindowRedraw()
def _WindowScreenShot(self, filename, size_x, size_y, start_bar = -1, chart_scale = -1, chart_mode = -1):
return self.__chart_operations_funs.WindowScreenShot(filename, size_x, size_y, start_bar, chart_scale, chart_mode)
def _WindowTimeOnDropped(self):
return self.__chart_operations_funs.WindowTimeOnDropped()
def _WindowsTotal(self):
return self.__chart_operations_funs.WindowsTotal()
def _WindowXOnDropped(self):
return self.__chart_operations_funs.WindowXOnDropped()
def _WindowYOnDropped(self):
return self.__chart_operations_funs.WindowYOnDropped()
# Checkup
def _GetLastError(self):
return self.__checkup_funs.GetLastError()
def _IsStopped(self):
return self.__checkup_funs.IsStopped()
def _UninitializeReason(self):
return self.__checkup_funs.UninitializeReason()
def _MQLInfo(self, property_id):
return self.__checkup_funs.MQLInfo(property_id)
def _MQLSetInteger(self, property_id, property_value):
return self.__checkup_funs.MQLSetInteger(property_id, property_value)
def _TerminalInfo(self, property_id):
return self.__checkup_funs.TerminalInfo(property_id)
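    # Note: _Period is defined twice in this class (see the chart-operations section above);
    # this later, checkup-based definition is the one Python keeps.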
def _Period(self):
return self.__checkup_funs.Period()
def _IsConnected(self):
return self.__checkup_funs.IsConnected()
def _IsDemo(self):
return self.__checkup_funs.IsDemo()
def _IsDllsAllowed(self):
return self.__checkup_funs.IsDllsAllowed()
def _IsExpertEnabled(self):
return self.__checkup_funs.IsExpertEnabled()
def _IsLibrariesAllowed(self):
return self.__checkup_funs.IsLibrariesAllowed()
def _IsOptimization(self):
return self.__checkup_funs.IsOptimization()
def _IsTesting(self):
return self.__checkup_funs.IsTesting()
def _IsTradeAllowed(self, symbol = "", tested_time = None):
return self.__checkup_funs.IsTradeAllowed(symbol, tested_time)
def _IsTradeContextBusy(self):
return self.__checkup_funs.IsTradeContextBusy()
def _IsVisualMode(self):
return self.__checkup_funs.IsVisualMode()
def _TerminalCompany(self):
return self.__checkup_funs.TerminalCompany()
def _TerminalName(self):
return self.__checkup_funs.TerminalName()
def _TerminalPath(self):
return self.__checkup_funs.TerminalPath()
# Market Info
def _MarketInfo(self, symbol, mode_enum):
return self.__market_info_funs.MarketInfo(symbol, mode_enum)
def _SymbolsTotal(self, selected):
return self.__market_info_funs.SymbolsTotal(selected)
def _SymbolName(self, pos, selected):
return self.__market_info_funs.SymbolName(pos, selected)
def _SymbolSelect(self, name, select):
return self.__market_info_funs.SymbolSelect(name, select)
def _SymbolInfo(self, name, prop_id):
return self.__market_info_funs.SymbolInfo(name, prop_id)
def _SymbolInfoTick(self, symbol, tick):
return self.__market_info_funs.SymbolInfoTick(symbol, tick)
def _SymbolInfoSessionQuote(self, name, day_of_week, session_index):
return self.__market_info_funs.SymbolInfoSessionQuote(name, day_of_week, session_index)
def _SymbolInfoSessionTrade(self, name, day_of_week, session_index):
return self.__market_info_funs.SymbolInfoSessionTrade(name, day_of_week, session_index)
# Trade
def _OrderClose(self, ticket, lots, price, slippage, arrow_color):
return self.__trade_funs.OrderClose(ticket, lots, price, slippage, arrow_color)
def _OrderCloseBy(self, ticket, opposite, arrow_color):
return self.__trade_funs.OrderCloseBy(ticket, opposite, arrow_color)
def _OrderClosePrice(self):
return self.__trade_funs.OrderClosePrice()
def _OrderCloseTime(self):
return self.__trade_funs.OrderCloseTime()
def _OrderComment(self):
return self.__trade_funs.OrderComment()
def _OrderCommission(self):
return self.__trade_funs.OrderCommission()
def _OrderDelete(self, ticket, arrow_color):
return self.__trade_funs.OrderDelete(ticket, arrow_color)
def _OrderExpiration(self):
return self.__trade_funs.OrderExpiration()
def _OrderLots(self):
return self.__trade_funs.OrderLots()
def _OrderMagicNumber(self):
return self.__trade_funs.OrderMagicNumber()
def _OrderModify(self, ticket, price, stoploss, takeprofit, expiration, arrow_color):
return self.__trade_funs.OrderModify(ticket, price, stoploss, takeprofit, expiration, arrow_color)
def _OrderOpenPrice(self):
return self.__trade_funs.OrderOpenPrice()
def _OrderOpenTime(self):
return self.__trade_funs.OrderOpenTime()
def _OrderPrint(self):
return self.__trade_funs.OrderPrint()
def _OrderProfit(self):
return self.__trade_funs.OrderProfit()
def _OrderSelect(self, index, select, pool = MODE_Enum.MODE_TRADES):
return self.__trade_funs.OrderSelect(index, select, pool)
def _OrderSend(self, symbol, cmd, volume, price, slippage, stoploss, takeprofit, comment = "", magic = 0, expiration = None, arrow_color = ""):
return self.__trade_funs.OrderSend(symbol, cmd, volume, price, slippage, stoploss, takeprofit, comment, magic, expiration, arrow_color)
def _OrdersHistoryTotal(self):
return self.__trade_funs.OrdersHistoryTotal()
def _OrderStopLoss(self):
return self.__trade_funs.OrderStopLoss()
def _OrdersTotal(self):
return self.__trade_funs.OrdersTotal()
def _OrderSwap(self):
return self.__trade_funs.OrderSwap()
def _OrderSymbol(self):
return self.__trade_funs.OrderSymbol()
def _OrderTakeProfit(self):
return self.__trade_funs.OrderTakeProfit()
def _OrderTicket(self):
return self.__trade_funs.OrderTicket()
def _OrderType(self):
return self.__trade_funs.OrderType()
# Conversion
def _NormalizeDouble(self, value, digits):
return self.__conversion_funs.NormalizeDouble(value, digits)
# Accouont Information
def _AccountInfo(self, account_enum):
return self.__account_information_funs.AccountInfo(account_enum)
def _AccountBalance(self):
return self.__account_information_funs.AccountBalance()
def _AccountCredit(self):
return self.__account_information_funs.AccountCredit()
def _AccountCompany(self):
return self.__account_information_funs.AccountCompany()
def _AccountCurrency(self):
return self.__account_information_funs.AccountCurrency()
def _AccountEquity(self):
return self.__account_information_funs.AccountEquity()
def _AccountFreeMargin(self):
return self.__account_information_funs.AccountFreeMargin()
def _AccountFreeMarginCheck(self, symbol, cmd, volume):
return self.__account_information_funs.AccountFreeMarginCheck(symbol, cmd, volume)
def _AccountFreeMarginMode(self):
return self.__account_information_funs.AccountFreeMarginMode()
def _AccountLeverage(self):
return self.__account_information_funs.AccountLeverage()
def _AccountMargin(self):
return self.__account_information_funs.AccountMargin()
def _AccountName(self):
return self.__account_information_funs.AccountName()
def _AccountNumber(self):
return self.__account_information_funs.AccountNumber()
def _AccountProfit(self):
return self.__account_information_funs.AccountProfit()
def _AccountServer(self):
return self.__account_information_funs.AccountServer()
def _AccountStopoutLevel(self):
return self.__account_information_funs.AccountStopoutLevel()
def _AccountStopoutMode(self):
return self.__account_information_funs.AccountStopoutMode()
def _AccountEnumCheck(self):
return self.__account_information_funs.AccountEnumCheck()
# Object Funs
def _ObjectCreate(self, object_name, object_type, sub_window, time1, price1, time2=0, price2=0, time3=0, price3=0):
return self.__object_funs.ObjectCreate(object_name, object_type, sub_window, time1, price1, time2, price2, time3, price3)
def _ObjectName(self, object_index):
return self.__object_funs.ObjectName(object_index)
def _ObjectDelete(self, object_name, chart_id = -1):
return self.__object_funs.ObjectDelete(object_name, chart_id)
def _ObjectsDeleteAll(self, chart_id = -1, sub_window = -1, object_type = OBJ_Enum.OBJ_EMPTY, prefix = ""):
return self.__object_funs.ObjectsDeleteAll(chart_id, sub_window, object_type, prefix)
def _ObjectFind(self, object_name, chart_id = -1):
return self.__object_funs.ObjectFind(object_name, chart_id)
def _ObjectGetTimeByValue(self, chart_id, object_name, value, line_id = 0):
return self.__object_funs.ObjectGetTimeByValue(chart_id, object_name, value, line_id)
def _ObjectGetValueByTime(self, chart_id, object_name, time, line_id = 0):
return self.__object_funs.ObjectGetValueByTime(chart_id, object_name, time, line_id)
def _ObjectMove(self, object_name, point_index, time, price):
return self.__object_funs.ObjectMove(object_name, point_index, time, price)
def _ObjectsTotal(self, chart_id = -1, sub_window = -1, obj_type = OBJ_Enum.OBJ_EMPTY):
return self.__object_funs.ObjectsTotal(chart_id, sub_window, obj_type)
def _ObjectGetDouble(self, chart_id, object_name, prop_id, prop_modifier = OBJ_Enum.OBJ_EMPTY):
return self.__object_funs.ObjectGetDouble(chart_id, object_name, prop_id, prop_modifier)
def _ObjectGetInteger(self, chart_id, object_name, prop_id, prop_modifier = OBJ_Enum.OBJ_EMPTY):
return self.__object_funs.ObjectGetInteger(chart_id, object_name, prop_id, prop_modifier)
def _ObjectGetString(self, chart_id, object_name, prop_id, prop_modifier = OBJ_Enum.OBJ_EMPTY):
return self.__object_funs.ObjectGetString(chart_id, object_name, prop_id, prop_modifier)
def _ObjectSetDouble(self, chart_id, object_name, prop_id, prop_value, prop_modifier = OBJ_Enum.OBJ_EMPTY):
return self.__object_funs.ObjectSetDouble(chart_id, object_name, prop_id, prop_value, prop_modifier)
def _ObjectSetInteger(self, chart_id, object_name, prop_id, prop_value, prop_modifier = OBJ_Enum.OBJ_EMPTY):
return self.__object_funs.ObjectSetInteger(chart_id, object_name, prop_id, prop_value, prop_modifier)
def _ObjectSetString(self, chart_id, object_name, prop_id, prop_value, prop_modifier = OBJ_Enum.OBJ_EMPTY):
return self.__object_funs.ObjectSetString(chart_id, object_name, prop_id, prop_value, prop_modifier)
def _TextSetFont(self, name, size, flags = FW_Enum.FW_DONTCARE, orientation = 0):
return self.__object_funs.TextSetFont(name, size, flags, orientation)
def _TextOut(self, text, x, y, anchor, width, height, color, color_format):
return self.__object_funs.TextOut(text, x, y, anchor, width, height, color, color_format)
def _TextGetSize(self, text):
return self.__object_funs.TextGetSize(text)
def _ObjectDescription(self, object_name):
return self.__object_funs.ObjectDescription(object_name)
def _ObjectGet(self, object_name, index):
return self.__object_funs.ObjectGet(object_name, index)
def _ObjectGetFiboDescription(self, object_name, index):
return self.__object_funs.ObjectGetFiboDescription(object_name, index)
def _ObjectGetShiftByValue(self, object_name, value):
return self.__object_funs.ObjectGetShiftByValue(object_name, value)
def _ObjectGetValueByShift(self, object_name, shift):
return self.__object_funs.ObjectGetValueByShift(object_name, shift)
def _ObjectSet(self, object_name, index, value):
return self.__object_funs.ObjectSet(object_name, index, value)
def _ObjectSetFiboDescription(self, object_name, index, text):
return self.__object_funs.ObjectSetFiboDescription(object_name, index, text)
def _ObjectSetText(self, object_name, text, font_size = 0, font_name = "", text_color = ""):
return self.__object_funs.ObjectSetText(object_name, text, font_size, font_name, text_color)
def _ObjectType(self, object_name):
return self.__object_funs.ObjectType(object_name)
# Technical Indicators
def _iMA(self, symbol, timeframe, ma_period, ma_shift, ma_method, applied_price, shift):
return self.__technical_indicators_funs.iMA(symbol, timeframe, ma_period, ma_shift, ma_method, applied_price, shift)
def __OnInit(self, msg):
self._param_keeper = self.__test_funs.GetInitParams()
ret = self._OnInit()
DCMT4Wrapper.Create_Msg(self.__wHD, 1)
DCMT4Wrapper.Set_Int_To_Msg(self.__wHD, ret.value)
return ret
def __OnDeinit(self, msg):
self._OnDeinit(msg[1])
def __OnTick(self, msg):
self._OnTick()
def __OnTimer(self, msg):
self._OnTimer()
def __OnTester(self, msg):
return self._OnTester()
def __OnChartEvent(self, msg):
lparam = msg[2] << 32
lparam = lparam | msg[3]
self._OnChartEvent(msg[1], lparam, msg[4], msg[5])
def __EndPY(self):
pass
def __Send_Msg_To_MT4(self, function_in_action):
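        """Message pump between Python and the MT4 wrapper.

        Sends `function_in_action`, reads the next requested action from the wrapper,
        dispatches it to the matching __On* handler, and keeps replying with EndMT4
        until OnDeinit (acknowledged without waiting) or EndPY is received.
        """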
msg = None
next_action = Fun_Enum.EndPY
while Fun_Enum.OnDeinit != next_action:
            if self.__is_trace_msg: self.__logger_base.info(f"""sending action: {function_in_action}""")
msg = DCMT4Wrapper.Send_Msg_To_MT4(self.__wHD, function_in_action.value)
next_action = Fun_Enum(msg[0])
            if self.__is_trace_msg: self.__logger_base.info(f"""receiving action: {next_action}""")
if Fun_Enum.EndPY == next_action:
break
            func = self.__fun_switcher.get(next_action.value, lambda msg: self.__EndPY())
func(msg)
function_in_action = Fun_Enum.EndMT4
if Fun_Enum.OnDeinit == next_action:
            if self.__is_trace_msg: self.__logger_base.info(f"""sending action: {function_in_action}""")
msg = DCMT4Wrapper.Send_Msg_To_MT4_No_Wait(self.__wHD, function_in_action.value)
return msg
def __Get_Fun_Switcher(self):
return {
1: self.__OnInit
, 2: self.__OnTick
, 3: self.__OnTester
, 4: self.__OnChartEvent
, 5: self.__OnTimer
, 6: self.__OnDeinit
, 999999: self.__EndPY
}
def Run(self, childFileName):
self.__wHD = DCMT4Wrapper.Get_Handler()
self.__market_info_funs = MarketInfoFuns(self.__logger_base, self.__wHD, self.__Send_Msg_To_MT4)
self.__checkup_funs = CheckupFuns(self.__logger_base, self.__wHD, self.__Send_Msg_To_MT4)
self.__predefined_variables_funs = PredefinedVariablesFuns(self.__logger_base, self.__wHD, self.__Send_Msg_To_MT4)
self.__chart_operations_funs = ChartOperationsFuns(self.__logger_base, self.__wHD, self.__Send_Msg_To_MT4)
self.__trade_funs = TradeFuns(self.__logger_base, self.__wHD, self.__Send_Msg_To_MT4)
self.__conversion_funs = ConversionFuns(self.__logger_base, self.__wHD, self.__Send_Msg_To_MT4)
self.__technical_indicators_funs = TechnicalIndicatorsFuns(self.__logger_base, self.__wHD, self.__Send_Msg_To_MT4)
self.__account_information_funs = AccountInformationFuns(self.__logger_base, self.__wHD, self.__Send_Msg_To_MT4)
self.__date_and_time_funs = DateAndTimeFuns(self.__logger_base, self.__wHD, self.__Send_Msg_To_MT4)
self.__object_funs = ObjectFuns(self.__logger_base, self.__wHD, self.__Send_Msg_To_MT4)
self.__test_funs = TestFuns(self.__logger_base, self.__wHD, self.__Send_Msg_To_MT4)
self.__is_trace_msg = DCMT4Wrapper.Is_Trace_Msg(self.__wHD)
self.__Send_Msg_To_MT4(Fun_Enum.OnInit)
| [
"logging.getLogger",
"logging.StreamHandler",
"DCMT4Lib.MT4Funs.PredefinedVariablesFuns.PredefinedVariablesFuns",
"DCMT4Lib.MT4Funs.AccountInformationFuns.AccountInformationFuns",
"DCMT4Lib.MT4Funs.TestFuns.TestFuns",
"DCMT4Lib.MT4Funs.DateAndTimeFuns.DateAndTimeFuns",
"DCMT4Wrapper.Send_Msg_To_MT4_No_Wait",
"DCMT4Lib.MT4Funs.MarketInfoFuns.MarketInfoFuns",
"DCMT4Wrapper.Set_Int_To_Msg",
"DCMT4Lib.MT4Funs.ConversionFuns.ConversionFuns",
"pathlib.Path",
"DCMT4Lib.MT4Funs.TradeFuns.TradeFuns",
"DCMT4Wrapper.Create_Msg",
"DCMT4Lib.MT4Funs.ChartOperationsFuns.ChartOperationsFuns",
"DCMT4Lib.MT4Funs.ObjectFuns.ObjectFuns",
"logging.FileHandler",
"DCMT4Wrapper.Send_Msg_To_MT4",
"DCMT4Lib.MT4Funs.CheckupFuns.CheckupFuns",
"DCMT4Wrapper.Is_Trace_Msg",
"DCMT4Lib.MT4Funs.TechnicalIndicatorsFuns.TechnicalIndicatorsFuns",
"logging.Formatter",
"datetime.datetime.now",
"DCMT4Wrapper.Get_Handler"
] | [((1043, 1154), 'logging.Formatter', 'logging.Formatter', (['"""%(asctime)s [%(levelname)s] [%(funcName)s:%(lineno)s:0x%(thread)X] - %(message)s"""'], {}), "(\n '%(asctime)s [%(levelname)s] [%(funcName)s:%(lineno)s:0x%(thread)X] - %(message)s'\n )\n", (1060, 1154), False, 'import logging\n'), ((1158, 1191), 'logging.getLogger', 'logging.getLogger', (['"""DCMT4Wrapper"""'], {}), "('DCMT4Wrapper')\n", (1175, 1191), False, 'import logging\n'), ((1289, 1303), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (1301, 1303), False, 'from datetime import datetime\n'), ((1721, 1755), 'logging.FileHandler', 'logging.FileHandler', (['log_file_name'], {}), '(log_file_name)\n', (1740, 1755), False, 'import logging\n'), ((1959, 1982), 'logging.StreamHandler', 'logging.StreamHandler', ([], {}), '()\n', (1980, 1982), False, 'import logging\n'), ((23800, 23838), 'DCMT4Wrapper.Create_Msg', 'DCMT4Wrapper.Create_Msg', (['self.__wHD', '(1)'], {}), '(self.__wHD, 1)\n', (23823, 23838), False, 'import DCMT4Wrapper\n'), ((23847, 23897), 'DCMT4Wrapper.Set_Int_To_Msg', 'DCMT4Wrapper.Set_Int_To_Msg', (['self.__wHD', 'ret.value'], {}), '(self.__wHD, ret.value)\n', (23874, 23897), False, 'import DCMT4Wrapper\n'), ((25671, 25697), 'DCMT4Wrapper.Get_Handler', 'DCMT4Wrapper.Get_Handler', ([], {}), '()\n', (25695, 25697), False, 'import DCMT4Wrapper\n'), ((25732, 25802), 'DCMT4Lib.MT4Funs.MarketInfoFuns.MarketInfoFuns', 'MarketInfoFuns', (['self.__logger_base', 'self.__wHD', 'self.__Send_Msg_To_MT4'], {}), '(self.__logger_base, self.__wHD, self.__Send_Msg_To_MT4)\n', (25746, 25802), False, 'from DCMT4Lib.MT4Funs.MarketInfoFuns import MarketInfoFuns\n'), ((25833, 25900), 'DCMT4Lib.MT4Funs.CheckupFuns.CheckupFuns', 'CheckupFuns', (['self.__logger_base', 'self.__wHD', 'self.__Send_Msg_To_MT4'], {}), '(self.__logger_base, self.__wHD, self.__Send_Msg_To_MT4)\n', (25844, 25900), False, 'from DCMT4Lib.MT4Funs.CheckupFuns import CheckupFuns\n'), ((25944, 26023), 'DCMT4Lib.MT4Funs.PredefinedVariablesFuns.PredefinedVariablesFuns', 'PredefinedVariablesFuns', (['self.__logger_base', 'self.__wHD', 'self.__Send_Msg_To_MT4'], {}), '(self.__logger_base, self.__wHD, self.__Send_Msg_To_MT4)\n', (25967, 26023), False, 'from DCMT4Lib.MT4Funs.PredefinedVariablesFuns import PredefinedVariablesFuns\n'), ((26063, 26138), 'DCMT4Lib.MT4Funs.ChartOperationsFuns.ChartOperationsFuns', 'ChartOperationsFuns', (['self.__logger_base', 'self.__wHD', 'self.__Send_Msg_To_MT4'], {}), '(self.__logger_base, self.__wHD, self.__Send_Msg_To_MT4)\n', (26082, 26138), False, 'from DCMT4Lib.MT4Funs.ChartOperationsFuns import ChartOperationsFuns\n'), ((26167, 26232), 'DCMT4Lib.MT4Funs.TradeFuns.TradeFuns', 'TradeFuns', (['self.__logger_base', 'self.__wHD', 'self.__Send_Msg_To_MT4'], {}), '(self.__logger_base, self.__wHD, self.__Send_Msg_To_MT4)\n', (26176, 26232), False, 'from DCMT4Lib.MT4Funs.TradeFuns import TradeFuns\n'), ((26266, 26336), 'DCMT4Lib.MT4Funs.ConversionFuns.ConversionFuns', 'ConversionFuns', (['self.__logger_base', 'self.__wHD', 'self.__Send_Msg_To_MT4'], {}), '(self.__logger_base, self.__wHD, self.__Send_Msg_To_MT4)\n', (26280, 26336), False, 'from DCMT4Lib.MT4Funs.ConversionFuns import ConversionFuns\n'), ((26380, 26459), 'DCMT4Lib.MT4Funs.TechnicalIndicatorsFuns.TechnicalIndicatorsFuns', 'TechnicalIndicatorsFuns', (['self.__logger_base', 'self.__wHD', 'self.__Send_Msg_To_MT4'], {}), '(self.__logger_base, self.__wHD, self.__Send_Msg_To_MT4)\n', (26403, 26459), False, 'from DCMT4Lib.MT4Funs.TechnicalIndicatorsFuns import 
TechnicalIndicatorsFuns\n'), ((26502, 26580), 'DCMT4Lib.MT4Funs.AccountInformationFuns.AccountInformationFuns', 'AccountInformationFuns', (['self.__logger_base', 'self.__wHD', 'self.__Send_Msg_To_MT4'], {}), '(self.__logger_base, self.__wHD, self.__Send_Msg_To_MT4)\n', (26524, 26580), False, 'from DCMT4Lib.MT4Funs.AccountInformationFuns import AccountInformationFuns\n'), ((26617, 26688), 'DCMT4Lib.MT4Funs.DateAndTimeFuns.DateAndTimeFuns', 'DateAndTimeFuns', (['self.__logger_base', 'self.__wHD', 'self.__Send_Msg_To_MT4'], {}), '(self.__logger_base, self.__wHD, self.__Send_Msg_To_MT4)\n', (26632, 26688), False, 'from DCMT4Lib.MT4Funs.DateAndTimeFuns import DateAndTimeFuns\n'), ((26718, 26784), 'DCMT4Lib.MT4Funs.ObjectFuns.ObjectFuns', 'ObjectFuns', (['self.__logger_base', 'self.__wHD', 'self.__Send_Msg_To_MT4'], {}), '(self.__logger_base, self.__wHD, self.__Send_Msg_To_MT4)\n', (26728, 26784), False, 'from DCMT4Lib.MT4Funs.ObjectFuns import ObjectFuns\n'), ((26812, 26876), 'DCMT4Lib.MT4Funs.TestFuns.TestFuns', 'TestFuns', (['self.__logger_base', 'self.__wHD', 'self.__Send_Msg_To_MT4'], {}), '(self.__logger_base, self.__wHD, self.__Send_Msg_To_MT4)\n', (26820, 26876), False, 'from DCMT4Lib.MT4Funs.TestFuns import TestFuns\n'), ((26907, 26944), 'DCMT4Wrapper.Is_Trace_Msg', 'DCMT4Wrapper.Is_Trace_Msg', (['self.__wHD'], {}), '(self.__wHD)\n', (26932, 26944), False, 'import DCMT4Wrapper\n'), ((24627, 24693), 'DCMT4Wrapper.Send_Msg_To_MT4', 'DCMT4Wrapper.Send_Msg_To_MT4', (['self.__wHD', 'function_in_action.value'], {}), '(self.__wHD, function_in_action.value)\n', (24655, 24693), False, 'import DCMT4Wrapper\n'), ((25224, 25298), 'DCMT4Wrapper.Send_Msg_To_MT4_No_Wait', 'DCMT4Wrapper.Send_Msg_To_MT4_No_Wait', (['self.__wHD', 'function_in_action.value'], {}), '(self.__wHD, function_in_action.value)\n', (25260, 25298), False, 'import DCMT4Wrapper\n'), ((1536, 1555), 'pathlib.Path', 'Path', (['log_file_path'], {}), '(log_file_path)\n', (1540, 1555), False, 'from pathlib import Path\n'), ((1389, 1422), 'pathlib.Path', 'pathlib.Path', (['file_name_with_path'], {}), '(file_name_with_path)\n', (1401, 1422), False, 'import pathlib\n')] |
from setuptools import setup, find_packages
from cqupt import __generation__
with open('README.md', 'r') as f:
long_description = f.read()
setup(
name='cqupt',
version=__generation__,
description='CQUPT Piper is a command line tool to get info from jwzx.cqupt.edu.cn',
long_description=long_description,
long_description_content_type="text/markdown",
author='<NAME>',
url='https://github.com/mivinci/cqupt-piper',
packages=find_packages(),
install_requires=[
'PrettyTable',
'Pillow',
'requests',
'bs4'
],
python_requires='>=3.5',
entry_points={
'console_scripts': [
'cqupt = cqupt.piper:cli'
]
}
)
| [
"setuptools.find_packages"
] | [((461, 476), 'setuptools.find_packages', 'find_packages', ([], {}), '()\n', (474, 476), False, 'from setuptools import setup, find_packages\n')] |
import os
import pypospack.utils
from sklearn.mixture import GaussianMixture
from pypospack.pyposmat.data import PyposmatDataFile
from pypospack.pyposmat.data import PyposmatConfigurationFile
if __name__ == "__main__":
# pypospack root directory
pypospack_root_dir = pypospack.utils.get_pypospack_root_directory()
config_fn = os.path.join(
pypospack_root_dir,
'data','Si__sw__data','pareto_optimization_unconstrained',
'pyposmat.config.in')
data_fn = os.path.join(
pypospack_root_dir,
'data','Si__sw__data','pareto_optimization_unconstrained',
'pyposmat.kde.20.out')
ref_config_fn = os.path.join(
pypospack_root_dir,
'data','Si__sw__data','reference_potentials',
'pyposmat.config.in')
ref_data_fn = os.path.join(
pypospack_root_dir,
'data','Si__sw__data','reference_potentials',
'pyposmat.kde.1.out')
o_config = PyposmatConfigurationFile()
o_config.read(filename=config_fn)
o_data = PyposmatDataFile()
o_data.read(filename=data_fn)
gmm_parameters = GaussianMixture(
n_components=10,
covariance_type='full',
random_state=0
).fit(o_data.df[o_config.parameter_names])
o_data.df['cluster_parameter_id'] = gmm_parameters.predict(o_data.df[o_config.parameter_names])
gmm_qoi = GaussianMixture(
n_components=10,
covariance_type='full',
random_state=0
).fit(o_data.df[o_config.qoi_names])
o_data.df['cluster_qoi_id'] = gmm_qoi.predict(o_data.df[o_config.qoi_names])
bijective_matrix = []
for parameter_cluster_id in range(10):
row = []
for qoi_cluster_id in range(10):
row.append(len(o_data.df[ (o_data.df['cluster_parameter_id']==parameter_cluster_id) & (o_data.df['cluster_qoi_id']==qoi_cluster_id)]))
bijective_matrix.append(row)
for row in bijective_matrix:
print(row)
print(80*'-')
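    # Note: the nested loops in this script just build 10x10 contingency tables between
    # the parameter-space and QoI-space cluster labels. A hypothetical pandas equivalent
    # (pandas is not imported here; shown for illustration only) would be:
    #   import pandas as pd
    #   pd.crosstab(o_data.df['cluster_parameter_id'], o_data.df['cluster_qoi_id'])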
# now the other way
bijective_matrix = []
for qoi_cluster_id in range(10):
row = []
for parameter_cluster_id in range(10):
row.append(len(o_data.df[ (o_data.df['cluster_parameter_id']==parameter_cluster_id) & (o_data.df['cluster_qoi_id']==qoi_cluster_id)]))
bijective_matrix.append(row)
for row in bijective_matrix:
print(row)
| [
"pypospack.pyposmat.data.PyposmatConfigurationFile",
"sklearn.mixture.GaussianMixture",
"os.path.join",
"pypospack.pyposmat.data.PyposmatDataFile"
] | [((341, 460), 'os.path.join', 'os.path.join', (['pypospack_root_dir', '"""data"""', '"""Si__sw__data"""', '"""pareto_optimization_unconstrained"""', '"""pyposmat.config.in"""'], {}), "(pypospack_root_dir, 'data', 'Si__sw__data',\n 'pareto_optimization_unconstrained', 'pyposmat.config.in')\n", (353, 460), False, 'import os\n'), ((506, 626), 'os.path.join', 'os.path.join', (['pypospack_root_dir', '"""data"""', '"""Si__sw__data"""', '"""pareto_optimization_unconstrained"""', '"""pyposmat.kde.20.out"""'], {}), "(pypospack_root_dir, 'data', 'Si__sw__data',\n 'pareto_optimization_unconstrained', 'pyposmat.kde.20.out')\n", (518, 626), False, 'import os\n'), ((678, 784), 'os.path.join', 'os.path.join', (['pypospack_root_dir', '"""data"""', '"""Si__sw__data"""', '"""reference_potentials"""', '"""pyposmat.config.in"""'], {}), "(pypospack_root_dir, 'data', 'Si__sw__data',\n 'reference_potentials', 'pyposmat.config.in')\n", (690, 784), False, 'import os\n'), ((834, 940), 'os.path.join', 'os.path.join', (['pypospack_root_dir', '"""data"""', '"""Si__sw__data"""', '"""reference_potentials"""', '"""pyposmat.kde.1.out"""'], {}), "(pypospack_root_dir, 'data', 'Si__sw__data',\n 'reference_potentials', 'pyposmat.kde.1.out')\n", (846, 940), False, 'import os\n'), ((988, 1015), 'pypospack.pyposmat.data.PyposmatConfigurationFile', 'PyposmatConfigurationFile', ([], {}), '()\n', (1013, 1015), False, 'from pypospack.pyposmat.data import PyposmatConfigurationFile\n'), ((1068, 1086), 'pypospack.pyposmat.data.PyposmatDataFile', 'PyposmatDataFile', ([], {}), '()\n', (1084, 1086), False, 'from pypospack.pyposmat.data import PyposmatDataFile\n'), ((1143, 1215), 'sklearn.mixture.GaussianMixture', 'GaussianMixture', ([], {'n_components': '(10)', 'covariance_type': '"""full"""', 'random_state': '(0)'}), "(n_components=10, covariance_type='full', random_state=0)\n", (1158, 1215), False, 'from sklearn.mixture import GaussianMixture\n'), ((1418, 1490), 'sklearn.mixture.GaussianMixture', 'GaussianMixture', ([], {'n_components': '(10)', 'covariance_type': '"""full"""', 'random_state': '(0)'}), "(n_components=10, covariance_type='full', random_state=0)\n", (1433, 1490), False, 'from sklearn.mixture import GaussianMixture\n')] |
## formbuilder
from zope.interface import implementer
from nevow import rend
from nevow import loaders
from nevow import tags as T
from nevow import util
from formless import annotate
from formless import webform
from formless import configurable
from twisted.python import reflect
class BuilderCore(configurable.Configurable):
def __init__(self):
configurable.Configurable.__init__(self, None)
self.formElements = []
def getBindingNames(self, ctx):
return ['form']
def bind_form(self, ctx):
return annotate.MethodBinding(
'action',
annotate.Method(arguments=self.formElements))
def action(self, **kw):
print("ACTION!", kw)
def addElement(self, name, type):
self.formElements.append(
annotate.Argument(name, type()))
allTypes = [annotate.String, annotate.Text, annotate.Integer, annotate.Real, annotate.Password]
typeChoice = annotate.Choice(choices=allTypes, valueToKey=reflect.qual, keyToValue=reflect.namedAny, stringify=lambda x: x.__name__)
class IFormBuilder(annotate.TypedInterface):
def addElement(name=annotate.String(required=True), type=typeChoice):
"""Add Element
Add an element to this form.
"""
pass
addElement = annotate.autocallable(addElement)
def clearForm():
"""Clear Form
Clear this form.
"""
clearForm = annotate.autocallable(clearForm)
@implementer(IFormBuilder)
class FormBuilder(rend.Page):
addSlash = True
def __init__(self):
rend.Page.__init__(self)
self.clearForm()
def configurable_formBuilder(self, ctx):
return configurable.TypedInterfaceConfigurable(self)
def configurable_dynamicForm(self, ctx):
return self.builderCore
def addElement(self, name, type):
self.builderCore.addElement(name, type)
def clearForm(self):
self.builderCore = BuilderCore()
docFactory = loaders.stan(T.html[
T.head[
T.title["Form builder!"]],
T.style(type="text/css")[
open(util.resource_filename('formless', 'freeform-default.css')).read()],
T.body[
T.h1["Welcome to form builder"],
webform.renderForms('formBuilder'),
T.h2["Here is your form:"],
webform.renderForms('dynamicForm')]])
## Startup glue
from nevow import appserver
from twisted.application import service
from twisted.application import internet
application = service.Application('formbuilder')
internet.TCPServer(8080, appserver.NevowSite(FormBuilder())).setServiceParent(application)
| [
"formless.annotate.autocallable",
"formless.annotate.Method",
"twisted.application.service.Application",
"nevow.rend.Page.__init__",
"nevow.util.resource_filename",
"nevow.tags.style",
"formless.annotate.String",
"zope.interface.implementer",
"formless.configurable.TypedInterfaceConfigurable",
"formless.annotate.Choice",
"formless.webform.renderForms",
"formless.configurable.Configurable.__init__"
] | [((942, 1066), 'formless.annotate.Choice', 'annotate.Choice', ([], {'choices': 'allTypes', 'valueToKey': 'reflect.qual', 'keyToValue': 'reflect.namedAny', 'stringify': '(lambda x: x.__name__)'}), '(choices=allTypes, valueToKey=reflect.qual, keyToValue=\n reflect.namedAny, stringify=lambda x: x.__name__)\n', (957, 1066), False, 'from formless import annotate\n'), ((1478, 1503), 'zope.interface.implementer', 'implementer', (['IFormBuilder'], {}), '(IFormBuilder)\n', (1489, 1503), False, 'from zope.interface import implementer\n'), ((2503, 2537), 'twisted.application.service.Application', 'service.Application', (['"""formbuilder"""'], {}), "('formbuilder')\n", (2522, 2537), False, 'from twisted.application import service\n'), ((1294, 1327), 'formless.annotate.autocallable', 'annotate.autocallable', (['addElement'], {}), '(addElement)\n', (1315, 1327), False, 'from formless import annotate\n'), ((1434, 1466), 'formless.annotate.autocallable', 'annotate.autocallable', (['clearForm'], {}), '(clearForm)\n', (1455, 1466), False, 'from formless import annotate\n'), ((366, 412), 'formless.configurable.Configurable.__init__', 'configurable.Configurable.__init__', (['self', 'None'], {}), '(self, None)\n', (400, 412), False, 'from formless import configurable\n'), ((1133, 1163), 'formless.annotate.String', 'annotate.String', ([], {'required': '(True)'}), '(required=True)\n', (1148, 1163), False, 'from formless import annotate\n'), ((1587, 1611), 'nevow.rend.Page.__init__', 'rend.Page.__init__', (['self'], {}), '(self)\n', (1605, 1611), False, 'from nevow import rend\n'), ((1698, 1743), 'formless.configurable.TypedInterfaceConfigurable', 'configurable.TypedInterfaceConfigurable', (['self'], {}), '(self)\n', (1737, 1743), False, 'from formless import configurable\n'), ((609, 653), 'formless.annotate.Method', 'annotate.Method', ([], {'arguments': 'self.formElements'}), '(arguments=self.formElements)\n', (624, 653), False, 'from formless import annotate\n'), ((2070, 2094), 'nevow.tags.style', 'T.style', ([], {'type': '"""text/css"""'}), "(type='text/css')\n", (2077, 2094), True, 'from nevow import tags as T\n'), ((2243, 2277), 'formless.webform.renderForms', 'webform.renderForms', (['"""formBuilder"""'], {}), "('formBuilder')\n", (2262, 2277), False, 'from formless import webform\n'), ((2323, 2357), 'formless.webform.renderForms', 'webform.renderForms', (['"""dynamicForm"""'], {}), "('dynamicForm')\n", (2342, 2357), False, 'from formless import webform\n'), ((2113, 2171), 'nevow.util.resource_filename', 'util.resource_filename', (['"""formless"""', '"""freeform-default.css"""'], {}), "('formless', 'freeform-default.css')\n", (2135, 2171), False, 'from nevow import util\n')] |
# Copyright 2021 IBM All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Testcases related to sending a diagnostic report to the service"""
# pylint: disable=missing-function-docstring
import importlib
from fhir.resources.bundle import Bundle
from nlp_insights import app
from test_nlp_insights.util import unstructured_text
from test_nlp_insights.util.compare import compare_actual_to_expected
from test_nlp_insights.util.fhir import (
make_docref_report,
make_attachment,
make_bundle,
make_patient_reference,
)
from test_nlp_insights.util.mock_service import (
make_mock_acd_service_class,
configure_acd,
make_mock_quick_umls_service_class,
configure_quick_umls,
)
from test_nlp_insights.util.resources import UnitTestUsingExternalResource
class TestDocRefReportUsingAcd(UnitTestUsingExternalResource):
"""Unit tests where a diagnostic report is posted for insights"""
def setUp(self) -> None:
# The application is defined globally in the module, so this is a potentially
        # flawed way of resetting the state between test cases.
# It should work "well-enough" in most cases.
importlib.reload(app)
app.config.set_mock_nlp_service_class(
"acd",
make_mock_acd_service_class(
self.resource_path + "/acd/TestReportResponses.json"
),
)
def test_when_post_docref_then_condition_derived(self):
report = make_docref_report(
subject=make_patient_reference(),
attachments=[
make_attachment(unstructured_text.TEXT_FOR_MULTIPLE_CONDITIONS)
],
)
with app.app.test_client() as service:
configure_acd(service)
insight_resp = service.post("/discoverInsights", data=report.json())
self.assertEqual(200, insight_resp.status_code)
actual_bundle = Bundle.parse_obj(insight_resp.get_json())
cmp = compare_actual_to_expected(
expected_path=self.expected_output_path(),
actual_resource=actual_bundle,
)
self.assertFalse(cmp, cmp.pretty())
def test_when_post_docref_bundle_then_condition_derived(self):
bundle = make_bundle(
[
make_docref_report(
subject=make_patient_reference(),
attachments=[
make_attachment(unstructured_text.TEXT_FOR_MULTIPLE_CONDITIONS)
],
)
]
)
with app.app.test_client() as service:
configure_acd(service)
insight_resp = service.post("/discoverInsights", data=bundle.json())
self.assertEqual(200, insight_resp.status_code)
actual_bundle = Bundle.parse_obj(insight_resp.get_json())
cmp = compare_actual_to_expected(
expected_path=self.expected_output_path(),
actual_resource=actual_bundle,
)
self.assertFalse(cmp, cmp.pretty())
def test_when_post_docref_bundle_then_medication_derived(self):
bundle = make_bundle(
[
make_docref_report(
subject=make_patient_reference(),
attachments=[
make_attachment(unstructured_text.TEXT_FOR_MEDICATION)
],
)
]
)
with app.app.test_client() as service:
configure_acd(service)
insight_resp = service.post("/discoverInsights", data=bundle.json())
self.assertEqual(200, insight_resp.status_code)
actual_bundle = Bundle.parse_obj(insight_resp.get_json())
cmp = compare_actual_to_expected(
expected_path=self.expected_output_path(),
actual_resource=actual_bundle,
)
self.assertFalse(cmp, cmp.pretty())
def test_when_post_docref_bundle_then_medication_and_condition_derived(self):
bundle = make_bundle(
[
make_docref_report(
subject=make_patient_reference(),
attachments=[
make_attachment(
unstructured_text.TEXT_FOR_CONDITION_AND_MEDICATION
)
],
)
]
)
with app.app.test_client() as service:
configure_acd(service)
insight_resp = service.post("/discoverInsights", data=bundle.json())
self.assertEqual(200, insight_resp.status_code)
actual_bundle = Bundle.parse_obj(insight_resp.get_json())
cmp = compare_actual_to_expected(
expected_path=self.expected_output_path(),
actual_resource=actual_bundle,
)
self.assertFalse(cmp, cmp.pretty())
def test_when_post_docref_bundle_with_no_subject_then_nothing_derived(self):
bundle = make_bundle(
[
make_docref_report(
subject=None,
attachments=[
make_attachment(
unstructured_text.TEXT_FOR_CONDITION_AND_MEDICATION
)
],
)
]
)
with app.app.test_client() as service:
configure_acd(service)
insight_resp = service.post("/discoverInsights", data=bundle.json())
self.assertEqual(200, insight_resp.status_code)
actual_bundle = Bundle.parse_obj(insight_resp.get_json())
cmp = compare_actual_to_expected(
expected_path=self.expected_output_path(),
actual_resource=actual_bundle,
)
self.assertFalse(cmp, cmp.pretty())
def test_when_post_docref_bundle_with_suspect_and_family_history_then_no_conditions(
self,
):
"""The text for this one has a diagnosis that pertains to the patient, and a family history
disease. We should filter these out of the ACD results since these don't reflect known
conditions of the patient.
More accuracy than function, but the response is fixed and we need to be picking the
right values.
This really shows why we want to go through the attribute instead of just cuis.
"""
bundle = make_bundle(
[
make_docref_report(
subject=make_patient_reference(),
attachments=[
make_attachment(
unstructured_text.TEXT_FOR_CONDITION_SUSPECTED_AND_FAM_HISTORY
)
],
)
]
)
with app.app.test_client() as service:
configure_acd(service)
insight_resp = service.post("/discoverInsights", data=bundle.json())
self.assertEqual(200, insight_resp.status_code)
actual_bundle = Bundle.parse_obj(insight_resp.get_json())
cmp = compare_actual_to_expected(
expected_path=self.expected_output_path(),
actual_resource=actual_bundle,
)
self.assertFalse(cmp, cmp.pretty())
class TestDocRefReportUsingQuickUmls(UnitTestUsingExternalResource):
"""Unit tests where a diagnostic report is posted for insights"""
def setUp(self) -> None:
# The application is defined globally in the module, so this is a potentially
        # flawed way of resetting the state between test cases.
# It should work "well-enough" in most cases.
importlib.reload(app)
app.config.set_mock_nlp_service_class(
"quickumls",
make_mock_quick_umls_service_class(
self.resource_path + "/quickUmls/TestReportResponses.json"
),
)
def test_when_post_docref_then_condition_derived(self):
report = make_docref_report(
subject=make_patient_reference(),
attachments=[
make_attachment(unstructured_text.TEXT_FOR_CONDITION_AND_MEDICATION)
],
)
with app.app.test_client() as service:
configure_quick_umls(service)
insight_resp = service.post("/discoverInsights", data=report.json())
self.assertEqual(200, insight_resp.status_code)
actual_bundle = Bundle.parse_obj(insight_resp.get_json())
cmp = compare_actual_to_expected(
expected_path=self.expected_output_path(),
actual_resource=actual_bundle,
)
self.assertFalse(cmp, cmp.pretty())
def test_when_post_docref_bundle_then_condition_derived(self):
bundle = make_bundle(
[
make_docref_report(
subject=make_patient_reference(),
attachments=[
make_attachment(unstructured_text.TEXT_FOR_MULTIPLE_CONDITIONS)
],
)
]
)
with app.app.test_client() as service:
configure_quick_umls(service)
insight_resp = service.post("/discoverInsights", data=bundle.json())
self.assertEqual(200, insight_resp.status_code)
actual_bundle = Bundle.parse_obj(insight_resp.get_json())
cmp = compare_actual_to_expected(
expected_path=self.expected_output_path(),
actual_resource=actual_bundle,
)
self.assertFalse(cmp, cmp.pretty())
def test_when_post_docref_bundle_then_medication_derived(self):
bundle = make_bundle(
[
make_docref_report(
subject=make_patient_reference(),
attachments=[
make_attachment(unstructured_text.TEXT_FOR_MEDICATION)
],
)
]
)
with app.app.test_client() as service:
configure_quick_umls(service)
insight_resp = service.post("/discoverInsights", data=bundle.json())
self.assertEqual(200, insight_resp.status_code)
actual_bundle = Bundle.parse_obj(insight_resp.get_json())
cmp = compare_actual_to_expected(
expected_path=self.expected_output_path(),
actual_resource=actual_bundle,
)
self.assertFalse(cmp, cmp.pretty())
def test_when_post_docref_bundle_then_medication_and_condition_derived(self):
bundle = make_bundle(
[
make_docref_report(
subject=make_patient_reference(),
attachments=[
make_attachment(
unstructured_text.TEXT_FOR_CONDITION_AND_MEDICATION
)
],
)
]
)
with app.app.test_client() as service:
configure_quick_umls(service)
insight_resp = service.post("/discoverInsights", data=bundle.json())
self.assertEqual(200, insight_resp.status_code)
actual_bundle = Bundle.parse_obj(insight_resp.get_json())
cmp = compare_actual_to_expected(
expected_path=self.expected_output_path(),
actual_resource=actual_bundle,
)
self.assertFalse(cmp, cmp.pretty())
def test_when_post_docref_bundle_with_no_subject_then_nothing_derived(self):
bundle = make_bundle(
[
make_docref_report(
subject=None,
attachments=[
make_attachment(
unstructured_text.TEXT_FOR_CONDITION_AND_MEDICATION
)
],
)
]
)
with app.app.test_client() as service:
configure_quick_umls(service)
insight_resp = service.post("/discoverInsights", data=bundle.json())
self.assertEqual(200, insight_resp.status_code)
actual_bundle = Bundle.parse_obj(insight_resp.get_json())
cmp = compare_actual_to_expected(
expected_path=self.expected_output_path(),
actual_resource=actual_bundle,
)
self.assertFalse(cmp, cmp.pretty())
| [
"test_nlp_insights.util.fhir.make_patient_reference",
"test_nlp_insights.util.fhir.make_attachment",
"test_nlp_insights.util.mock_service.make_mock_acd_service_class",
"nlp_insights.app.app.test_client",
"test_nlp_insights.util.mock_service.configure_quick_umls",
"importlib.reload",
"test_nlp_insights.util.mock_service.configure_acd",
"test_nlp_insights.util.mock_service.make_mock_quick_umls_service_class"
] | [((1671, 1692), 'importlib.reload', 'importlib.reload', (['app'], {}), '(app)\n', (1687, 1692), False, 'import importlib\n'), ((8229, 8250), 'importlib.reload', 'importlib.reload', (['app'], {}), '(app)\n', (8245, 8250), False, 'import importlib\n'), ((1771, 1856), 'test_nlp_insights.util.mock_service.make_mock_acd_service_class', 'make_mock_acd_service_class', (["(self.resource_path + '/acd/TestReportResponses.json')"], {}), "(self.resource_path +\n '/acd/TestReportResponses.json')\n", (1798, 1856), False, 'from test_nlp_insights.util.mock_service import make_mock_acd_service_class, configure_acd, make_mock_quick_umls_service_class, configure_quick_umls\n'), ((2183, 2204), 'nlp_insights.app.app.test_client', 'app.app.test_client', ([], {}), '()\n', (2202, 2204), False, 'from nlp_insights import app\n'), ((2229, 2251), 'test_nlp_insights.util.mock_service.configure_acd', 'configure_acd', (['service'], {}), '(service)\n', (2242, 2251), False, 'from test_nlp_insights.util.mock_service import make_mock_acd_service_class, configure_acd, make_mock_quick_umls_service_class, configure_quick_umls\n'), ((3081, 3102), 'nlp_insights.app.app.test_client', 'app.app.test_client', ([], {}), '()\n', (3100, 3102), False, 'from nlp_insights import app\n'), ((3127, 3149), 'test_nlp_insights.util.mock_service.configure_acd', 'configure_acd', (['service'], {}), '(service)\n', (3140, 3149), False, 'from test_nlp_insights.util.mock_service import make_mock_acd_service_class, configure_acd, make_mock_quick_umls_service_class, configure_quick_umls\n'), ((3971, 3992), 'nlp_insights.app.app.test_client', 'app.app.test_client', ([], {}), '()\n', (3990, 3992), False, 'from nlp_insights import app\n'), ((4017, 4039), 'test_nlp_insights.util.mock_service.configure_acd', 'configure_acd', (['service'], {}), '(service)\n', (4030, 4039), False, 'from test_nlp_insights.util.mock_service import make_mock_acd_service_class, configure_acd, make_mock_quick_umls_service_class, configure_quick_umls\n'), ((4943, 4964), 'nlp_insights.app.app.test_client', 'app.app.test_client', ([], {}), '()\n', (4962, 4964), False, 'from nlp_insights import app\n'), ((4989, 5011), 'test_nlp_insights.util.mock_service.configure_acd', 'configure_acd', (['service'], {}), '(service)\n', (5002, 5011), False, 'from test_nlp_insights.util.mock_service import make_mock_acd_service_class, configure_acd, make_mock_quick_umls_service_class, configure_quick_umls\n'), ((5894, 5915), 'nlp_insights.app.app.test_client', 'app.app.test_client', ([], {}), '()\n', (5913, 5915), False, 'from nlp_insights import app\n'), ((5940, 5962), 'test_nlp_insights.util.mock_service.configure_acd', 'configure_acd', (['service'], {}), '(service)\n', (5953, 5962), False, 'from test_nlp_insights.util.mock_service import make_mock_acd_service_class, configure_acd, make_mock_quick_umls_service_class, configure_quick_umls\n'), ((7352, 7373), 'nlp_insights.app.app.test_client', 'app.app.test_client', ([], {}), '()\n', (7371, 7373), False, 'from nlp_insights import app\n'), ((7398, 7420), 'test_nlp_insights.util.mock_service.configure_acd', 'configure_acd', (['service'], {}), '(service)\n', (7411, 7420), False, 'from test_nlp_insights.util.mock_service import make_mock_acd_service_class, configure_acd, make_mock_quick_umls_service_class, configure_quick_umls\n'), ((8335, 8433), 'test_nlp_insights.util.mock_service.make_mock_quick_umls_service_class', 'make_mock_quick_umls_service_class', (["(self.resource_path + '/quickUmls/TestReportResponses.json')"], {}), "(self.resource_path +\n 
'/quickUmls/TestReportResponses.json')\n", (8369, 8433), False, 'from test_nlp_insights.util.mock_service import make_mock_acd_service_class, configure_acd, make_mock_quick_umls_service_class, configure_quick_umls\n'), ((8765, 8786), 'nlp_insights.app.app.test_client', 'app.app.test_client', ([], {}), '()\n', (8784, 8786), False, 'from nlp_insights import app\n'), ((8811, 8840), 'test_nlp_insights.util.mock_service.configure_quick_umls', 'configure_quick_umls', (['service'], {}), '(service)\n', (8831, 8840), False, 'from test_nlp_insights.util.mock_service import make_mock_acd_service_class, configure_acd, make_mock_quick_umls_service_class, configure_quick_umls\n'), ((9670, 9691), 'nlp_insights.app.app.test_client', 'app.app.test_client', ([], {}), '()\n', (9689, 9691), False, 'from nlp_insights import app\n'), ((9716, 9745), 'test_nlp_insights.util.mock_service.configure_quick_umls', 'configure_quick_umls', (['service'], {}), '(service)\n', (9736, 9745), False, 'from test_nlp_insights.util.mock_service import make_mock_acd_service_class, configure_acd, make_mock_quick_umls_service_class, configure_quick_umls\n'), ((10567, 10588), 'nlp_insights.app.app.test_client', 'app.app.test_client', ([], {}), '()\n', (10586, 10588), False, 'from nlp_insights import app\n'), ((10613, 10642), 'test_nlp_insights.util.mock_service.configure_quick_umls', 'configure_quick_umls', (['service'], {}), '(service)\n', (10633, 10642), False, 'from test_nlp_insights.util.mock_service import make_mock_acd_service_class, configure_acd, make_mock_quick_umls_service_class, configure_quick_umls\n'), ((11546, 11567), 'nlp_insights.app.app.test_client', 'app.app.test_client', ([], {}), '()\n', (11565, 11567), False, 'from nlp_insights import app\n'), ((11592, 11621), 'test_nlp_insights.util.mock_service.configure_quick_umls', 'configure_quick_umls', (['service'], {}), '(service)\n', (11612, 11621), False, 'from test_nlp_insights.util.mock_service import make_mock_acd_service_class, configure_acd, make_mock_quick_umls_service_class, configure_quick_umls\n'), ((12504, 12525), 'nlp_insights.app.app.test_client', 'app.app.test_client', ([], {}), '()\n', (12523, 12525), False, 'from nlp_insights import app\n'), ((12550, 12579), 'test_nlp_insights.util.mock_service.configure_quick_umls', 'configure_quick_umls', (['service'], {}), '(service)\n', (12570, 12579), False, 'from test_nlp_insights.util.mock_service import make_mock_acd_service_class, configure_acd, make_mock_quick_umls_service_class, configure_quick_umls\n'), ((2012, 2036), 'test_nlp_insights.util.fhir.make_patient_reference', 'make_patient_reference', ([], {}), '()\n', (2034, 2036), False, 'from test_nlp_insights.util.fhir import make_docref_report, make_attachment, make_bundle, make_patient_reference\n'), ((8589, 8613), 'test_nlp_insights.util.fhir.make_patient_reference', 'make_patient_reference', ([], {}), '()\n', (8611, 8613), False, 'from test_nlp_insights.util.fhir import make_docref_report, make_attachment, make_bundle, make_patient_reference\n'), ((2080, 2143), 'test_nlp_insights.util.fhir.make_attachment', 'make_attachment', (['unstructured_text.TEXT_FOR_MULTIPLE_CONDITIONS'], {}), '(unstructured_text.TEXT_FOR_MULTIPLE_CONDITIONS)\n', (2095, 2143), False, 'from test_nlp_insights.util.fhir import make_docref_report, make_attachment, make_bundle, make_patient_reference\n'), ((8657, 8725), 'test_nlp_insights.util.fhir.make_attachment', 'make_attachment', (['unstructured_text.TEXT_FOR_CONDITION_AND_MEDICATION'], {}), 
'(unstructured_text.TEXT_FOR_CONDITION_AND_MEDICATION)\n', (8672, 8725), False, 'from test_nlp_insights.util.fhir import make_docref_report, make_attachment, make_bundle, make_patient_reference\n'), ((2854, 2878), 'test_nlp_insights.util.fhir.make_patient_reference', 'make_patient_reference', ([], {}), '()\n', (2876, 2878), False, 'from test_nlp_insights.util.fhir import make_docref_report, make_attachment, make_bundle, make_patient_reference\n'), ((3753, 3777), 'test_nlp_insights.util.fhir.make_patient_reference', 'make_patient_reference', ([], {}), '()\n', (3775, 3777), False, 'from test_nlp_insights.util.fhir import make_docref_report, make_attachment, make_bundle, make_patient_reference\n'), ((4657, 4681), 'test_nlp_insights.util.fhir.make_patient_reference', 'make_patient_reference', ([], {}), '()\n', (4679, 4681), False, 'from test_nlp_insights.util.fhir import make_docref_report, make_attachment, make_bundle, make_patient_reference\n'), ((7055, 7079), 'test_nlp_insights.util.fhir.make_patient_reference', 'make_patient_reference', ([], {}), '()\n', (7077, 7079), False, 'from test_nlp_insights.util.fhir import make_docref_report, make_attachment, make_bundle, make_patient_reference\n'), ((9443, 9467), 'test_nlp_insights.util.fhir.make_patient_reference', 'make_patient_reference', ([], {}), '()\n', (9465, 9467), False, 'from test_nlp_insights.util.fhir import make_docref_report, make_attachment, make_bundle, make_patient_reference\n'), ((10349, 10373), 'test_nlp_insights.util.fhir.make_patient_reference', 'make_patient_reference', ([], {}), '()\n', (10371, 10373), False, 'from test_nlp_insights.util.fhir import make_docref_report, make_attachment, make_bundle, make_patient_reference\n'), ((11260, 11284), 'test_nlp_insights.util.fhir.make_patient_reference', 'make_patient_reference', ([], {}), '()\n', (11282, 11284), False, 'from test_nlp_insights.util.fhir import make_docref_report, make_attachment, make_bundle, make_patient_reference\n'), ((2938, 3001), 'test_nlp_insights.util.fhir.make_attachment', 'make_attachment', (['unstructured_text.TEXT_FOR_MULTIPLE_CONDITIONS'], {}), '(unstructured_text.TEXT_FOR_MULTIPLE_CONDITIONS)\n', (2953, 3001), False, 'from test_nlp_insights.util.fhir import make_docref_report, make_attachment, make_bundle, make_patient_reference\n'), ((3837, 3891), 'test_nlp_insights.util.fhir.make_attachment', 'make_attachment', (['unstructured_text.TEXT_FOR_MEDICATION'], {}), '(unstructured_text.TEXT_FOR_MEDICATION)\n', (3852, 3891), False, 'from test_nlp_insights.util.fhir import make_docref_report, make_attachment, make_bundle, make_patient_reference\n'), ((4741, 4809), 'test_nlp_insights.util.fhir.make_attachment', 'make_attachment', (['unstructured_text.TEXT_FOR_CONDITION_AND_MEDICATION'], {}), '(unstructured_text.TEXT_FOR_CONDITION_AND_MEDICATION)\n', (4756, 4809), False, 'from test_nlp_insights.util.fhir import make_docref_report, make_attachment, make_bundle, make_patient_reference\n'), ((5692, 5760), 'test_nlp_insights.util.fhir.make_attachment', 'make_attachment', (['unstructured_text.TEXT_FOR_CONDITION_AND_MEDICATION'], {}), '(unstructured_text.TEXT_FOR_CONDITION_AND_MEDICATION)\n', (5707, 5760), False, 'from test_nlp_insights.util.fhir import make_docref_report, make_attachment, make_bundle, make_patient_reference\n'), ((7139, 7218), 'test_nlp_insights.util.fhir.make_attachment', 'make_attachment', (['unstructured_text.TEXT_FOR_CONDITION_SUSPECTED_AND_FAM_HISTORY'], {}), '(unstructured_text.TEXT_FOR_CONDITION_SUSPECTED_AND_FAM_HISTORY)\n', (7154, 7218), 
False, 'from test_nlp_insights.util.fhir import make_docref_report, make_attachment, make_bundle, make_patient_reference\n'), ((9527, 9590), 'test_nlp_insights.util.fhir.make_attachment', 'make_attachment', (['unstructured_text.TEXT_FOR_MULTIPLE_CONDITIONS'], {}), '(unstructured_text.TEXT_FOR_MULTIPLE_CONDITIONS)\n', (9542, 9590), False, 'from test_nlp_insights.util.fhir import make_docref_report, make_attachment, make_bundle, make_patient_reference\n'), ((10433, 10487), 'test_nlp_insights.util.fhir.make_attachment', 'make_attachment', (['unstructured_text.TEXT_FOR_MEDICATION'], {}), '(unstructured_text.TEXT_FOR_MEDICATION)\n', (10448, 10487), False, 'from test_nlp_insights.util.fhir import make_docref_report, make_attachment, make_bundle, make_patient_reference\n'), ((11344, 11412), 'test_nlp_insights.util.fhir.make_attachment', 'make_attachment', (['unstructured_text.TEXT_FOR_CONDITION_AND_MEDICATION'], {}), '(unstructured_text.TEXT_FOR_CONDITION_AND_MEDICATION)\n', (11359, 11412), False, 'from test_nlp_insights.util.fhir import make_docref_report, make_attachment, make_bundle, make_patient_reference\n'), ((12302, 12370), 'test_nlp_insights.util.fhir.make_attachment', 'make_attachment', (['unstructured_text.TEXT_FOR_CONDITION_AND_MEDICATION'], {}), '(unstructured_text.TEXT_FOR_CONDITION_AND_MEDICATION)\n', (12317, 12370), False, 'from test_nlp_insights.util.fhir import make_docref_report, make_attachment, make_bundle, make_patient_reference\n')] |
import os
import sys
import ush
__all__ = ('cat', 'echo', 'env', 'fold', 'head', 'repeat', 'sha256sum',
'errmd5', 'pargs', 'pwd', 'STDOUT', 'PIPE', 's', 'sh')
SOURCE_ROOT = os.path.join(os.path.abspath(os.path.dirname(__file__)))
TEST_BIN_DIR = os.path.join(SOURCE_ROOT, 'bin')
def commands(*names):
argvs = []
for name in names:
script = os.path.join(TEST_BIN_DIR, '{0}.py'.format(name))
argvs.append([sys.executable, script])
return sh(*argvs)
def s(obj):
"""Helper to normalize linefeeds in strings."""
if isinstance(obj, bytes):
return obj.replace(b'\n', os.linesep.encode())
else:
return obj.replace('\n', os.linesep)
ush.Shell().export_as_module('sh', full_name=True)
import sh
for name in ['cat', 'env', 'fold', 'head', 'repeat', 'sha256sum', 'errmd5',
'pargs', 'pwd']:
script = os.path.join(TEST_BIN_DIR, '{0}.py'.format(name))
alias_dict = {name: [sys.executable, script]}
sh.alias(**alias_dict)
from sh import (cat, echo, env, fold, head, repeat, sha256sum, errmd5, pargs,
pwd)
STDOUT = ush.STDOUT
PIPE = ush.PIPE
| [
"os.linesep.encode",
"sh.alias",
"ush.Shell",
"os.path.join",
"os.path.dirname",
"sh"
] | [((258, 290), 'os.path.join', 'os.path.join', (['SOURCE_ROOT', '"""bin"""'], {}), "(SOURCE_ROOT, 'bin')\n", (270, 290), False, 'import os\n'), ((477, 487), 'sh', 'sh', (['*argvs'], {}), '(*argvs)\n', (479, 487), False, 'import sh\n'), ((981, 1003), 'sh.alias', 'sh.alias', ([], {}), '(**alias_dict)\n', (989, 1003), False, 'import sh\n'), ((215, 240), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (230, 240), False, 'import os\n'), ((697, 708), 'ush.Shell', 'ush.Shell', ([], {}), '()\n', (706, 708), False, 'import ush\n'), ((619, 638), 'os.linesep.encode', 'os.linesep.encode', ([], {}), '()\n', (636, 638), False, 'import os\n')] |
import logging
import sys
# from fortmatic.config import LOGGER_NAME
LOGGER_NAME = 'magic'
LOG_LEVEl = 'debug'
logger = logging.getLogger(LOGGER_NAME)
def _magic_log_level():
if LOG_LEVEl == 'debug':
return LOG_LEVEl
def format_log(message, **kwargs):
return dict(
{'message': message},
log_level=LOG_LEVEl,
        service=LOGGER_NAME,
**kwargs,
)
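# Illustrative example (assumes the module-level defaults above; `request_id` is a
# hypothetical extra field):
#   format_log("connected", request_id=42)
#   -> {'message': 'connected', 'log_level': 'debug', 'service': 'magic', 'request_id': 42}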
def log_debug(message, **kwargs):
log_line = format_log(message, **kwargs)
if _magic_log_level() == 'debug':
print(log_line, file=sys.stderr)
logger.debug(log_line)
def log_info(message, **kwargs):
log_line = format_log(message, **kwargs)
if _magic_log_level() == 'debug':
print(log_line, file=sys.stderr)
logger.info(log_line)
| [
"logging.getLogger"
] | [((126, 156), 'logging.getLogger', 'logging.getLogger', (['LOGGER_NAME'], {}), '(LOGGER_NAME)\n', (143, 156), False, 'import logging\n')] |
import os, MySQLdb
''' Connect to DB '''
db = MySQLdb.connect(host="localhost", user="root", passwd="", db="dataminas")
db.autocommit(1)
cursor = db.cursor()
cursor.execute("select id, name_en from attrs_wld where length(id) = 5")
names = {}
for row in cursor.fetchall():
name = row[1].replace(", The","")
split = name.split(", ")
if len(split) > 1:
name = "{0} {1}".format(split[1],split[0])
names[name] = row[0]
unknowns = []
path = "/Users/Dave/Downloads/World Flags/"
for filename in os.listdir(path):
name = filename.split(".")[0]
name = name.replace("_"," ")
if name in names:
newname = "wld_"+names[name]+".png"
        os.rename(path+filename,path+newname)
| [
"MySQLdb.connect",
"os.listdir",
"os.rename"
] | [((47, 120), 'MySQLdb.connect', 'MySQLdb.connect', ([], {'host': '"""localhost"""', 'user': '"""root"""', 'passwd': '""""""', 'db': '"""dataminas"""'}), "(host='localhost', user='root', passwd='', db='dataminas')\n", (62, 120), False, 'import os, MySQLdb\n'), ((516, 532), 'os.listdir', 'os.listdir', (['path'], {}), '(path)\n', (526, 532), False, 'import os, MySQLdb\n'), ((675, 717), 'os.rename', 'os.rename', (['(path + filename)', '(path + newname)'], {}), '(path + filename, path + newname)\n', (684, 717), False, 'import os, MySQLdb\n')] |
from chat.models import Room, Message
class MessageService:
@staticmethod
def insert_message(room_name, message, user):
room = Room.objects.get(name=room_name)
m = Message(room=room, message=message, user=user)
m.save()
return m
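# Illustrative usage (a sketch; assumes a Room named "lobby" exists and `user` is a
# valid user instance):
#   msg = MessageService.insert_message("lobby", "hello", user)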
| [
"chat.models.Message",
"chat.models.Room.objects.get"
] | [((146, 178), 'chat.models.Room.objects.get', 'Room.objects.get', ([], {'name': 'room_name'}), '(name=room_name)\n', (162, 178), False, 'from chat.models import Room, Message\n'), ((192, 238), 'chat.models.Message', 'Message', ([], {'room': 'room', 'message': 'message', 'user': 'user'}), '(room=room, message=message, user=user)\n', (199, 238), False, 'from chat.models import Room, Message\n')] |
import inspect
import materia
# import unittest.mock as mock
def test_packmol_input_one_structure_no_instructions():
packmol_input = materia.PackmolInput(
tolerance=1.0,
filetype="xyz",
output_name="packed",
)
packmol_input.add_structure(structure_filepath="/path/to/structure.xyz", number=300)
assert (
str(packmol_input)
== inspect.cleandoc(
"""tolerance 1.0
output packed.xyz
filetype xyz\n
structure /path/to/structure.xyz
number 300
end structure"""
)
+ "\n"
)
| [
"inspect.cleandoc",
"materia.PackmolInput"
] | [((140, 213), 'materia.PackmolInput', 'materia.PackmolInput', ([], {'tolerance': '(1.0)', 'filetype': '"""xyz"""', 'output_name': '"""packed"""'}), "(tolerance=1.0, filetype='xyz', output_name='packed')\n", (160, 213), False, 'import materia\n'), ((386, 680), 'inspect.cleandoc', 'inspect.cleandoc', (['"""tolerance 1.0\n output packed.xyz\n filetype xyz\n\n structure /path/to/structure.xyz\n number 300\n end structure"""'], {}), '(\n """tolerance 1.0\n output packed.xyz\n filetype xyz\n\n structure /path/to/structure.xyz\n number 300\n end structure"""\n )\n', (402, 680), False, 'import inspect\n')] |
from loguru import logger
from biquery_sql_etl.engines import bigquery_engine, rdbms_engine
from biquery_sql_etl.queries import sql_queries
from biquery_sql_etl.client import DataClient
logger.add('logs/queries.log', format="{time} {message}", level="INFO")
def init_pipeline():
"""Move data between Bigquery and MySQL."""
num_rows = 0
bqc = DataClient(bigquery_engine)
dbc = DataClient(rdbms_engine)
for table_name, query in sql_queries.items():
rows = bqc.fetch_rows(query)
insert = dbc.insert_rows(rows, table_name, replace=True)
logger.info(insert)
num_rows += len(rows)
logger.info(f"Completed migration of {num_rows} rows from BigQuery to MySQL.")
| [
"biquery_sql_etl.queries.sql_queries.items",
"loguru.logger.add",
"biquery_sql_etl.client.DataClient",
"loguru.logger.info"
] | [((188, 259), 'loguru.logger.add', 'logger.add', (['"""logs/queries.log"""'], {'format': '"""{time} {message}"""', 'level': '"""INFO"""'}), "('logs/queries.log', format='{time} {message}', level='INFO')\n", (198, 259), False, 'from loguru import logger\n'), ((358, 385), 'biquery_sql_etl.client.DataClient', 'DataClient', (['bigquery_engine'], {}), '(bigquery_engine)\n', (368, 385), False, 'from biquery_sql_etl.client import DataClient\n'), ((396, 420), 'biquery_sql_etl.client.DataClient', 'DataClient', (['rdbms_engine'], {}), '(rdbms_engine)\n', (406, 420), False, 'from biquery_sql_etl.client import DataClient\n'), ((450, 469), 'biquery_sql_etl.queries.sql_queries.items', 'sql_queries.items', ([], {}), '()\n', (467, 469), False, 'from biquery_sql_etl.queries import sql_queries\n'), ((635, 713), 'loguru.logger.info', 'logger.info', (['f"""Completed migration of {num_rows} rows from BigQuery to MySQL."""'], {}), "(f'Completed migration of {num_rows} rows from BigQuery to MySQL.')\n", (646, 713), False, 'from loguru import logger\n'), ((581, 600), 'loguru.logger.info', 'logger.info', (['insert'], {}), '(insert)\n', (592, 600), False, 'from loguru import logger\n')] |
from stiff.utils import parse_qs_single
def get_ann_pos_dict(ann):
anchor_poses = ann.attrib["anchor-positions"].split()
assert len(anchor_poses) == 1
anchor_pos_str = anchor_poses[0]
return parse_qs_single(anchor_pos_str)
def get_ann_pos(ann):
anchor_pos = get_ann_pos_dict(ann)
tok = int(anchor_pos["token"])
tok_len = int(anchor_pos["token-length"]) if "token-length" in anchor_pos else 1
return tok, tok_len
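# Illustrative usage (hypothetical annotation element whose "anchor-positions" attribute
# holds a single query string such as "token=3&token-length=2"):
#   tok, tok_len = get_ann_pos(ann)  # -> (3, 2)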
| [
"stiff.utils.parse_qs_single"
] | [((209, 240), 'stiff.utils.parse_qs_single', 'parse_qs_single', (['anchor_pos_str'], {}), '(anchor_pos_str)\n', (224, 240), False, 'from stiff.utils import parse_qs_single\n')] |
from rest_framework import filters
from app.models import Goods
import django_filters
class goodsfilter(filters.FilterSet):
categoryid = django_filters.NumberFilter('categoryid')
childcid = django_filters.NumberFilter('childcid')
class Meta:
model = Goods
fields = ['id']
| [
"django_filters.NumberFilter"
] | [((143, 184), 'django_filters.NumberFilter', 'django_filters.NumberFilter', (['"""categoryid"""'], {}), "('categoryid')\n", (170, 184), False, 'import django_filters\n'), ((201, 240), 'django_filters.NumberFilter', 'django_filters.NumberFilter', (['"""childcid"""'], {}), "('childcid')\n", (228, 240), False, 'import django_filters\n')] |
import os
import pickle
from common.methods.pcg_method import PCGMethod
from typing import Any, Dict, List, Callable, Tuple, Union
from common.types import Verbosity
from common.utils import get_date, save_compressed_pickle
from games.game import Game
from games.level import Level
from experiments.logger import Logger
import neat
import numpy as np
from novelty_neat.fitness.fitness import NeatFitnessFunction
from novelty_neat.generation import NeatLevelGenerator
from novelty_neat.types import LevelNeuralNet
# This function returns a single level given some input and a neural network.
# This function takes in a list of networks and returns a list of floats representing their fitnesses.
# Since it's a callable, it can be a class that stores some state.
class NoveltyNeatPCG(PCGMethod):
"""This is the method that uses NEAT to evolve a neural network to generate levels,
and uses novelty search as the fitness function to ensure individuals are valid.
"""
def __init__(self, game: Game, init_level: Level, level_generator: NeatLevelGenerator,
fitness_calculator: NeatFitnessFunction, neat_config: neat.Config,
num_generations: int=10, num_random_vars=2
) -> None:
"""Relatively general constructor, where all interesting behaviour can be provided using different callables.
Args:
game (Game): The game that levels should be generated for
init_level (Level): The initial level to use as a starting point
level_generator (NeatLevelGenerator): This should take in some input, and a network and return a Level.
fitness_calculator (NeatFitnessFunction): This should take in a list of networks and return a list of fitnesses.
neat_config (neat.Config): The configuration used for the NEAT algorithm.
num_generations (int): How many generations to train for.
num_random_vars (int): How many random variables should we use as input to the generation process.
"""
super().__init__(game, init_level)
self.level_generator = level_generator
self.fitness_calculator = fitness_calculator
self.neat_config = neat_config
self.pop = neat.Population(self.neat_config)
self.best_agent: Union[neat.DefaultGenome, None] = None
self.num_generations = num_generations
self.num_random_vars = num_random_vars
def train(self, logger: Logger) -> List[Dict[str, Any]]:
"""The training procedure to follow. We basically just call `run` on self.population
using the functions provided.
Args:
logger (Logger):
Returns:
List[Dict[str, Any]]:
"""
# This is the training procedure.
steps = 0
def fitness(genomes: List[Tuple[int, neat.DefaultGenome]], config: neat.Config):
nonlocal steps
nets = []
for genome_id, genome in genomes:
nets.append(neat.nn.FeedForwardNetwork.create(genome, config))
all_fitnesses = self.fitness_calculator(nets)
min_idx = 0; max_idx = 0
idx = 0
for fit, (_, genome) in zip(all_fitnesses, genomes):
genome.fitness = fit
if fit > all_fitnesses[max_idx]: max_idx = idx
if fit < all_fitnesses[min_idx]: min_idx = idx
idx += 1
# Log some info.
logger.log({
'mean_fitness': np.mean(all_fitnesses),
'max_fitness': np.max(all_fitnesses),
'min_fitness': np.min(all_fitnesses),
'all_fitness': all_fitnesses if logger and logger.LOG_ALL_FITNESSES else []
}, step=steps)
steps += 1
if logger.verbose == Verbosity.PROGRESS:
print(f"\r{steps} / {self.num_generations}", end='')
if logger and logger.LOG_ALL_FITNESSES:
folder = f"results/all_models/pcgnn/{logger.seed}/{self.game.__class__.__name__}/min"
os.makedirs(folder, exist_ok=True)
save_compressed_pickle(os.path.join(folder, f'gen_{str(steps).zfill(3)}'), {'config': config, 'net': nets[min_idx], 'genome': genomes[min_idx]})
folder = f"results/all_models/pcgnn/{logger.seed}/{self.game.__class__.__name__}/max"
os.makedirs(folder, exist_ok=True)
save_compressed_pickle(os.path.join(folder, f'gen_{str(steps).zfill(3)}'), {'config': config, 'net': nets[max_idx], 'genome': genomes[max_idx]})
folder = f"results/all_models/pcgnn/{logger.seed}/{self.game.__class__.__name__}/alls"
os.makedirs(folder, exist_ok=True)
save_compressed_pickle(os.path.join(folder, f'gen_{str(steps).zfill(3)}'), {'config': config, 'nets': nets, 'genomes': genomes})
self.fitness_calculator.logger = logger
self.best_agent = self.pop.run(fitness_function=fitness, n=self.num_generations)
if logger.verbose == Verbosity.PROGRESS:
print("")
return [{'final_agent': self.best_agent, 'population': self.pop}]
def generate_level(self) -> Level:
"""Simple generates a level by calling level_generator with self.best_agent.
Returns:
Level:
"""
assert self.best_agent is not None, "self.best_agent should not be None. Run train first"
return self.level_generator(neat.nn.FeedForwardNetwork.create(self.best_agent, self.neat_config))
@classmethod
def name(cls):
"""
Returns a name of this class
"""
return str(cls.__name__)
def save_best_individual(self) -> str:
""" Saves the best individual to a file 'results/scratch/neat_novelty/{self.name()}/{get_date()}/best.p'
"""
folder = f"results/scratch/neat_novelty/{self.name()}/{get_date()}"
file = os.path.join(folder, 'best.p')
os.makedirs(folder, exist_ok=True)
with open(file, 'wb+') as f:
pickle.dump({'best_individual': self.best_agent, 'config': self.neat_config}, f)
        return file
| [
"numpy.mean",
"pickle.dump",
"os.makedirs",
"neat.Population",
"os.path.join",
"neat.nn.FeedForwardNetwork.create",
"numpy.max",
"common.utils.get_date",
"numpy.min"
] | [((2273, 2306), 'neat.Population', 'neat.Population', (['self.neat_config'], {}), '(self.neat_config)\n', (2288, 2306), False, 'import neat\n'), ((6124, 6154), 'os.path.join', 'os.path.join', (['folder', '"""best.p"""'], {}), "(folder, 'best.p')\n", (6136, 6154), False, 'import os\n'), ((6163, 6197), 'os.makedirs', 'os.makedirs', (['folder'], {'exist_ok': '(True)'}), '(folder, exist_ok=True)\n', (6174, 6197), False, 'import os\n'), ((5655, 5723), 'neat.nn.FeedForwardNetwork.create', 'neat.nn.FeedForwardNetwork.create', (['self.best_agent', 'self.neat_config'], {}), '(self.best_agent, self.neat_config)\n', (5688, 5723), False, 'import neat\n'), ((6247, 6332), 'pickle.dump', 'pickle.dump', (["{'best_individual': self.best_agent, 'config': self.neat_config}", 'f'], {}), "({'best_individual': self.best_agent, 'config': self.neat_config}, f\n )\n", (6258, 6332), False, 'import pickle\n'), ((4182, 4216), 'os.makedirs', 'os.makedirs', (['folder'], {'exist_ok': '(True)'}), '(folder, exist_ok=True)\n', (4193, 4216), False, 'import os\n'), ((4513, 4547), 'os.makedirs', 'os.makedirs', (['folder'], {'exist_ok': '(True)'}), '(folder, exist_ok=True)\n', (4524, 4547), False, 'import os\n'), ((4845, 4879), 'os.makedirs', 'os.makedirs', (['folder'], {'exist_ok': '(True)'}), '(folder, exist_ok=True)\n', (4856, 4879), False, 'import os\n'), ((6096, 6106), 'common.utils.get_date', 'get_date', ([], {}), '()\n', (6104, 6106), False, 'from common.utils import get_date, save_compressed_pickle\n'), ((3059, 3108), 'neat.nn.FeedForwardNetwork.create', 'neat.nn.FeedForwardNetwork.create', (['genome', 'config'], {}), '(genome, config)\n', (3092, 3108), False, 'import neat\n'), ((3603, 3625), 'numpy.mean', 'np.mean', (['all_fitnesses'], {}), '(all_fitnesses)\n', (3610, 3625), True, 'import numpy as np\n'), ((3658, 3679), 'numpy.max', 'np.max', (['all_fitnesses'], {}), '(all_fitnesses)\n', (3664, 3679), True, 'import numpy as np\n'), ((3712, 3733), 'numpy.min', 'np.min', (['all_fitnesses'], {}), '(all_fitnesses)\n', (3718, 3733), True, 'import numpy as np\n')] |
from encoderpy import target_encoder
import pandas as pd
import pytest
data = pd.read_csv("data/testing_data.csv")
train1 = data.query("train_test_1 == 'train'")
test1 = data.query("train_test_1 == 'test'")
train2 = data.query("train_test_3 == 'train'")
test2 = data.query("train_test_3 == 'test'")
train_encode1, test_encode1 = target_encoder.target_encoder(
X_train=train1,
y=train1.target_bin,
cat_columns=['feature_cat_chr', 'feature_cat_num'],
X_test=test1,
prior=0.5,
objective='binary')
train_encode2, test_encode2 = target_encoder.target_encoder(
X_train=train2,
y=train2.target_bin,
cat_columns=['feature_cat_chr', 'feature_cat_num'],
X_test=test2,
prior=0.5,
objective='binary')
target_cha = train1.target_bin.replace({
train1.target_bin.unique()[0]: "a",
train1.target_bin.unique()[1]: "b"
}
)
def check_exception():
# check if the function handles invalid inputs.
# check input of objective
with pytest.raises(Exception):
target_encoder.target_encoder(
X_train=train1,
y=train1.target_bin,
cat_columns=['feature_cat_chr', 'feature_cat_num'],
X_test=test1,
prior=0.5,
objective='something')
# check if cat_columns is a list
with pytest.raises(Exception):
target_encoder.target_encoder(
X_train=train1,
y=train1.target_bin,
cat_columns="not list")
# check if prior is a numeric value
with pytest.raises(Exception):
target_encoder.target_encoder(
X_train=train1,
y=train1.target_bin,
cat_columns=['feature_cat_chr', 'feature_cat_num'],
prior='string')
# check if y is a pandas series
with pytest.raises(Exception):
target_encoder.target_encoder(
X_train=train1,
y=[1, 2],
cat_columns=['feature_cat_chr', 'feature_cat_num'])
# check if length y equals to length X_train
with pytest.raises(Exception):
target_encoder.target_encoder(
X_train=train1,
y=pd.Series([1, 2]),
cat_columns=['feature_cat_chr', 'feature_cat_num'])
# check if X_train is pandas dataframe
with pytest.raises(Exception):
target_encoder.target_encoder(
X_train=[1, 2],
y=train1.target_bin,
cat_columns=['feature_cat_chr', 'feature_cat_num'])
# check if X_train contains cat_columns
with pytest.raises(Exception):
target_encoder.target_encoder(
X_train=train1,
y=train1.target_bin,
cat_columns=['something'])
# check if target variable is numeric for regression objective
with pytest.raises(Exception):
target_encoder.target_encoder(
X_train=train1,
y=target_cha,
cat_columns=['feature_cat_chr', 'feature_cat_num'])
# check if target is binary
with pytest.raises(Exception):
target_encoder.target_encoder(
X_train=train1,
y=train1.target_cont,
cat_columns=['feature_cat_chr', 'feature_cat_num'],
objective='binary')
# check if X_test is pandas dataframe
with pytest.raises(Exception):
target_encoder.target_encoder(
X_train=train1,
y=train1.target_cont,
cat_columns=['feature_cat_chr', 'feature_cat_num'],
X_test=[1, 2])
# check if X_test contains cat_columns
with pytest.raises(Exception):
target_encoder.target_encoder(
X_train=train1,
y=train1.target_cont,
cat_columns=['something'],
X_test=test1)
check_exception()
def test_output():
# check if the outputs are correct.
# test value
assert train_encode1.feature_cat_chr.iloc[0] == 0.43, \
'The encoded value for training dataset is wrong'
assert test_encode2['feature_cat_chr'].iloc[0] == 0.5, \
'The encoded value for unseen test dataset is wrong'
# check shape
assert train_encode1.shape == train1.shape, \
"The shape of training dataset is wrong"
assert test_encode1.shape == test1.shape, \
"The shape of testing datset is wrong"
# check when X_test is none
assert len(target_encoder.target_encoder(
X_train=train1,
y=target_cha,
cat_columns=['feature_cat_chr', 'feature_cat_num'],
objective='binary')) == 1, \
"The function does not run when X_test is none."
test_output()
| [
"pandas.Series",
"pytest.raises",
"pandas.read_csv",
"encoderpy.target_encoder.target_encoder"
] | [((79, 115), 'pandas.read_csv', 'pd.read_csv', (['"""data/testing_data.csv"""'], {}), "('data/testing_data.csv')\n", (90, 115), True, 'import pandas as pd\n'), ((333, 505), 'encoderpy.target_encoder.target_encoder', 'target_encoder.target_encoder', ([], {'X_train': 'train1', 'y': 'train1.target_bin', 'cat_columns': "['feature_cat_chr', 'feature_cat_num']", 'X_test': 'test1', 'prior': '(0.5)', 'objective': '"""binary"""'}), "(X_train=train1, y=train1.target_bin,\n cat_columns=['feature_cat_chr', 'feature_cat_num'], X_test=test1, prior\n =0.5, objective='binary')\n", (362, 505), False, 'from encoderpy import target_encoder\n'), ((553, 725), 'encoderpy.target_encoder.target_encoder', 'target_encoder.target_encoder', ([], {'X_train': 'train2', 'y': 'train2.target_bin', 'cat_columns': "['feature_cat_chr', 'feature_cat_num']", 'X_test': 'test2', 'prior': '(0.5)', 'objective': '"""binary"""'}), "(X_train=train2, y=train2.target_bin,\n cat_columns=['feature_cat_chr', 'feature_cat_num'], X_test=test2, prior\n =0.5, objective='binary')\n", (582, 725), False, 'from encoderpy import target_encoder\n'), ((989, 1013), 'pytest.raises', 'pytest.raises', (['Exception'], {}), '(Exception)\n', (1002, 1013), False, 'import pytest\n'), ((1023, 1198), 'encoderpy.target_encoder.target_encoder', 'target_encoder.target_encoder', ([], {'X_train': 'train1', 'y': 'train1.target_bin', 'cat_columns': "['feature_cat_chr', 'feature_cat_num']", 'X_test': 'test1', 'prior': '(0.5)', 'objective': '"""something"""'}), "(X_train=train1, y=train1.target_bin,\n cat_columns=['feature_cat_chr', 'feature_cat_num'], X_test=test1, prior\n =0.5, objective='something')\n", (1052, 1198), False, 'from encoderpy import target_encoder\n'), ((1309, 1333), 'pytest.raises', 'pytest.raises', (['Exception'], {}), '(Exception)\n', (1322, 1333), False, 'import pytest\n'), ((1343, 1437), 'encoderpy.target_encoder.target_encoder', 'target_encoder.target_encoder', ([], {'X_train': 'train1', 'y': 'train1.target_bin', 'cat_columns': '"""not list"""'}), "(X_train=train1, y=train1.target_bin,\n cat_columns='not list')\n", (1372, 1437), False, 'from encoderpy import target_encoder\n'), ((1520, 1544), 'pytest.raises', 'pytest.raises', (['Exception'], {}), '(Exception)\n', (1533, 1544), False, 'import pytest\n'), ((1554, 1692), 'encoderpy.target_encoder.target_encoder', 'target_encoder.target_encoder', ([], {'X_train': 'train1', 'y': 'train1.target_bin', 'cat_columns': "['feature_cat_chr', 'feature_cat_num']", 'prior': '"""string"""'}), "(X_train=train1, y=train1.target_bin,\n cat_columns=['feature_cat_chr', 'feature_cat_num'], prior='string')\n", (1583, 1692), False, 'from encoderpy import target_encoder\n'), ((1783, 1807), 'pytest.raises', 'pytest.raises', (['Exception'], {}), '(Exception)\n', (1796, 1807), False, 'import pytest\n'), ((1817, 1929), 'encoderpy.target_encoder.target_encoder', 'target_encoder.target_encoder', ([], {'X_train': 'train1', 'y': '[1, 2]', 'cat_columns': "['feature_cat_chr', 'feature_cat_num']"}), "(X_train=train1, y=[1, 2], cat_columns=[\n 'feature_cat_chr', 'feature_cat_num'])\n", (1846, 1929), False, 'from encoderpy import target_encoder\n'), ((2020, 2044), 'pytest.raises', 'pytest.raises', (['Exception'], {}), '(Exception)\n', (2033, 2044), False, 'import pytest\n'), ((2262, 2286), 'pytest.raises', 'pytest.raises', (['Exception'], {}), '(Exception)\n', (2275, 2286), False, 'import pytest\n'), ((2296, 2418), 'encoderpy.target_encoder.target_encoder', 'target_encoder.target_encoder', ([], {'X_train': '[1, 2]', 'y': 
'train1.target_bin', 'cat_columns': "['feature_cat_chr', 'feature_cat_num']"}), "(X_train=[1, 2], y=train1.target_bin,\n cat_columns=['feature_cat_chr', 'feature_cat_num'])\n", (2325, 2418), False, 'from encoderpy import target_encoder\n'), ((2505, 2529), 'pytest.raises', 'pytest.raises', (['Exception'], {}), '(Exception)\n', (2518, 2529), False, 'import pytest\n'), ((2539, 2636), 'encoderpy.target_encoder.target_encoder', 'target_encoder.target_encoder', ([], {'X_train': 'train1', 'y': 'train1.target_bin', 'cat_columns': "['something']"}), "(X_train=train1, y=train1.target_bin,\n cat_columns=['something'])\n", (2568, 2636), False, 'from encoderpy import target_encoder\n'), ((2746, 2770), 'pytest.raises', 'pytest.raises', (['Exception'], {}), '(Exception)\n', (2759, 2770), False, 'import pytest\n'), ((2780, 2896), 'encoderpy.target_encoder.target_encoder', 'target_encoder.target_encoder', ([], {'X_train': 'train1', 'y': 'target_cha', 'cat_columns': "['feature_cat_chr', 'feature_cat_num']"}), "(X_train=train1, y=target_cha, cat_columns=[\n 'feature_cat_chr', 'feature_cat_num'])\n", (2809, 2896), False, 'from encoderpy import target_encoder\n'), ((2970, 2994), 'pytest.raises', 'pytest.raises', (['Exception'], {}), '(Exception)\n', (2983, 2994), False, 'import pytest\n'), ((3004, 3147), 'encoderpy.target_encoder.target_encoder', 'target_encoder.target_encoder', ([], {'X_train': 'train1', 'y': 'train1.target_cont', 'cat_columns': "['feature_cat_chr', 'feature_cat_num']", 'objective': '"""binary"""'}), "(X_train=train1, y=train1.target_cont,\n cat_columns=['feature_cat_chr', 'feature_cat_num'], objective='binary')\n", (3033, 3147), False, 'from encoderpy import target_encoder\n'), ((3244, 3268), 'pytest.raises', 'pytest.raises', (['Exception'], {}), '(Exception)\n', (3257, 3268), False, 'import pytest\n'), ((3278, 3416), 'encoderpy.target_encoder.target_encoder', 'target_encoder.target_encoder', ([], {'X_train': 'train1', 'y': 'train1.target_cont', 'cat_columns': "['feature_cat_chr', 'feature_cat_num']", 'X_test': '[1, 2]'}), "(X_train=train1, y=train1.target_cont,\n cat_columns=['feature_cat_chr', 'feature_cat_num'], X_test=[1, 2])\n", (3307, 3416), False, 'from encoderpy import target_encoder\n'), ((3514, 3538), 'pytest.raises', 'pytest.raises', (['Exception'], {}), '(Exception)\n', (3527, 3538), False, 'import pytest\n'), ((3548, 3660), 'encoderpy.target_encoder.target_encoder', 'target_encoder.target_encoder', ([], {'X_train': 'train1', 'y': 'train1.target_cont', 'cat_columns': "['something']", 'X_test': 'test1'}), "(X_train=train1, y=train1.target_cont,\n cat_columns=['something'], X_test=test1)\n", (3577, 3660), False, 'from encoderpy import target_encoder\n'), ((4305, 4441), 'encoderpy.target_encoder.target_encoder', 'target_encoder.target_encoder', ([], {'X_train': 'train1', 'y': 'target_cha', 'cat_columns': "['feature_cat_chr', 'feature_cat_num']", 'objective': '"""binary"""'}), "(X_train=train1, y=target_cha, cat_columns=[\n 'feature_cat_chr', 'feature_cat_num'], objective='binary')\n", (4334, 4441), False, 'from encoderpy import target_encoder\n'), ((2127, 2144), 'pandas.Series', 'pd.Series', (['[1, 2]'], {}), '([1, 2])\n', (2136, 2144), True, 'import pandas as pd\n')] |
from init_stop.init_stop_sequences import *
import Communication.tests.fake_serial as fake_ser
def init_fake_sequence():
camera_index, port = init_parse()
cam = init_camera(camera_index)
ser = init_fake_port(port)
init_opencr(ser)
return cam, port, ser
def init_fake_port(port):
print("Opening Serial communication with {0}.".format(port))
ser = fake_ser.FakeSerial()
ser.flush()
    return ser
| [
"Communication.tests.fake_serial.FakeSerial"
] | [((376, 397), 'Communication.tests.fake_serial.FakeSerial', 'fake_ser.FakeSerial', ([], {}), '()\n', (395, 397), True, 'import Communication.tests.fake_serial as fake_ser\n')] |
# Modified file from https://github.com/Yale-LILY/SummEval/blob/master/data_processing/pair_data.py
"""
Script for recreating the full model outputs from CNN/DM Story files.
CNN/DM Story files can be downloaded from https://cs.nyu.edu/~kcho/DMQA/
"""
import argparse
import json
import os
from tqdm import tqdm
from glob import glob
def parse_story_file(content):
"""
Remove article highlights and unnecessary white characters.
"""
content_raw = content.split("@highlight")[0]
content = " ".join(filter(None, [x.strip() for x in content_raw.split("\n")]))
return content
def annotation_pairing(data_annotations, story_files):
print("Processing file:", data_annotations)
with open(data_annotations) as fd:
dataset = [json.loads(line) for line in fd]
for example in dataset:
story_path = os.path.join(story_files, example["filepath"])
with open(story_path) as fd:
story_content = fd.read()
example["text"] = parse_story_file(story_content)
paired_file = data_annotations.replace("aligned", "aligned.paired")
if os.path.dirname(paired_file):
os.makedirs(os.path.dirname(paired_file), exist_ok=True)
with open(paired_file, "w") as fd:
for example in dataset:
fd.write(json.dumps(example, ensure_ascii=False) + "\n")
def output_pairing(aligned_data, story_files, model_outputs):
"""
Walk data sub-directories and recreate examples
"""
for unpaired_path in glob(f'{aligned_data}/*/aligned/*'):
filename = os.path.basename(unpaired_path)
if not (".jsonl" in filename and "aligned" in filename and os.path.isfile(unpaired_path)):
continue
print("Processing file:", unpaired_path)
with open(unpaired_path) as fd:
dataset = [json.loads(line) for line in fd]
for example in tqdm(dataset):
story_path = os.path.join(story_files, example["filepath"])
with open(story_path) as fd:
story_content = fd.read()
example["text"] = parse_story_file(story_content)
paired_filename = filename.replace("aligned", "aligned.paired")
paired_path = os.path.join(model_outputs, "paired", paired_filename)
os.makedirs(os.path.dirname(paired_path), exist_ok=True)
with open(paired_path, "w") as fd:
for example in dataset:
fd.write(json.dumps(example, ensure_ascii=False) + "\n")
def run_pair_data(data_annotations: str = None, model_outputs: str = None, story_files: str = None):
if not (data_annotations or model_outputs) or not story_files:
raise RuntimeError("To run script please specify `data_annotations` to pair human annotation data or"
"`model_outputs` to pair generated summaries. Story files should be specified in either case.")
if model_outputs:
aligned_data = model_outputs # os.path.join(model_outputs, "aligned")
if data_annotations:
annotation_pairing(data_annotations, story_files)
if model_outputs and story_files:
output_pairing(aligned_data, story_files, model_outputs)
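# Usage sketch (paths are illustrative, not from the original repo):
#   run_pair_data(model_outputs="outputs", story_files="cnn_dm/stories")
# pairs every aligned .jsonl found under outputs/*/aligned/ with its story text.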
# if __name__ == "__main__":
# PARSER = argparse.ArgumentParser()
# PARSER.add_argument("--data_annotations", type=str, help="Path to file human annotations")
# PARSER.add_argument("--model_outputs", type=str, help="Path to directory holding model data")
# PARSER.add_argument("--story_files", type=str, help="Path to directory holding CNNDM story files")
# ARGS = PARSER.parse_args()
#
#
# if not (ARGS.data_annotations or ARGS.model_outputs) or not ARGS.story_files:
# raise RuntimeError("To run script please specify `data_annotations` to pair human annotation data or"
# "`model_outputs` to pair generated summaries. Story files should be specified in either case.")
#
# if ARGS.model_outputs:
# ARGS.aligned_data = os.path.join(ARGS.model_outputs, "aligned")
#
# if ARGS.data_annotations:
# annotation_pairing(ARGS)
#
# if ARGS.model_outputs and ARGS.story_files:
# output_pairing(ARGS) | [
"json.loads",
"tqdm.tqdm",
"os.path.join",
"json.dumps",
"os.path.isfile",
"os.path.dirname",
"os.path.basename",
"glob.glob"
] | [((1107, 1135), 'os.path.dirname', 'os.path.dirname', (['paired_file'], {}), '(paired_file)\n', (1122, 1135), False, 'import os\n'), ((1499, 1534), 'glob.glob', 'glob', (['f"""{aligned_data}/*/aligned/*"""'], {}), "(f'{aligned_data}/*/aligned/*')\n", (1503, 1534), False, 'from glob import glob\n'), ((842, 888), 'os.path.join', 'os.path.join', (['story_files', "example['filepath']"], {}), "(story_files, example['filepath'])\n", (854, 888), False, 'import os\n'), ((1555, 1586), 'os.path.basename', 'os.path.basename', (['unpaired_path'], {}), '(unpaired_path)\n', (1571, 1586), False, 'import os\n'), ((1878, 1891), 'tqdm.tqdm', 'tqdm', (['dataset'], {}), '(dataset)\n', (1882, 1891), False, 'from tqdm import tqdm\n'), ((2210, 2264), 'os.path.join', 'os.path.join', (['model_outputs', '"""paired"""', 'paired_filename'], {}), "(model_outputs, 'paired', paired_filename)\n", (2222, 2264), False, 'import os\n'), ((759, 775), 'json.loads', 'json.loads', (['line'], {}), '(line)\n', (769, 775), False, 'import json\n'), ((1157, 1185), 'os.path.dirname', 'os.path.dirname', (['paired_file'], {}), '(paired_file)\n', (1172, 1185), False, 'import os\n'), ((1918, 1964), 'os.path.join', 'os.path.join', (['story_files', "example['filepath']"], {}), "(story_files, example['filepath'])\n", (1930, 1964), False, 'import os\n'), ((2285, 2313), 'os.path.dirname', 'os.path.dirname', (['paired_path'], {}), '(paired_path)\n', (2300, 2313), False, 'import os\n'), ((1655, 1684), 'os.path.isfile', 'os.path.isfile', (['unpaired_path'], {}), '(unpaired_path)\n', (1669, 1684), False, 'import os\n'), ((1821, 1837), 'json.loads', 'json.loads', (['line'], {}), '(line)\n', (1831, 1837), False, 'import json\n'), ((1294, 1333), 'json.dumps', 'json.dumps', (['example'], {'ensure_ascii': '(False)'}), '(example, ensure_ascii=False)\n', (1304, 1333), False, 'import json\n'), ((2434, 2473), 'json.dumps', 'json.dumps', (['example'], {'ensure_ascii': '(False)'}), '(example, ensure_ascii=False)\n', (2444, 2473), False, 'import json\n')] |
from src.riko import Riko, DictModel, ObjectModel, INSERT
class BlogArticle(ObjectModel):
"""
Model object defined in `object` like class.
"""
ak = "aid" # ak (auto increment id) name declaration, the field will be update when call `insert()`
pk = ["aid"] # primary keys name declaration, for identified a object for performing ORM operations
def __init__(self):
super().__init__()
self.aid = None
self.author_uid = ""
self.title = ""
self.content = ""
class BlogRating(ObjectModel):
"""
Model object defined in `object` like class.
"""
ak = "aid"
pk = ["aid"]
def __init__(self):
super().__init__()
self.aid = None
self.rating = 0
class BlogUser(DictModel):
"""
Model object defined in `dict` like class.
"""
ak = "uid"
pk = ["uid"]
fields = ["username", "age"]
if __name__ == '__main__':
# set db config
Riko.update_default(database="blog")
# create object
article1 = BlogArticle.create(author_uid=12, title="Koito yuu", content="Koito yuu loves Nanami Touko.")
    # returns the auto increment id; the object's `aid` is set automatically since it is declared as the `ak` meta field
article1_id = article1.insert()
# update object fields
article1.content += " (updated)"
article1.save()
# delete object
article1.delete()
# get all article
all_article = BlogArticle.get()
# get some column with condition
article_list1 = BlogArticle.get(return_columns=("title",), _where_raw=("aid < 10",))
article_list2 = BlogArticle.get(return_columns=("title", "content"), aid=1, author_uid=12)
# order, limit and offset
article_page1 = BlogArticle.get(return_columns=("title",), _order="title", _limit=5, _offset=1)
article_page2 = BlogArticle.get(return_columns=("title",), _order=("title", "author_uid"), _limit=5, _offset=1)
# select query
select_result1 = (BlogUser
.select()
.where(username="Rinka")
.pagination(1, 3)
.order_by("age")
.get())
select_result2 = (BlogArticle
.select(return_columns=('title',))
.alias("t")
.distinct()
.where(author_uid=12)
.where_raw("t.aid <= %(aid_limit)s")
.get({'aid_limit': 3}))
# insert query
insert_id = (BlogRating
.insert_query()
.values(aid=233, rating=99)
.go(return_last_id=True))
# batch insert
articles2insert = list()
articles2insert.append((12, "Bloom into you 1", "Test content 1"))
articles2insert.append((12, "Bloom into you 2", "Test content 2"))
articles2insert.append((12, "Bloom into you 3", "Test content 3"))
affected_row1 = (BlogArticle
.insert_many()
.values(["author_uid", "title", "content"], articles2insert)
.go())
article_x4 = BlogArticle.create(author_uid=13, title="Bloom into you 4", content="Test content 4")
article_x5 = BlogArticle.create(author_uid=13, title="Bloom into you 5", content="Test content 5")
affected_row2 = (BlogArticle
.insert_many()
.from_objects([article_x4, article_x5])
.go())
# delete query
affected_row3 = (BlogRating
.delete_query()
.where(rating=99)
.go())
affected_row4 = (BlogRating
.delete_query()
.where_raw("aid >= 6", "aid <= 7")
.go())
# BlogRating.delete_query().go() # delete all
# left join table
left = (BlogArticle
.select()
.alias("a")
.left_join(BlogRating, alias="r", on=("a.aid = r.aid",))
.get())
# right join table
right = (BlogArticle
.select()
.alias("a")
.right_join(BlogUser, alias="u", on="u.uid = a.author_uid")
.get())
# natural join table
rating_info = (BlogArticle
.select()
.natural_join(BlogRating)
.get())
# inner join table
article_author = (BlogArticle
.select(return_columns=('u.username', 'a.title'))
.alias("a")
.join(BlogUser, on=("a.author_uid = u.uid",), alias="u")
.order_by("username")
.get())
# insert with primary key conflict
user_pk_conflict = BlogUser.create(uid=1, username="Test_Dupicate", age=168)
try:
user_pk_conflict.insert()
except:
print("conflict!")
user_pk_conflict.insert(on_duplicate_key_replace=INSERT.DUPLICATE_KEY_IGNORE)
user_pk_conflict.insert(on_duplicate_key_replace=INSERT.DUPLICATE_KEY_REPLACE)
# on duplicate key update
user_pk_conflict.insert(on_duplicate_key_replace=INSERT.DUPLICATE_KEY_UPDATE, age=user_pk_conflict["age"] + 1)
# count with condition
article_number1 = BlogArticle.count(aid=3)
article_number2 = BlogArticle.count(_where_raw=("aid <= 3",))
# existence
article_existence = BlogArticle.has(_where_raw=("aid <= 3",))
existence_no = BlogArticle.has(aid=-1)
existence_yes = BlogArticle.has(aid=3, title="Koito yuu")
# transaction
article_tx = BlogArticle.create(author_uid=15, title="Transaction test", content="Aha, a transaction.")
article_tx.insert()
with article_tx.db_session_.start_transaction() as _t:
article_tx.title = "Transaction test (title updated)"
article_tx.save(t=_t)
# t = 1 / 0 # uncomment this to raise exception, and transaction will rollback
article_tx.content = "Aha, a transaction. (content updated)"
article_tx.save(t=_t)
| [
"src.riko.Riko.update_default"
] | [((962, 998), 'src.riko.Riko.update_default', 'Riko.update_default', ([], {'database': '"""blog"""'}), "(database='blog')\n", (981, 998), False, 'from src.riko import Riko, DictModel, ObjectModel, INSERT\n')] |
from collections import namedtuple
from datetime import datetime
from livestyled.models.competition import Competition
from livestyled.models.season import Season
from livestyled.models.sport_venue import SportVenue
from livestyled.models.team import Team
Score = namedtuple('Score', 'goals, penalties')
Url = namedtuple('Url', 'title, url, is_enabled')
class Fixture:
def __init__(
self,
id,
external_id,
start_at,
is_fulltime,
is_terminated,
home_id,
away_id,
home_score,
away_score,
season_id,
competition_id,
venue_id,
status,
url,
allow_overwrite=False
):
self._id = id
self._external_id = external_id
self._start_at = start_at
self._is_fulltime = is_fulltime
self._is_terminated = is_terminated
self._home = Team.placeholder(id=home_id)
self._away = Team.placeholder(id=away_id)
self._home_goals = home_score['goals']
self._home_penalties = home_score['penalties']
self._away_goals = away_score['goals']
self._away_penalties = away_score['penalties']
self._season = Season.placeholder(id=season_id)
self._competition = Competition.placeholder(id=competition_id)
self._venue = SportVenue.placeholder(id=venue_id)
self._status = status
if url:
if isinstance(url, Url):
self.url = url
elif isinstance(url, dict):
self.url = Url(**url)
else:
self.url = None
self.allow_overwrite = allow_overwrite
@classmethod
def create_new(
cls,
external_id: str,
start_at: datetime,
is_fulltime: bool,
is_terminated: bool,
home: Team or str or int,
away: Team or str or int,
home_goals: int or None,
home_penalties: int or None,
away_goals: int or None,
away_penalties: int or None,
season: Season or str or int,
competition: Competition or str or int,
venue: SportVenue or str or int,
status: str,
url: Url,
allow_overwrite: bool = False,
):
fixture = Fixture(
id=None,
external_id=external_id,
start_at=start_at,
is_fulltime=is_fulltime,
is_terminated=is_terminated,
home_id=None,
away_id=None,
home_score={'goals': home_goals, 'penalties': home_penalties},
away_score={'goals': away_goals, 'penalties': away_penalties},
season_id=None,
competition_id=None,
venue_id=None,
status=status,
url=url,
allow_overwrite=allow_overwrite
)
if isinstance(home, (str, int)):
home = Team.placeholder(id=home)
fixture._home = home
if isinstance(away, (str, int)):
away = Team.placeholder(id=away)
fixture._away = away
if isinstance(season, (str, int)):
season = Season.placeholder(id=season)
fixture._season = season
if isinstance(competition, (str, int)):
competition = Competition.placeholder(id=competition)
fixture._competition = competition
if isinstance(venue, (str, int)):
venue = SportVenue.placeholder(id=venue)
fixture._venue = venue
return fixture
@property
def id(self):
return self._id
@property
def competition_id(self):
return self._competition.id
@property
def home_id(self):
return self._home.id
@property
def away_id(self):
return self._away.id
@property
def home(self):
return self._home
@property
def away(self):
return self._away
@property
def season_id(self):
return self._season.id
@property
def venue_id(self):
return self._venue.id
@property
def home_score(self):
return Score(
goals=self._home_goals,
penalties=self._home_penalties
)
@property
def home_goals(self):
return self._home_goals
@home_goals.setter
def home_goals(self, goals):
self._home_goals = goals
@property
def away_score(self):
return Score(
goals=self._away_goals,
penalties=self._away_penalties
)
@property
def away_goals(self):
return self._away_goals
@away_goals.setter
def away_goals(self, goals):
self._away_goals = goals
@property
def status(self):
return self._status
@property
def is_fulltime(self):
return self._is_fulltime
@property
def is_terminated(self):
return self._is_terminated
@property
def start_at(self):
return self._start_at
@property
def external_id(self):
return self._external_id
@property
def home_penalties(self):
return self._home_penalties
@property
def away_penalties(self):
return self._away_penalties
def __repr__(self):
return '<Fixture(id={self.id!r})>'.format(self=self)
def diff(self, other):
differences = {}
fields = (
'competition_id', 'home_id', 'away_id', 'season_id', 'venue_id',
'home_score', 'away_score', 'status', 'is_fulltime', 'start_at', 'external_id', 'url',
'allow_overwrite', 'is_terminated'
)
for field in fields:
if getattr(self, field) != getattr(other, field):
differences[field] = getattr(self, field)
return differences
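# Usage sketch (field values are illustrative): diff() returns only the fields
# whose values differ between the two fixtures, keyed by field name, e.g.
#   updated.diff(stored)  ->  {'status': 'fulltime', 'home_score': Score(goals=2, penalties=None)}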
| [
"collections.namedtuple",
"livestyled.models.team.Team.placeholder",
"livestyled.models.sport_venue.SportVenue.placeholder",
"livestyled.models.competition.Competition.placeholder",
"livestyled.models.season.Season.placeholder"
] | [((267, 306), 'collections.namedtuple', 'namedtuple', (['"""Score"""', '"""goals, penalties"""'], {}), "('Score', 'goals, penalties')\n", (277, 306), False, 'from collections import namedtuple\n'), ((313, 356), 'collections.namedtuple', 'namedtuple', (['"""Url"""', '"""title, url, is_enabled"""'], {}), "('Url', 'title, url, is_enabled')\n", (323, 356), False, 'from collections import namedtuple\n'), ((967, 995), 'livestyled.models.team.Team.placeholder', 'Team.placeholder', ([], {'id': 'home_id'}), '(id=home_id)\n', (983, 995), False, 'from livestyled.models.team import Team\n'), ((1017, 1045), 'livestyled.models.team.Team.placeholder', 'Team.placeholder', ([], {'id': 'away_id'}), '(id=away_id)\n', (1033, 1045), False, 'from livestyled.models.team import Team\n'), ((1273, 1305), 'livestyled.models.season.Season.placeholder', 'Season.placeholder', ([], {'id': 'season_id'}), '(id=season_id)\n', (1291, 1305), False, 'from livestyled.models.season import Season\n'), ((1334, 1376), 'livestyled.models.competition.Competition.placeholder', 'Competition.placeholder', ([], {'id': 'competition_id'}), '(id=competition_id)\n', (1357, 1376), False, 'from livestyled.models.competition import Competition\n'), ((1399, 1434), 'livestyled.models.sport_venue.SportVenue.placeholder', 'SportVenue.placeholder', ([], {'id': 'venue_id'}), '(id=venue_id)\n', (1421, 1434), False, 'from livestyled.models.sport_venue import SportVenue\n'), ((3011, 3036), 'livestyled.models.team.Team.placeholder', 'Team.placeholder', ([], {'id': 'home'}), '(id=home)\n', (3027, 3036), False, 'from livestyled.models.team import Team\n'), ((3126, 3151), 'livestyled.models.team.Team.placeholder', 'Team.placeholder', ([], {'id': 'away'}), '(id=away)\n', (3142, 3151), False, 'from livestyled.models.team import Team\n'), ((3245, 3274), 'livestyled.models.season.Season.placeholder', 'Season.placeholder', ([], {'id': 'season'}), '(id=season)\n', (3263, 3274), False, 'from livestyled.models.season import Season\n'), ((3382, 3421), 'livestyled.models.competition.Competition.placeholder', 'Competition.placeholder', ([], {'id': 'competition'}), '(id=competition)\n', (3405, 3421), False, 'from livestyled.models.competition import Competition\n'), ((3527, 3559), 'livestyled.models.sport_venue.SportVenue.placeholder', 'SportVenue.placeholder', ([], {'id': 'venue'}), '(id=venue)\n', (3549, 3559), False, 'from livestyled.models.sport_venue import SportVenue\n')] |
from pytest import raises
from aswan.scheduler import Resource, ResourceLimit, Scheduler, SchedulerTask
from aswan.scheduler.core import NotEnoughResourcesToContinue
from aswan.scheduler.resource import ResourceBundle
from aswan.tests.integration.scheduler.test_scheduler_basics import AddActor
class ResourceOne(Resource):
pass
class ResourceTwo(Resource):
pass
class NumberResource(Resource):
def __init__(self, base: int):
self.base = base
def test_insufficient_resources():
limits = [
ResourceLimit(ResourceOne, global_limit=0),
]
scheduler = Scheduler(AddActor, resource_limits=limits)
with raises(NotEnoughResourcesToContinue):
scheduler.refill_task_queue(
[SchedulerTask(argument=0, resource_needs=[ResourceOne()])]
)
scheduler.join()
def test_resource_based_reorganization():
limits = [
ResourceLimit(ResourceOne, global_limit=1),
ResourceLimit(ResourceTwo, global_limit=1),
]
scheduler = Scheduler(AddActor, resource_limits=limits)
scheduler.refill_task_queue(
[
SchedulerTask(argument=i, resource_needs=[ResourceOne()])
for i in range(5)
]
)
assert scheduler._used_actor_resources == ResourceBundle([ResourceOne()])
scheduler.wait_until_n_tasks_remain(0)
scheduler.refill_task_queue(
[
SchedulerTask(argument=i, resource_needs=[ResourceTwo()])
for i in range(5)
]
)
assert scheduler._used_actor_resources == ResourceBundle([ResourceTwo()])
scheduler.wait_until_n_tasks_remain(0)
scheduler.join()
def test_mid_refill_reorganization():
limits = [
ResourceLimit(
NumberResource,
global_limit=1,
target_attribute="base",
limit_kind="nunique",
),
]
scheduler = Scheduler(
AddActor, resource_limits=limits, reorganize_after_every_task=True
)
scheduler.refill_task_queue(
[
SchedulerTask(argument=i, resource_needs=[NumberResource(1)])
for i in range(5)
]
+ [
SchedulerTask(argument=i, resource_needs=[NumberResource(2)])
for i in range(5, 10)
]
)
scheduler.wait_until_n_tasks_remain(0)
scheduler.join()
assert list(range(1, 11)) == list(
sorted(scheduler.get_processed_results())
)
| [
"aswan.scheduler.ResourceLimit",
"aswan.scheduler.Scheduler",
"pytest.raises"
] | [((596, 639), 'aswan.scheduler.Scheduler', 'Scheduler', (['AddActor'], {'resource_limits': 'limits'}), '(AddActor, resource_limits=limits)\n', (605, 639), False, 'from aswan.scheduler import Resource, ResourceLimit, Scheduler, SchedulerTask\n'), ((1019, 1062), 'aswan.scheduler.Scheduler', 'Scheduler', (['AddActor'], {'resource_limits': 'limits'}), '(AddActor, resource_limits=limits)\n', (1028, 1062), False, 'from aswan.scheduler import Resource, ResourceLimit, Scheduler, SchedulerTask\n'), ((1884, 1961), 'aswan.scheduler.Scheduler', 'Scheduler', (['AddActor'], {'resource_limits': 'limits', 'reorganize_after_every_task': '(True)'}), '(AddActor, resource_limits=limits, reorganize_after_every_task=True)\n', (1893, 1961), False, 'from aswan.scheduler import Resource, ResourceLimit, Scheduler, SchedulerTask\n'), ((530, 572), 'aswan.scheduler.ResourceLimit', 'ResourceLimit', (['ResourceOne'], {'global_limit': '(0)'}), '(ResourceOne, global_limit=0)\n', (543, 572), False, 'from aswan.scheduler import Resource, ResourceLimit, Scheduler, SchedulerTask\n'), ((650, 686), 'pytest.raises', 'raises', (['NotEnoughResourcesToContinue'], {}), '(NotEnoughResourcesToContinue)\n', (656, 686), False, 'from pytest import raises\n'), ((900, 942), 'aswan.scheduler.ResourceLimit', 'ResourceLimit', (['ResourceOne'], {'global_limit': '(1)'}), '(ResourceOne, global_limit=1)\n', (913, 942), False, 'from aswan.scheduler import Resource, ResourceLimit, Scheduler, SchedulerTask\n'), ((952, 994), 'aswan.scheduler.ResourceLimit', 'ResourceLimit', (['ResourceTwo'], {'global_limit': '(1)'}), '(ResourceTwo, global_limit=1)\n', (965, 994), False, 'from aswan.scheduler import Resource, ResourceLimit, Scheduler, SchedulerTask\n'), ((1708, 1804), 'aswan.scheduler.ResourceLimit', 'ResourceLimit', (['NumberResource'], {'global_limit': '(1)', 'target_attribute': '"""base"""', 'limit_kind': '"""nunique"""'}), "(NumberResource, global_limit=1, target_attribute='base',\n limit_kind='nunique')\n", (1721, 1804), False, 'from aswan.scheduler import Resource, ResourceLimit, Scheduler, SchedulerTask\n')] |
import json
import os
import argparse
import csv
import numpy as np
from copy import deepcopy
train_idx = np.random.choice(500, 400, replace=False)
test_idx = [item for item in range(500) if item not in train_idx]
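# Assumption: the csv's `number` column holds ids 0-499, so this is a random
# 400/100 train/test split; no RNG seed is set, so the split changes every run.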
annotations = open("fs_dataset.csv")
anno = csv.DictReader(annotations)
train = open("train_dataset.txt", 'w')
test = open("test_dataset.txt", 'w')
whole_list = []
for row in anno:
line = ' '.join([row['number'], row['tes'], row['pcs'], row['ded']]) + '\n'
if int(row['number']) in train_idx:
train.write(line)
else:
test.write(line)
train.close()
test.close()
| [
"numpy.random.choice",
"csv.DictReader"
] | [((106, 147), 'numpy.random.choice', 'np.random.choice', (['(500)', '(400)'], {'replace': '(False)'}), '(500, 400, replace=False)\n', (122, 147), True, 'import numpy as np\n'), ((258, 285), 'csv.DictReader', 'csv.DictReader', (['annotations'], {}), '(annotations)\n', (272, 285), False, 'import csv\n')] |
import logging
import httpx
import notion_client
from notion_client import APIResponseError
from notion_database.service import (
convert_notion_database_resp_dict_to_simple_database_dict,
)
from notion_properties.dto import NotionPropertyDto
from notion_properties.service import (
create_properties_dict_for_create_page_api_request_from_property_dto_list,
)
from tasks.service import create_notion_task_property_list_from_db_schema
from .models import RecurringTask
logger = logging.getLogger(__name__)
def create_recurring_task_in_notion(task_pk):
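    """Create a Notion page for the given RecurringTask.

    Loads the task and its owner's workspace access token, fetches the target
    database schema from Notion, rebuilds the stored property values against
    that schema, and creates the page through the pages API.
    """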
logger.info(f"Creating new task for PK: {task_pk}")
try:
task_model = RecurringTask.objects.all().filter(pk=task_pk)[0]
except IndexError:
raise Exception(
f"Task with id {task_pk} be created because it did not exist in Database anymore."
)
notion_db_model = task_model.database
if (
notion_db_model is None
or notion_db_model.database_id is None
or notion_db_model.database_id == ""
):
logger.info(
f"Database id was not set for Recurring Task with PK {task_pk}! Cannot handle request."
)
return
user = task_model.owner
workspace_access_queryset = user.workspace_access.all()
    if workspace_access_queryset.count() == 0:
raise Exception("User did not have a workspace access.")
workspace_access_queried = list(workspace_access_queryset)[0]
if task_model.workspace is None:
task_model.workspace = workspace_access_queried.workspace
task_model.save()
elif task_model.workspace.pk != workspace_access_queried.workspace.pk:
raise Exception("Users workspace does not match the one they have access to")
try:
client = notion_client.Client(auth=workspace_access_queried.access_token)
except IndexError:
logger.error("User did not have a workspace access")
raise Exception("User did not have a workspace access.")
# Fetch the given database from Notion
try:
notion_db_schema_resp_dict = client.databases.retrieve(
database_id=notion_db_model.database_id
)
except (httpx.HTTPStatusError, APIResponseError) as error:
if error.code == "unauthorized":
raise Exception("invalid api token")
logger.info("Failed to retrieve Database for Task!")
task_model.database = None
task_model.save()
return
database_dict = convert_notion_database_resp_dict_to_simple_database_dict(
notion_db_schema_resp_dict
)
current_task_properties_value_by_id_dict = {
property_dict["id"]: property_dict["value"]
for property_dict in task_model.properties_json
}
# Check which properties are still in the Database
property_dict_list = create_notion_task_property_list_from_db_schema(
db_schema_dict_list=[
property_dto.dto_dict() for property_dto in database_dict["properties"]
],
property_value_by_id_dict=current_task_properties_value_by_id_dict,
)
    # check that the provided property type is the one defined in the schema
request_properties_dict = (
create_properties_dict_for_create_page_api_request_from_property_dto_list(
[
NotionPropertyDto.from_dto_dict(property_dict)
for property_dict in property_dict_list
]
)
)
page_parent_dict = {"database_id": notion_db_model.database_id}
client.pages.create(parent=page_parent_dict, properties=request_properties_dict)
# Don't save any models within the task - will cause back-to-back chains of jobs going off
logger.debug(f"Created recurring task with id {task_model.pk} successfully.")
| [
"logging.getLogger",
"notion_database.service.convert_notion_database_resp_dict_to_simple_database_dict",
"notion_properties.dto.NotionPropertyDto.from_dto_dict",
"notion_client.Client"
] | [((489, 516), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (506, 516), False, 'import logging\n'), ((2464, 2554), 'notion_database.service.convert_notion_database_resp_dict_to_simple_database_dict', 'convert_notion_database_resp_dict_to_simple_database_dict', (['notion_db_schema_resp_dict'], {}), '(\n notion_db_schema_resp_dict)\n', (2521, 2554), False, 'from notion_database.service import convert_notion_database_resp_dict_to_simple_database_dict\n'), ((1762, 1826), 'notion_client.Client', 'notion_client.Client', ([], {'auth': 'workspace_access_queried.access_token'}), '(auth=workspace_access_queried.access_token)\n', (1782, 1826), False, 'import notion_client\n'), ((3272, 3318), 'notion_properties.dto.NotionPropertyDto.from_dto_dict', 'NotionPropertyDto.from_dto_dict', (['property_dict'], {}), '(property_dict)\n', (3303, 3318), False, 'from notion_properties.dto import NotionPropertyDto\n')] |
from torch import nn
import torch
import torch.nn.functional as F
from torch.autograd import Variable
import torchvision
from torchvision.ops import nms
from nts_net import resnet
import numpy as np
from nts_net.anchors import generate_default_anchor_maps
from nts_net.config import CAT_NUM, PROPOSAL_NUM, test_model
class ProposalNet(nn.Module):
def __init__(self):
super(ProposalNet, self).__init__()
self.down1 = nn.Conv2d(2048, 128, 3, 1, 1)
self.down2 = nn.Conv2d(128, 128, 3, 2, 1)
self.down3 = nn.Conv2d(128, 128, 3, 2, 1)
self.ReLU = nn.ReLU()
self.tidy1 = nn.Conv2d(128, 6, 1, 1, 0)
self.tidy2 = nn.Conv2d(128, 6, 1, 1, 0)
self.tidy3 = nn.Conv2d(128, 9, 1, 1, 0)
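        # The three `tidy` heads emit per-location objectness scores at three
        # feature-map scales (d1, d2, d3); forward() flattens and concatenates
        # them so their order matches the default anchor list.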
def forward(self, x):
batch_size = x.size(0)
d1 = self.ReLU(self.down1(x))
d2 = self.ReLU(self.down2(d1))
d3 = self.ReLU(self.down3(d2))
t1 = self.tidy1(d1).view(batch_size, -1)
t2 = self.tidy2(d2).view(batch_size, -1)
t3 = self.tidy3(d3).view(batch_size, -1)
return torch.cat((t1, t2, t3), dim=1)
class attention_net(nn.Module):
def __init__(self, topN=6, device='cpu', num_classes=200):
super(attention_net, self).__init__()
self.device = device
self.pretrained_model = resnet.resnet50(pretrained=True)
self.pretrained_model.avgpool = nn.AdaptiveAvgPool2d(1)
self.pretrained_model.fc = nn.Linear(512 * 4, num_classes)
self.proposal_net = ProposalNet()
self.topN = topN
self.concat_net = nn.Linear(2048 * (CAT_NUM + 1), num_classes)
self.partcls_net = nn.Linear(512 * 4, num_classes)
_, edge_anchors, _ = generate_default_anchor_maps()
self.pad_side = 224
        self.edge_anchors = (edge_anchors + 224).astype(int)  # np.int is removed in recent NumPy; plain int keeps the same behaviour
def forward(self, x):
resnet_out, rpn_feature, feature = self.pretrained_model(x)
x_pad = F.pad(x, (self.pad_side, self.pad_side, self.pad_side, self.pad_side), mode='constant', value=0)
batch = x.size(0)
# we will reshape rpn to shape: batch * nb_anchor
rpn_score = self.proposal_net(rpn_feature.detach())
top_n_index_list = []
top_n_prob_list = []
top_n_coordinates = []
edge_anchors_copy = torch.tensor(self.edge_anchors, dtype=torch.float32).to(self.device)
zero_tensor = torch.zeros(rpn_score.size()[1], 1)
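        # Per image: run NMS over all RPN-scored anchors, keep the topN
        # survivors, and reorder each (y0, x0, y1, x1) anchor into an
        # (x0, y0, x1, y1) crop box on the padded input.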
for i in range(batch):
rpn_score_reshape = rpn_score[i]
nms_output_idx = nms(edge_anchors_copy, rpn_score_reshape, iou_threshold=0.25)
nms_output_score = torch.gather(rpn_score_reshape, 0, nms_output_idx)
nms_output_anchors = torch.index_select(edge_anchors_copy, 0, nms_output_idx)
top_n_result = torch.topk(nms_output_score, self.topN)
top_n_anchors = torch.index_select(nms_output_anchors, 0, top_n_result[1])
y0_1, x0_1, y1_1, x1_1 = torch.split(top_n_anchors, 1, dim=1)
top_n_anchors_1 = torch.cat([x0_1, y0_1, x1_1, y1_1], dim=1)
top_n_index_origin = torch.index_select(nms_output_idx, 0, top_n_result[1])
top_n_index_list.append(top_n_index_origin)
top_n_prob_list.append(top_n_result[0])
top_n_coordinates.append(top_n_anchors_1)
top_n_index = torch.stack(top_n_index_list)
top_n_prob = torch.stack(top_n_prob_list)
part_imgs = torch.zeros([batch, self.topN, 3, 224, 224])
for i in range(batch):
for j in range(self.topN):
y0 = top_n_coordinates[i][j][1].long()
x0 = top_n_coordinates[i][j][0].long()
y1 = top_n_coordinates[i][j][3].long()
x1 = top_n_coordinates[i][j][2].long()
part_imgs[i:i + 1, j] = F.interpolate(x_pad[i:i + 1, :, y0:y1, x0:x1], size=(224, 224), mode='bilinear',
align_corners=True)
part_imgs = part_imgs.view(batch * self.topN, 3, 224, 224).to(self.device)
_, _, part_features = self.pretrained_model(part_imgs.detach())
part_feature = part_features.view(batch, self.topN, -1)
part_feature = part_feature[:, :CAT_NUM, ...].contiguous()
part_feature = part_feature.view(batch, -1)
# concat_logits have the shape: B*200
concat_out = torch.cat([part_feature, feature], dim=1)
concat_logits = self.concat_net(concat_out)
raw_logits = resnet_out
# part_logits have the shape: B*N*200
part_logits = self.partcls_net(part_features).view(batch, self.topN, -1)
return top_n_coordinates, concat_out, raw_logits, concat_logits, part_logits, top_n_index, top_n_prob
def list_loss(logits, targets):
temp = F.log_softmax(logits, -1)
loss = [-temp[i][targets[i].item()] for i in range(logits.size(0))]
return torch.stack(loss)
def ranking_loss(score, targets, proposal_num=PROPOSAL_NUM):
loss = Variable(torch.zeros(1).cuda())
batch_size = score.size(0)
for i in range(proposal_num):
targets_p = (targets > targets[:, i].unsqueeze(1)).type(torch.cuda.FloatTensor)
pivot = score[:, i].unsqueeze(1)
loss_p = (1 - pivot + score) * targets_p
loss_p = torch.sum(F.relu(loss_p))
loss += loss_p
return loss / batch_size
def ntsnet(model_path, device='cuda'):
net = attention_net(topN=PROPOSAL_NUM, device=device)
ckpt = torch.load(model_path)
net.load_state_dict(ckpt['net_state_dict'])
print('test_acc:', ckpt['test_acc'])
return net
if __name__ == '__main__':
net = ntsnet(test_model)
| [
"torch.nn.ReLU",
"torch.nn.functional.interpolate",
"torch.nn.functional.pad",
"torchvision.ops.nms",
"torch.nn.AdaptiveAvgPool2d",
"torch.gather",
"torch.split",
"torch.topk",
"torch.nn.functional.log_softmax",
"torch.nn.functional.relu",
"torch.cat",
"torch.index_select",
"nts_net.resnet.resnet50",
"torch.load",
"torch.stack",
"torch.nn.Conv2d",
"torch.tensor",
"nts_net.anchors.generate_default_anchor_maps",
"torch.nn.Linear",
"torch.zeros"
] | [((4780, 4805), 'torch.nn.functional.log_softmax', 'F.log_softmax', (['logits', '(-1)'], {}), '(logits, -1)\n', (4793, 4805), True, 'import torch.nn.functional as F\n'), ((4889, 4906), 'torch.stack', 'torch.stack', (['loss'], {}), '(loss)\n', (4900, 4906), False, 'import torch\n'), ((5461, 5483), 'torch.load', 'torch.load', (['model_path'], {}), '(model_path)\n', (5471, 5483), False, 'import torch\n'), ((438, 467), 'torch.nn.Conv2d', 'nn.Conv2d', (['(2048)', '(128)', '(3)', '(1)', '(1)'], {}), '(2048, 128, 3, 1, 1)\n', (447, 467), False, 'from torch import nn\n'), ((489, 517), 'torch.nn.Conv2d', 'nn.Conv2d', (['(128)', '(128)', '(3)', '(2)', '(1)'], {}), '(128, 128, 3, 2, 1)\n', (498, 517), False, 'from torch import nn\n'), ((539, 567), 'torch.nn.Conv2d', 'nn.Conv2d', (['(128)', '(128)', '(3)', '(2)', '(1)'], {}), '(128, 128, 3, 2, 1)\n', (548, 567), False, 'from torch import nn\n'), ((588, 597), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (595, 597), False, 'from torch import nn\n'), ((619, 645), 'torch.nn.Conv2d', 'nn.Conv2d', (['(128)', '(6)', '(1)', '(1)', '(0)'], {}), '(128, 6, 1, 1, 0)\n', (628, 645), False, 'from torch import nn\n'), ((667, 693), 'torch.nn.Conv2d', 'nn.Conv2d', (['(128)', '(6)', '(1)', '(1)', '(0)'], {}), '(128, 6, 1, 1, 0)\n', (676, 693), False, 'from torch import nn\n'), ((715, 741), 'torch.nn.Conv2d', 'nn.Conv2d', (['(128)', '(9)', '(1)', '(1)', '(0)'], {}), '(128, 9, 1, 1, 0)\n', (724, 741), False, 'from torch import nn\n'), ((1078, 1108), 'torch.cat', 'torch.cat', (['(t1, t2, t3)'], {'dim': '(1)'}), '((t1, t2, t3), dim=1)\n', (1087, 1108), False, 'import torch\n'), ((1313, 1345), 'nts_net.resnet.resnet50', 'resnet.resnet50', ([], {'pretrained': '(True)'}), '(pretrained=True)\n', (1328, 1345), False, 'from nts_net import resnet\n'), ((1386, 1409), 'torch.nn.AdaptiveAvgPool2d', 'nn.AdaptiveAvgPool2d', (['(1)'], {}), '(1)\n', (1406, 1409), False, 'from torch import nn\n'), ((1445, 1476), 'torch.nn.Linear', 'nn.Linear', (['(512 * 4)', 'num_classes'], {}), '(512 * 4, num_classes)\n', (1454, 1476), False, 'from torch import nn\n'), ((1570, 1614), 'torch.nn.Linear', 'nn.Linear', (['(2048 * (CAT_NUM + 1))', 'num_classes'], {}), '(2048 * (CAT_NUM + 1), num_classes)\n', (1579, 1614), False, 'from torch import nn\n'), ((1642, 1673), 'torch.nn.Linear', 'nn.Linear', (['(512 * 4)', 'num_classes'], {}), '(512 * 4, num_classes)\n', (1651, 1673), False, 'from torch import nn\n'), ((1703, 1733), 'nts_net.anchors.generate_default_anchor_maps', 'generate_default_anchor_maps', ([], {}), '()\n', (1731, 1733), False, 'from nts_net.anchors import generate_default_anchor_maps\n'), ((1937, 2038), 'torch.nn.functional.pad', 'F.pad', (['x', '(self.pad_side, self.pad_side, self.pad_side, self.pad_side)'], {'mode': '"""constant"""', 'value': '(0)'}), "(x, (self.pad_side, self.pad_side, self.pad_side, self.pad_side), mode\n ='constant', value=0)\n", (1942, 2038), True, 'import torch.nn.functional as F\n'), ((3337, 3366), 'torch.stack', 'torch.stack', (['top_n_index_list'], {}), '(top_n_index_list)\n', (3348, 3366), False, 'import torch\n'), ((3388, 3416), 'torch.stack', 'torch.stack', (['top_n_prob_list'], {}), '(top_n_prob_list)\n', (3399, 3416), False, 'import torch\n'), ((3437, 3481), 'torch.zeros', 'torch.zeros', (['[batch, self.topN, 3, 224, 224]'], {}), '([batch, self.topN, 3, 224, 224])\n', (3448, 3481), False, 'import torch\n'), ((4372, 4413), 'torch.cat', 'torch.cat', (['[part_feature, feature]'], {'dim': '(1)'}), '([part_feature, feature], dim=1)\n', (4381, 4413), False, 
'import torch\n'), ((2529, 2590), 'torchvision.ops.nms', 'nms', (['edge_anchors_copy', 'rpn_score_reshape'], {'iou_threshold': '(0.25)'}), '(edge_anchors_copy, rpn_score_reshape, iou_threshold=0.25)\n', (2532, 2590), False, 'from torchvision.ops import nms\n'), ((2622, 2672), 'torch.gather', 'torch.gather', (['rpn_score_reshape', '(0)', 'nms_output_idx'], {}), '(rpn_score_reshape, 0, nms_output_idx)\n', (2634, 2672), False, 'import torch\n'), ((2706, 2762), 'torch.index_select', 'torch.index_select', (['edge_anchors_copy', '(0)', 'nms_output_idx'], {}), '(edge_anchors_copy, 0, nms_output_idx)\n', (2724, 2762), False, 'import torch\n'), ((2790, 2829), 'torch.topk', 'torch.topk', (['nms_output_score', 'self.topN'], {}), '(nms_output_score, self.topN)\n', (2800, 2829), False, 'import torch\n'), ((2858, 2916), 'torch.index_select', 'torch.index_select', (['nms_output_anchors', '(0)', 'top_n_result[1]'], {}), '(nms_output_anchors, 0, top_n_result[1])\n', (2876, 2916), False, 'import torch\n'), ((2954, 2990), 'torch.split', 'torch.split', (['top_n_anchors', '(1)'], {'dim': '(1)'}), '(top_n_anchors, 1, dim=1)\n', (2965, 2990), False, 'import torch\n'), ((3021, 3063), 'torch.cat', 'torch.cat', (['[x0_1, y0_1, x1_1, y1_1]'], {'dim': '(1)'}), '([x0_1, y0_1, x1_1, y1_1], dim=1)\n', (3030, 3063), False, 'import torch\n'), ((3097, 3151), 'torch.index_select', 'torch.index_select', (['nms_output_idx', '(0)', 'top_n_result[1]'], {}), '(nms_output_idx, 0, top_n_result[1])\n', (3115, 3151), False, 'import torch\n'), ((5283, 5297), 'torch.nn.functional.relu', 'F.relu', (['loss_p'], {}), '(loss_p)\n', (5289, 5297), True, 'import torch.nn.functional as F\n'), ((2296, 2348), 'torch.tensor', 'torch.tensor', (['self.edge_anchors'], {'dtype': 'torch.float32'}), '(self.edge_anchors, dtype=torch.float32)\n', (2308, 2348), False, 'import torch\n'), ((3812, 3917), 'torch.nn.functional.interpolate', 'F.interpolate', (['x_pad[i:i + 1, :, y0:y1, x0:x1]'], {'size': '(224, 224)', 'mode': '"""bilinear"""', 'align_corners': '(True)'}), "(x_pad[i:i + 1, :, y0:y1, x0:x1], size=(224, 224), mode=\n 'bilinear', align_corners=True)\n", (3825, 3917), True, 'import torch.nn.functional as F\n'), ((4990, 5004), 'torch.zeros', 'torch.zeros', (['(1)'], {}), '(1)\n', (5001, 5004), False, 'import torch\n')] |
# -*- coding: utf-8 -*-
import logging
from typing import Optional
from ...core.app import App
class ConsoleBot:
log = logging.getLogger(__name__)
def __init__(self) -> None:
self.app: Optional[App] = None
self.search_mode = False
# end def
from .integration import process_chapter_range, start
from .get_crawler import (choose_a_novel, confirm_retry,
get_crawlers_to_search, get_novel_url)
from .login_info import get_login_info
from .output_style import (force_replace_old, get_output_formats,
get_output_path, should_pack_by_volume)
from .range_selection import (get_range_from_chapters,
get_range_from_volumes, get_range_selection,
get_range_using_index, get_range_using_urls)
# end class
| [
"logging.getLogger"
] | [((126, 153), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (143, 153), False, 'import logging\n')] |
import time
from functional import Stream
class Timer():
def __init__(self, name=''):
self.name = name
self.start = time.time()
@property
def elapsed(self):
return time.time() - self.start
def __enter__(self):
return self
def __exit__(self, *_):
self.checkpoint('done')
def checkpoint(self, name=''):
print('{timer}: {checkpoint} in {elapsed:.3f} seconds'.format(
timer=self.name,
checkpoint=name,
elapsed=self.elapsed,
).strip())
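# Usage sketch: wrap any block to time it (the names below are illustrative):
#   with Timer('load'):
#       data = expensive_load()
# prints "load: done in <elapsed> seconds" when the block exits.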
if __name__ == '__main__':
iterations = 5000000
lambda1 = lambda x: x * 2
lambda2 = lambda x: x * x
def func1(x): return x * 2
def func2(x): return x * x
with Timer('Stream'):
result = Stream(range(iterations)) \
.map(lambda1) \
.map(lambda2) \
.as_tuple()
assert len(result) == iterations
with Timer('List'):
result = range(iterations)
result = [x * 2 for x in result]
result = [x * x for x in result]
result = tuple(result)
assert len(result) == iterations
with Timer('List Labmda'):
result = range(iterations)
result = [lambda1(x) for x in result]
result = [lambda2(x) for x in result]
result = tuple(result)
assert len(result) == iterations
with Timer('Generator'):
result = range(iterations)
result = (x * 2 for x in result)
result = (x * x for x in result)
result = tuple(result)
assert len(result) == iterations
with Timer('Generator Lambda'):
result = range(iterations)
result = (lambda1(x) for x in result)
result = (lambda2(x) for x in result)
result = tuple(result)
assert len(result) == iterations
with Timer('Generator Function'):
result = range(iterations)
result = (func1(x) for x in result)
result = (func2(x) for x in result)
result = tuple(result)
assert len(result) == iterations
| [
"time.time"
] | [((139, 150), 'time.time', 'time.time', ([], {}), '()\n', (148, 150), False, 'import time\n'), ((204, 215), 'time.time', 'time.time', ([], {}), '()\n', (213, 215), False, 'import time\n')] |
import rvisa as visa
import time
from lightlab import visalogger as logger
from .driver_base import InstrumentSessionBase
from .visa_object import VISAObject
OPEN_RETRIES = 5
CR = '\r'
LF = '\n'
# Note: this class inherits from the VISAObject class and simply overrides specific methods.
class RVISAObject(VISAObject):
def __init__(self, address=None, tempSess=False, url=None, timeout=None):
'''
Args:
tempSess (bool): If True, the session is opened and closed every time there is a command
address (str): The full visa address
url (str, required): the remote instrumentation server link
'''
self.tempSess = tempSess
self.resMan = None
self.mbSession = None
self.address = address
self._open_retries = 0
self.__timeout = timeout
        self._termination = None
# RVisa edit:
self.url = url
def open(self):
if self.mbSession is not None:
return
if self.address is None:
raise RuntimeError("Attempting to open connection to unknown address.")
if self.url is None:
raise RuntimeError("Remote instrumentation connection is unset.")
if self.resMan is None:
self.resMan = visa.ResourceManager(self.url,self.__timeout)
try:
self.mbSession = self.resMan.open_resource(self.address)
if not self.tempSess:
logger.debug('Opened %s', self.address)
except Exception as err:
logger.warning(f"There was a problem connecting. Error:\n {err}")
def close(self):
if self.mbSession is None:
return
try:
self.mbSession.close()
except Exception as err:
print(err)
logger.error(f"There was a problem connectin. Error:\n {err}")
raise
self.mbSession = None
if not self.tempSess:
logger.debug('Closed %s', self.address)
def query(self, queryStr, withTimeout=None):
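        # Open the session (a temporary one if tempSess=True), send the query,
        # and always close temporary sessions again in the finally block.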
retStr = None
timeout = withTimeout
try:
self.open()
logger.debug('%s - Q - %s', self.address, queryStr)
try:
if timeout is None:
retStr = self.mbSession.query(queryStr)
else:
# TODO: test this timeout version
retStr = self.mbSession.query(queryStr,timeout)
except Exception:
logger.error('Problem querying to %s', self.address)
# self.close()
raise
retStr = retStr.rstrip()
logger.debug('Query Read - %s', retStr)
finally:
if self.tempSess:
self.close()
return retStr
def write(self, writeStr):
try:
self.open()
try:
self.mbSession.write(writeStr)
except Exception:
logger.error('Problem writing to %s', self.address)
raise
logger.debug('%s - W - %s', self.address, writeStr)
finally:
if self.tempSess:
self.close()
@property
def timeout(self):
if self.__timeout is None:
if self.mbSession is None:
try:
self.open()
finally:
if self.tempSess:
self.close()
else:
pass # NOTE: RVisa does not have a built-in attribute for timeouts under our ResourceManager. Timeouts are handled on a query-basis.
return self.__timeout
@property
def termination(self):
if self.mbSession is not None:
self._termination = self.mbSession.write_termination
return self._termination
@termination.setter
def termination(self, value):
if value in (CR, LF, CR + LF, ''):
self._termination = value
else:
raise ValueError("Termination must be one of these: CR, CRLF, LR, ''")
if self.mbSession is not None:
self.mbSession.write_termination = value | [
"lightlab.visalogger.debug",
"rvisa.ResourceManager",
"lightlab.visalogger.error",
"lightlab.visalogger.warning"
] | [((1312, 1358), 'rvisa.ResourceManager', 'visa.ResourceManager', (['self.url', 'self.__timeout'], {}), '(self.url, self.__timeout)\n', (1332, 1358), True, 'import rvisa as visa\n'), ((2010, 2049), 'lightlab.visalogger.debug', 'logger.debug', (['"""Closed %s"""', 'self.address'], {}), "('Closed %s', self.address)\n", (2022, 2049), True, 'from lightlab import visalogger as logger\n'), ((2205, 2256), 'lightlab.visalogger.debug', 'logger.debug', (['"""%s - Q - %s"""', 'self.address', 'queryStr'], {}), "('%s - Q - %s', self.address, queryStr)\n", (2217, 2256), True, 'from lightlab import visalogger as logger\n'), ((2715, 2754), 'lightlab.visalogger.debug', 'logger.debug', (['"""Query Read - %s"""', 'retStr'], {}), "('Query Read - %s', retStr)\n", (2727, 2754), True, 'from lightlab import visalogger as logger\n'), ((3122, 3173), 'lightlab.visalogger.debug', 'logger.debug', (['"""%s - W - %s"""', 'self.address', 'writeStr'], {}), "('%s - W - %s', self.address, writeStr)\n", (3134, 3173), True, 'from lightlab import visalogger as logger\n'), ((1490, 1529), 'lightlab.visalogger.debug', 'logger.debug', (['"""Opened %s"""', 'self.address'], {}), "('Opened %s', self.address)\n", (1502, 1529), True, 'from lightlab import visalogger as logger\n'), ((1575, 1643), 'lightlab.visalogger.warning', 'logger.warning', (['f"""There was a problem connecting. Error:\n {err}"""'], {}), '(f"""There was a problem connecting. Error:\n {err}""")\n', (1589, 1643), True, 'from lightlab import visalogger as logger\n'), ((1857, 1922), 'lightlab.visalogger.error', 'logger.error', (['f"""There was a problem connectin. Error:\n {err}"""'], {}), '(f"""There was a problem connectin. Error:\n {err}""")\n', (1869, 1922), True, 'from lightlab import visalogger as logger\n'), ((2560, 2612), 'lightlab.visalogger.error', 'logger.error', (['"""Problem querying to %s"""', 'self.address'], {}), "('Problem querying to %s', self.address)\n", (2572, 2612), True, 'from lightlab import visalogger as logger\n'), ((3036, 3087), 'lightlab.visalogger.error', 'logger.error', (['"""Problem writing to %s"""', 'self.address'], {}), "('Problem writing to %s', self.address)\n", (3048, 3087), True, 'from lightlab import visalogger as logger\n')] |
import os
import requests
from pathlib import Path
def download_yandex(sharing_link, file_path):
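    # Resolve the public sharing link to a direct download URL via the Yandex Disk
    # public-resources API, then write the downloaded content to file_path.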
API_ENDPOINT = 'https://cloud-api.yandex.net/v1/disk/public/resources/download?public_key={}'
pk_request = requests.get(API_ENDPOINT.format(sharing_link))
r = requests.get(pk_request.json()['href'])
open(file_path, 'wb').write(r.content)
data_path = os.path.join(Path(__file__).parent.parent, 'data/bronze/interactions.parquet.gz')
download_yandex('https://disk.yandex.ru/d/QsJkkMN5KDQobA', data_path)
if os.path.exists(data_path):
print('Dataset downloaded: {}'.format(data_path)) | [
"os.path.exists",
"pathlib.Path"
] | [((513, 538), 'os.path.exists', 'os.path.exists', (['data_path'], {}), '(data_path)\n', (527, 538), False, 'import os\n'), ((370, 384), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (374, 384), False, 'from pathlib import Path\n')] |
from setuptools import find_packages
from setuptools import setup
from os import path as os_path
this_directory = os_path.abspath(os_path.dirname(__file__))
def read_file(filename):
with open(os_path.join(this_directory, filename), encoding='utf-8') as f:
long_description = f.read()
return long_description
def read_requirement(filename):
return [line.strip() for line in read_file(filename).splitlines() if not line.startswith('#')]
setup(
name="handyML",
version="0.0.1b",
author="ITryagain",
author_email="<EMAIL>",
python_requires='>=3.6.0',
description="A library for data science",
long_description=read_file('README.md'),
long_description_content_type="text/markdown",
url="https://github.com/leo6033/handyML",
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
packages=find_packages(),
install_requires=read_requirement('requirements.txt'),
) | [
"os.path.dirname",
"setuptools.find_packages",
"os.path.join"
] | [((135, 160), 'os.path.dirname', 'os_path.dirname', (['__file__'], {}), '(__file__)\n', (150, 160), True, 'from os import path as os_path\n'), ((989, 1004), 'setuptools.find_packages', 'find_packages', ([], {}), '()\n', (1002, 1004), False, 'from setuptools import find_packages\n'), ((205, 243), 'os.path.join', 'os_path.join', (['this_directory', 'filename'], {}), '(this_directory, filename)\n', (217, 243), True, 'from os import path as os_path\n')] |
#!/usr/bin/env python
import logging
import sys
import ssl
import argparse
import os
# Make sure the flickrapi module from the source distribution is used
sys.path.insert(0, 'flickrapi')
import flickrapi
logging.basicConfig(
format = '%(asctime)s %(name)s %(levelname)s: %(message)s',
# filename = logfile,
# level = logging.DEBUG)
level = logging.INFO)
mainlogger = logging.getLogger(__name__)
mainlogger.info('running flickr downloading client')
class FlickrAccess:
def __init__(self):
api_key = unicode(os.environ['API_KEY'])
api_secret = unicode(os.environ['API_SECRET'])
self.logger = logging.getLogger(__name__ + '.FlickrAccess')
self.flickr = flickrapi.FlickrAPI(api_key, api_secret, store_token = True)
def ensurePermission(self, perm):
uperm = unicode(perm)
if not self.flickr.token_valid(perms=uperm):
self.logger.info('acquire permission ' + uperm)
# Get a request token
self.flickr.get_request_token(oauth_callback='oob')
# Open a browser at the authentication URL. Do this however
# you want, as long as the user visits that URL.
authorize_url = self.flickr.auth_url(perms=uperm)
#webbrowser.open_new_tab(authorize_url)
print(' Please paste this URL into your browser and copy the verification code: ' + authorize_url)
# Get the verifier code from the user. Do this however you
# want, as long as the user gives the application the code.
verifier = unicode(raw_input(' Verifier code: '))
# Trade the request token for an access token
self.flickr.get_access_token(verifier)
class DownloadIt:
def __init__(self, flickraccess):
assert flickraccess != None
flickraccess.ensurePermission('read')
self.flickr = flickraccess.flickr
def run(self):
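        # Walk every photo of the authenticated user ('me') and write the
        # original-size download URL (url_o) of each one to urls.txt.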
walkingcount = 0
allSLLContext = ssl._create_unverified_context()
        # NOTE: the FlickrAPI object has no ensurePermission(); read access was
        # already ensured by FlickrAccess in __init__, so no extra request is needed here.
with open('urls.txt', 'w') as urlfile:
for walkingphoto in self.flickr.walk(user_id = 'me', extras = 'url_o', per_page = '500'):
walkingcount += 1
photoid = walkingphoto.get('id')
urlo = walkingphoto.get('url_o')
print('checking #' + str(walkingcount) + ': ' + photoid + ', url_o: ' + urlo)
urlfile.write(urlo + '\n')
def main(argv):
parser = argparse.ArgumentParser(description='Inquire URLs from flickr')
parser.add_argument('--inquire', action='store_true', help='Inquire all download URL for images in O')
parser.add_argument('--debug', action='store_true', help='Debug logging')
args = parser.parse_args()
#print(args)
if args.debug:
logging.getLogger().setLevel(logging.DEBUG)
mainlogger.debug(str(args))
if args.inquire:
DownloadIt(FlickrAccess()).run()
else:
parser.print_help()
if __name__ == "__main__":
main(sys.argv)
| [
"logging.basicConfig",
"logging.getLogger",
"sys.path.insert",
"argparse.ArgumentParser",
"flickrapi.FlickrAPI",
"ssl._create_unverified_context"
] | [((155, 186), 'sys.path.insert', 'sys.path.insert', (['(0)', '"""flickrapi"""'], {}), "(0, 'flickrapi')\n", (170, 186), False, 'import sys\n'), ((205, 307), 'logging.basicConfig', 'logging.basicConfig', ([], {'format': '"""%(asctime)s %(name)s %(levelname)s: %(message)s"""', 'level': 'logging.INFO'}), "(format=\n '%(asctime)s %(name)s %(levelname)s: %(message)s', level=logging.INFO)\n", (224, 307), False, 'import logging\n'), ((398, 425), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (415, 425), False, 'import logging\n'), ((2532, 2595), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Inquire URLs from flickr"""'}), "(description='Inquire URLs from flickr')\n", (2555, 2595), False, 'import argparse\n'), ((655, 700), 'logging.getLogger', 'logging.getLogger', (["(__name__ + '.FlickrAccess')"], {}), "(__name__ + '.FlickrAccess')\n", (672, 700), False, 'import logging\n'), ((723, 781), 'flickrapi.FlickrAPI', 'flickrapi.FlickrAPI', (['api_key', 'api_secret'], {'store_token': '(True)'}), '(api_key, api_secret, store_token=True)\n', (742, 781), False, 'import flickrapi\n'), ((2017, 2049), 'ssl._create_unverified_context', 'ssl._create_unverified_context', ([], {}), '()\n', (2047, 2049), False, 'import ssl\n'), ((2857, 2876), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (2874, 2876), False, 'import logging\n')] |
# -*- coding: utf-8 -*-
from mock import patch, Mock, create_autospec
from datetime import datetime
from django.test.testcases import TransactionTestCase
from django.utils import timezone
from background_task.models import Task
@patch('background_task.models.Task.objects')
class UnlockTaskTestCase(TransactionTestCase):
def setUp(self):
self.task = create_autospec(Task)
self.task.locked_by = 'locked_by'
self.task.locked_at = timezone.now()
self.task.run_at = timezone.now()
self.task.task_name = 'TaskName'
@patch('background_task.models.timezone')
def test_unlock_clears_locked_at_and_locked_by(self, django_timezone, Task_objects):
Task_objects.exclude.return_value.filter.return_value.exists.return_value = False
run_at = self.task.run_at
Task.unlock(self.task)
self.assertIsNone(self.task.locked_by)
self.assertIsNone(self.task.locked_at)
self.assertEqual(self.task.run_at, django_timezone.now())
Task_objects.exclude.assert_called_once_with(pk=self.task.pk)
Task_objects.exclude().filter.assert_called_once_with(
task_name=self.task.task_name,
run_at__gte=run_at
)
self.task.save.assert_called_once()
def test_unlock_sets_run_at_to_before_next_task(self, Task_objects):
next_task = Task_objects.exclude().filter()
next_task.exists.return_value = True
next_task.earliest.return_value.run_at = datetime(year=2021, month=3, day=10)
Task.unlock(self.task)
expected = datetime(year=2021, month=3, day=9, hour=23, minute=59, second=59, microsecond=999000)
self.assertEqual(self.task.run_at, expected)
next_task.earliest.assert_called_once_with('run_at')
self.task.save.assert_called_once()
class TaskRunsAsynchronouslyTestCase(TransactionTestCase):
def setUp(self):
self.task = create_autospec(Task)
self.task.force_synchronous_execution = False
@patch('background_task.models.app_settings')
def test_not_forcing_sync_and_global_async_is_true_returns_true(self, app_settings):
app_settings.BACKGROUND_TASK_RUN_ASYNC = True
self.assertTrue(Task.runs_async(self.task))
@patch('background_task.models.app_settings')
def test_not_forcing_sync_and_global_async_is_false_returns_false(self, app_settings):
app_settings.BACKGROUND_TASK_RUN_ASYNC = False
self.assertFalse(Task.runs_async(self.task))
@patch('background_task.models.app_settings')
def test_forcing_sync_and_global_async_is_true_returns_false(self, app_settings):
app_settings.BACKGROUND_TASK_RUN_ASYNC = True
self.task.force_synchronous_execution = True
self.assertFalse(Task.runs_async(self.task))
@patch('background_task.models.app_settings')
def test_forcing_sync_and_global_async_is_false_returns_true(self, app_settings):
app_settings.BACKGROUND_TASK_RUN_ASYNC = False
self.task.force_synchronous_execution = True
self.assertFalse(Task.runs_async(self.task))
| [
"datetime.datetime",
"background_task.models.Task.runs_async",
"mock.patch",
"background_task.models.Task.unlock",
"django.utils.timezone.now",
"mock.create_autospec"
] | [((233, 277), 'mock.patch', 'patch', (['"""background_task.models.Task.objects"""'], {}), "('background_task.models.Task.objects')\n", (238, 277), False, 'from mock import patch, Mock, create_autospec\n'), ((565, 605), 'mock.patch', 'patch', (['"""background_task.models.timezone"""'], {}), "('background_task.models.timezone')\n", (570, 605), False, 'from mock import patch, Mock, create_autospec\n'), ((2016, 2060), 'mock.patch', 'patch', (['"""background_task.models.app_settings"""'], {}), "('background_task.models.app_settings')\n", (2021, 2060), False, 'from mock import patch, Mock, create_autospec\n'), ((2263, 2307), 'mock.patch', 'patch', (['"""background_task.models.app_settings"""'], {}), "('background_task.models.app_settings')\n", (2268, 2307), False, 'from mock import patch, Mock, create_autospec\n'), ((2514, 2558), 'mock.patch', 'patch', (['"""background_task.models.app_settings"""'], {}), "('background_task.models.app_settings')\n", (2519, 2558), False, 'from mock import patch, Mock, create_autospec\n'), ((2812, 2856), 'mock.patch', 'patch', (['"""background_task.models.app_settings"""'], {}), "('background_task.models.app_settings')\n", (2817, 2856), False, 'from mock import patch, Mock, create_autospec\n'), ((366, 387), 'mock.create_autospec', 'create_autospec', (['Task'], {}), '(Task)\n', (381, 387), False, 'from mock import patch, Mock, create_autospec\n'), ((460, 474), 'django.utils.timezone.now', 'timezone.now', ([], {}), '()\n', (472, 474), False, 'from django.utils import timezone\n'), ((502, 516), 'django.utils.timezone.now', 'timezone.now', ([], {}), '()\n', (514, 516), False, 'from django.utils import timezone\n'), ((828, 850), 'background_task.models.Task.unlock', 'Task.unlock', (['self.task'], {}), '(self.task)\n', (839, 850), False, 'from background_task.models import Task\n'), ((1496, 1532), 'datetime.datetime', 'datetime', ([], {'year': '(2021)', 'month': '(3)', 'day': '(10)'}), '(year=2021, month=3, day=10)\n', (1504, 1532), False, 'from datetime import datetime\n'), ((1542, 1564), 'background_task.models.Task.unlock', 'Task.unlock', (['self.task'], {}), '(self.task)\n', (1553, 1564), False, 'from background_task.models import Task\n'), ((1585, 1675), 'datetime.datetime', 'datetime', ([], {'year': '(2021)', 'month': '(3)', 'day': '(9)', 'hour': '(23)', 'minute': '(59)', 'second': '(59)', 'microsecond': '(999000)'}), '(year=2021, month=3, day=9, hour=23, minute=59, second=59,\n microsecond=999000)\n', (1593, 1675), False, 'from datetime import datetime\n'), ((1933, 1954), 'mock.create_autospec', 'create_autospec', (['Task'], {}), '(Task)\n', (1948, 1954), False, 'from mock import patch, Mock, create_autospec\n'), ((2228, 2254), 'background_task.models.Task.runs_async', 'Task.runs_async', (['self.task'], {}), '(self.task)\n', (2243, 2254), False, 'from background_task.models import Task\n'), ((2479, 2505), 'background_task.models.Task.runs_async', 'Task.runs_async', (['self.task'], {}), '(self.task)\n', (2494, 2505), False, 'from background_task.models import Task\n'), ((2777, 2803), 'background_task.models.Task.runs_async', 'Task.runs_async', (['self.task'], {}), '(self.task)\n', (2792, 2803), False, 'from background_task.models import Task\n'), ((3076, 3102), 'background_task.models.Task.runs_async', 'Task.runs_async', (['self.task'], {}), '(self.task)\n', (3091, 3102), False, 'from background_task.models import Task\n')] |
# These are functions that are used by the server and the client. They handle
# the conversion from dictionnarys to strings, so the networking server can convert it into bytes, and
# the conversion from strings to the board.
import os
import sys
from rich import print
from sys import exit as quit
class End(Exception):
pass
# clear the console
def clear():
os.system("clear" if "win" not in sys.platform else "cls")
# get board
def get_board():
board = {}
for letter in ["a", "b", "c"]:
board[letter] = {}
for number in ["1", "2", "3"]:
board[letter][number] = "-"
return board
# prints out the board
def print_board(board):
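    # The digits 1-9 in the banner template below are placeholders; they are
    # replaced with the current cell values, colour-coded per player, before printing.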
banner = """
-------------
a | 1 | 2 | 3 |
-------------
b | 4 | 5 | 6 |
-------------
c | 7 | 8 | 9 |
-------------
"""
ind = 0
for letter in board:
for num in board[letter]:
ind += 1
banner = banner.replace(str(ind), board[letter][num]
.replace("-", " ")
.replace("x", "[red]x[/red]")
.replace("o", "[blue]o[/blue]"))
banner = " 1 2 3" + banner # numbers at the top would get replaced too
print(f"[white]{banner}")
| [
"os.system",
"rich.print"
] | [((366, 424), 'os.system', 'os.system', (["('clear' if 'win' not in sys.platform else 'cls')"], {}), "('clear' if 'win' not in sys.platform else 'cls')\n", (375, 424), False, 'import os\n'), ((1282, 1307), 'rich.print', 'print', (['f"""[white]{banner}"""'], {}), "(f'[white]{banner}')\n", (1287, 1307), False, 'from rich import print\n')] |
from flask import Blueprint
service = Blueprint('service', __name__)
| [
"flask.Blueprint"
] | [((39, 69), 'flask.Blueprint', 'Blueprint', (['"""service"""', '__name__'], {}), "('service', __name__)\n", (48, 69), False, 'from flask import Blueprint\n')] |
from flask import Flask, render_template, url_for, request, redirect
from ska_sdp_opinterface import model
app = Flask(__name__)
@app.route("/test")
def hello_world():
return "Hello, World!"
@app.route("/")
@app.route("/db-list")
def db_list():
return render_template(
"db_list.html", title="Database Contents List", entries=model.get_raw_dict()
)
@app.route("/db-tree")
def db_tree():
return render_template(
"db_tree.html", title="Database Contents Tree", data=model.get_tree_data()
)
@app.route("/workflows")
def workflows():
return render_template(
"workflows.html", title="Workflows", data=model.get_workflows()
)
@app.route("/db-create", methods=["POST", "GET"])
def db_create():
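    # POST stores the submitted key/value pair and redirects to the list view;
    # GET simply renders the creation form.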
if request.method == "POST":
key = request.form["key"]
value = request.form["value"]
model.create_entry(key, value)
return redirect(url_for("db_list"))
return render_template("db_create.html")
if __name__ == "__main__":
app.run()
| [
"flask.render_template",
"ska_sdp_opinterface.model.create_entry",
"flask.Flask",
"ska_sdp_opinterface.model.get_tree_data",
"flask.url_for",
"ska_sdp_opinterface.model.get_workflows",
"ska_sdp_opinterface.model.get_raw_dict"
] | [((115, 130), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (120, 130), False, 'from flask import Flask, render_template, url_for, request, redirect\n'), ((950, 983), 'flask.render_template', 'render_template', (['"""db_create.html"""'], {}), "('db_create.html')\n", (965, 983), False, 'from flask import Flask, render_template, url_for, request, redirect\n'), ((863, 893), 'ska_sdp_opinterface.model.create_entry', 'model.create_entry', (['key', 'value'], {}), '(key, value)\n', (881, 893), False, 'from ska_sdp_opinterface import model\n'), ((347, 367), 'ska_sdp_opinterface.model.get_raw_dict', 'model.get_raw_dict', ([], {}), '()\n', (365, 367), False, 'from ska_sdp_opinterface import model\n'), ((503, 524), 'ska_sdp_opinterface.model.get_tree_data', 'model.get_tree_data', ([], {}), '()\n', (522, 524), False, 'from ska_sdp_opinterface import model\n'), ((653, 674), 'ska_sdp_opinterface.model.get_workflows', 'model.get_workflows', ([], {}), '()\n', (672, 674), False, 'from ska_sdp_opinterface import model\n'), ((918, 936), 'flask.url_for', 'url_for', (['"""db_list"""'], {}), "('db_list')\n", (925, 936), False, 'from flask import Flask, render_template, url_for, request, redirect\n')] |
import matplotlib.pyplot as plt
import pandas as pd
import json
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
import itertools
import dataframe_image as dfi
from matplotlib.patches import Patch
sns.set()
df = pd.read_json('result/lxmert_experiment.json')
columns_list = sorted(df.columns.to_list())
sebnet = [s for s in columns_list if 'Low_Magnitude' in s]
re=[]
for sparcity_level in np.arange(10, 100, 10):
result = []
result.append(sparcity_level)
for subnet_mode in ['Low_Magnitude', 'Random', 'High_Magnitude']:
sebnet = [s for s in columns_list if f'{subnet_mode}-{sparcity_level}' in s][0]
sebnet = df[sebnet].to_dict()
result.append(str(sebnet['pruning_result'][list(sebnet['pruning_result'].keys())[-2]]+0.1))
result.append(sebnet['retrain_result']['Epoch 3 Valid'])
# print(len(result))
print(result)
re.append(result)
# re.append(df['vqa_lxmert_finetuned_seed0/finetune_result.json'].to_dict()['Epoch 3 Valid'])
# print(re)
# sebnet = pd.DataFrame(re)
sebnet = pd.DataFrame(re, columns=['Sparsity Level', 'Low Magnitude (pruned)', 'Low Magnitude (retrain)', 'Random (pruned)', 'Random (retrain)', 'High Magnitude (pruned)', 'High Magnitude (retrain)'])
# sebnet = sebnet.reset_index(drop=True, inplace=True)
print(sebnet.head())
dfi.export(sebnet, '../report/images/report.PNG')
def flip(items, ncol):
return itertools.chain(*[items[i::ncol] for i in range(ncol)])
experiment_names = ['Low Magnitude Pruning Subnet (retrained)', 'Low Magnitude Pruning Subnet (pruned)',
                    'Random Pruning Subnet (retrained)', 'Random Pruning Subnet (pruned)',
                    'High Magnitude Pruning Subnet (retrained)', 'High Magnitude Pruning Subnet (pruned)',
'Unpruned Baseline']
def find_result(sparcity_level):
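    # Collect, for one sparsity level, the retrained and pruned accuracy of each
    # pruning mode followed by the unpruned-baseline accuracy (legend order).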
result = []
df = pd.read_json('result/lxmert_experiment.json')
columns_list = sorted(df.columns.to_list())
for subnet_mode in ['Low_Magnitude', 'Random', 'High_Magnitude']:
sebnet = [s for s in columns_list if f'{subnet_mode}-{sparcity_level}' in s][0]
sebnet = df[sebnet].to_dict()
result.append(sebnet['retrain_result']['Epoch 3 Valid'])
result.append(sebnet['pruning_result'][list(sebnet['pruning_result'].keys())[-2]]+0.1)
result.append(df['vqa_lxmert_finetuned_seed0/finetune_result.json'].to_dict()['Epoch 3 Valid'])
print(result)
return result
fig, axs = plt.subplots(3, 3, figsize=(9,9))
my_cmap = sns.color_palette("Paired") + sns.color_palette("Paired")[:8]
patterns = [ r"*" , r"|", r"\\" , r"\\||" , r"--", r"--||", r"//", r"//||", "xx", "xx||", "..", "..||", "oo", "oo||"]
# x_pos = np.arange(1, 8, 1)
x_pos = np.arange(len(experiment_names))
bbox_to_anchor_left=0.6
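# One bar-chart panel per sparsity level (10%-90%), arranged in a 3x3 grid.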
for i, sparcity_level in enumerate(np.arange(10, 100, 10)):
means = find_result(sparcity_level)
print(len(means), len(x_pos))
row = i // 3
col = i % 3
print(row, col)
axs[row, col].bar(x_pos, means, 0.7, align='center', color=my_cmap)#my_cmap(my_norm(range(len(x_pos)))))
axs[row, col].set_ylabel('Val Accuracy')
axs[row, col].set_title(f'VQA ({sparcity_level}%)')
axs[row, col].set_xticks([])
bars = axs[row, col].patches
for bar, hatch in zip(bars, patterns): # loop over bars and hatches to set hatches in correct order
bar.set_hatch(hatch)
legend_elements = [Patch(facecolor=my_cmap[i], hatch=patterns[i], label=exp) for i, exp in enumerate(experiment_names)]
l_col = 3
legend = plt.legend(flip(legend_elements, l_col), flip(experiment_names, l_col), loc='best', ncol=l_col, bbox_to_anchor=(0.93, -0.1), labelspacing=1.5, handlelength=4)
for patch in legend.get_patches():
patch.set_height(10)
patch.set_y(-1)
plt.subplots_adjust(right=1.5)
fig.tight_layout()
# plt.show()
fig.savefig('../report/images/experiment_result.PNG', bbox_inches='tight')
#---------------------------------------------------------------------------------------------------------------------------#
for subnet_mode in ['High_Magnitude', 'Low_Magnitude', 'Random']:
subnetwork = [s for s in columns_list if subnet_mode in s and 'seed1' in s]
print("subnetwork : ", subnetwork)
retrain_result = []
reset_initial_weight = []
pruned = []
for sub_member in subnetwork:
member = df[sub_member].to_dict()
retrain_result.append(member['retrain_result']['Epoch 3 Valid'])
reset_initial_weight.append(member['pruning_result']['accuarcy after pruning'])
pruned.append(member['pruning_result'][list(member['pruning_result'].keys())[-2]])
sparcity_level = np.arange(10, 100, 10)
baseline_model = [df['vqa_lxmert_finetuned_seed0/finetune_result.json'].to_dict()['Epoch 3 Valid']]*9
with plt.style.context('ggplot'):
#['.', ',', 'o', 'v', '^', '<', '>', '1', '2', '3', '4', '8', 's', 'p', '*', 'h', 'H', '+', 'x', 'D', 'd', '|', '_', 'P', 'X', 4, 5, 6, 7, 8, 9, 10, 11]
# fig = plt.figure(figsize=(7,10))
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
ax.set_ylabel('Val Accuracy')
ax.set_xlabel('Sparcity Level (%)')
name_plot = subnet_mode.replace('_', ' ')
ax.set_title(f'{name_plot} Pruning LXMERT On VQA')
# ax.plot(sparcity_level, retrain_result, marker = "<", label = 'Low Magnitude Subnetwork (retrained)')
# ax.plot(sparcity_level, reset_initial_weight, marker = "h", label = 'Low Magnitude Subnetwork (reset initial weight)')
# ax.plot(sparcity_level, pruned, marker = "*", label = 'Low Magnitude Subnetwork (pruned)')
# ax.plot(sparcity_level, baseline_model,marker = ".", label = 'Unpruned Baseline')
ax.plot(sparcity_level, retrain_result, marker = ">", label = 'retrained')
ax.plot(sparcity_level, reset_initial_weight, marker = "h", label = 'reset initial weight')
var = -0.05
if subnet_mode != 'High_Magnitude':
ax.plot(sparcity_level, pruned, marker = "*", label = 'pruned')
var = 0
ax.plot(sparcity_level, baseline_model, '--r', label = 'Unpruned Baseline')
plt.legend(loc = "best", ncol=4, bbox_to_anchor=(0.95+var, -0.2), labelspacing=1, handlelength=4)
plt.subplots_adjust(right=1.5)
plt.savefig(f'../report/images/{subnet_mode}_experiment_result.PNG', bbox_inches='tight')
plt.cla()
plt.clf()
# plt.show()
# dfi.export(df, '../reports/tokenization.png')
| [
"matplotlib.pyplot.subplots_adjust",
"seaborn.set",
"matplotlib.pyplot.savefig",
"seaborn.color_palette",
"numpy.arange",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.clf",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.style.context",
"matplotlib.patches.Patch",
"dataframe_image.export",
"pandas.DataFrame",
"matplotlib.pyplot.cla",
"matplotlib.pyplot.subplots",
"pandas.read_json"
] | [((221, 230), 'seaborn.set', 'sns.set', ([], {}), '()\n', (228, 230), True, 'import seaborn as sns\n'), ((238, 283), 'pandas.read_json', 'pd.read_json', (['"""result/lxmert_experiment.json"""'], {}), "('result/lxmert_experiment.json')\n", (250, 283), True, 'import pandas as pd\n'), ((417, 439), 'numpy.arange', 'np.arange', (['(10)', '(100)', '(10)'], {}), '(10, 100, 10)\n', (426, 439), True, 'import numpy as np\n'), ((1061, 1259), 'pandas.DataFrame', 'pd.DataFrame', (['re'], {'columns': "['Sparcity Level', 'Low Magnitude(pruned)', 'Low Magnitude (retrain)',\n 'Random (pruned)', 'Random (retrain)', 'High_Magnitude (pruned)',\n 'High Magnitude (retrain)']"}), "(re, columns=['Sparcity Level', 'Low Magnitude(pruned)',\n 'Low Magnitude (retrain)', 'Random (pruned)', 'Random (retrain)',\n 'High_Magnitude (pruned)', 'High Magnitude (retrain)'])\n", (1073, 1259), True, 'import pandas as pd\n'), ((1322, 1371), 'dataframe_image.export', 'dfi.export', (['sebnet', '"""../report/images/report.PNG"""'], {}), "(sebnet, '../report/images/report.PNG')\n", (1332, 1371), True, 'import dataframe_image as dfi\n'), ((2474, 2508), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(3)', '(3)'], {'figsize': '(9, 9)'}), '(3, 3, figsize=(9, 9))\n', (2486, 2508), True, 'import matplotlib.pyplot as plt\n'), ((3799, 3829), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'right': '(1.5)'}), '(right=1.5)\n', (3818, 3829), True, 'import matplotlib.pyplot as plt\n'), ((1871, 1916), 'pandas.read_json', 'pd.read_json', (['"""result/lxmert_experiment.json"""'], {}), "('result/lxmert_experiment.json')\n", (1883, 1916), True, 'import pandas as pd\n'), ((2518, 2545), 'seaborn.color_palette', 'sns.color_palette', (['"""Paired"""'], {}), "('Paired')\n", (2535, 2545), True, 'import seaborn as sns\n'), ((2830, 2852), 'numpy.arange', 'np.arange', (['(10)', '(100)', '(10)'], {}), '(10, 100, 10)\n', (2839, 2852), True, 'import numpy as np\n'), ((3438, 3495), 'matplotlib.patches.Patch', 'Patch', ([], {'facecolor': 'my_cmap[i]', 'hatch': 'patterns[i]', 'label': 'exp'}), '(facecolor=my_cmap[i], hatch=patterns[i], label=exp)\n', (3443, 3495), False, 'from matplotlib.patches import Patch\n'), ((4677, 4699), 'numpy.arange', 'np.arange', (['(10)', '(100)', '(10)'], {}), '(10, 100, 10)\n', (4686, 4699), True, 'import numpy as np\n'), ((2548, 2575), 'seaborn.color_palette', 'sns.color_palette', (['"""Paired"""'], {}), "('Paired')\n", (2565, 2575), True, 'import seaborn as sns\n'), ((4816, 4843), 'matplotlib.pyplot.style.context', 'plt.style.context', (['"""ggplot"""'], {}), "('ggplot')\n", (4833, 4843), True, 'import matplotlib.pyplot as plt\n'), ((5063, 5075), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (5073, 5075), True, 'import matplotlib.pyplot as plt\n'), ((6178, 6279), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""best"""', 'ncol': '(4)', 'bbox_to_anchor': '(0.95 + var, -0.2)', 'labelspacing': '(1)', 'handlelength': '(4)'}), "(loc='best', ncol=4, bbox_to_anchor=(0.95 + var, -0.2),\n labelspacing=1, handlelength=4)\n", (6188, 6279), True, 'import matplotlib.pyplot as plt\n'), ((6284, 6314), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'right': '(1.5)'}), '(right=1.5)\n', (6303, 6314), True, 'import matplotlib.pyplot as plt\n'), ((6323, 6416), 'matplotlib.pyplot.savefig', 'plt.savefig', (['f"""../report/images/{subnet_mode}_experiment_result.PNG"""'], {'bbox_inches': '"""tight"""'}), "(f'../report/images/{subnet_mode}_experiment_result.PNG',\n bbox_inches='tight')\n", 
(6334, 6416), True, 'import matplotlib.pyplot as plt\n'), ((6421, 6430), 'matplotlib.pyplot.cla', 'plt.cla', ([], {}), '()\n', (6428, 6430), True, 'import matplotlib.pyplot as plt\n'), ((6439, 6448), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (6446, 6448), True, 'import matplotlib.pyplot as plt\n')] |
from __future__ import unicode_literals
import os
import stat
import uuid
import dvc.logger as logger
from dvc.system import System
from dvc.utils import copyfile, move, remove
from dvc.exceptions import DvcException
def _unprotect_file(path):
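    # Break the link by moving the original aside, copying it back as a real file,
    # and finally making the file writable again.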
if System.is_symlink(path) or System.is_hardlink(path):
logger.debug("Unprotecting '{}'".format(path))
tmp = os.path.join(os.path.dirname(path), "." + str(uuid.uuid4()))
move(path, tmp)
copyfile(tmp, path)
remove(tmp)
else:
logger.debug(
"Skipping copying for '{}', since it is not "
"a symlink or a hardlink.".format(path)
)
os.chmod(path, os.stat(path).st_mode | stat.S_IWRITE)
def _unprotect_dir(path):
for root, dirs, files in os.walk(str(path)):
for f in files:
path = os.path.join(root, f)
_unprotect_file(path)
def unprotect(path):
if not os.path.exists(path):
raise DvcException(
"can't unprotect non-existing data '{}'".format(path)
)
if os.path.isdir(path):
_unprotect_dir(path)
else:
_unprotect_file(path)
| [
"os.path.exists",
"dvc.utils.remove",
"os.path.join",
"uuid.uuid4",
"dvc.system.System.is_hardlink",
"dvc.utils.copyfile",
"os.path.isdir",
"os.path.dirname",
"dvc.system.System.is_symlink",
"os.stat",
"dvc.utils.move"
] | [((1068, 1087), 'os.path.isdir', 'os.path.isdir', (['path'], {}), '(path)\n', (1081, 1087), False, 'import os\n'), ((255, 278), 'dvc.system.System.is_symlink', 'System.is_symlink', (['path'], {}), '(path)\n', (272, 278), False, 'from dvc.system import System\n'), ((282, 306), 'dvc.system.System.is_hardlink', 'System.is_hardlink', (['path'], {}), '(path)\n', (300, 306), False, 'from dvc.system import System\n'), ((447, 462), 'dvc.utils.move', 'move', (['path', 'tmp'], {}), '(path, tmp)\n', (451, 462), False, 'from dvc.utils import copyfile, move, remove\n'), ((472, 491), 'dvc.utils.copyfile', 'copyfile', (['tmp', 'path'], {}), '(tmp, path)\n', (480, 491), False, 'from dvc.utils import copyfile, move, remove\n'), ((501, 512), 'dvc.utils.remove', 'remove', (['tmp'], {}), '(tmp)\n', (507, 512), False, 'from dvc.utils import copyfile, move, remove\n'), ((934, 954), 'os.path.exists', 'os.path.exists', (['path'], {}), '(path)\n', (948, 954), False, 'import os\n'), ((391, 412), 'os.path.dirname', 'os.path.dirname', (['path'], {}), '(path)\n', (406, 412), False, 'import os\n'), ((844, 865), 'os.path.join', 'os.path.join', (['root', 'f'], {}), '(root, f)\n', (856, 865), False, 'import os\n'), ((685, 698), 'os.stat', 'os.stat', (['path'], {}), '(path)\n', (692, 698), False, 'import os\n'), ((424, 436), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (434, 436), False, 'import uuid\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from typing import Optional, List, Union, Tuple
from hwt.code import If, Or
from hwt.code_utils import connect_optional, rename_signal
from hwt.hdl.frameTmpl import FrameTmpl
from hwt.hdl.transTmpl import TransTmpl
from hwt.hdl.types.bits import Bits
from hwt.hdl.types.defs import BIT
from hwt.hdl.types.hdlType import HdlType
from hwt.hdl.types.stream import HStream
from hwt.hdl.types.struct import HStruct, HStructField
from hwt.hdl.types.union import HUnion
from hwt.hdl.types.utils import is_only_padding
from hwt.interfaces.std import Handshaked, Signal, VldSynced
from hwt.interfaces.structIntf import StructIntf
from hwt.interfaces.unionIntf import UnionSource
from hwt.interfaces.utils import addClkRstn, propagateClkRstn
from hwt.math import log2ceil
from hwt.serializer.mode import serializeParamsUniq
from hwt.synthesizer.hObjList import HObjList
from hwt.synthesizer.param import Param
from hwt.synthesizer.rtlLevel.rtlSignal import RtlSignal
from hwtLib.abstract.frame_utils.alignment_utils import next_frame_offsets
from hwtLib.abstract.template_configured import TemplateConfigured, \
HdlType_separate
from hwtLib.amba.axis import AxiStream
from hwtLib.amba.axis_comp.base import AxiSCompBase
from hwtLib.amba.axis_comp.frame_deparser.utils import drill_down_in_HStruct_fields
from hwtLib.amba.axis_comp.frame_join._join import AxiS_FrameJoin
from hwtLib.amba.axis_comp.frame_parser.field_connector import AxiS_frameParserFieldConnector
from hwtLib.amba.axis_comp.frame_parser.footer_split import AxiS_footerSplit
from hwtLib.amba.axis_comp.frame_parser.word_factory import WordFactory
from hwtLib.handshaked.streamNode import StreamNode
from hwt.hdl.transPart import TransPart
from hwt.hdl.frameTmplUtils import ChoicesOfFrameParts
def is_non_const_stream(t: HdlType):
if isinstance(t, HStream):
try:
t.bit_length()
except TypeError:
return True
return False
def can_be_zero_sized(t: HdlType):
while isinstance(t, HStruct) and len(t.fields) == 1:
t = t.fields[0].dtype
if isinstance(t, HStream):
return t.len_min == 0
return isinstance(t, HStruct) and not t.fields
class _AxiS_frameParserChildMeta():
def __init__(self, t: HdlType, is_padding: bool, is_const_sized: bool):
self.t = t
self.is_padding = is_padding
self.is_const_sized = is_const_sized
self.can_be_zero_len = not is_const_sized and can_be_zero_sized(t)
def connect_with_clear(clear: RtlSignal, din: RtlSignal, dout: RtlSignal):
If(clear,
dout(0)
).Else(
dout(din)
)
@serializeParamsUniq
class AxiS_frameParser(AxiSCompBase, TemplateConfigured):
"""
Parse frame specified by HType (HStruct, HUnion, ...) into fields
:note: if special frame format is required,
it can be specified by TransTmpl instance and list of FrameTmpl
(Output data structure can be splited into multiple frames as required)
.. figure:: ./_static/AxiS_frameParser.png
:note: names in the figure are just illustrative
:ivar ~.dataIn: the AxiStream interface for input frame
:ivar ~.dataOut: output field interface generated from input type description
:ivar ~.children: List[AxiS_frameParser] which contains a list of children components
in the cases where this component works only as a wrapper of pipeline composed from actual parsers
:ivar ~.children_meta: List[_AxiS_frameParserChildMeta] additional info for children list
.. hwt-autodoc:: _example_AxiS_frameParser
"""
def __init__(self, structT: HdlType,
tmpl: Optional[TransTmpl]=None,
frames: Optional[List[FrameTmpl]]=None):
"""
:param structT: instance of HStruct which specifies
data format to download
:param tmpl: instance of TransTmpl for this structT
:param frames: list of FrameTmpl instances for this tmpl
:note: if tmpl and frames are None they are resolved
from structT parseTemplate
:note: this unit can parse sequence of frames,
if they are specified by "frames"
:attention: structT can not contain fields with variable size
like HStream
"""
TemplateConfigured.__init__(self, structT, tmpl, frames)
AxiSCompBase.__init__(self)
def _config(self):
self.intfCls._config(self)
self.T = Param(self._structT)
self.TRANSACTION_TEMPLATE = Param(self._tmpl)
self.FRAME_TEMPLATES = Param(None if self._frames is None else tuple(self._frames))
# if this is true field interfaces will be of type VldSynced
# and single ready signal will be used for all
# else every interface will be instance of Handshaked and it will
# have it's own ready(rd) signal
self.SHARED_READY = Param(False)
# if true, a new state for overflow will be created in FSM
self.OVERFLOW_SUPPORT = Param(False)
# if True, a frame shorter than expected will cause the reset of main FSM
self.UNDERFLOW_SUPPORT = Param(False)
def _mkFieldIntf(self, parent: Union[StructIntf, UnionSource],
structField: HStructField):
"""
Create an interface to export the data specified by the member of the structure
"""
t = structField.dtype
path = parent._field_path / structField.name
if isinstance(t, HUnion):
i = UnionSource(t, path, parent._instantiateFieldFn)
i._fieldsToInterfaces = parent._fieldsToInterfaces
return i
elif isinstance(t, HStruct):
i = StructIntf(t, path, parent._instantiateFieldFn)
i._fieldsToInterfaces = parent._fieldsToInterfaces
return i
elif isinstance(t, HStream):
if self.SHARED_READY:
raise NotImplementedError("SHARED_READY=True and HStream field", structField)
else:
i = AxiStream()
i._updateParamsFrom(self)
return i
else:
if self.SHARED_READY:
i = VldSynced()
else:
i = Handshaked()
i.DATA_WIDTH = structField.dtype.bit_length()
return i
def _declr(self):
if self.ID_WIDTH:
raise NotImplementedError(self.ID_WIDTH)
if self.DEST_WIDTH:
raise NotImplementedError(self.DEST_WIDTH)
addClkRstn(self)
t = self._structT
if isinstance(t, HStruct):
intfCls = StructIntf
elif isinstance(t, HUnion):
intfCls = UnionSource
else:
raise TypeError(t)
# input stream
with self._paramsShared():
self.dataIn = self.intfCls()
if self.SHARED_READY:
self.dataOut_ready = Signal()
# parsed data
if is_only_padding(t):
self.dataOut = None
else:
self.dataOut = intfCls(t, tuple(), self._mkFieldIntf)._m()
self.parseTemplate()
if self.OVERFLOW_SUPPORT:
# flag which is 1 if we are behind the data which
# we described by type in configuration
# :note: if the data is unaligned this may be 1 in last parsed word
# as well
self.parsing_overflow: Signal = Signal()._m()
if self.UNDERFLOW_SUPPORT:
# flag which is 1 if the input stream ended prematurely
# and main FSM will be restarted
self.error_underflow: Signal = Signal()._m()
def parseTemplate(self):
"""
Load the configuration from the parameters
"""
t = self._structT
try:
t.bit_length()
is_const_size_frame = True
except TypeError:
is_const_size_frame = False
self.children_meta: List[_AxiS_frameParserChildMeta] = []
if is_const_size_frame:
self.children = []
super(AxiS_frameParser, self).parseTemplate()
else:
if self._tmpl or self._frames:
raise NotImplementedError("Dynamic input size and the redefinition of the placement of fields in the data")
children_meta = self.children_meta
# try to cut the data type on const and variable size chunks
# to simplify the parsing logic as these chunks can be handled
# as usuall and only code specific to this case is handling of overlaps
# of such a segments
children = HObjList()
separated = list(HdlType_separate(t, is_non_const_stream))
if len(separated) > 1 or separated[0][0]:
# it may be required to delegate this on children
first = True
for is_non_const_sized, s_t in separated:
_is_padding = is_only_padding(s_t)
if is_non_const_sized or (not first and _is_padding):
c = None
else:
c = self.__class__(s_t)
first = False
children.append(c)
cmeta = _AxiS_frameParserChildMeta(s_t, _is_padding, not is_non_const_sized)
children_meta.append(cmeta)
if len(children_meta) >= 2 and \
children_meta[0].is_const_sized and\
not children_meta[1].is_const_sized:
# we will parse const-size prefix and
# then there will be a variable size suffix
children[0].OVERFLOW_SUPPORT = True
with self._paramsShared(exclude=({"OVERFLOW_SUPPORT", "T"}, {})):
self.children = children
def parser_fsm(self, words: List[Tuple[int, List[Union[TransPart, ChoicesOfFrameParts]], bool]]):
din = self.dataIn
maxWordIndex = words[-1][0]
word_index_max_val = maxWordIndex
if self.OVERFLOW_SUPPORT:
word_index_max_val += 1
hasMultipleWords = word_index_max_val > 0
if hasMultipleWords:
wordIndex = self._reg("wordIndex", Bits(
log2ceil(word_index_max_val + 1)), 0)
else:
wordIndex = None
allOutNodes = WordFactory(wordIndex)
if not is_only_padding(self._structT):
fc = AxiS_frameParserFieldConnector(self, self.dataIn, self.dataOut)
fc.connectParts(allOutNodes, words, wordIndex)
in_vld = din.valid
if self.SHARED_READY:
out_ready = self.dataOut_ready
din.ready(out_ready)
else:
out_ready = self._sig("out_ready")
out_ready(allOutNodes.ack())
out_en = BIT.from_py(1)
if self.OVERFLOW_SUPPORT:
out_en = out_en & ~self.parsing_overflow
allOutNodes.sync(out_en, in_vld)
if self.OVERFLOW_SUPPORT:
out_ready = out_ready | self.parsing_overflow
din.ready(out_ready)
if hasMultipleWords:
incr = wordIndex(wordIndex + 1)
data_ack = rename_signal(self, in_vld & out_ready, "data_ack")
aligned = self._structT.bit_length() % self.DATA_WIDTH == 0
if self.UNDERFLOW_SUPPORT:
last = din.last
self.error_underflow(data_ack & last & (
(wordIndex < maxWordIndex) if not aligned else wordIndex != maxWordIndex)
)
else:
last = wordIndex._eq(maxWordIndex)
if self.OVERFLOW_SUPPORT:
last = din.last
incr = If(wordIndex != word_index_max_val,
incr
)
if aligned:
overflow = wordIndex._eq(word_index_max_val)
else:
overflow = wordIndex >= maxWordIndex
self.parsing_overflow(overflow)
If(data_ack,
If(last,
wordIndex(0)
).Else(
incr
)
)
def delegate_to_children(self):
"""
After parsing task was split on some subtasks
we are instantiating the child components to handle them
and on this level we need to handle the synchronization between them
"""
if self.SHARED_READY:
raise NotImplementedError()
assert len(self.children_meta) == len(self.children), \
(self.children_meta, self.children)
if self.OVERFLOW_SUPPORT:
raise NotImplementedError()
din = self.dataIn
# :note: the children tasks are produced by disolving of original
# parsed data type on const and non-const sized segments
if len(self.children) == 2:
c0, c1 = self.children
cm0, cm1 = self.children_meta
if not cm0.is_const_sized and cm1.is_const_sized:
# suffix parser, split suffix and parse it in child sub component
if cm0.can_be_zero_len:
assert self.USE_KEEP or self.USE_STRB, "keep or strb signal on AxiStream is required to mark 0 length packets"
fs = AxiS_footerSplit()
fs._updateParamsFrom(self)
fs.FOOTER_WIDTH = cm1.t.bit_length()
self.footer_split = fs
fs.dataIn(din)
prefix_t, prefix = drill_down_in_HStruct_fields(cm0.t, self.dataOut)
if cm0.is_padding:
# padding
fs.dataOut[0].ready(1)
else:
assert isinstance(prefix_t, HStream), prefix_t
prefix(fs.dataOut[0])
suffix = fs.dataOut[1]
if cm1.is_padding:
# ignore suffix entirely
suffix.ready(1)
else:
# parse suffix in child component
suffix_offsets = next_frame_offsets(prefix_t, self.DATA_WIDTH)
if suffix_offsets != [0, ]:
# add aligment logic
align = AxiS_FrameJoin()
align._updateParamsFrom(
self,
exclude=({"T"}, {}))
align.USE_KEEP = True
align.USE_STRB = False
align.OUT_OFFSET = 0
align.T = HStruct(
(HStream(cm1.t, frame_len=1,
start_offsets=[x // 8 for x in suffix_offsets]),
"f0"))
self.suffix_align = align
align.dataIn[0](suffix, exclude=[suffix.strb, align.dataIn[0].keep])
align.dataIn[0].keep(suffix.strb)
suffix = align.dataOut
c1.dataIn(suffix, exclude=[suffix.keep,
c1.dataIn.strb])
c1.dataIn.strb(suffix.keep)
else:
c1.dataIn(suffix)
if not cm1.is_padding:
connect_optional(c1.dataOut, self.dataOut)
elif cm0.is_const_sized and not cm1.is_const_sized:
# prefix parser, parser prefix in subcomponent
# and let rest to a suffix
if not cm0.is_padding:
connect_optional(c0.dataOut, self.dataOut)
masters = [din]
if cm1.is_padding:
slaves = [c0.dataIn, ]
extraConds = None
skipWhen = None
else:
suffix_t, suffix = drill_down_in_HStruct_fields(cm1.t, self.dataOut)
assert isinstance(suffix_t, HStream), suffix_t
t1_offset = c0._structT.bit_length() % self.DATA_WIDTH
if t1_offset == 0:
slaves = [c0.dataIn, suffix]
if cm1.can_be_zero_len:
assert suffix.USE_KEEP or suffix.USE_STRB, "keep or strb signal on AxiStream is required to mark 0 length packets"
is_zero_len = ~c0.parsing_overflow & din.valid & din.last
# this is a stream after some header, we need to assert that we
# output the 0B packet (valid=1, ready=1, last=1, keep=0) at the end
extraConds = {
# c0.dataIn:~c0.parsing_overflow,
suffix: c0.parsing_overflow | (din.valid & din.last),
}
skipWhen = {
suffix:~c0.parsing_overflow & ~(din.valid & din.last),
}
controll_signals = [din.valid, din.ready]
if suffix.USE_KEEP:
controll_signals.append(suffix.keep)
connect_with_clear(is_zero_len, din.keep, suffix.keep)
if suffix.USE_STRB:
controll_signals.append(suffix.strb)
connect_with_clear(is_zero_len, din.strb, suffix.strb)
suffix(din, exclude=controll_signals)
else:
# t1 is aligned on word boundary
# and does not require any first word mask modification
# We enable the input to c1 once the c0 is finished with the parsing (parsing_overflow=1)
extraConds = {
# c0.dataIn:~c0.parsing_overflow,
suffix: c0.parsing_overflow,
}
skipWhen = {
suffix:~c0.parsing_overflow,
}
suffix(din, exclude=[din.valid, din.ready])
else:
raise NotImplementedError("prefix parser- modify mask for suffix first word")
StreamNode(masters, slaves,
extraConds=extraConds,
skipWhen=skipWhen).sync()
c0.dataIn(din, exclude=[din.valid, din.ready])
else:
raise NotImplementedError("multiple(2) non-constant size segments in parsed datastructure, do parse frame incrementally instead")
else:
raise NotImplementedError("multiple non-constant size segments in parsed datastructure, do parse frame incrementally instead")
if self.UNDERFLOW_SUPPORT:
self.error_underflow(Or(*(c.error_underflow for c in self.children if c is not None)))
propagateClkRstn(self)
def _impl(self):
"""
Output data signals are directly connected to input in most of the cases,
exceptions are:
* Delayed parts of fields which were parsed in some previous input word
for fields wich are crossing input word boundaries
* Streams may have alignment logic if required
"""
if self.children_meta:
self.delegate_to_children()
else:
words = list(self.chainFrameWords())
self.parser_fsm(words)
def _example_AxiS_frameParser():
from hwtLib.types.ctypes import uint8_t, uint16_t, uint32_t, uint64_t
# t = HStruct(
# (uint64_t, "item0"), # tuples (type, name) where type has to be instance of Bits type
# (uint64_t, None), # name = None means this field will be ignored
# (uint64_t, "item1"),
# (uint64_t, None),
# (uint16_t, "item2"),
# (uint16_t, "item3"),
# (uint32_t, "item4"),
# (uint32_t, None),
# (uint64_t, "item5"), # this word is split on two bus words
# (uint32_t, None),
# (uint64_t, None),
# (uint64_t, None),
# (uint64_t, None),
# (uint64_t, "item6"),
# (uint64_t, "item7"),
# (HStruct(
# (uint64_t, "item0"),
# (uint64_t, "item1"),
# ),
# "struct0")
# )
# t = HUnion(
# (uint32_t, "a"),
# (uint32_t, "b")
# )
# t = HUnion(
# (HStruct(
# (uint64_t, "itemA0"),
# (uint64_t, "itemA1")
# ), "frameA"),
# (HStruct(
# (uint32_t, "itemB0"),
# (uint32_t, "itemB1"),
# (uint32_t, "itemB2"),
# (uint32_t, "itemB3")
# ), "frameB")
# )
t = HStruct(
(HStream(uint8_t, frame_len=(0, 1)), "frame0"),
(uint16_t, "footer")
)
u = AxiS_frameParser(t)
u.USE_STRB = True
u.DATA_WIDTH = 32
return u
if __name__ == "__main__":
from hwt.synthesizer.utils import to_rtl_str
u = _example_AxiS_frameParser()
print(to_rtl_str(u))
| [
"hwtLib.amba.axis_comp.frame_parser.footer_split.AxiS_footerSplit",
"hwt.code_utils.connect_optional",
"hwtLib.amba.axis.AxiStream",
"hwt.code.If",
"hwt.code.Or",
"hwt.interfaces.utils.propagateClkRstn",
"hwt.interfaces.structIntf.StructIntf",
"hwtLib.amba.axis_comp.base.AxiSCompBase.__init__",
"hwtLib.amba.axis_comp.frame_parser.word_factory.WordFactory",
"hwtLib.abstract.template_configured.HdlType_separate",
"hwtLib.abstract.template_configured.TemplateConfigured.__init__",
"hwt.hdl.types.stream.HStream",
"hwt.interfaces.utils.addClkRstn",
"hwt.hdl.types.defs.BIT.from_py",
"hwtLib.handshaked.streamNode.StreamNode",
"hwt.synthesizer.hObjList.HObjList",
"hwtLib.amba.axis_comp.frame_deparser.utils.drill_down_in_HStruct_fields",
"hwt.interfaces.std.VldSynced",
"hwtLib.amba.axis_comp.frame_parser.field_connector.AxiS_frameParserFieldConnector",
"hwtLib.abstract.frame_utils.alignment_utils.next_frame_offsets",
"hwt.interfaces.std.Handshaked",
"hwt.code_utils.rename_signal",
"hwt.math.log2ceil",
"hwt.synthesizer.param.Param",
"hwtLib.amba.axis_comp.frame_join._join.AxiS_FrameJoin",
"hwt.synthesizer.utils.to_rtl_str",
"hwt.interfaces.unionIntf.UnionSource",
"hwt.hdl.types.utils.is_only_padding",
"hwt.interfaces.std.Signal"
] | [((4300, 4356), 'hwtLib.abstract.template_configured.TemplateConfigured.__init__', 'TemplateConfigured.__init__', (['self', 'structT', 'tmpl', 'frames'], {}), '(self, structT, tmpl, frames)\n', (4327, 4356), False, 'from hwtLib.abstract.template_configured import TemplateConfigured, HdlType_separate\n'), ((4365, 4392), 'hwtLib.amba.axis_comp.base.AxiSCompBase.__init__', 'AxiSCompBase.__init__', (['self'], {}), '(self)\n', (4386, 4392), False, 'from hwtLib.amba.axis_comp.base import AxiSCompBase\n'), ((4469, 4489), 'hwt.synthesizer.param.Param', 'Param', (['self._structT'], {}), '(self._structT)\n', (4474, 4489), False, 'from hwt.synthesizer.param import Param\n'), ((4526, 4543), 'hwt.synthesizer.param.Param', 'Param', (['self._tmpl'], {}), '(self._tmpl)\n', (4531, 4543), False, 'from hwt.synthesizer.param import Param\n'), ((4903, 4915), 'hwt.synthesizer.param.Param', 'Param', (['(False)'], {}), '(False)\n', (4908, 4915), False, 'from hwt.synthesizer.param import Param\n'), ((5015, 5027), 'hwt.synthesizer.param.Param', 'Param', (['(False)'], {}), '(False)\n', (5020, 5027), False, 'from hwt.synthesizer.param import Param\n'), ((5143, 5155), 'hwt.synthesizer.param.Param', 'Param', (['(False)'], {}), '(False)\n', (5148, 5155), False, 'from hwt.synthesizer.param import Param\n'), ((6522, 6538), 'hwt.interfaces.utils.addClkRstn', 'addClkRstn', (['self'], {}), '(self)\n', (6532, 6538), False, 'from hwt.interfaces.utils import addClkRstn, propagateClkRstn\n'), ((6963, 6981), 'hwt.hdl.types.utils.is_only_padding', 'is_only_padding', (['t'], {}), '(t)\n', (6978, 6981), False, 'from hwt.hdl.types.utils import is_only_padding\n'), ((10384, 10406), 'hwtLib.amba.axis_comp.frame_parser.word_factory.WordFactory', 'WordFactory', (['wordIndex'], {}), '(wordIndex)\n', (10395, 10406), False, 'from hwtLib.amba.axis_comp.frame_parser.word_factory import WordFactory\n'), ((19097, 19119), 'hwt.interfaces.utils.propagateClkRstn', 'propagateClkRstn', (['self'], {}), '(self)\n', (19113, 19119), False, 'from hwt.interfaces.utils import addClkRstn, propagateClkRstn\n'), ((21164, 21177), 'hwt.synthesizer.utils.to_rtl_str', 'to_rtl_str', (['u'], {}), '(u)\n', (21174, 21177), False, 'from hwt.synthesizer.utils import to_rtl_str\n'), ((5518, 5566), 'hwt.interfaces.unionIntf.UnionSource', 'UnionSource', (['t', 'path', 'parent._instantiateFieldFn'], {}), '(t, path, parent._instantiateFieldFn)\n', (5529, 5566), False, 'from hwt.interfaces.unionIntf import UnionSource\n'), ((8632, 8642), 'hwt.synthesizer.hObjList.HObjList', 'HObjList', ([], {}), '()\n', (8640, 8642), False, 'from hwt.synthesizer.hObjList import HObjList\n'), ((10422, 10452), 'hwt.hdl.types.utils.is_only_padding', 'is_only_padding', (['self._structT'], {}), '(self._structT)\n', (10437, 10452), False, 'from hwt.hdl.types.utils import is_only_padding\n'), ((10471, 10534), 'hwtLib.amba.axis_comp.frame_parser.field_connector.AxiS_frameParserFieldConnector', 'AxiS_frameParserFieldConnector', (['self', 'self.dataIn', 'self.dataOut'], {}), '(self, self.dataIn, self.dataOut)\n', (10501, 10534), False, 'from hwtLib.amba.axis_comp.frame_parser.field_connector import AxiS_frameParserFieldConnector\n'), ((10851, 10865), 'hwt.hdl.types.defs.BIT.from_py', 'BIT.from_py', (['(1)'], {}), '(1)\n', (10862, 10865), False, 'from hwt.hdl.types.defs import BIT\n'), ((11225, 11276), 'hwt.code_utils.rename_signal', 'rename_signal', (['self', '(in_vld & out_ready)', '"""data_ack"""'], {}), "(self, in_vld & out_ready, 'data_ack')\n", (11238, 11276), False, 'from hwt.code_utils 
import connect_optional, rename_signal\n'), ((20871, 20905), 'hwt.hdl.types.stream.HStream', 'HStream', (['uint8_t'], {'frame_len': '(0, 1)'}), '(uint8_t, frame_len=(0, 1))\n', (20878, 20905), False, 'from hwt.hdl.types.stream import HStream\n'), ((5704, 5751), 'hwt.interfaces.structIntf.StructIntf', 'StructIntf', (['t', 'path', 'parent._instantiateFieldFn'], {}), '(t, path, parent._instantiateFieldFn)\n', (5714, 5751), False, 'from hwt.interfaces.structIntf import StructIntf\n'), ((6920, 6928), 'hwt.interfaces.std.Signal', 'Signal', ([], {}), '()\n', (6926, 6928), False, 'from hwt.interfaces.std import Handshaked, Signal, VldSynced\n'), ((8672, 8712), 'hwtLib.abstract.template_configured.HdlType_separate', 'HdlType_separate', (['t', 'is_non_const_stream'], {}), '(t, is_non_const_stream)\n', (8688, 8712), False, 'from hwtLib.abstract.template_configured import TemplateConfigured, HdlType_separate\n'), ((11753, 11794), 'hwt.code.If', 'If', (['(wordIndex != word_index_max_val)', 'incr'], {}), '(wordIndex != word_index_max_val, incr)\n', (11755, 11794), False, 'from hwt.code import If, Or\n'), ((13345, 13363), 'hwtLib.amba.axis_comp.frame_parser.footer_split.AxiS_footerSplit', 'AxiS_footerSplit', ([], {}), '()\n', (13361, 13363), False, 'from hwtLib.amba.axis_comp.frame_parser.footer_split import AxiS_footerSplit\n'), ((13567, 13616), 'hwtLib.amba.axis_comp.frame_deparser.utils.drill_down_in_HStruct_fields', 'drill_down_in_HStruct_fields', (['cm0.t', 'self.dataOut'], {}), '(cm0.t, self.dataOut)\n', (13595, 13616), False, 'from hwtLib.amba.axis_comp.frame_deparser.utils import drill_down_in_HStruct_fields\n'), ((19022, 19086), 'hwt.code.Or', 'Or', (['*(c.error_underflow for c in self.children if c is not None)'], {}), '(*(c.error_underflow for c in self.children if c is not None))\n', (19024, 19086), False, 'from hwt.code import If, Or\n'), ((7431, 7439), 'hwt.interfaces.std.Signal', 'Signal', ([], {}), '()\n', (7437, 7439), False, 'from hwt.interfaces.std import Handshaked, Signal, VldSynced\n'), ((7637, 7645), 'hwt.interfaces.std.Signal', 'Signal', ([], {}), '()\n', (7643, 7645), False, 'from hwt.interfaces.std import Handshaked, Signal, VldSynced\n'), ((8955, 8975), 'hwt.hdl.types.utils.is_only_padding', 'is_only_padding', (['s_t'], {}), '(s_t)\n', (8970, 8975), False, 'from hwt.hdl.types.utils import is_only_padding\n'), ((10280, 10312), 'hwt.math.log2ceil', 'log2ceil', (['(word_index_max_val + 1)'], {}), '(word_index_max_val + 1)\n', (10288, 10312), False, 'from hwt.math import log2ceil\n'), ((14125, 14170), 'hwtLib.abstract.frame_utils.alignment_utils.next_frame_offsets', 'next_frame_offsets', (['prefix_t', 'self.DATA_WIDTH'], {}), '(prefix_t, self.DATA_WIDTH)\n', (14143, 14170), False, 'from hwtLib.abstract.frame_utils.alignment_utils import next_frame_offsets\n'), ((15365, 15407), 'hwt.code_utils.connect_optional', 'connect_optional', (['c1.dataOut', 'self.dataOut'], {}), '(c1.dataOut, self.dataOut)\n', (15381, 15407), False, 'from hwt.code_utils import connect_optional, rename_signal\n'), ((6039, 6050), 'hwtLib.amba.axis.AxiStream', 'AxiStream', ([], {}), '()\n', (6048, 6050), False, 'from hwtLib.amba.axis import AxiStream\n'), ((6186, 6197), 'hwt.interfaces.std.VldSynced', 'VldSynced', ([], {}), '()\n', (6195, 6197), False, 'from hwt.interfaces.std import Handshaked, Signal, VldSynced\n'), ((6236, 6248), 'hwt.interfaces.std.Handshaked', 'Handshaked', ([], {}), '()\n', (6246, 6248), False, 'from hwt.interfaces.std import Handshaked, Signal, VldSynced\n'), ((14296, 14312), 
'hwtLib.amba.axis_comp.frame_join._join.AxiS_FrameJoin', 'AxiS_FrameJoin', ([], {}), '()\n', (14310, 14312), False, 'from hwtLib.amba.axis_comp.frame_join._join import AxiS_FrameJoin\n'), ((15638, 15680), 'hwt.code_utils.connect_optional', 'connect_optional', (['c0.dataOut', 'self.dataOut'], {}), '(c0.dataOut, self.dataOut)\n', (15654, 15680), False, 'from hwt.code_utils import connect_optional, rename_signal\n'), ((15926, 15975), 'hwtLib.amba.axis_comp.frame_deparser.utils.drill_down_in_HStruct_fields', 'drill_down_in_HStruct_fields', (['cm1.t', 'self.dataOut'], {}), '(cm1.t, self.dataOut)\n', (15954, 15975), False, 'from hwtLib.amba.axis_comp.frame_deparser.utils import drill_down_in_HStruct_fields\n'), ((18443, 18512), 'hwtLib.handshaked.streamNode.StreamNode', 'StreamNode', (['masters', 'slaves'], {'extraConds': 'extraConds', 'skipWhen': 'skipWhen'}), '(masters, slaves, extraConds=extraConds, skipWhen=skipWhen)\n', (18453, 18512), False, 'from hwtLib.handshaked.streamNode import StreamNode\n'), ((14655, 14732), 'hwt.hdl.types.stream.HStream', 'HStream', (['cm1.t'], {'frame_len': '(1)', 'start_offsets': '[(x // 8) for x in suffix_offsets]'}), '(cm1.t, frame_len=1, start_offsets=[(x // 8) for x in suffix_offsets])\n', (14662, 14732), False, 'from hwt.hdl.types.stream import HStream\n')] |
import pytest
import numpy as np
from tdd.linear_ssvm import logHingeLoss
def test_nan_found():
X = np.array([[-1, 0], [0, 1], [1, 1]])
Y = np.array([0, 1, 1])
clf = logHingeLoss(mu=1e-10)
with pytest.raises(ValueError):
clf.fit(X, Y) | [
"numpy.array",
"tdd.linear_ssvm.logHingeLoss",
"pytest.raises"
] | [((112, 147), 'numpy.array', 'np.array', (['[[-1, 0], [0, 1], [1, 1]]'], {}), '([[-1, 0], [0, 1], [1, 1]])\n', (120, 147), True, 'import numpy as np\n'), ((157, 176), 'numpy.array', 'np.array', (['[0, 1, 1]'], {}), '([0, 1, 1])\n', (165, 176), True, 'import numpy as np\n'), ((188, 210), 'tdd.linear_ssvm.logHingeLoss', 'logHingeLoss', ([], {'mu': '(1e-10)'}), '(mu=1e-10)\n', (200, 210), False, 'from tdd.linear_ssvm import logHingeLoss\n'), ((221, 246), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (234, 246), False, 'import pytest\n')] |
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sb
import numpy as np
def mortality_total(df,title):
# df.plot(kind='scatter', x='total', y='mortal_prec', title=title,stacked=False)
# plt.show()
x = df['total_year_sum']
y = df['mortalty_rate']
# plt.scatter(x, y)
df.plot(kind='scatter', x='total_year_sum', y='mortalty_rate', title=title, stacked=False)
z = np.polyfit(x, y, 1)
p = np.poly1d(z)
plt.plot(x, p(x), "r--")
plt.show()
def mortality_total_surg(df,title):
# df.plot(kind='scatter', x='total', y='mortal_prec', title=title,stacked=False)
# plt.show()
x = df['total_year_count']
y = df['mortalty_rate']
# plt.scatter(x, y)
df.plot(kind='scatter', x='total_year_count', y='mortalty_rate', title=title, stacked=False)
z = np.polyfit(x, y, 1)
p = np.poly1d(z)
plt.plot(x, p(x), "r--")
plt.show()
def draw_hist_c(data,num_of_bins,title,x_title,y_title):
plt.hist(data, bins=num_of_bins, color="plum",ec="black")
plt.title(title)
plt.xlabel(x_title)
plt.ylabel(y_title)
plt.show()
def draw_hist(data,num_of_bins,title,x_title,y_title,color):
plt.hist(data, bins=num_of_bins, color=color,ec="black")
plt.title(title)
plt.xlabel(x_title)
plt.ylabel(y_title)
plt.show()
path="/tmp/pycharm_project_723/"
df_avg_siteid = pd.read_csv("total_avg_site_id.csv")
df_avg_surgid = pd.read_csv("total_avg_surgid.csv")
# # df_sum_hospid= pd.read_csv(path+"sum all years hospid.csv")
# draw_hist(df_avg_siteid['total_year_avg'],40,"siteid Histogram of yearly avg operation",'avg of Operation',"count of siteid",'skyblue')
# draw_hist(df_avg_siteid['Year_avg_Firstop'],40,"siteid Histogram of yearly avg First operation",'avg of First Operation',"count of siteid",'skyblue')
# draw_hist(df_avg_siteid['Year_avg_reop'],40,"siteid Histogram of yearly avg reOperation",'avg of reOperation',"count of siteid",'skyblue')
#
# draw_hist(df_avg_siteid['firstop/total'],40,"siteid Histogram of yearly avg First operation/Total operation",'% of First Operation',"count of siteid",'palegreen')
# draw_hist(df_avg_siteid['reop/total'],40,"siteid Histogram of yearly avg reOperation/Total operation",'% of reOperation',"count of siteid",'palegreen')
#
# # draw_hist(df_sum_surgid['Year_avg'],20,"surgid Histogram of yearly avg operation",'avg of Operation',"count of surgid")
# draw_hist(df_avg_surgid['total_year_avg'],40,"surgid Histogram of yearly avg operation",'avg of Operation',"count of surgid",'plum')
# draw_hist(df_avg_surgid['Year_avg_Firstop'],40,"surgid Histogram of yearly avg First operation",'avg of First Operation',"count of surgid",'plum')
# draw_hist(df_avg_surgid['Year_avg_reop'],40,"surgid Histogram of yearly avg reOperation",'avg of reOperation',"count of surgid",'plum')
#
# draw_hist(df_avg_surgid['firstop/total'],40,"surgid Histogram of yearly avg First operation/Total operation",'% of First Operation',"count of surgid",'bisque')
# draw_hist(df_avg_surgid['reop/total'],40,"surgid Histogram of yearly avg reOperation/Total operation",'% of reOperation',"count of surgid",'bisque')
# mortality_total(df_avg_siteid, " site id: mortality - total ops")
#
# mortality_total_surg(df_avg_surgid, " surgeon id: mortality - total ops")
# ax = plt.gca()
#
# ax.scatter(df_avg_siteid['total_year_avg'], df_avg_siteid['mortalty_reop_rate'], color="lightcoral")
# ax.scatter(df_avg_siteid['total_year_avg'], df_avg_siteid['Complics_reop_rate'], color="lightseagreen")
# f = lambda c : plt.plot([],color=c, ls="", marker="o")[0]
# ax.legend(handles = [f("lightcoral"), f("lightseagreen")],
# labels=['mortalty', 'Complics'])
# plt.title("SiteID Yearly average")
# plt.xlabel("count of operations")
# plt.ylabel("mortality or complics rate")
# plt.show()
def yearly_avg_siteid():
ax = plt.gca()
ax.scatter(df_avg_siteid['total_year_avg'], df_avg_siteid['mortalty_reop_rate'], color="palevioletred",s=30)
ax.scatter(df_avg_siteid['total_year_avg'], df_avg_siteid['Complics_reop_rate'], color="darkturquoise",edgecolor='lightseagreen',s=30)
plt.title("SiteID yearly average")
plt.xlabel("yearly average of operations")
plt.ylabel("mortality or complication reop rate")
x = df_avg_siteid['total_year_avg']
y = df_avg_siteid['mortalty_reop_rate']
z = np.polyfit(x, y, 1)
p = np.poly1d(z)
plt.plot(x, p(x), "mediumvioletred")
a = df_avg_siteid['total_year_avg']
b = df_avg_siteid['Complics_reop_rate']
c = np.polyfit(a, b, 1)
t = np.poly1d(c)
text = f" Mortality : $Y={z[0]:0.6f}X{z[1]:+0.6f}$" # \n$R^2 = {r2_score(y, p):0.3f}$"
text2 = f" Complication : $Y={c[0]:0.6f}X{c[1]:+0.6f}$"
f = lambda c: plt.plot([], color=c, ls="", marker="o")[0]
ax.legend(handles=[f("palevioletred"), f("mediumturquoise")],
labels=[text, text2])
plt.plot(a, t(a), "teal")
plt.savefig('SiteID yearly average.png')
plt.show()
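# Hedged refactoring sketch (not part of the original analysis): the yearly_*
# plotting functions in this script all repeat the same scatter-plus-regression
# pattern, differing only in the dataframe, column and colours used. A shared
# helper along these lines (argument names are illustrative) would remove the
# duplicated polyfit/poly1d boilerplate; legend handling is omitted here.
def scatter_with_trends(df, x_col, title, x_label,
                        point_colors=("palevioletred", "darkturquoise"),
                        line_colors=("mediumvioletred", "teal")):
    ax = plt.gca()
    x = df[x_col]
    for y_col, p_color, l_color in zip(("mortalty_reop_rate", "Complics_reop_rate"),
                                       point_colors, line_colors):
        y = df[y_col]
        ax.scatter(x, y, color=p_color, s=30)
        coeffs = np.polyfit(x, y, 1)
        plt.plot(x, np.poly1d(coeffs)(x), l_color)
    plt.title(title)
    plt.xlabel(x_label)
    plt.ylabel("mortality or complication reop rate")
    plt.show()
# Example call (equivalent to yearly_avg_siteid above):
# scatter_with_trends(df_avg_siteid, "total_year_avg", "SiteID yearly average",
#                     "yearly average of operations")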
def yearly_First_operation_siteid():
ax = plt.gca()
ax.scatter(df_avg_siteid['Year_avg_Firstop'], df_avg_siteid['mortalty_reop_rate'], color="palevioletred", s=30)
ax.scatter(df_avg_siteid['Year_avg_Firstop'], df_avg_siteid['Complics_reop_rate'],color="darkturquoise",edgecolor='lightseagreen', s=30)
plt.title("SiteID yearly average for first operation")
plt.xlabel("yearly average of first operations")
plt.ylabel("mortality or complication reop rate")
x = df_avg_siteid['Year_avg_Firstop']
y = df_avg_siteid['mortalty_reop_rate']
z = np.polyfit(x, y, 1)
p = np.poly1d(z)
plt.plot(x, p(x), "mediumvioletred")
a = df_avg_siteid['Year_avg_Firstop']
b = df_avg_siteid['Complics_reop_rate']
c = np.polyfit(a, b, 1)
t = np.poly1d(c)
text = f" Mortality : $Y={z[0]:0.6f}X{z[1]:+0.6f}$" # \n$R^2 = {r2_score(y, p):0.3f}$"
text2 = f" Complication : $Y={c[0]:0.6f}X{c[1]:+0.6f}$"
f = lambda c: plt.plot([], color=c, ls="", marker="o")[0]
ax.legend(handles=[f("palevioletred"), f("mediumturquoise")],
labels=[text, text2])
plt.plot(a, t(a), "teal")
plt.savefig('SiteID yearly average for first operation.png')
plt.show()
def yearly_reoperation_siteid():
mask = df_avg_siteid['Year_sum_reop'] == 0
df_reop = df_avg_siteid[~mask]
ax = plt.gca()
ax.scatter(df_reop['Year_avg_reop'], df_reop['mortalty_reop_rate'], color="palevioletred", s=30)
ax.scatter(df_reop['Year_avg_reop'], df_reop['Complics_reop_rate'], color="darkturquoise",edgecolor='lightseagreen',s=30)
# f = lambda c: plt.plot([], color=c, ls="", marker="o")[0]
# ax.legend(handles=[f("palevioletred"), f("mediumturquoise")],
# labels=['mortalty', 'Complication'])
plt.title("SiteID yearly average for Reoperation")
plt.xlabel("yearly average of Reoperation")
plt.ylabel("mortality or complication reop rate")
x= df_reop['Year_avg_reop']
y= df_reop['mortalty_reop_rate']
z = np.polyfit(x, y, 1)
p = np.poly1d(z)
plt.plot(x, p(x), "mediumvioletred")
a = df_reop['Year_avg_reop']
b = df_reop['Complics_reop_rate']
c = np.polyfit(a, b, 1)
t = np.poly1d(c)
text = f" Mortality : $Y={z[0]:0.6f}X{z[1]:+0.6f}$" # \n$R^2 = {r2_score(y, p):0.3f}$"
text2 = f" Complication : $Y={c[0]:0.6f}X{c[1]:+0.6f}$"
f = lambda c: plt.plot([], color=c, ls="", marker="o")[0]
ax.legend(handles=[f("palevioletred"), f("mediumturquoise")],
labels=[text, text2])
plt.plot(a, t(a), "teal")
plt.savefig('SiteID yearly average for Reoperation.png')
plt.show()
def yearly_avg_surgid():
ax = plt.gca()
ax.scatter(df_avg_surgid['total_year_avg'], df_avg_surgid['mortalty_reop_rate'], color="orchid", s=30)
ax.scatter(df_avg_surgid['total_year_avg'], df_avg_surgid['Complics_reop_rate'], color="steelblue",edgecolor='tab:blue',s=30)
# f = lambda c: plt.plot([], color=c, ls="", marker="o")[0]
# ax.legend(handles=[f("orchid"), f("steelblue")],
# labels=['mortalty', 'Complication'])
plt.title("Surgid yearly average")
plt.xlabel("yearly average of operations")
plt.ylabel("mortality or Complication reop rate")
x = df_avg_surgid['total_year_avg']
y = df_avg_surgid['mortalty_reop_rate']
z = np.polyfit(x, y, 1)
p = np.poly1d(z)
plt.plot(x, p(x), "darkorchid")
a = df_avg_surgid['total_year_avg']
b = df_avg_surgid['Complics_reop_rate']
c = np.polyfit(a, b, 1)
t = np.poly1d(c)
plt.plot(a, t(a), "mediumblue")
text = f" Mortality : $Y={z[0]:0.6f}X{z[1]:+0.6f}$" # \n$R^2 = {r2_score(y, p):0.3f}$"
text2 = f" Complication : $Y={c[0]:0.6f}X{c[1]:+0.6f}$"
f = lambda c: plt.plot([], color=c, ls="", marker="o")[0]
ax.legend(handles=[f("orchid"), f("steelblue")],
labels=[text, text2])
plt.savefig('Surgid yearly average.png')
plt.show()
def yearly_avg_First_operation_surgid():
ax = plt.gca()
ax.scatter(df_avg_surgid['Year_avg_Firstop'], df_avg_surgid['mortalty_reop_rate'], color="orchid", s=30)
ax.scatter(df_avg_surgid['Year_avg_Firstop'], df_avg_surgid['Complics_reop_rate'], color="steelblue", edgecolor='tab:blue', s=30)
# f = lambda c: plt.plot([], color=c, ls="", marker="o")[0]
# ax.legend(handles=[f("orchid"), f("steelblue")],
# labels=['mortalty', 'Complication'])
plt.title("Surgid yearly average for first operation")
plt.xlabel("yearly average of first operations")
plt.ylabel("mortality or Complication reop rate")
x = df_avg_surgid['Year_avg_Firstop']
y = df_avg_surgid['mortalty_reop_rate']
z = np.polyfit(x, y, 1)
p = np.poly1d(z)
plt.plot(x, p(x), "darkorchid")
a = df_avg_surgid['Year_avg_Firstop']
b = df_avg_surgid['Complics_reop_rate']
c = np.polyfit(a, b, 1)
t = np.poly1d(c)
plt.plot(a, t(a), "mediumblue")
text = f" Mortality : $Y={z[0]:0.6f}X{z[1]:+0.6f}$" # \n$R^2 = {r2_score(y, p):0.3f}$"
text2 = f" Complication : $Y={c[0]:0.6f}X{c[1]:+0.6f}$"
f = lambda c: plt.plot([], color=c, ls="", marker="o")[0]
ax.legend(handles=[f("orchid"), f("steelblue")],
labels=[text, text2])
plt.savefig('Surgid yearly average for first operation.png')
plt.show()
def yearly_avg_reoperation_surgid():
mask = df_avg_surgid['Year_sum_reop'] == 0
df_reop = df_avg_surgid[~mask]
ax = plt.gca()
ax.scatter(df_reop['Year_avg_reop'], df_reop['mortalty_reop_rate'], color="orchid", s=30)
ax.scatter(df_reop['Year_avg_reop'], df_reop['Complics_reop_rate'], color="steelblue", edgecolor='tab:blue', s=30)
# f = lambda c: plt.plot([], color=c, ls="", marker="o")[0]
# ax.legend(handles=[f("orchid"), f("steelblue")],
# labels=['mortalty', 'Complication'])
plt.title("Surgid yearly average for Reoperation")
plt.xlabel("yearly average of reoperations")
plt.ylabel("mortality or Complication reop rate")
x = df_reop['Year_avg_reop']
y = df_reop['mortalty_reop_rate']
z = np.polyfit(x, y, 1)
p = np.poly1d(z)
plt.plot(x, p(x), "darkorchid")
a = df_reop['Year_avg_reop']
b = df_reop['Complics_reop_rate']
c = np.polyfit(a, b, 1)
t = np.poly1d(c)
plt.plot(a, t(a), "mediumblue")
text = f" Mortality : $Y={z[0]:0.6f}X{z[1]:+0.6f}$" # \n$R^2 = {r2_score(y, p):0.3f}$"
text2 = f" Complication : $Y={c[0]:0.6f}X{c[1]:+0.6f}$"
f = lambda c: plt.plot([], color=c, ls="", marker="o")[0]
ax.legend(handles=[f("orchid"), f("steelblue")],
labels=[text, text2])
plt.savefig('Surgid yearly average for Reoperation.png')
plt.show()
def pearson_correlation_siteid():
pearson_corr_siteid = df_avg_siteid[
['total_year_avg', 'Year_avg_Firstop', 'Year_avg_reop', 'reop/total', 'mortalty_rate']]
    # note: despite the "pearson" naming, Spearman rank correlation is computed here
    correlation = pearson_corr_siteid.corr(method='spearman')
# fig, ax = plt.subplots(figsize=(15, 15))
# sb.heatmap(correlation, annot=True, linewidths=1, cmap='coolwarm', square=True, ax=ax)
# plt.show()
fig, ax = plt.subplots(figsize=(8, 8))
ax.set_title('Spearman correlation for siteid variables')
sb.heatmap(correlation,
xticklabels=correlation.columns,
yticklabels=correlation.columns,
cmap='viridis',
annot=True,
fmt='f',
linewidth=0.5, ax=ax)
# plt.show()
g = sb.PairGrid(pearson_corr_siteid, diag_sharey=False, corner=True)
g.map_lower(sb.regplot,color=".5")
g.map_diag(sb.histplot)
g.fig.suptitle('Pairwise plots for siteid correlation')
# g = sb.PairGrid(pearson_corr_siteid)
# g.map_diag(sb.histplot)
# g.map_upper(sb.regplot, color=".5")
# g.map_lower(sb.scatterplot,color=".3")
# g.fig.suptitle('Pairwise plots for siteid correlation')
plt.show()
def pearson_correlation_surgid():
pearson_corr_surgid = df_avg_surgid[['total_year_avg', 'Year_avg_Firstop', 'Year_avg_reop','reop/total','mortalty_rate']]
correlation = pearson_corr_surgid.corr(method='spearman')
# fig, ax = plt.subplots(figsize=(15, 15))
# sb.heatmap(correlation, annot=True, linewidths=1, cmap='coolwarm', square=True, ax=ax)
# plt.show()
print(correlation)
fig, ax = plt.subplots(figsize=(8, 8))
ax.set_title('Spearman correlation for surgid variables')
sb.heatmap(correlation,
xticklabels=correlation.columns,
yticklabels=correlation.columns,
cmap='viridis',
annot=True,
fmt='f',
linewidth=0.5, ax=ax)
g = sb.PairGrid(pearson_corr_surgid, diag_sharey=False, corner=True)
g.map_lower(sb.regplot,color=".5")
g.map_diag(sb.histplot)
g.fig.suptitle('Pairwise plots for surgid correlation')
# g = sb.PairGrid(pearson_corr_surgid)
# g.map_diag(sb.histplot)
# g.map_upper(sb.regplot, color=".5")
# g.map_lower(sb.scatterplot,color=".3")
# g.fig.suptitle('Pairwise plots for surgid correlation')
plt.show()
yearly_avg_siteid()
yearly_First_operation_siteid()
yearly_reoperation_siteid()
yearly_avg_surgid()
yearly_avg_First_operation_surgid()
yearly_avg_reoperation_surgid()
#
#
# pearson_correlation_siteid()
# pearson_correlation_surgid()
# df= pd.read_csv("total_avg_site_id.csv")
# # total_year_sum
# new_df=pd.DataFrame(data=df,columns=['mortalty_reop_rate','total_year_avg'])
# print(new_df)
#
# new_df.to_csv("box.xls")
# new_df['total operations'] = pd.qcut(new_df['total_year_avg'], 4, labels=['I', 'II', 'III', 'IV'])
# bp = new_df.boxplot(column='mortalty_reop_rate', by='total operations',patch_artist=True)
#
# for patch in bp['boxes']:
# patch.set(facecolor='cyan')
# # colors = [ 'lightblue', 'lightgreen', 'tan', 'pink']
# # for patch, color in zip(box['boxes'], colors):
# # patch.set_facecolor(color)
#
# # plt.show()
# f = lambda c: plt.plot([], color=c, ls="", marker="o")[0]
# plt.legend(handles=[f("palevioletred"), f("mediumturquoise")],
# labels=['mortalty', 'Complics'])
#
# plt.show()
#
#
# # def box_plot(data, edge_color, fill_color):
# # bp = ax.boxplot(data, patch_artist=True)
# #
# # for element in ['boxes', 'whiskers', 'fliers', 'means', 'medians', 'caps']:
# # plt.setp(bp[element], color=edge_color)
# #
# # for patch in bp['boxes']:
# # patch.set(facecolor=fill_color)
# # #
# #
# example_data1 = [[1, 2, 0.8], [0.5, 2, 2], [3, 2, 1]]
# # example_data2 = [[5, 3, 4], [6, 4, 3, 8], [6, 4, 9]]
# #
# fig, ax = plt.subplots()
# bp = ax.boxplot(example_data1, patch_artist=True)
# for patch in bp['boxes']:
# patch.set(facecolor='cyan')
# # box_plot(example_data1, 'black', 'tan')
# # box_plot(example_data2, 'black', 'cyan')
# ax.set_ylim(0, 10)
# plt.show()
# df= pd.read_csv("total_avg_surgid.csv")
# # total_year_sum
# new_df=pd.DataFrame(data=df,columns=['mortalty_reop_rate','total_year_avg'])
#
# new_df['bins'] = pd.qcut(new_df['total_year_avg'], 3, labels=['low', 'mid', 'high'])
# print(new_df)
# new_df.to_csv("box.csv")
# f = lambda c: plt.plot([], color=c, ls="", marker="o")[0]
# new_df.boxplot(column='mortalty_reop_rate', by='bins')
# plt.legend(handles=[f("white"), f("white"), f("white")],
# labels=['low 1-32.7', 'mid 32.75-61.286','high 61.3-339.5'])
# plt.ylabel("test")
# plt.show()
| [
"matplotlib.pyplot.hist",
"matplotlib.pyplot.savefig",
"pandas.read_csv",
"numpy.polyfit",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.gca",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"seaborn.heatmap",
"numpy.poly1d",
"matplotlib.pyplot.title",
"seaborn.PairGrid",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.show"
] | [((1377, 1413), 'pandas.read_csv', 'pd.read_csv', (['"""total_avg_site_id.csv"""'], {}), "('total_avg_site_id.csv')\n", (1388, 1413), True, 'import pandas as pd\n'), ((1430, 1465), 'pandas.read_csv', 'pd.read_csv', (['"""total_avg_surgid.csv"""'], {}), "('total_avg_surgid.csv')\n", (1441, 1465), True, 'import pandas as pd\n'), ((412, 431), 'numpy.polyfit', 'np.polyfit', (['x', 'y', '(1)'], {}), '(x, y, 1)\n', (422, 431), True, 'import numpy as np\n'), ((440, 452), 'numpy.poly1d', 'np.poly1d', (['z'], {}), '(z)\n', (449, 452), True, 'import numpy as np\n'), ((487, 497), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (495, 497), True, 'import matplotlib.pyplot as plt\n'), ((827, 846), 'numpy.polyfit', 'np.polyfit', (['x', 'y', '(1)'], {}), '(x, y, 1)\n', (837, 846), True, 'import numpy as np\n'), ((855, 867), 'numpy.poly1d', 'np.poly1d', (['z'], {}), '(z)\n', (864, 867), True, 'import numpy as np\n'), ((902, 912), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (910, 912), True, 'import matplotlib.pyplot as plt\n'), ((976, 1034), 'matplotlib.pyplot.hist', 'plt.hist', (['data'], {'bins': 'num_of_bins', 'color': '"""plum"""', 'ec': '"""black"""'}), "(data, bins=num_of_bins, color='plum', ec='black')\n", (984, 1034), True, 'import matplotlib.pyplot as plt\n'), ((1038, 1054), 'matplotlib.pyplot.title', 'plt.title', (['title'], {}), '(title)\n', (1047, 1054), True, 'import matplotlib.pyplot as plt\n'), ((1059, 1078), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['x_title'], {}), '(x_title)\n', (1069, 1078), True, 'import matplotlib.pyplot as plt\n'), ((1083, 1102), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['y_title'], {}), '(y_title)\n', (1093, 1102), True, 'import matplotlib.pyplot as plt\n'), ((1107, 1117), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1115, 1117), True, 'import matplotlib.pyplot as plt\n'), ((1184, 1241), 'matplotlib.pyplot.hist', 'plt.hist', (['data'], {'bins': 'num_of_bins', 'color': 'color', 'ec': '"""black"""'}), "(data, bins=num_of_bins, color=color, ec='black')\n", (1192, 1241), True, 'import matplotlib.pyplot as plt\n'), ((1245, 1261), 'matplotlib.pyplot.title', 'plt.title', (['title'], {}), '(title)\n', (1254, 1261), True, 'import matplotlib.pyplot as plt\n'), ((1266, 1285), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['x_title'], {}), '(x_title)\n', (1276, 1285), True, 'import matplotlib.pyplot as plt\n'), ((1290, 1309), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['y_title'], {}), '(y_title)\n', (1300, 1309), True, 'import matplotlib.pyplot as plt\n'), ((1314, 1324), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1322, 1324), True, 'import matplotlib.pyplot as plt\n'), ((3855, 3864), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (3862, 3864), True, 'import matplotlib.pyplot as plt\n'), ((4123, 4157), 'matplotlib.pyplot.title', 'plt.title', (['"""SiteID yearly average"""'], {}), "('SiteID yearly average')\n", (4132, 4157), True, 'import matplotlib.pyplot as plt\n'), ((4162, 4204), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""yearly average of operations"""'], {}), "('yearly average of operations')\n", (4172, 4204), True, 'import matplotlib.pyplot as plt\n'), ((4209, 4258), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""mortality or complication reop rate"""'], {}), "('mortality or complication reop rate')\n", (4219, 4258), True, 'import matplotlib.pyplot as plt\n'), ((4352, 4371), 'numpy.polyfit', 'np.polyfit', (['x', 'y', '(1)'], {}), '(x, y, 1)\n', (4362, 4371), True, 'import numpy as np\n'), ((4380, 4392), 
'numpy.poly1d', 'np.poly1d', (['z'], {}), '(z)\n', (4389, 4392), True, 'import numpy as np\n'), ((4527, 4546), 'numpy.polyfit', 'np.polyfit', (['a', 'b', '(1)'], {}), '(a, b, 1)\n', (4537, 4546), True, 'import numpy as np\n'), ((4555, 4567), 'numpy.poly1d', 'np.poly1d', (['c'], {}), '(c)\n', (4564, 4567), True, 'import numpy as np\n'), ((4918, 4958), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""SiteID yearly average.png"""'], {}), "('SiteID yearly average.png')\n", (4929, 4958), True, 'import matplotlib.pyplot as plt\n'), ((4964, 4974), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4972, 4974), True, 'import matplotlib.pyplot as plt\n'), ((5022, 5031), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (5029, 5031), True, 'import matplotlib.pyplot as plt\n'), ((5295, 5349), 'matplotlib.pyplot.title', 'plt.title', (['"""SiteID yearly average for first operation"""'], {}), "('SiteID yearly average for first operation')\n", (5304, 5349), True, 'import matplotlib.pyplot as plt\n'), ((5354, 5402), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""yearly average of first operations"""'], {}), "('yearly average of first operations')\n", (5364, 5402), True, 'import matplotlib.pyplot as plt\n'), ((5407, 5456), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""mortality or complication reop rate"""'], {}), "('mortality or complication reop rate')\n", (5417, 5456), True, 'import matplotlib.pyplot as plt\n'), ((5552, 5571), 'numpy.polyfit', 'np.polyfit', (['x', 'y', '(1)'], {}), '(x, y, 1)\n', (5562, 5571), True, 'import numpy as np\n'), ((5580, 5592), 'numpy.poly1d', 'np.poly1d', (['z'], {}), '(z)\n', (5589, 5592), True, 'import numpy as np\n'), ((5729, 5748), 'numpy.polyfit', 'np.polyfit', (['a', 'b', '(1)'], {}), '(a, b, 1)\n', (5739, 5748), True, 'import numpy as np\n'), ((5757, 5769), 'numpy.poly1d', 'np.poly1d', (['c'], {}), '(c)\n', (5766, 5769), True, 'import numpy as np\n'), ((6120, 6180), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""SiteID yearly average for first operation.png"""'], {}), "('SiteID yearly average for first operation.png')\n", (6131, 6180), True, 'import matplotlib.pyplot as plt\n'), ((6185, 6195), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (6193, 6195), True, 'import matplotlib.pyplot as plt\n'), ((6321, 6330), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (6328, 6330), True, 'import matplotlib.pyplot as plt\n'), ((6748, 6798), 'matplotlib.pyplot.title', 'plt.title', (['"""SiteID yearly average for Reoperation"""'], {}), "('SiteID yearly average for Reoperation')\n", (6757, 6798), True, 'import matplotlib.pyplot as plt\n'), ((6803, 6846), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""yearly average of Reoperation"""'], {}), "('yearly average of Reoperation')\n", (6813, 6846), True, 'import matplotlib.pyplot as plt\n'), ((6851, 6900), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""mortality or complication reop rate"""'], {}), "('mortality or complication reop rate')\n", (6861, 6900), True, 'import matplotlib.pyplot as plt\n'), ((6979, 6998), 'numpy.polyfit', 'np.polyfit', (['x', 'y', '(1)'], {}), '(x, y, 1)\n', (6989, 6998), True, 'import numpy as np\n'), ((7007, 7019), 'numpy.poly1d', 'np.poly1d', (['z'], {}), '(z)\n', (7016, 7019), True, 'import numpy as np\n'), ((7141, 7160), 'numpy.polyfit', 'np.polyfit', (['a', 'b', '(1)'], {}), '(a, b, 1)\n', (7151, 7160), True, 'import numpy as np\n'), ((7169, 7181), 'numpy.poly1d', 'np.poly1d', (['c'], {}), '(c)\n', (7178, 7181), True, 'import numpy as np\n'), ((7532, 7588), 
'matplotlib.pyplot.savefig', 'plt.savefig', (['"""SiteID yearly average for Reoperation.png"""'], {}), "('SiteID yearly average for Reoperation.png')\n", (7543, 7588), True, 'import matplotlib.pyplot as plt\n'), ((7593, 7603), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (7601, 7603), True, 'import matplotlib.pyplot as plt\n'), ((7639, 7648), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (7646, 7648), True, 'import matplotlib.pyplot as plt\n'), ((8063, 8097), 'matplotlib.pyplot.title', 'plt.title', (['"""Surgid yearly average"""'], {}), "('Surgid yearly average')\n", (8072, 8097), True, 'import matplotlib.pyplot as plt\n'), ((8102, 8144), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""yearly average of operations"""'], {}), "('yearly average of operations')\n", (8112, 8144), True, 'import matplotlib.pyplot as plt\n'), ((8149, 8198), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""mortality or Complication reop rate"""'], {}), "('mortality or Complication reop rate')\n", (8159, 8198), True, 'import matplotlib.pyplot as plt\n'), ((8292, 8311), 'numpy.polyfit', 'np.polyfit', (['x', 'y', '(1)'], {}), '(x, y, 1)\n', (8302, 8311), True, 'import numpy as np\n'), ((8320, 8332), 'numpy.poly1d', 'np.poly1d', (['z'], {}), '(z)\n', (8329, 8332), True, 'import numpy as np\n'), ((8462, 8481), 'numpy.polyfit', 'np.polyfit', (['a', 'b', '(1)'], {}), '(a, b, 1)\n', (8472, 8481), True, 'import numpy as np\n'), ((8490, 8502), 'numpy.poly1d', 'np.poly1d', (['c'], {}), '(c)\n', (8499, 8502), True, 'import numpy as np\n'), ((8846, 8886), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""Surgid yearly average.png"""'], {}), "('Surgid yearly average.png')\n", (8857, 8886), True, 'import matplotlib.pyplot as plt\n'), ((8891, 8901), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (8899, 8901), True, 'import matplotlib.pyplot as plt\n'), ((8953, 8962), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (8960, 8962), True, 'import matplotlib.pyplot as plt\n'), ((9383, 9437), 'matplotlib.pyplot.title', 'plt.title', (['"""Surgid yearly average for first operation"""'], {}), "('Surgid yearly average for first operation')\n", (9392, 9437), True, 'import matplotlib.pyplot as plt\n'), ((9442, 9490), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""yearly average of first operations"""'], {}), "('yearly average of first operations')\n", (9452, 9490), True, 'import matplotlib.pyplot as plt\n'), ((9495, 9544), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""mortality or Complication reop rate"""'], {}), "('mortality or Complication reop rate')\n", (9505, 9544), True, 'import matplotlib.pyplot as plt\n'), ((9640, 9659), 'numpy.polyfit', 'np.polyfit', (['x', 'y', '(1)'], {}), '(x, y, 1)\n', (9650, 9659), True, 'import numpy as np\n'), ((9668, 9680), 'numpy.poly1d', 'np.poly1d', (['z'], {}), '(z)\n', (9677, 9680), True, 'import numpy as np\n'), ((9812, 9831), 'numpy.polyfit', 'np.polyfit', (['a', 'b', '(1)'], {}), '(a, b, 1)\n', (9822, 9831), True, 'import numpy as np\n'), ((9840, 9852), 'numpy.poly1d', 'np.poly1d', (['c'], {}), '(c)\n', (9849, 9852), True, 'import numpy as np\n'), ((10196, 10256), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""Surgid yearly average for first operation.png"""'], {}), "('Surgid yearly average for first operation.png')\n", (10207, 10256), True, 'import matplotlib.pyplot as plt\n'), ((10261, 10271), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (10269, 10271), True, 'import matplotlib.pyplot as plt\n'), ((10401, 10410), 'matplotlib.pyplot.gca', 'plt.gca', 
([], {}), '()\n', (10408, 10410), True, 'import matplotlib.pyplot as plt\n'), ((10801, 10851), 'matplotlib.pyplot.title', 'plt.title', (['"""Surgid yearly average for Reoperation"""'], {}), "('Surgid yearly average for Reoperation')\n", (10810, 10851), True, 'import matplotlib.pyplot as plt\n'), ((10856, 10900), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""yearly average of reoperations"""'], {}), "('yearly average of reoperations')\n", (10866, 10900), True, 'import matplotlib.pyplot as plt\n'), ((10905, 10954), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""mortality or Complication reop rate"""'], {}), "('mortality or Complication reop rate')\n", (10915, 10954), True, 'import matplotlib.pyplot as plt\n'), ((11035, 11054), 'numpy.polyfit', 'np.polyfit', (['x', 'y', '(1)'], {}), '(x, y, 1)\n', (11045, 11054), True, 'import numpy as np\n'), ((11063, 11075), 'numpy.poly1d', 'np.poly1d', (['z'], {}), '(z)\n', (11072, 11075), True, 'import numpy as np\n'), ((11192, 11211), 'numpy.polyfit', 'np.polyfit', (['a', 'b', '(1)'], {}), '(a, b, 1)\n', (11202, 11211), True, 'import numpy as np\n'), ((11220, 11232), 'numpy.poly1d', 'np.poly1d', (['c'], {}), '(c)\n', (11229, 11232), True, 'import numpy as np\n'), ((11576, 11632), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""Surgid yearly average for Reoperation.png"""'], {}), "('Surgid yearly average for Reoperation.png')\n", (11587, 11632), True, 'import matplotlib.pyplot as plt\n'), ((11637, 11647), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (11645, 11647), True, 'import matplotlib.pyplot as plt\n'), ((12054, 12082), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(8, 8)'}), '(figsize=(8, 8))\n', (12066, 12082), True, 'import matplotlib.pyplot as plt\n'), ((12149, 12306), 'seaborn.heatmap', 'sb.heatmap', (['correlation'], {'xticklabels': 'correlation.columns', 'yticklabels': 'correlation.columns', 'cmap': '"""viridis"""', 'annot': '(True)', 'fmt': '"""f"""', 'linewidth': '(0.5)', 'ax': 'ax'}), "(correlation, xticklabels=correlation.columns, yticklabels=\n correlation.columns, cmap='viridis', annot=True, fmt='f', linewidth=0.5,\n ax=ax)\n", (12159, 12306), True, 'import seaborn as sb\n'), ((12414, 12478), 'seaborn.PairGrid', 'sb.PairGrid', (['pearson_corr_siteid'], {'diag_sharey': '(False)', 'corner': '(True)'}), '(pearson_corr_siteid, diag_sharey=False, corner=True)\n', (12425, 12478), True, 'import seaborn as sb\n'), ((12833, 12843), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (12841, 12843), True, 'import matplotlib.pyplot as plt\n'), ((13261, 13289), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(8, 8)'}), '(figsize=(8, 8))\n', (13273, 13289), True, 'import matplotlib.pyplot as plt\n'), ((13356, 13513), 'seaborn.heatmap', 'sb.heatmap', (['correlation'], {'xticklabels': 'correlation.columns', 'yticklabels': 'correlation.columns', 'cmap': '"""viridis"""', 'annot': '(True)', 'fmt': '"""f"""', 'linewidth': '(0.5)', 'ax': 'ax'}), "(correlation, xticklabels=correlation.columns, yticklabels=\n correlation.columns, cmap='viridis', annot=True, fmt='f', linewidth=0.5,\n ax=ax)\n", (13366, 13513), True, 'import seaborn as sb\n'), ((13604, 13668), 'seaborn.PairGrid', 'sb.PairGrid', (['pearson_corr_surgid'], {'diag_sharey': '(False)', 'corner': '(True)'}), '(pearson_corr_surgid, diag_sharey=False, corner=True)\n', (13615, 13668), True, 'import seaborn as sb\n'), ((14023, 14033), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (14031, 14033), True, 'import matplotlib.pyplot as plt\n'), 
((4738, 4778), 'matplotlib.pyplot.plot', 'plt.plot', (['[]'], {'color': 'c', 'ls': '""""""', 'marker': '"""o"""'}), "([], color=c, ls='', marker='o')\n", (4746, 4778), True, 'import matplotlib.pyplot as plt\n'), ((5940, 5980), 'matplotlib.pyplot.plot', 'plt.plot', (['[]'], {'color': 'c', 'ls': '""""""', 'marker': '"""o"""'}), "([], color=c, ls='', marker='o')\n", (5948, 5980), True, 'import matplotlib.pyplot as plt\n'), ((7352, 7392), 'matplotlib.pyplot.plot', 'plt.plot', (['[]'], {'color': 'c', 'ls': '""""""', 'marker': '"""o"""'}), "([], color=c, ls='', marker='o')\n", (7360, 7392), True, 'import matplotlib.pyplot as plt\n'), ((8709, 8749), 'matplotlib.pyplot.plot', 'plt.plot', (['[]'], {'color': 'c', 'ls': '""""""', 'marker': '"""o"""'}), "([], color=c, ls='', marker='o')\n", (8717, 8749), True, 'import matplotlib.pyplot as plt\n'), ((10059, 10099), 'matplotlib.pyplot.plot', 'plt.plot', (['[]'], {'color': 'c', 'ls': '""""""', 'marker': '"""o"""'}), "([], color=c, ls='', marker='o')\n", (10067, 10099), True, 'import matplotlib.pyplot as plt\n'), ((11439, 11479), 'matplotlib.pyplot.plot', 'plt.plot', (['[]'], {'color': 'c', 'ls': '""""""', 'marker': '"""o"""'}), "([], color=c, ls='', marker='o')\n", (11447, 11479), True, 'import matplotlib.pyplot as plt\n')] |
# ----------------------------------------------------------------------
# |
# | DateTimeTypeInfo_UnitTest.py
# |
# | <NAME> <<EMAIL>>
# | 2018-04-22 22:48:49
# |
# ----------------------------------------------------------------------
# |
# | Copyright <NAME> 2018.
# | Distributed under the Boost Software License, Version 1.0.
# | (See accompanying file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
# |
# ----------------------------------------------------------------------
"""Unit test for DateTimeTypeInfo.py."""
import datetime
import os
import sys
import unittest
from CommonEnvironment.TypeInfo.FundamentalTypes.DateTimeTypeInfo import DateTimeTypeInfo
# ----------------------------------------------------------------------
_script_fullpath = os.path.abspath(__file__) if "python" in sys.executable.lower() else sys.executable
_script_dir, _script_name = os.path.split(_script_fullpath)
# ----------------------------------------------------------------------
class StandardSuite(unittest.TestCase):
# ----------------------------------------------------------------------
def test_Standard(self):
self.assertEqual(DateTimeTypeInfo.Desc, "Datetime")
self.assertEqual(DateTimeTypeInfo.ConstraintsDesc, '')
self.assertEqual(DateTimeTypeInfo.ExpectedType, datetime.datetime)
# ----------------------------------------------------------------------
def test_Create(self):
        self.assertAlmostEqual(DateTimeTypeInfo.Create(), datetime.datetime.now(), delta=datetime.timedelta(seconds=2))
dt = DateTimeTypeInfo.Create(microseconds=False)
self.assertEqual(dt.microsecond, 0)
# ----------------------------------------------------------------------
# ----------------------------------------------------------------------
# ----------------------------------------------------------------------
if __name__ == "__main__":
try: sys.exit(unittest.main(verbosity=2))
except KeyboardInterrupt: pass
| [
"CommonEnvironment.TypeInfo.FundamentalTypes.DateTimeTypeInfo.DateTimeTypeInfo.Create",
"sys.executable.lower",
"unittest.main",
"os.path.split",
"datetime.datetime.now",
"os.path.abspath",
"datetime.timedelta"
] | [((935, 966), 'os.path.split', 'os.path.split', (['_script_fullpath'], {}), '(_script_fullpath)\n', (948, 966), False, 'import os\n'), ((822, 847), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (837, 847), False, 'import os\n'), ((863, 885), 'sys.executable.lower', 'sys.executable.lower', ([], {}), '()\n', (883, 885), False, 'import sys\n'), ((1638, 1681), 'CommonEnvironment.TypeInfo.FundamentalTypes.DateTimeTypeInfo.DateTimeTypeInfo.Create', 'DateTimeTypeInfo.Create', ([], {'microseconds': '(False)'}), '(microseconds=False)\n', (1661, 1681), False, 'from CommonEnvironment.TypeInfo.FundamentalTypes.DateTimeTypeInfo import DateTimeTypeInfo\n'), ((1539, 1564), 'CommonEnvironment.TypeInfo.FundamentalTypes.DateTimeTypeInfo.DateTimeTypeInfo.Create', 'DateTimeTypeInfo.Create', ([], {}), '()\n', (1562, 1564), False, 'from CommonEnvironment.TypeInfo.FundamentalTypes.DateTimeTypeInfo import DateTimeTypeInfo\n'), ((1566, 1589), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (1587, 1589), False, 'import datetime\n'), ((1591, 1620), 'datetime.timedelta', 'datetime.timedelta', ([], {'seconds': '(2)'}), '(seconds=2)\n', (1609, 1620), False, 'import datetime\n'), ((1998, 2024), 'unittest.main', 'unittest.main', ([], {'verbosity': '(2)'}), '(verbosity=2)\n', (2011, 2024), False, 'import unittest\n')] |
"""
Revision ID: e40889bed1ba
Revises: <PASSWORD>
Create Date: 2019-02-10 21:55:49.665314
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = "e40889bed1ba"
down_revision = "b<PASSWORD>"
def upgrade():
op.add_column(
"projects",
sa.Column("transfer_issue_url", sa.String(length=255), nullable=True),
)
def downgrade():
op.drop_column("projects", "transfer_issue_url")
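# Illustrative note (not part of the generated migration): on a typical SQL
# backend the operations above correspond roughly to the following DDL; the
# exact statements depend on the configured dialect.
#   upgrade:   ALTER TABLE projects ADD COLUMN transfer_issue_url VARCHAR(255) NULL;
#   downgrade: ALTER TABLE projects DROP COLUMN transfer_issue_url;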
| [
"sqlalchemy.String",
"alembic.op.drop_column"
] | [((405, 453), 'alembic.op.drop_column', 'op.drop_column', (['"""projects"""', '"""transfer_issue_url"""'], {}), "('projects', 'transfer_issue_url')\n", (419, 453), False, 'from alembic import op\n'), ((337, 358), 'sqlalchemy.String', 'sa.String', ([], {'length': '(255)'}), '(length=255)\n', (346, 358), True, 'import sqlalchemy as sa\n')] |
"""
Generic Routines for access to the collection instruments service
License: MIT
Copyright (c) 2017 Crown Copyright (Office for National Statistics)
ONSCollectionInstrument wraps access to the CI service
"""
from os import getenv
class ONSCollectionInstrument(object):
"""
This class is designed to take all the work out of accessing the case service. Initially it
should be able to validate and log events against the case service and also query the event
    service for specific combinations of events (for example, to determine case status).
"""
def __init__(self, env):
self._env = env
self._get = '/collection-instrument-api/1.0.2/collectioninstrument/id'
self._upload = '/collection-instrument-api/1.0.2/survey_responses'
def activate(self):
""""""
api = getenv('ONS_API_CI', default=None)
if api:
self._env.asyncio.endpoint_init(api, self._get)
self._env.asyncio.endpoint_init(api, self._upload)
def get_by_id(self, instrument_id):
"""
Recover an exercise by instrument_id
:param instrument_id: The id of the exercise in question
:return: An instrument record
"""
instrument = self._env.asyncio.access_endpoint('{}/{}'.format(self._get, instrument_id))
if not instrument:
return 404, {'code': 404, 'text': 'unable to find instrument for this instrument_id'}
return 200, {'code': 200, 'instrument': instrument}
def upload(self, case_id, party_id, file_obj):
try:
upload = self._env.asyncio.post_upload(self._upload, case_id, file_obj)._result()
category = 'SUCCESSFUL_RESPONSE_UPLOAD' if upload else 'UNSUCCESSFUL_RESPONSE_UPLOAD'
code, msg = self._env.case_service.post_event(case_id,
category=category,
created_by='SYSTEM',
party_id=party_id,
description='Instrument response uploaded "{}"'.format(case_id))
if code != 200:
self._env.logger.error('error code = {} logging to case service: "{}"'.format(code, msg))
code = 200 if upload else 404
text = 'instrument uploaded' if upload else 'unable to upload instrument'
return code, {'code': code, 'text': text}
except Exception as e:
return 500, str(e)
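# Hedged usage sketch (not part of the original module): the surrounding
# application is assumed to supply the `env` object with its logger, asyncio
# helper and case service, so the calls below are shown commented out.
#
#   ci = ONSCollectionInstrument(env)
#   ci.activate()                                    # registers endpoints if ONS_API_CI is set
#   code, body = ci.get_by_id(instrument_id)         # 200 with the instrument, or 404
#   code, body = ci.upload(case_id, party_id, file)  # uploads a response and logs a case event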
| [
"os.getenv"
] | [((845, 879), 'os.getenv', 'getenv', (['"""ONS_API_CI"""'], {'default': 'None'}), "('ONS_API_CI', default=None)\n", (851, 879), False, 'from os import getenv\n')] |
'''
Test swutil.decorators
'''
import unittest
from swutil.decorators import print_profile
import math
class TestDecorators(unittest.TestCase):
def test_print_profile(self):
@print_profile
def test(a):
return math.factorial(a)
test(3)
test(20000)
if __name__ == "__main__":
unittest.main()
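# Hedged usage sketch (not part of the original tests): print_profile is an
# ordinary decorator, so it can wrap functions outside unittest as well; this
# assumes, as the factorial test above does, that the decorator prints its
# profiling report as a side effect of each call.
#
#   @print_profile
#   def slow_sum(n):
#       return sum(range(n))
#
#   slow_sum(10_000)  # prints profiling output for the call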
| [
"unittest.main",
"math.factorial"
] | [((329, 344), 'unittest.main', 'unittest.main', ([], {}), '()\n', (342, 344), False, 'import unittest\n'), ((243, 260), 'math.factorial', 'math.factorial', (['a'], {}), '(a)\n', (257, 260), False, 'import math\n')] |
import requests
from bs4 import BeautifulSoup
print('Type "en" if you want to translate from French into English, or "fr" if you want to translate from English into French:')
dst_lang = input()
print('Type the word you want to translate:')
word = input()
print(f'You chose "{dst_lang}" as the language to translate "{word}" to.')
url_en_to_fr = "https://context.reverso.net/translation/english-french/"
url_fr_to_en = "https://context.reverso.net/translation/french-english/"
url = None
headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.136 Safari/537.36'}
if 'fr' == dst_lang:
url = url_en_to_fr + word
elif 'en' == dst_lang:
url = url_fr_to_en + word
else:
print('Error')
exit()
response = requests.get(url, headers = headers)
if response:
#print('Success')
#print(response.text)
print(str(response.status_code) + " OK")
else:
print('Fail')
print(response.status_code)
html_source_code = response.content
soup = BeautifulSoup(html_source_code, "lxml")
# --------------------------
# for translated word
#translation = soup.select("a.translation.ltr.dict em.translation")
#translation = soup.select("a.translation.ltr.dict em.translation")
#translation = soup.select("a.translation.ltr.dict em")
translation = soup.select("a.translation.ltr.dict")
translation_list = ['Translation']
#print( "find" + str(len(translation)) + "elements")
for dest_lang_word in translation:
#print(dest_lang_word.text)
translation_list.append( dest_lang_word.text.strip() )
print(translation_list)
# --------------------------
# for example sentence
example_sentence = soup.select("#examples-content span.text")
sentence_list = ['Translation']
for sentence in example_sentence:
sentence_list.append(sentence.text.strip())
#print( sentence.text )
print(sentence_list)
#words = soup.select(".example .trg .text em")
#for word in words:
# print(word)
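# Hedged follow-up sketch (not in the original script): on the Reverso context
# pages the example spans alternate between source and target sentences, so the
# flat list built above can be paired for a side-by-side view. This assumes the
# alternating layout holds for the queried word.
example_pairs = list(zip(sentence_list[1::2], sentence_list[2::2]))
for src, dst in example_pairs:
    print(f"{src}  ->  {dst}")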
| [
"bs4.BeautifulSoup",
"requests.get"
] | [((793, 827), 'requests.get', 'requests.get', (['url'], {'headers': 'headers'}), '(url, headers=headers)\n', (805, 827), False, 'import requests\n'), ((1038, 1077), 'bs4.BeautifulSoup', 'BeautifulSoup', (['html_source_code', '"""lxml"""'], {}), "(html_source_code, 'lxml')\n", (1051, 1077), False, 'from bs4 import BeautifulSoup\n')] |