max_stars_repo_path (string, 4-245 chars) | max_stars_repo_name (string, 7-115 chars) | max_stars_count (int64, 101-368k) | id (string, 2-8 chars) | content (string, 6-1.03M chars)
---|---|---|---|---|
exchanges/exchange.py | Kaoschuks/cryptobot | 178 | 12682026 |
import datetime
from api import utils
from abc import ABC, abstractmethod
from twisted.internet import reactor
from strategies.strategy import Strategy
from models.order import Order
class Exchange(ABC):
currency: str
asset: str
strategy: Strategy
def __init__(self, key: str, secret: str):
self.apiKey = key
self.apiSecret = secret
self.name = None
self.client = None
self.socketManager = None
self.socket = None
self.currency = ''
self.asset = ''
self.strategy = None
def set_currency(self, symbol: str):
self.currency = symbol
def set_asset(self, symbol: str):
self.asset = symbol
def set_strategy(self, strategy: Strategy):
self.strategy = strategy
def compute_symbol_pair(self):
return utils.format_pair(self.currency, self.asset)
# abstract methods
    # Override to set the current exchange's symbol pair notation (default: '_' separator as currency_asset, e.g. eur_btc)
@abstractmethod
def get_symbol(self):
        return self.compute_symbol_pair()
# Get current symbol ticker
@abstractmethod
def symbol_ticker(self):
pass
# Get current symbol ticker candle for given interval
@abstractmethod
def symbol_ticker_candle(self, interval):
pass
# Get current symbol historic value
@abstractmethod
def historical_symbol_ticker_candle(self, start: datetime, end=None, interval=60):
pass
# Get balance for a given currency
@abstractmethod
def get_asset_balance(self, currency):
pass
# Create an exchange order
@abstractmethod
def order(self, order: Order):
pass
# Create an exchange test order
@abstractmethod
def test_order(self, order: Order):
pass
# Check an exchange order status
@abstractmethod
def check_order(self, orderId):
pass
# Cancel an exchange order
@abstractmethod
def cancel_order(self, orderId):
pass
# WebSocket related methods
@abstractmethod
def get_socket_manager(self, purchase):
pass
@abstractmethod
def websocket_event_handler(self, msg):
pass
def start_socket(self):
print('Starting WebSocket connection...')
self.socketManager.start()
def close_socket(self):
self.socketManager.stop_socket(self.socket)
self.socketManager.close()
# properly terminate WebSocket
reactor.stop()
@abstractmethod
def start_symbol_ticker_socket(self, symbol: str):
pass
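# Illustrative usage sketch (subclass and strategy names below are
# hypothetical): a concrete exchange subclasses Exchange, implements the
# abstract methods against a real API client, and is then driven like this:
#
#     exchange = BinanceExchange(key=API_KEY, secret=API_SECRET)
#     exchange.set_currency('eur')
#     exchange.set_asset('btc')
#     exchange.set_strategy(MyStrategy())
#     print(exchange.get_symbol())  # e.g. 'eur_btc' with the default notation
#     exchange.start_symbol_ticker_socket(exchange.get_symbol())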
|
src/nodes/corenodes/adjust/__init__.py | Correct-Syntax/GimelStudio | 134 | 12682042 |
from .brightness_contrast_node import BrightnessContrastNode
|
models/position_enc.py | sorrowyn/C-Tran | 104 | 12682045 | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
"""
Various positional encodings for the transformer.
"""
import math
import torch
from torch import nn
from pdb import set_trace as stop
class PositionEmbeddingSine(nn.Module):
"""
This is a more standard version of the position embedding, very similar to the one
used by the Attention is all you need paper, generalized to work on images.
"""
def __init__(self, num_pos_feats=64, temperature=10000, normalize=False, scale=None):
super().__init__()
self.num_pos_feats = num_pos_feats
self.temperature = temperature
self.normalize = normalize
if scale is not None and normalize is False:
raise ValueError("normalize should be True if scale is passed")
if scale is None:
scale = 2 * math.pi
self.scale = scale
def forward(self, x, mask):
# x = tensor_list.tensors
# mask = tensor_list.mask
assert mask is not None
not_mask = ~mask
# stop()
y_embed = not_mask.cumsum(1)#, dtype=torch.float32)
x_embed = not_mask.cumsum(2)#, dtype=torch.float32)
if self.normalize:
eps = 1e-6
y_embed = y_embed / (y_embed[:, -1:, :] + eps) * self.scale
x_embed = x_embed / (x_embed[:, :, -1:] + eps) * self.scale
dim_t = torch.arange(self.num_pos_feats, device=x.device)#, dtype=torch.float32)
dim_t = self.temperature ** (2 * (dim_t // 2) / self.num_pos_feats)
pos_x = x_embed[:, :, :, None] / dim_t
pos_y = y_embed[:, :, :, None] / dim_t
# stop()
pos_x = torch.stack((pos_x[:, :, :, 0::2].sin(), pos_x[:, :, :, 1::2].cos()), dim=4).flatten(3)
pos_y = torch.stack((pos_y[:, :, :, 0::2].sin(), pos_y[:, :, :, 1::2].cos()), dim=4).flatten(3)
pos = torch.cat((pos_y, pos_x), dim=3).permute(0, 3, 1, 2)
return pos
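# Shape sketch for PositionEmbeddingSine (illustrative): given a feature map
# x of shape (B, C, H, W) and a boolean padding mask of shape (B, H, W),
#
#     pos = PositionEmbeddingSine(num_pos_feats=64, normalize=True)(x, mask)
#
# returns a tensor of shape (B, 2 * num_pos_feats, H, W): the sine/cosine
# y- and x-encodings concatenated along the channel dimension.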
# def build_position_encoding(args):
# N_steps = args.hidden_dim // 2
# position_embedding = PositionEmbeddingSine(N_steps, normalize=True)
def positionalencoding2d(d_model, height, width):
"""
:param d_model: dimension of the model
:param height: height of the positions
:param width: width of the positions
:return: d_model*height*width position matrix
"""
if d_model % 4 != 0:
raise ValueError("Cannot use sin/cos positional encoding with "
"odd dimension (got dim={:d})".format(d_model))
pe = torch.zeros(d_model, height, width)
# Each dimension use half of d_model
d_model = int(d_model / 2)
div_term = torch.exp(torch.arange(0., d_model, 2) *
-(math.log(10000.0) / d_model))
pos_w = torch.arange(0., width).unsqueeze(1)
pos_h = torch.arange(0., height).unsqueeze(1)
pe[0:d_model:2, :, :] = torch.sin(pos_w * div_term).transpose(0, 1).unsqueeze(1).repeat(1, height, 1)
pe[1:d_model:2, :, :] = torch.cos(pos_w * div_term).transpose(0, 1).unsqueeze(1).repeat(1, height, 1)
pe[d_model::2, :, :] = torch.sin(pos_h * div_term).transpose(0, 1).unsqueeze(2).repeat(1, 1, width)
    pe[d_model + 1::2, :, :] = torch.cos(pos_h * div_term).transpose(0, 1).unsqueeze(2).repeat(1, 1, width)
return pe |
alipay/aop/api/domain/RelateInputInvoiceOrderDTO.py | alipay/alipay-sdk-python-all | 213 | 12682054 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
from alipay.aop.api.domain.InputInvoiceBillLinkOrderDTO import InputInvoiceBillLinkOrderDTO
from alipay.aop.api.domain.MultiCurrencyMoneyOpenApi import MultiCurrencyMoneyOpenApi
class RelateInputInvoiceOrderDTO(object):
def __init__(self):
self._attachment_name = None
self._attachment_oss_key = None
self._buyer_address = None
self._buyer_bank_account = None
self._buyer_bank_name = None
self._buyer_inst_id = None
self._buyer_invoice_title = None
self._buyer_tax_no = None
self._buyer_telephone = None
self._input_invoice_bill_link_order_list = None
self._inst_id = None
self._invoice_amt = None
self._invoice_code = None
self._invoice_date = None
self._invoice_material = None
self._invoice_no = None
self._invoice_note = None
self._invoice_receive_date = None
self._invoice_source = None
self._invoice_type = None
self._ip_role_id = None
self._memo = None
self._operator = None
self._seller_address = None
self._seller_bank_account = None
self._seller_bank_name = None
self._seller_company_name = None
self._seller_ip_role_id = None
self._seller_mid = None
self._seller_tax_no = None
self._seller_telephone = None
self._tax_amt = None
self._tax_rate = None
@property
def attachment_name(self):
return self._attachment_name
@attachment_name.setter
def attachment_name(self, value):
self._attachment_name = value
@property
def attachment_oss_key(self):
return self._attachment_oss_key
@attachment_oss_key.setter
def attachment_oss_key(self, value):
self._attachment_oss_key = value
@property
def buyer_address(self):
return self._buyer_address
@buyer_address.setter
def buyer_address(self, value):
self._buyer_address = value
@property
def buyer_bank_account(self):
return self._buyer_bank_account
@buyer_bank_account.setter
def buyer_bank_account(self, value):
self._buyer_bank_account = value
@property
def buyer_bank_name(self):
return self._buyer_bank_name
@buyer_bank_name.setter
def buyer_bank_name(self, value):
self._buyer_bank_name = value
@property
def buyer_inst_id(self):
return self._buyer_inst_id
@buyer_inst_id.setter
def buyer_inst_id(self, value):
self._buyer_inst_id = value
@property
def buyer_invoice_title(self):
return self._buyer_invoice_title
@buyer_invoice_title.setter
def buyer_invoice_title(self, value):
self._buyer_invoice_title = value
@property
def buyer_tax_no(self):
return self._buyer_tax_no
@buyer_tax_no.setter
def buyer_tax_no(self, value):
self._buyer_tax_no = value
@property
def buyer_telephone(self):
return self._buyer_telephone
@buyer_telephone.setter
def buyer_telephone(self, value):
self._buyer_telephone = value
@property
def input_invoice_bill_link_order_list(self):
return self._input_invoice_bill_link_order_list
@input_invoice_bill_link_order_list.setter
def input_invoice_bill_link_order_list(self, value):
if isinstance(value, list):
self._input_invoice_bill_link_order_list = list()
for i in value:
if isinstance(i, InputInvoiceBillLinkOrderDTO):
self._input_invoice_bill_link_order_list.append(i)
else:
self._input_invoice_bill_link_order_list.append(InputInvoiceBillLinkOrderDTO.from_alipay_dict(i))
@property
def inst_id(self):
return self._inst_id
@inst_id.setter
def inst_id(self, value):
self._inst_id = value
@property
def invoice_amt(self):
return self._invoice_amt
@invoice_amt.setter
def invoice_amt(self, value):
if isinstance(value, MultiCurrencyMoneyOpenApi):
self._invoice_amt = value
else:
self._invoice_amt = MultiCurrencyMoneyOpenApi.from_alipay_dict(value)
@property
def invoice_code(self):
return self._invoice_code
@invoice_code.setter
def invoice_code(self, value):
self._invoice_code = value
@property
def invoice_date(self):
return self._invoice_date
@invoice_date.setter
def invoice_date(self, value):
self._invoice_date = value
@property
def invoice_material(self):
return self._invoice_material
@invoice_material.setter
def invoice_material(self, value):
self._invoice_material = value
@property
def invoice_no(self):
return self._invoice_no
@invoice_no.setter
def invoice_no(self, value):
self._invoice_no = value
@property
def invoice_note(self):
return self._invoice_note
@invoice_note.setter
def invoice_note(self, value):
self._invoice_note = value
@property
def invoice_receive_date(self):
return self._invoice_receive_date
@invoice_receive_date.setter
def invoice_receive_date(self, value):
self._invoice_receive_date = value
@property
def invoice_source(self):
return self._invoice_source
@invoice_source.setter
def invoice_source(self, value):
self._invoice_source = value
@property
def invoice_type(self):
return self._invoice_type
@invoice_type.setter
def invoice_type(self, value):
self._invoice_type = value
@property
def ip_role_id(self):
return self._ip_role_id
@ip_role_id.setter
def ip_role_id(self, value):
self._ip_role_id = value
@property
def memo(self):
return self._memo
@memo.setter
def memo(self, value):
self._memo = value
@property
def operator(self):
return self._operator
@operator.setter
def operator(self, value):
self._operator = value
@property
def seller_address(self):
return self._seller_address
@seller_address.setter
def seller_address(self, value):
self._seller_address = value
@property
def seller_bank_account(self):
return self._seller_bank_account
@seller_bank_account.setter
def seller_bank_account(self, value):
self._seller_bank_account = value
@property
def seller_bank_name(self):
return self._seller_bank_name
@seller_bank_name.setter
def seller_bank_name(self, value):
self._seller_bank_name = value
@property
def seller_company_name(self):
return self._seller_company_name
@seller_company_name.setter
def seller_company_name(self, value):
self._seller_company_name = value
@property
def seller_ip_role_id(self):
return self._seller_ip_role_id
@seller_ip_role_id.setter
def seller_ip_role_id(self, value):
self._seller_ip_role_id = value
@property
def seller_mid(self):
return self._seller_mid
@seller_mid.setter
def seller_mid(self, value):
self._seller_mid = value
@property
def seller_tax_no(self):
return self._seller_tax_no
@seller_tax_no.setter
def seller_tax_no(self, value):
self._seller_tax_no = value
@property
def seller_telephone(self):
return self._seller_telephone
@seller_telephone.setter
def seller_telephone(self, value):
self._seller_telephone = value
@property
def tax_amt(self):
return self._tax_amt
@tax_amt.setter
def tax_amt(self, value):
if isinstance(value, MultiCurrencyMoneyOpenApi):
self._tax_amt = value
else:
self._tax_amt = MultiCurrencyMoneyOpenApi.from_alipay_dict(value)
@property
def tax_rate(self):
return self._tax_rate
@tax_rate.setter
def tax_rate(self, value):
self._tax_rate = value
def to_alipay_dict(self):
params = dict()
if self.attachment_name:
if hasattr(self.attachment_name, 'to_alipay_dict'):
params['attachment_name'] = self.attachment_name.to_alipay_dict()
else:
params['attachment_name'] = self.attachment_name
if self.attachment_oss_key:
if hasattr(self.attachment_oss_key, 'to_alipay_dict'):
params['attachment_oss_key'] = self.attachment_oss_key.to_alipay_dict()
else:
params['attachment_oss_key'] = self.attachment_oss_key
if self.buyer_address:
if hasattr(self.buyer_address, 'to_alipay_dict'):
params['buyer_address'] = self.buyer_address.to_alipay_dict()
else:
params['buyer_address'] = self.buyer_address
if self.buyer_bank_account:
if hasattr(self.buyer_bank_account, 'to_alipay_dict'):
params['buyer_bank_account'] = self.buyer_bank_account.to_alipay_dict()
else:
params['buyer_bank_account'] = self.buyer_bank_account
if self.buyer_bank_name:
if hasattr(self.buyer_bank_name, 'to_alipay_dict'):
params['buyer_bank_name'] = self.buyer_bank_name.to_alipay_dict()
else:
params['buyer_bank_name'] = self.buyer_bank_name
if self.buyer_inst_id:
if hasattr(self.buyer_inst_id, 'to_alipay_dict'):
params['buyer_inst_id'] = self.buyer_inst_id.to_alipay_dict()
else:
params['buyer_inst_id'] = self.buyer_inst_id
if self.buyer_invoice_title:
if hasattr(self.buyer_invoice_title, 'to_alipay_dict'):
params['buyer_invoice_title'] = self.buyer_invoice_title.to_alipay_dict()
else:
params['buyer_invoice_title'] = self.buyer_invoice_title
if self.buyer_tax_no:
if hasattr(self.buyer_tax_no, 'to_alipay_dict'):
params['buyer_tax_no'] = self.buyer_tax_no.to_alipay_dict()
else:
params['buyer_tax_no'] = self.buyer_tax_no
if self.buyer_telephone:
if hasattr(self.buyer_telephone, 'to_alipay_dict'):
params['buyer_telephone'] = self.buyer_telephone.to_alipay_dict()
else:
params['buyer_telephone'] = self.buyer_telephone
if self.input_invoice_bill_link_order_list:
if isinstance(self.input_invoice_bill_link_order_list, list):
for i in range(0, len(self.input_invoice_bill_link_order_list)):
element = self.input_invoice_bill_link_order_list[i]
if hasattr(element, 'to_alipay_dict'):
self.input_invoice_bill_link_order_list[i] = element.to_alipay_dict()
if hasattr(self.input_invoice_bill_link_order_list, 'to_alipay_dict'):
params['input_invoice_bill_link_order_list'] = self.input_invoice_bill_link_order_list.to_alipay_dict()
else:
params['input_invoice_bill_link_order_list'] = self.input_invoice_bill_link_order_list
if self.inst_id:
if hasattr(self.inst_id, 'to_alipay_dict'):
params['inst_id'] = self.inst_id.to_alipay_dict()
else:
params['inst_id'] = self.inst_id
if self.invoice_amt:
if hasattr(self.invoice_amt, 'to_alipay_dict'):
params['invoice_amt'] = self.invoice_amt.to_alipay_dict()
else:
params['invoice_amt'] = self.invoice_amt
if self.invoice_code:
if hasattr(self.invoice_code, 'to_alipay_dict'):
params['invoice_code'] = self.invoice_code.to_alipay_dict()
else:
params['invoice_code'] = self.invoice_code
if self.invoice_date:
if hasattr(self.invoice_date, 'to_alipay_dict'):
params['invoice_date'] = self.invoice_date.to_alipay_dict()
else:
params['invoice_date'] = self.invoice_date
if self.invoice_material:
if hasattr(self.invoice_material, 'to_alipay_dict'):
params['invoice_material'] = self.invoice_material.to_alipay_dict()
else:
params['invoice_material'] = self.invoice_material
if self.invoice_no:
if hasattr(self.invoice_no, 'to_alipay_dict'):
params['invoice_no'] = self.invoice_no.to_alipay_dict()
else:
params['invoice_no'] = self.invoice_no
if self.invoice_note:
if hasattr(self.invoice_note, 'to_alipay_dict'):
params['invoice_note'] = self.invoice_note.to_alipay_dict()
else:
params['invoice_note'] = self.invoice_note
if self.invoice_receive_date:
if hasattr(self.invoice_receive_date, 'to_alipay_dict'):
params['invoice_receive_date'] = self.invoice_receive_date.to_alipay_dict()
else:
params['invoice_receive_date'] = self.invoice_receive_date
if self.invoice_source:
if hasattr(self.invoice_source, 'to_alipay_dict'):
params['invoice_source'] = self.invoice_source.to_alipay_dict()
else:
params['invoice_source'] = self.invoice_source
if self.invoice_type:
if hasattr(self.invoice_type, 'to_alipay_dict'):
params['invoice_type'] = self.invoice_type.to_alipay_dict()
else:
params['invoice_type'] = self.invoice_type
if self.ip_role_id:
if hasattr(self.ip_role_id, 'to_alipay_dict'):
params['ip_role_id'] = self.ip_role_id.to_alipay_dict()
else:
params['ip_role_id'] = self.ip_role_id
if self.memo:
if hasattr(self.memo, 'to_alipay_dict'):
params['memo'] = self.memo.to_alipay_dict()
else:
params['memo'] = self.memo
if self.operator:
if hasattr(self.operator, 'to_alipay_dict'):
params['operator'] = self.operator.to_alipay_dict()
else:
params['operator'] = self.operator
if self.seller_address:
if hasattr(self.seller_address, 'to_alipay_dict'):
params['seller_address'] = self.seller_address.to_alipay_dict()
else:
params['seller_address'] = self.seller_address
if self.seller_bank_account:
if hasattr(self.seller_bank_account, 'to_alipay_dict'):
params['seller_bank_account'] = self.seller_bank_account.to_alipay_dict()
else:
params['seller_bank_account'] = self.seller_bank_account
if self.seller_bank_name:
if hasattr(self.seller_bank_name, 'to_alipay_dict'):
params['seller_bank_name'] = self.seller_bank_name.to_alipay_dict()
else:
params['seller_bank_name'] = self.seller_bank_name
if self.seller_company_name:
if hasattr(self.seller_company_name, 'to_alipay_dict'):
params['seller_company_name'] = self.seller_company_name.to_alipay_dict()
else:
params['seller_company_name'] = self.seller_company_name
if self.seller_ip_role_id:
if hasattr(self.seller_ip_role_id, 'to_alipay_dict'):
params['seller_ip_role_id'] = self.seller_ip_role_id.to_alipay_dict()
else:
params['seller_ip_role_id'] = self.seller_ip_role_id
if self.seller_mid:
if hasattr(self.seller_mid, 'to_alipay_dict'):
params['seller_mid'] = self.seller_mid.to_alipay_dict()
else:
params['seller_mid'] = self.seller_mid
if self.seller_tax_no:
if hasattr(self.seller_tax_no, 'to_alipay_dict'):
params['seller_tax_no'] = self.seller_tax_no.to_alipay_dict()
else:
params['seller_tax_no'] = self.seller_tax_no
if self.seller_telephone:
if hasattr(self.seller_telephone, 'to_alipay_dict'):
params['seller_telephone'] = self.seller_telephone.to_alipay_dict()
else:
params['seller_telephone'] = self.seller_telephone
if self.tax_amt:
if hasattr(self.tax_amt, 'to_alipay_dict'):
params['tax_amt'] = self.tax_amt.to_alipay_dict()
else:
params['tax_amt'] = self.tax_amt
if self.tax_rate:
if hasattr(self.tax_rate, 'to_alipay_dict'):
params['tax_rate'] = self.tax_rate.to_alipay_dict()
else:
params['tax_rate'] = self.tax_rate
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = RelateInputInvoiceOrderDTO()
if 'attachment_name' in d:
o.attachment_name = d['attachment_name']
if 'attachment_oss_key' in d:
o.attachment_oss_key = d['attachment_oss_key']
if 'buyer_address' in d:
o.buyer_address = d['buyer_address']
if 'buyer_bank_account' in d:
o.buyer_bank_account = d['buyer_bank_account']
if 'buyer_bank_name' in d:
o.buyer_bank_name = d['buyer_bank_name']
if 'buyer_inst_id' in d:
o.buyer_inst_id = d['buyer_inst_id']
if 'buyer_invoice_title' in d:
o.buyer_invoice_title = d['buyer_invoice_title']
if 'buyer_tax_no' in d:
o.buyer_tax_no = d['buyer_tax_no']
if 'buyer_telephone' in d:
o.buyer_telephone = d['buyer_telephone']
if 'input_invoice_bill_link_order_list' in d:
o.input_invoice_bill_link_order_list = d['input_invoice_bill_link_order_list']
if 'inst_id' in d:
o.inst_id = d['inst_id']
if 'invoice_amt' in d:
o.invoice_amt = d['invoice_amt']
if 'invoice_code' in d:
o.invoice_code = d['invoice_code']
if 'invoice_date' in d:
o.invoice_date = d['invoice_date']
if 'invoice_material' in d:
o.invoice_material = d['invoice_material']
if 'invoice_no' in d:
o.invoice_no = d['invoice_no']
if 'invoice_note' in d:
o.invoice_note = d['invoice_note']
if 'invoice_receive_date' in d:
o.invoice_receive_date = d['invoice_receive_date']
if 'invoice_source' in d:
o.invoice_source = d['invoice_source']
if 'invoice_type' in d:
o.invoice_type = d['invoice_type']
if 'ip_role_id' in d:
o.ip_role_id = d['ip_role_id']
if 'memo' in d:
o.memo = d['memo']
if 'operator' in d:
o.operator = d['operator']
if 'seller_address' in d:
o.seller_address = d['seller_address']
if 'seller_bank_account' in d:
o.seller_bank_account = d['seller_bank_account']
if 'seller_bank_name' in d:
o.seller_bank_name = d['seller_bank_name']
if 'seller_company_name' in d:
o.seller_company_name = d['seller_company_name']
if 'seller_ip_role_id' in d:
o.seller_ip_role_id = d['seller_ip_role_id']
if 'seller_mid' in d:
o.seller_mid = d['seller_mid']
if 'seller_tax_no' in d:
o.seller_tax_no = d['seller_tax_no']
if 'seller_telephone' in d:
o.seller_telephone = d['seller_telephone']
if 'tax_amt' in d:
o.tax_amt = d['tax_amt']
if 'tax_rate' in d:
o.tax_rate = d['tax_rate']
return o
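# A minimal round-trip sketch (the field values below are invented for
# illustration): from_alipay_dict() fills only the keys present in the dict,
# and to_alipay_dict() serializes only the fields that were set.
if __name__ == '__main__':
    sample = {'invoice_code': 'IC-001', 'invoice_no': 'NO-042', 'memo': 'demo'}
    dto = RelateInputInvoiceOrderDTO.from_alipay_dict(sample)
    print(dto.invoice_code)                # IC-001
    print(dto.to_alipay_dict() == sample)  # True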
|
niftynet/contrib/niftyreg_image_resampling/setup.py | tdml13/NiftyNet | 1,403 | 12682079 | from __future__ import print_function
import os
import os.path as osp
import platform
from setuptools import setup, Extension, Command
from setuptools.command.build_ext import build_ext
from shutil import which
import subprocess as sp
import sys
__CMAKE_OVERRIDE_FLAGS__ = {}
class CMakeExtension(Extension):
def __init__(self, name):
super(CMakeExtension, self).__init__(name, sources=[])
class CMakeOverride(Command):
description = 'Overrides CMake variables for build'
user_options = [('settings=', 's',
'CMake variable override: <KEY>:<VALUE>:<KEY>:<VALUE>...')]
def initialize_options(self):
self.settings = ''
def finalize_options(self):
pass
def run(self):
global __CMAKE_OVERRIDE_FLAGS__
overrides = self.settings.split(':')
for i in range(0, len(overrides), 2):
print('Overriding %s with %s' % (overrides[i], overrides[i+1]))
__CMAKE_OVERRIDE_FLAGS__[overrides[i]] = overrides[i+1]
class CMakeBuildExt(build_ext):
def run(self):
for ext in self.extensions:
self.build_extension(ext)
def build_extension(self, ext):
print('Building ' + ext.name)
outdir = osp.abspath(osp.dirname(self.get_ext_fullpath(ext.name)))
args = ['-DCMAKE_LIBRARY_OUTPUT_DIRECTORY=' + outdir]
if not osp.isdir(outdir):
os.makedirs(outdir)
args += ['-DGPU_RESAMPLING_CONFIGFILE_DIR=' + outdir]
args += ['-DCMAKE_BUILD_TYPE=' + ('Debug' if self.debug else 'Release')]
if platform.system() == 'Linux' \
and any(dist in platform.dist() for dist in ('Debian', 'Ubuntu')):
# Need to find compilers that play nice with nvcc;
# this assumes compatible versions have been linked to
# /PATH/TO/cuda/bin/cc and /PATH/TO/cuda/bin/c++, and
# that they appear first on the search path.
if not 'CMAKE_C_COMPILER' in __CMAKE_OVERRIDE_FLAGS__:
args += ['-DCMAKE_C_COMPILER=' + which('cc')]
if not 'CMAKE_CXX_COMPILER' in __CMAKE_OVERRIDE_FLAGS__:
args += ['-DCMAKE_CXX_COMPILER=' + which('c++')]
for key, val in __CMAKE_OVERRIDE_FLAGS__.items():
args += ['-D' + key + '=' + val]
args += [osp.join(osp.dirname(osp.abspath(__file__)),
'niftyreg_gpu_resampler')]
if not osp.isdir(self.build_temp):
os.makedirs(self.build_temp)
print('Building in ' + str(self.build_temp)
+ ': cmake ' + ' '.join(args))
sp.call(['cmake'] + args, cwd=self.build_temp)
sp.call(['cmake'] + args, cwd=self.build_temp)
sp.call(['cmake', '--build', self.build_temp])
setup(
name='niftyreg_gpu_resampler',
description='A NiftyNet image resampling sub-module powered by NiftyReg '
'GPU code.',
packages=['.'],
ext_modules=[CMakeExtension('niftyreg_gpu_resampler')],
cmdclass={'override': CMakeOverride,
'build_ext': CMakeBuildExt},
zip_safe=False,
)
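# Usage sketch (compiler paths are hypothetical): CMake variables can be
# overridden through the custom 'override' command before building, e.g.
#
#     python setup.py override -s CMAKE_C_COMPILER:/usr/bin/cc:CMAKE_CXX_COMPILER:/usr/bin/c++ build_ext
#
# CMakeOverride.run() splits the settings string pairwise, so the command
# above populates __CMAKE_OVERRIDE_FLAGS__ with
# {'CMAKE_C_COMPILER': '/usr/bin/cc', 'CMAKE_CXX_COMPILER': '/usr/bin/c++'}.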
|
tests/unit/confidant/services/keymanager_test.py | chadwhitacre/confidant | 1,820 | 12682084 |
from confidant.services import keymanager
def test_get_key_id(mocker):
mocker.patch('confidant.services.keymanager._KEY_METADATA', {})
mock_auth_client = mocker.Mock()
mock_auth_client.describe_key = mocker.Mock(
return_value={'KeyMetadata': {'KeyId': 'mockid'}}
)
mocker.patch(
'confidant.services.keymanager._get_auth_kms_client',
return_value=mock_auth_client,
)
assert keymanager.get_key_id('mockalias') == 'mockid'
def test_get_key_id_cached(mocker):
mocker.patch(
'confidant.services.keymanager._KEY_METADATA',
{'mockalias': {'KeyMetadata': {'KeyId': 'mockid'}}}
)
mock_auth_client = mocker.Mock()
mock_auth_client.describe_key = mocker.Mock()
mocker.patch(
'confidant.services.keymanager._get_auth_kms_client',
return_value=mock_auth_client,
)
mock_auth_client.describe_key = mocker.Mock()
assert keymanager.get_key_id('mockalias') == 'mockid'
def test_create_datakey_mocked(mocker):
fernet_mock = mocker.patch('cryptography.fernet.Fernet.generate_key')
fernet_mock.return_value = 'mocked_fernet_key'
mocker.patch('confidant.services.keymanager.settings.USE_ENCRYPTION', False)
ret = keymanager.create_datakey({})
assert fernet_mock.called is True
# Assert that we got a dict returned where the ciphertext and plaintext
# keys are equal
assert ret['ciphertext'] == ret['plaintext']
# Assert ciphertext is mocked_fernet_key
assert ret['ciphertext'] == 'mocked_fernet_key'
def test_decrypt_datakey_mocked(mocker):
mocker.patch('confidant.services.keymanager.settings.USE_ENCRYPTION', False)
ret = keymanager.decrypt_datakey('mocked_fernet_key')
# Ensure we get the same value out that we sent in.
assert ret == 'mocked_fernet_key'
def test_create_datakey_with_encryption(mocker):
cd_mock = mocker.patch(
'confidant.services.keymanager.cryptolib.create_datakey'
)
cmd_mock = mocker.patch(
'confidant.services.keymanager.cryptolib.create_mock_datakey'
)
mocker.patch('confidant.services.keymanager.settings.USE_ENCRYPTION', True)
context = {'from': 'confidant-development',
'to': 'confidant-development'}
keymanager.create_datakey(context)
# Assert that create_datakey was called and create_mock_datakey was
# not called.
assert cd_mock.called is True
assert cmd_mock.called is False
def test_decrypt_datakey_with_encryption(mocker):
dd_mock = mocker.patch(
'confidant.services.keymanager.cryptolib.decrypt_datakey'
)
dmd_mock = mocker.patch(
'confidant.services.keymanager.cryptolib.decrypt_mock_datakey'
)
mocker.patch('confidant.services.keymanager.settings.USE_ENCRYPTION', True)
context = {'from': 'confidant-development',
'to': 'confidant-development'}
keymanager.decrypt_datakey(b'encrypted', context)
# Assert that decrypt_datakey was called and decrypt_mock_datakey was
# not called.
assert dd_mock.called is True
assert dmd_mock.called is False
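# These tests use the 'mocker' fixture from pytest-mock; a typical invocation
# (path shown for illustration) is:
#     pytest tests/unit/confidant/services/keymanager_test.py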
|
lib/grizzled/grizzled/db/dbgadfly.py | MiCHiLU/google_appengine_sdk | 790 | 12682088 |
# $Id: f25618704b7ebe12c191cc1a51055c26db731b85 $
"""
Gadfly extended database driver.
"""
__docformat__ = "restructuredtext en"
# ---------------------------------------------------------------------------
# Imports
# ---------------------------------------------------------------------------
import os
import sys
from grizzled.db.base import (Cursor, DB, DBDriver, Error, Warning,
TableMetadata, IndexMetadata, RDBMSMetadata)
# ---------------------------------------------------------------------------
# Exports
# ---------------------------------------------------------------------------
# ---------------------------------------------------------------------------
# Classes
# ---------------------------------------------------------------------------
class GadflyCursor(Cursor):
def __init__(self, real_cursor, driver):
self.real_cursor = real_cursor
self.driver = driver
@property
def rowcount(self):
total = len(self.real_cursor.fetchall())
self.real_cursor.reset_results()
return total
@property
def description(self):
return self.real_cursor.description
def close(self):
try:
self.real_cursor.close()
except:
raise Error(sys.exc_info()[1])
def execute(self, statement, parameters=None):
try:
if parameters:
result = self.real_cursor.execute(statement, parameters)
else:
result = self.real_cursor.execute(statement)
return result
except:
raise Error(sys.exc_info()[1])
def executemany(self, statement, *parameters):
try:
return self.real_cursor.executemany(statement, *parameters)
except:
raise Error(sys.exc_info()[1])
def fetchall(self):
try:
return self.real_cursor.fetchall()
except:
raise Error(sys.exc_info()[1])
def fetchone(self):
try:
return self.real_cursor.fetchone()
except:
s = sys.exc_info()[1]
if (type(s) == str) and (s.startswith('no more')):
return None
raise Error(s)
def fetchmany(self, n):
try:
return self.real_cursor.fetchmany(n)
except:
s = sys.exc_info()[1]
if (type(s) == str) and (s.startswith('no more')):
return None
raise Error(s)
class GadflyDB(DB):
def __init__(self, db, driver):
DB.__init__(self, db, driver)
self.__db = db
self.__driver = driver
def cursor(self):
return Cursor(GadflyCursor(self.__db.cursor(), self.__driver),
self.__driver)
class GadflyDriver(DBDriver):
"""DB Driver for Gadfly, a pure Python RDBMS"""
def __init__(self):
gadfly = self.get_import()
gadfly.error = Exception()
def get_import(self):
import gadfly
return gadfly
def get_display_name(self):
return "Gadfly"
def connect(self,
host=None,
port=None,
user='',
password='',
database='default'):
gadfly = self.get_import()
directory = os.path.dirname(database)
database = os.path.basename(database)
if database.endswith('.gfd'):
database = database[:-4]
try:
g = gadfly.gadfly()
g.startup(database, directory)
return GadflyDB(g, self)
except IOError:
raise Error(sys.exc_info()[1])
def get_tables(self, cursor):
cursor.execute('SELECT table_name FROM __table_names__ '
'WHERE is_view = 0')
table_names = []
for row in cursor.fetchall():
table_names += [row[0]]
return table_names
def get_rdbms_metadata(self, cursor):
import gadfly
version = '.'.join([str(i) for i in gadfly.version_info])
return RDBMSMetadata('gadfly', 'gadfly', version)
def get_table_metadata(self, table, cursor):
self._ensure_valid_table(cursor, table)
cursor.execute("SELECT column_name FROM __columns__ "
"WHERE table_name = '%s'" % table.upper())
result = []
column_names = []
for row in cursor.fetchall():
result += [TableMetadata(row[0], 'object', None, None, None, True)]
return result
def get_index_metadata(self, table, cursor):
self._ensure_valid_table(cursor, table)
cursor.execute("SELECT is_unique, index_name FROM __indices__ "
"WHERE table_name = '%s'" % table.upper())
indexes = []
result = []
for row in cursor.fetchall():
indexes.append(row)
for unique, index_name in indexes:
cursor.execute("SELECT column_name FROM __indexcols__ "
"WHERE index_name = '%s'" % index_name)
cols = []
for row in cursor.fetchall():
cols.append(row[0])
if unique:
description = 'UNIQUE'
else:
description = 'NON-UNIQUE'
result.append(IndexMetadata(index_name, cols, description))
return result
def _is_valid_table(self, cursor, table_name):
tables = self.get_tables(cursor)
return table_name.upper() in tables
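# Rough usage sketch (assumes the third-party pure-Python 'gadfly' package is
# installed; the database path is hypothetical):
#
#     driver = GadflyDriver()
#     db = driver.connect(database='/tmp/demo.gfd')
#     cursor = db.cursor()
#     cursor.execute('SELECT * FROM sometable')
#     rows = cursor.fetchall()
#     print(driver.get_tables(cursor))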
|
scripts/generate-fidl-tags.py | allansrc/fuchsia | 210 | 12682097 | #!/usr/bin/env python3.8
# Copyright 2020 The Fuchsia Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
This tool uses the contents of fidlc .json files to create tags for .fidl files.
When run via fx fidltags, it looks in the existing build directory, and creates
a file named fidl-tags in the root of the source tree for use with your editor.
See `fx fidltags` for help.
"""
import argparse
import sys
import fnmatch
import os
import json
class Tag(object):
def __init__(self, tag, file, line, column):
self.tag = tag
self.file = file
self.line = line
self.column = column
def __repr__(self):
return f'Tag({self.tag}, {self.file}, {self.line}, {self.column})'
def parse_args():
parser = argparse.ArgumentParser(
description=__doc__,
formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument(
'--build-dir',
required=True,
help='Fuchsia build dir, e.g. out/default')
parser.add_argument(
'--output', default='fidl-tags', help='Output name of the tags file')
return parser.parse_args()
def strip_library(name):
"""
>>> strip_library("fuchsia.device/MAX_DEVICE_NAME_LEN")
'MAX_DEVICE_NAME_LEN'
>>> strip_library("SomethingGreat")
'SomethingGreat'
"""
return name[name.rfind('/') + 1:] # -1 + 1 returns the whole thing
def get_location_pieces(location_json):
file = location_json['filename']
if file != 'generated':
if file[:6] == '../../':
file = file[6:]
return (file, location_json['line'], location_json['column'])
def extract_consts(json):
"""
>>> extract_consts([
... {
... "name": "fuchsia.device/MAX_DEVICE_NAME_LEN",
... "location": {
... "filename": "../../zircon/system/fidl/fuchsia-device/controller.fidl",
... "line": 11,
... "column": 14
... },
... "type": {
... "kind": "primitive",
... "subtype": "uint64"
... },
... "value": {
... "kind": "literal",
... "literal": {
... "kind": "numeric",
... "value": "32",
... "expression": "32"
... }
... }
... },
... {
... "name": "fuchsia.device/MAX_DEVICE_PATH_LEN",
... "location": {
... "filename": "../../zircon/system/fidl/fuchsia-device/controller.fidl",
... "line": 13,
... "column": 22
... },
... "type": {
... "kind": "primitive",
... "subtype": "uint64"
... },
... "value": {
... "kind": "literal",
... "literal": {
... "kind": "numeric",
... "value": "1024",
... "expression": "1024"
... }
... }
... }
... ])
[Tag(MAX_DEVICE_NAME_LEN, zircon/system/fidl/fuchsia-device/controller.fidl, 11, 14),
Tag(MAX_DEVICE_PATH_LEN, zircon/system/fidl/fuchsia-device/controller.fidl, 13, 22)]
"""
result = []
for c in json:
tag = strip_library(c['name'])
result.append(Tag(tag, *get_location_pieces(c['location'])))
return result
def extract_name_and_members(json):
"""
Extracts the tags from enum_, struct_, or table_declarations. They're
similar enough that we can use the same function.
>>> extract_name_and_members([
... {
... "name": "fuchsia.wlan.device/SupportedPhy",
... "location": {
... "filename": "../../garnet/lib/wlan/fidl/phy.fidl",
... "line": 10,
... "column": 6
... },
... "type": "uint32",
... "members": [
... {
... "name": "DSSS",
... "location": {
... "filename": "../../garnet/lib/wlan/fidl/phy.fidl",
... "line": 11,
... "column": 5
... },
... "value": {
... "kind": "literal",
... "literal": {
... "kind": "numeric",
... "value": "0",
... "expression": "0"
... }
... }
... },
... {
... "name": "CCK",
... "location": {
... "filename": "../../garnet/lib/wlan/fidl/phy.fidl",
... "line": 12,
... "column": 5
... },
... "value": {
... "kind": "literal",
... "literal": {
... "kind": "numeric",
... "value": "1",
... "expression": "1"
... }
... }
... },
... {
... "name": "OFDM",
... "location": {
... "filename": "../../garnet/lib/wlan/fidl/phy.fidl",
... "line": 13,
... "column": 5
... },
... "value": {
... "kind": "literal",
... "literal": {
... "kind": "numeric",
... "value": "2",
... "expression": "2"
... }
... }
... },
... ]
... }])
[Tag(SupportedPhy, garnet/lib/wlan/fidl/phy.fidl, 10, 6),
Tag(DSSS, garnet/lib/wlan/fidl/phy.fidl, 11, 5),
Tag(CCK, garnet/lib/wlan/fidl/phy.fidl, 12, 5),
Tag(OFDM, garnet/lib/wlan/fidl/phy.fidl, 13, 5)]
Struct declarations:
>>> extract_name_and_members([
... {
... "name": "fuchsia.wlan.device/HtCapabilities",
... "location": {
... "filename": "../../garnet/lib/wlan/fidl/phy.fidl",
... "line": 31,
... "column": 8
... },
... "members": [
... {
... "type": {
... "kind": "primitive",
... "subtype": "uint16"
... },
... "name": "ht_capability_info",
... "location": {
... "filename": "../../garnet/lib/wlan/fidl/phy.fidl",
... "line": 32,
... "column": 12
... },
... },
... {
... "type": {
... "kind": "primitive",
... "subtype": "uint8"
... },
... "name": "ampdu_params",
... "location": {
... "filename": "../../garnet/lib/wlan/fidl/phy.fidl",
... "line": 33,
... "column": 11
... },
... },
... {
... "type": {
... "kind": "array",
... "element_type": {
... "kind": "primitive",
... "subtype": "uint8"
... },
... "element_count": 16
... },
... "name": "supported_mcs_set",
... "location": {
... "filename": "../../garnet/lib/wlan/fidl/phy.fidl",
... "line": 34,
... "column": 21
... },
... },
... {
... "type": {
... "kind": "primitive",
... "subtype": "uint16"
... },
... "name": "ht_ext_capabilities",
... "location": {
... "filename": "../../garnet/lib/wlan/fidl/phy.fidl",
... "line": 35,
... "column": 12
... },
... },
... {
... "type": {
... "kind": "primitive",
... "subtype": "uint32"
... },
... "name": "tx_beamforming_capabilities",
... "location": {
... "filename": "../../garnet/lib/wlan/fidl/phy.fidl",
... "line": 36,
... "column": 12
... },
... },
... {
... "type": {
... "kind": "primitive",
... "subtype": "uint8"
... },
... "name": "asel_capabilities",
... "location": {
... "filename": "../../garnet/lib/wlan/fidl/phy.fidl",
... "line": 37,
... "column": 11
... },
... }
... ],
... },
... {
... "name": "fuchsia.wlan.device/VhtCapabilities",
... "location": {
... "filename": "../../garnet/lib/wlan/fidl/phy.fidl",
... "line": 40,
... "column": 8
... },
... "members": [
... {
... "type": {
... "kind": "primitive",
... "subtype": "uint32"
... },
... "name": "vht_capability_info",
... "location": {
... "filename": "../../garnet/lib/wlan/fidl/phy.fidl",
... "line": 41,
... "column": 12
... },
... },
... {
... "type": {
... "kind": "primitive",
... "subtype": "uint64"
... },
... "name": "supported_vht_mcs_and_nss_set",
... "location": {
... "filename": "../../garnet/lib/wlan/fidl/phy.fidl",
... "line": 42,
... "column": 12
... },
... }
... ],
... },
... {
... "name": "fuchsia.wlan.device/ChannelList",
... "location": {
... "filename": "../../garnet/lib/wlan/fidl/phy.fidl",
... "line": 45,
... "column": 8
... },
... "members": [
... {
... "type": {
... "kind": "primitive",
... "subtype": "uint16"
... },
... "name": "base_freq",
... "location": {
... "filename": "../../garnet/lib/wlan/fidl/phy.fidl",
... "line": 46,
... "column": 12
... },
... },
... {
... "type": {
... "kind": "vector",
... "element_type": {
... "kind": "primitive",
... "subtype": "uint8"
... },
... "maybe_element_count": 200,
... },
... "name": "channels",
... "location": {
... "filename": "../../garnet/lib/wlan/fidl/phy.fidl",
... "line": 47,
... "column": 23
... },
... },
... ],
... }
... ])
[Tag(HtCapabilities, garnet/lib/wlan/fidl/phy.fidl, 31, 8),
Tag(ht_capability_info, garnet/lib/wlan/fidl/phy.fidl, 32, 12),
Tag(ampdu_params, garnet/lib/wlan/fidl/phy.fidl, 33, 11),
Tag(supported_mcs_set, garnet/lib/wlan/fidl/phy.fidl, 34, 21),
Tag(ht_ext_capabilities, garnet/lib/wlan/fidl/phy.fidl, 35, 12),
Tag(tx_beamforming_capabilities, garnet/lib/wlan/fidl/phy.fidl, 36, 12),
Tag(asel_capabilities, garnet/lib/wlan/fidl/phy.fidl, 37, 11),
Tag(VhtCapabilities, garnet/lib/wlan/fidl/phy.fidl, 40, 8),
Tag(vht_capability_info, garnet/lib/wlan/fidl/phy.fidl, 41, 12),
Tag(supported_vht_mcs_and_nss_set, garnet/lib/wlan/fidl/phy.fidl, 42, 12),
Tag(ChannelList, garnet/lib/wlan/fidl/phy.fidl, 45, 8),
Tag(base_freq, garnet/lib/wlan/fidl/phy.fidl, 46, 12),
Tag(channels, garnet/lib/wlan/fidl/phy.fidl, 47, 23)]
Tables declarations (note reserved: True members to be excluded):
>>> extract_name_and_members([
... {
... "name": "fuchsia.test.breakpoints/EventPayload",
... "location": {
... "filename": "../../src/sys/component_manager/tests/fidl/breakpoints.fidl",
... "line": 59,
... "column": 7
... },
... "members": [
... {
... "name": "routing_payload",
... "location": {
... "filename": "../../src/sys/component_manager/tests/fidl/breakpoints.fidl",
... "line": 61,
... "column": 23
... },
... },
... {
... "name": "use_capability_payload",
... "location": {
... "filename": "../../src/sys/component_manager/tests/fidl/breakpoints.fidl",
... "line": 64,
... "column": 29
... },
... }
... ],
... },
... {
... "name": "fuchsia.test.breakpoints/RoutingPayload",
... "location": {
... "filename": "../../src/sys/component_manager/tests/fidl/breakpoints.fidl",
... "line": 68,
... "column": 7
... },
... "members": [
... {
... "type": {
... "kind": "identifier",
... "identifier": "fuchsia.test.breakpoints/RoutingProtocol",
... },
... "name": "routing_protocol",
... "location": {
... "filename": "../../src/sys/component_manager/tests/fidl/breakpoints.fidl",
... "line": 71,
... "column": 24
... },
... },
... {
... "ordinal": 2,
... "type": {
... "kind": "string",
... "maybe_element_count": 50,
... },
... "name": "capability",
... "location": {
... "filename": "../../src/sys/component_manager/tests/fidl/breakpoints.fidl",
... "line": 74,
... "column": 37
... },
... "size": 16,
... "max_out_of_line": 56,
... "alignment": 8,
... "max_handles": 0
... }
... ],
... },
... {
... "name": "fuchsia.test.breakpoints/UseCapabilityPayload",
... "location": {
... "filename": "../../src/sys/component_manager/tests/fidl/breakpoints.fidl",
... "line": 78,
... "column": 7
... },
... "members": [
... {
... "type": {
... "kind": "string",
... "maybe_element_count": 50,
... },
... "name": "capability",
... "location": {
... "filename": "../../src/sys/component_manager/tests/fidl/breakpoints.fidl",
... "line": 80,
... "column": 37
... },
... },
... {
... "reserved": True,
... "location": {
... "column": 5,
... "line": 43,
... "filename": "../../sdk/fidl/fuchsia.feedback/data_provider.fidl"
... }
... }
... ],
... },
... ])
[Tag(EventPayload, src/sys/component_manager/tests/fidl/breakpoints.fidl, 59, 7),
Tag(routing_payload, src/sys/component_manager/tests/fidl/breakpoints.fidl, 61, 23),
Tag(use_capability_payload, src/sys/component_manager/tests/fidl/breakpoints.fidl, 64, 29),
Tag(RoutingPayload, src/sys/component_manager/tests/fidl/breakpoints.fidl, 68, 7),
Tag(routing_protocol, src/sys/component_manager/tests/fidl/breakpoints.fidl, 71, 24),
Tag(capability, src/sys/component_manager/tests/fidl/breakpoints.fidl, 74, 37),
Tag(UseCapabilityPayload, src/sys/component_manager/tests/fidl/breakpoints.fidl, 78, 7),
Tag(capability, src/sys/component_manager/tests/fidl/breakpoints.fidl, 80, 37)]
Bits declarations:
>>> extract_name_and_members([
... {
... "name": "fuchsia.io2/ConnectionInfoQuery",
... "location": {
... "filename": "../../sdk/fidl/fuchsia.io2/connection-info.fidl",
... "line": 33,
... "column": 6,
... "length": 19
... },
... "maybe_attributes": [
... {
... "name": "Doc",
... "value": ""
... }
... ],
... "type": {
... "kind": "primitive",
... "subtype": "uint64"
... },
... "mask": "7",
... "members": [
... {
... "name": "REPRESENTATION",
... "location": {
... "filename": "../../sdk/fidl/fuchsia.io2/connection-info.fidl",
... "line": 35,
... "column": 5,
... "length": 14
... },
... "value": {
... "kind": "literal",
... "value": "1",
... "expression": "0x1",
... "literal": {
... "kind": "numeric",
... "value": "1",
... "expression": "0x1"
... }
... },
... "maybe_attributes": [
... {
... "name": "Doc",
... "value": " Requests [`ConnectionInfo.representation`]."
... }
... ]
... },
... {
... "name": "RIGHTS",
... "location": {
... "filename": "../../sdk/fidl/fuchsia.io2/connection-info.fidl",
... "line": 38,
... "column": 5,
... "length": 6
... },
... "value": {
... "kind": "literal",
... "value": "2",
... "expression": "0x2",
... "literal": {
... "kind": "numeric",
... "value": "2",
... "expression": "0x2"
... }
... },
... "maybe_attributes": [
... {
... "name": "Doc",
... "value": " Requests [`ConnectionInfo.rights`]."
... }
... ]
... },
... {
... "name": "AVAILABLE_OPERATIONS",
... "location": {
... "filename": "../../sdk/fidl/fuchsia.io2/connection-info.fidl",
... "line": 41,
... "column": 5,
... "length": 20
... },
... "value": {
... "kind": "literal",
... "value": "4",
... "expression": "0x4",
... "literal": {
... "kind": "numeric",
... "value": "4",
... "expression": "0x4"
... }
... },
... "maybe_attributes": [
... {
... "name": "Doc",
... "value": ""
... }
... ]
... }
... ],
... "strict": True
... },
... {
... "name": "fuchsia.io2/NodeProtocols",
... "location": {
... "filename": "../../sdk/fidl/fuchsia.io2/connection-info.fidl",
... "line": 102,
... "column": 6,
... "length": 13
... },
... "maybe_attributes": [
... {
... "name": "Doc",
... "value": ""
... }
... ],
... "type": {
... "kind": "primitive",
... "subtype": "uint64"
... },
... "mask": "805306495",
... "members": [
... {
... "name": "CONNECTOR",
... "location": {
... "filename": "../../sdk/fidl/fuchsia.io2/connection-info.fidl",
... "line": 106,
... "column": 5,
... "length": 9
... },
... "value": {
... "kind": "literal",
... "value": "1",
... "expression": "0x1",
... "literal": {
... "kind": "numeric",
... "value": "1",
... "expression": "0x1"
... }
... },
... "maybe_attributes": [
... {
... "name": "Doc",
... "value": ""
... }
... ]
... },
... {
... "name": "DIRECTORY",
... "location": {
... "filename": "../../sdk/fidl/fuchsia.io2/connection-info.fidl",
... "line": 110,
... "column": 5,
... "length": 9
... },
... "value": {
... "kind": "literal",
... "value": "2",
... "expression": "0x2",
... "literal": {
... "kind": "numeric",
... "value": "2",
... "expression": "0x2"
... }
... },
... "maybe_attributes": [
... {
... "name": "Doc",
... "value": ""
... }
... ]
... },
... {
... "name": "FILE",
... "location": {
... "filename": "../../sdk/fidl/fuchsia.io2/connection-info.fidl",
... "line": 114,
... "column": 5,
... "length": 4
... },
... "value": {
... "kind": "literal",
... "value": "4",
... "expression": "0x4",
... "literal": {
... "kind": "numeric",
... "value": "4",
... "expression": "0x4"
... }
... },
... "maybe_attributes": [
... {
... "name": "Doc",
... "value": ""
... }
... ]
... },
... {
... "name": "MEMORY",
... "location": {
... "filename": "../../sdk/fidl/fuchsia.io2/connection-info.fidl",
... "line": 121,
... "column": 5,
... "length": 6
... },
... "value": {
... "kind": "literal",
... "value": "8",
... "expression": "0x8",
... "literal": {
... "kind": "numeric",
... "value": "8",
... "expression": "0x8"
... }
... },
... "maybe_attributes": [
... {
... "name": "Doc",
... "value": ""
... }
... ]
... },
... {
... "name": "POSIX_SOCKET",
... "location": {
... "filename": "../../sdk/fidl/fuchsia.io2/connection-info.fidl",
... "line": 125,
... "column": 5,
... "length": 12
... },
... "value": {
... "kind": "literal",
... "value": "16",
... "expression": "0x10",
... "literal": {
... "kind": "numeric",
... "value": "16",
... "expression": "0x10"
... }
... },
... "maybe_attributes": [
... {
... "name": "Doc",
... "value": ""
... }
... ]
... },
... {
... "name": "PIPE",
... "location": {
... "filename": "../../sdk/fidl/fuchsia.io2/connection-info.fidl",
... "line": 129,
... "column": 5,
... "length": 4
... },
... "value": {
... "kind": "literal",
... "value": "32",
... "expression": "0x20",
... "literal": {
... "kind": "numeric",
... "value": "32",
... "expression": "0x20"
... }
... },
... "maybe_attributes": [
... {
... "name": "Doc",
... "value": ""
... }
... ]
... },
... {
... "name": "DEBUGLOG",
... "location": {
... "filename": "../../sdk/fidl/fuchsia.io2/connection-info.fidl",
... "line": 133,
... "column": 5,
... "length": 8
... },
... "value": {
... "kind": "literal",
... "value": "64",
... "expression": "0x40",
... "literal": {
... "kind": "numeric",
... "value": "64",
... "expression": "0x40"
... }
... },
... "maybe_attributes": [
... {
... "name": "Doc",
... "value": ""
... }
... ]
... },
... {
... "name": "DEVICE",
... "location": {
... "filename": "../../sdk/fidl/fuchsia.io2/connection-info.fidl",
... "line": 136,
... "column": 5,
... "length": 6
... },
... "value": {
... "kind": "literal",
... "value": "268435456",
... "expression": "0x10000000",
... "literal": {
... "kind": "numeric",
... "value": "268435456",
... "expression": "0x10000000"
... }
... },
... "maybe_attributes": [
... {
... "name": "Deprecated",
... "value": "devices will be services in the future"
... }
... ]
... },
... {
... "name": "TTY",
... "location": {
... "filename": "../../sdk/fidl/fuchsia.io2/connection-info.fidl",
... "line": 139,
... "column": 5,
... "length": 3
... },
... "value": {
... "kind": "literal",
... "value": "536870912",
... "expression": "0x20000000",
... "literal": {
... "kind": "numeric",
... "value": "536870912",
... "expression": "0x20000000"
... }
... },
... "maybe_attributes": [
... {
... "name": "Deprecated",
... "value": "tty functionalities may be covered by a tty service"
... }
... ]
... }
... ],
... "strict": True
... }])
[Tag(ConnectionInfoQuery, sdk/fidl/fuchsia.io2/connection-info.fidl, 33, 6),
Tag(REPRESENTATION, sdk/fidl/fuchsia.io2/connection-info.fidl, 35, 5),
Tag(RIGHTS, sdk/fidl/fuchsia.io2/connection-info.fidl, 38, 5),
Tag(AVAILABLE_OPERATIONS, sdk/fidl/fuchsia.io2/connection-info.fidl, 41, 5),
Tag(NodeProtocols, sdk/fidl/fuchsia.io2/connection-info.fidl, 102, 6),
Tag(CONNECTOR, sdk/fidl/fuchsia.io2/connection-info.fidl, 106, 5),
Tag(DIRECTORY, sdk/fidl/fuchsia.io2/connection-info.fidl, 110, 5),
Tag(FILE, sdk/fidl/fuchsia.io2/connection-info.fidl, 114, 5),
Tag(MEMORY, sdk/fidl/fuchsia.io2/connection-info.fidl, 121, 5),
Tag(POSIX_SOCKET, sdk/fidl/fuchsia.io2/connection-info.fidl, 125, 5),
Tag(PIPE, sdk/fidl/fuchsia.io2/connection-info.fidl, 129, 5),
Tag(DEBUGLOG, sdk/fidl/fuchsia.io2/connection-info.fidl, 133, 5),
Tag(DEVICE, sdk/fidl/fuchsia.io2/connection-info.fidl, 136, 5),
Tag(TTY, sdk/fidl/fuchsia.io2/connection-info.fidl, 139, 5)]
"""
result = []
for x in json:
tag = strip_library(x['name'])
result.append(Tag(tag, *get_location_pieces(x['location'])))
for member in x['members']:
if member.get('reserved'):
continue
result.append(
Tag(member['name'], *get_location_pieces(member['location'])))
return result
def extract_interfaces(json):
"""
>>> extract_interfaces([
... {
... "name": "fuchsia.wlan.device/Phy",
... "location": {
... "filename": "../../garnet/lib/wlan/fidl/phy.fidl",
... "line": 112,
... "column": 10
... },
... "methods": [
... {
... "name": "Query",
... "location": {
... "filename": "../../garnet/lib/wlan/fidl/phy.fidl",
... "line": 113,
... "column": 5
... },
... },
... {
... "name": "CreateIface",
... "location": {
... "filename": "../../garnet/lib/wlan/fidl/phy.fidl",
... "line": 114,
... "column": 5
... },
... },
... ]
... },
... {
... "name": "fuchsia.wlan.device/Connector",
... "location": {
... "filename": "../../garnet/lib/wlan/fidl/phy.fidl",
... "line": 123,
... "column": 10
... },
... "methods": [
... {
... "name": "Connect",
... "location": {
... "filename": "../../garnet/lib/wlan/fidl/phy.fidl",
... "line": 124,
... "column": 5
... },
... }
... ]
... },
... ])
[Tag(Phy, garnet/lib/wlan/fidl/phy.fidl, 112, 10),
Tag(Query, garnet/lib/wlan/fidl/phy.fidl, 113, 5),
Tag(CreateIface, garnet/lib/wlan/fidl/phy.fidl, 114, 5),
Tag(Connector, garnet/lib/wlan/fidl/phy.fidl, 123, 10),
Tag(Connect, garnet/lib/wlan/fidl/phy.fidl, 124, 5)]
Some special handling for Transport=Syscall to add the leading zx_ as an
alternate name.
>>> extract_interfaces([
... {
... "name": "zz/profile",
... "location": {
... "filename": "../../zircon/syscalls/profile.fidl",
... "line": 38,
... "column": 10
... },
... "maybe_attributes": [
... {
... "name": "Transport",
... "value": "Syscall"
... }
... ],
... "methods": [
... {
... "name": "profile_create",
... "location": {
... "filename": "../../zircon/syscalls/profile.fidl",
... "line": 41,
... "column": 5
... },
... }
... ]
... },
... {
... "name": "zz/socket",
... "location": {
... "filename": "../../zircon/syscalls/socket.fidl",
... "line": 9,
... "column": 10
... },
... "maybe_attributes": [
... {
... "name": "Transport",
... "value": "Syscall"
... }
... ],
... "methods": [
... {
... "name": "socket_create",
... "location": {
... "filename": "../../zircon/syscalls/socket.fidl",
... "line": 11,
... "column": 5
... },
... },
... {
... "name": "socket_write",
... "location": {
... "filename": "../../zircon/syscalls/socket.fidl",
... "line": 15,
... "column": 5
... },
... },
... ]
... },
... ])
[Tag(profile, zircon/syscalls/profile.fidl, 38, 10),
Tag(profile_create, zircon/syscalls/profile.fidl, 41, 5),
Tag(zx_profile_create, zircon/syscalls/profile.fidl, 41, 5),
Tag(socket, zircon/syscalls/socket.fidl, 9, 10),
Tag(socket_create, zircon/syscalls/socket.fidl, 11, 5),
Tag(zx_socket_create, zircon/syscalls/socket.fidl, 11, 5),
Tag(socket_write, zircon/syscalls/socket.fidl, 15, 5),
Tag(zx_socket_write, zircon/syscalls/socket.fidl, 15, 5)]
"""
def is_transport_syscall(x):
attribs = x.get('maybe_attributes', [])
for attrib in attribs:
if attrib.get('name') == 'Transport' and attrib.get(
'value') == 'Syscall':
return True
return False
result = []
for i in json:
tag = strip_library(i['name'])
is_syscall = is_transport_syscall(i)
result.append(Tag(tag, *get_location_pieces(i['location'])))
for method in i['methods']:
result.append(
Tag(method['name'], *get_location_pieces(method['location'])))
if is_syscall:
result.append(
Tag(
'zx_' + method['name'],
*get_location_pieces(method['location'])))
return result
def get_tags(json, tags):
tags.extend(extract_name_and_members(json['bits_declarations']))
tags.extend(extract_consts(json['const_declarations']))
tags.extend(extract_name_and_members(json['enum_declarations']))
tags.extend(extract_interfaces(json['interface_declarations']))
tags.extend(extract_name_and_members(json['struct_declarations']))
tags.extend(extract_name_and_members(json['table_declarations']))
tags.extend(extract_name_and_members(json['union_declarations']))
def get_syscall_tags(json, tags):
tags.extend
def main():
args = parse_args()
matches = []
for root, dirnames, filenames in os.walk(args.build_dir):
for filename in fnmatch.filter(filenames, '*.fidl.json'):
matches.append(os.path.join(root, filename))
# Include the syscalls ir file too.
matches.append(
os.path.join(args.build_dir, 'gen', 'zircon', 'vdso', 'zx.fidl.json'))
tags = []
for filename in matches:
with open(filename) as f:
get_tags(json.load(f), tags)
tags = [x for x in tags if x.file != 'generated']
tags.sort(key=lambda x: x.tag)
with open(args.output, 'w') as f:
f.write('!_TAG_FILE_SORTED\t1\tgenerated by generated-fidl-tags.py\n')
for t in tags:
f.write(
                '%s\t%s\t/\\%%%dl\\%%%dc/\n' % (t.tag, t.file, t.line, t.column))
if __name__ == '__main__':
if len(sys.argv) > 1 and sys.argv[1] == 'test':
import doctest
doctest.testmod(optionflags=doctest.NORMALIZE_WHITESPACE)
else:
sys.exit(main())
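# Example of one emitted tags line (illustrative values): for
# Tag('Query', 'garnet/lib/wlan/fidl/phy.fidl', 113, 5) the writer in main()
# produces
#     Query<TAB>garnet/lib/wlan/fidl/phy.fidl<TAB>/\%113l\%5c/
# which Vim interprets as "jump to line 113, column 5" in that file.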
|
pyzoo/zoo/orca/data/elastic_search.py | limn2o4/analytics-zoo | 2,970 | 12682105 |
#
# Copyright 2018 Analytics Zoo Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from zoo.orca import OrcaContext
from zoo.common.nncontext import init_nncontext
class elastic_search:
"""
    Primary DataFrame-based interface for loading data from Elasticsearch,
    defining the API to read data from ES into a DataFrame.
"""
def __init__(self):
pass
@staticmethod
def read_df(esConfig, esResource, schema=None):
"""
Read the data from elastic search into DataFrame.
:param esConfig: Dictionary which represents configuration for
elastic search(eg. ip, port etc).
:param esResource: resource file in elastic search.
:param schema: Optional. Defines the schema of Spark dataframe.
If each column in Es is single value, don't need set schema.
:return: Spark DataFrame. Each row represents a document in ES.
"""
sc = init_nncontext()
spark = OrcaContext.get_spark_session()
reader = spark.read.format("org.elasticsearch.spark.sql")
for key in esConfig:
reader.option(key, esConfig[key])
if schema:
reader.schema(schema)
df = reader.load(esResource)
return df
@staticmethod
def flatten_df(df):
fields = elastic_search.flatten(df.schema)
flatten_df = df.select(fields)
return flatten_df
@staticmethod
def flatten(schema, prefix=None):
from pyspark.sql.types import StructType
fields = []
for field in schema.fields:
name = prefix + '.' + field.name if prefix else field.name
dtype = field.dataType
if isinstance(dtype, StructType):
fields += elastic_search.flatten(dtype, prefix=name)
else:
fields.append(name)
return fields
@staticmethod
def write_df(esConfig, esResource, df):
"""
Write the Spark DataFrame to elastic search.
        :param esConfig: Dictionary of Elasticsearch configuration options
        (e.g. ip, port, etc.).
        :param esResource: Resource in Elasticsearch to write to.
        :param df: Spark DataFrame that will be saved.
"""
wdf = df.write.format("org.elasticsearch.spark.sql")\
.option("es.resource", esResource)
for key in esConfig:
wdf.option(key, esConfig[key])
wdf.save()
@staticmethod
def read_rdd(esConfig, esResource=None, filter=None, esQuery=None):
"""
Read the data from elastic search into Spark RDD.
        :param esConfig: Dictionary of Elasticsearch configuration options
        (e.g. ip, port, ES query, etc.).
        :param esResource: Optional. Resource in Elasticsearch to read from.
        It can also be set in esConfig.
        :param filter: Optional. Request only the given fields from Elasticsearch.
        :param esQuery: Optional. Elasticsearch query to apply.
        :return: Spark RDD
"""
sc = init_nncontext()
if "es.resource" not in esConfig:
esConfig["es.resource"] = esResource
if filter is not None:
esConfig["es.read.source.filter"] = filter
if esQuery is not None:
esConfig["es.query"] = esQuery
rdd = sc.newAPIHadoopRDD("org.elasticsearch.hadoop.mr.EsInputFormat",
"org.apache.hadoop.io.NullWritable",
"org.elasticsearch.hadoop.mr.LinkedMapWritable",
conf=esConfig)
return rdd
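# A minimal usage sketch of this interface (the host, port and resource names
# below are hypothetical placeholders, not values from this repository):
#
#   es_conf = {"es.nodes": "localhost", "es.port": "9200"}
#   df = elastic_search.read_df(es_conf, "my_index/_doc")
#   flat_df = elastic_search.flatten_df(df)
#   elastic_search.write_df(es_conf, "my_index_copy/_doc", flat_df)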
|
tutorial/plot_getting_data.py | jiduque/scikit-fda | 147 | 12682114 | """
Getting the data
================
In this section, we will discuss how to get functional data to
use in scikit-fda. We will briefly describe the
:class:`~skfda.representation.grid.FDataGrid` class, which is the type that
scikit-fda uses for storing and working with functional data in discretized
form. We will discuss also how to import functional data from several sources
and show how to fetch and load existing datasets popular in the :term:`FDA`
literature.
.. Disable isort
isort:skip_file
"""
# Author: <NAME>
# License: MIT
#
# sphinx_gallery_thumbnail_number = 6
##############################################################################
# The FDataGrid class
# -------------------
#
# In order to use scikit-fda, first we need functional data to analyze.
# A common case is to have each functional observation measured at the same
# points.
# This kind of functional data is easily representable in scikit-fda using
# the :class:`~skfda.representation.grid.FDataGrid` class.
# The :class:`~skfda.representation.grid.FDataGrid` has two important
# attributes: ``data_matrix`` and ``grid_points``.
#
# The attribute ``grid_points`` is a tuple with the same length as the
# number of domain dimensions (that is, one for curves, two for surfaces...).
# Each of its elements is a 1D numpy :class:`~numpy.ndarray` containing the
# grid points for that particular dimension,
# .. math::
# ((t_1, \ldots, t_{M_i}))_{i=1}^p,
# where :math:`M_i` is the number of measurement points for the :math:`i`-th
# "argument" or domain coordinate of the function, and :math:`p` is the domain
# dimension.
#
# The attribute ``data_matrix`` is a
# numpy :class:`~numpy.ndarray` containing the measured values of the
# functions in the grid spanned by the grid points. For functions
# :math:`\{x_i: \mathbb{R}^p \to \mathbb{R}^q\}_{i=1}^N` this is a tensor
# with dimensions :math:`N \times M_1 \times \ldots \times M_p \times q`.
##############################################################################
# In order to create a :class:`~skfda.representation.grid.FDataGrid`, these
# attributes may be provided. The attributes are converted to
# :class:`~numpy.ndarray` when necessary.
#
# .. note::
#
# The grid points can be omitted,
# and in that case their number is inferred from the dimensions of
# ``data_matrix`` and they are automatically assigned as equispaced points
# in the unitary cube in the domain set.
#
# In the common case of functions with domain dimension of 1, the list of
# grid points can be passed directly as ``grid_points``.
#
# If the codomain dimension is 1, the last dimension of ``data_matrix``
# can be dropped.
##############################################################################
# The following example shows the creation of a
# :class:`~skfda.representation.grid.FDataGrid` with two functions (curves)
# :math:`\{x_i: \mathbb{R} \to \mathbb{R}\}, i=1,2` measured at the same
# (non-equispaced) points.
import skfda
import matplotlib.pyplot as plt
grid_points = [0, 0.2, 0.5, 0.9, 1] # Grid points of the curves
data_matrix = [
[0, 0.2, 0.5, 0.9, 1], # First observation
[0, 0.04, 0.25, 0.81, 1], # Second observation
]
fd = skfda.FDataGrid(
data_matrix=data_matrix,
grid_points=grid_points,
)
fd.plot()
plt.show()
##############################################################################
# Advanced example
# ^^^^^^^^^^^^^^^^
#
# In order to better understand the FDataGrid structure, you can consider the
# following example, in which a :class:`~skfda.representation.grid.FDataGrid`
# object is created, containing just one function (vector-valued surface)
# :math:`x: \mathbb{R}^2 \to \mathbb{R}^4`.
grid_points_surface = [
[0.2, 0.5, 0.7], # Measurement points in first domain dimension
[0, 1.5], # Measurement points in second domain dimension
]
data_matrix_surface = [
# First observation
[
# 0.2
[
# Value at (0.2, 0)
[1, 2, 3, 4.1],
# Value at (0.2, 1.5)
[0, 1, -1.3, 2],
],
# 0.5
[
# Value at (0.5, 0)
[-2, 0, 5.5, 7],
# Value at (0.5, 1.5)
[2, 1.1, -1, -2],
],
# 0.7
[
# Value at (0.7, 0)
[0, 0, 1.1, 1],
# Value at (0.7, 1.5)
[-3, 5, -0.5, -2],
],
],
# This example has only one observation. Next observations would be
# added here.
]
fd = skfda.FDataGrid(
data_matrix=data_matrix_surface,
grid_points=grid_points_surface,
)
fd.plot()
plt.show()
##############################################################################
# Importing data
# --------------
#
# Usually one does not construct the functions manually, but instead uses
# measurements already stored in a common format, such as comma-separated
# values (CSV), attribute-relation file format (ARFF), or Matlab and R formats.
#
# If your data is in one of these formats, you can import it into a numpy
# array using the IO functions available in
# `Numpy <https://numpy.org/devdocs/reference/routines.io.html>`_ (for simple
# text-based or binary formats, such as CSV) or in
# `Scipy <https://docs.scipy.org/doc/scipy/reference/io.html>`_ (for Matlab,
# Fortran or ARFF files). For importing data in the R format one can also
# use the package `RData <https://rdata.readthedocs.io>`_ which is already a
# dependency of scikit-fda, as it is used to load the example datasets.
##############################################################################
# Once your data has been introduced as a :class:`~numpy.ndarray` instance,
# you will need to give it the proper dimensions and use it to instantiate
# a functional data object.
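##############################################################################
# For instance, a small comma-separated file whose rows are curves sampled at
# the same points could be read with :func:`numpy.loadtxt` and wrapped in a
# :class:`~skfda.representation.grid.FDataGrid`. The following is a minimal,
# self-contained sketch; the CSV content and the grid points are made up
# purely for illustration.
import io
import numpy as np
csv_curves = io.StringIO(
    "0.0,0.1,0.3,0.7,1.0\n"
    "0.0,0.2,0.5,0.8,1.0\n"
)
data_matrix_csv = np.loadtxt(csv_curves, delimiter=",")
fd_csv = skfda.FDataGrid(
    data_matrix=data_matrix_csv,
    grid_points=[0, 0.25, 0.5, 0.75, 1],
)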
##############################################################################
# .. note::
#
# :class:`Pandas DataFrames <pandas.DataFrame>` are also popular as
# datasets containers in the Python scientific ecosystem. If you have
# data in a Pandas DataFrame, you can extract its content as a Numpy
# array using the method :meth:`~pandas.DataFrame.to_numpy` of the
# DataFrame.
##############################################################################
# As an example, we will load the
# :func:`digits dataset <sklearn.datasets.load_digits>` of scikit-learn, which
# is a preprocessed subset of the MNIST dataset, containing digit images. The
# data is already a numpy array. As the data has been flattened into a 1D
# vector of pixels, we need to reshape the arrays to their original 8x8 shape.
# Then this array can be used to construct the digits as surfaces.
from sklearn.datasets import load_digits
X, y = load_digits(return_X_y=True)
X = X.reshape(-1, 8, 8)
fd = skfda.FDataGrid(X)
# Plot the first 2 observations
fd[0].plot()
fd[1].plot()
plt.show()
##############################################################################
# Common datasets
# ---------------
#
# scikit-fda can download and import for you several of the most popular
# datasets in the :term:`FDA` literature, such as the Berkeley Growth
# dataset (function :func:`~skfda.datasets.fetch_growth`) or the Canadian
# Weather dataset (function :func:`~skfda.datasets.fetch_weather`). These
# datasets are often useful as benchmarks, in order to compare results
# between different algorithms, or simply as examples to use in teaching or
# research.
X, y = skfda.datasets.fetch_growth(return_X_y=True)
X.plot(group=y)
plt.show()
##############################################################################
# Datasets from CRAN
# ^^^^^^^^^^^^^^^^^^
#
# If you want to work with a dataset for which no fetching function exists, and
# you know that it is available inside an R package in the CRAN repository, you
# can try using the function :func:`~skfda.datasets.fetch_cran`. This function
# will load the package, fetch the dataset and convert it to Python objects
# using the packages
# `scikit-datasets <https://github.com/daviddiazvico/scikit-datasets>`_ and
# `RData <https://rdata.readthedocs.io>`_. As datasets in CRAN follow no
# particular structure, you will need to know how the dataset is structured
# internally in order to use it properly.
##############################################################################
# .. note::
#
# Functional data objects from some packages, such as
# `fda.usc <https://cran.r-project.org/web/packages/fda.usc/index.html>`_
# are automatically recognized as such and converted to
# :class:`~skfda.representation.grid.FDataGrid` instances. This
# behaviour can be disabled or customized to work with more packages.
data = skfda.datasets.fetch_cran("MCO", "fda.usc")
data["MCO"]["intact"].plot()
plt.show()
##############################################################################
# Datasets from the UEA & UCR Time Series Classification Repository
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
#
# The `UEA & UCR Time Series Classification Repository
# <http://www.timeseriesclassification.com/>`_ is a popular repository
# for classification problems involving time series data. The datasets used
# can be considered also as functional observations, where the functions
# involved have domain dimension of 1, and the grid points are
# equispaced. Thus, they have also been used in the :term:`FDA` literature.
# The original UCR datasets are univariate time series, while the new UEA
# datasets incorporate also vector-valued data.
# In scikit-fda, the function :func:`~skfda.datasets.fetch_ucr` can be used
# to obtain both kinds of datasets as
# :class:`~skfda.representation.grid.FDataGrid` instances.
# Load ArrowHead dataset from UCR
dataset = skfda.datasets.fetch_ucr("ArrowHead")
dataset["data"].plot()
plt.show()
##############################################################################
# Load BasicMotions dataset from UEA
dataset = skfda.datasets.fetch_ucr("BasicMotions")
dataset["data"].plot()
plt.show()
##############################################################################
# Synthetic data
# --------------
#
# Sometimes it is not enough to have real-world data at your disposal.
# Perhaps the messy nature of real-world data makes it difficult to detect when
# a particular algorithm has a strange behaviour. Perhaps you want to see how
# it performs under a simplified model. Maybe you want to see what happens
# when your data has particular characteristics, for which no dataset is
# available. Or maybe you only want to illustrate a concept without having
# to introduce a particular set of data.
#
# In those cases, the ability to use generated data is desirable. To aid this
# use case, scikit-fda provides several functions that generate data
# according to some model. These functions are in the
# :doc:`datasets </modules/datasets>` module and have the prefix ``make_``.
# Maybe the most useful of those are the functions
# :func:`skfda.datasets.make_gaussian_process` and
# :func:`skfda.datasets.make_gaussian` which can be used to generate Gaussian
# processes and Gaussian fields with different covariance functions.
import numpy as np
cov = skfda.misc.covariances.Exponential(length_scale=0.1)
fd = skfda.datasets.make_gaussian_process(
start=0,
stop=4,
n_samples=5,
n_features=100,
mean=lambda t: np.power(t, 2),
cov=cov,
)
fd.plot()
plt.show()
##############################################################################
# In order to know all the available functionalities to load existing and
# synthetic datasets it is recommended to look at the documentation of the
# :doc:`datasets </modules/datasets>` module.
|
frameworks/pytorch/examples/5_transformer.py | Michoumichmich/antares | 132 | 12682132 | #!/usr/bin/env python3
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import torch
from torch.contrib.antares.custom_op import CustomOp
device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
dtype = torch.float32
kwargs = {'dtype': dtype,
'device': device,
'requires_grad': False}
B, S, N, H, I = 6, 128, 12, 48, 1024
def create_param(name, shape):
return (torch.rand(shape, **kwargs) - 0.5) * 0.001
input_tensor = torch.ones([B, S, N, H], **kwargs)
qkv_weight = create_param('qkv_weight', [3, N, H, N, H])
qkv_bias = create_param('qkv_bias', [3, N, H])
attention_weight = create_param('attention_weight', [N, H, N, H])
attention_bias = create_param('attention_bias', [N, H])
intermediate_weight = create_param('intermediate_weight', [N, H, I])
intermediate_bias = create_param('intermediate_bias', [I])
output_weight = create_param('output_weight', [I, N, H])
output_bias = create_param('output_bias', [N, H])
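# The Antares IR below expresses a single transformer encoder layer end to end:
# a fused QKV projection, multi-head attention with a softmax over the key
# dimension, an output projection with a residual connection and layer
# normalization, then a GELU feed-forward block followed by a second residual
# connection and layer normalization.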
layer_output_norm = CustomOp(ir=f'''
merged_layer_local[R, B, S1, N1, H1] +=! input_tensor[B, S1, N, H] * qkv_weight[R, N, H, N1, H1];
merged_layer_trans[R, B, N1, S1, H1] = merged_layer_local[R, B, S1, N1, H1] + qkv_bias[R, N1, H1];
attention_scores[B, N1, S1, S2] +=! merged_layer_trans[0, B, N1, S1, H1] * merged_layer_trans[1, B, N1, S2, H1] / const({H}).cast(`float32`);
softmax_1_temp0[B, N1] >=! attention_scores[B, N1, S1, S2];
softmax_1_temp1[B, N1] +=! (attention_scores[B, N1, S1, S2] - softmax_1_temp0[B, N1]).call(`exp`);
attention_probs[B, N1, S1, S2] = (attention_scores[B, N1, S1, S2] - softmax_1_temp0[B, N1]).call(`exp`) / softmax_1_temp1[B, N1];
context_layer_trans[B, S1, N1, H1] +=! attention_probs[B, N1, S1, S2] * merged_layer_trans[2, B, N1, S2, H1];
attention_local[B, S1, N2, H2] +=! context_layer_trans[B, S1, N1, H1] * attention_weight[N1, H1, N2, H2];
attention_output[B, S1, N2, H2] = attention_local[B, S1, N2, H2] + attention_bias[N2, H2];
layer_norm_1_src[B, S1, N2, H2] = attention_output[B, S1, N2, H2] + input_tensor[B, S1, N2, H2];
layer_norm_1_temp0[B, S1] += layer_norm_1_src[B, S1, N2, H2];
layer_norm_1_temp1[B, S1] += layer_norm_1_src[B, S1, N2, H2] * layer_norm_1_src[B, S1, N2, H2];
attention_output_norm[B, S1, N2, H2] = (layer_norm_1_src[B, S1, N2, H2] * {N * H} - layer_norm_1_temp0[B, S1]) * (layer_norm_1_temp0[B, S1] * {N * H} - layer_norm_1_temp1[B, S1] * layer_norm_1_temp1[B, S1]).call(`max`, [1e-8]).call(`rsqrt`);
intermediate_local[B, S1, I] +=! attention_output_norm[B, S1, N2, H2] * intermediate_weight[N2, H2, I];
intermediate[B, S1, I] = intermediate_local[B, S1, I] + intermediate_bias[I];
intermediate_gelu[B, S1, I] = 0.5 * (1.0 + (0.79788456 * (intermediate[B, S1, I] + 0.044715 * intermediate[B, S1, I] * intermediate[B, S1, I] * intermediate[B, S1, I])).call(`tanh`));
layer_output_local[B, S1, N2, H2] +=! intermediate_gelu[B, S1, I] * output_weight[I, N2, H2];
layer_output[B, S1, N2, H2] = layer_output_local[B, S1, N2, H2] + output_bias[N2, H2];
layer_norm_2_src[B, S1, N2, H2] = layer_output[B, S1, N2, H2] + attention_output_norm[B, S1, N2, H2];
layer_norm_2_temp0[B, S1] += layer_norm_2_src[B, S1, N2, H2];
layer_norm_2_temp1[B, S1] += layer_norm_2_src[B, S1, N2, H2] * layer_norm_2_src[B, S1, N2, H2];
layer_output_norm[B, S1, N2, H2] = (layer_norm_2_src[B, S1, N2, H2] * {N * H} - layer_norm_2_temp0[B, S1]) * (layer_norm_2_temp0[B, S1] * {N * H} - layer_norm_2_temp1[B, S1] * layer_norm_2_temp1[B, S1]).call(`max`, [1e-8]).call(`rsqrt`);
''', input_orders={
'input_tensor': input_tensor,
'qkv_weight': qkv_weight,
'qkv_bias': qkv_bias,
'attention_weight': attention_weight,
'attention_bias': attention_bias,
'intermediate_weight': intermediate_weight,
'intermediate_bias': intermediate_bias,
'output_weight': output_weight,
'output_bias': output_bias,
}).to(device).emit()
result = layer_output_norm(input_tensor, qkv_weight, qkv_bias, attention_weight, attention_bias, intermediate_weight, intermediate_bias, output_weight, output_bias)
print('The result of tensor `%s` is:\n%s' % (layer_output_norm.output_names[0], result))
|
cdec-corpus/xml-tok.py | muyeby/Eval-Gen | 114 | 12682156 | #!/usr/bin/env python
import os
import re
import subprocess
import sys
# Tokenize XML files with tokenize-anything.sh
# in: <seg id="963"> The earnings on its 10-year bonds are 28.45%. </seg>
# out: <seg id="963"> The earnings on its 10 - year bonds are 28.45 % . </seg>
def escape(s):
    return s.replace('&', '&amp;').replace('>', '&gt;').replace('<', '&lt;').replace('"', '&quot;').replace('\'', '&apos;')
def unescape(s):
    return s.replace('&gt;', '>').replace('&lt;', '<').replace('&quot;', '"').replace('&apos;', '\'').replace('&amp;', '&')
def main():
tok = subprocess.Popen([os.path.join(os.path.dirname(__file__), 'tokenize-anything.sh'), '-u'], stdin=subprocess.PIPE, stdout=subprocess.PIPE)
while True:
line = sys.stdin.readline()
if not line:
break
line = line.strip()
pieces = []
eol = len(line)
pos = 0
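        # Scan the line: feed the text between XML tags through the external
        # tokenizer, while copying the tags themselves (<...>) through verbatim.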
while pos < eol:
next = line.find('<', pos)
if next == -1:
next = eol
tok.stdin.write('{}\n'.format(unescape(line[pos:next])))
pieces.append(escape(tok.stdout.readline().strip()))
if next == eol:
break
pos = line.find('>', next + 1)
if pos == -1:
pos = eol
else:
pos += 1
pieces.append(line[next:pos])
sys.stdout.write('{}\n'.format(' '.join(pieces).strip()))
tok.stdin.close()
tok.wait()
if __name__ == '__main__':
main()
|
models/SPH3D_modelnet.py | GaHooooo/SPH3D-GCN | 153 | 12682168 | <reponame>GaHooooo/SPH3D-GCN
import tensorflow as tf
import sys
import os
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
sys.path.append(BASE_DIR)
sys.path.append(os.path.join(BASE_DIR, '../utils'))
import sph3gcn_util as s3g_util
def normalize_xyz(points):
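    # Center the point cloud at the origin and scale it to fit inside the unit sphere.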
points -= tf.reduce_mean(points,axis=1,keepdims=True)
scale = tf.reduce_max(tf.reduce_sum(tf.square(points),axis=-1,keepdims=True),axis=1,keepdims=True)
scale = tf.sqrt(scale,name='normalize')
points /= scale
return points
def _separable_conv3d_block(net, list_channels, bin_size, nn_index, nn_count, filt_idx,
name, depth_multiplier=None, weight_decay=None, reuse=None,
with_bn=True, with_bias=True, is_training=None):
for l, num_out_channels in enumerate(list_channels):
scope = name + '_' + str(l+1) # number from 1, not 0
net = s3g_util.separable_conv3d(net, num_out_channels, bin_size,
depth_multiplier[l], scope, nn_index,
nn_count, filt_idx, weight_decay=weight_decay,
with_bn=with_bn, with_bias=with_bias,
reuse=reuse, is_training=is_training)
return net
def get_model(points, is_training, config=None):
""" Classification Network, input is BxNx3, output Bx40 """
batch_size = points.get_shape()[0].value
num_point = points.get_shape()[1].value
end_points = {}
assert(num_point==config.num_input)
if config.normalize:
points = normalize_xyz(points)
xyz = points
query = tf.reduce_mean(xyz, axis=1, keepdims=True) # the global viewing point
reuse = None
net = s3g_util.pointwise_conv3d(xyz, config.mlp, 'mlp1',
weight_decay=config.weight_decay,
with_bn=config.with_bn, with_bias=config.with_bias,
reuse=reuse, is_training=is_training)
global_feat = []
for l in range(len(config.radius)):
if config.use_raw:
net = tf.concat([net, xyz], axis=-1)
# the neighbor information is the same within xyz_pose_1 and xyz_pose_2.
# Therefore, we compute it with xyz_pose_1, and apply it to xyz_pose_2 as well
intra_idx, intra_cnt, \
intra_dst, indices = s3g_util.build_graph(xyz, config.radius[l], config.nn_uplimit[l],
config.num_sample[l], sample_method=config.sample)
filt_idx = s3g_util.spherical_kernel(xyz, xyz, intra_idx, intra_cnt,
intra_dst, config.radius[l],
kernel=config.kernel)
net = _separable_conv3d_block(net, config.channels[l], config.binSize, intra_idx, intra_cnt,
filt_idx, 'conv'+str(l+1), config.multiplier[l], reuse=reuse,
weight_decay=config.weight_decay, with_bn=config.with_bn,
with_bias=config.with_bias, is_training=is_training)
if config.num_sample[l]>1:
# ==================================gather_nd====================================
xyz = tf.gather_nd(xyz, indices)
inter_idx = tf.gather_nd(intra_idx, indices)
inter_cnt = tf.gather_nd(intra_cnt, indices)
inter_dst = tf.gather_nd(intra_dst, indices)
# =====================================END=======================================
net = s3g_util.pool3d(net, inter_idx, inter_cnt,
method=config.pool_method, scope='pool'+str(l+1))
global_maxpool = tf.reduce_max(net, axis=1, keepdims=True)
global_feat.append(global_maxpool)
# =============================global feature extraction in the final layer=============================
global_radius = 100.0 # global_radius(>=2.0) should connect all points to each point in the cloud
nn_idx, nn_cnt, nn_dst = s3g_util.build_global_graph(xyz, query, global_radius)
filt_idx = s3g_util.spherical_kernel(xyz, query, nn_idx, nn_cnt, nn_dst,
global_radius, kernel=[8,2,1])
net = s3g_util.separable_conv3d(net, config.global_channels, 17, config.global_multiplier,
'global_conv', nn_idx, nn_cnt, filt_idx, reuse=reuse,
weight_decay=config.weight_decay, with_bn=config.with_bn,
with_bias=config.with_bias, is_training=is_training)
global_feat.append(net)
net = tf.concat(global_feat,axis=2)
# =====================================================================================================
# MLP on global point cloud vector
net = tf.reshape(net, [batch_size, -1])
net = s3g_util.fully_connected(net, 512, scope='fc1', weight_decay=config.weight_decay,
with_bn=config.with_bn, with_bias=config.with_bias, is_training=is_training)
net = tf.layers.dropout(net, 0.5, training=is_training, name='fc1_dp')
net = s3g_util.fully_connected(net, 256, scope='fc2', weight_decay=config.weight_decay,
with_bn=config.with_bn, with_bias=config.with_bias, is_training=is_training)
net = tf.layers.dropout(net, 0.5, training=is_training, name='fc2_dp')
net = s3g_util.fully_connected(net, config.num_cls, scope='logits', with_bn=False, with_bias=config.with_bias,
activation_fn=None, is_training=is_training)
return net, end_points
def get_loss(pred, label, end_points):
""" pred: B*NUM_CLASSES,
label: B, """
loss = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=pred, labels=label)
classify_loss = tf.reduce_mean(loss)
tf.summary.scalar('classify loss', classify_loss)
tf.add_to_collection('losses', classify_loss)
return classify_loss
|
extract__one_file_exe__pyinstaller/_source_test_file.py | DazEB2/SimplePyScripts | 117 | 12682193 | <gh_stars>100-1000
# uncompyle6 version 3.7.2
# Python bytecode 3.7 (3394)
# Decompiled from: Python 3.7.3 (default, Apr 24 2019, 15:29:51) [MSC v.1915 64 bit (AMD64)]
# Embedded file name: extract__one_file_exe__pyinstaller\_test_file.py
__author__ = 'ipetrash'
def say():
print('Hello World!')
if __name__ == '__main__':
say() |
mmfewshot/classification/apis/inference.py | BIGWangYuDong/mmfewshot | 376 | 12682211 | # Copyright (c) OpenMMLab. All rights reserved.
from typing import Dict, List, Optional, Tuple, Union
import mmcv
import numpy as np
import torch
import torch.nn as nn
from mmcls.core.visualization import imshow_infos
from mmcls.datasets.pipelines import Compose
from mmcls.models import build_classifier
from mmcv.parallel import collate, scatter
from mmcv.runner import load_checkpoint
from mmfewshot.classification.models import BaseMetricClassifier
def init_classifier(config: Union[str, mmcv.Config],
checkpoint: Optional[str] = None,
device: str = 'cuda:0',
options: Optional[Dict] = None) -> nn.Module:
"""Prepare a few shot classifier from config file.
Args:
config (str or :obj:`mmcv.Config`): Config file path or the config
object.
checkpoint (str | None): Checkpoint path. If left as None, the model
will not load any weights. Default: None.
device (str): Runtime device. Default: 'cuda:0'.
options (dict | None): Options to override some settings in the
used config. Default: None.
Returns:
nn.Module: The constructed classifier.
"""
if isinstance(config, str):
config = mmcv.Config.fromfile(config)
elif not isinstance(config, mmcv.Config):
raise TypeError('config must be a filename or Config object, '
f'but got {type(config)}')
if options is not None:
config.merge_from_dict(options)
model = build_classifier(config.model)
if checkpoint is not None:
map_loc = 'cpu' if device == 'cpu' else None
load_checkpoint(model, checkpoint, map_location=map_loc)
# save the config in the model for convenience in later use
model.cfg = config
model.to(device)
model.eval()
return model
def process_support_images(model: nn.Module, support_imgs: List[str],
support_labels: List[str]) -> None:
"""Process support images.
Args:
model (nn.Module): Classifier model.
support_imgs (list[str]): The image filenames.
support_labels (list[str]): The class names of support images.
"""
cfg = model.cfg
device = next(model.parameters()).device # model device
# build the data pipeline
pipeline = cfg.data.test.dataset.pipeline
if pipeline[0]['type'] != 'LoadImageFromFile':
pipeline[0]['type'] = 'LoadImageFromFile'
test_pipeline = Compose(pipeline)
model.CLASSES = list(set(support_labels))
cat_to_id = {cat: i for i, cat in enumerate(model.CLASSES)}
model.before_forward_support()
# forward support images
with torch.no_grad():
for img, label in zip(support_imgs, support_labels):
data = dict(
img_info=dict(filename=img),
gt_label=np.array(cat_to_id[label], dtype=np.int64),
img_prefix=None)
data = test_pipeline(data)
data = collate([data], samples_per_gpu=1)
if next(model.parameters()).is_cuda:
# scatter to specified GPU
data = scatter(data, [device])[0]
model(mode='support', **data)
model.before_forward_query()
def inference_classifier(model: nn.Module, query_img: str) -> Dict:
"""Inference single image with the classifier.
Args:
model (nn.Module): The loaded classifier.
query_img (str): The image filename.
Returns:
        dict: The classification results, containing the
            `pred_score` of each class.
"""
# only support methods without fine-tuning
if isinstance(model, BaseMetricClassifier):
cfg = model.cfg
device = next(model.parameters()).device # model device
# build the data pipeline
pipeline = cfg.data.test.dataset.pipeline
if pipeline[0]['type'] != 'LoadImageFromFile':
pipeline[0]['type'] = 'LoadImageFromFile'
test_pipeline = Compose(pipeline)
data = dict(
img_info=dict(filename=query_img),
gt_label=np.array(-1, dtype=np.int64),
img_prefix=None)
data = test_pipeline(data)
data = collate([data], samples_per_gpu=1)
if next(model.parameters()).is_cuda:
# scatter to specified GPU
data = scatter(data, [device])[0]
# inference image
with torch.no_grad():
scores = model(mode='query', img=data['img'])[0]
result = {
model.CLASSES[i]: float(scores[i])
for i in range(scores.shape[0])
}
return result
else:
raise TypeError(
'currently, inference only support metric based methods')
def show_result_pyplot(img: str,
result: Dict,
fig_size: Tuple[int] = (15, 10),
wait_time: int = 0,
out_file: Optional[str] = None) -> np.ndarray:
"""Visualize the classification results on the image.
Args:
img (str): Image filename.
result (dict): The classification result.
fig_size (tuple): Figure size of the pyplot figure. Default: (15, 10).
wait_time (int): How many seconds to display the image. Default: 0.
out_file (str | None): Default: None
Returns:
np.ndarray: pyplot figure.
"""
img = mmcv.imread(img)
img = img.copy()
img = imshow_infos(
img,
result,
text_color='white',
font_size=25,
row_width=20,
win_name='',
show=True,
fig_size=fig_size,
wait_time=wait_time,
out_file=out_file)
return img
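# A minimal usage sketch tying these helpers together (the config, checkpoint
# and image paths below are hypothetical placeholders):
#
#   model = init_classifier('configs/some_config.py', 'checkpoint.pth', device='cuda:0')
#   process_support_images(model, ['cat_1.jpg', 'dog_1.jpg'], ['cat', 'dog'])
#   result = inference_classifier(model, 'query.jpg')
#   show_result_pyplot('query.jpg', result)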
|
homeassistant/auth/providers/legacy_api_password.py | MrDelik/core | 30,023 | 12682218 | """
Support Legacy API password auth provider.
It will be removed when auth system production ready
"""
from __future__ import annotations
from collections.abc import Mapping
import hmac
from typing import Any, cast
import voluptuous as vol
from homeassistant.core import callback
from homeassistant.data_entry_flow import FlowResult
from homeassistant.exceptions import HomeAssistantError
import homeassistant.helpers.config_validation as cv
from . import AUTH_PROVIDER_SCHEMA, AUTH_PROVIDERS, AuthProvider, LoginFlow
from ..models import Credentials, UserMeta
AUTH_PROVIDER_TYPE = "legacy_api_password"
CONF_API_PASSWORD = "<PASSWORD>"
CONFIG_SCHEMA = AUTH_PROVIDER_SCHEMA.extend(
{vol.Required(CONF_API_PASSWORD): cv.string}, extra=vol.PREVENT_EXTRA
)
LEGACY_USER_NAME = "Legacy API password user"
class InvalidAuthError(HomeAssistantError):
"""Raised when submitting invalid authentication."""
@AUTH_PROVIDERS.register(AUTH_PROVIDER_TYPE)
class LegacyApiPasswordAuthProvider(AuthProvider):
"""An auth provider support legacy api_password."""
DEFAULT_TITLE = "Legacy API Password"
@property
def api_password(self) -> str:
"""Return api_password."""
return str(self.config[CONF_API_PASSWORD])
async def async_login_flow(self, context: dict[str, Any] | None) -> LoginFlow:
"""Return a flow to login."""
return LegacyLoginFlow(self)
@callback
def async_validate_login(self, password: str) -> None:
"""Validate password."""
api_password = str(self.config[CONF_API_PASSWORD])
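        # hmac.compare_digest performs a constant-time comparison, avoiding
        # timing side channels when checking the password.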
if not hmac.compare_digest(
api_password.encode("utf-8"), password.encode("utf-8")
):
raise InvalidAuthError
async def async_get_or_create_credentials(
self, flow_result: Mapping[str, str]
) -> Credentials:
"""Return credentials for this login."""
credentials = await self.async_credentials()
if credentials:
return credentials[0]
return self.async_create_credentials({})
async def async_user_meta_for_credentials(
self, credentials: Credentials
) -> UserMeta:
"""
Return info for the user.
Will be used to populate info when creating a new user.
"""
return UserMeta(name=LEGACY_USER_NAME, is_active=True)
class LegacyLoginFlow(LoginFlow):
"""Handler for the login flow."""
async def async_step_init(
self, user_input: dict[str, str] | None = None
) -> FlowResult:
"""Handle the step of the form."""
errors = {}
if user_input is not None:
try:
cast(
LegacyApiPasswordAuthProvider, self._auth_provider
).async_validate_login(user_input["password"])
except InvalidAuthError:
errors["base"] = "invalid_auth"
if not errors:
return await self.async_finish({})
return self.async_show_form(
step_id="init",
data_schema=vol.Schema({vol.Required("password"): str}),
errors=errors,
)
|
csbdeep/models/care_upsampling.py | Takuya1031/CSBDeep | 205 | 12682229 | from __future__ import print_function, unicode_literals, absolute_import, division
import numpy as np
from scipy.ndimage.interpolation import zoom
from .care_standard import CARE
from ..data import PercentileNormalizer, PadAndCropResizer
from ..utils import _raise, axes_dict
class UpsamplingCARE(CARE):
"""CARE network for combined image restoration and upsampling of one dimension.
Extends :class:`csbdeep.models.CARE` by replacing prediction
(:func:`predict`, :func:`predict_probabilistic`) to first upsample Z before image restoration.
"""
def predict(self, img, axes, factor, normalizer=PercentileNormalizer(), resizer=PadAndCropResizer(), n_tiles=None):
"""Apply neural network to raw image with low-resolution Z axis.
See :func:`CARE.predict` for documentation.
Parameters
----------
factor : float
Upsampling factor for Z axis. It is important that this is chosen in correspondence
to the subsampling factor used during training data generation.
"""
img = self._upsample(img, axes, factor)
return super(UpsamplingCARE, self).predict(img, axes, normalizer, resizer, n_tiles)
def predict_probabilistic(self, img, axes, factor, normalizer=PercentileNormalizer(), resizer=PadAndCropResizer(), n_tiles=None):
"""Apply neural network to raw image with low-resolution Z axis for probabilistic prediction.
See :func:`CARE.predict_probabilistic` for documentation.
Parameters
----------
factor : float
Upsampling factor for Z axis. It is important that this is chosen in correspondence
to the subsampling factor used during training data generation.
"""
img = self._upsample(img, axes, factor)
return super(UpsamplingCARE, self).predict_probabilistic(img, axes, normalizer, resizer, n_tiles)
@staticmethod
def _upsample(img, axes, factor, axis='Z'):
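        # Build a per-axis zoom factor vector and linearly interpolate (order=1)
        # along the chosen axis only.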
factors = np.ones(img.ndim)
factors[axes_dict(axes)[axis]] = factor
return zoom(img,factors,order=1)
|
tests/integration-tests.py | jcatana/gpu-feature-discovery | 120 | 12682233 | #!/usr/bin/env python3
import docker
import os
import re
import sys
import shutil
import tempfile
import time
def get_expected_labels_regexs():
with open("./expected-output.txt") as f:
expected_labels = f.readlines()
expected_labels = [x.strip() for x in expected_labels]
return [re.compile(label) for label in expected_labels]
def check_labels(expected_labels_regexs, labels):
for label in labels[:]:
for label_regex in expected_labels_regexs[:]:
if label_regex.match(label):
expected_labels_regexs.remove(label_regex)
labels.remove(label)
break
for label in labels:
print("Unexpected label: {}".format(label))
for regex in expected_labels_regexs:
print("Missing label matching regex: {}".format(regex.pattern))
return len(expected_labels_regexs) == 0 and len(labels) == 0
if __name__ == '__main__':
if len(sys.argv) != 2:
print("Usage: {} DOCKER_IMAGE".format(sys.argv[0]))
sys.exit(1)
image = sys.argv[1]
print("Running integration tests for GFD")
client = docker.from_env()
with tempfile.TemporaryDirectory() as tmpdirname:
mount = docker.types.Mount("/etc/kubernetes/node-feature-discovery/features.d",
tmpdirname, "bind")
print("Running GFD")
container = client.containers.run(image, detach=True, privileged=True, mounts=[mount,])
print("Waiting for GFD output file")
while container.status != "exited" and not os.path.exists(tmpdirname + "/gfd"):
time.sleep(1)
container.reload()
print("Container logs:\n{}".format(container.logs().decode()))
shutil.copyfile(tmpdirname + "/gfd", tmpdirname + "/gfd-copy")
container.stop()
with open(tmpdirname + "/gfd-copy") as output_file:
content = output_file.readlines()
content = [x.strip() for x in content]
expected_labels = get_expected_labels_regexs()
if not check_labels(expected_labels, content):
print("Integration tests failed")
sys.exit(1)
print("Integration tests done")
sys.exit(0)
|
runtime/translation/models/gnmt_large/gpus=8/gnmt_large.py | NestLakerJasonLIN/pipedream | 273 | 12682236 | # Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import torch
from .stage0 import Stage0
from .stage1 import Stage1
from .stage2 import Stage2
from .stage3 import Stage3
from .stage4 import Stage4
from .stage5 import Stage5
from .stage6 import Stage6
from .stage7 import Stage7
class GNMT16Partitioned(torch.nn.Module):
def __init__(self):
super(GNMT16Partitioned, self).__init__()
self.stage0 = Stage0()
self.stage1 = Stage1()
self.stage2 = Stage2()
self.stage3 = Stage3()
self.stage4 = Stage4()
self.stage5 = Stage5()
self.stage6 = Stage6()
self.stage7 = Stage7()
def forward(self, input0, input1, input2):
(out1, out0) = self.stage0(input0, input1)
(out4, out5) = self.stage1(out1, out0)
(out13, out14) = self.stage2(input1, out4, out5, input2)
(out13_1, out15, out16) = self.stage3(out13, out14)
(out13_2, out18, out19) = self.stage4(out13_1, out15, out16)
(out13_3, out20, out21) = self.stage5(out13_2, out18, out19)
out23 = self.stage6(out13_3, out20, out21)
out24 = self.stage7(out23)
return out24
|
tools/condlanenet/curvelanes/test_curvelanes_dataset.py | Yibin122/conditional-lane-detection | 232 | 12682264 | <gh_stars>100-1000
import argparse
import os
import mmcv
import cv2
import numpy as np
from tqdm import tqdm
import matplotlib.pyplot as plt
from mmdet.datasets import build_dataloader, build_dataset
from mmdet.utils.general_utils import mkdir
from tools.condlanenet.common import COLORS
def parse_args():
parser = argparse.ArgumentParser(description='MMDet test detector')
parser.add_argument('--config', required=True,
help='test config file path')
parser.add_argument('--show', required=True, help='show results')
parser.add_argument('--max_show_num', type=int, default=50, help='show results')
args = parser.parse_args()
return args
def mask_to_rgb(mask):
h, w = mask.shape
rgb = np.zeros([h, w, 3], dtype=np.uint8)
for i in range(np.max(mask)+1):
rgb[mask == i] = COLORS[i]
return rgb
def vis_one(data):
# image
img = data['img'].data[0].detach().cpu().numpy()[0, :, :, :]
norm_cfg = data['img_metas'].data[0][0]['img_norm_cfg']
downscale = data['img_metas'].data[0][0]['down_scale']
hm_downscale = data['img_metas'].data[0][0]['hm_down_scale']
img = img.transpose(1, 2, 0)
img = (img * norm_cfg['std']) + norm_cfg['mean']
img = img.astype(np.uint8)
# hm
gt_hm = data['gt_hm'].data[0].detach().cpu().numpy()[
0, :, :, :] * 255
vis_hm = np.zeros_like(gt_hm[0])
for i in range(gt_hm.shape[0]):
vis_hm += gt_hm[i, :, :]
gt_masks = data['img_metas'].data[0][0]['gt_masks']
vis_img = np.zeros(img.shape, np.uint8)
vis_img[:] = img[:]
for i, gt_info in enumerate(gt_masks):
points = gt_info['points']
mask_infos = gt_info['gt_masks']
for color_idx, mask_info in enumerate(mask_infos):
row = mask_info['row']
row_range = mask_info['range']
for coord_y, (coord_x, valid) in enumerate(zip(row, row_range[0])):
if valid:
coord_y *= downscale
coord_x *= downscale
coord_x = int(coord_x)
coord_y = int(coord_y)
cv2.circle(vis_img, (coord_x, coord_y), 3, color=COLORS[color_idx+1], thickness=-1)
points = mask_info['points']
for p in points:
cv2.circle(vis_img, (hm_downscale*p[0], hm_downscale*p[1]), 3, COLORS[1], -1)
cv2.circle(vis_img, (hm_downscale*p[0], hm_downscale*p[1]), 1, (0,0,0), -1)
img = vis_img
return img, vis_hm
def main():
args = parse_args()
mkdir(args.show)
# build the dataloader
cfg = mmcv.Config.fromfile(args.config)
dataset = build_dataset(cfg.data.train)
data_loader = build_dataloader(
dataset,
samples_per_gpu=1,
workers_per_gpu=cfg.data['workers_per_gpu'],
dist=False,
shuffle=False)
for index, data in tqdm(enumerate(data_loader)):
file_name = data['img_metas'].data[0][0]['filename']
save_name = os.path.splitext(os.path.basename(file_name))[0]
print(index, file_name)
vis_img, vis_hm = vis_one(data)
vis_img_dir = os.path.join(args.show, '{}_img.png'.format(save_name))
vis_hm_dir = os.path.join(args.show, '{}_hm.png'.format(save_name))
cv2.imwrite(vis_img_dir, vis_img)
cv2.imwrite(vis_hm_dir, vis_hm)
if index >= args.max_show_num:
break
if __name__ == '__main__':
main() |
vqgan_clip/grad.py | aman-tiwari/vqgan-clip | 130 | 12682267 | <reponame>aman-tiwari/vqgan-clip<filename>vqgan_clip/grad.py
import torch
from torch import nn, optim
from torch.nn import functional as F
from torch_optimizer import DiffGrad, AdamP, RAdam
class ReplaceGrad(torch.autograd.Function):
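    # Straight-through style helper: the forward pass returns ``x_forward``,
    # while the backward pass routes the incoming gradient to ``x_backward``.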
@staticmethod
def forward(ctx, x_forward, x_backward):
ctx.shape = x_backward.shape
return x_forward
@staticmethod
def backward(ctx, grad_in):
return None, grad_in.sum_to_size(ctx.shape)
replace_grad = ReplaceGrad.apply
class ClampWithGrad(torch.autograd.Function):
@staticmethod
def forward(ctx, input, min, max):
ctx.min = min
ctx.max = max
ctx.save_for_backward(input)
return input.clamp(min, max)
@staticmethod
def backward(ctx, grad_in):
input, = ctx.saved_tensors
return grad_in * (grad_in * (input - input.clamp(ctx.min, ctx.max)) >= 0), None, None
clamp_with_grad = ClampWithGrad.apply
def get_opt(opt_name, opt_lr):
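    # Note: ``z`` is not defined in this module; this helper appears to rely on
    # a module-level/global latent tensor named ``z`` being set before it is called.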
if opt_name == "Adam":
opt = optim.Adam([z], lr=opt_lr) # LR=0.1 (Default)
elif opt_name == "AdamW":
opt = optim.AdamW([z], lr=opt_lr)
elif opt_name == "Adagrad":
opt = optim.Adagrad([z], lr=opt_lr)
elif opt_name == "Adamax":
opt = optim.Adamax([z], lr=opt_lr)
elif opt_name == "DiffGrad":
opt = DiffGrad([z], lr=opt_lr, eps=1e-9, weight_decay=1e-9) # NR: Playing for reasons
elif opt_name == "AdamP":
opt = AdamP([z], lr=opt_lr)
elif opt_name == "RAdam":
opt = RAdam([z], lr=opt_lr)
elif opt_name == "RMSprop":
opt = optim.RMSprop([z], lr=opt_lr)
else:
print("Unknown optimiser. Are choices broken?")
opt = optim.Adam([z], lr=opt_lr)
return opt
def vector_quantize(x, codebook):
d = x.pow(2).sum(dim=-1, keepdim=True) + codebook.pow(2).sum(dim=1) - 2 * x @ codebook.T
indices = d.argmin(-1)
x_q = F.one_hot(indices, codebook.shape[0]).to(d.dtype) @ codebook
return replace_grad(x_q, x)
|
SimPEG/electromagnetics/utils/waveform_utils.py | Prithwijit-Chak/simpeg | 358 | 12682268 | <reponame>Prithwijit-Chak/simpeg
import numpy as np
from scipy.constants import mu_0, epsilon_0
# useful params
def omega(freq):
"""Angular frequency, omega"""
return 2.0 * np.pi * freq
def k(freq, sigma, mu=mu_0, eps=epsilon_0):
""" Eq 1.47 - 1.49 in Ward and Hohmann """
w = omega(freq)
alp = w * np.sqrt(mu * eps / 2 * (np.sqrt(1.0 + (sigma / (eps * w)) ** 2) + 1))
beta = w * np.sqrt(mu * eps / 2 * (np.sqrt(1.0 + (sigma / (eps * w)) ** 2) - 1))
return alp - 1j * beta
def TriangleFun(time, ta, tb):
"""
Triangular Waveform
* time: 1D array for time
* ta: time at peak
* tb: time at step-off
"""
out = np.zeros(time.size)
out[time <= ta] = 1 / ta * time[time <= ta]
out[(time > ta) & (time < tb)] = (
-1 / (tb - ta) * (time[(time > ta) & (time < tb)] - tb)
)
return out
def TriangleFunDeriv(time, ta, tb):
"""
Derivative of Triangular Waveform
"""
out = np.zeros(time.size)
out[time <= ta] = 1 / ta
out[(time > ta) & (time < tb)] = -1 / (tb - ta)
return out
def SineFun(time, ta):
"""
Sine Waveform
* time: 1D array for time
* ta: Pulse Period
"""
out = np.zeros(time.size)
out[time <= ta] = np.sin(1.0 / ta * np.pi * time[time <= ta])
return out
def SineFunDeriv(time, ta):
"""
Derivative of Sine Waveform
"""
out = np.zeros(time.size)
out[time <= ta] = 1.0 / ta * np.pi * np.cos(1.0 / ta * np.pi * time[time <= ta])
return out
def VTEMFun(time, ta, tb, a):
"""
VTEM Waveform
* time: 1D array for time
* ta: time at peak of exponential part
* tb: time at step-off
"""
out = np.zeros(time.size)
out[time <= ta] = (1 - np.exp(-a * time[time <= ta] / ta)) / (1 - np.exp(-a))
out[(time > ta) & (time < tb)] = (
-1 / (tb - ta) * (time[(time > ta) & (time < tb)] - tb)
)
return out
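# A minimal sketch of evaluating one of these waveforms on a time grid (the
# times and parameters below are illustrative only):
#
#   t = np.linspace(0.0, 2e-3, 256)
#   w = VTEMFun(t, ta=1e-3, tb=2e-3, a=3.0)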
|
src/devpy/__init__.py | sametmax/devpy | 161 | 12682278 | <filename>src/devpy/__init__.py<gh_stars>100-1000
import devpy
from .log import autolog # noqa
from .tb import color_traceback # noqa
__version__ = "0.1.8"
def dev_mode(color_traceback=True, autolog=True): # noqa
if color_traceback:
devpy.color_traceback()
if autolog:
return devpy.autolog()
|
env/lib/python3.8/site-packages/pandas/tests/frame/methods/test_diff.py | acrucetta/Chicago_COVI_WebApp | 1,738 | 12682281 | <filename>env/lib/python3.8/site-packages/pandas/tests/frame/methods/test_diff.py
import numpy as np
import pytest
import pandas as pd
from pandas import DataFrame, Series, Timestamp, date_range
import pandas._testing as tm
class TestDataFrameDiff:
def test_diff(self, datetime_frame):
the_diff = datetime_frame.diff(1)
tm.assert_series_equal(
the_diff["A"], datetime_frame["A"] - datetime_frame["A"].shift(1)
)
# int dtype
a = 10000000000000000
b = a + 1
s = Series([a, b])
rs = DataFrame({"s": s}).diff()
assert rs.s[1] == 1
# mixed numeric
tf = datetime_frame.astype("float32")
the_diff = tf.diff(1)
tm.assert_series_equal(the_diff["A"], tf["A"] - tf["A"].shift(1))
# GH#10907
df = pd.DataFrame({"y": pd.Series([2]), "z": pd.Series([3])})
df.insert(0, "x", 1)
result = df.diff(axis=1)
expected = pd.DataFrame(
{"x": np.nan, "y": pd.Series(1), "z": pd.Series(1)}
).astype("float64")
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("tz", [None, "UTC"])
def test_diff_datetime_axis0(self, tz):
# GH#18578
df = DataFrame(
{
0: date_range("2010", freq="D", periods=2, tz=tz),
1: date_range("2010", freq="D", periods=2, tz=tz),
}
)
result = df.diff(axis=0)
expected = DataFrame(
{
0: pd.TimedeltaIndex(["NaT", "1 days"]),
1: pd.TimedeltaIndex(["NaT", "1 days"]),
}
)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("tz", [None, "UTC"])
def test_diff_datetime_axis1(self, tz):
# GH#18578
df = DataFrame(
{
0: date_range("2010", freq="D", periods=2, tz=tz),
1: date_range("2010", freq="D", periods=2, tz=tz),
}
)
if tz is None:
result = df.diff(axis=1)
expected = DataFrame(
{
0: pd.TimedeltaIndex(["NaT", "NaT"]),
1: pd.TimedeltaIndex(["0 days", "0 days"]),
}
)
tm.assert_frame_equal(result, expected)
else:
with pytest.raises(NotImplementedError):
result = df.diff(axis=1)
def test_diff_timedelta(self):
# GH#4533
df = DataFrame(
dict(
time=[Timestamp("20130101 9:01"), Timestamp("20130101 9:02")],
value=[1.0, 2.0],
)
)
res = df.diff()
exp = DataFrame(
[[pd.NaT, np.nan], [pd.Timedelta("00:01:00"), 1]], columns=["time", "value"]
)
tm.assert_frame_equal(res, exp)
def test_diff_mixed_dtype(self):
df = DataFrame(np.random.randn(5, 3))
df["A"] = np.array([1, 2, 3, 4, 5], dtype=object)
result = df.diff()
assert result[0].dtype == np.float64
def test_diff_neg_n(self, datetime_frame):
rs = datetime_frame.diff(-1)
xp = datetime_frame - datetime_frame.shift(-1)
tm.assert_frame_equal(rs, xp)
def test_diff_float_n(self, datetime_frame):
rs = datetime_frame.diff(1.0)
xp = datetime_frame.diff(1)
tm.assert_frame_equal(rs, xp)
def test_diff_axis(self):
# GH#9727
df = DataFrame([[1.0, 2.0], [3.0, 4.0]])
tm.assert_frame_equal(
df.diff(axis=1), DataFrame([[np.nan, 1.0], [np.nan, 1.0]])
)
tm.assert_frame_equal(
df.diff(axis=0), DataFrame([[np.nan, np.nan], [2.0, 2.0]])
)
|
tests/test_overwritting.py | wetgi/lagom | 109 | 12682282 | <reponame>wetgi/lagom
import pytest
from lagom import Container
from lagom.exceptions import DuplicateDefinition
class InitialDep:
pass
class SomeMockForTesting(InitialDep):
pass
class SomeMockThatDoesntEventExtend:
pass
def test_deps_can_be_overridden_by_a_child_class(container: Container):
container.define(InitialDep, lambda: SomeMockForTesting())
resolved = container.resolve(InitialDep)
assert type(resolved) == SomeMockForTesting
def test_deps_can_be_overridden_by_anything(container: Container):
container.define(InitialDep, lambda: SomeMockThatDoesntEventExtend()) # type: ignore
resolved = container.resolve(InitialDep)
assert type(resolved) == SomeMockThatDoesntEventExtend
def test_explicit_definitions_can_only_be_made_once(container: Container):
container.define(InitialDep, lambda: SomeMockForTesting())
with pytest.raises(DuplicateDefinition):
container.define(
InitialDep, lambda: SomeMockThatDoesntEventExtend() # type: ignore
)
|
fortnitepy/ext/commands/help.py | gfdb/fortnitepy | 127 | 12682291 | """
The MIT License (MIT)
Copyright (c) 2015-present Rapptz
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
"""
import copy
import functools
import inspect
import re
import unicodedata
from collections import OrderedDict
from typing import (TYPE_CHECKING, Any, List, Dict, Optional, Iterable,
Callable, Sequence, Union, Tuple)
from fortnitepy.typedefs import MaybeCoro
from fortnitepy.party import ClientParty
from fortnitepy.friend import Friend
from .core import Group, Command
from .errors import CommandError
from .context import Context
from .cog import Cog
if TYPE_CHECKING:
from .bot import Bot
__all__ = (
'Paginator',
'HelpCommand',
'FortniteHelpCommand',
)
_IS_ASCII = re.compile(r'^[\x00-\x7f]+$')
def _string_width(string: str, *, _IS_ASCII: Any = _IS_ASCII) -> int:
"""Returns string's width."""
match = _IS_ASCII.match(string)
if match:
return match.endpos
UNICODE_WIDE_CHAR_TYPE = 'WFA'
width = 0
func = unicodedata.east_asian_width
for char in string:
width += 2 if func(char) in UNICODE_WIDE_CHAR_TYPE else 1
return width
async def maybe_coroutine(func: MaybeCoro,
*args: list,
**kwargs: dict) -> Any:
value = func(*args, **kwargs)
if inspect.isawaitable(value):
return await value
else:
return value
class Paginator:
"""A class that aids in paginating code blocks for Fortnite messages.
.. container:: operations
.. describe:: len(x)
Returns the total number of characters in the paginator.
Attributes
-----------
prefix: :class:`str`
The prefix inserted to every page.
suffix: :class:`str`
The suffix appended at the end of every page.
max_size: :class:`int`
The maximum amount of codepoints allowed in a page.
"""
def __init__(self, prefix: str = '',
suffix: str = '',
max_size: int = 10000) -> None:
self.prefix = prefix
self.suffix = suffix
self.max_size = max_size
self.clear()
def clear(self) -> None:
"""Clears the paginator to have no pages."""
if self.prefix is not None:
self._current_page = [self.prefix]
self._count = len(self.prefix)
else:
self._current_page = []
self._count = 0
self._pages = []
@property
def _prefix_len(self) -> int:
return len(self.prefix) if self.prefix else 0
@property
def _suffix_len(self) -> int:
return len(self.suffix) if self.suffix else 0
def add_page(self, text: str) -> None:
"""Adds a page to the paginator with no additional checks done."""
self._pages.append(text)
def add_line(self, line: str = '', *, empty: bool = False) -> None:
"""Adds a line to the current page.
If the line exceeds the :attr:`max_size` then an exception
is raised.
Parameters
-----------
line: :class:`str`
The line to add.
empty: :class:`bool`
Indicates if another empty line should be added.
Raises
------
RuntimeError
The line was too big for the current :attr:`max_size`.
"""
max_page_size = self.max_size - self._prefix_len - self._suffix_len
if len(line) > max_page_size:
raise RuntimeError('Line exceeds maximum page size '
'{}'.format(max_page_size))
if self._count + len(line) + 1 > self.max_size - self._suffix_len:
self.close_page()
self._count += len(line) + 1
self._current_page.append(line)
if empty:
self._current_page.append('')
self._count += 1
def close_page(self) -> None:
"""Prematurely terminate a page."""
if self.suffix is not None:
self._current_page.append(self.suffix)
self._pages.append('\n'.join(self._current_page))
if self.prefix is not None:
self._current_page = []
self._count = len(self.prefix)
else:
self._current_page = []
self._count = 0
def __len__(self) -> int:
total = sum(len(p) for p in self._pages)
return total + self._count
@property
def pages(self) -> List[str]:
"""Returns the rendered list of pages."""
if len(self._current_page) > (0 if self.prefix is None else 1):
self.close_page()
return self._pages
def __repr__(self) -> str:
fmt = ('<Paginator prefix: {0.prefix} suffix: {0.suffix} max_size: '
'{0.max_size} count: {0._count}>')
return fmt.format(self)
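# A minimal usage sketch of the paginator (illustrative only):
#
#   paginator = Paginator(prefix='', suffix='', max_size=2000)
#   for line in ('first line', 'second line'):
#       paginator.add_line(line)
#   for page in paginator.pages:
#       ...  # send each page as its own message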
def _not_overridden(func: MaybeCoro) -> MaybeCoro:
func.__fnpy_help_command_not_overridden__ = True
return func
class _HelpCommandImpl(Command):
def __init__(self, inject: Command, *args: list, **kwargs: dict) -> None:
super().__init__(inject.command_callback, *args, **kwargs)
self._original = inject
self._injected = inject
async def prepare(self, ctx: Context) -> None:
self._injected = injected = self._original.copy()
injected.context = ctx
self.callback = injected.command_callback
error_handler = injected.help_command_error_handler
if not hasattr(error_handler, '__fnpy_help_command_not_overridden__'):
if self.cog is not None:
self.error_handler = self._error_handler_cog_implementation
else:
self.error_handler = error_handler
await super().prepare(ctx)
async def _parse_arguments(self, ctx: Context) -> None:
# Make the parser think we don't have a cog so it doesn't
# inject the parameter into `ctx.args`.
original_cog = self.cog
self.cog = None
try:
await super()._parse_arguments(ctx)
finally:
self.cog = original_cog
async def _error_handler_cog_implementation(self, _,
ctx: Context,
error: Exception) -> None:
await self._injected.help_command_error_handler(ctx, error)
@property
def clean_params(self) -> OrderedDict:
result = self.params.copy()
try:
result.popitem(last=False)
except Exception:
raise ValueError('Missing context parameter') from None
else:
return result
def _inject_into_cog(self, cog: Cog) -> None:
# Warning: hacky
# Make the cog think that get_commands returns this command
# as well if we inject it without modifying __cog_commands__
# since that's used for the injection and ejection of cogs.
def wrapped_get_commands(*, _original=cog.get_commands):
ret = _original()
ret.append(self)
return ret
# Ditto here
def wrapped_walk_commands(*, _original=cog.walk_commands):
yield from _original()
yield self
functools.update_wrapper(wrapped_get_commands, cog.get_commands)
functools.update_wrapper(wrapped_walk_commands, cog.walk_commands)
cog.get_commands = wrapped_get_commands
cog.walk_commands = wrapped_walk_commands
self.cog = cog
def _eject_cog(self) -> None:
if self.cog is None:
return
# revert back into their original methods
cog = self.cog
cog.get_commands = cog.get_commands.__wrapped__
cog.walk_commands = cog.walk_commands.__wrapped__
self.cog = None
class HelpCommand:
r"""The base implementation for help command formatting.
.. note::
Internally instances of this class are deep copied every time
the command itself is invoked to prevent a race condition
mentioned in discord.py issue 2123.
This means that relying on the state of this class to be
the same between command invocations would not work as expected.
Attributes
-----------
context: Optional[:class:`Context`]
The context that invoked this help formatter. This is generally set
after the help command assigned, :func:`command_callback`\, has been
called.
show_hidden: :class:`bool`
Specifies if hidden commands should be shown in the output.
Defaults to ``False``.
verify_checks: :class:`bool`
Specifies if commands should have their :attr:`.Command.checks` called
and verified. Defaults to ``True``.
command_attrs: :class:`dict`
A dictionary of options to pass in for the construction of the help
command. This allows you to change the command behaviour without
actually changing the implementation of the command. The attributes
will be the same as the ones passed in the :class:`.Command`
constructor.
"""
def __new__(cls, *args: list, **kwargs: dict) -> 'HelpCommand':
# To prevent race conditions of a single instance while also allowing
# for settings to be passed the original arguments passed must be
# assigned to allow for easier copies (which will be made when the
# help command is actually called)
# see discord.py issue 2123
self = super().__new__(cls)
# Shallow copies cannot be used in this case since it is not unusual
# to pass instances that need state, e.g. Paginator or what have you
# into the function. The keys can be safely copied as-is since they're
# 99.99% certain of being string keys
deepcopy = copy.deepcopy
self.__original_kwargs__ = {
k: deepcopy(v)
for k, v in kwargs.items()
}
self.__original_args__ = deepcopy(args)
return self
def __init__(self, **options: dict) -> None:
self.show_hidden = options.pop('show_hidden', False)
self.verify_checks = options.pop('verify_checks', True)
self.command_attrs = attrs = options.pop('command_attrs', {})
attrs.setdefault('name', 'help')
attrs.setdefault('help', 'Shows this message')
self.context = None
self._command_impl = None
def copy(self) -> 'HelpCommand':
o = self.__class__(*self.__original_args__, **self.__original_kwargs__)
o._command_impl = self._command_impl
return o
def _add_to_bot(self, bot: 'Bot') -> None:
command = _HelpCommandImpl(self, **self.command_attrs)
bot.add_command(command)
self._command_impl = command
def _remove_from_bot(self, bot: 'Bot') -> None:
bot.remove_command(self._command_impl.name)
self._command_impl._eject_cog()
self._command_impl = None
def get_bot_mapping(self) -> Dict[Optional[Cog], List[Command]]:
"""Retrieves the bot mapping passed to :meth:`send_bot_help`."""
bot = self.context.bot
mapping = {
cog: cog.get_commands()
for cog in bot.cogs.values()
}
mapping[None] = [c for c in bot.all_commands.values() if c.cog is None]
return mapping
@property
def command_prefix(self) -> str:
"""The prefix used to invoke the help command."""
return self.context.prefix
@property
def invoked_with(self) -> str:
"""Similar to :attr:`Context.invoked_with` except properly handles
the case where :meth:`Context.send_help` is used.
If the help command was used regularly then this returns
the :attr:`Context.invoked_with` attribute. Otherwise, if
        the help command was called using :meth:`Context.send_help`
then it returns the internal command name of the help command.
Returns
---------
:class:`str`
The command name that triggered this invocation.
"""
command_name = self._command_impl.name
ctx = self.context
if (ctx is None or ctx.command is None
or ctx.command.qualified_name != command_name):
return command_name
return ctx.invoked_with
def get_command_signature(self, command: Command) -> str:
"""Retrieves the signature portion of the help page.
Parameters
----------
command: :class:`Command`
The command to get the signature of.
Returns
-------
:class:`str`
The signature for the command.
"""
parent = command.full_parent_name
if len(command.aliases) > 0:
aliases = '|'.join(command.aliases)
fmt = '[%s|%s]' % (command.name, aliases)
if parent:
fmt = parent + ' ' + fmt
alias = fmt
else:
alias = command.name if not parent else parent + ' ' + command.name
return '%s%s %s' % (self.command_prefix, alias, command.signature)
@property
def cog(self) -> Optional[Cog]:
"""A property for retrieving or setting the cog for the help command.
When a cog is set for the help command, it is as-if the help command
belongs to that cog. All cog special methods will apply to the help
command and it will be automatically unset on unload.
To unbind the cog from the help command, you can set it to ``None``.
Returns
--------
Optional[:class:`Cog`]
The cog that is currently set for the help command.
"""
return self._command_impl.cog
@cog.setter
def cog(self, cog: Cog) -> None:
# Remove whatever cog is currently valid, if any
self._command_impl._eject_cog()
# If a new cog is set then inject it.
if cog is not None:
self._command_impl._inject_into_cog(cog)
def command_not_found(self, string: str) -> str:
"""|maybecoro|
A method called when a command is not found in the help command.
This is useful to override for i18n.
Defaults to ``No command called {0} found.``
Parameters
------------
string: :class:`str`
The string that contains the invalid command. Note that this has
had mentions removed to prevent abuse.
Returns
---------
:class:`str`
The string to use when a command has not been found.
"""
return 'No command called "{}" found.'.format(string)
def subcommand_not_found(self, command: Command, string: str) -> str:
"""|maybecoro|
A method called when a command did not have a subcommand requested in
the help command. This is useful to override for i18n.
Defaults to either:
- ``'Command "{command.qualified_name}" has no subcommands.'``
- If there is no subcommand in the ``command`` parameter.
- ``'Command "{command.qualified_name}" has no subcommand named {string}'``
- If the ``command`` parameter has subcommands but not one named ``string``.
Parameters
------------
command: :class:`Command`
The command that did not have the subcommand requested.
string: :class:`str`
The string that contains the invalid subcommand.
Returns
---------
:class:`str`
The string to use when the command did not have the subcommand
requested.
""" # noqa
if isinstance(command, Group) and len(command.all_commands) > 0:
return ('Command "{0.qualified_name}" has no subcommand named '
'{1}'.format(command, string))
return 'Command "{0.qualified_name}" has no subcommands.'.format(
command
)
async def filter_commands(self, commands: Iterable[Command], *,
sort: bool = False,
key: Optional[Callable] = None
) -> List[Command]:
"""|coro|
Returns a filtered list of commands and optionally sorts them.
This takes into account the :attr:`verify_checks` and
:attr:`show_hidden` attributes.
Parameters
------------
commands: Iterable[:class:`Command`]
An iterable of commands that are getting filtered.
sort: :class:`bool`
Whether to sort the result.
key: Optional[Callable[:class:`Command`, Any]]
An optional key function to pass to :func:`py:sorted` that
takes a :class:`Command` as its sole parameter. If ``sort`` is
passed as ``True`` then this will default as the command name.
Returns
---------
List[:class:`Command`]
A list of commands that passed the filter.
"""
if sort and key is None:
key = lambda c: c.name # noqa
if self.show_hidden:
iterator = commands
else:
iterator = filter(lambda c: not c.hidden, commands)
if not self.verify_checks:
# if we do not need to verify the checks then we can just
# run it straight through normally without using await.
return sorted(iterator, key=key) if sort else list(iterator)
# if we're here then we need to check every command if it can run
async def predicate(cmd):
try:
return await cmd.can_run(self.context)
except CommandError:
return False
ret = []
for cmd in iterator:
valid = await predicate(cmd)
if valid:
ret.append(cmd)
if sort:
ret.sort(key=key)
return ret
def get_max_size(self, commands: Sequence[Command]) -> int:
"""Returns the largest name length of the specified command list.
Parameters
------------
commands: Sequence[:class:`Command`]
A sequence of commands to check for the largest size.
Returns
--------
:class:`int`
The maximum width of the commands.
"""
as_lengths = (
_string_width(c.name)
for c in commands
)
return max(as_lengths, default=0)
def get_destination(self) -> Union[Friend, ClientParty]:
"""Returns either :class:`fortnitepy.Friend` or
:class:`fortnitepy.ClientParty` where the help command will be output.
You can override this method to customise the behaviour.
By default this returns the context's destination.
"""
return self.context.get_destination()
async def send_error_message(self, error: Exception) -> None:
"""|coro|
Handles the implementation when an error happens in the help command.
For example, the result of :meth:`command_not_found` or
        :meth:`subcommand_not_found` will be passed here.
You can override this method to customise the behaviour.
By default, this sends the error message to the destination
specified by :meth:`get_destination`.
.. note::
You can access the invocation context with
:attr:`HelpCommand.context`.
Parameters
------------
error: :class:`str`
The error message to display to the user.
"""
destination = self.get_destination()
await destination.send(error)
@_not_overridden
async def help_command_error_handler(self, ctx: Context,
error: Exception) -> None:
"""|coro|
The help command's error handler, as specified by
:ref:`ext_commands_error_handler`.
Useful to override if you need some specific behaviour when the
error handler is called.
By default this method does nothing and just propagates to the default
error handlers.
Parameters
------------
ctx: :class:`Context`
The invocation context.
error: :class:`CommandError`
The error that was raised.
"""
pass
async def send_bot_help(self, page: int) -> None:
"""|coro|
Handles the implementation of the bot command page in the help command.
This function is called when the help command is called with no
arguments.
It should be noted that this method does not return anything -- rather
the actual message sending should be done inside this method. Well
behaved subclasses should use :meth:`get_destination` to know where to
send, as this is a customisation point for other users.
You can override this method to customise the behaviour.
.. note::
You can access the invocation context with
:attr:`HelpCommand.context`. Also, the commands in the mapping are
not filtered. To do the filtering you will have to call
:meth:`filter_commands` yourself.
Parameters
----------
page: :class:`int`
The page to send.
"""
return None
async def send_cog_help(self, cog: Cog, page: int) -> None:
"""|coro|
Handles the implementation of the cog page in the help command.
This function is called when the help command is called with a cog as
the argument.
It should be noted that this method does not return anything -- rather
the actual message sending should be done inside this method. Well
behaved subclasses should use :meth:`get_destination` to know where to
send, as this is a customisation point for other users.
You can override this method to customise the behaviour.
.. note::
You can access the invocation context with
:attr:`HelpCommand.context`. To get the commands that belong to
            this cog see :meth:`Cog.get_commands`. The commands returned are
            not filtered. To do the filtering you will have to call
:meth:`filter_commands` yourself.
Parameters
-----------
cog: :class:`Cog`
The cog that was requested for help.
page: :class:`int`
The page to send.
"""
return None
async def send_group_help(self, group: Group) -> None:
"""|coro|
Handles the implementation of the group page in the help command.
This function is called when the help command is called with a group
as the argument.
It should be noted that this method does not return anything -- rather
the actual message sending should be done inside this method. Well
behaved subclasses should use :meth:`get_destination` to know where to
send, as this is a customisation point for other users.
You can override this method to customise the behaviour.
.. note::
You can access the invocation context with
:attr:`HelpCommand.context`. To get the commands that belong to
this group without aliases see :attr:`Group.commands`. The
            commands returned are not filtered. To do the filtering you will have
to call :meth:`filter_commands` yourself.
Parameters
-----------
group: :class:`Group`
The group that was requested for help.
"""
return None
async def send_command_help(self, command: Command) -> None:
"""|coro|
Handles the implementation of the single command page in the help
command.
It should be noted that this method does not return anything -- rather
the actual message sending should be done inside this method. Well
behaved subclasses should use :meth:`get_destination` to know where to
send, as this is a customisation point for other users.
You can override this method to customise the behaviour.
.. note::
You can access the invocation context with
:attr:`HelpCommand.context`.
.. admonition:: Showing Help
:class: helpful
There are certain attributes and methods that are helpful for a
help command to show such as the following:
- :attr:`Command.help`
- :attr:`Command.brief`
- :attr:`Command.short_doc`
- :attr:`Command.description`
- :meth:`get_command_signature`
There are more than just these attributes but feel free to play
around with these to help you get started to get the output that
you want.
Parameters
-----------
command: :class:`Command`
The command that was requested for help.
"""
return None
async def prepare_help_command(self, ctx: Context,
command: Optional[Command] = None) -> None:
"""|coro|
A low level method that can be used to prepare the help command
before it does anything. For example, if you need to prepare
some state in your subclass before the command does its processing
then this would be the place to do it.
The default implementation does nothing.
.. note::
This is called *inside* the help command callback body. So all
the usual rules that happen inside apply here as well.
Parameters
-----------
ctx: :class:`Context`
The invocation context.
command: Optional[:class:`str`]
The argument passed to the help command.
"""
pass
    # Not typehinting because it's a command callback
async def command_callback(self, ctx, *, command=None, page: int = 1):
"""|coro|
The actual implementation of the help command.
It is not recommended to override this method and instead change
the behaviour through the methods that actually get dispatched.
- :meth:`send_bot_help`
- :meth:`send_cog_help`
- :meth:`send_group_help`
- :meth:`send_command_help`
- :meth:`get_destination`
- :meth:`command_not_found`
- :meth:`subcommand_not_found`
- :meth:`send_error_message`
- :meth:`on_help_command_error`
- :meth:`prepare_help_command`
"""
# page will never get a value but we just include it here for
# the param list. The actual conversion is done below.
if command is not None:
split = command.split()
try:
page = int(split[-1])
except ValueError:
page = 1
new = command
else:
new = None if len(split) == 1 else ' '.join(split[:-1])
else:
new = command
await self.prepare_help_command(ctx, command)
bot = ctx.bot
if new is None:
# mapping = self.get_bot_mapping()
return await self.send_bot_help(page)
# Check if it's a cog
if not command.startswith(self.command_prefix):
cog = bot.get_cog(new)
if cog is not None:
return await self.send_cog_help(cog, page)
if command.startswith(self.command_prefix):
command = command[len(self.command_prefix):]
maybe_coro = maybe_coroutine
# If it's not a cog then it's a command.
# Since we want to have detailed errors when someone
# passes an invalid subcommand, we need to walk through
# the command group chain ourselves.
keys = command.split(' ')
cmd = bot.all_commands.get(keys[0])
if cmd is None:
string = await maybe_coro(self.command_not_found, keys[0])
return await self.send_error_message(string)
for key in keys[1:]:
try:
found = cmd.all_commands.get(key)
except AttributeError:
string = await maybe_coro(self.subcommand_not_found, cmd, key)
return await self.send_error_message(string)
else:
if found is None:
string = await maybe_coro(
self.subcommand_not_found,
cmd,
key
)
return await self.send_error_message(string)
cmd = found
if isinstance(cmd, Group):
return await self.send_group_help(cmd)
else:
return await self.send_command_help(cmd)
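# Illustrative sketch (not part of the original library): a minimal HelpCommand
# subclass showing how the dispatch hooks above are typically overridden. It only
# relies on helpers defined on HelpCommand in this file (filter_commands,
# get_command_signature, get_destination); the output formatting is an assumption.
class _MinimalSketchHelpCommand(HelpCommand):
    async def send_bot_help(self, page: int) -> None:
        filtered = await self.filter_commands(self.context.bot.commands, sort=True)
        lines = [self.get_command_signature(command) for command in filtered]
        await self.get_destination().send('\n'.join(lines) or 'No commands found.')
    async def send_command_help(self, command: Command) -> None:
        text = self.get_command_signature(command)
        if command.help:
            text += '\n' + command.help
        await self.get_destination().send(text)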
class FortniteHelpCommand(HelpCommand):
"""The implementation of the default help command.
This inherits from :class:`HelpCommand`.
It extends it with the following attributes.
Attributes
------------
dm_help: Optional[:class:`bool`]
A tribool that indicates if the help command should DM the user
instead of sending it to the channel it received it from. If the
boolean is set to ``True``, then all help output is DM'd. If ``False``,
none of the help output is DM'd.
paginator: :class:`Paginator`
The paginator used to paginate the help command output.
commands_title: :class:`str`
The commands title. Defaults to ``Commands:``.
cog_title: :class:`str`
The cog title. Defaults to ``Category:``.
usage_title: :class:`str`
The usage title. Defaults to ``Usage:``.
description_title: :class:`str`
The description title. Defaults to ``Description:``.
help_title: :class:`str`
The help title. Defaults to ``Help:``.
sub_commands_title: :class:`str`
        The sub commands title. Defaults to ``Sub Commands:``.
no_category_heading: :class:`str`
The text to use as heading if no category (cog) is found
for the command.
Defaults to ``No Category``.
height: :class:`int`
The maximum number of lines to fit.
Defaults to ``15``.
width: :class:`int`
The maximum number of characters that fit in a line.
Defaults to ``60``.
indent: :class:`int`
How much to indent the commands and other text from a title.
Defaults to ``4``.
title_prefix: :class:`str`
The prefix to use for the help title.
Defaults to `` +``.
title_suffix: :class:`str`
The suffix to use for the help title.
Defaults to ``+``.
title_char: :class:`str`
The char to use for the help title.
Defaults to ``=``.
line_prefix: :class:`str`
The prefix to use for all lines.
Defaults to `` ``. (Three spaces)
line_suffix: :class:`str`
        The suffix to use for all lines.
Defaults to ````. (Empty)
footer_prefix: :class:`str`
The prefix to use for the help footer.
Defaults to `` +``.
footer_suffix: :class:`str`
The suffix to use for the help footer.
Defaults to ``+``.
footer_char: :class:`str`
The char to use for the help footer.
Defaults to ``=``.
"""
def __init__(self, **options: dict) -> None:
self.dm_help = options.pop('dm_help', False)
self.paginator = options.pop('paginator', None)
self.commands_title = options.pop('commands_title', 'Commands:')
self.cog_title = options.pop('cog_title', 'Category:')
self.usage_title = options.pop('usage_title', 'Usage:')
self.description_title = options.pop('description_title', 'Description:') # noqa
self.help_title = options.pop('help_title', 'Help:')
self.sub_commands_title = options.pop('sub_commands_title', 'Sub Commands:') # noqa
self.no_category = options.pop('no_category_heading', 'No Category')
self.height = options.pop('height', 15)
self.width = options.pop('width', 60)
self.indent = options.pop('indent', 4)
self.title_prefix = options.pop('title_prefix', ' +')
self.title_suffix = options.pop('title_suffix', '+')
self.title_char = options.pop('title_char', '=')
self.line_prefix = options.pop('line_prefix', ' ')
self.line_suffix = options.pop('line_suffix', '')
self.footer_prefix = options.pop('footer_prefix', ' +')
self.footer_suffix = options.pop('footer_suffix', '+')
self.footer_char = options.pop('footer_char', '=')
if self.paginator is None:
self.paginator = Paginator()
super().__init__(**options)
def get_command_name(self, command: Command) -> str:
"""Gets the name of a command.
This method can be overridden for custom text.
Parameters
----------
command: :class:`.Command`
The command to get the name for.
Returns
-------
:class:`str`
| The command name.
| Defaults to ``self.command_prefix + command.qualified_name``
"""
return self.command_prefix + command.qualified_name
def get_sub_command_name(self, sub_command: Command) -> str:
"""Gets the name of a sub command.
This method can be overridden for custom text.
Parameters
----------
sub_command: :class:`.Command`
The sub command to get the name for.
Returns
-------
:class:`str`
| The sub command name.
            | Defaults to ``{self.command_prefix}{sub_command.qualified_name}``
""" # noqa
return self.command_prefix + sub_command.qualified_name
def get_bot_header(self, page_num: int, pages_amount: int) -> str:
"""Gets the name of a sub command.
This method can be overridden for custom text.
Parameters
----------
page_num: :class:`int`
The page being built.
pages_amount: :class:`int`
The amount of pages available.
Returns
-------
:class:`str`
            | The header text.
            | Defaults to ``All Commands - {page_num} / {pages_amount}``
""" # noqa
return '{0} - {1} / {2}'.format(
'All Commands',
page_num,
pages_amount
)
    def get_bot_footer(self, page_num: int, pages_amount: int) -> str:
"""Gets the text to appear in the footer when
:meth:`send_bot_help()` is called.
This method can be overridden for custom text.
Parameters
----------
page_num: :class:`int`
The page being built.
pages_amount: :class:`int`
The amount of pages available.
Returns
-------
:class:`str`
| The bot footer.
| Defaults to ```` (Empty)
"""
return ''
def get_command_header(self, command: Command) -> str:
"""Gets the text to appear in the header when
:meth:`send_command_help()` is called.
This method can be overridden for custom text.
Parameters
----------
command: :class:`.Command`
The command to get the header for.
Returns
-------
:class:`str`
| The header text.
| Defaults to ``Command | {self.command_prefix}{command.qualified_name}``
""" # noqa
return 'Command | {0}{1}'.format(
self.command_prefix,
command.qualified_name
)
def get_command_footer(self, command: Command) -> str:
"""Gets the text to appear in the footer when
:meth:`send_command_help()` is called.
This method can be overridden for custom text.
Parameters
----------
command: :class:`.Command`
The command to get the footer for.
Returns
-------
:class:`str`
| The footer text.
| Defaults to ```` (Empty)
"""
return ''
def get_group_header(self, group: Group) -> str:
"""Gets the text to appear in the header when
:meth:`send_group_help()` is called.
This method can be overridden for custom text.
Parameters
----------
        group: :class:`.Group`
The group to get the header for.
Returns
-------
:class:`str`
| The header text.
| Defaults to ``Command | {self.command_prefix}{group.qualified_name}``
""" # noqa
return 'Command | {0}{1}'.format(
self.command_prefix,
group.qualified_name
)
def get_group_footer(self, group: Group) -> str:
"""Gets the text to appear in the footer when
:meth:`send_group_help()` is called.
This method can be overridden for custom text.
Parameters
----------
        group: :class:`.Group`
The group to get the footer for.
Returns
-------
:class:`str`
| The footer text.
| Defaults to ```` (Empty)
"""
return ''
def get_cog_header(self, cog: Cog,
page_num: int,
pages_amount: int) -> str:
"""Gets the text to appear in the header when
:meth:`send_cog_help()` is called.
This method can be overridden for custom text.
Parameters
----------
cog: :class:`.Cog`
The cog to get the header for.
page_num: :class:`int`
The page being built.
pages_amount: :class:`int`
The amount of pages available.
Returns
-------
:class:`str`
| The header text.
| Defaults to ``Category | {cog.qualified_name} - {page_num} / {pages_amount}``
""" # noqa
return 'Category | {0} - {1} / {2}'.format(
cog.qualified_name,
page_num,
pages_amount
)
def get_cog_footer(self, cog: Cog,
page_num: int,
pages_amount: int) -> str:
"""Gets the text to appear in the footer when
:meth:`send_cog_help()` is called.
This method can be overridden for custom text.
Parameters
----------
cog: :class:`.Cog`
The cog to get the footer for.
page_num: :class:`int`
The page being built.
pages_amount: :class:`int`
The amount of pages available.
Returns
-------
:class:`str`
| The footer text.
| Defaults to ``{self.command_prefix}{self.invoked_with} {cog.qualified_name} <page> | {self.command_prefix}{self.invoked_with} <command>``
""" # noqa
return '{0}{1} {2} <page> | {0}{1} <command>'.format(
self.command_prefix,
self.invoked_with,
cog.qualified_name
)
def shorten_text(self, text: str,
max_len: int,
dot_amount: int = 3) -> str:
"""Shortens text to fit into the :attr:`width`."""
if len(text) > max_len:
return text[:max_len-dot_amount] + '.'*dot_amount
return text
def construct_title(self, t: str) -> str:
_title = ' ' + t + ' ' if t else ''
w = self.width - len(self.title_prefix) - len(self.title_suffix)
return '{0}{1:{2}^{3}}{4}'.format(
self.title_prefix,
_title,
self.title_char,
w,
self.title_suffix
)
def construct_footer(self, f: str) -> str:
_footer = ' ' + f + ' ' if f else ''
w = self.width - len(self.footer_prefix) - len(self.footer_suffix)
return '{0}{1:{2}^{3}}{4}'.format(
self.footer_prefix,
_footer,
self.footer_char,
w,
self.footer_suffix
)
def fix_too_long(self, string: str,
length: int,
start_length: int) -> Tuple[str, List[str]]:
first = string[:start_length-1]
string = string[start_length-1:]
return (
first,
[string[0+i:length-1+i] for i in range(0, len(string), length-1)]
)
def chunkstring(self, string: str, length: int) -> List[str]:
lines = []
curr = ''
split = string.split()
for c, word in enumerate(split, 1):
spaces = 1 if c != len(split) else 0
if len(word) + spaces > length:
space_left = (length - len(curr))
start_length = space_left if space_left > 5 else 0
first, too_long = self.fix_too_long(word, length, start_length)
if first:
curr += first + '-'
if curr:
lines.append(curr)
curr = ''
for cc, new in enumerate(too_long, 1):
if cc != len(too_long):
new += '-'
lines.append(new)
else:
curr += new
continue
if len(curr) + len(word) > length:
lines.append(curr[:-1])
curr = ''
curr += word + ' '
if curr:
lines.append(curr)
return lines
def construct_single_line(self, text: str, extra_indent: int = 0) -> str:
prefix = self.line_prefix + ' '*extra_indent
suffix = self.line_suffix
w = self.width - len(prefix) - len(suffix)
return '{0}{1:<{2}}{3}'.format(
prefix,
text,
w,
suffix
)
def construct_category(self, name: str,
brief: str,
extra_indent: int = 0,
raw: bool = False) -> List[str]:
prefix = self.line_prefix + ' '*extra_indent
suffix = self.line_suffix
indent = self.indent
w = self.width - len(prefix) - len(suffix)
name_line = '{0}{1:<{2}}{3}'.format(
prefix,
self.shorten_text(name, w),
w,
suffix
)
brief_w = w - indent
lines = [name_line]
if not raw:
gen = self.chunkstring(brief, brief_w)
else:
gen = brief.splitlines()
for c, line in enumerate(gen, 2):
fmt = '{0}{1}{2:<{3}}{4}'.format(
prefix,
' '*indent,
line,
brief_w,
suffix
)
if c == self.height - 2:
to_cut = 3 + len(suffix)
new = fmt[:to_cut] + '...' + suffix
lines.append(new)
break
lines.append(fmt)
return lines
async def send_pages(self) -> None:
"""A helper utility to send the page output from :attr:`paginator` to
the destination.
"""
destination = self.get_destination()
for page in self.paginator.pages:
await destination.send(page)
async def send_page(self, page_num: int) -> None:
"""A helper utility to send a page output from :attr:`paginator` to
the destination.
"""
pages = self.paginator.pages
if page_num <= 0 or page_num > len(pages):
return await self.send_error_message(
'Could not find the page you were looking for'
)
destination = self.get_destination()
await destination.send(pages[page_num-1])
def get_destination(self) -> Union[Friend, ClientParty]:
ctx = self.context
if self.dm_help is True:
return ctx.author
elif (self.dm_help is None
and len(self.paginator) > self.dm_help_threshold):
return ctx.author
else:
return ctx.get_destination()
async def prepare_help_command(self, ctx: Context,
command: Command) -> None:
self.paginator.clear()
await super().prepare_help_command(ctx, command)
def construct_command_help(self, command: Command) -> List[str]:
fmt = {}
if command.cog:
fmt[self.cog_title] = command.cog.qualified_name
fmt[self.usage_title] = self.get_command_signature(command)
if command.description:
fmt[self.description_title] = command.description
result = []
for title, value in fmt.items():
lines = self.construct_category(title, value)
result.extend(lines)
if command.help:
title = self.help_title
value = command.help
lines = self.construct_category(title, value, raw=True)
result.extend(lines)
return result
async def send_bot_help(self, page: int) -> None:
ctx = self.context
bot = ctx.bot
no_category = '\u200b{0.no_category}:'.format(self)
def get_category(command, *, no_category=no_category):
cog = command.cog
return cog.qualified_name if cog is not None else no_category
filtered = await self.filter_commands(
bot.commands,
sort=True,
key=get_category
)
chunks = []
curr = []
if bot.description:
parts = self.construct_category(
self.description_title,
bot.description
)
curr.extend(parts)
for command in filtered:
name = self.get_command_name(command)
brief = command.brief or ''
lines = self.construct_category(name, brief)
if len(lines) + len(curr) > self.height - 2:
chunks.append(curr)
curr = []
curr.extend(lines)
if curr:
chunks.append(curr)
chunks_length = len(chunks)
for c, chunk in enumerate(chunks, 1):
footer_fmt = self.get_bot_footer(c, chunks_length) or ''
page_chunks = [
self.construct_title(
self.get_bot_header(c, chunks_length) or ''
),
*chunk,
self.construct_footer(footer_fmt.format(
self.command_prefix,
self.invoked_with,
))
]
self.paginator.add_page(
'\u200b\n' + '\n'.join(page_chunks)
)
await self.send_page(page)
async def send_command_help(self, command: Command) -> None:
result = self.construct_command_help(command)
title = self.construct_title(self.get_command_header(command) or '')
footer = self.construct_footer(self.get_command_footer(command) or '')
self.paginator.add_page(
'\u200b\n' + '\n'.join([title, *result, footer])
)
await self.send_pages()
async def send_group_help(self, group: Group) -> None:
result = self.construct_command_help(group)
filtered = await self.filter_commands(
group.commands,
sort=True
)
for c, command in enumerate(filtered):
if c == 0:
title = self.sub_commands_title
result.append('\n'+self.construct_single_line(title))
name = self.get_sub_command_name(command)
brief = command.brief or ''
lines = self.construct_category(
name,
brief,
extra_indent=self.indent
)
result.extend(lines)
title = self.construct_title(
self.get_group_header(group)
)
footer = self.construct_footer('')
self.paginator.add_page(
'\u200b\n' + '\n'.join([title, *result, footer])
)
await self.send_pages()
    async def send_cog_help(self, cog: Cog, page: int) -> None:
filtered = await self.filter_commands(
cog.get_commands(),
sort=True
)
chunks = []
curr = []
if cog.description:
parts = self.construct_category(
self.description_title,
cog.description
)
curr.extend(parts)
for c, command in enumerate(filtered):
if c == 0:
title = self.commands_title
pre = '\n' if curr else ''
curr.append(pre+self.construct_single_line(title))
name = self.get_command_name(command)
brief = command.brief or ''
lines = self.construct_category(
name,
brief,
extra_indent=self.indent
)
if len(lines) + len(curr) > self.height - 2:
chunks.append(curr)
curr = []
curr.extend(lines)
if curr:
chunks.append(curr)
chunks_length = len(chunks)
for c, chunk in enumerate(chunks, 1):
title = self.construct_title(
self.get_cog_header(cog, c, chunks_length) or ''
)
fmt = self.get_cog_footer(cog, c, chunks_length) or ''
footer = self.construct_footer(fmt)
page_chunks = [
title,
*chunk,
footer
]
self.paginator.add_page(
'\u200b\n' + '\n'.join(page_chunks)
)
await self.send_page(page)
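# Illustrative sketch (not part of the original library): the header hooks above
# exist to be overridden for custom text. This assumed subclass only changes the
# returned strings; pagination and layout are inherited unchanged.
class _CustomHeaderHelpCommand(FortniteHelpCommand):
    def get_bot_header(self, page_num: int, pages_amount: int) -> str:
        return 'Help - page {0} / {1}'.format(page_num, pages_amount)
    def get_command_header(self, command: Command) -> str:
        return 'How to use | {0}{1}'.format(
            self.command_prefix,
            command.qualified_name
        )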
|
docs/_data/project_scrapper.py | tre3x/awesomeScripts | 245 | 12682298 | from bs4 import BeautifulSoup
import requests
url = "https://github.com/Py-Contributors/awesomeScripts/blob/master/README.md"
page = requests.get(url)
pagetext = page.text
def save_project():
soup = BeautifulSoup(pagetext, "lxml")
table = soup.find("table")
list_of_rows = []
for row in table.findAll('tr'):
list_of_cells = []
for cell in row.findAll(["th", "td"]):
text = cell.text
list_of_cells.append(text)
list_of_rows.append(list_of_cells)
file = open("projects.csv", "w")
for item in list_of_rows:
file.write(",".join(item))
file.write("\n")
file.close()
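# Assumed usage sketch: regenerate projects.csv by running this file directly.
if __name__ == "__main__":
    save_project()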
|
pymagnitude/third_party/allennlp/modules/matrix_attention/legacy_matrix_attention.py | tpeng/magnitude | 1,520 | 12682310 | <filename>pymagnitude/third_party/allennlp/modules/matrix_attention/legacy_matrix_attention.py<gh_stars>1000+
from __future__ import absolute_import
import torch
#overrides
from allennlp.modules.similarity_functions.dot_product import DotProductSimilarity
from allennlp.modules.similarity_functions.similarity_function import SimilarityFunction
from allennlp.modules.matrix_attention.matrix_attention import MatrixAttention
class LegacyMatrixAttention(MatrixAttention):
u"""
The legacy implementation of ``MatrixAttention``.
It should be considered deprecated as it uses much more memory than the newer specialized
``MatrixAttention`` modules.
Parameters
----------
similarity_function: ``SimilarityFunction``, optional (default=``DotProductSimilarity``)
The similarity function to use when computing the attention.
"""
def __init__(self, similarity_function = None) :
super(LegacyMatrixAttention, self).__init__()
self._similarity_function = similarity_function or DotProductSimilarity()
#overrides
def forward(self, matrix_1 , matrix_2 ) :
tiled_matrix_1 = matrix_1.unsqueeze(2).expand(matrix_1.size()[0],
matrix_1.size()[1],
matrix_2.size()[1],
matrix_1.size()[2])
tiled_matrix_2 = matrix_2.unsqueeze(1).expand(matrix_2.size()[0],
matrix_1.size()[1],
matrix_2.size()[1],
matrix_2.size()[2])
return self._similarity_function(tiled_matrix_1, tiled_matrix_2)
LegacyMatrixAttention = MatrixAttention.register(u"legacy")(LegacyMatrixAttention)
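# Illustrative sketch (not an official example): computing a similarity matrix
# between two batched sequence encodings with the default dot-product similarity.
# The tensor shapes here are arbitrary assumptions chosen for demonstration.
def _example_legacy_matrix_attention():
    attention = LegacyMatrixAttention()
    matrix_1 = torch.randn(2, 5, 7) # (batch_size, num_rows_1, embedding_dim)
    matrix_2 = torch.randn(2, 9, 7) # (batch_size, num_rows_2, embedding_dim)
    # The result has shape (batch_size, num_rows_1, num_rows_2).
    return attention(matrix_1, matrix_2)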
|
third_party/protobuf/3.6.1/python/mox.py | sevki/bazel | 4,071 | 12682316 | #!/usr/bin/python2.4
#
# Copyright 2008 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This file is used for testing. The original is at:
# http://code.google.com/p/pymox/
"""Mox, an object-mocking framework for Python.
Mox works in the record-replay-verify paradigm. When you first create
a mock object, it is in record mode. You then programmatically set
the expected behavior of the mock object (what methods are to be
called on it, with what parameters, what they should return, and in
what order).
Once you have set up the expected mock behavior, you put it in replay
mode. Now the mock responds to method calls just as you told it to.
If an unexpected method (or an expected method with unexpected
parameters) is called, then an exception will be raised.
Once you are done interacting with the mock, you need to verify that
all the expected interactions occurred. (Maybe your code exited
prematurely without calling some cleanup method!) The verify phase
ensures that every expected method was called; otherwise, an exception
will be raised.
Suggested usage / workflow:
# Create Mox factory
my_mox = Mox()
# Create a mock data access object
mock_dao = my_mox.CreateMock(DAOClass)
# Set up expected behavior
mock_dao.RetrievePersonWithIdentifier('1').AndReturn(person)
mock_dao.DeletePerson(person)
# Put mocks in replay mode
my_mox.ReplayAll()
# Inject mock object and run test
controller.SetDao(mock_dao)
controller.DeletePersonById('1')
# Verify all methods were called as expected
my_mox.VerifyAll()
"""
from collections import deque
import re
import types
import unittest
import stubout
class Error(AssertionError):
"""Base exception for this module."""
pass
class ExpectedMethodCallsError(Error):
"""Raised when Verify() is called before all expected methods have been called
"""
def __init__(self, expected_methods):
"""Init exception.
Args:
# expected_methods: A sequence of MockMethod objects that should have been
# called.
expected_methods: [MockMethod]
Raises:
ValueError: if expected_methods contains no methods.
"""
if not expected_methods:
raise ValueError("There must be at least one expected method")
Error.__init__(self)
self._expected_methods = expected_methods
def __str__(self):
calls = "\n".join(["%3d. %s" % (i, m)
for i, m in enumerate(self._expected_methods)])
return "Verify: Expected methods never called:\n%s" % (calls,)
class UnexpectedMethodCallError(Error):
"""Raised when an unexpected method is called.
This can occur if a method is called with incorrect parameters, or out of the
specified order.
"""
def __init__(self, unexpected_method, expected):
"""Init exception.
Args:
# unexpected_method: MockMethod that was called but was not at the head of
# the expected_method queue.
# expected: MockMethod or UnorderedGroup the method should have
# been in.
unexpected_method: MockMethod
expected: MockMethod or UnorderedGroup
"""
Error.__init__(self)
self._unexpected_method = unexpected_method
self._expected = expected
def __str__(self):
return "Unexpected method call: %s. Expecting: %s" % \
(self._unexpected_method, self._expected)
class UnknownMethodCallError(Error):
"""Raised if an unknown method is requested of the mock object."""
def __init__(self, unknown_method_name):
"""Init exception.
Args:
# unknown_method_name: Method call that is not part of the mocked class's
# public interface.
unknown_method_name: str
"""
Error.__init__(self)
self._unknown_method_name = unknown_method_name
def __str__(self):
return "Method called is not a member of the object: %s" % \
self._unknown_method_name
class Mox(object):
"""Mox: a factory for creating mock objects."""
# A list of types that should be stubbed out with MockObjects (as
# opposed to MockAnythings).
_USE_MOCK_OBJECT = [types.ClassType, types.InstanceType, types.ModuleType,
types.ObjectType, types.TypeType]
def __init__(self):
"""Initialize a new Mox."""
self._mock_objects = []
self.stubs = stubout.StubOutForTesting()
def CreateMock(self, class_to_mock):
"""Create a new mock object.
Args:
# class_to_mock: the class to be mocked
class_to_mock: class
Returns:
MockObject that can be used as the class_to_mock would be.
"""
new_mock = MockObject(class_to_mock)
self._mock_objects.append(new_mock)
return new_mock
def CreateMockAnything(self):
"""Create a mock that will accept any method calls.
This does not enforce an interface.
"""
new_mock = MockAnything()
self._mock_objects.append(new_mock)
return new_mock
def ReplayAll(self):
"""Set all mock objects to replay mode."""
for mock_obj in self._mock_objects:
mock_obj._Replay()
def VerifyAll(self):
"""Call verify on all mock objects created."""
for mock_obj in self._mock_objects:
mock_obj._Verify()
def ResetAll(self):
"""Call reset on all mock objects. This does not unset stubs."""
for mock_obj in self._mock_objects:
mock_obj._Reset()
def StubOutWithMock(self, obj, attr_name, use_mock_anything=False):
"""Replace a method, attribute, etc. with a Mock.
This will replace a class or module with a MockObject, and everything else
(method, function, etc) with a MockAnything. This can be overridden to
always use a MockAnything by setting use_mock_anything to True.
Args:
obj: A Python object (class, module, instance, callable).
attr_name: str. The name of the attribute to replace with a mock.
use_mock_anything: bool. True if a MockAnything should be used regardless
of the type of attribute.
"""
attr_to_replace = getattr(obj, attr_name)
if type(attr_to_replace) in self._USE_MOCK_OBJECT and not use_mock_anything:
stub = self.CreateMock(attr_to_replace)
else:
stub = self.CreateMockAnything()
self.stubs.Set(obj, attr_name, stub)
def UnsetStubs(self):
"""Restore stubs to their original state."""
self.stubs.UnsetAll()
def Replay(*args):
"""Put mocks into Replay mode.
Args:
# args is any number of mocks to put into replay mode.
"""
for mock in args:
mock._Replay()
def Verify(*args):
"""Verify mocks.
Args:
# args is any number of mocks to be verified.
"""
for mock in args:
mock._Verify()
def Reset(*args):
"""Reset mocks.
Args:
# args is any number of mocks to be reset.
"""
for mock in args:
mock._Reset()
class MockAnything:
"""A mock that can be used to mock anything.
This is helpful for mocking classes that do not provide a public interface.
"""
def __init__(self):
""" """
self._Reset()
def __getattr__(self, method_name):
"""Intercept method calls on this object.
A new MockMethod is returned that is aware of the MockAnything's
state (record or replay). The call will be recorded or replayed
by the MockMethod's __call__.
Args:
# method name: the name of the method being called.
method_name: str
Returns:
A new MockMethod aware of MockAnything's state (record or replay).
"""
return self._CreateMockMethod(method_name)
def _CreateMockMethod(self, method_name):
"""Create a new mock method call and return it.
Args:
# method name: the name of the method being called.
method_name: str
Returns:
A new MockMethod aware of MockAnything's state (record or replay).
"""
return MockMethod(method_name, self._expected_calls_queue,
self._replay_mode)
def __nonzero__(self):
"""Return 1 for nonzero so the mock can be used as a conditional."""
return 1
def __eq__(self, rhs):
"""Provide custom logic to compare objects."""
return (isinstance(rhs, MockAnything) and
self._replay_mode == rhs._replay_mode and
self._expected_calls_queue == rhs._expected_calls_queue)
def __ne__(self, rhs):
"""Provide custom logic to compare objects."""
return not self == rhs
def _Replay(self):
"""Start replaying expected method calls."""
self._replay_mode = True
def _Verify(self):
"""Verify that all of the expected calls have been made.
Raises:
ExpectedMethodCallsError: if there are still more method calls in the
expected queue.
"""
# If the list of expected calls is not empty, raise an exception
if self._expected_calls_queue:
# The last MultipleTimesGroup is not popped from the queue.
if (len(self._expected_calls_queue) == 1 and
isinstance(self._expected_calls_queue[0], MultipleTimesGroup) and
self._expected_calls_queue[0].IsSatisfied()):
pass
else:
raise ExpectedMethodCallsError(self._expected_calls_queue)
def _Reset(self):
"""Reset the state of this mock to record mode with an empty queue."""
# Maintain a list of method calls we are expecting
self._expected_calls_queue = deque()
# Make sure we are in setup mode, not replay mode
self._replay_mode = False
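# Illustrative sketch (not part of the original module): the record-replay-verify
# cycle from the module docstring, using only MockAnything and the Replay/Verify
# helpers defined above. The method name and arguments are assumptions.
def _example_mock_anything_cycle():
  mock = MockAnything()
  mock.SaveRecord('row-1').AndReturn(True)  # record an expected call
  Replay(mock)                              # switch the mock to replay mode
  result = mock.SaveRecord('row-1')         # replays the call, returning True
  Verify(mock)                              # confirms all expected calls were made
  return result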
class MockObject(MockAnything, object):
"""A mock object that simulates the public/protected interface of a class."""
def __init__(self, class_to_mock):
"""Initialize a mock object.
This determines the methods and properties of the class and stores them.
Args:
# class_to_mock: class to be mocked
class_to_mock: class
"""
# This is used to hack around the mixin/inheritance of MockAnything, which
# is not a proper object (it can be anything. :-)
MockAnything.__dict__['__init__'](self)
# Get a list of all the public and special methods we should mock.
self._known_methods = set()
self._known_vars = set()
self._class_to_mock = class_to_mock
for method in dir(class_to_mock):
if callable(getattr(class_to_mock, method)):
self._known_methods.add(method)
else:
self._known_vars.add(method)
def __getattr__(self, name):
"""Intercept attribute request on this object.
If the attribute is a public class variable, it will be returned and not
recorded as a call.
If the attribute is not a variable, it is handled like a method
call. The method name is checked against the set of mockable
methods, and a new MockMethod is returned that is aware of the
MockObject's state (record or replay). The call will be recorded
or replayed by the MockMethod's __call__.
Args:
# name: the name of the attribute being requested.
name: str
Returns:
Either a class variable or a new MockMethod that is aware of the state
of the mock (record or replay).
Raises:
UnknownMethodCallError if the MockObject does not mock the requested
method.
"""
if name in self._known_vars:
return getattr(self._class_to_mock, name)
if name in self._known_methods:
return self._CreateMockMethod(name)
raise UnknownMethodCallError(name)
def __eq__(self, rhs):
"""Provide custom logic to compare objects."""
return (isinstance(rhs, MockObject) and
self._class_to_mock == rhs._class_to_mock and
self._replay_mode == rhs._replay_mode and
self._expected_calls_queue == rhs._expected_calls_queue)
def __setitem__(self, key, value):
"""Provide custom logic for mocking classes that support item assignment.
Args:
key: Key to set the value for.
value: Value to set.
Returns:
Expected return value in replay mode. A MockMethod object for the
__setitem__ method that has already been called if not in replay mode.
Raises:
TypeError if the underlying class does not support item assignment.
UnexpectedMethodCallError if the object does not expect the call to
__setitem__.
"""
setitem = self._class_to_mock.__dict__.get('__setitem__', None)
# Verify the class supports item assignment.
if setitem is None:
raise TypeError('object does not support item assignment')
# If we are in replay mode then simply call the mock __setitem__ method.
if self._replay_mode:
return MockMethod('__setitem__', self._expected_calls_queue,
self._replay_mode)(key, value)
# Otherwise, create a mock method __setitem__.
return self._CreateMockMethod('__setitem__')(key, value)
def __getitem__(self, key):
"""Provide custom logic for mocking classes that are subscriptable.
Args:
key: Key to return the value for.
Returns:
Expected return value in replay mode. A MockMethod object for the
__getitem__ method that has already been called if not in replay mode.
Raises:
TypeError if the underlying class is not subscriptable.
UnexpectedMethodCallError if the object does not expect the call to
__setitem__.
"""
getitem = self._class_to_mock.__dict__.get('__getitem__', None)
# Verify the class supports item assignment.
if getitem is None:
raise TypeError('unsubscriptable object')
# If we are in replay mode then simply call the mock __getitem__ method.
if self._replay_mode:
return MockMethod('__getitem__', self._expected_calls_queue,
self._replay_mode)(key)
# Otherwise, create a mock method __getitem__.
return self._CreateMockMethod('__getitem__')(key)
def __call__(self, *params, **named_params):
"""Provide custom logic for mocking classes that are callable."""
# Verify the class we are mocking is callable
callable = self._class_to_mock.__dict__.get('__call__', None)
if callable is None:
raise TypeError('Not callable')
# Because the call is happening directly on this object instead of a method,
# the call on the mock method is made right here
mock_method = self._CreateMockMethod('__call__')
return mock_method(*params, **named_params)
@property
def __class__(self):
"""Return the class that is being mocked."""
return self._class_to_mock
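# Illustrative sketch (not part of the original module): mocking a concrete class
# with Mox.CreateMock so that only methods on the class's interface can be
# recorded. _ExampleDao and its Fetch method are assumptions for the example.
class _ExampleDao(object):
  def Fetch(self, key):
    raise NotImplementedError
def _example_mock_object_cycle():
  mox_factory = Mox()
  dao = mox_factory.CreateMock(_ExampleDao)
  dao.Fetch('key-1').AndReturn('value-1')  # record the expected call
  mox_factory.ReplayAll()
  value = dao.Fetch('key-1')               # replays and returns 'value-1'
  mox_factory.VerifyAll()
  return value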
class MockMethod(object):
"""Callable mock method.
A MockMethod should act exactly like the method it mocks, accepting parameters
and returning a value, or throwing an exception (as specified). When this
method is called, it can optionally verify whether the called method (name and
signature) matches the expected method.
"""
def __init__(self, method_name, call_queue, replay_mode):
"""Construct a new mock method.
Args:
# method_name: the name of the method
# call_queue: deque of calls, verify this call against the head, or add
# this call to the queue.
# replay_mode: False if we are recording, True if we are verifying calls
# against the call queue.
method_name: str
call_queue: list or deque
replay_mode: bool
"""
self._name = method_name
self._call_queue = call_queue
if not isinstance(call_queue, deque):
self._call_queue = deque(self._call_queue)
self._replay_mode = replay_mode
self._params = None
self._named_params = None
self._return_value = None
self._exception = None
self._side_effects = None
def __call__(self, *params, **named_params):
"""Log parameters and return the specified return value.
If the Mock(Anything/Object) associated with this call is in record mode,
this MockMethod will be pushed onto the expected call queue. If the mock
is in replay mode, this will pop a MockMethod off the top of the queue and
verify this call is equal to the expected call.
Raises:
UnexpectedMethodCall if this call is supposed to match an expected method
call and it does not.
"""
self._params = params
self._named_params = named_params
if not self._replay_mode:
self._call_queue.append(self)
return self
expected_method = self._VerifyMethodCall()
if expected_method._side_effects:
expected_method._side_effects(*params, **named_params)
if expected_method._exception:
raise expected_method._exception
return expected_method._return_value
def __getattr__(self, name):
"""Raise an AttributeError with a helpful message."""
raise AttributeError('MockMethod has no attribute "%s". '
'Did you remember to put your mocks in replay mode?' % name)
def _PopNextMethod(self):
"""Pop the next method from our call queue."""
try:
return self._call_queue.popleft()
except IndexError:
raise UnexpectedMethodCallError(self, None)
def _VerifyMethodCall(self):
"""Verify the called method is expected.
This can be an ordered method, or part of an unordered set.
Returns:
The expected mock method.
Raises:
UnexpectedMethodCall if the method called was not expected.
"""
expected = self._PopNextMethod()
# Loop here, because we might have a MethodGroup followed by another
# group.
while isinstance(expected, MethodGroup):
expected, method = expected.MethodCalled(self)
if method is not None:
return method
# This is a mock method, so just check equality.
if expected != self:
raise UnexpectedMethodCallError(self, expected)
return expected
def __str__(self):
params = ', '.join(
[repr(p) for p in self._params or []] +
['%s=%r' % x for x in sorted((self._named_params or {}).items())])
desc = "%s(%s) -> %r" % (self._name, params, self._return_value)
return desc
def __eq__(self, rhs):
"""Test whether this MockMethod is equivalent to another MockMethod.
Args:
# rhs: the right hand side of the test
rhs: MockMethod
"""
return (isinstance(rhs, MockMethod) and
self._name == rhs._name and
self._params == rhs._params and
self._named_params == rhs._named_params)
def __ne__(self, rhs):
"""Test whether this MockMethod is not equivalent to another MockMethod.
Args:
# rhs: the right hand side of the test
rhs: MockMethod
"""
return not self == rhs
def GetPossibleGroup(self):
"""Returns a possible group from the end of the call queue or None if no
other methods are on the stack.
"""
# Remove this method from the tail of the queue so we can add it to a group.
this_method = self._call_queue.pop()
assert this_method == self
# Determine if the tail of the queue is a group, or just a regular ordered
# mock method.
group = None
try:
group = self._call_queue[-1]
except IndexError:
pass
return group
def _CheckAndCreateNewGroup(self, group_name, group_class):
"""Checks if the last method (a possible group) is an instance of our
group_class. Adds the current method to this group or creates a new one.
Args:
group_name: the name of the group.
group_class: the class used to create instance of this new group
"""
group = self.GetPossibleGroup()
# If this is a group, and it is the correct group, add the method.
if isinstance(group, group_class) and group.group_name() == group_name:
group.AddMethod(self)
return self
# Create a new group and add the method.
new_group = group_class(group_name)
new_group.AddMethod(self)
self._call_queue.append(new_group)
return self
def InAnyOrder(self, group_name="default"):
"""Move this method into a group of unordered calls.
A group of unordered calls must be defined together, and must be executed
in full before the next expected method can be called. There can be
multiple groups that are expected serially, if they are given
different group names. The same group name can be reused if there is a
standard method call, or a group with a different name, spliced between
usages.
Args:
group_name: the name of the unordered group.
Returns:
self
"""
return self._CheckAndCreateNewGroup(group_name, UnorderedGroup)
def MultipleTimes(self, group_name="default"):
"""Move this method into group of calls which may be called multiple times.
A group of repeating calls must be defined together, and must be executed in
    full before the next expected method can be called.
Args:
group_name: the name of the unordered group.
Returns:
self
"""
return self._CheckAndCreateNewGroup(group_name, MultipleTimesGroup)
def AndReturn(self, return_value):
"""Set the value to return when this method is called.
Args:
# return_value can be anything.
"""
self._return_value = return_value
return return_value
def AndRaise(self, exception):
"""Set the exception to raise when this method is called.
Args:
# exception: the exception to raise when this method is called.
exception: Exception
"""
self._exception = exception
def WithSideEffects(self, side_effects):
"""Set the side effects that are simulated when this method is called.
Args:
side_effects: A callable which modifies the parameters or other relevant
state which a given test case depends on.
Returns:
Self for chaining with AndReturn and AndRaise.
"""
self._side_effects = side_effects
return self
class Comparator:
"""Base class for all Mox comparators.
A Comparator can be used as a parameter to a mocked method when the exact
value is not known. For example, the code you are testing might build up a
long SQL string that is passed to your mock DAO. You're only interested that
the IN clause contains the proper primary keys, so you can set your mock
up as follows:
mock_dao.RunQuery(StrContains('IN (1, 2, 4, 5)')).AndReturn(mock_result)
Now whatever query is passed in must contain the string 'IN (1, 2, 4, 5)'.
A Comparator may replace one or more parameters, for example:
# return at most 10 rows
mock_dao.RunQuery(StrContains('SELECT'), 10)
or
# Return some non-deterministic number of rows
mock_dao.RunQuery(StrContains('SELECT'), IsA(int))
"""
def equals(self, rhs):
"""Special equals method that all comparators must implement.
Args:
rhs: any python object
"""
raise NotImplementedError('method must be implemented by a subclass.')
def __eq__(self, rhs):
return self.equals(rhs)
def __ne__(self, rhs):
return not self.equals(rhs)
class IsA(Comparator):
"""This class wraps a basic Python type or class. It is used to verify
that a parameter is of the given type or class.
Example:
mock_dao.Connect(IsA(DbConnectInfo))
"""
def __init__(self, class_name):
"""Initialize IsA
Args:
class_name: basic python type or a class
"""
self._class_name = class_name
def equals(self, rhs):
"""Check to see if the RHS is an instance of class_name.
Args:
# rhs: the right hand side of the test
rhs: object
Returns:
bool
"""
try:
return isinstance(rhs, self._class_name)
except TypeError:
# Check raw types if there was a type error. This is helpful for
# things like cStringIO.StringIO.
return type(rhs) == type(self._class_name)
def __repr__(self):
return str(self._class_name)
class IsAlmost(Comparator):
"""Comparison class used to check whether a parameter is nearly equal
to a given value. Generally useful for floating point numbers.
Example mock_dao.SetTimeout((IsAlmost(3.9)))
"""
def __init__(self, float_value, places=7):
"""Initialize IsAlmost.
Args:
float_value: The value for making the comparison.
places: The number of decimal places to round to.
"""
self._float_value = float_value
self._places = places
def equals(self, rhs):
"""Check to see if RHS is almost equal to float_value
Args:
rhs: the value to compare to float_value
Returns:
bool
"""
try:
return round(rhs-self._float_value, self._places) == 0
except TypeError:
# This is probably because either float_value or rhs is not a number.
return False
def __repr__(self):
return str(self._float_value)
class StrContains(Comparator):
"""Comparison class used to check whether a substring exists in a
string parameter. This can be useful in mocking a database with SQL
passed in as a string parameter, for example.
Example:
mock_dao.RunQuery(StrContains('IN (1, 2, 4, 5)')).AndReturn(mock_result)
"""
def __init__(self, search_string):
"""Initialize.
Args:
# search_string: the string you are searching for
search_string: str
"""
self._search_string = search_string
def equals(self, rhs):
"""Check to see if the search_string is contained in the rhs string.
Args:
# rhs: the right hand side of the test
rhs: object
Returns:
bool
"""
try:
return rhs.find(self._search_string) > -1
except Exception:
return False
def __repr__(self):
return '<str containing \'%s\'>' % self._search_string
class Regex(Comparator):
"""Checks if a string matches a regular expression.
This uses a given regular expression to determine equality.
"""
def __init__(self, pattern, flags=0):
"""Initialize.
Args:
# pattern is the regular expression to search for
pattern: str
# flags passed to re.compile function as the second argument
flags: int
"""
self.regex = re.compile(pattern, flags=flags)
def equals(self, rhs):
"""Check to see if rhs matches regular expression pattern.
Returns:
bool
"""
return self.regex.search(rhs) is not None
def __repr__(self):
s = '<regular expression \'%s\'' % self.regex.pattern
if self.regex.flags:
s += ', flags=%d' % self.regex.flags
s += '>'
return s
class In(Comparator):
"""Checks whether an item (or key) is in a list (or dict) parameter.
Example:
mock_dao.GetUsersInfo(In('expectedUserName')).AndReturn(mock_result)
"""
def __init__(self, key):
"""Initialize.
Args:
# key is any thing that could be in a list or a key in a dict
"""
self._key = key
def equals(self, rhs):
"""Check to see whether key is in rhs.
Args:
rhs: dict
Returns:
bool
"""
return self._key in rhs
def __repr__(self):
return '<sequence or map containing \'%s\'>' % self._key
class ContainsKeyValue(Comparator):
"""Checks whether a key/value pair is in a dict parameter.
Example:
mock_dao.UpdateUsers(ContainsKeyValue('stevepm', stevepm_user_info))
"""
def __init__(self, key, value):
"""Initialize.
Args:
# key: a key in a dict
# value: the corresponding value
"""
self._key = key
self._value = value
def equals(self, rhs):
"""Check whether the given key/value pair is in the rhs dict.
Returns:
bool
"""
try:
return rhs[self._key] == self._value
except Exception:
return False
def __repr__(self):
return '<map containing the entry \'%s: %s\'>' % (self._key, self._value)
class SameElementsAs(Comparator):
"""Checks whether iterables contain the same elements (ignoring order).
Example:
mock_dao.ProcessUsers(SameElementsAs('stevepm', 'salomaki'))
"""
def __init__(self, expected_seq):
"""Initialize.
Args:
expected_seq: a sequence
"""
self._expected_seq = expected_seq
def equals(self, actual_seq):
"""Check to see whether actual_seq has same elements as expected_seq.
Args:
actual_seq: sequence
Returns:
bool
"""
try:
expected = dict([(element, None) for element in self._expected_seq])
actual = dict([(element, None) for element in actual_seq])
except TypeError:
# Fall back to slower list-compare if any of the objects are unhashable.
expected = list(self._expected_seq)
actual = list(actual_seq)
expected.sort()
actual.sort()
return expected == actual
def __repr__(self):
return '<sequence with same elements as \'%s\'>' % self._expected_seq
class And(Comparator):
"""Evaluates one or more Comparators on RHS and returns an AND of the results.
"""
def __init__(self, *args):
"""Initialize.
Args:
*args: One or more Comparator
"""
self._comparators = args
def equals(self, rhs):
"""Checks whether all Comparators are equal to rhs.
Args:
# rhs: can be anything
Returns:
bool
"""
for comparator in self._comparators:
if not comparator.equals(rhs):
return False
return True
def __repr__(self):
return '<AND %s>' % str(self._comparators)
class Or(Comparator):
"""Evaluates one or more Comparators on RHS and returns an OR of the results.
"""
def __init__(self, *args):
"""Initialize.
Args:
*args: One or more Mox comparators
"""
self._comparators = args
def equals(self, rhs):
"""Checks whether any Comparator is equal to rhs.
Args:
# rhs: can be anything
Returns:
bool
"""
for comparator in self._comparators:
if comparator.equals(rhs):
return True
return False
def __repr__(self):
return '<OR %s>' % str(self._comparators)
class Func(Comparator):
"""Call a function that should verify the parameter passed in is correct.
You may need the ability to perform more advanced operations on the parameter
in order to validate it. You can use this to have a callable validate any
parameter. The callable should return either True or False.
Example:
def myParamValidator(param):
# Advanced logic here
return True
mock_dao.DoSomething(Func(myParamValidator), true)
"""
def __init__(self, func):
"""Initialize.
Args:
func: callable that takes one parameter and returns a bool
"""
self._func = func
def equals(self, rhs):
"""Test whether rhs passes the function test.
rhs is passed into func.
Args:
rhs: any python object
Returns:
the result of func(rhs)
"""
return self._func(rhs)
def __repr__(self):
return str(self._func)
class IgnoreArg(Comparator):
"""Ignore an argument.
This can be used when we don't care about an argument of a method call.
Example:
# Check if CastMagic is called with 3 as first arg and 'disappear' as third.
mymock.CastMagic(3, IgnoreArg(), 'disappear')
"""
def equals(self, unused_rhs):
"""Ignores arguments and returns True.
Args:
unused_rhs: any python object
Returns:
always returns True
"""
return True
def __repr__(self):
return '<IgnoreArg>'
class MethodGroup(object):
"""Base class containing common behaviour for MethodGroups."""
def __init__(self, group_name):
self._group_name = group_name
def group_name(self):
return self._group_name
def __str__(self):
return '<%s "%s">' % (self.__class__.__name__, self._group_name)
def AddMethod(self, mock_method):
raise NotImplementedError
def MethodCalled(self, mock_method):
raise NotImplementedError
def IsSatisfied(self):
raise NotImplementedError
class UnorderedGroup(MethodGroup):
"""UnorderedGroup holds a set of method calls that may occur in any order.
This construct is helpful for non-deterministic events, such as iterating
over the keys of a dict.
"""
def __init__(self, group_name):
super(UnorderedGroup, self).__init__(group_name)
self._methods = []
def AddMethod(self, mock_method):
"""Add a method to this group.
Args:
mock_method: A mock method to be added to this group.
"""
self._methods.append(mock_method)
def MethodCalled(self, mock_method):
"""Remove a method call from the group.
If the method is not in the set, an UnexpectedMethodCallError will be
raised.
Args:
mock_method: a mock method that should be equal to a method in the group.
Returns:
The mock method from the group
Raises:
UnexpectedMethodCallError if the mock_method was not in the group.
"""
# Check to see if this method exists, and if so, remove it from the set
# and return it.
for method in self._methods:
if method == mock_method:
# Remove the called mock_method instead of the method in the group.
# The called method will match any comparators when equality is checked
# during removal. The method in the group could pass a comparator to
# another comparator during the equality check.
self._methods.remove(mock_method)
# If this group is not empty, put it back at the head of the queue.
if not self.IsSatisfied():
mock_method._call_queue.appendleft(self)
return self, method
raise UnexpectedMethodCallError(mock_method, self)
def IsSatisfied(self):
"""Return True if there are not any methods in this group."""
return len(self._methods) == 0
class MultipleTimesGroup(MethodGroup):
"""MultipleTimesGroup holds methods that may be called any number of times.
Note: Each method must be called at least once.
This is helpful, if you don't know or care how many times a method is called.
"""
def __init__(self, group_name):
super(MultipleTimesGroup, self).__init__(group_name)
self._methods = set()
self._methods_called = set()
def AddMethod(self, mock_method):
"""Add a method to this group.
Args:
mock_method: A mock method to be added to this group.
"""
self._methods.add(mock_method)
def MethodCalled(self, mock_method):
"""Remove a method call from the group.
If the method is not in the set, an UnexpectedMethodCallError will be
raised.
Args:
mock_method: a mock method that should be equal to a method in the group.
Returns:
The mock method from the group
Raises:
UnexpectedMethodCallError if the mock_method was not in the group.
"""
# Check to see if this method exists, and if so add it to the set of
# called methods.
for method in self._methods:
if method == mock_method:
self._methods_called.add(mock_method)
# Always put this group back on top of the queue, because we don't know
# when we are done.
mock_method._call_queue.appendleft(self)
return self, method
if self.IsSatisfied():
next_method = mock_method._PopNextMethod();
return next_method, None
else:
raise UnexpectedMethodCallError(mock_method, self)
def IsSatisfied(self):
"""Return True if all methods in this group are called at least once."""
# NOTE(psycho): We can't use the simple set difference here because we want
# to match different parameters which are considered the same e.g. IsA(str)
# and some string. This solution is O(n^2) but n should be small.
tmp = self._methods.copy()
for called in self._methods_called:
for expected in tmp:
if called == expected:
tmp.remove(expected)
if not tmp:
return True
break
return False
class MoxMetaTestBase(type):
"""Metaclass to add mox cleanup and verification to every test.
As the mox unit testing class is being constructed (MoxTestBase or a
subclass), this metaclass will modify all test functions to call the
CleanUpMox method of the test class after they finish. This means that
unstubbing and verifying will happen for every test with no additional code,
and any failures will result in test failures as opposed to errors.
"""
def __init__(cls, name, bases, d):
type.__init__(cls, name, bases, d)
# also get all the attributes from the base classes to account
# for a case when test class is not the immediate child of MoxTestBase
for base in bases:
for attr_name in dir(base):
d[attr_name] = getattr(base, attr_name)
for func_name, func in d.items():
if func_name.startswith('test') and callable(func):
setattr(cls, func_name, MoxMetaTestBase.CleanUpTest(cls, func))
@staticmethod
def CleanUpTest(cls, func):
"""Adds Mox cleanup code to any MoxTestBase method.
Always unsets stubs after a test. Will verify all mocks for tests that
otherwise pass.
Args:
cls: MoxTestBase or subclass; the class whose test method we are altering.
func: method; the method of the MoxTestBase test class we wish to alter.
Returns:
The modified method.
"""
def new_method(self, *args, **kwargs):
mox_obj = getattr(self, 'mox', None)
cleanup_mox = False
if mox_obj and isinstance(mox_obj, Mox):
cleanup_mox = True
try:
func(self, *args, **kwargs)
finally:
if cleanup_mox:
mox_obj.UnsetStubs()
if cleanup_mox:
mox_obj.VerifyAll()
new_method.__name__ = func.__name__
new_method.__doc__ = func.__doc__
new_method.__module__ = func.__module__
return new_method
class MoxTestBase(unittest.TestCase):
"""Convenience test class to make stubbing easier.
Sets up a "mox" attribute which is an instance of Mox - any mox tests will
want this. Also automatically unsets any stubs and verifies that all mock
methods have been called at the end of each test, eliminating boilerplate
code.
"""
__metaclass__ = MoxMetaTestBase
def setUp(self):
self.mox = Mox()
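# Illustrative sketch of the pattern this class enables (my_dao and FetchUser are hypothetical
# names; StubOutWithMock/ReplayAll/AndReturn are the regular Mox record-replay calls):
#
#   class MyTest(MoxTestBase):
#     def testFetchUser(self):
#       self.mox.StubOutWithMock(my_dao, 'FetchUser')
#       my_dao.FetchUser('alice').AndReturn({'name': 'alice'})
#       self.mox.ReplayAll()
#       my_dao.FetchUser('alice')
#       # UnsetStubs() and VerifyAll() run automatically via MoxMetaTestBase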
|
jarbas/core/migrations/0002_add_indexes.py | vbarceloscs/serenata-de-amor | 3,001 | 12682324 | <reponame>vbarceloscs/serenata-de-amor
# -*- coding: utf-8 -*-
# Generated by Django 1.10.1 on 2016-09-08 10:41
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('core', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='document',
name='applicant_id',
field=models.IntegerField(db_index=True, verbose_name='Applicant ID'),
),
migrations.AlterField(
model_name='document',
name='cnpj_cpf',
field=models.CharField(db_index=True, max_length=14, verbose_name='CNPJ or CPF'),
),
migrations.AlterField(
model_name='document',
name='congressperson_id',
field=models.IntegerField(db_index=True, verbose_name='Congressperson ID'),
),
migrations.AlterField(
model_name='document',
name='congressperson_name',
field=models.CharField(max_length=128, verbose_name='Congressperson name'),
),
migrations.AlterField(
model_name='document',
name='document_id',
field=models.IntegerField(db_index=True, verbose_name='Document ID'),
),
migrations.AlterField(
model_name='document',
name='document_number',
field=models.CharField(max_length=128, verbose_name='Document number'),
),
migrations.AlterField(
model_name='document',
name='document_type',
field=models.IntegerField(db_index=True, verbose_name='Document type'),
),
migrations.AlterField(
model_name='document',
name='document_value',
field=models.DecimalField(db_index=True, decimal_places=3, max_digits=10, verbose_name='Document value'),
),
migrations.AlterField(
model_name='document',
name='leg_of_the_trip',
field=models.CharField(max_length=128, verbose_name='Leg of the trip'),
),
migrations.AlterField(
model_name='document',
name='month',
field=models.IntegerField(db_index=True, verbose_name='Month'),
),
migrations.AlterField(
model_name='document',
name='net_value',
field=models.DecimalField(db_index=True, decimal_places=3, max_digits=10, verbose_name='Net value'),
),
migrations.AlterField(
model_name='document',
name='party',
field=models.CharField(db_index=True, max_length=16, verbose_name='Party'),
),
migrations.AlterField(
model_name='document',
name='passenger',
field=models.CharField(max_length=128, verbose_name='Passenger'),
),
migrations.AlterField(
model_name='document',
name='reimbursement_number',
field=models.IntegerField(db_index=True, verbose_name='Reimbursement number'),
),
migrations.AlterField(
model_name='document',
name='reimbursement_value',
            field=models.DecimalField(db_index=True, decimal_places=3, max_digits=10, verbose_name='Reimbursement value'),
),
migrations.AlterField(
model_name='document',
name='remark_value',
field=models.DecimalField(db_index=True, decimal_places=3, max_digits=10, verbose_name='Remark value'),
),
migrations.AlterField(
model_name='document',
name='subquota_description',
            field=models.CharField(max_length=128, verbose_name='Subquota description'),
),
migrations.AlterField(
model_name='document',
name='subquota_group_description',
field=models.CharField(max_length=128, verbose_name='Subquota group description'),
),
migrations.AlterField(
model_name='document',
name='subquota_group_id',
field=models.IntegerField(db_index=True, verbose_name='Subquota group ID'),
),
migrations.AlterField(
model_name='document',
name='subquota_number',
            field=models.IntegerField(db_index=True, verbose_name='Subquota ID'),
),
migrations.AlterField(
model_name='document',
name='term',
field=models.IntegerField(db_index=True, verbose_name='Term'),
),
migrations.AlterField(
model_name='document',
name='year',
field=models.IntegerField(db_index=True, verbose_name='Year'),
),
]
|
tools/mo/openvino/tools/mo/utils/ir_reader/extenders/variadic_split_extender.py | ryanloney/openvino-1 | 1,127 | 12682337 | <reponame>ryanloney/openvino-1
# Copyright (C) 2018-2022 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
from openvino.tools.mo.utils.graph import Node
from openvino.tools.mo.utils.ir_reader.extender import Extender
class VariadicSplit_extender(Extender):
op = 'VariadicSplit'
@staticmethod
def extend(op: Node):
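        # VariadicSplit produces a variable number of outputs, so the count is taken
        # from the ports recorded on the restored IR node.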
op['out_ports_count'] = len(op.ports)
|
Bit Manipulation/476. Number Complement.py | beckswu/Leetcode | 138 | 12682342 | <filename>Bit Manipulation/476. Number Complement.py
class Solution:
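    # Walk num bit by bit; wherever num has a 0 bit, set that bit in the result.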
def findComplement(self, num: int) -> int:
res =i = 0
while num:
if not num & 1:
res |= 1 << i
num = num >> 1
i += 1
return res
class Solution:
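    # Grow i to the smallest power of two above num; (i - 1) is then an all-ones mask to XOR with.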
def findComplement(self, num: int) -> int:
i = 1
while i <= num:
i = i << 1
return (i - 1) ^ num
class Solution:
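    # Use a shifted copy to walk num's bit length, XOR-ing each of those positions to flip it.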
def findComplement(self, num: int) -> int:
copy = num;
i = 0;
while copy != 0 :
copy >>= 1;
num ^= (1<<i);
i += 1;
return num;
class Solution:
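    # Build an all-ones mask at least as wide as num, then AND it with ~num.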
def findComplement(self, num: int) -> int:
mask = 1
while( mask < num):
mask = (mask << 1) | 1
return ~num & mask
class Solution:
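    # n becomes an all-ones mask covering num, so n - num equals the bitwise complement.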
def findComplement(self, num: int) -> int:
n = 0;
while (n < num):
n = (n << 1) | 1;
        return n - num;
|
ctools/worker/agent/base_agent.py | XinyuJing/DI-star | 267 | 12682355 | <gh_stars>100-1000
from abc import ABC
import copy
from collections import OrderedDict
from typing import Any, Union, Optional, Dict, List
import torch
from .agent_plugin import register_plugin
class BaseAgent(ABC):
r"""
Overview:
the base agent class
Interfaces:
__init__, forward, mode, state_dict, load_state_dict, reset
"""
def __init__(self, model: torch.nn.Module, plugin_cfg: Union[OrderedDict, None]) -> None:
r"""
Overview:
init the model and register plugins
Arguments:
- model (:obj:`torch.nn.Module`): the model of the agent
- plugin_cfg (:obj:`Union[OrderedDict, None]`): the plugin config to register
"""
self._model = model
self._plugin_cfg = plugin_cfg
register_plugin(self, plugin_cfg)
def forward(self, data: Any, param: Optional[dict] = None) -> Any:
r"""
Overview:
            forward method will call the forward method of the agent's model
Arguments:
- data (:obj:`Any`): the input data
            - param (:obj:`dict` or None): the optional parameters, default set to None
Returns:
- output (:obj:`Any`): the output calculated by model
"""
if param is not None:
return self._model(data, **param)
else:
return self._model(data)
def mode(self, train: bool) -> None:
r"""
Overview:
call the model's function accordingly
Arguments:
- train (:obj:`bool`): whether to call the train method or eval method
"""
if train:
self._model.train()
else:
self._model.eval()
@property
def model(self) -> torch.nn.Module:
return self._model
@model.setter
def model(self, _model: torch.nn.Module) -> None:
self._model = _model
def state_dict(self) -> dict:
r"""
Overview:
return the state_dict
Returns:
- ret (:obj:`dict`): the returned state_dict, while the ret['model'] is the model's state_dict
"""
return {'model': self._model.state_dict()}
def load_state_dict(self, state_dict: dict) -> None:
r"""
Overview:
load the state_dict to model
Arguments:
- state_dict (:obj:`dict`): the input state_dict the model will load
"""
self._model.load_state_dict(state_dict['model'])
def reset(self) -> None:
pass
model_plugin_cfg_set = set(['main', 'target', 'teacher'])
class AgentAggregator(object):
r"""
Overview:
the AgentAggregator helps to build an agent according to the given input
Interfaces:
__init__, __getattr__
"""
def __init__(
self, agent_type: type, model: Union[torch.nn.Module, List[torch.nn.Module]], plugin_cfg: Dict[str,
OrderedDict]
) -> None:
r"""
Overview:
            __init__ of the AgentAggregator builds an object holding multiple agents in ._agent
Arguments:
- agent_type (:obj:`type`): the based class type of the agents in ._agent
- model (:obj:`torch.nn.Module`): the model of agents
- plugin_cfg (:obj:`Dict[str, OrderedDict])`): the plugin configs of agents
"""
assert issubclass(agent_type, BaseAgent)
assert set(plugin_cfg.keys()
).issubset(model_plugin_cfg_set), '{}-{}'.format(set(plugin_cfg.keys()), model_plugin_cfg_set)
if isinstance(model, torch.nn.Module):
if len(plugin_cfg) == 1:
model = [model]
else:
model = [model] + [copy.deepcopy(model) for _ in range(len(plugin_cfg) - 1)]
self._agent = {}
for i, k in enumerate(plugin_cfg):
self._agent[k] = agent_type(model[i], plugin_cfg[k])
def __getattr__(self, key: str) -> Any:
r"""
Overview:
            get the attribute in key
Arguments:
- key (:obj:`str`): the key to query
Returns:
- ret (:obj:`Any`): the return attribute
.. note::
in usage, if you want to get the attribute "attr" in agent[k], you should query k + "_" + "attr"
"""
if len(self._agent) == 1:
return getattr(self._agent['main'], key)
else:
name = 'main'
for k in self._agent:
if key.startswith(k):
name = k
key = key.split(k + '_')[1]
break
return getattr(self._agent[name], key)
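# Illustrative usage sketch (the model object and the empty plugin configs below are
# assumptions, not taken from this file):
#
#   plugin_cfg = {'main': OrderedDict(), 'target': OrderedDict()}
#   agent = AgentAggregator(BaseAgent, my_torch_model, plugin_cfg)
#   agent.mode(train=False)            # no known prefix -> dispatched to the 'main' agent
#   sd = agent.target_state_dict()     # 'target_' prefix routes to the 'target' agent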
|
recipes/crashpad/all/conanfile.py | rockandsalt/conan-center-index | 562 | 12682361 | <filename>recipes/crashpad/all/conanfile.py
from conans import AutoToolsBuildEnvironment, ConanFile, tools
from conans.errors import ConanInvalidConfiguration
from contextlib import contextmanager
import os
import textwrap
required_conan_version = ">=1.33.0"
class CrashpadConan(ConanFile):
name = "crashpad"
description = "Crashpad is a crash-reporting system."
url = "https://github.com/conan-io/conan-center-index"
topics = ("conan", "crashpad", "crash", "error", "stacktrace", "collecting", "reporting")
license = "Apache-2.0"
homepage = "https://chromium.googlesource.com/crashpad/crashpad/+/master/README.md"
provides = "crashpad", "mini_chromium"
settings = "os", "arch", "compiler", "build_type"
options = {
"fPIC": [True, False],
"http_transport": ["libcurl", "socket", None],
"with_tls": ["openssl", False],
}
default_options = {
"fPIC": True,
"http_transport": None,
"with_tls": "openssl",
}
exports_sources = "patches/*"
@property
def _source_subfolder(self):
return "source_subfolder"
def _minimum_compiler_cxx14(self):
return {
"apple-clang": 10,
"gcc": 5,
"clang": "3.9",
"Visual Studio": 14,
}.get(str(self.settings.compiler))
def config_options(self):
if self.settings.os == "Windows":
del self.options.fPIC
if self.settings.os in ("Linux", "FreeBSD"):
self.options.http_transport = "libcurl"
elif self.settings.os == "Android":
self.options.http_transport = "socket"
def build_requirements(self):
self.build_requires("ninja/1.10.2")
self.build_requires("gn/cci.20210429")
def requirements(self):
# FIXME: use mini_chromium conan package instead of embedded package (if possible)
self.requires("zlib/1.2.11")
if self.settings.os in ("Linux", "FreeBSD"):
self.requires("linux-syscall-support/cci.20200813")
if self.options.http_transport != "socket":
del self.options.with_tls
if self.options.http_transport == "libcurl":
self.requires("libcurl/7.75.0")
if self.options.get_safe("with_tls") == "openssl":
self.requires("openssl/1.1.1k")
def validate(self):
if self.settings.compiler == "Visual Studio":
if self.options.http_transport in ("libcurl", "socket"):
raise ConanInvalidConfiguration("http_transport={} is not valid when building with Visual Studio".format(self.options.http_transport))
if self.options.http_transport == "libcurl":
if not self.options["libcurl"].shared:
# FIXME: is this true?
self.output.warn("crashpad needs a shared libcurl library")
min_compiler_version = self._minimum_compiler_cxx14()
if min_compiler_version:
if tools.Version(self.settings.compiler.version) < min_compiler_version:
raise ConanInvalidConfiguration("crashpad needs a c++14 capable compiler, version >= {}".format(min_compiler_version))
else:
self.output.warn("This recipe does not know about the current compiler and assumes it has sufficient c++14 supports.")
if self.settings.compiler.cppstd:
tools.check_min_cppstd(self, 14)
def source(self):
tools.get(**self.conan_data["sources"][self.version]["url"]["crashpad"], destination=self._source_subfolder, strip_root=True)
tools.get(**self.conan_data["sources"][self.version]["url"]["mini_chromium"],
destination=os.path.join(self._source_subfolder, "third_party", "mini_chromium", "mini_chromium"), strip_root=True)
@property
def _gn_os(self):
if tools.is_apple_os(self.settings.os):
if self.settings.os == "Macos":
return "mac"
else:
return "ios"
return {
"Windows": "win",
}.get(str(self.settings.os), str(self.settings.os).lower())
@property
def _gn_arch(self):
return {
"x86_64": "x64",
"armv8": "aarch64",
"x86": "x86",
}.get(str(self.settings.arch), str(self.settings.arch))
@contextmanager
def _build_context(self):
if self.settings.compiler == "Visual Studio":
with tools.vcvars(self.settings):
yield
else:
env_defaults = {}
if self.settings.compiler == "gcc":
env_defaults.update({
"CC": "gcc",
"CXX": "g++",
"LD": "g++",
})
elif self.settings.compiler in ("clang", "apple-clang"):
env_defaults.update({
"CC": "clang",
"CXX": "clang++",
"LD": "clang++",
})
env = {}
for key, value in env_defaults.items():
if not tools.get_env(key):
env[key] = value
with tools.environment_append(env):
yield
@property
def _http_transport_impl(self):
if str(self.options.http_transport) == "None":
return ""
else:
return str(self.options.http_transport)
def build(self):
for patch in self.conan_data.get("patches", {}).get(self.version, []):
tools.patch(**patch)
if self.settings.compiler == "Visual Studio":
tools.replace_in_file(os.path.join(self._source_subfolder, "third_party", "zlib", "BUILD.gn"),
"libs = [ \"z\" ]",
"libs = [ {} ]".format(", ".join("\"{}.lib\"".format(l) for l in self.deps_cpp_info["zlib"].libs)))
if self.settings.compiler == "gcc":
toolchain_path = os.path.join(self._source_subfolder, "third_party", "mini_chromium", "mini_chromium", "build", "config", "BUILD.gn")
# Remove gcc-incompatible compiler arguments
for comp_arg in ("-Wheader-hygiene", "-Wnewline-eof", "-Wstring-conversion", "-Wexit-time-destructors", "-fobjc-call-cxx-cdtors", "-Wextra-semi", "-Wimplicit-fallthrough"):
tools.replace_in_file(toolchain_path,
"\"{}\"".format(comp_arg), "\"\"")
autotools = AutoToolsBuildEnvironment(self)
extra_cflags = autotools.flags + ["-D{}".format(d) for d in autotools.defines]
extra_cflags_c = []
extra_cflags_cc = autotools.cxx_flags
extra_ldflags = autotools.link_flags
if self.options.get_safe("fPIC"):
extra_cflags.append("-fPIC")
extra_cflags.extend("-I {}".format(inc) for inc in autotools.include_paths)
extra_ldflags.extend("-{}{}".format("LIBPATH:" if self.settings.compiler == "Visual Studio" else "L ", libdir) for libdir in autotools.library_paths)
if self.settings.compiler == "clang":
if self.settings.compiler.get_safe("libcxx"):
stdlib = {
"libstdc++11": "libstdc++",
}.get(str(self.settings.compiler.libcxx), str(self.settings.compiler.libcxx))
extra_cflags_cc.append("-stdlib={}".format(stdlib))
extra_ldflags.append("-stdlib={}".format(stdlib))
gn_args = [
"host_os=\\\"{}\\\"".format(self._gn_os),
"host_cpu=\\\"{}\\\"".format(self._gn_arch),
"is_debug={}".format(str(self.settings.build_type == "Debug").lower()),
"crashpad_http_transport_impl=\\\"{}\\\"".format(self._http_transport_impl),
"crashpad_use_boringssl_for_http_transport_socket={}".format(str(self.options.get_safe("with_tls", False) != False).lower()),
"extra_cflags=\\\"{}\\\"".format(" ".join(extra_cflags)),
"extra_cflags_c=\\\"{}\\\"".format(" ".join(extra_cflags_c)),
"extra_cflags_cc=\\\"{}\\\"".format(" ".join(extra_cflags_cc)),
"extra_ldflags=\\\"{}\\\"".format(" ".join(extra_ldflags)),
]
with tools.chdir(self._source_subfolder):
with self._build_context():
self.run("gn gen out/Default --args=\"{}\"".format(" ".join(gn_args)), run_environment=True)
targets = ["client", "minidump", "crashpad_handler", "snapshot"]
if self.settings.os == "Windows":
targets.append("crashpad_handler_com")
self.run("ninja -C out/Default {targets} -j{parallel}".format(
targets=" ".join(targets),
parallel=tools.cpu_count()), run_environment=True)
def lib_filename(name):
prefix, suffix = ("", ".lib") if self.settings.compiler == "Visual Studio" else ("lib", ".a")
return "{}{}{}".format(prefix, name, suffix)
tools.rename(os.path.join(self._source_subfolder, "out", "Default", "obj", "client", lib_filename("common")),
os.path.join(self._source_subfolder, "out", "Default", "obj", "client", lib_filename("client_common")))
tools.rename(os.path.join(self._source_subfolder, "out", "Default", "obj", "handler", lib_filename("common")),
os.path.join(self._source_subfolder, "out", "Default", "obj", "handler", lib_filename("handler_common")))
def package(self):
self.copy("LICENSE", src=self._source_subfolder, dst="licenses")
self.copy("*.h", src=os.path.join(self._source_subfolder, "client"), dst=os.path.join("include", "client"))
self.copy("*.h", src=os.path.join(self._source_subfolder, "util"), dst=os.path.join("include", "util"))
self.copy("*.h", src=os.path.join(self._source_subfolder, "third_party", "mini_chromium", "mini_chromium", "base"), dst=os.path.join("include", "base"))
self.copy("*.h", src=os.path.join(self._source_subfolder, "third_party", "mini_chromium", "mini_chromium", "build"), dst=os.path.join("include", "build"))
self.copy("*.h", src=os.path.join(self._source_subfolder, "out", "Default", "gen", "build"), dst=os.path.join("include", "build"))
self.copy("*.a", src=os.path.join(self._source_subfolder, "out", "Default"), dst="lib", keep_path=False)
self.copy("*.lib", src=os.path.join(self._source_subfolder, "out", "Default"), dst="lib", keep_path=False)
self.copy("crashpad_handler", src=os.path.join(self._source_subfolder, "out", "Default"), dst="bin", keep_path=False)
self.copy("crashpad_handler.exe", src=os.path.join(self._source_subfolder, "out", "Default"), dst="bin", keep_path=False)
self.copy("crashpad_handler_com.com", src=os.path.join(self._source_subfolder, "out", "Default"), dst="bin", keep_path=False)
if self.settings.os == "Windows":
tools.rename(os.path.join(self.package_folder, "bin", "crashpad_handler_com.com"),
os.path.join(self.package_folder, "bin", "crashpad_handler.com"))
# Remove accidentally copied libraries. These are used by the executables, not by the libraries.
tools.remove_files_by_mask(os.path.join(self.package_folder, "lib"), "*getopt*")
tools.save(os.path.join(self.package_folder, "lib", "cmake", "crashpad-cxx.cmake"),
textwrap.dedent("""\
if(TARGET crashpad::mini_chromium_base)
target_compile_features(crashpad::mini_chromium_base INTERFACE cxx_std_14)
endif()
"""))
def package_info(self):
self.cpp_info.components["mini_chromium_base"].libs = ["base"]
self.cpp_info.components["mini_chromium_base"].build_modules = [os.path.join(self.package_folder, "lib", "cmake", "crashpad-cxx.cmake")]
self.cpp_info.components["mini_chromium_base"].builddirs = [os.path.join("lib", "cmake")]
if tools.is_apple_os(self.settings.os):
if self.settings.os == "Macos":
self.cpp_info.components["mini_chromium_base"].frameworks = ["ApplicationServices", "CoreFoundation", "Foundation", "IOKit", "Security"]
else: # iOS
self.cpp_info.components["mini_chromium_base"].frameworks = ["CoreFoundation", "CoreGraphics", "CoreText", "Foundation", "Security"]
self.cpp_info.components["util"].libs = ["util"]
self.cpp_info.components["util"].requires = ["mini_chromium_base", "zlib::zlib"]
if tools.is_apple_os(self.settings.os):
self.cpp_info.components["util"].libs.append("mig_output")
if self.settings.os in ("Linux", "FreeBSD"):
self.cpp_info.components["util"].libs.append("compat")
self.cpp_info.components["util"].requires.append("linux-syscall-support::linux-syscall-support")
if self.settings.os == "Windows":
self.cpp_info.components["util"].system_libs.extend(["dbghelp", "rpcrt4"])
if self.options.http_transport == "libcurl":
self.cpp_info.components["util"].requires.append("libcurl::libcurl")
elif self.options.get_safe("with_tls") == "openssl":
self.cpp_info.components["util"].requires.append("openssl::openssl")
if self.settings.os == "Macos":
self.cpp_info.components["util"].frameworks.extend(["CoreFoundation", "Foundation", "IOKit"])
self.cpp_info.components["util"].system_libs.append("bsm")
self.cpp_info.components["client_common"].libs = ["client_common"]
self.cpp_info.components["client_common"].requires = ["util", "mini_chromium_base"]
self.cpp_info.components["client"].libs = ["client"]
self.cpp_info.components["client"].requires = ["util", "mini_chromium_base", "client_common"]
if self.settings.os == "Windows":
self.cpp_info.components["client"].system_libs.append("rpcrt4")
self.cpp_info.components["context"].libs = ["context"]
self.cpp_info.components["context"].requires = ["util"]
self.cpp_info.components["snapshot"].libs = ["snapshot"]
self.cpp_info.components["snapshot"].requires = ["client_common", "mini_chromium_base", "util"]
if tools.is_apple_os(self.settings.os):
self.cpp_info.components["snapshot"].frameworks.extend(["OpenCL"])
self.cpp_info.components["format"].libs = ["format"]
self.cpp_info.components["format"].requires = ["snapshot", "mini_chromium_base", "util"]
self.cpp_info.components["minidump"].libs = ["minidump"]
self.cpp_info.components["minidump"].requires = ["snapshot", "mini_chromium_base", "util"]
self.cpp_info.components["handler_common"].libs = ["handler_common"]
self.cpp_info.components["handler_common"].requires = ["client_common", "snapshot", "util"]
self.cpp_info.components["handler"].libs = ["handler"]
self.cpp_info.components["handler"].requires = ["client", "util", "handler_common", "minidump", "snapshot"]
bin_path = os.path.join(self.package_folder, "bin")
self.output.info("Appending PATH environment variable: {}".format(bin_path))
self.env_info.PATH.append(bin_path)
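# Illustrative local build command (the version string and option value are examples, not
# pinned by this recipe):
#   conan create . crashpad/cci.20210507@ -o crashpad:http_transport=libcurl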
|
scripts/run_music_transformer.py | HalleyYoung/musicautobot | 402 | 12682367 | <gh_stars>100-1000
import music21
import torch
import numpy as np
try: from apex.optimizers import FusedAdam
except: from torch.optim import Adam as FusedAdam
from fastai.distributed import *
from fastai.callbacks import SaveModelCallback
from fastai.text.models.transformer import *
import sys
sys.path.insert(0, '..')
from musicautobot.music_transformer import *
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--path', type=str, default='../data/numpy/')
parser.add_argument('--data_file', type=str, default='musicitem_data_save.pkl')
parser.add_argument('--save', type=str, default='first_run')
parser.add_argument('--load', type=str, default=None)
parser.add_argument("--local_rank", type=int, default=0)
parser.add_argument("--batch_size", type=int, default=12)
parser.add_argument("--mem_len", type=int, default=512)
parser.add_argument("--bptt", type=int, default=512)
parser.add_argument("--num_workers", type=int, default=1)
parser.add_argument('--half', action='store_true', help='Use half precision')
parser.add_argument('--lamb', action='store_true', help='Use lamb optimizer')
parser.add_argument('--wd', type=float, default=1e-3, help='weight decay for adam')
parser.add_argument('--epochs', type=int, default=5, help='num epochs')
parser.add_argument('--lr', type=float, default=1e-3, help='learning rate')
parser.add_argument('--div_factor', type=int, default=10, help='learning rate div factor')
parser.add_argument('--config', type=str, default='default_config', help='serve.py config name')
parser.add_argument('--no_transpose', action='store_true', help='No transpose data augmentation')
parser.add_argument('--parallel', action='store_true', help='Run in dataparallel')
parser.add_argument('--mask_steps', type=int, default=1, help='Attention mask - max number of random steps. Basically teacher forcing')
args = parser.parse_args()
is_distributed = num_distrib() > 0
if args.local_rank != 0:
f = open('/dev/null', 'w')
sys.stdout = f
if is_distributed:
torch.cuda.set_device(args.local_rank)
torch.distributed.init_process_group(backend='nccl', init_method='env://')
path = Path(args.path)
from musicautobot import config
config = getattr(config, args.config)()
config['encode_position'] = True
config['mask_steps'] = args.mask_steps
transpose_range = None if args.no_transpose else (0,12)
data = load_data(path, args.data_file, encode_position=config['encode_position'], dl_tfms=[batch_position_tfm],
bs=args.batch_size, bptt=args.bptt, transpose_range=transpose_range, num_workers=args.num_workers)
eps = 1e-2 if args.half else 1e-6
opt_func = partial(FusedAdam, betas=(0.9,0.99), eps=eps)
if args.lamb:
from musicautobot.utils.lamb import Lamb
opt_func = partial(Lamb, eps=eps)
load_path = path/args.load if args.load else None
learn = music_model_learner(data, config=config, drop_mult=1.5, opt_func=opt_func, pretrained_path=load_path)
if not args.half: learn.clip_grad(1.0)
if args.save:
save_path = path/learn.model_dir/args.save
save_path.parent.mkdir(parents=True, exist_ok=True)
if args.half: learn = learn.to_fp16(clip=1.0, dynamic=True, max_scale=2**18)
if is_distributed: learn = learn.to_distributed(args.local_rank, cache_dir=path/'dist_logs')
if args.parallel: learn = learn.to_parallel()
if args.local_rank == 0: learn.callbacks.append(SaveModelCallback(learn, name=f'{args.save}_best'))
learn.fit_one_cycle(args.epochs, args.lr, div_factor=args.div_factor, pct_start=0.2, final_div=200, wd=args.wd)
if args.local_rank == 0: learn.save(f'{args.save}', config=config)
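# Example single-GPU invocation (paths and hyperparameters are placeholders; every flag is
# defined by the argparse section above):
#   python run_music_transformer.py --path ../data/numpy/ --data_file musicitem_data_save.pkl \
#       --batch_size 8 --epochs 5 --lr 1e-3 --save first_run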
|
custom_components/ble_monitor/ble_parser/helpers.py | avidit/hass | 383 | 12682398 | <filename>custom_components/ble_monitor/ble_parser/helpers.py
"""Helpers for bleparser"""
from uuid import UUID
def to_uuid(uuid: str) -> str:
"""Return formatted UUID"""
return str(UUID(''.join(f'{i:02X}' for i in uuid)))
def to_mac(addr: str) -> str:
"""Return formatted MAC address"""
return ':'.join(f'{i:02X}' for i in addr)
def to_unformatted_mac(addr: bytes) -> str:
"""Return unformatted MAC address"""
return ''.join(f'{i:02X}' for i in addr[:])
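# Example with raw advertisement bytes (illustrative values):
#   to_mac(b'\xa4\xc1\x38\x01\x02\x03') -> 'A4:C1:38:01:02:03'
#   to_unformatted_mac(b'\xa4\xc1\x38\x01\x02\x03') -> 'A4C138010203'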
|
torchnlp/samplers/balanced_sampler.py | jmribeiro/PyTorch-NLP | 2,125 | 12682399 | from torchnlp._third_party.weighted_random_sampler import WeightedRandomSampler
from torchnlp.utils import identity
class BalancedSampler(WeightedRandomSampler):
""" Weighted sampler with respect for an element's class.
Args:
data (iterable)
get_class (callable, optional): Get the class of an item relative to the entire dataset.
get_weight (callable, optional): Define a weight for each item other than one.
kwargs: Additional key word arguments passed onto `WeightedRandomSampler`.
Example:
>>> from torchnlp.samplers import DeterministicSampler
>>>
>>> data = ['a', 'b', 'c'] + ['c'] * 100
>>> sampler = BalancedSampler(data, num_samples=3)
>>> sampler = DeterministicSampler(sampler, random_seed=12)
>>> [data[i] for i in sampler]
['c', 'b', 'a']
"""
def __init__(self, data_source, get_class=identity, get_weight=lambda x: 1, **kwargs):
classified = [get_class(item) for item in data_source]
weighted = [float(get_weight(item)) for item in data_source]
class_totals = {
k: sum([w for c, w in zip(classified, weighted) if k == c]) for k in set(classified)
}
weights = [w / class_totals[c] if w > 0 else 0.0 for c, w in zip(classified, weighted)]
super().__init__(weights=weights, **kwargs)
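# Illustrative sketch with an explicit class getter (the `rows` data below is hypothetical):
#   rows = [{'label': 'a'}, {'label': 'b'}, {'label': 'b'}]
#   sampler = BalancedSampler(rows, get_class=lambda r: r['label'], num_samples=2)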
|
subsync/media.py | tympanix/subsync | 108 | 12682401 | <gh_stars>100-1000
import os
import librosa
import subprocess
import tempfile
import io
import pysrt
from pysrt import SubRipTime
import string
import random
import chardet
import re
from datetime import timedelta
import numpy as np
import sklearn.metrics
from .ffmpeg import Transcode
from .log import logger
class Media:
"""
Media class represents a media file on disk for which the content can be
analyzed and retrieved.
"""
# List of supported media formats
FORMATS = ['.mkv', '.mp4', '.wmv', '.avi', '.flv']
# The frequency of the generated audio
FREQ = 16000
# The number of coefficients to extract from the mfcc
N_MFCC = 13
# The number of samples in each mfcc coefficient
HOP_LEN = 512.0
# The length (seconds) of each item in the mfcc analysis
LEN_MFCC = HOP_LEN/FREQ
def __init__(self, filepath, subtitles=None):
prefix, ext = os.path.splitext(filepath)
if ext == '.srt':
return self.from_srt(filepath)
if not ext:
raise ValueError('unknown file: "{}"'.format(filepath))
if ext not in Media.FORMATS:
raise ValueError('filetype {} not supported: "{}"'.format(ext, filepath))
self.__subtitles = subtitles
self.filepath = os.path.abspath(filepath)
self.filename = os.path.basename(prefix)
self.extension = ext
self.offset = timedelta()
def from_srt(self, filepath):
prefix, ext = os.path.splitext(filepath)
if ext != '.srt':
raise ValueError('filetype must be .srt format')
prefix = os.path.basename(re.sub(r'\.\w\w$', '', prefix))
dir = os.path.dirname(filepath)
for f in os.listdir(dir):
_, ext = os.path.splitext(f)
if f.startswith(prefix) and ext in Media.FORMATS:
return self.__init__(os.path.join(dir, f), subtitles=[filepath])
raise ValueError('no media for subtitle: "{}"'.format(filepath))
def subtitles(self):
if self.__subtitles is not None:
for s in self.__subtitles:
yield Subtitle(self, s)
else:
dir = os.path.dirname(self.filepath)
for f in os.listdir(dir):
if f.endswith('.srt') and f.startswith(self.filename):
yield Subtitle(self, os.path.join(dir, f))
def mfcc(self, duration=60*15, seek=True):
transcode = Transcode(self.filepath, duration=duration, seek=seek)
self.offset = transcode.start
print("Transcoding...")
transcode.run()
y, sr = librosa.load(transcode.output, sr=Media.FREQ)
print("Analysing...")
self.mfcc = librosa.feature.mfcc(y=y, sr=sr,
hop_length=int(Media.HOP_LEN),
n_mfcc=int(Media.N_MFCC)
)
os.remove(transcode.output)
return self.mfcc
class Subtitle:
"""
    Subtitle class represents an .srt file on disk and provides
functionality to inspect and manipulate the subtitle content
"""
def __init__(self, media, path):
self.media = media
self.path = path
self.subs = pysrt.open(self.path, encoding=self._find_encoding())
def labels(self, subs=None):
if self.media.mfcc is None:
raise RuntimeError("Must analyse mfcc before generating labels")
samples = len(self.media.mfcc[0])
labels = np.zeros(samples)
for sub in self.subs if subs is None else subs:
start = timeToPos(sub.start - self.offset())
end = timeToPos(sub.end - self.offset())+1
for i in range(start, end):
if i >= 0 and i < len(labels):
labels[i] = 1
return labels
def _find_encoding(self):
data = None
with open(self.path, "rb") as f:
data = f.read()
det = chardet.detect(data)
return det.get("encoding")
def offset(self):
d = self.media.offset
hours, remainder = divmod(d.seconds, 3600)
minutes, seconds = divmod(remainder, 60)
return SubRipTime(
hours=hours, minutes=minutes, seconds=seconds,
milliseconds=d.microseconds/1000
)
def logloss(self, pred, actual, margin=12):
blocks = secondsToBlocks(margin)
logloss = np.ones(blocks*2)
indices = np.ones(blocks*2)
nonzero = np.nonzero(actual)[0]
begin = max(nonzero[0]-blocks, 0)
end = min(nonzero[-1]+blocks, len(actual)-1)
pred = pred[begin:end]
actual = actual[begin:end]
for i, offset in enumerate(range(-blocks, blocks)):
snippet = np.roll(actual, offset)
try:
logloss[i] = sklearn.metrics.log_loss(snippet[blocks:-blocks], pred[blocks:-blocks])
except (ValueError, RuntimeWarning):
pass
indices[i] = offset
return indices, logloss
def sync(self, net, safe=True, margin=12, plot=True):
secs = 0.0
labels = self.labels()
mfcc = self.media.mfcc.T
mfcc = mfcc[..., np.newaxis]
pred = net.predict(mfcc)
x, y = self.logloss(pred, labels, margin=margin)
accept = True
if safe:
mean = np.mean(y)
sd = np.std(y)
accept = np.min(y) < mean - sd
if accept:
secs = blocksToSeconds(x[np.argmin(y)])
print("Shift {} seconds:".format(secs))
self.subs.shift(seconds=secs)
self.subs.save(self.path, encoding='utf-8')
if secs != 0.0:
logger.info('{}: {}s'.format(self.path, secs))
if plot:
self.plot_logloss(x, y)
return secs
def sync_all(self, net, margin=16, plot=True):
secs = 0.0
mfcc = self.media.mfcc.T
mfcc = mfcc[..., np.newaxis]
pred = net.predict(mfcc)
print("Fitting...")
self.__sync_all_rec(self.subs, pred)
self.clean()
self.subs.save(self.path, encoding='utf-8')
def __sync_all_rec(self, subs, pred, margin=16):
if len(subs) < 3:
return
labels = self.labels(subs=subs)
if np.unique(labels).size <= 1:
return
x, y = self.logloss(pred, labels, margin=max(margin, 0.25))
#self.plot_logloss(x,y)
#self.plot_labels(labels, pred)
secs = blocksToSeconds(x[np.argmin(y)])
subs.shift(seconds=secs)
# call recursively
middle = subs[len(subs)//2]
left = subs.slice(ends_before=middle.start)
right = subs.slice(starts_after=middle.start)
self.__sync_all_rec(left, pred, margin=margin/2)
self.__sync_all_rec(right, pred, margin=margin/2)
def clean(self):
for i, s in enumerate(self.subs):
if i >= len(self.subs)-1:
return
next = self.subs[i+1]
if s.end > next.start:
s.end = next.start
def plot_logloss(self, x, y):
import matplotlib.pyplot as plt
plt.figure()
plt.plot(x, y)
plt.title('logloss over shifts')
plt.ylabel('logloss')
plt.xlabel('shifts')
plt.legend(['logloss'], loc='upper left')
plt.show()
def plot_labels(self, labels, pred):
import matplotlib.pyplot as plt
plt.figure()
plt.plot([i for i in range(0,len(labels))], labels, label='labels')
plt.title('labels vs predictions')
plt.ylabel('value')
plt.xlabel('time')
plt.legend(['labels'], loc='upper left')
plt.figure()
plt.plot([i for i in range(0,len(pred))], pred, label='pred')
plt.title('labels vs predictions')
plt.ylabel('value')
plt.xlabel('time')
plt.legend(['pred'], loc='upper left')
plt.show()
# Convert timestamp to seconds
def timeToSec(t):
total_sec = float(t.milliseconds)/1000
total_sec += t.seconds
total_sec += t.minutes*60
total_sec += t.hours*60*60
return total_sec
# Return MFCC frame position from a timestamp
def timeToPos(t, freq=Media.FREQ, hop_len=Media.HOP_LEN):
return round(timeToSec(t)/(hop_len/freq))
def secondsToBlocks(s, hop_len=Media.HOP_LEN, freq=Media.FREQ):
return int(float(s)/(hop_len/freq))
def blocksToSeconds(h, freq=Media.FREQ, hop_len=Media.HOP_LEN):
return float(h)*(hop_len/freq)
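# Illustrative end-to-end sketch (assumes `net` is a trained model whose predict() accepts
# the MFCC tensor built in Media.mfcc(); the file name is a placeholder):
#   media = Media('movie.mkv')
#   media.mfcc()                     # transcode audio and compute MFCC features
#   for sub in media.subtitles():
#       sub.sync(net, plot=False)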
|
PypeS/pypewrapper.py | michelebucelli/vmtk | 217 | 12682405 | <filename>PypeS/pypewrapper.py
#!/usr/bin/env python
## Program: PypeS
## Module: $RCSfile: pype.py,v $
## Language: Python
## Date: $Date: 2006/07/07 10:45:42 $
## Version: $Revision: 1.18 $
## Copyright (c) <NAME>, <NAME>. All rights reserved.
## See LICENSE file for details.
## This software is distributed WITHOUT ANY WARRANTY; without even
## the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
## PURPOSE. See the above copyright notices for more information.
from __future__ import print_function, absolute_import # NEED TO STAY AS TOP IMPORT
import sys
import os.path
from vmtk import pypes
class PypeWrapper(object):
def __init__(self):
self.Mode = 'slicer3'
self.XMLDescription = ''
self.PypeTitle = ''
self.PypeDescription = ''
self.Contributor = ''
self.ModuleFileName = ''
self.Arguments = None
self.ScriptList = []
self.ModulePipeArguments = []
self.AllExposedMembers = []
self.Indentation = ' '
def ParseArguments(self):
if '--help' in self.Arguments:
            print('usage: pypewrapper --pype <pype arguments> [--mode slicer3] [--title TITLE] [--description DESC] [--contributor NAME] [--modulefile FILE]')
return
if '--pype' not in self.Arguments:
            print('error: a --pype argument is required')
return
if '--mode' in self.Arguments:
self.Mode = self.Arguments[self.Arguments.index('--mode')+1]
if '--title' in self.Arguments:
self.PypeTitle = self.Arguments[self.Arguments.index('--title')+1]
if '--description' in self.Arguments:
self.PypeDescription = self.Arguments[self.Arguments.index('--description')+1]
if '--contributor' in self.Arguments:
self.Contributor = self.Arguments[self.Arguments.index('--contributor')+1]
if '--modulefile' in self.Arguments:
self.ModuleFileName = self.Arguments[self.Arguments.index('--modulefile')+1]
arguments = self.Arguments[self.Arguments.index('--pype')+1:]
self.ModulePipeArguments = arguments[:]
while '--pipe' in arguments:
scriptSlice = arguments[:arguments.index('--pipe')]
self.ScriptList.append([os.path.splitext(os.path.split(scriptSlice[0])[1])[0],scriptSlice[1:]])
arguments = arguments[arguments.index('--pipe')+1:]
scriptSlice = arguments[:]
if not arguments:
return
self.ScriptList.append([os.path.splitext(os.path.split(scriptSlice[0])[1])[0],scriptSlice[1:]])
def Execute(self):
ind = self.Indentation
self.XMLDescription = '<?xml version="1.0" encoding="utf-8"?>\n'
self.XMLDescription += '<executable>\n'
self.XMLDescription += ind + '<category>vmtk</category>\n'
self.XMLDescription += ind + '<title>%s</title>\n' % (self.PypeTitle)
self.XMLDescription += ind + '<description>%s</description>\n' % (self.PypeDescription)
self.XMLDescription += ind + '<contributor>%s</contributor>\n' % (self.Contributor)
self.AllExposedMembers = []
for scriptNameAndArguments in self.ScriptList:
self.XMLDescription += ind + '<parameters>\n'
scriptName = scriptNameAndArguments[0]
moduleName = scriptName
scriptArguments = scriptNameAndArguments[1]
try:
exec('from vmtk import '+ moduleName)
except ImportError:
print('No module named ' + moduleName)
break
scriptObjectClassName = ''
exec ('scriptObjectClassName = '+moduleName+'.'+moduleName)
self.XMLDescription += 2*ind + '<label>%s Parameters</label>\n' % (scriptObjectClassName)
moduleScriptObjectClassName = moduleName+'.'+scriptObjectClassName
scriptObject = 0
exec ('scriptObject = '+moduleScriptObjectClassName+'()')
scriptArguments = scriptNameAndArguments[1]
exposedArgumentNames = [argument.split('@')[0] for argument in scriptArguments if '@' in argument[1:]]
exposedArgumentChannels = [argument.split('@')[1] for argument in scriptArguments if '@' in argument[1:]]
exposedArgumentOptions = [scriptArguments[scriptArguments.index(argument)-1][1:] for argument in scriptArguments if '@' in argument[1:]]
exposedOptionsToNamesAndChannels = {}
for i in range(len(exposedArgumentOptions)):
exposedOptionsToNamesAndChannels[exposedArgumentOptions[i]] = [exposedArgumentNames[i], exposedArgumentChannels[i]]
exposedMembers = []
for member in scriptObject.InputMembers + scriptObject.OutputMembers:
exec('member.MemberValue = scriptObject.'+member.MemberName)
if member.OptionName in exposedOptionsToNamesAndChannels:
member.ExposedName = exposedOptionsToNamesAndChannels[member.OptionName][0]
member.ExposedChannel = exposedOptionsToNamesAndChannels[member.OptionName][1]
exposedMembers.append(member)
self.AllExposedMembers.append(member)
for exposedMember in exposedMembers:
memberXMLTag = ''
memberXMLOptions = ''
enumeration = exposedMember.GetRangeEnumeration()
if exposedMember.MemberType == 'int':
memberXMLTag = 'integer'
elif exposedMember.MemberType == 'float':
memberXMLTag = 'float'
elif exposedMember.MemberType == 'str':
memberXMLTag = 'string'
if enumeration:
memberXMLTag += '-enumeration'
elif exposedMember.MemberType == 'bool':
memberXMLTag = 'boolean'
if exposedMember.MemberLength != 1:
memberXMLTag += '-vector'
if exposedMember.MemberType == 'vtkImageData':
memberXMLTag = 'image'
elif exposedMember.MemberType == 'vtkPolyData':
memberXMLTag = 'geometry'
if exposedMember.ExposedChannel == 'point':
memberXMLTag = 'point'
if exposedMember.MemberLength == -1:
memberXMLOptions += 'multiple="true"'
self.XMLDescription += 2*ind + '<%s>\n' % (memberXMLTag+' '+memberXMLOptions)
self.XMLDescription += 3*ind + '<name>%s</name>\n' % (exposedMember.ExposedName)
self.XMLDescription += 3*ind + '<longflag>%s</longflag>\n' % (exposedMember.ExposedName)
self.XMLDescription += 3*ind + '<label>%s</label>\n' % (exposedMember.ExposedName)
if exposedMember.MemberDoc:
self.XMLDescription += 3*ind + '<description>%s</description>\n' % (exposedMember.MemberDoc)
if exposedMember.MemberValue not in [None, [], '']:
self.XMLDescription += 3*ind + '<default>%s</default>\n' % (str(exposedMember.MemberValue))
if enumeration:
for element in enumeration:
self.XMLDescription += 3*ind + '<element>%s</element>\n' % (str(element))
values = exposedMember.GetRangeValues()
if values:
self.XMLDescription += 3*ind + '<constraints>\n'
if values[0] != None:
self.XMLDescription += 4*ind + '<minimum>%s</minimum>\n' % (str(values[0]))
if values[1] != None:
self.XMLDescription += 4*ind + '<maximum>%s</maximum>\n' % (str(values[1]))
if values[2] != None:
self.XMLDescription += 4*ind + '<step>%s</step>\n' % (str(values[2]))
self.XMLDescription += 3*ind + '</constraints>\n'
if exposedMember.ExposedChannel in ['input','output']:
self.XMLDescription += 3*ind + '<channel>%s</channel>\n' % (exposedMember.ExposedChannel)
self.XMLDescription += 2*ind + '</%s>\n' % (memberXMLTag)
self.XMLDescription += ind + '</parameters>\n'
self.XMLDescription += '</executable>\n'
moduleFile = open(self.ModuleFileName,'w')
moduleFile.write('#!/usr/bin/env python\n\n')
moduleFile.write('xmlDescription = """')
moduleFile.write(self.XMLDescription)
moduleFile.write('"""\n')
moduleFile.write('\n')
moduleFile.write('pypeWrapperCommand = "%s"\n' % ' '.join(sys.argv))
moduleFile.write('\n')
moduleFile.write('import sys\n')
moduleFile.write('if "--xml" in sys.argv:\n')
        moduleFile.write(self.Indentation+'print(xmlDescription)\n')
moduleFile.write(self.Indentation+'sys.exit(0)\n')
moduleFile.write('\n')
moduleFile.write('if "--logo" in sys.argv:\n')
moduleFile.write(self.Indentation+'sys.exit(0)\n')
moduleFile.write('\n')
moduleFile.write('import sys\n')
moduleFile.write('if "--pypewrapper" in sys.argv:\n')
        moduleFile.write(self.Indentation+'print(pypeWrapperCommand)\n')
moduleFile.write(self.Indentation+'sys.exit(0)\n')
moduleFile.write('\n')
substModulePipeArguments = []
exposedMembersOrder = []
for argument in self.ModulePipeArguments:
if '@' in argument[1:]:
substModulePipeArguments.append(argument.split('@')[0])
else:
substModulePipeArguments.append(argument)
for exposedMember in self.AllExposedMembers:
exposedMembersOrder.append(substModulePipeArguments.index(exposedMember.ExposedName))
if exposedMember.ExposedChannel in ['input','output']:
substModulePipeArguments[substModulePipeArguments.index(exposedMember.ExposedName)-1] += 'file'
substModulePipeArguments[substModulePipeArguments.index(exposedMember.ExposedName)] = '%s'
sortedExposedMembersOrder = exposedMembersOrder[:]
sortedExposedMembersOrder.sort()
allOrderedExposedMemberNames = []
for position in sortedExposedMembersOrder:
allOrderedExposedMemberNames.append(self.AllExposedMembers[exposedMembersOrder.index(position)].ExposedName)
moduleFile.write('arguments = sys.argv[:]\n')
moduleFile.write('\n')
for exposedMember in self.AllExposedMembers:
            if exposedMember.MemberType == 'bool':
moduleFile.write('%s = "0"\n' % exposedMember.ExposedName)
moduleFile.write('if "--%s" in arguments:\n' % (exposedMember.ExposedName))
moduleFile.write(self.Indentation+'%s = "1"\n' % (exposedMember.ExposedName))
moduleFile.write(self.Indentation+'arguments.remove("--%s")\n' % exposedMember.ExposedName)
moduleFile.write('%s = " ".join(%s.split(","))\n' % (exposedMember.ExposedName, exposedMember.ExposedName))
moduleFile.write('\n')
else:
moduleFile.write('%s = ""\n' % exposedMember.ExposedName)
moduleFile.write('while "--%s" in arguments:\n' % (exposedMember.ExposedName))
moduleFile.write(self.Indentation+'index = arguments.index("--%s")\n' % (exposedMember.ExposedName))
moduleFile.write(self.Indentation+'if index != len(arguments)-1 and "--" not in arguments[index+1]:\n')
moduleFile.write(2*self.Indentation+'if %s:\n' % exposedMember.ExposedName)
moduleFile.write(3*self.Indentation+'%s += ","\n' % exposedMember.ExposedName)
moduleFile.write(2*self.Indentation+'%s += arguments[index+1]\n' % exposedMember.ExposedName)
moduleFile.write(2*self.Indentation+'arguments.remove(arguments[index+1])\n')
moduleFile.write(self.Indentation+'arguments.remove("--%s")\n' % exposedMember.ExposedName)
moduleFile.write('%s = " ".join(%s.split(","))\n' % (exposedMember.ExposedName, exposedMember.ExposedName))
moduleFile.write('\n')
moduleFile.write('pipe = "%s" %% (%s)\n' % (' '.join(substModulePipeArguments),','.join(allOrderedExposedMemberNames)))
moduleFile.write('\n')
moduleFile.write('from vmtk import pypes\n')
moduleFile.write('pypes.PypeRun(pipe)\n')
moduleFile.write('\n')
moduleFile.close()
if __name__=='__main__':
pipeLumper = PypeWrapper()
pipeLumper.Arguments = sys.argv
pipeLumper.ParseArguments()
pipeLumper.Execute()
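# Illustrative invocation (the vmtk script and option names in the --pype string are
# placeholders; `name@channel` is the syntax ParseArguments expects for exposed arguments):
#   python pypewrapper.py --title "Surface Reader" --modulefile vmtkmodule.py \
#       --pype vmtksurfacereader -ifile surface@input --pipe vmtksurfaceviewer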
|
scripts/tag_datasets.py | pplonski/automlbenchmark | 282 | 12682410 | <filename>scripts/tag_datasets.py<gh_stars>100-1000
import sys
sys.path.append("D:\\repositories/openml-python")
import openml
if __name__ == '__main__':
suite = openml.study.get_suite(218)
tag = 'study_218'
for taskid in suite.tasks:
print('collecting t/', taskid)
task = openml.tasks.get_task(taskid, download_data=False)
#task.push_tag(tag)
print('collecting d/', task.dataset_id)
dataset = openml.datasets.get_dataset(task.dataset_id, download_data=False)
print('tagging')
#dataset.push_tag(tag)
|
mudpi/extensions/mqtt/__init__.py | icyspace/mudpi-core | 163 | 12682412 | <gh_stars>100-1000
"""
MQTT Extension
Includes interfaces for redis to
get data from events.
"""
import time
import paho.mqtt.client as mqtt
from mudpi.extensions import BaseExtension
from mudpi.exceptions import ConfigError  # used by validate(); assumed to live in mudpi.exceptions
class Extension(BaseExtension):
namespace = 'mqtt'
update_interval = 1
def init(self, config):
""" Prepare the mqtt connection and components """
self.connections = {}
self.loop_started = False
self.config = config
if not isinstance(config, list):
config = [config]
# Prepare clients for mqtt
for conf in config:
host = conf.get('host', 'localhost')
port = conf.get('port', 1883)
if conf['key'] not in self.connections:
self.connections[conf['key']] = {'client': None,
'connected': False,
'loop_started': False,
'callbacks': {}}
def on_conn(client, userdata, flags, rc):
if rc == 0:
self.connections[conf['key']]['connected'] = True
self.connections[conf['key']]['client'] = mqtt.Client(f'mudpi-{conf["key"]}')
self.connections[conf['key']]['client'].on_connect = on_conn
username = conf.get('username')
            password = conf.get('password')
if all([username, password]):
self.connections[conf['key']]['client'].username_pw_set(username, password)
self.connections[conf['key']]['client'].connect(host, port=port)
while not self.connections[conf['key']]['connected']:
if not self.connections[conf['key']]['loop_started']:
self.connections[conf['key']]['client'].loop_start()
self.connections[conf['key']]['loop_started'] = True
time.sleep(0.1)
return True
def validate(self, config):
""" Validate the mqtt connection info """
config = config[self.namespace]
if not isinstance(config, list):
config = [config]
for conf in config:
key = conf.get('key')
if key is None:
raise ConfigError('MQTT missing a `key` in config for connection')
host = conf.get('host')
if host is None:
conf['host'] = 'localhost'
port = conf.get('port')
if port is None:
conf['port'] = 1883
username = conf.get('username')
password = conf.get('password')
if any([username, password]) and not all([username, password]):
raise ConfigError('A username and password must both be provided.')
return config
def unload(self):
""" Unload the extension """
for conn in self.connections.values():
conn['client'].loop_stop()
conn['client'].disconnect()
def subscribe(self, key, topic, callback):
""" Listen on a topic and pass event data to callback """
if topic not in self.connections[key]['callbacks']:
self.connections[key]['callbacks'][topic] = [callback]
else:
if callback not in self.connections[key]['callbacks'][topic]:
self.connections[key]['callbacks'][topic].append(callback)
def callback_handler(client, userdata, message):
# log = f"{message.payload.decode()} {message.topic}"
if message.topic in self.connections[key]['callbacks']:
for callbk in self.connections[key]['callbacks'][message.topic]:
callbk(message.payload.decode("utf-8"))
self.connections[key]['client'].on_message = callback_handler
        return self.connections[key]['client'].subscribe(topic)
|
src/fireo/fields/text_field.py | isaacna/FireO | 231 | 12682417 | <gh_stars>100-1000
from fireo.fields import errors
from fireo.fields.base_field import Field
import re
class TextField(Field):
"""Text field for Models
Define text for models
    allowed_attributes = ['max_length', 'to_lowercase', 'format']
Examples
--------
class User(Model):
        name = TextField()
"""
allowed_attributes = ['max_length', 'to_lowercase', 'format']
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.format_type = None
self.supported_types = ['title', 'upper', 'lower', 'capitalize']
def attr_format(self, attr_val, field_val):
self.format_type = attr_val
return field_val
def attr_max_length(self, attr_val, field_val):
"""Method for attribute max_length"""
return field_val[:attr_val]
def attr_to_lowercase(self, attr_val, field_val):
"""Method for attribute to_lowercase
Convert text into lowercase
"""
if attr_val:
return field_val.lower() if field_val is not None else None
return field_val
def _titlecase(self, s):
return re.sub(r"[A-Za-z]+('[A-Za-z]+)?",
lambda mo: mo.group(0)[0].upper() +
mo.group(0)[1:].lower(),
s)
# override method
def db_value(self, val):
if type(val) is str or val is None:
# check if user defined to set the value as lower case
if self.model_cls._meta.to_lowercase:
return val.lower() if val is not None else None
return val
raise errors.InvalidFieldType(f'Invalid field type. Field "{self.name}" expected {str}, '
f'got {type(val)}')
# override method
def field_value(self, val):
# check if val is None then there is no need to run these functions
# just return back the None value
if val is None:
return val
self.field_attribute.parse(val, run_only=['format'])
if self.format_type:
if self.format_type in self.supported_types:
if self.format_type == 'title':
return self._titlecase(val)
if self.format_type == 'upper':
return val.upper()
if self.format_type == 'lower':
return val.lower()
if self.format_type == 'capitalize':
return val.capitalize()
raise errors.AttributeTypeError(
f'Invalid attribute type. Inside Field "{self.name}", '
f'"format" type must be one of them "{self.supported_types}".')
return val
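# Hedged usage sketch of the attributes handled above. `Model` comes from
# fireo.models; the class and field names are illustrative assumptions.
#
# from fireo.models import Model
#
# class City(Model):
#     name = TextField(format='title', max_length=50)
#     short_name = TextField(to_lowercase=True)
#
# c = City(name='new york city', short_name='NYC')
# c.save()  # short_name handled by to_lowercase; name title-cased on read via format='title'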
|
lib/utils/extract_tpelog.py | NelsonDaniel/SiamDW | 772 | 12682430 |
# -*- coding:utf-8 -*-
# ! ./usr/bin/env python
# __author__ = 'zzp'
import shutil
import argparse
import numpy as np
parser = argparse.ArgumentParser(description='Analyze SiamFC tuning results')
parser.add_argument('--path', default='logs/gene_adjust_rpn.log', help='tune result path')
parser.add_argument('--dataset', default='VOT2018', help='test dataset')
parser.add_argument('--save_path', default='logs', help='log file save path')
def collect_results(args):
if not args.path.endswith('txt'):
name = args.path.split('.')[0]
name = name + '.txt'
shutil.copy(args.path, name)
args.path = name
fin = open(args.path, 'r')
lines = fin.readlines()
penalty_k = []
scale_lr = []
wi = []
sz = []
bz = []
eao = []
count = 0 # total numbers
for line in lines:
if not line.startswith('penalty_k'):
pass
else:
# print(line)
count += 1
temp0, temp1, temp2, temp3, temp4, temp5 = line.split(',')
penalty_k.append(float(temp0.split(': ')[-1]))
scale_lr.append(float(temp1.split(': ')[-1]))
wi.append(float(temp2.split(': ')[-1]))
sz.append(float(temp3.split(': ')[-1]))
bz.append(float(temp4.split(': ')[-1]))
eao.append(float(temp5.split(': ')[-1]))
# find max
eao = np.array(eao)
max_idx = np.argmax(eao)
max_eao = eao[max_idx]
print('{} params group have been tested'.format(count))
    print('penalty_k: {:.4f}, scale_lr: {:.4f}, wi: {:.4f}, small_sz: {}, big_sz: {}, eao: {}'.format(penalty_k[max_idx], scale_lr[max_idx], wi[max_idx], sz[max_idx], bz[max_idx], max_eao))
if __name__ == '__main__':
args = parser.parse_args()
collect_results(args)
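# Example invocation (the log path is the script's own default):
# python extract_tpelog.py --path logs/gene_adjust_rpn.log --dataset VOT2018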
|
mistral/db/sqlalchemy/migration/alembic_migrations/versions/040_add_tables_for_dynamic_action_definitions_and_code_sources.py | shubhamdang/mistral | 205 | 12682432 |
# Copyright 2020 Nokia Software.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""create new tables for the dynamic actions and code sources
Revision ID: 040
Revises: 039
Create Date: 2020-09-30 12:02:51.935368
"""
# revision identifiers, used by Alembic.
revision = '040'
down_revision = '039'
from alembic import op
from mistral.db.sqlalchemy import types as st
import sqlalchemy as sa
def upgrade():
op.create_table(
'code_sources',
sa.Column('id', sa.String(length=36), nullable=False),
sa.Column('name', sa.String(length=255), nullable=False),
sa.Column('project_id', sa.String(length=80), nullable=True),
sa.Column('namespace', sa.String(length=255), nullable=True),
sa.Column('content', sa.TEXT, nullable=False),
sa.Column('version', sa.Integer, nullable=False),
sa.Column('tags', st.JsonEncoded(), nullable=True),
sa.Column('scope', sa.String(length=80), nullable=True),
sa.Column('created_at', sa.DateTime(), nullable=True),
sa.Column('updated_at', sa.DateTime(), nullable=True),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('name', 'namespace', 'project_id'),
sa.Index('code_sources_project_id', 'project_id'),
sa.Index('code_sources_scope', 'scope')
)
op.create_table(
'dynamic_action_definitions',
sa.Column('id', sa.String(length=36), nullable=False),
sa.Column('name', sa.String(length=255), nullable=False),
sa.Column('class_name', sa.String(length=255), nullable=False),
sa.Column('scope', sa.String(length=80), nullable=True),
sa.Column('project_id', sa.String(length=80), nullable=True),
sa.Column('code_source_id', sa.String(length=36), nullable=False),
sa.Column('code_source_name', sa.String(length=255), nullable=False),
sa.Column('namespace', sa.String(length=255), nullable=True),
sa.Column('created_at', sa.DateTime(), nullable=True),
sa.Column('updated_at', sa.DateTime(), nullable=True),
sa.PrimaryKeyConstraint('id'),
sa.ForeignKeyConstraint(
['code_source_id'],
['code_sources.id'],
ondelete='CASCADE'
),
sa.UniqueConstraint('name', 'namespace', 'project_id'),
sa.Index('dynamic_action_definitions_project_id', 'project_id'),
sa.Index('dynamic_action_definitions_scope', 'scope'),
)
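# Hedged sketch, not part of the original migration: a matching downgrade()
# would drop the two tables in reverse dependency order (the foreign key in
# dynamic_action_definitions points at code_sources).
def downgrade():
    op.drop_table('dynamic_action_definitions')
    op.drop_table('code_sources')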
|
02-HelloRDD/HelloRDD.py | IAmZero247/pyspark-learning | 105 | 12682445 | import sys
from pyspark import SparkConf
from collections import namedtuple
from pyspark.sql import SparkSession
from lib.logger import Log4j
SurveyRecord = namedtuple("SurveyRecord", ["Age", "Gender", "Country", "State"])
if __name__ == "__main__":
conf = SparkConf() \
.setMaster("local[3]") \
.setAppName("HelloRDD")
# sc = SparkContext(conf=conf)
spark = SparkSession.builder.config(conf=conf).getOrCreate()
sc = spark.sparkContext
logger = Log4j(spark)
if len(sys.argv) != 2:
logger.error("Usage: HelloSpark <filename>")
sys.exit(-1)
linesRDD = sc.textFile(sys.argv[1])
partitionedRDD = linesRDD.repartition(2)
colsRDD = partitionedRDD.map(lambda line: line.replace('"', '').split(","))
selectRDD = colsRDD.map(lambda cols: SurveyRecord(int(cols[1]), cols[2], cols[3], cols[4]))
filteredRDD = selectRDD.filter(lambda r: r.Age < 40)
kvRDD = filteredRDD.map(lambda r: (r.Country, 1))
countRDD = kvRDD.reduceByKey(lambda v1, v2: v1 + v2)
colsList = countRDD.collect()
for x in colsList:
logger.info(x)
|
nvtabular/io/fsspec_utils.py | NVIDIA/NVTabular | 543 | 12682457 |
#
# Copyright (c) 2021, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import io
from threading import Thread
import numpy as np
from pyarrow import parquet as pq
try:
import cudf
from cudf.core.column import as_column, build_categorical_column
except ImportError:
cudf = None
#
# Parquet-Specific Utilities
#
def _optimized_read_partition_remote(
fs, pieces, columns, index, categories=(), partitions=(), **kwargs
):
# This is a specialized version of `CudfEngine.read_partition`
# for remote filesystems. This implementation is intended to
# replace the upstream `read_partition` classmethod until
# remote-filesystem handling is optimized in cudf/dask-cudf
if columns is not None:
columns = list(columns)
if isinstance(index, list):
columns += index
# Check that this is a single-piece read on a non-local filesystem
if not isinstance(pieces, list):
pieces = [pieces]
if len(pieces) > 1:
raise ValueError(
"The `_custom_read_partition` code path is not designed to "
"handle a multi-element `pieces` argument."
)
if cudf.utils.ioutils._is_local_filesystem(fs):
raise ValueError(
"The `_custom_read_partition` code path is not intended "
"for use on local filesystems."
)
# Unpack contents of the single piece
if isinstance(pieces[0], str):
path = pieces[0]
row_group = None
partition_keys = []
else:
(path, row_group, partition_keys) = pieces[0]
# Call optimized read utility
df = _optimized_read_remote(path, row_group, columns, fs, **kwargs)
#
# Code below is directly copied from cudf-21.08
#
if index and (index[0] in df.columns):
df = df.set_index(index[0])
elif index is False and set(df.index.names).issubset(columns):
# If index=False, we need to make sure all of the
# names in `columns` are actually in `df.columns`
df.reset_index(inplace=True)
if partition_keys:
if partitions is None:
raise ValueError("Must pass partition sets")
for i, (name, index2) in enumerate(partition_keys):
categories = [val.as_py() for val in partitions.levels[i].dictionary]
col = as_column(index2).as_frame().repeat(len(df))._data[None]
df[name] = build_categorical_column(
categories=categories,
codes=as_column(col.base_data, dtype=col.dtype),
size=col.size,
offset=col.offset,
ordered=False,
)
return df
def _optimized_read_remote(path, row_groups, columns, fs, **kwargs):
if row_groups is not None and not isinstance(row_groups, list):
row_groups = [row_groups]
# Get byte-ranges that are known to contain the
# required data for this read
byte_ranges, footer, file_size = _get_parquet_byte_ranges(
path, row_groups, columns, fs, **kwargs
)
# Transfer the required byte-ranges with fsspec.
# Store these blocks in a local dummy buffer
dummy_buffer = _fsspec_data_transfer(
path,
fs,
byte_ranges=byte_ranges,
footer=footer,
file_size=file_size,
add_par1_magic=True,
**kwargs,
)
# Call cudf.read_parquet on the dummy buffer
strings_to_cats = kwargs.get("strings_to_categorical", False)
df = cudf.read_parquet(
io.BytesIO(dummy_buffer),
engine="cudf",
columns=columns,
row_groups=row_groups,
strings_to_categorical=strings_to_cats,
**kwargs.get("read", {}),
)
del dummy_buffer
return df
def _get_parquet_byte_ranges(
path,
rgs,
columns,
fs,
bytes_per_thread=256_000_000,
**kwargs,
):
# The purpose of this utility is to return a list
# of byte ranges (in path) that are known to contain
# the data needed to read `columns` and `rgs`
# Step 0 - Get size of file
file_size = fs.size(path)
# Return early if the file is too small to merit
# optimized data transfer
if file_size <= bytes_per_thread:
return None, None, file_size
# Step 1 - Get 32 KB from tail of file.
#
# This "sample size" can be tunable, but should
# always be >= 8 bytes (so we can read the footer size)
tail_size = 32_000
footer_sample = fs.tail(path, tail_size)
# Step 2 - Read the footer size and re-read a larger
# tail if necessary
footer_size = int.from_bytes(footer_sample[-8:-4], "little")
if tail_size < (footer_size + 8):
footer_sample = fs.tail(path, footer_size + 8)
# Step 3 - Collect required byte ranges
byte_ranges = []
md = pq.ParquetFile(io.BytesIO(footer_sample)).metadata
for r in range(md.num_row_groups):
# Skip this row-group if we are targeting
# specific row-groups
if rgs is None or r in rgs:
row_group = md.row_group(r)
for c in range(row_group.num_columns):
column = row_group.column(c)
name = column.path_in_schema
# Skip this column if we are targeting a
# specific columns
if columns is None or name in columns:
file_offset0 = column.dictionary_page_offset
if file_offset0 is None:
file_offset0 = column.data_page_offset
num_bytes = column.total_uncompressed_size
byte_ranges.append((file_offset0, num_bytes))
return byte_ranges, footer_sample, file_size
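# Note on the layout assumed above: a parquet file ends with the 4-byte footer
# length followed by the 4-byte magic b'PAR1', which is why the footer size is
# read from footer_sample[-8:-4].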
#
# General Fsspec Data-transfer Optimization Code
#
def _fsspec_data_transfer(
path_or_fob,
fs,
byte_ranges=None,
footer=None,
file_size=None,
add_par1_magic=None,
bytes_per_thread=256_000_000,
max_gap=64_000,
mode="rb",
**kwargs,
):
# Calculate total file size
file_size = file_size or fs.size(path_or_fob)
# Check if a direct read makes the most sense
if not byte_ranges and bytes_per_thread >= file_size:
return fs.open(path_or_fob, mode=mode, cache_type="none").read()
# Threaded read into "dummy" buffer
buf = np.zeros(file_size, dtype="b")
if byte_ranges:
# Optimize/merge the ranges
byte_ranges = _merge_ranges(
byte_ranges,
max_block=bytes_per_thread,
max_gap=max_gap,
)
# Call multi-threaded data transfer of
# remote byte-ranges to local buffer
_read_byte_ranges(
path_or_fob,
byte_ranges,
buf,
fs,
**kwargs,
)
# Add Header & Footer bytes
if footer is not None:
footer_size = len(footer)
buf[-footer_size:] = np.frombuffer(footer[-footer_size:], dtype="b")
# Add parquet magic bytes (optional)
if add_par1_magic:
buf[:4] = np.frombuffer(b"PAR1", dtype="b")
if footer is None:
buf[-4:] = np.frombuffer(b"PAR1", dtype="b")
else:
byte_ranges = [
(b, min(bytes_per_thread, file_size - b)) for b in range(0, file_size, bytes_per_thread)
]
_read_byte_ranges(
path_or_fob,
byte_ranges,
buf,
fs,
**kwargs,
)
return buf.tobytes()
def _merge_ranges(byte_ranges, max_block=256_000_000, max_gap=64_000):
# Simple utility to merge small/adjacent byte ranges
new_ranges = []
if not byte_ranges:
# Early return
return new_ranges
offset, size = byte_ranges[0]
for (new_offset, new_size) in byte_ranges[1:]:
gap = new_offset - (offset + size)
if gap > max_gap or (size + new_size + gap) > max_block:
# Gap is too large or total read is too large
new_ranges.append((offset, size))
offset = new_offset
size = new_size
continue
size += new_size + gap
new_ranges.append((offset, size))
return new_ranges
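# Worked example: _merge_ranges([(0, 100), (150, 100)]) -> [(0, 250)], since the
# 50-byte gap is under max_gap; _merge_ranges([(0, 100), (200_000, 100)]) keeps
# both ranges because the gap exceeds the 64 KB default.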
def _assign_block(fs, path_or_fob, local_buffer, offset, nbytes):
with fs.open(path_or_fob, mode="rb", cache_type="none") as fob:
fob.seek(offset)
local_buffer[offset : offset + nbytes] = np.frombuffer(
fob.read(nbytes),
dtype="b",
)
def _read_byte_ranges(
path_or_fob,
ranges,
local_buffer,
fs,
**kwargs,
):
workers = []
for (offset, nbytes) in ranges:
if len(ranges) > 1:
workers.append(
Thread(target=_assign_block, args=(fs, path_or_fob, local_buffer, offset, nbytes))
)
workers[-1].start()
else:
_assign_block(fs, path_or_fob, local_buffer, offset, nbytes)
for worker in workers:
worker.join()
|
hwt/hdl/types/typeCast.py | ufo2011/hwt | 134 | 12682478 |
from typing import Optional, Any
from hwt.hdl.types.defs import INT, STR, BOOL, SLICE, FLOAT64
from hwt.hdl.types.hdlType import HdlType
from hwt.hdl.value import HValue
from hwt.hdl.variables import SignalItem
from hwt.synthesizer.interfaceLevel.mainBases import InterfaceBase
defaultPyConversions = {
int: INT,
str: STR,
bool: BOOL,
slice: SLICE,
float: FLOAT64
}
def toHVal(op: Any, suggestedType: Optional[HdlType]=None):
"""Convert python or hdl value/signal object to hdl value/signal object"""
if isinstance(op, HValue) or isinstance(op, SignalItem):
return op
elif isinstance(op, InterfaceBase):
return op._sig
else:
if suggestedType is not None:
return suggestedType.from_py(op)
if isinstance(op, int):
if op >= 1 << 31:
raise TypeError(
f"Number {op:d} is too big to fit in 32 bit integer of HDL"
" use Bits type instead")
elif op < -(1 << 31):
raise TypeError(
f"Number {op:d} is too small to fit in 32 bit integer"
" of HDL use Bits type instead")
try:
hType = defaultPyConversions[type(op)]
except KeyError:
hType = None
if hType is None:
raise TypeError(f"Unknown hardware type for instance of {op.__class__}")
return hType.from_py(op)
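# Hedged usage sketch of the default mapping above (the exact HValue reprs
# depend on the hwt version):
#
# toHVal(3)     # -> INT value (32-bit signed range is enforced above)
# toHVal(True)  # -> BOOL value
# toHVal("x")   # -> STR value
# toHVal(1.5)   # -> FLOAT64 value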
|
drive/snippets/drive-v3/app_data_snippet/list_appdata.py | himanshupr2627/python-samples | 479 | 12682513 | """Copyright 2022 Google LLC
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
# [START drive_list_appdata]
from __future__ import print_function
import google.auth
from googleapiclient.discovery import build
from googleapiclient.errors import HttpError
def list_appdata():
"""List all files inserted in the application data folder
prints file titles with Ids.
Returns : List of items
Load pre-authorized user credentials from the environment.
TODO(developer) - See https://developers.google.com/identity
for guides on implementing OAuth2 for the application.
"""
creds, _ = google.auth.default()
try:
# call drive api client
service = build('drive', 'v3', credentials=creds)
# pylint: disable=maybe-no-member
response = service.files().list(spaces='appDataFolder',
fields='nextPageToken, files(id, '
'name)', pageSize=10).execute()
for file in response.get('files', []):
# Process change
print(F'Found file: {file.get("name")}, {file.get("id")}')
except HttpError as error:
print(F'An error occurred: {error}')
        return []
    return response.get('files', [])
if __name__ == '__main__':
list_appdata()
# [END drive_list_appdata]
|
src/django_otp/models.py | jaap3/django-otp | 318 | 12682518 | from datetime import timedelta
from django.apps import apps
from django.conf import settings
from django.core.exceptions import ObjectDoesNotExist
from django.db import models
from django.utils import timezone
from django.utils.functional import cached_property
from .util import random_number_token
class DeviceManager(models.Manager):
"""
The :class:`~django.db.models.Manager` object installed as
``Device.objects``.
"""
def devices_for_user(self, user, confirmed=None):
"""
Returns a queryset for all devices of this class that belong to the
given user.
:param user: The user.
:type user: :class:`~django.contrib.auth.models.User`
:param confirmed: If ``None``, all matching devices are returned.
Otherwise, this can be any true or false value to limit the query
to confirmed or unconfirmed devices, respectively.
"""
devices = self.model.objects.filter(user=user)
if confirmed is not None:
devices = devices.filter(confirmed=bool(confirmed))
return devices
class Device(models.Model):
"""
Abstract base model for a :term:`device` attached to a user. Plugins must
subclass this to define their OTP models.
.. _unsaved_device_warning:
.. warning::
OTP devices are inherently stateful. For example, verifying a token is
logically a mutating operation on the device, which may involve
incrementing a counter or otherwise consuming a token. A device must be
committed to the database before it can be used in any way.
.. attribute:: user
*ForeignKey*: Foreign key to your user model, as configured by
:setting:`AUTH_USER_MODEL` (:class:`~django.contrib.auth.models.User`
by default).
.. attribute:: name
*CharField*: A human-readable name to help the user identify their
devices.
.. attribute:: confirmed
*BooleanField*: A boolean value that tells us whether this device has
been confirmed as valid. It defaults to ``True``, but subclasses or
individual deployments can force it to ``False`` if they wish to create
a device and then ask the user for confirmation. As a rule, built-in
APIs that enumerate devices will only include those that are confirmed.
.. attribute:: objects
A :class:`~django_otp.models.DeviceManager`.
"""
user = models.ForeignKey(getattr(settings, 'AUTH_USER_MODEL', 'auth.User'), help_text="The user that this device belongs to.", on_delete=models.CASCADE)
name = models.CharField(max_length=64, help_text="The human-readable name of this device.")
confirmed = models.BooleanField(default=True, help_text="Is this device ready for use?")
objects = DeviceManager()
class Meta:
abstract = True
def __str__(self):
try:
user = self.user
except ObjectDoesNotExist:
user = None
return "{0} ({1})".format(self.name, user)
@property
def persistent_id(self):
"""
A stable device identifier for forms and APIs.
"""
return '{0}/{1}'.format(self.model_label(), self.id)
@classmethod
def model_label(cls):
"""
Returns an identifier for this Django model class.
This is just the standard "<app_label>.<model_name>" form.
"""
return '{0}.{1}'.format(cls._meta.app_label, cls._meta.model_name)
@classmethod
def from_persistent_id(cls, persistent_id, for_verify=False):
"""
Loads a device from its persistent id::
device == Device.from_persistent_id(device.persistent_id)
:param bool for_verify: If ``True``, we'll load the device with
:meth:`~django.db.models.query.QuerySet.select_for_update` to
prevent concurrent verifications from succeeding. In which case,
this must be called inside a transaction.
"""
device = None
try:
model_label, device_id = persistent_id.rsplit('/', 1)
app_label, model_name = model_label.split('.')
device_cls = apps.get_model(app_label, model_name)
if issubclass(device_cls, Device):
device_set = device_cls.objects.filter(id=int(device_id))
if for_verify:
device_set = device_set.select_for_update()
device = device_set.first()
except (ValueError, LookupError):
pass
return device
def is_interactive(self):
"""
Returns ``True`` if this is an interactive device. The default
implementation returns ``True`` if
:meth:`~django_otp.models.Device.generate_challenge` has been
overridden, but subclasses are welcome to provide smarter
implementations.
:rtype: bool
"""
return not hasattr(self.generate_challenge, 'stub')
def generate_challenge(self):
"""
Generates a challenge value that the user will need to produce a token.
This method is permitted to have side effects, such as transmitting
information to the user through some other channel (email or SMS,
perhaps). And, of course, some devices may need to commit the
challenge to the database.
:returns: A message to the user. This should be a string that fits
comfortably in the template ``'OTP Challenge: {0}'``. This may
return ``None`` if this device is not interactive.
:rtype: string or ``None``
:raises: Any :exc:`~exceptions.Exception` is permitted. Callers should
trap ``Exception`` and report it to the user.
"""
return None
generate_challenge.stub = True
def verify_is_allowed(self):
"""
Checks whether it is permissible to call :meth:`verify_token`. If it is
allowed, returns ``(True, None)``. Otherwise returns ``(False,
data_dict)``, where ``data_dict`` contains extra information, defined
by the implementation.
This method can be used to implement throttling or locking, for
example. Client code should check this method before calling
:meth:`verify_token` and report problems to the user.
        To report specific problems, the data dictionary can include a
``'reason'`` member with a value from the constants in
:class:`VerifyNotAllowed`. Otherwise, an ``'error_message'`` member
should be provided with an error message.
:meth:`verify_token` should also call this method and return False if
verification is not allowed.
:rtype: (bool, dict or ``None``)
"""
return (True, None)
def verify_token(self, token):
"""
Verifies a token. As a rule, the token should no longer be valid if
this returns ``True``.
:param str token: The OTP token provided by the user.
:rtype: bool
"""
return False
class SideChannelDevice(Device):
"""
Abstract base model for a side-channel :term:`device` attached to a user.
This model implements token generation, verification and expiration, so the
concrete devices only have to implement delivery.
"""
token = models.CharField(max_length=16, blank=True, null=True)
valid_until = models.DateTimeField(
default=timezone.now,
help_text="The timestamp of the moment of expiry of the saved token."
)
class Meta:
abstract = True
def generate_token(self, length=6, valid_secs=300, commit=True):
"""
Generates a token of the specified length, then sets it on the model
and sets the expiration of the token on the model.
Pass 'commit=False' to avoid calling self.save().
:param int length: Number of decimal digits in the generated token.
:param int valid_secs: Amount of seconds the token should be valid.
:param bool commit: Whether to autosave the generated token.
"""
self.token = random_number_token(length)
self.valid_until = timezone.now() + timedelta(seconds=valid_secs)
if commit:
self.save()
def verify_token(self, token):
"""
Verifies a token by content and expiry.
On success, the token is cleared and the device saved.
:param str token: The OTP token provided by the user.
:rtype: bool
"""
_now = timezone.now()
if (self.token is not None) and (token == self.token) and (_now < self.valid_until):
self.token = None
self.valid_until = _now
self.save()
return True
else:
return False
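# Hedged example (not part of django-otp): a concrete side-channel device only
# has to implement delivery; token generation/verification come from
# SideChannelDevice above. The field name, message text and use of send_mail
# are illustrative assumptions, and the class stays abstract so it defines no
# database table.
class ExampleEmailTokenDevice(SideChannelDevice):
    email = models.EmailField()
    class Meta:
        abstract = True
    def generate_challenge(self):
        from django.core.mail import send_mail  # local import: example only
        self.generate_token(valid_secs=300)
        send_mail('Your verification token', self.token,
                  'noreply@example.com', [self.email])
        return 'Token sent to {0}'.format(self.email)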
class VerifyNotAllowed:
"""
Constants that may be returned in the ``reason`` member of the extra
information dictionary returned by
:meth:`~django_otp.models.Device.verify_is_allowed`
.. data:: N_FAILED_ATTEMPTS
Indicates that verification is disallowed because of ``n`` successive
failed attempts. The data dictionary should include the value of ``n``
in member ``failure_count``
"""
N_FAILED_ATTEMPTS = 'N_FAILED_ATTEMPTS'
class ThrottlingMixin(models.Model):
"""
Mixin class for models that need throttling behaviour. Implements
exponential back-off.
"""
# This mixin is not publicly documented, but is used internally to avoid
# code duplication. Subclasses must implement get_throttle_factor(), and
# must use the verify_is_allowed(), throttle_reset() and
# throttle_increment() methods from within their verify_token() method.
throttling_failure_timestamp = models.DateTimeField(
null=True, blank=True, default=None,
help_text="A timestamp of the last failed verification attempt. Null if last attempt succeeded."
)
throttling_failure_count = models.PositiveIntegerField(
default=0, help_text="Number of successive failed attempts."
)
def verify_is_allowed(self):
"""
If verification is allowed, returns ``(True, None)``.
Otherwise, returns ``(False, data_dict)``.
``data_dict`` contains further information. Currently it can be::
{'reason': VerifyNotAllowed.N_FAILED_ATTEMPTS,
'failure_count': n
}
where ``n`` is the number of successive failures. See
:class:`~django_otp.models.VerifyNotAllowed`.
"""
if (self.throttling_enabled and
self.throttling_failure_count > 0 and
self.throttling_failure_timestamp is not None):
now = timezone.now()
delay = (now - self.throttling_failure_timestamp).total_seconds()
# Required delays should be 1, 2, 4, 8 ...
delay_required = self.get_throttle_factor() * (2 ** (self.throttling_failure_count - 1))
if delay < delay_required:
return (False,
{'reason': VerifyNotAllowed.N_FAILED_ATTEMPTS,
'failure_count': self.throttling_failure_count,
'locked_until': self.throttling_failure_timestamp + timedelta(seconds=delay_required)}
)
return super().verify_is_allowed()
def throttle_reset(self, commit=True):
"""
Call this method to reset throttling (normally when a verify attempt
succeeded).
Pass 'commit=False' to avoid calling self.save().
"""
self.throttling_failure_timestamp = None
self.throttling_failure_count = 0
if commit:
self.save()
def throttle_increment(self, commit=True):
"""
Call this method to increase throttling (normally when a verify attempt
failed).
Pass 'commit=False' to avoid calling self.save().
"""
self.throttling_failure_timestamp = timezone.now()
self.throttling_failure_count += 1
if commit:
self.save()
@cached_property
def throttling_enabled(self):
return self.get_throttle_factor() > 0
def get_throttle_factor(self): # pragma: no cover
raise NotImplementedError()
class Meta:
abstract = True
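# Worked example of the back-off above: with get_throttle_factor() == 1 the
# required delay after n successive failures is 1 * 2 ** (n - 1) seconds,
# i.e. 1, 2, 4, 8, ...; a successful verification calls throttle_reset() and
# clears the counter.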
|
train.py | zhechen/PLARD | 122 | 12682536 | import sys, os
import torch
import visdom
import argparse
import numpy as np
import logging
import torch.nn as nn
import torch.nn.functional as F
import torchvision.models as models
from torch.autograd import Variable
from torch.utils import data
from tqdm import tqdm
import collections
from ptsemseg.models import get_model
from ptsemseg.loader import get_loader, get_data_path
from ptsemseg.metrics import runningScore
from ptsemseg.loss import *
from ptsemseg.augmentations import *
def adjust_learning_rate(optimizer, epoch, lr, decay, step):
"""Sets the learning rate to the initial LR decayed by 10 every 30 epochs"""
lr = lr * (decay ** (epoch // step))
for param_group in optimizer.param_groups:
param_group['lr'] = lr
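# Worked example (matching the defaults below): l_rate=5e-5, decay=0.1, step=1
# gives lr = 5e-5 at epoch 0, 5e-6 at epoch 1, 5e-7 at epoch 2, and so on.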
def train(args, logger):
# Setup Dataloader
data_loader = get_loader(args.dataset)
data_path = get_data_path(args.dataset)
t_loader = data_loader(data_path, is_transform=True, img_size=(args.img_cols, args.img_rows))
n_classes = t_loader.n_classes
nw = args.batch_size if args.batch_size > 1 else 0
trainloader = data.DataLoader(t_loader, batch_size=args.batch_size, num_workers=nw, shuffle=True)
# Setup Model
model = get_model(args.arch, n_classes)
if args.pretrained is not None:
checkpoint = torch.load(args.pretrained)
model.load_state_dict_without_classification(checkpoint['model_state'])
model = torch.nn.DataParallel(model, device_ids=range(torch.cuda.device_count()))
model.cuda()
mom = 0.99
wd = 5e-4
# Check if model has custom optimizer / loss
if hasattr(model.module, 'optimizer'):
optimizer = model.module.optimizer
else:
optimizer = torch.optim.SGD(model.parameters(), lr=args.l_rate, momentum=mom, weight_decay=wd) #0.99 5e-4
print('Params: l_rate %f, l_rate_decay: %.2f, l_rate_step: %d, batch_size: %d, mom: %.2f, wd: %f'%(
args.l_rate, args.l_rate_decay, args.l_rate_step, args.batch_size, mom, wd))
if hasattr(model.module, 'loss'):
print('Using custom loss')
logger.info('Using custom loss')
loss_fn = model.module.loss
else:
loss_fn = cross_entropy2d
if args.resume is not None:
if os.path.isfile(args.resume):
print("Loading model and optimizer from checkpoint '{}'".format(args.resume))
logger.info("Loading model and optimizer from checkpoint '{}'".format(args.resume))
checkpoint = torch.load(args.resume)
model.load_state_dict(checkpoint['model_state'])
optimizer.load_state_dict(checkpoint['optimizer_state'])
print("Loaded checkpoint '{}' (epoch {})"
.format(args.resume, checkpoint['epoch']))
logger.info("Loaded checkpoint '{}' (epoch {})"
.format(args.resume, checkpoint['epoch']))
else:
print("No checkpoint found at '{}'".format(args.resume))
logger.info("No checkpoint found at '{}'".format(args.resume))
best_iou = -100.0
for epoch in range(args.n_epoch):
adjust_learning_rate(optimizer, epoch, args.l_rate, args.l_rate_decay, args.l_rate_step)
model.train()
#if args.pretrained is not None:
model.module.freeze_bn()
avg_loss = 0.
for i, (images, lidars, labels) in enumerate(trainloader):
images = Variable(images.cuda())
if type(labels) == list:
var_labels = []
for ii in range(len(labels)):
var_labels.append(Variable(labels[ii].cuda()))
else:
var_labels = Variable(labels.cuda())
lidars = Variable(lidars.cuda())
optimizer.zero_grad()
loss = model([images, lidars, labels])
            loss.mean().backward()  # backpropagate the loss before stepping the optimizer
            optimizer.step()
if args.visdom:
vis.line(
X=torch.ones((1, 1)).cpu() * i,
Y=torch.Tensor([loss.data[0]]).unsqueeze(0).cpu(),
win=loss_window,
update='append')
avg_loss += loss.detach().cpu().numpy().mean() #.data.item()
#avg_loss += loss.data.item()
if (i+1) % 10 == 0:
avg_loss = avg_loss / 10.
print("Epoch [%d/%d] [%d/%d] Loss: %.4f" % (epoch+1, args.n_epoch, i+1, len(trainloader), avg_loss))
logger.info("Epoch [%d/%d] [%d/%d] Loss: %.4f" % (epoch+1, args.n_epoch, i+1, len(trainloader), avg_loss))
avg_loss = 0.
if epoch > 0:
if (args.n_epoch <= 10 and epoch % 2 == 1) or epoch % 20 == 0:
logger.info('saving models to ' + "{}_{}_{}.pkl".format(args.arch, args.dataset,epoch))
print('saving models to ' + "{}_{}_{}.pkl".format(args.arch, args.dataset,epoch))
state = {'epoch': epoch+1,
'model_state': model.module.state_dict(),
'optimizer_state' : optimizer.state_dict(),}
torch.save(state, "./output-model/{}_{}_{}.pkl".format(args.arch, args.dataset,epoch))
logger.info('saving models to ' + "{}_{}_{}.pkl".format(args.arch, args.dataset, args.n_epoch))
print('saving models to ' + "{}_{}_{}.pkl".format(args.arch, args.dataset,epoch))
state = {'epoch': epoch+1,
'model_state': model.module.state_dict(),
'optimizer_state' : optimizer.state_dict(),}
torch.save(state, "./output-model/{}_{}_{}.pkl".format(args.arch, args.dataset, args.n_epoch))
def setup_logging(name, filename=None):
FORMAT = '%(levelname)s %(filename)s:%(lineno)4d: %(message)s'
# Manually clear root loggers to prevent any module that may have called
# logging.basicConfig() from blocking our logging setup
logging.root.handlers = []
if filename is None:
logging.basicConfig(level=logging.INFO, format=FORMAT, stream=sys.stdout)
else:
logging.basicConfig(level=logging.INFO, format=FORMAT, filename=filename)
logger = logging.getLogger(name)
return logger
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Hyperparams')
parser.add_argument('--arch', nargs='?', type=str, default='pspnet',
help='Architecture to use [\'plard, fcn8s, unet, segnet etc\']')
parser.add_argument('--dataset', nargs='?', type=str, default='mapillary',
help='Dataset to use [\'kitti_road, pascal, camvid, ade20k etc\']')
parser.add_argument('--img_rows', nargs='?', type=int, default=384,
help='Height of the input image')
parser.add_argument('--img_cols', nargs='?', type=int, default=1280,
help='Width of the input image')
parser.add_argument('--n_epoch', nargs='?', type=int, default=5,
help='# of the epochs')
parser.add_argument('--batch_size', nargs='?', type=int, default=4,
help='Batch Size')
parser.add_argument('--l_rate', nargs='?', type=float, default=5e-5,
help='Learning Rate')
parser.add_argument('--l_rate_decay', nargs='?', type=float, default=0.1,
help='Learning Rate Decay')
parser.add_argument('--l_rate_step', nargs='?', type=int, default=1,
help='Learning Rate Step')
parser.add_argument('--feature_scale', nargs='?', type=int, default=1,
help='Divider for # of features to use')
parser.add_argument('--resume', nargs='?', type=str, default=None,
help='Path to previous saved model to restart from')
parser.add_argument('--pretrained', nargs='?', type=str, default=None,
help='pretriain')
parser.add_argument('--visdom', dest='visdom', action='store_true',
help='Enable visualization(s) on visdom | False by default')
parser.add_argument('--no-visdom', dest='visdom', action='store_false',
help='Disable visualization(s) on visdom | False by default')
parser.set_defaults(visdom=False)
args = parser.parse_args()
logger = setup_logging(__name__, filename='./'+args.arch+'.out')
train(args, logger)
|
model/conv/MBConv.py | Nitin-Mane/External-Attention-pytorch | 4,466 | 12682545 | import math
from functools import partial
import torch
from torch import nn
from torch.nn import functional as F
class SwishImplementation(torch.autograd.Function):
@staticmethod
def forward(ctx, i):
result = i * torch.sigmoid(i)
ctx.save_for_backward(i)
return result
@staticmethod
def backward(ctx, grad_output):
i = ctx.saved_variables[0]
sigmoid_i = torch.sigmoid(i)
return grad_output * (sigmoid_i * (1 + i * (1 - sigmoid_i)))
class MemoryEfficientSwish(nn.Module):
def forward(self, x):
return SwishImplementation.apply(x)
def drop_connect(inputs, p, training):
""" Drop connect. """
if not training: return inputs
batch_size = inputs.shape[0]
keep_prob = 1 - p
random_tensor = keep_prob
random_tensor += torch.rand([batch_size, 1, 1, 1], dtype=inputs.dtype, device=inputs.device)
binary_tensor = torch.floor(random_tensor)
output = inputs / keep_prob * binary_tensor
return output
def get_same_padding_conv2d(image_size=None):
return partial(Conv2dStaticSamePadding, image_size=image_size)
def get_width_and_height_from_size(x):
""" Obtains width and height from a int or tuple """
if isinstance(x, int): return x, x
if isinstance(x, list) or isinstance(x, tuple): return x
else: raise TypeError()
def calculate_output_image_size(input_image_size, stride):
"""
    Compute the output image size of Conv2dSamePadding with the given stride.
"""
if input_image_size is None: return None
image_height, image_width = get_width_and_height_from_size(input_image_size)
stride = stride if isinstance(stride, int) else stride[0]
image_height = int(math.ceil(image_height / stride))
image_width = int(math.ceil(image_width / stride))
return [image_height, image_width]
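# Worked example: calculate_output_image_size(224, 2) -> [112, 112], and
# calculate_output_image_size([112, 112], 1) -> [112, 112].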
class Conv2dStaticSamePadding(nn.Conv2d):
""" 2D Convolutions like TensorFlow, for a fixed image size"""
def __init__(self, in_channels, out_channels, kernel_size, image_size=None, **kwargs):
super().__init__(in_channels, out_channels, kernel_size, **kwargs)
self.stride = self.stride if len(self.stride) == 2 else [self.stride[0]] * 2
# Calculate padding based on image size and save it
assert image_size is not None
ih, iw = (image_size, image_size) if isinstance(image_size, int) else image_size
kh, kw = self.weight.size()[-2:]
sh, sw = self.stride
oh, ow = math.ceil(ih / sh), math.ceil(iw / sw)
pad_h = max((oh - 1) * self.stride[0] + (kh - 1) * self.dilation[0] + 1 - ih, 0)
pad_w = max((ow - 1) * self.stride[1] + (kw - 1) * self.dilation[1] + 1 - iw, 0)
if pad_h > 0 or pad_w > 0:
self.static_padding = nn.ZeroPad2d((pad_w // 2, pad_w - pad_w // 2, pad_h // 2, pad_h - pad_h // 2))
else:
self.static_padding = Identity()
def forward(self, x):
x = self.static_padding(x)
x = F.conv2d(x, self.weight, self.bias, self.stride, self.padding, self.dilation, self.groups)
return x
class Identity(nn.Module):
def __init__(self, ):
super(Identity, self).__init__()
def forward(self, input):
return input
# MBConvBlock
class MBConvBlock(nn.Module):
'''
    Example layer: kernel size 3*3, input channels 32, output channels 16, conv1, stride 1
'''
def __init__(self, ksize, input_filters, output_filters, expand_ratio=1, stride=1, image_size=224):
super().__init__()
self._bn_mom = 0.1
self._bn_eps = 0.01
self._se_ratio = 0.25
self._input_filters = input_filters
self._output_filters = output_filters
self._expand_ratio = expand_ratio
self._kernel_size = ksize
self._stride = stride
inp = self._input_filters
oup = self._input_filters * self._expand_ratio
if self._expand_ratio != 1:
Conv2d = get_same_padding_conv2d(image_size=image_size)
self._expand_conv = Conv2d(in_channels=inp, out_channels=oup, kernel_size=1, bias=False)
self._bn0 = nn.BatchNorm2d(num_features=oup, momentum=self._bn_mom, eps=self._bn_eps)
# Depthwise convolution
k = self._kernel_size
s = self._stride
Conv2d = get_same_padding_conv2d(image_size=image_size)
self._depthwise_conv = Conv2d(
in_channels=oup, out_channels=oup, groups=oup,
kernel_size=k, stride=s, bias=False)
self._bn1 = nn.BatchNorm2d(num_features=oup, momentum=self._bn_mom, eps=self._bn_eps)
image_size = calculate_output_image_size(image_size, s)
# Squeeze and Excitation layer, if desired
Conv2d = get_same_padding_conv2d(image_size=(1,1))
num_squeezed_channels = max(1, int(self._input_filters * self._se_ratio))
self._se_reduce = Conv2d(in_channels=oup, out_channels=num_squeezed_channels, kernel_size=1)
self._se_expand = Conv2d(in_channels=num_squeezed_channels, out_channels=oup, kernel_size=1)
# Output phase
final_oup = self._output_filters
Conv2d = get_same_padding_conv2d(image_size=image_size)
self._project_conv = Conv2d(in_channels=oup, out_channels=final_oup, kernel_size=1, bias=False)
self._bn2 = nn.BatchNorm2d(num_features=final_oup, momentum=self._bn_mom, eps=self._bn_eps)
self._swish = MemoryEfficientSwish()
def forward(self, inputs, drop_connect_rate=None):
"""
:param inputs: input tensor
:param drop_connect_rate: drop connect rate (float, between 0 and 1)
:return: output of block
"""
# Expansion and Depthwise Convolution
x = inputs
if self._expand_ratio != 1:
expand = self._expand_conv(inputs)
bn0 = self._bn0(expand)
x = self._swish(bn0)
depthwise = self._depthwise_conv(x)
bn1 = self._bn1(depthwise)
x = self._swish(bn1)
# Squeeze and Excitation
x_squeezed = F.adaptive_avg_pool2d(x, 1)
x_squeezed = self._se_reduce(x_squeezed)
x_squeezed = self._swish(x_squeezed)
x_squeezed = self._se_expand(x_squeezed)
x = torch.sigmoid(x_squeezed) * x
x = self._bn2(self._project_conv(x))
# Skip connection and drop connect
input_filters, output_filters = self._input_filters, self._output_filters
if self._stride == 1 and input_filters == output_filters:
if drop_connect_rate:
x = drop_connect(x, p=drop_connect_rate, training=self.training)
x = x + inputs # skip connection
return x
if __name__ == '__main__':
input=torch.randn(1,3,112,112)
mbconv=MBConvBlock(ksize=3,input_filters=3,output_filters=3,image_size=112)
out=mbconv(input)
print(out.shape) |
dataset_preprocessing/camelyon17/generate_all_patch_coords.py | caglasozen/wilds | 355 | 12682551 | # Code adapted from https://github.com/liucong3/camelyon17
# and https://github.com/cv-lee/Camelyon17
import openslide
import cv2
import numpy as np
import pandas as pd
import os
import csv
import argparse
from tqdm import tqdm
from xml.etree.ElementTree import parse
from PIL import Image
PATCH_LEVEL = 2
MASK_LEVEL = 4
CENTER_SIZE = 32
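# With these defaults each extracted patch is 3 * CENTER_SIZE = 96 px on a side
# at PATCH_LEVEL 2, while annotation/tissue masks are computed at the coarser
# MASK_LEVEL 4 (coordinates are rescaled between the two levels below).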
def _read_xml(xml_path, mask_level):
"""
Read an XML file with annotations and return coordinates of tumor and normal areas
"""
xml = parse(xml_path).getroot()
tumor_coord_list = []
normal_coord_list = []
for annotation in xml.iter('Annotation'):
annotation_type = annotation.get('PartOfGroup')
assert annotation_type in ['metastases', 'normal', 'None']
if annotation_type == 'metastases':
coord_list = tumor_coord_list
elif annotation_type == 'normal':
coord_list = normal_coord_list
elif annotation_type == 'None':
continue
for region_idx, region in enumerate(annotation.iter('Coordinates')):
assert region_idx == 0
coords = []
for coord in region:
coords.append([round(float(coord.get('X'))/(2**mask_level)),
round(float(coord.get('Y'))/(2**mask_level))])
coord_list.append(coords)
return tumor_coord_list, normal_coord_list
def _make_masks(slide_path, xml_path, mask_level, make_map, **args):
'''
Return a slide with annotated tumor, normal, and tissue masks using an Otsu threshold
'''
print('_make_masks(%s)' % slide_path)
#slide loading
slide = openslide.OpenSlide(slide_path)
# xml loading
tumor_coord_list, normal_coord_list = _read_xml(xml_path, mask_level)
if make_map:
slide_map = np.array(slide.get_thumbnail(slide.level_dimensions[mask_level]))
# draw boundary of tumor in map
for coords in tumor_coord_list:
cv2.drawContours(slide_map, np.array([coords]), -1, 255, 1)
for coords in normal_coord_list:
cv2.drawContours(slide_map, np.array([coords]), -1, 127, 1)
else:
slide_map = None
# draw tumor mask
# first fill up tumors, then draw normal boundaries and fill those up with 0
tumor_mask = np.zeros(slide.level_dimensions[mask_level][::-1])
for coords in tumor_coord_list:
cv2.drawContours(tumor_mask, np.array([coords]), -1, 255, -1)
for coords in normal_coord_list:
cv2.drawContours(tumor_mask, np.array([coords]), -1, 0, -1)
# draw tissue mask
slide_lv = slide.read_region((0, 0), mask_level, slide.level_dimensions[mask_level])
slide_lv = cv2.cvtColor(np.array(slide_lv), cv2.COLOR_RGBA2RGB)
slide_lv = cv2.cvtColor(slide_lv, cv2.COLOR_BGR2HSV)
slide_lv = slide_lv[:, :, 1]
_, tissue_mask = cv2.threshold(slide_lv, 0, 255, cv2.THRESH_BINARY+cv2.THRESH_OTSU)
# check normal mask / draw normal mask
normal_mask = np.array(tissue_mask).copy()
normal_mask[tumor_mask > 127] = 0
return slide, slide_map, tumor_mask, tissue_mask, normal_mask
def _write_masks(mask_folder_path, slide_map, tumor_mask, tissue_mask, normal_mask, **args):
"""
Write masks out to disk; used for sanity checking and visualization.
"""
print('_write_masks')
os.makedirs(mask_folder_path, exist_ok=True)
map_path = os.path.join(mask_folder_path, 'map.png')
cv2.imwrite(map_path, slide_map)
tumor_mask_path = os.path.join(mask_folder_path, 'tumor_mask.png')
cv2.imwrite(tumor_mask_path, tumor_mask) # CHANGED
tissue_mask_path = os.path.join(mask_folder_path, 'tissue_mask.png')
cv2.imwrite(tissue_mask_path, np.array(tissue_mask))
normal_mask_path = os.path.join(mask_folder_path, 'normal_mask.png')
cv2.imwrite(normal_mask_path, normal_mask)
def _record_patches(center_size,
slide, slide_map, patch_level,
mask_level, tumor_mask, tissue_mask, normal_mask,
tumor_threshold,
normal_threshold,
**args):
"""
Extract all tumor and non-tumor patches from a slide, using the given masks.
"""
# Patch size is 3*center_size by 3*center_size
# It is in terms of pixels of the final output
# So it's measured with respect to patch_level
patch_size = center_size * 3
# Extract normal, tumor patches using normal, tumor mask
width, height = np.array(slide.level_dimensions[patch_level]) // center_size
total = width * height
all_cnt = 0
t_cnt = 0
n_cnt = 0
print('_record_patches(w=%d,h=%d)' % (width,height))
margin = 5 #3
mask_max = 255
assert mask_level >= patch_level
width_mask_step = center_size * slide.level_dimensions[mask_level][0] / slide.level_dimensions[patch_level][0]
height_mask_step = center_size * slide.level_dimensions[mask_level][1] / slide.level_dimensions[patch_level][1]
patch_list = []
# These mark the coordinates of the central region of the patch
for i in range(margin, width-margin):
for j in range(margin, height-margin):
mask_i_start = round(width_mask_step * i)
mask_i_end = round(width_mask_step * (i+1))
mask_j_start = round(height_mask_step * j)
mask_j_end = round(height_mask_step * (j+1))
# Compute masks only over central region
tumor_mask_avg = tumor_mask[
mask_j_start : mask_j_end,
mask_i_start : mask_i_end].mean()
normal_mask_avg = normal_mask[
mask_j_start : mask_j_end,
mask_i_start : mask_i_end].mean()
tumor_area_ratio = tumor_mask_avg / mask_max
normal_area_ratio = normal_mask_avg / mask_max
# Extract patch coordinates
# Coords correspond just to the center, not the entire patch
if (tumor_area_ratio > tumor_threshold):
patch_list.append((center_size*i, center_size*j, 1))
cv2.rectangle(
slide_map,
(mask_i_start, mask_j_start),
(mask_i_end, mask_j_end),
(0,0,255),
1)
elif (normal_area_ratio > normal_threshold):
patch_list.append((center_size*i, center_size*j, 0))
cv2.rectangle(
slide_map,
(mask_i_start, mask_j_start),
(mask_i_end, mask_j_end),
(255,255,0),
1)
df = pd.DataFrame(patch_list,
columns=[
'x_coord',
'y_coord',
'tumor'
])
return df
def generate_file(patient, node, xml_path, slide_path, folder_path):
args = {
'slide_path' : slide_path,
'xml_path': xml_path,
'patch_level' : PATCH_LEVEL,
'mask_level' : MASK_LEVEL,
'center_size' : CENTER_SIZE,
'tumor_threshold' : 0,
'normal_threshold' : 0.2,
'mask_folder_path' : folder_path,
'make_map' : True
}
args['slide'], args['slide_map'], args['tumor_mask'], args['tissue_mask'], args['normal_mask'] = _make_masks(**args)
df = _record_patches(**args)
df['patient'] = patient
df['node'] = node
_write_masks(**args)
return df
def generate_files(slide_root, output_root):
aggregate_df = pd.DataFrame(
columns=[
'patient',
'node',
'x_coord',
'y_coord',
'tumor'
])
for root, dirs, files in os.walk(os.path.join(slide_root, 'lesion_annotations')):
for file in files:
if file.endswith('.xml') and not file.startswith('._'):
prefix = file.split('.xml')[0]
try:
assert len(prefix.split('_')) == 4
df = generate_file(
patient=prefix.split('_')[1],
node=prefix.split('_')[3],
xml_path=os.path.join(root, file),
slide_path=os.path.join(slide_root, 'tif', f'{prefix}.tif'),
folder_path=os.path.join(output_root, 'masks', prefix))
aggregate_df = pd.concat([aggregate_df, df])
except openslide.OpenSlideError as err:
print(err)
continue
aggregate_df = aggregate_df.reset_index(drop=True)
aggregate_df.to_csv(os.path.join(output_root, 'all_patch_coords.csv'))
return aggregate_df
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--slide_root', required=True)
parser.add_argument('--output_root', required=True)
args = parser.parse_args()
generate_files(
slide_root=args.slide_root,
output_root=args.output_root)
|
sample_selfie_segmentation.py | karaage0703/mediapipe-python-sample | 164 | 12682577 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import copy
import argparse
import cv2 as cv
import numpy as np
import mediapipe as mp
from utils import CvFpsCalc
def get_args():
parser = argparse.ArgumentParser()
parser.add_argument("--device", type=int, default=0)
parser.add_argument("--width", help='cap width', type=int, default=960)
parser.add_argument("--height", help='cap height', type=int, default=540)
parser.add_argument("--model_selection",
help='model_selection',
type=int,
default=0)
parser.add_argument("--score_th",
help='score threshold',
type=float,
default=0.1)
parser.add_argument("--bg_path",
help='back ground image path',
type=str,
default=None)
args = parser.parse_args()
return args
def main():
    # Parse arguments ##########################################################
args = get_args()
cap_device = args.device
cap_width = args.width
cap_height = args.height
model_selection = args.model_selection
score_th = args.score_th
if args.bg_path is not None:
bg_image = cv.imread(args.bg_path)
else:
bg_image = None
    # Prepare camera ###########################################################
cap = cv.VideoCapture(cap_device)
cap.set(cv.CAP_PROP_FRAME_WIDTH, cap_width)
cap.set(cv.CAP_PROP_FRAME_HEIGHT, cap_height)
    # Load model ###############################################################
mp_selfie_segmentation = mp.solutions.selfie_segmentation
selfie_segmentation = mp_selfie_segmentation.SelfieSegmentation(
model_selection=model_selection)
    # FPS measurement module ###################################################
cvFpsCalc = CvFpsCalc(buffer_len=10)
while True:
display_fps = cvFpsCalc.get()
        # Capture camera frame #################################################
ret, image = cap.read()
if not ret:
break
        image = cv.flip(image, 1)  # mirror display
debug_image = copy.deepcopy(image)
        # Run detection ########################################################
image = cv.cvtColor(image, cv.COLOR_BGR2RGB)
results = selfie_segmentation.process(image)
        # Draw results #########################################################
mask = np.stack((results.segmentation_mask, ) * 3, axis=-1) >= score_th
if bg_image is None:
bg_resize_image = np.zeros(image.shape, dtype=np.uint8)
bg_resize_image[:] = (0, 255, 0)
else:
bg_resize_image = cv.resize(bg_image,
(image.shape[1], image.shape[0]))
debug_image = np.where(mask, debug_image, bg_resize_image)
cv.putText(debug_image, "FPS:" + str(display_fps), (10, 30),
cv.FONT_HERSHEY_SIMPLEX, 1.0, (255, 255, 255), 2,
cv.LINE_AA)
        # Key handling (ESC: quit) #############################################
key = cv.waitKey(1)
if key == 27: # ESC
break
        # Show on screen #######################################################
cv.imshow('MediaPipe Selfie Segmentation Demo', debug_image)
cap.release()
cv.destroyAllWindows()
if __name__ == '__main__':
main()
|
python/rikai/spark/sql/codegen/sklearn.py | changhiskhan/rikai | 111 | 12682595 | # Copyright 2021 Rikai Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Iterator
import numpy as np
import pandas as pd
from pyspark.sql.functions import pandas_udf, PandasUDFType
from pyspark.sql.types import StructType
def generate_udf(spec: "rikai.spark.sql.codegen.base.ModelSpec"):
"""Construct a UDF to run sklearn model.
Parameters
----------
spec : ModelSpec
the model specifications object
Returns
-------
A Spark Pandas UDF.
"""
def sklearn_inference_udf(
iter: Iterator[pd.Series],
) -> Iterator[pd.Series]:
model = spec.load_model()
for series in list(iter):
X = np.vstack(series.to_numpy())
y = model.predict(X)
yield pd.Series(y)
return pandas_udf(sklearn_inference_udf, returnType=spec.schema)
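# Hedged usage sketch (the column name and spec construction are assumptions;
# in Rikai the UDF is normally wired up through the ML_PREDICT SQL integration):
#
# udf = generate_udf(spec)
# df = df.withColumn('prediction', udf('features'))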
|
okta/models/verify_factor_request.py | corylevine/okta-sdk-python | 145 | 12682608 |
# flake8: noqa
"""
Copyright 2020 - Present Okta, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
# AUTO-GENERATED! DO NOT EDIT FILE DIRECTLY
# SEE CONTRIBUTOR DOCUMENTATION
from okta.okta_object import OktaObject
class VerifyFactorRequest(
OktaObject
):
"""
A class for VerifyFactorRequest objects.
"""
def __init__(self, config=None):
super().__init__(config)
if config:
self.activation_token = config["activationToken"]\
if "activationToken" in config else None
self.answer = config["answer"]\
if "answer" in config else None
self.attestation = config["attestation"]\
if "attestation" in config else None
self.client_data = config["clientData"]\
if "clientData" in config else None
self.next_pass_code = config["nextPassCode"]\
if "nextPassCode" in config else None
self.pass_code = config["passCode"]\
if "passCode" in config else None
self.registration_data = config["registrationData"]\
if "registrationData" in config else None
self.state_token = config["stateToken"]\
if "stateToken" in config else None
else:
self.activation_token = None
self.answer = None
self.attestation = None
self.client_data = None
self.next_pass_code = None
self.pass_code = None
self.registration_data = None
self.state_token = None
def request_format(self):
parent_req_format = super().request_format()
current_obj_format = {
"activationToken": self.activation_token,
"answer": self.answer,
"attestation": self.attestation,
"clientData": self.client_data,
"nextPassCode": self.next_pass_code,
"passCode": self.pass_code,
"registrationData": self.registration_data,
"stateToken": self.state_token
}
parent_req_format.update(current_obj_format)
return parent_req_format
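# Hedged usage sketch: building a request body for a passcode-based factor
# verification; unset attributes simply stay None in request_format().
#
# req = VerifyFactorRequest({'passCode': '123456'})
# body = req.request_format()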
|
pylayers/gui/PylayersGui.py | usmanwardag/pylayers | 143 | 12682610 |
# -*- coding: utf-8 -*-
"""
PyLayers GUI
.. automodule::
:members:
To run this code, type
python PylayersGui.py
"""
from pylayers.simul.link import *
import pylayers.util.pyutil as pyu
import pylayers.signal.standard as std
from pylayers.util.project import *
import json
# TEST
import matplotlib
matplotlib.use('Qt4Agg')
from matplotlib.backends.backend_qt4agg import FigureCanvasQTAgg as FigureCanvas
from matplotlib.backends.backend_qt4agg import NavigationToolbar2QT
from matplotlib.figure import Figure
from pyface.qt import QtGui,QtCore
from traitsui.qt4.editor import Editor
from traitsui.qt4.basic_editor_factory import BasicEditorFactory
# console ipython
from IPython import embed_kernel
from traits.api import HasTraits, Button,Range,Enum, Instance, \
on_trait_change,property_depends_on,Float,Str,Int,Bool,List
from traitsui.api import View, Item,HSplit,VSplit, RangeEditor, \
EnumEditor,Group,spring,HGroup,VGroup,Handler, \
InstanceEditor
from traitsui.menu import Action, ActionGroup, Menu, MenuBar, ToolBar
from mayavi.core.api import PipelineBase
from mayavi.core.ui.api import MayaviScene, SceneEditor, \
MlabSceneModel
from tvtk.pyface.api import Scene
try:
get_ipython
except NameError:
banner=exit_msg=''
else:
banner = '*** Nested interpreter ***'
exit_msg = '*** Back in main IPython ***'
# First import the embed function
from IPython.frontend.terminal.embed import InteractiveShellEmbed
## INIT DLink object
DL=DLink()
filename=pyu.getlong('wstd.json',pstruc['DIRSIMUL'])
fp = open(filename)
stds = json.load(fp)
av_wstds = ['None']+ list(stds.keys())
dchann = {w:[str(i) for i in std.Wstandard(w).chan.keys()] for w in av_wstds if w !='None'}
dchann.update({'None':['None']})
from qtconsole.rich_ipython_widget import RichJupyterWidget
from qtconsole.inprocess import QtInProcessKernelManager
from IPython.lib import guisupport
class QIPythonWidget(RichJupyterWidget):
""" Convenience class for a live IPython console widget. We can replace the standard banner using the customBanner argument"""
def __init__(self,customBanner=None,*args,**kwargs):
if not customBanner is None: self.banner=customBanner
super(QIPythonWidget, self).__init__(*args,**kwargs)
self.kernel_manager = kernel_manager = QtInProcessKernelManager()
kernel_manager.start_kernel()
kernel_manager.kernel.gui = 'qt4'
self.kernel_client = kernel_client = self._kernel_manager.client()
kernel_client.start_channels()
def stop():
kernel_client.stop_channels()
kernel_manager.shutdown_kernel()
guisupport.get_app_qt4().exit()
self.exit_requested.connect(stop)
def pushVariables(self,variableDict):
""" Given a dictionary containing name / value pairs, push those variables to the IPython console widget """
self.kernel_manager.kernel.shell.push(variableDict)
def clearTerminal(self):
""" Clears the terminal """
self._control.clear()
def printText(self,text):
""" Prints some plain text to the console """
self._append_plain_text(text)
def executeCommand(self,command):
""" Execute a command in the frame of the console widget """
self._execute(command,False)
class JupyterWidget(QtGui.QWidget):
""" Main GUI Widget including a button and IPython Console widget
inside vertical layout
"""
def __init__(self, parent=None):
super(JupyterWidget, self).__init__(parent)
layout = QtGui.QVBoxLayout(self)
ipyConsole = QIPythonWidget()
layout.addWidget(ipyConsole)
# ipyConsole.pushVariables({'DL':DL})
allvar = globals()
allvar.update(locals())
ipyConsole.pushVariables(allvar)
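# Hedged usage sketch: the console widget can expose extra objects by name,
# e.g. ipyConsole.pushVariables({'DL': DL}) makes the current DLink instance
# available inside the embedded IPython session.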
class _MPLFigureEditor(Editor):
scrollable = True
def init(self, parent):
self.control = self._create_canvas(parent)
self.set_tooltip()
def update_editor(self):
pass
def _create_canvas(self, parent):
""" Create the MPL canvas. """
# matplotlib commands to create a canvas
frame = QtGui.QWidget()
mpl_canvas = FigureCanvas(self.value)
mpl_toolbar = NavigationToolbar2QT(parent=frame,canvas = mpl_canvas)
vbox = QtGui.QVBoxLayout()
vbox.addWidget(mpl_canvas)
vbox.addWidget(mpl_toolbar)
frame.setLayout(vbox)
mpl_canvas.setFocusPolicy( QtCore.Qt.ClickFocus )
mpl_canvas.setFocus()
return frame#mpl_canvas
class MPLFigureEditor(BasicEditorFactory):
klass = _MPLFigureEditor
class WstdHandler(Handler):
channels = List(Str)
def object_Wstd_Enum_changed(self, info):
"""
        This method listens for a change in the *Wstd_Enum* attribute of the
        object (PylayersGUI) being viewed.
        When this listener method is called, *info.object* is a reference to
        the viewed object (PylayersGUI).
"""
        # Update the list of available channels for the selected standard
self.channels = dchann[info.object.Wstd_Enum]
        # As default value, use the first channel in the list:
info.object.chann = self.channels[0]
# info.object.DL.fGHz =
class PylayersGUI(HasTraits):
# slider/dropdown widgets etc
# Layout
laynames = [''] + np.sort(os.listdir(basename +'/struc/lay/')).tolist()#['','DLR.lay','defstr.lay','TC2_METIS.lay']#,
Lay_Enum = Enum(laynames)
## Antenna file :
av_ant = ['Omni','Gauss','aperture']
antext= ['vsh3','sh3']
for fname in os.listdir(basename +'/ant'):
if fname.split('.')[-1] in antext:
av_ant.append(fname)
# Init Positions
xmin = DL.L.ax[0]
xmax = DL.L.ax[1]
ymin = DL.L.ax[2]
ymax = DL.L.ax[3]
zmin = 0.
zmax = DL.L.maxheight-0.1
# Antenna
## position a
    aX = Range(low='xmin',high='xmax',value= float((xmin+xmax)/2.))
    aY = Range(low='ymin',high='ymax',value= float((ymin+ymax)/2.))
    aZ = Range(low='zmin',high='zmax',value= float((zmin+zmax)/2.))
## rotation a
agamma = Range(float(-3.14), float(3.14), 0., )#mode='spinner')
abeta = Range(float(-3.14), float(3.14), 0., )#mode='spinner')
aalpha = Range(float(-3.14), float(3.14), 0., )#mode='spinner')
## file a:
a_ant = Enum(av_ant)
# Antenna B
## position b
    bX = Range(low='xmin',high='xmax',value= float((xmin+xmax)/2.))
    bY = Range(low='ymin',high='ymax',value= float((ymin+ymax)/2.))
    bZ = Range(low='zmin',high='zmax',value= float((zmin+zmax)/2.))
## rotation b
bgamma = Range(float(-3.14), float(3.14), 0., )#mode='spinner')
bbeta = Range(float(-3.14), float(3.14), 0., )#mode='spinner')
balpha = Range(float(-3.14), float(3.14), 0., )#mode='spinner')
## file b:
b_ant = Enum(av_ant)
# frequency
fmmin = 0.
fmmax = 300.
fmin=Range(low = 'fmmin', high = 'fmmax',value = float(DL.Aa.fGHz[0]) )
fmax=Range(low = 'fmmin', high = 'fmmax',value = float(DL.Aa.fGHz[-1]) )
fstep=Range(low = 0,high = 10, value = 0)
# advanced
# init interface
scene = Instance(MlabSceneModel, ())
plot = Instance(PipelineBase)
# @on_trait_change('scene.activated')
# def init_plot(self):
# DL._show3()
# When the scene is activated, or when the parameters are changed, we
# update the plot.
# def _open_changed(self):
# """ Handles the user clicking the 'Open...' button.
# """
# path = pyu.getlong('',pstruc['DIRSTR'])
# file_name = open_file(file_name= path ,extensions = FileInfo())
# if file_name != '':
# self.file_name = file_name
@on_trait_change('Lay_Enum')
def update_L(self):
if self.Lay_Enum != ' ':
mlab.clf()
DL.L=Layout(self.Lay_Enum,bgraphs=True)
self.xmin=DL.L.ax[0]
self.xmax=DL.L.ax[1]
self.ymin=DL.L.ax[2]
self.ymax=DL.L.ax[3]
self.zmin=0.
self.zmax=DL.L.maxheight-0.1
self.aX,self.aY,self.aZ=DL.a
self.bX,self.bY,self.bZ=DL.b
DL.a= np.array([self.aX,self.aY,self.aZ])
DL.b= np.array([self.bX,self.bY,self.bZ])
self.cutoff = DL.cutoff
if not hasattr(DL,'_maya_fig'):
DL._show3()
@on_trait_change('cutoff,threshold')
def update_cutoff_threshold(self):
""" update position ant a
"""
DL.cutoff = self.cutoff
DL.threshold = self.threshold/100.
@on_trait_change('aX,aY,aZ')
def update_a(self):
""" update position ant a
"""
self.clear_fig()
DL.a= np.array([self.aX,self.aY,self.aZ])
self.cutoff = DL.cutoff
@on_trait_change('bX,bY,bZ')
def update_b(self):
""" update position ant b
"""
self.clear_fig()
DL.b= np.array([self.bX,self.bY,self.bZ])
self.cutoff = DL.cutoff
@on_trait_change('aalpha,abeta,agamma')
def update_Ta(self):
""" update rot ant a
"""
T = geu.MEulerAngle(self.aalpha,beta=self.abeta,gamma=self.agamma)
DL.Ta=T
self.clear_fig()
# if DL.dexist['Ct']['exist']:
# DL.C.locbas(Tt=DL.Ta, Tr=DL.Tb)
# #T channel
# DL.H = DL.C.prop2tran(a=DL.Aa,b=DL.Ab,Friis=True)
# self.plt_all()
@on_trait_change('balpha,bbeta,bgamma')
def update_Tb(self):
""" update rot ant b
"""
T = geu.MEulerAngle(self.balpha,beta=self.bbeta,gamma=self.bgamma)
DL.Tb=T
self.clear_fig()
@on_trait_change('a_ant,fmin,fmax,fstep')
def update_Aa(self):
DL.Aa=Antenna(self.a_ant)
self.clear_fig()
# if DL.Aa.fromfile:
# self.fmin=DL.Aa.fGHz[0]
# self.fmax=DL.Aa.fGHz[-1]
# self.fstep=min(1,DL.Aa.fGHz[1]-DL.Aa.fGHz[0])
@on_trait_change('b_ant,fmin,fmax,fstep')
def update_Ab(self):
DL.Ab=Antenna(self.b_ant)
self.clear_fig()
# if DL.Ab.fromfile:
# self.fmin=DL.Ab.fGHz[0]
# self.fmax=DL.Ab.fGHz[-1]
# self.fstep=min(1,DL.Ab.fGHz[1]-DL.Ab.fGHz[0])
@on_trait_change('fmin,fmax,fstep,chann')
def update_fGHz(self):
if self.Wstd_Enum != 'None':
W=std.Wstandard(self.Wstd_Enum)
# DL.fGHz = W.chan[eval(self.chann)].fghz
Wchan = W.chan[eval(self.chann)]
fcGHz = Wchan['fcGHz']
BWGHz = Wchan['BMHz']
GMHz = Wchan['GMHz']
fGHz = Wchan.fghz
DL.fGHz = np.array([fcGHz])
self.BWGHz = BWGHz
self.fmin = float(fGHz[0])
self.fmax = float(fGHz[-1])
self.fstep = float(fGHz[1]-fGHz[0])
else:
if self.fmin < self.fmax:
DL.fGHz = np.arange(self.fmin,
self.fmax,
self.fstep
)
elif self.fmin == self.fmax:
DL.fGHz=np.array([self.fmin])
self.BWGHz = 5
@on_trait_change('Beval')
def DLeval(self):
DL.eval(verbose=False,
force=self.force,
cutoff=self.cutoff,
threshold=self.threshold/100.,
diffraction = self.diffraction,
nD=self.nD,
nT=self.nT,
nR=self.nR,
applywav = self.applywav)
DL._update_show3(delrays=True)
ER = np.squeeze(DL.H.energy())
DL.R._show3(ER=ER)
self.plt_all()
def plt_all(self):
self.plt_cir()
self.plt_doa()
self.plt_dod()
self.plt_dspread()
self.plt_aspread()
def plt_cir(self):
self.figcir.clf()
ax = self.figcir.add_subplot(111)
DL.plt_cir(fig=self.figcir, ax=ax, BWGHz=self.BWGHz, Nf = 5000 )
# ir = DL.H.getcir(BWGHz=5,Nf=1000)
# ir.plot(fig=self.figcir,ax=ax)
# ax.plot(DL.H.taud,20*np.log10(DL.H.y[:,0,0,0]),'or')
self.figcir.canvas.draw()
# DL.plt_doadod(d='doa')
# DL.H.plot(fig=self.figcir,ax=ax)
# self.figcir.canvas.draw()
def plt_doa(self):
self.figdoa.clf()
ax = self.figdoa.add_subplot(111,polar=True)
# DL.L.showG('s',ax=ax,fig=self.figure)
# DL.H.plotd(d='doa',polar=True,fig=self.figdoa,ax=ax)
DL.plt_doa(polar=True,fig=self.figdoa,ax=ax)
self.figdoa.canvas.draw()
def plt_dod(self):
self.figdod.clf()
ax = self.figdod.add_subplot(111,polar=True)
DL.plt_dod(polar=True,fig=self.figdod,ax=ax)
# DL.L.showG('s',ax=ax,fig=self.figure)
# DL.H.plotd(d='dod',polar=True,fig=self.figdod,ax=ax)
self.figdod.canvas.draw()
def plt_dspread(self):
self.figds.clf()
ax = self.figds.add_subplot(111)
DL.plt_dspread(fig=self.figds,ax=ax)
self.figds.canvas.draw()
def plt_aspread(self):
self.figas.clf()
ax = self.figas.add_subplot(111)
DL.plt_aspread(fig=self.figas,ax=ax)
self.figas.canvas.draw()
def clear_fig(self,lf=['cir','doa','dod','as','ds']):
for f in lf:
eval('self.fig'+f+'.clf()')
eval('self.fig'+f+'.canvas.draw()')
#####
##### RENDERING 3D MAYAVI
#####
render3d = Item('scene', editor=SceneEditor(scene_class=Scene),
height=500, width=1500, show_label=False)
# ###
# ### Matplotlib figure
# ###
# figure = Instance(Figure(figsize=(8,20)), ())
#####
##### Layout SELECTION
#####
# Layout
GLay = Group(Item('Lay_Enum',
style='simple',
label='file'),
show_labels=False,
label='Layout')
#####
##### WIRELESS STANDARD
#####
# wireless standard
Wstd_Enum = Enum('None', av_wstds)
chann = Str
# chann = Enum(av_chann)
GWstd_None = Group(Item('fmin', label='fGHz min', style='text'),
Item('fmax', label='fGHz max', style='text'),
Item('fstep', label='fGHz step', style='text'),
label = 'Frequency',
show_border= True,
enabled_when = 'Wstd_Enum == \'None\''
)
GWstd_std = Group(Item(name ='chann',editor=EnumEditor(name='handler.channels')
) ,
label = 'channel',
show_border= True,
enabled_when = 'Wstd_Enum != \'None\''
)
GWstd = Group(
Group(Item (name = 'Wstd_Enum',
label = 'Wireless Standard')),
GWstd_None,
GWstd_std,
label='Wireless Standard',
show_labels=True,
show_border=False)
#####
##### ANTENNA
#####
xmin=Float
xmax = Float
ymin=Float
ymax = Float
zmin=Float
zmax = Float
# Ant A file
Iax = Item('aX',
editor=RangeEditor(low_name='xmin',
high_name='xmax',
format='%.1f',
label_width=28,
mode='auto'),
label='x'
)
Iay = Item('aY',
editor=RangeEditor(low_name='ymin',
high_name='ymax',
format='%.1f',
label_width=28,
mode='auto'),
label='y'
)
Iaz = Item('aZ',
editor=RangeEditor(low_name='zmin',
high_name='zmax',
format='%.1f',
label_width=28,
mode='auto'),
label='z'
)
GPos_a = VGroup(
Iax,
Iay,
Iaz,
id = 'a',
label = 'Position',
show_border=True,
show_labels=True,
layout='split'
)
Ifile_a = Item('a_ant',label='file')
GRot_a = VGroup(
Item('agamma',label='x-roll'),
Item('abeta',label='y-roll'),
Item('aalpha',label='z-roll'),
id = 'Ta',
label = 'Rotation',
show_border=True,
layout='split'
)
G_a = Group(Ifile_a,
GPos_a,
GRot_a,
label='Antenna a',
show_border=False
)
#### ANtenna B
# Ant B positions
Ibx = Item('bX',
editor=RangeEditor(low_name='xmin',
high_name='xmax',
format='%.1f',
label_width=28,
mode='auto'),
label='x'
)
Iby = Item('bY',
editor=RangeEditor(low_name='ymin',
high_name='ymax',
format='%.1f',
label_width=28,
mode='auto'),
label='y'
)
Ibz = Item('bZ',
editor=RangeEditor(low_name='zmin',
high_name='zmax',
format='%.1f',
label_width=28,
mode='auto'),
label='z'
)
GPos_b = Group(
Ibx,
Iby,
Ibz,
id = 'b',
label = 'Position',
show_border=True,
layout='split'
)
# Ant B file
Ifile_b = Item('b_ant',label='file')
GRot_b = Group(
Item('bgamma',label='x-roll'),
Item('bbeta',label='y-roll'),
Item('balpha',label='z-roll'),
id = 'Tb',
label = 'Rotation',
show_border=True,
layout='split'
)
G_b = Group(Ifile_b,
GPos_b,
GRot_b,
label='Antenna b',
show_border=False,
)
####
    #### advanced CONFIGURATION
####
force =Bool
diffraction = Bool
    applywav = Bool
low_cutoff = 1
high_cutoff = 30
cutoff = Range(low='low_cutoff',high='high_cutoff',value=DL.cutoff)
threshold = Range(0,100,80)
nD=2
nR=10
nT=10
G_advanced = Group(VGroup(
Item('force',
label='force',
resizable=False,
style='simple'),
Item('cutoff',
label='cutoff',
editor=RangeEditor(low_name='low_cutoff',
high_name='high_cutoff',
label_width=28,
mode='auto'),
width=0.2,
style='simple'),
Item('threshold',
label='threshold',
width=0.2,
style='simple'),
Item('diffraction',
label='diffractions',
style='simple'),
Item('nD',
label='max nb Diffractions',
enabled_when='diffraction' ,
style='simple'),
Item('nR',
label='max nb Reflections',
style='simple'),
Item('nT',
label='max nb Transmissions',
style='simple'),
Item('applywav',
label='applywav',
style='simple'),
label='Ray Tracing Configuration',
show_labels=True,
show_border=False))
####
### MANAGING GROUPS
###
# LEFT GROUP WINDOW
Beval = Button('Launch Ray-Tracing')
GLeft = Group(
GLay,
GWstd,
G_advanced
)
# <NAME>
GAnt_ab = HGroup(spring,G_a,spring,G_b,spring)
GAnt_Eval = Group(GAnt_ab,
HGroup(spring,
Item('Beval',
enabled_when='Lay_Enum != \'\''
),
show_labels=False)
)
#### TOP GROUP
GR_0= HSplit(GLeft,
render3d,
layout='split')
# BOTTOM GROUP
figcir= Instance(Figure(figsize=(8,20)), ())
figdoa= Instance(Figure(figsize=(8,20)), ())
figdod= Instance(Figure(figsize=(8,20)), ())
figas= Instance(Figure(figsize=(8,20)), ())
figds= Instance(Figure(figsize=(8,20)), ())
GExploit = Group ( Group(Item('figcir',
editor=MPLFigureEditor(),
),
label='CIR'),
Group(Item('figdoa',
editor=MPLFigureEditor()
),
label='DOA'),
Group(Item('figdod',
editor=MPLFigureEditor()
),
label='DOD'),
Group(Item('figas',
editor=MPLFigureEditor()
),
label='Ang. Spread'),
Group(Item('figds',
editor=MPLFigureEditor()
),
label='Delay Spread'),
layout='tabbed',
)
GR_1 = HGroup(spring,GAnt_Eval,spring,GExploit)
JWidget = JupyterWidget()
JWidget.show()
view = View(VGroup(GR_0,GR_1),
# menubar=MenuBar(Menu_file),
buttons=['Quit'],
title="Pylayers GUI - beta",
resizable=True,
width=1., height=1.,
handler=WstdHandler)
if __name__ == '__main__':
gui = PylayersGUI()
gui.configure_traits()
|
frechet_audio_distance/fad_utils.py | deepneuralmachine/google-research | 23,901 | 12682614 | <gh_stars>1000+
# coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Fréchet Audio Distance util functions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from scipy import linalg
import tensorflow.compat.v1 as tf
def read_mean_and_covariances(filename):
"""Helper function that reads tf_record containing dataset stats.
Args:
filename: Path of the tf_record.
Returns:
The values of mu and sigma.
"""
  tf_record = next(tf.python_io.tf_record_iterator(filename))
example = tf.train.Example().FromString(tf_record)
mu = np.array(example.features.feature['mu'].float_list.value)
emb_len = np.array(
example.features.feature['embedding_length'].int64_list.value)[0]
sigma = (np.array(
example.features.feature['sigma'].float_list.value)).reshape((emb_len,
emb_len))
return mu, sigma
def normalize_loudness(np_samples, max_db_increase=20):
"""Normalizes the loudness to be between -1.0 and 1.0.
Args:
np_samples: 1d numpy array of audio samples with shape (num_samples).
    max_db_increase: Maximum loudness increase in dB. This stops very quiet
      audio from being distorted and avoids problems on silence where
      np.amax(np_samples) == 0.
Returns:
    1d numpy array of audio samples with shape (num_samples) where each sample
is between -1.0 and 1.0.
"""
min_amplitude_ratio = 10**(max_db_increase / -20)
return np_samples / np.maximum(min_amplitude_ratio, np.amax(np_samples))
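# Hedged usage sketch (not part of the original module; the sample values are
# made up): with the default max_db_increase of 20 dB the smallest admissible
# peak is 10 ** (20 / -20) = 0.1, so a signal peaking at 0.05 is boosted by at
# most a factor of 10 (i.e. +20 dB) instead of being scaled all the way to 1.0.
#
#   normalize_loudness(np.array([0.05, -0.02, 0.01]))  # peaks at 0.5
#   normalize_loudness(np.array([0.8, -0.5, 0.2]))     # peaks at 1.0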
def _stable_trace_sqrt_product(sigma_test, sigma_train, eps=1e-7):
"""Avoids some problems when computing the srqt of product of sigmas.
Based on <NAME>'s contribution here:
https://github.com/bioinf-jku/TTUR/blob/master/fid.py
Args:
sigma_test: Test covariance matrix.
    sigma_train: Train covariance matrix.
eps: Small number; used to avoid singular product.
Returns:
    The trace of the square root of the product of the passed covariance
matrices.
Raises:
ValueError: If the sqrt of the product of the sigmas contains complex
numbers with large imaginary parts.
"""
# product might be almost singular
sqrt_product, _ = linalg.sqrtm(sigma_test.dot(sigma_train), disp=False)
if not np.isfinite(sqrt_product).all():
# add eps to the diagonal to avoid a singular product.
offset = np.eye(sigma_test.shape[0]) * eps
sqrt_product = linalg.sqrtm((sigma_test + offset).dot(sigma_train + offset))
# Might have a slight imaginary component.
if not np.allclose(np.diagonal(sqrt_product).imag, 0, atol=1e-3):
raise ValueError('sqrt_product contains large complex numbers.')
sqrt_product = sqrt_product.real
return np.trace(sqrt_product)
def frechet_distance(mu_test, sigma_test, mu_train, sigma_train):
"""Fréchet distance calculation.
From: <NAME> & <NAME> The Fréchet distance between
multivariate normal distributions
https://doi.org/10.1016/0047-259X(82)90077-X
The Fréchet distance between two multivariate gaussians,
`X ~ N(mu_x, sigma_x)` and `Y ~ N(mu_y, sigma_y)`, is `d^2`.
d^2 = (mu_x - mu_y)^2 + Tr(sigma_x + sigma_y - 2 * sqrt(sigma_x*sigma_y))
= (mu_x - mu_y)^2 + Tr(sigma_x) + Tr(sigma_y)
- 2 * Tr(sqrt(sigma_x*sigma_y)))
Args:
    mu_test: Mean of the test multivariate gaussian.
    sigma_test: Covariance matrix of the test multivariate gaussian.
    mu_train: Mean of the train multivariate gaussian.
    sigma_train: Covariance matrix of the train multivariate gaussian.
Returns:
The Fréchet distance.
Raises:
ValueError: If the input arrays do not have the expect shapes.
"""
if len(mu_train.shape) != 1:
raise ValueError('mu_train must be 1 dimensional.')
if len(sigma_train.shape) != 2:
raise ValueError('sigma_train must be 2 dimensional.')
if mu_test.shape != mu_train.shape:
raise ValueError('mu_test should have the same shape as mu_train')
if sigma_test.shape != sigma_train.shape:
raise ValueError('sigma_test should have the same shape as sigma_train')
mu_diff = mu_test - mu_train
trace_sqrt_product = _stable_trace_sqrt_product(sigma_test, sigma_train)
return mu_diff.dot(mu_diff) + np.trace(sigma_test) + np.trace(
sigma_train) - 2 * trace_sqrt_product
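if __name__ == '__main__':
  # Hedged self-check, not part of the original module: for isotropic
  # Gaussians the Fréchet distance reduces to
  # |mu_test - mu_train|^2 + dim * (sqrt(s_test) - sqrt(s_train))^2.
  _mu = np.zeros(4)
  _sigma = np.eye(4)
  print(frechet_distance(_mu, _sigma, _mu, _sigma))              # ~0.0
  print(frechet_distance(_mu + 1.0, 4.0 * _sigma, _mu, _sigma))  # 4 + 4*1 = 8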
|
docs/tutorials/detection/demo_ssd.py | Kh4L/gluon-cv | 5,447 | 12682618 | <reponame>Kh4L/gluon-cv<filename>docs/tutorials/detection/demo_ssd.py
"""01. Predict with pre-trained SSD models
==========================================
This article shows how to play with pre-trained SSD models with only a few
lines of code.
First let's import some necessary libraries:
"""
from gluoncv import model_zoo, data, utils
from matplotlib import pyplot as plt
######################################################################
# Load a pretrained model
# -------------------------
#
# Let's get an SSD model trained with 512x512 images on Pascal VOC
# dataset with ResNet-50 V1 as the base model. By specifying
# ``pretrained=True``, it will automatically download the model from the model
# zoo if necessary. For more pretrained models, please refer to
# :doc:`../../model_zoo/index`.
net = model_zoo.get_model('ssd_512_resnet50_v1_voc', pretrained=True)
######################################################################
# Pre-process an image
# --------------------
#
# Next we download an image, and pre-process with preset data transforms. Here we
# specify that we resize the short edge of the image to 512 px. But you can
# feed an arbitrarily sized image.
#
# You can provide a list of image file names, such as ``[im_fname1, im_fname2,
# ...]`` to :py:func:`gluoncv.data.transforms.presets.ssd.load_test` if you
# want to load multiple image together.
#
# This function returns two results. The first is an NDArray with shape
# `(batch_size, RGB_channels, height, width)`. It can be fed into the
# model directly. The second one contains the images in numpy format so they
# are easy to plot. Since we only loaded a single image, the first dimension
# of `x` is 1.
im_fname = utils.download('https://github.com/dmlc/web-data/blob/master/' +
'gluoncv/detection/street_small.jpg?raw=true',
path='street_small.jpg')
x, img = data.transforms.presets.ssd.load_test(im_fname, short=512)
print('Shape of pre-processed image:', x.shape)
######################################################################
# Inference and display
# ---------------------
#
# The forward function will return the predicted class IDs, confidence scores,
# and the detected bounding boxes, in that order. Their shapes are
# `(batch_size, num_bboxes, 1)`, `(batch_size, num_bboxes, 1)`, and
# `(batch_size, num_bboxes, 4)`, respectively.
#
# We can use :py:func:`gluoncv.utils.viz.plot_bbox` to visualize the
# results. We slice the results for the first image and feed them into `plot_bbox`:
class_IDs, scores, bounding_boxes = net(x)
ax = utils.viz.plot_bbox(img, bounding_boxes[0], scores[0],
class_IDs[0], class_names=net.classes)
plt.show()
|
cocos/tests/test_numerics/test_statistics/test_rng/test_gamma_rng.py | michaelnowotny/cocos | 101 | 12682636 | import pytest
import cocos.numerics as cn
from cocos.tests.test_numerics.test_statistics.utilities import perform_ks_test
n_kolmogorov_smirnov = 1500000
test_data = [(1, 2, n_kolmogorov_smirnov),
(2, 2, n_kolmogorov_smirnov),
(3, 2, n_kolmogorov_smirnov),
(5, 1, n_kolmogorov_smirnov),
(9, 0.5, n_kolmogorov_smirnov),
(7.5, 1, n_kolmogorov_smirnov),
(0.5, 1, n_kolmogorov_smirnov)]
@pytest.mark.parametrize("a, b, n_kolmogorov_smirnov", test_data)
def test_gamma_distribution(a, b, n_kolmogorov_smirnov):
u = cn.random.gamma(a, b, n_kolmogorov_smirnov)
reject = perform_ks_test(u,
alpha=0.01,
distribution='gamma',
args=(a, 0.0, b),
verbose=True)
assert not reject
|
scripts/automation/trex_control_plane/interactive/trex/utils/filters.py | timgates42/trex-core | 956 | 12682684 |
def shallow_copy(x):
return type(x)(x)
class ToggleFilter(object):
"""
    This class provides a "sticky" filter that works by "toggling" items of the original database on and off.
"""
def __init__(self, db_ref, show_by_default=True):
"""
Instantiate a ToggleFilter object
:parameters:
db_ref : iterable
an iterable object (i.e. list, set etc) that would serve as the reference db of the instance.
Changes in that object will affect the output of ToggleFilter instance.
show_by_default: bool
decide if by default all the items are "on", i.e. these items will be presented if no other
toggling occurred.
default value : **True**
"""
self._data = db_ref
self._toggle_db = set()
self._filter_method = filter
self.__set_initial_state(show_by_default)
def reset (self):
"""
Toggles off all the items
"""
self._toggle_db = set()
def toggle_item(self, item_key):
"""
Toggle a single item in/out.
:parameters:
item_key :
                an item key; by its value the filter decides whether to toggle it in or out.
Example: int, str and so on.
:return:
+ **True** if item toggled **into** the filtered items
+ **False** if item toggled **out from** the filtered items
:raises:
            + KeyError, in case the item key is neither part of the toggled list nor of the referenced db.
"""
if item_key in self._toggle_db:
self._toggle_db.remove(item_key)
return False
elif item_key in self._data:
self._toggle_db.add(item_key)
return True
else:
raise KeyError("Provided item key isn't a key of the referenced data structure.")
def toggle_items(self, *args):
"""
        Toggle multiple items in/out with a single call. Each item will be toggled individually.
:parameters:
args : iterable
an iterable object containing all item keys to be toggled in/out
:return:
+ **True** if all toggled items were toggled **into** the filtered items
+ **False** if at least one of the items was toggled **out from** the filtered items
:raises:
            + KeyError, in case one of the item keys was neither part of the toggled list nor of the referenced db.
"""
# in python 3, 'map' returns an iterator, so wrapping with 'list' call creates same effect for both python 2 and 3
return all(list(map(self.toggle_item, args)))
def filter_items(self):
"""
Filters the pointed database by showing only the items mapped at toggle_db set.
:returns:
Filtered data of the original object.
"""
return self._filter_method(self.__toggle_filter, self._data)
# private methods
def __set_initial_state(self, show_by_default):
try:
_ = (x for x in self._data)
if isinstance(self._data, dict):
self._filter_method = ToggleFilter.dict_filter
if show_by_default:
self._toggle_db = set(self._data.keys())
return
elif isinstance(self._data, list):
self._filter_method = ToggleFilter.list_filter
elif isinstance(self._data, set):
self._filter_method = ToggleFilter.set_filter
elif isinstance(self._data, tuple):
self._filter_method = ToggleFilter.tuple_filter
if show_by_default:
self._toggle_db = set(shallow_copy(self._data)) # assuming all relevant data with unique identifier
return
except TypeError:
raise TypeError("provided data object is not iterable")
def __toggle_filter(self, x):
return (x in self._toggle_db)
# static utility methods
@staticmethod
def dict_filter(function, iterable):
assert isinstance(iterable, dict)
return {k: v
for k,v in iterable.items()
if function(k)}
@staticmethod
def list_filter(function, iterable):
# in python 3, filter returns an iterator, so wrapping with list creates same effect for both python 2 and 3
return list(filter(function, iterable))
@staticmethod
def set_filter(function, iterable):
return {x
for x in iterable
if function(x)}
@staticmethod
def tuple_filter(function, iterable):
return tuple(filter(function, iterable))
if __name__ == "__main__":
pass
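    # Hedged usage sketch (illustrative only; the sample data below is made up
    # and not part of the original module).
    ports = {1: 'eth1', 2: 'eth2', 3: 'eth3'}
    flt = ToggleFilter(ports, show_by_default=False)
    flt.toggle_items(1, 3)            # both toggled in -> returns True
    print(flt.filter_items())         # {1: 'eth1', 3: 'eth3'}
    flt.toggle_item(1)                # toggled back out -> returns False
    print(flt.filter_items())         # {3: 'eth3'}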
|
src/dataprotection/azext_dataprotection/vendored_sdks/dataprotection/models/_data_protection_client_enums.py | haroonf/azure-cli-extensions | 207 | 12682687 | <reponame>haroonf/azure-cli-extensions<filename>src/dataprotection/azext_dataprotection/vendored_sdks/dataprotection/models/_data_protection_client_enums.py
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator (autorest: 3.0.6370, generator: {generator})
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from enum import Enum, EnumMeta
from six import with_metaclass
class _CaseInsensitiveEnumMeta(EnumMeta):
def __getitem__(self, name):
return super().__getitem__(name.upper())
def __getattr__(cls, name):
"""Return the enum member matching `name`
We use __getattr__ instead of descriptors or inserting into the enum
class' __dict__ in order to support `name` and `value` being both
properties for enum members (which live in the class' __dict__) and
enum members themselves.
"""
try:
return cls._member_map_[name.upper()]
except KeyError:
raise AttributeError(name)
class AbsoluteMarker(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
ALL_BACKUP = "AllBackup"
FIRST_OF_DAY = "FirstOfDay"
FIRST_OF_MONTH = "FirstOfMonth"
FIRST_OF_WEEK = "FirstOfWeek"
FIRST_OF_YEAR = "FirstOfYear"
class CreatedByType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""The type of identity that created the resource.
"""
USER = "User"
APPLICATION = "Application"
MANAGED_IDENTITY = "ManagedIdentity"
KEY = "Key"
class CurrentProtectionState(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""Specifies the current protection state of the resource
"""
INVALID = "Invalid"
NOT_PROTECTED = "NotProtected"
CONFIGURING_PROTECTION = "ConfiguringProtection"
PROTECTION_CONFIGURED = "ProtectionConfigured"
BACKUP_SCHEDULES_SUSPENDED = "BackupSchedulesSuspended"
RETENTION_SCHEDULES_SUSPENDED = "RetentionSchedulesSuspended"
PROTECTION_STOPPED = "ProtectionStopped"
PROTECTION_ERROR = "ProtectionError"
CONFIGURING_PROTECTION_FAILED = "ConfiguringProtectionFailed"
SOFT_DELETING = "SoftDeleting"
SOFT_DELETED = "SoftDeleted"
UPDATING_PROTECTION = "UpdatingProtection"
class DataStoreTypes(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""type of datastore; Operational/Vault/Archive
"""
OPERATIONAL_STORE = "OperationalStore"
VAULT_STORE = "VaultStore"
ARCHIVE_STORE = "ArchiveStore"
class DayOfWeek(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
FRIDAY = "Friday"
MONDAY = "Monday"
SATURDAY = "Saturday"
SUNDAY = "Sunday"
THURSDAY = "Thursday"
TUESDAY = "Tuesday"
WEDNESDAY = "Wednesday"
class FeatureSupportStatus(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""feature support status
"""
INVALID = "Invalid"
NOT_SUPPORTED = "NotSupported"
ALPHA_PREVIEW = "AlphaPreview"
PRIVATE_PREVIEW = "PrivatePreview"
PUBLIC_PREVIEW = "PublicPreview"
GENERALLY_AVAILABLE = "GenerallyAvailable"
class FeatureType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""backup support feature type.
"""
INVALID = "Invalid"
DATA_SOURCE_TYPE = "DataSourceType"
class Month(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
APRIL = "April"
AUGUST = "August"
DECEMBER = "December"
FEBRUARY = "February"
JANUARY = "January"
JULY = "July"
JUNE = "June"
MARCH = "March"
MAY = "May"
NOVEMBER = "November"
OCTOBER = "October"
SEPTEMBER = "September"
class ProvisioningState(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""Provisioning state of the BackupVault resource
"""
FAILED = "Failed"
PROVISIONING = "Provisioning"
SUCCEEDED = "Succeeded"
UNKNOWN = "Unknown"
UPDATING = "Updating"
class RecoveryOption(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""Recovery Option
"""
FAIL_IF_EXISTS = "FailIfExists"
class RehydrationPriority(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""Priority to be used for rehydration. Values High or Standard
"""
INVALID = "Invalid"
HIGH = "High"
STANDARD = "Standard"
class RehydrationStatus(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
CREATE_IN_PROGRESS = "CREATE_IN_PROGRESS"
COMPLETED = "COMPLETED"
DELETE_IN_PROGRESS = "DELETE_IN_PROGRESS"
DELETED = "DELETED"
FAILED = "FAILED"
class ResourceMoveState(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""Resource move state for backup vault
"""
UNKNOWN = "Unknown"
IN_PROGRESS = "InProgress"
PREPARE_FAILED = "PrepareFailed"
COMMIT_FAILED = "CommitFailed"
FAILED = "Failed"
PREPARE_TIMEDOUT = "PrepareTimedout"
COMMIT_TIMEDOUT = "CommitTimedout"
CRITICAL_FAILURE = "CriticalFailure"
PARTIAL_SUCCESS = "PartialSuccess"
MOVE_SUCCEEDED = "MoveSucceeded"
class RestoreSourceDataStoreType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""Gets or sets the type of the source data store.
"""
OPERATIONAL_STORE = "OperationalStore"
VAULT_STORE = "VaultStore"
ARCHIVE_STORE = "ArchiveStore"
class RestoreTargetLocationType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""Denotes the target location where the data will be restored,
string value for the enum
{Microsoft.Internal.AzureBackup.DataProtection.Common.Interface.RestoreTargetLocationType}
"""
INVALID = "Invalid"
AZURE_BLOBS = "AzureBlobs"
AZURE_FILES = "AzureFiles"
class SecretStoreType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""Gets or sets the type of secret store
"""
INVALID = "Invalid"
AZURE_KEY_VAULT = "AzureKeyVault"
class SourceDataStoreType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""Gets or sets the type of the source data store.
"""
ARCHIVE_STORE = "ArchiveStore"
SNAPSHOT_STORE = "SnapshotStore"
VAULT_STORE = "VaultStore"
class Status(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""Specifies the protection status of the resource
"""
CONFIGURING_PROTECTION = "ConfiguringProtection"
CONFIGURING_PROTECTION_FAILED = "ConfiguringProtectionFailed"
PROTECTION_CONFIGURED = "ProtectionConfigured"
PROTECTION_STOPPED = "ProtectionStopped"
SOFT_DELETED = "SoftDeleted"
SOFT_DELETING = "SoftDeleting"
class StorageSettingStoreTypes(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""Gets or sets the type of the datastore.
"""
ARCHIVE_STORE = "ArchiveStore"
SNAPSHOT_STORE = "SnapshotStore"
VAULT_STORE = "VaultStore"
class StorageSettingTypes(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""Gets or sets the type.
"""
GEO_REDUNDANT = "GeoRedundant"
LOCALLY_REDUNDANT = "LocallyRedundant"
class WeekNumber(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
FIRST = "First"
FOURTH = "Fourth"
LAST = "Last"
SECOND = "Second"
THIRD = "Third"
|
tests/st/ops/ascend/test_aicpu_ops/test_gather_d.py | GuoSuiming/mindspore | 3,200 | 12682690 | <filename>tests/st/ops/ascend/test_aicpu_ops/test_gather_d.py
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import numpy as np
import mindspore
import mindspore.nn as nn
import mindspore.context as context
from mindspore import Tensor
from mindspore.ops import operations as P
from mindspore.ops.operations import _grad_ops as G
context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")
class Net(nn.Cell):
def __init__(self, dim=0):
super(Net, self).__init__()
self.op = P.GatherD()
self.dim = dim
def construct(self, x, index):
return self.op(x, self.dim, index)
class NetGrad(nn.Cell):
def __init__(self, dim=0, shape=None):
super(NetGrad, self).__init__()
self.op = G.GatherDGrad(dim, shape)
def construct(self, index, x):
return self.op(index, x)
def test_net():
x = Tensor(np.array([[772, 231, 508, 545, 615, 249],
[923, 210, 480, 696, 482, 761],
[465, 904, 521, 824, 607, 669],
[156, 539, 56, 159, 916, 566],
[122, 676, 714, 261, 19, 936]]), mindspore.int32)
index = Tensor(np.array([[0, 0, 0, 1, 1],
[0, 0, 0, 1, 4],
[0, 0, 0, 1, -1],
[1, 1, 1, 0, 0]]), mindspore.int32)
dim = 0
net = Net(dim)
out = net(x, index)
print(out.asnumpy())
expect_out = np.array([[772, 231, 508, 696, 482],
[772, 231, 508, 696, 19],
[772, 231, 508, 696, 19],
[923, 210, 480, 545, 615]])
assert np.array_equal(out.asnumpy(), expect_out)
def test_net_bool():
x = Tensor(np.array([[0, 1, 0, 0, 1, 0],
[0, 1, 0, 0, 1, 0],
[0, 0, 1, 1, 0, 1],
[1, 0, 1, 1, 0, 0],
[1, 1, 1, 1, 0, 0]]), mindspore.bool_)
index = Tensor(np.array([[0, 0, 0, 1, 1],
[0, 0, 0, 1, 4],
[0, 0, 0, 1, -1],
[1, 1, 1, 0, 0]]), mindspore.int32)
dim = 0
net = Net(dim)
out = net(x, index)
print(out.asnumpy())
expect_out = np.array([[0, 1, 0, 0, 1],
[0, 1, 0, 0, 0],
[0, 1, 0, 0, 0],
[0, 1, 0, 0, 1]]).astype(np.bool)
assert np.array_equal(out.asnumpy(), expect_out)
def test_net_grad():
index = Tensor(np.array([[0, 1, 2, 0, 0],
[2, 0, 0, 1, -1]]), mindspore.int32)
x = Tensor(np.array([[772, 231, 508, 615, 249],
[122, 676, 714, 261, 936]]), mindspore.int32)
net = NetGrad(dim=0, shape=(3, 5))
out = net(index, x)
print(out.asnumpy())
expect_out = np.array([[772, 676, 714, 615, 249],
[0, 231, 0, 261, 0],
[122, 0, 508, 0, 936]])
assert np.array_equal(out.asnumpy(), expect_out)
|
examples/draw_a_cat.py | jontonsoup4/ascii_art | 199 | 12682700 | <filename>examples/draw_a_cat.py<gh_stars>100-1000
from ascii_art.ascii_art import ASCIIArt, ASCIIPicture
# ASCII drawing
picture = ASCIIArt('cat', 2).draw_ascii(curve=1)
ASCIIPicture(picture).save('cat_scale2_draw_ascii.png')
with open('cat_scale2_draw.txt', 'w') as f:
f.write(''.join(picture))
picture = ASCIIArt('cat', 5).draw_ascii(curve=1)
ASCIIPicture(picture).save('cat_scale5_draw_ascii.png')
with open('cat_scale5_draw.txt', 'w') as f:
f.write(''.join(picture))
# Colored ASCII drawing using sorted custom character sets on a black background
colored_picture = ASCIIArt('cat', 2).draw_color_ascii(ASCIIArt.sort('09215'))
ASCIIPicture(colored_picture, 'black').save('cat_scale2_color_numbers')
colored_picture = ASCIIArt('cat', 5).draw_color_ascii(ASCIIArt.sort('09215'))
ASCIIPicture(colored_picture, 'black').save('cat_scale5_color_numbers')
colored_picture = ASCIIArt('cat', 2).draw_color_ascii(ASCIIArt.sort('jontonsoup4'))
ASCIIPicture(colored_picture, 'black').save('cat_scale2_color_name')
colored_picture = ASCIIArt('cat', 5).draw_color_ascii(ASCIIArt.sort('jontonsoup4'))
ASCIIPicture(colored_picture, 'black').save('cat_scale5_color_name')
# ASCII to HTML using 'kitten' as a character set on a black background
html = ASCIIArt('cat', 1).draw_html(ASCIIArt.sort('kitten'), background_color='black')
with open('cat_scale1_html_kitten.html', 'w') as f:
f.write(''.join(html))
html = ASCIIArt('cat', 2).draw_html(ASCIIArt.sort('kitten'), background_color='black')
with open('cat_scale2_html_kitten.html', 'w') as f:
f.write(''.join(html))
# ASCII to HTML using only '#' on a black background
html = ASCIIArt('cat', 1).draw_html(ASCIIArt.BLOCK, background_color='black')
with open('cat_scale1_html_block.html', 'w') as f:
f.write(''.join(html))
html = ASCIIArt('cat', 2).draw_html(ASCIIArt.BLOCK, background_color='black')
with open('cat_scale2_html_block.html', 'w') as f:
f.write(''.join(html))
# Colored ASCII with only '#' on a black background
colored_picture = ASCIIArt('cat', 2).draw_color_ascii(ASCIIArt.BLOCK, curve=1.5)
ASCIIPicture(colored_picture, 'black').save('cat_scale2_block_color.png')
colored_picture = ASCIIArt('cat', 5).draw_color_ascii(ASCIIArt.BLOCK, curve=1.5)
ASCIIPicture(colored_picture, 'black').save('cat_scale5_block_color.png')
# Colored ASCII with full grayscale
colored_picture = ASCIIArt('cat', 2).draw_color_ascii(ASCIIArt.FULL_RANGE, curve=1.5)
ASCIIPicture(colored_picture).save('cat_scale2_full_range_color.png')
colored_picture = ASCIIArt('cat', 5).draw_color_ascii(ASCIIArt.FULL_RANGE, curve=1.5)
ASCIIPicture(colored_picture).save('cat_scale5_full_range_color.png')
|
src/debugpy/_vendored/pydevd/tests_python/resources/_debugger_case_local_variables3.py | r3m0t/debugpy | 695 | 12682707 | class MyDictSubclass(dict):
def __init__(self):
dict.__init__(self)
self.var1 = 10
self['in_dct'] = 20
def __str__(self):
ret = []
for key, val in sorted(self.items()):
ret.append('%s: %s' % (key, val))
ret.append('self.var1: %s' % (self.var1,))
return '{' + '; '.join(ret) + '}'
__repr__ = __str__
class MyListSubclass(list):
def __init__(self):
list.__init__(self)
self.var1 = 11
self.append('a')
self.append('b')
def __str__(self):
ret = []
for obj in self:
ret.append(repr(obj))
ret.append('self.var1: %s' % (self.var1,))
return '[' + ', '.join(ret) + ']'
__repr__ = __str__
class MySetSubclass(set):
def __init__(self):
set.__init__(self)
self.var1 = 12
self.add('a')
def __str__(self):
ret = []
for obj in sorted(self):
ret.append(repr(obj))
ret.append('self.var1: %s' % (self.var1,))
return 'set([' + ', '.join(ret) + '])'
__repr__ = __str__
class MyTupleSubclass(tuple):
def __new__ (cls):
return super(MyTupleSubclass, cls).__new__(cls, tuple(['a', 1]))
def __init__(self):
self.var1 = 13
def __str__(self):
ret = []
for obj in self:
ret.append(repr(obj))
ret.append('self.var1: %s' % (self.var1,))
return 'tuple(' + ', '.join(ret) + ')'
__repr__ = __str__
def Call():
variable_for_test_1 = MyListSubclass()
variable_for_test_2 = MySetSubclass()
variable_for_test_3 = MyDictSubclass()
variable_for_test_4 = MyTupleSubclass()
all_vars_set = True # Break here
if __name__ == '__main__':
Call()
print('TEST SUCEEDED!')
|
wandb/vendor/prompt_toolkit/layout/dimension.py | dreamflasher/client | 6,989 | 12682738 | """
Layout dimensions are used to give the minimum, maximum and preferred
dimensions for containers and controls.
"""
from __future__ import unicode_literals
__all__ = (
'LayoutDimension',
'sum_layout_dimensions',
'max_layout_dimensions',
)
class LayoutDimension(object):
"""
Specified dimension (width/height) of a user control or window.
The layout engine tries to honor the preferred size. If that is not
possible, because the terminal is larger or smaller, it tries to keep in
between min and max.
:param min: Minimum size.
:param max: Maximum size.
:param weight: For a VSplit/HSplit, the actual size will be determined
by taking the proportion of weights from all the children.
                   E.g. when there are two children, one with a weight of 1,
and the other with a weight of 2. The second will always be
twice as big as the first, if the min/max values allow it.
:param preferred: Preferred size.
"""
def __init__(self, min=None, max=None, weight=1, preferred=None):
assert isinstance(weight, int) and weight > 0 # Cannot be a float.
self.min_specified = min is not None
self.max_specified = max is not None
self.preferred_specified = preferred is not None
if min is None:
min = 0 # Smallest possible value.
if max is None: # 0-values are allowed, so use "is None"
max = 1000 ** 10 # Something huge.
if preferred is None:
preferred = min
self.min = min
self.max = max
self.preferred = preferred
self.weight = weight
# Make sure that the 'preferred' size is always in the min..max range.
if self.preferred < self.min:
self.preferred = self.min
if self.preferred > self.max:
self.preferred = self.max
@classmethod
def exact(cls, amount):
"""
Return a :class:`.LayoutDimension` with an exact size. (min, max and
preferred set to ``amount``).
"""
return cls(min=amount, max=amount, preferred=amount)
def __repr__(self):
return 'LayoutDimension(min=%r, max=%r, preferred=%r, weight=%r)' % (
self.min, self.max, self.preferred, self.weight)
def __add__(self, other):
return sum_layout_dimensions([self, other])
def sum_layout_dimensions(dimensions):
"""
Sum a list of :class:`.LayoutDimension` instances.
"""
min = sum([d.min for d in dimensions if d.min is not None])
max = sum([d.max for d in dimensions if d.max is not None])
preferred = sum([d.preferred for d in dimensions])
return LayoutDimension(min=min, max=max, preferred=preferred)
def max_layout_dimensions(dimensions):
"""
Take the maximum of a list of :class:`.LayoutDimension` instances.
"""
min_ = max([d.min for d in dimensions if d.min is not None])
max_ = max([d.max for d in dimensions if d.max is not None])
preferred = max([d.preferred for d in dimensions])
return LayoutDimension(min=min_, max=max_, preferred=preferred)
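if __name__ == '__main__':
    # Hedged sketch, not part of the vendored module: exercise the helpers
    # with a couple of throwaway dimensions.
    d1 = LayoutDimension(min=5, max=20, preferred=10)
    d2 = LayoutDimension.exact(8)
    print(d1 + d2)                          # min=13, max=28, preferred=18
    print(max_layout_dimensions([d1, d2]))  # min=8, max=20, preferred=10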
|
nuplan/database/nuplan_db/scenario_tag.py | motional/nuplan-devkit | 128 | 12682739 | <reponame>motional/nuplan-devkit<filename>nuplan/database/nuplan_db/scenario_tag.py
from __future__ import annotations # postpone evaluation of annotations
import logging
from typing import Any
from sqlalchemy import Column, inspect
from sqlalchemy.orm import relationship
from sqlalchemy.schema import ForeignKey
from sqlalchemy.types import Text
from nuplan.database.common import sql_types
from nuplan.database.common.utils import simple_repr
from nuplan.database.nuplan_db.lidar_pc import LidarPc
from nuplan.database.nuplan_db.models import Base
logger = logging.getLogger()
class ScenarioTag(Base):
"""
    Scenario tags for a scene.
"""
__tablename__ = 'scenario_tag'
token: str = Column(sql_types.HexLen8, primary_key=True)
lidar_pc_token: str = Column(sql_types.HexLen8, ForeignKey("lidar_pc.token"), nullable=False)
type: str = Column(Text)
agent_track_token: str = Column(sql_types.HexLen8, ForeignKey("track.token"), nullable=False)
lidar_pc: LidarPc = relationship("LidarPc", foreign_keys=[lidar_pc_token], back_populates="scenario_tags")
@property
def _session(self) -> Any:
"""
Get the underlying session.
:return: The underlying session.
"""
return inspect(self).session
def __repr__(self) -> str:
"""
Get the string representation.
:return: The string representation.
"""
desc: str = simple_repr(self)
return desc
LidarPc.scenario_tags = relationship(
"ScenarioTag", foreign_keys="ScenarioTag.lidar_pc_token", back_populates="lidar_pc"
)
|
face_sdk/api_usage/face_crop.py | weihaoxie/FaceX-Zoo | 1,329 | 12682746 | <filename>face_sdk/api_usage/face_crop.py
"""
@author: <NAME>, <NAME>
@date: 20201015
@contact: <EMAIL>
"""
import sys
sys.path.append('.')
import logging
mpl_logger = logging.getLogger('matplotlib')
mpl_logger.setLevel(logging.WARNING)
import logging.config
logging.config.fileConfig("config/logging.conf")
logger = logging.getLogger('api')
import cv2
from core.image_cropper.arcface_cropper.FaceRecImageCropper import FaceRecImageCropper
if __name__ == '__main__':
image_path = 'api_usage/test_images/test1.jpg'
image_info_file = 'api_usage/test_images/test1_landmark_res0.txt'
line = open(image_info_file).readline().strip()
landmarks_str = line.split(' ')
landmarks = [float(num) for num in landmarks_str]
face_cropper = FaceRecImageCropper()
image = cv2.imread(image_path)
cropped_image = face_cropper.crop_image_by_mat(image, landmarks)
cv2.imwrite('api_usage/temp/test1_cropped.jpg', cropped_image)
logger.info('Crop image successful!')
|
examples/pxScene2d/external/libnode-v6.9.0/deps/v8/tools/gcmole/download_gcmole_tools.py | madanagopaltcomcast/pxCore | 5,964 | 12682753 | #!/usr/bin/env python
# Copyright 2016 the V8 project authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import re
import subprocess
GCMOLE_PATH = os.path.dirname(os.path.abspath(__file__))
SHA1_PATH = os.path.join(GCMOLE_PATH, 'gcmole-tools.tar.gz.sha1')
if re.search(r'\bgcmole=1', os.environ.get('GYP_DEFINES', '')):
subprocess.check_call([
'download_from_google_storage',
'-b', 'chrome-v8-gcmole',
'-u', '--no_resume',
'-s', SHA1_PATH,
'--platform=linux*'
])
else:
print 'Skipping gcmole download as gcmole is not set in gyp flags.'
|
tests/orm/relations/test_relation.py | wjzero/orator | 1,484 | 12682761 | <gh_stars>1000+
# -*- coding: utf-8 -*-
import pendulum
from flexmock import flexmock, flexmock_teardown
from ... import OratorTestCase
from orator.query.builder import QueryBuilder
from orator.orm.builder import Builder
from orator.orm.model import Model
from orator.orm.relations import HasOne
class OrmRelationTestCase(OratorTestCase):
def tearDown(self):
flexmock_teardown()
def test_set_relation_fail(self):
parent = OrmRelationResetModelStub()
relation = OrmRelationResetModelStub()
parent.set_relation("test", relation)
parent.set_relation("foo", "bar")
self.assertFalse("foo" in parent.to_dict())
def test_touch_method_updates_related_timestamps(self):
builder = flexmock(Builder, get_model=None, where=None)
parent = Model()
parent = flexmock(parent)
parent.should_receive("get_attribute").with_args("id").and_return(1)
related = Model()
related = flexmock(related)
builder.should_receive("get_model").and_return(related)
builder.should_receive("where")
relation = HasOne(
Builder(QueryBuilder(None, None, None)), parent, "foreign_key", "id"
)
related.should_receive("get_table").and_return("table")
related.should_receive("get_updated_at_column").and_return("updated_at")
now = pendulum.now()
related.should_receive("fresh_timestamp").and_return(now)
builder.should_receive("update").once().with_args({"updated_at": now})
relation.touch()
class OrmRelationResetModelStub(Model):
def get_query(self):
return self.new_query().get_query()
|
mono/model/mono_baseline/net.py | Jenaer/FeatDepth | 179 | 12682782 | <gh_stars>100-1000
from __future__ import absolute_import, division, print_function
import torch
import torch.nn.functional as F
import torch.nn as nn
from .layers import SSIM, Backproject, Project
from .depth_encoder import DepthEncoder
from .depth_decoder import DepthDecoder
from .pose_encoder import PoseEncoder
from .pose_decoder import PoseDecoder
from ..registry import MONO
@MONO.register_module
class Baseline(nn.Module):
def __init__(self, options):
super(Baseline, self).__init__()
self.opt = options
self.num_input_frames = len(self.opt.frame_ids)
self.DepthEncoder = DepthEncoder(self.opt.depth_num_layers,
self.opt.depth_pretrained_path)
self.DepthDecoder = DepthDecoder(self.DepthEncoder.num_ch_enc)
self.PoseEncoder = PoseEncoder(self.opt.pose_num_layers,
self.opt.pose_pretrained_path,
num_input_images=2)
self.PoseDecoder = PoseDecoder(self.PoseEncoder.num_ch_enc)
self.ssim = SSIM()
self.backproject = Backproject(self.opt.imgs_per_gpu, self.opt.height, self.opt.width)
self.project_3d = Project(self.opt.imgs_per_gpu, self.opt.height, self.opt.width)
def forward(self, inputs):
outputs = self.DepthDecoder(self.DepthEncoder(inputs["color_aug", 0, 0]))
if self.training:
outputs.update(self.predict_poses(inputs))
loss_dict = self.compute_losses(inputs, outputs)
return outputs, loss_dict
return outputs
def robust_l1(self, pred, target):
eps = 1e-3
return torch.sqrt(torch.pow(target - pred, 2) + eps ** 2)
def compute_reprojection_loss(self, pred, target):
photometric_loss = self.robust_l1(pred, target).mean(1, True)
ssim_loss = self.ssim(pred, target).mean(1, True)
reprojection_loss = (0.85 * ssim_loss + 0.15 * photometric_loss)
return reprojection_loss
def compute_losses(self, inputs, outputs):
loss_dict = {}
for scale in self.opt.scales:
"""
initialization
"""
disp = outputs[("disp", 0, scale)]
target = inputs[("color", 0, 0)]
reprojection_losses = []
"""
reconstruction
"""
outputs = self.generate_images_pred(inputs, outputs, scale)
"""
automask
"""
if self.opt.automask:
for frame_id in self.opt.frame_ids[1:]:
pred = inputs[("color", frame_id, 0)]
identity_reprojection_loss = self.compute_reprojection_loss(pred, target)
identity_reprojection_loss += torch.randn(identity_reprojection_loss.shape).cuda() * 1e-5
reprojection_losses.append(identity_reprojection_loss)
"""
minimum reconstruction loss
"""
for frame_id in self.opt.frame_ids[1:]:
pred = outputs[("color", frame_id, scale)]
reprojection_losses.append(self.compute_reprojection_loss(pred, target))
reprojection_loss = torch.cat(reprojection_losses, 1)
min_reconstruct_loss, outputs[("min_index", scale)] = torch.min(reprojection_loss, dim=1)
loss_dict[('min_reconstruct_loss', scale)] = min_reconstruct_loss.mean()/len(self.opt.scales)
"""
disp mean normalization
"""
if self.opt.disp_norm:
mean_disp = disp.mean(2, True).mean(3, True)
disp = disp / (mean_disp + 1e-7)
"""
smooth loss
"""
smooth_loss = self.get_smooth_loss(disp, target)
loss_dict[('smooth_loss', scale)] = self.opt.disparity_smoothness * smooth_loss / (2 ** scale)/len(self.opt.scales)
return loss_dict
def disp_to_depth(self, disp, min_depth, max_depth):
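        # Maps a sigmoid disparity in [0, 1] to depth in [min_depth, max_depth] (standard monodepth convention).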
min_disp = 1 / max_depth # 0.01
max_disp = 1 / min_depth # 10
scaled_disp = min_disp + (max_disp - min_disp) * disp # (10-0.01)*disp+0.01
depth = 1 / scaled_disp
return scaled_disp, depth
def predict_poses(self, inputs):
outputs = {}
pose_feats = {f_i: F.interpolate(inputs["color_aug", f_i, 0], [192, 640], mode="bilinear", align_corners=False) for f_i in self.opt.frame_ids}
for f_i in self.opt.frame_ids[1:]:
if not f_i == "s":
if f_i < 0:
pose_inputs = [pose_feats[f_i], pose_feats[0]]
else:
pose_inputs = [pose_feats[0], pose_feats[f_i]]
pose_inputs = self.PoseEncoder(torch.cat(pose_inputs, 1))
axisangle, translation = self.PoseDecoder(pose_inputs)
outputs[("cam_T_cam", 0, f_i)] = self.transformation_from_parameters(axisangle[:, 0], translation[:, 0], invert=(f_i < 0))
return outputs
def generate_images_pred(self, inputs, outputs, scale):
disp = outputs[("disp", 0, scale)]
disp = F.interpolate(disp, [self.opt.height, self.opt.width], mode="bilinear", align_corners=False)
_, depth = self.disp_to_depth(disp, self.opt.min_depth, self.opt.max_depth)
for i, frame_id in enumerate(self.opt.frame_ids[1:]):
if frame_id == "s":
T = inputs["stereo_T"]
else:
T = outputs[("cam_T_cam", 0, frame_id)]
cam_points = self.backproject(depth, inputs[("inv_K")])
pix_coords = self.project_3d(cam_points, inputs[("K")], T)#[b,h,w,2]
outputs[("color", frame_id, scale)] = F.grid_sample(inputs[("color", frame_id, 0)], pix_coords, padding_mode="border")
return outputs
def transformation_from_parameters(self, axisangle, translation, invert=False):
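        # Builds a 4x4 camera-to-camera transform from an axis-angle rotation and a translation;
        # 'invert' composes the inverse transform (used for frames with negative ids).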
R = self.rot_from_axisangle(axisangle)
t = translation.clone()
if invert:
R = R.transpose(1, 2)
t *= -1
T = self.get_translation_matrix(t)
if invert:
M = torch.matmul(R, T)
else:
M = torch.matmul(T, R)
return M
def get_translation_matrix(self, translation_vector):
T = torch.zeros(translation_vector.shape[0], 4, 4).cuda()
t = translation_vector.contiguous().view(-1, 3, 1)
T[:, 0, 0] = 1
T[:, 1, 1] = 1
T[:, 2, 2] = 1
T[:, 3, 3] = 1
T[:, :3, 3, None] = t
return T
def rot_from_axisangle(self, vec):
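        # Rodrigues' formula: converts an axis-angle vector into a 4x4 homogeneous rotation matrix.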
angle = torch.norm(vec, 2, 2, True)
axis = vec / (angle + 1e-7)
ca = torch.cos(angle)
sa = torch.sin(angle)
C = 1 - ca
x = axis[..., 0].unsqueeze(1)
y = axis[..., 1].unsqueeze(1)
z = axis[..., 2].unsqueeze(1)
xs = x * sa
ys = y * sa
zs = z * sa
xC = x * C
yC = y * C
zC = z * C
xyC = x * yC
yzC = y * zC
zxC = z * xC
rot = torch.zeros((vec.shape[0], 4, 4)).cuda()
rot[:, 0, 0] = torch.squeeze(x * xC + ca)
rot[:, 0, 1] = torch.squeeze(xyC - zs)
rot[:, 0, 2] = torch.squeeze(zxC + ys)
rot[:, 1, 0] = torch.squeeze(xyC + zs)
rot[:, 1, 1] = torch.squeeze(y * yC + ca)
rot[:, 1, 2] = torch.squeeze(yzC - xs)
rot[:, 2, 0] = torch.squeeze(zxC - ys)
rot[:, 2, 1] = torch.squeeze(yzC + xs)
rot[:, 2, 2] = torch.squeeze(z * zC + ca)
rot[:, 3, 3] = 1
return rot
def get_smooth_loss(self, disp, img):
b, _, h, w = disp.size()
a1 = 0.5
a2 = 0.5
img = F.interpolate(img, (h, w), mode='area')
disp_dx, disp_dy = self.gradient(disp)
img_dx, img_dy = self.gradient(img)
disp_dxx, disp_dxy = self.gradient(disp_dx)
disp_dyx, disp_dyy = self.gradient(disp_dy)
img_dxx, img_dxy = self.gradient(img_dx)
img_dyx, img_dyy = self.gradient(img_dy)
smooth1 = torch.mean(disp_dx.abs() * torch.exp(-a1 * img_dx.abs().mean(1, True))) + \
torch.mean(disp_dy.abs() * torch.exp(-a1 * img_dy.abs().mean(1, True)))
smooth2 = torch.mean(disp_dxx.abs() * torch.exp(-a2 * img_dxx.abs().mean(1, True))) + \
torch.mean(disp_dxy.abs() * torch.exp(-a2 * img_dxy.abs().mean(1, True))) + \
torch.mean(disp_dyx.abs() * torch.exp(-a2 * img_dyx.abs().mean(1, True))) + \
torch.mean(disp_dyy.abs() * torch.exp(-a2 * img_dyy.abs().mean(1, True)))
return smooth1 + smooth2
def gradient(self, D):
D_dy = D[:, :, 1:] - D[:, :, :-1]
D_dx = D[:, :, :, 1:] - D[:, :, :, :-1]
return D_dx, D_dy
|
unittest_reinvent/running_modes/reinforcement_tests/test_margin_guard.py | lilleswing/Reinvent-1 | 183 | 12682790 | <filename>unittest_reinvent/running_modes/reinforcement_tests/test_margin_guard.py
import unittest
from unittest.mock import Mock
import torch
import numpy as np
from running_modes.reinforcement_learning.margin_guard import MarginGuard
class MarginGuardStoreTest(unittest.TestCase):
def setUp(self) -> None:
self.runner = Mock()
self.mg = MarginGuard(self.runner)
self.agent_likelihood = torch.tensor([[1., -1.], [1., -1.]])
self.prior_likelihood = torch.tensor([[1., -1.], [1., -1.]])
self.augmented_likelihood = torch.tensor([[1., -1.], [1., -1.]])
self.score = np.array([1., 2., 3])
def _store_run(self) -> None:
self.mg.store_run_stats(
self.agent_likelihood,
self.prior_likelihood,
self.augmented_likelihood,
self.score
)
def test_empty(self):
self.assertEqual(len(self.mg._run_stats), 0)
def test_store_one(self):
self._store_run()
self.assertEqual(len(self.mg._run_stats), 1)
def test_store_two(self):
self._store_run()
self._store_run()
self.assertEqual(len(self.mg._run_stats), 2)
def test_stats_have_all_fields(self):
self._store_run()
fields = {
"agent_likelihood",
"prior_likelihood",
"augmented_likelihood",
"score"
}
self.assertTrue(all(f in line for line in self.mg._run_stats for f in fields))
|
conftest.py | mrclary/spyder-terminal | 169 | 12682802 | <filename>conftest.py<gh_stars>100-1000
# -*- coding: utf-8 -*-
#
# Copyright © Spyder Project Contributors
# Licensed under the terms of the MIT License
#
"""
Configuration file for Pytest
NOTE: DO NOT add fixtures here. It could generate problems with
QtAwesome being called before a QApplication is created.
"""
import os
os.environ['SPYDER_DEBUG'] = '3'
|
tests/common/gcp_api/appengine_test.py | aarontp/forseti-security | 921 | 12682816 | <gh_stars>100-1000
# Copyright 2017 The Forseti Security Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests the AppEngine client."""
import unittest
from googleapiclient import errors
import unittest.mock as mock
import httplib2
import google.auth
from google.oauth2 import credentials
from tests import unittest_utils
from tests.common.gcp_api.test_data import fake_appengine_responses as fae
from tests.common.gcp_api.test_data import http_mocks
from google.cloud.forseti.common.gcp_api import appengine as ae
from google.cloud.forseti.common.gcp_api import errors as api_errors
class AppEngineTest(unittest_utils.ForsetiTestCase):
"""Test the AppEngine client."""
@classmethod
@mock.patch.object(
google.auth, 'default',
return_value=(mock.Mock(spec_set=credentials.Credentials),
'test-project'))
def setUpClass(cls, mock_google_credential):
"""Set up."""
fake_global_configs = {
'appengine': {'max_calls': 18, 'period': 1}}
cls.ae_api_client = ae.AppEngineClient(fake_global_configs,
use_rate_limiter=False)
@mock.patch.object(
google.auth, 'default',
return_value=(mock.Mock(spec_set=credentials.Credentials),
'test-project'))
def test_no_quota(self, mock_google_credential):
"""Verify no rate limiter is used if the configuration is missing."""
ae_api_client = ae.AppEngineClient(global_configs={})
self.assertEqual(None, ae_api_client.repository._rate_limiter)
def test_is_status_not_found_404(self):
response = httplib2.Response({
'status': '404',
'content-type': 'application/json'})
response.reason = 'Not Found'
error = errors.HttpError(response, fae.APP_NOT_FOUND.encode(), uri='')
self.assertTrue(ae._is_status_not_found(error))
def test_is_status_not_found_403(self):
response = httplib2.Response({
'status': '403',
'content-type': 'application/json'})
response.reason = 'Permission Denied'
error = errors.HttpError(response, fae.PERMISSION_DENIED.encode(), uri='')
self.assertFalse(ae._is_status_not_found(error))
def test_get_app(self):
http_mocks.mock_http_response(fae.FAKE_APP_GET_RESPONSE)
response = self.ae_api_client.get_app(fae.FAKE_PROJECT_ID)
self.assertEqual(fae.FAKE_APP_NAME, response.get('name'))
def test_get_app_not_found(self):
http_mocks.mock_http_response(fae.APP_NOT_FOUND, '404')
response = self.ae_api_client.get_app(fae.FAKE_PROJECT_ID)
self.assertEqual({}, response)
def test_get_app_raises(self):
http_mocks.mock_http_response(fae.PERMISSION_DENIED, '403')
with self.assertRaises(api_errors.ApiExecutionError):
self.ae_api_client.get_app(fae.FAKE_PROJECT_ID)
def test_get_service(self):
http_mocks.mock_http_response(fae.GET_SERVICE_RESPONSE)
response = self.ae_api_client.get_service(
fae.FAKE_PROJECT_ID, fae.FAKE_SERVICE_ID)
self.assertEqual(fae.EXPECTED_SERVICE_NAMES[0], response.get('name'))
def test_get_service_not_found(self):
http_mocks.mock_http_response(fae.APP_NOT_FOUND, '404')
response = self.ae_api_client.get_service(
fae.FAKE_PROJECT_ID, fae.FAKE_SERVICE_ID)
self.assertEqual({}, response)
def test_get_service_raises(self):
http_mocks.mock_http_response(fae.PERMISSION_DENIED, '403')
with self.assertRaises(api_errors.ApiExecutionError):
self.ae_api_client.get_service(
fae.FAKE_PROJECT_ID, fae.FAKE_SERVICE_ID)
def test_list_services(self):
http_mocks.mock_http_response(fae.LIST_SERVICES_RESPONSE)
response = self.ae_api_client.list_services(fae.FAKE_PROJECT_ID)
self.assertEqual(fae.EXPECTED_SERVICE_NAMES,
[r.get('name') for r in response])
def test_list_services_not_found(self):
http_mocks.mock_http_response(fae.APP_NOT_FOUND, '404')
response = self.ae_api_client.list_services(fae.FAKE_PROJECT_ID)
self.assertEqual([], response)
def test_list_services_raises(self):
http_mocks.mock_http_response(fae.PERMISSION_DENIED, '403')
with self.assertRaises(api_errors.ApiExecutionError):
self.ae_api_client.list_services(fae.FAKE_PROJECT_ID)
def test_get_version(self):
http_mocks.mock_http_response(fae.GET_VERSION_RESPONSE)
response = self.ae_api_client.get_version(
fae.FAKE_PROJECT_ID, fae.FAKE_SERVICE_ID, fae.FAKE_VERSION_ID)
self.assertEqual(fae.EXPECTED_VERSION_NAMES[0], response.get('name'))
def test_get_version_not_found(self):
http_mocks.mock_http_response(fae.APP_NOT_FOUND, '404')
response = self.ae_api_client.get_version(
fae.FAKE_PROJECT_ID, fae.FAKE_SERVICE_ID, fae.FAKE_VERSION_ID)
self.assertEqual({}, response)
def test_get_version_raises(self):
http_mocks.mock_http_response(fae.PERMISSION_DENIED, '403')
with self.assertRaises(api_errors.ApiExecutionError):
self.ae_api_client.get_version(
fae.FAKE_PROJECT_ID, fae.FAKE_SERVICE_ID, fae.FAKE_VERSION_ID)
def test_list_versions(self):
mock_responses = []
for page in fae.LIST_VERSIONS_RESPONSES:
mock_responses.append(({'status': '200'}, page))
http_mocks.mock_http_response_sequence(mock_responses)
response = self.ae_api_client.list_versions(
fae.FAKE_PROJECT_ID, fae.FAKE_SERVICE_ID)
self.assertEqual(fae.EXPECTED_VERSION_NAMES,
[r.get('name') for r in response])
def test_list_versions_not_found(self):
http_mocks.mock_http_response(fae.APP_NOT_FOUND, '404')
response = self.ae_api_client.list_versions(
fae.FAKE_PROJECT_ID, fae.FAKE_SERVICE_ID)
self.assertEqual([], response)
def test_list_versions_raises(self):
http_mocks.mock_http_response(fae.PERMISSION_DENIED, '403')
with self.assertRaises(api_errors.ApiExecutionError):
self.ae_api_client.list_versions(
fae.FAKE_PROJECT_ID, fae.FAKE_SERVICE_ID)
def test_get_instance(self):
http_mocks.mock_http_response(fae.GET_INSTANCE_RESPONSE)
response = self.ae_api_client.get_instance(
fae.FAKE_PROJECT_ID, fae.FAKE_SERVICE_ID, fae.FAKE_VERSION_ID,
fae.FAKE_INSTANCE_ID)
self.assertEqual(fae.EXPECTED_INSTANCE_NAMES[0], response.get('name'))
def test_get_instance_not_found(self):
http_mocks.mock_http_response(fae.APP_NOT_FOUND, '404')
response = self.ae_api_client.get_instance(
fae.FAKE_PROJECT_ID, fae.FAKE_SERVICE_ID, fae.FAKE_VERSION_ID,
fae.FAKE_INSTANCE_ID)
self.assertEqual({}, response)
def test_get_instance_raises(self):
http_mocks.mock_http_response(fae.PERMISSION_DENIED, '403')
with self.assertRaises(api_errors.ApiExecutionError):
self.ae_api_client.get_instance(
fae.FAKE_PROJECT_ID, fae.FAKE_SERVICE_ID, fae.FAKE_VERSION_ID,
fae.FAKE_INSTANCE_ID)
def test_list_instances(self):
http_mocks.mock_http_response(fae.LIST_INSTANCES_RESPONSE)
response = self.ae_api_client.list_instances(
fae.FAKE_PROJECT_ID, fae.FAKE_SERVICE_ID, fae.FAKE_VERSION_ID)
self.assertEqual(fae.EXPECTED_INSTANCE_NAMES,
[r.get('name') for r in response])
def test_list_instances_not_found(self):
http_mocks.mock_http_response(fae.APP_NOT_FOUND, '404')
response = self.ae_api_client.list_instances(
fae.FAKE_PROJECT_ID, fae.FAKE_SERVICE_ID, fae.FAKE_VERSION_ID)
self.assertEqual([], response)
def test_list_instances_raises(self):
http_mocks.mock_http_response(fae.PERMISSION_DENIED, '403')
with self.assertRaises(api_errors.ApiExecutionError):
self.ae_api_client.list_instances(
fae.FAKE_PROJECT_ID, fae.FAKE_SERVICE_ID, fae.FAKE_VERSION_ID)
if __name__ == '__main__':
unittest.main()
|
hyperbolic/datasets/process_meetup.py | deepneuralmachine/google-research | 23,901 | 12682825 | # coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Collaborative Filtering meetup dataset pre-processing."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from absl import app
from absl import flags
import numpy as np
import tensorflow.compat.v2 as tf
from hyperbolic.utils.preprocess import process_dataset
from hyperbolic.utils.preprocess import save_as_pickle
FLAGS = flags.FLAGS
flags.DEFINE_string(
'dataset_path',
default='data/meetup/',
help='Path to raw dataset dir')
flags.DEFINE_string(
'save_dir_path',
default='data/meetup20_nrand/',
help='Path to saving directory')
def read_event_times(dataset_path):
"""Maps events times to a dictonary."""
event_times = {}
for split in ['train', 'test']:
path = os.path.join(dataset_path, 'NYC', split, 'events.txt')
with tf.gfile.Open(path, 'r') as lines:
for line in lines:
line = line.strip('\n').split(' ')
event = line[0]
timestamp = int(line[2])
event_times[event] = timestamp
return event_times
def to_np_new_ids(examples):
"""Creates new ids to a user-events dict. Casts new values as Numpy arrays."""
user_id = {user: i for i, user in enumerate(examples.keys())}
all_events = set().union(*examples.values())
event_id = {event: i for i, event in enumerate(all_events)}
examples_new_ids = {}
for user in examples:
events = [event_id[event] for event in examples[user]]
examples_new_ids[user_id[user]] = np.array(events)
return examples_new_ids
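# Illustrative sketch (editor's addition, not part of the original module): a
# tiny, hypothetical check of to_np_new_ids showing that users and events are
# densely re-indexed and that the per-user event lists become NumPy arrays.
def _example_to_np_new_ids():
  toy = {'alice': ['e9', 'e7'], 'bob': ['e7']}
  remapped = to_np_new_ids(toy)
  # Two users -> ids 0 and 1; every value is an array of event ids.
  assert sorted(remapped.keys()) == [0, 1]
  assert all(isinstance(events, np.ndarray) for events in remapped.values())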
def meetup_to_dict(dataset_path, min_interaction=20):
"""Maps raw dataset file to a Dictonary.
Args:
dataset_path: Path to directory so that:
dataset_file/NYC/train/event_users.txt and
dataset_file/NYC/test/event_users.txt
both have format of
event_id user_id user_id ... user_id
dataset_file/NYC/train/events.txt and
dataset_file/NYC/test/events.txt
both have format of
Event_id Venue_id Time Group_id
where the format of Time is YYYYMMDDhhmmss.
min_interaction: number of minimal interactions per user to filter on.
Returns:
Dictionary containing users as keys, and a numpy array of events the user
interacted with, sorted by the time of interaction.
"""
# create user to event dict
all_examples = {}
for split in ['train', 'test']:
path = os.path.join(dataset_path, 'NYC', split, 'event_users.txt')
with tf.gfile.Open(path, 'r') as lines:
for line in lines:
line = line.strip('\n').split(' ')
event = line[0]
for user in line[1:]:
if user in all_examples:
all_examples[user].append(event)
else:
all_examples[user] = [event]
# filter on users with enough events and sort events by time
event_times = read_event_times(dataset_path)
for user in list(all_examples):
if len(all_examples[user]) >= min_interaction:
all_examples[user] = sorted(
all_examples[user],
key=lambda event: event_times[event] if event in event_times else 0)
else:
del all_examples[user]
return to_np_new_ids(all_examples)
def main(_):
dataset_path = FLAGS.dataset_path
save_path = FLAGS.save_dir_path
sorted_dict = meetup_to_dict(dataset_path)
dataset_examples = process_dataset(sorted_dict, random=False)
save_as_pickle(save_path, dataset_examples)
if __name__ == '__main__':
app.run(main)
|
src/lib/py_compile.py | DTenore/skulpt | 2,671 | 12682832 | import _sk_fail; _sk_fail._("py_compile")
|
apps/micro_razers/tests/run_tests.py | JensUweUlrich/seqan | 409 | 12682834 | #!/usr/bin/env python2
"""Execute the tests for micro_razers.
The golden test outputs are generated by the script generate_outputs.sh.
You have to give the root paths to the source and the binaries as arguments to
the program. These are the paths to the directory that contains the 'projects'
directory.
Usage: run_tests.py SOURCE_ROOT_PATH BINARY_ROOT_PATH
"""
import logging
import os.path
import sys
# Automagically add util/py_lib to PYTHONPATH environment variable.
path = os.path.abspath(os.path.join(os.path.dirname(__file__), '..',
'..', '..', 'util', 'py_lib'))
sys.path.insert(0, path)
import seqan.app_tests as app_tests
def main(source_base, binary_base):
"""Main entry point of the script."""
print 'Executing test for micro_razers'
print '==============================='
print
ph = app_tests.TestPathHelper(
source_base, binary_base,
'apps/micro_razers/tests') # tests dir
# ============================================================
# Auto-detect the binary path.
# ============================================================
path_to_program = app_tests.autolocateBinary(
binary_base, 'apps/micro_razers', 'micro_razers')
# ============================================================
# Built TestConf list.
# ============================================================
    # Build list with TestConf objects, analogously to how the output
# was generated in generate_outputs.sh.
conf_list = []
# ============================================================
# First Section.
# ============================================================
# Run with default options.
conf = app_tests.TestConf(
program=path_to_program,
redir_stdout=ph.outFile('se-adeno-reads36_1_default.stdout'),
args=[ph.inFile('adeno-genome.fa'),
ph.inFile('adeno-reads36_1.fa'),
'-o', ph.outFile('se-adeno-reads36_1_default.razers' )],
to_diff=[(ph.inFile('se-adeno-reads36_1_default.razers' ),
ph.outFile('se-adeno-reads36_1_default.razers' )),
(ph.inFile('se-adeno-reads36_1_default.stdout' ),
ph.outFile('se-adeno-reads36_1_default.stdout' ))])
conf_list.append(conf)
# Run with different seed lengths
for sl in range(14,21):
conf = app_tests.TestConf(
program=path_to_program,
redir_stdout=ph.outFile('se-adeno-reads36_1_sl%d.stdout' % sl),
args=['-sL', str(sl),
ph.inFile('adeno-genome.fa'),
ph.inFile('adeno-reads36_1.fa'),
'-o', ph.outFile('se-adeno-reads36_1_sl%d.razers' % sl)],
to_diff=[(ph.inFile('se-adeno-reads36_1_sl%d.razers' % sl),
ph.outFile('se-adeno-reads36_1_sl%d.razers' % sl)),
(ph.inFile('se-adeno-reads36_1_sl%d.stdout' % sl),
ph.outFile('se-adeno-reads36_1_sl%d.stdout' % sl))])
conf_list.append(conf)
conf = app_tests.TestConf(
program=path_to_program,
redir_stdout=ph.outFile('se-adeno-reads36_1_sl%d_sam.stdout' % sl),
args=['-sL', str(sl),
ph.inFile('adeno-genome.fa'),
ph.inFile('adeno-reads36_1.fa'),
'-o', ph.outFile('se-adeno-reads36_1_sl%d.sam' % sl)],
to_diff=[(ph.inFile('se-adeno-reads36_1_sl%d.sam' % sl),
ph.outFile('se-adeno-reads36_1_sl%d.sam' % sl)),
(ph.inFile('se-adeno-reads36_1_sl%d_sam.stdout' % sl),
ph.outFile('se-adeno-reads36_1_sl%d_sam.stdout' % sl))])
conf_list.append(conf)
# allow error in seed
conf = app_tests.TestConf(
program=path_to_program,
redir_stdout=ph.outFile('se-adeno-reads36_1_sl%d_se.stdout' % sl),
args=['-sL', str(sl), '-sE',
ph.inFile('adeno-genome.fa'),
ph.inFile('adeno-reads36_1.fa'),
'-o', ph.outFile('se-adeno-reads36_1_sl%d_se.razers' % sl)],
to_diff=[(ph.inFile('se-adeno-reads36_1_sl%d_se.razers' % sl),
ph.outFile('se-adeno-reads36_1_sl%d_se.razers' % sl)),
(ph.inFile('se-adeno-reads36_1_sl%d_se.stdout' % sl),
ph.outFile('se-adeno-reads36_1_sl%d_se.stdout' % sl))])
conf_list.append(conf)
# change maxhits parameter
conf = app_tests.TestConf(
program=path_to_program,
redir_stdout=ph.outFile('se-adeno-reads36_1_sl18_m20_pa.stdout' ),
args=['-sL', str(18), '-m', str(20), '-pa',
ph.inFile('adeno-genome.fa'),
ph.inFile('adeno-reads36_1.fa'),
'-o', ph.outFile('se-adeno-reads36_1_sl18_m20_pa.razers' )],
to_diff=[(ph.inFile('se-adeno-reads36_1_sl18_m20_pa.razers' ),
ph.outFile('se-adeno-reads36_1_sl18_m20_pa.razers' )),
(ph.inFile('se-adeno-reads36_1_sl18_m20_pa.stdout' ),
ph.outFile('se-adeno-reads36_1_sl18_m20_pa.stdout' ))])
conf_list.append(conf)
# ============================================================
# Execute the tests.
# ============================================================
failures = 0
for conf in conf_list:
res = app_tests.runTest(conf)
# Output to the user.
print ' '.join(['micro_razers'] + conf.args),
if res:
print 'OK'
else:
failures += 1
print 'FAILED'
# Cleanup.
ph.deleteTempDir()
print '=============================='
print ' total tests: %d' % len(conf_list)
print ' failed tests: %d' % failures
print 'successful tests: %d' % (len(conf_list) - failures)
print '=============================='
# Compute and return return code.
return failures != 0
if __name__ == '__main__':
sys.exit(app_tests.main(main))
|
pyxl/codec/register_invertible.py | gvanrossum/pyxl3 | 150 | 12682839 | import codecs
def search_function(encoding):
if encoding != 'pyxl': return None
from pyxl.codec.transform import (
pyxl_encode, pyxl_decode, PyxlIncrementalDecoderInvertible, PyxlIncrementalEncoder,
PyxlStreamReaderInvertible, PyxlStreamWriter,
)
return codecs.CodecInfo(
name = 'pyxl',
encode = pyxl_encode,
decode = lambda b: pyxl_decode(b, invertible=True),
incrementalencoder = PyxlIncrementalEncoder,
incrementaldecoder = PyxlIncrementalDecoderInvertible,
streamreader = PyxlStreamReaderInvertible,
streamwriter = PyxlStreamWriter,
)
codecs.register(search_function)
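# Illustrative sketch (editor's addition, not part of the original module):
# after registration the codec can be found through the normal registry, and
# lookups for other encodings fall through to the next search function.
def _example_lookup():
    assert search_function('utf-8') is None
    info = codecs.lookup('pyxl')
    return info.name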
|
test/python/test_logsoftmax.py | avijit-chakroborty/ngraph-bridge | 142 | 12682840 | # ==============================================================================
# Copyright 2018-2020 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""nGraph TensorFlow bridge split operation test
"""
import tensorflow as tf
tf.compat.v1.disable_eager_execution()
import numpy as np
import pytest
from common import NgraphTest
class TestLogSoftmaxOperations(NgraphTest):
def test_logsoftmax(self):
type = np.float32
max = np.finfo(type).max
features = np.array([[1., 1., 1., 1.], [max, 1., 2., 3.]]).astype(type)
logsoftmax = tf.nn.log_softmax(features)
sess_fn = lambda sess: sess.run([logsoftmax])
out = self.with_ngraph(sess_fn)
assert np.allclose(
np.array([[-1.386294, -1.386294, -1.386294, -1.386294],
[0, -max, -max, -max]]),
out,
rtol=1.e-5,
atol=1.e-5)
|
objectModel/Python/tests/samples/test_create_manifest.py | rt112000/CDM | 884 | 12682856 | # Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
import os
import unittest
from typing import cast
from cdm.enums import CdmStatusLevel, CdmObjectType
from cdm.objectmodel import CdmCorpusDefinition, CdmEntityDefinition, CdmLocalEntityDeclarationDefinition, \
CdmManifestDefinition
from cdm.storage import LocalAdapter
from tests.common import async_test, TestHelper
def IfRunTestsFlagNotSet():
    return os.environ.get('SAMPLE_RUNTESTS') != '1'
class CreateManifestTest(unittest.TestCase):
tests_subpath = 'Samples'
test_name = 'test_create_manifest'
@async_test
@unittest.skipIf(IfRunTestsFlagNotSet(), "SAMPLE_RUNTESTS environment variable not set.")
async def test_create_manifest(self):
TestHelper.delete_files_from_actual_output(
TestHelper.get_actual_output_folder_path(self.tests_subpath, self.test_name))
await self.create_manifest(self.setup_cdm_corpus())
error_log = TestHelper.compare_folder_files_equality(
TestHelper.get_expected_output_folder_path(self.tests_subpath, self.test_name),
TestHelper.get_actual_output_folder_path(self.tests_subpath, self.test_name), True)
self.assertEqual('', error_log)
def setup_cdm_corpus(self):
# Make a corpus, the corpus is the collection of all documents and folders created or discovered while navigating
# objects and paths.
cdm_corpus = CdmCorpusDefinition()
cdm_corpus.ctx.report_at_level = CdmStatusLevel.ERROR
print('Configure storage adapters')
cdm_corpus.storage.mount('local', LocalAdapter(
TestHelper.get_actual_output_folder_path(self.tests_subpath, self.test_name)))
# Local is our default. So any paths that start out navigating without a device tag will assume local.
cdm_corpus.storage.default_namespace = 'local'
# Fake cdm, normally use the CDM Standards adapter.
cdm_corpus.storage.mount('cdm', LocalAdapter(TestHelper.sample_schema_folder_path))
return cdm_corpus
async def create_manifest(self, cdm_corpus: CdmCorpusDefinition):
print('Make placeholder manifest')
# Make the temp manifest and add it to the root of the local documents in the corpus.
manifest_abstract = cdm_corpus.make_object(CdmObjectType.MANIFEST_DEF,
'temp_abstract') # type: CdmManifestDefinition
# Add each declaration, this example is about medical appointments and care plans
manifest_abstract.entities.append('Account',
'cdm:/core/applicationCommon/foundationCommon/crmCommon/accelerators/healthCare/electronicMedicalRecords/Account.cdm.json/Account')
manifest_abstract.entities.append('Address',
'cdm:/core/applicationCommon/foundationCommon/crmCommon/accelerators/healthCare/electronicMedicalRecords/Address.cdm.json/Address')
manifest_abstract.entities.append('CarePlan',
'cdm:/core/applicationCommon/foundationCommon/crmCommon/accelerators/healthCare/electronicMedicalRecords/CarePlan.cdm.json/CarePlan')
manifest_abstract.entities.append('CodeableConcept',
'cdm:/core/applicationCommon/foundationCommon/crmCommon/accelerators/healthCare/electronicMedicalRecords/CodeableConcept.cdm.json/CodeableConcept')
manifest_abstract.entities.append('Contact',
'cdm:/core/applicationCommon/foundationCommon/crmCommon/accelerators/healthCare/electronicMedicalRecords/Contact.cdm.json/Contact')
manifest_abstract.entities.append('Device',
'cdm:/core/applicationCommon/foundationCommon/crmCommon/accelerators/healthCare/electronicMedicalRecords/Device.cdm.json/Device')
manifest_abstract.entities.append('EmrAppointment',
'cdm:/core/applicationCommon/foundationCommon/crmCommon/accelerators/healthCare/electronicMedicalRecords/EmrAppointment.cdm.json/EmrAppointment')
manifest_abstract.entities.append('Encounter',
'cdm:/core/applicationCommon/foundationCommon/crmCommon/accelerators/healthCare/electronicMedicalRecords/Encounter.cdm.json/Encounter')
manifest_abstract.entities.append('EpisodeOfCare',
'cdm:/core/applicationCommon/foundationCommon/crmCommon/accelerators/healthCare/electronicMedicalRecords/EpisodeOfCare.cdm.json/EpisodeOfCare')
manifest_abstract.entities.append('Location',
'cdm:/core/applicationCommon/foundationCommon/crmCommon/accelerators/healthCare/electronicMedicalRecords/Location.cdm.json/Location')
# Add the temp manifest to the root of the local documents in the corpus.
local_root = cdm_corpus.storage.fetch_root_folder('local')
local_root.documents.append(manifest_abstract)
# Create the resolved version of everything in the root folder too.
print('Resolve the placeholder')
manifest_resolved = await manifest_abstract.create_resolved_manifest_async('default', '')
        # Add an import to the foundations doc so the traits about partitions will resolve nicely.
manifest_resolved.imports.append('cdm:/foundations.cdm.json', '')
print('Save the documents')
for e_def in manifest_resolved.entities:
# Get the entity being pointed at.
local_e_def = cast(CdmLocalEntityDeclarationDefinition, e_def)
# Turns a relative path from manifest_resolved into an absolute path.
ent_def = cast(CdmEntityDefinition,
await cdm_corpus.fetch_object_async(local_e_def.entity_path, manifest_resolved))
# Make a fake partition, just to demo that.
part = cdm_corpus.make_object(CdmObjectType.DATA_PARTITION_DEF, '{}-data-description'.format(
ent_def.entity_name)) # type: CdmDataPartitionDefinition
local_e_def.data_partitions.append(part)
part.explanation = 'not real data, just for demo'
# Define the location of the partition, relative to the manifest
local_location = 'local:/{}/partition-data.csv'.format(ent_def.entity_name)
part.location = cdm_corpus.storage.create_relative_corpus_path(local_location, manifest_resolved)
# Add trait to partition for csv params.
csv_trait = part.exhibits_traits.append('is.partition.format.CSV', False)
csv_trait.arguments.append('columnHeaders', 'true')
csv_trait.arguments.append('delimiter', ',')
# Get the actual location of the partition file from the corpus.
part_path = cdm_corpus.storage.corpus_path_to_adapter_path(local_location)
# Make a fake file with nothing but header for columns.
header = ','.join([att.name for att in ent_def.attributes])
os.makedirs(cdm_corpus.storage.corpus_path_to_adapter_path('local:/{}'.format(ent_def.entity_name)),
exist_ok=True)
with open(part_path, 'w') as file:
file.write(header)
await manifest_resolved.save_as_async('{}.manifest.cdm.json'.format(manifest_resolved.manifest_name), True)
|
yabgp/message/keepalive.py | mengjunyi/yabgp | 203 | 12682860 | # Copyright 2015 Cisco Systems, Inc.
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
""" BGP KeepAlive message"""
import struct
from yabgp.common.exception import MessageHeaderError
from yabgp.common.constants import ERR_MSG_HDR_BAD_MSG_LEN
class KeepAlive(object):
"""
KEEPALIVE messages are exchanged between peers often
enough not to cause the Hold Timer to expire
"""
MSG_KEEPALIVE = 4
@staticmethod
def parse(msg):
"""
Parse keepalive message
:param msg: input raw binary message data
"""
if len(msg) != 0:
raise MessageHeaderError(
sub_error=ERR_MSG_HDR_BAD_MSG_LEN,
data='')
@staticmethod
def construct_header():
"""Prepends the mandatory header to a constructed BGP message
"""
        #     16-octet       2-octet  1-octet
        # ----------------+--------+------+
        #      Marker     | Length | Type |
        # ----------------+--------+------+
return b'\xff'*16 + struct.pack('!HB', 19, 4)
def construct(self):
"""
Construct a keepalive message
"""
return self.construct_header()
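# Illustrative sketch (editor's addition, not part of the original module): a
# KEEPALIVE is just the 19-byte header -- 16-byte marker, 2-byte length (19)
# and 1-byte type (4) -- so construct() and parse() round-trip trivially.
def _example_keepalive_roundtrip():
    raw = KeepAlive().construct()
    assert len(raw) == 19
    # parse() expects only the bytes after the header, which must be empty.
    KeepAlive.parse(raw[19:])
    return raw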
|
scanpy/tests/external/test_scrublet.py | mrland99/scanpy | 1,171 | 12682894 | import pytest
import scanpy as sc
import scanpy.external as sce
from anndata.tests.helpers import assert_equal
def test_scrublet():
"""
Test that Scrublet run works.
Check that scrublet runs and detects some doublets.
"""
pytest.importorskip("scrublet")
adata = sc.datasets.pbmc3k()
sce.pp.scrublet(adata, use_approx_neighbors=False)
# replace assertions by conditions
assert "predicted_doublet" in adata.obs.columns
assert "doublet_score" in adata.obs.columns
assert adata.obs["predicted_doublet"].any(), "Expect some doublets to be identified"
def test_scrublet_dense():
"""
Test that Scrublet works for dense matrices.
Check that scrublet runs and detects some doublets when a dense matrix is supplied.
"""
pytest.importorskip("scrublet")
adata = sc.datasets.paul15()[:500].copy()
sce.pp.scrublet(adata, use_approx_neighbors=False)
# replace assertions by conditions
assert "predicted_doublet" in adata.obs.columns
assert "doublet_score" in adata.obs.columns
assert adata.obs["predicted_doublet"].any(), "Expect some doublets to be identified"
def test_scrublet_params():
"""
Test that Scrublet args are passed.
Check that changes to parameters change scrublet results.
"""
pytest.importorskip("scrublet")
# Reduce size of input for faster test
adata = sc.datasets.pbmc3k()[:500].copy()
sc.pp.filter_genes(adata, min_counts=100)
# Get the default output
default = sce.pp.scrublet(adata, use_approx_neighbors=False, copy=True)
test_params = {
'expected_doublet_rate': 0.1,
'synthetic_doublet_umi_subsampling': 0.8,
'knn_dist_metric': 'manhattan',
'normalize_variance': False,
'log_transform': True,
'mean_center': False,
'n_prin_comps': 10,
'n_neighbors': 2,
'threshold': 0.1,
}
# Test each parameter and make sure something changes
for param in test_params.keys():
test_args = {
'adata': adata,
'use_approx_neighbors': False,
'copy': True,
param: test_params[param],
}
curr = sc.external.pp.scrublet(**test_args)
with pytest.raises(AssertionError):
assert_equal(default, curr)
def test_scrublet_simulate_doublets():
"""
Test that standalone Scrublet doublet simulation works.
    Check that doublet simulation runs and simulates some doublets.
"""
pytest.importorskip("scrublet")
adata_obs = sc.datasets.pbmc3k()
sc.pp.filter_genes(adata_obs, min_cells=3)
sc.pp.filter_cells(adata_obs, min_genes=3)
adata_obs.layers['raw'] = adata_obs.X
sc.pp.normalize_total(adata_obs)
logged = sc.pp.log1p(adata_obs, copy=True)
_ = sc.pp.highly_variable_genes(logged)
adata_obs = adata_obs[:, logged.var['highly_variable']]
adata_sim = sce.pp.scrublet_simulate_doublets(adata_obs, layer='raw')
assert 'doublet_parents' in adata_sim.obsm.keys()
|
GitRangerLiu/0000/img_addnum.py | saurabh896/python-1 | 3,976 | 12682896 | <gh_stars>1000+
from PIL import Image, ImageDraw, ImageFont
def img_addnum(img_name, num):
im = Image.open(img_name)
draw = ImageDraw.Draw(im)
    # width and height
    w = im.width
    h = im.height
    print h, w
    # load font
    #fnt = ImageFont.load_default()
fnt = ImageFont.truetype('arial.ttf', int(h * 0.15))
draw.text((w * 0.9 , h * 0.05), num, font=fnt, fill=(255, 0, 0, 128))
im.save(img_name.split('.')[0] + '2.jpg')
if __name__ == '__main__':
img_addnum('cat.jpg', '3')
|
python_toolbox/reasoned_bool.py | hboshnak/python_toolbox | 119 | 12682899 | <gh_stars>100-1000
# Copyright 2009-2017 <NAME>.
# This program is distributed under the MIT license.
class ReasonedBool:
'''
A variation on `bool` that also gives a `.reason`.
This is useful when you want to say "This is False because... (reason.)"
Unfortunately this class is not a subclass of `bool`, since Python doesn't
allow subclassing `bool`.
'''
def __init__(self, value, reason=None):
'''
Construct the `ReasonedBool`.
`reason` is the reason *why* it has a value of `True` or `False`. It is
usually a string, but is allowed to be of any type.
'''
self.value = bool(value)
self.reason = reason
def __repr__(self):
if self.reason is not None:
return f'<{self.value} because {repr(self.reason)}>'
else: # self.reason is None
return f'<{self.value} with no reason>'
def __eq__(self, other):
return bool(self) == other
def __hash__(self):
return hash(bool(self))
    def __ne__(self, other):
return not self.__eq__(other)
def __bool__(self):
return self.value |
model/db/zd_qconf_agent.py | knightoning/zkdash | 748 | 12682916 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# pylint: disable=invalid-name
"""
Copyright (c) 2014,掌阅科技
All rights reserved.
摘 要: zd_qconf_agent.py
创 建 者: zhuangshixiong
创建日期: 2015-08-26
"""
from peewee import CharField
from peewee import IntegerField
from peewee import SQL
from model.db.base import ZKDASH_DB, EnumField
class ZdQconfAgent(ZKDASH_DB.Model):
"""ZdQconfAgent Model
"""
id = IntegerField(primary_key=True, constraints=[SQL("AUTO_INCREMENT")])
ip = CharField(max_length=32, null=True)
hostname = CharField(max_length=32, null=True)
cluster_name = CharField(max_length=32, null=True)
notes = CharField(max_length=255, null=True)
deleted = EnumField(enum_value="'0', '1'", constraints=[SQL("DEFAULT '0'")])
class Meta(object):
"""表配置信息
"""
db_table = "zd_qconf_agent"
|
pytrait/errors.py | tushar-deepsource/pytrait | 115 | 12682937 | class PytraitError(RuntimeError):
pass
class DisallowedInitError(PytraitError):
pass
class NonMethodAttrError(PytraitError):
pass
class MultipleImplementationError(PytraitError):
pass
class InheritanceError(PytraitError):
pass
class NamingConventionError(PytraitError):
pass
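# Illustrative sketch (editor's addition, not part of the original module):
# every specific error shares the PytraitError base, so callers can catch the
# whole family in one place.
def _example_catch_any_pytrait_error():
    try:
        raise MultipleImplementationError("two implementations of one trait")
    except PytraitError as exc:
        return str(exc)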
|
ch2_seldon_examples/train_pipeline.py | gabrielclimb/intro-to-ml-with-kubeflow-examples | 150 | 12682956 | <reponame>gabrielclimb/intro-to-ml-with-kubeflow-examples
import kfp.dsl as dsl
import kfp.gcp as gcp
import kfp.onprem as onprem
from string import Template
import json
@dsl.pipeline(name='Simple sci-kit KF Pipeline',
description='A simple end to end sci-kit seldon kf pipeline')
def mnist_train_pipeline(docker_org="index.docker.io/seldonio",
train_container_version="0.2",
serve_container_version="0.1"):
vop = dsl.VolumeOp(name="create_pvc",
resource_name="nfs-1",
modes=dsl.VOLUME_MODE_RWO,
size="10G")
volume = vop.volume
train = dsl.ContainerOp(
name='sk-train',
image=
f"{docker_org}/skmnistclassifier_trainer:{train_container_version}",
pvolumes={"/data": volume})
seldon_serving_json_template = Template("""
{
"apiVersion": "machinelearning.seldon.io/v1alpha2",
"kind": "SeldonDeployment",
"metadata": {
"labels": {
"app": "seldon"
},
"name": "mnist-classifier"
},
"spec": {
"annotations": {
"deployment_version": "v1",
"project_name": "MNIST Example"
},
"name": "mnist-classifier",
"predictors": [
{
"annotations": {
"predictor_version": "v1"
},
"componentSpecs": [
{
"spec": {
"containers": [
{
"image": "$dockerreposerving:$dockertagserving",
"imagePullPolicy": "Always",
"name": "mnist-classifier",
"volumeMounts": [
{
"mountPath": "/data",
"name": "persistent-storage"
}
]
}
],
"terminationGracePeriodSeconds": 1,
"volumes": [
{
"name": "persistent-storage",
"persistentVolumeClaim": {
"claimName": "$modelpvc"
}
}
]
}
}
],
"graph": {
"children": [],
"endpoint": {
"type": "REST"
},
"name": "mnist-classifier",
"type": "MODEL"
},
"name": "mnist-classifier",
"replicas": 1
}
]
}
}
""")
seldon_serving_json = seldon_serving_json_template.substitute({
'dockerreposerving':
f"{docker_org}/skmnistclassifier_runtime",
'dockertagserving':
str(serve_container_version),
'modelpvc':
vop.outputs["name"]
})
seldon_deployment = json.loads(seldon_serving_json)
serve = dsl.ResourceOp(
name='serve',
k8s_resource=seldon_deployment,
success_condition='status.state == Available').after(train)
# If we're called directly, create an experiment and run the pipeline.
if __name__ == '__main__':
    pipeline_func = mnist_train_pipeline
    pipeline_filename = pipeline_func.__name__ + '.pipeline.zip'
    import kfp.compiler as compiler
    compiler.Compiler().compile(pipeline_func, pipeline_filename)
    # The original snippet referenced an undefined `client` and `arguments`;
    # a kfp.Client() against the default/in-cluster endpoint and an empty
    # parameter dict are reasonable assumptions here.
    import kfp
    client = kfp.Client()
    arguments = {}
    experiment_name = "cheese"
    experiment = client.create_experiment(experiment_name)
    run_name = pipeline_func.__name__ + ' run'
    run_result = client.run_pipeline(experiment.id, run_name,
                                     pipeline_filename, arguments)
    print(run_result)
|
inference.py | Na-Z/Atlas | 1,571 | 12683013 | # Copyright 2020 Magic Leap, Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Originating Author: <NAME> (<EMAIL>)
import argparse
import os
import numpy as np
import torch
from atlas.data import SceneDataset, parse_splits_list
from atlas.model import VoxelNet
import atlas.transforms as transforms
def process(info_file, model, num_frames, save_path, total_scenes_index, total_scenes_count):
""" Run the netork on a scene and save output
Args:
info_file: path to info_json file for the scene
        model: pytorch model that implements Atlas
        num_frames: number of frames to use in reconstruction (-1 for all)
save_path: where to save outputs
total_scenes_index: used to print which scene we are on
total_scenes_count: used to print the total number of scenes to process
"""
voxel_scale = model.voxel_sizes[0]
dataset = SceneDataset(info_file, voxel_sizes=[voxel_scale],
voxel_types=model.voxel_types, num_frames=num_frames)
# compute voxel origin
if 'file_name_vol_%02d'%voxel_scale in dataset.info:
# compute voxel origin from ground truth
tsdf_trgt = dataset.get_tsdf()['vol_%02d'%voxel_scale]
voxel_size = float(voxel_scale)/100
# shift by integer number of voxels for padding
shift = torch.tensor([.5, .5, .5])//voxel_size
offset = tsdf_trgt.origin - shift*voxel_size
else:
# use default origin
# assume floor is a z=0 so pad bottom a bit
offset = torch.tensor([0,0,-.5])
T = torch.eye(4)
T[:3,3] = offset
transform = transforms.Compose([
transforms.ResizeImage((640,480)),
transforms.ToTensor(),
transforms.TransformSpace(T, model.voxel_dim_val, [0,0,0]),
transforms.IntrinsicsPoseToProjection(),
])
dataset.transform = transform
dataloader = torch.utils.data.DataLoader(dataset, batch_size=None,
batch_sampler=None, num_workers=2)
scene = dataset.info['scene']
model.initialize_volume()
torch.cuda.empty_cache()
for j, d in enumerate(dataloader):
# logging progress
if j%25==0:
print(total_scenes_index,
total_scenes_count,
dataset.info['dataset'],
scene,
j,
len(dataloader)
)
model.inference1(d['projection'].unsqueeze(0).cuda(),
image=d['image'].unsqueeze(0).cuda())
outputs, losses = model.inference2()
tsdf_pred = model.postprocess(outputs)[0]
# TODO: set origin in model... make consistent with offset above?
tsdf_pred.origin = offset.view(1,3).cuda()
if 'semseg' in tsdf_pred.attribute_vols:
mesh_pred = tsdf_pred.get_mesh('semseg')
        # save vertex attributes separately since trimesh doesn't
np.savez(os.path.join(save_path, '%s_attributes.npz'%scene),
**mesh_pred.vertex_attributes)
else:
mesh_pred = tsdf_pred.get_mesh()
tsdf_pred.save(os.path.join(save_path, '%s.npz'%scene))
mesh_pred.export(os.path.join(save_path, '%s.ply'%scene))
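# Illustrative sketch (editor's addition, not part of the original script):
# how process() might be driven for a single scene outside of main(). The
# checkpoint, info-file and output paths below are placeholders, not real files.
def _example_single_scene(checkpoint='results/release/final.ckpt',
                          info_file='data/scene0000_00/info.json',
                          save_path='results/single_scene_demo'):
    model = VoxelNet.load_from_checkpoint(checkpoint)
    model = model.cuda().eval()
    torch.set_grad_enabled(False)
    os.makedirs(save_path, exist_ok=True)
    process(info_file, model, num_frames=-1, save_path=save_path,
            total_scenes_index=0, total_scenes_count=1)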
def main():
parser = argparse.ArgumentParser(description="Atlas Testing")
parser.add_argument("--model", required=True, metavar="FILE",
help="path to checkpoint")
parser.add_argument("--scenes", default="data/scannet_test.txt",
help="which scene(s) to run on")
parser.add_argument("--num_frames", default=-1, type=int,
help="number of frames to use (-1 for all)")
parser.add_argument("--voxel_dim", nargs=3, default=[-1,-1,-1], type=int,
help="override voxel dim")
args = parser.parse_args()
# get all the info_file.json's from the command line
# .txt files contain a list of info_file.json's
info_files = parse_splits_list(args.scenes)
model = VoxelNet.load_from_checkpoint(args.model)
model = model.cuda().eval()
torch.set_grad_enabled(False)
# overwrite default values of voxel_dim_test
if args.voxel_dim[0] != -1:
model.voxel_dim_test = args.voxel_dim
# TODO: implement voxel_dim_test
model.voxel_dim_val = model.voxel_dim_test
model_name = os.path.splitext(os.path.split(args.model)[1])[0]
save_path = os.path.join(model.cfg.LOG_DIR, model.cfg.TRAINER.NAME,
model.cfg.TRAINER.VERSION, 'test_'+model_name)
if args.num_frames>-1:
save_path = '%s_%d'%(save_path, args.num_frames)
os.makedirs(save_path, exist_ok=True)
for i, info_file in enumerate(info_files):
# run model on each scene
process(info_file, model, args.num_frames, save_path, i, len(info_files))
if __name__ == "__main__":
main() |
llvm/bindings/python/llvm/tests/test_bitreader.py | medismailben/llvm-project | 4,812 | 12683015 | <reponame>medismailben/llvm-project
from __future__ import print_function
from .base import TestBase
from ..core import OpCode
from ..core import MemoryBuffer
from ..core import PassRegistry
from ..core import Context
from ..core import Module
from ..bit_reader import parse_bitcode
class TestBitReader(TestBase):
def test_parse_bitcode(self):
source = self.get_test_bc()
m = parse_bitcode(MemoryBuffer(filename=source))
print(m.target)
print(m.datalayout)
|
main_lapwgan.py | AnimatedRNG/pytorch-LapSRN | 270 | 12683025 | import argparse, os
import pdb
import torch
import math, random
import torch.backends.cudnn as cudnn
import torch.nn as nn
import torch.optim as optim
from torch.autograd import Variable
from torch.utils.data import DataLoader
from lapsrn_wgan import _netG, _netD, L1_Charbonnier_loss
from dataset import DatasetFromHdf5
from torchvision import models, transforms
import torch.utils.model_zoo as model_zoo
# Training settings
parser = argparse.ArgumentParser(description="PyTorch LapSRN WGAN")
parser.add_argument("--batchSize", type=int, default=32, help="training batch size")
parser.add_argument("--nEpochs", type=int, default=400, help="number of epochs to train for")
parser.add_argument('--lrG', type=float, default=1e-4, help='Learning Rate. Default=1e-4')
parser.add_argument('--lrD', type=float, default=1e-4, help='Learning Rate. Default=1e-4')
parser.add_argument("--step", type=int, default=50, help="Sets the learning rate to the initial LR decayed by momentum every n epochs, Default: n=10")
parser.add_argument("--cuda", action="store_true", help="Use cuda?")
parser.add_argument("--resume", default="", type=str, help="Path to checkpoint (default: none)")
parser.add_argument("--start-epoch", default=1, type=int, help="Manual epoch number (useful on restarts)")
parser.add_argument("--threads", type=int, default=1, help="Number of threads for data loader to use, Default: 1")
parser.add_argument("--momentum", default=0.9, type=float, help="Momentum, Default: 0.9")
parser.add_argument("--weight-decay", "--wd", default=1e-4, type=float, help="weight decay, Default: 1e-4")
parser.add_argument("--pretrained", default="", type=str, help="path to pretrained model (default: none)")
parser.add_argument('--clamp_lower', type=float, default=-0.01)
parser.add_argument('--clamp_upper', type=float, default=0.01)
def main():
global opt, model
opt = parser.parse_args()
print(opt)
cuda = opt.cuda
if cuda and not torch.cuda.is_available():
raise Exception("No GPU found, please run without --cuda")
opt.seed = random.randint(1, 10000)
print("Random Seed: ", opt.seed)
torch.manual_seed(opt.seed)
if cuda:
torch.cuda.manual_seed(opt.seed)
cudnn.benchmark = True
print("===> Loading datasets")
train_set = DatasetFromHdf5("data/lap_pry_x4_small.h5")
training_data_loader = DataLoader(dataset=train_set, num_workers=opt.threads, batch_size=opt.batchSize, shuffle=True)
print('===> Building generator model')
netG = _netG()
print('===> Building discriminator model')
netD = _netD()
print('===> Loading VGG model')
model_urls = {
"vgg19": "https://download.pytorch.org/models/vgg19-dcbb9e9d.pth"
}
netVGG = models.vgg19()
netVGG.load_state_dict(model_zoo.load_url(model_urls['vgg19']))
weight = torch.FloatTensor(64,1,3,3)
parameters = list(netVGG.parameters())
for i in range(64):
weight[i,:,:,:] = parameters[0].data[i].mean(0)
bias = parameters[1].data
class _content_model(nn.Module):
def __init__(self):
super(_content_model, self).__init__()
self.conv = conv2d = nn.Conv2d(1, 64, kernel_size=3, padding=1)
self.feature = nn.Sequential(*list(netVGG.features.children())[1:-1])
self._initialize_weights()
def forward(self, x):
out = self.conv(x)
out = self.feature(out)
return out
def _initialize_weights(self):
self.conv.weight.data.copy_(weight)
self.conv.bias.data.copy_(bias)
netContent = _content_model()
print('===> Building Loss')
criterion = L1_Charbonnier_loss()
print("===> Setting GPU")
if cuda:
netG = netG.cuda()
netD = netD.cuda()
netContent = netContent.cuda()
criterion = criterion.cuda()
# optionally resume from a checkpoint
if opt.resume:
if os.path.isfile(opt.resume):
print("=> loading checkpoint '{}'".format(opt.resume))
checkpoint = torch.load(opt.resume)
opt.start_epoch = checkpoint["epoch"] + 1
netG.load_state_dict(checkpoint["model"].state_dict())
else:
print("=> no checkpoint found at '{}'".format(opt.resume))
# optionally copy weights from a checkpoint
if opt.pretrained:
if os.path.isfile(opt.pretrained):
print("=> loading model '{}'".format(opt.pretrained))
weights = torch.load(opt.pretrained)
netG.load_state_dict(weights['model'].state_dict())
else:
print("=> no model found at '{}'".format(opt.pretrained))
print("===> Setting Optimizer")
optimizerD = optim.RMSprop(netD.parameters(), lr = opt.lrD)
optimizerG = optim.RMSprop(netG.parameters(), lr = opt.lrG)
print("===> Training")
for epoch in range(opt.start_epoch, opt.nEpochs + 1):
train(training_data_loader, optimizerG, optimizerD, netG, netD, netContent, criterion, epoch)
save_checkpoint(netG, epoch)
def adjust_learning_rate(optimizer, epoch):
"""Sets the learning rate to the initial LR decayed by 10 every 10 epochs"""
lr = opt.lr * (0.1 ** (epoch // opt.step))
return lr
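# Illustrative sketch (editor's addition, not part of the original script):
# adjust_learning_rate() above is never called from the training loop, and it
# reads opt.lr, which the argument parser does not define (only --lrG and
# --lrD exist). A wired-in variant, called once per epoch, could look like this.
def _example_apply_lr_decay(optimizerG, optimizerD, epoch):
    decay = 0.1 ** (epoch // opt.step)
    for param_group in optimizerG.param_groups:
        param_group["lr"] = opt.lrG * decay
    for param_group in optimizerD.param_groups:
        param_group["lr"] = opt.lrD * decay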
def train(training_data_loader, optimizerG, optimizerD, netG, netD, netContent, criterion, epoch):
netG.train()
netD.train()
one = torch.FloatTensor([1.])
mone = one * -1
content_weight = torch.FloatTensor([1.])
adversarial_weight = torch.FloatTensor([1.])
for iteration, batch in enumerate(training_data_loader, 1):
input, label_x2, label_x4 = Variable(batch[0]), Variable(batch[1], requires_grad=False), Variable(batch[2], requires_grad=False)
if opt.cuda:
input = input.cuda()
label_x2 = label_x2.cuda()
label_x4 = label_x4.cuda()
one, mone, content_weight, adversarial_weight = one.cuda(), mone.cuda(), content_weight.cuda(), adversarial_weight.cuda()
############################
        # (1) Update D network: loss = D(x) - D(G(z))
###########################
# train with real
errD_real = netD(label_x4)
errD_real.backward(one, retain_graph=True)
# train with fake
input_G = Variable(input.data, volatile = True)
fake_x4 = Variable(netG(input_G)[1].data)
fake_D = fake_x4
errD_fake = netD(fake_D)
errD_fake.backward(mone)
errD = errD_real - errD_fake
optimizerD.step()
        for p in netD.parameters():  # WGAN weight clipping on the critic
            p.data.clamp_(opt.clamp_lower, opt.clamp_upper)
netD.zero_grad()
netG.zero_grad()
netContent.zero_grad()
############################
# (2) Update G network: loss = D(G(z))
###########################
fake_D_x2, fake_D_x4 = netG(input)
content_fake_x2 = netContent(fake_D_x2)
content_real_x2 = netContent(label_x2)
content_real_x2 = Variable(content_real_x2.data)
content_loss_x2 = criterion(content_fake_x2, content_real_x2)
content_loss_x2.backward(content_weight, retain_graph=True)
content_fake_x4 = netContent(fake_D_x4)
content_real_x4 = netContent(label_x4)
content_real_x4 = Variable(content_real_x4.data)
content_loss_x4 = criterion(content_fake_x4, content_real_x4)
content_loss_x4.backward(content_weight, retain_graph=True)
content_loss = content_loss_x2 + content_loss_x4
adversarial_loss = netD(fake_D_x4)
adversarial_loss.backward(adversarial_weight)
optimizerG.step()
netD.zero_grad()
netG.zero_grad()
netContent.zero_grad()
if iteration%10 == 0:
print("===> Epoch[{}]({}/{}): LossD: {:.10f} [{:.10f} - {:.10f}] LossG: [{:.10f} + {:.10f}]".format(epoch, iteration, len(training_data_loader),
errD.data[0], errD_real.data[0], errD_fake.data[0], adversarial_loss.data[0], content_loss.data[0]))
def save_checkpoint(model, epoch):
model_folder = "checkpoint/"
model_out_path = model_folder + "lapwgan_model_epoch_{}.pth".format(epoch)
state = {"epoch": epoch ,"model": model}
if not os.path.exists(model_folder):
os.makedirs(model_folder)
torch.save(state, model_out_path)
print("Checkpoint saved to {}".format(model_out_path))
if __name__ == "__main__":
main() |
rl_coach/tests/memories/test_differential_neural_dictionary.py | jl45621/coach | 1,960 | 12683039 | # nasty hack to deal with issue #46
import os
import sys
sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
import pytest
import numpy as np
import time
from rl_coach.memories.non_episodic.differentiable_neural_dictionary import QDND
import tensorflow as tf
NUM_ACTIONS = 3
NUM_DND_ENTRIES_TO_ADD = 10000
EMBEDDING_SIZE = 512
NUM_SAMPLED_EMBEDDINGS = 500
NUM_NEIGHBORS = 10
DND_SIZE = 500000
@pytest.fixture()
def dnd():
return QDND(
DND_SIZE,
EMBEDDING_SIZE,
NUM_ACTIONS,
0.1,
key_error_threshold=0,
learning_rate=0.0001,
num_neighbors=NUM_NEIGHBORS
)
@pytest.mark.unit_test
def test_random_sample_from_dnd(dnd: QDND):
    # store a batch of random embeddings, actions and values in the DND
embeddings = [np.random.rand(EMBEDDING_SIZE) for j in range(NUM_DND_ENTRIES_TO_ADD)]
actions = [np.random.randint(NUM_ACTIONS) for j in range(NUM_DND_ENTRIES_TO_ADD)]
values = [np.random.rand() for j in range(NUM_DND_ENTRIES_TO_ADD)]
dnd.add(embeddings, actions, values)
dnd_embeddings, dnd_values, dnd_indices = dnd.query(embeddings[0:10], 0, NUM_NEIGHBORS)
# calculate_normalization_factor
sampled_embeddings = dnd.sample_embeddings(NUM_SAMPLED_EMBEDDINGS)
coefficient = 1/(NUM_SAMPLED_EMBEDDINGS * (NUM_SAMPLED_EMBEDDINGS - 1.0))
tf_current_embedding = tf.placeholder(tf.float32, shape=(EMBEDDING_SIZE), name='current_embedding')
tf_other_embeddings = tf.placeholder(tf.float32, shape=(NUM_SAMPLED_EMBEDDINGS - 1, EMBEDDING_SIZE), name='other_embeddings')
sub = tf_current_embedding - tf_other_embeddings
square = tf.square(sub)
result = tf.reduce_sum(square)
###########################
# more efficient method
###########################
sampled_embeddings_expanded = tf.placeholder(
tf.float32, shape=(1, NUM_SAMPLED_EMBEDDINGS, EMBEDDING_SIZE), name='sampled_embeddings_expanded')
sampled_embeddings_tiled = tf.tile(sampled_embeddings_expanded, (sampled_embeddings_expanded.shape[1], 1, 1))
sampled_embeddings_transposed = tf.transpose(sampled_embeddings_tiled, (1, 0, 2))
sub2 = sampled_embeddings_tiled - sampled_embeddings_transposed
square2 = tf.square(sub2)
result2 = tf.reduce_sum(square2)
config = tf.ConfigProto()
config.allow_soft_placement = True # allow placing ops on cpu if they are not fit for gpu
config.gpu_options.allow_growth = True # allow the gpu memory allocated for the worker to grow if needed
sess = tf.Session(config=config)
sum1 = 0
start = time.time()
for i in range(NUM_SAMPLED_EMBEDDINGS):
curr_sampled_embedding = sampled_embeddings[i]
other_embeddings = np.delete(sampled_embeddings, i, 0)
sum1 += sess.run(result, feed_dict={tf_current_embedding: curr_sampled_embedding, tf_other_embeddings: other_embeddings})
print("1st method: {} sec".format(time.time()-start))
start = time.time()
sum2 = sess.run(result2, feed_dict={sampled_embeddings_expanded: np.expand_dims(sampled_embeddings,0)})
print("2nd method: {} sec".format(time.time()-start))
# validate that results are equal
print("sum1 = {}, sum2 = {}".format(sum1, sum2))
norm_factor = -0.5/(coefficient * sum2)
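# Illustrative sketch (editor's addition, not part of the original test): the
# tile/transpose trick above computes the same pairwise sum of squared
# distances as the per-row loop; a tiny NumPy analogue shows the equivalence.
def _example_pairwise_equivalence(num=5, dim=3):
    emb = np.random.rand(num, dim)
    loop_sum = sum(np.sum((emb[i] - np.delete(emb, i, 0)) ** 2) for i in range(num))
    tiled = np.tile(emb[None, :, :], (num, 1, 1))
    vectorized_sum = np.sum((tiled - np.transpose(tiled, (1, 0, 2))) ** 2)
    assert np.isclose(loop_sum, vectorized_sum)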
if __name__ == '__main__':
test_random_sample_from_dnd(dnd())
|
detectron2/model_zoo/model_zoo.py | AlanDecode/detectron2 | 201 | 12683046 | # Copyright (c) Facebook, Inc. and its affiliates.
import os
from typing import Optional
import pkg_resources
import torch
from detectron2.checkpoint import DetectionCheckpointer
from detectron2.config import CfgNode, LazyConfig, get_cfg, instantiate
from detectron2.modeling import build_model
class _ModelZooUrls(object):
"""
Mapping from names to officially released Detectron2 pre-trained models.
"""
S3_PREFIX = "https://dl.fbaipublicfiles.com/detectron2/"
# format: {config_path.yaml} -> model_id/model_final_{commit}.pkl
CONFIG_PATH_TO_URL_SUFFIX = {
# COCO Detection with Faster R-CNN
"COCO-Detection/faster_rcnn_R_50_C4_1x": "137257644/model_final_721ade.pkl",
"COCO-Detection/faster_rcnn_R_50_DC5_1x": "137847829/model_final_51d356.pkl",
"COCO-Detection/faster_rcnn_R_50_FPN_1x": "137257794/model_final_b275ba.pkl",
"COCO-Detection/faster_rcnn_R_50_C4_3x": "137849393/model_final_f97cb7.pkl",
"COCO-Detection/faster_rcnn_R_50_DC5_3x": "137849425/model_final_68d202.pkl",
"COCO-Detection/faster_rcnn_R_50_FPN_3x": "137849458/model_final_280758.pkl",
"COCO-Detection/faster_rcnn_R_101_C4_3x": "138204752/model_final_298dad.pkl",
"COCO-Detection/faster_rcnn_R_101_DC5_3x": "138204841/model_final_3e0943.pkl",
"COCO-Detection/faster_rcnn_R_101_FPN_3x": "137851257/model_final_f6e8b1.pkl",
"COCO-Detection/faster_rcnn_X_101_32x8d_FPN_3x": "139173657/model_final_68b088.pkl",
# COCO Detection with RetinaNet
"COCO-Detection/retinanet_R_50_FPN_1x": "190397773/model_final_bfca0b.pkl",
"COCO-Detection/retinanet_R_50_FPN_3x": "190397829/model_final_5bd44e.pkl",
"COCO-Detection/retinanet_R_101_FPN_3x": "190397697/model_final_971ab9.pkl",
# COCO Detection with RPN and Fast R-CNN
"COCO-Detection/rpn_R_50_C4_1x": "137258005/model_final_450694.pkl",
"COCO-Detection/rpn_R_50_FPN_1x": "137258492/model_final_02ce48.pkl",
"COCO-Detection/fast_rcnn_R_50_FPN_1x": "137635226/model_final_e5f7ce.pkl",
# COCO Instance Segmentation Baselines with Mask R-CNN
"COCO-InstanceSegmentation/mask_rcnn_R_50_C4_1x": "137259246/model_final_9243eb.pkl",
"COCO-InstanceSegmentation/mask_rcnn_R_50_DC5_1x": "137260150/model_final_4f86c3.pkl",
"COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_1x": "137260431/model_final_a54504.pkl",
"COCO-InstanceSegmentation/mask_rcnn_R_50_C4_3x": "137849525/model_final_4ce675.pkl",
"COCO-InstanceSegmentation/mask_rcnn_R_50_DC5_3x": "137849551/model_final_84107b.pkl",
"COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x": "137849600/model_final_f10217.pkl",
"COCO-InstanceSegmentation/mask_rcnn_R_101_C4_3x": "138363239/model_final_a2914c.pkl",
"COCO-InstanceSegmentation/mask_rcnn_R_101_DC5_3x": "138363294/model_final_0464b7.pkl",
"COCO-InstanceSegmentation/mask_rcnn_R_101_FPN_3x": "138205316/model_final_a3ec72.pkl",
"COCO-InstanceSegmentation/mask_rcnn_X_101_32x8d_FPN_3x": "139653917/model_final_2d9806.pkl", # noqa
# COCO Person Keypoint Detection Baselines with Keypoint R-CNN
"COCO-Keypoints/keypoint_rcnn_R_50_FPN_1x": "137261548/model_final_04e291.pkl",
"COCO-Keypoints/keypoint_rcnn_R_50_FPN_3x": "137849621/model_final_a6e10b.pkl",
"COCO-Keypoints/keypoint_rcnn_R_101_FPN_3x": "138363331/model_final_997cc7.pkl",
"COCO-Keypoints/keypoint_rcnn_X_101_32x8d_FPN_3x": "139686956/model_final_5ad38f.pkl",
# COCO Panoptic Segmentation Baselines with Panoptic FPN
"COCO-PanopticSegmentation/panoptic_fpn_R_50_1x": "139514544/model_final_dbfeb4.pkl",
"COCO-PanopticSegmentation/panoptic_fpn_R_50_3x": "139514569/model_final_c10459.pkl",
"COCO-PanopticSegmentation/panoptic_fpn_R_101_3x": "139514519/model_final_cafdb1.pkl",
# LVIS Instance Segmentation Baselines with Mask R-CNN
"LVISv0.5-InstanceSegmentation/mask_rcnn_R_50_FPN_1x": "144219072/model_final_571f7c.pkl", # noqa
"LVISv0.5-InstanceSegmentation/mask_rcnn_R_101_FPN_1x": "144219035/model_final_824ab5.pkl", # noqa
"LVISv0.5-InstanceSegmentation/mask_rcnn_X_101_32x8d_FPN_1x": "144219108/model_final_5e3439.pkl", # noqa
# Cityscapes & Pascal VOC Baselines
"Cityscapes/mask_rcnn_R_50_FPN": "142423278/model_final_af9cf5.pkl",
"PascalVOC-Detection/faster_rcnn_R_50_C4": "142202221/model_final_b1acc2.pkl",
# Other Settings
"Misc/mask_rcnn_R_50_FPN_1x_dconv_c3-c5": "138602867/model_final_65c703.pkl",
"Misc/mask_rcnn_R_50_FPN_3x_dconv_c3-c5": "144998336/model_final_821d0b.pkl",
"Misc/cascade_mask_rcnn_R_50_FPN_1x": "138602847/model_final_e9d89b.pkl",
"Misc/cascade_mask_rcnn_R_50_FPN_3x": "144998488/model_final_480dd8.pkl",
"Misc/mask_rcnn_R_50_FPN_3x_syncbn": "169527823/model_final_3b3c51.pkl",
"Misc/mask_rcnn_R_50_FPN_3x_gn": "138602888/model_final_dc5d9e.pkl",
"Misc/scratch_mask_rcnn_R_50_FPN_3x_gn": "138602908/model_final_01ca85.pkl",
"Misc/scratch_mask_rcnn_R_50_FPN_9x_gn": "183808979/model_final_da7b4c.pkl",
"Misc/scratch_mask_rcnn_R_50_FPN_9x_syncbn": "184226666/model_final_5ce33e.pkl",
"Misc/panoptic_fpn_R_101_dconv_cascade_gn_3x": "139797668/model_final_be35db.pkl",
"Misc/cascade_mask_rcnn_X_152_32x8d_FPN_IN5k_gn_dconv": "18131413/model_0039999_e76410.pkl", # noqa
# D1 Comparisons
"Detectron1-Comparisons/faster_rcnn_R_50_FPN_noaug_1x": "137781054/model_final_7ab50c.pkl", # noqa
"Detectron1-Comparisons/mask_rcnn_R_50_FPN_noaug_1x": "137781281/model_final_62ca52.pkl", # noqa
"Detectron1-Comparisons/keypoint_rcnn_R_50_FPN_1x": "137781195/model_final_cce136.pkl",
}
@staticmethod
def query(config_path: str) -> Optional[str]:
"""
Args:
config_path: relative config filename
"""
name = config_path.replace(".yaml", "").replace(".py", "")
if name in _ModelZooUrls.CONFIG_PATH_TO_URL_SUFFIX:
suffix = _ModelZooUrls.CONFIG_PATH_TO_URL_SUFFIX[name]
return _ModelZooUrls.S3_PREFIX + name + "/" + suffix
return None
def get_checkpoint_url(config_path):
"""
Returns the URL to the model trained using the given config
Args:
config_path (str): config file name relative to detectron2's "configs/"
directory, e.g., "COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_1x.yaml"
Returns:
str: a URL to the model
"""
url = _ModelZooUrls.query(config_path)
if url is None:
raise RuntimeError("Pretrained model for {} is not available!".format(config_path))
return url
def get_config_file(config_path):
"""
Returns path to a builtin config file.
Args:
config_path (str): config file name relative to detectron2's "configs/"
directory, e.g., "COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_1x.yaml"
Returns:
str: the real path to the config file.
"""
cfg_file = pkg_resources.resource_filename(
"detectron2.model_zoo", os.path.join("configs", config_path)
)
if not os.path.exists(cfg_file):
raise RuntimeError("{} not available in Model Zoo!".format(config_path))
return cfg_file
def get_config(config_path, trained: bool = False):
"""
Returns a config object for a model in model zoo.
Args:
config_path (str): config file name relative to detectron2's "configs/"
directory, e.g., "COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_1x.yaml"
trained (bool): If True, will set ``MODEL.WEIGHTS`` to trained model zoo weights.
If False, the checkpoint specified in the config file's ``MODEL.WEIGHTS`` is used
instead; this will typically (though not always) initialize a subset of weights using
an ImageNet pre-trained model, while randomly initializing the other weights.
Returns:
CfgNode or omegaconf.DictConfig: a config object
"""
cfg_file = get_config_file(config_path)
if cfg_file.endswith(".yaml"):
cfg = get_cfg()
cfg.merge_from_file(cfg_file)
if trained:
cfg.MODEL.WEIGHTS = get_checkpoint_url(config_path)
return cfg
elif cfg_file.endswith(".py"):
cfg = LazyConfig.load(cfg_file)
if trained:
url = get_checkpoint_url(config_path)
if "train" in cfg and "init_checkpoint" in cfg.train:
cfg.train.init_checkpoint = url
else:
raise NotImplementedError
return cfg
def get(config_path, trained: bool = False, device: Optional[str] = None):
"""
Get a model specified by relative path under Detectron2's official ``configs/`` directory.
Args:
config_path (str): config file name relative to detectron2's "configs/"
directory, e.g., "COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_1x.yaml"
trained (bool): see :func:`get_config`.
device (str or None): overwrite the device in config, if given.
Returns:
nn.Module: a detectron2 model. Will be in training mode.
Example:
::
from detectron2 import model_zoo
model = model_zoo.get("COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_1x.yaml", trained=True)
"""
cfg = get_config(config_path, trained)
if device is None and not torch.cuda.is_available():
device = "cpu"
if device is not None and isinstance(cfg, CfgNode):
cfg.MODEL.DEVICE = device
if isinstance(cfg, CfgNode):
model = build_model(cfg)
DetectionCheckpointer(model).load(cfg.MODEL.WEIGHTS)
else:
model = instantiate(cfg.model)
if device is not None:
model = model.to(device)
if "train" in cfg and "init_checkpoint" in cfg.train:
DetectionCheckpointer(model).load(cfg.train.init_checkpoint)
return model
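# Usage sketch (illustrative): the config path below is just an example -- any
# relative path under detectron2's "configs/" directory works the same way; the
# device argument forces CPU inference regardless of the config.
#
#   from detectron2 import model_zoo
#   model = model_zoo.get(
#       "COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_1x.yaml",
#       trained=True,
#       device="cpu",
#   )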
|
mindarmour/adv_robustness/detectors/ensemble_detector.py | mindspore-ai/mindarmour | 139 | 12683070 | <reponame>mindspore-ai/mindarmour
# Copyright 2019 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Ensemble Detector.
"""
import numpy as np
from mindarmour.utils.logger import LogUtil
from mindarmour.utils._check_param import check_numpy_param, \
check_param_multi_types
from .detector import Detector
LOGGER = LogUtil.get_instance()
TAG = 'EnsembleDetector'
class EnsembleDetector(Detector):
"""
Ensemble detector.
Args:
detectors (Union[tuple, list]): List of detector methods.
policy (str): Decision policy, could be 'vote', 'all' or 'any'.
Default: 'vote'
"""
def __init__(self, detectors, policy="vote"):
super(EnsembleDetector, self).__init__()
self._detectors = check_param_multi_types('detectors', detectors,
[list, tuple])
self._num_detectors = len(detectors)
self._policy = policy
def fit(self, inputs, labels=None):
"""
Fit detector like a machine learning model. This method is not available
in this class.
Args:
inputs (numpy.ndarray): Data to calculate the threshold.
labels (numpy.ndarray): Labels of data. Default: None.
Raises:
NotImplementedError: This function is not available in ensemble.
"""
msg = 'The function fit() is not available in the class ' \
'`EnsembleDetector`.'
LOGGER.error(TAG, msg)
raise NotImplementedError(msg)
def detect(self, inputs):
"""
Detect adversarial examples from input samples.
Args:
inputs (numpy.ndarray): Input samples.
Returns:
            list[int], whether a sample is adversarial. If res[i]=1, the
            input sample with index i is adversarial.
Raises:
ValueError: If policy is not supported.
"""
inputs = check_numpy_param('inputs', inputs)
x_len = inputs.shape[0]
counts = np.zeros(x_len)
        res = np.zeros(x_len, dtype=int)  # plain int: np.int is removed in NumPy >= 1.24
for detector in list(self._detectors):
idx = detector.detect(inputs)
counts[idx] += 1
if self._policy == "vote":
idx_adv = np.argwhere(counts > self._num_detectors / 2)
elif self._policy == "all":
idx_adv = np.argwhere(counts == self._num_detectors)
elif self._policy == "any":
idx_adv = np.argwhere(counts > 0)
else:
msg = 'Policy {} is not supported.'.format(self._policy)
LOGGER.error(TAG, msg)
raise ValueError(msg)
res[idx_adv] = 1
return list(res)
def detect_diff(self, inputs):
"""
This method is not available in this class.
Args:
            inputs (Union[numpy.ndarray, list, tuple]): Data used as
                references to create adversarial examples.
Raises:
NotImplementedError: This function is not available in ensemble.
"""
msg = 'The function detect_diff() is not available in the class ' \
'`EnsembleDetector`.'
LOGGER.error(TAG, msg)
raise NotImplementedError(msg)
def transform(self, inputs):
"""
Filter adversarial noises in input samples.
This method is not available in this class.
Args:
            inputs (Union[numpy.ndarray, list, tuple]): Data used as
                references to create adversarial examples.
Raises:
NotImplementedError: This function is not available in ensemble.
"""
msg = 'The function transform() is not available in the class ' \
'`EnsembleDetector`.'
LOGGER.error(TAG, msg)
raise NotImplementedError(msg)
|
setup.py | PLMZ/nb2xls | 144 | 12683079 | from distutils.util import convert_path
from setuptools import setup, find_packages
module = 'nb2xls'
# get version from __meta__
meta_ns = {}
path = convert_path(module+'/__meta__.py')
with open(path) as meta_file:
exec(meta_file.read(), meta_ns)
# read requirements.txt
with open('requirements.txt', 'r') as f:
content = f.read()
li_req = content.split('\n')
install_requires = [e.strip() for e in li_req if len(e)]
name = module
name_url = name.replace('_', '-')
packages = [module]
version = meta_ns['__version__']
description = 'Export Jupyter notebook as an Excel xls file.'
long_description = 'Export Jupyter notebook as an Excel xls file.'
author = 'ideonate'
author_email = '<EMAIL>'
# github template
url = 'https://github.com/{}/{}'.format(author,
name_url)
download_url = 'https://github.com/{}/{}/tarball/{}'.format(author,
name_url,
version)
keywords = ['jupyter',
'nbconvert',
]
license = 'MIT'
classifiers = ['Development Status :: 4 - Beta',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7'
]
include_package_data = True
zip_safe = False
extra_requirements = {
'test': ['pytest', 'testpath', 'openpyxl', 'matplotlib']
}
# ref https://packaging.python.org/tutorials/distributing-packages/
setup(
name=name,
version=version,
packages=packages,
author=author,
author_email=author_email,
description=description,
long_description=long_description,
url=url,
download_url=download_url,
keywords=keywords,
license=license,
classifiers=classifiers,
include_package_data=include_package_data,
install_requires=install_requires,
extras_require=extra_requirements,
zip_safe=zip_safe,
entry_points = {
'nbconvert.exporters': [
'xls = nb2xls:XLSExporter'
],
}
)
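# Usage sketch (commands are illustrative): after installation the "xls" exporter
# registered through the entry point above is available to nbconvert, e.g.
#
#   pip install .
#   jupyter nbconvert --to xls notebook.ipynb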
|
integration_tests/samples/socket_mode/bolt_adapter/base_handler.py | priya1puresoftware/python-slack-sdk | 2,486 | 12683084 | import logging
from threading import Event
from slack_sdk.socket_mode.client import BaseSocketModeClient
from slack_sdk.socket_mode.request import SocketModeRequest
from slack_bolt import App
class BaseSocketModeHandler:
app: App # type: ignore
client: BaseSocketModeClient
def handle(self, client: BaseSocketModeClient, req: SocketModeRequest) -> None:
raise NotImplementedError()
def connect(self):
self.client.connect()
def disconnect(self):
self.client.disconnect()
def close(self):
self.client.close()
def start(self):
self.connect()
if self.app.logger.level > logging.INFO:
print("⚡️ Bolt app is running!")
else:
self.app.logger.info("⚡️ Bolt app is running!")
Event().wait()
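# Minimal subclass sketch (illustrative): the concrete adapters in this package
# assign `self.app` and `self.client` themselves; the constructor below is an
# assumption for the sake of the example.
#
#   class MySocketModeHandler(BaseSocketModeHandler):
#       def __init__(self, app: App, client: BaseSocketModeClient):
#           self.app = app
#           self.client = client
#
#       def handle(self, client: BaseSocketModeClient, req: SocketModeRequest) -> None:
#           ...  # translate the Socket Mode request and dispatch it to self.app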
|
tests/test_reloader.py | Varriount/sanic | 4,959 | 12683085 | <filename>tests/test_reloader.py
import os
import secrets
import sys
from contextlib import suppress
from subprocess import PIPE, Popen, TimeoutExpired
from tempfile import TemporaryDirectory
from textwrap import dedent
from threading import Timer
from time import sleep
import pytest
# We need to interrupt the autoreloader without killing it, so that the server gets terminated
# https://stefan.sofa-rockers.org/2013/08/15/handling-sub-process-hierarchies-python-linux-os-x/
try:
from signal import CTRL_BREAK_EVENT
from subprocess import CREATE_NEW_PROCESS_GROUP
flags = CREATE_NEW_PROCESS_GROUP
except ImportError:
flags = 0
TIMER_DELAY = 2
def terminate(proc):
if flags:
proc.send_signal(CTRL_BREAK_EVENT)
else:
proc.terminate()
def write_app(filename, **runargs):
text = secrets.token_urlsafe()
with open(filename, "w") as f:
f.write(
dedent(
f"""\
import os
from sanic import Sanic
app = Sanic(__name__)
app.route("/")(lambda x: x)
@app.listener("after_server_start")
def complete(*args):
print("complete", os.getpid(), {text!r})
if __name__ == "__main__":
app.run(**{runargs!r})
"""
)
)
return text
def write_json_config_app(filename, jsonfile, **runargs):
with open(filename, "w") as f:
f.write(
dedent(
f"""\
import os
from sanic import Sanic
import json
app = Sanic(__name__)
with open("{jsonfile}", "r") as f:
config = json.load(f)
app.config.update_config(config)
app.route("/")(lambda x: x)
@app.listener("after_server_start")
def complete(*args):
print("complete", os.getpid(), app.config.FOO)
if __name__ == "__main__":
app.run(**{runargs!r})
"""
)
)
def write_file(filename):
text = secrets.token_urlsafe()
with open(filename, "w") as f:
f.write(f"""{{"FOO": "{text}"}}""")
return text
def scanner(proc):
for line in proc.stdout:
line = line.decode().strip()
if line.startswith("complete"):
yield line
argv = dict(
script=[sys.executable, "reloader.py"],
module=[sys.executable, "-m", "reloader"],
sanic=[
sys.executable,
"-m",
"sanic",
"--port",
"42104",
"--debug",
"reloader.app",
],
)
@pytest.mark.parametrize(
"runargs, mode",
[
(dict(port=42102, auto_reload=True), "script"),
(dict(port=42103, debug=True), "module"),
({}, "sanic"),
],
)
async def test_reloader_live(runargs, mode):
with TemporaryDirectory() as tmpdir:
filename = os.path.join(tmpdir, "reloader.py")
text = write_app(filename, **runargs)
command = argv[mode]
proc = Popen(command, cwd=tmpdir, stdout=PIPE, creationflags=flags)
try:
timeout = Timer(TIMER_DELAY, terminate, [proc])
timeout.start()
# Python apparently keeps using the old source sometimes if
# we don't sleep before rewrite (pycache timestamp problem?)
sleep(1)
line = scanner(proc)
assert text in next(line)
# Edit source code and try again
text = write_app(filename, **runargs)
assert text in next(line)
finally:
timeout.cancel()
terminate(proc)
with suppress(TimeoutExpired):
proc.wait(timeout=3)
@pytest.mark.parametrize(
"runargs, mode",
[
(dict(port=42102, auto_reload=True), "script"),
(dict(port=42103, debug=True), "module"),
({}, "sanic"),
],
)
async def test_reloader_live_with_dir(runargs, mode):
with TemporaryDirectory() as tmpdir:
filename = os.path.join(tmpdir, "reloader.py")
config_file = os.path.join(tmpdir, "config.json")
runargs["reload_dir"] = tmpdir
write_json_config_app(filename, config_file, **runargs)
text = write_file(config_file)
command = argv[mode]
if mode == "sanic":
command += ["--reload-dir", tmpdir]
proc = Popen(command, cwd=tmpdir, stdout=PIPE, creationflags=flags)
try:
timeout = Timer(TIMER_DELAY, terminate, [proc])
timeout.start()
# Python apparently keeps using the old source sometimes if
# we don't sleep before rewrite (pycache timestamp problem?)
sleep(1)
line = scanner(proc)
assert text in next(line)
# Edit source code and try again
text = write_file(config_file)
assert text in next(line)
finally:
timeout.cancel()
terminate(proc)
with suppress(TimeoutExpired):
proc.wait(timeout=3)
|
tests/internal/test_xdg.py | grdorin/mopidy | 6,700 | 12683088 | import os
import pathlib
from unittest import mock
import pytest
from mopidy.internal import xdg
@pytest.fixture
def environ():
patcher = mock.patch.dict(os.environ, clear=True)
yield patcher.start()
patcher.stop()
def test_cache_dir_default(environ):
assert xdg.get_dirs()["XDG_CACHE_DIR"] == (
pathlib.Path("~/.cache").expanduser()
)
def test_cache_dir_from_env(environ):
os.environ["XDG_CACHE_HOME"] = "/foo/bar"
assert xdg.get_dirs()["XDG_CACHE_DIR"] == pathlib.Path("/foo/bar")
def test_config_dir_default(environ):
assert xdg.get_dirs()["XDG_CONFIG_DIR"] == (
pathlib.Path("~/.config").expanduser()
)
def test_config_dir_from_env(environ):
os.environ["XDG_CONFIG_HOME"] = "/foo/bar"
assert xdg.get_dirs()["XDG_CONFIG_DIR"] == pathlib.Path("/foo/bar")
def test_data_dir_default(environ):
assert xdg.get_dirs()["XDG_DATA_DIR"] == (
pathlib.Path("~/.local/share").expanduser()
)
def test_data_dir_from_env(environ):
os.environ["XDG_DATA_HOME"] = "/foo/bar"
assert xdg.get_dirs()["XDG_DATA_DIR"] == pathlib.Path("/foo/bar")
def test_user_dirs(environ, tmpdir):
os.environ["XDG_CONFIG_HOME"] = str(tmpdir)
with open(os.path.join(str(tmpdir), "user-dirs.dirs"), "wb") as fh:
fh.write(b"# Some comments\n")
fh.write(b'XDG_MUSIC_DIR="$HOME/Music2"\n')
result = xdg.get_dirs()
assert result["XDG_MUSIC_DIR"] == pathlib.Path("~/Music2").expanduser()
assert "XDG_DOWNLOAD_DIR" not in result
def test_user_dirs_when_no_dirs_file(environ, tmpdir):
os.environ["XDG_CONFIG_HOME"] = str(tmpdir)
result = xdg.get_dirs()
assert "XDG_MUSIC_DIR" not in result
assert "XDG_DOWNLOAD_DIR" not in result
|
tests/spot/margin/test_margin_interest_history.py | Banging12/binance-connector-python | 512 | 12683089 | import responses
from binance.spot import Spot as Client
from tests.util import random_str
from urllib.parse import urlencode
from tests.util import mock_http_response
mock_item = {"key_1": "value_1", "key_2": "value_2"}
mock_exception = {"code": -1, "msg": "error message"}
key = random_str()
secret = random_str()
params = {
"asset": "BNB",
"startTime": "1590969041003",
"endTime": "1590969041003",
"size": 10,
"recvWindow": 1000,
}
@mock_http_response(
responses.GET,
"/sapi/v1/margin/interestHistory\\?" + urlencode(params),
mock_item,
200,
)
def test_margin_interest_history():
"""Tests the API endpoint to query margin interest history"""
client = Client(key, secret)
response = client.margin_interest_history(**params)
response.should.equal(mock_item)
|
tests/common/test_responses.py | mumtozvalijonov/fastapi_contrib | 504 | 12683112 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from fastapi_contrib.common.responses import UJSONResponse
def test_ujson_response_helps_with_slashes():
url = "http://hello.world/endpoint/?key=value"
json = UJSONResponse().render(content={"url": url})
assert json == f'{{"url":"{url}"}}'.encode('utf-8')
|
assets/scripts/voronoi-svg.py | ford442/oglplu2 | 103 | 12683135 | #!/usr/bin/python3
# coding: UTF-8
# Copyright <NAME>.
# Distributed under the Boost Software License, Version 1.0.
# See accompanying file LICENSE_1_0.txt or copy at
# http://www.boost.org/LICENSE_1_0.txt
import os
import sys
import math
import numpy
import random
import argparse
import multiprocessing
# ------------------------------------------------------------------------------
def mix(b, i, f):
return (1.0-f)*b + f*i
# ------------------------------------------------------------------------------
def inverse_logistic(x):
eps = 0.001
return math.log(max(x, eps)) - math.log(max(1.0 - x, eps ))
# ------------------------------------------------------------------------------
def logistic(x):
return 1.0 / (1.0 + math.exp(-x))
# ------------------------------------------------------------------------------
def sigmoid(x, c):
return logistic(c * inverse_logistic(x))
# ------------------------------------------------------------------------------
def perpendicular(v1):
v2 = numpy.empty_like(v1)
v2[0] = -v1[1]
v2[1] = v1[0]
return v2
# ------------------------------------------------------------------------------
def set_center(points):
return sum(points)/len(points)
# ------------------------------------------------------------------------------
def segment_point(p1, p2, c):
return (1-c)*p1 + c*p2;
# ------------------------------------------------------------------------------
def segment_midpoint(p1, p2):
return (p1+p2)*0.5
# ------------------------------------------------------------------------------
def segment_normal(p1, p2):
return perpendicular(p2-p1)
# ------------------------------------------------------------------------------
def line_intersect_param(l1, l2):
d1 = l1[1]
d2 = l2[1]
dp = l2[0]-l1[0]
d2p = perpendicular(d2)
num = numpy.dot(d2p, dp)
den = numpy.dot(d2p, d1)
if abs(den) > 0.00001:
return num / den
return None
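# Note: lines are given parametrically as (origin, direction); the returned t
# satisfies l1[0] + t*l1[1] == intersection point, or None for (near-)parallel
# lines. Tiny check with axis-aligned lines:
#   l1 = (numpy.array((0.0, 0.0)), numpy.array((1.0, 0.0)))
#   l2 = (numpy.array((2.0, -1.0)), numpy.array((0.0, 1.0)))
#   line_intersect_param(l1, l2)  # -> 2.0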
# ------------------------------------------------------------------------------
class ImageSampler(object):
# --------------------------------------------------------------------------
def __init__(self, image, width, height):
self._im = image
self._w = width
self._h = height
# --------------------------------------------------------------------------
@classmethod
def from_file(cls, path, width, height):
import PIL.Image
image = PIL.Image.open(path).convert("RGB")
if width is None:
width, unused = image.size
if height is None:
unused, height = image.size
if (width, height) != image.size:
image = image.resize((width, height), PIL.Image.BICUBIC)
return cls(image, width, height)
# --------------------------------------------------------------------------
def width(self):
return self._w
# --------------------------------------------------------------------------
def height(self):
return self._h
# --------------------------------------------------------------------------
def get_pixel(self, x, y):
x = max(min(x, self._w-1), 0)
y = max(min(y, self._h-1), 0)
c0, c1, c2 = self._im.getpixel((x, y))
return (c0/255.0, c1/255.0, c2/255.0)
# --------------------------------------------------------------------------
def converted(self, mode):
return ImageSampler(self._im.convert(mode), self._w, self._h)
# ------------------------------------------------------------------------------
class NoImageSampler(object):
# --------------------------------------------------------------------------
def __init__(self):
pass
# --------------------------------------------------------------------------
def get_pixel(self, x, y):
return (0.0, 0.0, 0.0)
# --------------------------------------------------------------------------
def converted(self, mode):
return self
# ------------------------------------------------------------------------------
class RandomGenerator(object):
# --------------------------------------------------------------------------
def __init__(self, mrg, rrg):
self._mrg = mrg
self._rrg = rrg
# --------------------------------------------------------------------------
def get(self, rim):
if rim:
try:
return self._rrg.random()
except:
pass
return self._mrg.random()
# ------------------------------------------------------------------------------
class Randomized(object):
# --------------------------------------------------------------------------
def _get_rng0(self):
try:
return self.rng0
except:
self.rng0 = random.Random(self._mid_seed)
return self.rng0
# --------------------------------------------------------------------------
def _mid_rng(self):
import random
if self._mid_seed is None:
import time
try: return random.SystemRandom()
except: return random.Random(time.time())
else:
return random.Random(self._get_rng0().randrange(0, sys.maxsize))
# --------------------------------------------------------------------------
def _rim_rng(self):
if self._rim_seed is not None:
return random.Random(self._rim_seed)
return None
# --------------------------------------------------------------------------
def get_rng(self):
return RandomGenerator(self._mid_rng(), self._rim_rng())
# --------------------------------------------------------------------------
def __init__(self, options):
self._mid_seed = options.seed
self._rim_seed = options.rim_seed
# ------------------------------------------------------------------------------
class RandomCellValues(Randomized):
# --------------------------------------------------------------------------
def _gen_values(self, w, h, transformable):
rc = self.get_rng()
cell_data = list()
for y in range(h):
r = list()
for x in range(w):
rim = x <= 0 or y <= 0 or x+1 >= w or y+1 >= h
r.append(rc.get(rim))
cell_data.append(r)
if transformable:
r = range(int(w/2)+1)
rv = [rc.get(True) for i in r]
for i in r:
v = 0.5 + (rv[i]-0.5)*0.75
cell_data[i][0] = v
cell_data[h-i-1][0] = v
cell_data[i][w-1] = v
cell_data[h-i-1][w-1] = v
cell_data[0][i] = v
cell_data[0][w-i-1] = v
cell_data[h-1][i] = v
cell_data[h-1][w-i-1] = v
return cell_data
# --------------------------------------------------------------------------
def __init__(self, options, w, h):
Randomized.__init__(self, options)
self._values = self._gen_values(w, h, options.transformable)
# --------------------------------------------------------------------------
def get(self, x, y):
return self._values[y][x]
# ------------------------------------------------------------------------------
class RandomCellOffsets(Randomized):
# --------------------------------------------------------------------------
def _gen_offsets(self, w, h, transformable):
rx = self.get_rng()
ry = self.get_rng()
cell_data = list()
for y in range(h):
row = list()
for x in range(w):
rim = x <= 0 or y <= 0 or x+1 >= w or y+1 >= h
row.append((rx.get(rim), ry.get(rim)))
cell_data.append(row)
if transformable:
r = range(int(w/2)+1)
rv = [(rx.get(True), ry.get(True)) for i in r]
for i in r:
xo, yo = rv[i]
l = 0.8
cell_data[i][0] = (l*xo, yo)
cell_data[h-i-1][0] = (l*xo, 1.0-yo)
cell_data[i][w-1] = (1.0-l*xo, 1.0-yo)
cell_data[h-i-1][w-1] = (1.0-l*xo, yo)
cell_data[0][i] = (xo, l*yo)
cell_data[0][w-i-1] = (1.0-xo, l*yo)
cell_data[h-1][i] = (1.0-xo, 1.0-l*yo)
cell_data[h-1][w-i-1] = (xo, 1.0-l*yo)
return cell_data
# --------------------------------------------------------------------------
def __init__(self, options, w, h):
Randomized.__init__(self, options)
self._offsets = self._gen_offsets(w, h, options.transformable)
# --------------------------------------------------------------------------
def get(self, x, y):
return self._offsets[y][x]
# ------------------------------------------------------------------------------
class ImageContourCellOffsets(object):
# --------------------------------------------------------------------------
def _gen_offsets(self, im, bg, w, h):
def _distmod(x, y):
d = abs(x - y)
return d if d < 0.5 else 1.0-d
kernel = [
(-1, -1),
( 0, -1),
( 1, -1),
(-1, 0),
( 1, 0),
(-1, 1),
( 0, 1),
( 1, 1)
]
kn = 1.0/(len(kernel))
cell_data = list()
for y in range(h):
row = list()
for x in range(w):
nx = 0.0
ny = 0.0
dispx = 0.0
dispy = 0.0
h, s, v = im.get_pixel(x, y)
for ox, oy in kernel:
oh, os, ov = im.get_pixel(x+ox, y+oy)
dh = _distmod(h, oh)
ds = s - os
dv = v - ov
adh = abs(dh)
ads = abs(ds)
adv = abs(dv)
dw = dv if adv > ads else ds if ads > adh else dh
vx, vy = ox, oy
vl = math.sqrt(vx*vx + vy*vy)
vx /= vl
vy /= vl
nx += vx*dw
ny += vy*dw
dispx += nx*nx
dispy += ny*ny
nx = nx*kn
ny = ny*kn
dispx = math.sqrt(dispx)*kn
dispy = math.sqrt(dispy)*kn
dispw = sigmoid(
math.sqrt(
max(abs(nx), abs(ny), abs(dispx-dispy))
),
2.5
)
nx = 0.5 + 0.5*nx
ny = 0.5 + 0.5*ny
bx, by = bg.get(x, y)
row.append((mix(bx, nx, dispw), mix(by, ny, dispw)))
cell_data.append(row)
return cell_data
# --------------------------------------------------------------------------
def __init__(self, options, bg, w, h):
self._offsets = self._gen_offsets(
options.image.converted("HSV"),
bg,
w, h)
# --------------------------------------------------------------------------
def get(self, x, y):
return self._offsets[y][x]
# ------------------------------------------------------------------------------
class HoneycombXCellOffsets(object):
# --------------------------------------------------------------------------
def __init__(self, options, bg, w, h):
self._fact_x = 0.8
self._fact_y = 0.9
self._bg = bg
# --------------------------------------------------------------------------
def get(self, x, y):
hx, hy = (0.5, 0.0 if x % 2 == 0 else 0.5)
bx, by = self._bg.get(x, y)
return (mix(bx, hx, self._fact_x), mix(by, hy, self._fact_y))
# ------------------------------------------------------------------------------
class HoneycombYCellOffsets(object):
# --------------------------------------------------------------------------
def __init__(self, options, bg, w, h):
self._fact_x = 0.9
self._fact_y = 0.8
self._bg = bg
# --------------------------------------------------------------------------
def get(self, x, y):
hx, hy = (0.0 if y % 2 == 0 else 0.5, 0.5)
bx, by = self._bg.get(x, y)
return (mix(bx, hx, self._fact_x), mix(by, hy, self._fact_y))
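# Note: both honeycomb variants pull each cell towards the centre of its grid
# square and shift alternate columns (X variant) or rows (Y variant) by half a
# cell, approximating a hexagonal tiling; the residual jitter comes from mixing
# in the random background offsets with weights _fact_x/_fact_y.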
# ------------------------------------------------------------------------------
class VoronoiArgumentParser(argparse.ArgumentParser):
# --------------------------------------------------------------------------
def _nonnegative_int(self, x):
try:
i = int(x)
assert i > 0
return i
except:
self.error("`%s' is not a positive integer value" % str(x))
# --------------------------------------------------------------------------
def __init__(self, **kw):
argparse.ArgumentParser.__init__(self, **kw)
self.add_argument(
'output',
nargs='?',
type=argparse.FileType('w'),
default=sys.stdout
)
self.add_argument(
'--log', '-l',
type=argparse.FileType('w'),
default=sys.stderr
)
self.add_argument(
'--jobs', '-j',
dest="job_count",
type=self._nonnegative_int,
action="store",
default=multiprocessing.cpu_count()
)
self.add_argument(
'--x-cells', '-X',
type=self._nonnegative_int,
action="store",
default=None
)
self.add_argument(
'--y-cells', '-Y',
type=self._nonnegative_int,
action="store",
default=None
)
self.add_argument(
'--width', '-W',
type=self._nonnegative_int,
action="store",
default=512
)
self.add_argument(
'--height', '-H',
type=self._nonnegative_int,
action="store",
default=512
)
self.add_argument(
'--units', '-U',
action="store",
default="px"
)
self.add_argument(
'--stroke-width', '-s',
type=float,
action="store",
default=0.5
)
self.add_argument(
'--value-low', '-vl',
type=float,
action="store",
default=0.05
)
self.add_argument(
'--value-high', '-vh',
type=float,
action="store",
default=0.95
)
self.add_argument(
'--cell-z-coord', '-cz',
type=float,
action="store",
default=0.0
)
self.add_argument(
'--scale', '-S',
type=float,
action="store",
default=0.9
)
self.add_argument(
'--scale-mode', '-Q',
type=str,
choices=["constant", "linear", "sqrt", "pow2", "exp", "sigmoid"],
action="store",
default="constant"
)
self.add_argument(
'--seed', '-rs',
type=float,
action="store",
default=None
)
self.add_argument(
'--rim-seed', '-Rs',
type=float,
action="store",
default=None
)
self.add_argument(
'--transformable', '-T',
action="store_true",
default=False
)
self.add_argument(
'--color-mode', '-M',
type=str,
choices=["grayscale", "cell-coord", "image-rgb"],
action="store",
default="grayscale"
)
self.add_argument(
'--cell-mode', '-C',
type=str,
choices=["full", "scaled", "flagstone","pebble", "worley"],
action="store",
default="full"
)
self.add_argument(
'--offs-mode', '-O',
type=str,
choices=["default", "honeycomb-x", "honeycomb-y"],
action="store",
default="default"
)
self.add_argument(
'--image', '-i',
dest="image_path",
type=os.path.realpath,
action="store",
default=None
)
self.add_argument(
'--verbose', '-v',
action="store_true",
default=False
)
# --------------------------------------------------------------------------
def process_parsed_options(self, options):
if options.transformable:
if options.width != options.height:
self.error("width and height must be the same in transformable mode")
if options.x_cells != options.y_cells:
self.error("X-cells and Y-cells must be the same in transformable mode")
if options.image_path is not None:
options.image = ImageSampler.from_file(
options.image_path,
options.x_cells,
options.y_cells
)
if options.x_cells is None:
options.x_cells = options.image.width()
if options.y_cells is None:
options.y_cells = options.image.height()
else:
options.image = NoImageSampler()
if options.x_cells is None:
options.x_cells = 32
if options.y_cells is None:
options.y_cells = 32
if options.cell_mode in ["worley"]:
options.need_neighbors = True
options.job_count = 1
else:
options.need_neighbors = False
return options
# --------------------------------------------------------------------------
def parse_args(self):
return self.process_parsed_options(
argparse.ArgumentParser.parse_args(self)
)
# ------------------------------------------------------------------------------
def make_argument_parser():
return VoronoiArgumentParser(
prog="voronoi-svg",
description="""
            Generates SVG images of randomized Voronoi diagrams
"""
)
# ------------------------------------------------------------------------------
class Renderer(object):
# --------------------------------------------------------------------------
def grayscale_color_str(self, v):
c = "%02x" % int(255*v)
return "#"+3*c
# --------------------------------------------------------------------------
def cell_offset(self, x, y):
cy = (y+self.y_cells)%self.y_cells
cx = (x+self.x_cells)%self.x_cells
return self.cell_offsets.get(cx, cy)
# --------------------------------------------------------------------------
def cell_value(self, x, y):
cy = (y+self.y_cells)%self.y_cells
cx = (x+self.x_cells)%self.x_cells
return self.cell_values.get(cx, cy)
# --------------------------------------------------------------------------
def cell_grayscale_color(self, x, y):
cv = self.cell_value(x, y)
v = self.value_low + cv*(self.value_high-self.value_low)
return self.grayscale_color_str(v)
# --------------------------------------------------------------------------
def cell_coord_color(self, x, y):
x = (x + self.x_cells) % self.x_cells
y = (y + self.y_cells) % self.y_cells
r = int((256*x)/self.x_cells)
g = int((256*y)/self.y_cells)
b = int((256*self.cell_z_coord))
return "#%02x%02x%02x" % (r, g, b)
# --------------------------------------------------------------------------
def cell_image_color(self, x, y):
r, g, b = self.image.get_pixel(x, y)
return "#%02x%02x%02x" % (int(r*255), int(g*255), int(b*255))
# --------------------------------------------------------------------------
def cell_gradient_id(self, x, y, i, j):
s = "grad%d_%d" % (
(y+3) * (self.x_cells + 6) + (x+3),
(y+j+3) * (self.x_cells + 6) + (x+i+3)
)
return s
# --------------------------------------------------------------------------
def cell_scale(self, x, y):
coef = 1.0
if self.scale_mode == "linear":
coef = self.cell_value(x, y)
elif self.scale_mode == "sqrt":
coef = math.sqrt(self.cell_value(x, y))
elif self.scale_mode == "pow2":
coef = math.pow(self.cell_value(x, y), 2)
elif self.scale_mode == "exp":
coef = math.exp(self.cell_value(x, y)) / math.exp(1)
elif self.scale_mode == "sigmoid":
coef = 0.5 - 0.5*math.cos(self.cell_value(x, y)*math.pi)
return self.scale * coef
# --------------------------------------------------------------------------
def full_cell_element_str(self, x, y, unused, corners, offs):
clist = ["%.3f %.3f" % (c[0], c[1]) for c in corners]
pathstr = "M"+" L".join(clist)+" Z"
yield """
<path d="%(def)s" stroke="%(color)s" fill="%(color)s"/>\n""" % {
"def": pathstr,
"color": self.cell_color(x, y)
}
# --------------------------------------------------------------------------
def scaled_cell_element_str(self, x, y, center, corners, offs):
m = set_center(corners)
newcorners = [segment_point(m, c, self.cell_scale(x, y)) for c in corners]
yield self.full_cell_element_str(x, y, center, newcorners);
# --------------------------------------------------------------------------
def flagstone_cell_element_str(self, x, y, center, corners, offs):
zcorners = zip(corners, corners[1:] + [corners[0]])
c = self.cell_value(x, y)
newcorners = [segment_point(a, b, c) for (a, b) in zcorners]
yield self.scaled_cell_element_str(x, y, center, newcorners);
# --------------------------------------------------------------------------
def pebble_cell_element_str(self, x, y, center, corners, offs):
m = set_center(corners)
apoints = [segment_point(m, c, self.cell_scale(x, y)) for c in corners]
bpoints = apoints[1:] + [apoints[0]]
c = self.cell_value(x, y)
zpoints = zip(apoints, bpoints)
cpoints = [segment_point(a, b, c) for (a, b) in zpoints]
dpoints = cpoints[1:] + [cpoints[0]]
zpoints = zip(bpoints, dpoints)
cfmt = lambda c : "%.3f %.3f" % (c[0], c[1])
clist = ["%s, %s" % (cfmt(b), cfmt(d)) for (b, d) in zpoints]
pathstr = "M%s Q" % cfmt(cpoints[0])+" Q".join(clist)+" Z"
yield """<path d="%(def)s" stroke="%(color)s" fill="%(color)s"/>\n""" % {
"def": pathstr,
"color": self.cell_color(x, y)
}
# --------------------------------------------------------------------------
def worley_cell_element_str(self, x, y, center, corners, offs):
n = len(corners)
for t in range(n):
i, j = offs[t]
verts = (center, corners[t], corners[(t+1)%n])
clist = ["%.3f %.3f" % (v[0], v[1]) for v in verts]
pathstr = "M"+" L".join(clist)+" Z"
yield """<path d="%(def)s" stroke="url(#%(gref)s)" fill="url(#%(gref)s)"/>\n""" % {
"def": pathstr,
"gref": self.cell_gradient_id(x, y, i, j)
}
# --------------------------------------------------------------------------
def __init__(self):
useropts = make_argument_parser().parse_args()
for k, v in useropts.__dict__.items():
self.__dict__[k] = v
if self.color_mode == "grayscale":
self.cell_color = lambda x, y: self.cell_grayscale_color(x, y)
elif self.color_mode == "cell-coord":
self.cell_color = lambda x, y: self.cell_coord_color(x, y)
elif self.color_mode == "image-rgb":
self.cell_color = lambda x, y: self.cell_image_color(x, y)
if self.cell_mode == "full":
self.cell_element_str = self.full_cell_element_str
elif self.cell_mode == "scaled":
self.cell_element_str = self.scaled_cell_element_str
elif self.cell_mode == "flagstone":
self.cell_element_str = self.flagstone_cell_element_str
elif self.cell_mode == "pebble":
self.cell_element_str = self.pebble_cell_element_str
elif self.cell_mode == "worley":
self.cell_element_str = self.worley_cell_element_str
self.cell_values = RandomCellValues(
self,
self.x_cells,
self.y_cells
)
rco = RandomCellOffsets(
self,
self.x_cells,
self.y_cells
)
if self.offs_mode == "honeycomb-x":
self.cell_offsets = HoneycombXCellOffsets(
self,
rco,
self.x_cells,
self.y_cells
)
elif self.offs_mode == "honeycomb-y":
self.cell_offsets = HoneycombYCellOffsets(
self,
rco,
self.x_cells,
self.y_cells
)
else:
self.cell_offsets = ImageContourCellOffsets(
self,
rco,
self.x_cells,
self.y_cells
)
self.values = dict()
self.values["width"] = self.width
self.values["height"] = self.height
self.values["wunit"] = self.units
self.values["hunit"] = self.units
self.cell_fmt = "%%%dd %%%dd\n" % (
int(math.log10(self.x_cells)+1),
int(math.log10(self.y_cells)+1)
)
# ------------------------------------------------------------------------------
def cell_world_coord(renderer, x, y):
c = renderer.cell_offset(x, y)
return numpy.array((
(x+c[0])*(renderer.width/renderer.x_cells),
(y+c[1])*(renderer.height/renderer.y_cells)
))
# ------------------------------------------------------------------------------
def cell_value(renderer, x, y):
    return renderer.cell_value(x, y)
# ------------------------------------------------------------------------------
def cell_color(renderer, x, y):
    return renderer.grayscale_color_str(
renderer.value_low+
cell_value(renderer, x, y)*
(renderer.value_high-renderer.value_low)
)
# ------------------------------------------------------------------------------
def offs_cell_world_coord(renderer, x, y, o):
return cell_world_coord(renderer, x+o[0], y+o[1])
# ------------------------------------------------------------------------------
def make_cell(renderer, x, y):
owc = cell_world_coord(renderer, x, y)
offsets = []
for j in range(-2, 3):
for i in range(-2, 3):
if j != 0 or i != 0:
offsets.append((i, j))
loffs = len(offsets)
cuts = []
for o in offsets:
cwc = offs_cell_world_coord(renderer, x, y, o)
sm = segment_midpoint(owc, cwc)
sn = segment_normal(owc, cwc)
cuts.append((sm, sn))
assert loffs == len(cuts)
intersections = []
for cj in range(loffs):
for ci in range(cj+1, loffs):
t = line_intersect_param(cuts[cj], cuts[ci])
if t is not None:
intersections.append((cuts[cj][0]+cuts[cj][1]*t, set([ci, cj])))
corners_and_cuts = []
for isc, cus in intersections:
seg = (owc, isc-owc)
eps = 0.001
skip = False
for cut in cuts:
t = line_intersect_param(seg, cut)
if t is not None and t >= 0 and t < 1-eps:
skip = True
break
if not skip:
corners_and_cuts.append((isc, cus))
def corner_angle(p):
v = p[0] - owc
return math.atan2(v[1], v[0])
sorted_corners_and_cuts = sorted(corners_and_cuts, key=corner_angle)
corners = []
neighbors = []
caclen = len(sorted_corners_and_cuts)
for c in range(caclen):
co0, cu0 = sorted_corners_and_cuts[c]
co1, cu1 = sorted_corners_and_cuts[(c+1)%caclen]
cu = cu0.intersection(cu1)
corners.append(co0)
if renderer.need_neighbors:
assert len(cu) == 1
neighbors.append(offsets[cu.pop()])
if renderer.need_neighbors:
assert len(corners) == len(neighbors)
return owc, corners, neighbors
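# Note: make_cell builds the Voronoi cell of the seed at (x, y) by cutting with the
# perpendicular bisectors towards the 24 surrounding seeds, keeping only those
# bisector intersection points whose segment back to the seed crosses no other
# bisector, and finally sorting the surviving corners by angle around the seed.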
# ------------------------------------------------------------------------------
def do_make_cell(renderer, job, output_lock):
w = renderer.x_cells + 2
h = renderer.y_cells + 2
k = job
n = w * h
res = []
log = []
def _flush(res, log):
r = str().join(res)
if renderer.verbose:
l = str().join(log)
try:
output_lock.acquire()
renderer.output.write(r)
if renderer.verbose:
renderer.log.write(l)
finally:
output_lock.release()
return ([], [])
try:
while k < n:
y = int(k / w) - 1
x = int(k % w) - 1
center, corners, offs = make_cell(renderer, x, y)
for svg_str in renderer.cell_element_str(x, y, center, corners, offs):
res.append(svg_str)
if renderer.verbose:
log.append(renderer.cell_fmt % (x, y))
else:
log.append(None)
if len(res) >= renderer.job_count:
res, log = _flush(res, log)
k += renderer.job_count
except KeyboardInterrupt:
pass
_flush(res, log)
# ------------------------------------------------------------------------------
def make_gradients(renderer):
w = renderer.x_cells
h = renderer.y_cells
grad_fmt = """<linearGradient gradientUnits="userSpaceOnUse" id="%(gref)s" """+\
"""x1="%(x1)f" y1="%(y1)f" x2="%(x2)f" y2="%(y2)f">\n"""
stop_fmt = """<stop offset="%(soffs)d%%" style="stop-color:%(color)s"/>\n"""
offsets = []
for j in range(-2, 3):
for i in range(-2, 3):
if j != 0 or i != 0:
offsets.append((i, j))
for y in range(-1, h+2):
for x in range(-1, w+2):
for i, j in offsets:
cwc = cell_world_coord(renderer, x, y)
owc = cell_world_coord(renderer, x+i, y+j)
vec = cwc - owc
renderer.output.write(grad_fmt % {
"gref": renderer.cell_gradient_id(x, y, i, j),
"x1": cwc[0],
"y1": cwc[1],
"x2": owc[0],
"y2": owc[1]
})
if renderer.cell_mode == "worley":
renderer.output.write(stop_fmt % {
"soffs": 0.0,
"color": "#%(r)02x%(g)02x%(b)02x%(a)02x" % {
"r": int(255*float((x+w) % w)/w),
"g": int(255*float((y+h) % h)/h),
"a": int(255*renderer.cell_value(x, y)),
"b": 255
}
})
renderer.output.write(stop_fmt % {
"soffs": 50.0,
"color": "#%(r)02x%(g)02x%(b)02x%(a)02x" % {
"r": int(255*float((x+w) % w)/w),
"g": int(255*float((y+h) % h)/h),
"a": int(255*renderer.cell_value(x, y)),
"b": 0
}
})
renderer.output.write("""</linearGradient>\n""")
# ------------------------------------------------------------------------------
def print_svg(renderer):
renderer.output.write("""<?xml version="1.0" encoding="utf8"?>\n""")
renderer.output.write("""<svg xmlns="http://www.w3.org/2000/svg"
xmlns:svg="http://www.w3.org/2000/svg"
width="%(width)s%(wunit)s" height="%(height)s%(hunit)s"
viewBox="0 0 %(width)s %(height)s"
version="1.1"
contentScriptType="text/ecmascript"
contentStyleType="text/css"\n>\n""" % renderer.values)
renderer.output.write(
"""<g class="voronoi" stroke-width="%(stroke_width)f">\n""" % {
"stroke_width": renderer.stroke_width
}
)
renderer.output.write("<defs>\n")
if renderer.cell_mode in ["worley"]:
make_gradients(renderer)
renderer.output.write("</defs>\n")
renderer.output.flush()
try:
output_lock = multiprocessing.Lock()
def call_do_make_cell(renderer, job, output_lock):
try:
do_make_cell(renderer, job, output_lock)
except Exception:
sys.stderr.write("failed to generate SVG, please retry\n")
raise SystemExit
tasks = []
for job in range(renderer.job_count):
t = multiprocessing.Process(
target=call_do_make_cell,
args=(renderer, job, output_lock)
)
t.start()
tasks.append(t)
for t in tasks:
t.join()
if t.exitcode is not None and t.exitcode != 0:
return 1
except KeyboardInterrupt:
pass
renderer.output.write("""\n""")
renderer.output.write("""</g>\n""")
renderer.output.write("""</svg>\n""")
return 0
# ------------------------------------------------------------------------------
def main():
renderer = Renderer()
sys.exit(print_svg(renderer))
# ------------------------------------------------------------------------------
if __name__ == "__main__": main()
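# Example invocation (option values are illustrative):
#   ./voronoi-svg.py -X 32 -Y 32 --width 512 --height 512 --cell-mode pebble out.svg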
|
ffn/utils/vector_pb2.py | pgunn/ffn | 266 | 12683137 | # Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: utils/vector.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='utils/vector.proto',
package='ffn.proto',
syntax='proto2',
serialized_pb=_b('\n\x12utils/vector.proto\x12\tffn.proto\" \n\x08Vector2d\x12\t\n\x01x\x18\x01 \x01(\x01\x12\t\n\x01y\x18\x02 \x01(\x01\" \n\x08Vector2i\x12\t\n\x01x\x18\x01 \x01(\x05\x12\t\n\x01y\x18\x02 \x01(\x05\"+\n\x08Vector3d\x12\t\n\x01x\x18\x01 \x01(\x01\x12\t\n\x01y\x18\x02 \x01(\x01\x12\t\n\x01z\x18\x03 \x01(\x01\"+\n\x08Vector3f\x12\t\n\x01x\x18\x01 \x01(\x02\x12\t\n\x01y\x18\x02 \x01(\x02\x12\t\n\x01z\x18\x03 \x01(\x02\"+\n\x08Vector3j\x12\t\n\x01x\x18\x01 \x01(\x03\x12\t\n\x01y\x18\x02 \x01(\x03\x12\t\n\x01z\x18\x03 \x01(\x03\"4\n\x0cVector2dList\x12$\n\x07vectors\x18\x01 \x03(\x0b\x32\x13.ffn.proto.Vector2d\"4\n\x0cVector2iList\x12$\n\x07vectors\x18\x01 \x03(\x0b\x32\x13.ffn.proto.Vector2i\"4\n\x0cVector3dList\x12$\n\x07vectors\x18\x01 \x03(\x0b\x32\x13.ffn.proto.Vector3d\"4\n\x0cVector3fList\x12$\n\x07vectors\x18\x01 \x03(\x0b\x32\x13.ffn.proto.Vector3f\"4\n\x0cVector3jList\x12$\n\x07vectors\x18\x01 \x03(\x0b\x32\x13.ffn.proto.Vector3j')
)
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
_VECTOR2D = _descriptor.Descriptor(
name='Vector2d',
full_name='ffn.proto.Vector2d',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='x', full_name='ffn.proto.Vector2d.x', index=0,
number=1, type=1, cpp_type=5, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='y', full_name='ffn.proto.Vector2d.y', index=1,
number=2, type=1, cpp_type=5, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=33,
serialized_end=65,
)
_VECTOR2I = _descriptor.Descriptor(
name='Vector2i',
full_name='ffn.proto.Vector2i',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='x', full_name='ffn.proto.Vector2i.x', index=0,
number=1, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='y', full_name='ffn.proto.Vector2i.y', index=1,
number=2, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=67,
serialized_end=99,
)
_VECTOR3D = _descriptor.Descriptor(
name='Vector3d',
full_name='ffn.proto.Vector3d',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='x', full_name='ffn.proto.Vector3d.x', index=0,
number=1, type=1, cpp_type=5, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='y', full_name='ffn.proto.Vector3d.y', index=1,
number=2, type=1, cpp_type=5, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='z', full_name='ffn.proto.Vector3d.z', index=2,
number=3, type=1, cpp_type=5, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=101,
serialized_end=144,
)
_VECTOR3F = _descriptor.Descriptor(
name='Vector3f',
full_name='ffn.proto.Vector3f',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='x', full_name='ffn.proto.Vector3f.x', index=0,
number=1, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='y', full_name='ffn.proto.Vector3f.y', index=1,
number=2, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='z', full_name='ffn.proto.Vector3f.z', index=2,
number=3, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=146,
serialized_end=189,
)
_VECTOR3J = _descriptor.Descriptor(
name='Vector3j',
full_name='ffn.proto.Vector3j',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='x', full_name='ffn.proto.Vector3j.x', index=0,
number=1, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='y', full_name='ffn.proto.Vector3j.y', index=1,
number=2, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='z', full_name='ffn.proto.Vector3j.z', index=2,
number=3, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=191,
serialized_end=234,
)
_VECTOR2DLIST = _descriptor.Descriptor(
name='Vector2dList',
full_name='ffn.proto.Vector2dList',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='vectors', full_name='ffn.proto.Vector2dList.vectors', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=236,
serialized_end=288,
)
_VECTOR2ILIST = _descriptor.Descriptor(
name='Vector2iList',
full_name='ffn.proto.Vector2iList',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='vectors', full_name='ffn.proto.Vector2iList.vectors', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=290,
serialized_end=342,
)
_VECTOR3DLIST = _descriptor.Descriptor(
name='Vector3dList',
full_name='ffn.proto.Vector3dList',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='vectors', full_name='ffn.proto.Vector3dList.vectors', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=344,
serialized_end=396,
)
_VECTOR3FLIST = _descriptor.Descriptor(
name='Vector3fList',
full_name='ffn.proto.Vector3fList',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='vectors', full_name='ffn.proto.Vector3fList.vectors', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=398,
serialized_end=450,
)
_VECTOR3JLIST = _descriptor.Descriptor(
name='Vector3jList',
full_name='ffn.proto.Vector3jList',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='vectors', full_name='ffn.proto.Vector3jList.vectors', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=452,
serialized_end=504,
)
_VECTOR2DLIST.fields_by_name['vectors'].message_type = _VECTOR2D
_VECTOR2ILIST.fields_by_name['vectors'].message_type = _VECTOR2I
_VECTOR3DLIST.fields_by_name['vectors'].message_type = _VECTOR3D
_VECTOR3FLIST.fields_by_name['vectors'].message_type = _VECTOR3F
_VECTOR3JLIST.fields_by_name['vectors'].message_type = _VECTOR3J
DESCRIPTOR.message_types_by_name['Vector2d'] = _VECTOR2D
DESCRIPTOR.message_types_by_name['Vector2i'] = _VECTOR2I
DESCRIPTOR.message_types_by_name['Vector3d'] = _VECTOR3D
DESCRIPTOR.message_types_by_name['Vector3f'] = _VECTOR3F
DESCRIPTOR.message_types_by_name['Vector3j'] = _VECTOR3J
DESCRIPTOR.message_types_by_name['Vector2dList'] = _VECTOR2DLIST
DESCRIPTOR.message_types_by_name['Vector2iList'] = _VECTOR2ILIST
DESCRIPTOR.message_types_by_name['Vector3dList'] = _VECTOR3DLIST
DESCRIPTOR.message_types_by_name['Vector3fList'] = _VECTOR3FLIST
DESCRIPTOR.message_types_by_name['Vector3jList'] = _VECTOR3JLIST
Vector2d = _reflection.GeneratedProtocolMessageType('Vector2d', (_message.Message,), dict(
DESCRIPTOR = _VECTOR2D,
__module__ = 'utils.vector_pb2'
# @@protoc_insertion_point(class_scope:ffn.proto.Vector2d)
))
_sym_db.RegisterMessage(Vector2d)
Vector2i = _reflection.GeneratedProtocolMessageType('Vector2i', (_message.Message,), dict(
DESCRIPTOR = _VECTOR2I,
__module__ = 'utils.vector_pb2'
# @@protoc_insertion_point(class_scope:ffn.proto.Vector2i)
))
_sym_db.RegisterMessage(Vector2i)
Vector3d = _reflection.GeneratedProtocolMessageType('Vector3d', (_message.Message,), dict(
DESCRIPTOR = _VECTOR3D,
__module__ = 'utils.vector_pb2'
# @@protoc_insertion_point(class_scope:ffn.proto.Vector3d)
))
_sym_db.RegisterMessage(Vector3d)
Vector3f = _reflection.GeneratedProtocolMessageType('Vector3f', (_message.Message,), dict(
DESCRIPTOR = _VECTOR3F,
__module__ = 'utils.vector_pb2'
# @@protoc_insertion_point(class_scope:ffn.proto.Vector3f)
))
_sym_db.RegisterMessage(Vector3f)
Vector3j = _reflection.GeneratedProtocolMessageType('Vector3j', (_message.Message,), dict(
DESCRIPTOR = _VECTOR3J,
__module__ = 'utils.vector_pb2'
# @@protoc_insertion_point(class_scope:ffn.proto.Vector3j)
))
_sym_db.RegisterMessage(Vector3j)
Vector2dList = _reflection.GeneratedProtocolMessageType('Vector2dList', (_message.Message,), dict(
DESCRIPTOR = _VECTOR2DLIST,
__module__ = 'utils.vector_pb2'
# @@protoc_insertion_point(class_scope:ffn.proto.Vector2dList)
))
_sym_db.RegisterMessage(Vector2dList)
Vector2iList = _reflection.GeneratedProtocolMessageType('Vector2iList', (_message.Message,), dict(
DESCRIPTOR = _VECTOR2ILIST,
__module__ = 'utils.vector_pb2'
# @@protoc_insertion_point(class_scope:ffn.proto.Vector2iList)
))
_sym_db.RegisterMessage(Vector2iList)
Vector3dList = _reflection.GeneratedProtocolMessageType('Vector3dList', (_message.Message,), dict(
DESCRIPTOR = _VECTOR3DLIST,
__module__ = 'utils.vector_pb2'
# @@protoc_insertion_point(class_scope:ffn.proto.Vector3dList)
))
_sym_db.RegisterMessage(Vector3dList)
Vector3fList = _reflection.GeneratedProtocolMessageType('Vector3fList', (_message.Message,), dict(
DESCRIPTOR = _VECTOR3FLIST,
__module__ = 'utils.vector_pb2'
# @@protoc_insertion_point(class_scope:ffn.proto.Vector3fList)
))
_sym_db.RegisterMessage(Vector3fList)
Vector3jList = _reflection.GeneratedProtocolMessageType('Vector3jList', (_message.Message,), dict(
DESCRIPTOR = _VECTOR3JLIST,
__module__ = 'utils.vector_pb2'
# @@protoc_insertion_point(class_scope:ffn.proto.Vector3jList)
))
_sym_db.RegisterMessage(Vector3jList)
# @@protoc_insertion_point(module_scope)
|