max_stars_repo_path (string, 4-245 chars) | max_stars_repo_name (string, 7-115 chars) | max_stars_count (int64, 101-368k) | id (string, 2-8 chars) | content (string, 6-1.03M chars)
---|---|---|---|---|
host/greatfet/interfaces/jtag.py | grvvy/greatfet | 328 | 12743196 | #
# This file is part of GreatFET
#
from __future__ import print_function
import sys
from warnings import warn
from ..interface import GreatFETInterface
from ..support.bits import bits
from ..protocol.jtag_svf import SVFParser, SVFEventHandler
class JTAGPatternError(IOError):
""" Class for errors that come from a JTAG read not matching the expected response. """
def __init__(self, message, result):
self.result = result
super(JTAGPatternError, self).__init__(message)
# FIXME: should this be an instance of a 'target' class?
class JTAGDevice(GreatFETInterface):
""" Class representing a single device on a JTAG scan chain. """
DESCRIPTION = "no description available"
# A list of supported IDCODEs for the relevant class.
# Used unless the supports_idcode() method is overridden.
SUPPORTED_IDCODES = []
# A list of any GreatFET subcommands that are useful for driving this target;
# for informational use.
SUPPORTED_CONSOLE_COMMANDS = []
@classmethod
def from_idcode(cls, idcode, position_in_chain=0):
""" Attempts to create a JTAGDevice object that fits the provided IDCODE. """
# Assume the generic device class is the most appropriate class for the device, initially.
most_appropriate_class = cls
        # Search each imported subclass for one that supports the provided IDCODE.
for subclass in cls.__subclasses__():
if subclass.supports_idcode(idcode):
most_appropriate_class = subclass
break
# Finally, create an instance of the most appropriate class for this object.
instance = object.__new__(most_appropriate_class)
most_appropriate_class.__init__(instance, idcode, position_in_chain)
return instance
@classmethod
def supports_idcode(cls, idcode):
"""
Returns true iff this class supports the given IDCODE.
This default implementation uses SUPPORTED_IDCODES, but subclasses can override this
for more nuanced behavior.
"""
return idcode in cls.SUPPORTED_IDCODES
@classmethod
def supported_console_commands(cls):
""" Returns a list of GreatFET subcommands that provide access to the given class. """
return cls.SUPPORTED_CONSOLE_COMMANDS
def idcode(self):
""" Returns this device's IDCODE. """
return self._idcode
def description(self):
""" Returns a short description of the device. """
return self.DESCRIPTION
def __init__(self, idcode, position_in_chain):
self._idcode = idcode
class JTAGChain(GreatFETInterface):
""" Class representing a JTAG scan-chain interface. """
# Short name for this type of interface.
INTERFACE_SHORT_NAME = "jtag"
#
# Simple mapping that captures the various TAP FSM states.
# Names from the JTAG SVF specification are used directly, so we can easily parse SVF files.
#
STATE_PROGRESSIONS = {
'RESET': {0: 'IDLE', 1: 'RESET' },
'IDLE': {0: 'IDLE', 1: 'DRSELECT' },
# Data register path.
'DRSELECT': {0: 'DRCAPTURE', 1: 'IRSELECT' },
'DRCAPTURE': {0: 'DRSHIFT', 1: 'DREXIT1' },
        'DRSHIFT': {0: 'DRSHIFT', 1: 'DREXIT1' },
'DREXIT1': {0: 'DRPAUSE', 1: 'DRUPDATE' },
'DRPAUSE': {0: 'DRPAUSE', 1: 'DREXIT2' },
'DREXIT2': {0: 'DRSHIFT', 1: 'DRUPDATE' },
'DRUPDATE': {0: 'IDLE', 1: 'DRSELECT' },
# Instruction register path.
'IRSELECT': {0: 'IRCAPTURE', 1: 'RESET' },
'IRCAPTURE': {0: 'IRSHIFT', 1: 'IREXIT1' },
'IRSHIFT': {0: 'IRSHIFT', 1: 'IREXIT1' },
'IREXIT1': {0: 'IRPAUSE', 1: 'IRUPDATE' },
'IRPAUSE': {0: 'IRPAUSE', 1: 'IREXIT2' },
'IREXIT2': {0: 'IRSHIFT', 1: 'IRUPDATE' },
'IRUPDATE': {0: 'IDLE', 1: 'DRSELECT' },
}
def __init__(self, board, max_frequency=405e3):
""" Creates a new JTAG scan-chain interface.
        Parameters:
            board         -- the GreatFET board we're working with.
            max_frequency -- the maximum frequency, in Hz, at which we should attempt to scan out data.
"""
# Grab our JTAG API object.
self.api = board.apis.jtag
# Assume we're starting our chain in 'IDLE'.
self.state = 'IDLE'
# Configure our chain to run at the relevant frequency.
self.frequency = int(max_frequency)
self.max_bits_per_scan = self.api.configure(self.frequency)
def set_frequency(self, max_frequency):
""" Sets the operating frequency of future transactions on this JTAG chain. """
self.frequency = int(max_frequency)
self.api.configure(self.frequency)
def _progress_state(self, tms_value):
""" Adjusts our internal model of the TAP FSM to account for an applied TMS value. """
# Normalize our state to always be 1 or 0.
tms_value = 1 if tms_value else 0
# Move our state to the next state per our TAP FSM.
self.state = self.STATE_PROGRESSIONS[self.state][tms_value]
def pulse_tms(self, cycles=1, asserted=True):
""" Asserts or de-asserts TMS for the given number of cycles; used for navigating the TAP FSM. """
        # Run the clock one cycle at a time, applying the provided TMS value on each cycle.
for _ in range(cycles):
self.api.run_clock(1, asserted)
self._progress_state(asserted)
def initialize_chain(self):
""" Put the scan chain into its initial state, allowing fresh JTAG communications. """
# Pulse the TMS line five times -- this brings us into the TEST_RESET state, which resets the test logic.
self.pulse_tms(5)
# We now should know that we're in the RESET state.
assert(self.state == 'RESET')
def _receive_data(self, bits_to_scan, advance_state=False):
""" Performs a raw scan-in of data, and returns the result. """
# Perform our actual data scan-in.
# TODO: break larger-than-maximum transactions into smaller ones.
result = self.api.scan_in(bits_to_scan, advance_state)
# Once we're complete, advance our state, if necessary.
if advance_state:
self._progress_state(True)
return result
def _pad_data_to_length(self, length_in_bits, data=None):
""" Pads a given data set to a given length, in bits. """
# Compute how many bytes we need the data to be.
target_length_bytes = (length_in_bits + 7) // 8
# If our data doesn't need padding, return it directly.
if data and (len(data) >= target_length_bytes):
return data
# Create a mutable array of data; and add any data we have.
padded = bytearray()
if data:
padded.extend(data)
# Figure out how much padding we need.
padding_necessary = target_length_bytes - len(padded)
        padded.extend(b"\0" * padding_necessary)
# Return our padded data.
return padded
def _transmit_data(self, bits_to_scan, data=None, advance_state=False):
""" Performs a raw scan-out of data, discarding any result. """
# Pad our data to the relevant length.
        data = self._pad_data_to_length(bits_to_scan, data)
        # Perform our actual data scan-out.
# TODO: break larger-than-maximum transactions into smaller ones.
self.api.scan_out(bits_to_scan, advance_state, data)
# Once we're complete, advance our state, if necessary.
if advance_state:
self._progress_state(True)
def _scan_data(self, bits_to_scan, byte_data, advance_state=False):
""" Performs a raw scan-in of data, and returns the result. """
# Perform our actual data scan-in.
# TODO: break larger-than-maximum transactions into smaller ones.
result = self.api.scan(bits_to_scan, advance_state, byte_data)
# Once we're complete, advance our state, if necessary.
if advance_state:
self._progress_state(True)
return result
def _next_hop_towards(self, state):
""" Identify the next TMS value we should apply to move towards the given state. """
# Special case: if we're headed to RESET, then our next hop is always 1.
if state == 'RESET':
return 1
# Special case: if we're in the Select-DR state, we'll steer either towards the instruction column ('1')
# or data column ('0') based on the target state.
if self.state == 'DRSELECT':
return 1 if 'IR' in state else 0
# Grab the next states for TMS values of one and zero.
next_states = self.STATE_PROGRESSIONS[self.state]
# We'll apply a simple heuristic to advance through the TAP FSM.
        # First, we'll identify whether providing a '1' would cause us to loop back towards the current state,
# which will occur if we'd stay in the same state with a '1', or if we'd move out of the core FSM.
towards_one_would_loop = (next_states[1] == self.state) or (next_states[1] == 'RESET')
# Next, we'll apply the following simple heuristics:
# - If pulsing clock with TMS=0 would land us in the right state, do so.
        # - If pulsing clock with TMS=1 would cause us to self-loop, pulse clock with TMS=0.
# - Otherwise, pulse clock with TMS=1, as TMS=1 generally moves us through the TAP FSM.
target_state_is_towards_zero = (next_states[0] == state)
return 0 if (target_state_is_towards_zero or towards_one_would_loop) else 1
def _ensure_in_state(self, state):
"""
Ensures the JTAG TAP FSM is in the given state.
        If we're not, progress the TAP FSM by pulsing TMS until we reach the relevant state.
"""
# Progress through the TAP FSM until we're in the right state.
while self.state != state:
# Identify the direction we'll need to move in order to move closer to our target state...
next_hop = self._next_hop_towards(state)
# ... and apply it.
self.pulse_tms(asserted=next_hop)
def move_to_state(self, state_name):
""" Moves the JTAG scan chain to the relevant state.
Parameters:
state_name: The target state to wind up in, as a string. States are accepted in the format
defined in the JTAG SVF standard, and thus should be one of:
"RESET", "IDLE", "DRSELECT", "DRCAPTURE", "DRSHIFT", "DREXIT1", "DRPAUSE",
"DREXIT2", "DRUPDATE", "IRSELECT", "IRCAPTURE", "IRSHIFT", "IREXIT1", "IRPAUSE",
"IREXIT2", "IRUPDATE"
"""
self._ensure_in_state(state_name.strip())
def _shift_while_in_state(self, state, tdi=None, length=None, ignore_response=False, advance_state=False, byteorder='big'):
""" Shifts data through the chain while in the given state. """
# Normalize our data into a bitstring type that we can easily work with.
# This both ensures we have a known format; and implicitly handles things like padding.
if tdi:
data_bits = bits(tdi, length, byteorder=byteorder)
# Convert from our raw data to the format we'll need to send down to the device.
bit_length = len(data_bits)
data_bytes = data_bits.to_bytes(byteorder='big')
else:
if length is None:
raise ValueError("either TDI or length must be provided!")
bit_length = length
        # Move into the relevant shift state.
self._ensure_in_state(state)
# Finally, issue the transaction itself.
if tdi and ignore_response:
self._transmit_data(bit_length, data_bytes, advance_state)
return None
elif tdi:
result = self._scan_data(bit_length, data_bytes, advance_state)
else:
result = self._receive_data(bit_length, advance_state)
# Return our data, converted back up to bits.
return bits(result, bit_length)
def _validate_response(self, response_bits, tdo=None, mask=None):
""" Validates the response provided by a _shift_while_in_state call, in the traditional JTAG SVF form. """
# If we don't have any data to validate against, vacuously succeed.
if (not tdo) or (not response_bits):
return
# If we have a mask, mask both the TDO value and response, and then compare.
masked_response = mask & response_bits if mask else response_bits
masked_tdo = mask & tdo if mask else tdo
if masked_response != masked_tdo:
raise JTAGPatternError("Scan result did not match expected pattern: {} != {} (expected)!".format(
masked_response, masked_tdo), response_bits)
def shift_data(self, tdi=None, length=None, tdo=None, mask=None,
ignore_response=False, advance_state=False, byteorder='big'):
""" Shifts data through the scan-chain's data register.
Parameters:
tdi -- The bits to be scanned out via TDI. Can be a support.bits() object, a string of 1's and 0's,
an integer, or bytes. If this is an integer or bytes object, the length argument must be provided.
                   If omitted or None, a string of all zeroes will be used.
length -- The length of the transaction to be performed, in bits. This can be longer than the TDI data;
in which case the transmission will be padded with zeroes.
tdo -- The expected data to be received from the scan operation. If this is provided, the read result
will be compared to this data (optionally masked by mask), and an exception will be thrown if
the data doesn't match this value. Designed to behave like the SVF TDO field.
mask -- If provided, the given tdo argument will be masked, such that only bits corresponding to a '1'
                    in this mask argument are considered when checking against 'tdo'. This is the behavior defined
in the SVF standard; see it for more information.
            ignore_response -- If set, the returned response will always be empty, and tdo and mask will be ignored.
                               This allows for a slight performance optimization, as we don't have to shuttle data back.
byteorder -- The byteorder to consider the tdi value in; if bytes are provided.
Returns the bits read, or None if the response is ignored.
"""
# Perform the core shift, and gather the response.
response = self._shift_while_in_state('DRSHIFT', tdi=tdi, length=length, ignore_response=ignore_response,
advance_state=advance_state, byteorder=byteorder)
# Validate our response against any provided constraints.
self._validate_response(response, tdo=tdo, mask=mask)
return response
def shift_instruction(self, tdi=None, length=None, tdo=None, mask=None,
ignore_response=False, advance_state=False, byteorder='big'):
""" Shifts data through the chain's instruction register.
Parameters:
tdi -- The bits to be scanned out via TDI. Can be a support.bits() object, a string of 1's and 0's,
an integer, or bytes. If this is an integer or bytes object, the length argument must be provided.
                   If omitted or None, a string of all zeroes will be used.
length -- The length of the transaction to be performed, in bits. This can be longer than the TDI data;
in which case the transmission will be padded with zeroes.
tdo -- The expected data to be received from the scan operation. If this is provided, the read result
will be compared to this data (optionally masked by mask), and an exception will be thrown if
the data doesn't match this value. Designed to behave like the SVF TDO field.
mask -- If provided, the given tdo argument will be masked, such that only bits corresponding to a '1'
                    in this mask argument are considered when checking against 'tdo'. This is the behavior defined
in the SVF standard; see it for more information.
            ignore_response -- If set, the returned response will always be empty, and tdo and mask will be ignored.
                               This allows for a slight performance optimization, as we don't have to shuttle data back.
byteorder -- The byteorder to consider the tdi value in; if bytes are provided.
Returns the bits read, or None if the response is ignored.
"""
# Perform the core shift, and gather the response.
response = self._shift_while_in_state('IRSHIFT', tdi=tdi, length=length, ignore_response=ignore_response,
advance_state=advance_state, byteorder=byteorder)
# Validate our response against any provided constraints.
self._validate_response(response, tdo=tdo, mask=mask)
return response
def run_test(self, cycles, from_state='IDLE', end_state=None):
""" Places the device into the RUNTEST/IDLE (or provided) state, and pulses the JTAG clock.
        Parameters:
cycles -- The number of cycles for which the device should remain in the given state.
from_state -- The state in which the cycles should be spent; defaults to IDLE.
end_state -- The state in which the device should be placed after the test is complete.
"""
if from_state:
self.move_to_state(from_state)
self.api.run_clock(cycles, False, timeout=0)
        if end_state:
self.move_to_state(end_state)
def _create_device_for_idcode(self, idcode, position_in_chain):
""" Creates a JTAGDevice object for the relevant idcode. """
return JTAGDevice.from_idcode(idcode, position_in_chain)
def enumerate(self, return_idcodes=False):
""" Initializes the JTAG TAP FSM, and attempts to identify all connected devices.
Parameters:
return_idcodes -- If true, this method will return a list of IDCodes rather than JTAGDevice objects.
Returns a list of JTAGDevices (return_idcodes=False) or JTAG IDCODES (return_idcodes=True).
"""
devices = []
# Place the JTAG TAP FSM into its initial state, so we can perform enumeration.
self.initialize_chain()
        # Resetting the TAP FSM automatically loads each device's instruction register with the
        # IDCODE instruction, which in turn places each device's 32-bit IDCODE into the data
        # register chain. Accordingly, we can enumerate the chain just by scanning data out with
        # shift_data; after the last device, we'll read back a null IDCODE (32 bits of zeroes).
position_in_chain = 0
while True:
# Attempt to read a 32-bit IDCODE from the device.
raw_idcode = self.shift_data(length=32)
idcode = int.from_bytes(raw_idcode, byteorder='little')
# If our IDCODE is all 1's, and we have no devices, we seem to be stuck at one.
# Warn the user.
if idcode == 0xFFFFFFFF and not devices:
warn("TDI appears to be stuck at '1'. Check your wiring?")
# If we've received our null IDCODE, we've finished enumerating the chain.
# We'll also treat an all-1's IDCODE as a terminator, as this invalid IDCODE occurs
# if TDI is stuck-at-one.
if idcode in (0x00000000, 0xFFFFFFFF):
self.pulse_tms(asserted=True)
break
if return_idcodes:
devices.append(idcode)
else:
devices.append(self._create_device_for_idcode(idcode, position_in_chain))
position_in_chain += 1
return devices
def play_svf_instructions(self, svf_string, log_function=None, error_log_function=print):
""" Executes a string of JTAG SVF instructions, strumming the relevant scan chain.
svf_string -- A string containing valid JTAG SVF instructions to be executed.
log_function -- If provided, this function will be called with verbose operation information.
            error_log_function -- This function will be used to print information about errors that occur.
"""
# Create the parser that will run our SVF file, and run our SVF.
parser = SVFParser(svf_string, GreatfetSVFEventHandler(self, log_function, error_log_function))
parser.parse_file()
def play_svf_file(self, svf_file, log_function=None, error_log_function=print):
""" Executes the JTAG SVF instructions from the given file.
svf_file -- A filename or file object pointing to a JTAG SVF file.
log_function -- If provided, this function will be called with verbose operation information.
            error_log_function -- This function will be used to print information about errors that occur.
"""
close_after = False
if isinstance(svf_file, str):
svf_file = open(svf_file, 'r')
close_after = True
self.play_svf_instructions(svf_file.read(), log_function=log_function, error_log_function=error_log_function)
if close_after:
svf_file.close()
class GreatfetSVFEventHandler(SVFEventHandler):
""" SVF event handler that delegates handling of SVF instructions to a GreatFET JTAG interface. """
def __init__(self, interface, verbose_log_function=None, error_log_function=print):
""" Creates a new SVF event handler.
Parameters:
interface: The GreatFET JTAG interface that will execute our JTAG commands.
"""
if verbose_log_function is None:
verbose_log_function = lambda string : None
if error_log_function is None:
error_log_function = print
self.interface = interface
self.log = verbose_log_function
self.log_error = error_log_function
        # Assume that after a data / instruction shift operation we'll wind up in the
        # IDLE state, per the SVF standard. The SVF file can override these defaults.
self.end_dr_state = 'IDLE'
self.end_ir_state = 'IDLE'
# By default, don't have any headers or trailers for IR or DR shifts.
# The SVF can override these using the HDR/TDR/HIR/TIR instructions.
nullary_padding = {'tdi': bits(), 'tdo': bits(), 'mask': bits(), }
self.dr_header = nullary_padding.copy()
self.dr_trailer = nullary_padding.copy()
self.ir_header = nullary_padding.copy()
self.ir_trailer = nullary_padding.copy()
# Store default masks for our ShiftIR and ShiftDR instructions.
        self.dr_mask = None
        self.dr_smask = None
self.ir_mask = None
self.ir_smask = None
def svf_frequency(self, frequency):
"""Called when the ``FREQUENCY`` command is encountered."""
self.log (" -- FREQUENCY set to {}".format(frequency))
self.interface.set_frequency(frequency)
def svf_trst(self, mode):
"""Called when the ``TRST`` command is encountered."""
warn('SVF provided TRST command; but this implementation does not yet support driving the TRST line')
def svf_state(self, state, path):
"""Called when the ``STATE`` command is encountered."""
# Visit each state in any intermediate paths provided...
if path:
for intermediate in path:
self.log("STATE; Moving through {}.".format(intermediate))
self.interface.move_to_state(intermediate)
# ... ensuring we end up in the relevant state.
self.log("Moving to {} STATE.".format(state))
self.interface.move_to_state(state)
def svf_endir(self, state):
"""Called when the ``ENDIR`` command is encountered."""
self.log("Moving to {} after each Shift-IR.".format(state))
        self.end_ir_state = state
def svf_enddr(self, state):
"""Called when the ``ENDDR`` command is encountered."""
self.log("Moving to {} after each Shift-DR.".format(state))
        self.end_dr_state = state
def svf_hir(self, **header):
"""Called when the ``HIR`` command is encountered."""
self.log("Applying Shift-IR prefix. ")
self.ir_header = header
    def svf_tir(self, **trailer):
        """Called when the ``TIR`` command is encountered."""
self.log("Applying Shift-IR suffix. ")
self.ir_trailer = trailer
def svf_hdr(self, **header):
"""Called when the ``HDR`` command is encountered."""
self.log("Applying Shift-DR header. ")
self.dr_header = header
def svf_tdr(self, **trailer):
"""Called when the ``TDR`` command is encountered."""
self.log("Applying Shift-DR suffix. ")
self.dr_trailer = trailer
def svf_sir(self, **data):
"""Called when the ``SIR`` command is encountered."""
# Append our header and trailer to each of our arguments.
arguments = {}
for arg, value in data.items():
header = self.ir_header[arg] if (arg in self.ir_header) else bits()
trailer = self.ir_trailer[arg] if (arg in self.ir_trailer) else bits()
arguments[arg] = (header + value + trailer) if value else None
if data['mask']:
self.ir_mask = data['mask']
if data['smask']:
            self.ir_smask = data['smask']
self.log("Performing SHIFT-IR:")
self.log( "out: {}".format(arguments['tdi']))
self.log( "expected: {}".format(arguments['tdo']))
        self.log( "mask: {}".format(arguments['mask']))
try:
result = self.interface.shift_instruction(tdi=arguments['tdi'], tdo=arguments['tdo'], mask=arguments['mask'])
except JTAGPatternError as e:
self.log( "in: {} [FAIL]\n".format(e.result))
self.log_error("\n\n<!> Failure while performing SHIFT-IR: \n " + str(e))
raise
self.log( "in: {} [OK]\n".format(result))
def svf_sdr(self, **data):
"""Called when the ``SDR`` command is encountered."""
# Append our header and trailer to each of our arguments.
arguments = {}
for arg, value in data.items():
header = self.dr_header[arg] if (arg in self.dr_header) else bits()
trailer = self.dr_trailer[arg] if (arg in self.dr_trailer) else bits()
arguments[arg] = (header + value + trailer) if value else None
        if data['mask']:
            self.dr_mask = data['mask']
        if data['smask']:
            self.dr_smask = data['smask']
self.log("Performing SHIFT-DR:")
self.log( "out: {}".format(arguments['tdi']))
self.log( "expected: {}".format(arguments['tdo']))
        self.log( "mask: {}".format(arguments['mask']))
try:
result = self.interface.shift_data(tdi=arguments['tdi'], tdo=arguments['tdo'], mask=arguments['mask'])
except JTAGPatternError as e:
self.log( "in: {} [FAIL]\n".format(e.result))
self.log_error("\n\n<!> Failure while performing SHIFT-DR: \n " + str(e))
raise
self.log( "in: {} [OK]\n".format(result))
def svf_runtest(self, run_state, run_count, run_clock, min_time, max_time, end_state):
"""Called when the ``RUNTEST`` command is encountered."""
self.log("Running test for {} cycles.".format(run_count))
self.interface.run_test(run_count, from_state=run_state, end_state=end_state)
def svf_piomap(self, mapping):
"""Called when the ``PIOMAP`` command is encountered."""
raise NotImplementedError("This implementation does not yet support PIOMAP.")
def svf_pio(self, vector):
"""Called when the ``PIO`` command is encountered."""
raise NotImplementedError("This implementation does not yet support PIO.")
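# --- Usage sketch (not part of the original module) --------------------------
# A minimal, illustrative example of driving the JTAGChain class above. It
# assumes a GreatFET board is attached and that the host-side `greatfet`
# package exposes a GreatFET() board object whose `apis.jtag` verbs back this
# interface; treat it as a sketch rather than a supported entry point.
if __name__ == "__main__":
    from greatfet import GreatFET
    board = GreatFET()
    chain = JTAGChain(board, max_frequency=405e3)
    # Enumerate the scan chain and print each discovered device.
    for device in chain.enumerate():
        print("0x{:08x}: {}".format(device.idcode(), device.description()))
    # Shift 32 zero bits through the data register and print the response.
    print(chain.shift_data(length=32))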
|
zulip/zulip/send.py | dimisjim/python-zulip-api | 351 | 12743223 | #!/usr/bin/env python3
# zulip-send -- Sends a message to the specified recipients.
import argparse
import logging
import sys
from typing import Any, Dict
import zulip
logging.basicConfig()
log = logging.getLogger("zulip-send")
def do_send_message(client: zulip.Client, message_data: Dict[str, Any]) -> bool:
"""Sends a message and optionally prints status about the same."""
if message_data["type"] == "stream":
log.info(
'Sending message to stream "%s", subject "%s"... '
% (message_data["to"], message_data["subject"])
)
else:
log.info("Sending message to {}... ".format(message_data["to"]))
response = client.send_message(message_data)
if response["result"] == "success":
log.info("Message sent.")
return True
else:
log.error(response["msg"])
return False
def main() -> int:
usage = """zulip-send [options] [recipient...]
Sends a message to specified recipients.
Examples: zulip-send --stream denmark --subject castle -m "Something is rotten in the state of Denmark."
zulip-send <EMAIL> <EMAIL> -m "Conscience doth make cowards of us all."
Specify your Zulip API credentials and server in a ~/.zuliprc file or using the options.
"""
parser = zulip.add_default_arguments(argparse.ArgumentParser(usage=usage))
parser.add_argument(
"recipients", nargs="*", help="email addresses of the recipients of the message"
)
parser.add_argument(
"-m", "--message", help="Specifies the message to send, prevents interactive prompting."
)
group = parser.add_argument_group("Stream parameters")
group.add_argument(
"-s",
"--stream",
dest="stream",
action="store",
help="Allows the user to specify a stream for the message.",
)
group.add_argument(
"-S",
"--subject",
dest="subject",
action="store",
help="Allows the user to specify a subject for the message.",
)
options = parser.parse_args()
if options.verbose:
logging.getLogger().setLevel(logging.INFO)
# Sanity check user data
if len(options.recipients) != 0 and (options.stream or options.subject):
parser.error("You cannot specify both a username and a stream/subject.")
if len(options.recipients) == 0 and (bool(options.stream) != bool(options.subject)):
parser.error("Stream messages must have a subject")
if len(options.recipients) == 0 and not (options.stream and options.subject):
parser.error("You must specify a stream/subject or at least one recipient.")
client = zulip.init_from_options(options)
if not options.message:
options.message = sys.stdin.read()
if options.stream:
message_data = {
"type": "stream",
"content": options.message,
"subject": options.subject,
"to": options.stream,
}
else:
message_data = {
"type": "private",
"content": options.message,
"to": options.recipients,
}
if not do_send_message(client, message_data):
return 1
return 0
if __name__ == "__main__":
sys.exit(main())
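# Illustrative ~/.zuliprc referenced in the usage string above (an assumption
# based on the standard zulip client configuration format, not defined here):
#
#     [api]
#     email=my-bot@example.com
#     key=<api key>
#     site=https://chat.example.com
#
# With that in place, a stream message can be sent as shown in the usage text:
#     zulip-send --stream denmark --subject castle -m "Hello."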
|
src/stage1/data_generator.py | gathierry/FashionAI-KeyPointsDetectionOfApparel | 174 | 12743226 |
import cv2
import numpy as np
import torch
from torch.utils.data import Dataset
from stage1.label_encoder import DataEncoder
class DataGenerator(Dataset):
def __init__(self, config, data, phase='train'):
self.phase = phase
self.data = data
self.config = config
self.encoder = DataEncoder(self.config)
def __getitem__(self, idx):
img = cv2.imread(self.data.get_image_path(idx)) # BGR
bboxes = self.data.get_bbox(idx)
img_h, img_w, _ = img.shape
# data augmentation
if self.phase == 'train':
random_flip = np.random.randint(0, 2)
if random_flip == 1:
img = cv2.flip(img, 1)
x1s = img_w - bboxes[:, 2]
x2s = img_w - bboxes[:, 0]
bboxes[:, 0] = x1s
bboxes[:, 2] = x2s
# min size resizing
scale = self.config.img_max_size / max(img_w, img_h)
img_h2 = int(img_h * scale)
img_w2 = int(img_w * scale)
img = cv2.resize(img, (img_w2, img_h2), interpolation=cv2.INTER_CUBIC)
bboxes *= scale
img = np.transpose(img, (2, 0, 1)).astype(np.float32) # channel, height, width
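        # Swap the first and third channels to convert OpenCV's BGR layout to RGB.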
img[[0, 2]] = img[[2, 0]]
img = img / 255.0
img = (img - self.config.mu) / self.config.sigma
return torch.from_numpy(img), torch.from_numpy(bboxes)
def collate_fn(self, batch):
imgs = [x[0] for x in batch]
bboxes = [x[1] for x in batch]
# Use the same size to accelerate dynamic graph
maxh = self.config.img_max_size #max([img.size(1) for img in imgs])
maxw = self.config.img_max_size #max([img.size(2) for img in imgs])
num_imgs = len(imgs)
pad_imgs = torch.zeros(num_imgs, 3, maxh, maxw)
reg_targets = []
cls_targets = []
for i in range(num_imgs):
img = imgs[i]
pad_imgs[i, :, :img.size(1), :img.size(2)] = img # Pad images to the same size
reg_target, cls_target = self.encoder.encode(bboxes[i], torch.ones([1,]), [maxh, maxw])
reg_targets.append(reg_target)
cls_targets.append(cls_target)
reg_targets = torch.stack(reg_targets) # [batch_size, anchor#, 4]
cls_targets = torch.stack(cls_targets) # [batch_size, anchor#] 0 for neg, 1, 2, 3 ... for different classes
return pad_imgs, reg_targets, cls_targets
def __len__(self):
return self.data.size()
if __name__ == '__main__':
from src.config import Config
from coco import Coco
from torch.utils.data import DataLoader
from time import time
db_path = '/home/storage/lsy/coco/'
config = Config()
train_coco = Coco(db_path, 'train')
train_dataset = DataGenerator(config, train_coco, phase='train')
train_loader = DataLoader(train_dataset,
batch_size=32,
shuffle=True,
num_workers=16,
collate_fn=train_dataset.collate_fn,
pin_memory=True)
t0 = time()
for i, (data, reg_targets, cls_targets) in enumerate(train_loader):
print(data.size(), reg_targets.size(), cls_targets.size())
t1 = time()
|
scripts/emu/simple_ipv6.py | timgates42/trex-core | 956 | 12743237 | from trex.emu.api import *
import argparse
import get_args
class Prof1():
def __init__(self):
self.mac = Mac('00:00:00:70:00:01')
self.def_ns_plugs = {'ipv6' : {'dmac':self.mac.V()}}
self.def_c_plugs = None
def create_profile(self, ns_size, clients_size):
ns_list = []
# create different namespace each time
vport, tci, tpid = 0, [0, 0], [0x00, 0x00]
for j in range(vport, ns_size + vport):
ns_key = EMUNamespaceKey(vport = j,
tci = tci,
tpid = tpid)
ns = EMUNamespaceObj(ns_key = ns_key, def_c_plugs = self.def_c_plugs)
mac = self.mac
ipv6 = Ipv6("2001:DB8:1::2")
# create a different client each time
for i in range(clients_size):
client = EMUClientObj(mac = mac[i].V(),
ipv6 = ipv6[i].V(),
plugs = {'ipv6': {},
},
)
ns.add_clients(client)
ns_list.append(ns)
return EMUProfile(ns = ns_list, def_ns_plugs = self.def_ns_plugs)
def get_profile(self, tuneables):
args = get_args.get_args(tuneables)
return self.create_profile(args.ns, args.clients)
def register():
return Prof1()
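# Note (not part of the original script): profiles like this are loaded by the
# TRex EMU console, which calls register() to obtain the profile object and then
# get_profile() with user tunables; the --ns / --clients tunables above are
# parsed by the sibling get_args module. The exact console syntax depends on the
# TRex version in use.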
|
metrics/hybridqa/evaluator.py | HKUNLP/UnifiedSKG | 191 | 12743272 | import re
import collections
import string
# copy from https://github.com/wenhuchen/HybridQA/blob/master/evaluate_script.py
def normalize_answer(s):
"""Lower text and remove punctuation, articles and extra whitespace."""
def remove_articles(text):
regex = re.compile(r"\b(a|an|the)\b", re.UNICODE)
return re.sub(regex, " ", text)
def white_space_fix(text):
return " ".join(text.split())
def remove_punc(text):
exclude = set(string.punctuation)
return "".join(ch for ch in text if ch not in exclude)
def lower(text):
return text.lower()
return white_space_fix(remove_articles(remove_punc(lower(s))))
def get_tokens(s):
if not s:
return []
return normalize_answer(s).split()
def compute_exact(a_gold, a_pred):
return int(normalize_answer(a_gold) == normalize_answer(a_pred))
def compute_f1(a_gold, a_pred):
gold_toks = get_tokens(a_gold)
pred_toks = get_tokens(a_pred)
common = collections.Counter(gold_toks) & collections.Counter(pred_toks)
num_same = sum(common.values())
if len(gold_toks) == 0 or len(pred_toks) == 0:
# If either is no-answer, then F1 is 1 if they agree, 0 otherwise
return int(gold_toks == pred_toks)
if num_same == 0:
return 0
precision = 1.0 * num_same / len(pred_toks)
recall = 1.0 * num_same / len(gold_toks)
f1 = (2 * precision * recall) / (precision + recall)
return f1
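# Worked example (illustrative, not part of the original module):
#   gold = "The cat sat", pred = "cat sat down"
#   normalize_answer() lowercases and strips punctuation/articles,
#   giving tokens ["cat", "sat"] vs ["cat", "sat", "down"].
#   overlap = 2, precision = 2/3, recall = 2/2, so compute_f1 = 2*(2/3*1)/(2/3+1) = 0.8,
#   while compute_exact returns 0 because the normalized strings differ.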
def get_raw_scores(examples, reference):
"""
Computes the exact and f1 scores from the examples and the model predictions
"""
exact_scores = {}
f1_scores = {}
for example in examples:
qas_id = example['question_id']
gold_answers = [reference['reference'][qas_id]]
prediction = example['pred']
exact_scores[qas_id] = max(compute_exact(a, prediction) for a in gold_answers)
f1_scores[qas_id] = max(compute_f1(a, prediction) for a in gold_answers)
qid_list = reference['reference'].keys()
total = len(qid_list)
table_list = reference['table']
passage_list = reference['passage']
return collections.OrderedDict(
[
("table exact", 100.0 * sum(exact_scores[k] for k in table_list) / len(table_list)),
("table f1", 100.0 * sum(f1_scores[k] for k in table_list) / len(table_list)),
("passage exact", 100.0 * sum(exact_scores[k] for k in passage_list) / len(passage_list)),
("passage f1", 100.0 * sum(f1_scores[k] for k in passage_list) / len(passage_list)),
("total exact", 100.0 * sum(exact_scores[k] for k in qid_list) / total),
("total f1", 100.0 * sum(f1_scores[k] for k in qid_list) / total),
("total", total),
]
)
class EvaluateTool(object):
def __init__(self, args):
self.args = args
def evaluate(self, preds, golds, section):
summary = {}
exact_scores = {}
f1_scores = {}
for pred, gold in zip(preds, golds):
qas_id = gold['id']
gold_answers = [gold['answer_text']]
exact_scores[qas_id] = max(compute_exact(a, pred) for a in gold_answers)
f1_scores[qas_id] = max(compute_f1(a, pred) for a in gold_answers)
total = len(golds)
qid_list = list(exact_scores.keys())
summary["acc"] = sum(exact_scores[k] for k in qid_list) / total
summary["f1"] = sum(f1_scores[k] for k in qid_list) / total
return summary |
pxr/usdImaging/bin/testusdview/testenv/testUsdviewDeactivate/testUsdviewDeactivate.py | DougRogers-DigitalFish/USD | 3,680 | 12743296 | #!/pxrpythonsubst
#
# Copyright 2018 Pixar
#
# Licensed under the Apache License, Version 2.0 (the "Apache License")
# with the following modification; you may not use this file except in
# compliance with the Apache License and the following modification to it:
# Section 6. Trademarks. is deleted and replaced with:
#
# 6. Trademarks. This License does not grant permission to use the trade
# names, trademarks, service marks, or product names of the Licensor
# and its affiliates, except as required to comply with Section 4(c) of
# the License and to reproduce the content of the NOTICE file.
#
# You may obtain a copy of the Apache License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the Apache License with the above modification is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the Apache License for the specific
# language governing permissions and limitations under the Apache License.
#
from __future__ import print_function
import sys
from pxr.Usdviewq.qt import QtWidgets
# Remove any unwanted visuals from the view.
def _modifySettings(appController):
appController._dataModel.viewSettings.showBBoxes = False
appController._dataModel.viewSettings.showHUD = False
# Select one or more prim paths, then set active state of those prims.
def _selectAndSetActive(appController, active, paths):
selection = appController._dataModel.selection
with selection.batchPrimChanges:
selection.clearPrims()
for path in paths:
selection.addPrimPath(path)
if active:
appController.activateSelectedPrims()
# We must processEvents after every call to activateSelectedPrims() so the
# activated PrimViewItems can repopulate. (See _primViewUpdateTimer in
# appController.py)
QtWidgets.QApplication.processEvents()
else:
appController.deactivateSelectedPrims()
# Test deactivating then reactivating a single prim with no children.
def _testSingleDeactivate(appController):
_selectAndSetActive(appController, False, ["/spheres/a"])
appController._takeShot("singleDeactivate.png")
_selectAndSetActive(appController, True, ["/spheres/a"])
# Test deactivating then reactivating a single prim with some children.
def _testParentDeactivate(appController):
_selectAndSetActive(appController, False, ["/spheres"])
appController._takeShot("parentDeactivate.png")
_selectAndSetActive(appController, True, ["/spheres"])
# Test deactivating then reactivating a parent prim and one of its children.
def _testParentChildDeactivate(appController):
_selectAndSetActive(appController, False, ["/spheres", "/spheres/a"])
appController._takeShot("parentChildDeactivate1.png")
# Reactivation is a two-part process because we must activate the parent
# before we can even select the child. Take a snapshot in-between to verify
# this is working.
_selectAndSetActive(appController, True, ["/spheres"])
appController._takeShot("parentChildDeactivate2.png")
_selectAndSetActive(appController, True, ["/spheres/a"])
# In this case, the child prim has a shorter path than the parent due to a
# reference. If we deactivate the prims through Usd in sorted order where longer
# paths are deactivated first then this case fails.
def _testReferenceChildDeactivate(appController):
_selectAndSetActive(appController, False, ["/C2/D", "/A/B/C"])
# Test that instance proxies cannot be deactivated. The call does not raise an
# error, but prints a warning and does not perform the deactivation.
def _testInstanceProxyDeactivate(appController):
_selectAndSetActive(appController, False, ["/X/Y"])
prim = appController._dataModel.stage.GetPrimAtPath("/X/Y")
assert prim.IsActive() # Activation state should not have changed.
# Test that prim deactivation and reactivation work properly in usdview.
def testUsdviewInputFunction(appController):
_modifySettings(appController)
_testSingleDeactivate(appController)
_testParentDeactivate(appController)
_testParentChildDeactivate(appController)
_testReferenceChildDeactivate(appController)
_testInstanceProxyDeactivate(appController)
|
lib/models/modules/projection.py | littleSunlxy/contrastive-seg-lin | 398 | 12743304 | import torch
import torch.nn as nn
import torch.nn.functional as F
from lib.models.tools.module_helper import ModuleHelper
from lib.utils.tools.logger import Logger as Log
class ProjectionHead(nn.Module):
def __init__(self, dim_in, proj_dim=256, proj='convmlp', bn_type='torchsyncbn'):
super(ProjectionHead, self).__init__()
Log.info('proj_dim: {}'.format(proj_dim))
if proj == 'linear':
self.proj = nn.Conv2d(dim_in, proj_dim, kernel_size=1)
elif proj == 'convmlp':
self.proj = nn.Sequential(
nn.Conv2d(dim_in, dim_in, kernel_size=1),
ModuleHelper.BNReLU(dim_in, bn_type=bn_type),
nn.Conv2d(dim_in, proj_dim, kernel_size=1)
)
def forward(self, x):
return F.normalize(self.proj(x), p=2, dim=1) |
deepspeaker/embedding.py | ishine/Cross-Speaker-Emotion-Transfer | 147 | 12743308 | import numpy as np
from deepspeaker.audio_ds import read_mfcc
from deepspeaker.batcher import sample_from_mfcc
from deepspeaker.constants import SAMPLE_RATE, NUM_FRAMES, WIN_LENGTH
from deepspeaker.conv_models import DeepSpeakerModel
import tensorflow as tf
def build_model(ckpt_path):
model = DeepSpeakerModel()
model.m.load_weights(ckpt_path, by_name=True)
return model
def predict_embedding(model, audio, sr=SAMPLE_RATE, win_length=WIN_LENGTH, cuda=True):
mfcc = sample_from_mfcc(read_mfcc(audio, sr, win_length), NUM_FRAMES)
# Call the model to get the embeddings of shape (1, 512) for each file.
    gpus = tf.config.experimental.list_physical_devices('GPU') if cuda else []
if gpus:
try:
tf.config.experimental.set_visible_devices(gpus[0], 'GPU')
except RuntimeError as e:
print(e)
with tf.device('/device:GPU:0'):
embedding = model.m.predict(np.expand_dims(mfcc, axis=0)) # Female
else:
with tf.device('device:cpu:0'):
embedding = model.m.predict(np.expand_dims(mfcc, axis=0)) # Female
return embedding
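# Illustrative usage (the checkpoint and audio paths are placeholders, not
# files shipped with this module):
#
#     model = build_model('ResCNN_checkpoint.h5')
#     embedding = predict_embedding(model, 'sample.wav')   # shape (1, 512)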
|
airflow/plugins_manager.py | ChaseKnowlden/airflow | 15,947 | 12743325 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Manages all plugins."""
import importlib
import importlib.machinery
import importlib.util
import inspect
import logging
import os
import sys
import types
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Type
try:
import importlib_metadata
except ImportError:
from importlib import metadata as importlib_metadata
from airflow import settings
from airflow.utils.entry_points import entry_points_with_dist
from airflow.utils.file import find_path_from_directory
from airflow.utils.module_loading import as_importable_string
if TYPE_CHECKING:
from airflow.hooks.base import BaseHook
from airflow.timetables.base import Timetable
log = logging.getLogger(__name__)
import_errors: Dict[str, str] = {}
plugins = None # type: Optional[List[AirflowPlugin]]
# Plugin components to integrate as modules
registered_hooks: Optional[List['BaseHook']] = None
macros_modules: Optional[List[Any]] = None
executors_modules: Optional[List[Any]] = None
# Plugin components to integrate directly
admin_views: Optional[List[Any]] = None
flask_blueprints: Optional[List[Any]] = None
menu_links: Optional[List[Any]] = None
flask_appbuilder_views: Optional[List[Any]] = None
flask_appbuilder_menu_links: Optional[List[Any]] = None
global_operator_extra_links: Optional[List[Any]] = None
operator_extra_links: Optional[List[Any]] = None
registered_operator_link_classes: Optional[Dict[str, Type]] = None
timetable_classes: Optional[Dict[str, Type["Timetable"]]] = None
"""Mapping of class names to class of OperatorLinks registered by plugins.
Used by the DAG serialization code to only allow specific classes to be created
during deserialization
"""
PLUGINS_ATTRIBUTES_TO_DUMP = {
"hooks",
"executors",
"macros",
"flask_blueprints",
"appbuilder_views",
"appbuilder_menu_items",
"global_operator_extra_links",
"operator_extra_links",
"source",
}
class AirflowPluginSource:
"""Class used to define an AirflowPluginSource."""
def __str__(self):
raise NotImplementedError
def __html__(self):
raise NotImplementedError
class PluginsDirectorySource(AirflowPluginSource):
"""Class used to define Plugins loaded from Plugins Directory."""
def __init__(self, path):
self.path = os.path.relpath(path, settings.PLUGINS_FOLDER)
def __str__(self):
return f"$PLUGINS_FOLDER/{self.path}"
def __html__(self):
return f"<em>$PLUGINS_FOLDER/</em>{self.path}"
class EntryPointSource(AirflowPluginSource):
"""Class used to define Plugins loaded from entrypoint."""
def __init__(self, entrypoint: importlib_metadata.EntryPoint, dist: importlib_metadata.Distribution):
self.dist = dist.metadata['name']
self.version = dist.version
self.entrypoint = str(entrypoint)
def __str__(self):
return f"{self.dist}=={self.version}: {self.entrypoint}"
def __html__(self):
return f"<em>{self.dist}=={self.version}:</em> {self.entrypoint}"
class AirflowPluginException(Exception):
"""Exception when loading plugin."""
class AirflowPlugin:
"""Class used to define AirflowPlugin."""
name: Optional[str] = None
source: Optional[AirflowPluginSource] = None
hooks: List[Any] = []
executors: List[Any] = []
macros: List[Any] = []
admin_views: List[Any] = []
flask_blueprints: List[Any] = []
menu_links: List[Any] = []
appbuilder_views: List[Any] = []
appbuilder_menu_items: List[Any] = []
# A list of global operator extra links that can redirect users to
# external systems. These extra links will be available on the
# task page in the form of buttons.
#
# Note: the global operator extra link can be overridden at each
# operator level.
global_operator_extra_links: List[Any] = []
# A list of operator extra links to override or add operator links
# to existing Airflow Operators.
# These extra links will be available on the task page in form of
# buttons.
operator_extra_links: List[Any] = []
# A list of timetable classes that can be used for DAG scheduling.
timetables: List[Type["Timetable"]] = []
@classmethod
def validate(cls):
"""Validates that plugin has a name."""
if not cls.name:
raise AirflowPluginException("Your plugin needs a name.")
@classmethod
def on_load(cls, *args, **kwargs):
"""
Executed when the plugin is loaded.
This method is only called once during runtime.
:param args: If future arguments are passed in on call.
:param kwargs: If future arguments are passed in on call.
"""
def is_valid_plugin(plugin_obj):
"""
Check whether a potential object is a subclass of
the AirflowPlugin class.
:param plugin_obj: potential subclass of AirflowPlugin
:return: Whether or not the obj is a valid subclass of
AirflowPlugin
"""
global plugins
if (
inspect.isclass(plugin_obj)
and issubclass(plugin_obj, AirflowPlugin)
and (plugin_obj is not AirflowPlugin)
):
plugin_obj.validate()
return plugin_obj not in plugins
return False
def register_plugin(plugin_instance):
"""
    Load the plugin and register it after successful initialization.
:param plugin_instance: subclass of AirflowPlugin
"""
global plugins
plugin_instance.on_load()
plugins.append(plugin_instance)
def load_entrypoint_plugins():
"""
Load and register plugins AirflowPlugin subclasses from the entrypoints.
The entry_point group should be 'airflow.plugins'.
"""
global import_errors
log.debug("Loading plugins from entrypoints")
for entry_point, dist in entry_points_with_dist('airflow.plugins'):
log.debug('Importing entry_point plugin %s', entry_point.name)
try:
plugin_class = entry_point.load()
if not is_valid_plugin(plugin_class):
continue
plugin_instance = plugin_class()
plugin_instance.source = EntryPointSource(entry_point, dist)
register_plugin(plugin_instance)
except Exception as e:
log.exception("Failed to import plugin %s", entry_point.name)
import_errors[entry_point.module] = str(e)
def load_plugins_from_plugin_directory():
"""Load and register Airflow Plugins from plugins directory"""
global import_errors
log.debug("Loading plugins from directory: %s", settings.PLUGINS_FOLDER)
for file_path in find_path_from_directory(settings.PLUGINS_FOLDER, ".airflowignore"):
if not os.path.isfile(file_path):
continue
mod_name, file_ext = os.path.splitext(os.path.split(file_path)[-1])
if file_ext != '.py':
continue
try:
loader = importlib.machinery.SourceFileLoader(mod_name, file_path)
spec = importlib.util.spec_from_loader(mod_name, loader)
mod = importlib.util.module_from_spec(spec)
sys.modules[spec.name] = mod
loader.exec_module(mod)
log.debug('Importing plugin module %s', file_path)
for mod_attr_value in (m for m in mod.__dict__.values() if is_valid_plugin(m)):
plugin_instance = mod_attr_value()
plugin_instance.source = PluginsDirectorySource(file_path)
register_plugin(plugin_instance)
except Exception as e:
log.exception('Failed to import plugin %s', file_path)
import_errors[file_path] = str(e)
def make_module(name: str, objects: List[Any]):
"""Creates new module."""
if not objects:
return None
log.debug('Creating module %s', name)
name = name.lower()
module = types.ModuleType(name)
module._name = name.split('.')[-1] # type: ignore
module._objects = objects # type: ignore
module.__dict__.update((o.__name__, o) for o in objects)
return module
def ensure_plugins_loaded():
"""
Load plugins from plugins directory and entrypoints.
Plugins are only loaded if they have not been previously loaded.
"""
from airflow.stats import Stats
global plugins, registered_hooks
if plugins is not None:
log.debug("Plugins are already loaded. Skipping.")
return
if not settings.PLUGINS_FOLDER:
raise ValueError("Plugins folder is not set")
log.debug("Loading plugins")
with Stats.timer() as timer:
plugins = []
registered_hooks = []
load_plugins_from_plugin_directory()
load_entrypoint_plugins()
# We don't do anything with these for now, but we want to keep track of
    # them so we can integrate them into the UI's Connection screens
for plugin in plugins:
registered_hooks.extend(plugin.hooks)
num_loaded = len(plugins)
if num_loaded > 0:
log.debug("Loading %d plugin(s) took %.2f seconds", num_loaded, timer.duration)
def initialize_web_ui_plugins():
"""Collect extension points for WEB UI"""
global plugins
global flask_blueprints
global flask_appbuilder_views
global flask_appbuilder_menu_links
if (
flask_blueprints is not None
and flask_appbuilder_views is not None
and flask_appbuilder_menu_links is not None
):
return
ensure_plugins_loaded()
if plugins is None:
raise AirflowPluginException("Can't load plugins.")
log.debug("Initialize Web UI plugin")
flask_blueprints = []
flask_appbuilder_views = []
flask_appbuilder_menu_links = []
for plugin in plugins:
flask_appbuilder_views.extend(plugin.appbuilder_views)
flask_appbuilder_menu_links.extend(plugin.appbuilder_menu_items)
flask_blueprints.extend([{'name': plugin.name, 'blueprint': bp} for bp in plugin.flask_blueprints])
if (plugin.admin_views and not plugin.appbuilder_views) or (
plugin.menu_links and not plugin.appbuilder_menu_items
):
log.warning(
"Plugin \'%s\' may not be compatible with the current Airflow version. "
"Please contact the author of the plugin.",
plugin.name,
)
def initialize_extra_operators_links_plugins():
"""Creates modules for loaded extension from extra operators links plugins"""
global global_operator_extra_links
global operator_extra_links
global registered_operator_link_classes
if (
global_operator_extra_links is not None
and operator_extra_links is not None
and registered_operator_link_classes is not None
):
return
ensure_plugins_loaded()
if plugins is None:
raise AirflowPluginException("Can't load plugins.")
log.debug("Initialize extra operators links plugins")
global_operator_extra_links = []
operator_extra_links = []
registered_operator_link_classes = {}
for plugin in plugins:
global_operator_extra_links.extend(plugin.global_operator_extra_links)
operator_extra_links.extend(list(plugin.operator_extra_links))
registered_operator_link_classes.update(
{
f"{link.__class__.__module__}.{link.__class__.__name__}": link.__class__
for link in plugin.operator_extra_links
}
)
def initialize_timetables_plugins():
"""Collect timetable classes registered by plugins."""
global timetable_classes
if timetable_classes is not None:
return
ensure_plugins_loaded()
if plugins is None:
raise AirflowPluginException("Can't load plugins.")
log.debug("Initialize extra timetables plugins")
timetable_classes = {
as_importable_string(timetable_class): timetable_class
for plugin in plugins
for timetable_class in plugin.timetables
}
def integrate_executor_plugins() -> None:
"""Integrate executor plugins to the context."""
global plugins
global executors_modules
if executors_modules is not None:
return
ensure_plugins_loaded()
if plugins is None:
raise AirflowPluginException("Can't load plugins.")
log.debug("Integrate executor plugins")
executors_modules = []
for plugin in plugins:
if plugin.name is None:
raise AirflowPluginException("Invalid plugin name")
plugin_name: str = plugin.name
executors_module = make_module('airflow.executors.' + plugin_name, plugin.executors)
if executors_module:
executors_modules.append(executors_module)
sys.modules[executors_module.__name__] = executors_module
def integrate_macros_plugins() -> None:
"""Integrates macro plugins."""
global plugins
global macros_modules
from airflow import macros
if macros_modules is not None:
return
ensure_plugins_loaded()
if plugins is None:
raise AirflowPluginException("Can't load plugins.")
log.debug("Integrate DAG plugins")
macros_modules = []
for plugin in plugins:
if plugin.name is None:
raise AirflowPluginException("Invalid plugin name")
macros_module = make_module(f'airflow.macros.{plugin.name}', plugin.macros)
if macros_module:
macros_modules.append(macros_module)
sys.modules[macros_module.__name__] = macros_module
# Register the newly created module on airflow.macros such that it
# can be accessed when rendering templates.
setattr(macros, plugin.name, macros_module)
def get_plugin_info(attrs_to_dump: Optional[List[str]] = None) -> List[Dict[str, Any]]:
"""
Dump plugins attributes
:param attrs_to_dump: A list of plugin attributes to dump
:type attrs_to_dump: List
"""
ensure_plugins_loaded()
integrate_executor_plugins()
integrate_macros_plugins()
initialize_web_ui_plugins()
initialize_extra_operators_links_plugins()
if not attrs_to_dump:
attrs_to_dump = PLUGINS_ATTRIBUTES_TO_DUMP
plugins_info = []
if plugins:
for plugin in plugins:
info = {"name": plugin.name}
info.update({n: getattr(plugin, n) for n in attrs_to_dump})
plugins_info.append(info)
return plugins_info
|
src/gausskernel/dbmind/tools/xtuner/tuner/algorithms/rl_agent.py | opengauss-mirror/openGauss-graph | 360 | 12743329 | """
Copyright (c) 2020 Huawei Technologies Co.,Ltd.
openGauss is licensed under Mulan PSL v2.
You can use this software according to the terms and conditions of the Mulan PSL v2.
You may obtain a copy of Mulan PSL v2 at:
http://license.coscl.org.cn/MulanPSL2
THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
See the Mulan PSL v2 for more details.
"""
from tensorflow.keras import Model
from tensorflow.keras import Sequential
from tensorflow.keras.layers import Flatten, Dense, Activation, Input, Concatenate
from tensorflow.keras.optimizers import Adam
from rl.agents import DDPGAgent
from rl.agents import DQNAgent
from rl.memory import SequentialMemory
from rl.policy import BoltzmannQPolicy
from rl.random import OrnsteinUhlenbeckProcess
class RLAgent:
def __init__(self, env, alg='ddpg'):
self.env = env
nb_actions = env.action_space.shape[0]
nb_states = env.observation_space.shape[0]
if alg == 'ddpg':
self.agent = self._build_ddpg(nb_actions, nb_states)
        elif alg == 'dqn':
self.agent = self._build_dqn(nb_actions, nb_states)
else:
raise ValueError('Can not support this reinforcement learning algorithm.')
@staticmethod
    # DQN has not been regression-tested; DDPG is the recommended choice.
def _build_dqn(nb_actions, nb_states):
# build network
model = Sequential()
model.add(Flatten(input_shape=(1, nb_states)))
model.add(Dense(16))
model.add(Activation('relu'))
model.add(Dense(16))
model.add(Activation('relu'))
model.add(Dense(nb_actions, activation='linear'))
# build alg
memory = SequentialMemory(limit=10240, window_length=1)
policy = BoltzmannQPolicy()
dqn = DQNAgent(model=model, nb_actions=nb_actions, memory=memory,
nb_steps_warmup=10, enable_dueling_network=True, dueling_type='avg',
target_model_update=1e-2, policy=policy)
dqn.compile(Adam(), metrics=['mae'])
return dqn
@staticmethod
def _build_ddpg(nb_actions, nb_states):
# build an actor network
actor = Sequential()
actor.add(Flatten(input_shape=(1, nb_states)))
actor.add(Dense(16))
actor.add(Activation('relu'))
actor.add(Dense(16))
actor.add(Activation('relu'))
actor.add(Dense(nb_actions))
actor.add(Activation('sigmoid'))
# build a critic network
action_input = Input(shape=(nb_actions,), name='action_input')
observation_input = Input(shape=(1, nb_states), name='observation_input')
flattened_observation = Flatten()(observation_input)
x = Concatenate()([action_input, flattened_observation])
x = Dense(32)(x)
x = Activation('relu')(x)
x = Dense(32)(x)
x = Activation('relu')(x)
x = Dense(1)(x)
x = Activation('linear')(x)
critic = Model(inputs=[action_input, observation_input], outputs=x)
# tricks:
memory = SequentialMemory(limit=10240, window_length=1)
oup = OrnsteinUhlenbeckProcess(size=nb_actions, theta=.15, mu=0., sigma=.3)
# build ddpg alg
ddpg = DDPGAgent(nb_actions=nb_actions, actor=actor, critic=critic, critic_action_input=action_input,
memory=memory, nb_steps_warmup_actor=100, nb_steps_warmup_critic=100,
random_process=oup, gamma=.99, target_model_update=1e-3)
ddpg.compile(Adam(), metrics=['mae'])
return ddpg
def fit(self, steps, nb_max_episode_steps=100, verbose=0):
self.agent.fit(self.env, nb_steps=steps, nb_max_episode_steps=nb_max_episode_steps, verbose=verbose)
def save(self, filepath):
self.agent.save_weights(filepath, overwrite=True)
def load(self, filepath):
self.agent.load_weights(filepath)
def test(self, episodes, nb_max_episode_steps=10, verbose=0):
self.agent.test(self.env, nb_episodes=episodes, nb_max_episode_steps=nb_max_episode_steps, verbose=verbose)
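# Illustrative usage (assumes `env` exposes the Gym-style `action_space` /
# `observation_space` attributes used by the constructor above; the weights
# filename is a placeholder):
#
#     agent = RLAgent(env, alg='ddpg')
#     agent.fit(steps=5000)
#     agent.save('ddpg_weights.h5f')
#     agent.test(episodes=5)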
|
adaptive/learner/sequence_learner.py | Davide-sd/adaptive | 720 | 12743344 | from copy import copy
import cloudpickle
from sortedcontainers import SortedDict, SortedSet
from adaptive.learner.base_learner import BaseLearner
class _IgnoreFirstArgument:
"""Remove the first argument from the call signature.
The SequenceLearner's function receives a tuple ``(index, point)``
but the original function only takes ``point``.
    This is equivalent to ``lambda x: function(x[1])``; however, a lambda is
    not picklable.
"""
def __init__(self, function):
self.function = function
def __call__(self, index_point, *args, **kwargs):
index, point = index_point
return self.function(point, *args, **kwargs)
def __getstate__(self):
return self.function
def __setstate__(self, function):
self.__init__(function)
class SequenceLearner(BaseLearner):
r"""A learner that will learn a sequence. It simply returns
the points in the provided sequence when asked.
This is useful when your problem cannot be formulated in terms of
another adaptive learner, but you still want to use Adaptive's
routines to run, save, and plot.
Parameters
----------
function : callable
The function to learn. Must take a single element `sequence`.
sequence : sequence
The sequence to learn.
Attributes
----------
data : dict
The data as a mapping from "index of element in sequence" => value.
Notes
-----
    From primitive tests, the `~adaptive.SequenceLearner` appears to have
    performance similar to ``ipyparallel``'s ``load_balanced_view().map``, with
    the added benefit of having the results in the local kernel already.
"""
def __init__(self, function, sequence):
self._original_function = function
self.function = _IgnoreFirstArgument(function)
self._to_do_indices = SortedSet({i for i, _ in enumerate(sequence)})
self._ntotal = len(sequence)
self.sequence = copy(sequence)
self.data = SortedDict()
self.pending_points = set()
def ask(self, n, tell_pending=True):
indices = []
points = []
loss_improvements = []
for index in self._to_do_indices:
if len(points) >= n:
break
point = self.sequence[index]
indices.append(index)
points.append((index, point))
loss_improvements.append(1 / self._ntotal)
if tell_pending:
for i, p in zip(indices, points):
self.tell_pending((i, p))
return points, loss_improvements
def loss(self, real=True):
if not (self._to_do_indices or self.pending_points):
return 0
else:
npoints = self.npoints + (0 if real else len(self.pending_points))
return (self._ntotal - npoints) / self._ntotal
def remove_unfinished(self):
for i in self.pending_points:
self._to_do_indices.add(i)
self.pending_points = set()
def tell(self, point, value):
index, point = point
self.data[index] = value
self.pending_points.discard(index)
self._to_do_indices.discard(index)
def tell_pending(self, point):
index, point = point
self.pending_points.add(index)
self._to_do_indices.discard(index)
def done(self):
return not self._to_do_indices and not self.pending_points
def result(self):
"""Get the function values in the same order as ``sequence``."""
if not self.done():
raise Exception("Learner is not yet complete.")
return list(self.data.values())
@property
def npoints(self):
return len(self.data)
def _get_data(self):
return self.data
def _set_data(self, data):
if data:
indices, values = zip(*data.items())
# the points aren't used by tell, so we can safely pass None
points = [(i, None) for i in indices]
self.tell_many(points, values)
def __getstate__(self):
return (
cloudpickle.dumps(self._original_function),
self.sequence,
self._get_data(),
)
def __setstate__(self, state):
function, sequence, data = state
function = cloudpickle.loads(function)
self.__init__(function, sequence)
self._set_data(data)
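# Hypothetical usage sketch (added for illustration, not part of the original
# module): drives the learner manually with ask/tell; in real use an
# adaptive.Runner would normally do this loop.
if __name__ == "__main__":
    learner = SequenceLearner(lambda x: x ** 2, sequence=list(range(10)))
    while not learner.done():
        points, _ = learner.ask(2)
        for index_point in points:
            value = learner.function(index_point)  # same as f(point)
            learner.tell(index_point, value)
    print(learner.result())  # [0, 1, 4, ..., 81]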
|
tests/models/test_models_utils.py | rohanshad/pycox | 449 | 12743360 | import pytest
import numpy as np
import torch
from pycox.models.utils import pad_col, make_subgrid, cumsum_reverse
@pytest.mark.parametrize('val', [0, 1, 5])
def test_pad_col_start(val):
x = torch.ones((2, 3))
x_pad = pad_col(x, val, where='start')
pad = torch.ones(2, 1) * val
assert (x_pad == torch.cat([pad, x], dim=1)).all()
@pytest.mark.parametrize('val', [0, 1, 5])
def test_pad_col_end(val):
x = torch.ones((2, 3))
x_pad = pad_col(x, val)
pad = torch.ones(2, 1) * val
assert (x_pad == torch.cat([x, pad], dim=1)).all()
@pytest.mark.parametrize('n', [2, 13, 40])
def test_make_subgrid_1(n):
grid = np.random.uniform(0, 100, n)
grid = np.sort(grid)
new_grid = make_subgrid(grid, 1)
assert len(new_grid) == len(grid)
assert (new_grid == grid).all()
@pytest.mark.parametrize('sub', [2, 10, 20])
@pytest.mark.parametrize('start', [0, 2])
@pytest.mark.parametrize('stop', [4, 100])
@pytest.mark.parametrize('n', [5, 10])
def test_make_subgrid(sub, start, stop, n):
grid = np.linspace(start, stop, n)
new_grid = make_subgrid(grid, sub)
true_new = np.linspace(start, stop, n*sub - (sub-1))
assert len(new_grid) == len(true_new)
assert np.abs(true_new - new_grid).max() < 1e-13
def test_cumsum_reverse_error_dim():
x = torch.randn((5, 3))
with pytest.raises(NotImplementedError):
cumsum_reverse(x, dim=0)
with pytest.raises(NotImplementedError):
cumsum_reverse(x, dim=2)
def test_cumsum_reverse_dim_1():
torch.manual_seed(1234)
x = torch.randn(5, 16)
res_np = x.numpy()[:, ::-1].cumsum(1)[:, ::-1]
res = cumsum_reverse(x, dim=1)
assert np.abs(res.numpy() - res_np).max() < 1e-6
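# Hypothetical quick demo (added for illustration, not part of the original test
# suite): shows what pad_col and cumsum_reverse do on a tiny tensor, mirroring
# the assertions above.
if __name__ == "__main__":
    x = torch.arange(6, dtype=torch.float32).reshape(2, 3)
    print(pad_col(x, 7, where='start'))  # a column of 7s is prepended
    print(cumsum_reverse(x, dim=1))      # cumulative sum taken from right to left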
|
mmdet/core/loss/losses.py | escapist2019/AugFPN | 144 | 12743378 | <filename>mmdet/core/loss/losses.py
# TODO merge naive and weighted loss.
import torch
import torch.nn.functional as F
from ..bbox import bbox_overlaps
def weighted_nll_loss(pred, label, weight, avg_factor=None):
if avg_factor is None:
avg_factor = max(torch.sum(weight > 0).float().item(), 1.)
raw = F.nll_loss(pred, label, reduction='none')
return torch.sum(raw * weight)[None] / avg_factor
def weighted_cross_entropy(pred, label, weight, avg_factor=None, reduce=True):
if avg_factor is None:
avg_factor = max(torch.sum(weight > 0).float().item(), 1.)
raw = F.cross_entropy(pred, label, reduction='none')
if reduce:
return torch.sum(raw * weight)[None] / avg_factor
else:
return raw * weight / avg_factor
def fcos_binary_cross_entropy(pred, label, weight):
return F.binary_cross_entropy_with_logits(
pred, label.float(), weight.float(),
reduction='mean')[None]
def weighted_binary_cross_entropy(pred, label, weight, avg_factor=None):
if avg_factor is None:
avg_factor = max(torch.sum(weight > 0).float().item(), 1.)
return F.binary_cross_entropy_with_logits(
pred, label.float(), weight.float(),
reduction='sum')[None] / avg_factor
def sigmoid_focal_loss(pred,
target,
weight,
gamma=2.0,
alpha=0.25,
reduction='mean'):
pred_sigmoid = pred.sigmoid()
target = target.type_as(pred)
pt = (1 - pred_sigmoid) * target + pred_sigmoid * (1 - target)
weight = (alpha * target + (1 - alpha) * (1 - target)) * weight
weight = weight * pt.pow(gamma)
loss = F.binary_cross_entropy_with_logits(
pred, target, reduction='none') * weight
reduction_enum = F._Reduction.get_enum(reduction)
# none: 0, mean:1, sum: 2
if reduction_enum == 0:
return loss
elif reduction_enum == 1:
return loss.mean()
elif reduction_enum == 2:
return loss.sum()
def weighted_sigmoid_focal_loss(pred,
target,
weight,
gamma=2.0,
alpha=0.25,
avg_factor=None,
num_classes=80):
if avg_factor is None:
avg_factor = torch.sum(weight > 0).float().item() / num_classes + 1e-6
return sigmoid_focal_loss(
pred, target, weight, gamma=gamma, alpha=alpha,
reduction='sum')[None] / avg_factor
def mask_cross_entropy(pred, target, label):
num_rois = pred.size()[0]
inds = torch.arange(0, num_rois, dtype=torch.long, device=pred.device)
pred_slice = pred[inds, label].squeeze(1)
return F.binary_cross_entropy_with_logits(
pred_slice, target, reduction='mean')[None]
def smooth_l1_loss(pred, target, beta=1.0, reduction='mean'):
assert beta > 0
assert pred.size() == target.size() and target.numel() > 0
diff = torch.abs(pred - target)
loss = torch.where(diff < beta, 0.5 * diff * diff / beta,
diff - 0.5 * beta)
reduction_enum = F._Reduction.get_enum(reduction)
# none: 0, mean:1, sum: 2
if reduction_enum == 0:
return loss
elif reduction_enum == 1:
return loss.sum() / pred.numel()
elif reduction_enum == 2:
return loss.sum()
def weighted_iou_loss(pred,
target,
weight,
style='naive',
beta=0.2,
eps=1e-3,
avg_factor=None):
if style not in ['bounded', 'naive']:
raise ValueError('Only support bounded iou loss and naive iou loss.')
inds = torch.nonzero(weight[:, 0] > 0)
if avg_factor is None:
avg_factor = inds.numel() + 1e-6
if inds.numel() > 0:
inds = inds.squeeze(1)
else:
return (pred * weight).sum()[None] / avg_factor
if style == 'bounded':
loss = bounded_iou_loss(
pred[inds], target[inds], beta=beta, eps=eps, reduction='sum')
else:
loss = iou_loss(pred[inds], target[inds], reduction='sum')
loss = loss[None] / avg_factor
return loss
def iou_loss(pred_bboxes, target_bboxes, reduction='mean'):
ious = bbox_overlaps(pred_bboxes, target_bboxes, is_aligned=True)
loss = -ious.log()
reduction_enum = F._Reduction.get_enum(reduction)
if reduction_enum == 0:
return loss
elif reduction_enum == 1:
return loss.mean()
elif reduction_enum == 2:
return loss.sum()
def weighted_smoothl1(pred, target, weight, beta=1.0, avg_factor=None):
if avg_factor is None:
avg_factor = torch.sum(weight > 0).float().item() / 4 + 1e-6
loss = smooth_l1_loss(pred, target, beta, reduction='none')
return torch.sum(loss * weight)[None] / avg_factor
def accuracy(pred, target, topk=1):
if isinstance(topk, int):
topk = (topk, )
return_single = True
else:
return_single = False
maxk = max(topk)
_, pred_label = pred.topk(maxk, 1, True, True)
pred_label = pred_label.t()
correct = pred_label.eq(target.view(1, -1).expand_as(pred_label))
res = []
for k in topk:
correct_k = correct[:k].view(-1).float().sum(0, keepdim=True)
res.append(correct_k.mul_(100.0 / pred.size(0)))
return res[0] if return_single else res
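# Hypothetical usage sketch (added for illustration, not part of the original
# module): exercises the classification/regression losses on random tensors;
# the shapes and class count below are arbitrary placeholders.
if __name__ == '__main__':
    cls_score = torch.randn(8, 4)
    labels = torch.randint(0, 4, (8, ))
    label_weights = torch.ones(8)
    print(weighted_cross_entropy(cls_score, labels, label_weights))
    bbox_pred = torch.randn(8, 4)
    bbox_targets = torch.randn(8, 4)
    bbox_weights = torch.ones(8, 4)
    print(weighted_smoothl1(bbox_pred, bbox_targets, bbox_weights))
    print(accuracy(cls_score, labels, topk=(1, 2)))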
|
TextGenerator/core/element/TextImg.py | yinhaoxs/CharacterRecognition-CRNN | 166 | 12743433 | <gh_stars>100-1000
import cv2
from typing import List
from core.element.BaseImg import BaseImg
from core.element.CharImg import CharImg
from PIL import Image, ImageFont, ImageDraw
import os
import numpy as np
import json
from utils import time_util as tu
import math
import traceback
from utils import log
TYPE_ORIENTATION_HORIZONTAL = 0
TYPE_ORIENTATION_VERTICAL = 1
TYPE_ALIGN_MODEL_B = 0  # text alignment mode: bottom / left aligned
TYPE_ALIGN_MODEL_C = 1  # text alignment mode: center aligned
TYPE_ALIGN_MODEL_T = 2  # text alignment mode: top / right aligned
class TextImg(BaseImg):
"""
    Text-string image object
"""
def __init__(self,
char_obj_list: List[CharImg],
text_img_output_dir,
text_img_info_output_dir,
orientation,
align_mode,
img: Image.Image = None,
img_path: str = None,
**kwargs
):
tmp_list = []
for item in char_obj_list:
if isinstance(item, dict):
tmp_list.append(CharImg(**item))
if tmp_list:
char_obj_list = tmp_list
self.char_obj_list = char_obj_list
self.text = "".join([char_obj.char for char_obj in self.char_obj_list])
self.text_img_output_dir = text_img_output_dir
self.text_img_info_output_dir = text_img_info_output_dir
self.orientation = orientation
self.align_mode = align_mode
if img_path:
self.img_name = img_path.split(os.sep)[-1]
self.name = self.img_name.split('.')[0]
self.img_path = img_path
self.img = load_img(self.img_path)
else:
self.name = self._gen_name(align_mode, orientation)
self.img_name = self.name + ".png"
self.img_path = os.path.join(text_img_output_dir, self.img_name)
self.img = img
def _gen_name(self, align_mode, orientation):
o = "v" if orientation == TYPE_ORIENTATION_VERTICAL else "h"
a = 'b'
if align_mode == TYPE_ALIGN_MODEL_T:
a = 't'
elif align_mode == TYPE_ALIGN_MODEL_C:
a = 'c'
return tu.timestamp() + "_" + o + "_" + a + "_" + self.text.replace(" ", "_")
def __repr__(self):
return json.dumps(self.__dict__, cls=CharImgEncoder)
def export(self):
"""
        Export the image and its metadata
:return:
"""
self.img.save(self.img_path)
json_file_path = os.path.join(self.text_img_info_output_dir, self.name + ".json")
with open(json_file_path, 'w') as f:
json.dump(self.__dict__, f, cls=CharImgEncoder)
@staticmethod
def load_from_json(file_path):
"""
        Load a TextImg object from a json file
:param file_path:
:return:
"""
assert os.path.exists(file_path), "json file is not exist,please check: {file_path}".format(file_path=file_path)
with open(file_path, 'r') as f:
j = json.load(f)
return TextImg(**j)
def show(self, with_box=False):
"""
        Display the image (optionally with character boxes)
:param with_box:
:return:
"""
image = self.cv_img()
if with_box:
for char_obj in self.char_obj_list:
pt1 = (char_obj.box[0], char_obj.box[1])
pt2 = (char_obj.box[2], char_obj.box[3])
image = cv2.rectangle(image, pt1=pt1, pt2=pt2, color=(0, 0, 255), thickness=1)
cv2.imshow(self.text, image)
cv2.waitKey()
cv2.destroyWindow(self.text)
def cv_img(self):
"""
        Get the OpenCV image (numpy BGRA array)
:return:
"""
image = np.array(self.img)
image = cv2.cvtColor(image, cv2.COLOR_RGBA2BGRA)
return image
def pil_img(self):
"""
        Get the Pillow image object
:return:
"""
return self.img
class CharImgEncoder(json.JSONEncoder):
def default(self, o):
if not isinstance(o, Image.Image):
return o.__dict__
def load_img(img_path):
"""
    Load an image file from disk
:param img_path:
:return:
"""
assert os.path.exists(img_path), "image is not exist, please check. {img_path}".format(img_path=img_path)
return Image.open(img_path)
def calc_bg_size(font_path: str,
orientation: int,
char_obj_list: List[CharImg],
spacing_rate: float,
padding,
auto_padding_to_ratio) -> tuple:
"""
    Compute the background size of the text patch
    :param font_path: path to the font file
    :param orientation: layout orientation
    :param char_obj_list: list of character objects
    :param spacing_rate: spacing (as a fraction of the glyph size)
    :param padding: inner padding
    :param auto_padding_to_ratio: auto-pad to the given ratio (w/h for horizontal layout, h/w for vertical layout)
:return:
"""
max_char_bg_w = 0
max_char_bg_h = 0
bg_w = 0
bg_h = 0
for index, char_obj in enumerate(char_obj_list):
font = ImageFont.truetype(font_path, size=char_obj.font_size)
        # get the background size of the current character
char_bg_w = 0
char_bg_h = 0
try:
char_bg_w, char_bg_h = font.getsize(char_obj.char)
            # add the border size
char_bg_w += char_obj.border_width * 2
char_bg_h += char_obj.border_width * 2
except Exception as e:
traceback.print_exc()
char_obj.size = (char_bg_w, char_bg_h)
        # track the max character width/height for the current line of text
max_char_bg_w = char_bg_w if char_bg_w > max_char_bg_w else max_char_bg_w
max_char_bg_h = char_bg_h if char_bg_h > max_char_bg_h else max_char_bg_h
        # check whether this is the last character
is_last = index == len(char_obj_list) - 1
r = 0 if is_last else spacing_rate
if orientation == TYPE_ORIENTATION_VERTICAL:
bg_w = max_char_bg_w
bg_h += math.ceil(char_obj.size[1] * (1 + r))
else:
bg_w += math.ceil(char_obj.size[0] * (1 + r))
bg_h = max_char_bg_h
if auto_padding_to_ratio > 0:
        # auto-pad to the target ratio
        # for horizontal layout, padding is added on the left and right sides
# auto_padding_to_ratio = tw / th
if orientation == TYPE_ORIENTATION_HORIZONTAL:
st_w = auto_padding_to_ratio * bg_h
if st_w > bg_w:
d = round((st_w - bg_w) / 2)
padding = (d, 0, d, 0)
else:
st_h = bg_w / auto_padding_to_ratio
d = round((st_h - bg_h) / 2)
padding = (0, d, 0, d)
        # for vertical layout, padding is added on the top and bottom sides
# auto_padding_to_ratio = th / tw
elif orientation == TYPE_ORIENTATION_VERTICAL:
st_h = auto_padding_to_ratio * bg_w
if st_h > bg_h:
d = round((st_h - bg_h) / 2)
padding = (0, d, 0, d)
else:
st_w = bg_h / auto_padding_to_ratio
d = round((st_w - bg_w) / 2)
padding = (d, 0, d, 0)
bg_w = bg_w + padding[0] + padding[2]
bg_h = bg_h + padding[1] + padding[3]
return bg_w, bg_h, padding
def draw_text(font_path, bg_w, bg_h, orientation, char_obj_list: List[CharImg], spacing_rate, align_mode, padding):
"""
    Draw the text onto the text-patch background
:param font_path:
:param bg_w:
:param bg_h:
:param orientation:
:param char_obj_list:
:param spacing_rate:
:param align_mode:
:param padding:
:return:
"""
img = Image.new("RGBA", (bg_w, bg_h), color=(0, 0, 0, 0))
draw = ImageDraw.Draw(img)
font_area_w = bg_w - padding[0] - padding[2]
font_area_h = bg_h - padding[1] - padding[3]
tmp_char = None
l, t = 0, 0
for index, char_obj in enumerate(char_obj_list):
font = ImageFont.truetype(font_path, size=char_obj.font_size)
cw, ch = char_obj.size
if orientation == TYPE_ORIENTATION_VERTICAL:
if align_mode == TYPE_ALIGN_MODEL_B:
l = 0
elif align_mode == TYPE_ALIGN_MODEL_C:
l = math.ceil((font_area_w - cw) / 2)
elif align_mode == TYPE_ALIGN_MODEL_T:
l = font_area_w - cw
if tmp_char:
add_t = math.ceil(tmp_char.size[1] * (1 + spacing_rate))
t += add_t
else:
t = 0
l += padding[0]
if index == 0:
t += padding[1]
char_obj.box = [l, t, l + cw, t + ch]
else:
t = 0
if align_mode == TYPE_ALIGN_MODEL_B:
t = font_area_h - ch
elif align_mode == TYPE_ALIGN_MODEL_C:
t = math.ceil((font_area_h - ch) / 2)
elif align_mode == TYPE_ALIGN_MODEL_T:
t = 0
if tmp_char:
add_l = math.ceil(tmp_char.size[0] * (1 + spacing_rate))
l += add_l
else:
l = 0
t += padding[1]
if index == 0:
l += padding[0]
char_obj.box = [l, t, l + cw, t + ch]
log.info("draw text >> {text} color: {color} font: {font}".format(text=char_obj.char,
color=char_obj.color,
font=font))
draw.text((l + char_obj.border_width, t + char_obj.border_width),
text=char_obj.char,
fill=char_obj.color,
font=font)
if char_obj.border_width > 0:
draw.rectangle(xy=tuple(char_obj.box), width=char_obj.border_width, outline=char_obj.border_color)
tmp_char = char_obj
return img
def gen_batch_char_obj(text,
color,
font_size,
border_width=0,
border_color=(0, 0, 0, 0)) -> List[CharImg]:
"""
    Generate a batch of CharImg objects
:param text:
:param color:
:param font_size:
:param border_width:
:param border_color:
:return:
"""
char_obj_list = []
for char in text:
char_obj_list.append(
CharImg(char, font_size=font_size, color=color, border_width=border_width, border_color=border_color))
return char_obj_list
def create(char_obj_list: List[CharImg],
orientation: int = TYPE_ORIENTATION_HORIZONTAL,
align_mode: int = TYPE_ALIGN_MODEL_B,
spacing_rate: float = 0.08,
padding=(0, 0, 0, 0),
auto_padding_to_ratio=0,
font_path="",
text_img_output_dir="",
text_img_info_output_dir=""
):
"""
    Generate a text image
    :param char_obj_list: list of character objects
    :param orientation: layout orientation
    :param align_mode: text alignment mode
    :param spacing_rate: spacing (as a fraction of the glyph size)
    :param padding: inner padding
    :param auto_padding_to_ratio: auto-pad to the given ratio; <=0 disables auto padding (w/h for horizontal layout, h/w for vertical layout)
    :param font_path: path to the font file
:param text_img_output_dir:
:param text_img_info_output_dir:
:return:
"""
    # compute the transparent background area for the text patch
bg_w, bg_h, padding = calc_bg_size(font_path, orientation, char_obj_list, spacing_rate, padding,
auto_padding_to_ratio)
    # draw the text
img = draw_text(font_path, bg_w, bg_h, orientation, char_obj_list, spacing_rate, align_mode, padding)
return TextImg(char_obj_list=char_obj_list,
text_img_output_dir=text_img_output_dir,
text_img_info_output_dir=text_img_info_output_dir,
orientation=orientation,
align_mode=align_mode,
img=img)
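# Hypothetical usage sketch (added for illustration, not part of the original
# module). The font path and output directories are placeholders; any TrueType
# font available on the system can be used.
if __name__ == '__main__':
    chars = gen_batch_char_obj(text="hello", color=(255, 0, 0, 255), font_size=32)
    text_img = create(chars,
                      orientation=TYPE_ORIENTATION_HORIZONTAL,
                      align_mode=TYPE_ALIGN_MODEL_C,
                      font_path="/path/to/some_font.ttf",
                      text_img_output_dir="/tmp/text_img",
                      text_img_info_output_dir="/tmp/text_img_info")
    text_img.show(with_box=True)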
|
tushare/stock/indictor.py | lixianjian/tushare | 12,490 | 12743459 | # -*- coding:utf-8 -*-
"""
Stock technical indicator interface
Created on 2018/05/26
@author: <NAME>
@group : **
@contact: <EMAIL>
"""
def ma(data, n=10, val_name="close"):
import numpy as np
'''
    Moving Average (MA)
    Parameters
    ------
      data:pandas.DataFrame
                  stock data obtained via get_h_data
      n:int
                  moving-average window length; the time unit follows the data
      val_name:string
                  name of the column to compute on, default is "close"
    return
    -------
      list
          moving average series
'''
values = []
MA = []
for index, row in data.iterrows():
values.append(row[val_name])
if len(values) == n:
del values[0]
MA.append(np.average(values))
return np.asarray(MA)
def md(data, n=10, val_name="close"):
import numpy as np
'''
    Moving Standard Deviation (MD)
    Parameters
    ------
      data:pandas.DataFrame
                  stock data obtained via get_h_data
      n:int
                  window length; the time unit follows the data
      val_name:string
                  name of the column to compute on, default is "close"
    return
    -------
      list
          moving standard deviation series
'''
values = []
MD = []
for index, row in data.iterrows():
values.append(row[val_name])
if len(values) == n:
del values[0]
MD.append(np.std(values))
return np.asarray(MD)
def _get_day_ema(prices, n):
a = 1 - 2 / (n + 1)
day_ema = 0
for index, price in enumerate(reversed(prices)):
day_ema += a ** index * price
return day_ema
def ema(data, n=12, val_name="close"):
import numpy as np
'''
    Exponential Moving Average (EMA)
    Parameters
    ------
      data:pandas.DataFrame
                  stock data obtained via get_h_data
      n:int
                  window length; the time unit follows the data
      val_name:string
                  name of the column to compute on, default is "close"
    return
    -------
      EMA:numpy.ndarray<numpy.float64>
          exponential moving average series
'''
prices = []
EMA = []
for index, row in data.iterrows():
if index == 0:
past_ema = row[val_name]
EMA.append(row[val_name])
else:
# Y=[2*X+(N-1)*Y’]/(N+1)
today_ema = (2 * row[val_name] + (n - 1) * past_ema) / (n + 1)
past_ema = today_ema
EMA.append(today_ema)
return np.asarray(EMA)
def macd(data, quick_n=12, slow_n=26, dem_n=9, val_name="close"):
import numpy as np
'''
    Moving Average Convergence Divergence (MACD)
    Parameters
    ------
      data:pandas.DataFrame
                  stock data obtained via get_h_data
      quick_n:int
                  fast period used for the DIFF line
      slow_n:int
                  slow period used for the DIFF line
      dem_n:int
                  period of the DEM (signal) line
      val_name:string
                  name of the column to compute on, default is "close"
    return
    -------
      OSC:numpy.ndarray<numpy.float64>
          MACD bar / OSC histogram, DIFF - DEM
      DIFF:numpy.ndarray<numpy.float64>
          difference line
      DEM:numpy.ndarray<numpy.float64>
          signal line
'''
ema_quick = np.asarray(ema(data, quick_n, val_name))
ema_slow = np.asarray(ema(data, slow_n, val_name))
DIFF = ema_quick - ema_slow
data["diff"] = DIFF
DEM = ema(data, dem_n, "diff")
OSC = DIFF - DEM
return OSC, DIFF, DEM
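# Hypothetical quick check (added for illustration, not part of the original
# module): builds a tiny synthetic DataFrame so ma/ema/macd can be exercised
# without fetching real market data.
def _demo_macd():
    import numpy as np
    import pandas as pd
    demo = pd.DataFrame({"close": np.linspace(10, 20, 60)})
    osc, diff, dem = macd(demo, quick_n=12, slow_n=26, dem_n=9)
    return osc[-1], diff[-1], dem[-1]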
def kdj(data):
import numpy as np
'''
    Stochastic oscillator (KDJ)
    Parameters
    ------
      data:pandas.DataFrame
                  stock data obtained via get_h_data
    return
    -------
      K:numpy.ndarray<numpy.float64>
          K line
      D:numpy.ndarray<numpy.float64>
          D line
      J:numpy.ndarray<numpy.float64>
          J line
'''
K, D, J = [], [], []
last_k, last_d = None, None
for index, row in data.iterrows():
if last_k is None or last_d is None:
last_k = 50
last_d = 50
c, l, h = row["close"], row["low"], row["high"]
rsv = (c - l) / (h - l) * 100
k = (2 / 3) * last_k + (1 / 3) * rsv
d = (2 / 3) * last_d + (1 / 3) * k
j = 3 * k - 2 * d
K.append(k)
D.append(d)
J.append(j)
last_k, last_d = k, d
return np.asarray(K), np.asarray(D), np.asarray(J)
def rsi(data, n=6, val_name="close"):
import numpy as np
'''
    Relative Strength Index (RSI)
    Parameters
    ------
      data:pandas.DataFrame
                  stock data obtained via get_h_data
      n:int
                  lookback length; the time unit follows the data
    return
    -------
      RSI:numpy.ndarray<numpy.float64>
          RSI line
'''
RSI = []
UP = []
DOWN = []
for index, row in data.iterrows():
if index == 0:
past_value = row[val_name]
RSI.append(0)
else:
diff = row[val_name] - past_value
if diff > 0:
UP.append(diff)
DOWN.append(0)
else:
UP.append(0)
DOWN.append(diff)
if len(UP) == n:
del UP[0]
if len(DOWN) == n:
del DOWN[0]
past_value = row[val_name]
rsi = np.sum(UP) / (-np.sum(DOWN) + np.sum(UP)) * 100
RSI.append(rsi)
return np.asarray(RSI)
def boll(data, n=10, val_name="close", k=2):
'''
    Bollinger Bands (BOLL)
    Parameters
    ------
      data:pandas.DataFrame
                  stock data obtained via get_h_data
      n:int
                  lookback length; the time unit follows the data
      k:int
                  band width in standard deviations, default 2
    return
    -------
      BOLL:numpy.ndarray<numpy.float64>
          middle band
      UPPER:numpy.ndarray<numpy.float64>
          upper band
      LOWER:numpy.ndarray<numpy.float64>
          lower band
'''
BOLL = ma(data, n, val_name)
MD = md(data, n, val_name)
UPPER = BOLL + k * MD
LOWER = BOLL - k * MD
return BOLL, UPPER, LOWER
def wnr(data, n=14):
'''
    Williams %R (W&R)
    Parameters
    ------
      data:pandas.DataFrame
                  stock data obtained via get_h_data
      n:int
                  lookback length; the time unit follows the data
    return
    -------
      WNR:numpy.ndarray<numpy.float64>
          Williams %R values
'''
high_prices = []
low_prices = []
WNR = []
for index, row in data.iterrows():
high_prices.append(row["high"])
if len(high_prices) == n:
del high_prices[0]
low_prices.append(row["low"])
if len(low_prices) == n:
del low_prices[0]
highest = max(high_prices)
lowest = min(low_prices)
wnr = (highest - row["close"]) / (highest - lowest) * 100
WNR.append(wnr)
return WNR
def _get_any_ma(arr, n):
import numpy as np
MA = []
values = []
for val in arr:
values.append(val)
if len(values) == n:
del values[0]
MA.append(np.average(values))
return np.asarray(MA)
def dmi(data, n=14, m=14, k=6):
import numpy as np
'''
    Directional Movement Index (DMI)
    Parameters
    ------
      data:pandas.DataFrame
                  stock data obtained via get_h_data
      n:int
                  +/-DI(n): DI lookback length, default 14
      m:int
                  ADX(m): ADX lookback length, default 14
      k:int
                  ADXR(k): ADXR looks back k periods, default 6
    return
    -------
      P_DI:numpy.ndarray<numpy.float64>
          +DI indicator
      M_DI:numpy.ndarray<numpy.float64>
          -DI indicator
      ADX:numpy.ndarray<numpy.float64>
          ADX indicator
      ADXR:numpy.ndarray<numpy.float64>
          ADXR indicator
ref.
-------
https://www.mk-mode.com/octopress/2012/03/03/03002038/
'''
    # upward directional movement (+DM)
    P_DM = [0.]
    # downward directional movement (-DM)
    M_DM = [0.]
    # true range (TR)
    TR = [0.]
    # directional movement (DX)
    DX = [0.]
P_DI = [0.]
M_DI = [0.]
for index, row in data.iterrows():
if index == 0:
past_row = row
else:
p_dm = row["high"] - past_row["high"]
m_dm = past_row["low"] - row["low"]
if (p_dm < 0 and m_dm < 0) or (np.isclose(p_dm, m_dm)):
p_dm = 0
m_dm = 0
if p_dm > m_dm:
m_dm = 0
if m_dm > p_dm:
p_dm = 0
P_DM.append(p_dm)
M_DM.append(m_dm)
tr = max(row["high"] - past_row["low"], row["high"] - past_row["close"], past_row["close"] - row["low"])
TR.append(tr)
if len(P_DM) == n:
del P_DM[0]
if len(M_DM) == n:
del M_DM[0]
if len(TR) == n:
del TR[0]
            # positive directional indicator line (+DI)
p_di = (np.average(P_DM) / np.average(TR)) * 100
P_DI.append(p_di)
            # negative directional indicator line (-DI)
m_di = (np.average(M_DM) / np.average(TR)) * 100
M_DI.append(m_di)
            # same-day +DI and -DI (kept commented out for reference)
# p_day_di = (p_dm / tr) * 100
# m_day_di = (m_dm / tr) * 100
            # directional movement index DX
            # dx = (di_dif / di_sum) * 100
            # di_dif is the absolute difference between +DI and -DI
            # di_sum is the sum of +DI and -DI
            # ADX is the n-period moving average of DX
if (p_di + m_di) == 0:
dx = 0
else:
dx = (abs(p_di - m_di) / (p_di + m_di)) * 100
DX.append(dx)
past_row = row
ADX = _get_any_ma(DX, m)
#
    # # ADXR: estimated value derived from ADX
ADXR = []
for index, adx in enumerate(ADX):
if index >= k:
adxr = (adx + ADX[index - k]) / 2
ADXR.append(adxr)
else:
ADXR.append(0)
return P_DI, M_DI, ADX, ADXR
def bias(data, n=5):
import numpy as np
'''
    Bias Ratio (BIAS)
    Parameters
    ------
      data:pandas.DataFrame
                  stock data obtained via get_h_data
      n:int
                  lookback length, default 5
    return
    -------
      BIAS:numpy.ndarray<numpy.float64>
          bias ratio values
'''
MA = ma(data, n)
CLOSES = data["close"]
BIAS = (np.true_divide((CLOSES - MA), MA)) * (100 / 100)
return BIAS
def asi(data, n=5):
import numpy as np
'''
    Accumulation Swing Index (ASI)
    Parameters
    ------
      data:pandas.DataFrame
                  stock data obtained via get_h_data
      n:int
                  lookback length, default 5
    return
    -------
      ASI:numpy.ndarray<numpy.float64>
          accumulation swing index values
'''
SI = []
for index, row in data.iterrows():
if index == 0:
last_row = row
SI.append(0.)
else:
a = abs(row["close"] - last_row["close"])
b = abs(row["low"] - last_row["close"])
c = abs(row["high"] - last_row["close"])
d = abs(last_row["close"] - last_row["open"])
if b > a and b > c:
r = b + (1 / 2) * a + (1 / 4) * d
elif c > a and c > b:
r = c + (1 / 4) * d
else:
r = 0
e = row["close"] - last_row["close"]
f = row["close"] - last_row["open"]
g = last_row["close"] - last_row["open"]
x = e + (1 / 2) * f + g
k = max(a, b)
l = 3
if np.isclose(r, 0) or np.isclose(l, 0):
si = 0
else:
si = 50 * (x / r) * (k / l)
SI.append(si)
ASI = _get_any_ma(SI, n)
return ASI
def vr(data, n=26):
import numpy as np
'''
    Volatility Volume Ratio (VR)
    Parameters
    ------
      data:pandas.DataFrame
                  stock data obtained via get_h_data
      n:int
                  lookback length, default 26
    return
    -------
      VR:numpy.ndarray<numpy.float64>
          volume ratio values
'''
VR = []
AV_volumes, BV_volumes, CV_volumes = [], [], []
for index, row in data.iterrows():
if row["close"] > row["open"]:
AV_volumes.append(row["volume"])
elif row["close"] < row["open"]:
BV_volumes.append(row["volume"])
else:
CV_volumes.append(row["volume"])
if len(AV_volumes) == n:
del AV_volumes[0]
if len(BV_volumes) == n:
del BV_volumes[0]
if len(CV_volumes) == n:
del CV_volumes[0]
avs = sum(AV_volumes)
bvs = sum(BV_volumes)
cvs = sum(CV_volumes)
if (bvs + (1 / 2) * cvs) != 0:
vr = (avs + (1 / 2) * cvs) / (bvs + (1 / 2) * cvs)
else:
vr = 0
VR.append(vr)
return np.asarray(VR)
def arbr(data, n=26):
import numpy as np
'''
    AR and BR indicators (ARBR)
    Parameters
    ------
      data:pandas.DataFrame
                  stock data obtained via get_h_data
      n:int
                  lookback length, default 26
    return
    -------
      AR:numpy.ndarray<numpy.float64>
          AR indicator
      BR:numpy.ndarray<numpy.float64>
          BR indicator
'''
H, L, O, PC = np.array([0]), np.array([0]), np.array([0]), np.array([0])
AR, BR = np.array([0]), np.array([0])
for index, row in data.iterrows():
if index == 0:
last_row = row
else:
h = row["high"]
H = np.append(H, [h])
if len(H) == n:
H = np.delete(H, 0)
l = row["low"]
L = np.append(L, [l])
if len(L) == n:
L = np.delete(L, 0)
o = row["open"]
O = np.append(O, [o])
if len(O) == n:
O = np.delete(O, 0)
pc = last_row["close"]
PC = np.append(PC, [pc])
if len(PC) == n:
PC = np.delete(PC, 0)
ar = (np.sum(np.asarray(H) - np.asarray(O)) / sum(np.asarray(O) - np.asarray(L))) * 100
AR = np.append(AR, [ar])
br = (np.sum(np.asarray(H) - np.asarray(PC)) / sum(np.asarray(PC) - np.asarray(L))) * 100
BR = np.append(BR, [br])
last_row = row
return np.asarray(AR), np.asarray(BR)
def dpo(data, n=20, m=6):
'''
    Detrended Price Oscillator (DPO)
    Parameters
    ------
      data:pandas.DataFrame
                  stock data obtained via get_h_data
      n:int
                  lookback length, default 20
      m:int
                  moving-average length M for MADPO, default 6
    return
    -------
      DPO:numpy.ndarray<numpy.float64>
          DPO indicator
      MADPO:numpy.ndarray<numpy.float64>
          MADPO indicator
'''
CLOSES = data["close"]
DPO = CLOSES - ma(data, int(n / 2 + 1))
MADPO = _get_any_ma(DPO, m)
return DPO, MADPO
def trix(data, n=12, m=20):
import numpy as np
'''
    Triple Exponentially Smoothed Average (TRIX)
    Parameters
    ------
      data:pandas.DataFrame
                  stock data obtained via get_h_data
      n:int
                  lookback length, default 12
      m:int
                  moving-average length M for TRMA, default 20
    return
    -------
      TRIX:numpy.ndarray<numpy.float64>
          TRIX indicator
      TRMA:numpy.ndarray<numpy.float64>
          TRMA indicator
'''
CLOSES = []
TRIX = []
for index, row in data.iterrows():
CLOSES.append(row["close"])
if len(CLOSES) == n:
del CLOSES[0]
tr = np.average(CLOSES)
if index == 0:
past_tr = tr
TRIX.append(0)
else:
trix = (tr - past_tr) / past_tr * 100
TRIX.append(trix)
TRMA = _get_any_ma(TRIX, m)
return TRIX, TRMA
def bbi(data):
import numpy as np
'''
    Bull and Bear Index (BBI)
    Parameters
    ------
      data:pandas.DataFrame
                  stock data obtained via get_h_data
    return
    -------
      BBI:numpy.ndarray<numpy.float64>
          bull and bear index values
'''
CS = []
BBI = []
for index, row in data.iterrows():
CS.append(row["close"])
if len(CS) < 24:
BBI.append(row["close"])
else:
bbi = np.average([np.average(CS[-3:]), np.average(CS[-6:]), np.average(CS[-12:]), np.average(CS[-24:])])
BBI.append(bbi)
return np.asarray(BBI)
def mtm(data, n=6):
import numpy as np
'''
    Momentum Index (MTM)
    Parameters
    ------
      data:pandas.DataFrame
                  stock data obtained via get_h_data
      n:int
                  lookback length, default 6
    return
    -------
      MTM:numpy.ndarray<numpy.float64>
          momentum values
'''
MTM = []
CN = []
for index, row in data.iterrows():
if index < n - 1:
MTM.append(0.)
else:
mtm = row["close"] - CN[index - n]
MTM.append(mtm)
CN.append(row["close"])
return np.asarray(MTM)
def obv(data):
import numpy as np
'''
    On Balance Volume (OBV)
    Parameters
    ------
      data:pandas.DataFrame
                  stock data obtained via get_h_data
    return
    -------
      OBV:numpy.ndarray<numpy.float64>
          on balance volume values
'''
tmp = np.true_divide(((data["close"] - data["low"]) - (data["high"] - data["close"])), (data["high"] - data["low"]))
OBV = tmp * data["volume"]
return OBV
def sar(data, n=4):
raise Exception("Not implemented yet")
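# Hypothetical quick check (added for illustration, not part of the original
# module): builds a small synthetic OHLCV DataFrame so the indicators above can
# be exercised without real market data.
def _demo_indicators():
    import numpy as np
    import pandas as pd
    size = 60
    close = np.cumsum(np.random.randn(size)) + 100
    demo = pd.DataFrame({
        "open": close + np.random.rand(size),
        "high": close + 1 + np.random.rand(size),
        "low": close - 1 - np.random.rand(size),
        "close": close,
        "volume": np.random.randint(1000, 5000, size),
    })
    return kdj(demo), rsi(demo, n=6), boll(demo, n=10), obv(demo)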
def plot_all(data, is_show=True, output=None):
import matplotlib.pyplot as plt
from pylab import rcParams
import numpy as np
rcParams['figure.figsize'] = 18, 50
plt.figure()
    # closing price
plt.subplot(20, 1, 1)
plt.plot(data["date"], data["close"], label="close")
plt.xlabel('date')
plt.ylabel('value')
plt.legend()
plt.xticks(rotation=90)
    # moving average (MA)
plt.subplot(20, 1, 2)
MA = ma(data, n=10)
plt.plot(data["date"], MA, label="MA(n=10)")
plt.plot(data["date"], data["close"], label="CLOSE PRICE")
plt.title("MA")
plt.xlabel('date')
plt.ylabel('value')
plt.legend()
plt.xticks(rotation=90)
    # moving standard deviation (MD)
n = 10
plt.subplot(20, 1, 3)
MD = md(data, n)
plt.plot(data["date"], MD, label="MD(n=10)")
plt.title("MD")
plt.xlabel('date')
plt.ylabel('value')
plt.legend()
plt.xticks(rotation=90)
    # exponential moving average (EMA)
plt.subplot(20, 1, 4)
EMA = ema(data, n)
    plt.plot(data["date"], EMA, label="EMA(n=10)")
plt.title("EMA")
plt.xlabel('date')
plt.ylabel('value')
plt.legend()
plt.xticks(rotation=90)
    # MACD (Moving Average Convergence Divergence)
plt.subplot(20, 1, 5)
OSC, DIFF, DEM = macd(data, n)
plt.plot(data["date"], OSC, label="OSC")
plt.plot(data["date"], DIFF, label="DIFF")
plt.plot(data["date"], DEM, label="DEM")
plt.title("MACD")
plt.xlabel('date')
plt.ylabel('value')
plt.legend()
plt.xticks(rotation=90)
    # stochastic oscillator (KDJ)
plt.subplot(20, 1, 6)
K, D, J = kdj(data)
plt.plot(data["date"], K, label="K")
plt.plot(data["date"], D, label="D")
plt.plot(data["date"], J, label="J")
plt.title("KDJ")
plt.xlabel('date')
plt.ylabel('value')
plt.legend()
plt.xticks(rotation=90)
    # relative strength index (RSI)
plt.subplot(20, 1, 7)
RSI6 = rsi(data, 6)
RSI12 = rsi(data, 12)
RSI24 = rsi(data, 24)
plt.plot(data["date"], RSI6, label="RSI(n=6)")
plt.plot(data["date"], RSI12, label="RSI(n=12)")
plt.plot(data["date"], RSI24, label="RSI(n=24)")
plt.title("RSI")
plt.xlabel('date')
plt.ylabel('value')
plt.legend()
plt.xticks(rotation=90)
    # Bollinger Bands (BOLL)
plt.subplot(20, 1, 8)
BOLL, UPPER, LOWER = boll(data)
plt.plot(data["date"], BOLL, label="BOLL(n=10)")
plt.plot(data["date"], UPPER, label="UPPER(n=10)")
plt.plot(data["date"], LOWER, label="LOWER(n=10)")
plt.plot(data["date"], data["close"], label="CLOSE PRICE")
plt.title("BOLL")
plt.xlabel('date')
plt.ylabel('value')
plt.legend()
plt.xticks(rotation=90)
    # Williams %R (W&R)
plt.subplot(20, 1, 9)
WNR = wnr(data, n=14)
plt.plot(data["date"], WNR, label="WNR(n=14)")
plt.title("WNR")
plt.xlabel('date')
plt.ylabel('value')
plt.legend()
plt.xticks(rotation=90)
    # directional movement index (DMI)
plt.subplot(20, 1, 10)
P_DI, M_DI, ADX, ADXR = dmi(data)
plt.plot(data["date"], P_DI, label="+DI(n=14)")
plt.plot(data["date"], M_DI, label="-DI(n=14)")
plt.plot(data["date"], ADX, label="ADX(m=14)")
plt.plot(data["date"], ADXR, label="ADXR(k=6)")
plt.title("DMI")
plt.xlabel('date')
plt.ylabel('value')
plt.legend()
plt.xticks(rotation=90)
    # bias ratio (BIAS)
plt.subplot(20, 1, 11)
BIAS = bias(data, n=5)
plt.plot(data["date"], BIAS, label="BIAS(n=5)")
plt.title("BIAS")
plt.xlabel('date')
plt.ylabel('value')
plt.legend()
plt.xticks(rotation=90)
    # accumulation swing index (ASI)
plt.subplot(20, 1, 12)
ASI = asi(data, n=5)
plt.plot(data["date"], ASI, label="ASI(n=5)")
plt.title("ASI")
plt.xlabel('date')
plt.ylabel('value')
plt.legend()
plt.xticks(rotation=90)
    # volume ratio (VR)
plt.subplot(20, 1, 13)
VR = vr(data, n=26)
plt.plot(data["date"], VR, label="VR(n=26)")
plt.title("VR")
plt.xlabel('date')
plt.ylabel('value')
plt.legend()
plt.xticks(rotation=90)
    # AR/BR indicators (ARBR)
plt.subplot(20, 1, 14)
AR, BR = arbr(data, n=26)
plt.plot(data["date"], AR, label="AR(n=26)")
plt.plot(data["date"], BR, label="BR(n=26)")
plt.title("ARBR")
plt.xlabel('date')
plt.ylabel('value')
plt.legend()
plt.xticks(rotation=90)
    # detrended price oscillator (DPO)
plt.subplot(20, 1, 15)
DPO, MADPO = dpo(data, n=20, m=6)
plt.plot(data["date"], DPO, label="DPO(n=20)")
plt.plot(data["date"], MADPO, label="MADPO(m=6)")
plt.title("DPO")
plt.xlabel('date')
plt.ylabel('value')
plt.legend()
plt.xticks(rotation=90)
    # triple exponentially smoothed average (TRIX)
plt.subplot(20, 1, 16)
TRIX, TRMA = trix(data, n=12, m=20)
    plt.plot(data["date"], TRIX, label="TRIX(n=12)")
    plt.plot(data["date"], TRMA, label="TRMA(m=20)")
plt.title("TRIX")
plt.xlabel('date')
plt.ylabel('value')
plt.legend()
plt.xticks(rotation=90)
    # bull and bear index (BBI)
plt.subplot(20, 1, 17)
BBI = bbi(data)
plt.plot(data["date"], BBI, label="BBI(3,6,12,24)")
plt.title("BBI")
plt.xlabel('date')
plt.ylabel('value')
plt.legend()
plt.xticks(rotation=90)
    # momentum (MTM)
plt.subplot(20, 1, 18)
MTM = mtm(data, n=6)
plt.plot(data["date"], MTM, label="MTM(n=6)")
plt.title("MTM")
plt.xlabel('date')
plt.ylabel('value')
plt.legend()
plt.xticks(rotation=90)
    # on balance volume (OBV)
plt.subplot(20, 1, 19)
OBV = obv(data)
plt.plot(data["date"], OBV, label="OBV")
plt.title("OBV")
plt.xlabel('date')
plt.ylabel('value')
plt.legend()
plt.xticks(rotation=90)
plt.tight_layout()
if is_show:
plt.show()
if output is not None:
plt.savefig(output) |
pyatv/protocols/mrp/protobuf/NowPlayingClient_pb2.py | Jacobs4/pyatv | 532 | 12743481 | <reponame>Jacobs4/pyatv
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: pyatv/protocols/mrp/protobuf/NowPlayingClient.proto
"""Generated protocol buffer code."""
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='pyatv/protocols/mrp/protobuf/NowPlayingClient.proto',
package='',
syntax='proto2',
serialized_options=None,
create_key=_descriptor._internal_create_key,
serialized_pb=b'\n3pyatv/protocols/mrp/protobuf/NowPlayingClient.proto\"\xe8\x01\n\x10NowPlayingClient\x12\x19\n\x11processIdentifier\x18\x01 \x01(\x05\x12\x18\n\x10\x62undleIdentifier\x18\x02 \x01(\t\x12)\n!parentApplicationBundleIdentifier\x18\x03 \x01(\t\x12\x1d\n\x15processUserIdentifier\x18\x04 \x01(\x05\x12\x1c\n\x14nowPlayingVisibility\x18\x05 \x01(\x05\x12\x13\n\x0b\x64isplayName\x18\x07 \x01(\t\x12\"\n\x1a\x62undleIdentifierHierarchys\x18\x08 \x03(\t'
)
_NOWPLAYINGCLIENT = _descriptor.Descriptor(
name='NowPlayingClient',
full_name='NowPlayingClient',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='processIdentifier', full_name='NowPlayingClient.processIdentifier', index=0,
number=1, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='bundleIdentifier', full_name='NowPlayingClient.bundleIdentifier', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='parentApplicationBundleIdentifier', full_name='NowPlayingClient.parentApplicationBundleIdentifier', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='processUserIdentifier', full_name='NowPlayingClient.processUserIdentifier', index=3,
number=4, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='nowPlayingVisibility', full_name='NowPlayingClient.nowPlayingVisibility', index=4,
number=5, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='displayName', full_name='NowPlayingClient.displayName', index=5,
number=7, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='bundleIdentifierHierarchys', full_name='NowPlayingClient.bundleIdentifierHierarchys', index=6,
number=8, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=56,
serialized_end=288,
)
DESCRIPTOR.message_types_by_name['NowPlayingClient'] = _NOWPLAYINGCLIENT
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
NowPlayingClient = _reflection.GeneratedProtocolMessageType('NowPlayingClient', (_message.Message,), {
'DESCRIPTOR' : _NOWPLAYINGCLIENT,
'__module__' : 'pyatv.protocols.mrp.protobuf.NowPlayingClient_pb2'
# @@protoc_insertion_point(class_scope:NowPlayingClient)
})
_sym_db.RegisterMessage(NowPlayingClient)
# @@protoc_insertion_point(module_scope)
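# Hypothetical usage sketch (added for illustration, not part of the generated
# file): builds and round-trips a NowPlayingClient message with the standard
# protobuf API; the identifier values are placeholders.
if __name__ == '__main__':
    client = NowPlayingClient()
    client.processIdentifier = 1234
    client.bundleIdentifier = 'com.example.player'
    payload = client.SerializeToString()
    print(NowPlayingClient.FromString(payload).bundleIdentifier)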
|
ansible/roles/test/files/ptftests/fg_ecmp_test.py | lolyu/sonic-mgmt | 132 | 12743489 | <reponame>lolyu/sonic-mgmt
# PTF test contains the test cases for fine grained ecmp, the scenarios of test are as follows:
# create_flows: Sends NUM_FLOWS flows with varying src_Ip and creates a tuple to port map
# initial_hash_check: Checks that the flows from create_flows still end up at the same port
# hash_check_warm_boot: Similar to initial_hash_check but run during warm boot; accounts for possible flooding during warm boot
# bank_check: Check that the flows end up on the same bank as before
# withdraw_nh: Withdraw a next-hop in one fg nhg bank, and make sure flows redistribute to the remaining ports in that fg nhg bank
# add_nh: Add a next-hop in one fg nhg bank, and make sure flows redistribute from ports in the same fg nhg bank to the added port
# withdraw_bank: Withdraw all next-hops which constitute a bank, and make sure that flows migrate to using the other bank
# add_first_nh: Add the 1st next-hop back from the previously withdrawn bank, and make sure that some flows migrate back to using that next-hop in the old bank
# net_port_hashing: Verify hashing of packets to the T1 (network) ports for traffic originating from the server
import ipaddress
import logging
import random
import time
import os
import json
import ptf
import ptf.packet as scapy
from ptf.base_tests import BaseTest
from ptf.mask import Mask
import ptf.testutils as testutils
from ptf.testutils import *
import lpm
IPV4_SRC_IP_RANGE = ['192.168.3.11', '192.168.127.12']
IPV6_SRC_IP_RANGE = ['20D0:A800:0:00::', 'fd00:a516:7c1b:17cd:6d81:2137:bd2a:2c5b']
PERSIST_MAP = '/tmp/fg_ecmp_persist_map.json'
MAX_ONE_PERCENT_LOSS = 0.01
def verify_packet_warm(test, pkt, port, device_number=0, timeout=None, n_timeout=None):
# This packet verification function accounts for possible flood during warm boot
# We ensure that packets are received on the expected port, and return a special
    # return value of -1 to denote that a flood had occurred. The caller can use the
# special return value to identify how many packets were flooded.
if timeout is None:
timeout = ptf.ptfutils.default_timeout
if n_timeout is None:
n_timeout = ptf.ptfutils.default_negative_timeout
logging.debug("Checking for pkt on device %d, port %r", device_number, port)
result = dp_poll(test, device_number=device_number, timeout=timeout, exp_pkt=pkt)
verify_no_other_packets(test, device_number=device_number, timeout=n_timeout)
if isinstance(result, test.dataplane.PollSuccess):
if result.port != port:
# Flood case, check if packet rcvd on expected port as well
verify_packet(test, pkt, port)
return (-1, None)
else:
return (port, result.packet)
assert(isinstance(result, test.dataplane.PollFailure))
    test.fail("Did not receive expected packet on port %r for device %d.\n%s"
              % (port, device_number, result.format()))
return (0, None)
def verify_packet_any_port_lossy(test, pkt, ports=[], device_number=0, timeout=None, n_timeout=None):
# This packet verification function accounts for possible loss of packet due to route table change
# We ensure that packets are received on the expected ports, and return a special
    # return value of -1 to denote that a packet loss occurred. The caller can use the
# special return value to identify how many packets were lost and check if loss is within acceptable range
if timeout is None:
timeout = ptf.ptfutils.default_timeout
if n_timeout is None:
n_timeout = ptf.ptfutils.default_negative_timeout
logging.debug("Checking for pkt on device %d, port %r", device_number, ports)
result = dp_poll(test, device_number=device_number, timeout=timeout, exp_pkt=pkt)
verify_no_other_packets(test, device_number=device_number, timeout=n_timeout)
if isinstance(result, test.dataplane.PollSuccess):
if result.port in ports:
return (ports.index(result.port), result.packet)
else:
test.fail(
"Received expected packet on port %r for device %d, but "
"it should have arrived on one of these ports: %r.\n%s"
% (result.port, device_number, ports, result.format())
)
return (0, None)
if isinstance(result, test.dataplane.PollFailure):
return (-1, None)
return (0, None)
class FgEcmpTest(BaseTest):
def __init__(self):
'''
@summary: constructor
'''
BaseTest.__init__(self)
self.test_params = test_params_get()
#---------------------------------------------------------------------
def log(self, message):
logging.info(message)
def trigger_mac_learning(self, serv_ports):
for src_port in serv_ports:
pkt = simple_eth_packet(
eth_dst=self.router_mac,
eth_src=self.dataplane.get_mac(0, src_port),
eth_type=0x1234)
send_packet(self, src_port, pkt)
def setUp(self):
'''
@summary: Setup for the test
'''
self.dataplane = ptf.dataplane_instance
self.test_params = testutils.test_params_get()
self.max_deviation = 0.25
if 'test_case' in self.test_params:
self.test_case = self.test_params['test_case']
else:
raise Exception("Need a test case param")
if self.test_case == 'withdraw_nh':
self.withdraw_nh_port = self.test_params['withdraw_nh_port']
elif self.test_case == 'add_nh':
self.add_nh_port = self.test_params['add_nh_port']
elif self.test_case == 'withdraw_bank':
self.withdraw_nh_bank = self.test_params['withdraw_nh_bank']
elif self.test_case == 'add_first_nh':
self.first_nh = self.test_params['first_nh']
if 'config_file' not in self.test_params:
raise Exception("required parameter 'config_file' is not present")
config = self.test_params['config_file']
if 'exp_flow_count' not in self.test_params:
raise Exception("required parameter 'exp_flow_count' is not present")
self.exp_flow_count = self.test_params['exp_flow_count']
if 'dst_ip' not in self.test_params:
raise Exception("required parameter 'dst_ip' is not present")
self.dst_ip = self.test_params['dst_ip']
if not os.path.isfile(config):
raise Exception("the config file %s doesn't exist" % config)
with open(config) as fp:
graph = json.load(fp)
self.net_ports = graph['net_ports']
self.serv_ports = graph['serv_ports']
self.exp_port_set_one = graph['bank_0_port']
self.exp_port_set_two = graph['bank_1_port']
self.router_mac = graph['dut_mac']
self.num_flows = graph['num_flows']
self.inner_hashing = graph['inner_hashing']
self.src_ipv4_interval = lpm.LpmDict.IpInterval(ipaddress.ip_address(unicode(IPV4_SRC_IP_RANGE[0])), ipaddress.ip_address(unicode(IPV4_SRC_IP_RANGE[1])))
self.src_ipv6_interval = lpm.LpmDict.IpInterval(ipaddress.ip_address(unicode(IPV6_SRC_IP_RANGE[0])), ipaddress.ip_address(unicode(IPV6_SRC_IP_RANGE[1])))
self.vxlan_port = graph['vxlan_port']
self.log(self.net_ports)
self.log(self.serv_ports)
self.log(self.exp_port_set_one)
self.log(self.exp_port_set_two)
self.log(self.dst_ip)
self.log(self.router_mac)
self.log(self.test_case)
self.log(self.num_flows)
self.log(self.inner_hashing)
self.log(self.exp_flow_count)
self.log(self.vxlan_port)
if self.test_case != 'hash_check_warm_boot':
# We send bi-directional traffic during warm boot due to
# fdb clear, so no need to trigger mac learning
# during warm boot.
self.trigger_mac_learning(self.serv_ports)
time.sleep(3)
#---------------------------------------------------------------------
def test_balancing(self, hit_count_map):
for port, exp_flows in self.exp_flow_count.items():
assert port in hit_count_map
num_flows = hit_count_map[port]
deviation = float(num_flows)/float(exp_flows)
deviation = abs(1-deviation)
self.log("port "+ str(port) + " exp_flows " + str(exp_flows) +
" num_flows " + str(num_flows) + " deviation " + str(deviation))
assert deviation <= self.max_deviation
def fg_ecmp(self):
ipv4 = isinstance(ipaddress.ip_address(self.dst_ip.decode('utf8')),
ipaddress.IPv4Address)
# initialize all parameters
if self.inner_hashing:
dst_ip = '5.5.5.5'
else:
dst_ip = self.dst_ip
src_port = 20000
dst_port = 30000
tuple_to_port_map ={}
hit_count_map = {}
if not os.path.exists(PERSIST_MAP) and self.test_case == 'create_flows':
with open(PERSIST_MAP, 'w'): pass
elif not self.test_case == 'verify_packets_received':
with open(PERSIST_MAP) as fp:
try:
tuple_to_port_map = json.load(fp)
except ValueError:
print 'Decoding JSON failed for persist map'
assert False
if tuple_to_port_map is None or self.dst_ip not in tuple_to_port_map:
tuple_to_port_map[self.dst_ip] = {}
if self.test_case == 'create_flows':
# Send packets with varying src_ips to create NUM_FLOWS unique flows
# and generate a flow to port map
self.log("Creating flow to port map ...")
for i in range(0, self.num_flows):
if ipv4 or self.inner_hashing:
src_ip = self.src_ipv4_interval.get_random_ip()
else:
src_ip = self.src_ipv6_interval.get_random_ip()
if self.inner_hashing:
in_port = random.choice(self.net_ports)
else:
in_port = self.net_ports[0]
(port_idx, _) = self.send_rcv_ip_pkt(
in_port, src_port, dst_port, src_ip, dst_ip, self.serv_ports, ipv4)
hit_count_map[port_idx] = hit_count_map.get(port_idx, 0) + 1
tuple_to_port_map[self.dst_ip][src_ip] = port_idx
elif self.test_case == 'initial_hash_check':
self.log("Ensure that flow to port map is maintained when the same flow is re-sent...")
for src_ip, port in tuple_to_port_map[self.dst_ip].iteritems():
if self.inner_hashing:
in_port = random.choice(self.net_ports)
else:
in_port = self.net_ports[0]
(port_idx, _) = self.send_rcv_ip_pkt(
in_port, src_port, dst_port, src_ip, dst_ip, self.serv_ports, ipv4)
assert port_idx == port
tuple_to_port_map[self.dst_ip][src_ip] = port_idx
return
elif self.test_case == 'hash_check_warm_boot':
self.log("Ensure that flow to port map is maintained when the same flow is re-sent...")
total_flood_pkts = 0
for src_ip, port in tuple_to_port_map[self.dst_ip].iteritems():
if self.inner_hashing:
in_port = random.choice(self.net_ports)
else:
in_port = self.net_ports[0]
(port_idx, _) = self.send_rcv_ip_pkt_warm(
in_port, src_port, dst_port, src_ip, dst_ip, port, ipv4)
if port_idx == -1:
total_flood_pkts = total_flood_pkts + 1
# Ensure that flooding duration in warm reboot is less than 10% of total packet count
self.log("Number of flood packets were: " + str(total_flood_pkts))
assert (total_flood_pkts < (0.1 * len(tuple_to_port_map[self.dst_ip])))
return
elif self.test_case == 'verify_packets_received':
self.log("Ensure that all packets were received ...")
total_num_pkts_lost = 0
for i in range(0, self.num_flows):
if ipv4 or self.inner_hashing:
src_ip = self.src_ipv4_interval.get_random_ip()
else:
src_ip = self.src_ipv6_interval.get_random_ip()
if self.inner_hashing:
in_port = random.choice(self.net_ports)
else:
in_port = self.net_ports[0]
(port_idx, _) = self.send_rcv_ip_pkt_lossy(
in_port, src_port, dst_port, src_ip, dst_ip, self.serv_ports, ipv4)
if port_idx == -1:
total_num_pkts_lost = total_num_pkts_lost + 1
self.log("Number of lost packets were: " + str(total_num_pkts_lost))
# Ensure less than 1% packet loss
assert (total_num_pkts_lost < (MAX_ONE_PERCENT_LOSS * self.num_flows))
return
elif self.test_case == 'bank_check':
self.log("Send the same flows once again and verify that they end up on the same bank...")
for src_ip, port in tuple_to_port_map[self.dst_ip].iteritems():
if self.inner_hashing:
in_port = random.choice(self.net_ports)
else:
in_port = self.net_ports[0]
(port_idx, _) = self.send_rcv_ip_pkt(
in_port, src_port, dst_port, src_ip, dst_ip, self.serv_ports, ipv4)
if port in self.exp_port_set_one:
assert port_idx in self.exp_port_set_one
if port in self.exp_port_set_two:
assert port_idx in self.exp_port_set_two
hit_count_map[port_idx] = hit_count_map.get(port_idx, 0) + 1
tuple_to_port_map[self.dst_ip][src_ip] = port_idx
elif self.test_case == 'withdraw_nh':
self.log("Withdraw next-hop " + str(self.withdraw_nh_port) + " and ensure hash redistribution within correct bank")
if self.withdraw_nh_port in self.exp_port_set_one:
withdraw_port_grp = self.exp_port_set_one
else:
withdraw_port_grp = self.exp_port_set_two
for src_ip, port in tuple_to_port_map[self.dst_ip].iteritems():
if self.inner_hashing:
in_port = random.choice(self.net_ports)
else:
in_port = self.net_ports[0]
(port_idx, _) = self.send_rcv_ip_pkt(
in_port, src_port, dst_port, src_ip, dst_ip, self.serv_ports, ipv4)
hit_count_map[port_idx] = hit_count_map.get(port_idx, 0) + 1
assert port_idx != self.withdraw_nh_port
if port == self.withdraw_nh_port:
assert port_idx != self.withdraw_nh_port
assert (port_idx in withdraw_port_grp)
tuple_to_port_map[self.dst_ip][src_ip] = port_idx
else:
assert port_idx == port
elif self.test_case == 'add_nh':
self.log("Add next-hop " + str(self.add_nh_port) + " and ensure hash redistribution within correct bank")
if self.add_nh_port in self.exp_port_set_one:
add_port_grp = self.exp_port_set_one
else:
add_port_grp = self.exp_port_set_two
for src_ip, port in tuple_to_port_map[self.dst_ip].iteritems():
if self.inner_hashing:
in_port = random.choice(self.net_ports)
else:
in_port = self.net_ports[0]
(port_idx, _) = self.send_rcv_ip_pkt(
in_port, src_port, dst_port, src_ip, dst_ip, self.serv_ports, ipv4)
hit_count_map[port_idx] = hit_count_map.get(port_idx, 0) + 1
if port_idx == self.add_nh_port:
assert (port in add_port_grp)
tuple_to_port_map[self.dst_ip][src_ip] = port_idx
else:
assert port_idx == port
elif self.test_case == 'withdraw_bank':
self.log("Withdraw bank " + str(self.withdraw_nh_bank) + " and ensure hash redistribution is as expected")
if self.withdraw_nh_bank[0] in self.exp_port_set_one:
active_port_grp = self.exp_port_set_two
else:
active_port_grp = self.exp_port_set_one
for src_ip, port in tuple_to_port_map[self.dst_ip].iteritems():
if self.inner_hashing:
in_port = random.choice(self.net_ports)
else:
in_port = self.net_ports[0]
(port_idx, _) = self.send_rcv_ip_pkt(
in_port, src_port, dst_port, src_ip, dst_ip, self.serv_ports, ipv4)
hit_count_map[port_idx] = hit_count_map.get(port_idx, 0) + 1
if port in self.withdraw_nh_bank:
assert (port_idx in active_port_grp)
tuple_to_port_map[self.dst_ip][src_ip] = port_idx
else:
assert port_idx == port
elif self.test_case == 'add_first_nh':
self.log("Add 1st next-hop " + str(self.first_nh) + " and ensure hash redistribution is as expected")
if self.first_nh in self.exp_port_set_one:
active_port_grp = self.exp_port_set_two
else:
active_port_grp = self.exp_port_set_one
for src_ip, port in tuple_to_port_map[self.dst_ip].iteritems():
if self.inner_hashing:
in_port = random.choice(self.net_ports)
else:
in_port = self.net_ports[0]
(port_idx, _) = self.send_rcv_ip_pkt(
in_port, src_port, dst_port, src_ip, dst_ip, self.serv_ports, ipv4)
hit_count_map[port_idx] = hit_count_map.get(port_idx, 0) + 1
flow_redistribution_in_correct_grp = False
if port_idx in active_port_grp:
assert port_idx == port
flow_redistribution_in_correct_grp = True
elif port_idx == self.first_nh:
flow_redistribution_in_correct_grp = True
tuple_to_port_map[self.dst_ip][src_ip] = port_idx
assert flow_redistribution_in_correct_grp == True
elif self.test_case == 'net_port_hashing':
self.log("Send packets destined to network ports and ensure hash distribution is as expected")
for src_ip, port in tuple_to_port_map[self.dst_ip].iteritems():
if self.inner_hashing:
in_port = random.choice(self.serv_ports)
else:
in_port = self.serv_ports[0]
(port_idx, _) = self.send_rcv_ip_pkt(
in_port, src_port, dst_port, src_ip, dst_ip, self.net_ports, ipv4)
hit_count_map[port_idx] = hit_count_map.get(port_idx, 0) + 1
self.test_balancing(hit_count_map)
return
else:
self.log("Unsupported testcase " + self.test_case)
return
self.test_balancing(hit_count_map)
json.dump(tuple_to_port_map, open(PERSIST_MAP,"w"))
return
def send_rcv_ip_pkt_lossy(self, in_port, sport, dport, src_ip_addr, dst_ip_addr,
exp_port, ipv4=True):
if ipv4:
(matched_index, received) = self.send_rcv_ipv4_pkt(in_port, sport, dport,
src_ip_addr, dst_ip_addr, exp_port, verify_packet_any_port_lossy)
else:
(matched_index, received) = self.send_rcv_ipv6_pkt(in_port, sport, dport,
src_ip_addr, dst_ip_addr, exp_port, verify_packet_any_port_lossy)
return (matched_index, received)
def send_rcv_ip_pkt_warm(self, in_port, sport, dport, src_ip_addr, dst_ip_addr,
exp_port, ipv4=True):
# Simulate bidirectional traffic for mac learning, since mac learning(fdb) is flushed
# as part of warm reboot
self.trigger_mac_learning([exp_port])
if ipv4:
(matched_index, received) = self.send_rcv_ipv4_pkt(in_port, sport, dport,
src_ip_addr, dst_ip_addr, exp_port, verify_packet_warm)
else:
(matched_index, received) = self.send_rcv_ipv6_pkt(in_port, sport, dport,
src_ip_addr, dst_ip_addr, exp_port, verify_packet_warm)
return (matched_index, received)
def send_rcv_ip_pkt(self, in_port, sport, dport, src_ip_addr, dst_ip_addr,
dst_port_list, ipv4=True):
if ipv4:
(matched_index, received) = self.send_rcv_ipv4_pkt(in_port, sport, dport,
src_ip_addr, dst_ip_addr, dst_port_list, verify_packet_any_port)
else:
(matched_index, received) = self.send_rcv_ipv6_pkt(in_port, sport, dport,
src_ip_addr, dst_ip_addr, dst_port_list, verify_packet_any_port)
assert received
matched_port = dst_port_list[matched_index]
logging.info("Received packet at " + str(matched_port))
return (matched_port, received)
def send_rcv_ipv4_pkt(self, in_port, sport, dport,
ip_src, ip_dst, dst_port_list, verify_fn):
src_mac = self.dataplane.get_mac(0, in_port)
rand_int = random.randint(1, 254)
pkt = simple_tcp_packet(
eth_dst=self.router_mac,
eth_src=src_mac,
ip_src=ip_src,
ip_dst=ip_dst,
tcp_sport=sport,
tcp_dport=dport,
ip_ttl=64)
if self.inner_hashing:
pkt = simple_vxlan_packet(
eth_dst=self.router_mac,
eth_src=src_mac,
ip_id=0,
ip_src='2.2.2.' + str(rand_int),
ip_dst=self.dst_ip,
ip_ttl=64,
udp_sport=rand_int,
udp_dport=self.vxlan_port,
vxlan_vni=20000+rand_int,
with_udp_chksum=False,
inner_frame=pkt)
send_packet(self, in_port, pkt)
masked_exp_pkt = Mask(pkt)
masked_exp_pkt.set_do_not_care_scapy(scapy.Ether, "dst")
masked_exp_pkt.set_do_not_care_scapy(scapy.Ether, "src")
masked_exp_pkt.set_do_not_care_scapy(scapy.IP, "chksum")
masked_exp_pkt.set_do_not_care_scapy(scapy.IP, "ttl")
return verify_fn(self, masked_exp_pkt, dst_port_list)
def send_rcv_ipv6_pkt(self, in_port, sport, dport,
ip_src, ip_dst, dst_port_list, verify_fn):
src_mac = self.dataplane.get_mac(0, in_port)
rand_int = random.randint(1, 254)
if self.inner_hashing:
pkt = simple_tcp_packet(
eth_dst=self.router_mac,
eth_src=src_mac,
ip_src=ip_src,
ip_dst=ip_dst,
tcp_sport=sport,
tcp_dport=dport,
ip_ttl=64)
pkt = simple_vxlanv6_packet(
eth_dst=self.router_mac,
eth_src=src_mac,
ipv6_src='2:2:2::' + str(rand_int),
ipv6_dst=self.dst_ip,
udp_sport=rand_int,
udp_dport=self.vxlan_port,
vxlan_vni=20000+rand_int,
with_udp_chksum=False,
inner_frame=pkt)
else:
pkt = simple_tcpv6_packet(
eth_dst=self.router_mac,
eth_src=src_mac,
ipv6_dst=ip_dst,
ipv6_src=ip_src,
tcp_sport=sport,
tcp_dport=dport,
ipv6_hlim=64)
send_packet(self, in_port, pkt)
masked_exp_pkt = Mask(pkt)
masked_exp_pkt.set_do_not_care_scapy(scapy.Ether, "dst")
masked_exp_pkt.set_do_not_care_scapy(scapy.Ether, "src")
masked_exp_pkt.set_do_not_care_scapy(scapy.IPv6, "hlim")
        return verify_fn(self, masked_exp_pkt, dst_port_list)
def runTest(self):
# Main function which triggers all the tests
self.fg_ecmp()
|
jesse/models/__init__.py | noenfugler/jesse | 3,999 | 12743492 | <reponame>noenfugler/jesse
from .Candle import Candle
from .CompletedTrade import CompletedTrade
from .Exchange import Exchange
from .FuturesExchange import FuturesExchange
from .Order import Order
from .Position import Position
from .Route import Route
from .SpotExchange import SpotExchange
from .Ticker import Ticker
from .utils import store_candle_into_db, store_ticker_into_db, store_trade_into_db, store_orderbook_into_db
|
lldb/third_party/Python/module/unittest2/unittest2/collector.py | medismailben/llvm-project | 2,338 | 12743511 | <gh_stars>1000+
import os
import sys
from unittest2.loader import defaultTestLoader
def collector():
# import __main__ triggers code re-execution
__main__ = sys.modules['__main__']
setupDir = os.path.abspath(os.path.dirname(__main__.__file__))
return defaultTestLoader.discover(setupDir)
|
Python/antivirus_type_utility.py | PushpneetSingh/Hello-world | 1,428 | 12743518 | <reponame>PushpneetSingh/Hello-world
import os
import hashlib
import time
from sys import exit
dirName=os.path.dirname(os.path.realpath(__file__))
yesAnswers=['yes','Yes','YES','Y','y','']
noAnswers=['No','no','n','N','NO']
ENILfilePath=''
def getListOfFiles(dirName):
# create a list of file and sub directories
# names in the given directory
listOfFile = os.listdir(dirName)
allFiles = list()
# Iterate over all the entries
for entry in listOfFile:
# Create full path
fullPath = os.path.join(dirName, entry)
# If entry is a directory then get the list of files in this directory
if os.path.isdir(fullPath):
allFiles = allFiles + getListOfFiles(fullPath)
else:
allFiles.append(fullPath)
    # remove the .enil hash-store file from allFiles, as its hash will never match the cached hash
for entry in allFiles:
if entry[-5:] == ".enil":
proxy=allFiles.index(entry)
ENILfilePath=allFiles.pop(proxy)
return allFiles
def encrypterString(unencryptedString):
#takes any string and converts it into an encrypted string based on cipherSource
CSVstring=''
cipherSource="abCDlm:nfcde)istuxyzv-UVWjkBghGYoEFpq+rw*1(2H89\\0.~53K LIMQ_T467RSNOP=/AZ;"
length=len(unencryptedString)
proxy=""
for char in range(0,length):
indexNum=cipherSource.index(str(unencryptedString[char]))+1
proxy=proxy+cipherSource[indexNum]
CSVstring=proxy+","
correctedCSVstring=CSVstring[0:-1]
return correctedCSVstring
allFiles=getListOfFiles(dirName)
def encrypterList(unencryptedList):
#Takes a list of strings and returns a comma separated string of encrypted strings from the list
outputCSVstring=''
for file_name in unencryptedList:
proxy=encrypterString(file_name)
outputCSVstring=outputCSVstring+','+proxy
correctedOutputCSVstring=outputCSVstring[1:]
return(correctedOutputCSVstring)
def decrypterString(CSVstring):
#same as encrypter string but decrypts
outputString=''
cipherSource="abCDlm:nfcde)istuxyzv-UVWjkBghGYoEFpq+rw*1(2H89\\0.~53K LIMQ_T467RSNOP=/AZ;"
length=len(CSVstring)
proxy=""
for char in range(0,length):
if CSVstring[char]==",":
proxy=proxy+","
else:
indexNum=cipherSource.index(str(CSVstring[char]))-1
proxy=proxy+cipherSource[indexNum]
outputString=proxy+","
return outputString
def decrypterList(encryptedList):
#same as encrypterList but decrypts
outputString=''
for encrypted_item in encryptedList:
proxy=decrypterString(encrypted_item)
outputString=outputString+','+proxy
correctedOutputCSVstring=outputString[1:]
return(correctedOutputCSVstring)
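# Illustrative sketch (not called anywhere in this script): round-tripping a value
# through the substitution cipher above. decrypterString() appends a trailing comma,
# so it is stripped before comparing; the sample value is an arbitrary assumption.
def _cipher_round_trip_demo(sample='hello.txt'):
    encrypted = encrypterString(sample)
    # drop the trailing comma that decrypterString() always appends
    decrypted = decrypterString(encrypted)[:-1]
    return decrypted == sample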
def storeCreator(hashesDict, masterHash):
#creating the text for the enil file
tempFiles=list(hashesDict.keys())
tempHashes=list(hashesDict.values())
#preparing the CSV string of files in a ciphered way
files=""
files=encrypterList(tempFiles)
files=files+"\n"
#preparing CSV string of hashes in a ciphered way
hashes=""
hashes=encrypterList(tempHashes)
hashes=hashes+"\n"
#preparing masterHash in a ciphered way
masterHash=encrypterString(masterHash)
return(files,hashes,masterHash)
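# Layout of the .enil hash-store file (a sketch inferred from storeCreator above and
# noENILfile/ENILfileUpdate below): three lines, written in this order:
#   1) the file paths, each passed through encrypterString and joined with commas
#   2) the matching SHA-256 hex digests, encrypted and joined the same way
#   3) the encrypted master hash (SHA-256 over the concatenated per-file digests)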
def dictcreator(allFiles):
    # creates a dictionary of filePath: hash-of-file, plus a hash of the concatenation of all per-file hashes (called the masterHash)
hashesDict={}
masterHash=""
    for filename in allFiles:
        # use a fresh hash object per file so each entry is the digest of that file alone
        sha256_hash = hashlib.sha256()
        with open(filename,"rb") as f:
            # Read and update hash string value in blocks of 4K
            for byte_block in iter(lambda: f.read(4096),b""):
                sha256_hash.update(byte_block)
            g=sha256_hash.hexdigest()
            hashesDict[filename]=g
masterHash=masterHash+g
hash_object=hashlib.sha256(masterHash.encode())
masterHash=hash_object.hexdigest()
return(hashesDict,masterHash)
# dictcreator returns (hashesDict, masterHash); storeCreator above returns their encrypted string form (files, hashes, masterHash)
def noENILfile():
    # should run when there is no ENIL file.
    # creates an ENIL file with the following encrypted entries: 1) all file paths, 2) all file hashes, 3) the master hash
allFiles=getListOfFiles(dirName)
hashesDict,masterHash=dictcreator(allFiles)
files,hashes,masterHash=storeCreator(hashesDict,masterHash)
with open("hashstore.enil","w") as f:
f.write(files)
f.write(hashes)
f.write(masterHash)
f.close()
print("Hash checker 'ENIL' file did not exist previously so one has been created")
time.sleep(2)
exit()
def ENILfileFound():
#should run when an ENIL file is found
#reads the enil file and decrypts and returns files,hashes and masterHash
with open('hashstore.enil','r') as f:
sums=f.readlines()
sums =[x.strip() for x in sums]
f.close()
files=sums[0]
hashes=sums[1]
masterHash=sums[2]
files=str(decrypterString(files))[0:-1]
files=files.split(',')
hashes=str(decrypterString(hashes))[0:-1]
hashes=hashes.split(',')
masterHashFromENIL=decrypterString(masterHash)
hashesDictFromENIL={}
if len(files)==len(hashes):
for n in range(len(files)):
hashesDictFromENIL[files[n]]=hashes[n]
return(hashesDictFromENIL,masterHashFromENIL)
def ENILfileUpdate():
    # should run only after checking with the user
    # overwrites the previous ENIL file with updated values
aF=getListOfFiles(dirName)
has,mas=dictcreator(aF)
fil,hashes,maste=storeCreator(has,mas)
with open("hashstore.enil","w") as f:
f.write(fil)
f.write(hashes)
f.write(maste)
f.close()
print("Hash checker 'ENIL' file has been updated")
def checkForDeletions(CurrentFiles,CacheFiles):
    # checks the two file lists to see if any file from the old list was deleted, i.e. is not in the current list
deletedFiles=[]
for file in CacheFiles:
if file in CurrentFiles:
nono=0
else:
deletedFiles.append(file)
return(deletedFiles)
def checkForAdditions(CurrentFiles,CacheFiles):
    # checks to see if any file was added, i.e. a file that is in the new list but was not in the old list
addedFiles=[]
for file in CurrentFiles:
if file in CacheFiles:
continue
else:
addedFiles.append(file)
return(addedFiles)
def deleteFiles(addedFiles):
    # allows the user to manually delete files they haven't added or modified, directly through the command line
filePath=input('Copy the path of the file you want to delete from the list above and paste it here(one at a time):')
if filePath in addedFiles:
os.remove(filePath)
else:
        print(filePath+" isn't a file path that was recently added. ",end="")
time.sleep(4)
retryResponse=input('Would you like to try again?(y/n)')
if retryResponse in yesAnswers:
deleteFiles(addedFiles)
anotherOne=input('Would you like to add another file?(y/n)')
if anotherOne in yesAnswers:
deleteFiles(addedFiles)
return
def UserCheckAdded(addedFiles):
    # allows the user to go through each added file to see if there's a file that they didn't add
confirmation=input('Were some of the added files not added by you? Would you like to remove one or more of the added files?(y/n)\n')
if confirmation in yesAnswers:
print('The following is a list of all files that have been added since last run time:')
time.sleep(2)
for file in addedFiles:
print(file)
time.sleep(1.5)
print("If any of these files was not added by you and you suspect the file of being malicious you should delete the file immediately.")
time.sleep(2.5)
maliciousFileChecker=input('Would you like to delete files not added by you?(y/n)')
if maliciousFileChecker in yesAnswers:
deleteFiles(addedFiles)
else: return(0)
def antiModChecker(hashesDictFromENIL,masterHashFromENIL):
allFiles=getListOfFiles(dirName)
hashesDict,masterHash=dictcreator(allFiles)
masterHashFromENIL=masterHashFromENIL[:-1]
#check that masterHash is same
if masterHash==masterHashFromENIL:
print('Files have not been modified.')
time.sleep(1)
print('Integrity of all files is maintained. Virus checker will now close')
time.sleep(4)
#exits program
exit()
else:
CurrentFiles=list(hashesDict.keys())
CacheFiles=list(hashesDictFromENIL.keys())
#check for file additions and deletions
addedFiles=checkForAdditions(CurrentFiles,CacheFiles)
deletedFiles=checkForDeletions(CurrentFiles,CacheFiles)
if len(addedFiles)==0 and len(deletedFiles)==0:
print("No files have been added or deleted")
time.sleep(3)
else:
if len(deletedFiles)!=0:
print("The following files have been deleted:")
time.sleep(2)
for file in deletedFiles:
print(file)
time.sleep(0.5)
garbage=input('press enter to continue')
if len(addedFiles)!=0:
print("\nThe following files have been added:")
time.sleep(2)
for file in addedFiles:
print(file)
time.sleep(0.5)
garbage=input('press enter to continue')
#Make sure that the added files were added by the user:
UserCheckAdded(addedFiles)
#check the hashes
#only need to check hash of files that are currently in folders
verified=[]
modifie=[]
print('\nVerifying file integrity of old files',end='')
time.sleep(1.5)
print('.',end='')
time.sleep(1.5)
print('.',end='')
time.sleep(1.5)
print('.')
time.sleep(2)
for file in CurrentFiles:
if file in addedFiles:
print(file)
else:
if hashesDict[file]==hashesDictFromENIL[file]:
verified.append(file)
continue
else:
modifie.append(file)
        print('\nThe following files have not been modified and their integrity is guaranteed:')
time.sleep(1)
for file in verified:
print(file)
garbage=input('press enter to continue')
        print('\nThe following files have been modified, so they may have been infected with a virus:')
time.sleep(1)
for file in modifie:
print(file)
garbage=input('press enter to continue')
        print('Note: Only update the cache if you are confident that the integrity of all files is intact.')
time.sleep(4)
updateConfirmation=input('Would you like to update the cache of file integrity keys?(y/n)')
if updateConfirmation in yesAnswers:
print('Virus check will close soon after updating',end='')
time.sleep(0.5)
print('.',end='')
time.sleep(0.5)
print('.',end='')
time.sleep(0.5)
print('.',end='')
time.sleep(2.5)
ENILfileUpdate()
time.sleep(4)
exit()
else:
            print('Not updating the cache file means the integrity of recently added or modified files cannot be verified the next time you run this file.')
time.sleep(2)
            reconfirmation=input("Are you sure you don't want to update files?")
if reconfirmation in yesAnswers:
print('Virus checker will now close' )
time.sleep(2)
exit()
###Logics Start here
ENILtester=os.listdir(dirName)
ENILpresent=False
for entry in ENILtester:
if entry[-5:] == ".enil":
ENILpresent=True
if ENILpresent:
hashesDictFromENIL,masterHashFromENIL= ENILfileFound()
antiModChecker(hashesDictFromENIL,masterHashFromENIL)
else:
noENILfile()
|
python_web/models/post.py | LouisYZK/Frodo | 123 | 12743524 | <filename>python_web/models/post.py
import re
import ast
import types
import random
import inspect
from datetime import datetime, timedelta
from aioredis.errors import RedisError
from html.parser import HTMLParser
from sqlalchemy import Column, SmallInteger, String, Integer, Boolean, DateTime
from sqlalchemy.sql import func
from .base import Base, BaseModel, ModelMeta
from .mc import cache, clear_mc
from .user import User
from .utils import trunc_utf8
from .comment import CommentMixin
from .react import ReactMixin
from .markdown import markdown, toc, toc_md, MLStripper
from . import schemas
import config
MC_KEY_TAGS_BY_POST_ID = 'post:%s:tags'
MC_KEY_RELATED = 'post:related_posts:%s:limit:%s'
MC_KEY_POST_BY_SLUG = 'post:%s:slug'
MC_KEY_ALL_POSTS = 'core:posts:%s:v2'
MC_KEY_FEED = 'core:feed'
MC_KEY_SITEMAP = 'core:sitemap'
MC_KEY_SEARCH = 'core:search.json'
MC_KEY_ARCHIVES = 'core:archives'
MC_KEY_ARCHIVE = 'core:archive:%s'
MC_KEY_TAGS = 'core:tags'
MC_KEY_TAG = 'core:tag:%s'
MC_KEY_SPECIAL_ITEMS = 'special:%s:items'
MC_KEY_SPECIAL_POST_ITEMS = 'special:%s:post_items'
MC_KEY_SPECIAL_BY_PID = 'special:by_pid:%s'
MC_KEY_SPECIAL_BY_SLUG = 'special:%s:slug'
MC_KEY_ALL_SPECIAL_TOPICS = 'special:topics'
RK_PAGEVIEW = 'frodo:pageview:{}:v2'
RK_ALL_POST_IDS = 'frodo:all_post_ids'
RK_VISITED_POST_IDS = 'frodo:visited_post_ids'
BQ_REGEX = re.compile(r'<blockquote>.*?</blockquote>')
PAGEVIEW_FIELD = 'pv'
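# A note on the key templates above (inferred from their use below): the MC_KEY_*
# strings are memcached key templates whose '%s' slots are filled when passed to the
# @cache decorator, e.g. MC_KEY_RELATED % ('{self.id}', '{limit}') yields
# 'post:related_posts:{self.id}:limit:{limit}', which the decorator presumably expands
# per call (e.g. 'post:related_posts:42:limit:4'). The RK_* strings are Redis keys,
# e.g. RK_PAGEVIEW.format(post_id), whose hash field PAGEVIEW_FIELD holds the count.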
class Post(BaseModel, CommentMixin, ReactMixin):
STATUSES = (
STATUS_UNPUBLISHED,
STATUS_ONLINE
) = range(2)
status = Column(SmallInteger(), default=STATUS_UNPUBLISHED)
(TYPE_ARTICLE, TYPE_PAGE) = range(2)
created_at = Column(DateTime, server_default=func.now(), nullable=False)
title = Column(String(100), unique=True)
author_id = Column(Integer())
slug = Column(String(100))
summary = Column(String(255))
can_comment = Column(Boolean(), default=True)
type = Column(Integer(), default=TYPE_ARTICLE)
pageview = Column(Integer(), default=0)
kind = config.K_POST
@cache(MC_KEY_RELATED % ('{self.id}', '{limit}'))
async def get_related(self, limit: int=4):
tag_ids = [tag.id for tag in await self.tags]
if not tag_ids:
return []
post_ids = set([ item['post_id']
for item in await PostTag.async_in('tag_id', tag_ids)])
post_ids -= set([self.id])
if not post_ids: return []
related_posts = [
Post(**p)
for p in await Post.async_in('id', post_ids)
]
return related_posts[:limit] if len(related_posts) >= limit else related_posts
@classmethod
async def acreate(cls, **kwargs):
tags = kwargs.pop('tags', [])
content = kwargs.pop('content')
obj_id = await super().acreate(**kwargs)
kwargs['id'] = obj_id
if tags:
try:
await PostTag.update_multi(obj_id, tags)
except:
await Post.adelete(id=obj_id)
return
obj = cls(**(await cls.async_first(id=obj_id)))
await obj.set_content(content)
return obj
async def update_tags(self, tagnames):
if tagnames:
await PostTag.update_multi(self.id, tagnames)
return True
@property
@cache(MC_KEY_TAGS_BY_POST_ID % ('{self.id}'))
async def tags(self):
pts = await PostTag.async_filter(post_id=self.id)
if not pts:
return []
ids = [item['tag_id'] for item in pts]
tags = await Tag.async_in('id', ids)
tags = [Tag(**t) for t in tags]
return tags
@property
async def author(self):
print('user_id', self.author_id)
rv = await User.cache(id=self.author_id)
return {'name': rv['name'], 'id': self.author_id, 'avatar': rv['avatar']}
@property
def is_page(self):
return self.type == self.TYPE_PAGE
@property
def preview_url(self):
return f'/{self.__class__.__name__.lower()}/{self.id}/preview'
async def set_content(self, content):
return await self.set_props_by_key('content', content)
async def asave(self, *args, **kwargs):
content = kwargs.pop('content', None)
if content is not None:
            await self.set_content(content)
return await super().asave(*args, **kwargs)
@property
async def content(self):
rv = await self.get_props_by_key('content')
if rv:
return rv.decode('utf-8')
@classmethod
@cache(MC_KEY_POST_BY_SLUG % '{slug}')
async def get_by_slug(cls, slug):
return await cls.async_first(slug=slug)
@classmethod
@cache(MC_KEY_ALL_POSTS % '{with_page}')
async def get_all(cls, with_page=True):
if with_page:
posts = await Post.async_filter(status=Post.STATUS_ONLINE)
else:
posts = await Post.async_filter(status=Post.STATUS_ONLINE,
type=Post.TYPE_ARTICLE)
return sorted(posts, key=lambda p: p['created_at'], reverse=True)
@property
def url(self):
if self.is_page:
return f'/page/{self.slug}'
return f'/post/{getattr(self, config.PERMALINK_TYPE) or self.id}/'
@property
async def html_content(self):
content = await self.content
if not content:
return ''
return markdown(content)
@property
async def excerpt(self):
if self.summary:
return self.summary
s = MLStripper()
s.feed(await self.html_content)
return trunc_utf8(BQ_REGEX.sub('', s.get_data()).replace('\n', ''), 100)
@property
async def toc(self):
content = await self.content
if not content:
return ''
toc.reset_toc()
toc_md.parse(content)
return toc.render_toc(level=4)
@classmethod
async def cache(cls, ident):
if str(ident).isdigit():
return await super().cache(id=ident)
return await cls.get_by_slug(ident)
async def clear_mc(self):
        print('Clear Post MC', self.created_at)
try:
keys = [
MC_KEY_FEED, MC_KEY_SITEMAP, MC_KEY_SEARCH, MC_KEY_ARCHIVES,
MC_KEY_TAGS, MC_KEY_RELATED % (self.id, 4),
MC_KEY_POST_BY_SLUG % self.slug,
MC_KEY_ARCHIVE % self.created_at.year
]
except:
import traceback
traceback.print_exc()
for i in [True, False]:
keys.append(MC_KEY_ALL_POSTS % i)
for tag in await self.tags:
keys.append(MC_KEY_TAG % tag.id)
await clear_mc(*keys)
async def incr_pageview(self, increment=1):
redis = await self.redis
try:
await redis.sadd(RK_ALL_POST_IDS,self.id)
await redis.sadd(RK_VISITED_POST_IDS, self.id)
return await redis.hincrby(RK_PAGEVIEW.format(self.id),
PAGEVIEW_FIELD,
increment)
except:
return self.pageview
@property
async def pageview_(self):
try:
return int(await (await self.redis).hget(
RK_PAGEVIEW.format(self.id), PAGEVIEW_FIELD) or 0
)
except RedisError:
return self.pageview
class Tag(BaseModel):
name = Column(String(100), unique=True)
@classmethod
def create(cls, **kwargs):
name = kwargs.pop('name')
kwargs['name'] = name.lower()
return super().acreate(**kwargs)
@classmethod
async def get_by_name(cls, name):
return await cls.async_filter(name=name)
class PostTag(BaseModel):
post_id = Column(Integer())
tag_id = Column(Integer())
updated_at = Column(DateTime, server_default=func.now(), nullable=False)
@classmethod
async def update_multi(cls, post_id, tags: list):
origin_tags_id = [t['tag_id'] for t in (
await PostTag.async_filter(post_id=post_id)
)]
origin_tags_name = set([t['name'] for t in await Tag.async_in('id', origin_tags_id)])
need_add = set(tags) - origin_tags_name
need_del = origin_tags_name - set(tags)
need_add_tags_id = []
need_del_tags_id = set()
for tag_name in need_add:
rv = await Tag.get_or_create(name=tag_name)
if isinstance(rv, int): need_add_tags_id.append(rv)
else: need_add_tags_id.append(rv['id'])
for tag_name in need_del:
rv = await Tag.get_or_create(name=tag_name)
            if isinstance(rv, int): need_del_tags_id.add(rv)
else: need_del_tags_id.add(rv['id'])
if need_del_tags_id:
for id in list(need_del_tags_id):
await cls.adelete(post_id=post_id, tag_id=id)
for tag_id in need_add_tags_id:
await cls.get_or_create(post_id=post_id, tag_id=tag_id)
await clear_mc(MC_KEY_TAGS_BY_POST_ID % post_id) |
Python/libraries/recognizers-date-time/recognizers_date_time/date_time/chinese/set_extractor_config.py | AhmedLeithy/Recognizers-Text | 688 | 12743545 | <reponame>AhmedLeithy/Recognizers-Text
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
from typing import Pattern
from recognizers_text import RegExpUtility
from ...resources.chinese_date_time import ChineseDateTime
from ..base_set import SetExtractorConfiguration
from .duration_extractor import ChineseDurationExtractor
from .time_extractor import ChineseTimeExtractor
from .date_extractor import ChineseDateExtractor
from .datetime_extractor import ChineseDateTimeExtractor
class ChineseSetExtractorConfiguration(SetExtractorConfiguration):
@property
def last_regex(self) -> Pattern:
return self._last_regex
@property
def each_prefix_regex(self) -> Pattern:
return self._each_prefix_regex
@property
def periodic_regex(self) -> any:
return None
@property
def each_unit_regex(self) -> Pattern:
return self._each_unit_regex
@property
def each_day_regex(self) -> Pattern:
return self._each_day_regex
@property
def before_each_day_regex(self) -> Pattern:
return self._before_each_day_regex
@property
def set_week_day_regex(self) -> any:
return None
@property
def set_each_regex(self) -> any:
return None
@property
def duration_extractor(self) -> ChineseDurationExtractor:
return self._duration_extractor
@property
def time_extractor(self) -> ChineseTimeExtractor:
return self._time_extractor
@property
def date_extractor(self) -> ChineseDateExtractor:
return self._date_extractor
@property
def date_time_extractor(self) -> ChineseDateTimeExtractor:
return self._date_time_extractor
@property
def date_period_extractor(self) -> any:
return None
@property
def time_period_extractor(self) -> any:
return None
@property
def date_time_period_extractor(self) -> any:
return None
def __init__(self):
self._last_regex = RegExpUtility.get_safe_reg_exp(
ChineseDateTime.SetLastRegex)
self._each_prefix_regex = RegExpUtility.get_safe_reg_exp(
ChineseDateTime.SetEachPrefixRegex)
self._each_unit_regex = RegExpUtility.get_safe_reg_exp(
ChineseDateTime.SetEachUnitRegex)
self._each_day_regex = RegExpUtility.get_safe_reg_exp(
ChineseDateTime.SetEachDayRegex)
self._before_each_day_regex = RegExpUtility.get_safe_reg_exp(
ChineseDateTime.SetEachDayRegex)
self._duration_extractor = ChineseDurationExtractor()
self._time_extractor = ChineseTimeExtractor()
self._date_extractor = ChineseDateExtractor()
self._date_time_extractor = ChineseDateTimeExtractor()
|
rasa/cli/run.py | jeanveau/rasa_core | 2,433 | 12743562 | <gh_stars>1000+
import argparse
import logging
import os
import shutil
from typing import List
from rasa import model
from rasa.cli.default_arguments import add_model_param
from rasa.cli.utils import get_validated_path
from rasa.constants import (
DEFAULT_ACTIONS_PATH, DEFAULT_CREDENTIALS_PATH, DEFAULT_ENDPOINTS_PATH,
DEFAULT_MODELS_PATH)
from rasa.model import get_latest_model
logger = logging.getLogger(__name__)
# noinspection PyProtectedMember
def add_subparser(subparsers: argparse._SubParsersAction,
parents: List[argparse.ArgumentParser]):
run_parser = subparsers.add_parser(
"run",
parents=parents,
conflict_handler="resolve",
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
help="Start a Rasa server which loads a trained model")
add_run_arguments(run_parser)
run_parser.set_defaults(func=run)
run_subparsers = run_parser.add_subparsers()
run_core_parser = run_subparsers.add_parser(
"core",
parents=parents,
conflict_handler="resolve",
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
help="Run a trained Core model"
)
add_run_arguments(run_core_parser)
run_core_parser.set_defaults(func=run)
nlu_subparser = run_subparsers.add_parser(
"nlu",
parents=parents,
conflict_handler="resolve",
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
help="Run a trained NLU model"
)
_add_nlu_arguments(nlu_subparser)
nlu_subparser.set_defaults(func=run_nlu)
sdk_subparser = run_subparsers.add_parser(
"actions",
parents=parents,
conflict_handler="resolve",
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
help="Run the action server"
)
_adk_sdk_arguments(sdk_subparser)
sdk_subparser.set_defaults(func=run_actions)
def add_run_arguments(parser: argparse.ArgumentParser):
from rasa.core.cli.run import add_run_arguments
add_run_arguments(parser)
add_model_param(parser)
parser.add_argument(
"--credentials",
type=str,
default="credentials.yml",
help="Authentication credentials for the connector as a yml file")
def _add_nlu_arguments(parser: argparse.ArgumentParser):
from rasa_nlu.cli.server import add_server_arguments
add_server_arguments(parser)
parser.add_argument('--path',
default=DEFAULT_MODELS_PATH,
type=str,
                        help="Working directory of the server. Models are "
"loaded from this directory and trained models "
"will be saved here")
add_model_param(parser, "NLU")
def _adk_sdk_arguments(parser: argparse.ArgumentParser):
import rasa_core_sdk.cli.arguments as sdk
sdk.add_endpoint_arguments(parser)
parser.add_argument(
'--actions',
type=str,
default="actions",
help="name of action package to be loaded")
def run_nlu(args: argparse.Namespace):
import rasa_nlu.server
import tempfile
args.model = get_validated_path(args.path, "path", DEFAULT_MODELS_PATH)
model_archive = get_latest_model(args.model)
working_directory = tempfile.mkdtemp()
unpacked_model = model.unpack_model(model_archive, working_directory)
args.path = os.path.dirname(unpacked_model)
rasa_nlu.server.main(args)
shutil.rmtree(unpacked_model)
def run_actions(args: argparse.Namespace):
import rasa_core_sdk.endpoint as sdk
import sys
args.actions = args.actions or DEFAULT_ACTIONS_PATH
# insert current path in syspath so module is found
sys.path.insert(1, os.getcwd())
path = args.actions.replace('.', os.sep) + ".py"
_ = get_validated_path(path, "action", DEFAULT_ACTIONS_PATH)
sdk.main(args)
def run(args: argparse.Namespace):
import rasa.run
args.model = get_validated_path(args.model, "model", DEFAULT_MODELS_PATH)
args.endpoints = get_validated_path(args.endpoints, "endpoints",
DEFAULT_ENDPOINTS_PATH, True)
args.credentials = get_validated_path(args.credentials, "credentials",
DEFAULT_CREDENTIALS_PATH, True)
rasa.run(**vars(args))
|
tests/conftest.py | simonvh/genomepy | 112 | 12743568 | """
Global fixtures and functions for pytest
pytest can only share fixtures between modules if they are declared here.
"""
import logging
import os
import pytest
from loguru import logger
import genomepy.providers
from genomepy.providers.base import BaseProvider
from genomepy.providers.ensembl import EnsemblProvider
from genomepy.providers.gencode import GencodeProvider
from genomepy.providers.local import LocalProvider
from genomepy.providers.ncbi import NcbiProvider
from genomepy.providers.ucsc import UcscProvider
from genomepy.providers.url import UrlProvider
@pytest.fixture(scope="function")
def caplog(caplog):
"""Fixture is necessary to be able to check loguru log messages"""
    class PropagateHandler(logging.Handler):
        def emit(self, record):
            logging.getLogger(record.name).handle(record)
    handler_id = logger.add(PropagateHandler(), format="{message} {extra}")
yield caplog
logger.remove(handler_id)
def teardown(gprefix, skip=None):
for ext in [
"fa.fai",
"fa.sizes",
"gaps.bed",
"fa.gz.fai",
"fa.gz.sizes",
"annotation.gtf",
"annotation.bed",
]:
if skip and ext in skip:
continue
file = gprefix + ext
if os.path.exists(file):
os.remove(file)
gdir = os.path.dirname(gprefix)
readme = os.path.join(gdir, "README.txt")
if os.path.exists(readme):
os.remove(readme)
@pytest.fixture(scope="function")
def small_genome():
yield genomepy.Genome("tests/data/small_genome.fa.gz")
teardown("tests/data/small_genome.")
@pytest.fixture(scope="function")
def gap_genome():
yield genomepy.Genome("tests/data/gap.fa")
teardown("tests/data/gap.")
@pytest.fixture(scope="function")
def annot():
genome_file = "tests/data/regexp/regexp.fa"
gtf_file = "tests/data/regexp/regexp.annotation.gtf"
bed_file = "tests/data/regexp/regexp.annotation.bed"
genomepy.Genome(genome_file)
with open(gtf_file, "w") as f:
f.write("# skip this line\n")
f.write(
"""chrM\tvanHeeringen-lab\tNP_059343.1\t15307\t16448\t42\t+\t.\tattributes"""
)
with open(bed_file, "w") as f:
f.write(
"""chrM\t15307\t16448\tNP_059343.1\t42\t+\t15307\t16448\t0\t1\t1141,\t0,"""
)
yield genomepy.Annotation("regexp", genomes_dir="tests/data")
teardown("tests/data/regexp/regexp.")
def validate_annot(fname, ftype):
"""fname = path, ftype = 'bed' or 'gtf'."""
assert os.path.exists(fname)
columns = 12 if ftype == "bed" else 9
start, end = (3, 4) if ftype == "gtf" else (1, 2)
with open(fname, "r") as f:
for line in f:
if line.startswith("#"):
continue
vals = line.split("\t")
assert columns == len(vals)
int(vals[start]), int(vals[end])
break
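# Example use (a sketch; the paths assume the `annot` fixture above has been exercised):
#   validate_annot("tests/data/regexp/regexp.annotation.gtf", "gtf")
#   validate_annot("tests/data/regexp/regexp.annotation.bed", "bed")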
@pytest.fixture(scope="function")
def base():
return BaseProvider()
@pytest.fixture(scope="function")
def ensembl():
return EnsemblProvider()
@pytest.fixture(scope="function")
def ucsc():
return UcscProvider()
@pytest.fixture(scope="function")
def gencode():
return GencodeProvider()
@pytest.fixture(scope="function")
def ncbi():
return NcbiProvider()
@pytest.fixture(scope="function")
def local():
return LocalProvider()
@pytest.fixture(scope="function")
def url():
return UrlProvider()
@pytest.fixture(scope="function")
def provider():
return genomepy.Provider()
|
spleeter/model/__init__.py | au-deps/spleeter | 19,827 | 12743595 | <reponame>au-deps/spleeter
#!/usr/bin/env python
# coding: utf8
""" This package provide an estimator builder as well as model functions. """
import importlib
# pyright: reportMissingImports=false
# pylint: disable=import-error
import tensorflow as tf
from tensorflow.signal import hann_window, inverse_stft, stft
from ..utils.tensor import pad_and_partition, pad_and_reshape
# pylint: enable=import-error
__email__ = "<EMAIL>"
__author__ = "Dee<NAME>"
__license__ = "MIT License"
placeholder = tf.compat.v1.placeholder
def get_model_function(model_type):
"""
Get tensorflow function of the model to be applied to the input tensor.
For instance "unet.softmax_unet" will return the softmax_unet function
in the "unet.py" submodule of the current module (spleeter.model).
Params:
- model_type: str
the relative module path to the model function.
Returns:
A tensorflow function to be applied to the input tensor to get the
multitrack output.
"""
relative_path_to_module = ".".join(model_type.split(".")[:-1])
model_name = model_type.split(".")[-1]
main_module = ".".join((__name__, "functions"))
path_to_module = f"{main_module}.{relative_path_to_module}"
module = importlib.import_module(path_to_module)
model_function = getattr(module, model_name)
return model_function
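# Minimal usage sketch (illustrative helper, never called at import time): resolve the
# default model function named by EstimatorSpecBuilder.DEFAULT_MODEL further below.
def _default_model_function():
    # "unet.unet" maps to the `unet` function in the spleeter.model.functions.unet submodule.
    return get_model_function("unet.unet")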
class InputProvider(object):
def __init__(self, params):
self.params = params
def get_input_dict_placeholders(self):
raise NotImplementedError()
@property
def input_names(self):
raise NotImplementedError()
def get_feed_dict(self, features, *args):
raise NotImplementedError()
class WaveformInputProvider(InputProvider):
@property
def input_names(self):
return ["audio_id", "waveform"]
def get_input_dict_placeholders(self):
shape = (None, self.params["n_channels"])
features = {
"waveform": placeholder(tf.float32, shape=shape, name="waveform"),
"audio_id": placeholder(tf.string, name="audio_id"),
}
return features
def get_feed_dict(self, features, waveform, audio_id):
return {features["audio_id"]: audio_id, features["waveform"]: waveform}
class SpectralInputProvider(InputProvider):
def __init__(self, params):
super().__init__(params)
self.stft_input_name = "{}_stft".format(self.params["mix_name"])
@property
def input_names(self):
return ["audio_id", self.stft_input_name]
def get_input_dict_placeholders(self):
features = {
self.stft_input_name: placeholder(
tf.complex64,
shape=(
None,
self.params["frame_length"] // 2 + 1,
self.params["n_channels"],
),
name=self.stft_input_name,
),
"audio_id": placeholder(tf.string, name="audio_id"),
}
return features
def get_feed_dict(self, features, stft, audio_id):
return {features["audio_id"]: audio_id, features[self.stft_input_name]: stft}
class InputProviderFactory(object):
@staticmethod
def get(params):
stft_backend = params["stft_backend"]
assert stft_backend in (
"tensorflow",
"librosa",
), "Unexpected backend {}".format(stft_backend)
if stft_backend == "tensorflow":
return WaveformInputProvider(params)
else:
return SpectralInputProvider(params)
class EstimatorSpecBuilder(object):
"""A builder class that allows to builds a multitrack unet model
estimator. The built model estimator has a different behaviour when
used in a train/eval mode and in predict mode.
* In train/eval mode: it takes as input and outputs magnitude spectrogram
* In predict mode: it takes as input and outputs waveform. The whole
separation process is then done in this function
                       for performance reasons: it makes it possible to run
                       the whole separation process (including STFT and
                       inverse STFT) on GPU.
:Example:
>>> from spleeter.model import EstimatorSpecBuilder
>>> builder = EstimatorSpecBuilder()
>>> builder.build_predict_model()
>>> builder.build_evaluation_model()
>>> builder.build_train_model()
>>> from spleeter.model import model_fn
>>> estimator = tf.estimator.Estimator(model_fn=model_fn, ...)
"""
# Supported model functions.
DEFAULT_MODEL = "unet.unet"
# Supported loss functions.
L1_MASK = "L1_mask"
WEIGHTED_L1_MASK = "weighted_L1_mask"
# Supported optimizers.
ADADELTA = "Adadelta"
SGD = "SGD"
# Math constants.
WINDOW_COMPENSATION_FACTOR = 2.0 / 3.0
EPSILON = 1e-10
def __init__(self, features, params):
"""Default constructor. Depending on built model
usage, the provided features should be different:
* In train/eval mode: features is a dictionary with a
"mix_spectrogram" key, associated to the
mix magnitude spectrogram.
* In predict mode: features is a dictionary with a "waveform"
key, associated to the waveform of the sound
to be separated.
:param features: The input features for the estimator.
:param params: Some hyperparameters as a dictionary.
"""
self._features = features
self._params = params
# Get instrument name.
self._mix_name = params["mix_name"]
self._instruments = params["instrument_list"]
# Get STFT/signals parameters
self._n_channels = params["n_channels"]
self._T = params["T"]
self._F = params["F"]
self._frame_length = params["frame_length"]
self._frame_step = params["frame_step"]
def include_stft_computations(self):
return self._params["stft_backend"] == "tensorflow"
def _build_model_outputs(self):
"""Created a batch_sizexTxFxn_channels input tensor containing
mix magnitude spectrogram, then an output dict from it according
to the selected model in internal parameters.
:returns: Build output dict.
:raise ValueError: If required model_type is not supported.
"""
input_tensor = self.spectrogram_feature
model = self._params.get("model", None)
if model is not None:
model_type = model.get("type", self.DEFAULT_MODEL)
else:
model_type = self.DEFAULT_MODEL
try:
apply_model = get_model_function(model_type)
except ModuleNotFoundError:
raise ValueError(f"No model function {model_type} found")
self._model_outputs = apply_model(
input_tensor, self._instruments, self._params["model"]["params"]
)
def _build_loss(self, labels):
"""Construct tensorflow loss and metrics
:param output_dict: dictionary of network outputs (key: instrument
name, value: estimated spectrogram of the instrument)
:param labels: dictionary of target outputs (key: instrument
name, value: ground truth spectrogram of the instrument)
:returns: tensorflow (loss, metrics) tuple.
"""
output_dict = self.model_outputs
loss_type = self._params.get("loss_type", self.L1_MASK)
if loss_type == self.L1_MASK:
losses = {
name: tf.reduce_mean(tf.abs(output - labels[name]))
for name, output in output_dict.items()
}
elif loss_type == self.WEIGHTED_L1_MASK:
losses = {
name: tf.reduce_mean(
tf.reduce_mean(labels[name], axis=[1, 2, 3], keep_dims=True)
* tf.abs(output - labels[name])
)
for name, output in output_dict.items()
}
else:
raise ValueError(f"Unkwnown loss type: {loss_type}")
loss = tf.reduce_sum(list(losses.values()))
# Add metrics for monitoring each instrument.
metrics = {k: tf.compat.v1.metrics.mean(v) for k, v in losses.items()}
metrics["absolute_difference"] = tf.compat.v1.metrics.mean(loss)
return loss, metrics
def _build_optimizer(self):
"""Builds an optimizer instance from internal parameter values.
Default to AdamOptimizer if not specified.
:returns: Optimizer instance from internal configuration.
"""
name = self._params.get("optimizer")
if name == self.ADADELTA:
return tf.compat.v1.train.AdadeltaOptimizer()
rate = self._params["learning_rate"]
if name == self.SGD:
return tf.compat.v1.train.GradientDescentOptimizer(rate)
return tf.compat.v1.train.AdamOptimizer(rate)
@property
def instruments(self):
return self._instruments
@property
def stft_name(self):
return f"{self._mix_name}_stft"
@property
def spectrogram_name(self):
return f"{self._mix_name}_spectrogram"
def _build_stft_feature(self):
"""Compute STFT of waveform and slice the STFT in segment
with the right length to feed the network.
"""
stft_name = self.stft_name
spec_name = self.spectrogram_name
if stft_name not in self._features:
# pad input with a frame of zeros
waveform = tf.concat(
[
tf.zeros((self._frame_length, self._n_channels)),
self._features["waveform"],
],
0,
)
stft_feature = tf.transpose(
stft(
tf.transpose(waveform),
self._frame_length,
self._frame_step,
window_fn=lambda frame_length, dtype: (
hann_window(frame_length, periodic=True, dtype=dtype)
),
pad_end=True,
),
perm=[1, 2, 0],
)
self._features[f"{self._mix_name}_stft"] = stft_feature
if spec_name not in self._features:
self._features[spec_name] = tf.abs(
pad_and_partition(self._features[stft_name], self._T)
)[:, :, : self._F, :]
@property
def model_outputs(self):
if not hasattr(self, "_model_outputs"):
self._build_model_outputs()
return self._model_outputs
@property
def outputs(self):
if not hasattr(self, "_outputs"):
self._build_outputs()
return self._outputs
@property
def stft_feature(self):
if self.stft_name not in self._features:
self._build_stft_feature()
return self._features[self.stft_name]
@property
def spectrogram_feature(self):
if self.spectrogram_name not in self._features:
self._build_stft_feature()
return self._features[self.spectrogram_name]
@property
def masks(self):
if not hasattr(self, "_masks"):
self._build_masks()
return self._masks
@property
def masked_stfts(self):
if not hasattr(self, "_masked_stfts"):
self._build_masked_stfts()
return self._masked_stfts
def _inverse_stft(self, stft_t, time_crop=None):
"""Inverse and reshape the given STFT
:param stft_t: input STFT
:returns: inverse STFT (waveform)
"""
inversed = (
inverse_stft(
tf.transpose(stft_t, perm=[2, 0, 1]),
self._frame_length,
self._frame_step,
window_fn=lambda frame_length, dtype: (
hann_window(frame_length, periodic=True, dtype=dtype)
),
)
* self.WINDOW_COMPENSATION_FACTOR
)
reshaped = tf.transpose(inversed)
if time_crop is None:
time_crop = tf.shape(self._features["waveform"])[0]
return reshaped[self._frame_length : self._frame_length + time_crop, :]
def _build_mwf_output_waveform(self):
"""Perform separation with multichannel Wiener Filtering using Norbert.
Note: multichannel Wiener Filtering is not coded in Tensorflow and thus
may be quite slow.
:returns: dictionary of separated waveforms (key: instrument name,
value: estimated waveform of the instrument)
"""
import norbert # pylint: disable=import-error
output_dict = self.model_outputs
x = self.stft_feature
v = tf.stack(
[
pad_and_reshape(
output_dict[f"{instrument}_spectrogram"],
self._frame_length,
self._F,
)[: tf.shape(x)[0], ...]
for instrument in self._instruments
],
axis=3,
)
input_args = [v, x]
stft_function = (
tf.py_function(
lambda v, x: norbert.wiener(v.numpy(), x.numpy()),
input_args,
tf.complex64,
),
)
return {
instrument: self._inverse_stft(stft_function[0][:, :, :, k])
for k, instrument in enumerate(self._instruments)
}
def _extend_mask(self, mask):
"""Extend mask, from reduced number of frequency bin to the number of
frequency bin in the STFT.
:param mask: restricted mask
:returns: extended mask
:raise ValueError: If invalid mask_extension parameter is set.
"""
extension = self._params["mask_extension"]
# Extend with average
# (dispatch according to energy in the processed band)
if extension == "average":
extension_row = tf.reduce_mean(mask, axis=2, keepdims=True)
# Extend with 0
# (avoid extension artifacts but not conservative separation)
elif extension == "zeros":
mask_shape = tf.shape(mask)
extension_row = tf.zeros((mask_shape[0], mask_shape[1], 1, mask_shape[-1]))
else:
raise ValueError(f"Invalid mask_extension parameter {extension}")
n_extra_row = self._frame_length // 2 + 1 - self._F
extension = tf.tile(extension_row, [1, 1, n_extra_row, 1])
return tf.concat([mask, extension], axis=2)
def _build_masks(self):
"""
Compute masks from the output spectrograms of the model.
:return:
"""
output_dict = self.model_outputs
stft_feature = self.stft_feature
separation_exponent = self._params["separation_exponent"]
output_sum = (
tf.reduce_sum(
[e ** separation_exponent for e in output_dict.values()], axis=0
)
+ self.EPSILON
)
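        # Ratio-mask formula (restated for clarity): for instrument i with output o_i and
        # separation exponent p, mask_i = (o_i**p + EPSILON/N) / (sum_j o_j**p + EPSILON),
        # so the masks sum to 1 across instruments at every time-frequency bin.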
out = {}
for instrument in self._instruments:
output = output_dict[f"{instrument}_spectrogram"]
# Compute mask with the model.
instrument_mask = (
output ** separation_exponent + (self.EPSILON / len(output_dict))
) / output_sum
# Extend mask;
instrument_mask = self._extend_mask(instrument_mask)
# Stack back mask.
old_shape = tf.shape(instrument_mask)
new_shape = tf.concat(
[[old_shape[0] * old_shape[1]], old_shape[2:]], axis=0
)
instrument_mask = tf.reshape(instrument_mask, new_shape)
# Remove padded part (for mask having the same size as STFT);
instrument_mask = instrument_mask[: tf.shape(stft_feature)[0], ...]
out[instrument] = instrument_mask
self._masks = out
def _build_masked_stfts(self):
input_stft = self.stft_feature
out = {}
for instrument, mask in self.masks.items():
out[instrument] = tf.cast(mask, dtype=tf.complex64) * input_stft
self._masked_stfts = out
def _build_manual_output_waveform(self, masked_stft):
"""Perform ratio mask separation
        :param masked_stft: dictionary of masked STFTs (key: instrument
            name, value: masked STFT of the instrument)
:returns: dictionary of separated waveforms (key: instrument name,
value: estimated waveform of the instrument)
"""
output_waveform = {}
for instrument, stft_data in masked_stft.items():
output_waveform[instrument] = self._inverse_stft(stft_data)
return output_waveform
def _build_output_waveform(self, masked_stft):
"""Build output waveform from given output dict in order to be used in
prediction context. Regarding of the configuration building method will
be using MWF.
:returns: Built output waveform.
"""
if self._params.get("MWF", False):
output_waveform = self._build_mwf_output_waveform()
else:
output_waveform = self._build_manual_output_waveform(masked_stft)
return output_waveform
def _build_outputs(self):
if self.include_stft_computations():
self._outputs = self._build_output_waveform(self.masked_stfts)
else:
self._outputs = self.masked_stfts
if "audio_id" in self._features:
self._outputs["audio_id"] = self._features["audio_id"]
def build_predict_model(self):
"""Builder interface for creating model instance that aims to perform
prediction / inference over given track. The output of such estimator
will be a dictionary with a "<instrument>" key per separated instrument
, associated to the estimated separated waveform of the instrument.
:returns: An estimator for performing prediction.
"""
return tf.estimator.EstimatorSpec(
tf.estimator.ModeKeys.PREDICT, predictions=self.outputs
)
def build_evaluation_model(self, labels):
"""Builder interface for creating model instance that aims to perform
model evaluation. The output of such estimator will be a dictionary
with a key "<instrument>_spectrogram" per separated instrument,
associated to the estimated separated instrument magnitude spectrogram.
:param labels: Model labels.
:returns: An estimator for performing model evaluation.
"""
loss, metrics = self._build_loss(labels)
return tf.estimator.EstimatorSpec(
tf.estimator.ModeKeys.EVAL, loss=loss, eval_metric_ops=metrics
)
def build_train_model(self, labels):
"""Builder interface for creating model instance that aims to perform
model training. The output of such estimator will be a dictionary
with a key "<instrument>_spectrogram" per separated instrument,
associated to the estimated separated instrument magnitude spectrogram.
:param labels: Model labels.
:returns: An estimator for performing model training.
"""
loss, metrics = self._build_loss(labels)
optimizer = self._build_optimizer()
train_operation = optimizer.minimize(
loss=loss, global_step=tf.compat.v1.train.get_global_step()
)
return tf.estimator.EstimatorSpec(
mode=tf.estimator.ModeKeys.TRAIN,
loss=loss,
train_op=train_operation,
eval_metric_ops=metrics,
)
def model_fn(features, labels, mode, params, config):
"""
:param features:
:param labels:
:param mode: Estimator mode.
:param params:
:param config: TF configuration (not used).
:returns: Built EstimatorSpec.
:raise ValueError: If estimator mode is not supported.
"""
builder = EstimatorSpecBuilder(features, params)
if mode == tf.estimator.ModeKeys.PREDICT:
return builder.build_predict_model()
elif mode == tf.estimator.ModeKeys.EVAL:
return builder.build_evaluation_model(labels)
elif mode == tf.estimator.ModeKeys.TRAIN:
return builder.build_train_model(labels)
raise ValueError(f"Unknown mode {mode}")
|
third_party/tests/Opentitan/util/dvsim/FlowCfg.py | parzival3/Surelog | 156 | 12743596 | #!/usr/bin/env python3
# Copyright lowRISC contributors.
# Licensed under the Apache License, Version 2.0, see LICENSE for details.
# SPDX-License-Identifier: Apache-2.0
r"""
Class describing a flow configuration object
"""
import datetime
import logging as log
import pprint
from shutil import which
from .utils import *
# Interface class for extensions.
class FlowCfg():
def __str__(self):
return pprint.pformat(self.__dict__)
def __repr__(self):
return pprint.pformat(self.__dict__)
def __init__(self, flow_cfg_file, proj_root, args):
# Options set from command line
self.items = []
self.items.extend(args.items)
self.list_items = []
self.list_items.extend(args.list)
self.flow_cfg_file = flow_cfg_file
self.proj_root = proj_root
self.args = args
self.scratch_root = args.scratch_root
self.branch = args.branch
self.job_prefix = args.job_prefix
# Imported cfg files using 'import_cfgs' keyword
self.imported_cfg_files = []
self.imported_cfg_files.append(flow_cfg_file)
# Add exports using 'exports' keyword - these are exported to the child
# process' environment.
self.exports = []
# Add overrides using the overrides keyword - existing attributes
# are overridden with the override values.
self.overrides = []
# List of cfgs if the parsed cfg is a master cfg list
self.cfgs = []
# Add a notion of "master" cfg - this is indicated using
# a special key 'use_cfgs' within the hjson cfg.
self.is_master_cfg = False
# Set the partial path to the IP's DV area.
self.rel_path = os.path.dirname(flow_cfg_file).replace(
self.proj_root + '/', '')
# Timestamp
self.ts_format_long = args.ts_format_long
self.timestamp_long = args.timestamp_long
self.ts_format = args.ts_format
self.timestamp = args.timestamp
def __post_init__(self):
# Run some post init checks
if not self.is_master_cfg:
# Check if self.cfgs is a list of exactly 1 item (self)
if not (len(self.cfgs) == 1 and self.cfgs[0].name == self.name):
log.error("Parse error!\n%s", self.cfgs)
sys.exit(1)
@staticmethod
def create_instance(flow_cfg_file, proj_root, args):
        '''Create a new instance of this class with the given parameters.
'''
return FlowCfg(flow_cfg_file, proj_root, args)
def parse_flow_cfg(self, flow_cfg_file, is_entry_point=True):
'''
Parse the flow cfg hjson file. This is a private API used within the
extended class' __init__ function. This parses the hjson cfg (and
imports / use cfgs) and builds an initial dictionary.
This method takes 2 args.
flow_cfg_file: This is the flow cfg file to be parsed.
is_entry_point: the cfg file that is passed on the command line is
the entry point cfg. If the cfg file is a part of an inport_cfgs
or use_cfgs key, then it is not an entry point.
'''
hjson_dict = parse_hjson(flow_cfg_file)
# Check if this is the master cfg, if this is the entry point cfg file
if is_entry_point:
self.is_master_cfg = self.check_if_master_cfg(hjson_dict)
# If not a master cfg, then register self with self.cfgs
if self.is_master_cfg is False:
self.cfgs.append(self)
# Resolve the raw hjson dict to build this object
self.resolve_hjson_raw(hjson_dict)
def check_if_master_cfg(self, hjson_dict):
# This is a master cfg only if it has a single key called "use_cfgs"
# which contains a list of actual flow cfgs.
hjson_cfg_dict_keys = hjson_dict.keys()
return (len(hjson_cfg_dict_keys) == 1 and \
"use_cfgs" in hjson_cfg_dict_keys and \
type(hjson_dict["use_cfgs"]) is list)
def resolve_hjson_raw(self, hjson_dict):
attrs = self.__dict__.keys()
rm_hjson_dict_keys = []
import_cfgs = []
use_cfgs = []
for key in hjson_dict.keys():
if key in attrs:
hjson_dict_val = hjson_dict[key]
self_val = getattr(self, key)
scalar_types = {str: [""], int: [0, -1], bool: [False]}
# Case 1: key value in class and hjson_dict differ - error!
if type(hjson_dict_val) != type(self_val):
log.error("Coflicting key types: \"%s\" {\"%s, \"%s\"}",
key,
type(hjson_dict_val).__name__,
type(self_val).__name__)
sys.exit(1)
# Case 2: key value in class and hjson_dict are strs - set if
# not already set, else error!
elif type(hjson_dict_val) in scalar_types.keys():
defaults = scalar_types[type(hjson_dict_val)]
if self_val == hjson_dict_val:
rm_hjson_dict_keys.append(key)
elif self_val in defaults and not hjson_dict_val in defaults:
setattr(self, key, hjson_dict_val)
rm_hjson_dict_keys.append(key)
elif not self_val in defaults and not hjson_dict_val in defaults:
log.error(
"Coflicting values {\"%s\", \"%s\"} encountered for key \"%s\"",
str(self_val), str(hjson_dict_val), key)
sys.exit(1)
# Case 3: key value in class and hjson_dict are lists - merge'em
elif type(hjson_dict_val) is list and type(self_val) is list:
self_val.extend(hjson_dict_val)
setattr(self, key, self_val)
rm_hjson_dict_keys.append(key)
# Case 4: unknown issue
else:
log.error(
"Type of \"%s\" (%s) in %s appears to be invalid (should be %s)",
key,
type(hjson_dict_val).__name__, hjson_dict,
type(self_val).__name__)
sys.exit(1)
# If key is 'import_cfgs' then add to the list of cfgs to
# process
elif key == 'import_cfgs':
import_cfgs.extend(hjson_dict[key])
rm_hjson_dict_keys.append(key)
# If this is a master cfg list and the key is 'use_cfgs'
elif self.is_master_cfg and key == "use_cfgs":
use_cfgs.extend(hjson_dict[key])
# If this is a not master cfg list and the key is 'use_cfgs'
elif not self.is_master_cfg and key == "use_cfgs":
# Throw an error and exit
log.error(
"Key \"use_cfgs\" encountered in a non-master cfg file list \"%s\"",
self.flow_cfg_file)
sys.exit(1)
else:
# add key-value to class
setattr(self, key, hjson_dict[key])
rm_hjson_dict_keys.append(key)
# Parse imported cfgs
for cfg_file in import_cfgs:
if not cfg_file in self.imported_cfg_files:
self.imported_cfg_files.append(cfg_file)
# Substitute wildcards in cfg_file files since we need to process
# them right away.
cfg_file = subst_wildcards(cfg_file, self.__dict__)
self.parse_flow_cfg(cfg_file, False)
else:
log.error("Cfg file \"%s\" has already been parsed", cfg_file)
# Parse master cfg files
if self.is_master_cfg:
for cfg_file in use_cfgs:
# Substitute wildcards in cfg_file files since we need to process
# them right away.
cfg_file = subst_wildcards(cfg_file, self.__dict__)
self.cfgs.append(
self.create_instance(cfg_file, self.proj_root, self.args))
def _process_overrides(self):
# Look through the dict and find available overrides.
# If override is available, check if the type of the value for existing
# and the overridden keys are the same.
overrides_dict = {}
if hasattr(self, "overrides"):
overrides = getattr(self, "overrides")
if type(overrides) is not list:
log.error(
"The type of key \"overrides\" is %s - it should be a list",
type(overrides))
sys.exit(1)
# Process override one by one
for item in overrides:
if type(item) is dict and set(item.keys()) == set(
["name", "value"]):
ov_name = item["name"]
ov_value = item["value"]
if ov_name not in overrides_dict.keys():
overrides_dict[ov_name] = ov_value
self._do_override(ov_name, ov_value)
else:
log.error(
"Override for key \"%s\" already exists!\nOld: %s\nNew: %s",
ov_name, overrides_dict[ov_name], ov_value)
sys.exit(1)
else:
log.error("\"overrides\" is is a list of dicts with {\"name\": <name>, " \
"\"value\": <value>} pairs. Found this instead:\n%s",
str(item))
sys.exit(1)
def _do_override(self, ov_name, ov_value):
# Go through self attributes and replace with overrides
if hasattr(self, ov_name):
orig_value = getattr(self, ov_name)
if type(orig_value) == type(ov_value):
log.debug("Overriding \"%s\" value \"%s\" with \"%s\"",
ov_name, orig_value, ov_value)
setattr(self, ov_name, ov_value)
else:
log.error("The type of override value \"%s\" for \"%s\" mismatches " + \
"the type of original value \"%s\"",
ov_value, ov_name, orig_value)
sys.exit(1)
else:
log.error("Override key \"%s\" not found in the cfg!", ov_name)
sys.exit(1)
def _process_exports(self):
# Convert 'exports' to dict
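        # e.g. (illustrative values): exports = [{"FOO": "BAR"}, "DUT: top_chip"]
        # becomes self.exports == {"FOO": "BAR", "DUT": "top_chip"}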
exports_dict = {}
if self.exports != []:
for item in self.exports:
if type(item) is dict:
exports_dict.update(item)
elif type(item) is str:
[key, value] = item.split(':', 1)
if type(key) is not str: key = str(key)
if type(value) is not str: value = str(value)
exports_dict.update({key.strip(): value.strip()})
else:
log.error("Type error in \"exports\": %s", str(item))
sys.exit(1)
self.exports = exports_dict
def _purge(self):
        '''Purge the existing scratch areas in preparation for the new run.'''
return
def purge(self):
'''Public facing API for _purge().
'''
for item in self.cfgs:
item._purge()
def _print_list(self):
'''Print the list of available items that can be kicked off.
'''
return
def print_list(self):
'''Public facing API for _print_list().
'''
for item in self.cfgs:
item._print_list()
def _create_deploy_objects(self):
'''Create deploy objects from items that were passed on for being run.
The deploy objects for build and run are created from the objects that were
created from the create_objects() method.
'''
return
def create_deploy_objects(self):
'''Public facing API for _create_deploy_objects().
'''
if self.is_master_cfg:
self.deploy = []
for item in self.cfgs:
item._create_deploy_objects()
self.deploy.extend(item.deploy)
else:
self._create_deploy_objects()
def _gen_results(self, fmt="md"):
'''
The function is called after the flow has executed. It collates the status of
all run targets and generates a dict. It parses the testplan and maps the generated
result to the testplan entries to generate a final table (list). It uses the fmt arg
to dump the final result as a markdown or html.
'''
return
def gen_results(self):
'''Public facing API for _gen_results().
'''
results = []
for item in self.cfgs:
result = item._gen_results()
print(result)
results.append(result)
return results
def _publish_results(self):
'''Publish results to the opentitan web server.
Results are uploaded to {results_server}/{rel_path}/latest/results.
If the 'latest' directory exists, then it is renamed to its 'timestamp' directory.
If the list of directories in this area is > 7, then the oldest entry is removed.
{results_server}/{rel_path}/history.md contains links to the last 7 results.
'''
if which('gsutil') is None or which('gcloud') is None:
log.error(
"Google cloud SDK not installed! Cannot access the results server"
)
return
# Construct the paths
results_root_dir = self.results_server + '/' + self.rel_path
results_dir = results_root_dir + '/latest'
results_page = results_dir + '/results.md'
# Timeformat for moving the dir
tf = "%Y.%m.%d_%H.%M.%S"
# Extract the timestamp of the existing results_page
cmd = "gsutil ls -L " + results_page + " | " + "grep \'Creation time:\'"
cmd_output = subprocess.run(cmd,
shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.DEVNULL)
old_results_ts = cmd_output.stdout.decode("utf-8")
old_results_ts = old_results_ts.replace("Creation time:", "")
old_results_ts = old_results_ts.strip()
# Move the 'latest' to its timestamp directory if lookup succeeded
if cmd_output.returncode == 0:
try:
if old_results_ts != "":
ts = datetime.datetime.strptime(
old_results_ts, "%a, %d %b %Y %H:%M:%S %Z")
old_results_ts = ts.strftime(tf)
except ValueError as e:
log.error(
"%s: \'%s\' Timestamp conversion value error raised!", e)
old_results_ts = ""
# If the timestamp conversion failed - then create a dummy one with
# yesterday's date.
if old_results_ts == "":
log.log(VERBOSE,
"Creating dummy timestamp with yesterday's date")
ts = datetime.datetime.now(
datetime.timezone.utc) - datetime.timedelta(days=1)
old_results_ts = ts.strftime(tf)
old_results_dir = results_root_dir + "/" + old_results_ts
cmd = ["gsutil", "mv", results_dir, old_results_dir]
cmd_output = subprocess.run(cmd,
stdout=subprocess.PIPE,
stderr=subprocess.DEVNULL)
if cmd_output.returncode != 0:
log.error("Failed to mv old results page \"%s\" to \"%s\"!",
results_dir, old_results_dir)
# Do an ls in the results root dir to check what directories exist.
results_dirs = []
cmd = ["gsutil", "ls", results_root_dir]
cmd_output = subprocess.run(args=cmd,
stdout=subprocess.PIPE,
stderr=subprocess.DEVNULL)
if cmd_output.returncode == 0:
# Some directories exist. Check if 'latest' is one of them
results_dirs = cmd_output.stdout.decode("utf-8").strip()
results_dirs = results_dirs.split("\n")
else:
log.log(VERBOSE, "Failed to run \"%s\"!", cmd)
# Start pruning
log.log(VERBOSE, "Pruning %s area to limit last 7 results",
results_root_dir)
rdirs = []
for rdir in results_dirs:
dirname = rdir.replace(results_root_dir, '')
dirname = dirname.replace('/', '')
if dirname in ['latest', 'history.md']: continue
rdirs.append(dirname)
rdirs.sort(reverse=True)
rm_cmd = ""
history_txt = " History\n\n"
history_txt += "- [Latest](" + results_page + ")\n"
if len(rdirs) > 0:
for i in range(len(rdirs)):
if i < 6:
rdir_url = results_root_dir + '/' + rdirs[i] + "/results.md"
history_txt += "- [{}]({})\n".format(rdirs[i], rdir_url)
else:
rm_cmd += results_root_dir + '/' + rdirs[i] + " "
if rm_cmd != "":
rm_cmd = "gsutil rm -r " + rm_cmd + "; "
# Publish the updated history page.
history_txt = history_txt.replace("gs://", "http://")
history_file = self.scratch_path + "/history_" + self.timestamp + ".md"
history_page = results_root_dir + "/history.md"
f = open(history_file, 'w')
f.write(history_txt)
f.close()
# Construct history cp cmd
history_cp_cmd = "gsutil cp " + history_file + " " + history_page + \
"; rm -rf " + history_file + "; "
# Copy over the latest regression result.
log.info("Publishing results to %s",
results_page.replace("gs://", "http://"))
cmd = history_cp_cmd + rm_cmd + \
"gsutil cp " + self.results_file + " " + results_page
try:
cmd_output = subprocess.run(args=cmd,
shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.DEVNULL)
except Exception as e:
log.error("%s: Failed to publish results:\n\"%s\"", e, str(cmd))
def publish_results(self):
        '''Public facing API for publishing results to the OpenTitan web server.
'''
for item in self.cfgs:
item._publish_results()
|
uncertainties/unumpy/__init__.py | AdityaSavara/uncertainties | 364 | 12743600 | <filename>uncertainties/unumpy/__init__.py
"""
Utilities for NumPy arrays and matrices that contain numbers with
uncertainties.
This package contains:
1) utilities that help with the creation and manipulation of NumPy
arrays and matrices of numbers with uncertainties;
2) generalizations of multiple NumPy functions so that they also work
with arrays that contain numbers with uncertainties.
- Arrays of numbers with uncertainties can be built as follows:
arr = unumpy.uarray([1, 2], [0.01, 0.002]) # (values, uncertainties)
NumPy arrays of numbers with uncertainties can also be built directly
through NumPy, thanks to NumPy's support of arrays of arbitrary objects:
arr = numpy.array([uncertainties.ufloat(1, 0.1),...])
- Matrices of numbers with uncertainties are best created in one of
two ways:
  mat = unumpy.umatrix([1, 2], [0.01, 0.002])  # (values, uncertainties)
Matrices can also be built by converting arrays of numbers with
uncertainties, through the unumpy.matrix class:
mat = unumpy.matrix(arr)
unumpy.matrix objects behave like numpy.matrix objects of numbers with
uncertainties, but with better support for some operations (such as
matrix inversion):
# The inverse or pseudo-inverse of a unumpy.matrix can be calculated:
  print(mat.I)  # Would not work with numpy.matrix([[ufloat(...),...]]).I
- Nominal values and uncertainties of arrays can be directly accessed:
  print(unumpy.nominal_values(arr))  # [ 1. 2.]
  print(unumpy.std_devs(mat))  # [ 0.01 0.002]
- This module defines uncertainty-aware mathematical functions that
generalize those from uncertainties.umath so that they work on NumPy
arrays of numbers with uncertainties instead of just scalars:
  print(unumpy.cos(arr))  # Array with the cosine of each element
NumPy's function names are used, and not those of the math module (for
instance, unumpy.arccos is defined, like in NumPy, and is not named
acos like in the standard math module).
The definitions of the mathematical quantities calculated by these
functions are available in the documentation of uncertainties.umath.
- The unumpy.ulinalg module contains more uncertainty-aware functions
for arrays that contain numbers with uncertainties (see the
documentation for this module).
This module requires the NumPy package.
(c) 2009-2016 by <NAME> (EOL) <<EMAIL>>.
Please send feature requests, bug reports, or feedback to this address.
This software is released under a dual license. (1) The BSD license.
(2) Any other license, as long as it is obtained from the original
author."""
# Local modules:
from .core import *
from . import ulinalg # Local sub-module
# __all__ is set so that pydoc shows all important functions:
__all__ = core.__all__
# "import numpy" makes numpy.linalg available. This behavior is
# copied here, for maximum compatibility:
__all__.append('ulinalg')
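# Example usage (illustrative, not part of the original module):
#   from uncertainties import unumpy
#   arr = unumpy.uarray([1, 2], [0.01, 0.002])   # values and uncertainties
#   print(unumpy.nominal_values(arr))            # [1. 2.]
#   print(unumpy.std_devs(arr))                  # [0.01 0.002]
#   print(unumpy.cos(arr))                       # element-wise cosine with propagated uncertainty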
|
nonebot/notice_request.py | remiliacn/nonebot | 1,101 | 12743606 | from typing import List, Optional, Union
from aiocqhttp import Event as CQEvent
from aiocqhttp.bus import EventBus
from . import NoneBot
from .log import logger
from .exceptions import CQHttpError
from .session import BaseSession
from .typing import NoticeHandler_T, RequestHandler_T
class EventHandler:
"""INTERNAL API"""
__slots__ = ('events', 'func')
def __init__(self, events: List[str], func: Union[NoticeHandler_T, RequestHandler_T]):
self.events = events
self.func = func
class EventManager:
"""INTERNAL API"""
bus = EventBus()
@classmethod
def add_event_handler(cls, handler: EventHandler) -> None:
for event in handler.events:
cls.bus.subscribe(event, handler.func)
@classmethod
def remove_event_handler(cls, handler: EventHandler) -> None:
for event in handler.events:
cls.bus.unsubscribe(event, handler.func)
@classmethod
def switch_event_handler_global(cls,
handler: EventHandler,
state: Optional[bool] = None) -> None:
for event in handler.events:
if handler.func in cls.bus._subscribers[event] and not state:
cls.bus.unsubscribe(event, handler.func)
elif handler.func not in cls.bus._subscribers[
event] and state is not False:
cls.bus.subscribe(event, handler.func)
class NoticeSession(BaseSession):
__slots__ = ()
def __init__(self, bot: NoneBot, event: CQEvent):
super().__init__(bot, event)
class RequestSession(BaseSession):
__slots__ = ()
def __init__(self, bot: NoneBot, event: CQEvent):
super().__init__(bot, event)
async def approve(self, remark: str = '') -> None:
"""
Approve the request.
:param remark: remark of friend (only works in friend request)
"""
try:
await self.bot.call_action(action='.handle_quick_operation_async',
self_id=self.event.self_id,
context=self.event,
operation={
'approve': True,
'remark': remark
})
except CQHttpError:
pass
async def reject(self, reason: str = '') -> None:
"""
Reject the request.
:param reason: reason to reject (only works in group request)
"""
try:
await self.bot.call_action(action='.handle_quick_operation_async',
self_id=self.event.self_id,
context=self.event,
operation={
'approve': False,
'reason': reason
})
except CQHttpError:
pass
async def handle_notice_or_request(bot: NoneBot, event: CQEvent) -> None:
"""INTERNAL API"""
if event.type == 'notice':
_log_notice(event)
session = NoticeSession(bot, event)
else: # must be 'request'
_log_request(event)
session = RequestSession(bot, event)
ev_name = event.name
logger.debug(f'Emitting event: {ev_name}')
try:
await EventManager.bus.emit(ev_name, session)
except Exception as e:
logger.error(f'An exception occurred while handling event {ev_name}:')
logger.exception(e)
def _log_notice(event: CQEvent) -> None:
logger.info(f'Notice: {event}')
def _log_request(event: CQEvent) -> None:
logger.info(f'Request: {event}')
__all__ = [
'NoticeSession',
'RequestSession',
]
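# Example (illustrative sketch, not part of the original module): wiring a notice
# handler through the internal EventManager defined above. In a real bot such
# handlers are normally registered via NoneBot's plugin decorators rather than
# by touching this INTERNAL API directly; the event name below is assumed.
#   async def _on_group_increase(session: NoticeSession):
#       await session.send('Welcome!')
#   EventManager.add_event_handler(
#       EventHandler(['notice.group_increase'], _on_group_increase))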
|
examples/maml_regression/evjang_transforms.py | onlyrico/functorch | 279 | 12743631 | <gh_stars>100-1000
# This example code was modified from https://github.com/ericjang/maml-jax .
#
# The original code comes with the following license:
# https://github.com/ericjang/maml-jax/blob/master/LICENSE
# Copyright <NAME>
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
from functorch import grad, vmap
import matplotlib.pyplot as plt
import math
import torch
import numpy as np
from torch.nn import functional as F
import matplotlib as mpl
mpl.use('Agg')
def net(params, x):
x = F.linear(x, params[0], params[1])
x = F.relu(x)
x = F.linear(x, params[2], params[3])
x = F.relu(x)
x = F.linear(x, params[4], params[5])
return x
params = [
torch.Tensor(40, 1).uniform_(-1., 1.).requires_grad_(),
torch.Tensor(40).zero_().requires_grad_(),
torch.Tensor(40, 40).uniform_(-1. / math.sqrt(40), 1. / math.sqrt(40)).requires_grad_(),
torch.Tensor(40).zero_().requires_grad_(),
torch.Tensor(1, 40).uniform_(-1. / math.sqrt(40), 1. / math.sqrt(40)).requires_grad_(),
torch.Tensor(1).zero_().requires_grad_(),
]
# The prototype doesn't like F.mse_loss.
def mse_loss(x, y):
return torch.mean((x - y) ** 2)
opt = torch.optim.Adam(params, lr=1e-3)
alpha = 0.1
K = 20
losses = []
num_tasks = 4
def sample_tasks(outer_batch_size, inner_batch_size):
# Select amplitude and phase for the task
As = []
phases = []
for _ in range(outer_batch_size):
As.append(np.random.uniform(low=0.1, high=.5))
phases.append(np.random.uniform(low=0., high=np.pi))
def get_batch():
xs, ys = [], []
for A, phase in zip(As, phases):
x = np.random.uniform(low=-5., high=5., size=(inner_batch_size, 1))
y = A * np.sin(x + phase)
xs.append(x)
ys.append(y)
return torch.tensor(xs, dtype=torch.float), torch.tensor(ys, dtype=torch.float)
x1, y1 = get_batch()
x2, y2 = get_batch()
return x1, y1, x2, y2
for it in range(20000):
loss2 = 0.0
opt.zero_grad()
def get_loss_for_task(x1, y1, x2, y2):
def inner_loss(params, x1, y1):
f = net(params, x1)
loss = mse_loss(f, y1)
return loss
grads = grad(inner_loss)(tuple(params), x1, y1)
new_params = [(params[i] - alpha * grads[i]) for i in range(len(params))]
v_f = net(new_params, x2)
return mse_loss(v_f, y2)
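    # get_loss_for_task() above performs one inner-loop adaptation step with
    # functorch's grad() and evaluates the adapted parameters on the query set
    # (x2, y2). vmap() below maps that per-task loss over the leading "task"
    # dimension of the sampled batch, and the mean of the per-task losses is the
    # MAML outer-loop objective that gets backpropagated.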
task = sample_tasks(num_tasks, K)
inner_losses = vmap(get_loss_for_task)(task[0], task[1], task[2], task[3])
loss2 = sum(inner_losses) / len(inner_losses)
loss2.backward()
opt.step()
if it % 100 == 0:
print('Iteration %d -- Outer Loss: %.4f' % (it, loss2))
    losses.append(loss2.item())
t_A = torch.tensor(0.0).uniform_(0.1, 0.5)
t_b = torch.tensor(0.0).uniform_(0.0, math.pi)
t_x = torch.empty(4, 1).uniform_(-5, 5)
t_y = t_A * torch.sin(t_x + t_b)
opt.zero_grad()
t_params = params
for k in range(5):
    t_f = net(t_params, t_x)
t_loss = F.l1_loss(t_f, t_y)
grads = torch.autograd.grad(t_loss, t_params, create_graph=True)
t_params = [(t_params[i] - alpha * grads[i]) for i in range(len(params))]
test_x = torch.arange(-2 * math.pi, 2 * math.pi, step=0.01).unsqueeze(1)
test_y = t_A * torch.sin(test_x + t_b)
test_f = net(t_params, test_x)
plt.plot(test_x.data.numpy(), test_y.data.numpy(), label='sin(x)')
plt.plot(test_x.data.numpy(), test_f.data.numpy(), label='net(x)')
plt.plot(t_x.data.numpy(), t_y.data.numpy(), 'o', label='Examples')
plt.legend()
plt.savefig('maml-sine.png')
plt.figure()
plt.plot(np.convolve(losses, [.05] * 20))
plt.savefig('losses.png')
|
kws_streaming/layers/conv1d_transpose.py | deepneuralmachine/google-research | 23,901 | 12743638 | <reponame>deepneuralmachine/google-research
# coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Conv1DTranspose streaming aware layer."""
from kws_streaming.layers import modes
from kws_streaming.layers.compat import tf
class Conv1DTranspose(tf.keras.layers.Conv1DTranspose):
"""streaming aware Conv1DTranspose layer.
Attributes:
mode: Training or inference modes: non streaming, streaming.
inference_batch_size: batch size in inference mode
state_shape: shape of remainder state
crop_output: if True output will be cropped: aligned by stride
**kwargs: additional layer arguments
"""
def __init__(self,
mode=modes.Modes.TRAINING,
inference_batch_size=1,
pad_time_dim='causal',
state_shape=None,
crop_output=True,
**kwargs):
super(Conv1DTranspose, self).__init__(**kwargs)
if (kwargs.get('activation') not in [None, 'linear']) and self.use_bias:
raise ValueError('activation should be disabled because we need to '
'subtract bias from remainder state, in streaming mode',
kwargs.get('activation'))
self.mode = mode
self.inference_batch_size = inference_batch_size
self.pad_time_dim = pad_time_dim
self.state_shape = state_shape
self.crop_output = crop_output
self.overlap = self.kernel_size[0] - self.strides[0]
self.overlap = max(self.overlap, 0)
if pad_time_dim not in ['same', 'causal']:
raise ValueError(
'pad_time_dim (\'%s\') must be either \'same\' or \'causal\'' %
pad_time_dim)
if 'padding' in kwargs and kwargs['padding'] != 'valid':
raise ValueError(
'padding (\'%s\') must be \'valid\'. Use pad_time_dim to make the '
'layer causal (\'causal\') or with lookahead (\'same\')' %
kwargs['padding'])
def build(self, input_shape):
super(Conv1DTranspose, self).build(input_shape)
if input_shape.rank < 2:
raise ValueError('input_shape.rank:%d must at least 2' % input_shape.rank)
if self.mode in [
modes.Modes.STREAM_INTERNAL_STATE_INFERENCE,
modes.Modes.STREAM_EXTERNAL_STATE_INFERENCE
]:
if input_shape.as_list()[1] is None:
raise ValueError('in streaming mode time dimension of input packet '
'should not be dynamic: TFLite limitation')
self.output_time_dim = input_shape.as_list()[1] * self.strides[0]
if self.overlap > 0:
self.state_shape = [
self.inference_batch_size, self.overlap, self.filters
]
if self.mode == modes.Modes.STREAM_INTERNAL_STATE_INFERENCE:
self.states = self.add_weight(
name='states',
shape=self.state_shape,
trainable=False,
initializer=tf.zeros_initializer)
elif self.mode == modes.Modes.STREAM_EXTERNAL_STATE_INFERENCE:
        # For streaming inference with external states,
# the states are passed in as input.
self.input_state = tf.keras.layers.Input(
shape=self.state_shape[1:],
batch_size=self.inference_batch_size,
name=self.name + '/input_state_remainder')
self.output_state = None
def call(self, inputs):
if self.mode == modes.Modes.STREAM_INTERNAL_STATE_INFERENCE:
return self._streaming_internal_state(inputs)
elif self.mode == modes.Modes.STREAM_EXTERNAL_STATE_INFERENCE:
# in streaming inference mode with external state
# in addition to the output we return the output state.
output, self.output_state = self._streaming_external_state(
inputs, self.input_state)
return output
elif self.mode in (modes.Modes.TRAINING, modes.Modes.NON_STREAM_INFERENCE):
# run non streamable training or non streamable inference
return self._non_streaming(inputs)
else:
raise ValueError(f'Encountered unexpected mode `{self.mode}`.')
def get_config(self):
config = super(Conv1DTranspose, self).get_config()
# only variables which are listed in constructor can be updated here
# because they will be used to construct the class from config
config.update({
'mode': self.mode,
'inference_batch_size': self.inference_batch_size,
'pad_time_dim': self.pad_time_dim,
'state_shape': self.state_shape,
'crop_output': self.crop_output,
})
return config
def _streaming_internal_state(self, inputs):
outputs = super(Conv1DTranspose, self).call(inputs)
if self.overlap == 0:
if self.crop_output:
return tf.identity(outputs[:, 0:self.output_time_dim, :])
else:
return tf.identity(outputs)
output_shape = outputs.shape.as_list()
# need to add remainder state to a specific region of output as below:
# outputs[:,0:self.overlap,:] = outputs[:,0:self.overlap,:] + self.states
# but 'Tensor' object does not support item assignment,
# so doing it through full summation below
output_shape[1] -= self.state_shape[1]
padded_remainder = tf.concat(
[self.states, tf.zeros(output_shape, tf.float32)], 1)
outputs = outputs + padded_remainder
    # extract remainder state and subtract bias if it is used:
# bias will be added in the next iteration again and remainder
# should have only convolution part, so that bias is not added twice
if self.use_bias:
new_state = outputs[:, -self.overlap:, :] - self.bias
else:
new_state = outputs[:, -self.overlap:, :]
assign_states = self.states.assign(new_state)
with tf.control_dependencies([assign_states]):
if self.crop_output:
return tf.identity(outputs[:, 0:self.output_time_dim, :])
else:
return tf.identity(outputs)
def _streaming_external_state(self, inputs, states):
outputs = super(Conv1DTranspose, self).call(inputs)
if self.overlap == 0:
if self.crop_output:
return outputs[:, 0:self.output_time_dim, :], []
else:
return outputs, []
output_shape = outputs.shape.as_list()
output_shape[1] -= self.state_shape[1]
padded_remainder = tf.concat(
[states, tf.zeros(output_shape, tf.float32)], 1)
outputs = outputs + padded_remainder
if self.use_bias:
new_state = outputs[:, -self.overlap:, :] - self.bias
else:
new_state = outputs[:, -self.overlap:, :]
if self.crop_output:
return outputs[:, 0:self.output_time_dim, :], new_state
else:
return outputs, new_state
def _non_streaming(self, inputs):
outputs = super(Conv1DTranspose, self).call(inputs)
# during training or non streaming inference, input shape can be dynamic
output_time_dim = tf.shape(inputs)[1] * self.strides[0]
if self.crop_output:
if self.pad_time_dim == 'same':
crop_left = self.overlap // 2
return outputs[:, crop_left:crop_left + output_time_dim, :]
else:
return outputs[:, 0:output_time_dim, :]
else:
return outputs
def get_input_state(self):
# input state will be used only for STREAM_EXTERNAL_STATE_INFERENCE mode
if self.mode == modes.Modes.STREAM_EXTERNAL_STATE_INFERENCE:
return [self.input_state]
else:
raise ValueError('Expected the layer to be in external streaming mode, '
f'not `{self.mode}`.')
def get_output_state(self):
# output state will be used only for STREAM_EXTERNAL_STATE_INFERENCE mode
if self.mode == modes.Modes.STREAM_EXTERNAL_STATE_INFERENCE:
return [self.output_state]
else:
raise ValueError('Expected the layer to be in external streaming mode, '
f'not `{self.mode}`.')
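# Example usage (illustrative sketch, not part of the original module). The extra
# keyword arguments are forwarded to tf.keras.layers.Conv1DTranspose:
#   layer = Conv1DTranspose(
#       mode=modes.Modes.TRAINING,   # or one of the streaming inference modes
#       pad_time_dim='causal',
#       crop_output=True,
#       filters=8, kernel_size=3, strides=2)
#   inputs = tf.keras.layers.Input(shape=(16, 8), batch_size=1)
#   model = tf.keras.Model(inputs, layer(inputs))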
|
source/vsm-dashboard/vsm_dashboard/dashboards/vsm/openstackconnect/forms.py | ramkrsna/virtual-storage-manager | 172 | 12743639 |
# Copyright 2014 Intel Corporation, All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the"License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import logging
from django.utils.translation import ugettext_lazy as _
from horizon import forms
LOG = logging.getLogger(__name__)
class AddOpenstackEndpointForm(forms.SelfHandlingForm):
failure_url = 'horizon:vsm:openstackconnect:index'
os_tenant_name = forms.CharField(
label = _("Tenant Name"),
max_length = 255,
min_length = 1,
error_messages = {
'required': _('This field is required.')
}
)
os_username = forms.CharField(
label = _("UserName"),
max_length = 255,
min_length = 1,
error_messages = {
'required': _('This field is required.')
}
)
os_password = forms.CharField(
label = _("Password"),
widget=forms.PasswordInput(render_value=False),
max_length=255,
min_length=1,
error_messages = {
'required': _('This field is required.')
}
)
os_auth_url = forms.CharField(
label = _("Auth Url"),
max_length = 255,
min_length = 1,
error_messages = {
'required': _('This field is required.')
}
)
os_region_name = forms.CharField(
label = _("Region Name"),
max_length = 255,
min_length = 0,
required = False
)
ssh_user = forms.CharField(
label = _("SSH User Name"),
max_length = 255,
min_length = 1,
error_messages = {
'required': _('This field is required.')
}
)
def handle(self, request, data):
pass
# TODO deliver a cluster id in data
# data['cluster_id'] = 1
# try:
# LOG.info("CEPH_LOG in ADD ip, %s" % str(data))
# os_tenant_name = data['os_tenant_name']
# os_username = data['os_username']
# os_password = data['<PASSWORD>']
# os_auth_url = data['os_auth_url']
# ip = os_auth_url.split(":")[1][2:]
# appnodes = vsm_api.appnode_list(request)
# for appnode in appnodes:
# old_os_auth_url = appnode.os_auth_url
# old_ip = old_os_auth_url.split(":")[1][2:]
# if ip == old_ip:
# messages.error(request, "duplicate ip address")
# return False
# body = {
# 'appnodes': {
# 'os_tenant_name': os_tenant_name,
# 'os_username': os_username,
# 'os_password': <PASSWORD>,
# 'os_auth_url': os_auth_url
# }
# }
# LOG.info("CEPH_LOG in handle body %s" % str(body))
# ret = vsm_api.add_appnodes(request, body['appnodes'])
#
# messages.success(request,
# _('Successfully add openstack: %s')
# % data['os_auth_url'])
# return ret
# except:
# redirect = reverse("horizon:vsm:openstackconnect:index")
# exceptions.handle(request,
# _('Unable to create appnode.'),
# redirect=redirect)
class UpdateOpenstackEndpointForm(forms.SelfHandlingForm):
id = forms.CharField(label=_("ID"), widget=forms.HiddenInput)
os_tenant_name = forms.CharField(
label = _("Tenant Name"),
max_length = 255,
min_length = 1,
error_messages = {
'required': _('This field is required.')
}
)
os_username = forms.CharField(
label = _("UserName"),
max_length = 255,
min_length = 1,
error_messages = {
'required': _('This field is required.')
}
)
os_password = forms.CharField(
label = _("Password"),
widget=forms.PasswordInput(render_value=False),
max_length=255,
min_length=1,
error_messages = {
'required': _('This field is required.')
}
)
os_auth_url = forms.CharField(
label = _("Auth Url"),
max_length = 255,
min_length = 1,
error_messages = {
'required': _('This field is required.')
}
)
os_region_name = forms.CharField(
label = _("Region Name"),
max_length = 255,
min_length = 0,
required = False
)
ssh_user = forms.CharField(
label = _("SSH User Name"),
max_length = 255,
min_length = 1,
error_messages = {
'required': _('This field is required.')
}
)
def handle(self, request, data):
pass
# failed, succeeded = [], []
# id = data.pop('id')
# # ip = data.pop('ip')
# os_tenant_name = data.pop('os_tenant_name')
# os_username = data.pop('os_username')
# os_password = data.pop('os_password')
# os_auth_url = data.pop('os_auth_url')
# vsm_api.update_appnode(request, id,
# os_tenant_name=os_tenant_name,
# os_username=os_username,
# os_password=<PASSWORD>,
# os_auth_url=os_auth_url,
# ssh_status="",
# log_info="")
#
# messages.success(request, _('OpenStack auth has been updated successfully.'))
# return True
#
# if failed:
# failed = map(force_unicode, failed)
# messages.error(request,
# _('Unable to update %(attributes)s for the user.')
# % {"attributes": ", ".join(failed)})
# return True
|
tests/simple/arith.py | parag-hub/arrayfire-python | 420 | 12743718 | <gh_stars>100-1000
#!/usr/bin/env python
#######################################################
# Copyright (c) 2015, ArrayFire
# All rights reserved.
#
# This file is distributed under 3-clause BSD license.
# The complete license agreement can be obtained at:
# http://arrayfire.com/licenses/BSD-3-Clause
########################################################
import arrayfire as af
from . import _util
def simple_arith(verbose=False):
display_func = _util.display_func(verbose)
a = af.randu(3, 3)
b = af.constant(4, 3, 3)
display_func(a)
display_func(b)
c = a + b
d = a
d += b
display_func(c)
display_func(d)
display_func(a + 2)
display_func(3 + a)
c = a - b
d = a
d -= b
display_func(c)
display_func(d)
display_func(a - 2)
display_func(3 - a)
c = a * b
d = a
d *= b
display_func(c * 2)
display_func(3 * d)
display_func(a * 2)
display_func(3 * a)
c = a / b
d = a
d /= b
display_func(c / 2.0)
display_func(3.0 / d)
display_func(a / 2)
display_func(3 / a)
c = a % b
d = a
d %= b
display_func(c % 2.0)
display_func(3.0 % d)
display_func(a % 2)
display_func(3 % a)
c = a ** b
d = a
d **= b
display_func(c ** 2.0)
display_func(3.0 ** d)
display_func(a ** 2)
display_func(3 ** a)
display_func(a < b)
display_func(a < 0.5)
display_func(0.5 < a)
display_func(a <= b)
display_func(a <= 0.5)
display_func(0.5 <= a)
display_func(a > b)
display_func(a > 0.5)
display_func(0.5 > a)
display_func(a >= b)
display_func(a >= 0.5)
display_func(0.5 >= a)
display_func(a != b)
display_func(a != 0.5)
display_func(0.5 != a)
display_func(a == b)
display_func(a == 0.5)
display_func(0.5 == a)
a = af.randu(3, 3, dtype=af.Dtype.u32)
b = af.constant(4, 3, 3, dtype=af.Dtype.u32)
display_func(a & b)
display_func(a & 2)
c = a
c &= 2
display_func(c)
display_func(a | b)
display_func(a | 2)
c = a
c |= 2
display_func(c)
display_func(a >> b)
display_func(a >> 2)
c = a
c >>= 2
display_func(c)
display_func(a << b)
display_func(a << 2)
c = a
c <<= 2
display_func(c)
display_func(-a)
display_func(+a)
display_func(~a)
display_func(a)
display_func(af.cast(a, af.Dtype.c32))
display_func(af.maxof(a, b))
display_func(af.minof(a, b))
display_func(af.clamp(a, 0, 1))
display_func(af.clamp(a, 0, b))
display_func(af.clamp(a, b, 1))
display_func(af.rem(a, b))
a = af.randu(3, 3) - 0.5
b = af.randu(3, 3) - 0.5
display_func(af.abs(a))
display_func(af.arg(a))
display_func(af.sign(a))
display_func(af.round(a))
display_func(af.trunc(a))
display_func(af.floor(a))
display_func(af.ceil(a))
display_func(af.hypot(a, b))
display_func(af.sin(a))
display_func(af.cos(a))
display_func(af.tan(a))
display_func(af.asin(a))
display_func(af.acos(a))
display_func(af.atan(a))
display_func(af.atan2(a, b))
c = af.cplx(a)
d = af.cplx(a, b)
display_func(c)
display_func(d)
display_func(af.real(d))
display_func(af.imag(d))
display_func(af.conjg(d))
display_func(af.sinh(a))
display_func(af.cosh(a))
display_func(af.tanh(a))
display_func(af.asinh(a))
display_func(af.acosh(a))
display_func(af.atanh(a))
a = af.abs(a)
b = af.abs(b)
display_func(af.root(a, b))
display_func(af.pow(a, b))
display_func(af.pow2(a))
display_func(af.sigmoid(a))
display_func(af.exp(a))
display_func(af.expm1(a))
display_func(af.erf(a))
display_func(af.erfc(a))
display_func(af.log(a))
display_func(af.log1p(a))
display_func(af.log10(a))
display_func(af.log2(a))
display_func(af.sqrt(a))
display_func(af.rsqrt(a))
display_func(af.cbrt(a))
a = af.round(5 * af.randu(3, 3) - 1)
b = af.round(5 * af.randu(3, 3) - 1)
display_func(af.factorial(a))
display_func(af.tgamma(a))
display_func(af.lgamma(a))
display_func(af.iszero(a))
display_func(af.isinf(a/b))
display_func(af.isnan(a/a))
a = af.randu(5, 1)
b = af.randu(1, 5)
c = af.broadcast(lambda x, y: x+y, a, b)
display_func(a)
display_func(b)
display_func(c)
@af.broadcast
def test_add(aa, bb):
return aa + bb
display_func(test_add(a, b))
_util.tests["arith"] = simple_arith
|
util/cpt_upgraders/multiple-event-queues.py | hyu-iot/gem5 | 765 | 12743726 | # Add support for multiple event queues
def upgrader(cpt):
cpt.set('Globals', 'numMainEventQueues', '1')
legacy_version = 12
|
alipay/aop/api/domain/ConsumerNotifyIstd.py | antopen/alipay-sdk-python-all | 213 | 12743736 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class ConsumerNotifyIstd(object):
def __init__(self):
self._goods_count = None
self._goods_img = None
self._goods_name = None
self._merchant_mobile = None
self._merchant_name = None
self._tiny_app_id = None
self._tiny_app_url = None
@property
def goods_count(self):
return self._goods_count
@goods_count.setter
def goods_count(self, value):
self._goods_count = value
@property
def goods_img(self):
return self._goods_img
@goods_img.setter
def goods_img(self, value):
self._goods_img = value
@property
def goods_name(self):
return self._goods_name
@goods_name.setter
def goods_name(self, value):
self._goods_name = value
@property
def merchant_mobile(self):
return self._merchant_mobile
@merchant_mobile.setter
def merchant_mobile(self, value):
self._merchant_mobile = value
@property
def merchant_name(self):
return self._merchant_name
@merchant_name.setter
def merchant_name(self, value):
self._merchant_name = value
@property
def tiny_app_id(self):
return self._tiny_app_id
@tiny_app_id.setter
def tiny_app_id(self, value):
self._tiny_app_id = value
@property
def tiny_app_url(self):
return self._tiny_app_url
@tiny_app_url.setter
def tiny_app_url(self, value):
self._tiny_app_url = value
def to_alipay_dict(self):
params = dict()
if self.goods_count:
if hasattr(self.goods_count, 'to_alipay_dict'):
params['goods_count'] = self.goods_count.to_alipay_dict()
else:
params['goods_count'] = self.goods_count
if self.goods_img:
if hasattr(self.goods_img, 'to_alipay_dict'):
params['goods_img'] = self.goods_img.to_alipay_dict()
else:
params['goods_img'] = self.goods_img
if self.goods_name:
if hasattr(self.goods_name, 'to_alipay_dict'):
params['goods_name'] = self.goods_name.to_alipay_dict()
else:
params['goods_name'] = self.goods_name
if self.merchant_mobile:
if hasattr(self.merchant_mobile, 'to_alipay_dict'):
params['merchant_mobile'] = self.merchant_mobile.to_alipay_dict()
else:
params['merchant_mobile'] = self.merchant_mobile
if self.merchant_name:
if hasattr(self.merchant_name, 'to_alipay_dict'):
params['merchant_name'] = self.merchant_name.to_alipay_dict()
else:
params['merchant_name'] = self.merchant_name
if self.tiny_app_id:
if hasattr(self.tiny_app_id, 'to_alipay_dict'):
params['tiny_app_id'] = self.tiny_app_id.to_alipay_dict()
else:
params['tiny_app_id'] = self.tiny_app_id
if self.tiny_app_url:
if hasattr(self.tiny_app_url, 'to_alipay_dict'):
params['tiny_app_url'] = self.tiny_app_url.to_alipay_dict()
else:
params['tiny_app_url'] = self.tiny_app_url
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = ConsumerNotifyIstd()
if 'goods_count' in d:
o.goods_count = d['goods_count']
if 'goods_img' in d:
o.goods_img = d['goods_img']
if 'goods_name' in d:
o.goods_name = d['goods_name']
if 'merchant_mobile' in d:
o.merchant_mobile = d['merchant_mobile']
if 'merchant_name' in d:
o.merchant_name = d['merchant_name']
if 'tiny_app_id' in d:
o.tiny_app_id = d['tiny_app_id']
if 'tiny_app_url' in d:
o.tiny_app_url = d['tiny_app_url']
return o
|
ryu/app/network_awareness/__init__.py | hiArvin/ryu | 269 | 12743755 | <gh_stars>100-1000
"For loading module"
|
tests/test_providers/test_hardware.py | chinghwayu/mimesis | 2,619 | 12743783 | # -*- coding: utf-8 -*-
import re
import pytest
from mimesis import Hardware
from mimesis.data import (
CPU,
CPU_CODENAMES,
CPU_MODEL_CODES,
GENERATION,
GRAPHICS,
HDD_SSD,
MANUFACTURERS,
PHONE_MODELS,
RAM_SIZES,
RAM_TYPES,
RESOLUTIONS,
SCREEN_SIZES,
)
from . import patterns
class TestHardware(object):
@pytest.fixture
def hard(self):
return Hardware()
def test_str(self, hard):
assert re.match(patterns.PROVIDER_STR_REGEX, str(hard))
def test_resolution(self, hard):
result = hard.resolution()
assert result in RESOLUTIONS
def test_screen_size(self, hard):
result = hard.screen_size()
assert result in SCREEN_SIZES
def test_generation(self, hard):
result = hard.generation()
assert result in GENERATION
assert isinstance(result, str)
def test_cpu_model_code(self, hard):
result = hard.cpu_model_code()
assert result in CPU_MODEL_CODES
assert isinstance(result, str)
def test_cpu_frequency(self, hard):
result = hard.cpu_frequency().split("G")[0]
assert float(result) < 4.4
def test_cpu(self, hard):
result = hard.cpu()
assert result in CPU
def test_cpu_codename(self, hard):
result = hard.cpu_codename()
assert result in CPU_CODENAMES
def test_ram_type(self, hard):
result = hard.ram_type()
assert result in RAM_TYPES
def test_ram_size(self, hard):
result = hard.ram_size()
assert result in RAM_SIZES
def test_ssd_or_hdd(self, hard):
result = hard.ssd_or_hdd()
assert result in HDD_SSD
def test_graphics(self, hard):
result = hard.graphics()
assert result in GRAPHICS
def test_manufacturer(self, hard):
result = hard.manufacturer()
assert result in MANUFACTURERS
def test_phone_model(self, hard):
result = hard.phone_model()
assert result in PHONE_MODELS
class TestSeededHardware(object):
@pytest.fixture
def h1(self, seed):
return Hardware(seed=seed)
@pytest.fixture
def h2(self, seed):
return Hardware(seed=seed)
def test_resolution(self, h1, h2):
assert h1.resolution() == h2.resolution()
def test_screen_size(self, h1, h2):
assert h1.screen_size() == h2.screen_size()
def test_generation(self, h1, h2):
assert h1.generation() == h2.generation()
def test_cpu_model_code(self, h1, h2):
assert h1.cpu_model_code() == h2.cpu_model_code()
def test_cpu_frequency(self, h1, h2):
assert h1.cpu_frequency() == h2.cpu_frequency()
def test_cpu(self, h1, h2):
assert h1.cpu() == h2.cpu()
def test_cpu_codename(self, h1, h2):
assert h1.cpu_codename() == h2.cpu_codename()
def test_ram_type(self, h1, h2):
assert h1.ram_type() == h2.ram_type()
def test_ram_size(self, h1, h2):
assert h1.ram_size() == h2.ram_size()
def test_ssd_or_hdd(self, h1, h2):
assert h1.ssd_or_hdd() == h2.ssd_or_hdd()
def test_graphics(self, h1, h2):
assert h1.graphics() == h2.graphics()
def test_manufacturer(self, h1, h2):
assert h1.manufacturer() == h2.manufacturer()
def test_phone_model(self, h1, h2):
assert h1.phone_model() == h2.phone_model()
|
openstackclient/compute/v2/server_backup.py | efrat-elimelech/python-openstackclient | 262 | 12743790 | # Copyright 2012-2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""Compute v2 Server action implementations"""
import importlib
from osc_lib.command import command
from osc_lib import exceptions
from osc_lib import utils
from openstackclient.i18n import _
class CreateServerBackup(command.ShowOne):
_description = _("Create a server backup image")
IMAGE_API_VERSIONS = {
"1": "openstackclient.image.v1.image",
"2": "openstackclient.image.v2.image",
}
def get_parser(self, prog_name):
parser = super(CreateServerBackup, self).get_parser(prog_name)
parser.add_argument(
'server',
metavar='<server>',
help=_('Server to back up (name or ID)'),
)
parser.add_argument(
'--name',
metavar='<image-name>',
help=_('Name of the backup image (default: server name)'),
)
parser.add_argument(
'--type',
metavar='<backup-type>',
help=_(
'Used to populate the backup_type property of the backup '
'image (default: empty)'
),
)
parser.add_argument(
'--rotate',
metavar='<count>',
type=int,
help=_('Number of backups to keep (default: 1)'),
)
parser.add_argument(
'--wait',
action='store_true',
help=_('Wait for backup image create to complete'),
)
return parser
def take_action(self, parsed_args):
def _show_progress(progress):
if progress:
self.app.stderr.write('\rProgress: %s' % progress)
self.app.stderr.flush()
compute_client = self.app.client_manager.sdk_connection.compute
server = compute_client.find_server(parsed_args.server)
# Set sane defaults as this API wants all mouths to be fed
if parsed_args.name is None:
backup_name = server.name
else:
backup_name = parsed_args.name
if parsed_args.type is None:
backup_type = ""
else:
backup_type = parsed_args.type
if parsed_args.rotate is None:
backup_rotation = 1
else:
backup_rotation = parsed_args.rotate
compute_client.backup_server(
server.id,
backup_name,
backup_type,
backup_rotation,
)
image_client = self.app.client_manager.image
image = image_client.find_image(backup_name, ignore_missing=False)
if parsed_args.wait:
if utils.wait_for_status(
image_client.get_image,
image.id,
callback=_show_progress,
):
self.app.stdout.write('\n')
else:
msg = _('Error creating server backup: %s') % parsed_args.name
raise exceptions.CommandError(msg)
if self.app.client_manager._api_version['image'] == '1':
info = {}
info.update(image._info)
info['properties'] = utils.format_dict(info.get('properties', {}))
else:
# Get the right image module to format the output
image_module = importlib.import_module(
self.IMAGE_API_VERSIONS[
self.app.client_manager._api_version['image']
]
)
info = image_module._format_image(image)
return zip(*sorted(info.items()))
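# Illustrative CLI usage of the action above (flags map to the parser arguments):
#   openstack server backup create --name nightly --type daily --rotate 7 --wait <server>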
|
hermione/module_templates/__IMPLEMENTED_BASE__/src/ml/analysis/dimensionality_reduction.py | karenstemartins/hermione | 183 | 12743825 | from sklearn.decomposition import FactorAnalysis
from sklearn.decomposition import FastICA
from sklearn.decomposition import LatentDirichletAllocation
from sklearn.decomposition import TruncatedSVD
from sklearn.decomposition import NMF
from sklearn.manifold import Isomap
from sklearn.manifold import MDS
from sklearn.manifold import LocallyLinearEmbedding
from sklearn.manifold import SpectralEmbedding
from sklearn.manifold import TSNE
from umap import UMAP
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from ml.analysis.pca import PCA
import keras
import numpy as np
import pandas as pd
class Autoencoder:
def __init__(self, n_components, n_layers = 1, **kwargs):
self.n_components = n_components
self.n_layers = n_layers
self.kwargs = kwargs
def fit(self, X, y = None):
        input_ = keras.layers.Input(shape=(X.shape[1],))
encoded = keras.layers.Dense(self.n_components, activation='relu')(input_)
decoded = keras.layers.Dense(X.shape[1], activation='relu')(encoded)
self.autoencoder = keras.Model(input_,decoded)
self.encoder = keras.Model(input_, encoded)
self.autoencoder.compile(loss = keras.losses.MeanSquaredError())
print(X.shape[1])
self.autoencoder.fit(X, X, epochs = 100, batch_size = 64, shuffle=True)
def transform(self, X, y = None):
return self.encoder.predict(X)
def fit_transform(self, X, y = None):
self.fit(X)
return self.encoder.predict(X)
class DimensionalityReducer:
def __init__(self, reducer, **kwargs):
"""
Constructor
Parameters
----------
selector : str
name of algorithm to be applied
**kwargs :
optional and positional arguments of the choosen algorithm (selector)
Returns
-------
FeatureSelector
Examples
---------
variance thresholding: f = FeatureSelector('variance', threshold=0.3) #Instantiating
f.fit(X[,y]) #fitting (y is optional for variance thresholding)
X = f.transform(X) #transforming
filter-based, k best (MAD): f = FeatureSelector('univariate_kbest', score_func=FeatureSelector.mean_abs_diff, k=2) #Instantiating
#score_func can be any function f: R^n -> R^n (n = number of columns)
f.fit(X,y) #fitting
X = f.transform(X) #transforming
wrapper, recursive: f = FeatureSelector('recursive', estimator = LinearSVC(), n_features_to_select=2) #Instantiating
#estimator should be an instance of a classification or regression model class from scikit-learn
#one can use a custom class but it must be compatible with scikit-learn arquitecture
f.fit(X,y) #fitting
X = f.transform(X) #transforming
wrapper, sequential: f = FeatureSelector('sequential', estimator = LinearSVC(), direction='forward') #Instantiating
#estimator should be an instance of a classification or regression model class from scikit-learn
#one can use a custom class but it must be compatible with scikit-learn arquitecture
f.fit(X,y) #fitting
X = f.transform(X) #transforming
to better understand the optional arguments of each algorithm see: https://scikit-learn.org/stable/modules/feature_selection.html
"""
self.reducer = reducer
self.reducers = {'factor_analysis': FactorAnalysis,
'pca': PCA,
'ica': FastICA,
'isomap': Isomap,
'locally_linear_embedding': LocallyLinearEmbedding,
'spectral_embedding': SpectralEmbedding,
'tsne': TSNE,
'mds':MDS,
'umap':UMAP,
'latent_dirichlet':LatentDirichletAllocation,
'truncated_svd':TruncatedSVD,
'nmf':NMF,
'linear_discriminant':LinearDiscriminantAnalysis,
'autoencoder':Autoencoder}
self.kwargs = kwargs
self.fitted = False
self.reduction = self.reducers[self.reducer](**self.kwargs)
def fit(self, X: pd.DataFrame, y = None):
"""
Identify the features to be selected.
Parameters
----------
X : pd.DataFrame
features to be selected
y : pd.DataFrame
target values
Returns
-------
None
"""
self.columns = X.columns
self.reduction.fit(X,y)
self.fitted = True
def transform(self, df: pd.DataFrame, y = None):
"""
Select features based on fit
Parameters
----------
pd.DataFrame
dataframe with features to be selected
Returns
-------
df : pd.DataFrame
dataframe with selected features only
"""
if not self.fitted:
raise Exception("Not yet trained.")
return self.reduction.transform(df)
def fit_transform(self, df: pd.DataFrame, y = None):
"""
Select features based on fit
Parameters
----------
pd.DataFrame
dataframe with features to be selected
Returns
-------
df : pd.DataFrame
dataframe with selected features only
"""
return self.reduction.fit_transform(df, y)
def inverse_transform(self, df: pd.DataFrame):
"""
Apply the invese_transform of vectorizer to each column
Options: index, bag_of_words and tf_idf
Parameters
----------
df : pd.DataFrame
dataframe with columns to be unvectorizer
Returns
-------
pd.DataFrame
"""
if not self.fitted:
raise Exception("Not yet trained.")
return self.reduction.inverse_transform(df)
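# Example usage (illustrative sketch; assumes `df` is an all-numeric pd.DataFrame):
#   reducer = DimensionalityReducer('pca', n_components=2)
#   reducer.fit(df)
#   components = reducer.transform(df)
#   restored = reducer.inverse_transform(components)  # only for reducers that implement it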
|
alipay/aop/api/domain/KoubeiAdvertCommissionAdvertQueryModel.py | snowxmas/alipay-sdk-python-all | 213 | 12743832 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class KoubeiAdvertCommissionAdvertQueryModel(object):
def __init__(self):
self._identifies = None
self._identify_type = None
@property
def identifies(self):
return self._identifies
@identifies.setter
def identifies(self, value):
if isinstance(value, list):
self._identifies = list()
for i in value:
self._identifies.append(i)
@property
def identify_type(self):
return self._identify_type
@identify_type.setter
def identify_type(self, value):
self._identify_type = value
def to_alipay_dict(self):
params = dict()
if self.identifies:
if isinstance(self.identifies, list):
for i in range(0, len(self.identifies)):
element = self.identifies[i]
if hasattr(element, 'to_alipay_dict'):
self.identifies[i] = element.to_alipay_dict()
if hasattr(self.identifies, 'to_alipay_dict'):
params['identifies'] = self.identifies.to_alipay_dict()
else:
params['identifies'] = self.identifies
if self.identify_type:
if hasattr(self.identify_type, 'to_alipay_dict'):
params['identify_type'] = self.identify_type.to_alipay_dict()
else:
params['identify_type'] = self.identify_type
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = KoubeiAdvertCommissionAdvertQueryModel()
if 'identifies' in d:
o.identifies = d['identifies']
if 'identify_type' in d:
o.identify_type = d['identify_type']
return o
|
degiro_connector/quotecast/actions/action_subscribe.py | Chavithra/degiro-connector | 107 | 12743836 | # IMPORTATION STANDARD
import requests
import logging
from typing import Optional
# IMPORTATION THIRD PARTY
# IMPORTATION INTERNAL
import degiro_connector.core.constants.urls as urls
from degiro_connector.quotecast.models.quotecast_pb2 import (
Quotecast,
)
from degiro_connector.core.abstracts.abstract_action import AbstractAction
class ActionSubscribe(AbstractAction):
@staticmethod
def quotecast_request_to_api(request: Quotecast.Request) -> str:
payload = '{"controlData":"'
for vwd_id in request.subscriptions:
for metric_name in request.subscriptions[vwd_id]:
payload += "a_req(" + vwd_id + "." + metric_name + ");"
for vwd_id in request.unsubscriptions:
for metric_name in request.unsubscriptions[vwd_id]:
payload += "a_rel(" + vwd_id + "." + metric_name + ");"
payload += '"}'
return payload
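        # The payload built above is a plain string; for example, subscribing to
        # LastPrice/LastVolume for product id "360015751" yields (illustrative):
        #   {"controlData":"a_req(360015751.LastPrice);a_req(360015751.LastVolume);"}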
@classmethod
def subscribe(
cls,
request: Quotecast.Request,
session_id: str,
session: requests.Session = None,
logger: logging.Logger = None,
) -> Optional[bool]:
"""Adds/removes metric from the data-stream.
Args:
request (QuotecastAPI.Request):
List of subscriptions & unsubscriptions to do.
Example :
request = Quotecast.Request()
request.subscriptions['360015751'].extend([
'LastPrice',
'LastVolume',
])
request.subscriptions['AAPL.BATS,E'].extend([
'LastPrice',
'LastVolume',
])
request.unsubscriptions['360015751'].extend([
'LastPrice',
'LastVolume',
])
session_id (str):
API's session id.
session (requests.Session, optional):
This object will be generated if None.
Defaults to None.
logger (logging.Logger, optional):
This object will be generated if None.
Defaults to None.
Raises:
BrokenPipeError:
A new "session_id" is required.
Returns:
bool:
Whether or not the subscription succeeded.
"""
if logger is None:
logger = cls.build_logger()
if session is None:
session = cls.build_session()
url = urls.QUOTECAST
url = f"{url}/{session_id}"
data = cls.quotecast_request_to_api(request=request)
logger.info("subscribe:data %s", data[:100])
session_request = requests.Request(method="POST", url=url, data=data)
prepped = session.prepare_request(request=session_request)
response = False
try:
raw_response = session.send(request=prepped, verify=False)
if raw_response.text == '[{"m":"sr"}]':
raise BrokenPipeError('A new "session_id" is required.')
else:
response = True
except Exception as e:
logger.fatal(e)
return None
return response
def call(self, request: Quotecast.Request) -> Optional[bool]:
session_id = self.connection_storage.session_id
session = self.session_storage.session
logger = self.logger
return self.subscribe(
request=request,
session_id=session_id,
session=session,
logger=logger,
)
|
seahub/share/utils.py | samuelduann/seahub | 420 | 12743861 | <filename>seahub/share/utils.py<gh_stars>100-1000
import logging
from seahub.group.utils import is_group_admin
from seahub.constants import PERMISSION_ADMIN, PERMISSION_READ_WRITE, CUSTOM_PERMISSION_PREFIX
from seahub.share.models import ExtraSharePermission, ExtraGroupsSharePermission, CustomSharePermissions
from seahub.utils import is_valid_org_id
import seaserv
from seaserv import seafile_api
logger = logging.getLogger(__name__)
def normalize_custom_permission_name(permission):
try:
if CUSTOM_PERMISSION_PREFIX in permission:
permission = permission.split('-')[1]
CustomSharePermissions.objects.get(id=int(permission))
except Exception as e:
logger.warning(e)
return None
return CUSTOM_PERMISSION_PREFIX + '-' + str(permission)
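# Summary of the checks performed by is_repo_admin() below (descriptive note):
# a user is treated as a repo admin when the repo was shared to them (or to a
# group they administer) with the 'admin' permission, when they own the repo, or
# when the repo is department-owned ('<group_id>@seafile_group') and they are an
# admin of that group.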
def is_repo_admin(username, repo_id):
# repo is shared to user with admin permission
try:
user_share_permission = ExtraSharePermission.objects. \
get_user_permission(repo_id, username)
if user_share_permission == PERMISSION_ADMIN:
return True
# get all groups that repo is shared to with admin permission
group_ids = ExtraGroupsSharePermission.objects.get_admin_groups_by_repo(repo_id)
for group_id in group_ids:
if is_group_admin(group_id, username):
return True
except Exception as e:
logger.error(e)
return False
repo_owner = seafile_api.get_repo_owner(repo_id) or seafile_api.get_org_repo_owner(repo_id)
if not repo_owner:
logger.error('repo %s owner is None' % repo_id)
return False
# repo owner
if username == repo_owner:
return True
# user is department admin
if '@seafile_group' in repo_owner:
# is group owned repo
group_id = int(repo_owner.split('@')[0])
if is_group_admin(group_id, username):
return True
return False
def share_dir_to_user(repo, path, owner, share_from, share_to, permission, org_id=None):
# Share repo or subdir to user with permission(r, rw, admin).
extra_share_permission = ''
if permission == PERMISSION_ADMIN:
extra_share_permission = permission
permission = PERMISSION_READ_WRITE
if is_valid_org_id(org_id):
if path == '/':
seaserv.seafserv_threaded_rpc.org_add_share(org_id, repo.repo_id,
owner, share_to,
permission)
else:
seafile_api.org_share_subdir_to_user(org_id, repo.repo_id,
path, owner,
share_to, permission)
else:
if path == '/':
seafile_api.share_repo(repo.repo_id, owner, share_to, permission)
else:
seafile_api.share_subdir_to_user(repo.repo_id, path,
owner, share_to,
permission)
if path == '/' and extra_share_permission == PERMISSION_ADMIN:
ExtraSharePermission.objects.create_share_permission(repo.repo_id, share_to, extra_share_permission)
def share_dir_to_group(repo, path, owner, share_from, gid, permission, org_id=None):
# Share repo or subdir to group with permission(r, rw, admin).
extra_share_permission = ''
if permission == PERMISSION_ADMIN:
extra_share_permission = permission
permission = PERMISSION_READ_WRITE
if is_valid_org_id(org_id):
if path == '/':
seafile_api.add_org_group_repo(repo.repo_id, org_id, gid,
owner, permission)
else:
seafile_api.org_share_subdir_to_group(org_id, repo.repo_id,
path, owner,
gid, permission)
else:
if path == '/':
seafile_api.set_group_repo(repo.repo_id, gid, owner,
permission)
else:
seafile_api.share_subdir_to_group(repo.repo_id, path,
owner, gid,
permission)
# add share permission if between is admin and is extra permission.
if path == '/' and extra_share_permission == PERMISSION_ADMIN:
ExtraGroupsSharePermission.objects.create_share_permission(repo.repo_id, gid, extra_share_permission)
def update_user_dir_permission(repo_id, path, owner, share_to, permission, org_id=None):
# Update the user's permission(r, rw, admin) in the repo or subdir.
extra_share_permission = ''
if permission == PERMISSION_ADMIN:
extra_share_permission = permission
permission = PERMISSION_READ_WRITE
if is_valid_org_id(org_id):
if path == '/':
seafile_api.org_set_share_permission(
org_id, repo_id, owner, share_to, permission)
else:
seafile_api.org_update_share_subdir_perm_for_user(
org_id, repo_id, path, owner, share_to, permission)
else:
if path == '/':
seafile_api.set_share_permission(
repo_id, owner, share_to, permission)
else:
seafile_api.update_share_subdir_perm_for_user(
repo_id, path, owner, share_to, permission)
if path == '/':
ExtraSharePermission.objects.update_share_permission(repo_id,
share_to,
extra_share_permission)
def update_group_dir_permission(repo_id, path, owner, gid, permission, org_id=None):
# Update the group's permission(r, rw, admin) in the repo or subdir.
extra_share_permission = ''
if permission == PERMISSION_ADMIN:
extra_share_permission = permission
permission = PERMISSION_READ_WRITE
if is_valid_org_id(org_id):
if path == '/':
seaserv.seafserv_threaded_rpc.set_org_group_repo_permission(
org_id, gid, repo_id, permission)
else:
seafile_api.org_update_share_subdir_perm_for_group(
org_id, repo_id, path, owner, gid, permission)
else:
if path == '/':
seafile_api.set_group_repo_permission(gid, repo_id, permission)
else:
seafile_api.update_share_subdir_perm_for_group(
repo_id, path, owner, gid, permission)
# update extra share permission if updated is repo
if path == '/':
ExtraGroupsSharePermission.objects.update_share_permission(repo_id,
gid,
extra_share_permission)
def check_user_share_out_permission(repo_id, path, share_to, is_org=False):
# Return the permission you share to others.
path = None if path == '/' else path
repo = seafile_api.get_shared_repo_by_path(repo_id, path, share_to, is_org)
if not repo:
return None
permission = repo.permission
if path is None:
extra_permission = ExtraSharePermission.objects.get_user_permission(repo_id, share_to)
permission = extra_permission if extra_permission else repo.permission
return permission
def check_user_share_in_permission(repo_id, share_to, is_org=False):
# Return the permission to share to you.
repo = seafile_api.get_shared_repo_by_path(repo_id, None, share_to, is_org)
if not repo:
return None
extra_permission = ExtraSharePermission.objects.get_user_permission(repo_id, share_to)
return extra_permission if extra_permission else repo.permission
def check_group_share_out_permission(repo_id, path, group_id, is_org=False):
# Return the permission that share to other's group.
path = None if path == '/' else path
repo = seafile_api.get_group_shared_repo_by_path(repo_id, path, group_id, is_org)
if not repo:
return None
permission = repo.permission
if path is None:
extra_permission = ExtraGroupsSharePermission.objects.get_group_permission(repo_id, group_id)
permission = extra_permission if extra_permission else repo.permission
return permission
def check_group_share_in_permission(repo_id, group_id, is_org=False):
# Returns the permission to share the group you joined.
repo = seafile_api.get_group_shared_repo_by_path(repo_id, None, group_id, is_org)
if not repo:
return None
extra_permission = ExtraGroupsSharePermission.objects.get_group_permission(repo_id, group_id)
return extra_permission if extra_permission else repo.permission
def has_shared_to_user(repo_id, path, username, org_id=None):
if is_valid_org_id(org_id):
# when calling seafile API to share authority related functions, change the uesrname to repo owner.
repo_owner = seafile_api.get_org_repo_owner(repo_id)
if path == '/':
share_items = seafile_api.list_org_repo_shared_to(org_id,
repo_owner,
repo_id)
else:
share_items = seafile_api.get_org_shared_users_for_subdir(org_id,
repo_id,
path,
repo_owner)
else:
repo_owner = seafile_api.get_repo_owner(repo_id)
if path == '/':
share_items = seafile_api.list_repo_shared_to(repo_owner, repo_id)
else:
share_items = seafile_api.get_shared_users_for_subdir(repo_id,
path, repo_owner)
return username in [item.user for item in share_items]
def has_shared_to_group(repo_id, path, gid, org_id=None):
if is_valid_org_id(org_id):
# when calling seafile API to share authority related functions, change the uesrname to repo owner.
repo_owner = seafile_api.get_org_repo_owner(repo_id)
if path == '/':
share_items = seafile_api.list_org_repo_shared_group(org_id,
repo_owner, repo_id)
else:
share_items = seafile_api.get_org_shared_groups_for_subdir(org_id,
repo_id, path, repo_owner)
else:
repo_owner = seafile_api.get_repo_owner(repo_id)
if path == '/':
share_items = seafile_api.list_repo_shared_group_by_user(repo_owner, repo_id)
else:
share_items = seafile_api.get_shared_groups_for_subdir(repo_id,
path, repo_owner)
return gid in [item.group_id for item in share_items]
|
src/lib/pyclbr.py | DTenore/skulpt | 2,671 | 12743865 | import _sk_fail; _sk_fail._("pyclbr")
|
pandas/stats/plm.py | certik/pandas | 652 | 12743867 | <reponame>certik/pandas<gh_stars>100-1000
"""
Linear regression objects for panel data
"""
# pylint: disable-msg=W0231
# pylint: disable-msg=E1101,E1103
from __future__ import division
from pandas.compat import range
from pandas import compat
import warnings
import numpy as np
from pandas.core.panel import Panel
from pandas.core.frame import DataFrame
from pandas.core.reshape import get_dummies
from pandas.core.series import Series
from pandas.core.sparse import SparsePanel
from pandas.stats.ols import OLS, MovingOLS
import pandas.stats.common as com
import pandas.stats.math as math
from pandas.util.decorators import cache_readonly
class PanelOLS(OLS):
"""Implements panel OLS.
See ols function docs
"""
_panel_model = True
def __init__(self, y, x, weights=None, intercept=True, nw_lags=None,
entity_effects=False, time_effects=False, x_effects=None,
cluster=None, dropped_dummies=None, verbose=False,
nw_overlap=False):
self._x_orig = x
self._y_orig = y
self._weights = weights
self._intercept = intercept
self._nw_lags = nw_lags
self._nw_overlap = nw_overlap
self._entity_effects = entity_effects
self._time_effects = time_effects
self._x_effects = x_effects
self._dropped_dummies = dropped_dummies or {}
self._cluster = com._get_cluster_type(cluster)
self._verbose = verbose
(self._x, self._x_trans,
self._x_filtered, self._y,
self._y_trans) = self._prepare_data()
self._index = self._x.index.levels[0]
self._T = len(self._index)
def log(self, msg):
if self._verbose: # pragma: no cover
print(msg)
def _prepare_data(self):
"""Cleans and stacks input data into DataFrame objects
If time effects is True, then we turn off intercepts and omit an item
from every (entity and x) fixed effect.
Otherwise:
- If we have an intercept, we omit an item from every fixed effect.
- Else, we omit an item from every fixed effect except one of them.
The categorical variables will get dropped from x.
"""
(x, x_filtered, y, weights, cat_mapping) = self._filter_data()
self.log('Adding dummies to X variables')
x = self._add_dummies(x, cat_mapping)
self.log('Adding dummies to filtered X variables')
x_filtered = self._add_dummies(x_filtered, cat_mapping)
if self._x_effects:
x = x.drop(self._x_effects, axis=1)
x_filtered = x_filtered.drop(self._x_effects, axis=1)
if self._time_effects:
x_regressor = x.sub(x.mean(level=0), level=0)
unstacked_y = y.unstack()
y_regressor = unstacked_y.sub(unstacked_y.mean(1), axis=0).stack()
y_regressor.index = y.index
elif self._intercept:
# only add intercept when no time effects
self.log('Adding intercept')
x = x_regressor = add_intercept(x)
x_filtered = add_intercept(x_filtered)
y_regressor = y
else:
self.log('No intercept added')
x_regressor = x
y_regressor = y
if weights is not None:
if not y_regressor.index.equals(weights.index):
raise AssertionError("y_regressor and weights must have the "
"same index")
if not x_regressor.index.equals(weights.index):
raise AssertionError("x_regressor and weights must have the "
"same index")
rt_weights = np.sqrt(weights)
y_regressor = y_regressor * rt_weights
x_regressor = x_regressor.mul(rt_weights, axis=0)
return x, x_regressor, x_filtered, y, y_regressor
def _filter_data(self):
"""
"""
data = self._x_orig
cat_mapping = {}
if isinstance(data, DataFrame):
data = data.to_panel()
else:
if isinstance(data, Panel):
data = data.copy()
if not isinstance(data, SparsePanel):
data, cat_mapping = self._convert_x(data)
if not isinstance(data, Panel):
data = Panel.from_dict(data, intersect=True)
x_names = data.items
if self._weights is not None:
data['__weights__'] = self._weights
# Filter x's without y (so we can make a prediction)
filtered = data.to_frame()
# Filter all data together using to_frame
# convert to DataFrame
y = self._y_orig
if isinstance(y, Series):
y = y.unstack()
data['__y__'] = y
data_long = data.to_frame()
x_filt = filtered.filter(x_names)
x = data_long.filter(x_names)
y = data_long['__y__']
if self._weights is not None and not self._weights.empty:
weights = data_long['__weights__']
else:
weights = None
return x, x_filt, y, weights, cat_mapping
def _convert_x(self, x):
        # Converts non-numeric data in x to floats. x_converted is the
        # DataFrame with converted values, and cat_mapping is a dict that
        # provides the reverse mapping. For example, if 'A' was converted to 0
        # for x named 'variety', then cat_mapping['variety'][0] is 'A'.
x_converted = {}
cat_mapping = {}
# x can be either a dict or a Panel, but in Python 3, dicts don't have
# .iteritems
iteritems = getattr(x, 'iteritems', x.items)
for key, df in iteritems():
if not isinstance(df, DataFrame):
raise AssertionError("all input items must be DataFrames, "
"at least one is of "
"type {0}".format(type(df)))
if _is_numeric(df):
x_converted[key] = df
else:
try:
df = df.astype(float)
except (TypeError, ValueError):
values = df.values
distinct_values = sorted(set(values.flat))
cat_mapping[key] = dict(enumerate(distinct_values))
new_values = np.searchsorted(distinct_values, values)
x_converted[key] = DataFrame(new_values, index=df.index,
columns=df.columns)
if len(cat_mapping) == 0:
x_converted = x
return x_converted, cat_mapping
def _add_dummies(self, panel, mapping):
"""
Add entity and / or categorical dummies to input X DataFrame
Returns
-------
DataFrame
"""
panel = self._add_entity_effects(panel)
panel = self._add_categorical_dummies(panel, mapping)
return panel
def _add_entity_effects(self, panel):
"""
Add entity dummies to panel
Returns
-------
DataFrame
"""
from pandas.core.reshape import make_axis_dummies
if not self._entity_effects:
return panel
self.log('-- Adding entity fixed effect dummies')
dummies = make_axis_dummies(panel, 'minor')
if not self._use_all_dummies:
if 'entity' in self._dropped_dummies:
to_exclude = str(self._dropped_dummies.get('entity'))
else:
to_exclude = dummies.columns[0]
if to_exclude not in dummies.columns:
raise Exception('%s not in %s' % (to_exclude,
dummies.columns))
self.log('-- Excluding dummy for entity: %s' % to_exclude)
dummies = dummies.filter(dummies.columns.difference([to_exclude]))
dummies = dummies.add_prefix('FE_')
panel = panel.join(dummies)
return panel
def _add_categorical_dummies(self, panel, cat_mappings):
"""
Add categorical dummies to panel
Returns
-------
DataFrame
"""
if not self._x_effects:
return panel
dropped_dummy = (self._entity_effects and not self._use_all_dummies)
for effect in self._x_effects:
self.log('-- Adding fixed effect dummies for %s' % effect)
dummies = get_dummies(panel[effect])
val_map = cat_mappings.get(effect)
if val_map:
val_map = dict((v, k) for k, v in compat.iteritems(val_map))
if dropped_dummy or not self._use_all_dummies:
if effect in self._dropped_dummies:
to_exclude = mapped_name = self._dropped_dummies.get(
effect)
if val_map:
mapped_name = val_map[to_exclude]
else:
to_exclude = mapped_name = dummies.columns[0]
if mapped_name not in dummies.columns: # pragma: no cover
raise Exception('%s not in %s' % (to_exclude,
dummies.columns))
self.log(
'-- Excluding dummy for %s: %s' % (effect, to_exclude))
dummies = dummies.filter(dummies.columns.difference([mapped_name]))
dropped_dummy = True
dummies = _convertDummies(dummies, cat_mappings.get(effect))
dummies = dummies.add_prefix('%s_' % effect)
panel = panel.join(dummies)
return panel
@property
def _use_all_dummies(self):
"""
In the case of using an intercept or including time fixed
effects, completely partitioning the sample would make the X
not full rank.
"""
return (not self._intercept and not self._time_effects)
@cache_readonly
def _beta_raw(self):
"""Runs the regression and returns the beta."""
X = self._x_trans.values
Y = self._y_trans.values.squeeze()
beta, _, _, _ = np.linalg.lstsq(X, Y)
return beta
@cache_readonly
def beta(self):
return Series(self._beta_raw, index=self._x.columns)
@cache_readonly
def _df_model_raw(self):
"""Returns the raw model degrees of freedom."""
return self._df_raw - 1
@cache_readonly
def _df_resid_raw(self):
"""Returns the raw residual degrees of freedom."""
return self._nobs - self._df_raw
@cache_readonly
def _df_raw(self):
"""Returns the degrees of freedom."""
df = math.rank(self._x_trans.values)
if self._time_effects:
df += self._total_times
return df
@cache_readonly
def _r2_raw(self):
Y = self._y_trans.values.squeeze()
X = self._x_trans.values
resid = Y - np.dot(X, self._beta_raw)
SSE = (resid ** 2).sum()
if self._use_centered_tss:
SST = ((Y - np.mean(Y)) ** 2).sum()
else:
SST = (Y ** 2).sum()
return 1 - SSE / SST
@property
def _use_centered_tss(self):
# has_intercept = np.abs(self._resid_raw.sum()) < _FP_ERR
return self._intercept or self._entity_effects or self._time_effects
@cache_readonly
def _r2_adj_raw(self):
"""Returns the raw r-squared adjusted values."""
nobs = self._nobs
factors = (nobs - 1) / (nobs - self._df_raw)
return 1 - (1 - self._r2_raw) * factors
@cache_readonly
def _resid_raw(self):
Y = self._y.values.squeeze()
X = self._x.values
return Y - np.dot(X, self._beta_raw)
@cache_readonly
def resid(self):
return self._unstack_vector(self._resid_raw)
@cache_readonly
def _rmse_raw(self):
"""Returns the raw rmse values."""
# X = self._x.values
# Y = self._y.values.squeeze()
X = self._x_trans.values
Y = self._y_trans.values.squeeze()
resid = Y - np.dot(X, self._beta_raw)
ss = (resid ** 2).sum()
return np.sqrt(ss / (self._nobs - self._df_raw))
@cache_readonly
def _var_beta_raw(self):
cluster_axis = None
if self._cluster == 'time':
cluster_axis = 0
elif self._cluster == 'entity':
cluster_axis = 1
x = self._x
y = self._y
if self._time_effects:
xx = _xx_time_effects(x, y)
else:
xx = np.dot(x.values.T, x.values)
return _var_beta_panel(y, x, self._beta_raw, xx,
self._rmse_raw, cluster_axis, self._nw_lags,
self._nobs, self._df_raw, self._nw_overlap)
@cache_readonly
def _y_fitted_raw(self):
"""Returns the raw fitted y values."""
return np.dot(self._x.values, self._beta_raw)
@cache_readonly
def y_fitted(self):
return self._unstack_vector(self._y_fitted_raw, index=self._x.index)
def _unstack_vector(self, vec, index=None):
if index is None:
index = self._y_trans.index
panel = DataFrame(vec, index=index, columns=['dummy'])
return panel.to_panel()['dummy']
def _unstack_y(self, vec):
unstacked = self._unstack_vector(vec)
return unstacked.reindex(self.beta.index)
@cache_readonly
def _time_obs_count(self):
return self._y_trans.count(level=0).values
@cache_readonly
def _time_has_obs(self):
return self._time_obs_count > 0
@property
def _nobs(self):
return len(self._y)
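# --- Hedged usage sketch (not part of the original pandas source) ---
# A minimal illustration of constructing PanelOLS directly.  It assumes `y`
# is a Series indexed by (time, entity) and `x` is a dict of DataFrames
# (time x entity), which is what _filter_data expects; the names are
# hypothetical placeholders.
def _example_panel_ols(y, x):
    model = PanelOLS(y=y, x=x, intercept=True, entity_effects=True)
    return model.beta, model.resid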
def _convertDummies(dummies, mapping):
# cleans up the names of the generated dummies
new_items = []
for item in dummies.columns:
if not mapping:
var = str(item)
if isinstance(item, float):
var = '%g' % item
new_items.append(var)
else:
# renames the dummies if a conversion dict is provided
new_items.append(mapping[int(item)])
dummies = DataFrame(dummies.values, index=dummies.index,
columns=new_items)
return dummies
def _is_numeric(df):
for col in df:
if df[col].dtype.name == 'object':
return False
return True
def add_intercept(panel, name='intercept'):
"""
Add column of ones to input panel
Parameters
----------
panel: Panel / DataFrame
    name: string, default 'intercept'
Returns
-------
New object (same type as input)
"""
panel = panel.copy()
panel[name] = 1.
return panel.consolidate()
class MovingPanelOLS(MovingOLS, PanelOLS):
"""Implements rolling/expanding panel OLS.
See ols function docs
"""
_panel_model = True
def __init__(self, y, x, weights=None,
window_type='expanding', window=None,
min_periods=None,
min_obs=None,
intercept=True,
nw_lags=None, nw_overlap=False,
entity_effects=False,
time_effects=False,
x_effects=None,
cluster=None,
dropped_dummies=None,
verbose=False):
self._args = dict(intercept=intercept,
nw_lags=nw_lags,
nw_overlap=nw_overlap,
entity_effects=entity_effects,
time_effects=time_effects,
x_effects=x_effects,
cluster=cluster,
dropped_dummies=dropped_dummies,
verbose=verbose)
PanelOLS.__init__(self, y=y, x=x, weights=weights,
**self._args)
self._set_window(window_type, window, min_periods)
if min_obs is None:
min_obs = len(self._x.columns) + 1
self._min_obs = min_obs
@cache_readonly
def resid(self):
return self._unstack_y(self._resid_raw)
@cache_readonly
def y_fitted(self):
return self._unstack_y(self._y_fitted_raw)
@cache_readonly
def y_predict(self):
"""Returns the predicted y values."""
return self._unstack_y(self._y_predict_raw)
def lagged_y_predict(self, lag=1):
"""
        Compute forecast Y values, lagging the coefficients by the given
        number of time periods
Parameters
----------
lag : int
Returns
-------
DataFrame
"""
x = self._x.values
betas = self._beta_matrix(lag=lag)
return self._unstack_y((betas * x).sum(1))
@cache_readonly
def _rolling_ols_call(self):
return self._calc_betas(self._x_trans, self._y_trans)
@cache_readonly
def _df_raw(self):
"""Returns the degrees of freedom."""
df = self._rolling_rank()
if self._time_effects:
df += self._window_time_obs
return df[self._valid_indices]
@cache_readonly
def _var_beta_raw(self):
"""Returns the raw covariance of beta."""
x = self._x
y = self._y
dates = x.index.levels[0]
cluster_axis = None
if self._cluster == 'time':
cluster_axis = 0
elif self._cluster == 'entity':
cluster_axis = 1
nobs = self._nobs
rmse = self._rmse_raw
beta = self._beta_raw
df = self._df_raw
window = self._window
if not self._time_effects:
# Non-transformed X
cum_xx = self._cum_xx(x)
results = []
for n, i in enumerate(self._valid_indices):
if self._is_rolling and i >= window:
prior_date = dates[i - window + 1]
else:
prior_date = dates[0]
date = dates[i]
x_slice = x.truncate(prior_date, date)
y_slice = y.truncate(prior_date, date)
if self._time_effects:
xx = _xx_time_effects(x_slice, y_slice)
else:
xx = cum_xx[i]
if self._is_rolling and i >= window:
xx = xx - cum_xx[i - window]
result = _var_beta_panel(y_slice, x_slice, beta[n], xx, rmse[n],
cluster_axis, self._nw_lags,
nobs[n], df[n], self._nw_overlap)
results.append(result)
return np.array(results)
@cache_readonly
def _resid_raw(self):
beta_matrix = self._beta_matrix(lag=0)
Y = self._y.values.squeeze()
X = self._x.values
resid = Y - (X * beta_matrix).sum(1)
return resid
@cache_readonly
def _y_fitted_raw(self):
x = self._x.values
betas = self._beta_matrix(lag=0)
return (betas * x).sum(1)
@cache_readonly
def _y_predict_raw(self):
"""Returns the raw predicted y values."""
x = self._x.values
betas = self._beta_matrix(lag=1)
return (betas * x).sum(1)
def _beta_matrix(self, lag=0):
if lag < 0:
raise AssertionError("'lag' must be greater than or equal to 0, "
"input was {0}".format(lag))
index = self._y_trans.index
major_labels = index.labels[0]
labels = major_labels - lag
indexer = self._valid_indices.searchsorted(labels, side='left')
beta_matrix = self._beta_raw[indexer]
beta_matrix[labels < self._valid_indices[0]] = np.NaN
return beta_matrix
@cache_readonly
def _enough_obs(self):
# XXX: what's the best way to determine where to start?
# TODO: write unit tests for this
rank_threshold = len(self._x.columns) + 1
if self._min_obs < rank_threshold: # pragma: no cover
warnings.warn('min_obs is smaller than rank of X matrix')
enough_observations = self._nobs_raw >= self._min_obs
enough_time_periods = self._window_time_obs >= self._min_periods
return enough_time_periods & enough_observations
def create_ols_dict(attr):
def attr_getter(self):
d = {}
for k, v in compat.iteritems(self.results):
result = getattr(v, attr)
d[k] = result
return d
return attr_getter
def create_ols_attr(attr):
return property(create_ols_dict(attr))
class NonPooledPanelOLS(object):
"""Implements non-pooled panel OLS.
Parameters
----------
y : DataFrame
x : Series, DataFrame, or dict of Series
intercept : bool
True if you want an intercept.
nw_lags : None or int
Number of Newey-West lags.
window_type : {'full_sample', 'rolling', 'expanding'}
'full_sample' by default
window : int
size of window (for rolling/expanding OLS)
"""
ATTRIBUTES = [
'beta',
'df',
'df_model',
'df_resid',
'f_stat',
'p_value',
'r2',
'r2_adj',
'resid',
'rmse',
'std_err',
'summary_as_matrix',
't_stat',
'var_beta',
'x',
'y',
'y_fitted',
'y_predict'
]
def __init__(self, y, x, window_type='full_sample', window=None,
min_periods=None, intercept=True, nw_lags=None,
nw_overlap=False):
for attr in self.ATTRIBUTES:
setattr(self.__class__, attr, create_ols_attr(attr))
results = {}
for entity in y:
entity_y = y[entity]
entity_x = {}
for x_var in x:
entity_x[x_var] = x[x_var][entity]
from pandas.stats.interface import ols
results[entity] = ols(y=entity_y,
x=entity_x,
window_type=window_type,
window=window,
min_periods=min_periods,
intercept=intercept,
nw_lags=nw_lags,
nw_overlap=nw_overlap)
self.results = results
def _var_beta_panel(y, x, beta, xx, rmse, cluster_axis,
nw_lags, nobs, df, nw_overlap):
xx_inv = math.inv(xx)
yv = y.values
if cluster_axis is None:
if nw_lags is None:
return xx_inv * (rmse ** 2)
else:
resid = yv - np.dot(x.values, beta)
m = (x.values.T * resid).T
xeps = math.newey_west(m, nw_lags, nobs, df, nw_overlap)
return np.dot(xx_inv, np.dot(xeps, xx_inv))
else:
Xb = np.dot(x.values, beta).reshape((len(x.values), 1))
resid = DataFrame(yv[:, None] - Xb, index=y.index, columns=['resid'])
if cluster_axis == 1:
x = x.swaplevel(0, 1).sortlevel(0)
resid = resid.swaplevel(0, 1).sortlevel(0)
m = _group_agg(x.values * resid.values, x.index._bounds,
lambda x: np.sum(x, axis=0))
if nw_lags is None:
nw_lags = 0
xox = 0
for i in range(len(x.index.levels[0])):
xox += math.newey_west(m[i: i + 1], nw_lags,
nobs, df, nw_overlap)
return np.dot(xx_inv, np.dot(xox, xx_inv))
def _group_agg(values, bounds, f):
"""
R-style aggregator
Parameters
----------
values : N-length or N x K ndarray
bounds : B-length ndarray
f : ndarray aggregation function
Returns
-------
ndarray with same length as bounds array
"""
if values.ndim == 1:
N = len(values)
result = np.empty(len(bounds), dtype=float)
elif values.ndim == 2:
N, K = values.shape
result = np.empty((len(bounds), K), dtype=float)
testagg = f(values[:min(1, len(values))])
if isinstance(testagg, np.ndarray) and testagg.ndim == 2:
raise AssertionError('Function must reduce')
for i, left_bound in enumerate(bounds):
if i == len(bounds) - 1:
right_bound = N
else:
right_bound = bounds[i + 1]
result[i] = f(values[left_bound:right_bound])
return result
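# --- Hedged example (not part of the original pandas source) ---
# Illustrates the bounds semantics of _group_agg: each entry of `bounds`
# marks the start of a group, and the last group runs to the end of `values`.
def _example_group_agg():
    values = np.arange(6.0)
    bounds = np.array([0, 2, 4])
    # groups are values[0:2], values[2:4] and values[4:6] -> [1.0, 5.0, 9.0]
    return _group_agg(values, bounds, lambda v: np.sum(v, axis=0))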
def _xx_time_effects(x, y):
"""
Returns X'X - (X'T) (T'T)^-1 (T'X)
"""
# X'X
xx = np.dot(x.values.T, x.values)
xt = x.sum(level=0).values
count = y.unstack().count(1).values
selector = count > 0
    # X'X - (X'T) (T'T)^-1 (T'X)
xt = xt[selector]
count = count[selector]
return xx - np.dot(xt.T / count, xt)
|
sources_non_forked/vim-minimap/autoload/drawille/examples/basic.py | bodhitreestudio/vimrc | 677 | 12743880 |
from __future__ import print_function
from drawille import Canvas
import math
s = Canvas()
for x in range(1800):
s.set(x/10, math.sin(math.radians(x)) * 10)
print(s.frame())
s.clear()
for x in range(0, 1800, 10):
s.set(x/10, 10 + math.sin(math.radians(x)) * 10)
s.set(x/10, 10 + math.cos(math.radians(x)) * 10)
print(s.frame())
s.clear()
for x in range(0, 3600, 20):
s.set(x/20, 4 + math.sin(math.radians(x)) * 4)
print(s.frame())
s.clear()
for x in range(0, 360, 4):
s.set(x/4, 30 + math.sin(math.radians(x)) * 30)
for x in range(30):
for y in range(30):
s.set(x,y)
s.toggle(x+30, y+30)
s.toggle(x+60, y)
print(s.frame())
|
tests/components/media_player/test_async_helpers.py | mtarjoianu/core | 30,023 | 12743888 |
"""The tests for the Async Media player helper functions."""
import pytest
import homeassistant.components.media_player as mp
from homeassistant.const import (
STATE_IDLE,
STATE_OFF,
STATE_ON,
STATE_PAUSED,
STATE_PLAYING,
)
class ExtendedMediaPlayer(mp.MediaPlayerEntity):
"""Media player test class."""
def __init__(self, hass):
"""Initialize the test media player."""
self.hass = hass
self._volume = 0
self._state = STATE_OFF
@property
def state(self):
"""State of the player."""
return self._state
@property
def volume_level(self):
"""Volume level of the media player (0..1)."""
return self._volume
@property
def supported_features(self):
"""Flag media player features that are supported."""
return (
mp.const.MediaPlayerEntityFeature.VOLUME_SET
| mp.const.MediaPlayerEntityFeature.VOLUME_STEP
| mp.const.MediaPlayerEntityFeature.PLAY
| mp.const.MediaPlayerEntityFeature.PAUSE
| mp.const.MediaPlayerEntityFeature.TURN_OFF
| mp.const.MediaPlayerEntityFeature.TURN_ON
)
def set_volume_level(self, volume):
"""Set volume level, range 0..1."""
self._volume = volume
def volume_up(self):
"""Turn volume up for media player."""
if self.volume_level < 1:
self.set_volume_level(min(1, self.volume_level + 0.1))
def volume_down(self):
"""Turn volume down for media player."""
if self.volume_level > 0:
self.set_volume_level(max(0, self.volume_level - 0.1))
def media_play(self):
"""Play the media player."""
self._state = STATE_PLAYING
def media_pause(self):
"""Plause the media player."""
self._state = STATE_PAUSED
def media_play_pause(self):
"""Play or pause the media player."""
if self._state == STATE_PLAYING:
self._state = STATE_PAUSED
else:
self._state = STATE_PLAYING
def turn_on(self):
"""Turn on state."""
self._state = STATE_ON
def turn_off(self):
"""Turn off state."""
self._state = STATE_OFF
def toggle(self):
"""Toggle the power on the media player."""
if self._state in [STATE_OFF, STATE_IDLE]:
self._state = STATE_ON
else:
self._state = STATE_OFF
class SimpleMediaPlayer(mp.MediaPlayerEntity):
"""Media player test class."""
def __init__(self, hass):
"""Initialize the test media player."""
self.hass = hass
self._volume = 0
self._state = STATE_OFF
@property
def state(self):
"""State of the player."""
return self._state
@property
def volume_level(self):
"""Volume level of the media player (0..1)."""
return self._volume
@property
def supported_features(self):
"""Flag media player features that are supported."""
return (
mp.const.MediaPlayerEntityFeature.VOLUME_SET
| mp.const.MediaPlayerEntityFeature.VOLUME_STEP
| mp.const.MediaPlayerEntityFeature.PLAY
| mp.const.MediaPlayerEntityFeature.PAUSE
| mp.const.MediaPlayerEntityFeature.TURN_OFF
| mp.const.MediaPlayerEntityFeature.TURN_ON
)
def set_volume_level(self, volume):
"""Set volume level, range 0..1."""
self._volume = volume
def media_play(self):
"""Play the media player."""
self._state = STATE_PLAYING
def media_pause(self):
"""Plause the media player."""
self._state = STATE_PAUSED
def turn_on(self):
"""Turn on state."""
self._state = STATE_ON
def turn_off(self):
"""Turn off state."""
self._state = STATE_OFF
@pytest.fixture(params=[ExtendedMediaPlayer, SimpleMediaPlayer])
def player(hass, request):
"""Return a media player."""
return request.param(hass)
async def test_volume_up(player):
"""Test the volume_up and set volume methods."""
assert player.volume_level == 0
await player.async_set_volume_level(0.5)
assert player.volume_level == 0.5
await player.async_volume_up()
assert player.volume_level == 0.6
async def test_volume_down(player):
"""Test the volume_down and set volume methods."""
assert player.volume_level == 0
await player.async_set_volume_level(0.5)
assert player.volume_level == 0.5
await player.async_volume_down()
assert player.volume_level == 0.4
async def test_media_play_pause(player):
"""Test the media_play_pause method."""
assert player.state == STATE_OFF
await player.async_media_play_pause()
assert player.state == STATE_PLAYING
await player.async_media_play_pause()
assert player.state == STATE_PAUSED
async def test_turn_on_off(player):
"""Test the turn on and turn off methods."""
assert player.state == STATE_OFF
await player.async_turn_on()
assert player.state == STATE_ON
await player.async_turn_off()
assert player.state == STATE_OFF
async def test_toggle(player):
"""Test the toggle method."""
assert player.state == STATE_OFF
await player.async_toggle()
assert player.state == STATE_ON
await player.async_toggle()
assert player.state == STATE_OFF
|
datadog/api/service_level_objectives.py | cclauss/datadogpy | 520 | 12743889 |
# Unless explicitly stated otherwise all files in this repository are licensed under the BSD-3-Clause License.
# This product includes software developed at Datadog (https://www.datadoghq.com/).
# Copyright 2015-Present Datadog, Inc
from datadog.util.format import force_to_epoch_seconds
from datadog.api.resources import (
GetableAPIResource,
CreateableAPIResource,
UpdatableAPIResource,
ListableAPIResource,
DeletableAPIResource,
ActionAPIResource,
)
class ServiceLevelObjective(
GetableAPIResource,
CreateableAPIResource,
UpdatableAPIResource,
ListableAPIResource,
DeletableAPIResource,
ActionAPIResource,
):
"""
A wrapper around Service Level Objective HTTP API.
"""
_resource_name = "slo"
@classmethod
def create(cls, attach_host_name=False, method="POST", id=None, params=None, **body):
"""
Create a SLO
:returns: created SLO details
"""
return super(ServiceLevelObjective, cls).create(
attach_host_name=False, method="POST", id=None, params=params, **body
)
@classmethod
def get(cls, id, **params):
"""
Get a specific SLO details.
:param id: SLO id to get details for
:type id: str
:returns: SLO details
"""
return super(ServiceLevelObjective, cls).get(id, **params)
@classmethod
def get_all(cls, query=None, ids=None, offset=0, limit=100, **params):
"""
Get all SLO details.
        :param query: optional search query - same syntax as in the UI and online documentation
:type query: str
:param ids: optional list of SLO ids to get many specific SLOs at once.
:type ids: list(str)
:param offset: offset of results to use (default 0)
:type offset: int
:param limit: limit of results to return (default: 100)
:type limit: int
:returns: SLOs matching the query
"""
search_terms = {}
if query:
search_terms["query"] = query
if ids:
search_terms["ids"] = ids
search_terms["offset"] = offset
search_terms["limit"] = limit
return super(ServiceLevelObjective, cls).get_all(**search_terms)
@classmethod
def update(cls, id, params=None, **body):
"""
Update a specific SLO details.
:param id: SLO id to update details for
:type id: str
:returns: SLO details
"""
return super(ServiceLevelObjective, cls).update(id, params, **body)
@classmethod
def delete(cls, id, **params):
"""
Delete a specific SLO.
:param id: SLO id to delete
:type id: str
:returns: SLO ids removed
"""
return super(ServiceLevelObjective, cls).delete(id, **params)
@classmethod
def bulk_delete(cls, ops, **params):
"""
Bulk Delete Timeframes from multiple SLOs.
:param ops: a dictionary mapping of SLO ID to timeframes to remove.
:type ops: dict(str, list(str))
:returns: Dictionary representing the API's JSON response
`errors` - errors with operation
`data` - updates and deletions
"""
return super(ServiceLevelObjective, cls)._trigger_class_action(
"POST",
"bulk_delete",
body=ops,
params=params,
suppress_response_errors_on_codes=[200],
)
@classmethod
def delete_many(cls, ids, **params):
"""
Delete Multiple SLOs
:param ids: a list of SLO IDs to remove
:type ids: list(str)
        :returns: Dictionary representing the API's JSON response; see
            `data` (list of SLO ids) and `errors`
"""
return super(ServiceLevelObjective, cls)._trigger_class_action(
"DELETE",
"",
params=params,
body={"ids": ids},
suppress_response_errors_on_codes=[200],
)
@classmethod
def can_delete(cls, ids, **params):
"""
Check if the following SLOs can be safely deleted.
This is used to check if SLO has any references to it.
:param ids: a list of SLO IDs to check
:type ids: list(str)
:returns: Dictionary representing the API's JSON response
"data.ok" represents a list of SLO ids that have no known references.
"errors" contains a dictionary of SLO ID to known reference(s).
"""
params["ids"] = ids
return super(ServiceLevelObjective, cls)._trigger_class_action(
"GET",
"can_delete",
params=params,
body=None,
suppress_response_errors_on_codes=[200],
)
@classmethod
def history(cls, id, from_ts, to_ts, **params):
"""
Get the SLO's history from the given time range.
:param id: SLO ID to query
:type id: str
:param from_ts: `from` timestamp in epoch seconds to query
:type from_ts: int|datetime.datetime
:param to_ts: `to` timestamp in epoch seconds to query, must be > `from_ts`
:type to_ts: int|datetime.datetime
:returns: Dictionary representing the API's JSON response
"data.ok" represents a list of SLO ids that have no known references.
"errors" contains a dictionary of SLO ID to known reference(s).
"""
params["id"] = id
params["from_ts"] = force_to_epoch_seconds(from_ts)
params["to_ts"] = force_to_epoch_seconds(to_ts)
return super(ServiceLevelObjective, cls)._trigger_class_action(
"GET",
"history",
id=id,
params=params,
body=None,
suppress_response_errors_on_codes=[200],
)
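# --- Hedged usage sketch (not part of the original datadogpy source) ---
# A minimal illustration of the wrapper above.  It assumes the client has
# already been configured (for example via datadog.initialize with valid
# API/application keys); "my-slo-id" is a hypothetical placeholder.
def _example_slo_calls():
    matching = ServiceLevelObjective.get_all(query="availability", limit=10)
    details = ServiceLevelObjective.get("my-slo-id")
    return matching, details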
|
cleverhans_v3.1.0/cleverhans/dataset.py | xu-weizhen/cleverhans | 4,333 | 12743899 |
"""Dataset class for CleverHans
"""
# pylint: disable=missing-docstring
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import array
import functools
import gzip
import operator
import os
import struct
import tempfile
import sys
import warnings
import numpy as np
import tensorflow as tf
try:
from tensorflow.python.keras.utils import np_utils
from tensorflow.keras.datasets import cifar10
except ImportError:
# In tf 1.8, np_utils doesn't seem to be publicly exposed.
# In later tf versions, it is, and in pre-tf keras it was too.
from tensorflow.python.keras import _impl
np_utils = _impl.keras.utils.np_utils
# In tf 1.8, "from tensorflow.keras.datasets import cifar10" doesn't work even though the module exists
cifar10 = tf.keras.datasets.cifar10
warnings.warn(
"Support for TensorFlow versions prior to 1.12 is deprecated."
" CleverHans using earlier versions may quit working on or after 2019-07-07."
)
from cleverhans import utils
class Dataset(object):
"""Abstract base class representing a dataset."""
# The number of classes in the dataset. Should be specified by subclasses.
NB_CLASSES = None
def __init__(self, kwargs=None):
if kwargs is None:
kwargs = {}
if "self" in kwargs:
del kwargs["self"]
self.kwargs = kwargs
def get_factory(self):
"""Returns a picklable callable that recreates the dataset."""
return Factory(type(self), self.kwargs)
def get_set(self, which_set):
"""Returns the training set or test set as an (x_data, y_data) tuple.
:param which_set: 'train' or 'test'
"""
return (getattr(self, "x_" + which_set), getattr(self, "y_" + which_set))
def to_tensorflow(self):
raise NotImplementedError()
@classmethod
def in_memory_dataset(cls, x, y, shuffle=None, repeat=True):
assert x.shape[0] == y.shape[0]
d = tf.data.Dataset.range(x.shape[0])
if repeat:
d = d.repeat()
if shuffle:
d = d.shuffle(shuffle)
def lookup(p):
return x[p], y[p]
d = d.map(lambda i: tf.py_func(lookup, [i], [tf.float32] * 2))
return d
class MNIST(Dataset):
"""The MNIST dataset"""
NB_CLASSES = 10
def __init__(
self,
train_start=0,
train_end=60000,
test_start=0,
test_end=10000,
center=False,
max_val=1.0,
):
kwargs = locals()
if "__class__" in kwargs:
del kwargs["__class__"]
super(MNIST, self).__init__(kwargs)
x_train, y_train, x_test, y_test = data_mnist(
train_start=train_start,
train_end=train_end,
test_start=test_start,
test_end=test_end,
)
if center:
x_train = x_train * 2.0 - 1.0
x_test = x_test * 2.0 - 1.0
x_train *= max_val
x_test *= max_val
self.x_train = x_train.astype("float32")
self.y_train = y_train.astype("float32")
self.x_test = x_test.astype("float32")
self.y_test = y_test.astype("float32")
def to_tensorflow(self, shuffle=4096):
return (
self.in_memory_dataset(self.x_train, self.y_train, shuffle),
self.in_memory_dataset(self.x_test, self.y_test, repeat=False),
)
class CIFAR10(Dataset):
"""The CIFAR-10 dataset"""
NB_CLASSES = 10
LABEL_NAMES = [
"airplane",
"automobile",
"bird",
"cat",
"deer",
"dog",
"frog",
"horse",
"ship",
"truck",
]
def __init__(
self,
train_start=0,
train_end=60000,
test_start=0,
test_end=10000,
center=False,
max_val=1.0,
):
kwargs = locals()
if "__class__" in kwargs:
del kwargs["__class__"]
super(CIFAR10, self).__init__(kwargs)
packed = data_cifar10(
train_start=train_start,
train_end=train_end,
test_start=test_start,
test_end=test_end,
)
x_train, y_train, x_test, y_test = packed
if center:
x_train = x_train * 2.0 - 1.0
x_test = x_test * 2.0 - 1.0
x_train *= max_val
x_test *= max_val
self.x_train = x_train
self.y_train = y_train
self.x_test = x_test
self.y_test = y_test
self.max_val = max_val
def to_tensorflow(self, shuffle=4096):
# This is much more efficient with data augmentation, see tutorials.
return (
self.in_memory_dataset(self.x_train, self.y_train, shuffle),
self.in_memory_dataset(self.x_test, self.y_test, repeat=False),
)
class Factory(object):
"""
A callable that creates an object of the specified type and configuration.
"""
def __init__(self, cls, kwargs):
self.cls = cls
self.kwargs = kwargs
def __call__(self):
"""Returns the created object."""
return self.cls(**self.kwargs)
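# --- Hedged usage sketch (not part of the original cleverhans source) ---
# get_factory() returns a picklable Factory; calling it rebuilds the dataset
# with the same constructor arguments.  The slice bounds below are arbitrary.
def _example_factory_roundtrip():
    factory = MNIST(train_start=0, train_end=1000).get_factory()
    rebuilt = factory()  # same type and kwargs as the original dataset
    return rebuilt.get_set("train")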
def maybe_download_file(url, datadir=None, force=False):
try:
from urllib.request import urlretrieve
except ImportError:
from urllib import urlretrieve
if not datadir:
datadir = tempfile.gettempdir()
file_name = url[url.rfind("/") + 1 :]
dest_file = os.path.join(datadir, file_name)
isfile = os.path.isfile(dest_file)
if force or not isfile:
urlretrieve(url, dest_file)
return dest_file
def download_and_parse_mnist_file(file_name, datadir=None, force=False):
url = os.path.join('https://storage.googleapis.com/cvdf-datasets/mnist/', file_name)
file_name = maybe_download_file(url, datadir=datadir, force=force)
# Open the file and unzip it if necessary
if os.path.splitext(file_name)[1] == ".gz":
open_fn = gzip.open
else:
open_fn = open
# Parse the file
with open_fn(file_name, "rb") as file_descriptor:
header = file_descriptor.read(4)
assert len(header) == 4
zeros, data_type, n_dims = struct.unpack(">HBB", header)
assert zeros == 0
hex_to_data_type = {
0x08: "B",
0x09: "b",
0x0B: "h",
0x0C: "i",
0x0D: "f",
0x0E: "d",
}
data_type = hex_to_data_type[data_type]
# data_type unicode to ascii conversion (Python2 fix)
if sys.version_info[0] < 3:
data_type = data_type.encode("ascii", "ignore")
dim_sizes = struct.unpack(">" + "I" * n_dims, file_descriptor.read(4 * n_dims))
data = array.array(data_type, file_descriptor.read())
data.byteswap()
desired_items = functools.reduce(operator.mul, dim_sizes)
assert len(data) == desired_items
return np.array(data).reshape(dim_sizes)
def data_mnist(
datadir=tempfile.gettempdir(),
train_start=0,
train_end=60000,
test_start=0,
test_end=10000,
):
"""
Load and preprocess MNIST dataset
:param datadir: path to folder where data should be stored
:param train_start: index of first training set example
:param train_end: index of last training set example
:param test_start: index of first test set example
:param test_end: index of last test set example
:return: tuple of four arrays containing training data, training labels,
testing data and testing labels.
"""
assert isinstance(train_start, int)
assert isinstance(train_end, int)
assert isinstance(test_start, int)
assert isinstance(test_end, int)
X_train = (
download_and_parse_mnist_file("train-images-idx3-ubyte.gz", datadir=datadir)
/ 255.0
)
Y_train = download_and_parse_mnist_file(
"train-labels-idx1-ubyte.gz", datadir=datadir
)
X_test = (
download_and_parse_mnist_file("t10k-images-idx3-ubyte.gz", datadir=datadir)
/ 255.0
)
Y_test = download_and_parse_mnist_file("t10k-labels-idx1-ubyte.gz", datadir=datadir)
X_train = np.expand_dims(X_train, -1)
X_test = np.expand_dims(X_test, -1)
X_train = X_train[train_start:train_end]
Y_train = Y_train[train_start:train_end]
X_test = X_test[test_start:test_end]
Y_test = Y_test[test_start:test_end]
Y_train = utils.to_categorical(Y_train, nb_classes=10)
Y_test = utils.to_categorical(Y_test, nb_classes=10)
return X_train, Y_train, X_test, Y_test
def data_cifar10(train_start=0, train_end=50000, test_start=0, test_end=10000):
"""
Preprocess CIFAR10 dataset
:return:
"""
# These values are specific to CIFAR10
img_rows = 32
img_cols = 32
nb_classes = 10
# the data, shuffled and split between train and test sets
(x_train, y_train), (x_test, y_test) = cifar10.load_data()
if tf.keras.backend.image_data_format() == "channels_first":
x_train = x_train.reshape(x_train.shape[0], 3, img_rows, img_cols)
x_test = x_test.reshape(x_test.shape[0], 3, img_rows, img_cols)
else:
x_train = x_train.reshape(x_train.shape[0], img_rows, img_cols, 3)
x_test = x_test.reshape(x_test.shape[0], img_rows, img_cols, 3)
x_train = x_train.astype("float32")
x_test = x_test.astype("float32")
x_train /= 255
x_test /= 255
print("x_train shape:", x_train.shape)
print(x_train.shape[0], "train samples")
print(x_test.shape[0], "test samples")
# convert class vectors to binary class matrices
y_train = np_utils.to_categorical(y_train, nb_classes)
y_test = np_utils.to_categorical(y_test, nb_classes)
x_train = x_train[train_start:train_end, :, :, :]
y_train = y_train[train_start:train_end, :]
x_test = x_test[test_start:test_end, :]
y_test = y_test[test_start:test_end, :]
return x_train, y_train, x_test, y_test
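# --- Hedged usage sketch (not part of the original cleverhans source) ---
# A minimal illustration of the loaders above.  data_mnist downloads the
# MNIST files on first use, and to_tensorflow() returns (train, test)
# tf.data pipelines built from the in-memory arrays.
def _example_dataset_usage():
    x_train, y_train, _, _ = data_mnist(train_end=1000, test_end=200)
    train_ds, test_ds = MNIST(train_end=1000, test_end=200).to_tensorflow()
    return x_train.shape, y_train.shape, train_ds, test_ds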
|
tests/basics/tuple1.py | LabAixBidouille/micropython | 303 | 12743933 | # basic tuple functionality
x = (1, 2, 3 * 4)
print(x)
try:
x[0] = 4
except TypeError:
print("TypeError")
print(x)
try:
x.append(5)
except AttributeError:
print("AttributeError")
print(x[1:])
print(x[:-1])
print(x[2:3])
print(x + (10, 100, 10000))
|
recipes/Python/66009_Converting_Between_Different_Naming/recipe-66009.py | tdiprima/code | 2,023 | 12743947 |
import re
def cw2us(x): # capwords to underscore notation
return re.sub(r'(?<=[a-z])[A-Z]|(?<!^)[A-Z](?=[a-z])', r"_\g<0>", x).lower()
def mc2us(x): # mixed case to underscore notation
return cw2us(x)
def us2mc(x): # underscore to mixed case notation
return re.sub(r'_([a-z])', lambda m: (m.group(1).upper()), x)
def us2cw(x): # underscore to capwords notation
s = us2mc(x)
return s[0].upper()+s[1:]
##
## Expected output:
##
## >>> cw2us("PrintHTML")
## 'print_html'
## >>> cw2us("IOError")
## 'io_error'
## >>> cw2us("SetXYPosition")
## 'set_xy_position'
## >>> cw2us("GetX")
## 'get_x'
##
|
applications/aci-diagram/diagram.py | richardstrnad/acitoolkit | 351 | 12743961 |
#!/usr/bin/env python
from acitoolkit.acitoolkit import *
import pygraphviz as pgv
import sys
import logging
creds = Credentials('apic',
"Generate logical diagrams of a running Cisco ACI Application Policy Infrastructure Controller")
creds.add_argument('-o', '--output',
help='Output file for diagram - e.g. out.png, out.jpeg',
required=True)
creds.add_argument('-t', '--tenants',
help='Tenants to include when generating diagrams',
nargs='*')
creds.add_argument('-v', '--verbose', help='show verbose logging information',
action='store_true')
creds.add_argument('-d', '--debug', help='enable acitoolkit debug logging information',
action='store_true')
args = creds.get()
if args.debug:
logging.basicConfig(level=logging.DEBUG)
session = Session(args.url, args.login, args.password)
try:
assert (session.login().ok)
except:
print("Connection to APIC failed")
sys.exit()
graph = pgv.AGraph(directed=True, rankdir="LR")
if args.tenants:
tenants = Tenant.get_deep(session, args.tenants)
else:
tenants = Tenant.get_deep(session)
def tn_node(tn):
return "cluster-tn-" + tn.name
def ctx_node(tn, ctx):
return tn_node(tn) + "/ctx-" + ctx.name
def bd_node(tn, bd):
return tn_node(tn) + "/bd-" + bd.name
def sn_node(tn, bd, sn):
return bd_node(tn, bd) + "/sn-" + sn.get_addr()
def app_node(tn, app):
return tn_node(tn) + "/app-" + app.name
def epg_node(tn, app, epg):
return app_node(tn, app) + "/epg-" + epg.name
def ctrct_node(tn, ctrct):
return tn_node(tn) + "/ctrct-" + ctrct.name
for tenant in tenants:
print("Processing tenant " + tenant.name)
tncluster = graph.add_subgraph(name=tn_node(tenant),
label="Tenant: " + tenant.name, color="blue")
for context in tenant.get_children(only_class=Context):
tncluster.add_node(ctx_node(tenant, context),
label="Private Network\n" + context.name,
shape='circle')
for bd in tenant.get_children(only_class=BridgeDomain):
tncluster.add_node(bd_node(tenant, bd),
label="Bridge Domain\n" + bd.name, shape='box')
if bd.get_context():
tncluster.add_edge(ctx_node(tenant, bd.get_context()),
bd_node(tenant, bd))
else:
tncluster.add_node("_ctx-dummy-" + bd_node(tenant, bd),
style="invis", label='Private Network',
shape='circle')
tncluster.add_edge("_ctx-dummy-" + bd_node(tenant, bd),
bd_node(tenant, bd), style="invis")
for sn in bd.get_children(only_class=Subnet):
tncluster.add_node(sn_node(tenant, bd, sn),
label="Subnet\n" + sn.get_addr(), shape='box',
style='dotted')
tncluster.add_edge(bd_node(tenant, bd), sn_node(tenant, bd, sn))
for app in tenant.get_children(only_class=AppProfile):
appcluster = tncluster.add_subgraph(name=app_node(tenant, app),
label="Application Profile\n" + app.name)
for epg in app.get_children(only_class=EPG):
appcluster.add_node(epg_node(tenant, app, epg),
label="EPG\n" + epg.name)
if epg.has_bd():
tncluster.add_edge(bd_node(tenant, epg.get_bd()),
epg_node(tenant, app, epg), style='dotted')
for pc in epg.get_all_provided():
appcluster.add_node(ctrct_node(tenant, pc),
label="Contract\n" + pc.name, shape='box',
style='filled', color='lightgray')
appcluster.add_edge(epg_node(tenant, app, epg),
ctrct_node(tenant, pc))
for cc in epg.get_all_consumed():
appcluster.add_node(ctrct_node(tenant, cc),
label="Contract\n" + cc.name, shape='box',
style='filled', color='lightgray')
appcluster.add_edge(ctrct_node(tenant, cc),
epg_node(tenant, app, epg))
if args.verbose:
print("Finished loading the structure from APIC, here is the graph source (GraphViz DOT format):")
print("================================================================================")
print(graph.string())
print("================================================================================")
print("\n\nDrawing graph to %s" % args.output)
graph.draw(args.output, prog='dot')
|
qrcode/LUT.py | cbalfour/python-qrcode | 2,651 | 12743968 |
# Store all kinds of lookup table.
# # generate rsPoly lookup table.
# from qrcode import base
# def create_bytes(rs_blocks):
# for r in range(len(rs_blocks)):
# dcCount = rs_blocks[r].data_count
# ecCount = rs_blocks[r].total_count - dcCount
# rsPoly = base.Polynomial([1], 0)
# for i in range(ecCount):
# rsPoly = rsPoly * base.Polynomial([1, base.gexp(i)], 0)
# return ecCount, rsPoly
# rsPoly_LUT = {}
# for version in range(1,41):
# for error_correction in range(4):
# rs_blocks_list = base.rs_blocks(version, error_correction)
# ecCount, rsPoly = create_bytes(rs_blocks_list)
# rsPoly_LUT[ecCount]=rsPoly.num
# print(rsPoly_LUT)
# Result. Usage: input: ecCount, output: Polynomial.num
# e.g. rsPoly = base.Polynomial(LUT.rsPoly_LUT[ecCount], 0)
rsPoly_LUT = {
7: [1, 127, 122, 154, 164, 11, 68, 117],
10: [1, 216, 194, 159, 111, 199, 94, 95, 113, 157, 193],
13: [1, 137, 73, 227, 17, 177, 17, 52, 13, 46, 43, 83, 132, 120],
15: [1, 29, 196, 111, 163, 112, 74, 10, 105, 105, 139, 132, 151,
32, 134, 26],
16: [1, 59, 13, 104, 189, 68, 209, 30, 8, 163, 65, 41, 229, 98, 50, 36, 59],
17: [1, 119, 66, 83, 120, 119, 22, 197, 83, 249, 41, 143, 134, 85, 53, 125,
99, 79],
18: [1, 239, 251, 183, 113, 149, 175, 199, 215, 240, 220, 73, 82, 173, 75,
32, 67, 217, 146],
20: [1, 152, 185, 240, 5, 111, 99, 6, 220, 112, 150, 69, 36, 187, 22, 228,
198, 121, 121, 165, 174],
22: [1, 89, 179, 131, 176, 182, 244, 19, 189, 69, 40, 28, 137, 29, 123, 67,
253, 86, 218, 230, 26, 145, 245],
24: [1, 122, 118, 169, 70, 178, 237, 216, 102, 115, 150, 229, 73, 130, 72,
61, 43, 206, 1, 237, 247, 127, 217, 144, 117],
26: [1, 246, 51, 183, 4, 136, 98, 199, 152, 77, 56, 206, 24, 145, 40, 209,
117, 233, 42, 135, 68, 70, 144, 146, 77, 43, 94],
28: [1, 252, 9, 28, 13, 18, 251, 208, 150, 103, 174, 100, 41, 167, 12, 247,
56, 117, 119, 233, 127, 181, 100, 121, 147, 176, 74, 58, 197],
30: [1, 212, 246, 77, 73, 195, 192, 75, 98, 5, 70, 103, 177, 22, 217, 138,
51, 181, 246, 72, 25, 18, 46, 228, 74, 216, 195, 11, 106, 130, 150]
}
|
cli/sawtooth_cli/sawset.py | EddyKIL/sawtooth-core | 1,530 | 12743991 |
# Copyright 2017 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ------------------------------------------------------------------------------
import argparse
from base64 import b64decode
import csv
import getpass
import hashlib
import json
import logging
import os
import sys
import traceback
import random
import yaml
import pkg_resources
from colorlog import ColoredFormatter
from sawtooth_signing import create_context
from sawtooth_signing import CryptoFactory
from sawtooth_signing import ParseError
from sawtooth_signing.secp256k1 import Secp256k1PrivateKey
from sawtooth_cli.exceptions import CliException
from sawtooth_cli.rest_client import RestClient
from sawtooth_cli.protobuf.settings_pb2 import SettingsPayload
from sawtooth_cli.protobuf.settings_pb2 import SettingProposal
from sawtooth_cli.protobuf.settings_pb2 import SettingVote
from sawtooth_cli.protobuf.settings_pb2 import SettingCandidates
from sawtooth_cli.protobuf.setting_pb2 import Setting
from sawtooth_cli.protobuf.transaction_pb2 import TransactionHeader
from sawtooth_cli.protobuf.transaction_pb2 import Transaction
from sawtooth_cli.protobuf.batch_pb2 import BatchHeader
from sawtooth_cli.protobuf.batch_pb2 import Batch
from sawtooth_cli.protobuf.batch_pb2 import BatchList
DISTRIBUTION_NAME = 'sawset'
SETTINGS_NAMESPACE = '000000'
_MIN_PRINT_WIDTH = 15
_MAX_KEY_PARTS = 4
_ADDRESS_PART_SIZE = 16
def add_config_parser(subparsers, parent_parser):
"""Creates the arg parsers needed for the config command and
its subcommands.
"""
parser = subparsers.add_parser(
'config',
        help='Changes genesis block settings and creates, views, and '
             'votes on settings proposals',
description='Provides subcommands to change genesis block settings '
'and to view, create, and vote on existing proposals.'
)
config_parsers = parser.add_subparsers(title="subcommands",
dest="subcommand")
config_parsers.required = True
def _do_config_proposal_create(args):
"""Executes the 'proposal create' subcommand. Given a key file, and a
series of key/value pairs, it generates batches of sawtooth_settings
transactions in a BatchList instance. The BatchList is either stored to a
file or submitted to a validator, depending on the supplied CLI arguments.
"""
settings = [s.split('=', 1) for s in args.setting]
signer = _read_signer(args.key)
txns = [_create_propose_txn(signer, setting)
for setting in settings]
batch = _create_batch(signer, txns)
batch_list = BatchList(batches=[batch])
if args.output is not None:
try:
with open(args.output, 'wb') as batch_file:
batch_file.write(batch_list.SerializeToString())
except IOError as e:
raise CliException(
'Unable to write to batch file: {}'.format(str(e))) from e
elif args.sabre_output is not None:
for i, txn in enumerate(txns):
with open("{}-{}".format(args.sabre_output, i), 'wb') as outfile:
outfile.write(txn.payload)
elif args.url is not None:
rest_client = RestClient(args.url)
rest_client.send_batches(batch_list)
else:
raise AssertionError('No target for create set.')
def _do_config_proposal_list(args):
"""Executes the 'proposal list' subcommand.
Given a url, optional filters on prefix and public key, this command lists
the current pending proposals for settings changes.
"""
def _accept(candidate, public_key, prefix):
# Check to see if the first public key matches the given public key
# (if it is not None). This public key belongs to the user that
# created it.
has_pub_key = (not public_key
or candidate.votes[0].public_key == public_key)
has_prefix = candidate.proposal.setting.startswith(prefix)
return has_prefix and has_pub_key
candidates_payload = _get_proposals(RestClient(args.url))
candidates = [
c for c in candidates_payload.candidates
if _accept(c, args.public_key, args.filter)
]
if args.format == 'default':
for candidate in candidates:
print('{}: {} => {}'.format(
candidate.proposal_id,
candidate.proposal.setting,
candidate.proposal.value))
elif args.format == 'csv':
writer = csv.writer(sys.stdout, quoting=csv.QUOTE_ALL)
writer.writerow(['PROPOSAL_ID', 'KEY', 'VALUE'])
for candidate in candidates:
writer.writerow([
candidate.proposal_id,
candidate.proposal.setting,
candidate.proposal.value])
elif args.format == 'json' or args.format == 'yaml':
candidates_snapshot = \
{c.proposal_id: {c.proposal.setting: c.proposal.value}
for c in candidates}
if args.format == 'json':
print(json.dumps(candidates_snapshot, indent=2, sort_keys=True))
else:
print(yaml.dump(candidates_snapshot,
default_flow_style=False)[0:-1])
else:
raise AssertionError('Unknown format {}'.format(args.format))
def _do_config_proposal_vote(args):
"""Executes the 'proposal vote' subcommand. Given a key file, a proposal
id and a vote value, it generates a batch of sawtooth_settings transactions
    in a BatchList instance. The BatchList is then submitted to a
    validator.
"""
signer = _read_signer(args.key)
rest_client = RestClient(args.url)
proposals = _get_proposals(rest_client)
proposal = None
for candidate in proposals.candidates:
if candidate.proposal_id == args.proposal_id:
proposal = candidate
break
if proposal is None:
raise CliException('No proposal exists with the given id')
for vote_record in proposal.votes:
if vote_record.public_key == signer.get_public_key().as_hex():
raise CliException(
'A vote has already been recorded with this signing key')
txn = _create_vote_txn(
signer,
args.proposal_id,
proposal.proposal.setting,
args.vote_value)
batch = _create_batch(signer, [txn])
batch_list = BatchList(batches=[batch])
rest_client.send_batches(batch_list)
def _do_config_genesis(args):
signer = _read_signer(args.key)
public_key = signer.get_public_key().as_hex()
authorized_keys = args.authorized_key if args.authorized_key else \
[public_key]
if public_key not in authorized_keys:
authorized_keys.append(public_key)
txns = []
txns.append(_create_propose_txn(
signer,
('sawtooth.settings.vote.authorized_keys',
','.join(authorized_keys))))
if args.approval_threshold is not None:
if args.approval_threshold < 1:
raise CliException('approval threshold must not be less than 1')
if args.approval_threshold > len(authorized_keys):
raise CliException(
'approval threshold must not be greater than the number of '
'authorized keys')
txns.append(_create_propose_txn(
signer,
('sawtooth.settings.vote.approval_threshold',
str(args.approval_threshold))))
batch = _create_batch(signer, txns)
batch_list = BatchList(batches=[batch])
try:
with open(args.output, 'wb') as batch_file:
batch_file.write(batch_list.SerializeToString())
print('Generated {}'.format(args.output))
except IOError as e:
raise CliException(
'Unable to write to batch file: {}'.format(str(e))) from e
def _get_proposals(rest_client):
state_leaf = rest_client.get_leaf(
_key_to_address('sawtooth.settings.vote.proposals'))
config_candidates = SettingCandidates()
if state_leaf is not None:
setting_bytes = b64decode(state_leaf['data'])
setting = Setting()
setting.ParseFromString(setting_bytes)
candidates_bytes = None
for entry in setting.entries:
if entry.key == 'sawtooth.settings.vote.proposals':
candidates_bytes = entry.value
if candidates_bytes is not None:
decoded = b64decode(candidates_bytes)
config_candidates.ParseFromString(decoded)
return config_candidates
def _read_signer(key_filename):
"""Reads the given file as a hex key.
Args:
key_filename: The filename where the key is stored. If None,
defaults to the default key for the current user.
Returns:
Signer: the signer
Raises:
CliException: If unable to read the file.
"""
filename = key_filename
if filename is None:
filename = os.path.join(os.path.expanduser('~'),
'.sawtooth',
'keys',
getpass.getuser() + '.priv')
try:
with open(filename, 'r') as key_file:
signing_key = key_file.read().strip()
except IOError as e:
raise CliException('Unable to read key file: {}'.format(str(e))) from e
try:
private_key = Secp256k1PrivateKey.from_hex(signing_key)
except ParseError as e:
raise CliException(
'Unable to read key in file: {}'.format(str(e))) from e
context = create_context('secp256k1')
crypto_factory = CryptoFactory(context)
return crypto_factory.new_signer(private_key)
def _create_batch(signer, transactions):
"""Creates a batch from a list of transactions and a public key, and signs
the resulting batch with the given signing key.
Args:
signer (:obj:`Signer`): The cryptographic signer
transactions (list of `Transaction`): The transactions to add to the
batch.
Returns:
`Batch`: The constructed and signed batch.
"""
txn_ids = [txn.header_signature for txn in transactions]
batch_header = BatchHeader(
signer_public_key=signer.get_public_key().as_hex(),
transaction_ids=txn_ids).SerializeToString()
return Batch(
header=batch_header,
header_signature=signer.sign(batch_header),
transactions=transactions)
def _create_propose_txn(signer, setting_key_value):
"""Creates an individual sawtooth_settings transaction for the given
key and value.
"""
setting_key, setting_value = setting_key_value
nonce = hex(random.randint(0, 2**64))
proposal = SettingProposal(
setting=setting_key,
value=setting_value,
nonce=nonce)
payload = SettingsPayload(data=proposal.SerializeToString(),
action=SettingsPayload.PROPOSE)
return _make_txn(signer, setting_key, payload)
def _create_vote_txn(signer, proposal_id, setting_key, vote_value):
"""Creates an individual sawtooth_settings transaction for voting on a
proposal for a particular setting key.
"""
if vote_value == 'accept':
vote_id = SettingVote.ACCEPT
else:
vote_id = SettingVote.REJECT
vote = SettingVote(proposal_id=proposal_id, vote=vote_id)
payload = SettingsPayload(data=vote.SerializeToString(),
action=SettingsPayload.VOTE)
return _make_txn(signer, setting_key, payload)
def _make_txn(signer, setting_key, payload):
"""Creates and signs a sawtooth_settings transaction with with a payload.
"""
serialized_payload = payload.SerializeToString()
header = TransactionHeader(
signer_public_key=signer.get_public_key().as_hex(),
family_name='sawtooth_settings',
family_version='1.0',
inputs=_config_inputs(setting_key),
outputs=_config_outputs(setting_key),
dependencies=[],
payload_sha512=hashlib.sha512(serialized_payload).hexdigest(),
batcher_public_key=signer.get_public_key().as_hex()
).SerializeToString()
return Transaction(
header=header,
header_signature=signer.sign(header),
payload=serialized_payload)
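# --- Hedged usage sketch (not part of the original sawtooth source) ---
# Ties the helpers above together: read the default signing key, build one
# settings-proposal transaction, and wrap it in a signed batch.  The setting
# key/value pair below is only an illustrative placeholder.
def _example_propose_batch():
    signer = _read_signer(None)
    txn = _create_propose_txn(
        signer, ('sawtooth.settings.vote.approval_threshold', '1'))
    return _create_batch(signer, [txn])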
def _config_inputs(key):
"""Creates the list of inputs for a sawtooth_settings transaction, for a
given setting key.
"""
return [
_key_to_address('sawtooth.settings.vote.proposals'),
_key_to_address('sawtooth.settings.vote.authorized_keys'),
_key_to_address('sawtooth.settings.vote.approval_threshold'),
_key_to_address(key)
]
def _config_outputs(key):
"""Creates the list of outputs for a sawtooth_settings transaction, for a
given setting key.
"""
return [
_key_to_address('sawtooth.settings.vote.proposals'),
_key_to_address(key)
]
def _short_hash(in_str):
return hashlib.sha256(in_str.encode()).hexdigest()[:_ADDRESS_PART_SIZE]
def _key_to_address(key):
"""Creates the state address for a given setting key.
"""
key_parts = key.split('.', maxsplit=_MAX_KEY_PARTS - 1)
key_parts.extend([''] * (_MAX_KEY_PARTS - len(key_parts)))
return SETTINGS_NAMESPACE + ''.join(_short_hash(x) for x in key_parts)
def setting_key_to_address(key):
return _key_to_address(key)
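# --- Hedged example (not part of the original sawtooth source) ---
# Illustrates the address layout produced above: the 6-character settings
# namespace followed by four 16-character hashes, one per dot-separated key
# part (missing parts hash the empty string), for 70 characters in total.
def _example_setting_address():
    addr = setting_key_to_address('sawtooth.settings.vote.authorized_keys')
    assert addr.startswith(SETTINGS_NAMESPACE) and len(addr) == 70
    return addr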
def create_console_handler(verbose_level):
clog = logging.StreamHandler()
formatter = ColoredFormatter(
"%(log_color)s[%(asctime)s %(levelname)-8s%(module)s]%(reset)s "
"%(white)s%(message)s",
datefmt="%H:%M:%S",
reset=True,
log_colors={
'DEBUG': 'cyan',
'INFO': 'green',
'WARNING': 'yellow',
'ERROR': 'red',
'CRITICAL': 'red',
})
clog.setFormatter(formatter)
if verbose_level == 0:
clog.setLevel(logging.WARN)
elif verbose_level == 1:
clog.setLevel(logging.INFO)
else:
clog.setLevel(logging.DEBUG)
return clog
def setup_loggers(verbose_level):
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
logger.addHandler(create_console_handler(verbose_level))
def create_parent_parser(prog_name):
parent_parser = argparse.ArgumentParser(prog=prog_name, add_help=False)
parent_parser.add_argument(
'-v', '--verbose',
action='count',
help='enable more verbose output')
try:
version = pkg_resources.get_distribution(DISTRIBUTION_NAME).version
except pkg_resources.DistributionNotFound:
version = 'UNKNOWN'
parent_parser.add_argument(
'-V', '--version',
action='version',
version=(DISTRIBUTION_NAME + ' (Hyperledger Sawtooth) version {}')
.format(version),
help='display version information')
return parent_parser
def create_parser(prog_name):
parent_parser = create_parent_parser(prog_name)
parser = argparse.ArgumentParser(
description='Provides subcommands to change genesis block settings '
'and to view, create, and vote on settings proposals.',
parents=[parent_parser])
subparsers = parser.add_subparsers(title='subcommands', dest='subcommand')
subparsers.required = True
# The following parser is for the `genesis` subcommand.
# This command creates a batch that contains all of the initial
# transactions for on-chain settings
genesis_parser = subparsers.add_parser(
'genesis',
help='Creates a genesis batch file of settings transactions',
description='Creates a Batch of settings proposals that can be '
'consumed by "sawadm genesis" and used '
'during genesis block construction.'
)
genesis_parser.add_argument(
'-k', '--key',
type=str,
help='specify signing key for resulting batches '
'and initial authorized key')
genesis_parser.add_argument(
'-o', '--output',
type=str,
default='config-genesis.batch',
help='specify the output file for the resulting batches')
genesis_parser.add_argument(
'-T', '--approval-threshold',
type=int,
help='set the number of votes required to enable a setting change')
genesis_parser.add_argument(
'-A', '--authorized-key',
type=str,
action='append',
help='specify a public key for the user authorized to submit '
'config transactions')
# The following parser is for the `proposal` subcommand group. These
# commands allow the user to create proposals which may be applied
# immediately or placed in ballot mode, depending on the current on-chain
# settings.
proposal_parser = subparsers.add_parser(
'proposal',
help='Views, creates, or votes on settings change proposals',
description='Provides subcommands to view, create, or vote on '
'proposed settings')
proposal_parsers = proposal_parser.add_subparsers(
title='subcommands',
dest='proposal_cmd')
proposal_parsers.required = True
prop_parser = proposal_parsers.add_parser(
'create',
help='Creates proposals for setting changes',
description='Create proposals for settings changes. The change '
'may be applied immediately or after a series of votes, '
'depending on the vote threshold setting.'
)
prop_parser.add_argument(
'-k', '--key',
type=str,
help='specify a signing key for the resulting batches')
prop_target_group = prop_parser.add_mutually_exclusive_group()
prop_target_group.add_argument(
'-o', '--output',
type=str,
help='specify the output file for the resulting batches')
prop_target_group.add_argument(
'--url',
type=str,
help="identify the URL of a validator's REST API",
default='http://localhost:8008')
prop_target_group.add_argument(
'--sabre-output',
type=str,
help='specify an output file to write the settings payload to '
'for the sabre cli'
)
prop_parser.add_argument(
'setting',
type=str,
nargs='+',
help='configuration setting as key/value pair with the '
'format <key>=<value>')
proposal_list_parser = proposal_parsers.add_parser(
'list',
help='Lists the currently proposed (not active) settings',
description='Lists the currently proposed (not active) settings. '
'Use this list of proposals to find proposals to '
'vote on.')
proposal_list_parser.add_argument(
'--url',
type=str,
help="identify the URL of a validator's REST API",
default='http://localhost:8008')
proposal_list_parser.add_argument(
'--public-key',
type=str,
default='',
help='filter proposals from a particular public key')
proposal_list_parser.add_argument(
'--filter',
type=str,
default='',
help='filter keys that begin with this value')
proposal_list_parser.add_argument(
'--format',
default='default',
choices=['default', 'csv', 'json', 'yaml'],
help='choose the output format')
vote_parser = proposal_parsers.add_parser(
'vote',
help='Votes for specific setting change proposals',
description='Votes for a specific settings change proposal. Use '
'"sawset proposal list" to find the proposal id.')
vote_parser.add_argument(
'--url',
type=str,
help="identify the URL of a validator's REST API",
default='http://localhost:8008')
vote_parser.add_argument(
'-k', '--key',
type=str,
help='specify a signing key for the resulting transaction batch')
vote_parser.add_argument(
'proposal_id',
type=str,
help='identify the proposal to vote on')
vote_parser.add_argument(
'vote_value',
type=str,
choices=['accept', 'reject'],
help='specify the value of the vote')
return parser
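# Example (sketch, not part of the original CLI): driving the parser directly,
# e.g. from a test.
#   parser = create_parser('sawset')
#   args = parser.parse_args(['proposal', 'list', '--format', 'json'])
#   assert args.subcommand == 'proposal' and args.proposal_cmd == 'list'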
def main(prog_name=os.path.basename(sys.argv[0]), args=None,
with_loggers=True):
parser = create_parser(prog_name)
if args is None:
args = sys.argv[1:]
args = parser.parse_args(args)
if with_loggers is True:
if args.verbose is None:
verbose_level = 0
else:
verbose_level = args.verbose
setup_loggers(verbose_level=verbose_level)
if args.subcommand == 'proposal' and args.proposal_cmd == 'create':
_do_config_proposal_create(args)
elif args.subcommand == 'proposal' and args.proposal_cmd == 'list':
_do_config_proposal_list(args)
elif args.subcommand == 'proposal' and args.proposal_cmd == 'vote':
_do_config_proposal_vote(args)
elif args.subcommand == 'genesis':
_do_config_genesis(args)
else:
raise CliException(
'"{}" is not a valid subcommand of "config"'.format(
args.subcommand))
def main_wrapper():
# pylint: disable=bare-except
try:
main()
except CliException as e:
print("Error: {}".format(e), file=sys.stderr)
sys.exit(1)
except KeyboardInterrupt:
pass
except BrokenPipeError:
sys.stderr.close()
except SystemExit as e:
raise e
except:
traceback.print_exc(file=sys.stderr)
sys.exit(1)
|
tests/python/kivaloo/servers.py | Tarsnap/kivaloo | 176 | 12744004 | #!/usr/bin/env python3
""" Launch and organize servers for the python tests. """
import logging
import os
import shutil
import subprocess
import threading
import queue
import psutil
# ### Private constants
_KIVALOO_TEST_DIR = "/tmp/kivaloo-test/"
# - this is relative to TEST_DIR
# - this may be subjected to rm -rf
_DISK_LBS = "lbs-disk"
# These filenames are relative to this directory
_BIN_LBS = "../../lbs/lbs"
_BIN_KVLDS = "../../kvlds/kvlds"
# ### Public constants
LBS_BLOCKSIZE = 512
LBS_SOCK = os.path.join(_KIVALOO_TEST_DIR, "kivaloo-lbs-sock")
KVLDS_SOCK = os.path.join(_KIVALOO_TEST_DIR, "kivaloo-kvlds-sock")
def _enqueue_output(output, queue_toadd):
""" Reads data from a file descriptor and queues it. Usually launched
in a separate thread to provide non-blocking output.
"""
for line in iter(output.readline, b''):
queue_toadd.put(line)
output.close()
class StartError(RuntimeError):
""" We failed to start a server. """
pass
class Server(object):
""" Base class for interacting with a server. """
# Constants (will be set by subclasses)
cmd = []
pidfile = None
sock = None
# Variables
proc = None
def __init__(self):
if not os.path.exists(_KIVALOO_TEST_DIR):
os.mkdir(_KIVALOO_TEST_DIR)
# Variables to support non-blocking stderr from the server
self.stderr_queue = queue.Queue()
self.stderr_thread = None
def _start(self):
""" Start the server, or find an existing server. Should be called
automatically by the subclass.
"""
# cls refers to the derived class. Concretely, this gives us:
# - one cls.proc shared between all Server_lbs objects
# - one cls.proc shared between all Server_kvlds objects
cls = type(self)
if cls.proc:
logging.info("Server %s, pid %i: exists; reusing", self.cmd[0],
self.get_pid_from_file())
return
proc_unowned = self._search_for_process()
if proc_unowned:
logging.info("Terminating old process: %s", proc_unowned)
proc_unowned.terminate()
if os.path.exists(cls.pidfile):
os.remove(cls.pidfile)
# Clean up previous files
if self.sock and os.path.exists(self.sock):
os.remove(self.sock)
# Initialize server and start gathering stderr
cls.proc = subprocess.Popen(self.cmd, stderr=subprocess.PIPE)
self.stderr_thread = threading.Thread(target=_enqueue_output,
args=(cls.proc.stderr,
self.stderr_queue))
self.stderr_thread.start()
# Check for server fail
ret = cls.proc.wait()
        if ret != 0:
msg = "Error when running:\n%s\n\texitcode: %i, stderr:\n%s" % (
" ".join(self.cmd), ret, self.get_stderr())
# We don't have a running server
cls.proc = None
raise StartError(msg)
# Get server's daemon-forked pid
logging.info("Server %s, pid %i: started", self.cmd[0],
self.get_pid_from_file())
def get_stderr(self):
""" Get stderr from the server. Does not block. """
if self.stderr_queue.qsize():
stderr = ""
while self.stderr_queue.qsize():
stderr += self.stderr_queue.get_nowait().decode()
else:
stderr = None
return stderr
@classmethod
def get_pid_from_file(cls):
""" Get the server's daemon-forked pid. """
if not os.path.exists(cls.pidfile):
return None
with open(cls.pidfile) as filep:
return int(filep.read())
@classmethod
def _search_for_process(cls):
""" Try to find an existing server process. """
# Check existing pidfile
pid = cls.get_pid_from_file()
if pid:
proc = psutil.Process(pid)
if proc.cmdline() == cls.cmd:
return proc
# Look for the process
for proc in psutil.process_iter():
if proc.cmdline() == cls.cmd:
return proc
return None
@classmethod
def shutdown(cls):
""" Shut down the server. """
# The pid of self.proc is the pre-forked server's pid, so we get the
# pid of the daemonized server.
proc_unowned = cls._search_for_process()
if proc_unowned is not None:
proc_unowned.terminate()
ret = proc_unowned.wait()
# Did the server exit correctly?
if ret is not None and ret != 0:
raise Exception("Failed to shut down properly.")
logging.info("Server %s exited", cls.cmd[0])
if os.path.exists(cls.pidfile):
os.remove(cls.pidfile)
# One way or another, the previous server is unusable.
cls.proc = None
class Server_lbs(Server):
""" Interact with an lbs server. """
# Constant for Server_lbs
disk = os.path.join(_KIVALOO_TEST_DIR, _DISK_LBS)
# Constants for Server
sock = LBS_SOCK
cmd = ("%s -s %s -d %s -b %i" % (_BIN_LBS, sock, disk,
LBS_BLOCKSIZE)).split()
pidfile = sock + ".pid"
# Variable shared between all Server_lbs objects
proc = None
def __init__(self):
super().__init__()
if not os.path.exists(self.disk):
os.mkdir(self.disk)
self._start()
def reset(self):
""" Delete the lbs data and start the server again. """
self.shutdown()
shutil.rmtree(self.disk)
os.mkdir(self.disk)
self._start()
class Server_kvlds(Server):
""" Interact with a kvlds server. """
# Constant for Server_kvlds
sock_lbs = LBS_SOCK
# Constants for Server
sock = os.path.join(_KIVALOO_TEST_DIR, KVLDS_SOCK)
cmd = ("%s -s %s -l %s" % (_BIN_KVLDS, sock, sock_lbs)).split()
pidfile = sock + ".pid"
# Variable shared between all Server_kvlds objects
proc = None
def __init__(self):
super().__init__()
self._start()
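# Usage sketch (assumes the lbs and kvlds binaries have been built at _BIN_LBS and
# _BIN_KVLDS): kvlds talks to lbs through LBS_SOCK, so bring lbs up first and shut
# the pair down in reverse order.
#   lbs = Server_lbs()
#   kvlds = Server_kvlds()
#   ...                          # run tests against KVLDS_SOCK
#   Server_kvlds.shutdown()
#   Server_lbs.shutdown()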
|
grip/assets.py | botleague-results/grip | 5,502 | 12744008 | from __future__ import print_function, unicode_literals
import errno
import os
import posixpath
import re
import sys
import shutil
from abc import ABCMeta, abstractmethod
try:
from urlparse import urljoin
except ImportError:
from urllib.parse import urljoin
import requests
from flask import safe_join
from .constants import (
STYLE_URLS_SOURCE, STYLE_URLS_RES, STYLE_ASSET_URLS_RE,
STYLE_ASSET_URLS_SUB_FORMAT)
from .vendor.six import add_metaclass
@add_metaclass(ABCMeta)
class ReadmeAssetManager(object):
"""
Manages the style and font assets rendered with Readme pages.
Set cache_path to None to disable caching.
"""
def __init__(self, cache_path, style_urls=None, quiet=None):
super(ReadmeAssetManager, self).__init__()
self.cache_path = cache_path
self.style_urls = list(style_urls) if style_urls else []
self.styles = []
self.quiet = quiet
def _strip_url_params(self, url):
return url.rsplit('?', 1)[0].rsplit('#', 1)[0]
def clear(self):
"""
Clears the asset cache.
"""
if self.cache_path and os.path.exists(self.cache_path):
shutil.rmtree(self.cache_path)
def cache_filename(self, url):
"""
Gets a suitable relative filename for the specified URL.
"""
# FUTURE: Use url exactly instead of flattening it here
url = posixpath.basename(url)
return self._strip_url_params(url)
@abstractmethod
def retrieve_styles(self, asset_url_path):
"""
Get style URLs from the source HTML page and specified cached asset
URL path.
"""
pass
class GitHubAssetManager(ReadmeAssetManager):
"""
Reads the styles used for rendering Readme pages.
Set cache_path to None to disable caching.
"""
def __init__(self, cache_path, style_urls=None, quiet=None):
super(GitHubAssetManager, self).__init__(cache_path, style_urls, quiet)
def _get_style_urls(self, asset_url_path):
"""
Gets the specified resource and parses all style URLs and their
assets in the form of the specified patterns.
"""
# Check cache
if self.cache_path:
cached = self._get_cached_style_urls(asset_url_path)
# Skip fetching styles if there's any already cached
if cached:
return cached
# Find style URLs
r = requests.get(STYLE_URLS_SOURCE)
if not 200 <= r.status_code < 300:
print('Warning: retrieving styles gave status code',
r.status_code, file=sys.stderr)
urls = []
for style_urls_re in STYLE_URLS_RES:
urls.extend(re.findall(style_urls_re, r.text))
if not urls:
print('Warning: no styles found - see https://github.com/joeyespo/'
'grip/issues/265', file=sys.stderr)
# Cache the styles and their assets
if self.cache_path:
is_cached = self._cache_contents(urls, asset_url_path)
if is_cached:
urls = self._get_cached_style_urls(asset_url_path)
return urls
def _get_cached_style_urls(self, asset_url_path):
"""
Gets the URLs of the cached styles.
"""
try:
cached_styles = os.listdir(self.cache_path)
except IOError as ex:
if ex.errno != errno.ENOENT and ex.errno != errno.ESRCH:
raise
return []
except OSError:
return []
return [posixpath.join(asset_url_path, style)
for style in cached_styles
if style.endswith('.css')]
def _cache_contents(self, style_urls, asset_url_path):
"""
Fetches the given URLs and caches their contents
and their assets in the given directory.
"""
files = {}
asset_urls = []
for style_url in style_urls:
if not self.quiet:
print(' * Downloading style', style_url, file=sys.stderr)
r = requests.get(style_url)
if not 200 <= r.status_code < 300:
print(' -> Warning: Style request responded with',
r.status_code, file=sys.stderr)
files = None
continue
asset_content = r.text
# Find assets and replace their base URLs with the cache directory
for url in re.findall(STYLE_ASSET_URLS_RE, asset_content):
asset_urls.append(urljoin(style_url, url))
contents = re.sub(
STYLE_ASSET_URLS_RE,
STYLE_ASSET_URLS_SUB_FORMAT.format(asset_url_path.rstrip('/')),
asset_content)
# Prepare cache
if files is not None:
filename = self.cache_filename(style_url)
files[filename] = contents.encode('utf-8')
for asset_url in asset_urls:
if not self.quiet:
print(' * Downloading asset', asset_url, file=sys.stderr)
# Retrieve binary file and show message
r = requests.get(asset_url, stream=True)
if not 200 <= r.status_code < 300:
print(' -> Warning: Asset request responded with',
r.status_code, file=sys.stderr)
files = None
continue
# Prepare cache
if files is not None:
filename = self.cache_filename(asset_url)
files[filename] = r.raw.read(decode_content=True)
# Skip caching if something went wrong to try again next time
if not files:
return False
# Cache files if all downloads were successful
cache = {}
for relname in files:
cache[safe_join(self.cache_path, relname)] = files[relname]
if not os.path.exists(self.cache_path):
os.makedirs(self.cache_path)
for filename in cache:
with open(filename, 'wb') as f:
f.write(cache[filename])
if not self.quiet:
print(
' * Cached all downloads in', self.cache_path, file=sys.stderr)
return True
def retrieve_styles(self, asset_url_path):
"""
Get style URLs from the source HTML page and specified cached
asset base URL.
"""
if not asset_url_path.endswith('/'):
asset_url_path += '/'
self.style_urls.extend(self._get_style_urls(asset_url_path))
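# Usage sketch (assumptions: network access, a writable cache directory, and an
# asset URL path matching whatever route the Flask app serves cached assets from):
#   manager = GitHubAssetManager('cache', quiet=True)
#   manager.retrieve_styles('/__/grip/asset/')
#   print(manager.style_urls)   # cached or freshly downloaded stylesheet URLs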
|
samples/python/06.using-cards/bots/rich_cards_bot.py | Aliacf21/BotBuilder-Samples | 1,998 | 12744009 | # Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
from botbuilder.core import MessageFactory, TurnContext
from botbuilder.schema import ChannelAccount
from .dialog_bot import DialogBot
class RichCardsBot(DialogBot):
"""
RichCardsBot prompts a user to select a Rich Card and then returns the card
that matches the user's selection.
"""
def __init__(self, conversation_state, user_state, dialog):
super().__init__(conversation_state, user_state, dialog)
async def on_members_added_activity(
self, members_added: ChannelAccount, turn_context: TurnContext
):
for member in members_added:
if member.id != turn_context.activity.recipient.id:
reply = MessageFactory.text(
"Welcome to CardBot. "
+ "This bot will show you different types of Rich Cards. "
+ "Please type anything to get started."
)
await turn_context.send_activity(reply)
|
catboost/benchmarks/training_speed/plot.py | jochenater/catboost | 6,989 | 12744049 | <reponame>jochenater/catboost
import argparse
import json
import os
import numpy as np
from matplotlib import pyplot as plt
from log_parser import read_results
FONT_DICT = {'fontsize': 20}
FIGURE_SIZE = (10, 5)
def plot_time_per_iter(tracks, figsize=FIGURE_SIZE, title=None, save_path='time_per_iter.png'):
fig = plt.figure(figsize=figsize)
time_per_iters = []
algs = tracks.keys()
for alg_name in algs:
time_per_iter_alg = []
for track in tracks[alg_name]:
# aggregating statistic over different tracks
time_per_iter = track.get_time_per_iter()
time_per_iter_alg.extend(time_per_iter)
time_per_iters.append(time_per_iter_alg)
if title is not None:
plt.title(title, FONT_DICT)
for i, alg_name in enumerate(algs):
print(alg_name)
print(np.median(time_per_iters[i]))
plt.ylabel('Seconds', FONT_DICT)
plt.boxplot(time_per_iters, labels=algs)
if os.path.exists(save_path):
print('WARNING: file ' + save_path + ' already exists')
plt.savefig(save_path, dpi=100)
plt.close(fig)
def plot_quality(tracks, from_iter, to_iter, figsize=FIGURE_SIZE, title=None, save_path='quality.png'):
fig = plt.figure(figsize=figsize)
if title is not None:
plt.title(title, FONT_DICT)
flat_tracks = []
for alg in tracks.keys():
flat_tracks += tracks[alg]
first_track = flat_tracks[0]
task_type = first_track.task_type
metric = 'Error' if task_type == 'Classification' or task_type == 'Multiclass' else 'RMSE'
plt.xlabel('iteration', FONT_DICT)
plt.ylabel(metric, FONT_DICT)
lines = []
names = []
for track in flat_tracks:
_, values = track.get_series()
cur_to_iter = to_iter
if to_iter is None or to_iter > track.get_fit_iterations():
cur_to_iter = track.get_fit_iterations()
values = values[from_iter:cur_to_iter]
x_values = np.arange(from_iter, cur_to_iter)
line, = plt.plot(x_values, values)
lines.append(line)
names.append(str(track))
plt.legend(lines, names, prop={'size': 9})
if os.path.exists(save_path):
print('WARNING: file ' + save_path + ' already exists')
plt.savefig(save_path, dpi=100)
plt.close(fig)
def plot_quality_vs_time(tracks, best_quality, low_percent=0.8, num_bins=100, only_min=False,
figsize=FIGURE_SIZE, title=None, save_path='time_distr.png'):
fig = plt.figure(figsize=figsize)
if title is not None:
plt.title(title, FONT_DICT)
plt.xlabel('Quality (%)', FONT_DICT)
plt.ylabel('Time to obtain (sec)', FONT_DICT)
algs = tracks.keys()
up_percent = 1. - low_percent
for i, alg_name in enumerate(algs):
bins = [[] for j in range(num_bins + 1)]
for track in tracks[alg_name]:
time_series, values = track.get_series()
time_series = time_series - time_series[0]
for time, value in zip(time_series, values):
percent = value / best_quality - 1.
if percent > up_percent:
continue
idx = int(np.round(num_bins * percent / up_percent))
bins[idx].append(time)
time_median = []
time_q2 = []
time_min = []
x_values = []
for k, times in enumerate(bins):
if len(times) > 0:
time_median.append(np.median(times))
time_q2.append(np.quantile(times, 0.75))
time_min.append(np.min(times))
x_values.append(float(k) / num_bins * up_percent)
cur_min = time_min[0]
for t in range(1, len(time_min)):
if time_min[t] > cur_min:
time_min[t] = cur_min
else:
cur_min = time_min[t]
error_plus = np.array(time_q2) - np.array(time_median)
error_minus = np.array(time_median) - np.array(time_min)
x_values = np.array(x_values) - (float(i) - 1.) * up_percent / num_bins / 4.
x_values = 1. - x_values
if only_min:
plt.plot(x_values, time_min, label=alg_name)
else:
plt.errorbar(x=x_values, y=time_median, yerr=[error_minus, error_plus], fmt='o-', barsabove=True,
capsize=2, linewidth=2, label=alg_name)
plt.legend(fontsize='large')
if os.path.exists(save_path):
print('WARNING: file ' + save_path + ' already exists')
plt.savefig(save_path, dpi=100)
plt.close(fig)
def params_to_str(params):
    return ''.join('{}{}'.format(key, value) for key, value in params.items())
def get_best(tracks, top=1):
algorithms = tracks.keys()
best_tracks = {}
for algorithm_name in algorithms:
        best_scores = [track.get_best_score() for track in tracks[algorithm_name]]
        idx_best = np.argsort(best_scores)[:top]
        best_tracks[algorithm_name] = [tracks[algorithm_name][idx] for idx in idx_best]
return best_tracks
def filter_tracks(tracks, params_cases):
filtered_tracks = {}
for alg in tracks.keys():
filtered_tracks[alg] = []
for track in tracks[alg]:
if all([track.params_dict[param_name] in params_cases[param_name] for param_name in params_cases.keys()]):
filtered_tracks[alg].append(track)
return filtered_tracks
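# Example of a --params-cases filter (sketch): keep only tracks whose params_dict
# has max_depth in {6, 8} and learning_rate equal to 0.05. The JSON file passed on
# the command line would look like:
#   {"max_depth": [6, 8], "learning_rate": [0.05]}
# (the parameter names here are illustrative; any key stored in params_dict works)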
ONLY_TYPES = {
'cat-cpu': ['catboost-CPU'],
'xgb-cpu': ['xgboost-CPU'],
'lgb-cpu': ['lightgbm-CPU'],
'cat-gpu': ['catboost-GPU'],
'xgb-gpu': ['xgboost-GPU'],
'lgb-gpu': ['lightgbm-GPU'],
'cat': ['catboost-CPU', 'catboost-GPU'],
'xgb': ['xgboost-CPU', 'xgboost-GPU'],
'lgb': ['lightgbm-CPU', 'lightgbm-GPU'],
'cpu': ['catboost-CPU', 'xgboost-CPU', 'lightgbm-CPU'],
'gpu': ['catboost-GPU', 'xgboost-GPU', 'lightgbm-GPU']
}
def get_default_file_name(plot_type, params):
default_file_names = {
'best': 'best_quality.png',
'quality-vs-time': 'quality_vs_time.png',
'time-per-iter': 'time_per_iter.png'
}
if plot_type in default_file_names.keys():
return default_file_names[plot_type]
if plot_type == 'custom':
return params_to_str(params) + '.png'
def plot_experiment(tracks, experiment_name, args):
file_name = args.file_name if args.file_name else get_default_file_name(args.type, args.params_cases)
save_dir = os.path.join(args.out_dir, experiment_name)
if not os.path.exists(save_dir):
os.makedirs(save_dir)
save_path = os.path.join(save_dir, file_name)
if args.only:
filtered_tracks = {}
for only_type in args.only:
for alg_name in ONLY_TYPES[only_type]:
filtered_tracks[alg_name] = tracks[alg_name]
tracks = filtered_tracks
if args.params_cases:
with open(args.params_cases) as f:
params_cases = json.load(f)
tracks = filter_tracks(tracks, params_cases)
if args.type == 'quality-vs-time':
best_tracks = get_best(tracks)
best_quality = min(map(lambda tracks: tracks[0].get_best_score(), best_tracks.values()))
print(best_quality)
if args.top:
tracks = get_best(tracks, top=args.top)
plot_quality_vs_time(tracks, best_quality=best_quality, low_percent=args.low_percent, only_min=args.only_min,
figsize=args.fig_size, num_bins=args.num_bins, save_path=save_path)
if args.type == 'best':
best_tracks = get_best(tracks, top=args.top)
for alg in best_tracks.keys():
for track in best_tracks[alg]:
print(track)
print(track.get_best_score())
plot_quality(best_tracks, args.from_iter, args.to_iter, figsize=args.fig_size,
title=args.title, save_path=save_path)
if args.type == 'custom':
plot_quality(tracks, args.from_iter, args.to_iter,
figsize=args.fig_size, title=args.title, save_path=save_path)
if args.type == 'time-per-iter':
plot_time_per_iter(tracks, figsize=args.fig_size, title=args.title, save_path=save_path)
def main():
plot_functions = {
'time-per-iter': plot_time_per_iter,
'best': plot_quality,
'quality-vs-time': plot_quality_vs_time,
'custom': plot_quality
}
parser = argparse.ArgumentParser()
parser.add_argument('--type', choices=plot_functions.keys(), required=True)
parser.add_argument('--only', nargs='+', choices=ONLY_TYPES.keys(), required=False)
parser.add_argument('-i', '--results-file', required=True)
parser.add_argument('-t', '--title')
parser.add_argument('-f', '--fig-size', nargs=2, type=int, default=FIGURE_SIZE)
parser.add_argument('-o', '--out-dir', default='plots')
    parser.add_argument('--params-cases', help='draw plots only for tracks trained with these params '
                                               '(track filtering); path to a JSON file that maps each '
                                               'learner parameter (e.g. max_depth) to a list of allowed values')
parser.add_argument('--from-iter', type=int, default=0, help='only custom, best modes')
parser.add_argument('--to-iter', type=int, default=None, help='only custom, best modes')
parser.add_argument('--low-percent', type=float, default=0.9, help='only quality-vs-time mode')
parser.add_argument('--num-bins', type=int, default=200, help='only quality-vs-time mode')
parser.add_argument('--only-min', action='store_true', help='only quality-vs-time mode')
parser.add_argument('--top', type=int, default=3, help='only best mode')
args = parser.parse_args()
tracks = read_results(args.results_file)
for experiment_name in tracks:
plot_experiment(tracks[experiment_name], experiment_name, args)
if __name__ == '__main__':
main()
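# CLI usage sketch (file and directory names are hypothetical):
#   python plot.py --type best -i results.json --top 3 -o plots
#   python plot.py --type quality-vs-time -i results.json --only cpu --low-percent 0.9
#   python plot.py --type time-per-iter -i results.json --only cat-gpu xgb-gpu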
|
h2o-py/tests/testdir_algos/rulefit/pyunit_titanic_rulefit.py | vishalbelsare/h2o-3 | 6,098 | 12744119 | <reponame>vishalbelsare/h2o-3<gh_stars>1000+
import sys
sys.path.insert(1,"../../../")
import h2o
from tests import pyunit_utils
from h2o.estimators.rulefit import H2ORuleFitEstimator
def titanic():
df = h2o.import_file(pyunit_utils.locate("smalldata/gbm_test/titanic.csv"),
col_types={'pclass': "enum", 'survived': "enum"})
x = ["age", "sibsp", "parch", "fare", "sex", "pclass"]
# Split the dataset into train and test
train, test = df.split_frame(ratios=[.8], seed=1234)
rfit = H2ORuleFitEstimator(min_rule_length=4, max_rule_length=5, max_num_rules=3, seed=1234, model_type="rules")
rfit.train(training_frame=train, x=x, y="survived", validation_frame=test)
assert rfit.rmse(valid=True) is not None, "validation metrics should be present"
print(rfit.rule_importance())
assert rfit._model_json["output"]["model_summary"] is not None, "model_summary should be present"
assert len(rfit._model_json["output"]["model_summary"]._cell_values) > 0, "model_summary's content should be present"
rfit_predictions = rfit.predict(test)
import tempfile
tmpdir = tempfile.mkdtemp()
try:
mojo_path = rfit.save_mojo(tmpdir)
mojo_model = h2o.upload_mojo(mojo_path)
finally:
import shutil
shutil.rmtree(tmpdir)
mojo_predictions = mojo_model.predict(test)
assert pyunit_utils.compare_frames(rfit_predictions, mojo_predictions, 0)
if __name__ == "__main__":
pyunit_utils.standalone_test(titanic)
else:
titanic()
|
evaluation_matrix.py | gsygsygsy123/SOTA-on-monocular-3D-pose-and-shape-estimation | 183 | 12744126 | import torch
import numpy as np
def mpjpe(predicted, target):
"""
Mean per-joint position error (i.e. mean Euclidean distance),
often referred to as "Protocol #1" in many papers.
"""
assert predicted.shape == target.shape
return torch.mean(torch.norm(predicted - target, dim=len(target.shape)-1))
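# Quick sanity check (sketch): identical poses give zero error, and shifting every
# joint by a constant distance along one axis gives an MPJPE of exactly that distance.
#   p = torch.zeros(2, 17, 3)
#   q = p.clone(); q[..., 0] += 0.05          # shift every joint 5 cm along x
#   assert mpjpe(p, p).item() == 0.0
#   assert abs(mpjpe(q, p).item() - 0.05) < 1e-6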
def weighted_mpjpe(predicted, target, w):
"""
Weighted mean per-joint position error (i.e. mean Euclidean distance)
"""
assert predicted.shape == target.shape
assert w.shape[0] == predicted.shape[0]
return torch.mean(w * torch.norm(predicted - target, dim=len(target.shape)-1))
def p_mpjpe_torch(predicted, target, with_sRt=False,full_torch=False,with_aligned=False):
"""
Pose error: MPJPE after rigid alignment (scale, rotation, and translation),
often referred to as "Protocol #2" in many papers.
"""
assert predicted.shape == target.shape
muX = torch.mean(target, dim=1, keepdim=True)
muY = torch.mean(predicted, dim=1, keepdim=True)
X0 = target - muX
Y0 = predicted - muY
X0[X0**2<1e-6]=1e-3
normX = torch.sqrt(torch.sum(X0**2, dim=(1, 2), keepdim=True))
normY = torch.sqrt(torch.sum(Y0**2, dim=(1, 2), keepdim=True))
normX[normX<1e-3]=1e-3
X0 /= normX
Y0 /= normY
H = torch.matmul(X0.transpose(1,2), Y0)
if full_torch:
U, s, V = batch_svd(H)
else:
U, s, Vt = np.linalg.svd(H.cpu().numpy())
V = torch.from_numpy(Vt.transpose(0, 2, 1)).cuda()
U = torch.from_numpy(U).cuda()
s = torch.from_numpy(s).cuda()
R = torch.matmul(V, U.transpose(2, 1))
# Avoid improper rotations (reflections), i.e. rotations with det(R) = -1
sign_detR = torch.sign(torch.unsqueeze(torch.det(R[0]), 0))
V[:, :, -1] *= sign_detR.unsqueeze(0)
s[:, -1] *= sign_detR.flatten()
R = torch.matmul(V, U.transpose(2, 1)) # Rotation
tr = torch.unsqueeze(torch.sum(s, dim=1, keepdim=True), 2)
a = tr * normX / normY # Scale
t = muX - a*torch.matmul(muY, R) # Translation
if (a!=a).sum()>0:
print('NaN Error!!')
print('UsV:',U,s,V)
print('aRt:',a,R,t)
a[a!=a]=1.
R[R!=R]=0.
t[t!=t]=0.
# Perform rigid transformation on the input
predicted_aligned = a*torch.matmul(predicted, R) + t
if with_sRt:
return torch.sqrt(((predicted_aligned - target)**2).sum(-1)).mean(),(a,R,t)#torch.mean(torch.norm(predicted_aligned - target, dim=len(target.shape)-1))
if with_aligned:
return torch.sqrt(((predicted_aligned - target)**2).sum(-1)).mean(),predicted_aligned
# Return MPJPE
return torch.sqrt(((predicted_aligned - target)**2).sum(-1)).mean()#torch.mean(torch.norm(predicted_aligned - target, dim=len(target.shape)-1))#,(a,R,t),predicted_aligned
def batch_svd(H):
num = H.shape[0]
U_batch, s_batch, V_batch = [],[],[]
for i in range(num):
U, s, V = H[i].svd(some=False)
U_batch.append(U.unsqueeze(0))
s_batch.append(s.unsqueeze(0))
V_batch.append(V.unsqueeze(0))
return torch.cat(U_batch,0),torch.cat(s_batch,0),torch.cat(V_batch,0)
def p_mpjpe(predicted, target, with_sRt=False,full_torch=False,with_aligned=False,each_separate=False):
"""
Pose error: MPJPE after rigid alignment (scale, rotation, and translation),
often referred to as "Protocol #2" in many papers.
"""
assert predicted.shape == target.shape
muX = np.mean(target, axis=1, keepdims=True)
muY = np.mean(predicted, axis=1, keepdims=True)
X0 = target - muX
Y0 = predicted - muY
normX = np.sqrt(np.sum(X0**2, axis=(1, 2), keepdims=True))
normY = np.sqrt(np.sum(Y0**2, axis=(1, 2), keepdims=True))
X0 /= (normX+1e-6)
Y0 /= (normY+1e-6)
H = np.matmul(X0.transpose(0, 2, 1), Y0).astype(np.float16).astype(np.float64)
U, s, Vt = np.linalg.svd(H)
V = Vt.transpose(0, 2, 1)
R = np.matmul(V, U.transpose(0, 2, 1))
# Avoid improper rotations (reflections), i.e. rotations with det(R) = -1
sign_detR = np.sign(np.expand_dims(np.linalg.det(R), axis=1))
V[:, :, -1] *= sign_detR
s[:, -1] *= sign_detR.flatten()
R = np.matmul(V, U.transpose(0, 2, 1)) # Rotation
tr = np.expand_dims(np.sum(s, axis=1, keepdims=True), axis=2)
a = tr * normX / normY # Scale
t = muX - a*np.matmul(muY, R) # Translation
# Perform rigid transformation on the input
predicted_aligned = a*np.matmul(predicted, R) + t
if each_separate:
return np.linalg.norm(predicted_aligned - target, axis=len(target.shape)-1)
error = np.mean(np.linalg.norm(predicted_aligned - target, axis=len(target.shape)-1))
if with_sRt and not with_aligned:
return error, (a,R,t)
if with_aligned:
return error,(a,R,t),predicted_aligned
# Return MPJPE
return error
def n_mpjpe(predicted, target):
"""
Normalized MPJPE (scale only), adapted from:
https://github.com/hrhodin/UnsupervisedGeometryAwareRepresentationLearning/blob/master/losses/poses.py
"""
assert predicted.shape == target.shape
norm_predicted = torch.mean(torch.sum(predicted**2, dim=3, keepdim=True), dim=2, keepdim=True)
norm_target = torch.mean(torch.sum(target*predicted, dim=3, keepdim=True), dim=2, keepdim=True)
scale = norm_target / norm_predicted
return mpjpe(scale * predicted, target)
def mean_velocity_error(predicted, target):
"""
Mean per-joint velocity error (i.e. mean Euclidean distance of the 1st derivative)
"""
assert predicted.shape == target.shape
velocity_predicted = np.diff(predicted, axis=0)
velocity_target = np.diff(target, axis=0)
return np.mean(np.linalg.norm(velocity_predicted - velocity_target, axis=len(target.shape)-1))
def test():
r1 = np.random.rand(3,14,3)
r2 = np.random.rand(3,14,3)
pmpjpe = p_mpjpe(r1, r2,with_sRt=False)
pmpjpe_torch = p_mpjpe_torch(torch.from_numpy(r1), torch.from_numpy(r2),with_sRt=False,full_torch=True)
print('pmpjpe: {}; {:.6f}; {:.6f}; {:.6f}'.format(np.abs(pmpjpe-pmpjpe_torch.numpy())<0.01,pmpjpe,pmpjpe_torch.numpy(), pmpjpe-pmpjpe_torch.numpy()))
if __name__ == '__main__':
test()
|
src/accelerate/deepspeed_utils.py | Pandinosaurus/accelerate | 2,313 | 12744133 | <filename>src/accelerate/deepspeed_utils.py<gh_stars>1000+
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .optimizer import AcceleratedOptimizer
from .state import is_apex_available, is_deepspeed_available
if is_deepspeed_available():
from deepspeed import DeepSpeedEngine
if is_apex_available():
from apex import amp
class DeepSpeedEngineWrapper(DeepSpeedEngine):
"""
Wrapper over deepspeed.DeepSpeedEngine object
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
# overwriting micro_steps for user's gradient_accumulation
self.micro_steps = -1
def step(self, lr_kwargs=None):
"""DeepSpeedEngine.step() without `micro_steps` update & no profiling"""
if self.is_gradient_accumulation_boundary(): # it shouldn't matter whether we keep this line or not
if self.progressive_layer_drop:
self.progressive_layer_drop.update_state(self.global_steps)
self._take_model_step(lr_kwargs)
def backward(self, loss):
"""DeepSpeedEngine.backward() with with no loss scaling; no profiling but with `micro_steps` update"""
if self.zero_optimization():
self.optimizer.is_gradient_accumulation_boundary = self.is_gradient_accumulation_boundary()
self.optimizer.backward(loss)
elif self.amp_enabled():
# AMP requires delaying unscale when inside gradient accumulation boundaries
# https://nvidia.github.io/apex/advanced.html#gradient-accumulation-across-iterations
delay_unscale = not self.is_gradient_accumulation_boundary()
with amp.scale_loss(loss, self.optimizer, delay_unscale=delay_unscale) as scaled_loss:
scaled_loss.backward()
elif self.fp16_enabled():
self.optimizer.backward(loss)
else:
loss.backward()
if self.enable_backward_allreduce:
self.allreduce_gradients()
# this will ensure deepspeed gradient_accumulation matches user's accumulation
self.micro_steps += 1
class DeepSpeedOptimizerWrapper(AcceleratedOptimizer):
"""
Internal wrapper around a deepspeed optimizer.
Args:
optimizer (:obj:`torch.optim.optimizer.Optimizer`):
The optimizer to wrap.
"""
def __init__(self, optimizer, model: DeepSpeedEngineWrapper):
super().__init__(optimizer, device_placement=False, scaler=None)
self.model = model
def zero_grad(self, set_to_none=None):
        pass # `model.step()` is doing that automatically. Therefore, its implementation is not needed
def step(self):
"""This will handle optimizer.step() & optimizer.zero_grad() with gradient_accumulation"""
self.model.step()
@property
def is_overflow(self):
"""Whether or not the optimizer step was done, or skipped because of gradient overflow."""
overflow = False
if hasattr(self.optimizer, "overflow"):
overflow = self.optimizer.overflow
return overflow
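# Usage sketch (assumption: `engine` is the DeepSpeedEngineWrapper produced by
# accelerate's DeepSpeed initialization path, wrapping the user's model):
#   optimizer = DeepSpeedOptimizerWrapper(engine.optimizer, model=engine)
#   engine.backward(loss)   # dispatches to ZeRO / AMP / fp16 backward and bumps micro_steps
#   optimizer.step()        # delegates to engine.step(), which only takes a model step
#                           # on gradient accumulation boundaries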
|
edu/class0/dataset.py | haribhutanadhu/PaddleViT | 993 | 12744150 | <filename>edu/class0/dataset.py
from paddle.io import Dataset
from paddle.io import DataLoader
from paddle.vision import datasets
from paddle.vision import transforms
def get_transforms(mode='train'):
if mode == 'train':
data_transforms = transforms.Compose([
transforms.RandomCrop(32, padding=4),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize(mean=[0.4914, 0.4822, 0.4465], std=[0.2023, 0.1994, 0.2010])])
else:
data_transforms = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize(mean=[0.4914, 0.4822, 0.4465], std=[0.2023, 0.1994, 0.2010])])
return data_transforms
def get_dataset(name='cifar10', mode='train'):
if name == 'cifar10':
dataset = datasets.Cifar10(mode=mode, transform=get_transforms(mode))
return dataset
def get_dataloader(dataset, batch_size=128, mode='train'):
dataloader = DataLoader(dataset, batch_size=batch_size, num_workers=2, shuffle=(mode == 'train'))
return dataloader
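# Usage sketch:
#   train_set = get_dataset('cifar10', mode='train')
#   train_loader = get_dataloader(train_set, batch_size=128, mode='train')
#   images, labels = next(iter(train_loader))   # one augmented CIFAR-10 batch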
|
generalizationTests/python2_double_curly/src/test.py | IMULMUL/websitesVulnerableToSSTI | 288 | 12744158 | import re
person = "xx{{\"asdasd\"+\"lala\"}} }} {1+1}xxx"
regex = r"{{(.*?)}}"
matches = re.finditer(regex, person, re.MULTILINE)
for matchNum, match in enumerate(matches):
eval_result = eval(match.group(1))
person = person.replace(str(match.group()),str(eval_result))
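# Expected behaviour (sketch): only the non-greedy {{ ... }} match is evaluated,
# so the final value of person is 'xxasdasdlala }} {1+1}xxx' -- the single-brace
# {1+1} expression is left untouched.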
print(person) |
accounts/forms.py | annevandalfsen/screenbird | 121 | 12744171 | <reponame>annevandalfsen/screenbird<filename>accounts/forms.py
from django.conf import settings
from django import forms
from django.contrib.auth.models import User
from django.contrib.auth.tokens import default_token_generator
from django.contrib.sites.models import get_current_site
from django.core.validators import email_re
from django.template import Context, loader
from django.utils.http import int_to_base36
from django.utils.safestring import mark_safe
from django.utils.translation import ugettext_lazy as _
from paypal.standard.conf import *
from paypal.standard.forms import PayPalPaymentsForm
from social_auth.models import UserSocialAuth
from accounts.models import AccountLevel, UserProfile
class UsernameChangeForm(forms.ModelForm):
"""
Update username form
"""
def __init__(self, *args, **kwargs):
super(UsernameChangeForm, self).__init__(*args, **kwargs)
for myField in self.fields:
self.fields[myField].widget.attrs['class'] = 'input-text'
class Meta:
model = User
fields = ('username',)
class SetPasswordForm(forms.Form):
"""
A form that lets a user change set his/her password without
entering the old password
"""
new_password1 = forms.CharField(label=_("New password"),
widget=forms.PasswordInput)
new_password2 = forms.CharField(label=_("New password confirmation"),
widget=forms.PasswordInput)
def __init__(self, user, *args, **kwargs):
self.user = user
super(SetPasswordForm, self).__init__(*args, **kwargs)
for myField in self.fields:
self.fields[myField].widget.attrs['class'] = 'input-text'
def clean_new_password2(self):
password1 = self.cleaned_data.get('new_password1')
password2 = self.cleaned_data.get('new_password2')
if password1 and password2:
if password1 != password2:
raise forms.ValidationError(_("The two password fields didn't match."))
return password2
def save(self, commit=True):
self.user.set_password(self.cleaned_data['new_password1'])
if commit:
self.user.save()
return self.user
class PasswordChangeForm(SetPasswordForm):
"""
A form that lets a user change his/her password by entering
their old password.
"""
old_password = forms.CharField(label=_("Old password"), widget=forms.PasswordInput)
def __init__(self, *args, **kwargs):
super(PasswordChangeForm, self).__init__(*args, **kwargs)
for myField in self.fields:
self.fields[myField].widget.attrs['class'] = 'input-text'
def clean_old_password(self):
"""
Validates that the old_password field is correct.
"""
old_password = self.cleaned_data["old_password"]
if not self.user.check_password(old_password):
raise forms.ValidationError(_("Your old password was entered incorrectly. Please enter it again."))
return old_password
PasswordChangeForm.base_fields.keyOrder = ['old_password', 'new_password1', 'new_password2']
class UserProfileUpdateForm(forms.ModelForm):
"""
Update nickname form
"""
def __init__(self, *args, **kwargs):
super(UserProfileUpdateForm, self).__init__(*args, **kwargs)
for myField in self.fields:
self.fields[myField].widget.attrs['class'] = 'input-text'
class Meta:
model = UserProfile
exclude = ('user', 'account_level')
fields = ('nickname',)
class PasswordResetForm(forms.Form):
email_username = forms.CharField(label=_("E-mail or Username"),
max_length=75)
def __init__(self, *args, **kwargs):
super(PasswordResetForm, self).__init__(*args, **kwargs)
for myField in self.fields:
self.fields[myField].widget.attrs['class'] = 'input-text'
def clean_email_username(self):
"""
Validates that an active user exists with the given e-mail address or username
"""
email_username = self.cleaned_data["email_username"]
if email_re.search(email_username):
try:
self.users_cache = list(User.objects.filter(
email__iexact=email_username,
is_active=True
))
except User.DoesNotExist:
pass
else:
try:
self.users_cache = list(User.objects.filter(
username__iexact=email_username,
is_active=True
))
except User.DoesNotExist:
pass
        # Users who registered through a social networking site cannot reset a
        # password here; send them back to their OAuth provider instead
for user in self.users_cache:
try:
oauth_user = UserSocialAuth.objects.get(user=user)
raise forms.ValidationError(_("Your Screenbird account is based off of either Google or Facebook. To login with either of those, please use one of these links:"))
except UserSocialAuth.DoesNotExist:
oauth_user = None
if len(self.users_cache) == 0:
raise forms.ValidationError(_("That e-mail address or username doesn't have an associated user account. Are you sure you've registered?"))
return email_username
def save(self, domain_override=None, email_template_name='registration/password_reset_email.html',
use_https=False, token_generator=default_token_generator, from_email=None, request=None):
"""
Generates a one-use only link for resetting password and sends to the user
"""
from django.core.mail import send_mail
for user in self.users_cache:
if not domain_override:
current_site = get_current_site(request)
site_name = current_site.name
domain = current_site.domain
else:
site_name = domain = domain_override
t = loader.get_template(email_template_name)
c = {
'email': user.email,
'domain': domain,
'site_name': site_name,
'uid': int_to_base36(user.id),
'user': user,
'token': token_generator.make_token(user),
'protocol': use_https and 'https' or 'http',
}
send_mail(_("Password reset on %s") % site_name,
t.render(Context(c)), from_email, [user.email], fail_silently=False)
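# Usage sketch (view-side; assumes `request` is the current HttpRequest):
#   form = PasswordResetForm({'email_username': 'someone@example.com'})
#   if form.is_valid():          # resolves the user by e-mail address or username
#       form.save(request=request, use_https=request.is_secure())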
class PayPalPaymentsForm(PayPalPaymentsForm):
'''Extended django-paypals PayPalPaymentsForm to customize button image and render
'''
MONTHLY_IMAGE = settings.MEDIA_URL + 'gfx/premium_button%201.png'
YEARLY_IMAGE = settings.MEDIA_URL + 'gfx/premium_button%202.png'
PASTEVID_MONTHLY = 'pastevid_monthly'
PASTEVID_YEARLY = 'pastevid_yearly'
def render(self):
if settings.SITE_ID == 2:
if self.button_type == self.PASTEVID_MONTHLY:
link_text = "Monthly"
tagline = "$9/month"
else:
link_text = "Yearly"
tagline = "$99/year"
rendered_form = mark_safe(u"""<form action="%s" method="post" id="%s">
%s
<a href="javascript:{}" style="text-align:center;" class="buy_now" onclick="document.getElementById('%s').submit(); return false;">%s</a><div class="tagline">%s</div><br><br>
</form>""" % (POSTBACK_ENDPOINT, self.button_type, self.as_p(), self.button_type, link_text, tagline))
else:
rendered_form = mark_safe(u"""<form action="%s" method="post" id="%s">
%s
<input type="image" src="%s" border="0" name="submit" alt="Buy it Now" />
</form>""" % (POSTBACK_ENDPOINT, self.button_type, self.as_p(), self.get_image()))
return rendered_form
def get_image(self):
return {
(True, self.PASTEVID_MONTHLY): self.MONTHLY_IMAGE,
(True, self.PASTEVID_YEARLY): self.YEARLY_IMAGE,
(True, self.SUBSCRIBE): SUBSCRIPTION_SANDBOX_IMAGE,
(True, self.BUY): SANDBOX_IMAGE,
(True, self.DONATE): DONATION_SANDBOX_IMAGE,
(False, self.PASTEVID_MONTHLY): self.MONTHLY_IMAGE,
(False, self.PASTEVID_YEARLY): self.YEARLY_IMAGE,
(False, self.SUBSCRIBE): SUBSCRIPTION_IMAGE,
(False, self.BUY): IMAGE,
(False, self.DONATE): DONATION_IMAGE,
}[TEST, self.button_type]
class PaymentInformationForm(forms.Form):
"""
A form that lets users enter their payment information to be used with
Authorize.net
Note: Authorize.net payment option is currently on backlog
"""
card_number = forms.CharField(required=True, max_length=16)
expiry_date = forms.DateField(required=True, widget=forms.widgets.DateInput(format="%m/%d/%Y") )
card_code = forms.CharField(required=True, max_length=10)
first_name = forms.CharField(required=False, max_length=30)
last_name = forms.CharField(required=False, max_length=30)
company = forms.CharField(required=False, max_length=150)
address = forms.CharField(required=False, max_length=150)
city = forms.CharField(required=False, max_length=150)
state = forms.CharField(required=False, max_length=150)
province = forms.CharField(required=False, max_length=150)
country = forms.CharField(required=False, max_length=150)
zip_code = forms.CharField(required=False, max_length=150)
email = forms.EmailField(required=False)
phone = forms.CharField(required=False, max_length=15)
|
sdk/python/pulumi_azure/loganalytics/linked_service.py | henriktao/pulumi-azure | 109 | 12744185 | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
__all__ = ['LinkedServiceArgs', 'LinkedService']
@pulumi.input_type
class LinkedServiceArgs:
def __init__(__self__, *,
resource_group_name: pulumi.Input[str],
linked_service_name: Optional[pulumi.Input[str]] = None,
read_access_id: Optional[pulumi.Input[str]] = None,
resource_id: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
workspace_id: Optional[pulumi.Input[str]] = None,
workspace_name: Optional[pulumi.Input[str]] = None,
write_access_id: Optional[pulumi.Input[str]] = None):
"""
The set of arguments for constructing a LinkedService resource.
:param pulumi.Input[str] resource_group_name: The name of the resource group in which the Log Analytics Linked Service is created. Changing this forces a new resource to be created.
:param pulumi.Input[str] linked_service_name: Name of the type of linkedServices resource to connect to the Log Analytics Workspace specified in workspace_name. Accepted values are `automation` and `cluster`. Defaults to `automation`. Changing this forces a new resource to be created.
:param pulumi.Input[str] read_access_id: The ID of the readable Resource that will be linked to the workspace. This should be used for linking to an Automation Account resource.
:param pulumi.Input[str] resource_id: The ID of the Resource that will be linked to the workspace. This should be used for linking to an Automation Account resource.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: A mapping of tags to assign to the resource.
:param pulumi.Input[str] workspace_id: The ID of the Log Analytics Workspace that will contain the Log Analytics Linked Service resource. Changing this forces a new resource to be created.
:param pulumi.Input[str] workspace_name: The name of the Log Analytics Workspace that will contain the Log Analytics Linked Service resource. Changing this forces a new resource to be created.
:param pulumi.Input[str] write_access_id: The ID of the writable Resource that will be linked to the workspace. This should be used for linking to a Log Analytics Cluster resource.
"""
pulumi.set(__self__, "resource_group_name", resource_group_name)
if linked_service_name is not None:
warnings.warn("""This field has been deprecated and will be removed in a future version of the provider""", DeprecationWarning)
pulumi.log.warn("""linked_service_name is deprecated: This field has been deprecated and will be removed in a future version of the provider""")
if linked_service_name is not None:
pulumi.set(__self__, "linked_service_name", linked_service_name)
if read_access_id is not None:
pulumi.set(__self__, "read_access_id", read_access_id)
if resource_id is not None:
warnings.warn("""This field has been deprecated in favour of `read_access_id` and will be removed in a future version of the provider""", DeprecationWarning)
pulumi.log.warn("""resource_id is deprecated: This field has been deprecated in favour of `read_access_id` and will be removed in a future version of the provider""")
if resource_id is not None:
pulumi.set(__self__, "resource_id", resource_id)
if tags is not None:
pulumi.set(__self__, "tags", tags)
if workspace_id is not None:
pulumi.set(__self__, "workspace_id", workspace_id)
if workspace_name is not None:
warnings.warn("""This field has been deprecated in favour of `workspace_id` and will be removed in a future version of the provider""", DeprecationWarning)
pulumi.log.warn("""workspace_name is deprecated: This field has been deprecated in favour of `workspace_id` and will be removed in a future version of the provider""")
if workspace_name is not None:
pulumi.set(__self__, "workspace_name", workspace_name)
if write_access_id is not None:
pulumi.set(__self__, "write_access_id", write_access_id)
@property
@pulumi.getter(name="resourceGroupName")
def resource_group_name(self) -> pulumi.Input[str]:
"""
The name of the resource group in which the Log Analytics Linked Service is created. Changing this forces a new resource to be created.
"""
return pulumi.get(self, "resource_group_name")
@resource_group_name.setter
def resource_group_name(self, value: pulumi.Input[str]):
pulumi.set(self, "resource_group_name", value)
@property
@pulumi.getter(name="linkedServiceName")
def linked_service_name(self) -> Optional[pulumi.Input[str]]:
"""
Name of the type of linkedServices resource to connect to the Log Analytics Workspace specified in workspace_name. Accepted values are `automation` and `cluster`. Defaults to `automation`. Changing this forces a new resource to be created.
"""
return pulumi.get(self, "linked_service_name")
@linked_service_name.setter
def linked_service_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "linked_service_name", value)
@property
@pulumi.getter(name="readAccessId")
def read_access_id(self) -> Optional[pulumi.Input[str]]:
"""
The ID of the readable Resource that will be linked to the workspace. This should be used for linking to an Automation Account resource.
"""
return pulumi.get(self, "read_access_id")
@read_access_id.setter
def read_access_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "read_access_id", value)
@property
@pulumi.getter(name="resourceId")
def resource_id(self) -> Optional[pulumi.Input[str]]:
"""
The ID of the Resource that will be linked to the workspace. This should be used for linking to an Automation Account resource.
"""
return pulumi.get(self, "resource_id")
@resource_id.setter
def resource_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "resource_id", value)
@property
@pulumi.getter
def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
"""
A mapping of tags to assign to the resource.
"""
return pulumi.get(self, "tags")
@tags.setter
def tags(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
pulumi.set(self, "tags", value)
@property
@pulumi.getter(name="workspaceId")
def workspace_id(self) -> Optional[pulumi.Input[str]]:
"""
The ID of the Log Analytics Workspace that will contain the Log Analytics Linked Service resource. Changing this forces a new resource to be created.
"""
return pulumi.get(self, "workspace_id")
@workspace_id.setter
def workspace_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "workspace_id", value)
@property
@pulumi.getter(name="workspaceName")
def workspace_name(self) -> Optional[pulumi.Input[str]]:
"""
The name of the Log Analytics Workspace that will contain the Log Analytics Linked Service resource. Changing this forces a new resource to be created.
"""
return pulumi.get(self, "workspace_name")
@workspace_name.setter
def workspace_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "workspace_name", value)
@property
@pulumi.getter(name="writeAccessId")
def write_access_id(self) -> Optional[pulumi.Input[str]]:
"""
The ID of the writable Resource that will be linked to the workspace. This should be used for linking to a Log Analytics Cluster resource.
"""
return pulumi.get(self, "write_access_id")
@write_access_id.setter
def write_access_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "write_access_id", value)
@pulumi.input_type
class _LinkedServiceState:
def __init__(__self__, *,
linked_service_name: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
read_access_id: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
resource_id: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
workspace_id: Optional[pulumi.Input[str]] = None,
workspace_name: Optional[pulumi.Input[str]] = None,
write_access_id: Optional[pulumi.Input[str]] = None):
"""
Input properties used for looking up and filtering LinkedService resources.
:param pulumi.Input[str] linked_service_name: Name of the type of linkedServices resource to connect to the Log Analytics Workspace specified in workspace_name. Accepted values are `automation` and `cluster`. Defaults to `automation`. Changing this forces a new resource to be created.
:param pulumi.Input[str] name: The generated name of the Linked Service. The format for this attribute is always `<workspace name>/<linked service type>`(e.g. `workspace1/Automation` or `workspace1/Cluster`)
:param pulumi.Input[str] read_access_id: The ID of the readable Resource that will be linked to the workspace. This should be used for linking to an Automation Account resource.
:param pulumi.Input[str] resource_group_name: The name of the resource group in which the Log Analytics Linked Service is created. Changing this forces a new resource to be created.
:param pulumi.Input[str] resource_id: The ID of the Resource that will be linked to the workspace. This should be used for linking to an Automation Account resource.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: A mapping of tags to assign to the resource.
:param pulumi.Input[str] workspace_id: The ID of the Log Analytics Workspace that will contain the Log Analytics Linked Service resource. Changing this forces a new resource to be created.
:param pulumi.Input[str] workspace_name: The name of the Log Analytics Workspace that will contain the Log Analytics Linked Service resource. Changing this forces a new resource to be created.
:param pulumi.Input[str] write_access_id: The ID of the writable Resource that will be linked to the workspace. This should be used for linking to a Log Analytics Cluster resource.
"""
if linked_service_name is not None:
warnings.warn("""This field has been deprecated and will be removed in a future version of the provider""", DeprecationWarning)
pulumi.log.warn("""linked_service_name is deprecated: This field has been deprecated and will be removed in a future version of the provider""")
if linked_service_name is not None:
pulumi.set(__self__, "linked_service_name", linked_service_name)
if name is not None:
pulumi.set(__self__, "name", name)
if read_access_id is not None:
pulumi.set(__self__, "read_access_id", read_access_id)
if resource_group_name is not None:
pulumi.set(__self__, "resource_group_name", resource_group_name)
if resource_id is not None:
warnings.warn("""This field has been deprecated in favour of `read_access_id` and will be removed in a future version of the provider""", DeprecationWarning)
pulumi.log.warn("""resource_id is deprecated: This field has been deprecated in favour of `read_access_id` and will be removed in a future version of the provider""")
if resource_id is not None:
pulumi.set(__self__, "resource_id", resource_id)
if tags is not None:
pulumi.set(__self__, "tags", tags)
if workspace_id is not None:
pulumi.set(__self__, "workspace_id", workspace_id)
if workspace_name is not None:
warnings.warn("""This field has been deprecated in favour of `workspace_id` and will be removed in a future version of the provider""", DeprecationWarning)
pulumi.log.warn("""workspace_name is deprecated: This field has been deprecated in favour of `workspace_id` and will be removed in a future version of the provider""")
if workspace_name is not None:
pulumi.set(__self__, "workspace_name", workspace_name)
if write_access_id is not None:
pulumi.set(__self__, "write_access_id", write_access_id)
@property
@pulumi.getter(name="linkedServiceName")
def linked_service_name(self) -> Optional[pulumi.Input[str]]:
"""
Name of the type of linkedServices resource to connect to the Log Analytics Workspace specified in workspace_name. Accepted values are `automation` and `cluster`. Defaults to `automation`. Changing this forces a new resource to be created.
"""
return pulumi.get(self, "linked_service_name")
@linked_service_name.setter
def linked_service_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "linked_service_name", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
The generated name of the Linked Service. The format for this attribute is always `<workspace name>/<linked service type>`(e.g. `workspace1/Automation` or `workspace1/Cluster`)
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter(name="readAccessId")
def read_access_id(self) -> Optional[pulumi.Input[str]]:
"""
The ID of the readable Resource that will be linked to the workspace. This should be used for linking to an Automation Account resource.
"""
return pulumi.get(self, "read_access_id")
@read_access_id.setter
def read_access_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "read_access_id", value)
@property
@pulumi.getter(name="resourceGroupName")
def resource_group_name(self) -> Optional[pulumi.Input[str]]:
"""
The name of the resource group in which the Log Analytics Linked Service is created. Changing this forces a new resource to be created.
"""
return pulumi.get(self, "resource_group_name")
@resource_group_name.setter
def resource_group_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "resource_group_name", value)
@property
@pulumi.getter(name="resourceId")
def resource_id(self) -> Optional[pulumi.Input[str]]:
"""
The ID of the Resource that will be linked to the workspace. This should be used for linking to an Automation Account resource.
"""
return pulumi.get(self, "resource_id")
@resource_id.setter
def resource_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "resource_id", value)
@property
@pulumi.getter
def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
"""
A mapping of tags to assign to the resource.
"""
return pulumi.get(self, "tags")
@tags.setter
def tags(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
pulumi.set(self, "tags", value)
@property
@pulumi.getter(name="workspaceId")
def workspace_id(self) -> Optional[pulumi.Input[str]]:
"""
The ID of the Log Analytics Workspace that will contain the Log Analytics Linked Service resource. Changing this forces a new resource to be created.
"""
return pulumi.get(self, "workspace_id")
@workspace_id.setter
def workspace_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "workspace_id", value)
@property
@pulumi.getter(name="workspaceName")
def workspace_name(self) -> Optional[pulumi.Input[str]]:
"""
The name of the Log Analytics Workspace that will contain the Log Analytics Linked Service resource. Changing this forces a new resource to be created.
"""
return pulumi.get(self, "workspace_name")
@workspace_name.setter
def workspace_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "workspace_name", value)
@property
@pulumi.getter(name="writeAccessId")
def write_access_id(self) -> Optional[pulumi.Input[str]]:
"""
The ID of the writable Resource that will be linked to the workspace. This should be used for linking to a Log Analytics Cluster resource.
"""
return pulumi.get(self, "write_access_id")
@write_access_id.setter
def write_access_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "write_access_id", value)
class LinkedService(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
linked_service_name: Optional[pulumi.Input[str]] = None,
read_access_id: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
resource_id: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
workspace_id: Optional[pulumi.Input[str]] = None,
workspace_name: Optional[pulumi.Input[str]] = None,
write_access_id: Optional[pulumi.Input[str]] = None,
__props__=None):
"""
Manages a Log Analytics Linked Service.
## Example Usage
```python
import pulumi
import pulumi_azure as azure
example_resource_group = azure.core.ResourceGroup("exampleResourceGroup", location="West Europe")
example_account = azure.automation.Account("exampleAccount",
location=example_resource_group.location,
resource_group_name=example_resource_group.name,
sku_name="Basic",
tags={
"environment": "development",
})
example_analytics_workspace = azure.operationalinsights.AnalyticsWorkspace("exampleAnalyticsWorkspace",
location=example_resource_group.location,
resource_group_name=example_resource_group.name,
sku="PerGB2018",
retention_in_days=30)
example_linked_service = azure.loganalytics.LinkedService("exampleLinkedService",
resource_group_name=example_resource_group.name,
workspace_id=example_analytics_workspace.id,
read_access_id=example_account.id)
```
## Import
        Log Analytics Linked Services can be imported using the `resource id`, e.g.
```sh
$ pulumi import azure:loganalytics/linkedService:LinkedService example /subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/mygroup1/providers/Microsoft.OperationalInsights/workspaces/workspace1/linkedServices/Automation
```
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] linked_service_name: Name of the type of linkedServices resource to connect to the Log Analytics Workspace specified in workspace_name. Accepted values are `automation` and `cluster`. Defaults to `automation`. Changing this forces a new resource to be created.
:param pulumi.Input[str] read_access_id: The ID of the readable Resource that will be linked to the workspace. This should be used for linking to an Automation Account resource.
:param pulumi.Input[str] resource_group_name: The name of the resource group in which the Log Analytics Linked Service is created. Changing this forces a new resource to be created.
:param pulumi.Input[str] resource_id: The ID of the Resource that will be linked to the workspace. This should be used for linking to an Automation Account resource.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: A mapping of tags to assign to the resource.
:param pulumi.Input[str] workspace_id: The ID of the Log Analytics Workspace that will contain the Log Analytics Linked Service resource. Changing this forces a new resource to be created.
:param pulumi.Input[str] workspace_name: The name of the Log Analytics Workspace that will contain the Log Analytics Linked Service resource. Changing this forces a new resource to be created.
:param pulumi.Input[str] write_access_id: The ID of the writable Resource that will be linked to the workspace. This should be used for linking to a Log Analytics Cluster resource.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: LinkedServiceArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
Manages a Log Analytics Linked Service.
## Example Usage
```python
import pulumi
import pulumi_azure as azure
example_resource_group = azure.core.ResourceGroup("exampleResourceGroup", location="West Europe")
example_account = azure.automation.Account("exampleAccount",
location=example_resource_group.location,
resource_group_name=example_resource_group.name,
sku_name="Basic",
tags={
"environment": "development",
})
example_analytics_workspace = azure.operationalinsights.AnalyticsWorkspace("exampleAnalyticsWorkspace",
location=example_resource_group.location,
resource_group_name=example_resource_group.name,
sku="PerGB2018",
retention_in_days=30)
example_linked_service = azure.loganalytics.LinkedService("exampleLinkedService",
resource_group_name=example_resource_group.name,
workspace_id=example_analytics_workspace.id,
read_access_id=example_account.id)
```
## Import
        Log Analytics Linked Services can be imported using the `resource id`, e.g.
```sh
$ pulumi import azure:loganalytics/linkedService:LinkedService example /subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/mygroup1/providers/Microsoft.OperationalInsights/workspaces/workspace1/linkedServices/Automation
```
:param str resource_name: The name of the resource.
:param LinkedServiceArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(LinkedServiceArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
linked_service_name: Optional[pulumi.Input[str]] = None,
read_access_id: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
resource_id: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
workspace_id: Optional[pulumi.Input[str]] = None,
workspace_name: Optional[pulumi.Input[str]] = None,
write_access_id: Optional[pulumi.Input[str]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = LinkedServiceArgs.__new__(LinkedServiceArgs)
if linked_service_name is not None and not opts.urn:
warnings.warn("""This field has been deprecated and will be removed in a future version of the provider""", DeprecationWarning)
pulumi.log.warn("""linked_service_name is deprecated: This field has been deprecated and will be removed in a future version of the provider""")
__props__.__dict__["linked_service_name"] = linked_service_name
__props__.__dict__["read_access_id"] = read_access_id
if resource_group_name is None and not opts.urn:
raise TypeError("Missing required property 'resource_group_name'")
__props__.__dict__["resource_group_name"] = resource_group_name
if resource_id is not None and not opts.urn:
warnings.warn("""This field has been deprecated in favour of `read_access_id` and will be removed in a future version of the provider""", DeprecationWarning)
pulumi.log.warn("""resource_id is deprecated: This field has been deprecated in favour of `read_access_id` and will be removed in a future version of the provider""")
__props__.__dict__["resource_id"] = resource_id
__props__.__dict__["tags"] = tags
__props__.__dict__["workspace_id"] = workspace_id
if workspace_name is not None and not opts.urn:
warnings.warn("""This field has been deprecated in favour of `workspace_id` and will be removed in a future version of the provider""", DeprecationWarning)
pulumi.log.warn("""workspace_name is deprecated: This field has been deprecated in favour of `workspace_id` and will be removed in a future version of the provider""")
__props__.__dict__["workspace_name"] = workspace_name
__props__.__dict__["write_access_id"] = write_access_id
__props__.__dict__["name"] = None
super(LinkedService, __self__).__init__(
'azure:loganalytics/linkedService:LinkedService',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None,
linked_service_name: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
read_access_id: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
resource_id: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
workspace_id: Optional[pulumi.Input[str]] = None,
workspace_name: Optional[pulumi.Input[str]] = None,
write_access_id: Optional[pulumi.Input[str]] = None) -> 'LinkedService':
"""
Get an existing LinkedService resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] linked_service_name: Name of the type of linkedServices resource to connect to the Log Analytics Workspace specified in workspace_name. Accepted values are `automation` and `cluster`. Defaults to `automation`. Changing this forces a new resource to be created.
:param pulumi.Input[str] name: The generated name of the Linked Service. The format for this attribute is always `<workspace name>/<linked service type>`(e.g. `workspace1/Automation` or `workspace1/Cluster`)
:param pulumi.Input[str] read_access_id: The ID of the readable Resource that will be linked to the workspace. This should be used for linking to an Automation Account resource.
:param pulumi.Input[str] resource_group_name: The name of the resource group in which the Log Analytics Linked Service is created. Changing this forces a new resource to be created.
:param pulumi.Input[str] resource_id: The ID of the Resource that will be linked to the workspace. This should be used for linking to an Automation Account resource.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: A mapping of tags to assign to the resource.
:param pulumi.Input[str] workspace_id: The ID of the Log Analytics Workspace that will contain the Log Analytics Linked Service resource. Changing this forces a new resource to be created.
:param pulumi.Input[str] workspace_name: The name of the Log Analytics Workspace that will contain the Log Analytics Linked Service resource. Changing this forces a new resource to be created.
:param pulumi.Input[str] write_access_id: The ID of the writable Resource that will be linked to the workspace. This should be used for linking to a Log Analytics Cluster resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = _LinkedServiceState.__new__(_LinkedServiceState)
__props__.__dict__["linked_service_name"] = linked_service_name
__props__.__dict__["name"] = name
__props__.__dict__["read_access_id"] = read_access_id
__props__.__dict__["resource_group_name"] = resource_group_name
__props__.__dict__["resource_id"] = resource_id
__props__.__dict__["tags"] = tags
__props__.__dict__["workspace_id"] = workspace_id
__props__.__dict__["workspace_name"] = workspace_name
__props__.__dict__["write_access_id"] = write_access_id
return LinkedService(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="linkedServiceName")
def linked_service_name(self) -> pulumi.Output[str]:
"""
Name of the type of linkedServices resource to connect to the Log Analytics Workspace specified in workspace_name. Accepted values are `automation` and `cluster`. Defaults to `automation`. Changing this forces a new resource to be created.
"""
return pulumi.get(self, "linked_service_name")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
The generated name of the Linked Service. The format for this attribute is always `<workspace name>/<linked service type>`(e.g. `workspace1/Automation` or `workspace1/Cluster`)
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="readAccessId")
def read_access_id(self) -> pulumi.Output[str]:
"""
The ID of the readable Resource that will be linked to the workspace. This should be used for linking to an Automation Account resource.
"""
return pulumi.get(self, "read_access_id")
@property
@pulumi.getter(name="resourceGroupName")
def resource_group_name(self) -> pulumi.Output[str]:
"""
The name of the resource group in which the Log Analytics Linked Service is created. Changing this forces a new resource to be created.
"""
return pulumi.get(self, "resource_group_name")
@property
@pulumi.getter(name="resourceId")
def resource_id(self) -> pulumi.Output[str]:
"""
The ID of the Resource that will be linked to the workspace. This should be used for linking to an Automation Account resource.
"""
return pulumi.get(self, "resource_id")
@property
@pulumi.getter
def tags(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
"""
A mapping of tags to assign to the resource.
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter(name="workspaceId")
def workspace_id(self) -> pulumi.Output[str]:
"""
The ID of the Log Analytics Workspace that will contain the Log Analytics Linked Service resource. Changing this forces a new resource to be created.
"""
return pulumi.get(self, "workspace_id")
@property
@pulumi.getter(name="workspaceName")
def workspace_name(self) -> pulumi.Output[str]:
"""
The name of the Log Analytics Workspace that will contain the Log Analytics Linked Service resource. Changing this forces a new resource to be created.
"""
return pulumi.get(self, "workspace_name")
@property
@pulumi.getter(name="writeAccessId")
def write_access_id(self) -> pulumi.Output[Optional[str]]:
"""
The ID of the writable Resource that will be linked to the workspace. This should be used for linking to a Log Analytics Cluster resource.
"""
return pulumi.get(self, "write_access_id")
|
test cases/unit/68 test env value/test.py | iinuwa/meson | 4,047 | 12744211 | #!/usr/bin/env python3
import os
import sys
assert os.environ['TEST_VAR'] == sys.argv[1]
|
nengo/builder/tests/test_processes.py | gokhanagma/nengo | 762 | 12744229 |
import pytest
from nengo.builder.processes import SimProcess
from nengo.builder.tests.test_operator import _test_operator_arg_attributes
def test_simprocess():
argnames = ["process", "input", "output", "t"]
non_signals = ["process"]
_, sim = _test_operator_arg_attributes(
SimProcess, argnames, non_signals=non_signals
)
assert str(sim) == "SimProcess{process, input -> output}"
with pytest.raises(ValueError, match="Unrecognized mode"):
_test_operator_arg_attributes(SimProcess, argnames, args={"mode": "badval"})
|
src/yggdrasil/cython-0.25.2/tests/run/ass2global.py | fengjixuchui/hydra | 110 | 12744231 | """
>>> getg()
5
>>> setg(42)
>>> getg()
42
"""
g = 5
def setg(a):
global g
g = a
def getg():
return g
class Test(object):
"""
>>> global_in_class
9
>>> Test.global_in_class
Traceback (most recent call last):
AttributeError: type object 'Test' has no attribute 'global_in_class'
>>> Test().global_in_class
Traceback (most recent call last):
AttributeError: 'Test' object has no attribute 'global_in_class'
"""
global global_in_class
global_in_class = 9
|
venv/Lib/site-packages/jedi/inference/utils.py | ajayiagbebaku/NFL-Model | 4,213 | 12744281 |
""" A universal module with functions / classes without dependencies. """
import functools
import re
import os
_sep = os.path.sep
if os.path.altsep is not None:
_sep += os.path.altsep
_path_re = re.compile(r'(?:\.[^{0}]+|[{0}]__init__\.py)$'.format(re.escape(_sep)))
del _sep
def to_list(func):
def wrapper(*args, **kwargs):
return list(func(*args, **kwargs))
return wrapper
def to_tuple(func):
def wrapper(*args, **kwargs):
return tuple(func(*args, **kwargs))
return wrapper
def unite(iterable):
"""Turns a two dimensional array into a one dimensional."""
return set(typ for types in iterable for typ in types)
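# Illustrative note (added): `unite` flattens one level of nesting into a set, e.g.
#     unite([[1, 2], [2, 3]])   # -> {1, 2, 3}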
class UncaughtAttributeError(Exception):
"""
Important, because `__getattr__` and `hasattr` catch AttributeErrors
implicitly. This is really evil (mainly because of `__getattr__`).
Therefore this class originally had to be derived from `BaseException`
    instead of `Exception`. But because I removed the relevant `hasattr` calls from
the code base, we can now switch back to `Exception`.
:param base: return values of sys.exc_info().
"""
def safe_property(func):
return property(reraise_uncaught(func))
def reraise_uncaught(func):
"""
Re-throw uncaught `AttributeError`.
    Usage: Put ``@reraise_uncaught`` in front of the function
    which is **not** supposed to raise `AttributeError`.
    AttributeError is easily caught by `hasattr` and another
    ``except AttributeError`` clause. This becomes a problem when you use
    a lot of "dynamic" attributes (e.g., using ``@property``) because you
    can't distinguish whether the property really does not exist or some code
    inside of the "dynamic" attribute threw that error. In well-written
    code, such an error should not exist, but getting there is very
    difficult. This decorator helps us get there by changing
    `AttributeError` to `UncaughtAttributeError` to avoid an unexpected catch.
    This helps us notice bugs earlier and facilitates debugging.
"""
@functools.wraps(func)
def wrapper(*args, **kwds):
try:
return func(*args, **kwds)
except AttributeError as e:
raise UncaughtAttributeError(e) from e
return wrapper
class PushBackIterator:
def __init__(self, iterator):
self.pushes = []
self.iterator = iterator
self.current = None
def push_back(self, value):
self.pushes.append(value)
def __iter__(self):
return self
def __next__(self):
if self.pushes:
self.current = self.pushes.pop()
else:
self.current = next(self.iterator)
return self.current
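# Illustrative usage (added; not executed here): a value that is pushed back is
# yielded again by the next call to ``next``.
#     it = PushBackIterator(iter([1, 2, 3]))
#     first = next(it)       # 1
#     it.push_back(first)
#     next(it)               # 1 again
#     next(it)               # 2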
|
Validation/EcalClusters/test/macro/plotMeanVsET.py | ckamtsikis/cmssw | 852 | 12744286 | import re, sys, os
file = open("output", "r")
lines = file.readlines()
file.close()
variable = []
eta1 = []
eta2 = []
mean = []
error= []
effS = []
for line in lines:
elements = re.split("\t", line)
variable += [elements[1],]
eta1 += [re.split(">", re.split("&&", elements[2])[0])[1],]
eta2 += [re.split("<", elements[2])[1],]
mean += [elements[3],]
error+= [elements[4],]
effS += [elements[5][:-1],]
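# Assumed input format (added note): each line of "output" is expected to be
# tab-separated, roughly
#     <label> \t <variable> \t <low cut>&&<high cut> \t <mean> \t <error> \t <effSigma>
# e.g. "fit\tem_et\tet>20&&et<30\t0.98\t0.01\t0.02" -- the column labels here are an
# illustration inferred from the indices used above, not taken from the file itself.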
header = """void plot_MeanVsET(){
TCanvas *c1 = new TCanvas("c1","Mean vs ET", 800, 600);
TH1F* h_emCorr_et = new TH1F("h_emCorr_et","",300,0,300);
TH1F* h_em_et = new TH1F("h_em_et","",300,0,300);
c1->cd();
"""
file = open("plot_MeanVsET.C", "w")
file.write(header)
for i in ("emCorr_et", "em_et"):
for j in range(0, len(eta1)):
if variable[j] != i:
continue
bin = str(int((float(eta1[j]) + float(eta2[j]))/2))
file.write(" h_" + i + "->SetBinContent(" + bin + ", " + mean[j] + ");\n")
file.write(" h_" + i + "->SetBinError (" + bin + ", " + error[j] +");\n")
file.write(" h_emCorr_et->SetMarkerStyle(23);\n")
file.write(" h_em_et ->SetMarkerStyle(20);\n")
file.write(" h_emCorr_et->SetMarkerColor(4);\n")
file.write(" h_em_et ->SetMarkerColor(1);\n")
file.write(" gStyle->SetOptStat(0);\n")
file.write(" h_em_et ->Draw();\n")
file.write(" h_emCorr_et->Draw(\"SAME\");\n")
file.write(" TLine* line = new TLine(0,1,300,1);\n")
file.write(" line->Draw();\n")
header ="""
TAxis* ax = h_em_et->GetXaxis();
ax->SetTitle("Et (GeV)");
TAxis* ay = h_em_et->GetYaxis();
ay->SetTitle("E_{T}^{RECO}/E_{T}^{MC}");
ay->SetRangeUser(0.9,1.05);
TLegend *leg = new TLegend(0.2, 0.2, 0.4, 0.4);
leg->AddEntry(h_em_et, "Before correction");
leg->AddEntry(h_emCorr_et, "After correction ");
leg->Draw();
TLine* line = new TLine(0,1,1.5,1);
line->SetLineWidth(2);
line->SetLineColor(2);
line->Draw();
c1->Print("MeanVsET.ps");
gROOT->ProcessLine(".q");
"""
file.write(header)
file.write("}\n")
|
pip_audit/_dependency_source/__init__.py | westonsteimel/pip-audit | 447 | 12744294 |
"""
Dependency source interfaces and implementations for `pip-audit`.
"""
from .interface import (
DependencyFixError,
DependencyResolver,
DependencyResolverError,
DependencySource,
DependencySourceError,
)
from .pip import PipSource, PipSourceError
from .requirement import RequirementSource
from .resolvelib import ResolveLibResolver
__all__ = [
"DependencyFixError",
"DependencyResolver",
"DependencyResolverError",
"DependencySource",
"DependencySourceError",
"PipSource",
"PipSourceError",
"RequirementSource",
"ResolveLibResolver",
]
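# Illustrative use of the re-exports above (not part of the original module):
#     from pip_audit._dependency_source import PipSource
# picks up the source that enumerates packages installed in the current pip
# environment.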
|
setup.py | aaltay/apitools | 143 | 12744297 | #!/usr/bin/env python
#
# Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Setup configuration."""
import platform
try:
import setuptools
except ImportError:
from ez_setup import use_setuptools
use_setuptools()
import setuptools
# Configure the required packages and scripts to install, depending on
# Python version and OS.
REQUIRED_PACKAGES = [
'httplib2>=0.8',
'fasteners>=0.14',
'oauth2client>=1.4.12',
'six>=1.12.0',
]
CLI_PACKAGES = [
'python-gflags>=3.0.6',
]
TESTING_PACKAGES = [
'mock>=1.0.1',
]
CONSOLE_SCRIPTS = [
'gen_client = apitools.gen.gen_client:main',
]
py_version = platform.python_version()
_APITOOLS_VERSION = '0.5.32'
with open('README.rst') as fileobj:
README = fileobj.read()
setuptools.setup(
name='google-apitools',
version=_APITOOLS_VERSION,
description='client libraries for humans',
long_description=README,
url='http://github.com/google/apitools',
author='<NAME>',
author_email='<EMAIL>',
python_requires='>=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*',
# Contained modules and scripts.
packages=setuptools.find_packages(include=['apitools']),
entry_points={'console_scripts': CONSOLE_SCRIPTS},
install_requires=REQUIRED_PACKAGES,
tests_require=REQUIRED_PACKAGES + CLI_PACKAGES + TESTING_PACKAGES,
extras_require={
'cli': CLI_PACKAGES,
'testing': TESTING_PACKAGES,
},
# Add in any packaged data.
include_package_data=True,
package_data={
'apitools.data': ['*'],
},
exclude_package_data={
'': [
'*_test.py',
'*/testing/*',
'*/testdata/*',
'base/protorpclite/test_util.py',
'gen/test_utils.py',
],
},
# PyPI package information.
classifiers=[
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.5',
'Topic :: Software Development :: Libraries',
'Topic :: Software Development :: Libraries :: Python Modules',
],
license='Apache 2.0',
keywords='apitools',
)
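# Illustrative install commands for the extras declared above (assumed usage):
#     pip install google-apitools[cli]       # pulls in the python-gflags CLI deps
#     pip install google-apitools[testing]   # pulls in mock for the test suite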
|
scispacy/base_project_code.py | rajeshkppt/scispacy | 1,139 | 12744317 |
from typing import Optional, Callable, Iterable, Iterator
from pathlib import Path
import random
import itertools
import spacy
import warnings
from spacy.training import Corpus, Example
from spacy.language import Language
from scispacy.custom_tokenizer import combined_rule_tokenizer
from scispacy.data_util import read_full_med_mentions, read_ner_from_tsv
def iter_sample(iterable: Iterable, sample_percent: float) -> Iterator:
for item in iterable:
if len(item.reference) == 0:
continue
coin_flip = random.uniform(0, 1)
if coin_flip < sample_percent:
yield item
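# Added note: each Example with a non-empty reference is kept with probability
# `sample_percent`, so on average that fraction of the mixin corpus is yielded.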
@spacy.registry.callbacks("replace_tokenizer")
def replace_tokenizer_callback() -> Callable[[Language], Language]:
def replace_tokenizer(nlp: Language) -> Language:
nlp.tokenizer = combined_rule_tokenizer(nlp)
return nlp
return replace_tokenizer
@spacy.registry.readers("parser_tagger_data")
def parser_tagger_data(
path: Path,
mixin_data_path: Optional[Path],
mixin_data_percent: float,
gold_preproc: bool,
max_length: int = 0,
limit: int = 0,
augmenter: Optional[Callable] = None,
seed: int = 0,
) -> Callable[[Language], Iterator[Example]]:
random.seed(seed)
main_corpus = Corpus(
path,
gold_preproc=gold_preproc,
max_length=max_length,
limit=limit,
augmenter=augmenter,
)
if mixin_data_path is not None:
mixin_corpus = Corpus(
mixin_data_path,
gold_preproc=gold_preproc,
max_length=max_length,
limit=limit,
augmenter=augmenter,
)
def mixed_corpus(nlp: Language) -> Iterator[Example]:
if mixin_data_path is not None:
main_examples = main_corpus(nlp)
mixin_examples = iter_sample(mixin_corpus(nlp), mixin_data_percent)
return itertools.chain(main_examples, mixin_examples)
else:
return main_corpus(nlp)
return mixed_corpus
@spacy.registry.readers("med_mentions_reader")
def med_mentions_reader(
directory_path: str, split: str
) -> Callable[[Language], Iterator[Example]]:
train, dev, test = read_full_med_mentions(
directory_path, label_mapping=None, span_only=True, spacy_format=True
)
def corpus(nlp: Language) -> Iterator[Example]:
if split == "train":
original_examples = train
elif split == "dev":
original_examples = dev
elif split == "test":
original_examples = test
else:
raise Exception(f"Unexpected split {split}")
for original_example in original_examples:
doc = nlp.make_doc(original_example[0])
with warnings.catch_warnings():
warnings.simplefilter("ignore", category=UserWarning)
spacy_example = Example.from_dict(doc, original_example[1])
yield spacy_example
return corpus
@spacy.registry.readers("specialized_ner_reader")
def specialized_ner_reader(file_path: str):
original_examples = read_ner_from_tsv(file_path)
def corpus(nlp: Language):
for original_example in original_examples:
doc = nlp.make_doc(original_example[0])
with warnings.catch_warnings():
warnings.simplefilter("ignore", category=UserWarning)
spacy_example = Example.from_dict(doc, original_example[1])
yield spacy_example
return corpus
|
tests/refresh_reference_files.py | hgroll/tikzplotlib | 646 | 12744319 |
import argparse
import importlib.util
import pathlib
import matplotlib.pyplot as plt
import tikzplotlib as tpl
def _main():
parser = argparse.ArgumentParser(description="Refresh all reference TeX files.")
parser.parse_args()
this_dir = pathlib.Path(__file__).resolve().parent
test_files = [
f
for f in this_dir.iterdir()
if (this_dir / f).is_file() and f.name[:5] == "test_" and f.name[-3:] == ".py"
]
test_modules = [f.name[:-3] for f in test_files]
# remove some edge cases
test_modules.remove("test_rotated_labels")
test_modules.remove("test_deterministic_output")
test_modules.remove("test_cleanfigure")
test_modules.remove("test_context")
for mod in test_modules:
module = importlib.import_module(mod)
module.plot()
code = tpl.get_tikz_code(include_disclaimer=False, float_format=".8g")
plt.close("all")
tex_filename = mod + "_reference.tex"
with open(this_dir / tex_filename, "w", encoding="utf8") as f:
f.write(code)
if __name__ == "__main__":
_main()
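# Illustrative invocation (assumed): run `python refresh_reference_files.py` from this
# tests directory; each test module's plot() is rendered and its *_reference.tex file
# is rewritten next to the tests.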
|
adblockparser/__init__.py | rriemann/adblockparser | 162 | 12744332 | # -*- coding: utf-8 -*-
from __future__ import absolute_import
from .parser import AdblockRules, AdblockRule, AdblockParsingError
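# Illustrative usage of the public API re-exported above (mirrors the upstream README):
#     rules = AdblockRules(["||ads.example.com^"])
#     rules.should_block("http://ads.example.com/img.gif")   # -> True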
|
tests/test_kernels.py | RevanMacQueen/HpBandSter | 546 | 12744339 |
import os
import unittest
import numpy as np
#from scipy.integrate import quadrature as quadrature
from scipy.integrate import quad as quadrature
from statsmodels.nonparametric import kernels as sm_kernels
from hpbandster.optimizers.kde import kernels as hp_kernels
import ConfigSpace as CS
from pdb import set_trace
rapid_development=True
rapid_development=False
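# Added note: flipping rapid_development back to True makes the @unittest.skipIf
# decorators below skip the slower numerical tests while developing new ones.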
class TestGaussian(unittest.TestCase):
n_train = 256
n_test = 1024
def setUp(self):
self.x_train = np.random.rand(self.n_train)
self.x_test = np.random.rand(self.n_test)
def tearDown(self):
self.x_train = None
self.x_test = None
@unittest.skipIf(rapid_development, "test skipped to accelerate developing new tests")
def test_values(self):
for bw in [1e-3, 1e-2, 1e-1, 1]:
sm_values = sm_kernels.gaussian(bw, self.x_train[:,None], self.x_test[None,:])
hp_kernel = hp_kernels.Gaussian(data=self.x_train, bandwidth=bw, fix_boundary=False)
hp_values = hp_kernel(self.x_test)
self.assertTrue(np.allclose(hp_values, sm_values/bw, 1e-4))
@unittest.skipIf(rapid_development, "test skipped to accelerate developing new tests")
def test_pdf_boundary_simple(self):
self.x_train = np.array([0])
for bw in [1e-3, 1e-2, 1e-1]:
# note: for larger bandwidths, the pdf also needs to be truncated as +1,
# which leads to something different than twice the pdf
hp_kernel1 = hp_kernels.Gaussian(data=self.x_train, bandwidth=bw, fix_boundary=False)
hp_kernel2 = hp_kernels.Gaussian(data=self.x_train, bandwidth=bw, fix_boundary=True)
hp_values1 = hp_kernel1(self.x_test)
hp_values2 = hp_kernel2(self.x_test)
self.assertTrue(np.allclose(2*hp_values1, hp_values2, 1e-4))
self.x_train = np.array([1])
for bw in [1e-3, 1e-2, 1e-1]:
# note: for larger bandwidths, the pdf also needs to be truncated as +1,
# which leads to something different than twice the pdf
hp_kernel1 = hp_kernels.Gaussian(data=self.x_train, bandwidth=bw, fix_boundary=False)
hp_kernel2 = hp_kernels.Gaussian(data=self.x_train, bandwidth=bw, fix_boundary=True)
hp_values1 = hp_kernel1(self.x_test)
hp_values2 = hp_kernel2(self.x_test)
self.assertTrue(np.allclose(2*hp_values1, hp_values2, 1e-4))
# simple test based on 68, 95, 99% rule
self.x_train = np.array([0.5])
for bw, w in ([0.5, 0.6827], [0.25, 0.9545], [1/6, 0.9973]):
hp_kernel = hp_kernels.Gaussian(data=self.x_train, bandwidth=bw, fix_boundary=True)
self.assertAlmostEqual(hp_kernel.weights[0], 1/w, delta=1e-4)
@unittest.skipIf(rapid_development, "test skipped to accelerate developing new tests")
def test_pdf_boundary_quadrature(self):
for bw in [1e-2, 1e-1, 1]:
hp_kernel = hp_kernels.Gaussian(data=self.x_train, bandwidth=bw, fix_boundary=True)
def quad_me(x):
x_test = np.array([x])
pdfs = hp_kernel(x_test)
return(pdfs.mean())
self.assertAlmostEqual(quadrature(quad_me, 0, 1)[0], 1, delta=1e-4)
@unittest.skipIf(rapid_development, "test skipped to accelerate developing new tests")
def test_sample(self):
num_samples = 2**20
for bw in [1e-1, 5e-1, 1]:
hp_kernel = hp_kernels.Gaussian(data=self.x_train, bandwidth=bw, fix_boundary=True)
samples = hp_kernel.sample(num_samples=num_samples)
phat1, x = np.histogram(samples, normed=True)
phat2 = hp_kernel((x[1:] + x[:-1])/2).mean(axis=0)
for p1, p2 in zip(phat1, phat2):
self.assertAlmostEqual(p1, p2, delta=5e-2)
class Test1dCategorical(unittest.TestCase):
n_train = 256
n_test = 1024
def setUp(self):
self.configspace = CS.ConfigurationSpace(43)
HPs=[]
HPs.append( CS.CategoricalHyperparameter('cat1', choices=['foo', 'bar', 'baz']))
self.configspace.add_hyperparameters(HPs)
x_train_confs = [ self.configspace.sample_configuration() for i in range(self.n_train)]
self.x_train = np.array( [c.get_array() for c in x_train_confs]).squeeze()
x_test_confs = [ self.configspace.sample_configuration() for i in range(self.n_test)]
self.x_test= np.array( [c.get_array() for c in x_train_confs]).squeeze()
def tearDown(self):
self.configspace = None
self.x_train = None
self.x_test = None
@unittest.skipIf(rapid_development, "test skipped to accelerate developing new tests")
def test_values(self):
for bw in [1e-3, 1e-2, 1e-1, 1]:
sm_values = []
for x in self.x_test:
sm_values.append(sm_kernels.aitchison_aitken(bw, self.x_train, x))
sm_values = np.array(sm_values)
hp_kernel = hp_kernels.AitchisonAitken(data=self.x_train, bandwidth=bw, num_values=len(self.configspace.get_hyperparameters()[0].choices))
hp_values = hp_kernel(self.x_test)
self.assertTrue(np.allclose(hp_values.T, sm_values.squeeze(), 1e-4))
@unittest.skipIf(rapid_development, "test skipped to accelerate developing new tests")
def test_sample(self):
num_samples = 2**20
for bw in [1e-1, 5e-1, 1]:
hp_kernel = hp_kernels.AitchisonAitken(data=self.x_train, bandwidth=bw, num_values=len(self.configspace.get_hyperparameters()[0].choices))
samples = hp_kernel.sample(num_samples=num_samples)
phat1, phat2 = [], []
for value in [0,1,2]:
phat1.append(np.sum(samples==value)/num_samples)
phat2.append(hp_kernel(np.array([value])).mean(axis=0)[0])
for p1, p2 in zip(phat1, phat2):
self.assertAlmostEqual(p1, p2, delta=5e-3)
self.assertAlmostEqual(np.sum(phat2), 1 , delta=1e-5)
class Test1dInteger(unittest.TestCase):
n_train = 128
n_test = 1024
def setUp(self):
self.configspace = CS.ConfigurationSpace(43)
HPs=[]
HPs.append( CS.UniformIntegerHyperparameter('int1', lower=-2, upper=2))
self.configspace.add_hyperparameters(HPs)
x_train_confs = [ self.configspace.sample_configuration() for i in range(self.n_train)]
self.x_train = np.array([c.get_array() for c in x_train_confs]).squeeze()
x_test_confs = [ self.configspace.sample_configuration() for i in range(self.n_test)]
self.x_test= np.array( [c.get_array() for c in x_test_confs]).squeeze()
def tearDown(self):
self.configspace = None
self.x_train = None
self.x_test = None
@unittest.skipIf(rapid_development, "test skipped to accelerate developing new tests")
def test_values(self):
n = self.configspace.get_hyperparameters()[0].upper - self.configspace.get_hyperparameters()[0].lower + 1
for bw in [1e-3, 1e-2, 1e-1, 0.99]:
sm_x_train= np.rint(self.x_train* n - .5).astype(np.int)
sm_x_test = np.rint(self.x_test * n - .5).astype(np.int).squeeze()
sm_values = np.array([sm_kernels.wang_ryzin(bw, sm_x_train[:,None], x) for x in sm_x_test]).squeeze()
hp_kernel = hp_kernels.WangRyzinInteger(data=self.x_train, bandwidth=bw, num_values=n, fix_boundary=False)
hp_values = hp_kernel(self.x_test).squeeze()
self.assertTrue(np.allclose(hp_values.T, sm_values, 1e-4))
@unittest.skipIf(rapid_development, "test skipped to accelerate developing new tests")
def test_pdf_boundary_quadrature(self):
self.x_test = np.array([0,1,2,3,4])/5+(1/10)
for bw in [1e-2, 1e-1, 0.99]:
hp_kernel = hp_kernels.WangRyzinInteger(data=self.x_train, bandwidth=bw, num_values=5, fix_boundary=True)
hp_values = hp_kernel(self.x_test).mean(axis=0)
self.assertAlmostEqual(hp_values.sum(), 1, delta=1e-4)
@unittest.skipIf(rapid_development, "test skipped to accelerate developing new tests")
def test_sample(self):
num_samples = 2**20
for bw in [1e-1, 5e-1, 0.99]:
hp_kernel = hp_kernels.WangRyzinInteger(data=self.x_train, bandwidth=bw, num_values=5, fix_boundary=True)
samples = hp_kernel.sample(num_samples=num_samples)
phat1, x = np.histogram(samples, normed=True, bins=[0, 0.2, .4, .6, .8, 1.])
phat1 /= 5 # account for bin width
phat2 = hp_kernel((x[1:] + x[:-1])/2).mean(axis=0)
for p1, p2 in zip(phat1, phat2):
self.assertAlmostEqual(p1, p2, delta=5e-2)
class Test1dOrdinal(unittest.TestCase):
n_train = 128
n_test = 5
def setUp(self):
self.configspace = CS.ConfigurationSpace(43)
HPs=[]
HPs.append( CS.OrdinalHyperparameter('ord1', ['cold', 'mild', 'warm', 'hot']))
self.configspace.add_hyperparameters(HPs)
x_train_confs = [ self.configspace.sample_configuration() for i in range(self.n_train)]
self.x_train = np.array([c.get_array() for c in x_train_confs]).squeeze()
x_test_confs = [ self.configspace.sample_configuration() for i in range(self.n_test)]
self.x_test= np.array( [c.get_array() for c in x_test_confs]).squeeze()
def tearDown(self):
self.configspace = None
self.x_train = None
self.x_test = None
@unittest.skipIf(rapid_development, "test skipped to accelerate developing new tests")
def test_values(self):
for bw in [1e-3, 1e-2, 1e-1, 1]:
sm_values = np.array([sm_kernels.wang_ryzin(bw, self.x_train[:,None], x) for x in self.x_test])
hp_kernel = hp_kernels.WangRyzinOrdinal(data=self.x_train, bandwidth=bw, fix_boundary=False)
hp_values = hp_kernel(self.x_test)
self.assertTrue(np.allclose(hp_values.T, sm_values, 1e-4))
@unittest.skipIf(rapid_development, "test skipped to accelerate developing new tests")
def test_pdf_boundary_simple(self):
self.x_train = np.array([0])
self.x_test = np.array([0, 1,2,3])
for bw in [1e-3, 1e-2]:
# note: for larger bandwidths, the pdf also needs to be truncated as +1,
# which leads to something different than the scaling computed here
hp_kernel1 = hp_kernels.WangRyzinOrdinal(data=self.x_train, bandwidth=bw, num_values=4, fix_boundary=False)
hp_kernel2 = hp_kernels.WangRyzinOrdinal(data=self.x_train, bandwidth=bw, num_values=4, fix_boundary=True)
hp_values1 = hp_kernel1(self.x_test).squeeze()
hp_values2 = hp_kernel2(self.x_test).squeeze()
weight = 1-hp_values1[1:].sum()
self.assertTrue(np.allclose(hp_values1/weight, hp_values2, 1e-4))
self.x_train = np.array([3])
self.x_test = np.array([0,1,2,3])
for bw in [1e-3, 1e-2]:
# note: for larger bandwidths, the pdf also needs to be truncated as +1,
# which leads to something different than the scaling computed here
hp_kernel1 = hp_kernels.WangRyzinOrdinal(data=self.x_train, bandwidth=bw, num_values=4, fix_boundary=False)
hp_kernel2 = hp_kernels.WangRyzinOrdinal(data=self.x_train, bandwidth=bw, num_values=4, fix_boundary=True)
hp_values1 = hp_kernel1(self.x_test).squeeze()
hp_values2 = hp_kernel2(self.x_test).squeeze()
weight = 1-hp_values1[:-1].sum()
self.assertTrue(np.allclose(hp_values1/weight, hp_values2, 1e-4))
# simple test based on 68, 95, 99% rule
self.x_train = np.array([0.5])
for bw, w in ([0.5, 0.6827], [0.25, 0.9545], [1/6, 0.9973]):
hp_kernel = hp_kernels.Gaussian(data=self.x_train, bandwidth=bw, fix_boundary=True)
self.assertAlmostEqual(hp_kernel.weights[0], 1/w, delta=1e-4)
@unittest.skipIf(rapid_development, "test skipped to accelerate developing new tests")
def test_pdf_boundary_quadrature(self):
self.x_test = np.array([0,1,2,3])
for bw in [1e-2, 1e-1, 0.99]:
hp_kernel = hp_kernels.WangRyzinOrdinal(data=self.x_train, bandwidth=bw, num_values=4, fix_boundary=True)
hp_values = hp_kernel(self.x_test).mean(axis=0)
self.assertAlmostEqual(hp_values.sum(), 1, delta=1e-4)
@unittest.skipIf(rapid_development, "test skipped to accelerate developing new tests")
def test_sample(self):
num_samples = 2**20
for bw in [1e-1, 5e-1, 0.99]:
hp_kernel = hp_kernels.WangRyzinOrdinal(data=self.x_train, bandwidth=bw, num_values=4, fix_boundary=True)
samples = hp_kernel.sample(num_samples=num_samples)
phat1, x = np.histogram(samples, normed=True, bins=[-0.5, 0.5, 1.5, 2.5, 3.5])
phat2 = hp_kernel((x[1:] + x[:-1])/2).mean(axis=0)
for p1, p2 in zip(phat1, phat2):
self.assertAlmostEqual(p1, p2, delta=5e-2)
if __name__ == '__main__':
unittest.main()
|
functions/update_weight/update_weight.py | MarkAtwood/aws-lambda-deploy | 121 | 12744367 |
"""
Copyright 2017 Amazon.com, Inc. or its affiliates. All Rights Reserved.
SPDX-License-Identifier: MIT-0
"""
import boto3
def handler(event, context):
print(event)
weights = event['weights']
func_name = event['function-name']
version = event['new-version']
alias_name = event['alias-name']
if 'current-weight' in event:
current_weight = event['current-weight']
next_weight = get_next_weight(weights, current_weight)
else:
next_weight = weights[0]
update_weight(func_name, alias_name, version, next_weight)
return next_weight
def get_next_weight(weights, current_weight):
index = weights.index(current_weight)
return weights[index+1]
def update_weight(func_name, alias_name, version, next_weight):
print("next weight: {0}".format(next_weight))
client = boto3.client('lambda')
weights = {
version : next_weight
}
routing_config = {
'AdditionalVersionWeights' : weights
}
res = client.update_alias(FunctionName=func_name, Name=alias_name, RoutingConfig=routing_config)
print(res)
return
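# Illustrative event shape (assumed; inferred from the keys read in handler above):
#     {
#         "function-name": "my-function",      # hypothetical names
#         "alias-name": "live",
#         "new-version": "2",
#         "weights": [0.1, 0.5, 0.9],
#         "current-weight": 0.1                # omitted on the first invocation
#     }
# Each call moves the alias routing one step further along "weights" via the
# AdditionalVersionWeights on the alias.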
|
src/keybindings.py | maiki/k3x | 188 | 12744376 |
# keybindings.py
#
# MIT License
#
# Copyright (c) 2020 <NAME> <<EMAIL>>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import logging
from typing import Dict, Callable, List, Tuple
from .config import ApplicationSettings
def parse_keystroke(shortcut: str) -> List[str]:
"""
    Translates a keystroke description like "<Ctrl><Alt>P" into a list like ["control", "alt", "p"].
"""
res = []
for sub in ["<Ctrl>", "<Ctr>", "<Control>", "Control", "<ctrl>", "<ctr>", "<control>", "control"]:
if sub in shortcut:
shortcut = shortcut.replace(sub, "")
res += ["control"]
break
for sub in ["<Alt>", "<alt>", "Alt", "alt"]:
if sub in shortcut:
shortcut = shortcut.replace(sub, "")
res += ["alt"]
break
for sub in ["<Shift>", "<shift>", "Shift", "shift"]:
if sub in shortcut:
shortcut = shortcut.replace(sub, "")
res += ["shift"]
break
for sub in ["<Meta>", "<meta>", "Meta", "meta", "<Super>", "<super>", "Super", "super"]:
if sub in shortcut:
shortcut = shortcut.replace(sub, "")
res += ["super"]
break
if len(shortcut) > 0:
res += [shortcut.lower()]
return res
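# Illustrative result (added), matching the docstring above:
#     parse_keystroke("<Ctrl><Alt>P")   # -> ["control", "alt", "p"]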
class Keybindings(object):
def __init__(self, settings: ApplicationSettings, mappings: Dict[str, Dict[str, Tuple[str, Callable]]]):
"""
        Creates keybindings for the shortcuts stored in GSettings.
        The list of settings cannot be changed after creation.
Pass a map of (setting_id -> callback)
"""
super().__init__()
self._mappings = mappings
self._settings = settings
self._active_shortcuts = dict()
# see https://github.com/timeyyy/system_hotkey
from system_hotkey import SystemHotkey
self._keybinder = SystemHotkey(check_queue_interval=0.01, use_xlib=True)
self.rebind_all()
def rebind_all(self):
for category, shortcuts in self._mappings.items():
if not shortcuts:
continue
for title, info in shortcuts.items():
shortcut_id, callback = info
shortcut = self._settings.get_keybinding(shortcut_id)
parsed = parse_keystroke(shortcut)
if not callback:
logging.warning(f"Empty callback for shortcut '{shortcut_id}': ignored")
continue
if not shortcut:
logging.warning(f"Empty shortcut for settings '{shortcut_id}': ignored")
continue
logging.info(f"Binding '{shortcut_id}' -> '{callback.__name__}'")
if shortcut and shortcut in self._active_shortcuts and self._active_shortcuts[shortcut] != callback:
logging.debug(f"Removing current binding '{shortcut}'")
try:
self._keybinder.unregister(parsed)
del self._active_shortcuts[shortcut]
except Exception as e:
logging.error(f"Could not unbind '{shortcut}': {e}")
continue
if shortcut and shortcut not in self._active_shortcuts:
logging.info(f"Binding '{shortcut}' ({parsed}) to '{callback.__name__}'")
try:
self._keybinder.register(parsed, callback=callback)
self._active_shortcuts[shortcut] = callback
except Exception as e:
logging.error(f"Could not bind {shortcut} to {callback.__name__}: {e}")
continue
self._settings.connect(f"changed::{shortcut_id}", lambda k, s: self.rebind_all())
|
exercises/zh/solution_03_16_02.py | Jette16/spacy-course | 2,085 | 12744378 | import spacy
nlp = spacy.load("zh_core_web_sm")
text = (
"在300多年的风雨历程中,历代同仁堂人始终恪守“炮制虽繁必不敢省人工,品味虽贵必不敢减物力”的古训,"
"树立“修合无人见,存心有天知”的自律意识,造就了制药过程中兢兢小心、精益求精的严细精神。"
)
# Disable the tagger and parser
with nlp.disable_pipes("tagger", "parser"):
    # Process the text
doc = nlp(text)
    # Print the entities in the doc
print(doc.ents)
|
tests/test_wsgi_spec.py | skrytebane/meinheld | 1,186 | 12744387 |
# -*- coding: utf-8 -*-
from collections import OrderedDict
import os
import sys
from base import *
import requests
ASSERT_RESPONSE = b"Hello world!"
RESPONSE = [b"Hello ", b"world!"]
class App(BaseApp):
environ = None
def __call__(self, environ, start_response):
status = '200 OK'
response_headers = [('Content-type','text/plain')]
start_response(status, response_headers)
self.environ = environ.copy()
print(environ)
return RESPONSE
class ErrApp(BaseApp):
def __call__(self, environ, start_response):
status = '200 OK'
response_headers = [('Content-type','text/plain')]
start_response(status, response_headers)
self.environ = environ.copy()
print(environ)
environ["XXXX"]
return SIMPLE_RESPONSE
class ErrAppEx(BaseApp):
def __call__(self, environ, start_response):
status = '500 InternalServerError'
response_headers = [('Content-type','text/plain')]
start_response(status, response_headers, ZeroDivisionError)
self.environ = environ.copy()
return RESPONSE
class IterErrApp(BaseApp):
def __call__(self, environ, start_response):
status = '200 OK'
response_headers = [('Content-type','text/plain')]
start_response(status, response_headers)
self.environ = environ.copy()
print(environ)
return [1]
class UpgradeApp(BaseApp):
def __call__(self, environ, start_response):
status = '101 Switching Protocols'
response_headers = [('Upgrade', 'websocket')]
start_response(status, response_headers)
self.environ = environ.copy()
print(environ)
return []
def test_check_key():
def client():
return requests.get("http://localhost:8000/foo/bar")
env, res = run_client(client, App)
assert(res.content == ASSERT_RESPONSE)
assert(env.get("REQUEST_METHOD") == "GET")
assert(env.get("SCRIPT_NAME") == "")
assert(env.get("PATH_INFO") == "/foo/bar")
assert(env.get("QUERY_STRING") == None)
assert(env.get("CONTENT_TYPE") == None)
# assert(env.get("CONTENT_LENGTH") == "0")
assert(env.get("SERVER_NAME") == "0.0.0.0")
assert(env.get("SERVER_PORT") == "8000")
assert(env.get("SERVER_PROTOCOL") == "HTTP/1.1")
assert(env.get("HTTP_USER_AGENT") != None)
def test_simple():
def client():
return requests.get("http://localhost:8000/")
env, res = run_client(client, App)
# print(res.content)
assert(res.content == ASSERT_RESPONSE)
assert(env.get("PATH_INFO") == "/")
assert(env.get("QUERY_STRING") == None)
def test_encode():
def client():
return requests.get("http://localhost:8000/あいう")
env, res = run_client(client, App)
assert(res.content == ASSERT_RESPONSE)
path_info = env.get('PATH_INFO')
expected = "/あいう" # utf-8
if sys.version_info[0] > 2:
expected = expected.encode('utf-8').decode('latin1')
assert(env.get("PATH_INFO") == expected)
assert(env.get("QUERY_STRING") == None)
def test_query():
def client():
return requests.get("http://localhost:8000/ABCDEF?a=1234&bbbb=ccc")
env, res = run_client(client, App)
assert(res.content == ASSERT_RESPONSE)
assert(env.get("PATH_INFO") == "/ABCDEF")
assert(env.get("QUERY_STRING") == "a=1234&bbbb=ccc")
def test_chunk_response():
def client():
return requests.get("http://localhost:8000/")
env, res = run_client(client, App)
headers = res.headers
assert(res.content == ASSERT_RESPONSE)
assert(headers["transfer-encoding"] == "chunked")
assert(headers["connection"] == "close")
def test_err():
def client():
return requests.get("http://localhost:8000/")
env, res = run_client(client, ErrApp)
assert(res.status_code == 500)
def test_iter_err():
def client():
return requests.get("http://localhost:8000/")
env, res = run_client(client, IterErrApp)
assert(res.status_code == 500)
def test_headers():
def client():
headers = {"X-TEST":"123", "DNT":"1"}
return requests.get("http://localhost:8000/", headers=headers)
env, res = run_client(client, App)
assert(res.status_code == 200)
assert(res.content == ASSERT_RESPONSE)
assert(env["HTTP_X_TEST"] == "123")
assert(env["HTTP_DNT"] == "1")
def test_post():
def client():
payload = OrderedDict([('key1', 'value1'), ('key2', 'value2')])
return requests.post("http://localhost:8000/", data=payload)
env, res = run_client(client, App)
assert(res.status_code == 200)
assert(res.content == ASSERT_RESPONSE)
assert(env.get("wsgi.input").read() == b"key1=value1&key2=value2")
def gen():
yield b"key1=value1&key2=value2"
def test_post_chunked():
def client():
return requests.post("http://localhost:8000/", data=gen())
env, res = run_client(client, App)
assert(res.status_code == 200)
assert(res.content == ASSERT_RESPONSE)
assert(env.get("wsgi.input").read() == b"key1=value1&key2=value2")
def test_upload_file():
def client():
filepath = os.path.join(os.path.dirname(__file__), "wallpaper.jpg")
files = {'wallpaper.jpg': open(filepath, 'rb')}
return requests.post("http://localhost:8000/", files=files)
env, res = run_client(client, App)
assert(res.status_code == 200)
assert(res.content == ASSERT_RESPONSE)
length = env["CONTENT_LENGTH"]
data = env.get("wsgi.input").read()
assert(len(data) == int(length))
def test_error():
def client():
return requests.get("http://localhost:8000/foo/bar")
env, res = run_client(client, ErrAppEx)
assert(res.status_code == 500)
assert(res.content == ASSERT_RESPONSE)
assert(env.get("REQUEST_METHOD") == "GET")
def test_upgrade():
"""This server will assume the application will correctly set the
"Upgrade" header, and automatically set the "Connection" header to
"upgrade", instead of "keep-alive" or "close", for a response with
status "101 Switching Protocols". That is likely to better conform to
RFC 7230 (HTTP/1.1) and RFC 6455 (WebSocket).
"""
def client():
return requests.get("http://localhost:8000")
env, res = run_client(client, UpgradeApp)
headers = res.headers
assert(res.status_code == 101)
assert(headers["upgrade"] == "websocket")
assert(headers["connection"] == "upgrade")
def test_no_content():
class App(BaseApp):
def __call__(self, environ, start_response):
status = "204 No Content"
response_headers = []
start_response(status, response_headers)
self.environ = environ.copy()
return []
def client():
return requests.get("http://localhost:8000")
env, res = run_client(client, App)
headers = res.headers
# print(env)
# print(res)
assert(res.status_code == 204)
assert("Content-Length" not in headers)
assert("Transfer-Encoding" not in headers)
|
lightreid/data/datamanager.py | nataliamiccini/light-reid | 296 | 12744405 |
"""
@author: wangguanan
@contact: <EMAIL>
"""
import numpy as np
import copy
from PIL import Image
import torch.utils.data as data
from .samplers import PKSampler
class ReIDDataset:
def __init__(self, samples, transform):
self.samples = samples
self.transform = transform
def __getitem__(self, index):
sample = copy.deepcopy(self.samples[index])
sample[0] = self._loader(sample[0])
if self.transform is not None:
sample[0] = self.transform(sample[0])
sample[1] = np.array(sample[1])
return sample
def __len__(self):
return len(self.samples)
def _loader(self, img_path):
return Image.open(img_path).convert('RGB')
class DataManager(object):
'''
Args:
sources(list): tuples of torch.data.ReIDDataset, source datasets to train with
target(torch.data.ReIDDataset): target dataset to evaluate on
transforms_train(torch.torchvision.transforms):
transforms_test(torch.torchvision.transforms):
        sampler(str): sampling strategy for the train dataset; supports 'pk' and 'random'.
            When 'pk', params 'p' and 'k' must be given.
            When 'random', param 'batch_size' must be given.
Example:
datamanager = DataManager(
sources=[lightreid.data.Market1501(data_path='', combineall=False), lightreid.data.DukeMTMCreID(data_path='', combineall=False)],
target=lightreid.data.Market1501(data_path='', combineall=False),
transforms_train=lightreid.data.build_transforms(img_size=[256,128], transform_list=['randomflip', 'padcrop', 'colorjitor', 'rea']),
transforms_test=lightreid.data.build_transforms(img_size=[256,128], transform_list=[]),
sampler='pk', p=16, k=4
)
train_loader = datamanager.train_loader
query_loader = datamanager.query_loader
gallery_loader = datamanager.gallery_loader
'''
KWARGS = ['batch_size', 'p', 'k']
SAMPLERS = ['random', 'pk']
def __init__(self, sources, target, transforms_train, transforms_test, sampler, **kwargs):
        # check that the sampler and its kwargs are legal
assert sampler in DataManager.SAMPLERS, \
'sampler expect {}. but got {}'.format(DataManager.SAMPLERS, sampler)
# init train/query/gallery dataset
train = self.combine([source.train for source in sources])
self.class_num = len(set([sample[1] for sample in train]))
self.train_dataset = ReIDDataset(train, transforms_train)
self.query_gallery_dataset_dict = {}
for val in target:
query_dataset = ReIDDataset(val.query, transforms_test)
gallery_dataset = ReIDDataset(val.gallery, transforms_test)
self.query_gallery_dataset_dict[val.__class__.__name__] = (query_dataset, gallery_dataset)
# train loader
if sampler == 'random':
            assert 'batch_size' in kwargs.keys(), 'param batch_size(int) must be given when sampler=\'random\''
batch_size = kwargs['batch_size']
self.train_loader = data.DataLoader(self.train_dataset, batch_size=batch_size, num_workers=8, drop_last=True, shuffle=True)
elif sampler == 'pk':
            assert 'p' in kwargs.keys() and 'k' in kwargs.keys(), 'param p(int) and k(int) must be given when sampler=\'pk\''
p, k = kwargs['p'], kwargs['k']
self.train_loader = data.DataLoader(self.train_dataset, batch_size=p*k, num_workers=8, drop_last=True, sampler=PKSampler(self.train_dataset, k=k))
else:
assert 0, 'expect {}. but got {}'.format(DataManager.SAMPLERS, sampler)
# query and gallery loader
self.query_gallery_loader_dict = {}
for dataset_name, (query_dataset, gallery_dataset) in self.query_gallery_dataset_dict.items():
query_loader = data.DataLoader(query_dataset, batch_size=64, num_workers=8, drop_last=False, shuffle=False)
gallery_loader = data.DataLoader(gallery_dataset, batch_size=64, num_workers=8, drop_last=False, shuffle=False)
self.query_gallery_loader_dict[dataset_name] = (query_loader, gallery_loader)
def combine(self, samples_list):
        '''combine several sample lists (e.g. market.train and duke.train) into a single list'''
all_samples = []
max_pid, max_cid = 0, 0
for samples in samples_list:
for a_sample in samples:
img_path = a_sample[0]
pid = max_pid + a_sample[1]
cid = max_cid + a_sample[2]
all_samples.append([img_path, pid, cid])
max_pid = max([sample[1] for sample in all_samples])
max_cid = max([sample[2] for sample in all_samples])
return all_samples
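# Added note on combine(): person/camera ids of each later dataset are shifted past
# the current maxima, so labels from different source datasets never collide.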
|
CertToESP32.py | berniew/HTTPS-for-Makers | 167 | 12744412 |
#MIT License(MIT)
# CertToHex.py Version 1.0.0 #
# Copyright(c) 2018 <NAME> #
#Permission is hereby granted, free of charge, to any person obtaining a copy
#of this software and associated documentation files (the "Software"), to deal
#in the Software without restriction, including without limitation the rights
#to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
#copies of the Software, and to permit persons to whom the Software is
#furnished to do so, subject to the following conditions:
#The above copyright notice and this permission notice shall be included in all
#copies or substantial portions of the Software.
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
#IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
#FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
#AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
#LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
#OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
#SOFTWARE.
import binascii
filename = 'howsmysslBase64.cer'
with open(filename, 'rb') as f:
content = f.read()
print('// '+filename)
print('const char* test_root_ca = \ ')
outString = '"'
caCertLen = 0
x = len(content)
for i in range(0, x-1):
first = (chr(content[i]))
# print(first,content[i])
if content[i]==13:
outString = outString + '\\n" \ '
outString = outString+first
if content[i]==10:
outString = outString + '"'
outString = outString[:-2]  # trim the trailing characters left over from the last line ending
print(outString[:-1]+';')
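
# Expected output shape (illustrative only; the certificate body below is made up):
#
#   // howsmysslBase64.cer
#   const char* test_root_ca = \
#   "-----BEGIN CERTIFICATE-----\n" \
#   "MIIDxTCCAq2gAwIBAgIQAqxcJmoLQJuPC3nyrkYldz...\n" \
#   "-----END CERTIFICATE-----\n" ;
#
# The printed block can be pasted into an ESP32 sketch as the root CA string.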
|
autoload/splice.py | mathstuf/vim-splice | 134 | 12744423 | <reponame>mathstuf/vim-splice
import vim, os, sys
# Add the library to the Python path.
for p in vim.eval("&runtimepath").split(','):
plugin_dir = os.path.join(p, "autoload")
if os.path.exists(os.path.join(plugin_dir, "splicelib")):
if plugin_dir not in sys.path:
sys.path.append(plugin_dir)
break
import splicelib.init as splice
# Wrapper functions ----------------------------------------------------------------
def SpliceInit():
splice.init()
def SpliceOriginal():
splice.modes.current_mode.key_original()
def SpliceOne():
splice.modes.current_mode.key_one()
def SpliceTwo():
splice.modes.current_mode.key_two()
def SpliceResult():
splice.modes.current_mode.key_result()
def SpliceGrid():
splice.modes.key_grid()
def SpliceLoupe():
splice.modes.key_loupe()
def SpliceCompare():
splice.modes.key_compare()
def SplicePath():
splice.modes.key_path()
def SpliceDiff():
splice.modes.current_mode.key_diff()
def SpliceDiffoff():
splice.modes.current_mode.key_diffoff()
def SpliceScroll():
splice.modes.current_mode.key_scrollbind()
def SpliceLayout():
splice.modes.current_mode.key_layout()
def SpliceNext():
splice.modes.current_mode.key_next()
def SplicePrev():
splice.modes.current_mode.key_prev()
def SpliceUse():
splice.modes.current_mode.key_use()
def SpliceUse1():
splice.modes.current_mode.key_use1()
def SpliceUse2():
splice.modes.current_mode.key_use2()
|
python/glow/gwas/tests/test_approx_firth.py | bcajes/glow | 214 | 12744438 | from dataclasses import dataclass
import functions as fx
import glow.gwas.log_reg as lr
import glow.gwas.approx_firth as af
import pandas as pd
from nptyping import Float, NDArray
import numpy as np
import pytest
from typing import Any
@dataclass
class TestData:
phenotypes: NDArray[(Any, ), Float]
covariates: NDArray[(Any, Any), Float]
offset: NDArray[(Any, ), Float]
def _get_test_data(use_offset, use_intercept):
test_file = 'test-data/r/sex2withoffset.txt'
df = pd.read_table(test_file, delimiter='\t').astype('float64')
phenotypes = df['case']
covariates = df.loc[:, 'age':'dia']
if use_intercept:
covariates.loc[:, 'intercept'] = 1
offset = df['offset']
if not use_offset:
offset = offset * 0
return TestData(phenotypes.to_numpy(), covariates.to_numpy(), offset.to_numpy())
def _compare_full_firth_beta(test_data, golden_firth_beta):
beta_init = np.zeros(test_data.covariates.shape[1])
X = test_data.covariates
y = test_data.phenotypes
offset = test_data.offset
test_firth_fit = af._fit_firth(beta_init=beta_init, X=X, y=y, offset=offset)
test_firth_beta = test_firth_fit.beta
assert np.allclose(golden_firth_beta, test_firth_beta)
def test_full_firth():
# table = read.table("sex2withoffset.txt", header=True)
# logistf(case ~ age+oc+vic+vicl+vis+dia+offset(offset), data=table)
golden_firth_beta = [
-1.1715911, # age
0.1568537, # oc
2.4752617, # vic
-2.2125007, # vicl
-0.8604622, # vis
2.7397140, # dia
-0.5679234 # intercept
]
test_data = _get_test_data(use_offset=True, use_intercept=True)
_compare_full_firth_beta(test_data, golden_firth_beta)
def test_full_firth_no_offset():
# logistf(case ~ age+oc+vic+vicl+vis+dia, data=table)
golden_firth_beta = [
-1.10598130, # age
-0.06881673, # oc
2.26887464, # vic
-2.11140816, # vicl
-0.78831694, # vis
3.09601263, # dia
0.12025404 # intercept
]
test_data = _get_test_data(use_offset=False, use_intercept=True)
_compare_full_firth_beta(test_data, golden_firth_beta)
def test_full_firth_no_intercept():
# logistf(case ~ age+oc+vic+vicl+vis+dia+offset(offset)-1, data=table)
golden_firth_beta = [
-1.2513849, # age
-0.3141151, # oc
2.2066573, # vic
-2.2988439, # vicl
-0.9922712, # vis
2.7046574 # dia
]
test_data = _get_test_data(use_offset=True, use_intercept=False)
_compare_full_firth_beta(test_data, golden_firth_beta)
def test_null_firth_fit_no_offset():
golden_firth_beta = [
-1.10598130, # age
-0.06881673, # oc
2.26887464, # vic
-2.11140816, # vicl
-0.78831694, # vis
3.09601263, # dia
0.12025404 # intercept
]
test_data = _get_test_data(use_offset=False, use_intercept=True)
fit = af.perform_null_firth_fit(test_data.phenotypes,
test_data.covariates,
~np.isnan(test_data.phenotypes),
None,
includes_intercept=True)
assert np.allclose(fit, test_data.covariates @ golden_firth_beta)
def _read_regenie_df(file, trait, num_snps):
df = pd.read_table(file, sep=r'\s+')
df = df[df['ID'] <= num_snps]
df['phenotype'] = trait
return df
def compare_corrections_to_regenie(spark,
pvalue_threshold,
output_prefix,
compare_all_cols,
uncorrected,
corrected,
missing=[]):
(genotype_df, phenotype_df, covariate_df, offset_df) = fx.get_input_dfs(spark,
binary=True,
missing=missing)
glowgr_df = lr.logistic_regression(genotype_df,
phenotype_df,
covariate_df,
offset_df,
correction=lr.correction_approx_firth,
pvalue_threshold=pvalue_threshold,
values_column='values').toPandas()
fx.compare_to_regenie(output_prefix, glowgr_df, compare_all_cols)
correction_counts = glowgr_df.correctionSucceeded.value_counts(dropna=False).to_dict()
if uncorrected > 0:
# null in Spark DataFrame converts to nan in pandas
assert correction_counts[np.nan] == uncorrected
if corrected > 0:
assert correction_counts[True] == corrected
assert False not in correction_counts
return glowgr_df
@pytest.mark.min_spark('3')
def test_correct_all_versus_regenie(spark):
compare_corrections_to_regenie(spark,
0.9999,
'test_bin_out_firth_',
compare_all_cols=True,
uncorrected=0,
corrected=200)
@pytest.mark.min_spark('3')
def test_correct_half_versus_regenie(spark):
compare_corrections_to_regenie(spark,
0.5,
'test_bin_out_half_firth_',
compare_all_cols=False,
uncorrected=103,
corrected=97)
@pytest.mark.min_spark('3')
def test_correct_missing_versus_regenie(spark):
compare_corrections_to_regenie(
spark,
0.9999,
'test_bin_out_missing_firth_',
compare_all_cols=True,
uncorrected=0,
corrected=200,
missing=['35_35', '136_136', '77_77', '100_100', '204_204', '474_474'])
|
custom_components/smartweather/weather.py | briis/smartweather | 112 | 12744443 | """Support for the SmartWeather weather service."""
import logging
from typing import Dict, List
from homeassistant.components.weather import (
ATTR_FORECAST_CONDITION,
ATTR_FORECAST_PRECIPITATION,
ATTR_FORECAST_PRECIPITATION_PROBABILITY,
ATTR_FORECAST_TEMP,
ATTR_FORECAST_TEMP_LOW,
ATTR_FORECAST_TIME,
ATTR_FORECAST_WIND_BEARING,
ATTR_FORECAST_WIND_SPEED,
ATTR_WEATHER_HUMIDITY,
ATTR_WEATHER_PRESSURE,
ATTR_WEATHER_TEMPERATURE,
ATTR_WEATHER_WIND_BEARING,
ATTR_WEATHER_WIND_SPEED,
WeatherEntity,
)
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import (
CONF_ID,
TEMP_CELSIUS,
)
from homeassistant.core import HomeAssistant
from homeassistant.util.dt import utc_from_timestamp
from homeassistant.util.temperature import celsius_to_fahrenheit
from pysmartweatherio import FORECAST_TYPE_DAILY
from .const import (
DOMAIN,
ATTR_CURRENT_ICON,
ATTR_FCST_UV,
ATTR_TEMP_HIGH_TODAY,
ATTR_TEMP_LOW_TODAY,
DEFAULT_ATTRIBUTION,
DEVICE_TYPE_WEATHER,
CONDITION_CLASSES,
)
from .entity import SmartWeatherEntity
_LOGGER = logging.getLogger(__name__)
async def async_setup_entry(
hass: HomeAssistant, entry: ConfigEntry, async_add_entities
) -> None:
"""Add a weather entity from station_id."""
unit_system = "metric" if hass.config.units.is_metric else "imperial"
fcst_coordinator = hass.data[DOMAIN][entry.entry_id]["fcst_coordinator"]
if not fcst_coordinator.data:
return
coordinator = hass.data[DOMAIN][entry.entry_id]["coordinator"]
if not coordinator.data:
return
station_info = hass.data[DOMAIN][entry.entry_id]["station"]
if not station_info:
return
fcst_type = hass.data[DOMAIN][entry.entry_id]["fcst_type"]
if not fcst_type:
return
weather_entity = SmartWeatherWeather(
coordinator,
entry.data,
DEVICE_TYPE_WEATHER,
station_info,
fcst_coordinator,
unit_system,
fcst_type,
)
async_add_entities([weather_entity], True)
return True
class SmartWeatherWeather(SmartWeatherEntity, WeatherEntity):
"""Representation of a weather entity."""
def __init__(
self,
coordinator,
entries,
device_type,
server,
fcst_coordinator,
unit_system,
fcst_type,
) -> None:
"""Initialize the SmartWeather weather entity."""
super().__init__(
coordinator, entries, device_type, server, fcst_coordinator, None
)
self._name = f"{DOMAIN.capitalize()} {entries[CONF_ID]}"
self._unit_system = unit_system
self._forecast_type = fcst_type
@property
def name(self) -> str:
"""Return the name of the sensor."""
return self._name
@property
def temperature(self) -> int:
"""Return the temperature."""
if self._current is not None:
return self._current.air_temperature
return None
@property
def temperature_unit(self) -> str:
"""Return the unit of measurement."""
return TEMP_CELSIUS
@property
def humidity(self) -> int:
"""Return the humidity."""
if self._current is not None:
return self._current.relative_humidity
return None
@property
def wind_speed(self) -> float:
"""Return the wind speed."""
if self._current is not None:
return self._current.wind_avg
return None
@property
def wind_gust(self) -> float:
"""Return the wind Gust."""
if self._current is not None:
return self._current.wind_gust
return None
@property
def wind_bearing(self) -> int:
"""Return the wind bearing."""
if self._current is not None:
return self._current.wind_bearing
return None
@property
def precipitation(self) -> float:
"""Return the precipitation."""
if self._current is not None:
return round(self._current.precip_accum_local_day, 1)
return None
@property
def pressure(self) -> int:
"""Return the pressure."""
if self._current is not None:
if self._unit_system == "imperial":
return round(self._current.sea_level_pressure, 3)
return round(self._current.sea_level_pressure, 2)
return None
@property
def uv(self) -> int:
"""Return the UV Index."""
if self._current is not None:
return round(self._current.uv, 1)
return None
@property
def current_condition(self) -> int:
"""Return Current Condition Icon."""
if self._forecast is not None:
return self._forecast.current_icon
return None
@property
def condition(self) -> str:
"""Return the weather condition."""
return next(
(k for k, v in CONDITION_CLASSES.items() if self.current_condition in v),
None,
)
@property
def temp_high_today(self) -> float:
"""Return Todays High Temp Forecast."""
if self._forecast is not None:
if self._unit_system == "imperial":
return celsius_to_fahrenheit(self._forecast.temp_high_today)
return self._forecast.temp_high_today
return None
@property
def temp_low_today(self) -> float:
"""Return Todays Low Temp Forecast."""
if self._forecast is not None:
if self._unit_system == "imperial":
return celsius_to_fahrenheit(self._forecast.temp_low_today)
return self._forecast.temp_low_today
return None
@property
def attribution(self) -> str:
"""Return the attribution."""
return DEFAULT_ATTRIBUTION
@property
def device_state_attributes(self) -> Dict:
"""Return SmartWeather specific attributes."""
return {
ATTR_CURRENT_ICON: self.current_condition,
ATTR_FCST_UV: self.uv,
ATTR_WEATHER_HUMIDITY: self.humidity,
ATTR_WEATHER_PRESSURE: self.pressure,
ATTR_WEATHER_TEMPERATURE: self.temperature,
ATTR_WEATHER_WIND_BEARING: self.wind_bearing,
ATTR_WEATHER_WIND_SPEED: self.wind_speed,
ATTR_TEMP_HIGH_TODAY: self.temp_high_today,
ATTR_TEMP_LOW_TODAY: self.temp_low_today,
}
@property
def forecast(self) -> List:
"""Return the forecast."""
if self.fcst_coordinator.data is None or len(self.fcst_coordinator.data) < 2:
return None
data = []
for forecast in self.fcst_coordinator.data:
condition = next(
(k for k, v in CONDITION_CLASSES.items() if forecast.icon in v),
None,
)
if self._forecast_type == FORECAST_TYPE_DAILY:
data.append(
{
ATTR_FORECAST_TIME: utc_from_timestamp(
forecast.epochtime
).isoformat(),
ATTR_FORECAST_TEMP: forecast.temp_high,
ATTR_FORECAST_TEMP_LOW: forecast.temp_low,
ATTR_FORECAST_PRECIPITATION: round(forecast.precip, 1)
if forecast.precip is not None
else None,
ATTR_FORECAST_PRECIPITATION_PROBABILITY: forecast.precip_probability,
ATTR_FORECAST_CONDITION: condition,
ATTR_FORECAST_WIND_SPEED: forecast.wind_avg,
ATTR_FORECAST_WIND_BEARING: forecast.wind_bearing,
}
)
else:
data.append(
{
ATTR_FORECAST_TIME: utc_from_timestamp(
forecast.epochtime
).isoformat(),
ATTR_FORECAST_TEMP: forecast.temperature,
ATTR_FORECAST_PRECIPITATION: round(forecast.precip, 1)
if forecast.precip is not None
else None,
ATTR_FORECAST_PRECIPITATION_PROBABILITY: forecast.precip_probability,
ATTR_FORECAST_CONDITION: condition,
ATTR_FORECAST_WIND_SPEED: forecast.wind_avg,
ATTR_FORECAST_WIND_BEARING: forecast.wind_bearing,
}
)
return data
|
Src/StdLib/Lib/test/test_setcomps.py | cwensley/ironpython2 | 2,209 | 12744457 | doctests = """
########### Tests mostly copied from test_listcomps.py ############
Test simple loop with conditional
>>> sum({i*i for i in range(100) if i&1 == 1})
166650
Test simple case
>>> {2*y + x + 1 for x in (0,) for y in (1,)}
set([3])
Test simple nesting
>>> list(sorted({(i,j) for i in range(3) for j in range(4)}))
[(0, 0), (0, 1), (0, 2), (0, 3), (1, 0), (1, 1), (1, 2), (1, 3), (2, 0), (2, 1), (2, 2), (2, 3)]
Test nesting with the inner expression dependent on the outer
>>> list(sorted({(i,j) for i in range(4) for j in range(i)}))
[(1, 0), (2, 0), (2, 1), (3, 0), (3, 1), (3, 2)]
Make sure the induction variable is not exposed
>>> i = 20
>>> sum({i*i for i in range(100)})
328350
>>> i
20
Verify that syntax errors are raised for setcomps used as lvalues
>>> {y for y in (1,2)} = 10 # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
SyntaxError: ...
>>> {y for y in (1,2)} += 10 # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
SyntaxError: ...
Make a nested set comprehension that acts like set(range())
>>> def srange(n):
... return {i for i in range(n)}
>>> list(sorted(srange(10)))
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
Same again, only as a lambda expression instead of a function definition
>>> lrange = lambda n: {i for i in range(n)}
>>> list(sorted(lrange(10)))
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
Generators can call other generators:
>>> def grange(n):
... for x in {i for i in range(n)}:
... yield x
>>> list(sorted(grange(5)))
[0, 1, 2, 3, 4]
Make sure that None is a valid return value
>>> {None for i in range(10)}
set([None])
########### Tests for various scoping corner cases ############
Return lambdas that use the iteration variable as a default argument
>>> items = {(lambda i=i: i) for i in range(5)}
>>> {x() for x in items} == set(range(5))
True
Same again, only this time as a closure variable
>>> items = {(lambda: i) for i in range(5)}
>>> {x() for x in items}
set([4])
Another way to test that the iteration variable is local to the set comp
>>> items = {(lambda: i) for i in range(5)}
>>> i = 20
>>> {x() for x in items}
set([4])
And confirm that a closure can jump over the set comp scope
>>> items = {(lambda: y) for i in range(5)}
>>> y = 2
>>> {x() for x in items}
set([2])
We also repeat each of the above scoping tests inside a function
>>> def test_func():
... items = {(lambda i=i: i) for i in range(5)}
... return {x() for x in items}
>>> test_func() == set(range(5))
True
>>> def test_func():
... items = {(lambda: i) for i in range(5)}
... return {x() for x in items}
>>> test_func()
set([4])
>>> def test_func():
... items = {(lambda: i) for i in range(5)}
... i = 20
... return {x() for x in items}
>>> test_func()
set([4])
>>> def test_func():
... items = {(lambda: y) for i in range(5)}
... y = 2
... return {x() for x in items}
>>> test_func()
set([2])
"""
__test__ = {'doctests' : doctests}
def test_main(verbose=None):
import sys
from test import test_support
from test import test_setcomps
test_support.run_doctest(test_setcomps, verbose)
# verify reference counting
if verbose and hasattr(sys, "gettotalrefcount"):
import gc
counts = [None] * 5
for i in range(len(counts)):
test_support.run_doctest(test_setcomps, verbose)
gc.collect()
counts[i] = sys.gettotalrefcount()
print(counts)
if __name__ == "__main__":
test_main(verbose=True)
|
docker/site/wsgi.py | timgates42/django-leonardo | 102 | 12744460 | <gh_stars>100-1000
#!/usr/bin/env python
import os
import sys
from os.path import abspath, dirname, join, normpath
import django
import django.core.handlers.wsgi
from django.core.management import execute_from_command_line
sys.path.append('/app/site')
sys.path.append('/app/module')
sys.path.append('/app/settings')
os.environ['DJANGO_SETTINGS_MODULE'] = 'leonardo.settings'
django.setup()
application = django.core.handlers.wsgi.WSGIHandler()
|
lib-other/deterministiclib/svd.py | endolith/Truthcoin | 161 | 12744461 |
# Almost exact translation of the ALGOL SVD algorithm published in
# Numer. Math. 14, 403-420 (1970) by <NAME> and <NAME>
#
# by <NAME>, helicity314-stitch <at> yahoo <dot> com
#
# Pure Python SVD algorithm.
# Input: 2-D list (m by n) with m >= n
# Output: U,W V so that A = U*W*VT
# Note this program returns V not VT (=transpose(V))
# On error, a ValueError is raised.
#
# Here is the test case (first example) from Golub and Reinsch
#
# a = [[22.,10., 2., 3., 7.],
# [14., 7.,10., 0., 8.],
# [-1.,13.,-1.,-11., 3.],
# [-3.,-2.,13., -2., 4.],
# [ 9., 8., 1., -2., 4.],
# [ 9., 1.,-7., 5.,-1.],
# [ 2.,-6., 6., 5., 1.],
# [ 4., 5., 0., -2., 2.]]
#
# import svd
# import math
# u,w,vt = svd.svd(a)
# print w
#
# [35.327043465311384, 1.2982256062667619e-15,
# 19.999999999999996, 19.595917942265423, 0.0]
#
# the correct answer is (the order may vary)
#
# print (math.sqrt(1248.),20.,math.sqrt(384.),0.,0.)
#
# (35.327043465311391, 20.0, 19.595917942265423, 0.0, 0.0)
#
# transpose and matrix multiplication functions are also included
# to facilitate the solution of linear systems.
#
# Version 1.0 2005 May 01
import copy
from cdecimal import Decimal
ten=Decimal('10')
one=Decimal('1')
zero=Decimal('0')
half=Decimal('0.5')
def sqrt(n): return n**half  # previously: return sqrt_h(n, decimal.Decimal(1))
def svd(a):
'''Compute the singular value decomposition of array.'''
# <NAME> Reinsch state that eps should not be smaller than the
# machine precision, ie the smallest number
# for which 1+e>1. tol should be beta/e where beta is the smallest
# positive number representable in the computer.
eps = ten**(-15)# assumes double precision
tol = (ten**(-64))/eps
assert one+eps > one # if this fails, make eps bigger
assert tol > zero # if this fails, make tol bigger
itmax = 50
u = copy.deepcopy(a)
m = len(a)
n = len(a[0])
#if __debug__: print 'a is ',m,' by ',n
if m < n:
if __debug__: print 'Error: m is less than n'
raise ValueError,'SVD Error: m is less than n.'
e = [zero]*n # allocate arrays
q = [zero]*n
v = []
for k in range(n): v.append([zero]*n)
# Householder's reduction to bidiagonal form
g = zero
x = zero
for i in range(n):
e[i] = g
s = zero
l = i+1
for j in range(i,m): s += (u[j][i]*u[j][i])
if s <= tol:
g = zero
else:
f = u[i][i]
if f < zero:
g = sqrt(s)
else:
g = -sqrt(s)
h = f*g-s
u[i][i] = f-g
for j in range(l,n):
s = zero
for k in range(i,m): s += u[k][i]*u[k][j]
f = s/h
for k in range(i,m): u[k][j] = u[k][j] + f*u[k][i]
q[i] = g
s = zero
for j in range(l,n): s = s + u[i][j]*u[i][j]
if s <= tol:
g = zero
else:
f = u[i][i+1]
if f < zero:
g = sqrt(s)
else:
g = -sqrt(s)
h = f*g - s
u[i][i+1] = f-g
for j in range(l,n): e[j] = u[i][j]/h
for j in range(l,m):
s=zero
for k in range(l,n): s = s+(u[j][k]*u[i][k])
for k in range(l,n): u[j][k] = u[j][k]+(s*e[k])
y = abs(q[i])+abs(e[i])
if y>x: x=y
    # accumulation of right hand transformations
for i in range(n-1,-1,-1):
if g != zero:
h = g*u[i][i+1]
for j in range(l,n): v[j][i] = u[i][j]/h
for j in range(l,n):
s=zero
for k in range(l,n): s += (u[i][k]*v[k][j])
for k in range(l,n): v[k][j] += (s*v[k][i])
for j in range(l,n):
v[i][j] = zero
v[j][i] = zero
v[i][i] = one
g = e[i]
l = i
#accumulation of left hand transformations
for i in range(n-1,-1,-1):
l = i+1
g = q[i]
for j in range(l,n): u[i][j] = zero
if g != zero:
h = u[i][i]*g
for j in range(l,n):
s=zero
for k in range(l,m): s += (u[k][i]*u[k][j])
f = s/h
for k in range(i,m): u[k][j] += (f*u[k][i])
for j in range(i,m): u[j][i] = u[j][i]/g
else:
for j in range(i,m): u[j][i] = zero
u[i][i] += one
#diagonalization of the bidiagonal form
eps = eps*x
for k in range(n-1,-1,-1):
for iteration in range(itmax):
# test f splitting
for l in range(k,-1,-1):
goto_test_f_convergence = False
if abs(e[l]) <= eps:
# goto test f convergence
goto_test_f_convergence = True
break # break out of l loop
if abs(q[l-1]) <= eps:
# goto cancellation
break # break out of l loop
if not goto_test_f_convergence:
#cancellation of e[l] if l>0
c = zero
s = one
l1 = l-1
for i in range(l,k+1):
f = s*e[i]
e[i] = c*e[i]
if abs(f) <= eps:
#goto test f convergence
break
g = q[i]
h = pythag(f,g)
q[i] = h
c = g/h
s = -f/h
for j in range(m):
y = u[j][l1]
z = u[j][i]
u[j][l1] = y*c+z*s
u[j][i] = -y*s+z*c
# test f convergence
z = q[k]
if l == k:
# convergence
if z<zero:
#q[k] is made non-negative
q[k] = -z
for j in range(n):
v[j][k] = -v[j][k]
break # break out of iteration loop and move on to next k value
if iteration >= itmax-1:
if __debug__: print 'Error: no convergence.'
                # should this move on to the next k or exit with error??
#raise ValueError,'SVD Error: No convergence.' # exit the program with error
break # break out of iteration loop and move on to next k
# shift from bottom 2x2 minor
x = q[l]
y = q[k-1]
g = e[k-1]
h = e[k]
f = ((y-z)*(y+z)+(g-h)*(g+h))/(2*one*h*y)
g = pythag(f,one)
if f < 0:
f = ((x-z)*(x+z)+h*(y/(f-g)-h))/x
else:
f = ((x-z)*(x+z)+h*(y/(f+g)-h))/x
# next QR transformation
c = one
s = one
for i in range(l+1,k+1):
g = e[i]
y = q[i]
h = s*g
g = c*g
z = pythag(f,h)
e[i-1] = z
c = f/z
s = h/z
f = x*c+g*s
g = -x*s+g*c
h = y*s
y = y*c
for j in range(n):
x = v[j][i-1]
z = v[j][i]
v[j][i-1] = x*c+z*s
v[j][i] = -x*s+z*c
z = pythag(f,h)
q[i-1] = z
c = f/z
s = h/z
f = c*g+s*y
x = -s*g+c*y
for j in range(m):
y = u[j][i-1]
z = u[j][i]
u[j][i-1] = y*c+z*s
u[j][i] = -y*s+z*c
e[l] = zero
e[k] = f
q[k] = x
# goto test f splitting
#vt = transpose(v)
#return (u,q,vt)
return (u,q,v)
def pythag(a,b):
absa = abs(a)
absb = abs(b)
if absa > absb: return absa*sqrt(one+(absb/absa)**2)
else:
if absb == zero: return zero
else: return absb*sqrt(one+(absa/absb)**2)
def transpose(a):
'''Compute the transpose of a matrix.'''
m = len(a)
n = len(a[0])
at = []
for i in range(n): at.append([zero]*m)
for i in range(m):
for j in range(n):
at[j][i]=a[i][j]
return at
def matrixmultiply(a,b):
'''Multiply two matrices.
a must be two dimensional
b can be one or two dimensional.'''
am = len(a)
bm = len(b)
an = len(a[0])
try:
bn = len(b[0])
except TypeError:
bn = 1
if an != bm:
raise ValueError, 'matrixmultiply error: array sizes do not match.'
cm = am
cn = bn
if bn == 1:
c = [zero]*cm
else:
c = []
for k in range(cm): c.append([zero]*cn)
for i in range(cm):
for j in range(cn):
for k in range(an):
if bn == 1:
c[i] += a[i][k]*b[k]
else:
c[i][j] += a[i][k]*b[k][j]
return c
if __name__ == "__main__":
a = [[22.,10., 2., 3., 7.],
[14., 7.,10., 0., 8.],
[-1.,13.,-1.,-11., 3.],
[-3.,-2.,13., -2., 4.],
[ 9., 8., 1., -2., 4.],
[ 9., 1.,-7., 5.,-1.],
[ 2.,-6., 6., 5., 1.],
[ 4., 5., 0., -2., 2.]]
a=map(lambda row: map( lambda x: Decimal(x), row), a)
u,w,vt = svd(a)
print w
print([35.327043465311384, 1.2982256062667619e-15, 19.999999999999996, 19.595917942265423, 0.0])
|
tests/unit/objects/test_vendor.py | varunbheemaiah/python-quickbooks | 234 | 12744487 | import unittest
from quickbooks import QuickBooks
from quickbooks.objects.vendor import Vendor, ContactInfo
class VendorTests(unittest.TestCase):
def test_unicode(self):
vendor = Vendor()
vendor.DisplayName = "test"
self.assertEquals(str(vendor), "test")
def test_to_ref(self):
vendor = Vendor()
vendor.DisplayName = "test"
vendor.Id = 100
ref = vendor.to_ref()
self.assertEquals(ref.name, "test")
self.assertEquals(ref.type, "Vendor")
self.assertEquals(ref.value, 100)
def test_valid_object_name(self):
obj = Vendor()
client = QuickBooks()
result = client.isvalid_object_name(obj.qbo_object_name)
self.assertTrue(result)
class ContactInfoTests(unittest.TestCase):
def test_init(self):
contact_info = ContactInfo()
self.assertEquals(contact_info.Type, "")
self.assertEquals(contact_info.Telephone, None)
|
src/hca/hcat/mysite/hcat/urls.py | andypohl/kent | 171 | 12744500 | from django.urls import path
from . import views
from . import api
app_name = 'hcat'
urlpatterns = [
path('', views.index, name='index'),
path('project/<int:pk>/', views.ProjectDetailView.as_view(), name='project_detail'),
path('project', views.ProjectListView.as_view(), name='project_list'),
path('project/', views.ProjectListView.as_view(), name='project_list'),
]
|
pytest.py | nanjekyejoannah/pypy | 333 | 12744533 | #!/usr/bin/env python2
# PYTHON_ARGCOMPLETE_OK
"""
pytest: unit and functional testing with Python.
"""
__all__ = [
'main',
'UsageError',
'cmdline',
'hookspec',
'hookimpl',
'__version__',
]
if __name__ == '__main__': # if run as a script or by 'python -m pytest'
# we trigger the below "else" condition by the following import
import pytest
import sys
if sys.platform == 'win32':
        #Try to avoid opening a dialog box if one of the tests causes a system error
import ctypes
winapi = ctypes.windll.kernel32
SetErrorMode = winapi.SetErrorMode
SetErrorMode.argtypes=[ctypes.c_int]
SEM_FAILCRITICALERRORS = 1
SEM_NOGPFAULTERRORBOX = 2
SEM_NOOPENFILEERRORBOX = 0x8000
flags = SEM_FAILCRITICALERRORS | SEM_NOGPFAULTERRORBOX | SEM_NOOPENFILEERRORBOX
#Since there is no GetErrorMode, do a double Set
old_mode = SetErrorMode(flags)
SetErrorMode(old_mode | flags)
raise SystemExit(pytest.main())
# else we are imported
from _pytest.config import (
main, UsageError, _preloadplugins, cmdline,
hookspec, hookimpl
)
from _pytest import __version__
_preloadplugins() # to populate pytest.* namespace so help(pytest) works
|
pycaption/__init__.py | vpaul-dev/pycaption-github-release-notes | 183 | 12744616 | from .base import (
CaptionConverter, CaptionNode, Caption, CaptionList, CaptionSet,
)
from .dfxp import DFXPWriter, DFXPReader
from .microdvd import MicroDVDReader, MicroDVDWriter
from .sami import SAMIReader, SAMIWriter
from .srt import SRTReader, SRTWriter
from .scc import SCCReader, SCCWriter
from .scc.translator import translate_scc
from .webvtt import WebVTTReader, WebVTTWriter
from .exceptions import (
CaptionReadError, CaptionReadNoCaptions, CaptionReadSyntaxError,
)
__all__ = [
'CaptionConverter', 'DFXPReader', 'DFXPWriter', 'MicroDVDReader',
'MicroDVDWriter', 'SAMIReader', 'SAMIWriter', 'SRTReader', 'SRTWriter',
'SCCReader', 'SCCWriter', 'translate_scc', 'WebVTTReader', 'WebVTTWriter',
'CaptionReadError', 'CaptionReadNoCaptions', 'CaptionReadSyntaxError',
'detect_format', 'CaptionNode', 'Caption', 'CaptionList', 'CaptionSet'
]
SUPPORTED_READERS = (
DFXPReader, MicroDVDReader, WebVTTReader, SAMIReader, SRTReader, SCCReader,
)
def detect_format(caps):
"""
Detect the format of the provided caption string.
:returns: the reader class for the detected format.
"""
for reader in SUPPORTED_READERS:
if reader().detect(caps):
return reader
return None
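
# Illustrative usage of detect_format (the SRT payload below is a made-up example):
#
#   caps = "1\n00:00:01,000 --> 00:00:03,000\nHello, world.\n"
#   reader_class = detect_format(caps)
#   if reader_class:
#       caption_set = reader_class().read(caps)
#       vtt = WebVTTWriter().write(caption_set)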
|
osf/migrations/0136_add_ember_auth_register_waffle_flag.py | gaybro8777/osf.io | 628 | 12744631 | <reponame>gaybro8777/osf.io
# -*- coding: utf-8 -*-
# Generated by Django 1.11.13 on 2018-10-08 13:01
from __future__ import unicode_literals
from django.db import migrations
from osf import features
from osf.utils.migrations import AddWaffleFlags
class Migration(migrations.Migration):
dependencies = [
('osf', '0135_user_settings_waffles'),
]
operations = [
AddWaffleFlags([features.EMBER_AUTH_REGISTER]),
]
|
data_collection/gazette/spiders/sc_correia_pinto.py | kaiocp/querido-diario | 454 | 12744646 | <filename>data_collection/gazette/spiders/sc_correia_pinto.py
from gazette.spiders.base.fecam import FecamGazetteSpider
class ScCorreiaPintoSpider(FecamGazetteSpider):
name = "sc_correia_pinto"
FECAM_QUERY = "cod_entidade:77"
TERRITORY_ID = "4204558"
|
test/nnUNetV1/network_training/nnUNetTrainer_DiceBD.py | jianhuasong/medical-image-segmentation2 | 2,774 | 12744653 | <filename>test/nnUNetV1/network_training/nnUNetTrainer_DiceBD.py
from nnunet.training.loss_functions.boundary_loss import DC_and_BD_loss
from nnunet.training.network_training.nnUNetTrainer import nnUNetTrainer
class nnUNetTrainer_DiceBD(nnUNetTrainer):
def __init__(self, plans_file, fold, output_folder=None, dataset_directory=None, batch_dice=True, stage=None,
unpack_data=True, deterministic=True, fp16=False):
super().__init__(plans_file, fold, output_folder, dataset_directory, batch_dice, stage,
unpack_data, deterministic, fp16)
self.loss = DC_and_BD_loss({'batch_dice': self.batch_dice, 'smooth': 1e-5, 'do_bg': False, 'square': False},{})
|
build/go/gen_library_metadata.py | wwjiang007/fuchsia-1 | 210 | 12744656 | <filename>build/go/gen_library_metadata.py
#!/usr/bin/env python3.8
# Copyright 2018 The Fuchsia Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import argparse
import json
import os
import sys
class Source(object):
def __init__(self, name, path, file):
self.name = name
self.path = path
self.file = file
def __str__(self):
return '%s[%s]' % (self.name, self.path)
def __hash__(self):
return hash((self.name, self.path))
def __eq__(self, other):
return self.name == other.name and self.path == other.path
def get_sources(dep_files, extra_sources=None):
# Aggregate source data from dependencies.
sources = set()
if extra_sources:
sources.update(extra_sources)
for dep in dep_files:
with open(dep, 'r') as dep_file:
for name, path in json.load(dep_file).items():
sources.add(Source(name, path, dep))
    # Verify that no source name maps to more than one path.
sources_by_name = {}
for src in sources:
sources_by_name.setdefault(src.name, []).append(src)
for name, srcs in sources_by_name.items():
if len(srcs) <= 1:
continue
print('Error: source "%s" has multiple paths.' % name)
for src in srcs:
print(' - %s (%s)' % (src.path, src.file))
raise Exception('Could not aggregate sources')
return {s.name: s.path for s in sources}
def main():
parser = argparse.ArgumentParser()
name_group = parser.add_mutually_exclusive_group(required=True)
name_group.add_argument('--name', help='Name of the current library')
name_group.add_argument(
'--name-file',
help='Path to a file containing the name of the current library')
parser.add_argument(
'--source-dir',
help='Path to the library\'s source directory',
required=True)
sources_group = parser.add_mutually_exclusive_group(required=True)
sources_group.add_argument(
'--sources', help='List of source files', nargs='*')
sources_group.add_argument(
'--allow-globbing',
action='store_true',
help='Allow globbing the entire source directory')
parser.add_argument(
'--output', help='Path to the file to generate', required=True)
parser.add_argument(
'--deps', help='Dependencies of the current library', nargs='*')
args = parser.parse_args()
if args.name:
name = args.name
elif args.name_file:
with open(args.name_file, 'r') as name_file:
name = name_file.read()
current_sources = []
if args.sources:
# TODO(fxbug.dev/3037): verify that the sources are in a single folder.
for source in args.sources:
current_sources.append(
Source(
os.path.join(name, source),
os.path.join(args.source_dir, source), args.output))
elif args.allow_globbing:
current_sources.append(Source(name, args.source_dir, args.output))
result = get_sources(args.deps, extra_sources=current_sources)
with open(args.output, 'w') as output_file:
json.dump(result, output_file, indent=2, sort_keys=True)
if __name__ == '__main__':
sys.exit(main())
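
# Illustrative invocation (all names and paths below are hypothetical):
#
#   gen_library_metadata.py \
#       --name example.com/mylib \
#       --source-dir ../src/mylib \
#       --sources foo.go bar.go \
#       --deps dep_a.json \
#       --output mylib.json
#
# With an empty dep_a.json ({}), the generated mylib.json maps qualified source
# names to their paths:
#
#   {
#     "example.com/mylib/bar.go": "../src/mylib/bar.go",
#     "example.com/mylib/foo.go": "../src/mylib/foo.go"
#   }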
|
lambdaguard/utils/arnparse.py | CGarces/LambdaGuard | 354 | 12744661 | <filename>lambdaguard/utils/arnparse.py
"""
Copyright 2020 Skyscanner Ltd
Licensed under the Apache License, Version 2.0 (the "License"); you may not use
this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed
under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
"""
class ARN(object):
def __init__(
self,
full,
partition=None,
service=None,
region=None,
account_id=None,
resource_type=None,
resource=None,
):
self.full = full
self.partition = partition
self.service = service
self.region = region
self.account_id = account_id
self.resource_type = resource_type
self.resource = resource
def to_dict(self):
return {
"full": self.full,
"partition": self.partition,
"service": self.service,
"region": self.region,
"account_id": self.account_id,
"resource_type": self.resource_type,
"resource": self.resource,
}
def empty_str_to_none(str_):
if str_ == "":
return None
return str_
def arnparse(arn_str):
# https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html
if not arn_str.startswith("arn:") or len(arn_str.split(":")) < 4:
raise ValueError("Invalid ARN format: {}".format(arn_str))
elements = arn_str.split(":", 5)
elements += [""] * (6 - len(elements))
resource = elements[5].split("/")[-1]
resource_type = None
service = elements[2]
if service == "execute-api":
service = "apigateway"
if service == "iam":
resource_type = "/".join(elements[5].split("/")[:-1]) # role type
elif service == "sts":
res = elements[5].split("/")
if len(res) > 1:
resource_type = res[0] # assumed-role
resource = res[1] # group
elif service == "dynamodb":
resource_type = elements[5].split("/")[0] # table
resource = elements[5].split("/")[1] # table name
elif service == "s3":
if len(elements[5].split("/")) > 1:
resource_type = elements[5].split("/", 1)[1] # objects
resource = elements[5].split("/")[0] # bucket name
elif service == "kms":
resource_type = elements[5].split("/")[0]
elif service == "logs":
resource_type = elements[5].split(":")[0]
resource = ":".join(elements[5].split(":")[1:])
elif service == "apigateway":
resource_type, *resource = elements[5].split("/")
resource = "/".join(resource)
elif "/" in resource:
resource_type, resource = resource.split("/", 1)
elif ":" in resource:
resource_type, resource = resource.split(":", 1)
return ARN(
full=arn_str,
partition=elements[1],
service=service,
region=empty_str_to_none(elements[3]),
account_id=empty_str_to_none(elements[4]),
resource_type=resource_type,
resource=resource,
)
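

if __name__ == "__main__":
    # Minimal illustrative check; the ARNs below are made up.
    examples = [
        "arn:aws:lambda:us-east-1:123456789012:function:my-function",
        "arn:aws:s3:::my-bucket/path/to/object.txt",
        "arn:aws:iam::123456789012:role/service-role/my-role",
    ]
    for example in examples:
        print(arnparse(example).to_dict())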
|
205 Isomorphic Strings.py | ChiFire/legend_LeetCode | 872 | 12744667 | <reponame>ChiFire/legend_LeetCode
"""
Given two strings s and t, determine if they are isomorphic.
Two strings are isomorphic if the characters in s can be replaced to get t.
All occurrences of a character must be replaced with another character while preserving the order of characters. No two
characters may map to the same character but a character may map to itself.
For example,
Given "egg", "add", return true.
Given "foo", "bar", return false.
Given "paper", "title", return true.
Note:
You may assume both s and t have the same length.
"""
__author__ = 'Daniel'
class Solution:
def isIsomorphic(self, s, t):
"""
:param s:
:param t:
:rtype: bool
"""
m = {}
mapped = set() # case "ab", "aa"
for i in xrange(len(s)):
if s[i] not in m and t[i] not in mapped:
m[s[i]] = t[i]
mapped.add(t[i])
elif s[i] in m and m[s[i]] == t[i]:
pass
else:
return False
return True
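

if __name__ == "__main__":
    # Illustrative check against the examples from the problem statement above.
    solution = Solution()
    print solution.isIsomorphic("egg", "add")      # expect True
    print solution.isIsomorphic("foo", "bar")      # expect False
    print solution.isIsomorphic("paper", "title")  # expect True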
|
pydruid/async_client.py | trakru/pydruid | 444 | 12744691 | <reponame>trakru/pydruid<filename>pydruid/async_client.py
#
# Copyright 2016 Metamarkets Group Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import json
from pydruid.client import BaseDruidClient
try:
from tornado import gen
from tornado.httpclient import AsyncHTTPClient, HTTPError
except ImportError:
print("Warning: unable to import Tornado. The asynchronous client will not work.")
class AsyncPyDruid(BaseDruidClient):
"""
Asynchronous PyDruid client which mirrors functionality of the synchronous
PyDruid, but it executes queries
asynchronously (using an asynchronous http client from Tornado framework).
Returns Query objects that can be used for exporting query results into
TSV files or pandas.DataFrame objects
for subsequent analysis.
:param str url: URL of Broker node in the Druid cluster
:param str endpoint: Endpoint that Broker listens for queries on
:param dict defaults: (optional) Dict of parameters for the Async HTTP Client subclass
:param str http_client: Tornado HTTP client implementation to use.
Default: None (use simple_httpclient)
Example
.. code-block:: python
:linenos:
>>> from pydruid.async_client import *
>>> query = AsyncPyDruid('http://localhost:8083', 'druid/v2/')
>>> top = yield query.topn(
datasource='twitterstream',
granularity='all',
intervals='2013-10-04/pt1h',
aggregations={"count": doublesum("count")},
dimension='user_name',
filter = Dimension('user_lang') == 'en',
metric='count',
threshold=2
)
>>> print json.dumps(top.query_dict, indent=2)
>>> {
"metric": "count",
"aggregations": [
{
"type": "doubleSum",
"fieldName": "count",
"name": "count"
}
],
"dimension": "user_name",
"filter": {
"type": "selector",
"dimension": "user_lang",
"value": "en"
},
"intervals": "2013-10-04/pt1h",
"dataSource": "twitterstream",
"granularity": "all",
"threshold": 2,
"queryType": "topN"
}
>>> print top.result
>>> [{'timestamp': '2013-10-04T00:00:00.000Z',
'result': [{'count': 7.0, 'user_name': 'user_1'},
{'count': 6.0, 'user_name': 'user_2'}]}]
>>> df = top.export_pandas()
>>> print df
>>> count timestamp user_name
0 7 2013-10-04T00:00:00.000Z user_1
1 6 2013-10-04T00:00:00.000Z user_2
"""
def __init__(self, url, endpoint, defaults=None, http_client=None):
super(AsyncPyDruid, self).__init__(url, endpoint)
self.async_http_defaults = defaults
self.http_client = http_client
@gen.coroutine
def _post(self, query):
AsyncHTTPClient.configure(self.http_client, defaults=self.async_http_defaults)
http_client = AsyncHTTPClient()
try:
headers, querystr, url = self._prepare_url_headers_and_body(query)
response = yield http_client.fetch(
url, method="POST", headers=headers, body=querystr
)
except HTTPError as e:
self.__handle_http_error(e, query)
else:
query.parse(response.body.decode("utf-8"))
raise gen.Return(query)
@staticmethod
def __handle_http_error(e, query):
err = None
if e.code == 500:
# has Druid returned an error?
try:
err = json.loads(e.response.body.decode("utf-8"))
except ValueError:
pass
else:
err = err.get("error", None)
raise IOError(
"{0} \n Druid Error: {1} \n Query is: {2}".format(
e, err, json.dumps(query.query_dict, indent=4)
)
)
@gen.coroutine
def topn(self, **kwargs):
query = self.query_builder.topn(kwargs)
result = yield self._post(query)
raise gen.Return(result)
@gen.coroutine
def timeseries(self, **kwargs):
query = self.query_builder.timeseries(kwargs)
result = yield self._post(query)
raise gen.Return(result)
@gen.coroutine
def groupby(self, **kwargs):
query = self.query_builder.groupby(kwargs)
result = yield self._post(query)
raise gen.Return(result)
@gen.coroutine
def segment_metadata(self, **kwargs):
query = self.query_builder.segment_metadata(kwargs)
result = yield self._post(query)
raise gen.Return(result)
@gen.coroutine
def time_boundary(self, **kwargs):
query = self.query_builder.time_boundary(kwargs)
result = yield self._post(query)
raise gen.Return(result)
@gen.coroutine
def select(self, **kwargs):
query = self.query_builder.select(kwargs)
result = yield self._post(query)
raise gen.Return(result)
|
python/paddle/fluid/tests/unittests/test_dataset_dataloader.py | zmxdream/Paddle | 17,085 | 12744695 | <filename>python/paddle/fluid/tests/unittests/test_dataset_dataloader.py
# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import paddle
import paddle.fluid as fluid
import numpy as np
import six
import os
import unittest
from simple_nets import simple_fc_net_with_inputs
BATCH_SIZE = 32
BATCH_NUM = 10
EPOCH_NUM = 4
IMAGE_SHAPE = [2, 3]
LABEL_SHAPE = [1]
ALL_WRITTEN_FILES = set()
def get_place_string(p):
    if isinstance(p, (fluid.CPUPlace, fluid.CUDAPlace)):
tmp = fluid.core.Place()
tmp.set_place(p)
p = tmp
if p._type() == fluid.CPUPlace()._type():
return 'CPUPlace()'
else:
return 'CUDAPlace()'
def remove_all_written_files():
for filename in ALL_WRITTEN_FILES:
os.remove(filename)
def write_reader_data_to_file(filename, reader):
ALL_WRITTEN_FILES.add(filename)
with open(filename, 'w') as fid:
for instance_list in reader():
for i, instance in enumerate(instance_list):
instance = np.reshape(instance, [instance.size, ])
fid.write(str(instance.size) + ' ')
fid.write(' '.join(map(str, instance)))
fid.write(' ')
fid.write('\n')
def fake_reader(batch_size=BATCH_SIZE, batch_num=BATCH_NUM):
def __reader__():
iteration = BATCH_SIZE * BATCH_NUM
iteration = int(iteration + BATCH_SIZE / 2)
for _ in six.moves.range(iteration):
image = np.random.random(size=IMAGE_SHAPE).astype('float32')
label = np.random.random_integers(
size=LABEL_SHAPE, low=0, high=9).astype('int64')
yield image, label
return __reader__
class DatasetLoaderTestBase(unittest.TestCase):
def setUp(self):
self.dataset_name = "QueueDataset"
self.drop_last = False
def tearDown(self):
        # NOTE: cleanup is currently short-circuited; written test files are left on disk.
        return
        remove_all_written_files()
def build_network(self):
main_prog = fluid.Program()
startup_prog = fluid.Program()
with fluid.program_guard(main_prog, startup_prog):
image = fluid.layers.data(
name='image', shape=IMAGE_SHAPE, dtype='float32')
label = fluid.layers.data(
name='label', shape=LABEL_SHAPE, dtype='int64')
simple_fc_net_with_inputs(image, label)
return main_prog, startup_prog, [image, label]
def check_batch_number(self, place, randomize_batch_num=False):
main_prog, startup_prog, feeds = self.build_network()
if self.dataset_name == "QueueDataset":
dataset = paddle.distributed.QueueDataset()
else:
dataset = paddle.distributed.InMemoryDataset()
dataset._set_batch_size(BATCH_SIZE)
if isinstance(place, fluid.CPUPlace):
file_num = 10
os.environ['CPU_NUM'] = str(file_num)
places = fluid.cpu_places()
use_cuda = False
else:
file_num = fluid.core.get_cuda_device_count()
places = fluid.cuda_places()
use_cuda = True
filelist = []
if file_num > 1 and randomize_batch_num:
random_delta_batch_size = np.random.random_integers(
low=-BATCH_NUM / 2, high=BATCH_NUM / 2, size=[file_num])
random_delta_batch_size[-1] = -int(
np.sum(random_delta_batch_size[0:-1]))
else:
random_delta_batch_size = np.zeros(shape=[file_num])
for i in six.moves.range(file_num):
filename = 'dataset_test_{}.txt'.format(i)
filelist.append(filename)
write_reader_data_to_file(
filename,
fake_reader(batch_num=BATCH_NUM + random_delta_batch_size[i]))
dataset.set_filelist(filelist)
dataset._set_use_var(feeds)
dataset._set_pipe_command("cat")
if self.dataset_name == 'InMemoryDataset':
dataset.load_into_memory()
dataloader = fluid.io.DataLoader.from_dataset(
dataset=dataset, places=places, drop_last=self.drop_last)
prog = fluid.CompiledProgram(main_prog).with_data_parallel()
exe = fluid.Executor(place)
exe.run(startup_prog)
for _ in six.moves.range(EPOCH_NUM):
has_complete_batch = False
for batch_id, data in enumerate(dataloader):
self.assertEquals(len(places), len(data))
for idx, data_on_each_device in enumerate(data):
image = data_on_each_device["image"]
label = data_on_each_device["label"]
if self.drop_last:
batch_size = BATCH_SIZE
else:
if batch_id == BATCH_NUM:
batch_size = BATCH_SIZE / 2
else:
batch_size = BATCH_SIZE
self.assertEquals(image.shape()[1:], IMAGE_SHAPE)
self.assertTrue(
image._place()._equals(places[idx]),
msg=get_place_string(image._place()) + ' vs ' +
get_place_string(places[idx]))
if self.drop_last:
self.assertEquals(image.shape()[0], BATCH_SIZE)
else:
self.assertTrue(image.shape()[0] == BATCH_SIZE or
image.shape()[0] == BATCH_SIZE / 2)
self.assertEquals(label.shape()[1:], LABEL_SHAPE)
self.assertTrue(label._place()._equals(places[idx]))
if self.drop_last:
self.assertEquals(label.shape()[0], BATCH_SIZE)
else:
self.assertTrue(label.shape()[0] == BATCH_SIZE or
label.shape()[0] == BATCH_SIZE / 2)
self.assertEquals(image.shape()[0], label.shape()[0])
if image.shape()[0] == BATCH_SIZE:
has_complete_batch = True
exe.run(prog, feed=data)
self.assertTrue(has_complete_batch)
def get_all_places(self):
p = [fluid.CPUPlace()]
if fluid.is_compiled_with_cuda():
p.append(fluid.CUDAPlace(0))
return p
def test_batch_number_with_same_length_files(self):
for p in self.get_all_places():
with fluid.scope_guard(fluid.Scope()):
self.check_batch_number(place=p, randomize_batch_num=False)
def test_batch_number_with_different_length_files(self):
for p in self.get_all_places():
with fluid.scope_guard(fluid.Scope()):
self.check_batch_number(place=p, randomize_batch_num=True)
class QueueDatasetTestWithoutDropLast(DatasetLoaderTestBase):
def setUp(self):
self.dataset_name = "QueueDataset"
self.drop_last = True
class InMemoryDatasetTestWithoutDropLast(DatasetLoaderTestBase):
def setUp(self):
self.dataset_name = "InMemoryDataset"
self.drop_last = False
class InMemoryDatasetTestWithDropLast(DatasetLoaderTestBase):
def setUp(self):
self.dataset_name = "InMemoryDataset"
self.drop_last = True
if __name__ == '__main__':
unittest.main()
|
artemis/experiments/decorators.py | wouterkool/artemis | 235 | 12744712 | <filename>artemis/experiments/decorators.py
from collections import OrderedDict
from artemis.experiments.experiment_record_view import show_record, compare_experiment_records
from artemis.experiments.experiments import Experiment
from artemis.general.display import sensible_str
from artemis.general.should_be_builtins import uniquify_duplicates, izip_equal
def experiment_function(f):
"""
Use this decorator (@experiment_function) on a function that you want to run. e.g.
.. code-block:: python
@experiment_function
def demo_my_experiment(a=1, b=2, c=3):
...
This turns your function demo_my_experiment into an experiment. It can still be called as a normal function, but
it now has can also be called with the methods of an Experiment object (eg. demo_my_experiment.run()).
"""
return ExperimentFunction()(f)
def experiment_root(f):
"""
Use this decorator on a function that you want to build variants off of:
.. code-block:: python
@experiment_root
def demo_my_experiment(a, b=2, c=3):
...
The root experiment is not runnable by itself, and will not appear in the list in the browse experiments UI, but
you can call ``demo_my_experiment.add_variant(...)`` to create runnable variants.
"""
return ExperimentFunction(is_root=True)(f)
class ExperimentFunction(object):
"""
This is the most general decorator. You can use this to add details on the experiment.
"""
def __init__(self, show = show_record, compare = compare_experiment_records, display_function=None, comparison_function=None, one_liner_function=sensible_str, is_root=False):
"""
:param show: A function that is called when you "show" an experiment record in the UI. It takes an experiment
record as an argument.
:param compare: A function that is called when you "compare" a set of experiment records in the UI.
:param display_function: [Deprecated] A function that takes the results (whatever your experiment returns) and displays them.
:param comparison_function: [Deprecated] A function that takes an OrderedDict<experiment_name, experiment_return_value>.
You can optionally define this function to compare the results of different experiments.
You can use call this via the UI with the compare_experiment_results command.
:param one_liner_function: A function that takes your results and returns a 1 line string summarizing them.
:param is_root: True to make this a root experiment - so that it is not listed to be run itself.
"""
self.show = show
self.compare = compare
if display_function is not None:
assert show is show_record, "You can't set both display function and show. (display_function is deprecated)"
show = lambda rec: display_function(rec.get_result())
if comparison_function is not None:
            assert compare is compare_experiment_records, "You can't set both comparison_function and compare. (comparison_function is deprecated)"
def compare(records):
record_experiment_ids_uniquified = uniquify_duplicates(rec.get_experiment_id() for rec in records)
comparison_function(OrderedDict((unique_rid, rec.get_result()) for unique_rid, rec in izip_equal(record_experiment_ids_uniquified, records)))
self.show = show
self.compare = compare
self.is_root = is_root
self.one_liner_function = one_liner_function
def __call__(self, f):
f.is_base_experiment = True
ex = Experiment(
name=f.__name__,
function=f,
show=self.show,
compare = self.compare,
one_liner_function=self.one_liner_function,
is_root=self.is_root
)
return ex |
td4a/controllers/validate.py | cidrblock/td4a | 171 | 12744720 | <filename>td4a/controllers/validate.py
""" /retrieve
"""
import json
from flask import current_app as app
from flask import request, jsonify, Blueprint
from td4a.models.exception_handler import ExceptionHandler, HandledException
from td4a.models.td4ayaml import Td4aYaml
from jsonschema import validate
from jsonschema import Draft4Validator, FormatChecker
from jsonschema.exceptions import UnknownType
api_validate = Blueprint('api_validate', __name__) # pylint: disable=invalid-name
@ExceptionHandler
def parse_yaml(yamul, typ):
_ = typ
yaml = Td4aYaml()
obj = yaml.load(yamul)
return obj
def validation(payload):
""" Validate schema from data
"""
try:
yaml_safe = Td4aYaml(typ='safe')
yaml = Td4aYaml()
data = yaml_safe.load(payload['p1'])
schema = yaml_safe.load(payload['p2'])
errors = []
v = Draft4Validator(schema, format_checker=FormatChecker())
for error in sorted(v.iter_errors(data)):
errors.append(error.message)
if errors:
return {"p3": yaml.dump({"messages":errors})}
return {"p3": yaml.dump({"messages":["validation passed"]})}
except UnknownType as error:
error_message = str(error)
lines = error_message.splitlines()
message = [x for x in lines if x.startswith('Unknown type')]
return {"p3": yaml.dump({"messages":message})}
@api_validate.route('/validate', methods=['POST'])
def rest_validate():
""" Build a schema for data
"""
try:
payload = request.json
        # parse both payload documents up front so YAML syntax errors surface as HandledException
        data = parse_yaml(yamul=payload['p1'], typ='p1')
        schema = parse_yaml(yamul=payload['p2'], typ='p2')
response = validation(payload=payload)
return jsonify(response)
except HandledException as error:
return jsonify(error.json())
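
# Illustrative request/response for this endpoint (hypothetical payload):
#
#   POST /validate
#   {"p1": "name: td4a\nport: 8080\n",
#    "p2": "type: object\nproperties:\n  name: {type: string}\n  port: {type: integer}\n"}
#
# would return {"p3": "messages:\n- validation passed\n"}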
|