id (stringlengths 1–8) | text (stringlengths 6–1.05M) | dataset_id (stringclasses 1)
---|---|---
5079951
|
# -*- coding: UTF-8 -*-
import logging
from typing import Callable, Union
from homeassistant.components.light import (
LightEntity,
SUPPORT_EFFECT,
ATTR_EFFECT,
)
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import STATE_ON, CONF_HOST, CONF_PORT
from homeassistant.helpers.restore_state import RestoreEntity
from homeassistant.helpers.typing import ConfigType, HomeAssistantType
from .pydigitalstrom.client import DSClient
from .pydigitalstrom import constants as dsconst
from .pydigitalstrom.devices.scene import DSScene, DSColorScene
from .pydigitalstrom.websocket import DSWebsocketEventListener
from .const import DOMAIN
from .util import slugify_entry
_LOGGER = logging.getLogger(__name__)
async def async_setup_platform(
hass: HomeAssistantType,
config: ConfigType,
async_add_devices: Callable,
discovery_info: dict = None,
):
"""Platform uses config entry setup."""
pass
async def async_setup_entry(
hass: HomeAssistantType, entry: ConfigEntry, async_add_entities: Callable
) -> None:
entry_slug: str = slugify_entry(
host=entry.data[CONF_HOST], port=entry.data[CONF_PORT]
)
client: DSClient = hass.data[DOMAIN][entry_slug]["client"]
listener: DSWebsocketEventListener = hass.data[DOMAIN][entry_slug]["listener"]
    devices: list = []
scenes: dict = client.get_scenes()
scene: Union[DSScene, DSColorScene]
for scene in scenes.values():
# only handle light (color 1) scenes
if not isinstance(scene, DSColorScene) or scene.color != dsconst.GROUP_LIGHTS:
continue
# not an area or broadcast turn off scene
if scene.scene_id not in (
dsconst.SCENES["PRESET"]["SCENE_PRESET0"],
dsconst.SCENES["AREA"]["SCENE_AREA1_OFF"],
dsconst.SCENES["AREA"]["SCENE_AREA2_OFF"],
dsconst.SCENES["AREA"]["SCENE_AREA3_OFF"],
dsconst.SCENES["AREA"]["SCENE_AREA4_OFF"],
):
continue
# get turn on counterpart
scene_on: Union[DSScene, DSColorScene] = scenes.get(
f"{scene.zone_id}_{scene.color}_{scene.scene_id + 5}", None,
)
        effects = {}
        # get the Preset X2-X4 counterparts for the matching Preset X0 scene
        preset_groups = {
            dsconst.SCENES["PRESET"]["SCENE_PRESET0"]: ("SCENE_PRESET2", "SCENE_PRESET3", "SCENE_PRESET4"),
            dsconst.SCENES["PRESET"]["SCENE_PRESET10"]: ("SCENE_PRESET12", "SCENE_PRESET13", "SCENE_PRESET14"),
            dsconst.SCENES["PRESET"]["SCENE_PRESET20"]: ("SCENE_PRESET22", "SCENE_PRESET23", "SCENE_PRESET24"),
            dsconst.SCENES["PRESET"]["SCENE_PRESET30"]: ("SCENE_PRESET32", "SCENE_PRESET33", "SCENE_PRESET34"),
        }
        if scene.scene_id in preset_groups:
            for slot, preset_name in zip(("preset2", "preset3", "preset4"), preset_groups[scene.scene_id]):
                effects[slot] = scenes.get(
                    f"{scene.zone_id}_{scene.color}_{dsconst.SCENES['PRESET'][preset_name]}"
                )
# no turn on scene found, skip
if not scene_on:
continue
# add light
        preset_names = {name: preset.name if preset else None for name, preset in effects.items()}
        _LOGGER.info(
            f"adding light {scene.scene_id}: Off: {scene.name}, On: {scene_on.name}, Presets: {preset_names}"
        )
devices.append(
DigitalstromLight(
hass=hass, scene_on=scene_on, scene_off=scene, listener=listener, effects=effects
)
)
    async_add_entities(devices)
class DigitalstromLight(RestoreEntity, LightEntity):
def __init__(
self,
hass: HomeAssistantType,
scene_on: Union[DSScene, DSColorScene],
scene_off: Union[DSScene, DSColorScene],
listener: DSWebsocketEventListener,
effects,
*args,
**kwargs,
):
self._hass: HomeAssistantType = hass
self._scene_on: Union[DSScene, DSColorScene] = scene_on
self._scene_off: Union[DSScene, DSColorScene] = scene_off
self._listener: DSWebsocketEventListener = listener
self._state: bool = None
self._scene_effects = effects
self._effect = ""
super().__init__(*args, **kwargs)
self.register_callback()
@property
def supported_features(self):
"""Flag supported features."""
support = SUPPORT_EFFECT
return support
@property
def effect(self):
"""Return the name of the currently running effect."""
return self._effect
@property
def effect_list(self):
"""Return the list of supported effects for this light."""
return ["PRESET1","PRESET2", "PRESET3", "PRESET4"]
def register_callback(self):
async def event_callback(event: dict) -> None:
# sanity checks
if "name" not in event:
return
if event["name"] != "callScene":
return
if "properties" not in event:
return
if "sceneID" not in event["properties"]:
return
if "groupID" not in event["properties"]:
return
if "zoneID" not in event["properties"]:
return
# cast event data
zone_id: int = int(event["properties"]["zoneID"])
group_id: int = int(event["properties"]["groupID"])
scene_id: int = int(event["properties"]["sceneID"])
# device turned on or broadcast turned on
if (
self._scene_on.zone_id == zone_id
and self._scene_on.color == group_id
and (self._scene_on.scene_id == scene_id or dsconst.SCENES["PRESET"]["SCENE_PRESET1"] == scene_id)
):
self._state = True
await self.async_update_ha_state()
# device turned off or broadcast turned off
elif (
self._scene_off.zone_id == zone_id
and self._scene_off.color == group_id
and (self._scene_off.scene_id == scene_id or dsconst.SCENES["PRESET"]["SCENE_PRESET0"] == scene_id)
):
self._state = False
await self.async_update_ha_state()
_LOGGER.debug(
f"Register callback for {self._scene_off.name}"
)
self._listener.register(callback=event_callback)
@property
def name(self) -> str:
return self._scene_off.name
@property
def unique_id(self) -> str:
return f"dslight_{self._scene_off.unique_id}"
@property
def available(self) -> bool:
return True
@property
def is_on(self) -> bool:
return self._state
async def async_turn_on(self, **kwargs) -> None:
if ATTR_EFFECT in kwargs:
_LOGGER.debug(
f"call turn on with Effect {kwargs[ATTR_EFFECT]}"
)
if kwargs[ATTR_EFFECT] == "PRESET1":
await self._scene_on.turn_on()
self._effect = "PRESET1"
if kwargs[ATTR_EFFECT] == "PRESET2":
await self._scene_effects["preset2"].turn_on()
self._effect = "PRESET2"
if kwargs[ATTR_EFFECT] == "PRESET3":
await self._scene_effects["preset3"].turn_on()
self._effect = "PRESET3"
if kwargs[ATTR_EFFECT] == "PRESET4":
await self._scene_effects["preset4"].turn_on()
self._effect = "PRESET4"
else:
await self._scene_on.turn_on()
self._effect = "PRESET1"
self._state = True
async def async_turn_off(self, **kwargs) -> None:
await self._scene_off.turn_on()
self._effect = ""
self._state = False
async def async_added_to_hass(self) -> None:
await super().async_added_to_hass()
        state = await self.async_get_last_state()
if not state:
return
_LOGGER.debug(
f"trying to restore state of entity {self.entity_id} to {state.state}"
)
self._state = state.state == STATE_ON
    @property
    def should_poll(self) -> bool:
return False
@property
def device_info(self) -> dict:
"""Return information about the device."""
return {
"identifiers": {(DOMAIN, self._scene_off.unique_id)},
"name": self._scene_off.name,
"model": "DSLight",
"manufacturer": "digitalSTROM AG",
}
|
StarcoderdataPython
|
6584538
|
<reponame>ryanstwrt/FRIDGE
import fridge.Assembly.FuelAssembly as FuelAssembly
import fridge.Assembly.SmearAssembly as SmearAssembly
import fridge.Assembly.Assembly as Assembly
import fridge.driver.global_variables as gb
import numpy as np
global_vars = gb.GlobalVariables()
global_vars.read_input_file('A271_Assembly_Test')
assembly_info = [global_vars.file_name, '01A01', global_vars, None]
def test_assembly():
"""Check the base assembly init"""
baseAssembly = Assembly.Assembly(assembly_info)
assert baseAssembly.assembly_file_name == 'A271_Test'
assert baseAssembly.assemblyPosition == '01A01'
assert baseAssembly.universe == 100
assert baseAssembly.cellNum == 100
assert baseAssembly.surfaceNum == 100
assert baseAssembly.materialNum == 100
assert baseAssembly.assemblyType == ''
assert baseAssembly.pinsPerAssembly == 0
assert baseAssembly.assemblyPitch == 0
assert baseAssembly.ductInnerFlatToFlat == 0
assert baseAssembly.ductOuterFlatToFlat == 0
assert baseAssembly.ductOuterFlatToFlat == 0
assert baseAssembly.assemblyHeight == 0
assert baseAssembly.coolantMaterial == ''
assert baseAssembly.assemblyMaterial == ''
def test_updateIdentifiers():
"""Check the updateIdentifiers function"""
baseAssembly = Assembly.Assembly(assembly_info)
assert baseAssembly.universe == 100
assert baseAssembly.cellNum == 100
assert baseAssembly.surfaceNum == 100
assert baseAssembly.materialNum == 100
baseAssembly.update_global_identifiers(False)
assert baseAssembly.universe == 100
assert baseAssembly.cellNum == 101
assert baseAssembly.surfaceNum == 101
assert baseAssembly.materialNum == 101
def test_fuel_assembly():
"""Check the Fuel Assembly subclass of Assembly."""
a = FuelAssembly.FuelAssembly(assembly_info)
assert a.universe == 104
assert a.cellNum == 114
assert a.surfaceNum == 114
assert a.materialNum == 114
assert a.fuel is not None
assert a.fuel is not None
assert a.bond is not None
assert a.clad is not None
assert a.coolant is not None
assert a.blankUniverse is not None
assert a.blankCoolant is not None
assert a.latticeUniverse is not None
assert a.fuelUniverse is not None
assert a.innerDuct is not None
assert a.duct is not None
assert a.plenum is not None
assert a.upperReflector is not None
assert a.lowerReflector is not None
assert a.upperSodium is not None
assert a.lowerSodium is not None
assert a.assemblyShell is not None
assert a.everythingElse is not None
assert a.cladOD == 0.53
assert a.cladID == 0.53 - 0.037 * 2
assert np.allclose(a.fuelDiameter, 0.394907)
assert a.fuelPitch == 0.66144
assert a.wireWrapDiameter == 0.126
assert a.fuelHeight == 60
assert a.fuelMaterial == '5Pu22U10Zr'
assert a.cladMaterial == 'HT9'
assert a.bondMaterial == 'LiquidNa'
assert a.plenumHeight == 60
assert a.plenumMaterial == {'HT9': 0.25, 'Void': 0.25, 'LiquidNa': 0.5}
assert a.plenumPosition == [0, 0, 60.6]
assert a.reflectorHeight == 60
assert a.reflectorMaterial == {'LiquidNa': 0.20, 'HT9': 0.80}
assert a.fuel.cellCard == '100 100 0.04574 -100 u=101 imp:n=1 $Pin: Fuel'
assert a.fuel.surfaceCard == '100 RCC 0.0 0.0 0.0 0 0 60.0 0.19745 $Pin: Fuel'
assert a.bond.cellCard == '101 101 0.02428 100 -101 u=101 imp:n=1 $Pin: Bond'
assert a.bond.surfaceCard == '101 RCC 0.0 0.0 0.0 0 0 60.6 0.228 $Pin: Bond - 1% higher than fuel'
assert a.clad.cellCard == '102 102 0.08598 101 -102 u=101 imp:n=1 $Pin: Clad'
assert a.clad.surfaceCard == '102 RCC 0.0 0.0 0.0 0 0 60.6 0.265 $Pin: Clad - 1% higher than fuel'
assert a.coolant.cellCard == '103 103 0.02929 102 u=101 imp:n=1 $Pin: Wirewrap + Coolant'
assert a.coolant.surfaceCard == '103 RHP 0.0 0.0 0.0 0 0 60.6 0.66144 0 0 $Pin: Coolant - 1% higher than fuel'
assert a.blankCoolant.cellCard == '104 104 0.02428 -103 u=102 imp:n=1 $Pin: Blank Pin Coolant'
assert a.blankCoolant.surfaceCard == '104 RHP 0.0 0.0 0.0 0 0 60.6 0.33072 0 0 $Pin: Blank Pin - 1%\
higher than fuel'
assert a.fuelUniverse.cellCard == '105 0 -104 lat=2 u=103 imp:n=1\n'\
' fill=-10:10 -10:10 0:0\n'\
' 102 102 102 102 102 102 102 102 102 102\n'\
' 102 102 102 102 102 102 102 102 102 102\n'\
' 102 102 102 102 102 102 102 102 102 102\n'\
' 102 101 101 101 101 101 101 101 101 101\n'\
' 101 102 102 102 102 102 102 102 102 102\n'\
' 102 101 101 101 101 101 101 101 101 101\n'\
' 101 101 102 102 102 102 102 102 102 102\n'\
' 102 101 101 101 101 101 101 101 101 101\n'\
' 101 101 101 102 102 102 102 102 102 102\n'\
' 102 101 101 101 101 101 101 101 101 101\n'\
' 101 101 101 101 102 102 102 102 102 102\n'\
' 102 101 101 101 101 101 101 101 101 101\n'\
' 101 101 101 101 101 102 102 102 102 102\n'\
' 102 101 101 101 101 101 101 101 101 101\n'\
' 101 101 101 101 101 101 102 102 102 102\n'\
' 102 101 101 101 101 101 101 101 101 101\n'\
' 101 101 101 101 101 101 101 102 102 102\n'\
' 102 101 101 101 101 101 101 101 101 101\n'\
' 101 101 101 101 101 101 101 101 102 102\n'\
' 102 101 101 101 101 101 101 101 101 101\n'\
' 101 101 101 101 101 101 101 101 101 102\n'\
' 102 101 101 101 101 101 101 101 101 101\n'\
' 101 101 101 101 101 101 101 101 101 101\n'\
' 102 102 101 101 101 101 101 101 101 101\n'\
' 101 101 101 101 101 101 101 101 101 101\n'\
' 102 102 102 101 101 101 101 101 101 101\n'\
' 101 101 101 101 101 101 101 101 101 101\n'\
' 102 102 102 102 101 101 101 101 101 101\n'\
' 101 101 101 101 101 101 101 101 101 101\n'\
' 102 102 102 102 102 101 101 101 101 101\n'\
' 101 101 101 101 101 101 101 101 101 101\n'\
' 102 102 102 102 102 102 101 101 101 101\n'\
' 101 101 101 101 101 101 101 101 101 101\n'\
' 102 102 102 102 102 102 102 101 101 101\n'\
' 101 101 101 101 101 101 101 101 101 101\n'\
' 102 102 102 102 102 102 102 102 101 101\n'\
' 101 101 101 101 101 101 101 101 101 101\n'\
' 102 102 102 102 102 102 102 102 102 101\n'\
' 101 101 101 101 101 101 101 101 101 101\n'\
' 102 102 102 102 102 102 102 102 102 102\n'\
' 101 101 101 101 101 101 101 101 101 101\n'\
' 102 102 102 102 102 102 102 102 102 102\n'\
' 102 102 102 102 102 102 102 102 102 102\n'\
' 102 102 102 102 102 102 102 102 102 102\n'\
' 102'
assert a.innerDuct.cellCard == '106 0 -106 u=100 fill=103 imp:n=1 $Assembly: Inner Portion of Assembly'
assert a.innerDuct.surfaceCard == '106 RHP 0.0 0.0 0.0 0 0 60.6 0 5.505 0 $Assembly: Duct Inner Surface'
assert a.plenum.cellCard == '107 107 0.03364 -107 u=100 imp:n=1 $Assembly: Plenum'
assert a.plenum.surfaceCard == '107 RHP 0.0 0.0 60.6 0 0 60.0 0 5.505 0 $Assembly: Plenum'
assert a.upperReflector.cellCard == '108 108 0.07364 -108 u=100 imp:n=1 $Assembly: Upper Reflector'
assert a.upperReflector.surfaceCard == '108 RHP 0.0 0.0 120.6 0 0 60.0 0 5.505 0 $Assembly: Upper Reflector'
assert a.lowerReflector.cellCard == '109 109 0.07364 -109 u=100 imp:n=1 $Assembly: Lower Reflector'
assert a.lowerReflector.surfaceCard == '109 RHP 0.0 0.0 -60.0 0 0 60.0 0 5.505 0 $Assembly: Lower Reflector'
assert a.duct.cellCard == '110 110 0.08598 106 109 108 107 -110 u=100 imp:n=1 $Assembly: Assembly Duct'
assert a.duct.surfaceCard == '110 RHP 0.0 0.0 -60.0 0 0 240.6 0 5.80529 0 $Assembly: Duct Outer Surface'
assert a.lowerSodium.cellCard == '111 111 0.02428 -111 u=100 imp:n=1 $Assembly: Lower Coolant'
assert a.lowerSodium.surfaceCard == '111 RHP 0.0 0.0 -99.8 0 0 39.8 0 5.80529 0 $Assembly: Lower Coolant'
assert a.upperSodium.cellCard == '112 112 0.02428 -112 u=100 imp:n=1 $Assembly: Upper Coolant'
assert a.upperSodium.surfaceCard == '112 RHP 0.0 0.0 180.6 0 0 39.7 0 5.80529 0 $Assembly: Upper Coolant'
assert a.assemblyShell.cellCard == '113 0 -113 fill=100 imp:n=1 $Assembly'
assert a.assemblyShell.surfaceCard == '113 RHP 0.0 0.0 -99.7 0 0 320.0 0 5.805 0 $Assembly: Full Assembly Surface'
assert a.everythingElse.cellCard == '114 0 113 imp:n=0 $Everything Else'
global_vars = gb.GlobalVariables()
global_vars.read_input_file('Smear_Assembly_Test')
assembly_info2 = [global_vars.file_name, '01A01', global_vars, None]
def test_smearAssembly():
a = SmearAssembly.SmearAssembly(assembly_info2)
assert a.assemblyPitch == 12
assert a.coolantMaterial == 'LiquidNa'
assert a.assemblyMaterial == 'HT9'
assert a.smearMaterial == {'LiquidNa': 0.3, 'HT9': 0.7}
assert a.smearRegion.cellCard == "100 100 0.06747 -100 u=100 imp:n=1 $Assembly: Smear Region"
assert a.smearRegion.surfaceCard == "100 RHP 0.0 0.0 -60.0 0 0 240 0 5.80529 0 $Assembly: Smear Region"
assert a.lowerCoolant.cellCard == '101 101 0.02428 -101 u=100 imp:n=1 $Assembly: Lower Coolant'
assert a.lowerCoolant.surfaceCard == '101 RHP 0.0 0.0 -100.1 0 0 40.1 0 5.80529 0 $Assembly: Lower Coolant'
assert a.upperCoolant.cellCard == '102 102 0.02428 -102 u=100 imp:n=1 $Assembly: Upper Coolant'
assert a.upperCoolant.surfaceCard == '102 RHP 0.0 0.0 180.0 0 0 40.0 0 5.80529 0 $Assembly: Upper Coolant'
assert a.assemblyShell.cellCard == '103 0 -103 fill=100 imp:n=1 $Assembly'
assert a.assemblyShell.surfaceCard == '103 RHP 0.0 0.0 -100.0 0 0 320.0 0 5.805 0 $Assembly: Full Assembly Surface'
def test_getAssemblyLocation():
assembly_info1 = ['Nonsense', '01A01', global_vars, None]
a = None
try:
a = Assembly.Assembly(assembly_info1)
except IndexError:
assert a is None
global_vars = gb.GlobalVariables()
global_vars.read_input_file('A271_Assembly_Shifted_Test')
assembly_info3 = [global_vars.file_name, '01A01', global_vars, None]
def test_shifted_fuel_assembly():
"""Check the Fuel Assembly subclass of Assembly with a shifted Z position."""
a = FuelAssembly.FuelAssembly(assembly_info3)
assert a.universe == 104
assert a.cellNum == 114
assert a.surfaceNum == 114
assert a.materialNum == 114
assert a.fuel is not None
assert a.fuel is not None
assert a.bond is not None
assert a.clad is not None
assert a.coolant is not None
assert a.blankUniverse is not None
assert a.blankCoolant is not None
assert a.latticeUniverse is not None
assert a.fuelUniverse is not None
assert a.innerDuct is not None
assert a.duct is not None
assert a.plenum is not None
assert a.upperReflector is not None
assert a.lowerReflector is not None
assert a.upperSodium is not None
assert a.lowerSodium is not None
assert a.assemblyShell is not None
assert a.everythingElse is not None
assert a.cladOD == 0.53
assert a.cladID == 0.53 - 0.037 * 2
assert np.allclose(a.fuelDiameter, 0.394907)
assert a.fuelPitch == 0.66144
assert a.wireWrapDiameter == 0.126
assert a.fuelHeight == 60
assert a.fuelMaterial == '5Pu22U10Zr'
assert a.cladMaterial == 'HT9'
assert a.bondMaterial == 'LiquidNa'
assert a.zPosition == -10
assert a.position == [0, 0, -10]
assert a.plenumHeight == 60
assert a.plenumMaterial == {'HT9': 0.25, 'Void': 0.25, 'LiquidNa': 0.5}
assert a.plenumPosition == [0, 0, 50.6]
assert a.reflectorHeight == 60
assert a.reflectorMaterial == {'LiquidNa': 0.20, 'HT9': 0.80}
assert a.fuel.cellCard == '100 100 0.04574 -100 u=101 imp:n=1 $Pin: Fuel'
assert a.fuel.surfaceCard == '100 RCC 0.0 0.0 -10.0 0 0 60.0 0.19745 $Pin: Fuel'
assert a.bond.cellCard == '101 101 0.02428 100 -101 u=101 imp:n=1 $Pin: Bond'
assert a.bond.surfaceCard == '101 RCC 0.0 0.0 -10.0 0 0 60.6 0.228 $Pin: Bond - 1% higher than fuel'
assert a.clad.cellCard == '102 102 0.08598 101 -102 u=101 imp:n=1 $Pin: Clad'
assert a.clad.surfaceCard == '102 RCC 0.0 0.0 -10.0 0 0 60.6 0.265 $Pin: Clad - 1% higher than fuel'
assert a.coolant.cellCard == '103 103 0.02929 102 u=101 imp:n=1 $Pin: Wirewrap + Coolant'
assert a.coolant.surfaceCard == '103 RHP 0.0 0.0 -10.0 0 0 60.6 0.66144 0 0 $Pin: Coolant - 1% higher than fuel'
assert a.blankCoolant.cellCard == '104 104 0.02428 -103 u=102 imp:n=1 $Pin: Blank Pin Coolant'
assert a.blankCoolant.surfaceCard == '104 RHP 0.0 0.0 -10.0 0 0 60.6 0.33072 0 0 $Pin: Blank Pin - 1% \
higher than fuel'
assert a.fuelUniverse.cellCard == '105 0 -104 lat=2 u=103 imp:n=1\n'\
' fill=-10:10 -10:10 0:0\n'\
' 102 102 102 102 102 102 102 102 102 102\n'\
' 102 102 102 102 102 102 102 102 102 102\n'\
' 102 102 102 102 102 102 102 102 102 102\n'\
' 102 101 101 101 101 101 101 101 101 101\n'\
' 101 102 102 102 102 102 102 102 102 102\n'\
' 102 101 101 101 101 101 101 101 101 101\n'\
' 101 101 102 102 102 102 102 102 102 102\n'\
' 102 101 101 101 101 101 101 101 101 101\n'\
' 101 101 101 102 102 102 102 102 102 102\n'\
' 102 101 101 101 101 101 101 101 101 101\n'\
' 101 101 101 101 102 102 102 102 102 102\n'\
' 102 101 101 101 101 101 101 101 101 101\n'\
' 101 101 101 101 101 102 102 102 102 102\n'\
' 102 101 101 101 101 101 101 101 101 101\n'\
' 101 101 101 101 101 101 102 102 102 102\n'\
' 102 101 101 101 101 101 101 101 101 101\n'\
' 101 101 101 101 101 101 101 102 102 102\n'\
' 102 101 101 101 101 101 101 101 101 101\n'\
' 101 101 101 101 101 101 101 101 102 102\n'\
' 102 101 101 101 101 101 101 101 101 101\n'\
' 101 101 101 101 101 101 101 101 101 102\n'\
' 102 101 101 101 101 101 101 101 101 101\n'\
' 101 101 101 101 101 101 101 101 101 101\n'\
' 102 102 101 101 101 101 101 101 101 101\n'\
' 101 101 101 101 101 101 101 101 101 101\n'\
' 102 102 102 101 101 101 101 101 101 101\n'\
' 101 101 101 101 101 101 101 101 101 101\n'\
' 102 102 102 102 101 101 101 101 101 101\n'\
' 101 101 101 101 101 101 101 101 101 101\n'\
' 102 102 102 102 102 101 101 101 101 101\n'\
' 101 101 101 101 101 101 101 101 101 101\n'\
' 102 102 102 102 102 102 101 101 101 101\n'\
' 101 101 101 101 101 101 101 101 101 101\n'\
' 102 102 102 102 102 102 102 101 101 101\n'\
' 101 101 101 101 101 101 101 101 101 101\n'\
' 102 102 102 102 102 102 102 102 101 101\n'\
' 101 101 101 101 101 101 101 101 101 101\n'\
' 102 102 102 102 102 102 102 102 102 101\n'\
' 101 101 101 101 101 101 101 101 101 101\n'\
' 102 102 102 102 102 102 102 102 102 102\n'\
' 101 101 101 101 101 101 101 101 101 101\n'\
' 102 102 102 102 102 102 102 102 102 102\n'\
' 102 102 102 102 102 102 102 102 102 102\n'\
' 102 102 102 102 102 102 102 102 102 102\n'\
' 102'
assert a.innerDuct.cellCard == '106 0 -106 u=100 fill=103 imp:n=1 $Assembly: Inner Portion of Assembly'
assert a.innerDuct.surfaceCard == '106 RHP 0.0 0.0 -10.0 0 0 60.6 0 5.505 0 $Assembly: Duct Inner Surface'
assert a.plenum.cellCard == '107 107 0.03364 -107 u=100 imp:n=1 $Assembly: Plenum'
assert a.plenum.surfaceCard == '107 RHP 0.0 0.0 50.6 0 0 60.0 0 5.505 0 $Assembly: Plenum'
assert a.upperReflector.cellCard == '108 108 0.07364 -108 u=100 imp:n=1 $Assembly: Upper Reflector'
assert a.upperReflector.surfaceCard == '108 RHP 0.0 0.0 110.6 0 0 60.0 0 5.505 0 $Assembly: Upper Reflector'
assert a.lowerReflector.cellCard == '109 109 0.07364 -109 u=100 imp:n=1 $Assembly: Lower Reflector'
assert a.lowerReflector.surfaceCard == '109 RHP 0.0 0.0 -70.0 0 0 60.0 0 5.505 0 $Assembly: Lower Reflector'
assert a.duct.cellCard == '110 110 0.08598 106 109 108 107 -110 u=100 imp:n=1 $Assembly: Assembly Duct'
assert a.duct.surfaceCard == '110 RHP 0.0 0.0 -70.0 0 0 240.6 0 5.80529 0 $Assembly: Duct Outer Surface'
assert a.lowerSodium.cellCard == '111 111 0.02428 -111 u=100 imp:n=1 $Assembly: Lower Coolant'
assert a.lowerSodium.surfaceCard == '111 RHP 0.0 0.0 -109.8 0 0 39.8 0 5.80529 0 $Assembly: Lower Coolant'
assert a.upperSodium.cellCard == '112 112 0.02428 -112 u=100 imp:n=1 $Assembly: Upper Coolant'
assert a.upperSodium.surfaceCard == '112 RHP 0.0 0.0 170.6 0 0 39.7 0 5.80529 0 $Assembly: Upper Coolant'
assert a.assemblyShell.cellCard == '113 0 -113 fill=100 imp:n=1 $Assembly'
assert a.assemblyShell.surfaceCard == '113 RHP 0.0 0.0 -109.7 0 0 320.0 0 5.805 0 $Assembly: Full Assembly Surface'
assert a.everythingElse.cellCard == '114 0 113 imp:n=0 $Everything Else'
global_vars = gb.GlobalVariables()
core = 'A271_Assembly_Test'
assem = 'A271_Test'
global_vars.read_input_file(core, assembly_perturbations={assem: {'fuelMaterial': 'U10Zr', 'fuelDiameter': 0.1,
'cladMaterial': 'SS316'}})
assembly_info4 = [global_vars.file_name, '01A01', global_vars, None]
def test_fueled_perturbation():
a = FuelAssembly.FuelAssembly(assembly_info4)
assert a.fuelMaterial == 'U10Zr'
assert a.fuelDiameter == 0.1
assert a.cladMaterial == 'SS316'
global_vars = gb.GlobalVariables()
core = 'Smear_Assembly_Test'
assem = 'Smear_Test'
global_vars.read_input_file(core, assembly_perturbations={assem: {'smearMaterial': {'LiquidPb': 1.0},
'zPosition': 50,
'smearRegionHeight': 50}})
assembly_info5 = [global_vars.file_name, '01A01', global_vars, None]
def test_smear_perturbation():
a = SmearAssembly.SmearAssembly(assembly_info5)
assert a.smearMaterial == {'LiquidPb': 1.0}
assert a.zPosition == 50
assert a.smearRegionHeight == 50
assert a.smearRegion.cellCard == "100 100 0.03103 -100 u=100 imp:n=1 $Assembly: Smear Region"
|
StarcoderdataPython
|
27761
|
<filename>forms/forms/constants.py
"""Stores constants used as numbers for readability that are used across all apps"""
class AdminRoles:
""" """
JCRTREASURER = 1
SENIORTREASURER = 2
BURSARY = 3
ASSISTANTBURSAR = 4
CHOICES = (
(JCRTREASURER, 'JCR Treasurer'),
(SENIORTREASURER, 'Senior Treasurer'),
(BURSARY, 'Bursary'),
(ASSISTANTBURSAR, 'Assistant Bursar')
)
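
# A minimal usage sketch (the model below and the import path are hypothetical, not part
# of this module): the integer constants are typically wired into a Django model field
# through the CHOICES tuple.
#
#     from django.db import models
#     from forms.constants import AdminRoles
#
#     class Approval(models.Model):
#         role = models.IntegerField(choices=AdminRoles.CHOICES,
#                                    default=AdminRoles.JCRTREASURER)
#
#     # instance.get_role_display() then returns the label, e.g. 'JCR Treasurer'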
|
StarcoderdataPython
|
1916647
|
<filename>nabu/processing/processors/__init__.py
'''@package processors
contains the data processors'''
from . import processor, processor_factory, feature_computers
|
StarcoderdataPython
|
3331032
|
<filename>python/crawel/demo.py<gh_stars>0
l = [
['Apple', 'Google', 'Microsoft'],
['Java', 'Python', 'Ruby', 'PHP'],
['Adam', 'Bart', 'Lisa']
]
for x in l:
for i in x:
print ("hello:" + i + "\t")
|
StarcoderdataPython
|
5067764
|
import os
import re
import json
import time
import hashlib
import collections
from lazyapi import ApiClient
from lazycls import classproperty
from .utils import *
from .classes import *
from .config import KctlContextCfg
from kubernetes.client import ApiClient as KubernetesClient
class KctlBaseClient:
def __init__(self, host: str = "", api_version: str = None, *args, **kwargs):
self._cfg = KctlContextCfg(host=host, api_version = api_version, *args, **kwargs)
self.url = self._cfg.url
self._client = ApiClient(headers = self._cfg.headers, verify = self._cfg.ssl_verify, module_name=f'kctl.{self._cfg.api_version}', default_resp = True)
self.schema = None
if self._cfg.is_enabled: self._load_schemas()
def reset_config(self, host: str = None, api_version: str = None, reset_schema: bool = True, *args, **kwargs):
self._cfg = KctlContextCfg(host=host, api_version = api_version, *args, **kwargs)
self.url = self._cfg.url
self._client = ApiClient(headers = self._cfg.headers, verify = self._cfg.ssl_verify, module_name=f'kctl.{self._cfg.api_version}', default_resp = True)
if reset_schema: self.reload_schema()
def set_cluster(self, cluster_name: str, reset_schema: bool = True):
""" Sets the Base url property to the cluster"""
self.url = self._cfg.get_url(cluster_name = cluster_name, set_default= True)
if reset_schema: self.reload_schema()
def reload_schema(self):
self._load_schemas(force=True)
def valid(self):
return self.url is not None and self.schema is not None
def object_hook(self, obj):
if isinstance(obj, list): return [self.object_hook(x) for x in obj]
if isinstance(obj, dict):
result = RestObject()
for k, v in obj.items():
setattr(result, k, self.object_hook(v))
for link in ['next', 'prev']:
try:
url = getattr(result.pagination, link)
if url is not None: setattr(result, link, lambda url=url: self._get(url))
except AttributeError: pass
if hasattr(result, 'type') and isinstance(getattr(result, 'type'), str):
if hasattr(result, 'links'):
for link_name, link in result.links.items():
def cb_link(_link=link, **kw):
return self._get(_link, data=kw)
if hasattr(result, link_name): setattr(result, link_name + '_link', cb_link)
else: setattr(result, link_name, cb_link)
if hasattr(result, 'actions'):
for link_name, link in result.actions.items():
def cb_action(_link_name=link_name, _result=result, *args, **kw):
return self.action(_result, _link_name, *args, **kw)
if hasattr(result, link_name): setattr(result, link_name + '_action', cb_action)
else: setattr(result, link_name, cb_action)
return result
return obj
def object_pairs_hook(self, pairs):
ret = collections.OrderedDict()
for k, v in pairs:
ret[k] = v
return self.object_hook(ret)
def _get(self, url: str, data=None):
return self._unmarshall(self._get_raw(url, data=data))
async def _async_get(self, url: str, data=None):
return self._unmarshall(await self._async_get_raw(url, data=data))
def _error(self, text):
raise ApiError(self._unmarshall(text))
@timed_url
def _get_raw(self, url: str, data=None):
r = self._get_response(url, data)
return r.text
@timed_url
async def _async_get_raw(self, url: str, data=None):
r = await self._async_get_response(url, data)
return r.text
def _get_response(self, url: str, data=None):
r = self._client.get(url, params=data, headers=self._cfg.headers)
if r.status_code < 200 or r.status_code >= 300: self._error(r.text)
return r
async def _async_get_response(self, url: str, data=None):
r = await self._client.async_get(url, params=data, headers=self._cfg.headers)
if r.status_code < 200 or r.status_code >= 300: self._error(r.text)
return r
@timed_url
def _post(self, url: str, data=None):
r = self._client.post(url, data=self._marshall(data), headers=self._cfg.headers)
if r.status_code < 200 or r.status_code >= 300: self._error(r.text)
return self._unmarshall(r.text)
@timed_url
async def _async_post(self, url: str, data=None):
r = await self._client.async_post(url, data=self._marshall(data), headers=self._cfg.headers)
if r.status_code < 200 or r.status_code >= 300: self._error(r.text)
return self._unmarshall(r.text)
@timed_url
def _put(self, url, data=None):
r = self._client.put(url, data=self._marshall(data), headers=self._cfg.headers)
if r.status_code < 200 or r.status_code >= 300: self._error(r.text)
return self._unmarshall(r.text)
@timed_url
async def _async_put(self, url, data=None):
r = await self._client.async_put(url, data=self._marshall(data), headers=self._cfg.headers)
if r.status_code < 200 or r.status_code >= 300: self._error(r.text)
return self._unmarshall(r.text)
@timed_url
def _delete(self, url):
r = self._client.delete(url, headers=self._cfg.headers)
if r.status_code < 200 or r.status_code >= 300: self._error(r.text)
return self._unmarshall(r.text)
@timed_url
async def _async_delete(self, url):
r = await self._client.async_delete(url, headers=self._cfg.headers)
if r.status_code < 200 or r.status_code >= 300: self._error(r.text)
return self._unmarshall(r.text)
def _unmarshall(self, text):
if text is None or text == '': return text
return json.loads(text, object_hook=self.object_hook, object_pairs_hook=self.object_pairs_hook)
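    # Note: json.loads gives object_pairs_hook priority when both hooks are passed, so
    # every decoded JSON object goes through object_pairs_hook above, which builds an
    # OrderedDict and then delegates to object_hook to wrap it as a RestObject.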
def _marshall(self, obj, indent=None, sort_keys=True):
if obj is None: return None
return json.dumps(self._to_dict(obj), indent=indent, sort_keys=sort_keys)
def _load_schemas(self, force=False):
if self.schema and not force: return
schema_text = self._get_cached_schema()
if force or not schema_text:
response = self._get_response(self.url)
schema_url = response.headers.get('X-API-Schemas')
if schema_url is not None and self.url != schema_url: schema_text = self._get_raw(schema_url)
else: schema_text = response.text
self._cache_schema(schema_text)
obj = self._unmarshall(schema_text)
schema = Schema(schema_text, obj)
if len(schema.types) > 0:
self._bind_methods(schema)
self.schema = schema
#############################################################################
# Base Methods #
#############################################################################
def by_id(self, type, id, **kw):
id = str(id)
type_name = convert_type_name(type)
url = self.schema.types[type_name].links.collection
if url.endswith('/'): url += id
else: url = '/'.join([url, id])
try: return self._get(url, self._to_dict(**kw))
except ApiError as e:
if e.error.status == 404: return None
else: raise e
def update_by_id(self, type, id, *args, **kw):
type_name = convert_type_name(type)
url = self.schema.types[type_name].links.collection
url = url + id if url.endswith('/') else '/'.join([url, id])
return self._put_and_retry(url, *args, **kw)
def update(self, obj, *args, **kw):
url = obj.links.self
return self._put_and_retry(url, *args, **kw)
def update_data(self, obj, *args, **kw):
url = obj.links.self
return self._put_and_retry(url, obj, *args, **kw)
def _put_and_retry(self, url, *args, **kw):
retries = kw.get('retries', 3)
for i in range(retries):
try: return self._put(url, data=self._to_dict(*args, **kw))
except ApiError as e:
if i == retries-1: raise e
if e.error.status == 409: time.sleep(.1)
else: raise e
def _post_and_retry(self, url, *args, **kw):
retries = kw.get('retries', 3)
for i in range(retries):
try: return self._post(url, data=self._to_dict(*args, **kw))
except ApiError as e:
if i == retries-1: raise e
if e.error.status == 409: time.sleep(.1)
else: raise e
def _validate_list(self, type, **kw):
if not self._cfg.strict: return
type_name = convert_type_name(type)
collection_filters = self.schema.types[type_name].collectionFilters
for k in kw:
if hasattr(collection_filters, k): return
for filter_name, filter_value in collection_filters.items():
for m in filter_value.modifiers:
if k == '_'.join([filter_name, m]): return
raise ClientApiError(k + ' is not searchable field')
def list(self, type, **kw):
type_name = convert_type_name(type)
if type_name not in self.schema.types: raise ClientApiError(type_name + ' is not a valid type')
self._validate_list(type_name, **kw)
collection_url = self.schema.types[type_name].links.collection
collection_url = self._cfg.validate_fleet_url(collection_url)
return self._get(collection_url, data=self._to_dict(**kw))
def reload(self, obj):
return self.by_id(obj.type, obj.id)
def create(self, type, *args, **kw):
type_name = convert_type_name(type)
collection_url = self.schema.types[type_name].links.collection
collection_url = self._cfg.validate_fleet_url(collection_url)
return self._post(collection_url, data=self._to_dict(*args, **kw))
def delete(self, *args):
for i in args:
if isinstance(i, RestObject): return self._delete(i.links.self)
def action(self, obj, action_name, *args, **kw):
url = getattr(obj.actions, action_name)
return self._post_and_retry(url, *args, **kw)
#############################################################################
# Async Methods #
#############################################################################
async def async_by_id(self, type, id, **kw):
id = str(id)
type_name = convert_type_name(type)
url = self.schema.types[type_name].links.collection
if url.endswith('/'): url += id
else: url = '/'.join([url, id])
try: return await self._async_get(url, self._to_dict(**kw))
except ApiError as e:
if e.error.status == 404: return None
else: raise e
async def async_update_by_id(self, type, id, *args, **kw):
type_name = convert_type_name(type)
url = self.schema.types[type_name].links.collection
url = url + id if url.endswith('/') else '/'.join([url, id])
return await self._async_put_and_retry(url, *args, **kw)
async def async_update(self, obj, *args, **kw):
url = obj.links.self
return await self._async_put_and_retry(url, *args, **kw)
async def async_update_data(self, obj, *args, **kw):
url = obj.links.self
return await self._async_put_and_retry(url, obj, *args, **kw)
async def _async_put_and_retry(self, url, *args, **kw):
retries = kw.get('retries', 3)
for i in range(retries):
try: return await self._async_put(url, data=self._to_dict(*args, **kw))
except ApiError as e:
if i == retries-1: raise e
if e.error.status == 409: time.sleep(.1)
else: raise e
async def _async_post_and_retry(self, url, *args, **kw):
retries = kw.get('retries', 3)
for i in range(retries):
try: return await self._async_post(url, data=self._to_dict(*args, **kw))
except ApiError as e:
if i == retries-1: raise e
if e.error.status == 409: time.sleep(.1)
else: raise e
async def async_list(self, type, **kw):
type_name = convert_type_name(type)
if type_name not in self.schema.types: raise ClientApiError(type_name + ' is not a valid type')
self._validate_list(type_name, **kw)
collection_url = self.schema.types[type_name].links.collection
collection_url = self._cfg.validate_fleet_url(collection_url)
return await self._async_get(collection_url, data=self._to_dict(**kw))
async def async_reload(self, obj):
return await self.async_by_id(obj.type, obj.id)
async def async_create(self, type, *args, **kw):
type_name = convert_type_name(type)
collection_url = self.schema.types[type_name].links.collection
collection_url = self._cfg.validate_fleet_url(collection_url)
return await self._async_post(collection_url, data=self._to_dict(*args, **kw))
async def async_delete(self, *args):
for i in args:
if isinstance(i, RestObject): return await self._async_delete(i.links.self)
async def async_action(self, obj, action_name, *args, **kw):
url = getattr(obj.actions, action_name)
return await self._async_post_and_retry(url, *args, **kw)
#############################################################################
# Class Funcs #
#############################################################################
def _is_list(self, obj):
if isinstance(obj, list): return True
if isinstance(obj, RestObject) and 'type' in obj.__dict__ and obj.type == 'collection': return True
return False
def _to_value(self, value):
if isinstance(value, dict):
ret = {k: self._to_value(v) for k, v in value.items()}
return ret
if isinstance(value, list):
ret = [self._to_value(v) for v in value]
return ret
if isinstance(value, RestObject):
ret = {}
for k, v in vars(value).items():
if not isinstance(v, RestObject) and not callable(v):
if not k.startswith('_'): ret[k] = self._to_value(v)
elif isinstance(v, RestObject):
if not k.startswith('_'): ret[k] = self._to_dict(v)
return ret
return value
def _to_dict(self, *args, **kw):
if len(kw) == 0 and len(args) == 1 and self._is_list(args[0]):
ret = [self._to_dict(i) for i in args[0]]
return ret
ret = {}
for i in args:
value = self._to_value(i)
if isinstance(value, dict):
for k, v in value.items():
ret[k] = v
for k, v in kw.items():
ret[k] = self._to_value(v)
return ret
@staticmethod
def _type_name_variants(name):
ret = [name]
python_name = re.sub(r'([a-z])([A-Z])', r'\1_\2', name)
if python_name != name: ret.append(python_name.lower())
return ret
def _bind_methods(self, schema):
bindings = [
('list', 'collectionMethods', GET_METHOD, self.list),
('by_id', 'collectionMethods', GET_METHOD, self.by_id),
('create', 'collectionMethods', POST_METHOD, self.create),
#('update', 'resourceMethods', PUT_METHOD, self.update),
('update_by_id', 'resourceMethods', PUT_METHOD, self.update_by_id),
#('update_data', 'resourceMethods', PUT_METHOD, self.update),
]
async_bindings = [
('async_list', 'collectionMethods', GET_METHOD, self.async_list),
('async_by_id', 'collectionMethods', GET_METHOD, self.async_by_id),
('async_create', 'collectionMethods', POST_METHOD, self.async_create),
#('async_update', 'resourceMethods', PUT_METHOD, self.async_update),
('async_update_by_id', 'resourceMethods', PUT_METHOD, self.async_update_by_id),
]
for type_name, typ in schema.types.items():
for name_variant in self._type_name_variants(type_name):
for (method_name, type_collection, test_method, m), (async_method_name, async_type_collection, async_test_method, async_m) in zip(bindings, async_bindings):
# double lambda for lexical binding hack, I'm sure there's
# a better way to do this
def cb_bind(type_name=type_name, method=m):
def _cb(*args, **kw):
return method(type_name, *args, **kw)
return _cb
def async_cb_bind(type_name=type_name, method=async_m):
async def _cb(*args, **kw):
return await method(type_name, *args, **kw)
return _cb
if test_method in getattr(typ, type_collection, []): setattr(self, '_'.join([method_name, name_variant]), cb_bind())
if async_test_method in getattr(typ, async_type_collection, []): setattr(self, '_'.join([async_method_name, name_variant]), async_cb_bind())
#for method_name, type_collection, test_method, m in async_bindings:
# def cb_bind(type_name=type_name, method=m):
# def _cb(*args, **kw):
# return method(type_name, *args, **kw)
# return _cb
# if test_method in getattr(typ, type_collection, []): setattr(self, '_'.join([method_name, name_variant]), cb_bind())
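    # The default arguments in cb_bind / async_cb_bind above work around Python's
    # late-binding closures: without them, every generated callback would see the values
    # of type_name and m from the *last* loop iteration. A minimal illustration of the
    # pitfall, independent of this class:
    #
    #     callbacks = [lambda: i for i in range(3)]
    #     [cb() for cb in callbacks]            # -> [2, 2, 2]
    #     callbacks = [lambda i=i: i for i in range(3)]
    #     [cb() for cb in callbacks]            # -> [0, 1, 2]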
def _get_schema_hash(self):
h = hashlib.new('sha1')
h.update(self.url.encode('utf-8'))
if self._cfg.token is not None: h.update(self._cfg.token.encode('utf-8'))
return h.hexdigest()
def _get_cached_schema_file_name(self):
h = self._get_schema_hash()
return self._cfg.cache_dir.joinpath('schema-' + h + '.json')
def _cache_schema(self, text):
cached_schema = self._get_cached_schema_file_name()
if not cached_schema: return None
cached_schema.write_text(text, encoding='utf-8')
def _get_cached_schema(self):
cached_schema = self._get_cached_schema_file_name()
if not cached_schema: return None
if os.path.exists(cached_schema):
mod_time = os.path.getmtime(cached_schema)
if time.time() - mod_time < self._cfg.cache_time: return cached_schema.read_text(encoding='utf-8')
return None
def wait_success(self, obj, timeout=-1):
obj = self.wait_transitioning(obj, timeout)
if obj.transitioning != 'no': raise ClientApiError(obj.transitioningMessage)
return obj
def wait_transitioning(self, obj, timeout=-1, sleep=0.01):
timeout = _get_timeout(timeout)
start = time.time()
obj = self.reload(obj)
while obj.transitioning == 'yes':
time.sleep(sleep)
sleep *= 2
sleep = min(sleep, 2)
obj = self.reload(obj)
delta = time.time() - start
if delta > timeout:
msg = 'Timeout waiting for [{}:{}] to be done after {} seconds'
msg = msg.format(obj.type, obj.id, delta)
raise Exception(msg)
return obj
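    # A rough usage sketch (host, resource type, and id below are placeholders, not taken
    # from this module): fetch an object and poll it until it stops transitioning.
    #
    #     client = KctlBaseClient(host='https://rancher.example.com', api_version='v3')
    #     cluster = client.by_id('cluster', 'c-abc12')
    #     cluster = client.wait_success(cluster, timeout=120)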
class KctlClient:
v1: KctlBaseClient = KctlBaseClient(api_version='v1')
v3: KctlBaseClient = KctlBaseClient(api_version='v3')
@classmethod
def build_rancher_ctx(cls):
cls.v1._cfg.build_rancher_ctx(v1_client=cls.v1, v3_client=cls.v3)
cls.v3._cfg.build_rancher_ctx(v1_client=cls.v1, v3_client=cls.v3)
@classmethod
def reset_context(cls, host: str = None, reset_schema: bool = True, *args, **kwargs):
cls.v1.reset_config(host = host, api_version = 'v1', reset_schema = reset_schema, *args, **kwargs)
cls.v3.reset_config(host = host, api_version = 'v3', reset_schema = reset_schema, *args, **kwargs)
@classmethod
def set_cluster(cls, cluster_name: str, *args, **kwargs):
cls.v1.set_cluster(cluster_name = cluster_name, *args, **kwargs)
#cls.v3.set_cluster(cluster_name = cluster_name, *args, **kwargs)
@classproperty
def api(cls) -> KubernetesClient:
return KubernetesClient(cls.v1._cfg.config)
|
StarcoderdataPython
|
6430007
|
class Coffee:
coffeeCupCounter = 0
def __init__(self, themilk, thesugar, thecoffeemate):
self.milk = themilk
self.sugar = thesugar
self.coffeemate = thecoffeemate
Coffee.coffeeCupCounter = Coffee.coffeeCupCounter +1
print(f"You now have your coffee with {self.milk} milk, {self.sugar} sugar {self.coffeemate} coffeemate")
mySugarFreeCoffee = Coffee(2,0,1)
print(mySugarFreeCoffee.sugar)
myMuchSugarCoffee = Coffee(2, 10, 1)
print(myMuchSugarCoffee.sugar)
print(f"we have made {Coffee.coffeeCupCounter} coffee cups so farr!")
print(f"we have made {mySugarFreeCoffee.coffeeCupCounter} coffee cups so farr!")
print(f"we have made {myMuchSugarCoffee.milk} coffee cups so farr!")
print(f"we have made {myMuchSugarCoffee.coffeeCupCounter} coffee cups so farr!")
|
StarcoderdataPython
|
4968654
|
import random
import typing as t
import spacy
import pytest
from spacy.util import minibatch
from spacy.training import Example
train_data: t.List[t.Tuple[str, t.Dict[str, t.List[t.Tuple[int, int, str]]]]] = [
("Google has changed the logo of its apps", {"entities": [(0, 6, "ORG")]}),
("Facebook has introduced a new app!", {"entities": [(0, 8, "ORG")]}),
("Amazon has partnered with small businesses.", {"entities": [(0, 6, "ORG")]}),
]
@pytest.fixture(scope="module")
def spacy_model() -> spacy.language.Language:
examples: t.List[t.Any] = []
model = spacy.blank("en")
if "ner" not in model.pipe_names:
ner = model.add_pipe("ner", last=True)
else:
ner = model.get_pipe("ner")
for text, annotations in train_data:
examples.append(Example.from_dict(model.make_doc(text), annotations)) # noqa
for ent in annotations.get("entities"):
ner.add_label(ent[2])
other_pipes = [pipe for pipe in model.pipe_names if pipe != "ner"]
with model.disable_pipes(*other_pipes):
optimizer = model.begin_training()
for _ in range(10):
random.shuffle(examples)
for batch in minibatch(examples, size=8):
model.update(batch, sgd=optimizer)
return model
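
# A minimal sketch of how a test could consume this fixture (test name and sample
# sentence are illustrative, not part of this module):
#
#     def test_detects_org_entities(spacy_model):
#         doc = spacy_model("Google has announced a new product.")
#         assert any(ent.label_ == "ORG" for ent in doc.ents)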
|
StarcoderdataPython
|
8070978
|
# Generated by Django 2.1 on 2019-02-18 21:31
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('users', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='historicalprofile',
name='date_verified',
field=models.DateTimeField(blank=True, null=True),
),
migrations.AddField(
model_name='profile',
name='date_verified',
field=models.DateTimeField(blank=True, null=True),
),
]
|
StarcoderdataPython
|
8045717
|
from fusion import *
import torchvision.datasets as dset
from lucchi import LucchiPPDataset, Resize, Scale
from torch.utils import data
import torch
from torch.autograd import Variable
from torchvision import utils as v_utils
from torchvision import transforms
import argparse, os
from tqdm import tqdm
from tensorboardX import SummaryWriter
import numpy as np
try:
from evaluation.align.jaccard import jaccard_index
except:
raise ValueError('Download the evaluation package from https://github.com/mental689/evaluation')
from PIL import Image
def parse():
p = argparse.ArgumentParser('Training Fusion net')
p.add_argument('--batch_size', type=int, default=16)
p.add_argument('--lr', type=float, default=2e-04)
p.add_argument('--epochs', type=int, default=1000)
p.add_argument('--img_size', type=int, default=512)
p.add_argument('--root', type=str, default='dataset')
p.add_argument('--cuda', action='store_true')
p.add_argument('--finetune', action='store_true')
p.add_argument('--finetune_from', type=str, default='models/fusion_best.pt')
p.add_argument('--logdir', type=str, default='logs')
p.add_argument('--dataset', type=str, default='lucchipp')
p.add_argument('--test', action='store_true')
return p.parse_args()
def train(args):
if args.dataset == 'lucchipp':
img_data = LucchiPPDataset(train=True, transforms=transforms.Compose([
Resize(size=(args.img_size, args.img_size)),
Scale()
]))
img_batch = data.DataLoader(img_data, batch_size=args.batch_size,
shuffle=True, num_workers=2)
fusion = nn.DataParallel(FusionGenerator(3, 3, 16))
if args.cuda:
fusion.cuda()
if args.finetune:
try:
fusion = torch.load(args.finetune_from)
print("\n--------model restored--------\n")
except:
print("\n--------model not restored--------\n")
pass
# loss function & optimizer
loss_func = nn.SmoothL1Loss()
optimizer = torch.optim.Adam(fusion.parameters(), lr=args.lr)
if not os.path.exists(args.logdir):
os.makedirs(args.logdir)
writer = SummaryWriter(log_dir=args.logdir)
# training
for i in range(args.epochs):
pbar = tqdm(img_batch)
num_iter = 0
for (image, label) in pbar:
optimizer.zero_grad()
x = Variable(image)
y = Variable(label)
if args.cuda:
x = x.cuda()
y = y.cuda()
pred = fusion(x)
loss = loss_func(pred, y)
loss.backward()
optimizer.step()
num_iter += 1
pbar.set_description('Epoch {}, Iter {}, loss: {:.5f}'.format(i+1, num_iter, loss.item()))
writer.add_scalars(main_tag='Training', tag_scalar_dict={
'loss': loss.item()
}, global_step=i*len(img_batch)+num_iter-1)
if num_iter == len(img_batch):
# v_utils.save_image(x[0].cpu().data, "./dataset/original_image_{}_{}.png".format(i, num_iter))
# v_utils.save_image(y[0].cpu().data, "./dataset/label_image_{}_{}.png".format(i, num_iter))
# v_utils.save_image(pred[0].cpu().data, "./dataset/gen_image_{}_{}.png".format(i, num_iter))
torch.save(fusion, args.finetune_from)
writer.add_image(tag='Training orig', img_tensor=x[0], global_step=i*len(img_batch)+num_iter-1)
writer.add_image(tag='Training label', img_tensor=y[0], global_step=i * len(img_batch) + num_iter - 1)
writer.add_image(tag='Training gen', img_tensor=pred[0], global_step=i * len(img_batch) + num_iter - 1)
def test(args, i=0):
if args.dataset == 'lucchipp':
img_data = LucchiPPDataset(train=False, transforms=transforms.Compose([
Resize(size=(args.img_size, args.img_size)),
Scale()
]))
img_batch = data.DataLoader(img_data, batch_size=args.batch_size,
shuffle=False, num_workers=2)
fusion = nn.DataParallel(FusionGenerator(3, 3, 16))
if args.cuda:
fusion.cuda()
fusion.train(False)
fusion.eval()
try:
fusion = torch.load(args.finetune_from)
print("\n--------model restored--------\n")
except:
print("\n--------model not restored--------\n")
pass
if not os.path.exists(args.logdir):
os.makedirs(args.logdir)
writer = SummaryWriter(log_dir=args.logdir)
# testing
pbar = tqdm(img_batch)
num_iter = 0
jaccard = 0.
scores = []
for (image, label) in pbar:
x = Variable(image)
y = Variable(label)
if args.cuda:
x = x.cuda()
y = y.cuda()
pred = fusion(x)
#loss = jaccard_index(pred, y, smooth=100)
jaccards = jaccard_index(y.cpu().data.numpy(), pred.cpu().data.numpy())
scores.extend(jaccards)
num_iter += 1
pbar.set_description('Epoch {}, Iter {}, Jaccard Index: {:.5f}'.format(i + 1, num_iter, jaccards.mean()))
writer.add_scalars(main_tag='Testing', tag_scalar_dict={
'Jaccard index': np.array(scores).mean()
}, global_step=i )
print('Testing Jaccard Index: {}'.format(np.array(scores).mean()))
# v_utils.save_image(x[0].cpu().data, "./dataset/test_original_image_{}.png".format(i))
# v_utils.save_image(y[0].cpu().data, "./dataset/test_label_image_{}.png".format(i))
# v_utils.save_image(pred[0].cpu().data, "./dataset/test_gen_image_{}.png".format(i))
writer.add_image(tag='Testing orig', img_tensor=x[0], global_step=i)
writer.add_image(tag='Testing label', img_tensor=y[0], global_step=i)
writer.add_image(tag='Testing gen', img_tensor=pred[0], global_step=i)
if __name__ == '__main__':
args = parse()
if not args.test:
train(args)
test(args, i=0)
|
StarcoderdataPython
|
3323796
|
import json
import unittest
from ExternalAPIs.NIH_NCBI import NIH_NCBI
class TestNIH_NCBI(unittest.TestCase, NIH_NCBI):
#----------------------------------------------------
# test_NIHFundingDetailsPayload:
# Generate POST request payloads for 2 examples in the NIH reporter API documentation.
    # Check whether our generated payload matches the example in the documentation.
#----------------------------------------------------
def test_NIHFundingDetailsPayload (self):
payload = self._generateFundingDetailsPayload('5UG1HD0784*')
self.assertEquals(payload.replace(" ", ""), '{"criteria":{"project_nums":"5UG1HD0784*"}}', msg='[ERROR] Generating payload failed.')
payload = self._generateFundingDetailsPayload(['5UG1HD078437-07', '5R01DK102815-05'])
self.assertEquals(payload.replace(" ", ""), '{"criteria":{"project_nums":["5UG1HD078437-07","5R01DK102815-05"]}}', msg='[ERROR] Generating payload failed.')
return
#----------------------------------------------------
# test_NIHFundingDetails:
    # Check whether we get correct data. If the number of results reported in the metadata
    # of the response matches the actual number of results returned, this test passes.
#----------------------------------------------------
def test_NIHFundingDetails (self):
proj_num = ['OT3OD025349']
data = self.getProjectFundingDetails(proj_num)
self.assertEquals(len(data['results']), data['meta']['total'])
return
#----------------------------------------------------
# test_NIHRecord:
# Check to see whether the generated record from the response data in 'test_response.txt'
# is correct.
#----------------------------------------------------
def test_NIHRecord (self):
with open('./tests/test_response.txt', 'r') as f:
jsonData = json.loads(f.read())
record = self.generateRecord(jsonData)
for sub_proj in jsonData['results']:
sub_record = record[sub_proj['project_num']]
self.assertEquals(sub_proj['org_name'], sub_record['institute'])
self.assertEquals(sub_proj['org_country'], sub_record['country'])
self.assertEquals(sub_proj['award_amount'], sub_record['amount'])
self.assertEquals(sub_proj['fiscal_year'], sub_record['year'])
self.assertEquals(sub_proj['terms'], sub_record['keywords'])
return
#----------------------------------------------------
# test_NIHPublications:
    # Check to see whether the publications retrieved from the test NIH response data in test_response_2.txt
# matches the number of publications shown in the website (which is 5).
#----------------------------------------------------
def test_NIHPublications (self):
with open('./tests/test_response_2.txt', 'r') as f:
jsonData = json.loads(f.read())
record = self.generateRecord(jsonData)
publications = {}
for k in record:
item = record[k]
pubRecord = self.getPublications(item['appl_id'])
publications.update(pubRecord)
self.assertEquals(len(publications), 5)
return
#----------------------------------------------------
# test_PublicationsOfDatasets:
    # Check whether the publications retrieved from the NCBI eutils API for a known
    # dataset DOI match the result we found from a web search.
#----------------------------------------------------
def test_PublicationsOfDatasets (self):
pubData = self.getPublicationWithSearchTerm('"10.26275/DUZ8-MQ3N"')
self.assertEquals(len(pubData), 1)
for k in pubData:
self.assertEquals(pubData[k]['title'], 'Computational analysis of mechanical stress in colonic diverticulosis')
return
#----------------------------------------------------
# test_getCitedBy:
# Check whether we can retrieve citations of a given paper
#----------------------------------------------------
def test_getCitedBy (self):
# Check whether we can retrieve citations of a pm_id
records = self.getCitedBy('pm_id', '32265489')
self.assertEquals(len(records), 1)
for k in records:
self.assertEquals(records[k]['title'], 'Mechanotransduction in gastrointestinal smooth muscle cells: role of mechanosensitive ion channels')
# Check whether we can retrieve citations of a pmc_id
records = self.getCitedBy('pmc_id', '7138845')
self.assertEquals(len(records), 1)
for k in records:
self.assertEquals(records[k]['title'], 'Mechanotransduction in gastrointestinal smooth muscle cells: role of mechanosensitive ion channels')
return
if __name__ == '__main__':
unittest.main()
|
StarcoderdataPython
|
159769
|
<filename>reviewboard/accounts/tests/test_privacy_form.py
"""Unit tests for reviewboard.accounts.forms.pages.PrivacyForm."""
from __future__ import unicode_literals
from django.contrib.auth.models import User
from django.test.client import RequestFactory
from reviewboard.accounts.forms.pages import PrivacyForm
from reviewboard.accounts.pages import PrivacyPage
from reviewboard.accounts.views import MyAccountView
from reviewboard.testing import TestCase
class PrivacyFormTests(TestCase):
"""Unit tests for reviewboard.accounts.forms.pages.PrivacyForm."""
def setUp(self):
super(PrivacyFormTests, self).setUp()
self.user = User.objects.create(username='test-user')
self.request = RequestFactory().get('/account/preferences/')
self.request.user = self.user
self.page = PrivacyPage(config_view=MyAccountView(),
request=self.request,
user=self.user)
def test_init_with_privacy_enable_user_consent_true(self):
"""Testing PrivacyForm with privacy_enable_user_consent=True"""
with self.siteconfig_settings({'privacy_enable_user_consent': True}):
form = PrivacyForm(page=self.page,
request=self.request,
user=self.user)
self.assertIn('consent', form.fields)
self.assertEqual(form.save_label, 'Save')
def test_init_with_privacy_enable_user_consent_false(self):
"""Testing PrivacyForm with privacy_enable_user_consent=False"""
with self.siteconfig_settings({'privacy_enable_user_consent': False}):
form = PrivacyForm(page=self.page,
request=self.request,
user=self.user)
self.assertNotIn('consent', form.fields)
self.assertIsNone(form.save_label)
def test_is_visible_with_no_privacy(self):
"""Testing PrivacyForm.is_visible with no privacy details"""
settings = {
'privacy_enable_user_consent': False,
'privacy_info_html': '',
}
with self.siteconfig_settings(settings):
form = PrivacyForm(page=self.page,
request=self.request,
user=self.user)
self.assertFalse(form.is_visible())
def test_is_visible_with_consent(self):
"""Testing PrivacyForm.is_visible with consent option enabled"""
settings = {
'privacy_enable_user_consent': True,
'privacy_info_html': '',
}
with self.siteconfig_settings(settings):
form = PrivacyForm(page=self.page,
request=self.request,
user=self.user)
self.assertTrue(form.is_visible())
def test_is_visible_with_privacy_info(self):
"""Testing PrivacyForm.is_visible with privacy_info_html set"""
settings = {
'privacy_enable_user_consent': False,
'privacy_info_html': 'Test.',
}
with self.siteconfig_settings(settings):
form = PrivacyForm(page=self.page,
request=self.request,
user=self.user)
self.assertTrue(form.is_visible())
|
StarcoderdataPython
|
1664122
|
<gh_stars>0
from django.shortcuts import render, redirect, get_object_or_404
from rezerwacje.models import Rezerwacja
from .forms import RezerwacjaForm
from django.utils import timezone
import json
def index(request):
res = Rezerwacja.objects.all()
days = [[], [], [], []]
    for r in res:
        # bucket each reservation's arrival/departure days by arrival month:
        # June, July, August, and everything else
        start_end = [r.data_przyjazdu.day, r.data_wyjazdu.day]
        month_index = {6: 0, 7: 1, 8: 2}.get(r.data_przyjazdu.month, 3)
        days[month_index].append(start_end)
day_list = json.dumps(days)
if request.method == 'POST':
form = RezerwacjaForm(request.POST)
if form.is_valid():
date_from = form.cleaned_data['data_przyjazdu']
date_to = form.cleaned_data['data_wyjazdu']
if date_to > timezone.now().date() and date_from > timezone.now().date() and date_to > date_from:
form.save()
cd = form.cleaned_data
reservation = get_object_or_404(Rezerwacja, imię=cd['imię'], nazwisko=cd['nazwisko'], email=cd['email'], wiadomość=cd['wiadomość'])
return render(request, 'views/reservation_confirm.html', {'reservation': reservation})
else:
message = 'Podano błędne daty'  # Polish: "Invalid dates were given"
return render(request, 'views/reservation_index.html', {'form': form, 'message': message})
else:
message = 'Formularz wypełniono błędnie.'  # Polish: "The form was filled in incorrectly."
return render(request, 'views/reservation_index.html', {'form': form, 'message': message})
else:
form = RezerwacjaForm()
return render(request, 'views/reservation_index.html', {'form': form, 'day_list': day_list})
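# Illustrative sketch only (hypothetical helper, not part of the original app): the
# month-bucketing loop above can be expressed as a single function that returns the
# same [[June], [July], [August], [other]] structure, assuming the same Rezerwacja
# fields data_przyjazdu / data_wyjazdu.
def build_day_list(reservations):
    """Group (arrival day, departure day) pairs by arrival month."""
    month_to_bucket = {6: 0, 7: 1, 8: 2}
    days = [[], [], [], []]
    for r in reservations:
        bucket = month_to_bucket.get(r.data_przyjazdu.month, 3)
        days[bucket].append([r.data_przyjazdu.day, r.data_wyjazdu.day])
    return days
# The view would then reduce to: day_list = json.dumps(build_day_list(res))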
|
StarcoderdataPython
|
9645193
|
<gh_stars>1-10
# -*- coding: utf-8 -*-
"""Grammar definition for C files."""
from __future__ import annotations
import logging
from typing import Generator
from pyparsing import *
from ..abstract_grammar import AbstractGrammar
class Grammar(AbstractGrammar):
"""Grammar definition for C files."""
logger = logging.getLogger('taintalyzing')
def __init__(self, file_):
"""Constructor for a grammar object.
Parameters
----------
file_ : InputFile
The file to parse
"""
super().__init__(file_)
ParserElement.enablePackrat()
# Helpers
self.attribute_separator = oneOf('. ->')
self.ident = Word(alphas, alphanums + '_')
self.ident = Combine(ZeroOrMore(self.ident + self.attribute_separator)('object_name*') +
self.ident('ident*'))
self.vartype = Suppress(Combine(Optional(oneOf('signed unsigned')) + self.ident +
Optional(Word('*')), adjacent=False))
self.array_index = '[' + Word(nums) + ']'
self.rvalue = Forward()
self.func_call = Forward()
self.operators = Suppress(oneOf('|| && | & ^ . -> + - * / % << >> == != < <= > >='))
self.expression = Group(self.rvalue + ZeroOrMore(self.operators + self.rvalue |
self.func_call))
self.expression |= Group(self.func_call + ZeroOrMore(self.operators + (self.rvalue |
self.func_call)))
self.stmt = Forward()
# Function calls
self.param_list = Optional(delimitedList(self.expression))
self.func_call << self.ident('name') + Suppress('(') + self.param_list('args') + \
Suppress(')')
# Control structures -> increase edge count
self.control_structures = ((Keyword('case') + self.expression + ':') |
(Keyword('default') + ':') |
(Keyword('while') + '(' + self.expression + ')') |
(Keyword('for') + '(' + Optional(self.expression) + ';' +
Optional(self.expression) + ';' +
Optional(self.expression) + ')') |
(Keyword('goto') + self.ident))('control_structure')
# Mutually exclusive combinations: else if-else if, else if-else, else if, if-else if,
# if-else, if, else
self.mutually_exclusive_helper_expr = Suppress('(') + self.expression + Suppress(')')
self.mutually_exclusive_helper_body = Suppress('{') + ZeroOrMore(self.stmt) + Suppress('}')
self.mutually_exclusive_helper_body |= self.stmt
self.mutually_exclusive = (Keyword('else if') + self.mutually_exclusive_helper_expr +
self.mutually_exclusive_helper_body + FollowedBy(
Keyword('else if') + self.mutually_exclusive_helper_expr +
self.mutually_exclusive_helper_body))('alternative')
self.mutually_exclusive |= (Keyword('else if') + self.mutually_exclusive_helper_expr +
self.mutually_exclusive_helper_body + FollowedBy(
Keyword('else') +
self.mutually_exclusive_helper_body))('alternative')
self.mutually_exclusive |= (Keyword('else if') + self.mutually_exclusive_helper_expr +
self.mutually_exclusive_helper_body)('alternative')
self.mutually_exclusive |= (Keyword('if') + self.mutually_exclusive_helper_expr +
self.mutually_exclusive_helper_body + FollowedBy(
Keyword('else if') + self.mutually_exclusive_helper_expr +
self.mutually_exclusive_helper_body))
self.mutually_exclusive |= (Keyword('if') + self.mutually_exclusive_helper_expr +
self.mutually_exclusive_helper_body + FollowedBy(
Keyword('else') + self.mutually_exclusive_helper_body))
self.mutually_exclusive |= (Keyword('if') + self.mutually_exclusive_helper_expr +
self.mutually_exclusive_helper_body)
self.mutually_exclusive |= (Keyword('else') +
self.mutually_exclusive_helper_body)('alternative-end')
# Function body
self.prototype = Forward()
self.func_body = Group(OneOrMore(Group(SkipTo(self.stmt | self.control_structures,
failOn=self.prototype, include=True))))
# Assignments
self.assignment = self.ident('lvalue') + Optional(self.array_index) + \
Suppress(oneOf('= -= += ^= &= |= *= %= /=')) + self.expression('expression')
self.assignment |= self.vartype + self.assignment
# Return
self.return_ = Suppress(Keyword('return')) + self.rvalue('return_value')
# Statements
self.stmt << (self.func_call('func_call') | self.assignment('assignment') |
self.return_('return')) + Suppress(';')
self.rvalue << (self.func_call | self.ident + Optional(self.array_index) | Word(nums) |
quotedString)
# Function definitions
self.arg_list = Optional(delimitedList(Group(self.vartype + self.ident('name') +
Suppress(ZeroOrMore('[]')))))
self.prototype << self.vartype('type') + self.ident('name') + Suppress('(') + \
self.arg_list('args') + Suppress(')')
self.func_def = self.prototype + Suppress('{') + self.func_body('body') + Suppress('}')
self.func_def.ignore(cppStyleComment)
def get_statement_count(self, start: int, end: int) -> int:
"""Return the number of statements between `start` and `end`.
Statements are all lines that have an actual effect on the program flow, e.g. method calls
or loops.
Parameters
----------
start : int
The start column
end : int
The end column
Returns
-------
int
The number of statements between `start` and `end`.
"""
return len(list(self.control_structures.scanString(self.file_contents[start:end]))) + \
len(list(self.mutually_exclusive.scanString(self.file_contents[start:end]))) + \
len(list(self.stmt.scanString(self.file_contents[start:end])))
def get_edge_count(self, start: int, end: int) -> int:
"""Return the edge count between `start` and `end`.
Edges are all statements that can branch into two paths, e.g. loops, conditions etc.
Parameters
----------
start : int
Start column
end : int
End column
Returns
-------
int
The edge count between `start` and `end`.
"""
# Loops have three edges: Going into the loop, skipping the loop and returning from the last
# position in the loop to the start of the loop
# Mutually exclusive blocks have two edges, entering or not entering them
return len(list(self.control_structures.scanString(self.file_contents[start:end]))) * 3 + \
len(list(self.mutually_exclusive.scanString(self.file_contents[start:end]))) * 2 + \
len(list(self.stmt.scanString(self.file_contents[start:end])))
def get_mutually_exclusive_positions(self, start: int, end: int) -> Generator[list, None, None]:
"""Return a generator for all mutually exclusive positions from `start` to `end`.
That is return the start and end position for all the statements where a mutually exclusive
block begins and where it ends.
Parameters
----------
start : int
The start column
end : int
The end column
Returns
-------
Generator
Generator for all mutually exclusive paths from `start` to `end`.
"""
return self.mutually_exclusive.scanString(self.file_contents[start:end])
def get_method_definitions(self) -> Generator[list, None, None]:
"""Return a generator for all methods with their bodies.
Returns
-------
Generator
Generator for all function definitions with their bodies
"""
return self.func_def.scanString(self.file_contents)
def get_method_calls(self, start, end) -> Generator[list, None, None]:
"""Return a generator for all function calls between `start` and `end`.
Parameters
----------
start : int
Start column
end : int
End column
Returns
-------
Generator
Generator for all function calls
"""
return self.func_call.scanString(self.file_contents[start:end])
def get_parameters(self, start: int, end: int) -> dict:
"""Return a dictionary of all parameters between `start` and `end` with their default value.
Parameters
----------
start : int
Start column
end : int
End column
Returns
-------
dict
Dictionary with parameter: default value
"""
try:
args = self.prototype.parseString(self.file_contents[start:end]).get('args', [])
parameters = dict()
for parameter in args:
parameters[parameter['name']] = None # There are no default values in C
return parameters
except ParseException:
Grammar.logger.error('Tried to parse parameters in "%s", but no match at start '
'column %d.', self.file_.path, start)
def get_declarations(self, start: int, end: int) -> Generator[list, None, None]:
"""Return a generator for variable declarations between `start` and `end`.
Parameters
----------
start : int
Start column
end : int
End column
Returns
-------
Generator
Generator for all declarations
"""
declarations = Suppress(self.ident) + self.ident + Suppress(Optional(self.array_index)) + \
Suppress(';')
return declarations.scanString(self.file_contents[start:end])
def get_global_variables(self) -> list:
"""Return a list of all global variables.
Returns
-------
list
List of all global variables
"""
# First step: Find all the functions
func_defs = self.get_method_definitions()
func_defs_positions = [(function[1], function[2]) for function in func_defs]
# Second step: Globals are by definition outside of functions
start = -1
outside_func_defs = []
for position in func_defs_positions:
outside_func_defs.append([start + 1, position[0] - 1 if position[0] > 0 else 0])
start = position[1]
if start + 1 <= len(self.file_contents):
outside_func_defs.append([start + 1, len(self.file_contents)])
# Third step: Find declarations and assignments in these regions
globals_ = list()
for start, end in outside_func_defs:
assignments = list(self.get_assignments(start, end))
assignments = [assignment[0] for assignment in assignments]
globals_.extend(assignments)
globals_.extend(list(self.get_declarations(start, end)))
return globals_
def get_assignments(self, start: int, end: int) -> Generator[list, None, None]:
"""Return a generator for all assignments betweeen `start` and `end`.
Parameters
----------
start : int
Start column
end : int
End column
Returns
-------
Generator
Generator for all assignments
"""
return self.assignment.scanString(self.file_contents[start:end])
def get_control_structures(self, start: int, end: int) -> Generator[list, None, None]:
"""Return a generator for all control structures between `start` and `end`.
Parameters
----------
start : int
Start column
end : int
End column
Returns
-------
Generator
Generator for all control structures
"""
return self.control_structures.scanString(self.file_contents[start:end])
def get_returns(self, start, end) -> Generator[list, None, None]:
"""Return a generator for all return values between `start` and `end`.
Parameters
----------
start : int
Start column
end : int
End column
Returns
-------
Generator
Generator for all return values
"""
return self.return_.scanString(self.file_contents[start:end])
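# Standalone sketch (plain pyparsing only, independent of AbstractGrammar/InputFile)
# of the Forward + scanString pattern the grammar above relies on for recursive
# constructs such as nested function calls.
if __name__ == "__main__":
    from pyparsing import Forward, Optional, Suppress, Word, alphanums, alphas, delimitedList, nums
    ident = Word(alphas, alphanums + "_")
    call = Forward()
    operand = call | ident | Word(nums)
    # The Forward placeholder is filled in after the operands that refer to it exist.
    call <<= ident("name") + Suppress("(") + Optional(delimitedList(operand))("args") + Suppress(")")
    source = "x = f(g(1), y); h();"
    for tokens, start, end in call.scanString(source):
        print(tokens[0], source[start:end])   # -> "f f(g(1), y)" then "h h()"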
|
StarcoderdataPython
|
4963982
|
# engineer_number module
#
# Copyright (c) 2012-2017 梅濁酒(umedoblock)
#
# This software is released under the MIT License.
# https://github.com/umedoblock/engineer_number
import os
import sys
import unittest
from test import support
sys.path.insert(0, os.path.join(os.path.dirname(__file__), "..", ".."))
from engineer_number import *
from engineer_number.constants import *
from engineer_number.lib import get_resistors, make_all_combinations, close_values
class TestEngineerNumberUtil(unittest.TestCase):
def test_make_all_combinations(self):
exponent10s = range(ONE, MEGA + 1)
combs = make_all_combinations("E12", exponent10s)
tup_combs = tuple(str(x) for x in combs)
expected = (
"1.000", "1.200", "1.500", "1.800", "2.200", "2.700", "3.300",
"3.900", "4.700", "5.600", "6.800", "8.200", "10.000", "12.000",
"15.000", "18.000", "22.000", "27.000", "33.000", "39.000",
"47.000", "56.000", "68.000", "82.000", "100.000", "120.000",
"150.000", "180.000", "220.000", "270.000", "330.000", "390.000",
"470.000", "560.000", "680.000", "820.000", "1.000k", "1.200k",
"1.500k", "1.800k", "2.200k", "2.700k", "3.300k", "3.900k",
"4.700k", "5.600k", "6.800k", "8.200k", "10.000k", "12.000k",
"15.000k", "18.000k", "22.000k", "27.000k", "33.000k", "39.000k",
"47.000k", "56.000k", "68.000k", "82.000k", "100.000k", "120.000k",
"150.000k", "180.000k", "220.000k", "270.000k", "330.000k",
"390.000k", "470.000k", "560.000k", "680.000k", "820.000k",
"1.000M", "1.200M", "1.500M", "1.800M", "2.200M", "2.700M",
"3.300M", "3.900M", "4.700M", "5.600M", "6.800M", "8.200M")
for x in combs:
self.assertIsInstance(x, EngineerNumber)
self.assertEqual(expected, tup_combs)
def test_make_resistors(self):
resistors = get_resistors("E12", ORDERS_RESISTOR)
resistors_ = tuple(str(x) for x in resistors)
expected = (
"1.000", "1.200", "1.500", "1.800", "2.200", "2.700", "3.300",
"3.900", "4.700", "5.600", "6.800", "8.200", "10.000", "12.000",
"15.000", "18.000", "22.000", "27.000", "33.000", "39.000",
"47.000", "56.000", "68.000", "82.000", "100.000", "120.000",
"150.000", "180.000", "220.000", "270.000", "330.000", "390.000",
"470.000", "560.000", "680.000", "820.000", "1.000k", "1.200k",
"1.500k", "1.800k", "2.200k", "2.700k", "3.300k", "3.900k",
"4.700k", "5.600k", "6.800k", "8.200k", "10.000k", "12.000k",
"15.000k", "18.000k", "22.000k", "27.000k", "33.000k", "39.000k",
"47.000k", "56.000k", "68.000k", "82.000k", "100.000k", "120.000k",
"150.000k", "180.000k", "220.000k", "270.000k", "330.000k",
"390.000k", "470.000k", "560.000k", "680.000k", "820.000k",
"1.000M", "1.200M", "1.500M", "1.800M", "2.200M", "2.700M",
"3.300M", "3.900M", "4.700M", "5.600M", "6.800M", "8.200M",
"10.000M")
for x in resistors:
self.assertIsInstance(x, EngineerNumber)
self.assertEqual(expected, resistors_)
def test_close_values_eq_up(self):
resistors = get_resistors("E24", ORDERS_RESISTOR)
k15 = EngineerNumber(15, 3)
self.assertEqual(EngineerNumber(15, 3), close_values(k15, "up", resistors))
def test_close_values_eq_down(self):
resistors = get_resistors("E24", ORDERS_RESISTOR)
k15 = EngineerNumber(15, 3)
self.assertEqual(EngineerNumber(15, 3), close_values(k15, "down", resistors))
def test_close_values_same_exponent(self):
k47 = EngineerNumber(47, 3)
k50 = EngineerNumber(50, 3)
k56 = EngineerNumber(56, 3)
resistors = get_resistors("E12", ORDERS_RESISTOR)
self.assertEqual(k56, close_values(k50, "up", resistors))
self.assertEqual(k47, close_values(k50, "down", resistors))
def test_close_values_transfer_next_exponent(self):
resistors = get_resistors("E12", ORDERS_RESISTOR)
r83 = EngineerNumber(8.3, 1)
r100 = EngineerNumber(1.0, 2)
self.assertEqual(r100, close_values(r83, "up", resistors))
k094 = EngineerNumber(0.94, 3)
r820 = EngineerNumber(8.2, 2)
self.assertEqual(r820, close_values(k094, "down", resistors))
def test_close_values_out_of_range(self):
resistors = get_resistors("E12", ORDERS_RESISTOR)
r0_9 = EngineerNumber(0.9, ONE)
M101 = EngineerNumber(10.1, MEGA)
self.assertIsNone(close_values(r0_9, "down", resistors))
self.assertIsNone(close_values(M101, "up", resistors))
def test_close_values_at_limit(self):
resistors = get_resistors("E12", ORDERS_RESISTOR)
r0_9 = EngineerNumber(0.9, ONE)
M101 = EngineerNumber(10.1, MEGA)
self.assertEqual(EngineerNumber(1), close_values(r0_9, "up", resistors))
self.assertEqual(EngineerNumber("10M"), close_values(M101, "down", resistors))
if __name__ == "__main__":
unittest.main()
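# Library-free sketch (hypothetical helper, independent of engineer_number) of the
# "closest preferred value" behaviour the tests above exercise: snap a value to the
# nearest E12 entry in the requested direction, carrying into the next decade when needed.
import bisect
import math

E12_SKETCH = (1.0, 1.2, 1.5, 1.8, 2.2, 2.7, 3.3, 3.9, 4.7, 5.6, 6.8, 8.2)

def closest_e12(value, direction="up"):
    """Return the E12 value nearest to `value` in the given direction ("up" or "down")."""
    exponent = math.floor(math.log10(value))
    mantissa = value / 10 ** exponent          # normalised into [1, 10)
    if direction == "up":
        idx = bisect.bisect_left(E12_SKETCH, mantissa)
        if idx == len(E12_SKETCH):             # past 8.2 -> 1.0 of the next decade
            return 1.0 * 10 ** (exponent + 1)
        return E12_SKETCH[idx] * 10 ** exponent
    idx = bisect.bisect_right(E12_SKETCH, mantissa) - 1
    return E12_SKETCH[idx] * 10 ** exponent

# closest_e12(50e3, "up") is ~56k and closest_e12(50e3, "down") is ~47k, mirroring
# test_close_values_same_exponent above.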
|
StarcoderdataPython
|
3387646
|
<filename>fairseq/models/retrosys.py
from argparse import ArgumentParser, ArgumentTypeError, ArgumentError, Namespace
from dataclasses import dataclass, _MISSING_TYPE, MISSING
from enum import Enum
from fairseq.modules.multihead_attention import MultiheadAttention
from fairseq.models.transformer import Embedding, TransformerDecoder, TransformerEncoder
from fairseq.dataclass.utils import interpret_dc_type, eval_str_list, gen_parser_from_dataclass
import inspect
from fairseq.dataclass.configs import FairseqDataclass
from fairseq.tasks import register_task
import math
from typing import Any, Dict, List, Optional, Tuple
import torch
import torch.nn as nn
from fairseq import utils
from fairseq.distributed import fsdp_wrap
from fairseq.models import (
FairseqEncoder,
FairseqEncoderDecoderModel,
FairseqIncrementalDecoder,
register_model,
register_model_architecture,
)
from fairseq.modules import (
AdaptiveSoftmax,
BaseLayer,
FairseqDropout,
LayerDropModuleList,
LayerNorm,
PositionalEmbedding,
SinusoidalPositionalEmbedding,
TransformerDecoderLayer,
TransformerEncoderLayer,
)
from fairseq.modules.quant_noise import quant_noise
from fairseq.modules.checkpoint_activations import checkpoint_wrapper
from torch import Tensor
from fairseq.models.doublemodel import DoubleModel
from random import uniform
from fairseq.modules import GradMultiply
DEFAULT_MAX_SOURCE_POSITIONS = 1024
DEFAULT_MAX_TARGET_POSITIONS = 1024
DEFAULT_MIN_PARAMS_TO_WRAP = int(1e8)
def gen_plm_parser_from_dataclass(
parser: ArgumentParser, dataclass_instance: FairseqDataclass, delete_default: bool = False
) -> None:
def argparse_name(name: str):
if name == "data":
return name
if name == "_name":
return None
return "--plm-" + name.replace("_", "-")
def get_kwargs_from_dc(dataclass_instance: FairseqDataclass, k: str) -> Dict[str, Any]:
kwargs = {}
field_type = dataclass_instance._get_type(k)
inter_type = interpret_dc_type(field_type)
field_default = dataclass_instance._get_default(k)
if isinstance(inter_type, type) and issubclass(inter_type, Enum):
field_choices = [t.value for t in list(inter_type)]
else:
field_choices = None
field_help = dataclass_instance._get_help(k)
field_const = dataclass_instance._get_argparse_const(k)
if isinstance(field_default, str) and field_default.startswith("${"):
kwargs["default"] = field_default
else:
if field_default is MISSING:
kwargs["required"] = True
if field_choices is not None:
kwargs["choices"] = field_choices
if (
isinstance(inter_type, type)
and (issubclass(inter_type, List) or issubclass(inter_type, Tuple))
) or ("List" in str(inter_type) or "Tuple" in str(inter_type)):
if "int" in str(inter_type):
kwargs["type"] = lambda x: eval_str_list(x, int)
elif "float" in str(inter_type):
kwargs["type"] = lambda x: eval_str_list(x, float)
elif "str" in str(inter_type):
kwargs["type"] = lambda x: eval_str_list(x, str)
else:
raise NotImplementedError(
"parsing of type " + str(inter_type) + " is not implemented"
)
if field_default is not MISSING:
kwargs["default"] = (
",".join(map(str, field_default)) if field_default is not None else None
)
elif (isinstance(inter_type, type) and issubclass(inter_type, Enum)) or "Enum" in str(
inter_type
):
kwargs["type"] = str
if field_default is not MISSING:
if isinstance(field_default, Enum):
kwargs["default"] = field_default.value
else:
kwargs["default"] = field_default
elif inter_type is bool:
kwargs["action"] = "store_false" if field_default is True else "store_true"
kwargs["default"] = field_default
else:
kwargs["type"] = inter_type
if field_default is not MISSING:
kwargs["default"] = field_default
kwargs["help"] = field_help
if field_const is not None:
kwargs["const"] = field_const
kwargs["nargs"] = "?"
return kwargs
for k in dataclass_instance._get_all_attributes():
field_name = argparse_name(dataclass_instance._get_name(k))
field_type = dataclass_instance._get_type(k)
if field_name is None:
continue
elif inspect.isclass(field_type) and issubclass(field_type, FairseqDataclass):
gen_parser_from_dataclass(parser, field_type(), delete_default)
continue
kwargs = get_kwargs_from_dc(dataclass_instance, k)
field_args = [field_name]
alias = dataclass_instance._get_argparse_alias(k)
if alias is not None:
field_args.append(alias)
if "default" in kwargs:
if isinstance(kwargs["default"], str) and kwargs["default"].startswith("${"):
if kwargs["help"] is None:
# this is a field with a name that will be added elsewhere
continue
else:
del kwargs["default"]
if delete_default and "default" in kwargs:
del kwargs["default"]
try:
parser.add_argument(*field_args, **kwargs)
except ArgumentError:
pass
def gen_plm_args(args: Namespace) -> Namespace:
kwargs = {}
for k, v in vars(args).items():
if k.startswith("plm_"):
kwargs[k[len("plm_") :]] = v
return Namespace(**kwargs)
@register_model("retrosys")
class RetroSysModel(FairseqEncoderDecoderModel):
def __init__(self, args, encoder, decoder):
super().__init__(encoder, decoder)
self.args = args
self.supports_align_args = True
@staticmethod
def add_args(parser: ArgumentParser):
"""Add model-specific arguments to the parser."""
# fmt: off
parser.add_argument('--activation-fn',
choices=utils.get_available_activation_fns(),
help='activation function to use')
parser.add_argument('--dropout', type=float, metavar='D', help='dropout probability')
parser.add_argument('--attention-dropout',
type=float,
metavar='D',
help='dropout probability for attention weights')
parser.add_argument('--activation-dropout',
'--relu-dropout',
type=float,
metavar='D',
help='dropout probability after activation in FFN.')
parser.add_argument('--encoder-embed-path',
type=str,
metavar='STR',
help='path to pre-trained encoder embedding')
parser.add_argument('--encoder-embed-dim',
type=int,
metavar='N',
help='encoder embedding dimension')
parser.add_argument('--encoder-ffn-embed-dim',
type=int,
metavar='N',
help='encoder embedding dimension for FFN')
parser.add_argument('--encoder-layers', type=int, metavar='N', help='num encoder layers')
parser.add_argument('--encoder-attention-heads',
type=int,
metavar='N',
help='num encoder attention heads')
parser.add_argument('--encoder-normalize-before',
action='store_true',
help='apply layernorm before each encoder block')
parser.add_argument('--encoder-learned-pos',
action='store_true',
help='use learned positional embeddings in the encoder')
parser.add_argument('--decoder-embed-path',
type=str,
metavar='STR',
help='path to pre-trained decoder embedding')
parser.add_argument('--decoder-embed-dim',
type=int,
metavar='N',
help='decoder embedding dimension')
parser.add_argument('--decoder-ffn-embed-dim',
type=int,
metavar='N',
help='decoder embedding dimension for FFN')
parser.add_argument('--decoder-layers', type=int, metavar='N', help='num decoder layers')
parser.add_argument('--decoder-attention-heads',
type=int,
metavar='N',
help='num decoder attention heads')
parser.add_argument('--decoder-learned-pos',
action='store_true',
help='use learned positional embeddings in the decoder')
parser.add_argument('--decoder-normalize-before',
action='store_true',
help='apply layernorm before each decoder block')
parser.add_argument('--decoder-output-dim',
type=int,
metavar='N',
help='decoder output dimension (extra linear layer '
'if different from decoder embed dim)')
parser.add_argument('--share-decoder-input-output-embed',
action='store_true',
help='share decoder input and output embeddings')
parser.add_argument('--share-all-embeddings',
action='store_true',
help='share encoder, decoder and output embeddings'
' (requires shared dictionary and embed dim)')
parser.add_argument('--no-token-positional-embeddings',
default=False,
action='store_true',
help='if set, disables positional embeddings (outside self attention)')
parser.add_argument('--adaptive-softmax-cutoff',
metavar='EXPR',
help='comma separated list of adaptive softmax cutoff points. '
'Must be used with adaptive_loss criterion')
parser.add_argument('--adaptive-softmax-dropout',
type=float,
metavar='D',
help='sets adaptive softmax dropout for the tail projections')
parser.add_argument('--layernorm-embedding',
action='store_true',
help='add layernorm to embedding')
parser.add_argument('--no-scale-embedding',
action='store_true',
help='if True, dont scale embeddings')
parser.add_argument('--checkpoint-activations',
action='store_true',
help='checkpoint activations at each layer, which saves GPU '
'memory usage at the cost of some additional compute')
parser.add_argument(
'--offload-activations',
action='store_true',
help=
'checkpoint activations at each layer, then save to gpu. Sets --checkpoint-activations.'
)
# args for "Cross+Self-Attention for Transformer Models" (Peitz et al., 2019)
parser.add_argument('--no-cross-attention',
default=False,
action='store_true',
help='do not perform cross-attention')
parser.add_argument('--cross-self-attention',
default=False,
action='store_true',
help='perform cross+self-attention')
# args for "Reducing Transformer Depth on Demand with Structured Dropout" (Fan et al., 2019)
parser.add_argument('--encoder-layerdrop',
type=float,
metavar='D',
default=0,
help='LayerDrop probability for encoder')
parser.add_argument('--decoder-layerdrop',
type=float,
metavar='D',
default=0,
help='LayerDrop probability for decoder')
parser.add_argument('--encoder-layers-to-keep',
default=None,
help='which layers to *keep* when pruning as a comma-separated list')
parser.add_argument('--decoder-layers-to-keep',
default=None,
help='which layers to *keep* when pruning as a comma-separated list')
# args for Training with Quantization Noise for Extreme Model Compression ({Fan*, Stock*} et al., 2020)
parser.add_argument('--quant-noise-pq',
type=float,
metavar='D',
default=0,
help='iterative PQ quantization noise at training time')
parser.add_argument('--quant-noise-pq-block-size',
type=int,
metavar='D',
default=8,
help='block size of quantization noise at training time')
parser.add_argument(
'--quant-noise-scalar',
type=float,
metavar='D',
default=0,
help='scalar quantization noise and scalar quantization at training time')
# args for Fully Sharded Data Parallel (FSDP) training
parser.add_argument(
'--min-params-to-wrap',
type=int,
metavar='D',
default=DEFAULT_MIN_PARAMS_TO_WRAP,
help=('minimum number of params for a layer to be wrapped with FSDP() when '
'training with --ddp-backend=fully_sharded. Smaller values will '
'improve memory efficiency, but may make torch.distributed '
'communication less efficient due to smaller input sizes. This option '
'is set to 0 (i.e., always wrap) when --checkpoint-activations or '
'--offload-activations are passed.'))
# args for doublemodel
from fairseq.models.doublemodel import DoubleModelConfig
gen_plm_parser_from_dataclass(parser, DoubleModelConfig())
# args for bertnmt
parser.add_argument(
"--dropnet",
type=float,
default=0
)
parser.add_argument(
"--gradmultiply",
type=float,
default=1.
)
parser.add_argument(
"--finetune-plm",
action="store_true",
default=False
)
parser.add_argument(
"--plm-grad",
type=float,
default=None
)
parser.add_argument(
"--from-scratch",
action='store_true',
default=False
)
parser.add_argument(
"--plm-as-encoder",
action="store_true",
default=False
)
# fmt: on
def load_state_dict(self, state_dict, strict=True, model_cfg=None, args=None):
keys_to_delete = []
cur_state = self.state_dict()
for k in state_dict.keys():
if k.startswith("encoder.plm_encoder.projection_heads") or k.startswith(
"encoder.plm_encoder.prediction_heads"
):
keys_to_delete.append(k)
for k in keys_to_delete:
del state_dict[k]
for k in cur_state.keys():
if k.startswith("encoder.plm_encoder.projection_heads") or k.startswith(
"encoder.plm_encoder.prediction_heads"
):
state_dict[k] = cur_state[k]
if "plm_attn" in k:
state_dict[k] = cur_state[k]
elif self.args.from_scratch and not k.startswith("encoder.plm_encoder"):
state_dict[k] = cur_state[k]
super().load_state_dict(state_dict, strict=strict, model_cfg=model_cfg, args=args)
@classmethod
def build_model(cls, args, task):
base_architecture(args)
if args.encoder_layers_to_keep:
args.encoder_layers = len(args.encoder_layers_to_keep.split(","))
if args.decoder_layers_to_keep:
args.decoder_layers = len(args.decoder_layers_to_keep.split(","))
if getattr(args, "max_source_positions", None) is None:
args.max_source_positions = DEFAULT_MAX_SOURCE_POSITIONS
if getattr(args, "max_target_positions", None) is None:
args.max_target_positions = DEFAULT_MAX_TARGET_POSITIONS
src_dict, tgt_dict = task.source_dictionary, task.target_dictionary
if args.share_all_embeddings:
if src_dict != tgt_dict:
raise ValueError("--share-all-embeddings requires a joined dictionary")
if args.encoder_embed_dim != args.decoder_embed_dim:
raise ValueError(
"--share-all-embeddings requires --encoder-embed-dim to match --decoder-embed-dim"
)
if args.decoder_embed_path and (args.decoder_embed_path != args.encoder_embed_path):
raise ValueError("--share-all-embeddings not compatible with --decoder-embed-path")
encoder_embed_tokens = cls.build_embedding(
args, src_dict, args.encoder_embed_dim, args.encoder_embed_path
)
decoder_embed_tokens = encoder_embed_tokens
args.share_decoder_input_output_embed = True
else:
encoder_embed_tokens = cls.build_embedding(
args, src_dict, args.encoder_embed_dim, args.encoder_embed_path
)
decoder_embed_tokens = cls.build_embedding(
args, tgt_dict, args.decoder_embed_dim, args.decoder_embed_path
)
if getattr(args, "offload_activations", False):
args.checkpoint_activations = True # offloading implies checkpointing
args_new = gen_plm_args(args)
task_new = Namespace(source_dictionary=task.plm_dict)
plm = DoubleModel.build_model(args_new, task_new)
encoder = cls.build_encoder(args, src_dict, encoder_embed_tokens, plm)
decoder = cls.build_decoder(args, tgt_dict, decoder_embed_tokens)
if not args.share_all_embeddings:
min_params_to_wrap = getattr(args, "min_params_to_wrap", DEFAULT_MIN_PARAMS_TO_WRAP)
# fsdp_wrap is a no-op when --ddp-backend != fully_sharded
encoder = fsdp_wrap(encoder, min_num_params=min_params_to_wrap)
decoder = fsdp_wrap(decoder, min_num_params=min_params_to_wrap)
# plm = fsdp_wrap(plm, min_num_params=min_params_to_wrap)
return cls(args, encoder, decoder)
@classmethod
def build_embedding(cls, args, dictionary, embed_dim, path=None):
num_embeddings = len(dictionary)
padding_idx = dictionary.pad()
emb = Embedding(num_embeddings, embed_dim, padding_idx)
if path:
embed_dict = utils.parse_embedding(path)
utils.load_embedding(embed_dict, dictionary, emb)
return emb
@classmethod
def build_encoder(cls, args, src_dict, embed_tokens, plm_encoder):
return Encoder(args, src_dict, embed_tokens, plm_encoder)
@classmethod
def build_decoder(cls, args, tgt_dict, embed_tokens):
return Decoder(
args,
tgt_dict,
embed_tokens,
no_encoder_attn=getattr(args, "no_cross_attention", False),
)
def forward(
self,
src_tokens,
src_lengths,
prev_output_tokens,
plm_input,
return_all_hiddens: bool = True,
features_only: bool = False,
alignment_layer: Optional[int] = None,
alignment_heads: Optional[int] = None,
):
encoder_out = self.encoder(
src_tokens,
src_lengths=src_lengths,
return_all_hiddens=return_all_hiddens,
plm_input=plm_input,
)
decoder_out = self.decoder(
prev_output_tokens,
encoder_out=encoder_out,
features_only=features_only,
alignment_layer=alignment_layer,
alignment_heads=alignment_heads,
src_lengths=src_lengths,
return_all_hiddens=return_all_hiddens,
)
return decoder_out
@torch.jit.export
def get_normalized_probs(
self,
net_output: Tuple[Tensor, Optional[Dict[str, List[Optional[Tensor]]]]],
log_probs: bool,
sample: Optional[Dict[str, Tensor]] = None,
):
"""Get normalized probabilities (or log probs) from a net's output."""
return self.get_normalized_probs_scriptable(net_output, log_probs, sample)
class Encoder(TransformerEncoder):
def __init__(self, args, src_dict, embed_tokens, plm_encoder):
super().__init__(args, src_dict, embed_tokens)
self.plm_encoder = plm_encoder
self.finetune_plm = getattr(args, "finetune_plm", False)
if not self.finetune_plm:
for p in self.plm_encoder.parameters():
p.requires_grad = False
else:
for n, p in self.plm_encoder.named_parameters():
if n.startswith("prediction_heads") or n.startswith("projection_heads"):
p.requires_grad = False
if n.startswith("encoder0.lm_head"):
p.requires_grad = False
if n.startswith("encoder1"):
p.requires_grad = False
self.gradmultiply = getattr(args, "plm_grad", None)
if self.gradmultiply is None:
self.gradmultiply = 1 / getattr(args, "gradmultiply", 1.)
def build_encoder_layer(self, args):
layer = EncoderLayer(args)
checkpoint = getattr(args, "checkpoint_activations", False)
if checkpoint:
offload_to_cpu = getattr(args, "offload_activations", False)
layer = checkpoint_wrapper(layer, offload_to_cpu=offload_to_cpu)
# if we are checkpointing, enforce that FSDP always wraps the
# checkpointed layer, regardless of layer size
min_params_to_wrap = (
getattr(args, "min_params_to_wrap", DEFAULT_MIN_PARAMS_TO_WRAP) if not checkpoint else 0
)
layer = fsdp_wrap(layer, min_num_params=min_params_to_wrap)
return layer
def forward(
self,
src_tokens,
plm_input=None,
src_lengths: Optional[torch.Tensor] = None,
return_all_hiddens: bool = False,
token_embeddings: Optional[torch.Tensor] = None,
):
plm_input = plm_input["net_input0"]["src_tokens"]
return self.forward_scriptable(
src_tokens,
plm_input,
src_lengths,
return_all_hiddens,
token_embeddings,
)
def forward_scriptable(
self,
src_tokens,
plm_input: Optional[torch.Tensor],
src_lengths: Optional[torch.Tensor],
return_all_hiddens: bool,
token_embeddings: Optional[torch.Tensor],
):
plm_out = self.plm_encoder.forward_retrosys(plm_input)
if self.finetune_plm:
plm_out["encoder_out"][0] = GradMultiply.apply(plm_out["encoder_out"][0], self.gradmultiply)
encoder_padding_mask = src_tokens.eq(self.padding_idx)
has_pads = src_tokens.device.type == "xla" or encoder_padding_mask.any()
x, encoder_embedding = self.forward_embedding(src_tokens, token_embeddings)
if has_pads:
x = x * (1 - encoder_padding_mask.unsqueeze(-1).type_as(x))
# B x T x C -> T x B x C
x = x.transpose(0, 1)
encoder_states = []
if return_all_hiddens:
encoder_states.append(x)
if plm_out is not None:
plm_padding_mask = plm_out["encoder_padding_mask"][0]
plm_out = plm_out["encoder_out"][0]
plm_has_pads = plm_out.device.type == "xla" or plm_padding_mask.any()
# encoder layers
for layer in self.layers:
x = layer(
x,
encoder_padding_mask=encoder_padding_mask if has_pads else None,
plm_out=plm_out,
plm_padding_mask=plm_padding_mask if plm_has_pads else None,
)
if return_all_hiddens:
assert encoder_states is not None
encoder_states.append(x)
if self.layer_norm is not None:
x = self.layer_norm(x)
# The Pytorch Mobile lite interpreter does not support returning NamedTuple in
# `forward` so we use a dictionary instead.
# TorchScript does not support mixed values so the values are all lists.
# The empty list is equivalent to None.
return {
"encoder_out": [x], # T x B x C
"encoder_padding_mask": [encoder_padding_mask], # B x T
"plm_out": [plm_out], # T x B x C
"plm_padding_mask": [plm_padding_mask], # B x T
"encoder_embedding": [encoder_embedding], # B x T x C
"encoder_states": encoder_states, # List[T x B x C]
"src_tokens": [],
"src_lengths": [],
}
@torch.jit.export
def reorder_encoder_out(self, encoder_out: Dict[str, List[Tensor]], new_order):
new_encoder_out = super().reorder_encoder_out(encoder_out, new_order)
if len(encoder_out["plm_out"]) == 0:
new_plm_out = []
else:
new_plm_out = [encoder_out["plm_out"][0].index_select(1, new_order)]
if len(encoder_out["plm_padding_mask"]) == 0:
new_plm_padding_mask = []
else:
new_plm_padding_mask = [encoder_out["plm_padding_mask"][0].index_select(0, new_order)]
new_encoder_out.update({"plm_out": new_plm_out, "plm_padding_mask": new_plm_padding_mask})
return new_encoder_out
class Decoder(TransformerDecoder):
def extract_features_scriptable(
self,
prev_output_tokens,
encoder_out: Optional[Dict[str, List[Tensor]]],
incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]],
full_context_alignment: bool,
alignment_layer: Optional[int],
alignment_heads: Optional[int],
):
bs, slen = prev_output_tokens.size()
if alignment_layer is None:
alignment_layer = self.num_layers - 1
enc: Optional[Tensor] = None
padding_mask: Optional[Tensor] = None
plm: Optional[Tensor] = None
plm_padding_mask: Optional[Tensor] = None
if encoder_out is not None:
enc = encoder_out["encoder_out"][0]
padding_mask = encoder_out["encoder_padding_mask"][0]
assert enc.size()[1] == bs, f"Expected enc.shape == (t, {bs}, c) got {enc.shape}"
plm = encoder_out["plm_out"][0]
plm_padding_mask = encoder_out["plm_padding_mask"][0]
assert plm.size()[1] == bs, f"Expected plm.shape == (t, {bs}, c) got {plm.shape}"
positions = None
if self.embed_positions is not None:
positions = self.embed_positions(
prev_output_tokens, incremental_state=incremental_state
)
if incremental_state is not None:
prev_output_tokens = prev_output_tokens[:, -1:]
if positions is not None:
positions = positions[:, -1:]
# embed tokens and positions
x = self.embed_scale * self.embed_tokens(prev_output_tokens)
if self.quant_noise is not None:
x = self.quant_noise(x)
if self.project_in_dim is not None:
x = self.project_in_dim(x)
if positions is not None:
x += positions
if self.layernorm_embedding is not None:
x = self.layernorm_embedding(x)
x = self.dropout_module(x)
# B x T x C -> T x B x C
x = x.transpose(0, 1)
self_attn_padding_mask: Optional[Tensor] = None
if self.cross_self_attention or prev_output_tokens.eq(self.padding_idx).any():
self_attn_padding_mask = prev_output_tokens.eq(self.padding_idx)
# decoder layers
attn: Optional[Tensor] = None
inner_states: List[Optional[Tensor]] = [x]
for idx, layer in enumerate(self.layers):
if incremental_state is None and not full_context_alignment:
self_attn_mask = self.buffered_future_mask(x)
else:
self_attn_mask = None
x, layer_attn, _ = layer(
x,
enc,
padding_mask,
plm,
plm_padding_mask,
incremental_state,
self_attn_mask=self_attn_mask,
self_attn_padding_mask=self_attn_padding_mask,
need_attn=bool((idx == alignment_layer)),
need_head_weights=bool((idx == alignment_layer)),
)
inner_states.append(x)
if layer_attn is not None and idx == alignment_layer:
attn = layer_attn.float().to(x)
if attn is not None:
if alignment_heads is not None:
attn = attn[:alignment_heads]
# average probabilities over heads
attn = attn.mean(dim=0)
if self.layer_norm is not None:
x = self.layer_norm(x)
# T x B x C -> B x T x C
x = x.transpose(0, 1)
if self.project_out_dim is not None:
x = self.project_out_dim(x)
return x, {"attn": [attn], "inner_states": inner_states}
def build_decoder_layer(self, args, no_encoder_attn=False):
layer = DecoderLayer(args, no_encoder_attn=no_encoder_attn)
checkpoint = getattr(args, "checkpoint_activations", False)
if checkpoint:
offload_to_cpu = getattr(args, "offload_activations", False)
layer = checkpoint_wrapper(layer, offload_to_cpu=offload_to_cpu)
# if we are checkpointing, enforce that FSDP always wraps the
# checkpointed layer, regardless of layer size
min_params_to_wrap = (
getattr(args, "min_params_to_wrap", DEFAULT_MIN_PARAMS_TO_WRAP) if not checkpoint else 0
)
layer = fsdp_wrap(layer, min_num_params=min_params_to_wrap)
return layer
class EncoderLayer(nn.Module):
def __init__(self, args):
super().__init__()
self.args = args
self.embed_dim = args.encoder_embed_dim
self.quant_noise = getattr(args, "quant_noise_pq", 0)
self.quant_noise_block_size = getattr(args, "quant_noise_pq_block_size", 8) or 8
self.self_attn = self.build_self_attention(self.embed_dim, args)
self.plm_attn = self.build_encoder_plm_attention(self.embed_dim, args)
self.self_attn_layer_norm = LayerNorm(self.embed_dim)
self.dropout_module = FairseqDropout(args.dropout, module_name=self.__class__.__name__)
self.activation_fn = utils.get_activation_fn(
activation=getattr(args, "activation_fn", "relu") or "relu"
)
activation_dropout_p = getattr(args, "activation_dropout", 0) or 0
if activation_dropout_p == 0:
# for backwards compatibility with models that use args.relu_dropout
activation_dropout_p = getattr(args, "relu_dropout", 0) or 0
self.activation_dropout_module = FairseqDropout(
float(activation_dropout_p), module_name=self.__class__.__name__
)
self.normalize_before = args.encoder_normalize_before
self.fc1 = self.build_fc1(
self.embed_dim,
args.encoder_ffn_embed_dim,
self.quant_noise,
self.quant_noise_block_size,
)
self.fc2 = self.build_fc2(
args.encoder_ffn_embed_dim,
self.embed_dim,
self.quant_noise,
self.quant_noise_block_size,
)
self.final_layer_norm = LayerNorm(self.embed_dim)
self.dropnet = getattr(args, "dropnet", 0.25)
self.gradmultiply = getattr(args, "gradmultiply", 1.)
self.plm_as_encoder = getattr(args, "plm_as_encoder", False)
def build_fc1(self, input_dim, output_dim, q_noise, qn_block_size):
return quant_noise(nn.Linear(input_dim, output_dim), p=q_noise, block_size=qn_block_size)
def build_fc2(self, input_dim, output_dim, q_noise, qn_block_size):
return quant_noise(nn.Linear(input_dim, output_dim), p=q_noise, block_size=qn_block_size)
def build_self_attention(self, embed_dim, args, add_bias_kv=False, add_zero_attn=False):
return MultiheadAttention(
embed_dim,
args.encoder_attention_heads,
dropout=args.attention_dropout,
add_bias_kv=add_bias_kv,
add_zero_attn=add_zero_attn,
self_attention=not getattr(args, "cross_self_attention", False),
q_noise=self.quant_noise,
qn_block_size=self.quant_noise_block_size,
)
def build_encoder_plm_attention(self, embed_dim, args):
return MultiheadAttention(
embed_dim,
args.encoder_attention_heads,
kdim=args.plm_encoder_embed_dim,
vdim=args.plm_encoder_embed_dim,
dropout=args.attention_dropout,
encoder_decoder_attention=True,
q_noise=self.quant_noise,
qn_block_size=self.quant_noise_block_size,
)
def residual_connection(self, x, residual):
return residual + x
def upgrade_state_dict_named(self, state_dict, name):
layer_norm_map = {"0": "self_attn_layer_norm", "1": "final_layer_norm"}
for old, new in layer_norm_map.items():
for m in ("weight", "bias"):
k = "{}.layer_norms.{}.{}".format(name, old, m)
if k in state_dict:
state_dict["{}.{}.{}".format(name, new, m)] = state_dict[k]
del state_dict[k]
def get_ratio(self):
if self.plm_as_encoder:
return [0, 1]
if self.dropnet > 0 and self.training:
frand = float(uniform(0, 1))
if frand < self.dropnet:
return [1, 0]
elif frand > 1 - self.dropnet:
return [0, 1]
else:
return [0.5, 0.5]
else:
return [0.5, 0.5]
def forward(
self,
x,
plm_out,
encoder_padding_mask: Optional[Tensor],
plm_padding_mask: Optional[Tensor],
attn_mask: Optional[Tensor] = None,
):
if attn_mask is not None:
attn_mask = attn_mask.masked_fill(attn_mask.to(torch.bool), -1e8)
residual = x
if self.normalize_before:
x = self.self_attn_layer_norm(x)
x1, _ = self.self_attn(
query=x,
key=x,
value=x,
key_padding_mask=encoder_padding_mask,
need_weights=False,
attn_mask=attn_mask,
)
x2, _ = self.plm_attn(
query=x, key=plm_out, value=plm_out, key_padding_mask=plm_padding_mask, attn_mask=None
)
x1 = self.dropout_module(x1)
x2 = self.dropout_module(x2)
x2 = GradMultiply.apply(x2, self.gradmultiply)
dropnet = self.get_ratio()
x = residual + dropnet[0] * x1 + dropnet[1] * x2
if not self.normalize_before:
x = self.self_attn_layer_norm(x)
residual = x
if self.normalize_before:
x = self.final_layer_norm(x)
x = self.activation_fn(self.fc1(x))
x = self.activation_dropout_module(x)
x = self.fc2(x)
x = self.dropout_module(x)
x = self.residual_connection(x, residual)
if not self.normalize_before:
x = self.final_layer_norm(x)
return x
class DecoderLayer(nn.Module):
def __init__(self, args, no_encoder_attn=False, add_bias_kv=False, add_zero_attn=False):
super().__init__()
self.embed_dim = args.decoder_embed_dim
self.dropout_module = FairseqDropout(args.dropout, module_name=self.__class__.__name__)
self.quant_noise = getattr(args, "quant_noise_pq", 0)
self.quant_noise_block_size = getattr(args, "quant_noise_pq_block_size", 8)
self.cross_self_attention = getattr(args, "cross_self_attention", False)
self.self_attn = self.build_self_attention(
self.embed_dim, args, add_bias_kv=add_bias_kv, add_zero_attn=add_zero_attn,
)
self.plm_attn = self.build_decoder_plm_attention(self.embed_dim, args)
self.activation_fn = utils.get_activation_fn(
activation=str(args.activation_fn)
if getattr(args, "activation_fn", None) is not None
else "relu"
)
activation_dropout_p = getattr(args, "activation_dropout", 0) or 0
if activation_dropout_p == 0:
# for backwards compatibility with models that use args.relu_dropout
activation_dropout_p = getattr(args, "relu_dropout", 0) or 0
self.activation_dropout_module = FairseqDropout(
float(activation_dropout_p), module_name=self.__class__.__name__
)
self.normalize_before = args.decoder_normalize_before
export = getattr(args, "char_inputs", False)
self.self_attn_layer_norm = LayerNorm(self.embed_dim, export=export)
if no_encoder_attn:
self.encoder_attn = None
self.encoder_attn_layer_norm = None
else:
self.encoder_attn = self.build_encoder_attention(self.embed_dim, args)
self.encoder_attn_layer_norm = LayerNorm(self.embed_dim, export=export)
self.fc1 = self.build_fc1(
self.embed_dim,
args.decoder_ffn_embed_dim,
self.quant_noise,
self.quant_noise_block_size,
)
self.fc2 = self.build_fc2(
args.decoder_ffn_embed_dim,
self.embed_dim,
self.quant_noise,
self.quant_noise_block_size,
)
self.final_layer_norm = LayerNorm(self.embed_dim, export=export)
self.need_attn = True
self.onnx_trace = False
self.dropnet = getattr(args, "dropnet", 0.25)
self.gradmultiply = getattr(args, "gradmultiply", 1.)
self.plm_as_encoder = getattr(args, "plm_as_encoder", False)
def build_fc1(self, input_dim, output_dim, q_noise, qn_block_size):
return quant_noise(nn.Linear(input_dim, output_dim), q_noise, qn_block_size)
def build_fc2(self, input_dim, output_dim, q_noise, qn_block_size):
return quant_noise(nn.Linear(input_dim, output_dim), q_noise, qn_block_size)
def build_self_attention(self, embed_dim, args, add_bias_kv=False, add_zero_attn=False):
return MultiheadAttention(
embed_dim,
args.decoder_attention_heads,
dropout=args.attention_dropout,
add_bias_kv=add_bias_kv,
add_zero_attn=add_zero_attn,
self_attention=not getattr(args, "cross_self_attention", False),
q_noise=self.quant_noise,
qn_block_size=self.quant_noise_block_size,
)
def build_encoder_attention(self, embed_dim, args):
return MultiheadAttention(
embed_dim,
args.decoder_attention_heads,
kdim=getattr(args, "encoder_embed_dim", None),
vdim=getattr(args, "encoder_embed_dim", None),
dropout=args.attention_dropout,
encoder_decoder_attention=True,
q_noise=self.quant_noise,
qn_block_size=self.quant_noise_block_size,
)
def build_decoder_plm_attention(self, embed_dim, args):
return MultiheadAttention(
embed_dim,
args.decoder_attention_heads,
dropout=args.attention_dropout,
kdim=args.plm_encoder_embed_dim,
vdim=args.plm_encoder_embed_dim,
encoder_decoder_attention=True,
q_noise=self.quant_noise,
qn_block_size=self.quant_noise_block_size,
)
def prepare_for_onnx_export_(self):
self.onnx_trace = True
def residual_connection(self, x, residual):
return residual + x
def make_generation_fast_(self, need_attn: bool = False, **kwargs):
self.need_attn = need_attn
def get_ratio(self):
if self.plm_as_encoder:
return [0, 1]
if self.dropnet > 0 and self.training:
frand = float(uniform(0, 1))
if frand < self.dropnet:
return [1, 0]
elif frand > 1 - self.dropnet:
return [0, 1]
else:
return [0.5, 0.5]
else:
return [0.5, 0.5]
def forward(
self,
x,
encoder_out: Optional[torch.Tensor] = None,
encoder_padding_mask: Optional[torch.Tensor] = None,
plm_out: Optional[torch.Tensor] = None,
plm_padding_mask: Optional[torch.Tensor] = None,
incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]] = None,
prev_self_attn_state: Optional[List[torch.Tensor]] = None,
prev_attn_state: Optional[List[torch.Tensor]] = None,
self_attn_mask: Optional[torch.Tensor] = None,
self_attn_padding_mask: Optional[torch.Tensor] = None,
need_attn: bool = False,
need_head_weights: bool = False,
):
if need_head_weights:
need_attn = True
residual = x
if self.normalize_before:
x = self.self_attn_layer_norm(x)
if prev_self_attn_state is not None:
prev_key, prev_value = prev_self_attn_state[:2]
saved_state: Dict[str, Optional[Tensor]] = {
"prev_key": prev_key,
"prev_value": prev_value,
}
if len(prev_self_attn_state) >= 3:
saved_state["prev_key_padding_mask"] = prev_self_attn_state[2]
assert incremental_state is not None
self.self_attn._set_input_buffer(incremental_state, saved_state)
_self_attn_input_buffer = self.self_attn._get_input_buffer(incremental_state)
if self.cross_self_attention and not (
incremental_state is not None
and _self_attn_input_buffer is not None
and "prev_key" in _self_attn_input_buffer
):
if self_attn_mask is not None:
assert encoder_out is not None
self_attn_mask = torch.cat(
(x.new_zeros(x.size(0), encoder_out.size(0)), self_attn_mask), dim=1
)
if self_attn_padding_mask is not None:
if encoder_padding_mask is None:
assert encoder_out is not None
encoder_padding_mask = self_attn_padding_mask.new_zeros(
encoder_out.size(1), encoder_out.size(0)
)
self_attn_padding_mask = torch.cat(
(encoder_padding_mask, self_attn_padding_mask), dim=1
)
assert encoder_out is not None
y = torch.cat((encoder_out, x), dim=0)
else:
y = x
x, attn = self.self_attn(
query=x,
key=y,
value=y,
key_padding_mask=self_attn_padding_mask,
incremental_state=incremental_state,
need_weights=False,
attn_mask=self_attn_mask,
)
x = self.dropout_module(x)
x = self.residual_connection(x, residual)
if not self.normalize_before:
x = self.self_attn_layer_norm(x)
if self.encoder_attn is not None and encoder_out is not None:
residual = x
if self.normalize_before:
x = self.encoder_attn_layer_norm(x)
if prev_attn_state is not None:
prev_key, prev_value = prev_attn_state[:2]
saved_state: Dict[str, Optional[Tensor]] = {
"prev_key": prev_key,
"prev_value": prev_value,
}
if len(prev_attn_state) >= 3:
saved_state["prev_key_padding_mask"] = prev_attn_state[2]
assert incremental_state is not None
self.encoder_attn._set_input_buffer(incremental_state, saved_state)
x1, attn = self.encoder_attn(
query=x,
key=encoder_out,
value=encoder_out,
key_padding_mask=encoder_padding_mask,
incremental_state=incremental_state,
static_kv=True,
need_weights=need_attn or (not self.training and self.need_attn),
need_head_weights=need_head_weights,
)
x1 = self.dropout_module(x1)
x2, _ = self.plm_attn(
query=x,
key=plm_out,
value=plm_out,
key_padding_mask=plm_padding_mask,
incremental_state=incremental_state,
static_kv=True,
need_weights=False,
need_head_weights=False,
)
x2 = GradMultiply.apply(x2, self.gradmultiply)
x2 = self.dropout_module(x2)
dropnet = self.get_ratio()
x = residual + dropnet[0] * x1 + dropnet[1] * x2
if not self.normalize_before:
x = self.encoder_attn_layer_norm(x)
residual = x
if self.normalize_before:
x = self.final_layer_norm(x)
x = self.activation_fn(self.fc1(x))
x = self.activation_dropout_module(x)
x = self.fc2(x)
x = self.dropout_module(x)
x = self.residual_connection(x, residual)
if not self.normalize_before:
x = self.final_layer_norm(x)
if self.onnx_trace and incremental_state is not None:
saved_state = self.self_attn._get_input_buffer(incremental_state)
assert saved_state is not None
if self_attn_padding_mask is not None:
self_attn_state = [
saved_state["prev_key"],
saved_state["prev_value"],
saved_state["prev_key_padding_mask"],
]
else:
self_attn_state = [saved_state["prev_key"], saved_state["prev_value"]]
return x, attn, self_attn_state
return x, attn, None
@register_model_architecture("retrosys", "retrosys")
def base_architecture(args):
args.encoder_embed_path = getattr(args, "encoder_embed_path", None)
args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 512)
args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 2048)
args.encoder_layers = getattr(args, "encoder_layers", 6)
args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 8)
args.encoder_normalize_before = getattr(args, "encoder_normalize_before", False)
args.encoder_learned_pos = getattr(args, "encoder_learned_pos", False)
args.decoder_embed_path = getattr(args, "decoder_embed_path", None)
args.decoder_embed_dim = getattr(args, "decoder_embed_dim", args.encoder_embed_dim)
args.decoder_ffn_embed_dim = getattr(args, "decoder_ffn_embed_dim", args.encoder_ffn_embed_dim)
args.decoder_layers = getattr(args, "decoder_layers", 6)
args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 8)
args.decoder_normalize_before = getattr(args, "decoder_normalize_before", False)
args.decoder_learned_pos = getattr(args, "decoder_learned_pos", False)
args.attention_dropout = getattr(args, "attention_dropout", 0.0)
args.activation_dropout = getattr(args, "activation_dropout", 0.0)
args.activation_fn = getattr(args, "activation_fn", "relu")
args.dropout = getattr(args, "dropout", 0.1)
args.adaptive_softmax_cutoff = getattr(args, "adaptive_softmax_cutoff", None)
args.adaptive_softmax_dropout = getattr(args, "adaptive_softmax_dropout", 0)
args.share_decoder_input_output_embed = getattr(args, "share_decoder_input_output_embed", False)
args.share_all_embeddings = getattr(args, "share_all_embeddings", False)
args.no_token_positional_embeddings = getattr(args, "no_token_positional_embeddings", False)
args.adaptive_input = getattr(args, "adaptive_input", False)
args.no_cross_attention = getattr(args, "no_cross_attention", False)
args.cross_self_attention = getattr(args, "cross_self_attention", False)
args.decoder_output_dim = getattr(args, "decoder_output_dim", args.decoder_embed_dim)
args.decoder_input_dim = getattr(args, "decoder_input_dim", args.decoder_embed_dim)
args.no_scale_embedding = getattr(args, "no_scale_embedding", False)
args.layernorm_embedding = getattr(args, "layernorm_embedding", False)
args.tie_adaptive_weights = getattr(args, "tie_adaptive_weights", False)
args.checkpoint_activations = getattr(args, "checkpoint_activations", False)
args.offload_activations = getattr(args, "offload_activations", False)
if args.offload_activations:
args.checkpoint_activations = True
args.encoder_layers_to_keep = getattr(args, "encoder_layers_to_keep", None)
args.decoder_layers_to_keep = getattr(args, "decoder_layers_to_keep", None)
args.encoder_layerdrop = getattr(args, "encoder_layerdrop", 0)
args.decoder_layerdrop = getattr(args, "decoder_layerdrop", 0)
args.quant_noise_pq = getattr(args, "quant_noise_pq", 0)
args.quant_noise_pq_block_size = getattr(args, "quant_noise_pq_block_size", 8)
args.quant_noise_scalar = getattr(args, "quant_noise_scalar", 0)
@register_model_architecture("retrosys", "transformer_iwslt_de_en_retrosys")
def transformer_iwslt_de_en(args):
args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 512)
args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 1024)
args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 4)
args.encoder_layers = getattr(args, "encoder_layers", 6)
args.decoder_embed_dim = getattr(args, "decoder_embed_dim", 512)
args.decoder_ffn_embed_dim = getattr(args, "decoder_ffn_embed_dim", 1024)
args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 4)
args.decoder_layers = getattr(args, "decoder_layers", 6)
base_architecture(args)
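# Standalone, torch-free sketch (illustration only) of the drop-net mixing used by
# EncoderLayer/DecoderLayer above: during training each of the two attention branches
# (regular attention vs. PLM attention) is dropped with probability `dropnet`,
# otherwise both contribute equally; at inference both always contribute equally.
from random import uniform

def dropnet_ratio(dropnet=0.25, training=True, plm_as_encoder=False):
    if plm_as_encoder:
        return [0, 1]            # only the PLM branch is used
    if dropnet > 0 and training:
        frand = uniform(0, 1)
        if frand < dropnet:
            return [1, 0]        # keep only the self/encoder-attention branch
        if frand > 1 - dropnet:
            return [0, 1]        # keep only the PLM-attention branch
    return [0.5, 0.5]            # average both branches

# The layers then combine the two branch outputs as:
#   x = residual + ratio[0] * x_attn + ratio[1] * x_plm_attn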
|
StarcoderdataPython
|
3480775
|
<filename>pysyrenn/frontend/tests/conv2d_layer.py
"""Tests the methods in conv2d_layer.py
"""
import numpy as np
import torch
from external.bazel_python.pytest_helper import main
from pysyrenn.frontend.strided_window_data import StridedWindowData
from pysyrenn.frontend.conv2d_layer import Conv2DLayer
def test_compute():
"""Tests that the Conv2D layer correctly computes a Conv2D.
"""
batch = 101
width = 32
height = 32
channels = 3
stride = (2, 2)
pad = (0, 0)
filter_height = 4
filter_width = 4
out_channels = 5
inputs = np.random.uniform(size=(batch, height * width * channels))
# TODO(masotoud): use actual numbers for the filters and actually compute
# true_outputs.
filters = np.zeros(shape=(filter_height, filter_width, channels, out_channels))
biases = np.ones(shape=(out_channels))
# out height/width = (32 - 4) / 2 + 1 = 15
true_outputs = np.ones(shape=(batch, 15 * 15 * out_channels))
window_data = StridedWindowData((height, width, channels),
(filter_height, filter_width),
stride, pad, out_channels)
conv2d_layer = Conv2DLayer(window_data, filters, biases)
assert np.allclose(conv2d_layer.compute(inputs), true_outputs)
assert np.allclose(conv2d_layer.compute(inputs, jacobian=True),
np.zeros_like(true_outputs))
torch_inputs = torch.FloatTensor(inputs)
torch_outputs = conv2d_layer.compute(torch_inputs).numpy()
assert np.allclose(torch_outputs, true_outputs)
def test_serialize():
"""Tests Conv2D.{serialize, deserialize}.py.
"""
height, width, channels, out_channels = np.random.choice(
[8, 16, 32, 64, 128], size=4)
window_height, window_width = np.random.choice([2, 4, 8], size=2)
pad = (0, 0)
window_data = StridedWindowData((height, width, channels),
(window_height, window_width),
(window_height, window_width),
pad, out_channels)
filters = np.random.uniform(size=(window_height, window_width,
channels, out_channels))
biases = np.random.uniform(size=(out_channels))
serialized = Conv2DLayer(window_data, filters, biases).serialize()
assert serialized.WhichOneof("layer_data") == "conv2d_data"
serialized_window_data = serialized.conv2d_data.window_data
assert serialized_window_data.in_height == height
assert serialized_window_data.in_width == width
assert serialized_window_data.in_channels == channels
assert serialized_window_data.window_height == window_height
assert serialized_window_data.window_width == window_width
assert serialized_window_data.stride_height == window_height
assert serialized_window_data.stride_width == window_width
assert serialized_window_data.pad_height == 0
assert serialized_window_data.pad_width == 0
assert serialized_window_data.out_channels == out_channels
serialized_filters = np.array(serialized.conv2d_data.filters)
assert np.allclose(serialized_filters.flatten(), filters.flatten())
serialized_biases = np.array(serialized.conv2d_data.biases)
assert np.allclose(serialized_biases.flatten(), biases.flatten())
deserialized = Conv2DLayer.deserialize(serialized)
assert deserialized.serialize() == serialized
serialized.relu_data.SetInParent()
assert Conv2DLayer.deserialize(serialized) is None
main(__name__, __file__)
|
StarcoderdataPython
|
9686463
|
import numpy as np
import matplotlib.gridspec as gridspec
from matplotlib import pyplot as plt
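# NOTE: this fragment assumes `pc` (a powder calibrator), `instr` (an
# instrument exposing .num_panels and .detectors), `img_dict`, `plane_data`
# and `eta_tol` are already defined by the surrounding calibration script.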
data_dict = pc._extract_powder_lines(fit_tth_tol=1.0)
# %% sample plot to check fitted line positions ahead of fitting
frows = int(np.ceil(np.sqrt(instr.num_panels)))
fcols = int(np.floor(np.sqrt(instr.num_panels)))
fig, ax = plt.subplots(frows, fcols)
fig_row, fig_col = np.unravel_index(np.arange(instr.num_panels),
(frows, fcols))
ifig = 0
for det_key, panel in instr.detectors.items():
all_pts = np.vstack(data_dict[det_key])
'''
pimg = equalize_adapthist(
rescale_intensity(img_dict[det_key], out_range=(0, 1)),
10, clip_limit=0.2)
'''
pimg = np.array(img_dict[det_key], dtype=float)
# pimg[~panel.panel_buffer] = np.nan
ax[fig_row[ifig], fig_col[ifig]].imshow(
pimg,
vmin=np.percentile(img_dict[det_key], 5),
vmax=np.percentile(img_dict[det_key], 90),
cmap=plt.cm.bone_r
)
ideal_angs, ideal_xys = panel.make_powder_rings(plane_data,
delta_eta=eta_tol)
rijs = panel.cartToPixel(np.vstack(ideal_xys))
ax[fig_row[ifig], fig_col[ifig]].plot(rijs[:, 1], rijs[:, 0], 'cx')
ax[fig_row[ifig], fig_col[ifig]].set_title(det_key)
rijs = panel.cartToPixel(all_pts[:, :2])
ax[fig_row[ifig], fig_col[ifig]].plot(rijs[:, 1], rijs[:, 0], 'm+')
ax[fig_row[ifig], fig_col[ifig]].set_title(det_key)
ifig += 1
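# Render the assembled panel grid (not in the original fragment); assumes an
# interactive matplotlib backend is available.
plt.show()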
|
StarcoderdataPython
|
3217580
|
# ext/declarative/clsregistry.py
# Copyright (C) 2005-2020 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Routines to handle the string class registry used by declarative.
This system allows specification of classes and expressions used in
:func:`_orm.relationship` using strings.
"""
import weakref
from . import attributes
from . import interfaces
from .descriptor_props import SynonymProperty
from .properties import ColumnProperty
from .util import class_mapper
from .. import exc
from .. import inspection
from .. import util
from ..sql.schema import _get_table_key
# strong references to registries which we place in
# the _decl_class_registry, which is usually weak referencing.
# the internal registries here link to classes with weakrefs and remove
# themselves when all references to contained classes are removed.
_registries = set()
def add_class(classname, cls, decl_class_registry):
"""Add a class to the _decl_class_registry associated with the
given declarative class.
"""
if classname in decl_class_registry:
# class already exists.
existing = decl_class_registry[classname]
if not isinstance(existing, _MultipleClassMarker):
existing = decl_class_registry[classname] = _MultipleClassMarker(
[cls, existing]
)
else:
decl_class_registry[classname] = cls
try:
root_module = decl_class_registry["_sa_module_registry"]
except KeyError:
decl_class_registry[
"_sa_module_registry"
] = root_module = _ModuleMarker("_sa_module_registry", None)
tokens = cls.__module__.split(".")
# build up a tree like this:
# modulename: myapp.snacks.nuts
#
# myapp->snack->nuts->(classes)
# snack->nuts->(classes)
# nuts->(classes)
#
# this allows partial token paths to be used.
while tokens:
token = tokens.pop(0)
module = root_module.get_module(token)
for token in tokens:
module = module.get_module(token)
module.add_class(classname, cls)
def remove_class(classname, cls, decl_class_registry):
if classname in decl_class_registry:
existing = decl_class_registry[classname]
if isinstance(existing, _MultipleClassMarker):
existing.remove_item(cls)
else:
del decl_class_registry[classname]
try:
root_module = decl_class_registry["_sa_module_registry"]
except KeyError:
return
tokens = cls.__module__.split(".")
while tokens:
token = tokens.pop(0)
module = root_module.get_module(token)
for token in tokens:
module = module.get_module(token)
module.remove_class(classname, cls)
def _key_is_empty(key, decl_class_registry, test):
"""test if a key is empty of a certain object.
used for unit tests against the registry to see if garbage collection
is working.
"test" is a callable that will be passed an object should return True
if the given object is the one we were looking for.
We can't pass the actual object itself b.c. this is for testing garbage
collection; the caller will have to have removed references to the
object itself.
"""
if key not in decl_class_registry:
return True
thing = decl_class_registry[key]
if isinstance(thing, _MultipleClassMarker):
for sub_thing in thing.contents:
if test(sub_thing):
return False
else:
return not test(thing)
class _MultipleClassMarker(object):
"""refers to multiple classes of the same name
within _decl_class_registry.
"""
__slots__ = "on_remove", "contents", "__weakref__"
def __init__(self, classes, on_remove=None):
self.on_remove = on_remove
self.contents = set(
[weakref.ref(item, self._remove_item) for item in classes]
)
_registries.add(self)
def remove_item(self, cls):
self._remove_item(weakref.ref(cls))
def __iter__(self):
return (ref() for ref in self.contents)
def attempt_get(self, path, key):
if len(self.contents) > 1:
raise exc.InvalidRequestError(
'Multiple classes found for path "%s" '
"in the registry of this declarative "
"base. Please use a fully module-qualified path."
% (".".join(path + [key]))
)
else:
ref = list(self.contents)[0]
cls = ref()
if cls is None:
raise NameError(key)
return cls
def _remove_item(self, ref):
self.contents.discard(ref)
if not self.contents:
_registries.discard(self)
if self.on_remove:
self.on_remove()
def add_item(self, item):
# protect against class registration race condition against
# asynchronous garbage collection calling _remove_item,
# [ticket:3208]
modules = set(
[
cls.__module__
for cls in [ref() for ref in self.contents]
if cls is not None
]
)
if item.__module__ in modules:
util.warn(
"This declarative base already contains a class with the "
"same class name and module name as %s.%s, and will "
"be replaced in the string-lookup table."
% (item.__module__, item.__name__)
)
self.contents.add(weakref.ref(item, self._remove_item))
class _ModuleMarker(object):
"""Refers to a module name within
_decl_class_registry.
"""
__slots__ = "parent", "name", "contents", "mod_ns", "path", "__weakref__"
def __init__(self, name, parent):
self.parent = parent
self.name = name
self.contents = {}
self.mod_ns = _ModNS(self)
if self.parent:
self.path = self.parent.path + [self.name]
else:
self.path = []
_registries.add(self)
def __contains__(self, name):
return name in self.contents
def __getitem__(self, name):
return self.contents[name]
def _remove_item(self, name):
self.contents.pop(name, None)
if not self.contents and self.parent is not None:
self.parent._remove_item(self.name)
_registries.discard(self)
def resolve_attr(self, key):
return getattr(self.mod_ns, key)
def get_module(self, name):
if name not in self.contents:
marker = _ModuleMarker(name, self)
self.contents[name] = marker
else:
marker = self.contents[name]
return marker
def add_class(self, name, cls):
if name in self.contents:
existing = self.contents[name]
existing.add_item(cls)
else:
existing = self.contents[name] = _MultipleClassMarker(
[cls], on_remove=lambda: self._remove_item(name)
)
def remove_class(self, name, cls):
if name in self.contents:
existing = self.contents[name]
existing.remove_item(cls)
class _ModNS(object):
__slots__ = ("__parent",)
def __init__(self, parent):
self.__parent = parent
def __getattr__(self, key):
try:
value = self.__parent.contents[key]
except KeyError:
pass
else:
if value is not None:
if isinstance(value, _ModuleMarker):
return value.mod_ns
else:
assert isinstance(value, _MultipleClassMarker)
return value.attempt_get(self.__parent.path, key)
raise AttributeError(
"Module %r has no mapped classes "
"registered under the name %r" % (self.__parent.name, key)
)
class _GetColumns(object):
__slots__ = ("cls",)
def __init__(self, cls):
self.cls = cls
def __getattr__(self, key):
mp = class_mapper(self.cls, configure=False)
if mp:
if key not in mp.all_orm_descriptors:
raise AttributeError(
"Class %r does not have a mapped column named %r"
% (self.cls, key)
)
desc = mp.all_orm_descriptors[key]
if desc.extension_type is interfaces.NOT_EXTENSION:
prop = desc.property
if isinstance(prop, SynonymProperty):
key = prop.name
elif not isinstance(prop, ColumnProperty):
raise exc.InvalidRequestError(
"Property %r is not an instance of"
" ColumnProperty (i.e. does not correspond"
" directly to a Column)." % key
)
return getattr(self.cls, key)
inspection._inspects(_GetColumns)(
lambda target: inspection.inspect(target.cls)
)
class _GetTable(object):
__slots__ = "key", "metadata"
def __init__(self, key, metadata):
self.key = key
self.metadata = metadata
def __getattr__(self, key):
return self.metadata.tables[_get_table_key(key, self.key)]
def _determine_container(key, value):
if isinstance(value, _MultipleClassMarker):
value = value.attempt_get([], key)
return _GetColumns(value)
class _class_resolver(object):
__slots__ = "cls", "prop", "arg", "fallback", "_dict", "_resolvers"
def __init__(self, cls, prop, fallback, arg):
self.cls = cls
self.prop = prop
self.arg = arg
self.fallback = fallback
self._dict = util.PopulateDict(self._access_cls)
self._resolvers = ()
def _access_cls(self, key):
cls = self.cls
manager = attributes.manager_of_class(cls)
decl_base = manager.registry
decl_class_registry = decl_base._class_registry
metadata = decl_base.metadata
if key in decl_class_registry:
return _determine_container(key, decl_class_registry[key])
elif key in metadata.tables:
return metadata.tables[key]
elif key in metadata._schemas:
return _GetTable(key, cls.metadata)
elif (
"_sa_module_registry" in decl_class_registry
and key in decl_class_registry["_sa_module_registry"]
):
registry = decl_class_registry["_sa_module_registry"]
return registry.resolve_attr(key)
elif self._resolvers:
for resolv in self._resolvers:
value = resolv(key)
if value is not None:
return value
return self.fallback[key]
def _raise_for_name(self, name, err):
util.raise_(
exc.InvalidRequestError(
"When initializing mapper %s, expression %r failed to "
"locate a name (%r). If this is a class name, consider "
"adding this relationship() to the %r class after "
"both dependent classes have been defined."
% (self.prop.parent, self.arg, name, self.cls)
),
from_=err,
)
def _resolve_name(self):
name = self.arg
d = self._dict
rval = None
try:
for token in name.split("."):
if rval is None:
rval = d[token]
else:
rval = getattr(rval, token)
except KeyError as err:
self._raise_for_name(name, err)
except NameError as n:
self._raise_for_name(n.args[0], n)
else:
if isinstance(rval, _GetColumns):
return rval.cls
else:
return rval
def __call__(self):
try:
x = eval(self.arg, globals(), self._dict)
if isinstance(x, _GetColumns):
return x.cls
else:
return x
except NameError as n:
self._raise_for_name(n.args[0], n)
_fallback_dict = None
def _resolver(cls, prop):
global _fallback_dict
if _fallback_dict is None:
import sqlalchemy
from sqlalchemy.orm import foreign, remote
_fallback_dict = util.immutabledict(sqlalchemy.__dict__).union(
{"foreign": foreign, "remote": remote}
)
def resolve_arg(arg):
return _class_resolver(cls, prop, _fallback_dict, arg)
def resolve_name(arg):
return _class_resolver(cls, prop, _fallback_dict, arg)._resolve_name
return resolve_name, resolve_arg
|
StarcoderdataPython
|
277497
|
from __future__ import annotations
import os
import secrets
import socket
import stat
from ipaddress import ip_address
from typing import Optional
def bind_socket(host: str, port: int, *, backlog=100) -> socket.socket:
"""Create TCP server socket.
:param host: IPv4, IPv6 or hostname may be specified
:param port: TCP port number
:param backlog: Maximum number of connections to queue
:return: socket.socket object
"""
try: # IP address: family must be specified for IPv6 at least
ip = ip_address(host)
host = str(ip)
sock = socket.socket(
socket.AF_INET6 if ip.version == 6 else socket.AF_INET
)
except ValueError: # Hostname, may become AF_INET or AF_INET6
sock = socket.socket()
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
sock.bind((host, port))
sock.listen(backlog)
return sock
def bind_unix_socket(path: str, *, mode=0o666, backlog=100) -> socket.socket:
"""Create unix socket.
    Opens a new socket or atomically replaces an existing one with zero downtime.
    :param path: filesystem path
    :param mode: permission bits to apply to the socket file
    :param backlog: Maximum number of connections to queue
    :return: socket.socket object
    """
# Sanitise and pre-verify socket path
path = os.path.abspath(path)
folder = os.path.dirname(path)
if not os.path.isdir(folder):
raise FileNotFoundError(f"Socket folder does not exist: {folder}")
try:
if not stat.S_ISSOCK(os.stat(path, follow_symlinks=False).st_mode):
raise FileExistsError(f"Existing file is not a socket: {path}")
except FileNotFoundError:
pass
# Create new socket with a random temporary name
tmp_path = f"{path}.{secrets.token_urlsafe()}"
sock = socket.socket(socket.AF_UNIX)
try:
# Critical section begins (filename races)
sock.bind(tmp_path)
try:
os.chmod(tmp_path, mode)
# Start listening before rename to avoid connection failures
sock.listen(backlog)
os.rename(tmp_path, path)
except: # noqa: E722
try:
os.unlink(tmp_path)
finally:
raise
except: # noqa: E722
try:
sock.close()
finally:
raise
return sock
def remove_unix_socket(path: Optional[str]) -> None:
"""Remove dead unix socket during server exit."""
if not path:
return
try:
if stat.S_ISSOCK(os.stat(path, follow_symlinks=False).st_mode):
# Is it actually dead (doesn't belong to a new server instance)?
with socket.socket(socket.AF_UNIX) as testsock:
try:
testsock.connect(path)
except ConnectionRefusedError:
os.unlink(path)
except FileNotFoundError:
pass
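# Minimal usage sketch (not part of the original module), assuming a Linux
# host: bind an ephemeral TCP port and a throwaway unix socket, then clean up.
if __name__ == "__main__":
    tcp_sock = bind_socket("127.0.0.1", 0)  # port 0 -> ephemeral port
    print("TCP socket bound to", tcp_sock.getsockname())
    tcp_sock.close()
    unix_path = "/tmp/bind_socket_demo.sock"  # hypothetical path
    unix_sock = bind_unix_socket(unix_path)
    unix_sock.close()
    remove_unix_socket(unix_path)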
|
StarcoderdataPython
|
380524
|
<reponame>mhubl/botrecon
from botrecon import botrecon
botrecon()
|
StarcoderdataPython
|
6436265
|
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.modules.utils import _pair, _quadruple
class MedianPool2d(nn.Module):
""" Median pool (usable as median filter when stride=1) module.
Args:
kernel_size: size of pooling kernel, int or 2-tuple
stride: pool stride, int or 2-tuple
padding: pool padding, int or 4-tuple (l, r, t, b) as in pytorch F.pad
same: override padding and enforce same padding, boolean
"""
def __init__(self, kernel_size=3, stride=1, padding=0, same=False):
super(MedianPool2d, self).__init__()
self.k = _pair(kernel_size)
self.stride = _pair(stride)
self.padding = _quadruple(padding) # convert to l, r, t, b
self.same = same
def _padding(self, x):
        if self.same:  # 'same': pad so output spatial dims match the input
ih, iw = x.size()[2:]
if ih % self.stride[0] == 0:
ph = max(self.k[0] - self.stride[0], 0)
else:
ph = max(self.k[0] - (ih % self.stride[0]), 0)
if iw % self.stride[1] == 0:
pw = max(self.k[1] - self.stride[1], 0)
else:
pw = max(self.k[1] - (iw % self.stride[1]), 0)
pl = pw // 2
pr = pw - pl
pt = ph // 2
pb = ph - pt
padding = (pl, pr, pt, pb)
else: # 'same' no: input dim =/= output dim
padding = self.padding
return padding
def forward(self, x): # x = adv_patch image
# using existing pytorch functions and tensor ops so that we get autograd,
# would likely be more efficient to implement from scratch at C/Cuda level
# NB in F.pad (https://pytorch.org/docs/stable/nn.functional.html):
# 2nd parameter is 'pad (tuple)', i.e. m-elements tuple, where m/2 <= input dimensions and m is even
# here m = 4 (left, right, top, bottom)
# PADDING:
        x = F.pad(x, self._padding(x), mode='reflect')  # reflection-pad by self._padding(x) --> (l, r, t, b)
# prepare to do MEDIAN:
x = x.unfold(2, self.k[0], self.stride[0]).unfold(3, self.k[1], self.stride[1])
# NB unfold: unfold(dimension, size, step) -> Tensor
# Returns a tensor which contains all slices of size `size` from `self` tensor along the dimension `dimension`
# Step between two slices is given by `step`
# Here size = kernel, step = stride, dimensions 2 and 3 respectively
        x = x.contiguous().view(x.size()[:4] + (-1,)).median(dim=-1)[0]  # keep (N, C, H', W'), flatten the k*k window dims and take their median
return x
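# Minimal usage sketch (not part of the original module): with stride=1 and
# same=True the layer behaves like a 3x3 median filter that preserves the
# spatial size of an NCHW input.
if __name__ == "__main__":
    pool = MedianPool2d(kernel_size=3, stride=1, same=True)
    sample = torch.randn(1, 3, 8, 8)
    print(pool(sample).shape)  # expected: torch.Size([1, 3, 8, 8])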
|
StarcoderdataPython
|
4936142
|
# Copyright 2014-2016 Insight Software Consortium.
# Copyright 2004-2008 <NAME>.
# Distributed under the Boost Software License, Version 1.0.
# See http://www.boost.org/LICENSE_1_0.txt
"""
provides low-level functionality, needed to undecorate/demangle compiler
generated unique names and map them to the declarations
On Windows:
ctypes package is used to call `UnDecorateSymbolName` function from
`dbghelp.dll`
On Linux:
"nm" utility is used.
"""
import os
import re
import ctypes
import warnings
from .. import declarations
class UNDECORATE_NAME_OPTIONS(object):
"""defines few constants for `UnDecorateSymbolName` function"""
UNDNAME_COMPLETE = 0x0000 # Enables full undecoration.
# Removes leading underscores from Microsoft extended keywords.
UNDNAME_NO_LEADING_UNDERSCORES = 0x0001
# Disables expansion of Microsoft extended keywords.
UNDNAME_NO_MS_KEYWORDS = 0x0002
# Disables expansion of return type for primary declaration.
UNDNAME_NO_FUNCTION_RETURNS = 0x0004
# Disables expansion of the declaration model.
UNDNAME_NO_ALLOCATION_MODEL = 0x0008
# Disables expansion of the declaration language specifier.
UNDNAME_NO_ALLOCATION_LANGUAGE = 0x0010
UNDNAME_RESERVED1 = 0x0020 # RESERVED.
UNDNAME_RESERVED2 = 0x0040 # RESERVED.
UNDNAME_NO_THISTYPE = 0x0060 # Disables all modifiers on the this type.
# Disables expansion of access specifiers for members.
UNDNAME_NO_ACCESS_SPECIFIERS = 0x0080
# Disables expansion of "throw-signatures" for functions and pointers to
# functions.
UNDNAME_NO_THROW_SIGNATURES = 0x0100
# Disables expansion of static or virtual members.
UNDNAME_NO_MEMBER_TYPE = 0x0200
# Disables expansion of the Microsoft model for UDT returns.
UNDNAME_NO_RETURN_UDT_MODEL = 0x0400
UNDNAME_32_BIT_DECODE = 0x0800 # Undecorates 32-bit decorated names.
# Gets only the name for primary declaration; returns just [scope::]name.
# Expands template params.
UNDNAME_NAME_ONLY = 0x1000
# Input is just a type encoding; composes an abstract declarator.
UNDNAME_TYPE_ONLY = 0x2000
# The real template parameters are available.
UNDNAME_HAVE_PARAMETERS = 0x4000
UNDNAME_NO_ECSU = 0x8000 # Suppresses enum/class/struct/union.
# Suppresses check for valid identifier characters.
UNDNAME_NO_IDENT_CHAR_CHECK = 0x10000
UNDNAME_NO_PTR64 = 0x20000 # Does not include ptr64 in output.
UNDNAME_SCOPES_ONLY = UNDNAME_NO_LEADING_UNDERSCORES \
| UNDNAME_NO_MS_KEYWORDS \
| UNDNAME_NO_FUNCTION_RETURNS \
| UNDNAME_NO_ALLOCATION_MODEL \
| UNDNAME_NO_ALLOCATION_LANGUAGE \
| UNDNAME_NO_ACCESS_SPECIFIERS \
| UNDNAME_NO_THROW_SIGNATURES \
| UNDNAME_NO_MEMBER_TYPE \
| UNDNAME_NO_ECSU \
| UNDNAME_NO_IDENT_CHAR_CHECK
SHORT_UNIQUE_NAME = UNDNAME_NO_MS_KEYWORDS \
| UNDNAME_NO_ACCESS_SPECIFIERS | UNDNAME_NO_ECSU
class undname_creator_t(object):
"""implementation details - should not be used directly
formats declarations string representation and exported symbols, so they
could be matched later.
The class formats variables, free and member functions, symbols exported
from .dll, .map and .so files.
On Windows, the class works with unique name produced by MSVC compiler and
with undecorated names produced by `dbghelp.dll`
On Linux, the class works with mangled names produced by GCC-XML
( GCC 4.2 ) compiler and demangled name produced by "nm" utility.
"""
def __init__(self):
warnings.warn(
"undname_creator_t is deprecated.\n" +
"Please have a look at the changelog for an explanation " +
"(since 1.8.0)",
DeprecationWarning)
if 'nt' == os.name:
import ctypes.wintypes
self.__undname = ctypes.windll.dbghelp.UnDecorateSymbolName
self.__undname.argtypes = [
ctypes.c_char_p,
ctypes.c_char_p,
ctypes.c_uint,
ctypes.c_uint]
self.__clean_ecsu = (
re.compile(r'(?P<startswith>^|\W)(?:(class|enum|struct|union)\s)'),
'%(startswith)s')
self.__fundamental_types = (
('short unsigned int',
'unsigned short'),
('short int',
'short'),
('long int',
'long'),
('long unsigned int',
'unsigned long'))
self.__calling_conventions = (
re.compile((
r'(?P<startswith>^|\s)(?:__(cdecl|clrcall|stdcall|fastcall' +
                r'|thiscall)\s)')), '%(startswith)s')
def normalize_undecorated(self, undname, options=None):
if options is None:
options = UNDECORATE_NAME_OPTIONS.SHORT_UNIQUE_NAME
if UNDECORATE_NAME_OPTIONS.UNDNAME_NO_ECSU & options:
expr, substitute = self.__clean_ecsu
undname = expr.sub(lambda m: substitute % m.groupdict(), undname)
if UNDECORATE_NAME_OPTIONS.UNDNAME_NO_ACCESS_SPECIFIERS & options:
for prefix in ('public: ', 'private: ', 'protected: '):
if undname.startswith(prefix):
undname = undname[len(prefix):]
break
if UNDECORATE_NAME_OPTIONS.UNDNAME_NO_MS_KEYWORDS & options:
expr, substitute = self.__calling_conventions
undname = expr.sub(lambda m: substitute % m.groupdict(), undname)
return undname.strip()
def undecorate_blob(self, name, options=None):
if options is None:
options = UNDECORATE_NAME_OPTIONS.SHORT_UNIQUE_NAME
buffer = ctypes.create_string_buffer(1024 * 16)
res = self.__undname(str(name), buffer, ctypes.sizeof(buffer), options)
if res:
return self.normalize_undecorated(str(buffer[:res]))
else:
return name
def __remove_leading_scope(self, s):
if s and s.startswith('::'):
return s[2:]
else:
return s
def __format_type_as_undecorated(self, type_, is_argument, hint):
result = []
type_ = declarations.remove_alias(type_)
if declarations.is_array(type_):
result.append(declarations.array_item_type(type_).decl_string)
result.append('*')
if is_argument:
result.append('const')
else:
result.append(self.__remove_leading_scope(type_.decl_string))
result = ' '.join(result)
if hint == 'nm':
for x in ('*', '&'):
result = result.replace(' ' + x, x)
return result
def __normalize(self, name):
for what, with_ in self.__fundamental_types:
name = name.replace(what, with_)
name = name.replace(', ', ',')
return name
def format_argtypes(self, argtypes, hint):
if not argtypes:
if hint == 'msvc':
return 'void'
else:
return ''
else:
argsep = ','
if hint == 'nm':
# ugly hack, later, I will replace ', ' with ',', so single
# space will still exist
argsep = ', '
return argsep.join(
map(lambda type_: self.__format_type_as_undecorated(
type_, True, hint), argtypes))
def format_calldef(self, calldef, hint):
calldef_type = calldef.function_type()
result = []
is_mem_fun = isinstance(calldef, declarations.member_calldef_t)
if is_mem_fun and hint == 'msvc' and calldef.virtuality != \
declarations.VIRTUALITY_TYPES.NOT_VIRTUAL:
result.append('virtual ')
        if is_mem_fun and hint == 'msvc' and calldef.has_static:
result.append('static ')
if hint == 'msvc' and calldef_type.return_type:
# nm doesn't dump return type information
result.append(
self.__format_type_as_undecorated(
calldef.return_type, False, hint))
result.append(' ')
if is_mem_fun:
result.append(
self.__remove_leading_scope(
calldef.parent.decl_string) + '::')
result.append(calldef.name)
if isinstance(
calldef, (declarations.constructor_t, declarations.destructor_t)) \
and declarations.templates.is_instantiation(calldef.parent.name):
if hint == 'msvc':
result.append('<%s>' % ','.join(
declarations.templates.args(calldef.parent.name)))
result.append('(%s)' % self.format_argtypes(
calldef_type.arguments_types, hint))
if is_mem_fun and calldef.has_const:
if hint == 'nm':
result.append(' ')
result.append('const')
return ''.join(result)
def format_var(self, decl, hint):
result = []
is_mem_var = isinstance(decl.parent, declarations.class_t)
if is_mem_var and decl.type_qualifiers.has_static and hint == 'msvc':
result.append('static ')
if hint == 'msvc':
result.append(
self.__format_type_as_undecorated(decl.decl_type, False, hint))
result.append(' ')
if is_mem_var:
result.append(
self.__remove_leading_scope(decl.parent.decl_string) + '::')
result.append(decl.name)
return ''.join(result)
def format_decl(self, decl, hint=None):
"""returns string, which contains full function name formatted exactly
as result of `dbghelp.UnDecorateSymbolName`, with
UNDNAME_NO_MS_KEYWORDS | UNDNAME_NO_ACCESS_SPECIFIERS | UNDNAME_NO_ECSU
options.
Different compilers/utilities undecorate/demangle mangled string
( unique names ) in a different way.
`hint` argument will tell pygccxml how to format declarations, so they
could be mapped later to the blobs.
The valid options are: "msvc" and "nm".
"""
if hint is None:
if 'nt' == os.name:
hint = 'msvc'
else:
hint = 'nm'
if isinstance(decl, declarations.calldef_t):
name = self.format_calldef(decl, hint)
elif isinstance(decl, declarations.variable_t):
name = self.format_var(decl, hint)
else:
raise NotImplementedError()
return self.__normalize(name)
|
StarcoderdataPython
|
4985313
|
<gh_stars>0
import torch
import numpy as np
import torch.nn.functional as F
from torch_scatter import scatter
def _similarity(h1: torch.Tensor, h2: torch.Tensor):
h1 = F.normalize(h1)
h2 = F.normalize(h2)
return h1 @ h2.t()
def nt_xent_loss(h1: torch.FloatTensor, h2: torch.FloatTensor,
tau: float, *args, **kwargs):
f = lambda x: torch.exp(x / tau)
    intra_sim = f(_similarity(h1, h1))  # similarities within the same view
    inter_sim = f(_similarity(h1, h2))  # similarities across the two views
    pos = inter_sim.diag()
    neg = intra_sim.sum(dim=1) + inter_sim.sum(dim=1) - intra_sim.diag()
loss = pos / neg
loss = -torch.log(loss)
return loss
def debiased_nt_xent_loss(h1: torch.Tensor, h2: torch.Tensor,
tau: float, tau_plus: float, *args, **kwargs):
f = lambda x: torch.exp(x / tau)
intra_sim = f(_similarity(h1, h1))
inter_sim = f(_similarity(h1, h2))
pos = inter_sim.diag()
neg = intra_sim.sum(dim=1) - intra_sim.diag() \
+ inter_sim.sum(dim=1) - inter_sim.diag()
num_neg = h1.size()[0] * 2 - 2
ng = (-num_neg * tau_plus * pos + neg) / (1 - tau_plus)
ng = torch.clamp(ng, min=num_neg * np.e ** (-1. / tau))
return -torch.log(pos / (pos + ng))
def hardness_nt_xent_loss(h1: torch.Tensor, h2: torch.Tensor,
tau: float, tau_plus: float, beta: float, *args, **kwargs):
f = lambda x: torch.exp(x / tau)
intra_sim = f(_similarity(h1, h1))
inter_sim = f(_similarity(h1, h2))
pos = inter_sim.diag()
neg = intra_sim.sum(dim=1) - intra_sim.diag() \
+ inter_sim.sum(dim=1) - inter_sim.diag()
num_neg = h1.size()[0] * 2 - 2
# imp = (beta * neg.log()).exp()
imp = beta * neg
reweight_neg = (imp * neg) / neg.mean()
neg = (-num_neg * tau_plus * pos + reweight_neg) / (1 - tau_plus)
neg = torch.clamp(neg, min=num_neg * np.e ** (-1. / tau))
return -torch.log(pos / (pos + neg))
def subsampling_nt_xent_loss(h1: torch.Tensor, h2: torch.Tensor,
tau: float, sample_size: int, *args, **kwargs):
f = lambda x: torch.exp(x / tau)
cos = torch.nn.CosineSimilarity(dim=1, eps=1e-6)
device = h1.device
num_nodes = h1.size(0)
neg_indices = torch.randint(low=0, high=num_nodes * 2, size=(sample_size,), device=device)
z_pool = torch.cat([h1, h2], dim=0)
negatives = z_pool[neg_indices]
pos = f(cos(h1, h2))
neg = f(_similarity(h1, negatives)).sum(dim=1)
loss = -torch.log(pos / (pos + neg))
return loss
def nt_xent_loss_en(anchor: torch.FloatTensor,
samples: torch.FloatTensor,
pos_mask: torch.FloatTensor,
tau: float, *args, **kwargs):
f = lambda x: torch.exp(x / tau)
sim = f(_similarity(anchor, samples)) # anchor x sample
assert sim.size() == pos_mask.size() # sanity check
neg_mask = 1 - pos_mask
pos = (sim * pos_mask).sum(dim=1)
neg = (sim * neg_mask).sum(dim=1)
loss = pos / (pos + neg)
loss = -torch.log(loss)
return loss.mean()
class InfoNCELoss(torch.nn.Module):
def __init__(self, loss_fn=nt_xent_loss):
super(InfoNCELoss, self).__init__()
self.loss_fn = loss_fn
def forward(self, h1: torch.FloatTensor, h2: torch.FloatTensor, *args, **kwargs):
l1 = self.loss_fn(h1, h2, *args, **kwargs)
l2 = self.loss_fn(h2, h1, *args, **kwargs)
ret = (l1 + l2) * 0.5
ret = ret.mean()
return ret
class InfoNCELossG2L(torch.nn.Module):
def __init__(self):
super(InfoNCELossG2L, self).__init__()
def forward(self,
h1: torch.FloatTensor, g1: torch.FloatTensor,
h2: torch.FloatTensor, g2: torch.FloatTensor,
batch: torch.LongTensor, tau: float, *args, **kwargs):
num_nodes = h1.size()[0] # M := num_nodes
ones = torch.eye(num_nodes, dtype=torch.float32, device=h1.device) # [M, M]
pos_mask = scatter(ones, batch, dim=0, reduce='sum') # [M, N]
l1 = nt_xent_loss_en(g1, h2, pos_mask=pos_mask, tau=tau)
l2 = nt_xent_loss_en(g2, h1, pos_mask=pos_mask, tau=tau)
return l1 + l2
class InfoNCELossG2LEN(torch.nn.Module):
def __init__(self):
super(InfoNCELossG2LEN, self).__init__()
def forward(self,
h1: torch.FloatTensor, g1: torch.FloatTensor,
h2: torch.FloatTensor, g2: torch.FloatTensor,
h3: torch.FloatTensor, h4: torch.FloatTensor,
*args, **kwargs):
num_nodes = h1.size()[0]
device = h1.device
pos_mask1 = torch.ones((1, num_nodes), dtype=torch.float32, device=device)
pos_mask0 = torch.zeros((1, num_nodes), dtype=torch.float32, device=device)
pos_mask = torch.cat([pos_mask1, pos_mask0], dim=1)
samples1 = torch.cat([h2, h4], dim=0)
samples2 = torch.cat([h1, h3], dim=0)
l1 = nt_xent_loss_en(g1, samples1, pos_mask=pos_mask, *args, **kwargs)
l2 = nt_xent_loss_en(g2, samples2, pos_mask=pos_mask, *args, **kwargs)
return l1 + l2
class HardMixingLoss(torch.nn.Module):
    def __init__(self, projection, tau: float = 0.5):
        super(HardMixingLoss, self).__init__()
        self.projection = projection
        # forward() uses self.tau, which the original never set; store a
        # temperature here (default assumed) so the module is usable.
        self.tau = tau
@staticmethod
def tensor_similarity(z1, z2):
z1 = F.normalize(z1, dim=-1) # [N, d]
z2 = F.normalize(z2, dim=-1) # [N, s, d]
return torch.bmm(z2, z1.unsqueeze(dim=-1)).squeeze()
def forward(self, z1: torch.Tensor, z2: torch.Tensor, threshold=0.1, s=150, mixup=0.2, *args, **kwargs):
f = lambda x: torch.exp(x / self.tau)
num_samples = z1.shape[0]
device = z1.device
threshold = int(num_samples * threshold)
refl1 = _similarity(z1, z1).diag()
refl2 = _similarity(z2, z2).diag()
pos_similarity = f(_similarity(z1, z2))
neg_similarity1 = torch.cat([_similarity(z1, z1), _similarity(z1, z2)], dim=1) # [n, 2n]
neg_similarity2 = torch.cat([_similarity(z2, z1), _similarity(z2, z2)], dim=1)
neg_similarity1, indices1 = torch.sort(neg_similarity1, descending=True)
neg_similarity2, indices2 = torch.sort(neg_similarity2, descending=True)
neg_similarity1 = f(neg_similarity1)
neg_similarity2 = f(neg_similarity2)
z_pool = torch.cat([z1, z2], dim=0)
hard_samples1 = z_pool[indices1[:, :threshold]] # [N, k, d]
hard_samples2 = z_pool[indices2[:, :threshold]]
hard_sample_idx1 = torch.randint(hard_samples1.shape[1], size=[num_samples, 2 * s]).to(device) # [N, 2 * s]
hard_sample_idx2 = torch.randint(hard_samples2.shape[1], size=[num_samples, 2 * s]).to(device)
hard_sample_draw1 = hard_samples1[
torch.arange(num_samples).unsqueeze(-1), hard_sample_idx1] # [N, 2 * s, d]
hard_sample_draw2 = hard_samples2[torch.arange(num_samples).unsqueeze(-1), hard_sample_idx2]
hard_sample_mixing1 = mixup * hard_sample_draw1[:, :s, :] + (1 - mixup) * hard_sample_draw1[:, s:, :]
hard_sample_mixing2 = mixup * hard_sample_draw2[:, :s, :] + (1 - mixup) * hard_sample_draw2[:, s:, :]
h_m1 = self.projection(hard_sample_mixing1)
h_m2 = self.projection(hard_sample_mixing2)
neg_m1 = f(self.tensor_similarity(z1, h_m1)).sum(dim=1)
neg_m2 = f(self.tensor_similarity(z2, h_m2)).sum(dim=1)
pos = pos_similarity.diag()
neg1 = neg_similarity1.sum(dim=1)
neg2 = neg_similarity2.sum(dim=1)
loss1 = -torch.log(pos / (neg1 + neg_m1 - refl1))
loss2 = -torch.log(pos / (neg2 + neg_m2 - refl2))
loss = (loss1 + loss2) * 0.5
loss = loss.mean()
return loss
class RingLoss(torch.nn.Module):
def __init__(self):
super(RingLoss, self).__init__()
def forward(self, h1: torch.Tensor, h2: torch.Tensor, y: torch.Tensor, tau, threshold=0.1, *args, **kwargs):
f = lambda x: torch.exp(x / tau)
num_samples = h1.shape[0]
device = h1.device
threshold = int(num_samples * threshold)
false_neg_mask = torch.zeros((num_samples, 2 * num_samples), dtype=torch.int).to(device)
for i in range(num_samples):
false_neg_mask[i] = (y == y[i]).repeat(2)
pos_sim = f(_similarity(h1, h2))
neg_sim1 = torch.cat([_similarity(h1, h1), _similarity(h1, h2)], dim=1) # [n, 2n]
neg_sim2 = torch.cat([_similarity(h2, h1), _similarity(h2, h2)], dim=1)
neg_sim1, indices1 = torch.sort(neg_sim1, descending=True)
neg_sim2, indices2 = torch.sort(neg_sim2, descending=True)
y_repeated = y.repeat(2)
false_neg_cnt = torch.zeros((num_samples)).to(device)
for i in range(num_samples):
false_neg_cnt[i] = (y_repeated[indices1[i, threshold:-threshold]] == y[i]).sum()
neg_sim1 = f(neg_sim1[:, threshold:-threshold])
neg_sim2 = f(neg_sim2[:, threshold:-threshold])
pos = pos_sim.diag()
neg1 = neg_sim1.sum(dim=1)
neg2 = neg_sim2.sum(dim=1)
loss1 = -torch.log(pos / neg1)
loss2 = -torch.log(pos / neg2)
loss = (loss1 + loss2) * 0.5
loss = loss.mean()
return loss
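# Minimal usage sketch (not part of the original module): the symmetric
# NT-Xent objective over two random 16-dimensional embedding views.
if __name__ == "__main__":
    h1, h2 = torch.randn(8, 16), torch.randn(8, 16)
    criterion = InfoNCELoss(loss_fn=nt_xent_loss)
    print(criterion(h1, h2, tau=0.5).item())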
|
StarcoderdataPython
|
3364307
|
<reponame>lextoumbourou/plugin.video.rsa
import requests
from BeautifulSoup import BeautifulSoup
BASE_URL = 'http://comment.rsablogs.org.uk/videos/page/'
VIDEO_PAGE_URL = (
'http://www.thersa.org/events/video?result_4377_result_page={0}'
)
RSA_ANIMATE_PAGE_URL = 'http://www.thersa.org/events/rsaanimate'
RSA_SHORTS_PAGE_URL = 'http://www.thersa.org/events/rsashorts'
def get_videos(page_no):
"""
Return videos from RSA > Events > Videos as a list of dicts
"""
contents = requests.get(VIDEO_PAGE_URL.format(page_no))
return scrape_video_list(contents.text)
def get_rsa_animate_videos():
"""
Return videos from RSA > RSA Animate as list of dicts
"""
contents = requests.get(RSA_ANIMATE_PAGE_URL)
return scrape_video_list(contents.text.encode('utf-8', 'ignore'))
def get_rsa_shorts_videos():
"""
Return videos from RSA > RSA Shorts as list of dicts
"""
contents = requests.get(RSA_SHORTS_PAGE_URL)
return scrape_video_list(contents.text.encode('utf-8', 'ignore'))
def scrape_video_list(contents):
"""
Turn RSA Video HTML into list of dicts
"""
output = []
soup = BeautifulSoup(contents)
posts = soup.findAll('div', 'video-result')
for post in posts:
h3 = post.find('h3')
title_link = h3.find('a')
thumbnail = post.find('img')['src']
output.append({
'title': title_link.text,
'url': title_link['href'],
'thumbnail': thumbnail
})
return output
def get_youtube_id_from_video(url):
"""
Turn RSA Video page HTML into a youtube ID string
"""
contents = requests.get(url)
return scrape_video_page(contents.text.encode('utf-8', 'ignore'))
def scrape_video_page(contents):
soup = BeautifulSoup(contents)
youtube_id_meta = soup.find('meta', attrs={'name': 'youtube_url'})
if youtube_id_meta:
        # Occasionally the meta tags with the youtube id have
# URLs in them, this extracts the Youtube ID in such cases
if youtube_id_meta['content'].startswith('http://youtu.be/'):
youtube_id = youtube_id_meta['content'].split('/')[-1]
else:
youtube_id = youtube_id_meta['content']
return youtube_id
else:
iframes = soup.findAll('iframe')
if iframes:
url = iframes[-1]['src']
youtube_id = url.split('/')[-1]
return youtube_id
return None
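# Minimal usage sketch (not part of the original module): fetch the first
# page of event videos and print their titles; requires network access and
# the Python 2 BeautifulSoup package imported above.
if __name__ == '__main__':
    for video in get_videos(1):
        print(video['title'], video['url'])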
|
StarcoderdataPython
|
102481
|
<filename>test/testPicExif.py<gh_stars>1-10
# -*- coding: UTF-8 -*-
import unittest
from picture_category import pic_exif
class TestPicExif(unittest.TestCase):
def setUp(self):
self.picFile = 'F:\\DCIM\\100ANDRO\\DSC_0004.JPG'
def tearDown(self):
self.picFile = None
def testGetExif(self):
self.assertIsNotNone(pic_exif.get_exif(self.picFile), 'test success')
if __name__ == '__main__':
# unittest.main()
suite = unittest.TestSuite()
suite.addTest(TestPicExif("testGetExif"))
    # run the test suite
runner = unittest.TextTestRunner()
runner.run(suite)
|
StarcoderdataPython
|
8198061
|
# -*- coding: utf-8 -*-
#
# (c) Copyright 2003-2015 HP Development Company, L.P.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# Author: <NAME>
#
# Std Lib
import sys
import os
import os.path
# Local
from base.g import *
from base import utils
from base.sixext import to_unicode
from .ui_utils import load_pixmap
try:
from fax import fax
except ImportError:
# This can fail on Python < 2.3 due to the datetime module
log.error("Fax address book disabled - Python 2.3+ required.")
sys.exit(1)
# Qt
from qt import *
from .faxaddrbookform_base import FaxAddrBookForm_base
from .faxaddrbookeditform_base import FaxAddrBookEditForm_base
from .faxaddrbookgroupsform_base import FaxAddrBookGroupsForm_base
from .faxaddrbookgroupeditform_base import FaxAddrBookGroupEditForm_base
# globals
db = None
# ****************************************************************************
class AddressBookItem2(QListViewItem):
def __init__(self, parent, entry):
QListViewItem.__init__(self, parent)
self.entry = entry
self.setText(0, entry['name'])
self.setText(1, entry['title'])
self.setText(2, entry['firstname'])
self.setText(3, entry['lastname'])
self.setText(4, entry['fax'])
self.setText(5, ', '.join(entry['groups']))
self.setText(6, entry['notes'])
class GroupValidator(QValidator):
def __init__(self, parent=None, name=None):
QValidator.__init__(self, parent, name)
def validate(self, input, pos):
input = to_unicode(input)
if input.find(',') > 0:
return QValidator.Invalid, pos
elif len(input) > 50:
return QValidator.Invalid, pos
else:
return QValidator.Acceptable, pos
class PhoneNumValidator(QValidator):
def __init__(self, parent=None, name=None):
QValidator.__init__(self, parent, name)
def validate(self, input, pos):
input = to_unicode(input)
if not input:
return QValidator.Acceptable, pos
elif input[pos-1] not in '0123456789-(+) *#':
return QValidator.Invalid, pos
elif len(input) > 50:
return QValidator.Invalid, pos
else:
return QValidator.Acceptable, pos
# **************************************************************************** #
class FaxAddrBookGroupEditForm(FaxAddrBookGroupEditForm_base):
"""
Called when clicking New... or Edit... from the Group Dialog
"""
def __init__(self,parent = None,name = None,modal = 0,fl = 0):
FaxAddrBookGroupEditForm_base.__init__(self,parent,name,modal,fl)
self.edit_mode = False
self.okButton.setEnabled(True)
self.all_groups = db.get_all_groups()
self.groupnameEdit.setValidator(GroupValidator(self.groupnameEdit))
def setDlgData(self, group_name):
self.edit_mode = True
self.groupnameEdit.setText(group_name)
self.groupnameEdit.setReadOnly(True)
self.setEntries(group_name)
def setEntries(self, group_name=''):
self.entriesListView.clear()
all_entries = db.get_all_records()
for e, v in list(all_entries.items()):
i = QCheckListItem(self.entriesListView, e, QCheckListItem.CheckBox)
if group_name and group_name in v['groups']:
i.setState(QCheckListItem.On)
self.CheckOKButton()
def getDlgData(self):
group_name = to_unicode(self.groupnameEdit.text())
entries = []
i = self.entriesListView.firstChild()
while i is not None:
if i.isOn():
entries.append(to_unicode(i.text()))
i = i.itemBelow()
return group_name, entries
def groupnameEdit_textChanged(self,a0):
self.CheckOKButton()
def entriesListView_clicked(self,a0):
self.CheckOKButton()
def CheckOKButton(self):
group_name = to_unicode(self.groupnameEdit.text())
if not group_name or \
(not self.edit_mode and group_name in self.all_groups):
self.okButton.setEnabled(False)
return
i = self.entriesListView.firstChild()
while i is not None:
if i.isOn():
break
i = i.itemBelow()
else:
self.okButton.setEnabled(False)
return
self.okButton.setEnabled(True)
def __tr(self,s,c = None):
return qApp.translate("FaxAddrBookGroupEditForm",s,c)
# **************************************************************************** #
class FaxAddrBookGroupsForm(FaxAddrBookGroupsForm_base):
def __init__(self,parent = None,name = None,modal = 0,fl = 0):
FaxAddrBookGroupsForm_base.__init__(self,parent,name,modal,fl)
self.current = None
QTimer.singleShot(0, self.InitialUpdate)
def InitialUpdate(self):
self.UpdateList()
def UpdateList(self):
self.groupListView.clear()
first_rec = None
all_groups = db.get_all_groups()
if all_groups:
for group in all_groups:
i = QListViewItem(self.groupListView, group,
', '.join(db.group_members(group)))
if first_rec is None:
first_rec = i
self.groupListView.setCurrentItem(i)
self.current = i
self.editButton.setEnabled(True)
self.deleteButton.setEnabled(True)
else:
self.editButton.setEnabled(False)
self.deleteButton.setEnabled(False)
def newButton_clicked(self):
dlg = FaxAddrBookGroupEditForm(self)
dlg.setEntries()
if dlg.exec_loop() == QDialog.Accepted:
group_name, entries = dlg.getDlgData()
db.update_groups(group_name, entries)
self.UpdateList()
def editButton_clicked(self):
dlg = FaxAddrBookGroupEditForm(self)
group_name = to_unicode(self.current.text(0))
dlg.setDlgData(group_name)
if dlg.exec_loop() == QDialog.Accepted:
group_name, entries = dlg.getDlgData()
db.update_groups(group_name, entries)
self.UpdateList()
def deleteButton_clicked(self):
x = QMessageBox.critical(self,
self.caption(),
self.__tr("<b>Annoying Confirmation: Are you sure you want to delete this group?</b>"),
QMessageBox.Yes,
QMessageBox.No | QMessageBox.Default,
QMessageBox.NoButton)
if x == QMessageBox.Yes:
db.delete_group(to_unicode(self.current.text(0)))
self.UpdateList()
def groupListView_currentChanged(self, a0):
self.current = a0
def groupListView_doubleClicked(self, a0):
self.editButton_clicked()
def groupListView_rightButtonClicked(self, item, pos, a2):
popup = QPopupMenu(self)
popup.insertItem(self.__tr("New..."), self.newButton_clicked)
if item is not None:
popup.insertItem(self.__tr("Edit..."), self.editButton_clicked)
popup.insertItem(self.__tr("Delete..."), self.deleteButton_clicked)
popup.insertSeparator()
popup.insertItem(self.__tr("Refresh List"), self.UpdateList)
popup.popup(pos)
def __tr(self,s,c = None):
return qApp.translate("FaxAddrBookGroupsForm",s,c)
# **************************************************************************** #
class FaxAddrBookEditForm(FaxAddrBookEditForm_base):
def __init__(self, editing=True, parent = None,name = None,modal = 0,fl = 0):
FaxAddrBookEditForm_base.__init__(self,parent,name,modal,fl)
self.editing = editing
self.faxEdit.setValidator(PhoneNumValidator(self.faxEdit))
self.initial_nickname = ''
self.OKButton.setEnabled(self.editing)
def setDlgData(self, name, title, firstname, lastname, fax, group_list, notes):
self.initial_nickname = name
self.name = name
self.titleEdit.setText(title)
self.firstnameEdit.setText(firstname)
self.lastnameEdit.setText(lastname)
self.faxEdit.setText(fax)
self.notesEdit.setText(notes)
self.nicknameEdit.setText(name)
self.setGroups(group_list)
def setGroups(self, entry_groups=[]):
self.groupListView.clear()
for g in db.get_all_groups():
i = QCheckListItem(self.groupListView, g, QCheckListItem.CheckBox)
if g in entry_groups:
i.setState(QCheckListItem.On)
def getDlgData(self):
in_groups = []
i = self.groupListView.firstChild()
while i is not None:
if i.isOn():
in_groups.append(to_unicode(i.text()))
i = i.itemBelow()
return {'name': to_unicode(self.nicknameEdit.text()),
'title': to_unicode(self.titleEdit.text()),
'firstname': to_unicode(self.firstnameEdit.text()),
'lastname': to_unicode(self.lastnameEdit.text()),
'fax': to_unicode(self.faxEdit.text()),
'groups': in_groups,
'notes': to_unicode(self.notesEdit.text())}
def firstnameEdit_textChanged(self,a0):
pass
def lastnameEdit_textChanged(self,a0):
pass
def nicknameEdit_textChanged(self, nickname):
self.CheckOKButton(nickname, None)
def faxEdit_textChanged(self, fax):
self.CheckOKButton(None, fax)
def CheckOKButton(self, nickname=None, fax=None):
if nickname is None:
nickname = to_unicode(self.nicknameEdit.text())
if fax is None:
fax = to_unicode(self.faxEdit.text())
ok = bool(len(nickname) and len(fax))
if nickname:
all_entries = db.get_all_records()
for e, v in list(all_entries.items()):
if nickname == e and nickname != self.initial_nickname:
ok = False
self.OKButton.setEnabled(ok)
def __tr(self,s,c = None):
return qApp.translate("FaxAddrBookEditForm",s,c)
# **************************************************************************** #
class FaxAddrBookForm(FaxAddrBookForm_base):
def __init__(self,parent = None,name = None,modal = 0,fl = 0):
FaxAddrBookForm_base.__init__(self,parent,name,modal,fl)
self.setIcon(load_pixmap('hp_logo', '128x128'))
global db
db = fax.FaxAddressBook()
self.init_problem = False
QTimer.singleShot(0, self.InitialUpdate)
def InitialUpdate(self):
if self.init_problem:
self.close()
return
self.UpdateList()
def UpdateList(self):
self.addressListView.clear()
first_rec = None
all_entries = db.get_all_records()
log.debug("Number of records is: %d" % len(all_entries))
if all_entries:
for e, v in list(all_entries.items()):
if v['name'].startswith('__'):
continue
i = AddressBookItem2(self.addressListView, v)
if first_rec is None:
first_rec = i
self.addressListView.setCurrentItem(i)
self.current = i
self.editButton.setEnabled(True)
self.deleteButton.setEnabled(True)
else:
self.editButton.setEnabled(False)
self.deleteButton.setEnabled(False)
def groupButton_clicked(self):
FaxAddrBookGroupsForm(self).exec_loop()
self.sendUpdateEvent()
self.UpdateList()
def newButton_clicked(self):
dlg = FaxAddrBookEditForm(False, self)
dlg.setGroups()
if dlg.exec_loop() == QDialog.Accepted:
d = dlg.getDlgData()
db.set(**d)
self.sendUpdateEvent()
self.UpdateList()
def editButton_clicked(self):
dlg = FaxAddrBookEditForm(True, self)
c = self.current.entry
dlg.setDlgData(c['name'], c['title'], c['firstname'],
c['lastname'], c['fax'], c['groups'], c['notes'])
prev_name = c['name']
if dlg.exec_loop() == QDialog.Accepted:
d = dlg.getDlgData()
if prev_name != d['name']:
db.delete(prev_name)
db.set(**d)
self.sendUpdateEvent()
self.UpdateList()
def deleteButton_clicked(self):
if QMessageBox.critical(self,
self.caption(),
self.__tr("<b>Annoying Confirmation: Are you sure you want to delete this address book entry?</b>"),
QMessageBox.Yes,
QMessageBox.No | QMessageBox.Default,
QMessageBox.NoButton) == QMessageBox.Yes:
db.delete(self.current.entry['name'])
self.UpdateList()
self.sendUpdateEvent()
def addressListView_rightButtonClicked(self, item, pos, a2):
popup = QPopupMenu(self)
popup.insertItem(self.__tr("New..."), self.newButton_clicked)
if item is not None:
popup.insertItem(self.__tr("Edit..."), self.editButton_clicked)
popup.insertItem(self.__tr("Delete..."), self.deleteButton_clicked)
popup.insertSeparator()
popup.insertItem(self.__tr("Refresh List"), self.UpdateList)
popup.popup(pos)
def addressListView_doubleClicked(self,a0):
self.editButton_clicked()
def addressListView_currentChanged(self,item):
self.current = item
def FailureUI(self, error_text):
log.error(to_unicode(error_text).replace("<b>", "").replace("</b>", "").replace("<p>", " "))
QMessageBox.critical(self,
self.caption(),
QString(error_text),
QMessageBox.Ok,
QMessageBox.NoButton,
QMessageBox.NoButton)
def __tr(self,s,c = None):
return qApp.translate("FaxAddrBookForm",s,c)
def accept(self):
self.sendUpdateEvent()
FaxAddrBookForm_base.accept(self)
def sendUpdateEvent(self):
pass # TODO:
def importPushButton_clicked(self):
dlg = QFileDialog(user_conf.workingDirectory(), "LDIF (*.ldif *.ldi);;vCard (*.vcf)", None, None, True)
dlg.setCaption("openfile")
dlg.setMode(QFileDialog.ExistingFile)
dlg.show()
if dlg.exec_loop() == QDialog.Accepted:
result = str(dlg.selectedFile())
working_directory = to_unicode(dlg.dir().absPath())
log.debug("result: %s" % result)
user_conf.setWorkingDirectory(working_directory)
if result:
if result.endswith('.vcf'):
ok, error_str = db.import_vcard(result)
else:
ok, error_str = db.import_ldif(result)
if not ok:
self.FailureUI(error_str)
else:
self.UpdateList()
|
StarcoderdataPython
|
3347900
|
import os
import random as rnd
from PIL import Image, ImageFilter
from trdg import computer_text_generator, background_generator, distorsion_generator
try:
from trdg import handwritten_text_generator
except ImportError as e:
print("Missing modules for handwritten text generation.")
class FakeTextDataGenerator(object):
@classmethod
def generate_from_tuple(cls, t):
"""
Same as generate, but takes all parameters as one tuple
"""
cls.generate(*t)
@classmethod
def generate(
cls,
index,
text,
font,
out_dir,
size,
extension,
skewing_angle,
random_skew,
blur,
random_blur,
background_type,
distorsion_type,
distorsion_orientation,
is_handwritten,
name_format,
width,
alignment,
text_color,
orientation,
space_width,
character_spacing,
margins,
fit,
output_mask,
word_split,
image_dir,
):
image = None
margin_top, margin_left, margin_bottom, margin_right = margins
horizontal_margin = margin_left + margin_right
vertical_margin = margin_top + margin_bottom
##########################
# Create picture of text #
##########################
if is_handwritten:
if orientation == 1:
raise ValueError("Vertical handwritten text is unavailable")
image, mask = handwritten_text_generator.generate(text, text_color)
else:
image, mask = computer_text_generator.generate(
text,
font,
text_color,
size,
orientation,
space_width,
character_spacing,
fit,
word_split,
)
random_angle = rnd.randint(0 - skewing_angle, skewing_angle)
rotated_img = image.rotate(
skewing_angle if not random_skew else random_angle, expand=1
)
rotated_mask = mask.rotate(
skewing_angle if not random_skew else random_angle, expand=1
)
#############################
# Apply distorsion to image #
#############################
if distorsion_type == 0:
distorted_img = rotated_img # Mind = blown
distorted_mask = rotated_mask
elif distorsion_type == 1:
distorted_img, distorted_mask = distorsion_generator.sin(
rotated_img,
rotated_mask,
vertical=(distorsion_orientation == 0 or distorsion_orientation == 2),
horizontal=(distorsion_orientation == 1 or distorsion_orientation == 2),
)
elif distorsion_type == 2:
distorted_img, distorted_mask = distorsion_generator.cos(
rotated_img,
rotated_mask,
vertical=(distorsion_orientation == 0 or distorsion_orientation == 2),
horizontal=(distorsion_orientation == 1 or distorsion_orientation == 2),
)
else:
distorted_img, distorted_mask = distorsion_generator.random(
rotated_img,
rotated_mask,
vertical=(distorsion_orientation == 0 or distorsion_orientation == 2),
horizontal=(distorsion_orientation == 1 or distorsion_orientation == 2),
)
##################################
# Resize image to desired format #
##################################
# Horizontal text
if orientation == 0:
new_width = int(
distorted_img.size[0]
* (float(size - vertical_margin) / float(distorted_img.size[1]))
)
resized_img = distorted_img.resize(
(new_width, size - vertical_margin), Image.ANTIALIAS
)
resized_mask = distorted_mask.resize((new_width, size - vertical_margin))
background_width = width if width > 0 else new_width + horizontal_margin
background_height = size
# Vertical text
elif orientation == 1:
new_height = int(
float(distorted_img.size[1])
* (float(size - horizontal_margin) / float(distorted_img.size[0]))
)
resized_img = distorted_img.resize(
(size - horizontal_margin, new_height), Image.ANTIALIAS
)
resized_mask = distorted_mask.resize(
(size - horizontal_margin, new_height), Image.ANTIALIAS
)
background_width = size
background_height = new_height + vertical_margin
else:
raise ValueError("Invalid orientation")
#############################
# Generate background image #
#############################
if background_type == 0:
background_img = background_generator.gaussian_noise(
background_height, background_width
)
elif background_type == 1:
background_img = background_generator.plain_white(
background_height, background_width
)
elif background_type == 2:
background_img = background_generator.quasicrystal(
background_height, background_width
)
else:
background_img = background_generator.image(
background_height, background_width, image_dir
)
background_mask = Image.new(
"RGB", (background_width, background_height), (0, 0, 0)
)
#############################
# Place text with alignment #
#############################
new_text_width, _ = resized_img.size
if alignment == 0 or width == -1:
background_img.paste(resized_img, (margin_left, margin_top), resized_img)
background_mask.paste(resized_mask, (margin_left, margin_top))
elif alignment == 1:
background_img.paste(
resized_img,
(int(background_width / 2 - new_text_width / 2), margin_top),
resized_img,
)
background_mask.paste(
resized_mask,
(int(background_width / 2 - new_text_width / 2), margin_top),
)
else:
background_img.paste(
resized_img,
(background_width - new_text_width - margin_right, margin_top),
resized_img,
)
background_mask.paste(
resized_mask,
(background_width - new_text_width - margin_right, margin_top),
)
##################################
# Apply gaussian blur #
##################################
gaussian_filter = ImageFilter.GaussianBlur(
radius=blur if not random_blur else rnd.randint(0, blur)
)
final_image = background_img.filter(gaussian_filter)
final_mask = background_mask.filter(gaussian_filter)
#####################################
# Generate name for resulting image #
#####################################
if name_format == 0:
image_name = "{}_{}.{}".format(text, str(index), extension)
mask_name = "{}_{}_mask.png".format(text, str(index))
elif name_format == 1:
image_name = "{}_{}.{}".format(str(index), text, extension)
mask_name = "{}_{}_mask.png".format(str(index), text)
elif name_format == 2:
image_name = "{}.{}".format(str(index), extension)
mask_name = "{}_mask.png".format(str(index))
else:
print("{} is not a valid name format. Using default.".format(name_format))
image_name = "{}_{}.{}".format(text, str(index), extension)
mask_name = "{}_{}_mask.png".format(text, str(index))
# Save the image
if out_dir is not None:
final_image.convert("RGB").save(os.path.join(out_dir, image_name))
if output_mask == 1:
final_mask.convert("RGB").save(os.path.join(out_dir, mask_name))
else:
if output_mask == 1:
return final_image.convert("RGB"), final_mask.convert("RGB")
return final_image.convert("RGB")
|
StarcoderdataPython
|
3352835
|
<filename>elkm1_lib/lights.py
"""Definition of an ElkM1 Light"""
from .const import Max, TextDescriptions
from .elements import Element, Elements
from .message import add_message_handler, ps_encode, pc_encode, pf_encode, \
pn_encode, pt_encode
class Light(Element):
"""Class representing a Light"""
def __init__(self, index, elk):
super().__init__(index, elk)
self.status = 0
def turn_off(self):
"""(Helper) Turn off light"""
self._elk.send(pf_encode(self._index))
def turn_on(self, brightness=100, time=0):
"""(Helper) Turn on light"""
if brightness == 100:
self._elk.send(pn_encode(self._index))
else:
self._elk.send(pc_encode(self._index, 9, brightness, time))
def toggle(self):
"""(Helper) Toggle light"""
self._elk.send(pt_encode(self._index))
class Lights(Elements):
"""Handling for multiple lights"""
def __init__(self, elk):
super().__init__(elk, Light, Max.LIGHTS.value)
add_message_handler('PC', self._pc_handler)
add_message_handler('PS', self._ps_handler)
def sync(self):
"""Retrieve lights from ElkM1"""
for i in range(4):
self.elk.send(ps_encode(i))
self.get_descriptions(TextDescriptions.LIGHT.value)
# pylint: disable=unused-argument
def _pc_handler(self, housecode, index, light_level):
self.elements[index].setattr('status', light_level, True)
def _ps_handler(self, bank, statuses):
for i in range(bank*64, (bank+1)*64):
self.elements[i].setattr('status', statuses[i-bank*64], True)
|
StarcoderdataPython
|
12804716
|
import torch
from torch import nn
class TextualEncoding(nn.Module):
def __init__(self, cfg):
super(TextualEncoding, self).__init__()
self.cfg = cfg
txt_input_size = cfg.TXT_INPUT_SIZE # 300
self.txt_hidden_size = cfg.TXT_HIDDEN_SIZE # 512
self.bidirectional = cfg.RNN.BIDIRECTIONAL
self.textual_encoder = nn.LSTM(txt_input_size,
self.txt_hidden_size,
num_layers=cfg.RNN.NUM_LAYERS, bidirectional=self.bidirectional, # 3, False
batch_first=True)
if self.bidirectional:
self.tex_linear = nn.Linear(self.txt_hidden_size * 2, self.txt_hidden_size)
def forward(self, x, textual_mask):
self.textual_encoder.flatten_parameters()
txt_h = self.textual_encoder(x)[0] * textual_mask # batch * seq_len * txt_hidden_size
if self.bidirectional:
shape = txt_h.shape
txt_h = txt_h.view(shape[0], shape[1], 2, self.txt_hidden_size)
txt_h = torch.stack(
[torch.cat([txt_h[i][torch.sum(mask).long() - 1][0], txt_h[i][0][1]], dim=0) for i, mask in
enumerate(textual_mask)])
txt_h = self.tex_linear(txt_h) # batchsize * 512
else:
txt_h = torch.stack(
[txt_h[i][torch.sum(mask).long() - 1] for i, mask in enumerate(textual_mask)])
return txt_h # batch * txt_hidden_size
class WLTextualEncoding(nn.Module):
def __init__(self, cfg):
super(WLTextualEncoding, self).__init__()
self.cfg = cfg
txt_input_size = cfg.TXT_INPUT_SIZE # 300
self.txt_hidden_size = cfg.TXT_HIDDEN_SIZE # 512
self.bidirectional = cfg.RNN.BIDIRECTIONAL
self.textual_encoder = nn.GRU(txt_input_size,
self.txt_hidden_size,
num_layers=cfg.RNN.NUM_LAYERS, bidirectional=self.bidirectional, # 3, False
batch_first=True)
def forward(self, x, textual_mask):
'''
Bi LSTM
:param x: text batch * seq_len * input_size
:param textual_mask: batch * seq_len
:return:
'''
self.textual_encoder.flatten_parameters()
text_length = torch.sum(textual_mask, dim=1).int().squeeze(1) # batch
sorted_lengths, indices = torch.sort(text_length, descending=True)
x = x[indices]
inv_ix = indices.clone()
inv_ix[indices] = torch.arange(0, len(indices)).type_as(inv_ix)
packed = nn.utils.rnn.pack_padded_sequence(x, sorted_lengths.data.tolist(), batch_first=True)
out = self.textual_encoder(packed)[0] # batch * seq_len * txt_hidden_size
padded = nn.utils.rnn.pad_packed_sequence(out, batch_first=True)
cap_emb, cap_len = padded
cap_emb = cap_emb[inv_ix]
# cap_len = cap_len[inv_ix]
if self.bidirectional:
cap_emb = cap_emb.view(cap_emb.size(0), cap_emb.size(1), 2, -1)
# txt_h = torch.stack([cap_emb[i, l-1, 0, :] for i, l in enumerate(cap_len)]) + cap_emb[:, 0, 1, :]
cap_emb = (cap_emb[:, :, 0, :] + cap_emb[:, :, 1, :]) / 2 # batch * seq_len * txt_hidden_size
txt_h = torch.sum(cap_emb, dim=1) / text_length.unsqueeze(1)
# txt_h = torch.norm(txt_h, dim=1)
return txt_h, cap_emb # batch * txt_hidden_size
class GRUTextualEncoding(nn.Module):
def __init__(self, cfg):
super(GRUTextualEncoding, self).__init__()
self.cfg = cfg
txt_input_size = cfg.TXT_INPUT_SIZE # 300
self.txt_hidden_size = cfg.TXT_HIDDEN_SIZE # 512
self.bidirectional = cfg.RNN.BIDIRECTIONAL
self.textual_encoder = nn.GRU(txt_input_size,
self.txt_hidden_size // 2 if self.bidirectional else self.txt_hidden_size,
num_layers=cfg.RNN.NUM_LAYERS, bidirectional=self.bidirectional, # 3, False
batch_first=True)
def forward(self, x, textual_mask):
self.textual_encoder.flatten_parameters()
txt_h = self.textual_encoder(x)[0] * textual_mask # batch * seq_len * txt_hidden_size
if self.bidirectional:
shape = txt_h.shape
txt_h = txt_h.view(shape[0], shape[1], 2, self.txt_hidden_size // 2)
txt_h = torch.stack(
[torch.cat([txt_h[i][torch.sum(mask).long() - 1][0], txt_h[i][0][1]], dim=0) for i, mask in
enumerate(textual_mask)])
else:
txt_h = torch.stack(
[txt_h[i][torch.sum(mask).long() - 1] for i, mask in enumerate(textual_mask)])
return txt_h # batch * txt_hidden_size
|
StarcoderdataPython
|
4902746
|
import numpy as np
import pandas as pd
import collections
import os
import shutil
from . import database, data_exploration, baseline, analysis
"""
use with command : nosetests --with-coverage --cover-package=. test.py
"""
# database tests
TEST_SIZE = 50
TRAIN_SIZE = 280
VALID_SIZE = 139
import pkg_resources
DATAFOLDER = pkg_resources.resource_filename(__name__, "/swissroads_images")
print("DATAFOLDER : " + DATAFOLDER)
def test_getimage_wrongFolder():
batches_data, batches_cat, batches_file, batches_folder = database.get_images(
"fake_folder"
)
assert batches_data == [], "Expected %r, but got %r" % ([], batches_data)
assert batches_cat == [], "Expected %r, but got %r" % ([], batches_cat)
assert batches_file == [], "Expected %r, but got %r" % ([], batches_file)
assert batches_folder == [], "Expected %r, but got %r" % ([], batches_folder)
def test_getimage():
batches_data, batches_cat, batches_file, batches_folder = database.get_images(
DATAFOLDER
)
    # batch lengths
    data_length = 469
    assert len(batches_data) == data_length, "Expected %r, but got %r" % (
        data_length,
        len(batches_data),
    )
    assert len(batches_cat) == data_length, "Expected %r, but got %r" % (
        data_length,
        len(batches_cat),
    )
    assert len(batches_file) == data_length, "Expected %r, but got %r" % (
        data_length,
        len(batches_file),
    )
    assert len(batches_folder) == data_length, "Expected %r, but got %r" % (
        data_length,
        len(batches_folder),
    )
# label correspond to image name
for i in range(len(batches_cat)):
assert (
batches_cat[i] in batches_file[i]
), "Expected to contain %r, but got %r" % (batches_cat[i], batches_file[i])
# batches_folder size
counter = collections.Counter(batches_folder)
assert counter["test"] == TEST_SIZE, "Expected %r, but got %r" % (
TEST_SIZE,
counter["test"],
)
assert counter["train"] == TRAIN_SIZE, "Expected %r, but got %r" % (
TRAIN_SIZE,
counter["train"],
)
assert counter["valid"] == VALID_SIZE, "Expected %r, but got %r" % (
VALID_SIZE,
counter["valid"],
)
def test_get_batches():
    list_length = 1357  # arbitrary number, can work with any int
    batch_size = 32
    test_list = np.arange(list_length)
    expected_length = batch_size
    for idx, batch in enumerate(database.get_batches(test_list, batch_size)):
        if idx == int(list_length / batch_size):
            expected_length = list_length % batch_size
        assert len(batch) == expected_length, "Expected %r, but got %r" % (
            expected_length,
            len(batch),
        )
        assert batch[0] == test_list[idx * batch_size], "Expected %r, but got %r" % (
            test_list[idx * batch_size],
            batch[0],
        )
def test_load_database():
# create local folder
output_path = r"./output"
if not os.path.exists(output_path):
os.makedirs(output_path)
# folder_name = "./swissroads_images"
database.load_data(DATAFOLDER)
database_path = "./output/images_data.npz"
assert os.path.isfile(database_path), "File %r does not exist" % (database_path)
# assert generated content
with np.load("./output/images_data.npz", allow_pickle=True) as npz_file:
df = pd.DataFrame(npz_file["values"], columns=npz_file["columns"])
df_test = df[df["imageSet"] == "test"]
df_train = df[df["imageSet"] == "train"]
df_valid = df[df["imageSet"] == "valid"]
assert len(df_test) == TEST_SIZE, "Expected %r, but got %r" % (
TEST_SIZE,
len(df_test),
)
assert len(df_train) == TRAIN_SIZE, "Expected %r, but got %r" % (
TRAIN_SIZE,
len(df_train),
)
assert len(df_valid) == VALID_SIZE, "Expected %r, but got %r" % (
VALID_SIZE,
len(df_valid),
)
categories = ["bike", "car", "motorcycle", "other", "truck", "van"]
for category in categories:
path = DATAFOLDER + "/test/" + category
            files = os.listdir(path)
            df_categories = df_test[df_test["category"] == category]
            assert len(df_categories) == len(files), "Expected %r files, but got %r" % (
                len(files),
                len(df_categories),
            )
# delete local folder
# os.remove(database_path)
# os.rmdir(output_path)
shutil.rmtree(output_path, ignore_errors=True)
# baseline tests
def function_cat2num(input, expected):
output = baseline.cat2num(input)
assert output == expected, "Expected %r, but got %r" % (expected, output)
def test_cat2num_1():
function_cat2num("bike", 0)
def test_cat2num_2():
function_cat2num("other", 3)
def test_analysis_create_data_sets_1():
# create a dataFrame and check the data split
data = {
"data1": [20, 21, 19, 18],
"data2": [20, 21, 19, 18],
"imageSet": ["train", "train", "test", "valid"],
"category": ["bike", "car", "van", "truck"],
"imageFileName": ["file1", "file2", "file3", "file4"],
"category_num": [0, 1, 2, 3],
}
# Create DataFrame
df = pd.DataFrame(data)
X_tr, y_tr, X_te, y_te, X_va, y_va = analysis.create_data_sets(df)
assert len(y_tr) == 2, "Expected %r, but got %r" % (2, len(y_tr))
assert len(y_te) == 1, "Expected %r, but got %r" % (1, len(y_te))
assert len(y_va) == 1, "Expected %r, but got %r" % (1, len(y_va))
assert X_tr.shape == (2, 3), "Expected %r, but got %r" % ((2, 3), X_tr.shape)
def test_get_batches_1():
X_tr = np.random.rand(100, 2)
y_tr = np.random.rand(100)
i = 0
batch_size_ok = False
for X_batch, y_batch in analysis.get_batches(X_tr, y_tr, 25):
i += 1
if len(X_batch) == len(y_batch) and len(y_batch) == 25:
batch_size_ok = True
else:
batch_size_ok = False
# test if the batch amount is ok given the inputs and batch_size
assert i == 4, "Expected %r, but got %r" % (4, i)
assert batch_size_ok == True, "Expected %r, but got %r" % (True, False)
def test_get_confusion_matrix_1():
y_true = ["cat", "ant", "cat", "cat", "ant", "bird"]
y_pred = ["ant", "ant", "cat", "cat", "ant", "cat"]
labels = ["ant", "bird", "cat"]
df = analysis.get_confusion_matrix(y_true, y_pred, labels, labels)
expected_res = np.array([[2, 0, 0], [0, 0, 1], [1, 0, 2]])
    assert (df.values == expected_res).all(), "Expected %r, but got %r" % (
        expected_res,
        df.values,
    )
def test_results():
output_path = r"./output"
if not os.path.exists(output_path):
os.makedirs(output_path)
database.load_data(DATAFOLDER)
df = data_exploration.main()
data_path = output_path + "/images_data.npz"
image_display_path = output_path + "/images_display.png"
pca_path = output_path + "/PCA.png"
assert os.path.isfile(data_path), "File images_data.npz does not exist"
assert os.path.isfile(image_display_path), "File images_display.png does not exist"
assert os.path.isfile(pca_path), "File PCA.png does not exist"
baseline_acc_tr, baseline_acc_te = baseline.main(df)
analysis_acc_tr, analysis_acc_te = analysis.main(df, 10)
assert baseline_acc_tr == 1.0, "Expected %r, but got %r" % (1.0, baseline_acc_tr)
assert baseline_acc_te >= 0.9, "Expected more than %r, but got %r" % (
0.9,
baseline_acc_te,
)
assert analysis_acc_tr >= 0.9, "Expected more than %r, but got %r" % (
0.9,
analysis_acc_tr,
)
assert analysis_acc_te >= 0.9, "Expected more than %r, but got %r" % (
0.9,
analysis_acc_te,
)
# os.remove(data_path)
# os.remove(image_display_path)
# os.remove(pca_path)
# os.rmdir(output_path)
shutil.rmtree(output_path, ignore_errors=True)
|
StarcoderdataPython
|
11392209
|
from codecs import open
from os import path
from setuptools import setup, find_packages
from subprocess import check_output
import sphinx_markdown_parser
here = path.abspath(path.dirname(__file__))
check_output(
'pandoc --from=markdown --to=rst --output=' +
path.join(here, 'README.rst') + ' ' + path.join(here, 'README.md'),
shell=True
)
with open(path.join(here, 'README.rst'), encoding='utf-8') as f:
long_description = f.read()
install_requires = list()
with open(path.join(here, 'requirements.txt'), 'r', encoding='utf-8') as f:
for line in f.readlines():
install_requires.append(line)
setup(
name='sphinx_markdown_parser',
version=sphinx_markdown_parser.__version__,
description=(
'A docutils-compatibility bridge to markdown, '
'enabling you to write markdown with support for tables '
'inside of docutils & sphinx projects.'
),
long_description=long_description,
url='https://github.com/codejamninja/sphinx-markdown-parser',
author='<NAME>',
author_email='<EMAIL>',
license='MIT',
# https://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=[
'Intended Audience :: Developers',
'Topic :: Utilities',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
],
keywords='sphinx docs documentation markdown',
packages=['sphinx_markdown_parser'],
install_requires=install_requires,
include_package_data=True,
entry_points={
'console_scripts': [
'cm2html = sphinx_markdown_parser.scripts:cm2html',
'cm2latex = sphinx_markdown_parser.scripts:cm2latex',
'cm2man = sphinx_markdown_parser.scripts:cm2man',
'cm2pseudoxml = sphinx_markdown_parser.scripts:cm2pseudoxml',
'cm2xetex = sphinx_markdown_parser.scripts:cm2xetex',
'cm2xml = sphinx_markdown_parser.scripts:cm2xml',
]
}
)
|
StarcoderdataPython
|
32402
|
import logging
import threading
import time
from pajbot.managers.db import DBManager
from pajbot.managers.schedule import ScheduleManager
from pajbot.models.songrequest import SongrequestQueue, SongrequestHistory, SongRequestSongInfo
from pajbot.models.user import User
log = logging.getLogger("pajbot")
WIDGET_ID = 4
class SongrequestManager:
def __init__(self, bot):
self.bot = bot
self.enabled = False
self.current_song_id = None
self.showVideo = None
self.isVideoShowing = None
self.youtube = None
self.settings = None
self.previously_playing_spotify = None
self.paused = None
self.module_opened = None
self.previous_queue = None
self.true_volume = None
def enable(self, settings, youtube):
self.enabled = True
self.showVideo = False
self.isVideoShowing = True
self.youtube = youtube
self.settings = settings
self.current_song_id = None
self.previously_playing_spotify = False
self.paused = False
self.module_opened = False
self.previous_queue = 0
self.true_volume = int(self.settings["volume"])
thread = threading.Thread(target=self.inc_current_song, daemon=True)
thread.start()
def volume_val(self):
return int(self.true_volume * (100 / int(self.settings["volume_multiplier"])))
def to_true_volume(self, multiplied_volume):
return int(multiplied_volume * int(self.settings["volume_multiplier"]) / 100)
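    # Worked example with hypothetical numbers (not taken from the module's defaults):
    # with settings["volume_multiplier"] == 200 and true_volume == 50,
    # volume_val() returns int(50 * (100 / 200)) == 25, and
    # to_true_volume(25) returns int(25 * 200 / 100) == 50 again, so the two
    # helpers are inverses of each other up to integer truncation.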
def disable(self):
self.enabled = False
self.paused = False
self.settings = None
self.youtube = None
self.current_song_id = None
self.module_opened = False
def open_module_function(self):
if not self.enabled:
return False
if not self.module_opened:
self.module_opened = True
self.paused = False
if not self.current_song_id:
self.load_song()
return True
return False
def close_module_function(self):
if not self.enabled:
return False
if self.module_opened:
self.module_opened = False
self.paused = False
return True
return False
def skip_function(self, skipped_by):
with DBManager.create_session_scope() as db_session:
skipped_by = User.find_by_user_input(db_session, skipped_by)
if not skipped_by:
return
skipped_by_id = skipped_by.id
        if not self.enabled or not self.current_song_id:
            return False
self.load_song(skipped_by_id)
return True
def previous_function(self, requested_by):
if not self.enabled:
return False
with DBManager.create_session_scope() as db_session:
requested_by = User.find_by_user_input(db_session, requested_by)
if not requested_by:
return
requested_by_id = requested_by.id
SongrequestHistory._insert_previous(db_session, requested_by_id, self.previous_queue)
db_session.commit()
self.previous_queue += 1
self.load_song(requested_by_id)
return True
def pause_function(self):
if not self.enabled or not self.current_song_id:
return False
if not self.paused:
self.paused = True
self._pause()
return True
return False
def resume_function(self):
if not self.enabled or not self.current_song_id:
return False
if self.paused:
self.paused = False
self._resume()
if not self.current_song_id and self.module_opened:
self.load_song()
return True
return False
def seek_function(self, _time):
if not self.enabled:
return False
if self.current_song_id:
with DBManager.create_session_scope() as db_session:
current_song = SongrequestQueue._from_id(db_session, self.current_song_id)
current_song.current_song_time = _time
self._seek(_time)
return True
return False
def volume_function(self, volume):
if not self.enabled:
return False
self.true_volume = self.to_true_volume(volume)
self._volume()
return True
def play_function(self, database_id, skipped_by):
if not self.enabled:
return False
with DBManager.create_session_scope() as db_session:
skipped_by = User.find_by_user_input(db_session, skipped_by)
if not skipped_by:
return
skipped_by_id = skipped_by.id
song = SongrequestQueue._from_id(db_session, database_id)
song._move_song(db_session, 1)
db_session.commit()
self.load_song(skipped_by_id)
SongrequestQueue._update_queue()
return True
def move_function(self, database_id, to_id):
if not self.enabled:
return False
with DBManager.create_session_scope() as db_session:
song = SongrequestQueue._from_id(db_session, database_id)
song._move_song(db_session, to_id)
db_session.commit()
self._playlist()
SongrequestQueue._update_queue()
return True
def request_function(self, video_id, requested_by, queue=None):
if not self.enabled:
return False
with DBManager.create_session_scope() as db_session:
requested_by = User.find_by_user_input(db_session, requested_by)
if not requested_by:
return False
requested_by_id = requested_by.id
song_info = SongRequestSongInfo._create_or_get(db_session, video_id, self.youtube)
if not song_info:
                log.error("Could not fetch song info for video %s", video_id)
return False
skip_after = (
self.settings["max_song_length"] if song_info.duration > self.settings["max_song_length"] else None
)
song = SongrequestQueue._create(db_session, video_id, skip_after, requested_by_id)
if queue:
song._move_song(db_session, queue)
db_session.commit()
SongrequestQueue._update_queue()
return True
def replay_function(self, requested_by):
if not self.enabled:
return False
with DBManager.create_session_scope() as db_session:
requested_by = User.find_by_user_input(db_session, requested_by)
if not requested_by:
return False
requested_by_id = requested_by.id
current_song = SongrequestQueue._from_id(db_session, self.current_song_id)
self.request_function(current_song.video_id, current_song.requested_by_id, 1)
db_session.commit()
self.load_song(requested_by_id)
SongrequestQueue._update_queue()
return True
def requeue_function(self, database_id, requested_by):
if not self.enabled:
return False
with DBManager.create_session_scope() as db_session:
requested_by = User.find_by_user_input(db_session, requested_by)
if not requested_by:
return False
requested_by_id = requested_by.id
SongrequestHistory._from_id(db_session, database_id).requeue(db_session, requested_by_id)
db_session.commit()
SongrequestQueue._update_queue()
self._playlist()
return True
def show_function(self):
if not self.enabled:
return False
if not self.showVideo:
self.showVideo = True
if not self.paused:
self._show()
return True
return False
def hide_function(self):
if not self.enabled:
return False
if self.showVideo:
self.showVideo = False
self._hide()
return True
return False
def remove_function(self, database_id):
if not self.enabled:
return False
with DBManager.create_session_scope() as db_session:
song = SongrequestQueue._from_id(db_session, database_id)
song._remove(db_session)
db_session.commit()
SongrequestQueue._update_queue()
self._playlist()
return True
def inc_current_song(self):
while True:
if not self.enabled:
break
if self.current_song_id:
if not self.paused:
try:
with DBManager.create_session_scope() as db_session:
current_song = SongrequestQueue._from_id(db_session, self.current_song_id)
next_song = SongrequestQueue._get_next_song(db_session)
if not current_song or (
current_song.skip_after
and current_song.skip_after < current_song.current_song_time + 10
):
self.load_song()
else:
if (not current_song.requested_by) and next_song and next_song.requested_by:
self.load_song()
current_song.current_song_time += 1
except Exception as e:
log.error(e)
elif self.module_opened:
self.load_song()
time.sleep(1)
def load_song(self, skipped_by_id=None):
if not self.enabled:
return False
if self.current_song_id:
with DBManager.create_session_scope() as db_session:
current_song = SongrequestQueue._from_id(db_session, self.current_song_id)
if current_song:
if current_song.current_song_time > 5:
self.previous_queue = 0
histroy = current_song._to_histroy(db_session, skipped_by_id)
if not histroy:
log.info("History not added because stream is offline!")
else:
current_song._remove(db_session)
self._stop_video()
self._hide()
db_session.commit()
self._playlist_history()
SongrequestQueue._update_queue()
self.current_song_id = None
if not self.module_opened:
return False
with DBManager.create_session_scope() as db_session:
current_song = SongrequestQueue._get_current_song(db_session)
if not current_song:
current_song = SongrequestQueue._get_next_song(db_session)
if current_song:
current_song.playing = True
current_song.queue = 0
current_song.current_song_time = 0
self.current_song_id = current_song.id
song_info = current_song.song_info
self._play(
current_song.video_id,
song_info.title,
current_song.requested_by.username_raw if current_song.requested_by else "Backup list",
)
if self.settings["use_spotify"]:
is_playing, song_name, artistsArr = self.bot.spotify_api.state(self.bot.spotify_token_manager)
if is_playing:
self.bot.spotify_api.pause(self.bot.spotify_token_manager)
self.previously_playing_spotify = True
if not current_song.requested_by_id:
SongrequestQueue._create(
db_session,
current_song.video_id,
current_song.skip_after,
None,
SongrequestQueue._get_next_queue(db_session),
)
db_session.commit()
self._playlist()
SongrequestQueue._update_queue()
return True
if self.settings["use_spotify"]:
if self.previously_playing_spotify:
self.bot.spotify_api.play(self.bot.spotify_token_manager)
self.previously_playing_spotify = False
if self.isVideoShowing:
self._hide()
return False
def _play(self, video_id, video_title, requested_by_name):
self.bot.songrequest_websocket_manager.emit(
"play", {"video_id": video_id, "video_title": video_title, "requested_by": requested_by_name}
)
self.bot.websocket_manager.emit("songrequest_play", WIDGET_ID, {"video_id": video_id})
self.paused = True
if self.showVideo:
self._show()
self._playlist()
def ready(self):
self.resume_function()
ScheduleManager.execute_delayed(2, self._volume)
def _pause(self):
self.bot.songrequest_websocket_manager.emit("pause", {})
self.bot.websocket_manager.emit("songrequest_pause", WIDGET_ID, {})
self._hide()
def _resume(self):
self.bot.songrequest_websocket_manager.emit("resume", {})
self.bot.websocket_manager.emit("songrequest_resume", WIDGET_ID, {"volume": self.true_volume})
self.paused = False
if self.showVideo:
self._show()
def _volume(self):
self.bot.songrequest_websocket_manager.emit("volume", {"volume": self.volume_val()})
self.bot.websocket_manager.emit("songrequest_volume", WIDGET_ID, {"volume": self.true_volume})
def _seek(self, _time):
self.bot.songrequest_websocket_manager.emit("seek", {"seek_time": _time})
self.bot.websocket_manager.emit("songrequest_seek", WIDGET_ID, {"seek_time": _time})
self.paused = True
def _show(self):
self.bot.websocket_manager.emit("songrequest_show", WIDGET_ID, {})
self.isVideoShowing = True
def _hide(self):
self.bot.websocket_manager.emit("songrequest_hide", WIDGET_ID, {})
self.isVideoShowing = False
def _playlist(self):
with DBManager.create_session_scope() as db_session:
playlist = SongrequestQueue._get_playlist(db_session, 15)
self.bot.songrequest_websocket_manager.emit("playlist", {"playlist": playlist})
def _playlist_history(self):
with DBManager.create_session_scope() as db_session:
self.bot.songrequest_websocket_manager.emit(
"history", {"history": SongrequestHistory._get_history(db_session, 15)}
)
def _stop_video(self):
self.bot.songrequest_websocket_manager.emit("stop", {})
self.bot.websocket_manager.emit("songrequest_stop", WIDGET_ID, {})
|
StarcoderdataPython
|
5079539
|
from math import trunc

n = float(input('Enter a real number: '))
result = trunc(n)
print('The value entered was {} and its integer part is: {}'.format(n, result))
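# Note (illustrative values only): trunc() drops the fractional part towards zero,
# so it differs from math.floor() for negative inputs:
#   trunc(-7.8) == -7, while math.floor(-7.8) == -8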
|
StarcoderdataPython
|
9739338
|
"""Test SMHI component setup process."""
from smhi.smhi_lib import APIURL_TEMPLATE
from homeassistant.components.smhi.const import DOMAIN
from homeassistant.core import HomeAssistant
from . import ENTITY_ID, TEST_CONFIG
from tests.common import MockConfigEntry
from tests.test_util.aiohttp import AiohttpClientMocker
async def test_setup_entry(
hass: HomeAssistant, aioclient_mock: AiohttpClientMocker, api_response: str
) -> None:
"""Test setup entry."""
uri = APIURL_TEMPLATE.format(TEST_CONFIG["longitude"], TEST_CONFIG["latitude"])
aioclient_mock.get(uri, text=api_response)
entry = MockConfigEntry(domain=DOMAIN, data=TEST_CONFIG)
entry.add_to_hass(hass)
await hass.config_entries.async_setup(entry.entry_id)
await hass.async_block_till_done()
state = hass.states.get(ENTITY_ID)
assert state
async def test_remove_entry(
hass: HomeAssistant, aioclient_mock: AiohttpClientMocker, api_response: str
) -> None:
"""Test remove entry."""
uri = APIURL_TEMPLATE.format(TEST_CONFIG["longitude"], TEST_CONFIG["latitude"])
aioclient_mock.get(uri, text=api_response)
entry = MockConfigEntry(domain=DOMAIN, data=TEST_CONFIG)
entry.add_to_hass(hass)
await hass.config_entries.async_setup(entry.entry_id)
await hass.async_block_till_done()
state = hass.states.get(ENTITY_ID)
assert state
await hass.config_entries.async_remove(entry.entry_id)
await hass.async_block_till_done()
state = hass.states.get(ENTITY_ID)
assert not state
|
StarcoderdataPython
|
9755196
|
<filename>day1/day1_part2.py
#!/usr/bin/env python3
with open("input.txt") as f:
    texts = f.readlines()
nums = [int(x) for x in texts]
leng = len(nums)
print("There are", leng, "numbers in input.")
for i in range(0, leng):
    for j in range(i + 1, leng):
        for k in range(j + 1, leng):
            total = nums[i] + nums[j] + nums[k]
            if total == 2020:
                product = nums[i] * nums[j] * nums[k]
                print(nums[i], "and", nums[j], "and", nums[k], "multiplied:", product)
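# A more compact sketch of the same search using the standard library; this is
# an illustrative alternative, not part of the original solution:
from itertools import combinations

for a, b, c in combinations(nums, 3):
    if a + b + c == 2020:
        print(a, "and", b, "and", c, "multiplied:", a * b * c)
        break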
|
StarcoderdataPython
|
1991004
|
<reponame>kostik/vrs
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import birth_registration.fields
import birth_registration.validators
class Migration(migrations.Migration):
dependencies = [
('birth_registration', '0026_auto_20150921_1654'),
]
operations = [
migrations.AlterField(
model_name='f101',
name='Father_name',
field=birth_registration.fields.Char100Field(max_length=100, null=True, verbose_name="Father's name", blank=True),
),
migrations.AlterField(
model_name='f101',
name='Mother_name',
field=birth_registration.fields.Char100Field(max_length=100, null=True, verbose_name="Mother's name"),
),
migrations.AlterField(
model_name='f101',
name='NCZN_F',
field=birth_registration.fields.CitizenshipField(blank=True, null=True, verbose_name="Father's citizenship", choices=[(1, '01 - Myanmar'), (2, '02 - Indian'), (3, '03 - Pakistani'), (4, '04 - Bangladesh'), (5, '05 - Nepalese'), (6, '06 - Chinese'), (7, '07 - European/American'), (8, '08 - Other Asian'), (9, '09 - Others'), (10, '10 - Not stated')]),
),
migrations.AlterField(
model_name='f101',
name='NOCC_F',
field=birth_registration.fields.OccupationField(blank=True, null=True, verbose_name="Father's occupation", choices=[(1, '01 - Professional Technical and related workers'), (2, '02 - Administrative and managerial workers'), (3, '03 - Clerical and related workers'), (4, '04 - Sales workers'), (5, '05 - Services workers'), (6, '06 - Agriculture, Animal Husbandry and Forest workers, Fishermen, Hunters'), (7, '07 - Production and related workers, Transport equipment operators and labours'), (8, '08 - Not classified by occupation'), (9, '09 - Armed Forces'), (0, '00 - Economically inactive')]),
),
migrations.AlterField(
model_name='f101',
name='NRACE_F',
field=birth_registration.fields.RaceField(blank=True, null=True, verbose_name="Father's race", choices=[(1, '01 - Kachin'), (2, '02 - Kayah'), (3, '03 - Kayin'), (4, '04 - Chin'), (5, '05 - Bamar'), (6, '06 - Mon'), (7, '07 - Rakhine'), (8, '08 - Shan'), (9, '09 - Other indigenous Races'), (10, '10 - Myanmar/Foreigners'), (11, '11 - Chinese'), (12, '12 - Indian'), (13, '13 - Pakistani'), (14, '14 - Bangladesh'), (15, '15 - Nepal'), (16, '16 - Other Asian'), (17, '17 - Others'), (18, '18 - Not stated')]),
),
migrations.AlterField(
model_name='f101',
name='NRACE_M',
field=birth_registration.fields.RaceField(blank=True, null=True, verbose_name="Mother's race", choices=[(1, '01 - Kachin'), (2, '02 - Kayah'), (3, '03 - Kayin'), (4, '04 - Chin'), (5, '05 - Bamar'), (6, '06 - Mon'), (7, '07 - Rakhine'), (8, '08 - Shan'), (9, '09 - Other indigenous Races'), (10, '10 - Myanmar/Foreigners'), (11, '11 - Chinese'), (12, '12 - Indian'), (13, '13 - Pakistani'), (14, '14 - Bangladesh'), (15, '15 - Nepal'), (16, '16 - Other Asian'), (17, '17 - Others'), (18, '18 - Not stated')]),
),
migrations.AlterField(
model_name='f101',
name='RCIR',
field=birth_registration.fields.Char300Field(max_length=300, null=True, verbose_name='Address of mother', blank=True),
),
migrations.AlterField(
model_name='f101',
name='RST_DV',
field=birth_registration.fields.StateDivisionField(validators=[birth_registration.validators.validate_2digits], choices=[(1, '01 - Kachin'), (2, '02 - Kayh'), (3, '03 - Kayin'), (4, '04 - Chin'), (5, '05 - Sagaing'), (6, '06 - Tanintharyi'), (7, '07 - Bago'), (8, '08 - Magway'), (9, '09 - Mandalay'), (10, '10 - Mon'), (11, '11 - Rakhine'), (12, '12 - Yangon'), (13, '13 - Shan'), (14, '14 - Ayyarwaddy'), (15, '15 - NayPyiTaw')], blank=True, help_text='State Division', null=True, verbose_name='Usual Place of residence of mother:'),
),
]
|
StarcoderdataPython
|
6424568
|
""" default setting configurations
each constant is set to its equivalent in environment variables or hardcoded default below
"""
import os
import sys
# --------------- Map -------------- #
MAP_PATH = os.environ.get("MAP_PATH", None) or os.path.join(
os.path.dirname(os.path.abspath(__file__)), "maps", "map9.json")
# --------------- Logging -------------- #
GAME_LOG_DESTINATION = os.environ.get("GAME_LOG_DESTINATION", None) or os.path.join(
os.path.dirname(os.path.abspath(__file__)), '../gameLog')
GAME_LOG_STATIC_FILENAME = os.environ.get("GAME_LOG_STATIC_FILENAME", None)
ENGINE_LOG_LOGGER_LEVEL = os.environ.get("ENGINE_LOG_LOGGER_LEVEL", None) or 10
ENGINE_LOG_TO_STDERR = os.environ.get("ENGINE_LOG_TO_STDERR", None) or True
ENGINE_LOG_TO_FILE = os.environ.get("ENGINE_LOG_TO_FILE", None) or True
ENGINE_LOG_DESTINATION = os.environ.get("ENGINE_LOG_DESTINATION", None) or os.path.join(
os.path.dirname(os.path.abspath(__file__)), '../')
ENGINE_LOG_FILENAME = os.environ.get("ENGINE_LOG_FILENAME", None) or 'output.log'
AGENT_LOG = os.environ.get("AGENT_LOG", None) or True
AGENT_LOG_TO_FILE = os.environ.get("AGENT_LOG_TO_FILE", None) or True
AGENT_LOG_DESTINATION = os.environ.get("AGENT_LOG_DESTINATION", None) or os.path.join(
os.path.dirname(os.path.abspath(__file__)), '../agentLog')
# --------------- Players -------------- #
PLAYER_1_NAME = os.environ.get("PLAYER_1_NAME", None) or 'First player'
PLAYER_2_NAME = os.environ.get("PLAYER_2_NAME", None) or 'Second player'
# --------------- Turn -------------- #
TURN_INIT = os.environ.get("TURN_INIT", None) or 0
# --------------- Timeout -------------- #
TIME_OUT = os.environ.get("TIME_OUT", None) or 0.4
TIME_OUT_BEHAVIOUR = os.environ.get("TIME_OUT_BEHAVIOUR", None) or 'kill'
# --------------- Python -------------- #
exe = None
if sys.platform.lower().startswith('win'):  # match the prefix so 'darwin' (macOS) is not mistaken for Windows
exe = 'py'
elif 'linux' in sys.platform.lower():
exe = 'python3'
PYTHON_EXECUTABLE = os.environ.get("PYTHON_EXECUTABLE", None) or exe or sys.executable
# --------------- Agent startup delay -------------- #
AGENT_STARTUP_DELAY = os.environ.get("AGENT_STARTUP_DELAY", None) or 0
|
StarcoderdataPython
|
202290
|
# Assumes SciPy; physical_constants maps a name to a (value, unit, uncertainty) tuple.
from scipy import constants

print(constants.physical_constants["proton Compton wavelength"])
|
StarcoderdataPython
|
1789032
|
<reponame>muffinresearch/solitude
# -*- coding: utf-8 -*-
import mock
from nose.tools import eq_
import test_utils
from ..client import (Client, ClientMock, ClientProxy, dict_to_mock,
get_client, response_to_dict)
from ..constants import OK, ACCESS_DENIED
from ..errors import AuthError, BangoError, ProxyError
import samples
class TestClient(test_utils.TestCase):
def setUp(self):
self.client = get_client()
def test_create_package(self):
res = self.client.CreatePackage(samples.good_address)
eq_(res.responseCode, OK)
assert res.packageId > 1
@mock.patch.object(ClientMock, 'mock_results')
def test_auth_failure(self, mock_results):
mock_results.return_value = {'responseCode': ACCESS_DENIED}
with self.assertRaises(AuthError):
self.client.CreatePackage(samples.good_address)
@mock.patch.object(ClientMock, 'mock_results')
def test_failure(self, mock_results):
mock_results.return_value = {'responseCode': 'wat'}
with self.assertRaises(BangoError):
self.client.CreatePackage(samples.good_address)
def test_update_support_email(self):
res = self.client.UpdateSupportEmailAddress(samples.good_email)
eq_(res.responseCode, OK)
def test_update_financial_email(self):
res = self.client.UpdateFinanceEmailAddress(samples.good_email)
eq_(res.responseCode, OK)
def test_create_bango_number(self):
res = self.client.CreateBangoNumber(samples.good_bango_number)
eq_(res.responseCode, OK)
def test_make_premium(self):
res = self.client.MakePremiumPerAccess(samples.good_make_premium)
eq_(res.responseCode, OK)
class TestRightClient(test_utils.TestCase):
def test_no_proxy(self):
with self.settings(BANGO_PROXY=None, SOLITUDE_PROXY=False):
assert isinstance(get_client(), Client)
def test_using_proxy(self):
with self.settings(BANGO_MOCK=False, BANGO_PROXY='http://foo.com'):
assert isinstance(get_client(), ClientProxy)
def test_am_proxy(self):
with self.settings(BANGO_PROXY='http://foo.com', SOLITUDE_PROXY=True):
assert isinstance(get_client(), Client)
def test_mock(self):
with self.settings(BANGO_MOCK=True):
assert isinstance(get_client(), ClientMock)
class TestProxy(test_utils.TestCase):
def setUp(self):
self.bango = ClientProxy()
self.url = 'http://foo.com'
@mock.patch('lib.bango.client.post')
def test_call(self, post):
resp = mock.Mock()
resp.status_code = 200
resp.content = samples.premium_response
post.return_value = resp
with self.settings(BANGO_PROXY=self.url):
self.bango.MakePremiumPerAccess(samples.good_make_premium)
args = post.call_args
eq_(args[0][0], self.url)
eq_(args[1]['headers']['x-solitude-service'],
'https://webservices.test.bango.org/mozillaexporter/service.asmx')
@mock.patch('lib.bango.client.post')
def test_failure(self, post):
resp = mock.Mock()
resp.status_code = 500
post.return_value = resp
with self.settings(BANGO_PROXY=self.url):
with self.assertRaises(ProxyError):
self.bango.MakePremiumPerAccess(samples.good_make_premium)
@mock.patch('lib.bango.client.post')
def test_ok(self, post):
resp = mock.Mock()
resp.status_code = 200
resp.content = samples.package_response
post.return_value = resp
with self.settings(BANGO_PROXY=self.url):
address = samples.good_address.copy()
del address['seller']
res = self.bango.CreatePackage(address)
eq_(res.packageId, 1)
assert 'CreatePackageResponse' in str(res)
def test_convert_data():
data = {'foo': 'bar'}
eq_(data, response_to_dict(dict_to_mock(data)))
def test_callable():
data = {'foo': lambda: 'x'}
assert callable(dict_to_mock(data).foo)
assert not callable(dict_to_mock(data, callables=True).foo)
|
StarcoderdataPython
|
1836691
|
#-------------------------------------------------------------------------------
# Parser for ASDL [1] definition files. Reads in an ASDL description and parses
# it into an AST that describes it.
#
# The EBNF we're parsing here: Figure 1 of the paper [1]. Extended to support
# modules and attributes after a product. Words starting with Capital letters
# are terminals. Literal tokens are in "double quotes". Others are
# non-terminals. Id is either TokenId or ConstructorId.
#
# module ::= "module" Id "{" [definitions] "}"
# definitions ::= { TypeId "=" type }
# type ::= product | sum
# product ::= fields ["attributes" fields]
# fields ::= "(" { field, "," } field ")"
# field ::= TypeId ["?" | "*"] [Id]
# sum ::= constructor { "|" constructor } ["attributes" fields]
# constructor ::= ConstructorId [fields]
#
# [1] "The Zephyr Abstract Syntax Description Language" by Wang, et. al. See
# http://asdl.sourceforge.net/
#-------------------------------------------------------------------------------
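# For illustration only (not taken from any real schema), a tiny module that the
# grammar above accepts:
#
#   module Example {
#     expr = Num(int value)
#          | Add(expr left, expr right)
#          attributes (int spid)
#     pair = (expr first, expr second)
#   }
#
# Here 'expr' is a sum with two constructors plus an attributes clause, and
# 'pair' is a product of two fields.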
from __future__ import print_function
import cStringIO
# TODO: There should be SimpleSumType(_SumType) and CompoundSumType(_SumType)
# That can be determined at compile time with this function. is_simple()
# should move to front_end.py.
# PATCH: Moved this function from asdl_c.py.
def is_simple(sum):
"""Return True if a sum is a simple.
A sum is simple if its types have no fields, e.g.
unaryop = Invert | Not | UAdd | USub
"""
for t in sum.types:
if t.fields:
return False
return True
# The following classes are the AST for the ASDL schema, i.e. the "meta-AST".
# See the EBNF at the top of the file to understand the logical connection
# between the various node types.
class AST(object):
def Print(self, f, indent):
raise NotImplementedError
def __repr__(self):
f = cStringIO.StringIO()
self.Print(f, 0)
return f.getvalue()
class Module(AST):
def __init__(self, name, dfns):
self.name = name
self.dfns = dfns
def Print(self, f, indent):
ind = indent * ' '
f.write('%sModule %s {\n' % (ind, self.name))
for d in self.dfns:
d.Print(f, indent+1)
f.write('\n')
f.write('%s}\n' % ind)
class Type(AST):
def __init__(self, name, value):
self.name = name
self.value = value
def Print(self, f, indent):
ind = indent * ' '
f.write('%sType %s {\n' % (ind, self.name))
self.value.Print(f, indent+1)
f.write('%s}\n' % ind)
class Field(AST):
def __init__(self, type, name=None, seq=False, opt=False):
self.type = type
self.name = name
self.seq = seq
self.opt = opt
def Print(self, f, indent):
extra = []
if self.seq:
extra.append('seq=True')
elif self.opt:
extra.append('opt=True')
else:
extra = ""
ind = indent * ' '
f.write('%sField %s %s' % (ind, self.name, self.type))
if extra:
f.write(' (')
f.write(', '.join(extra))
f.write(')')
f.write('\n')
class _CompoundAST(AST):
"""Either a Product or Constructor.
encode.py and format.py need a reflection API.
"""
def __init__(self, fields):
self.fields = fields or []
class Constructor(_CompoundAST):
def __init__(self, name, fields=None):
_CompoundAST.__init__(self, fields)
self.name = name
# Add fake spids field.
# TODO: Only do this if 'attributes' are set.
if self.fields:
self.fields.append(Field('int', 'spids', seq=True))
def Print(self, f, indent):
ind = indent * ' '
f.write('%sConstructor %s' % (ind, self.name))
if self.fields:
f.write(' {\n')
for field in self.fields:
field.Print(f, indent+1)
f.write('%s}' % ind)
f.write('\n')
class Sum(AST):
def __init__(self, types, attributes=None):
self.types = types # List[Constructor]
self.attributes = attributes or []
def Print(self, f, indent):
ind = indent * ' '
f.write('%sSum {\n' % ind)
for t in self.types:
t.Print(f, indent+1)
if self.attributes:
f.write('%s\n' % self.attributes)
f.write('%s}\n' % ind)
class Product(_CompoundAST):
def __init__(self, fields, attributes=None):
_CompoundAST.__init__(self, fields)
self.attributes = attributes or []
def Print(self, f, indent):
ind = indent * ' '
f.write('%sProduct {\n' % ind)
for field in self.fields:
field.Print(f, indent+1)
if self.attributes:
f.write('%s\n' % self.attributes)
f.write('%s}\n' % ind)
|
StarcoderdataPython
|
5072277
|
<reponame>AutonomyLab/obzerver<gh_stars>1-10
#!/usr/bin/env python
from __future__ import print_function
import sys
import numpy
def main():
if len(sys.argv) < 2:
print("Usage: ./analyze-perf-result.py perf_file1 perf_file_2 ...", file=sys.stderr)
sys.exit(1)
data = dict()
for input_filename in sys.argv[1:]:
print("Processing %s ..." % (input_filename, ), file=sys.stderr)
with open(input_filename) as input_file:
for line in input_file:
                if line.find("(ms)") == -1:
                    continue
                elements = line.split()
#assert elements[2] == "(ms)"
if not elements[2] == "(ms)":
continue
component = elements[0]
#print(line, elements)
exec_time = float(elements[1])
if not component in data:
data[component] = list()
data[component].append(exec_time)
print("#component, mean exec time, stddev exec time, data points")
for component, exec_time_list in data.items():
numd = len(exec_time_list)
mean = numpy.mean(exec_time_list)
stdd = numpy.std(exec_time_list)
print("%s, %s, %s, %s " % (component, mean, stdd, numd))
if __name__ == "__main__":
main()
|
StarcoderdataPython
|
1649993
|
# DRUNKWATER TEMPLATE(add description and prototypes)
# Question Title and Description on leetcode.com
# Function Declaration and Function Prototypes on leetcode.com
#172. Factorial Trailing Zeroes
#Given an integer n, return the number of trailing zeroes in n!.
#Note: Your solution should be in logarithmic time complexity.
#Credits:
#Special thanks to @ts for adding this problem and creating all test cases.
#class Solution:
# def trailingZeroes(self, n):
# """
# :type n: int
# :rtype: int
# """
# Time Is Money
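# A minimal sketch of one way to meet the logarithmic-time requirement: count the
# factors of 5 contributed by 5, 25, 125, ... (illustrative only, not the reference
# solution; kept commented out to match the template above).
#class Solution:
#    def trailingZeroes(self, n):
#        zeroes = 0
#        power = 5
#        while power <= n:
#            zeroes += n // power
#            power *= 5
#        return zeroes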
|
StarcoderdataPython
|
11260681
|
<reponame>ALFA-group/neural_program_comprehension
ringos, orenjis = 3, 8
if ringos == orenjis:
print(ringos + orenjis)
elif abs(ringos - orenjis) == 5 or (ringos + orenjis) == 5:
print(ringos*orenjis)
else:
print(ringos - orenjis)
|
StarcoderdataPython
|
6523037
|
# coding=utf-8
# Copyright 2022 The Uncertainty Baselines Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utils for deterministic ViT."""
import functools
import logging
import flax
import jax
import jax.numpy as jnp
import input_utils # local file import from baselines.jft
import train_utils # local file import from baselines.jft
def get_total_steps(config):
"""Get total_steps of training.
Args:
config: The config of the experiment.
Returns:
Total_steps of training.
"""
local_batch_size = config.batch_size // jax.process_count()
ntrain_img = input_utils.get_num_examples(
config.dataset,
split=config.train_split,
process_batch_size=local_batch_size,
data_dir=config.get('data_dir'))
steps_per_epoch = ntrain_img // config.batch_size
if config.get('num_epochs'):
total_steps = int(config.num_epochs * steps_per_epoch)
assert not config.get('total_steps'), 'Set either num_epochs or total_steps'
else:
total_steps = config.total_steps
logging.info('Total train data points: %d', ntrain_img)
logging.info(
'Running for %d steps, that means %f epochs and %d steps per epoch',
total_steps, total_steps * config.batch_size / ntrain_img,
steps_per_epoch)
return total_steps
def create_init(model, config, train_ds):
"""Create the initialization function for model parameters.
Args:
model: The model to be used in updates.
config: The config of the experiment.
train_ds: tf.data.Dataset.
Returns:
Function that returns initialized model parameters.
"""
local_batch_size = config.batch_size // jax.process_count()
  # Create all parameters in host RAM rather than on a device; they are moved to
  # devices later as needed. Creating them on a device here has previously caused
  # them to be allocated twice.
@functools.partial(jax.jit, backend='cpu')
def init(rng):
image_size = tuple(train_ds.element_spec['image'].shape[2:])
logging.info('image_size = %s', image_size)
dummy_input = jnp.zeros((local_batch_size,) + image_size, jnp.float32)
params = flax.core.unfreeze(model.init(rng, dummy_input,
train=False))['params']
# Set bias in the head to a low value, such that loss is small initially.
params['head']['bias'] = jnp.full_like(params['head']['bias'],
config.get('init_head_bias', 0))
# init head kernel to all zeros for fine-tuning
if config.get('model_init'):
params['head']['kernel'] = jnp.full_like(params['head']['kernel'], 0)
return params
return init
def create_update_fn(model, config):
"""Create the update function from model and config.
Args:
model: The model to be used in updates.
config: The config of the experiment.
Returns:
The function that updates the model for one step.
"""
weight_decay_rules = config.get('weight_decay', []) or []
rescale_value = config.lr.base if config.get('weight_decay_decouple') else 1.
weight_decay_fn = train_utils.get_weight_decay_fn(
weight_decay_rules=weight_decay_rules, rescale_value=rescale_value)
logging.info('weight_decay_rules = %s', weight_decay_rules)
logging.info('rescale_value = %s', rescale_value)
@functools.partial(jax.pmap, axis_name='batch', donate_argnums=(0,))
def update_fn(opt, lr, images, labels, rng):
"""Update step."""
measurements = {}
# Split rng and return next_rng for the following step.
rng, next_rng = jax.random.split(rng, 2)
rng_local = jax.random.fold_in(rng, jax.lax.axis_index('batch'))
logging.info(msg=f'images in loss_fn = {jnp.shape(images)}')
logging.info(msg=f'labels in loss_fn = {jnp.shape(labels)}')
def loss_fn(params, images, labels):
logits, _ = model.apply({'params': flax.core.freeze(params)},
images,
train=True,
rngs={'dropout': rng_local})
logging.info(msg=f'logits={logits}')
label_indices = config.get('label_indices')
if label_indices:
logits = logits[:, label_indices]
loss = getattr(train_utils, config.get('loss', 'sigmoid_xent'))(
logits=logits, labels=labels)
return loss, logits
# Implementation considerations compared and summarized at
# https://docs.google.com/document/d/1g3kMEvqu1DOawaflKNyUsIoQ4yIVEoyE5ZlIPkIl4Lc/edit?hl=en#
(l, logits), g = train_utils.accumulate_gradient(
jax.value_and_grad(loss_fn, has_aux=True), opt.target, images, labels,
config.get('grad_accum_steps'))
l, g = jax.lax.pmean((l, g), axis_name='batch')
measurements['training_loss'] = l
logging.info(msg=f'measurements = {measurements}')
# Log the gradient norm only if we need to compute it anyways (clipping)
# or if we don't use grad_accum_steps, as they interact badly.
if config.get('grad_accum_steps', 1) == 1 or config.get('grad_clip_norm'):
grads, _ = jax.tree_flatten(g)
l2_g = jnp.sqrt(sum([jnp.vdot(p, p) for p in grads]))
measurements['l2_grads'] = l2_g
logging.info(msg=f'measurements = {measurements}')
# Optionally resize the global gradient to a maximum norm. We found this
# useful in some cases across optimizers, hence it's in the main loop.
if config.get('grad_clip_norm'):
g_factor = jnp.minimum(1.0, config.grad_clip_norm / l2_g)
g = jax.tree_util.tree_map(lambda p: g_factor * p, g)
opt = opt.apply_gradient(g, learning_rate=lr)
opt = opt.replace(target=weight_decay_fn(opt.target, lr))
params, _ = jax.tree_flatten(opt.target)
measurements['l2_params'] = jnp.sqrt(sum([jnp.vdot(p, p) for p in params]))
top1_idx = jnp.argmax(logits, axis=1)
top1_correct = jnp.take_along_axis(labels, top1_idx[:, None], axis=1)[:, 0]
prec1 = jax.lax.psum(
jnp.sum(top1_correct), axis_name='batch') / config.batch_size
measurements['training_prec@1'] = prec1
measurements['learning_rate'] = lr
return opt, next_rng, measurements
return update_fn
def create_evaluation_fn(model, config):
"""Create the evaluation function from model and config.
Args:
model: The model to be used in updates.
config: The config of the experiment.
Returns:
The function that evaluates the model for one step.
"""
@functools.partial(jax.pmap, axis_name='batch')
def evaluation_fn(params, images, labels, mask):
# Ignore the entries with all zero labels for evaluation.
mask *= labels.max(axis=1)
logits, out = model.apply({'params': flax.core.freeze(params)},
images,
train=False)
label_indices = config.get('label_indices')
logging.info('!!! mask %s, label_indices %s', mask, label_indices)
if label_indices:
logits = logits[:, label_indices]
# Note that logits and labels are usually of the shape [batch,num_classes].
# But for OOD data, when num_classes_ood > num_classes_ind, we need to
# adjust labels to labels[:, :config.num_classes] to match the shape of
# logits. That is just to avoid shape mismatch. The output losses does not
# have any meaning for OOD data, because OOD not belong to any IND class.
losses = getattr(train_utils, config.get('loss', 'sigmoid_xent'))(
logits=logits,
labels=labels[:, :(
len(label_indices) if label_indices else config.num_classes)],
reduction=False)
loss = jax.lax.psum(losses * mask, axis_name='batch')
top1_idx = jnp.argmax(logits, axis=1)
# Extracts the label at the highest logit index for each image.
top1_correct = jnp.take_along_axis(labels, top1_idx[:, None], axis=1)[:, 0]
ncorrect = jax.lax.psum(top1_correct * mask, axis_name='batch')
n = jax.lax.psum(mask, axis_name='batch')
metric_args = jax.lax.all_gather([logits, labels, out['pre_logits'], mask],
axis_name='batch')
return ncorrect, loss, n, metric_args
return evaluation_fn
|
StarcoderdataPython
|
1781651
|
<reponame>coverwallet/pysoni<gh_stars>1-10
from time import sleep
from datetime import datetime
from psycopg2.extras import execute_values
from pandas import DataFrame, to_datetime, notnull
from toolz import groupby
from . import helpers
from .connection import Connection
class Postgre(object):
"""This class will contain special methods to perform over PostgreSQL.
The arguments received on initialization are passed directly to Connection,
you can find more information about them in its documentation
"""
    def __init__(self, port=None, host=None, dbname=None, user=None, password=None, uri=None,
is_persistent=False, connection_options='-c statement_timeout=3600000'):
self.db_connection = Connection(port=port, host=host, dbname=dbname,
user=user, password=password, uri=uri,
is_persistent=is_persistent,
connection_options=connection_options)
def connection(self):
"""Generate the DB connection object and connects to it
The values used during the connection are obtained from the fields
of the Postgre instance
"""
self.db_connection.connect()
return self.db_connection
def delete_batch_rows(self, delete_batch, table_name, column, batch_size=1000, timeout=True):
"""Delete rows from a table using batches when the table column match any value given in the delete_batch
argument."""
helpers.validate_types(subject=delete_batch, expected_types=[list,tuple],
contained_types=[str,int])
delete_batch, remaining_rows = delete_batch[:batch_size], delete_batch[batch_size:]
while len(delete_batch) > 0:
rows_string = ','.join(f"'{register}'" for register in delete_batch)
self.postgre_statement(f"delete from {table_name} where {column} in ({rows_string})", timesleep=timeout)
delete_batch, remaining_rows = remaining_rows[:batch_size], remaining_rows[batch_size:]
def drop_tables(self, table_names, wait_time=0, batch=True):
"""Delete a set of DB tables.
Arguments
----------
        table_names : list, tuple
Iterable of strings representing the names of the tables to drop
wait_time : int
Number of seconds that we'll wait before committing the operation
to the DB
batch : boolean
Defines if the operation should be done in a batch
"""
helpers.validate_types(table_names, expected_types=[list, tuple], contained_types=[str])
if batch:
statement = f"DROP TABLES {', '.join(table_names)}"
self.postgre_statement(statement, timesleep=wait_time)
else:
for table_name in table_names:
self.postgre_statement(f"DROP TABLE {table_name}", timesleep=wait_time)
def execute_batch_inserts(self, insert_rows, tablename, batch_size=1000, columns=None):
"""Insert rows in a table using batches.
Arguments
---------
insert_rows: list, tuple
Iterable containing the values that we want to insert in the DB
tablename: str
Name of the table where the values will be inserted
        batch_size: int
            Size of each batch of values. The default batch size is 1000.
        columns: list, tuple
            List containing the names of the columns to be used in the insertion.
            They must be in the same order as the values. If it is not specified,
            it is assumed that the order of the values follows the order of the
            columns of the table.
"""
if columns:
helpers.validate_types(subject=columns, expected_types=[list, tuple, str])
insert_columns = f" ({(helpers.format_sql_string(subject=columns))})"
else:
insert_columns = ""
helpers.validate_types(subject=insert_rows, expected_types=[list, tuple],
contained_types=[str, int, float, list, tuple])
conn = self.connection()
cur = conn.cursor()
try:
insert = helpers.format_insert(insert_rows)
batch_insert, insert = insert[:batch_size], insert[batch_size:]
while batch_insert:
execute_values(cur, f'INSERT INTO {tablename}{insert_columns} VALUES %s', batch_insert)
batch_insert, insert = insert[:batch_size], insert[batch_size:]
conn.commit()
finally:
conn.commit()
cur.close()
conn.close()
def execute_query(self, queryname, types=False, path_sql_script=None):
"""This method is used to execute an sql query. With default parameters, it returns a dictionary
with two keys: 'results' provides a list with the results of the query and 'keys' provides a list
with the names of each of the columns returned. If types = true, it also returns a key 'types' inside
the dictionary that contains the type of each of the columns. If a path in "path_sql_script" is
specified, the query is read from the file named 'queryname' (with the SQL query) that is located
in that specific path is used.
If we want to make dynamic queries, the attributes should be passed as in the following example:
place = "Madrid"
f"select * from hotels where city='{place}'" """
conn = self.connection()
cur = conn.cursor()
column_name_index = 0
column_type_index = 1
try:
query_results = {}
if path_sql_script:
cur.execute(helpers.read_query(queryname, path_sql_script))
else:
cur.execute(queryname)
cursor_info = cur.fetchall()
columns_names = [cursor_metadata[column_name_index] for cursor_metadata in cur.description]
if types:
types_of_columns = [cursor_metadata[column_type_index] for cursor_metadata in cur.description]
type_string = ','.join(str(type_code) for type_code in types_of_columns)
cur.execute(f"select pg_type.oid, pg_type.typname from pg_type where pg_type.oid in ({type_string})")
list_of_types = cur.fetchall()
oid_name_type_dict = {type_column_tuple[0]: type_column_tuple[1] for type_column_tuple in list_of_types}
type_name_list = [oid_name_type_dict.get(type_code, 'text') for type_code in types_of_columns]
query_results = {'results': cursor_info, 'keys': columns_names, 'types': type_name_list}
else:
query_results = {'results': cursor_info, 'keys': columns_names}
return query_results
finally:
cur.close()
conn.close()
def get_schema(self, schema, metadata=False):
"""This method it is perform to get all the schema information from postgresql."""
query = (
"SELECT table_name, column_name, data_type FROM "
f"information_schema.columns WHERE table_schema='{schema}'"
)
if not metadata:
query += "AND table_name NOT LIKE 'pg_%'"
tables_information = self.postgre_to_tuple(query)
if not tables_information:
raise ValueError('The schema is empty')
schemas_metadata = [{
'table_name': table_information[0],
'column_name': table_information[1],
'data_type': table_information[2]
} for table_information in tables_information]
final_results = []
for table_name, table_metadatas in groupby('table_name', schemas_metadata).items():
final_results.append({
table_name: { metadata['column_name']: metadata['data_type'] for metadata in table_metadatas }
})
return final_results
def postgre_statement(self, statement, timesleep=0):
"""Method to perform postgres transactions as an example rename columns,
truncate tables, grant permissions to users etc,.
Arguments
----------
statement : string
String representing the transacion we want to execute
timesleep : int
Number with the seconds we want to wait between the transaction is
executed and commited time.
"""
conn = self.connection()
cur = conn.cursor()
try:
cur.execute(statement)
sleep(timesleep)
finally:
conn.commit()
cur.close()
conn.close()
def postgre_multiple_statements(self, statements, timesleep=0):
"""Method to perform multiple postgres transactions as an example rename columns,
truncate tables, grant permissions to users etc..All transactions are commited
at the same time.
Arguments
----------
statements : list, tuple
Iterable of strings representing the transacion.
All transactions are executed following the order of the iterable
timesleep : int
Number with the seconds we want to wait between alls transactions are
executed and commited time.
"""
helpers.validate_types(statements, expected_types=[list, tuple], contained_types=[str])
self.postgre_statement(";".join(statements), timesleep=timesleep)
def dataframe_to_postgre(self, tablename, dataframe_object, method, batch_size,
merge_key=None):
"""This method it is perform to insert a Dataframe python object into a DWH table.
The insert method can be done by appending elements to a table for that purpose use
the append opction in the method param. If you want to update a table by a column, you
need to use the rebuilt method and select the merge_key column of your DWH table.
"""
dataframe_object = dataframe_object.where((notnull(dataframe_object)), None)
df_columns = dataframe_object.columns[0:].tolist()
df_values = dataframe_object.values[:, 0:].tolist()
if method not in ('rebuild', 'append'):
raise ValueError("""Invalid method. Select method='rebuild' if you
want to update a table using a column. Select method='append'
if you want just to update it.""")
if method == 'rebuild':
if not merge_key:
raise ValueError("""To rebuilt a table you must select a
merge_key with the table column""")
df_delete_values = dataframe_object[merge_key].unique().tolist()
self.update_table(tablename=tablename, merge_key=merge_key,
insert_batch_size=batch_size, delete_batch_size=batch_size,
insert_list=df_values, delete_list=df_delete_values,
columns=df_columns)
elif method == 'append':
self.execute_batch_inserts(
tablename=tablename, columns=df_columns, insert_rows=df_values,
batch_size=batch_size)
def postgre_to_dataframe(self, query, convert_types=True, path_sql_script=None):
"""This method is used to execute a sql query and return a pandas Dataframe with the results.
If 'convert_types' = True, the time variables are converted to timestamp format and the date variables
are converted to YYYY-MM-DD format.
If we want to make dynamic queries the attributes should be pass as the following example
place = "Madrid"
f"select * from hotels where city='{place}'" """
results = self.execute_query(query, types=convert_types,
path_sql_script=path_sql_script)
df = DataFrame.from_records(results['results'], columns=results['keys'])
if convert_types:
for column_data_type, column_name in zip(results['types'], results['keys']):
if column_data_type in ('timestamp', 'timestamptz'):
df[column_name] = to_datetime(df[column_name])
elif column_data_type == 'date':
df[column_name] = to_datetime(
df[column_name], format='%Y-%m-%d')
return df
def postgre_to_dict(self, query, types=False, path_sql_script=None):
"""This method is used to execute an sql query and it would retrieve a
list, corresponding each element to a different row of the resulted
query. Each element is in turn made up of a list of dictionaries in
which the keys are the name of the columns and the value is the the
actual value of the row for that specific column. If types=True, it
also returns the type of each column inside each dictionary."""
results = self.execute_query(query, types=types,
path_sql_script=path_sql_script)
columns = results['keys']
rows = results['results']
if types:
types = results['types']
list_of_dict = []
for row in rows:
list_of_dict.append([{column: {'value': value, 'type': type_}}
for value, column, type_ in zip(row, columns, types)])
return list_of_dict
else:
rows = results['results']
list_of_dict = []
for row in rows:
list_of_dict.append([{column: register} for register,
column in zip(row, columns)])
return list_of_dict
def postgre_to_dict_list(self, query, types=False, path_sql_script=False):
"""This method is used to execute an sql query and return a list of
dictionaries. Each dictionary contais the information of each element
of the result (value + column). If types=True, the dictionary also
includes the type of the column.
Different from 'postgre_to_dict', here each element of each row has
its own dictionary, and inside the dictionary it is contained the
value, the name of the column and the type of the column (if True)"""
results = self.execute_query(query, types=types, path_sql_script=path_sql_script)
columns, rows = results['keys'], results['results']
if types:
types = results['types']
list_of_dict = []
for row in rows:
for value, column, type_ in zip(row, columns, types):
list_of_dict.append({column: {'value': value, 'type': type_}})
return list_of_dict
else:
list_of_dict = []
for row in rows:
row_dict = {}
for value, column in zip(row, columns):
row_dict.update({column: value})
list_of_dict.append(row_dict)
return list_of_dict
def postgre_to_tuple(self, query, path_sql_script=False):
"""This method it is perform to execute an sql query and it would retrieve a list of tuples.
If we want to make dynamic queries the attributes should be pass as the following example
f"select * from hoteles where city='Madrid'")"""
results = self.execute_query(query, path_sql_script=path_sql_script)
return results['results']
def update_fields(self, tablename, column, values, wait_time=0):
"""Method to perform updates over a column.
Arguments
----------
tablename : string
String representing the table we want to update.
column : int
String representing the column we want to update the values.
values : list, tuple
Iterable of iterables representing the values we want to update.
By default the first element of the value is corresponding with,
the old value and the second element of the value corresponds with,
the new value we are going to update.
wait_time : int
Sleep time between update transactions.
"""
helpers.validate_types(values, expected_types=[list, tuple],
contained_types=[list,tuple])
OLD_RECORD_INDEX = 0
NEW_RECORD_INDEX = 1
for record in values:
update = (f"UPDATE {tablename} "
f"SET {column}='{record[NEW_RECORD_INDEX]}' "
f"WHERE {column}='{record[OLD_RECORD_INDEX]}'")
self.postgre_statement(update, timesleep=wait_time)
def update_fields_in_batch(self, tablename, column, values, batch_size):
"""Method to perform postgres batch updates over a column.
Arguments
----------
tablename : string
String representing the table we want to update.
column : int
String representing the column we want to update the values.
values : list, tuple
Iterable of iterables representing the values we want to update.
By default the first element of the value is corresponding with,
the old value and the second element of the value corresponds with,
the new value we are going to update.
wait_time : int
Sleep time between update transactions.
"""
helpers.validate_types(values, expected_types=[list, tuple],
contained_types=[list, tuple])
conn = self.connection()
cur = conn.cursor()
try:
batch_update, update = values[:batch_size], values[batch_size:]
while len(batch_update) > 0:
execute_values(cur, f"UPDATE {tablename} SET {column}"
+"= data.new_value FROM (VALUES %s) "
"AS data (old_value, new_value) " \
f"WHERE {column} = data.old_value",
batch_update)
batch_update, update = update[:batch_size], update[batch_size:]
finally:
conn.commit()
cur.close()
conn.close()
def update_table(self, tablename, merge_key, delete_list, insert_list, insert_batch_size=5000,
delete_batch_size=None, columns=None):
"""Update the records of a DB table in batches
It follows the delete-and-insert pattern (first delete all the rows
that will be updated, then insert them with the new values) because
this greatly improves the speed over doing the update row by row, as
this pattern enables batch operations on the DB.
WARNING: It's important to consider that if only specific columns are
updated (by using the 'columns' argument) the rest of the values of the
row will be lost (as they won't be re-inserted after the deletion)
Arguments
---------
tablename : string
Name of the table that will be updated
merge_key : string
Name of the column that will be updated
delete_list : list, tuple
Iterable containing the table PK of the rows that we want to remove
insert_list : list, tuple
Iterable of iterables, representing all the values that will be
inserted in each row
insert_batch_size : integer
Size of the batch to insert in each DB transaction
delete_batch_size : integer
Size of the batch to delete in each DB transaction. If not
specified, it's set to the same value as insert_batch_size
columns : list, tuple
Columns to update, in case we don't want to set all the values
in the record.
WARNING: If you use this option, the values on the missing columns
will be lost
"""
if not delete_batch_size:
delete_batch_size = insert_batch_size
self.delete_batch_rows(
delete_list, table_name=tablename, column=merge_key,
batch_size=delete_batch_size, timeout=False
)
self.execute_batch_inserts(
insert_list, tablename=tablename, batch_size=insert_batch_size, columns=columns
)
def update_table_with_temp_table(self, tablename, insert_list, schema=None, merge_key=None,
insert_batch_size=5000, columns=None, truncate_table=False):
"""Use a temporary staging table to perform a merge (Upsert). It update
and insert efficiently new data by loading your data into a staging table
first. Then, in one single transaction, deletes all the rows in the target
table using a merge key and inserts all the rows from the temporary table.
The connection must be persistent (is_persistent=True) to use temporary
tables.
WARNING: It's important to consider that if only specific columns are
updated (by using the 'columns' argument) the rest of the values of the
row will be lost (as they won't be re-inserted after the deletion)
Arguments
---------
tablename: string
Name of the table that will be updated
merge_key: string (optional)
Column used to make an inner join to delete rows that will be updated.
If it is not specified, only an insert operation from the temporary
table is done
insert_list: list, tuple
Iterable of iterables, representing all the values that will be
inserted in each row
schema: string
Name of the schema that contains the table that will be updated (Optional).
insert_batch_size: integer
Size of the batch to insert in each DB transaction
columns : list, tuple
Columns to update, in case we don't want to set all the values
in the record.
WARNING: If you use this option, the values on the missing columns
will be lost
truncate_table : boolean
If true, truncates the table before doing the insertion. False by default
"""
if not self.db_connection.is_persistent:
raise ConnectionError("The connection must be persistent to use temporary tables")
execution_time = datetime.now().strftime("%Y%m%d%H%M%S%f")
tmp_table = f'{tablename}_TEMP_{execution_time}'
table_name_with_schema = f"{schema}.{tablename}" if schema else tablename
self.postgre_statement(f"CREATE TEMP TABLE {tmp_table} AS (SELECT * FROM {table_name_with_schema} LIMIT 0)")
# Batch insert is done in temporary table
self.execute_batch_inserts(insert_list, tablename=tmp_table, batch_size=insert_batch_size, columns=columns)
sql_statements = []
if truncate_table:
sql_statements.append(f"TRUNCATE TABLE {table_name_with_schema}")
if merge_key:
sql_statements.append(
f"DELETE FROM {table_name_with_schema} USING {tmp_table} "
f"WHERE {table_name_with_schema}.{merge_key}={tmp_table}.{merge_key};")
# Table truncation, batch delete and insert happens in one single transaction
sql_statements.append(f"INSERT INTO {table_name_with_schema} (SELECT * FROM {tmp_table})")
self.postgre_multiple_statements(sql_statements)
|
StarcoderdataPython
|
3574014
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
###############################################################################
# Copyright Kitware Inc. and Epidemico Inc.
#
# Licensed under the Apache License, Version 2.0 ( the "License" );
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
from __future__ import absolute_import
import glob
from ftplib import FTP
import logging
import os
import datetime
import re
import shutil
from django.conf import settings
from dataqs.processor_base import GeoDataMosaicProcessor
from dataqs.helpers import gdal_translate, style_exists
logger = logging.getLogger("dataqs.processors")
script_dir = os.path.dirname(os.path.realpath(__file__))
GPM_ACCOUNT = getattr(settings, 'GPM_ACCOUNT', 'anonymous')
GS_DATA_DIR = getattr(settings, 'GS_DATA_DIR', '/data/geodata')
GS_TMP_DIR = getattr(settings, 'GS_TMP_DIR', '/tmp')
class GPMProcessor(GeoDataMosaicProcessor):
"""
Class for processing the latest NASA IMERG Rainfall estimates combining
data from all passive-microwave instruments in the GPM Constellation.
Uses the 'early' (possibly less accurate) images for most timely
information (generated within 6-8 hours of observation).
"""
base_url = "jsimpson.pps.eosdis.nasa.gov"
layername_prefix = 'nasa_gpm_'
prefix = '3B-HHR-E.MS.MRG.3IMERG.'
layer_name = 'nasa_gpm_24hr'
archive_hours = ("T12:00:00.000Z", "T12:30:00.000Z")
description = """NASA IMERG: Rainfall estimates combining data from all
passive-microwave instruments in the GPM Constellation.
This algorithm is intended to intercalibrate, merge, and interpolate "all"
satellite microwave precipitation estimates, together with microwave-calibrated
infrared (IR) satellite estimates, precipitation gauge analyses, and
potentially other precipitation estimators at fine time and space scales for the
TRMM and GPM eras over the entire globe. The system is run several times for
each observation time, first giving a quick estimate (this image) and
successively providing better estimates as more data arrive.
\n\nSource: http://pmm.nasa.gov/data-access/downloads/gpm"""
def download(self, auth_account=GPM_ACCOUNT, tmp_dir=GS_TMP_DIR, days=1):
ftp = FTP(self.base_url)
ftp.login(auth_account, auth_account)
ftp.cwd('/NRTPUB/imerg/gis/early')
file_list = ftp.nlst()
pattern = '.+\.1day\.tif' if days == 1 else '.+\-S120000\-.+\.1day\.tif'
re_1day = re.compile(pattern)
files = sorted([x for x in file_list if re_1day.search(x)])[-days:]
for file_1day in files:
with open(os.path.join(self.tmp_dir, file_1day), 'wb') as outfile:
ftp.retrbinary('RETR %s' % file_1day, outfile.write)
tfw_file = file_1day.replace('.tif', '.tfw')
with open(os.path.join(self.tmp_dir, tfw_file), 'wb') as outfile:
ftp.retrbinary('RETR %s' % tfw_file, outfile.write)
return files
def parse_name(self, tifname):
name_subs = re.search(
'IMERG\.(\d{8}-S\d{6}).+\.(3hr|30min|1day)\.tif', tifname)
imgtime = datetime.datetime.strptime(name_subs.group(1),
"%Y%m%d-S%H%M%S")
imgstrtime = imgtime.strftime("%Y-%m-%d %H:00")
layer_title = "NASA Global Precipitation Estimate ({}) - {} UTC".format(
name_subs.group(2), imgstrtime)
return layer_title, imgtime
def convert(self, tif_file):
layer_title, imgtime = self.parse_name(tif_file)
time_format = imgtime.strftime('%Y%m%dT%H0000000Z')
tif_out = "{prefix}_{time}.tif".format(
prefix=self.layer_name,
time=time_format)
# Use gdal_translate to embed projection info
gdal_translate(os.path.join(self.tmp_dir, tif_file),
os.path.join(self.tmp_dir, tif_out),
nodata=0, projection="EPSG:4326")
return tif_out
def run(self, days=1):
tifs = self.download(days=days)
for tif_file in tifs:
projected_tif = self.convert(tif_file)
dst_file = self.data_dir.format(gsd=GS_DATA_DIR, ws=self.workspace,
layer=self.layer_name,
file=projected_tif)
dst_dir = os.path.dirname(dst_file)
if not os.path.exists(dst_dir):
os.makedirs(dst_dir)
if dst_file.endswith('.tif'):
shutil.move(os.path.join(self.tmp_dir, projected_tif), dst_file)
self.post_geoserver(dst_file, self.layer_name)
layer_title, imgtime = self.parse_name(tifs[-1])
self.drop_old_hourly_images(imgtime, self.layer_name)
self.drop_old_daily_images(imgtime, self.layer_name)
if not style_exists(self.layer_name):
with open(os.path.join(script_dir, 'resources/gpm.sld')) as sld:
self.set_default_style(self.layer_name,
self.layer_name, sld.read())
self.update_geonode(
self.layer_name,
title=layer_title,
description=self.description,
store=self.layer_name,
bounds=('-180.0', '180.0', '-90.0', '90.0',
'EPSG:4326'),
extra_keywords=['category:Climatology Meteorology'])
self.truncate_gs_cache(self.layer_name)
self.cleanup()
def cleanup(self):
"""
Remove any files in the temp directory matching
the processor class prefix or layer name
"""
filelist = glob.glob("{}*.*".format(
os.path.join(self.tmp_dir, self.layer_name)))
for f in filelist:
os.remove(f)
super(GPMProcessor, self).cleanup()
if __name__ == '__main__':
processor = GPMProcessor()
processor.run()
|
StarcoderdataPython
|
6531885
|
"""
To install, change to this directory and do:
sudo python setup.py install
"""
from distutils.core import setup
setup(name='navboxplus',
version='1.0',
description="An Unscented Estimation and Adaptive Control Package",
author="<NAME>",
packages=['navboxplus'])
|
StarcoderdataPython
|
1844145
|
"""Module containing factory class for building uvicorn app for Galaxy.
Information on uvicorn, its various settings, and how to invoke it can
be found at https://www.uvicorn.org/.
Galaxy can be launched with uvicorn using the following invocation:
::
uvicorn --app-dir lib --factory galaxy.webapps.galaxy.fast_factory:factory
Use the environment variable ``GALAXY_CONFIG_FILE`` to specify a Galaxy
configuration file. Galaxy configuration can be loading from a YAML
or an .ini file (reads app:main currently but can be overridden with
GALAXY_CONFIG_SECTION).
::
GALAXY_CONFIG_FILE=config/galaxy.yml uvicorn --app-dir lib --factory galaxy.webapps.galaxy.fast_factory:factory
.. note::
Information on additional ways to configure uvicorn can be found at
https://www.uvicorn.org/.
.. warning::
If an ini file is supplied via GALAXY_CONFIG_FILE, the server properties
such as host and port are not read from the file like older forms of
configuring Galaxy.
`Gunicorn <https://docs.gunicorn.org/en/stable/index.html>`__ is a server with
more complex management options.
This factory function can be executed as a uvicorn worker managed with gunicorn
with the following command-line.
::
gunicorn 'galaxy.webapps.galaxy.fast_factory:factory()' --env GALAXY_CONFIG_FILE=config/galaxy.ini --pythonpath lib -w 4 -k uvicorn.workers.UvicornWorker
"""
import os
from galaxy.main_config import (
absolute_config_path,
config_is_ini,
DEFAULT_CONFIG_SECTION,
DEFAULT_INI_APP,
find_config,
)
from galaxy.web_stack import get_app_kwds
from galaxy.webapps.galaxy.buildapp import app_pair
from .fast_app import initialize_fast_app
def factory():
kwds = get_app_kwds("galaxy", "galaxy")
config_file = kwds.get("config_file")
if not config_file and "GALAXY_CONFIG_FILE" in os.environ:
config_file = os.path.abspath(os.environ["GALAXY_CONFIG_FILE"])
else:
galaxy_root = kwds.get("galaxy_root")
config_file = find_config(config_file, galaxy_root)
config_file = absolute_config_path(config_file, galaxy_root=galaxy_root)
if "GALAXY_CONFIG_SECTION" in os.environ:
config_section = os.environ["GALAXY_CONFIG_SECTION"]
elif config_is_ini(config_file):
config_section = "app:%s" % DEFAULT_INI_APP
else:
config_section = DEFAULT_CONFIG_SECTION
if 'config_file' not in kwds:
kwds['config_file'] = config_file
if 'config_section' not in kwds:
kwds['config_section'] = config_section
global_conf = {}
if config_is_ini(config_file):
global_conf["__file__"] = config_file
gx_webapp, gx_app = app_pair(global_conf=global_conf, load_app_kwds=kwds, wsgi_preflight=False)
return initialize_fast_app(gx_webapp, gx_app)
|
StarcoderdataPython
|
1802433
|
from server.crud.base import CRUDBase
from server.db.models import ProductsTable
from server.schemas.product import ProductCreate, ProductUpdate
class CRUDProduct(CRUDBase[ProductsTable, ProductCreate, ProductUpdate]):
pass
product_crud = CRUDProduct(ProductsTable)
|
StarcoderdataPython
|
6534681
|
<gh_stars>0
from typing import Union, Optional
import networkx as nx
import numpy as np
import numpy.typing as npt
def check_for_adjacency_matrix(adjacency_matrix: npt.NDArray[np.int_], ) -> bool:
if not isinstance(adjacency_matrix, np.ndarray):
raise TypeError(f"An adjacency_matrix of type numpy.ndarray is expected, "
f"{type(adjacency_matrix)} is obtained instead.")
if adjacency_matrix.ndim < 2:
raise ValueError("Given adjacency_matrix has less then 2 dimensions.")
if not adjacency_matrix.shape[-1] == adjacency_matrix.shape[-2]:
raise ValueError("Given matrix or matrices are not square.")
if np.any(adjacency_matrix > 1):
raise NotImplementedError("Hypergraph mutation is not yet supported.")
return True
def from_adjacency_matrix(adjacency_matrix: npt.NDArray[np.int_],
prob: Union[float, int] = 0.1,
rng: Optional[np.random.Generator] = None,
n: int = 1) -> npt.NDArray[np.int_]:
"""Generates mutations in the adjacency matrix with the given mutation probability.
Parameters
----------
adjacency_matrix :
The adjacency matrix to be mutated.
prob :
Edge mutation probability. Default is 0.1. Must satisfy the following inequality:
float: 0.0 <= prob <= 1.0
int: 0 <= prob <= 100
If an integer value is given, it is interpreted as a percentage.
rng :
Numpy random generator. Default is np.random.default_rng.
n :
Number of mutations to generate.
Returns
-------
numpy.ndarray
n mutated adjacency matrices of the same shape as an input matrix.
"""
if isinstance(prob, int):
prob /= 100
if prob > 1:
raise ValueError("Given probability is greater than 1 (or 100%)")
if rng is None:
rng = np.random.default_rng()
if check_for_adjacency_matrix(adjacency_matrix):
masks = rng.choice([0, 1], size=(n, *adjacency_matrix.shape), p=(1 - prob, prob))
masks = np.tril(masks, k=-1)
transpose_order = np.arange(0, len(masks.shape))
masks += np.transpose(masks, axes=(*transpose_order[:-2], transpose_order[-1], transpose_order[-2]))
mut_adjacency_matrices: npt.NDArray[np.int_] = adjacency_matrix.astype(int) ^ masks
return mut_adjacency_matrices
def from_graph(graph: nx.Graph,
prob: Union[float, int] = 0.1,
rng: Optional[np.random.Generator] = None) -> nx.Graph:
"""Generates mutated graph with the given mutation probability.
Parameters
----------
graph
The graph to be mutated.
prob
Edge mutation probability. Default is 0.1. Must satisfy the following inequality:
float: 0.0 <= prob <= 1.0
int: 0 <= prob <= 100
If an integer value is given, it is interpreted as a percentage.
rng
Numpy random generator. Default is np.random.default_rng.
Returns
-------
networkx.Graph
Mutated graph.
"""
if not isinstance(graph, nx.Graph):
raise TypeError(f"A graph of type networkx.classes.graph.Graph is expected, "
f"{type(graph)} is obtained instead.")
elif isinstance(graph, (nx.DiGraph, nx.MultiGraph, nx.MultiDiGraph)):
raise TypeError("Types DiGraph, MultiGraph and MultiDiGraph are not implemented yet.")
adjacency_matrix = nx.to_numpy_array(graph, dtype=int)
return nx.from_numpy_array(from_adjacency_matrix(adjacency_matrix, prob, rng)[0])
|
StarcoderdataPython
|
9715213
|
<reponame>dyllllll/tencentcloud-sdk-python<gh_stars>1-10
# -*- coding: utf8 -*-
# Copyright (c) 2017-2018 THL A29 Limited, a Tencent company. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from tencentcloud.common.abstract_model import AbstractModel
class AccessInfo(AbstractModel):
"""HTTP域名相关信息
"""
def __init__(self):
"""
:param Host: 域名
:type Host: str
:param Vip: VIP
:type Vip: str
"""
self.Host = None
self.Vip = None
def _deserialize(self, params):
self.Host = params.get("Host")
self.Vip = params.get("Vip")
class Alias(AbstractModel):
"""函数的版本别名
"""
def __init__(self):
"""
:param FunctionVersion: 别名指向的主版本
:type FunctionVersion: str
:param Name: 别名的名称
:type Name: str
:param RoutingConfig: 别名的路由信息
注意:此字段可能返回 null,表示取不到有效值。
:type RoutingConfig: :class:`tencentcloud.scf.v20180416.models.RoutingConfig`
:param Description: 描述信息
注意:此字段可能返回 null,表示取不到有效值。
:type Description: str
:param AddTime: 创建时间
注意:此字段可能返回 null,表示取不到有效值。
:type AddTime: str
:param ModTime: 更新时间
注意:此字段可能返回 null,表示取不到有效值。
:type ModTime: str
"""
self.FunctionVersion = None
self.Name = None
self.RoutingConfig = None
self.Description = None
self.AddTime = None
self.ModTime = None
def _deserialize(self, params):
self.FunctionVersion = params.get("FunctionVersion")
self.Name = params.get("Name")
if params.get("RoutingConfig") is not None:
self.RoutingConfig = RoutingConfig()
self.RoutingConfig._deserialize(params.get("RoutingConfig"))
self.Description = params.get("Description")
self.AddTime = params.get("AddTime")
self.ModTime = params.get("ModTime")
class CfsConfig(AbstractModel):
"""文件系统(cfs)配置描述
"""
def __init__(self):
"""
:param CfsInsList: 文件系统信息列表
:type CfsInsList: list of CfsInsInfo
"""
self.CfsInsList = None
def _deserialize(self, params):
if params.get("CfsInsList") is not None:
self.CfsInsList = []
for item in params.get("CfsInsList"):
obj = CfsInsInfo()
obj._deserialize(item)
self.CfsInsList.append(obj)
class CfsInsInfo(AbstractModel):
"""云函数关联的cfs配置信息
"""
def __init__(self):
"""
:param UserId: 用户id
:type UserId: str
:param UserGroupId: 用户组id
:type UserGroupId: str
:param CfsId: 文件系统实例id
:type CfsId: str
:param MountInsId: 文件系统挂载点id
:type MountInsId: str
:param LocalMountDir: 本地挂载点
:type LocalMountDir: str
:param RemoteMountDir: 远程挂载点
:type RemoteMountDir: str
:param IpAddress: 文件系统ip,配置 cfs 时无需填写。
注意:此字段可能返回 null,表示取不到有效值。
注意:此字段可能返回 null,表示取不到有效值。
:type IpAddress: str
:param MountVpcId: 文件系统所在的私有网络id,配置 cfs 时无需填写。
注意:此字段可能返回 null,表示取不到有效值。
注意:此字段可能返回 null,表示取不到有效值。
:type MountVpcId: str
:param MountSubnetId: 文件系统所在私有网络的子网id,配置 cfs 时无需填写。
注意:此字段可能返回 null,表示取不到有效值。
注意:此字段可能返回 null,表示取不到有效值。
:type MountSubnetId: str
"""
self.UserId = None
self.UserGroupId = None
self.CfsId = None
self.MountInsId = None
self.LocalMountDir = None
self.RemoteMountDir = None
self.IpAddress = None
self.MountVpcId = None
self.MountSubnetId = None
def _deserialize(self, params):
self.UserId = params.get("UserId")
self.UserGroupId = params.get("UserGroupId")
self.CfsId = params.get("CfsId")
self.MountInsId = params.get("MountInsId")
self.LocalMountDir = params.get("LocalMountDir")
self.RemoteMountDir = params.get("RemoteMountDir")
self.IpAddress = params.get("IpAddress")
self.MountVpcId = params.get("MountVpcId")
self.MountSubnetId = params.get("MountSubnetId")
class Code(AbstractModel):
"""函数代码
"""
def __init__(self):
"""
:param CosBucketName: 对象存储桶名称
:type CosBucketName: str
:param CosObjectName: 对象存储对象路径
:type CosObjectName: str
:param ZipFile: 包含函数代码文件及其依赖项的 zip 格式文件,使用该接口时要求将 zip 文件的内容转成 base64 编码,最大支持20M
:type ZipFile: str
:param CosBucketRegion: 对象存储的地域,地域为北京时需要传入ap-beijing,北京一区时需要传递ap-beijing-1,其他的地域不需要传递。
:type CosBucketRegion: str
:param DemoId: 如果是通过Demo创建的话,需要传入DemoId
:type DemoId: str
:param TempCosObjectName: 如果是从TempCos创建的话,需要传入TempCosObjectName
:type TempCosObjectName: str
:param GitUrl: Git地址
:type GitUrl: str
:param GitUserName: Git用户名
:type GitUserName: str
:param GitPassword: Git密码
:type GitPassword: str
:param GitPasswordSecret: 加密后的Git密码,一般无需指定
:type GitPasswordSecret: str
:param GitBranch: Git分支
:type GitBranch: str
:param GitDirectory: 代码在Git仓库中的路径
:type GitDirectory: str
:param GitCommitId: 指定要拉取的版本
:type GitCommitId: str
:param GitUserNameSecret: 加密后的Git用户名,一般无需指定
:type GitUserNameSecret: str
"""
self.CosBucketName = None
self.CosObjectName = None
self.ZipFile = None
self.CosBucketRegion = None
self.DemoId = None
self.TempCosObjectName = None
self.GitUrl = None
self.GitUserName = None
self.GitPassword = None
self.GitPasswordSecret = None
self.GitBranch = None
self.GitDirectory = None
self.GitCommitId = None
self.GitUserNameSecret = None
def _deserialize(self, params):
self.CosBucketName = params.get("CosBucketName")
self.CosObjectName = params.get("CosObjectName")
self.ZipFile = params.get("ZipFile")
self.CosBucketRegion = params.get("CosBucketRegion")
self.DemoId = params.get("DemoId")
self.TempCosObjectName = params.get("TempCosObjectName")
self.GitUrl = params.get("GitUrl")
self.GitUserName = params.get("GitUserName")
self.GitPassword = params.get("GitPassword")
self.GitPasswordSecret = params.get("GitPasswordSecret")
self.GitBranch = params.get("GitBranch")
self.GitDirectory = params.get("GitDirectory")
self.GitCommitId = params.get("GitCommitId")
self.GitUserNameSecret = params.get("GitUserNameSecret")
class CopyFunctionRequest(AbstractModel):
"""CopyFunction请求参数结构体
"""
def __init__(self):
"""
:param FunctionName: 要复制的函数的名称
:type FunctionName: str
:param NewFunctionName: 新函数的名称
:type NewFunctionName: str
:param Namespace: 要复制的函数所在的命名空间,默认为default
:type Namespace: str
:param TargetNamespace: 将函数复制到的命名空间,默认为default
:type TargetNamespace: str
:param Description: 新函数的描述
:type Description: str
:param TargetRegion: 要将函数复制到的地域,不填则默认为当前地域
:type TargetRegion: str
:param Override: 如果目标Namespace下已有同名函数,是否覆盖,默认为否
(注意:如果选择覆盖,会导致同名函数被删除,请慎重操作)
TRUE:覆盖同名函数
FALSE:不覆盖同名函数
:type Override: bool
:param CopyConfiguration: 是否复制函数的属性,包括环境变量、内存、超时、函数描述、标签、VPC等,默认为是。
TRUE:复制函数配置
FALSE:不复制函数配置
:type CopyConfiguration: bool
"""
self.FunctionName = None
self.NewFunctionName = None
self.Namespace = None
self.TargetNamespace = None
self.Description = None
self.TargetRegion = None
self.Override = None
self.CopyConfiguration = None
def _deserialize(self, params):
self.FunctionName = params.get("FunctionName")
self.NewFunctionName = params.get("NewFunctionName")
self.Namespace = params.get("Namespace")
self.TargetNamespace = params.get("TargetNamespace")
self.Description = params.get("Description")
self.TargetRegion = params.get("TargetRegion")
self.Override = params.get("Override")
self.CopyConfiguration = params.get("CopyConfiguration")
class CopyFunctionResponse(AbstractModel):
"""CopyFunction返回参数结构体
"""
def __init__(self):
"""
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.RequestId = None
def _deserialize(self, params):
self.RequestId = params.get("RequestId")
class CreateAliasRequest(AbstractModel):
"""CreateAlias请求参数结构体
"""
def __init__(self):
"""
:param Name: 别名的名称,在函数级别中唯一,只能包含字母、数字、'_'和‘-’,且必须以字母开头,长度限制为1-64
:type Name: str
:param FunctionName: 函数名称
:type FunctionName: str
:param FunctionVersion: 别名指向的主版本
:type FunctionVersion: str
:param Namespace: 函数所在的命名空间
:type Namespace: str
:param RoutingConfig: 别名的请求路由配置
:type RoutingConfig: :class:`tencentcloud.scf.v20180416.models.RoutingConfig`
:param Description: 别名的描述信息
:type Description: str
"""
self.Name = None
self.FunctionName = None
self.FunctionVersion = None
self.Namespace = None
self.RoutingConfig = None
self.Description = None
def _deserialize(self, params):
self.Name = params.get("Name")
self.FunctionName = params.get("FunctionName")
self.FunctionVersion = params.get("FunctionVersion")
self.Namespace = params.get("Namespace")
if params.get("RoutingConfig") is not None:
self.RoutingConfig = RoutingConfig()
self.RoutingConfig._deserialize(params.get("RoutingConfig"))
self.Description = params.get("Description")
class CreateAliasResponse(AbstractModel):
"""CreateAlias返回参数结构体
"""
def __init__(self):
"""
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.RequestId = None
def _deserialize(self, params):
self.RequestId = params.get("RequestId")
class CreateFunctionRequest(AbstractModel):
"""CreateFunction请求参数结构体
"""
def __init__(self):
"""
:param FunctionName: 创建的函数名称,函数名称支持26个英文字母大小写、数字、连接符和下划线,第一个字符只能以字母开头,最后一个字符不能为连接符或者下划线,名称长度2-60
:type FunctionName: str
:param Code: 函数代码. 注意:不能同时指定Cos、ZipFile或 DemoId。
:type Code: :class:`tencentcloud.scf.v20180416.models.Code`
:param Handler: 函数处理方法名称,名称格式支持 "文件名称.方法名称" 形式(java 名称格式 包名.类名::方法名),文件名称和函数名称之间以"."隔开,文件名称和函数名称要求以字母开始和结尾,中间允许插入字母、数字、下划线和连接符,文件名称和函数名字的长度要求是 2-60 个字符
:type Handler: str
:param Description: 函数描述,最大支持 1000 个英文字母、数字、空格、逗号、换行符和英文句号,支持中文
:type Description: str
:param MemorySize: 函数运行时内存大小,默认为 128M,可选范围 64、128MB-3072MB,并且以 128MB 为阶梯
:type MemorySize: int
:param Timeout: 函数最长执行时间,单位为秒,可选值范围 1-900 秒,默认为 3 秒
:type Timeout: int
:param Environment: 函数的环境变量
:type Environment: :class:`tencentcloud.scf.v20180416.models.Environment`
:param Runtime: 函数运行环境,目前仅支持 Python2.7,Python3.6,Nodejs6.10,Nodejs8.9,Nodejs10.15,Nodejs12.16, Php5, Php7,Go1,Java8 和 CustomRuntime,默认Python2.7
:type Runtime: str
:param VpcConfig: 函数的私有网络配置
:type VpcConfig: :class:`tencentcloud.scf.v20180416.models.VpcConfig`
:param Namespace: 函数所属命名空间
:type Namespace: str
:param Role: 函数绑定的角色
:type Role: str
:param ClsLogsetId: 函数日志投递到的CLS LogsetID
:type ClsLogsetId: str
:param ClsTopicId: 函数日志投递到的CLS TopicID
:type ClsTopicId: str
:param Type: 函数类型,默认值为Event,创建触发器函数请填写Event,创建HTTP函数级服务请填写HTTP
:type Type: str
:param CodeSource: CodeSource 代码来源,支持ZipFile, Cos, Demo 其中之一
:type CodeSource: str
:param Layers: 函数要关联的Layer版本列表,Layer会按照在列表中顺序依次覆盖。
:type Layers: list of LayerVersionSimple
:param DeadLetterConfig: 死信队列参数
:type DeadLetterConfig: :class:`tencentcloud.scf.v20180416.models.DeadLetterConfig`
:param PublicNetConfig: 公网访问配置
:type PublicNetConfig: :class:`tencentcloud.scf.v20180416.models.PublicNetConfigIn`
:param CfsConfig: 文件系统配置参数,用于云函数挂载文件系统
:type CfsConfig: :class:`tencentcloud.scf.v20180416.models.CfsConfig`
:param InitTimeout: 函数初始化超时时间
:type InitTimeout: int
:param Tags: 函数 Tag 参数,以键值对数组形式传入
:type Tags: list of Tag
"""
self.FunctionName = None
self.Code = None
self.Handler = None
self.Description = None
self.MemorySize = None
self.Timeout = None
self.Environment = None
self.Runtime = None
self.VpcConfig = None
self.Namespace = None
self.Role = None
self.ClsLogsetId = None
self.ClsTopicId = None
self.Type = None
self.CodeSource = None
self.Layers = None
self.DeadLetterConfig = None
self.PublicNetConfig = None
self.CfsConfig = None
self.InitTimeout = None
self.Tags = None
def _deserialize(self, params):
self.FunctionName = params.get("FunctionName")
if params.get("Code") is not None:
self.Code = Code()
self.Code._deserialize(params.get("Code"))
self.Handler = params.get("Handler")
self.Description = params.get("Description")
self.MemorySize = params.get("MemorySize")
self.Timeout = params.get("Timeout")
if params.get("Environment") is not None:
self.Environment = Environment()
self.Environment._deserialize(params.get("Environment"))
self.Runtime = params.get("Runtime")
if params.get("VpcConfig") is not None:
self.VpcConfig = VpcConfig()
self.VpcConfig._deserialize(params.get("VpcConfig"))
self.Namespace = params.get("Namespace")
self.Role = params.get("Role")
self.ClsLogsetId = params.get("ClsLogsetId")
self.ClsTopicId = params.get("ClsTopicId")
self.Type = params.get("Type")
self.CodeSource = params.get("CodeSource")
if params.get("Layers") is not None:
self.Layers = []
for item in params.get("Layers"):
obj = LayerVersionSimple()
obj._deserialize(item)
self.Layers.append(obj)
if params.get("DeadLetterConfig") is not None:
self.DeadLetterConfig = DeadLetterConfig()
self.DeadLetterConfig._deserialize(params.get("DeadLetterConfig"))
if params.get("PublicNetConfig") is not None:
self.PublicNetConfig = PublicNetConfigIn()
self.PublicNetConfig._deserialize(params.get("PublicNetConfig"))
if params.get("CfsConfig") is not None:
self.CfsConfig = CfsConfig()
self.CfsConfig._deserialize(params.get("CfsConfig"))
self.InitTimeout = params.get("InitTimeout")
if params.get("Tags") is not None:
self.Tags = []
for item in params.get("Tags"):
obj = Tag()
obj._deserialize(item)
self.Tags.append(obj)
class CreateFunctionResponse(AbstractModel):
"""CreateFunction返回参数结构体
"""
def __init__(self):
"""
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.RequestId = None
def _deserialize(self, params):
self.RequestId = params.get("RequestId")
class CreateNamespaceRequest(AbstractModel):
"""CreateNamespace请求参数结构体
"""
def __init__(self):
"""
:param Namespace: 命名空间名称
:type Namespace: str
:param Description: 命名空间描述
:type Description: str
"""
self.Namespace = None
self.Description = None
def _deserialize(self, params):
self.Namespace = params.get("Namespace")
self.Description = params.get("Description")
class CreateNamespaceResponse(AbstractModel):
"""CreateNamespace返回参数结构体
"""
def __init__(self):
"""
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.RequestId = None
def _deserialize(self, params):
self.RequestId = params.get("RequestId")
class CreateTriggerRequest(AbstractModel):
"""CreateTrigger请求参数结构体
"""
def __init__(self):
"""
:param FunctionName: 新建触发器绑定的函数名称
:type FunctionName: str
:param TriggerName: 新建触发器名称。如果是定时触发器,名称支持英文字母、数字、连接符和下划线,最长100个字符;如果是cos触发器,需要是对应cos存储桶适用于XML API的访问域名(例如:5401-5ff414-12345.cos.ap-shanghai.myqcloud.com);如果是其他触发器,见具体触发器绑定参数的说明
:type TriggerName: str
:param Type: 触发器类型,目前支持 cos 、cmq、 timer、 ckafka、apigw类型
:type Type: str
:param TriggerDesc: 触发器对应的参数,可见具体[触发器描述说明](https://cloud.tencent.com/document/product/583/39901)
:type TriggerDesc: str
:param Namespace: 函数的命名空间
:type Namespace: str
:param Qualifier: 函数的版本
:type Qualifier: str
:param Enable: 触发器的初始是能状态 OPEN表示开启 CLOSE表示关闭
:type Enable: str
:param CustomArgument: 用户自定义参数,仅支持timer触发器
:type CustomArgument: str
"""
self.FunctionName = None
self.TriggerName = None
self.Type = None
self.TriggerDesc = None
self.Namespace = None
self.Qualifier = None
self.Enable = None
self.CustomArgument = None
def _deserialize(self, params):
self.FunctionName = params.get("FunctionName")
self.TriggerName = params.get("TriggerName")
self.Type = params.get("Type")
self.TriggerDesc = params.get("TriggerDesc")
self.Namespace = params.get("Namespace")
self.Qualifier = params.get("Qualifier")
self.Enable = params.get("Enable")
self.CustomArgument = params.get("CustomArgument")
class CreateTriggerResponse(AbstractModel):
"""CreateTrigger返回参数结构体
"""
def __init__(self):
"""
:param TriggerInfo: 触发器信息
:type TriggerInfo: :class:`tencentcloud.scf.v20180416.models.Trigger`
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.TriggerInfo = None
self.RequestId = None
def _deserialize(self, params):
if params.get("TriggerInfo") is not None:
self.TriggerInfo = Trigger()
self.TriggerInfo._deserialize(params.get("TriggerInfo"))
self.RequestId = params.get("RequestId")
class DeadLetterConfig(AbstractModel):
"""死信队列参数
"""
def __init__(self):
"""
:param Type: 死信队列模式
:type Type: str
:param Name: 死信队列名称
:type Name: str
:param FilterType: 死信队列主题模式的标签形式
:type FilterType: str
"""
self.Type = None
self.Name = None
self.FilterType = None
def _deserialize(self, params):
self.Type = params.get("Type")
self.Name = params.get("Name")
self.FilterType = params.get("FilterType")
class DeleteAliasRequest(AbstractModel):
"""DeleteAlias请求参数结构体
"""
def __init__(self):
"""
:param FunctionName: 函数名称
:type FunctionName: str
:param Name: 别名的名称
:type Name: str
:param Namespace: 函数所在的命名空间
:type Namespace: str
"""
self.FunctionName = None
self.Name = None
self.Namespace = None
def _deserialize(self, params):
self.FunctionName = params.get("FunctionName")
self.Name = params.get("Name")
self.Namespace = params.get("Namespace")
class DeleteAliasResponse(AbstractModel):
"""DeleteAlias返回参数结构体
"""
def __init__(self):
"""
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.RequestId = None
def _deserialize(self, params):
self.RequestId = params.get("RequestId")
class DeleteFunctionRequest(AbstractModel):
"""DeleteFunction请求参数结构体
"""
def __init__(self):
"""
:param FunctionName: 要删除的函数名称
:type FunctionName: str
:param Namespace: 函数所属命名空间
:type Namespace: str
:param Qualifier: 函数版本
:type Qualifier: str
"""
self.FunctionName = None
self.Namespace = None
self.Qualifier = None
def _deserialize(self, params):
self.FunctionName = params.get("FunctionName")
self.Namespace = params.get("Namespace")
self.Qualifier = params.get("Qualifier")
class DeleteFunctionResponse(AbstractModel):
"""DeleteFunction返回参数结构体
"""
def __init__(self):
"""
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.RequestId = None
def _deserialize(self, params):
self.RequestId = params.get("RequestId")
class DeleteLayerVersionRequest(AbstractModel):
"""DeleteLayerVersion请求参数结构体
"""
def __init__(self):
"""
:param LayerName: 层名称
:type LayerName: str
:param LayerVersion: 版本号
:type LayerVersion: int
"""
self.LayerName = None
self.LayerVersion = None
def _deserialize(self, params):
self.LayerName = params.get("LayerName")
self.LayerVersion = params.get("LayerVersion")
class DeleteLayerVersionResponse(AbstractModel):
"""DeleteLayerVersion返回参数结构体
"""
def __init__(self):
"""
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.RequestId = None
def _deserialize(self, params):
self.RequestId = params.get("RequestId")
class DeleteNamespaceRequest(AbstractModel):
"""DeleteNamespace请求参数结构体
"""
def __init__(self):
"""
:param Namespace: 命名空间名称
:type Namespace: str
"""
self.Namespace = None
def _deserialize(self, params):
self.Namespace = params.get("Namespace")
class DeleteNamespaceResponse(AbstractModel):
"""DeleteNamespace返回参数结构体
"""
def __init__(self):
"""
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.RequestId = None
def _deserialize(self, params):
self.RequestId = params.get("RequestId")
class DeleteProvisionedConcurrencyConfigRequest(AbstractModel):
"""DeleteProvisionedConcurrencyConfig请求参数结构体
"""
def __init__(self):
"""
:param FunctionName: 需要删除预置并发的函数的名称
:type FunctionName: str
:param Qualifier: 函数的版本号
:type Qualifier: str
:param Namespace: 函数所属命名空间,默认为default
:type Namespace: str
"""
self.FunctionName = None
self.Qualifier = None
self.Namespace = None
def _deserialize(self, params):
self.FunctionName = params.get("FunctionName")
self.Qualifier = params.get("Qualifier")
self.Namespace = params.get("Namespace")
class DeleteProvisionedConcurrencyConfigResponse(AbstractModel):
"""DeleteProvisionedConcurrencyConfig返回参数结构体
"""
def __init__(self):
"""
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.RequestId = None
def _deserialize(self, params):
self.RequestId = params.get("RequestId")
class DeleteReservedConcurrencyConfigRequest(AbstractModel):
"""DeleteReservedConcurrencyConfig请求参数结构体
"""
def __init__(self):
"""
:param FunctionName: 需要删除预置并发的函数的名称
:type FunctionName: str
:param Namespace: 函数所属命名空间,默认为default
:type Namespace: str
"""
self.FunctionName = None
self.Namespace = None
def _deserialize(self, params):
self.FunctionName = params.get("FunctionName")
self.Namespace = params.get("Namespace")
class DeleteReservedConcurrencyConfigResponse(AbstractModel):
"""DeleteReservedConcurrencyConfig返回参数结构体
"""
def __init__(self):
"""
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.RequestId = None
def _deserialize(self, params):
self.RequestId = params.get("RequestId")
class DeleteTriggerRequest(AbstractModel):
"""DeleteTrigger请求参数结构体
"""
def __init__(self):
"""
:param FunctionName: 函数的名称
:type FunctionName: str
:param TriggerName: 要删除的触发器名称
:type TriggerName: str
:param Type: 要删除的触发器类型,目前支持 cos 、cmq、 timer、ckafka 类型
:type Type: str
:param Namespace: 函数所属命名空间
:type Namespace: str
:param TriggerDesc: 如果删除的触发器类型为 COS 触发器,该字段为必填值,存放 JSON 格式的数据 {"event":"cos:ObjectCreated:*"},数据内容和 SetTrigger 接口中该字段的格式相同;如果删除的触发器类型为定时触发器或 CMQ 触发器,可以不指定该字段
:type TriggerDesc: str
:param Qualifier: 函数的版本信息
:type Qualifier: str
"""
self.FunctionName = None
self.TriggerName = None
self.Type = None
self.Namespace = None
self.TriggerDesc = None
self.Qualifier = None
def _deserialize(self, params):
self.FunctionName = params.get("FunctionName")
self.TriggerName = params.get("TriggerName")
self.Type = params.get("Type")
self.Namespace = params.get("Namespace")
self.TriggerDesc = params.get("TriggerDesc")
self.Qualifier = params.get("Qualifier")
class DeleteTriggerResponse(AbstractModel):
"""DeleteTrigger返回参数结构体
"""
def __init__(self):
"""
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.RequestId = None
def _deserialize(self, params):
self.RequestId = params.get("RequestId")
class EipConfigIn(AbstractModel):
"""公网访问固定ip配置
"""
def __init__(self):
"""
:param EipStatus: Eip开启状态,取值['ENABLE','DISABLE']
:type EipStatus: str
"""
self.EipStatus = None
def _deserialize(self, params):
self.EipStatus = params.get("EipStatus")
class EipConfigOut(AbstractModel):
"""公网访问固定ip配置
"""
def __init__(self):
"""
:param EipStatus: 是否是固定IP,["ENABLE","DISABLE"]
:type EipStatus: str
:param EipAddress: IP列表
注意:此字段可能返回 null,表示取不到有效值。
:type EipAddress: list of str
"""
self.EipStatus = None
self.EipAddress = None
def _deserialize(self, params):
self.EipStatus = params.get("EipStatus")
self.EipAddress = params.get("EipAddress")
class EipOutConfig(AbstractModel):
"""EipOutConfig
"""
def __init__(self):
"""
:param EipFixed: 是否是固定IP,["TRUE","FALSE"]
:type EipFixed: str
:param Eips: IP列表
:type Eips: list of str
"""
self.EipFixed = None
self.Eips = None
def _deserialize(self, params):
self.EipFixed = params.get("EipFixed")
self.Eips = params.get("Eips")
class Environment(AbstractModel):
"""函数的环境变量参数
"""
def __init__(self):
"""
:param Variables: 环境变量数组
:type Variables: list of Variable
"""
self.Variables = None
def _deserialize(self, params):
if params.get("Variables") is not None:
self.Variables = []
for item in params.get("Variables"):
obj = Variable()
obj._deserialize(item)
self.Variables.append(obj)
class Filter(AbstractModel):
"""描述键值对过滤器,用于条件过滤查询。例如过滤ID、名称、状态等
若存在多个Filter时,Filter间的关系为逻辑与(AND)关系。
若同一个Filter存在多个Values,同一Filter下Values间的关系为逻辑或(OR)关系。
"""
def __init__(self):
"""
:param Name: 需要过滤的字段。
:type Name: str
:param Values: 字段的过滤值。
:type Values: list of str
"""
self.Name = None
self.Values = None
def _deserialize(self, params):
self.Name = params.get("Name")
self.Values = params.get("Values")
class Function(AbstractModel):
"""函数列表
"""
def __init__(self):
"""
:param ModTime: 修改时间
:type ModTime: str
:param AddTime: 创建时间
:type AddTime: str
:param Runtime: 运行时
:type Runtime: str
:param FunctionName: 函数名称
:type FunctionName: str
:param FunctionId: 函数ID
:type FunctionId: str
:param Namespace: 命名空间
:type Namespace: str
:param Status: 函数状态,状态值及流转[参考此处](https://cloud.tencent.com/document/product/583/47175)
:type Status: str
:param StatusDesc: 函数状态详情
:type StatusDesc: str
:param Description: 函数描述
:type Description: str
:param Tags: 函数标签
:type Tags: list of Tag
:param Type: 函数类型,取值为 HTTP 或者 Event
:type Type: str
:param StatusReasons: 函数状态失败原因
:type StatusReasons: list of StatusReason
:param TotalProvisionedConcurrencyMem: 函数所有版本预置并发内存总和
注意:此字段可能返回 null,表示取不到有效值。
:type TotalProvisionedConcurrencyMem: int
:param ReservedConcurrencyMem: 函数并发保留内存
注意:此字段可能返回 null,表示取不到有效值。
:type ReservedConcurrencyMem: int
"""
self.ModTime = None
self.AddTime = None
self.Runtime = None
self.FunctionName = None
self.FunctionId = None
self.Namespace = None
self.Status = None
self.StatusDesc = None
self.Description = None
self.Tags = None
self.Type = None
self.StatusReasons = None
self.TotalProvisionedConcurrencyMem = None
self.ReservedConcurrencyMem = None
def _deserialize(self, params):
self.ModTime = params.get("ModTime")
self.AddTime = params.get("AddTime")
self.Runtime = params.get("Runtime")
self.FunctionName = params.get("FunctionName")
self.FunctionId = params.get("FunctionId")
self.Namespace = params.get("Namespace")
self.Status = params.get("Status")
self.StatusDesc = params.get("StatusDesc")
self.Description = params.get("Description")
if params.get("Tags") is not None:
self.Tags = []
for item in params.get("Tags"):
obj = Tag()
obj._deserialize(item)
self.Tags.append(obj)
self.Type = params.get("Type")
if params.get("StatusReasons") is not None:
self.StatusReasons = []
for item in params.get("StatusReasons"):
obj = StatusReason()
obj._deserialize(item)
self.StatusReasons.append(obj)
self.TotalProvisionedConcurrencyMem = params.get("TotalProvisionedConcurrencyMem")
self.ReservedConcurrencyMem = params.get("ReservedConcurrencyMem")
class FunctionLog(AbstractModel):
"""日志信息
"""
def __init__(self):
"""
:param FunctionName: 函数的名称
:type FunctionName: str
:param RetMsg: 函数执行完成后的返回值
:type RetMsg: str
:param RequestId: 执行该函数对应的requestId
:type RequestId: str
:param StartTime: 函数开始执行时的时间点
:type StartTime: str
:param RetCode: 函数执行结果,如果是 0 表示执行成功,其他值表示失败
:type RetCode: int
:param InvokeFinished: 函数调用是否结束,如果是 1 表示执行结束,其他值表示调用异常
:type InvokeFinished: int
:param Duration: 函数执行耗时,单位为 ms
:type Duration: float
:param BillDuration: 函数计费时间,根据 duration 向上取最近的 100ms,单位为ms
:type BillDuration: int
:param MemUsage: 函数执行时消耗实际内存大小,单位为 Byte
:type MemUsage: int
:param Log: 函数执行过程中的日志输出
:type Log: str
:param Level: 日志等级
:type Level: str
:param Source: 日志来源
:type Source: str
:param RetryNum: 重试次数
:type RetryNum: int
"""
self.FunctionName = None
self.RetMsg = None
self.RequestId = None
self.StartTime = None
self.RetCode = None
self.InvokeFinished = None
self.Duration = None
self.BillDuration = None
self.MemUsage = None
self.Log = None
self.Level = None
self.Source = None
self.RetryNum = None
def _deserialize(self, params):
self.FunctionName = params.get("FunctionName")
self.RetMsg = params.get("RetMsg")
self.RequestId = params.get("RequestId")
self.StartTime = params.get("StartTime")
self.RetCode = params.get("RetCode")
self.InvokeFinished = params.get("InvokeFinished")
self.Duration = params.get("Duration")
self.BillDuration = params.get("BillDuration")
self.MemUsage = params.get("MemUsage")
self.Log = params.get("Log")
self.Level = params.get("Level")
self.Source = params.get("Source")
self.RetryNum = params.get("RetryNum")
class FunctionVersion(AbstractModel):
"""函数版本信息
"""
def __init__(self):
"""
:param Version: 函数版本名称
:type Version: str
:param Description: 版本描述信息
注意:此字段可能返回 null,表示取不到有效值。
:type Description: str
:param AddTime: 创建时间
注意:此字段可能返回 null,表示取不到有效值。
:type AddTime: str
:param ModTime: 更新时间
注意:此字段可能返回 null,表示取不到有效值。
:type ModTime: str
"""
self.Version = None
self.Description = None
self.AddTime = None
self.ModTime = None
def _deserialize(self, params):
self.Version = params.get("Version")
self.Description = params.get("Description")
self.AddTime = params.get("AddTime")
self.ModTime = params.get("ModTime")
class GetAliasRequest(AbstractModel):
"""GetAlias请求参数结构体
"""
def __init__(self):
"""
:param FunctionName: 函数名称
:type FunctionName: str
:param Name: 别名的名称
:type Name: str
:param Namespace: 函数所在的命名空间
:type Namespace: str
"""
self.FunctionName = None
self.Name = None
self.Namespace = None
def _deserialize(self, params):
self.FunctionName = params.get("FunctionName")
self.Name = params.get("Name")
self.Namespace = params.get("Namespace")
class GetAliasResponse(AbstractModel):
"""GetAlias返回参数结构体
"""
def __init__(self):
"""
:param FunctionVersion: 别名指向的主版本
:type FunctionVersion: str
:param Name: 别名的名称
:type Name: str
:param RoutingConfig: 别名的路由信息
:type RoutingConfig: :class:`tencentcloud.scf.v20180416.models.RoutingConfig`
:param Description: 别名的描述
注意:此字段可能返回 null,表示取不到有效值。
:type Description: str
:param AddTime: 创建时间
注意:此字段可能返回 null,表示取不到有效值。
:type AddTime: str
:param ModTime: 更新时间
注意:此字段可能返回 null,表示取不到有效值。
:type ModTime: str
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.FunctionVersion = None
self.Name = None
self.RoutingConfig = None
self.Description = None
self.AddTime = None
self.ModTime = None
self.RequestId = None
def _deserialize(self, params):
self.FunctionVersion = params.get("FunctionVersion")
self.Name = params.get("Name")
if params.get("RoutingConfig") is not None:
self.RoutingConfig = RoutingConfig()
self.RoutingConfig._deserialize(params.get("RoutingConfig"))
self.Description = params.get("Description")
self.AddTime = params.get("AddTime")
self.ModTime = params.get("ModTime")
self.RequestId = params.get("RequestId")
class GetFunctionAddressRequest(AbstractModel):
"""GetFunctionAddress请求参数结构体
"""
def __init__(self):
"""
:param FunctionName: 函数的名称
:type FunctionName: str
:param Qualifier: 函数的版本
:type Qualifier: str
:param Namespace: 函数的命名空间
:type Namespace: str
"""
self.FunctionName = None
self.Qualifier = None
self.Namespace = None
def _deserialize(self, params):
self.FunctionName = params.get("FunctionName")
self.Qualifier = params.get("Qualifier")
self.Namespace = params.get("Namespace")
class GetFunctionAddressResponse(AbstractModel):
"""GetFunctionAddress返回参数结构体
"""
def __init__(self):
"""
:param Url: 函数的Cos地址
:type Url: str
:param CodeSha256: 函数的SHA256编码
:type CodeSha256: str
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.Url = None
self.CodeSha256 = None
self.RequestId = None
def _deserialize(self, params):
self.Url = params.get("Url")
self.CodeSha256 = params.get("CodeSha256")
self.RequestId = params.get("RequestId")
class GetFunctionLogsRequest(AbstractModel):
"""GetFunctionLogs请求参数结构体
"""
def __init__(self):
"""
:param FunctionName: 函数的名称
:type FunctionName: str
:param Offset: 数据的偏移量,Offset+Limit不能大于10000
:type Offset: int
:param Limit: 返回数据的长度,Offset+Limit不能大于10000
:type Limit: int
:param Order: 以升序还是降序的方式对日志进行排序,可选值 desc和 asc
:type Order: str
:param OrderBy: 根据某个字段排序日志,支持以下字段:function_name, duration, mem_usage, start_time
:type OrderBy: str
:param Filter: 日志过滤条件。可用来区分正确和错误日志,filter.RetCode=not0 表示只返回错误日志,filter.RetCode=is0 表示只返回正确日志,不传,则返回所有日志
:type Filter: :class:`tencentcloud.scf.v20180416.models.LogFilter`
:param Namespace: 函数的命名空间
:type Namespace: str
:param Qualifier: 函数的版本
:type Qualifier: str
:param FunctionRequestId: 执行该函数对应的requestId
:type FunctionRequestId: str
:param StartTime: 查询的具体日期,例如:2017-05-16 20:00:00,只能与endtime相差一天之内
:type StartTime: str
:param EndTime: 查询的具体日期,例如:2017-05-16 20:59:59,只能与startTime相差一天之内
:type EndTime: str
:param SearchContext: 该字段已下线
:type SearchContext: :class:`tencentcloud.scf.v20180416.models.LogSearchContext`
"""
self.FunctionName = None
self.Offset = None
self.Limit = None
self.Order = None
self.OrderBy = None
self.Filter = None
self.Namespace = None
self.Qualifier = None
self.FunctionRequestId = None
self.StartTime = None
self.EndTime = None
self.SearchContext = None
def _deserialize(self, params):
self.FunctionName = params.get("FunctionName")
self.Offset = params.get("Offset")
self.Limit = params.get("Limit")
self.Order = params.get("Order")
self.OrderBy = params.get("OrderBy")
if params.get("Filter") is not None:
self.Filter = LogFilter()
self.Filter._deserialize(params.get("Filter"))
self.Namespace = params.get("Namespace")
self.Qualifier = params.get("Qualifier")
self.FunctionRequestId = params.get("FunctionRequestId")
self.StartTime = params.get("StartTime")
self.EndTime = params.get("EndTime")
if params.get("SearchContext") is not None:
self.SearchContext = LogSearchContext()
self.SearchContext._deserialize(params.get("SearchContext"))
class GetFunctionLogsResponse(AbstractModel):
"""GetFunctionLogs返回参数结构体
"""
def __init__(self):
"""
:param TotalCount: 函数日志的总数
:type TotalCount: int
:param Data: 函数日志信息
:type Data: list of FunctionLog
:param SearchContext: 该字段已下线
:type SearchContext: :class:`tencentcloud.scf.v20180416.models.LogSearchContext`
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.TotalCount = None
self.Data = None
self.SearchContext = None
self.RequestId = None
def _deserialize(self, params):
self.TotalCount = params.get("TotalCount")
if params.get("Data") is not None:
self.Data = []
for item in params.get("Data"):
obj = FunctionLog()
obj._deserialize(item)
self.Data.append(obj)
if params.get("SearchContext") is not None:
self.SearchContext = LogSearchContext()
self.SearchContext._deserialize(params.get("SearchContext"))
self.RequestId = params.get("RequestId")
class GetFunctionRequest(AbstractModel):
"""GetFunction请求参数结构体
"""
def __init__(self):
"""
:param FunctionName: 需要获取详情的函数名称
:type FunctionName: str
:param Qualifier: 函数的版本号
:type Qualifier: str
:param Namespace: 函数所属命名空间
:type Namespace: str
:param ShowCode: 是否显示代码, TRUE表示显示代码,FALSE表示不显示代码,大于1M的入口文件不会显示
:type ShowCode: str
"""
self.FunctionName = None
self.Qualifier = None
self.Namespace = None
self.ShowCode = None
def _deserialize(self, params):
self.FunctionName = params.get("FunctionName")
self.Qualifier = params.get("Qualifier")
self.Namespace = params.get("Namespace")
self.ShowCode = params.get("ShowCode")
class GetFunctionResponse(AbstractModel):
"""GetFunction返回参数结构体
"""
def __init__(self):
"""
:param ModTime: 函数的最后修改时间
:type ModTime: str
:param CodeInfo: 函数的代码
:type CodeInfo: str
:param Description: 函数的描述信息
:type Description: str
:param Triggers: 函数的触发器列表
:type Triggers: list of Trigger
:param Handler: 函数的入口
:type Handler: str
:param CodeSize: 函数代码大小
:type CodeSize: int
:param Timeout: 函数的超时时间
:type Timeout: int
:param FunctionVersion: 函数的版本
:type FunctionVersion: str
:param MemorySize: 函数的最大可用内存
:type MemorySize: int
:param Runtime: 函数的运行环境
:type Runtime: str
:param FunctionName: 函数的名称
:type FunctionName: str
:param VpcConfig: 函数的私有网络
:type VpcConfig: :class:`tencentcloud.scf.v20180416.models.VpcConfig`
:param UseGpu: 是否使用GPU
:type UseGpu: str
:param Environment: 函数的环境变量
:type Environment: :class:`tencentcloud.scf.v20180416.models.Environment`
:param CodeResult: 代码是否正确
:type CodeResult: str
:param CodeError: 代码错误信息
:type CodeError: str
:param ErrNo: 代码错误码
:type ErrNo: int
:param Namespace: 函数的命名空间
:type Namespace: str
:param Role: 函数绑定的角色
:type Role: str
:param InstallDependency: 是否自动安装依赖
:type InstallDependency: str
:param Status: 函数状态,状态值及流转[参考说明](https://cloud.tencent.com/document/product/583/47175)
:type Status: str
:param StatusDesc: 状态描述
:type StatusDesc: str
:param ClsLogsetId: 日志投递到的Cls日志集
:type ClsLogsetId: str
:param ClsTopicId: 日志投递到的Cls Topic
:type ClsTopicId: str
:param FunctionId: 函数ID
:type FunctionId: str
:param Tags: 函数的标签列表
:type Tags: list of Tag
:param EipConfig: EipConfig配置
:type EipConfig: :class:`tencentcloud.scf.v20180416.models.EipOutConfig`
:param AccessInfo: 域名信息
:type AccessInfo: :class:`tencentcloud.scf.v20180416.models.AccessInfo`
:param Type: 函数类型,取值为HTTP或者Event
:type Type: str
:param L5Enable: 是否启用L5
:type L5Enable: str
:param Layers: 函数关联的Layer版本信息
:type Layers: list of LayerVersionInfo
:param DeadLetterConfig: 函数关联的死信队列信息
:type DeadLetterConfig: :class:`tencentcloud.scf.v20180416.models.DeadLetterConfig`
:param AddTime: 函数创建时间
:type AddTime: str
:param PublicNetConfig: 公网访问配置
注意:此字段可能返回 null,表示取不到有效值。
:type PublicNetConfig: :class:`tencentcloud.scf.v20180416.models.PublicNetConfigOut`
:param OnsEnable: 是否启用Ons
注意:此字段可能返回 null,表示取不到有效值。
:type OnsEnable: str
:param CfsConfig: 文件系统配置参数,用于云函数挂载文件系统
注意:此字段可能返回 null,表示取不到有效值。
:type CfsConfig: :class:`tencentcloud.scf.v20180416.models.CfsConfig`
:param AvailableStatus: 函数的计费状态,状态值[参考此处](https://cloud.tencent.com/document/product/583/47175#.E5.87.BD.E6.95.B0.E8.AE.A1.E8.B4.B9.E7.8A.B6.E6.80.81)
注意:此字段可能返回 null,表示取不到有效值。
:type AvailableStatus: str
:param Qualifier: 函数版本
注意:此字段可能返回 null,表示取不到有效值。
:type Qualifier: str
:param InitTimeout: 函数初始化超时时间
:type InitTimeout: int
:param StatusReasons: 函数状态失败原因
注意:此字段可能返回 null,表示取不到有效值。
:type StatusReasons: list of StatusReason
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.ModTime = None
self.CodeInfo = None
self.Description = None
self.Triggers = None
self.Handler = None
self.CodeSize = None
self.Timeout = None
self.FunctionVersion = None
self.MemorySize = None
self.Runtime = None
self.FunctionName = None
self.VpcConfig = None
self.UseGpu = None
self.Environment = None
self.CodeResult = None
self.CodeError = None
self.ErrNo = None
self.Namespace = None
self.Role = None
self.InstallDependency = None
self.Status = None
self.StatusDesc = None
self.ClsLogsetId = None
self.ClsTopicId = None
self.FunctionId = None
self.Tags = None
self.EipConfig = None
self.AccessInfo = None
self.Type = None
self.L5Enable = None
self.Layers = None
self.DeadLetterConfig = None
self.AddTime = None
self.PublicNetConfig = None
self.OnsEnable = None
self.CfsConfig = None
self.AvailableStatus = None
self.Qualifier = None
self.InitTimeout = None
self.StatusReasons = None
self.RequestId = None
def _deserialize(self, params):
self.ModTime = params.get("ModTime")
self.CodeInfo = params.get("CodeInfo")
self.Description = params.get("Description")
if params.get("Triggers") is not None:
self.Triggers = []
for item in params.get("Triggers"):
obj = Trigger()
obj._deserialize(item)
self.Triggers.append(obj)
self.Handler = params.get("Handler")
self.CodeSize = params.get("CodeSize")
self.Timeout = params.get("Timeout")
self.FunctionVersion = params.get("FunctionVersion")
self.MemorySize = params.get("MemorySize")
self.Runtime = params.get("Runtime")
self.FunctionName = params.get("FunctionName")
if params.get("VpcConfig") is not None:
self.VpcConfig = VpcConfig()
self.VpcConfig._deserialize(params.get("VpcConfig"))
self.UseGpu = params.get("UseGpu")
if params.get("Environment") is not None:
self.Environment = Environment()
self.Environment._deserialize(params.get("Environment"))
self.CodeResult = params.get("CodeResult")
self.CodeError = params.get("CodeError")
self.ErrNo = params.get("ErrNo")
self.Namespace = params.get("Namespace")
self.Role = params.get("Role")
self.InstallDependency = params.get("InstallDependency")
self.Status = params.get("Status")
self.StatusDesc = params.get("StatusDesc")
self.ClsLogsetId = params.get("ClsLogsetId")
self.ClsTopicId = params.get("ClsTopicId")
self.FunctionId = params.get("FunctionId")
if params.get("Tags") is not None:
self.Tags = []
for item in params.get("Tags"):
obj = Tag()
obj._deserialize(item)
self.Tags.append(obj)
if params.get("EipConfig") is not None:
self.EipConfig = EipOutConfig()
self.EipConfig._deserialize(params.get("EipConfig"))
if params.get("AccessInfo") is not None:
self.AccessInfo = AccessInfo()
self.AccessInfo._deserialize(params.get("AccessInfo"))
self.Type = params.get("Type")
self.L5Enable = params.get("L5Enable")
if params.get("Layers") is not None:
self.Layers = []
for item in params.get("Layers"):
obj = LayerVersionInfo()
obj._deserialize(item)
self.Layers.append(obj)
if params.get("DeadLetterConfig") is not None:
self.DeadLetterConfig = DeadLetterConfig()
self.DeadLetterConfig._deserialize(params.get("DeadLetterConfig"))
self.AddTime = params.get("AddTime")
if params.get("PublicNetConfig") is not None:
self.PublicNetConfig = PublicNetConfigOut()
self.PublicNetConfig._deserialize(params.get("PublicNetConfig"))
self.OnsEnable = params.get("OnsEnable")
if params.get("CfsConfig") is not None:
self.CfsConfig = CfsConfig()
self.CfsConfig._deserialize(params.get("CfsConfig"))
self.AvailableStatus = params.get("AvailableStatus")
self.Qualifier = params.get("Qualifier")
self.InitTimeout = params.get("InitTimeout")
if params.get("StatusReasons") is not None:
self.StatusReasons = []
for item in params.get("StatusReasons"):
obj = StatusReason()
obj._deserialize(item)
self.StatusReasons.append(obj)
self.RequestId = params.get("RequestId")
class GetLayerVersionRequest(AbstractModel):
"""GetLayerVersion请求参数结构体
"""
def __init__(self):
"""
:param LayerName: 层名称
:type LayerName: str
:param LayerVersion: 版本号
:type LayerVersion: int
"""
self.LayerName = None
self.LayerVersion = None
def _deserialize(self, params):
self.LayerName = params.get("LayerName")
self.LayerVersion = params.get("LayerVersion")
class GetLayerVersionResponse(AbstractModel):
"""GetLayerVersion返回参数结构体
"""
def __init__(self):
"""
:param CompatibleRuntimes: 适配的运行时
:type CompatibleRuntimes: list of str
:param CodeSha256: 层中版本文件的SHA256编码
:type CodeSha256: str
:param Location: 层中版本文件的下载地址
:type Location: str
:param AddTime: 版本的创建时间
:type AddTime: str
:param Description: 版本的描述
:type Description: str
:param LicenseInfo: 许可证信息
:type LicenseInfo: str
:param LayerVersion: 版本号
:type LayerVersion: int
:param LayerName: 层名称
:type LayerName: str
:param Status: 层的具体版本当前状态,可能取值:
Active 正常
Publishing 发布中
PublishFailed 发布失败
Deleted 已删除
:type Status: str
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.CompatibleRuntimes = None
self.CodeSha256 = None
self.Location = None
self.AddTime = None
self.Description = None
self.LicenseInfo = None
self.LayerVersion = None
self.LayerName = None
self.Status = None
self.RequestId = None
def _deserialize(self, params):
self.CompatibleRuntimes = params.get("CompatibleRuntimes")
self.CodeSha256 = params.get("CodeSha256")
self.Location = params.get("Location")
self.AddTime = params.get("AddTime")
self.Description = params.get("Description")
self.LicenseInfo = params.get("LicenseInfo")
self.LayerVersion = params.get("LayerVersion")
self.LayerName = params.get("LayerName")
self.Status = params.get("Status")
self.RequestId = params.get("RequestId")
class GetProvisionedConcurrencyConfigRequest(AbstractModel):
"""GetProvisionedConcurrencyConfig请求参数结构体
"""
def __init__(self):
"""
:param FunctionName: 需要获取预置并发详情的函数名称。
:type FunctionName: str
:param Namespace: 函数所在的命名空间,默认为default。
:type Namespace: str
:param Qualifier: 函数版本号,不传则返回函数所有版本的预置并发信息。
:type Qualifier: str
"""
self.FunctionName = None
self.Namespace = None
self.Qualifier = None
def _deserialize(self, params):
self.FunctionName = params.get("FunctionName")
self.Namespace = params.get("Namespace")
self.Qualifier = params.get("Qualifier")
class GetProvisionedConcurrencyConfigResponse(AbstractModel):
"""GetProvisionedConcurrencyConfig返回参数结构体
"""
def __init__(self):
"""
:param UnallocatedConcurrencyNum: 该函数剩余可配置的预置并发数。
:type UnallocatedConcurrencyNum: int
:param Allocated: 函数已预置的并发配置详情。
:type Allocated: list of VersionProvisionedConcurrencyInfo
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.UnallocatedConcurrencyNum = None
self.Allocated = None
self.RequestId = None
def _deserialize(self, params):
self.UnallocatedConcurrencyNum = params.get("UnallocatedConcurrencyNum")
if params.get("Allocated") is not None:
self.Allocated = []
for item in params.get("Allocated"):
obj = VersionProvisionedConcurrencyInfo()
obj._deserialize(item)
self.Allocated.append(obj)
self.RequestId = params.get("RequestId")
class GetReservedConcurrencyConfigRequest(AbstractModel):
"""GetReservedConcurrencyConfig请求参数结构体
"""
def __init__(self):
"""
:param FunctionName: 需要获取预置并发详情的函数名称。
:type FunctionName: str
:param Namespace: 函数所在的命名空间,默认为default。
:type Namespace: str
"""
self.FunctionName = None
self.Namespace = None
def _deserialize(self, params):
self.FunctionName = params.get("FunctionName")
self.Namespace = params.get("Namespace")
class GetReservedConcurrencyConfigResponse(AbstractModel):
"""GetReservedConcurrencyConfig返回参数结构体
"""
def __init__(self):
"""
:param ReservedMem: 该函数的保留并发内存。
注意:此字段可能返回 null,表示取不到有效值。
:type ReservedMem: int
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.ReservedMem = None
self.RequestId = None
def _deserialize(self, params):
self.ReservedMem = params.get("ReservedMem")
self.RequestId = params.get("RequestId")
class InvokeRequest(AbstractModel):
"""Invoke请求参数结构体
"""
def __init__(self):
"""
:param FunctionName: 函数名称
:type FunctionName: str
:param InvocationType: RequestResponse(同步) 和 Event(异步),默认为同步
:type InvocationType: str
:param Qualifier: 触发函数的版本号
:type Qualifier: str
:param ClientContext: 运行函数时的参数,以json格式传入,最大支持的参数长度是 1M
:type ClientContext: str
:param LogType: 同步调用时指定该字段,返回值会包含4K的日志,可选值为None和Tail,默认值为None。当该值为Tail时,返回参数中的logMsg字段会包含对应的函数执行日志
:type LogType: str
:param Namespace: 命名空间
:type Namespace: str
:param RoutingKey: 函数灰度流量控制调用,以json格式传入,例如{"k":"v"},注意kv都需要是字符串类型,最大支持的参数长度是1024字节
:type RoutingKey: str
"""
self.FunctionName = None
self.InvocationType = None
self.Qualifier = None
self.ClientContext = None
self.LogType = None
self.Namespace = None
self.RoutingKey = None
def _deserialize(self, params):
self.FunctionName = params.get("FunctionName")
self.InvocationType = params.get("InvocationType")
self.Qualifier = params.get("Qualifier")
self.ClientContext = params.get("ClientContext")
self.LogType = params.get("LogType")
self.Namespace = params.get("Namespace")
self.RoutingKey = params.get("RoutingKey")
class InvokeResponse(AbstractModel):
"""Invoke返回参数结构体
"""
def __init__(self):
"""
:param Result: 函数执行结果
:type Result: :class:`tencentcloud.scf.v20180416.models.Result`
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.Result = None
self.RequestId = None
def _deserialize(self, params):
if params.get("Result") is not None:
self.Result = Result()
self.Result._deserialize(params.get("Result"))
self.RequestId = params.get("RequestId")
class LayerVersionInfo(AbstractModel):
"""层版本信息
"""
def __init__(self):
"""
:param CompatibleRuntimes: 版本适用的运行时
注意:此字段可能返回 null,表示取不到有效值。
:type CompatibleRuntimes: list of str
:param AddTime: 创建时间
:type AddTime: str
:param Description: 版本描述
注意:此字段可能返回 null,表示取不到有效值。
:type Description: str
:param LicenseInfo: 许可证信息
注意:此字段可能返回 null,表示取不到有效值。
:type LicenseInfo: str
:param LayerVersion: 版本号
:type LayerVersion: int
:param LayerName: 层名称
:type LayerName: str
:param Status: 层的具体版本当前状态,状态值[参考此处](https://cloud.tencent.com/document/product/583/47175#.E5.B1.82.EF.BC.88layer.EF.BC.89.E7.8A.B6.E6.80.81)
:type Status: str
"""
self.CompatibleRuntimes = None
self.AddTime = None
self.Description = None
self.LicenseInfo = None
self.LayerVersion = None
self.LayerName = None
self.Status = None
def _deserialize(self, params):
self.CompatibleRuntimes = params.get("CompatibleRuntimes")
self.AddTime = params.get("AddTime")
self.Description = params.get("Description")
self.LicenseInfo = params.get("LicenseInfo")
self.LayerVersion = params.get("LayerVersion")
self.LayerName = params.get("LayerName")
self.Status = params.get("Status")
class LayerVersionSimple(AbstractModel):
"""指定某个Layer版本
"""
def __init__(self):
"""
:param LayerName: layer名称
:type LayerName: str
:param LayerVersion: 版本号
:type LayerVersion: int
"""
self.LayerName = None
self.LayerVersion = None
def _deserialize(self, params):
self.LayerName = params.get("LayerName")
self.LayerVersion = params.get("LayerVersion")
class ListAliasesRequest(AbstractModel):
"""ListAliases请求参数结构体
"""
def __init__(self):
"""
:param FunctionName: 函数名称
:type FunctionName: str
:param Namespace: 函数所在的命名空间
:type Namespace: str
:param FunctionVersion: 如果提供此参数,则只返回与该函数版本有关联的别名
:type FunctionVersion: str
:param Offset: 数据偏移量,默认值为 0
:type Offset: str
:param Limit: 返回数据长度,默认值为 20
:type Limit: str
"""
self.FunctionName = None
self.Namespace = None
self.FunctionVersion = None
self.Offset = None
self.Limit = None
def _deserialize(self, params):
self.FunctionName = params.get("FunctionName")
self.Namespace = params.get("Namespace")
self.FunctionVersion = params.get("FunctionVersion")
self.Offset = params.get("Offset")
self.Limit = params.get("Limit")
class ListAliasesResponse(AbstractModel):
"""ListAliases返回参数结构体
"""
def __init__(self):
"""
:param Aliases: 别名列表
:type Aliases: list of Alias
:param TotalCount: 别名总数
注意:此字段可能返回 null,表示取不到有效值。
:type TotalCount: int
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.Aliases = None
self.TotalCount = None
self.RequestId = None
def _deserialize(self, params):
if params.get("Aliases") is not None:
self.Aliases = []
for item in params.get("Aliases"):
obj = Alias()
obj._deserialize(item)
self.Aliases.append(obj)
self.TotalCount = params.get("TotalCount")
self.RequestId = params.get("RequestId")
class ListFunctionsRequest(AbstractModel):
"""ListFunctions请求参数结构体
"""
def __init__(self):
"""
:param Order: 以升序还是降序的方式返回结果,可选值 ASC 和 DESC
:type Order: str
:param Orderby: 根据哪个字段进行返回结果排序,支持以下字段:AddTime, ModTime, FunctionName
:type Orderby: str
:param Offset: 数据偏移量,默认值为 0
:type Offset: int
:param Limit: 返回数据长度,默认值为 20
:type Limit: int
:param SearchKey: 支持FunctionName模糊匹配
:type SearchKey: str
:param Namespace: 命名空间
:type Namespace: str
:param Description: 函数描述,支持模糊搜索
:type Description: str
:param Filters: 过滤条件。
- tag:tag-key - String - 是否必填:否 - (过滤条件)按照标签键值对进行过滤。 tag-key使用具体的标签键进行替换。
每次请求的Filters的上限为10,Filter.Values的上限为5。
:type Filters: list of Filter
"""
self.Order = None
self.Orderby = None
self.Offset = None
self.Limit = None
self.SearchKey = None
self.Namespace = None
self.Description = None
self.Filters = None
def _deserialize(self, params):
self.Order = params.get("Order")
self.Orderby = params.get("Orderby")
self.Offset = params.get("Offset")
self.Limit = params.get("Limit")
self.SearchKey = params.get("SearchKey")
self.Namespace = params.get("Namespace")
self.Description = params.get("Description")
if params.get("Filters") is not None:
self.Filters = []
for item in params.get("Filters"):
obj = Filter()
obj._deserialize(item)
self.Filters.append(obj)
class ListFunctionsResponse(AbstractModel):
"""ListFunctions返回参数结构体
"""
def __init__(self):
"""
:param Functions: 函数列表
:type Functions: list of Function
:param TotalCount: 总数
:type TotalCount: int
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.Functions = None
self.TotalCount = None
self.RequestId = None
def _deserialize(self, params):
if params.get("Functions") is not None:
self.Functions = []
for item in params.get("Functions"):
obj = Function()
obj._deserialize(item)
self.Functions.append(obj)
self.TotalCount = params.get("TotalCount")
self.RequestId = params.get("RequestId")
class ListLayerVersionsRequest(AbstractModel):
"""ListLayerVersions请求参数结构体
"""
def __init__(self):
"""
:param LayerName: 层名称
:type LayerName: str
:param CompatibleRuntime: 适配的运行时
:type CompatibleRuntime: list of str
"""
self.LayerName = None
self.CompatibleRuntime = None
def _deserialize(self, params):
self.LayerName = params.get("LayerName")
self.CompatibleRuntime = params.get("CompatibleRuntime")
class ListLayerVersionsResponse(AbstractModel):
"""ListLayerVersions返回参数结构体
"""
def __init__(self):
"""
:param LayerVersions: 层版本列表
:type LayerVersions: list of LayerVersionInfo
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.LayerVersions = None
self.RequestId = None
def _deserialize(self, params):
if params.get("LayerVersions") is not None:
self.LayerVersions = []
for item in params.get("LayerVersions"):
obj = LayerVersionInfo()
obj._deserialize(item)
self.LayerVersions.append(obj)
self.RequestId = params.get("RequestId")
class ListLayersRequest(AbstractModel):
"""ListLayers请求参数结构体
"""
def __init__(self):
"""
:param CompatibleRuntime: 适配的运行时
:type CompatibleRuntime: str
:param Offset: 偏移位置
:type Offset: int
:param Limit: 查询数目限制
:type Limit: int
:param SearchKey: 查询key,模糊匹配名称
:type SearchKey: str
"""
self.CompatibleRuntime = None
self.Offset = None
self.Limit = None
self.SearchKey = None
def _deserialize(self, params):
self.CompatibleRuntime = params.get("CompatibleRuntime")
self.Offset = params.get("Offset")
self.Limit = params.get("Limit")
self.SearchKey = params.get("SearchKey")
class ListLayersResponse(AbstractModel):
"""ListLayers返回参数结构体
"""
def __init__(self):
"""
:param Layers: 层列表
:type Layers: list of LayerVersionInfo
:param TotalCount: 层总数
:type TotalCount: int
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.Layers = None
self.TotalCount = None
self.RequestId = None
def _deserialize(self, params):
if params.get("Layers") is not None:
self.Layers = []
for item in params.get("Layers"):
obj = LayerVersionInfo()
obj._deserialize(item)
self.Layers.append(obj)
self.TotalCount = params.get("TotalCount")
self.RequestId = params.get("RequestId")
class ListNamespacesRequest(AbstractModel):
"""ListNamespaces请求参数结构体
"""
def __init__(self):
"""
:param Limit: 返回数据长度,默认值为 20
:type Limit: int
:param Offset: 数据的偏移量,默认值为 0
:type Offset: int
:param Orderby: 根据哪个字段进行返回结果排序,支持以下字段:Name,Updatetime
:type Orderby: str
:param Order: 以升序还是降序的方式返回结果,可选值 ASC 和 DESC
:type Order: str
"""
self.Limit = None
self.Offset = None
self.Orderby = None
self.Order = None
def _deserialize(self, params):
self.Limit = params.get("Limit")
self.Offset = params.get("Offset")
self.Orderby = params.get("Orderby")
self.Order = params.get("Order")
class ListNamespacesResponse(AbstractModel):
"""ListNamespaces返回参数结构体
"""
def __init__(self):
"""
:param Namespaces: namespace详情
:type Namespaces: list of Namespace
:param TotalCount: 返回的namespace数量
:type TotalCount: int
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.Namespaces = None
self.TotalCount = None
self.RequestId = None
def _deserialize(self, params):
if params.get("Namespaces") is not None:
self.Namespaces = []
for item in params.get("Namespaces"):
obj = Namespace()
obj._deserialize(item)
self.Namespaces.append(obj)
self.TotalCount = params.get("TotalCount")
self.RequestId = params.get("RequestId")
class ListTriggersRequest(AbstractModel):
"""ListTriggers请求参数结构体
"""
def __init__(self):
"""
:param FunctionName: 函数名称
:type FunctionName: str
:param Namespace: 命名空间,默认是default
:type Namespace: str
:param Offset: 数据偏移量,默认值为 0
:type Offset: int
:param Limit: 返回数据长度,默认值为 20
:type Limit: int
:param OrderBy: 根据哪个字段进行返回结果排序,支持以下字段:AddTime, ModTime,默认ModTime
:type OrderBy: str
:param Order: 以升序还是降序的方式返回结果,可选值 ASC 和 DESC,默认DESC
:type Order: str
:param Filters: * Qualifier:
函数版本,别名
:type Filters: list of Filter
"""
self.FunctionName = None
self.Namespace = None
self.Offset = None
self.Limit = None
self.OrderBy = None
self.Order = None
self.Filters = None
def _deserialize(self, params):
self.FunctionName = params.get("FunctionName")
self.Namespace = params.get("Namespace")
self.Offset = params.get("Offset")
self.Limit = params.get("Limit")
self.OrderBy = params.get("OrderBy")
self.Order = params.get("Order")
if params.get("Filters") is not None:
self.Filters = []
for item in params.get("Filters"):
obj = Filter()
obj._deserialize(item)
self.Filters.append(obj)
class ListTriggersResponse(AbstractModel):
"""ListTriggers返回参数结构体
"""
def __init__(self):
"""
:param TotalCount: 触发器总数
:type TotalCount: int
:param Triggers: 触发器列表
:type Triggers: list of TriggerInfo
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.TotalCount = None
self.Triggers = None
self.RequestId = None
def _deserialize(self, params):
self.TotalCount = params.get("TotalCount")
if params.get("Triggers") is not None:
self.Triggers = []
for item in params.get("Triggers"):
obj = TriggerInfo()
obj._deserialize(item)
self.Triggers.append(obj)
self.RequestId = params.get("RequestId")
class ListVersionByFunctionRequest(AbstractModel):
"""ListVersionByFunction请求参数结构体
"""
def __init__(self):
"""
:param FunctionName: 函数名
:type FunctionName: str
:param Namespace: 函数所在命名空间
:type Namespace: str
:param Offset: 数据偏移量,默认值为 0
:type Offset: int
:param Limit: 返回数据长度,默认值为 20
:type Limit: int
:param Order: 以升序还是降序的方式返回结果,可选值 ASC 和 DESC
:type Order: str
:param OrderBy: 根据哪个字段进行返回结果排序,支持以下字段:AddTime, ModTime
:type OrderBy: str
"""
self.FunctionName = None
self.Namespace = None
self.Offset = None
self.Limit = None
self.Order = None
self.OrderBy = None
def _deserialize(self, params):
self.FunctionName = params.get("FunctionName")
self.Namespace = params.get("Namespace")
self.Offset = params.get("Offset")
self.Limit = params.get("Limit")
self.Order = params.get("Order")
self.OrderBy = params.get("OrderBy")
class ListVersionByFunctionResponse(AbstractModel):
"""ListVersionByFunction返回参数结构体
"""
def __init__(self):
"""
:param FunctionVersion: 函数版本。
:type FunctionVersion: list of str
:param Versions: 函数版本列表。
注意:此字段可能返回 null,表示取不到有效值。
:type Versions: list of FunctionVersion
:param TotalCount: 函数版本总数。
注意:此字段可能返回 null,表示取不到有效值。
:type TotalCount: int
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.FunctionVersion = None
self.Versions = None
self.TotalCount = None
self.RequestId = None
def _deserialize(self, params):
self.FunctionVersion = params.get("FunctionVersion")
if params.get("Versions") is not None:
self.Versions = []
for item in params.get("Versions"):
obj = FunctionVersion()
obj._deserialize(item)
self.Versions.append(obj)
self.TotalCount = params.get("TotalCount")
self.RequestId = params.get("RequestId")
class LogFilter(AbstractModel):
"""日志过滤条件,用于区分正确与错误日志
"""
def __init__(self):
"""
:param RetCode: filter.RetCode的取值有:
not0 表示只返回错误日志,
is0 表示只返回正确日志,
TimeLimitExceeded 返回函数调用发生超时的日志,
ResourceLimitExceeded 返回函数调用发生资源超限的日志,
UserCodeException 返回函数调用发生用户代码错误的日志,
无输入则返回所有日志。
:type RetCode: str
"""
self.RetCode = None
def _deserialize(self, params):
self.RetCode = params.get("RetCode")
class LogSearchContext(AbstractModel):
"""日志搜索上下文
"""
def __init__(self):
"""
:param Offset: 偏移量
:type Offset: str
:param Limit: 日志条数
:type Limit: int
:param Keyword: 日志关键词
:type Keyword: str
:param Type: 日志类型,支持Application和Platform,默认为Application
:type Type: str
"""
self.Offset = None
self.Limit = None
self.Keyword = None
self.Type = None
def _deserialize(self, params):
self.Offset = params.get("Offset")
self.Limit = params.get("Limit")
self.Keyword = params.get("Keyword")
self.Type = params.get("Type")
class Namespace(AbstractModel):
"""命名空间
"""
def __init__(self):
"""
:param ModTime: 命名空间最后修改时间
:type ModTime: str
:param AddTime: 命名空间创建时间
:type AddTime: str
:param Description: 命名空间描述
:type Description: str
:param Name: 命名空间名称
:type Name: str
:param Type: 默认default,TCB表示是小程序云开发创建的
:type Type: str
"""
self.ModTime = None
self.AddTime = None
self.Description = None
self.Name = None
self.Type = None
def _deserialize(self, params):
self.ModTime = params.get("ModTime")
self.AddTime = params.get("AddTime")
self.Description = params.get("Description")
self.Name = params.get("Name")
self.Type = params.get("Type")
class PublicNetConfigIn(AbstractModel):
"""公网访问配置
"""
def __init__(self):
"""
:param PublicNetStatus: 是否开启公网访问能力取值['DISABLE','ENABLE']
:type PublicNetStatus: str
:param EipConfig: Eip配置
:type EipConfig: :class:`tencentcloud.scf.v20180416.models.EipConfigIn`
"""
self.PublicNetStatus = None
self.EipConfig = None
def _deserialize(self, params):
self.PublicNetStatus = params.get("PublicNetStatus")
if params.get("EipConfig") is not None:
self.EipConfig = EipConfigIn()
self.EipConfig._deserialize(params.get("EipConfig"))
class PublicNetConfigOut(AbstractModel):
"""公网访问配置
"""
def __init__(self):
"""
:param PublicNetStatus: 是否开启公网访问能力取值['DISABLE','ENABLE']
:type PublicNetStatus: str
:param EipConfig: Eip配置
:type EipConfig: :class:`tencentcloud.scf.v20180416.models.EipConfigOut`
"""
self.PublicNetStatus = None
self.EipConfig = None
def _deserialize(self, params):
self.PublicNetStatus = params.get("PublicNetStatus")
if params.get("EipConfig") is not None:
self.EipConfig = EipConfigOut()
self.EipConfig._deserialize(params.get("EipConfig"))
class PublishLayerVersionRequest(AbstractModel):
"""PublishLayerVersion请求参数结构体
"""
def __init__(self):
"""
:param LayerName: 层名称,支持26个英文字母大小写、数字、连接符和下划线,第一个字符只能以字母开头,最后一个字符不能为连接符或者下划线,名称长度1-64
:type LayerName: str
:param CompatibleRuntimes: 层适用的运行时,可多选,可选的值对应函数的 Runtime 可选值。
:type CompatibleRuntimes: list of str
:param Content: 层的文件来源或文件内容
:type Content: :class:`tencentcloud.scf.v20180416.models.Code`
:param Description: 层的版本的描述
:type Description: str
:param LicenseInfo: 层的软件许可证
:type LicenseInfo: str
"""
self.LayerName = None
self.CompatibleRuntimes = None
self.Content = None
self.Description = None
self.LicenseInfo = None
def _deserialize(self, params):
self.LayerName = params.get("LayerName")
self.CompatibleRuntimes = params.get("CompatibleRuntimes")
if params.get("Content") is not None:
self.Content = Code()
self.Content._deserialize(params.get("Content"))
self.Description = params.get("Description")
self.LicenseInfo = params.get("LicenseInfo")
class PublishLayerVersionResponse(AbstractModel):
"""PublishLayerVersion返回参数结构体
"""
def __init__(self):
"""
:param LayerVersion: 本次创建的层的版本号
:type LayerVersion: int
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.LayerVersion = None
self.RequestId = None
def _deserialize(self, params):
self.LayerVersion = params.get("LayerVersion")
self.RequestId = params.get("RequestId")
class PublishVersionRequest(AbstractModel):
"""PublishVersion请求参数结构体
"""
def __init__(self):
"""
:param FunctionName: 发布函数的名称
:type FunctionName: str
:param Description: 函数的描述
:type Description: str
:param Namespace: 函数的命名空间
:type Namespace: str
"""
self.FunctionName = None
self.Description = None
self.Namespace = None
def _deserialize(self, params):
self.FunctionName = params.get("FunctionName")
self.Description = params.get("Description")
self.Namespace = params.get("Namespace")
class PublishVersionResponse(AbstractModel):
"""PublishVersion返回参数结构体
"""
def __init__(self):
"""
:param FunctionVersion: 函数的版本
:type FunctionVersion: str
:param CodeSize: 代码大小
:type CodeSize: int
:param MemorySize: 最大可用内存
:type MemorySize: int
:param Description: 函数的描述
:type Description: str
:param Handler: 函数的入口
:type Handler: str
:param Timeout: 函数的超时时间
:type Timeout: int
:param Runtime: 函数的运行环境
:type Runtime: str
:param Namespace: 函数的命名空间
:type Namespace: str
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.FunctionVersion = None
self.CodeSize = None
self.MemorySize = None
self.Description = None
self.Handler = None
self.Timeout = None
self.Runtime = None
self.Namespace = None
self.RequestId = None
def _deserialize(self, params):
self.FunctionVersion = params.get("FunctionVersion")
self.CodeSize = params.get("CodeSize")
self.MemorySize = params.get("MemorySize")
self.Description = params.get("Description")
self.Handler = params.get("Handler")
self.Timeout = params.get("Timeout")
self.Runtime = params.get("Runtime")
self.Namespace = params.get("Namespace")
self.RequestId = params.get("RequestId")
class PutProvisionedConcurrencyConfigRequest(AbstractModel):
"""PutProvisionedConcurrencyConfig请求参数结构体
"""
def __init__(self):
"""
:param FunctionName: 需要设置预置并发的函数的名称
:type FunctionName: str
:param Qualifier: 函数的版本号,注:$LATEST版本不支持预置并发
:type Qualifier: str
:param VersionProvisionedConcurrencyNum: 预置并发数量,注:所有版本的预置并发数总和存在上限限制,当前的上限是:函数最大并发配额 - 100
:type VersionProvisionedConcurrencyNum: int
:param Namespace: 函数所属命名空间,默认为default
:type Namespace: str
"""
self.FunctionName = None
self.Qualifier = None
self.VersionProvisionedConcurrencyNum = None
self.Namespace = None
def _deserialize(self, params):
self.FunctionName = params.get("FunctionName")
self.Qualifier = params.get("Qualifier")
self.VersionProvisionedConcurrencyNum = params.get("VersionProvisionedConcurrencyNum")
self.Namespace = params.get("Namespace")
class PutProvisionedConcurrencyConfigResponse(AbstractModel):
"""PutProvisionedConcurrencyConfig返回参数结构体
"""
def __init__(self):
"""
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.RequestId = None
def _deserialize(self, params):
self.RequestId = params.get("RequestId")
class PutReservedConcurrencyConfigRequest(AbstractModel):
"""PutReservedConcurrencyConfig请求参数结构体
"""
def __init__(self):
"""
:param FunctionName: 需要设置预置并发的函数的名称
:type FunctionName: str
:param ReservedConcurrencyMem: 函数保留并发内存,注:函数的保留并发内存总和上限:用户总并发内存配额 - 12800
:type ReservedConcurrencyMem: int
:param Namespace: 函数所属命名空间,默认为default
:type Namespace: str
"""
self.FunctionName = None
self.ReservedConcurrencyMem = None
self.Namespace = None
def _deserialize(self, params):
self.FunctionName = params.get("FunctionName")
self.ReservedConcurrencyMem = params.get("ReservedConcurrencyMem")
self.Namespace = params.get("Namespace")
class PutReservedConcurrencyConfigResponse(AbstractModel):
"""PutReservedConcurrencyConfig返回参数结构体
"""
def __init__(self):
"""
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.RequestId = None
def _deserialize(self, params):
self.RequestId = params.get("RequestId")
class PutTotalConcurrencyConfigRequest(AbstractModel):
"""PutTotalConcurrencyConfig请求参数结构体
"""
def __init__(self):
"""
:param TotalConcurrencyMem: 账号并发内存配额,注:账号并发内存配额下限:用户已用并发内存总额 + 12800
:type TotalConcurrencyMem: int
:param Namespace: 命名空间,默认为default
:type Namespace: str
"""
self.TotalConcurrencyMem = None
self.Namespace = None
def _deserialize(self, params):
self.TotalConcurrencyMem = params.get("TotalConcurrencyMem")
self.Namespace = params.get("Namespace")
class PutTotalConcurrencyConfigResponse(AbstractModel):
"""PutTotalConcurrencyConfig返回参数结构体
"""
def __init__(self):
"""
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.RequestId = None
def _deserialize(self, params):
self.RequestId = params.get("RequestId")
class Result(AbstractModel):
"""运行函数的返回
"""
def __init__(self):
"""
:param Log: 表示执行过程中的日志输出,异步调用返回为空
:type Log: str
:param RetMsg: 表示执行函数的返回,异步调用返回为空
:type RetMsg: str
:param ErrMsg: 表示执行函数的错误返回信息,异步调用返回为空
:type ErrMsg: str
:param MemUsage: 执行函数时的内存大小,单位为Byte,异步调用返回为空
:type MemUsage: int
:param Duration: 表示执行函数的耗时,单位是毫秒,异步调用返回为空
:type Duration: float
:param BillDuration: 表示函数的计费耗时,单位是毫秒,异步调用返回为空
:type BillDuration: int
:param FunctionRequestId: 此次函数执行的Id
:type FunctionRequestId: str
:param InvokeResult: 0为正确,异步调用返回为空
:type InvokeResult: int
"""
self.Log = None
self.RetMsg = None
self.ErrMsg = None
self.MemUsage = None
self.Duration = None
self.BillDuration = None
self.FunctionRequestId = None
self.InvokeResult = None
def _deserialize(self, params):
self.Log = params.get("Log")
self.RetMsg = params.get("RetMsg")
self.ErrMsg = params.get("ErrMsg")
self.MemUsage = params.get("MemUsage")
self.Duration = params.get("Duration")
self.BillDuration = params.get("BillDuration")
self.FunctionRequestId = params.get("FunctionRequestId")
self.InvokeResult = params.get("InvokeResult")
class RoutingConfig(AbstractModel):
"""别名的版本路由配置
"""
def __init__(self):
"""
:param AdditionalVersionWeights: 随机权重路由附加版本
:type AdditionalVersionWeights: list of VersionWeight
:param AddtionVersionMatchs: 规则路由附加版本
:type AddtionVersionMatchs: list of VersionMatch
"""
self.AdditionalVersionWeights = None
self.AddtionVersionMatchs = None
def _deserialize(self, params):
if params.get("AdditionalVersionWeights") is not None:
self.AdditionalVersionWeights = []
for item in params.get("AdditionalVersionWeights"):
obj = VersionWeight()
obj._deserialize(item)
self.AdditionalVersionWeights.append(obj)
if params.get("AddtionVersionMatchs") is not None:
self.AddtionVersionMatchs = []
for item in params.get("AddtionVersionMatchs"):
obj = VersionMatch()
obj._deserialize(item)
self.AddtionVersionMatchs.append(obj)
class StatusReason(AbstractModel):
"""状态原因描述
"""
def __init__(self):
"""
:param ErrorCode: 错误码
:type ErrorCode: str
:param ErrorMessage: 错误描述
:type ErrorMessage: str
"""
self.ErrorCode = None
self.ErrorMessage = None
def _deserialize(self, params):
self.ErrorCode = params.get("ErrorCode")
self.ErrorMessage = params.get("ErrorMessage")
class Tag(AbstractModel):
"""函数标签
"""
def __init__(self):
"""
:param Key: 标签的key
:type Key: str
:param Value: 标签的value
:type Value: str
"""
self.Key = None
self.Value = None
def _deserialize(self, params):
self.Key = params.get("Key")
self.Value = params.get("Value")
class Trigger(AbstractModel):
"""触发器类型
"""
def __init__(self):
"""
:param ModTime: 触发器最后修改时间
:type ModTime: str
:param Type: 触发器类型
:type Type: str
:param TriggerDesc: 触发器详细配置
:type TriggerDesc: str
:param TriggerName: 触发器名称
:type TriggerName: str
:param AddTime: 触发器创建时间
:type AddTime: str
:param Enable: 使能开关
:type Enable: int
:param CustomArgument: 客户自定义参数
:type CustomArgument: str
:param AvailableStatus: 触发器状态
:type AvailableStatus: str
:param ResourceId: 触发器最小资源ID
:type ResourceId: str
:param BindStatus: 触发器和云函数绑定状态
:type BindStatus: str
:param TriggerAttribute: 触发器类型,双向表示两侧控制台均可操作,单向表示SCF控制台单向创建
:type TriggerAttribute: str
"""
self.ModTime = None
self.Type = None
self.TriggerDesc = None
self.TriggerName = None
self.AddTime = None
self.Enable = None
self.CustomArgument = None
self.AvailableStatus = None
self.ResourceId = None
self.BindStatus = None
self.TriggerAttribute = None
def _deserialize(self, params):
self.ModTime = params.get("ModTime")
self.Type = params.get("Type")
self.TriggerDesc = params.get("TriggerDesc")
self.TriggerName = params.get("TriggerName")
self.AddTime = params.get("AddTime")
self.Enable = params.get("Enable")
self.CustomArgument = params.get("CustomArgument")
self.AvailableStatus = params.get("AvailableStatus")
self.ResourceId = params.get("ResourceId")
self.BindStatus = params.get("BindStatus")
self.TriggerAttribute = params.get("TriggerAttribute")
class TriggerInfo(AbstractModel):
"""触发器信息
"""
def __init__(self):
"""
:param Enable: 使能开关
:type Enable: int
:param Qualifier: 函数版本或别名
:type Qualifier: str
:param TriggerName: 触发器名称
:type TriggerName: str
:param Type: 触发器类型
:type Type: str
:param TriggerDesc: 触发器详细配置
:type TriggerDesc: str
:param AvailableStatus: 触发器是否可用
:type AvailableStatus: str
:param CustomArgument: 客户自定义参数
注意:此字段可能返回 null,表示取不到有效值。
:type CustomArgument: str
:param AddTime: 触发器创建时间
:type AddTime: str
:param ModTime: 触发器最后修改时间
:type ModTime: str
:param ResourceId: 触发器最小资源ID
:type ResourceId: str
:param BindStatus: 触发器和云函数绑定状态
:type BindStatus: str
:param TriggerAttribute: 触发器类型,双向表示两侧控制台均可操作,单向表示SCF控制台单向创建
:type TriggerAttribute: str
"""
self.Enable = None
self.Qualifier = None
self.TriggerName = None
self.Type = None
self.TriggerDesc = None
self.AvailableStatus = None
self.CustomArgument = None
self.AddTime = None
self.ModTime = None
self.ResourceId = None
self.BindStatus = None
self.TriggerAttribute = None
def _deserialize(self, params):
self.Enable = params.get("Enable")
self.Qualifier = params.get("Qualifier")
self.TriggerName = params.get("TriggerName")
self.Type = params.get("Type")
self.TriggerDesc = params.get("TriggerDesc")
self.AvailableStatus = params.get("AvailableStatus")
self.CustomArgument = params.get("CustomArgument")
self.AddTime = params.get("AddTime")
self.ModTime = params.get("ModTime")
self.ResourceId = params.get("ResourceId")
self.BindStatus = params.get("BindStatus")
self.TriggerAttribute = params.get("TriggerAttribute")
class UpdateAliasRequest(AbstractModel):
"""UpdateAlias请求参数结构体
"""
def __init__(self):
"""
:param FunctionName: 函数名称
:type FunctionName: str
:param Name: 别名的名称
:type Name: str
:param FunctionVersion: 别名指向的主版本
:type FunctionVersion: str
:param Namespace: 函数所在的命名空间
:type Namespace: str
:param RoutingConfig: 别名的路由信息,需要为别名指定附加版本时,必须提供此参数
:type RoutingConfig: :class:`tencentcloud.scf.v20180416.models.RoutingConfig`
:param Description: 别名的描述
:type Description: str
"""
self.FunctionName = None
self.Name = None
self.FunctionVersion = None
self.Namespace = None
self.RoutingConfig = None
self.Description = None
def _deserialize(self, params):
self.FunctionName = params.get("FunctionName")
self.Name = params.get("Name")
self.FunctionVersion = params.get("FunctionVersion")
self.Namespace = params.get("Namespace")
if params.get("RoutingConfig") is not None:
self.RoutingConfig = RoutingConfig()
self.RoutingConfig._deserialize(params.get("RoutingConfig"))
self.Description = params.get("Description")
class UpdateAliasResponse(AbstractModel):
"""UpdateAlias返回参数结构体
"""
def __init__(self):
"""
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.RequestId = None
def _deserialize(self, params):
self.RequestId = params.get("RequestId")
class UpdateFunctionCodeRequest(AbstractModel):
"""UpdateFunctionCode请求参数结构体
"""
def __init__(self):
"""
:param Handler: 函数处理方法名称。名称格式支持“文件名称.函数名称”形式(java 名称格式 包名.类名::方法名),文件名称和函数名称之间以"."隔开,文件名称和函数名称要求以字母开始和结尾,中间允许插入字母、数字、下划线和连接符,文件名称和函数名字的长度要求 2-60 个字符
:type Handler: str
:param FunctionName: 要修改的函数名称
:type FunctionName: str
:param CosBucketName: 对象存储桶名称
:type CosBucketName: str
:param CosObjectName: 对象存储对象路径
:type CosObjectName: str
:param ZipFile: 包含函数代码文件及其依赖项的 zip 格式文件,使用该接口时要求将 zip 文件的内容转成 base64 编码,最大支持20M
:type ZipFile: str
:param Namespace: 函数所属命名空间
:type Namespace: str
:param CosBucketRegion: 对象存储的地域,注:北京分为ap-beijing和ap-beijing-1
:type CosBucketRegion: str
:param EnvId: 函数所属环境
:type EnvId: str
:param Publish: 在更新时是否同步发布新版本,默认为:FALSE,不发布
:type Publish: str
:param Code: 函数代码
:type Code: :class:`tencentcloud.scf.v20180416.models.Code`
:param CodeSource: 代码来源方式,支持 ZipFile, Cos, Inline 之一
:type CodeSource: str
"""
self.Handler = None
self.FunctionName = None
self.CosBucketName = None
self.CosObjectName = None
self.ZipFile = None
self.Namespace = None
self.CosBucketRegion = None
self.EnvId = None
self.Publish = None
self.Code = None
self.CodeSource = None
def _deserialize(self, params):
self.Handler = params.get("Handler")
self.FunctionName = params.get("FunctionName")
self.CosBucketName = params.get("CosBucketName")
self.CosObjectName = params.get("CosObjectName")
self.ZipFile = params.get("ZipFile")
self.Namespace = params.get("Namespace")
self.CosBucketRegion = params.get("CosBucketRegion")
self.EnvId = params.get("EnvId")
self.Publish = params.get("Publish")
if params.get("Code") is not None:
self.Code = Code()
self.Code._deserialize(params.get("Code"))
self.CodeSource = params.get("CodeSource")
class UpdateFunctionCodeResponse(AbstractModel):
"""UpdateFunctionCode返回参数结构体
"""
def __init__(self):
"""
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.RequestId = None
def _deserialize(self, params):
self.RequestId = params.get("RequestId")
class UpdateFunctionConfigurationRequest(AbstractModel):
"""UpdateFunctionConfiguration请求参数结构体
"""
def __init__(self):
"""
:param FunctionName: 要修改的函数名称
:type FunctionName: str
:param Description: 函数描述。最大支持 1000 个英文字母、数字、空格、逗号和英文句号,支持中文
:type Description: str
:param MemorySize: 函数运行时内存大小,默认为 128M,可选范围 64M、128M-3072M,以 128MB 为阶梯。
:type MemorySize: int
:param Timeout: 函数最长执行时间,单位为秒,可选值范围 1-900 秒,默认为 3 秒
:type Timeout: int
:param Runtime: 函数运行环境,目前仅支持 Python2.7,Python3.6,Nodejs6.10,Nodejs8.9,Nodejs10.15,Nodejs12.16, PHP5, PHP7,Go1 , Java8和CustomRuntime
:type Runtime: str
:param Environment: 函数的环境变量
:type Environment: :class:`tencentcloud.scf.v20180416.models.Environment`
:param Namespace: 函数所属命名空间
:type Namespace: str
:param VpcConfig: 函数的私有网络配置
:type VpcConfig: :class:`tencentcloud.scf.v20180416.models.VpcConfig`
:param Role: 函数绑定的角色
:type Role: str
:param ClsLogsetId: 日志投递到的cls日志集ID
:type ClsLogsetId: str
:param ClsTopicId: 日志投递到的cls Topic ID
:type ClsTopicId: str
:param Publish: 在更新时是否同步发布新版本,默认为:FALSE,不发布新版本
:type Publish: str
:param L5Enable: 是否开启L5访问能力,TRUE 为开启,FALSE为关闭
:type L5Enable: str
:param Layers: 函数要关联的层版本列表,层的版本会按照在列表中顺序依次覆盖。
:type Layers: list of LayerVersionSimple
:param DeadLetterConfig: 函数关联的死信队列信息
:type DeadLetterConfig: :class:`tencentcloud.scf.v20180416.models.DeadLetterConfig`
:param PublicNetConfig: 公网访问配置
:type PublicNetConfig: :class:`tencentcloud.scf.v20180416.models.PublicNetConfigIn`
:param CfsConfig: 文件系统配置入参,用于云函数绑定CFS文件系统
:type CfsConfig: :class:`tencentcloud.scf.v20180416.models.CfsConfig`
:param InitTimeout: 函数初始化执行超时时间,默认15秒
:type InitTimeout: int
"""
self.FunctionName = None
self.Description = None
self.MemorySize = None
self.Timeout = None
self.Runtime = None
self.Environment = None
self.Namespace = None
self.VpcConfig = None
self.Role = None
self.ClsLogsetId = None
self.ClsTopicId = None
self.Publish = None
self.L5Enable = None
self.Layers = None
self.DeadLetterConfig = None
self.PublicNetConfig = None
self.CfsConfig = None
self.InitTimeout = None
def _deserialize(self, params):
self.FunctionName = params.get("FunctionName")
self.Description = params.get("Description")
self.MemorySize = params.get("MemorySize")
self.Timeout = params.get("Timeout")
self.Runtime = params.get("Runtime")
if params.get("Environment") is not None:
self.Environment = Environment()
self.Environment._deserialize(params.get("Environment"))
self.Namespace = params.get("Namespace")
if params.get("VpcConfig") is not None:
self.VpcConfig = VpcConfig()
self.VpcConfig._deserialize(params.get("VpcConfig"))
self.Role = params.get("Role")
self.ClsLogsetId = params.get("ClsLogsetId")
self.ClsTopicId = params.get("ClsTopicId")
self.Publish = params.get("Publish")
self.L5Enable = params.get("L5Enable")
if params.get("Layers") is not None:
self.Layers = []
for item in params.get("Layers"):
obj = LayerVersionSimple()
obj._deserialize(item)
self.Layers.append(obj)
if params.get("DeadLetterConfig") is not None:
self.DeadLetterConfig = DeadLetterConfig()
self.DeadLetterConfig._deserialize(params.get("DeadLetterConfig"))
if params.get("PublicNetConfig") is not None:
self.PublicNetConfig = PublicNetConfigIn()
self.PublicNetConfig._deserialize(params.get("PublicNetConfig"))
if params.get("CfsConfig") is not None:
self.CfsConfig = CfsConfig()
self.CfsConfig._deserialize(params.get("CfsConfig"))
self.InitTimeout = params.get("InitTimeout")
class UpdateFunctionConfigurationResponse(AbstractModel):
"""UpdateFunctionConfiguration返回参数结构体
"""
def __init__(self):
"""
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.RequestId = None
def _deserialize(self, params):
self.RequestId = params.get("RequestId")
class UpdateNamespaceRequest(AbstractModel):
"""UpdateNamespace请求参数结构体
"""
def __init__(self):
"""
:param Namespace: 命名空间名称
:type Namespace: str
:param Description: 命名空间描述
:type Description: str
"""
self.Namespace = None
self.Description = None
def _deserialize(self, params):
self.Namespace = params.get("Namespace")
self.Description = params.get("Description")
class UpdateNamespaceResponse(AbstractModel):
"""UpdateNamespace返回参数结构体
"""
def __init__(self):
"""
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.RequestId = None
def _deserialize(self, params):
self.RequestId = params.get("RequestId")
class Variable(AbstractModel):
"""变量参数
"""
def __init__(self):
"""
:param Key: 变量的名称
:type Key: str
:param Value: 变量的值
:type Value: str
"""
self.Key = None
self.Value = None
def _deserialize(self, params):
self.Key = params.get("Key")
self.Value = params.get("Value")
class VersionMatch(AbstractModel):
"""带有匹配规则的函数版本
"""
def __init__(self):
"""
:param Version: 函数版本名称
:type Version: str
:param Key: 匹配规则的key,调用时通过传key来匹配规则路由到指定版本
header方式:
key填写"invoke.headers.User",并在 invoke 调用函数时传参 RoutingKey:{"User":"value"}规则匹配调用
:type Key: str
:param Method: 匹配方式。取值范围:
range:范围匹配
exact:字符串精确匹配
:type Method: str
:param Expression: range 匹配规则要求:
需要为开区间或闭区间描述 (a,b) [a,b],其中 a、b 均为整数
exact 匹配规则要求:
字符串精确匹配
:type Expression: str
"""
self.Version = None
self.Key = None
self.Method = None
self.Expression = None
def _deserialize(self, params):
self.Version = params.get("Version")
self.Key = params.get("Key")
self.Method = params.get("Method")
self.Expression = params.get("Expression")
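# Illustrative example (values made up, not from the API docs): a VersionMatch
# entry as it might arrive inside a RoutingConfig, routing invocations whose
# RoutingKey {"User": "..."} exactly matches the expression to version "2":
#   {"Version": "2", "Key": "invoke.headers.User",
#    "Method": "exact", "Expression": "beta-tester"}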
class VersionProvisionedConcurrencyInfo(AbstractModel):
"""函数版本的预置并发信息,包括设置预置并发数、已完成预置的并发数和预置任务状态。
"""
def __init__(self):
"""
:param AllocatedProvisionedConcurrencyNum: 设置的预置并发数。
:type AllocatedProvisionedConcurrencyNum: int
:param AvailableProvisionedConcurrencyNum: 当前已完成预置的并发数。
:type AvailableProvisionedConcurrencyNum: int
:param Status: 预置任务状态,Done表示已完成,InProgress表示进行中,Failed表示部分或全部失败。
:type Status: str
:param StatusReason: 对预置任务状态Status的说明。
:type StatusReason: str
:param Qualifier: 函数版本号
:type Qualifier: str
"""
self.AllocatedProvisionedConcurrencyNum = None
self.AvailableProvisionedConcurrencyNum = None
self.Status = None
self.StatusReason = None
self.Qualifier = None
def _deserialize(self, params):
self.AllocatedProvisionedConcurrencyNum = params.get("AllocatedProvisionedConcurrencyNum")
self.AvailableProvisionedConcurrencyNum = params.get("AvailableProvisionedConcurrencyNum")
self.Status = params.get("Status")
self.StatusReason = params.get("StatusReason")
self.Qualifier = params.get("Qualifier")
class VersionWeight(AbstractModel):
"""带有权重的函数版本
"""
def __init__(self):
"""
:param Version: 函数版本名称
:type Version: str
:param Weight: 该版本的权重
:type Weight: float
"""
self.Version = None
self.Weight = None
def _deserialize(self, params):
self.Version = params.get("Version")
self.Weight = params.get("Weight")
class VpcConfig(AbstractModel):
"""私有网络参数配置
"""
def __init__(self):
"""
:param VpcId: 私有网络的 Id
:type VpcId: str
:param SubnetId: 子网的 Id
:type SubnetId: str
"""
self.VpcId = None
self.SubnetId = None
def _deserialize(self, params):
self.VpcId = params.get("VpcId")
self.SubnetId = params.get("SubnetId")
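# Minimal usage sketch (not part of the generated SDK; the parameter values
# below are made up for illustration). It shows how these model classes
# round-trip plain dicts through _deserialize(): request models are filled
# from a dict of API parameters, and response models turn nested dicts such
# as "Result" into their corresponding model instances.
if __name__ == "__main__":
    req = InvokeRequest()
    req._deserialize({
        "FunctionName": "my-function",        # hypothetical function name
        "InvocationType": "RequestResponse",  # synchronous invocation
        "Namespace": "default",
        "LogType": "Tail",
    })
    print(req.FunctionName, req.InvocationType)

    resp = InvokeResponse()
    resp._deserialize({
        "Result": {"RetMsg": "ok", "Duration": 12.3, "InvokeResult": 0},
        "RequestId": "00000000-0000-0000-0000-000000000000",
    })
    # The nested "Result" dict is now a Result instance.
    print(resp.Result.RetMsg, resp.RequestId)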
# --- separate source file: FastSRM identifiability figure script ---
from glob import glob
import matplotlib.pyplot as plt
import numpy as np
import os
from os.path import join
from fastsrm.utils import error_source
os.makedirs("../figures", exist_ok=True)
rc = {
"pdf.fonttype": 42,
"text.usetex": True,
"font.size": 16,
"xtick.labelsize": 16,
"ytick.labelsize": 16,
"text.latex.preview": True,
"font.family": "serif",
}
plt.rcParams.update(rc)
shared_dir = (
"../experiments/results/identifiability/shared/"
)
def standardize(X):
    # Center and scale each row; keepdims keeps the broadcast shapes aligned,
    # and the standardized array (not the input) is returned.
    X_ = X - np.mean(X, axis=1, keepdims=True)
    X_ = X_ / np.std(X_, axis=1, keepdims=True)
    return X_
errors = []
for algo in ["brainiak", "fast"]:
errors_ = []
for n_repeat in range(9):
X, Y = (
join(
shared_dir,
"%i-%i-sherlock-10-%s.npy" % (n_repeat, split, algo),
)
for split in (0, 1)
)
X = np.load(X)
Y = np.load(Y)
errors_.append(np.mean(1 - error_source(X, Y)))
errors.append(errors_)
errors = np.array(errors)
plt.figure(figsize=(3, 3))
plt.scatter(errors[0], errors[1])
plt.plot(np.arange(0, 2), np.arange(0, 2), color="black")
plt.xlim(np.min(errors) - 0.1, np.max(errors) + 0.1)
plt.ylim(np.min(errors) - 0.1, np.max(errors) + 0.1)
plt.xlabel("Stability index \n (General covariance)")
plt.ylabel("Stability index \n (Diagonal covariance)")
plt.savefig("../figures/identifiability.pdf", bbox_inches="tight")
# --- separate source file: tests for the comparator package ---
import mock
import pytest
import types
from spackl.db import Postgres, QueryResult
from sqlalchemy.engine import ResultProxy
from comparator import comps
from comparator import SourcePair, Comparator, ComparatorSet
from comparator.compare import ComparatorResult
from comparator.exceptions import InvalidCompSetException, QueryFormatError
query = 'select * from nowhere'
other_query = 'select count(*) from somewhere'
left_query_results = [{'a': 1, 'b': 2, 'c': 3}, {'a': 4, 'b': 5, 'c': 6}]
right_query_results = [{'a': 1, 'b': 2, 'c': 3}, {'a': 4, 'b': 5, 'c': 6}]
mismatch_right_query_results = [{'a': 1, 'b': 2, 'c': 3}, {'a': 4, 'b': 5, 'c': 6}, {'a': 7, 'b': 8, 'c': 9}]
string_query_results = [{'a': 'one', 'b': 'two', 'c': 'three'}, {'a': 'four', 'b': 'five', 'c': 'six'}]
unicode_query_results = [{u'a': u'one', u'b': u'two', u'c': u'three'}, {u'a': u'four', u'b': u'five', u'c': u'six'}]
def get_mock_query_result(values):
mock_result = mock.MagicMock(spec=ResultProxy)
mock_result.__iter__.return_value = values
return QueryResult(mock_result)
left_results = get_mock_query_result(left_query_results)
right_results = get_mock_query_result(right_query_results)
mismatch_right_results = get_mock_query_result(mismatch_right_query_results)
string_results = get_mock_query_result(string_query_results)
unicode_results = get_mock_query_result(unicode_query_results)
expected_default_result = ('basic_comp', True)
expected_multiple_result = [
ComparatorResult('test', 'first_eq_comp', True),
ComparatorResult('test', 'len_comp', True)]
expected_mismatch_result = [
ComparatorResult('test', 'first_eq_comp', True),
ComparatorResult('test', 'len_comp', False)]
def test_source_pair():
l, r = Postgres(), Postgres()
sp = SourcePair(l, query, r)
assert sp._lquery == sp._rquery
assert sp.empty
assert sp.query_results == (None, None)
assert sp.lresult is None
assert sp.rresult is None
sp2 = SourcePair(l, query)
assert sp2._lquery == query
assert sp2._right is None
assert sp2._rquery is None
assert sp2.query_results == (None, )
with pytest.raises(TypeError):
SourcePair(l, r, query)
with pytest.raises(TypeError):
SourcePair(l, query, r, 1234)
def test_source_pair_queries():
l, r = Postgres(), Postgres()
rquery = 'select * from somewhere where id in {{ a }}'
sp = SourcePair(l, query, r, rquery)
with mock.patch.object(sp._left, 'query', return_value=left_results):
with mock.patch.object(sp._right, 'query', return_value=right_results):
with mock.patch.object(sp, '_format_rquery') as mock_fmt:
sp.get_query_results()
assert mock_fmt.call_count == 1
sp._lresult = left_results
formatted = sp._format_rquery()
assert formatted == 'select * from somewhere where id in (1, 4)'
sp._rquery = 'select * from somewhere where id in {{ notreal }}'
with pytest.raises(QueryFormatError):
sp._format_rquery()
sp._rquery = rquery
sp._lresult = string_results
formatted = sp._format_rquery()
assert formatted == "select * from somewhere where id in ('one', 'four')"
sp._lresult = unicode_results
formatted = sp._format_rquery()
assert formatted == "select * from somewhere where id in ('one', 'four')"
def test_comparator():
sp1 = SourcePair(Postgres(), query, Postgres())
sp2 = SourcePair(Postgres(), query, Postgres(), other_query)
with pytest.raises(TypeError):
Comparator(sp1)
c = Comparator(sp=sp1)
assert c._sp._lquery == query
assert c._sp._rquery == query
assert c._comps == [comps.COMPS.get(comps.DEFAULT_COMP)]
assert c.name is None
c = Comparator(sp=sp2, comps=comps.FIRST_COMP, name='Shirley')
assert c._sp._lquery == query
assert c._sp._rquery == other_query
assert c._comps == [comps.COMPS.get(comps.FIRST_COMP)]
assert c.name == 'Shirley'
assert c.query_results == (None, None)
assert c.results == list()
def test_compare_defaults():
sp = SourcePair(Postgres(), query, Postgres())
c = Comparator(sp=sp)
with mock.patch.object(c._sp._left, 'query', return_value=left_results):
with mock.patch.object(c._sp._right, 'query', return_value=right_results):
assert c.get_query_results() == (left_results, right_results)
assert isinstance(c.compare(), types.GeneratorType)
res = c.run_comparisons()[0]
assert (res.name, res.result) == expected_default_result
assert c._complete is True
ln = len(c.results)
c.run_comparisons()
assert len(c.results) == ln
for _ in c.compare():
pass
assert len(c.results) == ln
c._complete = False
c.run_comparisons()
assert len(c.results) == ln * 2
c._complete = True
c.clear()
assert c._complete is False
assert c.query_results == (None, None)
assert c.results == list()
def test_left_only_compare():
sp = SourcePair(Postgres(), query)
c = Comparator(sp=sp, comps=lambda x: bool(x[0]))
with mock.patch.object(c._sp._left, 'query', return_value=left_results):
assert c.get_query_results() == (left_results, )
expected_comp_result = ('lambda x: bool(x[0]))', True)
res = c.run_comparisons()[0]
assert (res.name, res.result) == expected_comp_result
assert c._complete is True
def test_comparison_result():
pass
def test_compare_multiple():
sp = SourcePair(Postgres(), query, Postgres())
comparisons = [comps.FIRST_COMP, comps.LEN_COMP]
c = Comparator(sp=sp, comps=comparisons)
with mock.patch.object(c._sp._left, 'query', return_value=left_results):
with mock.patch.object(c._sp._right, 'query', return_value=right_results):
assert c.get_query_results() == (left_results, right_results)
assert isinstance(c.lresult, QueryResult)
assert str(c.lresult.a) == '(1, 4)'
assert isinstance(c.rresult, QueryResult)
assert str(c.lresult.b) == '(2, 5)'
for i, result in enumerate(c.compare()):
assert result == expected_multiple_result[i]
def test_compare_mismatch():
sp = SourcePair(Postgres(), query, Postgres())
comparisons = [comps.FIRST_COMP, comps.LEN_COMP]
c = Comparator(sp=sp, comps=comparisons)
with mock.patch.object(c._sp._left, 'query', return_value=left_results):
with mock.patch.object(c._sp._right, 'query', return_value=mismatch_right_results):
res = c.run_comparisons()
assert c.query_results == (left_results, mismatch_right_results)
assert res == expected_mismatch_result
def test_left_right_queries():
sp1 = SourcePair(Postgres(), query, Postgres())
sp2 = SourcePair(Postgres(), query, Postgres(), other_query)
c1 = Comparator(sp=sp1)
assert c1._sp._lquery == query
assert c1._sp._rquery == query
c = Comparator(sp=sp2)
assert c._sp._lquery == query
assert c._sp._rquery == other_query
def test_results_run():
sp = SourcePair(Postgres(), query, Postgres())
c = Comparator(sp=sp)
with mock.patch.object(c._sp._left, 'query', return_value=left_results) as lq:
with mock.patch.object(c._sp._right, 'query', return_value=right_results) as rq:
res = c.get_query_results(run=True)
c.get_query_results(run=True)
assert lq.call_count == 1
assert rq.call_count == 1
assert res == (left_results, right_results)
def test_no_comps():
sp = SourcePair(Postgres(), query, Postgres())
c = Comparator(sp=sp, comps='notarealcomparison')
assert c._comps == [comps.COMPS.get(comps.DEFAULT_COMP)]
def test_custom_comparison():
def custom_comp(left, right):
return len(left) < len(right)
expected_result = [ComparatorResult('test', 'custom_comp', True),
ComparatorResult('test', 'lambda x, y: len(x) < len(y)', True)]
sp = SourcePair(Postgres(), query, Postgres())
c = Comparator(sp=sp, comps=[custom_comp, lambda x, y: len(x) < len(y)])
with mock.patch.object(c._sp._left, 'query', return_value=left_results):
with mock.patch.object(c._sp._right, 'query', return_value=mismatch_right_results):
res = c.run_comparisons()
assert res == expected_result
def test_non_bool_comparison():
def custom_comp(left, right):
return len(right) - len(left)
expected_result = [ComparatorResult('test', 'custom_comp', 1)]
sp = SourcePair(Postgres(), query, Postgres())
c = Comparator(sp=sp, comps=custom_comp, name='test')
with mock.patch.object(c._sp._left, 'query', return_value=left_results):
with mock.patch.object(c._sp._right, 'query', return_value=mismatch_right_results):
res = c.run_comparisons()
assert res == expected_result
comp = res[0]
assert str(comp) == 'custom_comp : 1'
assert comp.comparator_name == 'test'
assert comp.name == 'custom_comp'
assert comp['name'] == 'custom_comp'
assert bool(comp)
assert comp > 0
assert comp >= 0
assert comp < 2
assert comp <= 2
assert comp == 1
assert comp != 0
assert comp.result == 1
assert comp['result'] == 1
assert comp[0] == 'custom_comp'
assert comp[1] == 1
with pytest.raises(IndexError):
comp[2]
with pytest.raises(KeyError):
comp['cheesecake']
def test_comparatorset():
l, r = Postgres(), Postgres()
sp1 = SourcePair(l, query, r)
sp2 = SourcePair(l, query, r, other_query)
with pytest.raises(TypeError):
ComparatorSet()
with pytest.raises(InvalidCompSetException):
ComparatorSet('bananas')
with pytest.raises(InvalidCompSetException):
ComparatorSet(sp1, sp1)
with pytest.raises(InvalidCompSetException):
ComparatorSet([sp1, sp1], comps='not_a_callable')
with pytest.raises(InvalidCompSetException):
ComparatorSet([sp1, sp1], names='too_short')
cs = ComparatorSet([sp1, sp1])
for c in cs:
assert isinstance(c, Comparator)
assert c._sp._left is l
assert c._sp._right is r
assert c._sp._lquery == query
assert c._sp._rquery == query
assert c._comps == [comps.COMPS.get(comps.DEFAULT_COMP)]
assert c.name is None
with pytest.raises(InvalidCompSetException):
ComparatorSet(
[sp2, sp2],
comps=[comps.LEN_COMP, comps.FIRST_COMP],
names='Shirley')
names = ['Shirley', 'Eugene']
with pytest.raises(InvalidCompSetException):
ComparatorSet(
[sp2, sp2],
comps=comps.COMPS.get(comps.LEN_COMP),
names=names)
with pytest.raises(InvalidCompSetException):
ComparatorSet(
[sp2, sp2],
comps=[comps.LEN_COMP, 'nope'],
names=names)
cmps = [[comps.LEN_COMP], [comps.FIRST_COMP]]
cs = ComparatorSet(
[sp2, sp2],
comps=cmps,
names=names)
for i, c in enumerate(cs):
assert c._sp._left is l
assert c._sp._right is r
assert c._sp._lquery == query
assert c._sp._rquery == other_query
assert c._comps == [comps.COMPS.get(cmps[i][0])]
assert c.name == names[i]
assert cs[0]
assert cs[1]
with pytest.raises(IndexError):
cs[2]
with pytest.raises(TypeError):
cs['what']
def test_comparatorset_from_dict():
l, r = Postgres(), Postgres()
with pytest.raises(TypeError):
ComparatorSet.from_dict()
with pytest.raises(InvalidCompSetException):
ComparatorSet.from_dict('what', l, r)
cs = ComparatorSet.from_dict({'lquery': query, 'rquery': other_query}, l, r)
assert isinstance(cs, ComparatorSet)
sp = SourcePair(l, query, r, other_query)
d1 = {'sp': sp}
d2 = {'name': 'test', 'lquery': query, 'rquery': other_query, 'comps': comps.LEN_COMP}
cs = ComparatorSet.from_dict([d1, d2], l, r, default_comp=comps.FIRST_COMP)
for c in cs:
assert c._sp._left is l
assert c._sp._right is r
assert c._sp._lquery == query
assert c._sp._rquery == other_query
assert cs[0].name is None
assert cs[1].name == 'test'
assert cs[0]._comps == [comps.COMPS.get(comps.FIRST_COMP)]
assert cs[1]._comps == [comps.COMPS.get(comps.LEN_COMP)]
with pytest.raises(IndexError):
cs[2]
|
StarcoderdataPython
|
9601238
|
from setuptools import setup
setup(
name='clean-folder',
version='1.0.0',
description='Script sorting tree folders',
url='https://github.com/Keshasan/clean-folder',
author='<NAME>',
author_email='<EMAIL>',
license='',
entry_points={'console_scripts': ['clean-folder = clean_folder.clean_folder:clean_folder_func']}
)
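# --- Hedged usage sketch (illustrative, not part of the original setup.py) ---
# pip install .      # installs the 'clean-folder' console script
# clean-folder       # runs clean_folder.clean_folder:clean_folder_func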
|
StarcoderdataPython
|
3496712
|
<reponame>Leo-xxx/DenseNAS
from tools.collections import AttrDict
__C = AttrDict()
cfg = __C
__C.net_config="""[[16, 16], 'mbconv_k3_t1', [], 0, 1]|
[[16, 24], 'mbconv_k5_t3', ['mbconv_k5_t3', 'mbconv_k3_t3'], 2, 2]|
[[24, 48], 'mbconv_k5_t6', [], 0, 2]|
[[48, 80], 'mbconv_k5_t6', ['mbconv_k7_t3', 'mbconv_k5_t3', 'mbconv_k3_t3'], 3, 2]|
[[80, 112], 'mbconv_k3_t3', ['mbconv_k3_t3'], 1, 1]|
[[112, 160], 'mbconv_k7_t6', ['mbconv_k7_t3', 'mbconv_k7_t3'], 2, 2]|
[[160, 352], 'mbconv_k5_t6', ['mbconv_k3_t3'], 1, 1]|
[[352, 416], 'mbconv_k3_t3', [], 0, 1]|
[[416, 480], 'mbconv_k3_t3', [], 0, 1]"""
__C.train_params=AttrDict()
__C.train_params.batch_size=256
__C.train_params.num_workers=8
__C.optim=AttrDict()
__C.optim.last_dim=1728
__C.optim.init_dim=16
__C.optim.bn_momentum=0.1
__C.optim.bn_eps=0.001
__C.data=AttrDict()
__C.data.dataset='imagenet' # cifar10 imagenet
__C.data.train_data_type='lmdb'
__C.data.val_data_type='img'
__C.data.patch_dataset=False
__C.data.num_examples=1281167
__C.data.input_size=(3,224,224)
|
StarcoderdataPython
|
1733182
|
<gh_stars>0
#!/usr/bin/python
# simple python socket sender
import socket
import time
HOST = "localhost" # The remote host
PORT = 8888 # The same port as used by the server
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect((HOST, PORT))
print "Enter 'q' to quit"
command = ""
while (command != "q"):
command = raw_input("alarmClock>> ")
print command
if command != "q":
s.sendall(command + "\n")
print (s.recv(1024))
s.close()
|
StarcoderdataPython
|
9794224
|
<reponame>antopen/alipay-sdk-python-all<gh_stars>100-1000
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
from alipay.aop.api.domain.TransferCredential import TransferCredential
from alipay.aop.api.domain.TransferAddressInfo import TransferAddressInfo
from alipay.aop.api.domain.TransferUserName import TransferUserName
class TransferUser(object):
def __init__(self):
self._birth_date = None
self._credential = None
self._nationality = None
self._user_address = None
self._user_email = None
self._user_id = None
self._user_name = None
self._user_phone_no = None
@property
def birth_date(self):
return self._birth_date
@birth_date.setter
def birth_date(self, value):
self._birth_date = value
@property
def credential(self):
return self._credential
@credential.setter
def credential(self, value):
if isinstance(value, TransferCredential):
self._credential = value
else:
self._credential = TransferCredential.from_alipay_dict(value)
@property
def nationality(self):
return self._nationality
@nationality.setter
def nationality(self, value):
self._nationality = value
@property
def user_address(self):
return self._user_address
@user_address.setter
def user_address(self, value):
if isinstance(value, TransferAddressInfo):
self._user_address = value
else:
self._user_address = TransferAddressInfo.from_alipay_dict(value)
@property
def user_email(self):
return self._user_email
@user_email.setter
def user_email(self, value):
self._user_email = value
@property
def user_id(self):
return self._user_id
@user_id.setter
def user_id(self, value):
self._user_id = value
@property
def user_name(self):
return self._user_name
@user_name.setter
def user_name(self, value):
if isinstance(value, TransferUserName):
self._user_name = value
else:
self._user_name = TransferUserName.from_alipay_dict(value)
@property
def user_phone_no(self):
return self._user_phone_no
@user_phone_no.setter
def user_phone_no(self, value):
self._user_phone_no = value
def to_alipay_dict(self):
params = dict()
if self.birth_date:
if hasattr(self.birth_date, 'to_alipay_dict'):
params['birth_date'] = self.birth_date.to_alipay_dict()
else:
params['birth_date'] = self.birth_date
if self.credential:
if hasattr(self.credential, 'to_alipay_dict'):
params['credential'] = self.credential.to_alipay_dict()
else:
params['credential'] = self.credential
if self.nationality:
if hasattr(self.nationality, 'to_alipay_dict'):
params['nationality'] = self.nationality.to_alipay_dict()
else:
params['nationality'] = self.nationality
if self.user_address:
if hasattr(self.user_address, 'to_alipay_dict'):
params['user_address'] = self.user_address.to_alipay_dict()
else:
params['user_address'] = self.user_address
if self.user_email:
if hasattr(self.user_email, 'to_alipay_dict'):
params['user_email'] = self.user_email.to_alipay_dict()
else:
params['user_email'] = self.user_email
if self.user_id:
if hasattr(self.user_id, 'to_alipay_dict'):
params['user_id'] = self.user_id.to_alipay_dict()
else:
params['user_id'] = self.user_id
if self.user_name:
if hasattr(self.user_name, 'to_alipay_dict'):
params['user_name'] = self.user_name.to_alipay_dict()
else:
params['user_name'] = self.user_name
if self.user_phone_no:
if hasattr(self.user_phone_no, 'to_alipay_dict'):
params['user_phone_no'] = self.user_phone_no.to_alipay_dict()
else:
params['user_phone_no'] = self.user_phone_no
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = TransferUser()
if 'birth_date' in d:
o.birth_date = d['birth_date']
if 'credential' in d:
o.credential = d['credential']
if 'nationality' in d:
o.nationality = d['nationality']
if 'user_address' in d:
o.user_address = d['user_address']
if 'user_email' in d:
o.user_email = d['user_email']
if 'user_id' in d:
o.user_id = d['user_id']
if 'user_name' in d:
o.user_name = d['user_name']
if 'user_phone_no' in d:
o.user_phone_no = d['user_phone_no']
return o
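# --- Hedged usage sketch (illustrative, not part of the original SDK file) ---
# Round-trips a TransferUser through the dict helpers defined above;
# the field values are made up.
# user = TransferUser()
# user.user_id = '2088000000000000'
# user.user_email = 'someone@example.com'
# assert TransferUser.from_alipay_dict(user.to_alipay_dict()).user_id == user.user_id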
|
StarcoderdataPython
|
9734622
|
<filename>www/django/src/templates_django/templates_django/account/__init__.py
default_app_config = "templates_django.account.apps.AccountConfig"
|
StarcoderdataPython
|
11288448
|
<gh_stars>0
import math
from torchnlp.samplers.bptt_sampler import BPTTSampler
class BPTTBatchSampler(object):
"""Samples sequentially a batch of source and target slices of size ``bptt_length``.
Typically, such a sampler, is used for language modeling training with backpropagation through
time (BPTT).
**Reference:**
https://github.com/pytorch/examples/blob/c66593f1699ece14a4a2f4d314f1afb03c6793d9/word_language_model/main.py#L61
Args:
data (iterable): Iterable data.
bptt_length (int): Length of the slice.
batch_size (int): Size of mini-batch.
drop_last (bool): If ``True``, the sampler will drop the last batch if its size would be
less than ``batch_size``.
type_ (str, optional): Type of batch ['source'|'target'] to load where a target batch is one
timestep ahead
Example:
>>> sampler = BPTTBatchSampler(range(100), bptt_length=2, batch_size=3, drop_last=False)
>>> list(sampler)[0] # First Batch
[slice(0, 2, None), slice(34, 36, None), slice(67, 69, None)]
"""
def __init__(self, data, bptt_length, batch_size, drop_last, type_='source'):
self.data = data
self.batch_size = batch_size
self.drop_last = drop_last
# For each row in the batch, we iterate over a chunk of size `chunk_size`
# Our chunks are similar to the columns in this PyTorch example:
# https://github.com/pytorch/examples/blob/c66593f1699ece14a4a2f4d314f1afb03c6793d9/word_language_model/main.py#L61
chunk_sizes = [math.floor(len(data) / batch_size)] * batch_size
# Distribute the remaining elements to some chunks
if not self.drop_last:
remainder = len(data) - sum(chunk_sizes)
for i in range(remainder):
chunk_sizes[i] += 1
self.samplers = [{
'offset': sum(chunk_sizes[:i]),
'sampler': BPTTSampler(range(chunk_sizes[i]), bptt_length, type_=type_)
} for i in range(batch_size)]
def __iter__(self):
# Samplers iterate over chunks similar to:
# https://github.com/pytorch/examples/blob/c66593f1699ece14a4a2f4d314f1afb03c6793d9/word_language_model/main.py#L112
self.iterators = [iter(value['sampler']) for value in self.samplers]
while True:
batch = []
for i, iterator in enumerate(self.iterators):
try:
# Adjust the sampler indices to the offset
offset = self.samplers[i]['offset']
slice_ = next(iterator)
batch.append(slice(slice_.start + offset, slice_.stop + offset))
except StopIteration:
pass
# Samplers are all empty
if (len(batch) == 0):
break
yield batch
def __len__(self):
return len(self.samplers[0]['sampler'])
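# --- Hedged usage sketch (mirrors the docstring example; torchnlp assumed installed) ---
# sampler = BPTTBatchSampler(range(100), bptt_length=2, batch_size=3, drop_last=False)
# first_batch = list(sampler)[0]
# Each element is a slice into the data, one per batch row:
# first_batch == [slice(0, 2, None), slice(34, 36, None), slice(67, 69, None)]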
|
StarcoderdataPython
|
4945068
|
# NsearchQuery
# <NAME>
from NFixedPointQuery import *
from DSGRN.Query.NstableQuery import *
class NsearchgoeQuery:
def __init__(self, database, goe1, goe2 , bounds1, bounds2):
self.database = database
c = database.conn.cursor()
NFP = NFixedPointQuery(database, *bounds1).matches()
if goe1 == '=':
N = len(bounds1)
X1 = NstableQuery(database, N).matches()
X2 = NstableQuery(database, N+1).matches()
inter = set(X1.difference(X2))
MGset = [i for i in inter if i in NFP]
if goe1 == '<':
MGset = list(NFP)
self.MGset = MGset
# diff is all of the MG's with N1 stability in the database
N1 = len(bounds2)
X1 = NstableQuery(database, N1).matches()
X2 = NstableQuery(database, N1+1).matches()
diff = set(X1.difference(X2))
# PGI1 is the set of Parameter Index' assosiated to the Morse Graph inputs
string = 'create temp table C as select * from Signatures where MorseGraphIndex in ({seq})'.format(
seq=','.join(['?']*len(MGset)))
c.execute(string, MGset)
PGI1 = [ row[0] for row in c.execute('select ParameterIndex from C')]
c.execute('drop table C')
# PGIfinal is the set of tuples where first value in tuple is original Parameter Index, second value is the adjacent Parameter index, third is the MG index. Note that the tuples in this set are only those what have an original Parameter node in the bounds AND an adjacent parameter with =N (not >=N) fixed points.
PGIfinal = set()
new = NFixedPointQuery(database, *bounds2).matches()
want = [i for i in new if i in diff]
if goe2 == '=':
for node in PGI1:
adj_nodes = database.parametergraph.adjacencies(node)
sql="create temp table C as select *, " + str(node) + " as ParentPGI" + " from Signatures where ParameterIndex in ({seq})".format(
seq=','.join(['?']*len(adj_nodes)))
c.execute(sql, adj_nodes)
table2 = 'create temp table D as select * from C where MorseGraphIndex in ({seq})'.format(
seq=','.join(['?']*len(want)))
c.execute(table2, want)
edges = set([ (row[2],row[0],row[1]) for row in c.execute('select * from D')])
PGIfinal = PGIfinal.union(edges)
c.execute('drop table C')
c.execute('drop table D')
else:
for node in PGI1:
adj_nodes = database.parametergraph.adjacencies(node)
sql="create temp table C as select *, " + str(node) + " as ParentPGI" + " from Signatures where ParameterIndex in ({seq})".format(
seq=','.join(['?']*len(adj_nodes)))
c.execute(sql, adj_nodes)
table2 = 'create temp table D as select * from C where MorseGraphIndex in ({seq})'.format(
seq=','.join(['?']*len(list(new))))
c.execute(table2, list(new))
edges = set([ (row[2],row[0],row[1]) for row in c.execute('select * from D')])
PGIfinal = PGIfinal.union(edges)
c.execute('drop table C')
c.execute('drop table D')
self.PGIfinal = PGIfinal
def matches(self):
return set(self.PGIfinal)
def stability_of_matches(self):
# Return PGIfinal where the adj parameter node is in bounds.
return self.PGIfinal
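# --- Hedged usage sketch (illustrative only; assumes a DSGRN database object and ---
# --- bounds dictionaries in the format expected by NFixedPointQuery) ---
# q = NsearchgoeQuery(database, '=', '<', bounds1, bounds2)
# matches = q.matches()
# Per the comment above, each match is a tuple of
# (original ParameterIndex, adjacent ParameterIndex, MorseGraphIndex).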
|
StarcoderdataPython
|
8076465
|
<reponame>alexandermerritt/tools
#! /usr/bin/env python
# Report sizes of all maps by procsses in their virtual address space.
import os
import re
re_pid = re.compile('^[0-9]+')
#re_map = re.compile('^([0-9]+|Size|Rss)')
re_map = re.compile('^Size')
def pids():
dirs = os.listdir('/proc/')
dirs2 = []
for d in dirs:
if re_pid.match(d):
dirs2.append(d)
return dirs2
def mapsizes(pid):
sizes = [] # region sizes
pname = '/proc/' + pid + '/smaps'
try:
f = open(pname, 'r')
lines = f.readlines()
except:
return
for line in lines:
line = line.strip()
if re_map.match(line):
# size reported in kB
            sz = int(line.split()[1]) * 1024
sizes.append( sz )
f.close()
return sizes
print('pid nregions pgsize waste')
def analyze(pid,sizes,pgsize):
waste = 0
for sz in sizes:
waste += pgsize - (sz % pgsize)
print(pid + ' ' + str(len(sizes)) + ' '
+ str(pgsize) + ' ' + str(waste))
for pid in pids():
sizes = mapsizes(pid)
if not sizes:
continue
analyze(pid, sizes, 2**12)
analyze(pid, sizes, 2**21)
|
StarcoderdataPython
|
11213493
|
import asyncio
import json
from aiocoap import *
import time
async def main():
uri = "coap://localhost:9100/act-coap"
context = await Context.create_client_context()
request = Message(code=GET, payload="", uri=uri)
response = await context.request(request).response
json_p = json.loads(response.payload.decode('utf-8'))
# request = Message(code=PUT,
# payload=(json.dumps({"state": "execute", "execution_time": 5}).encode('utf-8')),
# uri=uri)
# response = await context.request(request).response
# json_p = json.loads(response.payload.decode('utf-8'))
time.sleep(int(json_p["execution_time"]) + 1)
request = Message(code=GET, payload="", uri=uri)
# print(str(request.opt))
response = await context.request(request).response
json_p = json.loads(response.payload.decode('utf-8'))
print(str(json_p))
if __name__ == "__main__":
asyncio.get_event_loop().run_until_complete(main())
|
StarcoderdataPython
|
8054695
|
<reponame>jimmycheng603/katrain
import math
import threading
from kivy.lang import Builder
from kivy.metrics import dp
from kivy.properties import BooleanProperty, Clock, ListProperty, NumericProperty, StringProperty
from kivy.uix.widget import Widget
from kivymd.app import MDApp
from katrain.gui.theme import Theme
class Graph(Widget):
marker_font_size = NumericProperty(0)
background_image = StringProperty(Theme.GRAPH_TEXTURE)
background_color = ListProperty([1, 1, 1, 1])
highlighted_index = NumericProperty(0)
nodes = ListProperty([])
hidden = BooleanProperty(False)
def __init__(self, **kwargs):
super().__init__(**kwargs)
self._lock = threading.Lock()
self.bind(pos=self.update_graph, size=self.update_graph)
self.redraw_trigger = Clock.create_trigger(self.update_graph, 0.1)
def initialize_from_game(self, root):
self.nodes = [root]
node = root
while node.children:
node = node.ordered_children[0]
self.nodes.append(node)
self.highlighted_index = 0
self.redraw_trigger()
def update_graph(self, *args):
pass
def update_value(self, node):
with self._lock:
self.highlighted_index = index = node.depth
self.nodes.extend([None] * max(0, index - (len(self.nodes) - 1)))
self.nodes[index] = node
if index > 1 and node.parent: # sometimes there are gaps
backfill, bfnode = index - 1, node.parent
while bfnode is not None and self.nodes[backfill] != bfnode:
self.nodes[backfill] = bfnode
backfill -= 1
bfnode = bfnode.parent
if index + 1 < len(self.nodes) and (
node is None or not node.children or self.nodes[index + 1] != node.ordered_children[0]
):
self.nodes = self.nodes[: index + 1] # on branch switching, don't show history from other branch
if index == len(self.nodes) - 1: # possibly just switched branch or the line above triggered
while node.children: # add children back
node = node.ordered_children[0]
self.nodes.append(node)
self.redraw_trigger()
class ScoreGraph(Graph):
show_score = BooleanProperty(True)
show_winrate = BooleanProperty(True)
score_points = ListProperty([])
winrate_points = ListProperty([])
score_dot_pos = ListProperty([0, 0])
winrate_dot_pos = ListProperty([0, 0])
highlight_size = NumericProperty(dp(6))
score_scale = NumericProperty(5)
winrate_scale = NumericProperty(5)
navigate_move = ListProperty([None, 0, 0, 0])
def on_touch_down(self, touch):
if self.collide_point(*touch.pos) and "scroll" not in getattr(touch, "button", ""):
ix, _ = min(enumerate(self.score_points[::2]), key=lambda ix_v: abs(ix_v[1] - touch.x))
self.navigate_move = [
self.nodes[ix],
self.score_points[2 * ix],
self.score_points[2 * ix + 1],
self.winrate_points[2 * ix + 1],
]
else:
self.navigate_move = [None, 0, 0, 0]
def on_touch_move(self, touch):
return self.on_touch_down(touch)
def on_touch_up(self, touch):
if self.collide_point(*touch.pos) and self.navigate_move[0] and "scroll" not in getattr(touch, "button", ""):
katrain = MDApp.get_running_app().gui
if katrain and katrain.game:
katrain.game.set_current_node(self.navigate_move[0])
katrain.update_state()
self.navigate_move = [None, 0, 0, 0]
def show_graphs(self, keys):
self.show_score = keys["score"]
self.show_winrate = keys["winrate"]
def update_graph(self, *args):
nodes = self.nodes
if nodes:
score_values = [n.score if n and n.score else math.nan for n in nodes]
# score_values=[]
# for n in nodes:
# if n and n.score:
# score_values.append(n.score)
# else:
# score_values.append(math.nan)
score_nn_values = [n.score for n in nodes if n and n.score]
score_values_range = min(score_nn_values or [0]), max(score_nn_values or [0])
winrate_values = [(n.winrate - 0.5) * 100 if n and n.winrate else math.nan for n in nodes]
winrate_nn_values = [(n.winrate - 0.5) * 100 for n in nodes if n and n.winrate]
winrate_values_range = min(winrate_nn_values or [0]), max(winrate_nn_values or [0])
score_granularity = 5
winrate_granularity = 10
self.score_scale = (
max(math.ceil(max(-score_values_range[0], score_values_range[1]) / score_granularity), 1)
* score_granularity
)
self.winrate_scale = (
max(math.ceil(max(-winrate_values_range[0], winrate_values_range[1]) / winrate_granularity), 1)
* winrate_granularity
)
xscale = self.width / max(len(score_values) - 1, 15)
available_height = self.height
score_line_points = [
[self.x + i * xscale, self.y + self.height / 2 + available_height / 2 * (val / self.score_scale)]
for i, val in enumerate(score_values)
]
winrate_line_points = [
[self.x + i * xscale, self.y + self.height / 2 + available_height / 2 * (val / self.winrate_scale)]
for i, val in enumerate(winrate_values)
]
self.score_points = sum(score_line_points, [])
self.winrate_points = sum(winrate_line_points, [])
if self.highlighted_index is not None:
self.highlighted_index = min(self.highlighted_index, len(score_values) - 1)
score_dot_point = score_line_points[self.highlighted_index]
winrate_dot_point = winrate_line_points[self.highlighted_index]
if math.isnan(score_dot_point[1]):
score_dot_point[1] = (
self.y
+ self.height / 2
+ available_height / 2 * ((score_nn_values or [0])[-1] / self.score_scale)
)
self.score_dot_pos = score_dot_point
if math.isnan(winrate_dot_point[1]):
winrate_dot_point[1] = (
self.y
+ self.height / 2
+ available_height / 2 * ((winrate_nn_values or [0])[-1] / self.winrate_scale)
)
self.winrate_dot_pos = winrate_dot_point
Builder.load_string(
"""
#:import Theme katrain.gui.theme.Theme
<Graph>:
background_color: Theme.BOX_BACKGROUND_COLOR
marker_font_size: 0.1 * self.height
canvas.before:
Color:
rgba: root.background_color
Rectangle:
size: self.size
pos: self.pos
Color:
rgba: [1,1,1,1]
Rectangle:
pos: self.pos
size: self.size
source: root.background_image
<ScoreGraph>:
canvas:
Color:
rgba: Theme.SCORE_COLOR
Line:
points: root.score_points if root.show_score else []
width: dp(1.1)
Color:
rgba: Theme.WINRATE_COLOR
Line:
points: root.winrate_points if root.show_winrate else []
width: dp(1.1)
Color:
rgba: [0.5,0.5,0.5,1] if root.navigate_move[0] else [0,0,0,0]
Line:
points: root.navigate_move[1], root.y, root.navigate_move[1], root.y+root.height
width: 1
Color:
rgba: Theme.GRAPH_DOT_COLOR
Ellipse:
id: score_dot
pos: [c - self.highlight_size / 2 for c in (self.score_dot_pos if not self.navigate_move[0] else [self.navigate_move[1],self.navigate_move[2]] ) ]
size: (self.highlight_size,self.highlight_size) if root.show_score else (0.0001,0.0001)
Color:
rgba: Theme.GRAPH_DOT_COLOR
Ellipse:
id: winrate_dot
pos: [c - self.highlight_size / 2 for c in (self.winrate_dot_pos if not self.navigate_move[0] else [self.navigate_move[1],self.navigate_move[3]] ) ]
size: (self.highlight_size,self.highlight_size) if root.show_winrate else (0.0001,0.0001)
# score ticks
GraphMarkerLabel:
font_size: root.marker_font_size
color: Theme.SCORE_MARKER_COLOR
pos: root.x + root.width - self.width-1, root.pos[1]+root.height - self.font_size - 1
text: 'B+{}'.format(root.score_scale)
opacity: int(root.show_score)
GraphMarkerLabel:
font_size: root.marker_font_size
color: Theme.SCORE_MARKER_COLOR
pos: root.x + root.width - self.width-1, root.y + root.height*0.5 - self.height/2 + 2
text: i18n._('Jigo')
opacity: int(root.show_score)
GraphMarkerLabel:
font_size: root.marker_font_size
color: Theme.SCORE_MARKER_COLOR
pos: root.x + root.width - self.width-1, root.pos[1]
text: 'W+' + str(int(root.score_scale))
opacity: int(root.show_score)
# wr ticks
GraphMarkerLabel:
font_size: root.marker_font_size
color: Theme.WINRATE_MARKER_COLOR
pos: root.pos[0]+1, root.pos[1] + root.height - self.font_size - 1
text: "{}%".format(50 + root.winrate_scale)
opacity: int(root.show_winrate)
GraphMarkerLabel:
font_size: root.marker_font_size
color: Theme.WINRATE_MARKER_COLOR
pos:root.pos[0]+1, root.pos[1]
text: "{}%".format(50 - root.winrate_scale)
opacity: int(root.show_winrate)
"""
)
|
StarcoderdataPython
|
8033088
|
<reponame>smokah420/StakeCubeCoin<filename>contrib/auto_gdb/log_size.py
#!/usr/bin/env python3
#
try:
import gdb
except ImportError as e:
raise ImportError("This script must be run in GDB: ", str(e))
import traceback
import datetime
import sys
import os
sys.path.append(os.getcwd())
import common_helpers
class LogSizeCommand (gdb.Command):
"""calc size of the memory used by the object and write it to file"""
def __init__ (self):
super (LogSizeCommand, self).__init__ ("logsize", gdb.COMMAND_USER)
def invoke(self, arg, from_tty):
try:
args = gdb.string_to_argv(arg)
obj = gdb.parse_and_eval(args[0])
logfile = open(args[1], 'a', encoding="utf8")
size = common_helpers.get_instance_size(obj)
logfile.write("%s %s: %d\n" % (str(datetime.datetime.now()), args[0], size))
logfile.close()
except Exception as e:
print(traceback.format_exc())
raise e
LogSizeCommand()
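# --- Hedged usage sketch (inside a GDB session; paths/symbols are illustrative) ---
#   (gdb) source contrib/auto_gdb/log_size.py
#   (gdb) logsize mempool memusage.log
# Each invocation appends a line like "2024-01-01 12:00:00.000000 mempool: 123456"
# to memusage.log, using common_helpers.get_instance_size() for the measurement.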
|
StarcoderdataPython
|
73034
|
<filename>wiki/admin.py<gh_stars>1-10
from django.contrib import admin
from .models import WikiPage
admin.site.register(WikiPage)
|
StarcoderdataPython
|
3263367
|
#!/usr/bin/env python
"""
Multi-Resolution (2) Binary Classification with underline Gaussian Distributions
<NAME> & <NAME>,
Electrical and Computer Engineering Dept.,
University of Maryland
"""
#%% Import Modules
import pickle
import numpy as np
# import pandas as pd
import matplotlib.pyplot as plt
# from scipy.stats import multivariate_normal
# plt.ioff() # turn off interactive mode: only show figures with plt.show()
plt.close('all')
#%%
save_file = 'data.pkl'
# Gaussian Sampling?
gauss_sampling = True
# gauss_sampling = False
# Number of samples per Gaussian
ns = 120
test_ratio = 20.0/100
# Gaussian Density 2D, Symmetric
def gauss_dens(c,s,X,Y):
# pos = np.empty(X.shape + (2,))
# pos[:, :, 0] = X
# pos[:, :, 1] = Y
# dens = multivariate_normal.pdf(pos, mean=c, cov=np.diag((s,s)))
dens = 1/(2*np.pi*s) * np.exp(-((X-c[0])**2 + (Y-c[1])**2)/(2.0*s**2))
return dens
b = 3
c = 0
centers = [[b-1.4,b+0.4],[b+2.2,b+1.0],[b-0.4+c,b-1.4],[b+1.4+c,b-0.4],[b+0.4+c,b+2.0]]
sigmas = [0.75, 0.5, 0.35, 0.75, 0.5]
sigmas = [0.35, 0.25, 0.15, 0.35, 0.25]
sigmas = [0.20, 0.15, 0.10, 0.20, 0.15]
# Contour alpha
aa = 0.05
# samples alpha
aaa = 0.4
np.random.seed(0)
# np.random.seed(13)
#%% Sample Data Set
data = []
labels = []
if gauss_sampling:
# class o
cx = np.array(centers[0])
sx = sigmas[0]
for i in range(ns):
data.append( np.random.multivariate_normal(cx, [[sx,0],[0,sx]]) )
labels.append(0)
# class o
cx = np.array(centers[1])
sx = sigmas[1]
for i in range(ns):
data.append( np.random.multivariate_normal(cx, [[sx,0],[0,sx]]) )
labels.append(0)
# class o
cx = np.array(centers[2])
sx = sigmas[2]
for i in range(ns):
data.append( np.random.multivariate_normal(cx, [[sx,0],[0,sx]]) )
labels.append(0)
# class x
cx = np.array(centers[3])
sx = sigmas[3]
for i in range(ns):
data.append( np.random.multivariate_normal(cx, [[sx,0],[0,sx]]) )
labels.append(1)
# class x
cx = np.array(centers[4])
sx = sigmas[4]
for i in range(ns):
data.append( np.random.multivariate_normal(cx, [[sx,0],[0,sx]]) )
labels.append(1)
else:
def arc_point(c, r, theta):
c = np.array(c)
d = np.array([r*np.cos(theta), r*np.sin(theta)])
return c + d
# class o
cx = np.array(centers[0])
for r in [0.5,1]:
for theta in np.arange(0,2*np.pi,np.pi/5):
data.append(arc_point(cx,r,theta))
labels.append(0)
# class o
cx = np.array(centers[1])
for r in [0.3,0.5]:
for theta in np.arange(0,2*np.pi,np.pi/5):
data.append(arc_point(cx,r,theta))
labels.append(0)
# class o
cx = np.array(centers[2])
for r in [0.1,0.3]:
for theta in np.arange(0,2*np.pi,np.pi/5):
data.append(arc_point(cx,r,theta))
labels.append(0)
# class x
cx = np.array(centers[3])
for r in [0.5,1]:
for theta in np.arange(0,2*np.pi,np.pi/5):
data.append(arc_point(cx,r,theta))
labels.append(1)
# class x
cx = np.array(centers[4])
for r in [0.3,0.5]:
for theta in np.arange(0,2*np.pi,np.pi/5):
data.append(arc_point(cx,r,theta))
labels.append(1)
#% Convert Data
# data = x_data + y_data
# labels = [0]*len(x_data) + [1]*len(y_data)
#%% Map to [0,1] and shuffle
# map to [0,1]
train_min = np.min(np.min(data,0)) #-0.1
train_max = np.max(np.max(data,0)) #+0.1
train_domain = train_max-train_min
# add small margins so that we avoid 0.0 values (for KL divergence)
train_min = train_min - 0.05*train_domain
train_max = train_max + 0.05*train_domain
train_domain = train_max-train_min
# transform
data = (data-train_min)/train_domain
centers = (centers-train_min)/train_domain
sigmas = sigmas/train_domain
# shuffle data
shi = np.arange(len(data))
np.random.shuffle(shi)
data = [data[i] for i in shi]
labels = [labels[i] for i in shi]
# train_data = [data[i] for i in shi]
# train_labels = [labels[i] for i in shi]
# train_samples = len(train_data)
# test_data = []
# test_labels = []
# test_samples = 1#len(test_data)
# Split into training and testing sets
train_samples = int(np.floor(len(data)*(1-test_ratio)))
test_samples = int(len(data)-train_samples)
train_data = data[:][:train_samples]
train_labels = labels[:][:train_samples]
test_data = data[:][train_samples:]
test_labels = labels[:][train_samples:]
#%% Algorithm Plot
# Create new Figure
fig = plt.figure()
ax = fig.add_subplot(111, aspect='equal', autoscale_on=False,
# xlim=(b-3, b+3), ylim=(b-3, b+3))
xlim=(0, 1), ylim=(0, 1))
# ax.grid(True)
plt.xticks([0,1],'')
plt.yticks([0,1],'')
# data in 2D space
x_plot = [data[i] for i in range(len(data)) if labels[i] == 0]
y_plot = [data[i] for i in range(len(data)) if labels[i] == 1]
## plot data
ax.plot([x_plot[i][0] for i in range(len(x_plot))],
[x_plot[i][1] for i in range(len(x_plot))],'k.',alpha=aaa)
ax.plot([y_plot[i][0] for i in range(len(y_plot))],
[y_plot[i][1] for i in range(len(y_plot))],'r.',alpha=aaa)
## Contours
delta = 0.005
# xm = np.arange(b-3.0, b+3.0, delta)
# ym = np.arange(b-3.0, b+3.0, delta)
xm = np.arange(0.0, 1.0, delta)
ym = np.arange(0.0, 1.0, delta)
Xm, Ym = np.meshgrid(xm, ym)
test = gauss_dens((0.5,0.5),0.05,Xm,Ym)
# class o
cx = np.array(centers[0])
sx = sigmas[0]
Zm = gauss_dens(cx,sx,Xm,Ym)
ax.contour(Xm, Ym, Zm, alpha=aa, colors='k', levels=[1e-9, 1e-3, 1e-1, 1])
# class o
cx = np.array(centers[1])
sx = sigmas[1]
Zm = gauss_dens(cx,sx,Xm,Ym)
ax.contour(Xm, Ym, Zm, alpha=aa, colors='k', levels=[1e-9, 1e-3, 1e-1, 1])
# class o
cx = np.array(centers[2])
sx = sigmas[2]
Zm = gauss_dens(cx,sx,Xm,Ym)
ax.contour(Xm, Ym, Zm, alpha=aa, colors='k', levels=[1e-9, 1e-3, 1e-1, 1])
# class x
cx = np.array(centers[3])
sx = sigmas[3]
Zm = gauss_dens(cx,sx,Xm,Ym)
ax.contour(Xm, Ym, Zm, alpha=aa, colors='r', levels=[1e-9, 1e-3, 1e-1, 1])
# class x
cx = np.array(centers[4])
sx = sigmas[4]
Zm = gauss_dens(cx,sx,Xm,Ym)
ax.contour(Xm, Ym, Zm, alpha=aa, colors='r', levels=[1e-9, 1e-3, 1e-1, 1])
plt.show()
#%% Save Data
def proj(x):
u = np.array([1,1])
u = u/np.linalg.norm(u)
x = np.array(x)
return np.dot(u,x)
# def proj_back(x):
# u = np.array([1,1])
# u = u/np.linalg.norm(u)
# x = np.array(x)
# return u*x
train_data = [[np.array([proj(td)]),td] for td in train_data]
test_data = [[np.array([proj(td)]),td] for td in test_data]
# For high Resolution only:
# train_data = [[np.array(td)] for td in train_data]
# test_data = [[np.array(td)] for td in test_data]
# Save results to file
mydata = [train_data,train_labels,test_data,test_labels]
with open(save_file, mode='wb') as file:
pickle.dump(mydata, file)
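# --- Hedged sketch: reloading the pickled split later (illustrative only) ---
# with open(save_file, mode='rb') as file:
#     train_data, train_labels, test_data, test_labels = pickle.load(file)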
|
StarcoderdataPython
|
8193413
|
<gh_stars>0
import pymongo
import gridfs
from .datastore import DataStore
class MongoDBDataStore(pymongo.MongoClient, DataStore):
db_name = 'librarypaste'
@property
def db(self):
return self[self.db_name]
@classmethod
def from_uri(cls, uri):
store = cls(uri)
uri_p = pymongo.uri_parser.parse_uri(uri)
if uri_p['database']:
store.db_name = uri_p['database']
return store
def _store(self, uid, content, data=None):
"""Store the given dict of content at uid. Nothing returned."""
doc = dict(uid=uid)
if data:
gfs = gridfs.GridFS(self.db)
id = gfs.put(data, encoding='utf-8')
doc.update(data_id=id)
doc.update(content)
self.db.pastes.insert_one(doc)
def _storeLog(self, nick, time, uid):
"""Adds the nick & uid to the log for a given time/order. No return."""
query = dict(uid=uid)
update = {'$set': dict(nick=nick, time=time)}
        self.db.pastes.update_one(query, update)
def _retrieve(self, uid):
"""Return a dict with the contents of the paste, including the raw
data, if any, as the key 'data'. Must pass in uid, not shortid."""
query = dict(uid=uid)
doc = self.db.pastes.find_one(query)
if 'data_id' in doc:
data_id = doc.pop('data_id')
gfs = gridfs.GridFS(self.db)
doc.update(data=gfs.get(data_id).read())
return doc
def _delete(self, uid):
filter = dict(uid=uid)
doc = self.db.pastes.find_one_and_delete(filter)
if 'data_id' in doc:
gfs = gridfs.GridFS(self.db)
gfs.delete(doc['data_id'])
return doc
def lookup(self, nick):
"""Looks for the most recent paste by a given nick.
Returns the uid or None"""
query = dict(nick=nick)
order = [('time', pymongo.DESCENDING)]
recs = self.db.pastes.find(query).sort(order).limit(1)
try:
return next(recs)['uid']
except StopIteration:
pass
def _lookupUid(self, shortid):
query = dict(shortid=shortid)
rec = self.db.pastes.find_one(query)
return rec['uid']
def list(self):
return (doc['uid'] for doc in self.db.pastes.find(projection=['uid']))
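# --- Hedged usage sketch (assumes a reachable MongoDB and the librarypaste ---
# --- DataStore API; the uid/content values below are illustrative) ---
# store = MongoDBDataStore.from_uri('mongodb://localhost:27017/librarypaste')
# store._store('abc123', {'shortid': 'abc'}, data='raw paste body')
# doc = store._retrieve('abc123')     # includes the raw bytes under 'data'
# print(list(store.list()))           # all stored uids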
|
StarcoderdataPython
|
3551391
|
<gh_stars>1-10
import xml.etree.ElementTree as ET
import sys, csv
tree = ET.parse(sys.argv[1])
root = tree.getroot()
from constants import *
relations = ['collectionobjects2storagelocations', 'collectionobjects2people']
cspaceCSV = csv.writer(open('entities.csv', 'w', newline=''), delimiter='\t')
entities = {}
for cluedoElement, cspaceElement in mapping.items():
    print('looking for Cluedo %s elements' % cluedoElement)
    for e in root.findall('.//' + cluedoElement):
        for c in e.findall('.//' + cspaceElement):
            print(' ', cluedoElement, c.tag, c.text)
            slug = c.text.replace('.', '').replace(' ', '')
            print(' ', 'media', c.tag, slug + '_Full.jpg')
            entities[c.text] = cluedo2cspace[c.tag]
            cspaceCSV.writerow([cluedo2cspace[c.tag], c.tag, c.text])
            cspaceCSV.writerow(['media', c.text, slug + '_Full.jpg'])
cspaceRel = csv.writer(open('relations.csv', 'w', newline=''), delimiter='\t')
for object in [x for x in entities.keys() if entities[x] == 'collectionobject']:
    for location in [x for x in entities.keys() if entities[x] == 'storagelocation']:
        movement = '%s :: %s' % (location, object)
        cspaceCSV.writerow(['movement', 'movement', movement])
        cspaceRel.writerow(['relation', 'Movement', movement, 'CollectionObject', object])
        print(location, object)
        del entities[location]
        break
|
StarcoderdataPython
|
18968
|
<reponame>Kreastr/SmartAPI-HEILA
import sys
import site
import os
|
StarcoderdataPython
|
394734
|
# Generated by Django 2.2.3 on 2019-08-01 17:35
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('app', '0021_auto_20190607_1706'),
]
operations = [
migrations.RenameModel(
old_name='Mot',
new_name='Expression',
),
]
|
StarcoderdataPython
|
1745173
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Exercise 10.6 from Kane 1985."""
from __future__ import division
from sympy import symbols
from sympy.physics.mechanics import ReferenceFrame, RigidBody, Point
from sympy.physics.mechanics import dot, dynamicsymbols, inertia, msprint
q1, q2, q3 = dynamicsymbols('q1 q2 q3')
#omega1, omega2, omega3 = dynamicsymbols('ω1 ω2 ω3')
q1d, q2d = dynamicsymbols('q1, q2', level=1)
m, I11, I22, I33 = symbols('m I11 I22 I33', real=True, positive=True)
# reference frames
A = ReferenceFrame('A')
B = A.orientnew('B', 'body', [q1, q2, q3], 'xyz')
# points B*, O
pB_star = Point('B*')
pB_star.set_vel(A, 0)
# rigidbody B
I_B_Bs = inertia(B, I11, I22, I33)
rbB = RigidBody('rbB', pB_star, B, m, (I_B_Bs, pB_star))
# kinetic energy
K = rbB.kinetic_energy(A) # velocity of point B* is zero
print('K_ω = {0}'.format(msprint(K)))
print('\nSince I11, I22, I33 are the central principal moments of inertia')
print('let I_min = I11, I_max = I33')
I_min = I11
I_max = I33
H = rbB.angular_momentum(pB_star, A)
K_min = dot(H, H) / I_max / 2
K_max = dot(H, H) / I_min / 2
print('K_ω_min = {0}'.format(msprint(K_min)))
print('K_ω_max = {0}'.format(msprint(K_max)))
print('\nI11/I33, I22/I33 =< 1, since I33 >= I11, I22, so K_ω_min <= K_ω')
print('Similarly, I22/I11, I33/I11 >= 1, '
'since I11 <= I22, I33, so K_ω_max >= K_ω')
|
StarcoderdataPython
|
5143272
|
<reponame>Sheldongg/Led3D-master
import math
import numpy as np
def rret(data,size_x,size_y):
x = data[:, 0]
y = data[:, 1]
z = data[:, 2]
x= np.array(x)
max_x = max(x)
min_x = min(x)
max_y = max(y)
min_y = min(y)
min_z = min(z)
max_z = max(z)
range_x = max_x - min_x
range_y = max_y - min_y
ret = np.zeros((size_x, size_y))
ret[:,:] = min_z
    num_points = x.shape[0]
    for i in range(num_points):
X = math.floor((x[i] - min_x) / range_x * (size_x - 1))
Y = math.floor((y[i] - min_y) / range_y * (size_y - 1))
ret[X,Y] = max(ret[X, Y], z[i])
ret[X,Y]=max(ret[X,Y],min_z)
min_z = min(min_z, ret[X, Y])
range_z = max_z - min_z
# ret = max(ret, min_z)
ret = np.round((ret - min_z) / range_z * 255)
return ret
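# --- Hedged usage sketch (illustrative; not part of the original file) ---
# Projects a synthetic point cloud (N x 3 array of x, y, z) onto a 96x96 depth map.
if __name__ == '__main__':
    pts = np.random.rand(500, 3)
    depth = rret(pts, 96, 96)
    print(depth.shape, depth.min(), depth.max())  # (96, 96), values in [0, 255]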
|
StarcoderdataPython
|
11247901
|
<reponame>JSzymanskiJS/trading-bot<gh_stars>0
API_KEY = '<your key>'
API_SECRET = '<your secret>'
|
StarcoderdataPython
|
3272281
|
import numpy as np
import pandas as pd
import neurokit as nk
import matplotlib.pyplot as plt
import scipy
import biosppy
import mne
import seaborn as sns
df = pd.read_csv("https://raw.githubusercontent.com/neuropsychology/NeuroKit.py/master/examples/Bio/bio_100Hz.csv")
ecg=df["ECG"]
rsp=df["RSP"]
sampling_rate=100
df = nk.bio_process(ecg=df["ECG"], rsp=df["RSP"], sampling_rate=100)
df["df"].plot()
#sampling_rate=100
#df["df"].plot()
#rpeaks = df["ECG"]["R_Peaks"]
#
#rri = df["ECG_RR_Interval"]
#rri = rri.dropna()
##df.plot()
#
#fbands = {
# "ULF": [0.0001, 0.0033],
# "VLF": [0.0033, 0.04],
# "LF": [0.04, 0.15],
# "HF": [0.15, 0.40],
# "VHF": [0.4, 0.5]
# }
#
#power, freq = mne.time_frequency.psd_array_multitaper(rri, sfreq=100, fmin=0, fmax=0.5, adaptive=False, normalization='length')
#
#freq, power = biosppy.signals.tools.power_spectrum(signal=rri, sampling_rate=sampling_rate)
#plt.plot(freq,power)
#
#tf = {}
#for band in fbands:
# freqs = fbands[band]
#
# filtered, sampling_rate, params = biosppy.signals.tools.filter_signal(signal=rri, ftype='butter', band='bandpass', order=1, frequency=freqs, sampling_rate=sampling_rate)
# amplitude, phase = biosppy.signals.tools.analytic_signal(filtered)
#
# tf[band] = amplitude
#
#
#
#tf = pd.DataFrame.from_dict(tf)
#tf["RRi"] = rri.values
#nk.z_score(tf).plot()
#
#
#import numpy as np
#import pandas as pd
#import mne
#import scipy
#import biosppy
#import neurokit as nk
#import matplotlib.pyplot as plt
#signal = pd.read_csv("signal_100Hz.txt")["Signal"]
#sampling_rate=100
#
## Set frequencies with variable step to have approximately the same amount of values in each band
#fbands = {
## "ULF": [0, 0.0033],
# "VLF": [0.0033, 0.04],
## "LF": [0.04, 0.15],
# "HF": [0.15, 0.42]
## "VHF": np.arange(0.4, 0.51, 0.01)
# }
#
#
#
#freqs, power = biosppy.signals.tools.power_spectrum(signal=signal, sampling_rate=sampling_rate, pad=2)
#
#plt.plot(freqs, power)
#biosppy.signals.tools.filter_signal(signal=None, ftype='FIR', band='lowpass',
#
# Initialize empty dict
# hrv = {}
#
# # Preprocessing
# # ==================
# # Extract RR intervals (RRis)
# RRis = np.diff(rpeaks)
# # Basic resampling to 1Hz to standardize the scale
# RRis = RRis/sampling_rate
# RRis = RRis.astype(float)
#
#
# # Artifact detection - Statistical
# for index, rr in enumerate(RRis):
# # Remove RR intervals that differ more than 25% from the previous one
# if RRis[index] < RRis[index-1]*0.75:
# RRis[index] = np.nan
# if RRis[index] > RRis[index-1]*1.25:
# RRis[index] = np.nan
#
# # Artifact detection - Physiological (http://emedicine.medscape.com/article/2172196-overview)
# RRis = pd.Series(RRis)
# RRis[RRis < 0.6] = np.nan
# RRis[RRis > 1.3] = np.nan
#
# # Artifacts treatment
# hrv["n_Artifacts"] = pd.isnull(RRis).sum()/len(RRis)
# artifacts_indices = RRis.index[RRis.isnull()] # get the artifacts indices
# RRis = RRis.drop(artifacts_indices) # remove the artifacts
#
# # Convert to continuous RR interval (RRi)
# beats_times = rpeaks[1:] # the time at which each beat occured starting from the 2nd beat
# beats_times -= beats_times[0]
# beats_times = np.delete(beats_times, artifacts_indices) # delete also the artifact beat moments
# try:
# RRi = discrete_to_continuous(RRis, beats_times, sampling_rate) # Interpolation using 3rd order spline
# except TypeError:
# print("NeuroKit Warning: ecg_hrv(): Sequence too short to compute HRV.")
# return(hrv)
#
#
# # Rescale to 1000Hz
# RRis = RRis*1000
# RRi = RRi*1000
# hrv["RR_Intervals"] = RRis # Values of RRis
# hrv["df"] = RRi.to_frame("ECG_RR_Interval") # Continuous (interpolated) signal of RRi
#
# # Time Domain
# # ==================
# hrv["RMSSD"] = np.sqrt(np.mean(np.diff(RRis) ** 2))
# hrv["meanNN"] = np.mean(RRis)
# hrv["sdNN"] = np.std(RRis, ddof=1) # make it calculate N-1
# hrv["cvNN"] = hrv["sdNN"] / hrv["meanNN"]
# hrv["CVSD"] = hrv["RMSSD"] / hrv["meanNN"] * 100
# hrv["medianNN"] = np.median(abs(RRis))
# hrv["madNN"] = mad(RRis, constant=1)
# hrv["mcvNN"] = hrv["madNN"] / hrv["medianNN"]
# nn50 = sum(abs(np.diff(RRis)) > 50)
# hrv["pNN50"] = nn50 / len(RRis) * 100
# nn20 = sum(abs(np.diff(RRis)) > 20)
# hrv["pNN20"] = nn20 / len(RRis) * 100
#
# # Geometrical Method
# # ====================
# # TODO: This part needs to be checked by an expert. Also, it would be better to have Renyi entropy (a generalization of shannon's), but I don't know how to compute it.
# try:
# bin_number = 32 # Initialize bin_width value
# # find the appropriate number of bins so the class width is approximately 8 ms (Voss, 2015)
# for bin_number_current in range(2, 50):
# bin_width = np.diff(np.histogram(RRi, bins=bin_number_current, density=True)[1])[0]
# if abs(8 - bin_width) < abs(8 - np.diff(np.histogram(RRi, bins=bin_number, density=True)[1])[0]):
# bin_number = bin_number_current
# hrv["Triang"] = len(RRis)/np.max(np.histogram(RRi, bins=bin_number, density=True)[0])
# hrv["Shannon_h"] = entropy_shannon(np.histogram(RRi, bins=bin_number, density=True)[0])
# except ValueError:
# hrv["Triang"] = np.nan
# hrv["Shannon_h"] = np.nan
#
#
# # Frequency Domain
# # =================
# freq_bands = {
# "ULF": [0.0001, 0.0033],
# "VLF": [0.0033, 0.04],
# "LF": [0.04, 0.15],
# "HF": [0.15, 0.40],
# "VHF": [0.4, 0.5]}
#
#
# # Frequency-Domain Power over Time
# freq_powers = {}
# for band in freq_bands:
# freqs = freq_bands[band]
# # Filter to keep only the band of interest
# filtered, sampling_rate, params = biosppy.signals.tools.filter_signal(signal=RRi, ftype='butter', band='bandpass', order=1, frequency=freqs, sampling_rate=sampling_rate)
# # Apply Hilbert transform
# amplitude, phase = biosppy.signals.tools.analytic_signal(filtered)
# # Extract Amplitude of Envolope (power)
# freq_powers["ECG_HRV_" + band] = amplitude
#
# freq_powers = pd.DataFrame.from_dict(freq_powers)
# freq_powers.index = hrv["df"].index
# hrv["df"] = pd.concat([hrv["df"], freq_powers])
#
#
# # Compute Power Spectral Density (PSD) using multitaper method
# power, freq = mne.time_frequency.psd_array_multitaper(RRi, sfreq=sampling_rate, fmin=0, fmax=0.5, adaptive=False, normalization='length')
#
# def power_in_band(power, freq, band):
# power = np.trapz(y=power[(freq >= band[0]) & (freq < band[1])], x=freq[(freq >= band[0]) & (freq < band[1])])
# return(power)
#
# # Extract Power according to frequency bands
# hrv["ULF"] = power_in_band(power, freq, freq_bands["ULF"])
# hrv["VLF"] = power_in_band(power, freq, freq_bands["VLF"])
# hrv["LF"] = power_in_band(power, freq, freq_bands["LF"])
# hrv["HF"] = power_in_band(power, freq, freq_bands["HF"])
# hrv["VHF"] = power_in_band(power, freq, freq_bands["VHF"])
# hrv["Total_Power"] = power_in_band(power, freq, [0, 0.5])
#
# hrv["LFn"] = hrv["LF"]/(hrv["LF"]+hrv["HF"])
# hrv["HFn"] = hrv["HF"]/(hrv["LF"]+hrv["HF"])
# hrv["LF/HF"] = hrv["LF"]/hrv["HF"]
# hrv["LF/P"] = hrv["LF"]/hrv["Total_Power"]
# hrv["HF/P"] = hrv["HF"]/hrv["Total_Power"]
#
#
# # TODO: THIS HAS TO BE CHECKED BY AN EXPERT - Should it be applied on the interpolated on raw RRis?
# # Non-Linear Dynamics
# # ======================
# if len(RRis) > 17:
# hrv["DFA_1"] = nolds.dfa(RRis, range(4, 17))
# if len(RRis) > 66:
# hrv["DFA_2"] = nolds.dfa(RRis, range(16, 66))
# hrv["Shannon"] = entropy_shannon(RRis)
# hrv["Sample_Entropy"] = nolds.sampen(RRis, emb_dim=2)
# try:
# hrv["Correlation_Dimension"] = nolds.corr_dim(RRis, emb_dim=2)
# except AssertionError as error:
# print("NeuroKit Warning: ecg_hrv(): Correlation Dimension. Error: " + str(error))
# hrv["Correlation_Dimension"] = np.nan
# hrv["Entropy_Multiscale"] = entropy_multiscale(RRis, emb_dim=2)
# hrv["Entropy_SVD"] = entropy_svd(RRis, emb_dim=2)
# hrv["Entropy_Spectral_VLF"] = entropy_spectral(RRis, sampling_rate, bands=np.arange(0.0033, 0.04, 0.001))
# hrv["Entropy_Spectral_LF"] = entropy_spectral(RRis, sampling_rate, bands=np.arange(0.04, 0.15, 0.001))
# hrv["Entropy_Spectral_HF"] = entropy_spectral(RRis, sampling_rate, bands=np.arange(0.15, 0.40, 0.001))
# hrv["Fisher_Info"] = fisher_info(RRis, tau=1, emb_dim=2)
# try: # Otherwise travis errors for some reasons :(
# hrv["Lyapunov"] = np.max(nolds.lyap_e(RRis, emb_dim=58, matrix_dim=4))
# except Exception:
# hrv["Lyapunov"] = np.nan
# hrv["FD_Petrosian"] = fd_petrosian(RRis)
# hrv["FD_Higushi"] = fd_higushi(RRis, k_max=16)
#
# # TO DO:
# # Include many others (see Voss 2015)
#
# return(hrv)
#
#
#tf = {}
#for band in fbands:
# freqs = fbands[band]
# amplitude, phase = biosppy.signals.tools.analytic_signal(signal)
## filtered = mne.filter.filter_data(np.array([[signal]]), sfreq=sampling_rate, l_freq=freqs[0], h_freq=freqs[1], method="fir", verbose="CRITICAL")[0][0]
## analytic = scipy.signal.hilbert(filtered)
## amplitude_envelope = np.abs(analytic)
## instantaneous_phase = np.unwrap(np.angle(analytic))
## instantaneous_frequency = (np.diff(instantaneous_phase) / (2.0*np.pi) * sampling_rate)
#
#
## tf[band + "_Signal"] = filtered
# tf[band + "_Amplitude"] = amplitude
# tf[band + "_Phase"] = phase
#
#
## freqs = tf[freqs_range]
## signal = mne.time_frequency.tfr_array_multitaper(np.array([[signal]]), sampling_rate, freqs, n_cycles=freqs/2, zero_mean=False, time_bandwidth=7)[0][0]
## signal = np.mean(signal, 0) # Average
## tf[freqs_range] = signal # Replace data in dict
#
#tf = pd.DataFrame.from_dict(tf)
#tf["Raw_Signal"] = signal
#nk.z_score(tf).plot()
#
#
##
##
#
#
#
#
#
#
##df = pd.read_csv('normal_ECG.csv')
##df = df.loc[10000:100000].reset_index(drop=True) # Select 10s of signal
#sampling_rate=100
#ecg=df["ECG"]
#
#
#df = nk.ecg_process(ecg=ecg, sampling_rate=100)["df"]
#df["ECG_RR_Interval"].to_csv("signal_100Hz.txt", index=False)
#
#rri = df["ECG_RR_Interval"]
#rri = rri.dropna()
#
#signal = pd.read_csv("signal_100Hz.txt")
#sampling_rate=100
## Set frequencies with variable step to have approximately the same amount of values in each band
#tf = {"ULF": np.arange(0.0001, 0.0033, 0.001),
# "VLF": np.arange(0.0033, 0.045, 0.005),
# "LF": np.arange(0.04, 0.16, 0.01),
# "HF": np.arange(0.15, 0.42, 0.02),
# "VHF": np.arange(0.4, 0.51, 0.01)}
#
#
#for freqs_range in tf:
# freqs = tf[freqs_range]*1000
# signal = mne.time_frequency.tfr_array_multitaper(np.array([[signal]]), sampling_rate, freqs, n_cycles=freqs/2, zero_mean=False, time_bandwidth=7)[0][0]
# signal = np.mean(signal, 0)
# tf[freqs_range] = signal
#
#tf = pd.DataFrame.from_dict(tf)
#tf["RRI"] = signal
#nk.z_score(tf).plot()
#
#tf_HF = mne.time_frequency.tfr_array_morlet(np.array([[rri]]), 100, np.arange(0.15, 0.42, 0.2 ), n_cycles=np.arange(0.15, 0.42, 0.2)/2)[0][0]
#tf_LF = mne.time_frequency.tfr_array_morlet(np.array([[rri]]), 100, np.arange(0.04, 0.16, 0.1), n_cycles=np.arange(0.04, 0.16, 0.1)/2)[0][0]
#tf_LF = np.mean(tf_LF, 0)
#tf_HF = np.mean(tf_HF, 0)
###tf_HF20 = np.mean(tf_HF20, 0)
###
###
#pd.Series(tf_LF).plot()
#pd.Series(tf_HF).plot()
#pd.Series(tf_HF20).plot()
#pd.Series(rri).plot()
#hrv["VLF"] = power_in_band(power, freq, 0.0033, 0.04)
#hrv["LF"] = power_in_band(power, freq, 0.04, 0.15)
#hrv["HF"] = power_in_band(power, freq, 0.15, 0.4)
#hrv["VHF"] = power_in_band(power, freq, 0.4, 0.5)
#tfr = pd.DataFrame(tfr)
#tfr.plot()
#len(pd.Series(tfr[])).plot()
#
#sns.heatmap(tfr)
#rsp=dict(biosppy.resp.resp(df["RSP"], sampling_rate, show=False))["filtered"]
#rpeaks = dict(biosppy.ecg.ecg(ecg, sampling_rate, show=False))["rpeaks"]
#
#
#bio = nk.bio_process(ecg=df["ECG"], rsp=df["RSP"], sampling_rate=1000)
##rsa_interpolated = nk.discrete_to_continuous(values=np.array(bio["ECG"]["RSA"]["RSA_P2T_Values"]), value_times=bio["ECG"]["RSA"]["value_times"], sampling_rate=sampling_rate)
##rsp = pd.Series(dict(biosppy.resp.resp(rsp, 100, show=False))["filtered"])
#bio["df"].plot()
#
##nk.plot_events_in_signal(rsp, rsp_onsets)
##nk.z_score(df).plot()
#
|
StarcoderdataPython
|
1783253
|
<gh_stars>1-10
# type: ignore
### Standard imports. ###
import os
import curses
import pickle
### Local imports. ###
from src_scripts.utilities import (reader,
unpickler)
if __name__ == '__main__':
import argparse
import textwrap
#####################################################################################
parser = argparse.ArgumentParser(
formatter_class = argparse.RawDescriptionHelpFormatter,
usage = argparse.SUPPRESS,
description = textwrap.dedent('''
######################################
GHRSS Survey FFA Pipeline: The Monitor
######################################
Just like the cosmic being in the DC universe it is
named after, the Monitor updates the user about both
per-file and per-date progress of the pipeline. The
only input is the particular date for which the user
wishes to get an update or summary.
To be run from the "GHRSS_FFA_Pipeline" directory only.
usage: python the_monitor.py [date]
'''))
parser.add_argument('date',
type = str,
help = textwrap.dedent(
""" The date for which updates are required. """))
try:
args = parser.parse_args()
except:
parser.print_help()
parser.exit(1)
date = args.date
#####################################################################################
def build_record(config, summary):
""" Build the summary of all processing done till now by the pipeline. """
record = []
# Record the configuration.
descp_cfg = ('Current Pipeline Configuration\n'
'------------------------------\n'
'Node(s): {nodes}\n'
'Machine configuration: {mach_config}\n'
'Dates to be analysed: {dates}\n'
'Number of cores per node: {cores}\n\n'
'DM range: {DM_lowest} to {DM_highest} pc cm^-3\n'
'Number of DM trials: {numDMs}\n'
'Period ranges searched by the FFA:\n'
' (0.2 to 0.5 s), (0.5 to 2s) and (2 to 100s)\n\n'
'').format(nodes=config._config['pipeline_variables']['nodes'],
mach_config=config.mach_config,
dates=config._config['pipeline_variables']['dates'][config.backend],
cores=config.cores,
DM_lowest=config.ddplan['DM_lowest'],
DM_highest=config.ddplan['DM_highest'],
numDMs=config.ddplan['numDMs'])
record.append(descp_cfg)
# Then the summary.
header = ['Filename',
'DM trials',
'Candidates',
'Folding Status',
'Archiving Status']
fmt_hdr = ('{:<50} {:<11} {:<11} {:<15} {}\n\n').format(*header)
# Record table header.
record.append(fmt_hdr)
for meta in summary:
# Check if folding has been carried out properly.
if meta['num_fold_prfs']:
if (meta['num_candidates'] == meta['num_fold_prfs']):
fold_flag = 'Done.'
else:
fold_flag = 'Incomplete.'
else:
fold_flag = 'Not Done.'
# Check if archiving has been carried out properly.
if meta['num_arv_prfs']:
if (meta['num_fold_prfs'] == meta['num_arv_prfs']):
archive_flag = 'Done.'
else:
archive_flag = 'Incomplete.'
else:
archive_flag = 'Not Done.'
# Construct each row of the table.
row = ('{fname:<53}'
'{proc_dm_trials:<13}'
'{num_candidates:<13}'
'{fd_flag:<16}'
'{arv_flag}\n').format(fname=meta['fname'],
proc_dm_trials=str(meta['proc_dm_trials']),
num_candidates=str(meta['num_candidates']),
fd_flag=fold_flag,
arv_flag=archive_flag)
# Record each row.
record.append(row)
return record
def cli_updater(stdscr, date):
""" Create a CLI where all summaries and updates are displayed.
The CLI was built using the "curses" module in Python.
"""
if curses.has_colors():
curses.start_color()
curses.use_default_colors()
stdscr.keypad(1)
curses.curs_set(0)
# Initialise the color combinations we're going to use.
curses.init_pair(1, curses.COLOR_RED, -1)
curses.init_pair(2, curses.COLOR_GREEN, -1)
# Begin the program.
stdscr.addstr('GHRSS FFA Pipeline', curses.A_REVERSE)
stdscr.chgat(-1, curses.A_REVERSE)
stdscr.addstr(curses.LINES-1, 0, ('Press "U" to request a new update, '
'Press "S" to request a summary, '
'"Q" to quit.'))
# Change the U and the S to green and the Q to red.
stdscr.chgat(curses.LINES-1, 7, 1, curses.A_BOLD | curses.color_pair(2))
stdscr.chgat(curses.LINES-1, 42, 1, curses.A_BOLD | curses.color_pair(2))
stdscr.chgat(curses.LINES-1, 68, 1, curses.A_BOLD | curses.color_pair(1))
# Set up the window to hold the updates.
update_window = curses.newwin(curses.LINES-2, curses.COLS, 1, 0)
# Create a sub-window so as to cleanly display the update without worrying
# about over-writing the update window's borders.
update_text_window = update_window.subwin(curses.LINES-6, curses.COLS-4, 3, 2)
update_text_window.addstr('Press "U" to get an update. Press "S" to get a summary.')
# Draw a border around the main update window.
update_window.box()
# Update the internal window data structures.
stdscr.noutrefresh()
update_window.noutrefresh()
# Redraw the screen.
curses.doupdate()
# Start the event loop.
while True:
c = update_window.getch()
#####################################################################################
# If user needs an update...
if c == ord('u') or c == ord('U'):
update_text_window.clear()
update_text_window.addstr('Getting update...')
update_text_window.refresh()
update_text_window.clear()
# Get updates from the hidden process log file.
updates = reader('./{}.PIDS.log'.format(date))
# Print updates to screen.
try:
for line in updates:
update_text_window.addstr(line)
update_text_window.refresh()
# If there are no updates to print, inform the user that there is a problem.
except StopIteration:
update_text_window.addstr('Houston, we may have a problem.')
update_text_window.refresh()
#####################################################################################
# If user needs a summary...
elif c == ord('s') or c == ord('S'):
update_text_window.clear()
update_text_window.addstr('Getting summary...')
update_text_window.refresh()
update_text_window.clear()
# Restore information about the state of the pipeline.
# Read first line to get configuration.
summ_log = unpickler('./{}.history.log'.format(date))
try:
# Get the configuration.
config = next(summ_log)
# Get the summary.
summary = summ_log
# Build the record.
record = build_record(config, summary)
# Print the summary on the screen.
for line in record:
update_text_window.addstr(line)
update_text_window.refresh()
# Path where summary must be saved.
_rec_ = os.path.join(config.state_path,
date,
'{}.rec'.format(date))
# Save the recorded summary.
try:
with open(_rec_, 'w+') as _save_:
for line in record:
_save_.write(line)
except IOError:
update_text_window.addstr('\n')
update_text_window.addstr('Cannot write summary to file.')
# Inform the user that the summary has been saved
# and where it has been saved.
update_text_window.addstr('\n')
update_text_window.addstr('Summary saved at {}'.format(_rec_))
# Refresh window.
update_text_window.refresh()
except StopIteration:
# Hold your horses, user!
update_text_window.addstr('No files processed yet. Work ongoing.')
update_text_window.refresh()
#####################################################################################
elif c == ord('q') or c == ord('Q'):
# Quit and exit event loop.
break
# Refresh the windows from the bottom up.
stdscr.noutrefresh()
update_window.noutrefresh()
update_text_window.noutrefresh()
curses.doupdate()
#########################################################################
# Wrap the "cli_updater" in the curses "wrapper" function so that all
# initialisations are handled appropriately and the terminal is restored
# to its former glory without a hitch, even if there is an exception.
curses.wrapper(cli_updater, date)
#########################################################################
|
StarcoderdataPython
|
4852345
|
<reponame>the-gamecoders/TheGameHub<filename>src/signup.py
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'signup.ui'
#
# Created by: PyQt5 UI code generator 5.15.1
#
# WARNING: Any manual changes made to this file will be lost when pyuic5 is
# run again. Do not edit this file unless you know what you are doing.
from PyQt5 import QtCore, QtGui, QtWidgets
from PyQt5.QtWidgets import QMessageBox
import sqlite3
conn=sqlite3.connect('TheGameHub.db')
curs=conn.cursor()
class Ui_Dialog2(object):
def setupUi(self, Dialog):
Dialog.setObjectName("Dialog")
Dialog.resize(295, 275)
Dialog.setStyleSheet("background-color: rgb(225, 255, 208);")
self.formLayout = QtWidgets.QFormLayout(Dialog)
self.formLayout.setContentsMargins(30, 30, 30, -1)
self.formLayout.setHorizontalSpacing(20)
self.formLayout.setVerticalSpacing(36)
self.formLayout.setObjectName("formLayout")
self.label_3 = QtWidgets.QLabel(Dialog)
font = QtGui.QFont()
font.setFamily("Comic Sans MS")
font.setPointSize(11)
font.setBold(True)
font.setWeight(75)
self.label_3.setFont(font)
self.label_3.setAlignment(QtCore.Qt.AlignCenter)
self.label_3.setObjectName("label_3")
self.formLayout.setWidget(0, QtWidgets.QFormLayout.SpanningRole, self.label_3)
self.label = QtWidgets.QLabel(Dialog)
font = QtGui.QFont()
font.setPointSize(9)
font.setBold(True)
font.setWeight(75)
self.label.setFont(font)
self.label.setObjectName("label")
self.formLayout.setWidget(1, QtWidgets.QFormLayout.LabelRole, self.label)
self.uname_line = QtWidgets.QLineEdit(Dialog)
self.uname_line.setStyleSheet("background-color: rgb(255, 255, 255);")
self.uname_line.setText("")
self.uname_line.setObjectName("uname_line")
self.formLayout.setWidget(1, QtWidgets.QFormLayout.FieldRole, self.uname_line)
self.label_2 = QtWidgets.QLabel(Dialog)
font = QtGui.QFont()
font.setPointSize(9)
font.setBold(True)
font.setWeight(75)
self.label_2.setFont(font)
self.label_2.setObjectName("label_2")
self.formLayout.setWidget(2, QtWidgets.QFormLayout.LabelRole, self.label_2)
self.password_line = QtWidgets.QLineEdit(Dialog)
self.password_line.setStyleSheet("background-color: rgb(255, 255, 255);")
self.password_line.setText("")
self.password_line.setEchoMode(QtWidgets.QLineEdit.Password)
self.password_line.setObjectName("password_line")
self.formLayout.setWidget(2, QtWidgets.QFormLayout.FieldRole, self.password_line)
self.signup = QtWidgets.QPushButton(Dialog)
font = QtGui.QFont()
font.setPointSize(9)
self.signup.setFont(font)
self.signup.setStyleSheet("background-color: rgb(197, 198, 255);")
self.signup.setObjectName("signup")
self.formLayout.setWidget(3, QtWidgets.QFormLayout.SpanningRole, self.signup)
self.signup.clicked.connect(self.insertData)
self.retranslateUi(Dialog)
QtCore.QMetaObject.connectSlotsByName(Dialog)
def retranslateUi(self, Dialog):
_translate = QtCore.QCoreApplication.translate
Dialog.setWindowTitle(_translate("Dialog", "Sign Up"))
self.label_3.setText(_translate("Dialog", "CREATE ACCOUNT"))
self.label.setText(_translate("Dialog", "username:"))
self.label_2.setText(_translate("Dialog", "password:"))
self.signup.setText(_translate("Dialog", "sign up"))
def insertData(self):
username = self.uname_line.text()
password = self.password_line.text()
curs.execute('SELECT Username from Users;')
users = curs.fetchall()
user_list = []
for i in users:
for Name in i:
user_list.append(Name)
if username in user_list or len(username) == 0 or len(password) == 0:
if username in user_list:
msg = QMessageBox()
msg.setWindowTitle("Warning")
msg.setText("Username already taken!")
msg.setIcon(QMessageBox.Warning)
x=msg.exec()
self.uname_line.setText("")
self.password_line.setText("")
if len(username) == 0 and len(password) == 0:
msg = QMessageBox()
msg.setWindowTitle("Warning")
msg.setText("please enter a username and password.")
msg.setIcon(QMessageBox.Warning)
x=msg.exec()
elif len(username) == 0:
msg = QMessageBox()
msg.setWindowTitle("Warning")
msg.setText("please enter a username.")
msg.setIcon(QMessageBox.Warning)
x=msg.exec()
elif len(password) == 0:
msg = QMessageBox()
msg.setWindowTitle("Warning")
msg.setText("please enter password.")
msg.setIcon(QMessageBox.Warning)
x=msg.exec()
else:
curs.execute("INSERT INTO Users VALUES(?,?);",(username,password))
conn.commit()
msg1 = QMessageBox()
msg1.setWindowTitle("Sign Up")
            msg1.setText("Registered successfully.\nNow you can log in!")
msg1.setIcon(QMessageBox.Information)
x=msg1.exec()
if __name__ == "__main__":
import sys
app = QtWidgets.QApplication(sys.argv)
Dialog = QtWidgets.QDialog()
ui = Ui_Dialog2()
ui.setupUi(Dialog)
Dialog.show()
sys.exit(app.exec_())
|
StarcoderdataPython
|
8131122
|
<reponame>grahamguthrie99/ai-dfs-dff-scraper
import connexion
import six
from datetime import date, datetime
import pytz
from swagger_server.models.player_list import PlayerList # noqa: E501
from swagger_server.models.dff_scraper import DFFScraper
from swagger_server import util
def get_date(): # noqa: E501
"""Get contest date
Get contest date # noqa: E501
:rtype: str
"""
tz_NY = pytz.timezone('America/New_York')
datetime_NY = datetime.now(tz_NY)
return datetime_NY.strftime('%Y-%m-%d')
def get_player_list(provider, platform, sport, _date): # noqa: E501
"""Get list of daily fantasy players for a specified sport, platform and slate
Get list of valid players # noqa: E501
    :param provider: Daily fantasy sports data provider
:type provider: str
    :param platform: Daily fantasy sports contest website
:type platform: str
:param sport: Supported sport
:type sport: str
:param _date: Date
:type _date: str
:rtype: PlayerList
"""
return DFFScraper(sport, platform, _date).scrape()
def get_supported_platforms(): # noqa: E501
"""Get list of supported contest platforms
Get list of supported daily fantasy contest platforms # noqa: E501
:rtype: List[str]
"""
return ["Draftkings", "Fanduel"]
def get_supported_providers(): # noqa: E501
"""Get list of supported data providers
Get list of supported daily fantasy data providers # noqa: E501
:rtype: List[str]
"""
return ["Daily Fantasy Fuel"]
def get_supported_sports(): # noqa: E501
"""Get list of supported sports
Get list of supported sport codes # noqa: E501
:rtype: List[str]
"""
return ["MLB", "NBA"]
|
StarcoderdataPython
|
5035091
|
<filename>malaria24/ona/tests/base.py
import pkg_resources
import random
from datetime import datetime
import pytz
import responses
from django.test import TestCase, override_settings
from django.utils import timezone
from malaria24.ona.models import (
ReportedCase, Actor, EHP, CASE_INVESTIGATOR, MIS, Facility)
@override_settings(CELERY_ALWAYS_EAGER=True)
class MalariaTestCase(TestCase):
def setUp(self):
responses.add(
responses.PUT,
('http://go.vumi.org/api/v1/go/http_api_nostream/'
'VUMI_GO_CONVERSATION_KEY/messages.json'),
status=200, content_type='application/json',
body=pkg_resources.resource_string(
'malaria24', 'ona/fixtures/responses/send_sms.json'))
def mk_random_date(self):
random_year = random.choice(range(1950, timezone.now().year))
random_month = random.choice(range(1, 13))
random_day = random.choice(range(1, 29))
return datetime(random_year,
random_month, random_day).strftime("%y%m%d")
def mk_actor(self, **kwargs):
defaults = {
'name': 'name',
'email_address': '<EMAIL>',
'phone_number': 'phone_number',
'facility_code': 'facility_code',
}
defaults.update(kwargs)
return Actor.objects.create(**defaults)
def mk_ehp(self, **kwargs):
return self.mk_actor(role=EHP, **kwargs)
def mk_ci(self, **kwargs):
return self.mk_actor(role=CASE_INVESTIGATOR, **kwargs)
def mk_mis(self, **kwargs):
return self.mk_actor(role=MIS, **kwargs)
def mk_facility(self, **kwargs):
return Facility.objects.create(**kwargs)
def mk_create_random_week_range(self):
random_year = timezone.now().year
return (datetime(random_year, 2, random.choice(range(3, 8)), 11, 30,
30, 0, pytz.timezone('US/Pacific'))
.strftime('%Y-%m-%d %H:%M:%S.%f%z'))
def mk_create_date(self):
random_year = timezone.now().year
e_month = random.choice(range(1, 13))
e_day = random.choice(range(1, 29))
e_hour = random.choice(range(0, 24))
e_minute = random.choice(range(0, 60))
e_second = random.choice(range(0, 60))
return (datetime(random_year, e_month, e_day, e_hour, e_minute,
e_second, 0, pytz.timezone('US/Pacific'))
.strftime('%Y-%m-%d %H:%M:%S.%f%z'))
    '''To test with different dates, change create_date_time
    to self.mk_create_random_week_range()'''
def mk_case(self, **kwargs):
defaults = {
'first_name': 'first_name',
'last_name': 'last_name',
'locality': 'locality',
'date_of_birth': self.mk_random_date(),
'create_date_time': timezone.now(),
'sa_id_number': 'sa_id_number',
'msisdn': 'msisdn',
'id_type': 'id_type',
'abroad': 'abroad',
'reported_by': 'reported_by',
'gender': 'gender',
'facility_code': 'facility_code',
'landmark': 'landmark',
'landmark_description': 'landmark_description',
'_id': '_id',
'_uuid': '_uuid',
'_xform_id_string': '_xform_id_string',
'digest': None,
}
defaults.update(kwargs)
return ReportedCase.objects.create(**defaults)
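    # Sketch of typical use in a test (values are illustrative): any default above can
    # be overridden through keyword arguments, e.g.
    #   case = self.mk_case(first_name='Jane', gender='female')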
|
StarcoderdataPython
|
3590154
|
"""Tests for functions generating random linear systems."""
import numpy as np
import pytest
import scipy.stats
from probnum import randvars
from probnum.problems.zoo.linalg import random_linear_system, random_spd_matrix
def test_custom_random_matrix(rng: np.random.Generator):
random_unitary_matrix = lambda rng, dim: scipy.stats.unitary_group.rvs(
dim=dim, random_state=rng
)
_ = random_linear_system(rng, random_unitary_matrix, dim=5)
def test_custom_solution_randvar(rng: np.random.Generator):
n = 5
x = randvars.Normal(mean=np.ones(n), cov=np.eye(n))
_ = random_linear_system(rng=rng, matrix=random_spd_matrix, solution_rv=x, dim=n)
def test_incompatible_matrix_and_solution(rng: np.random.Generator):
with pytest.raises(ValueError):
_ = random_linear_system(
rng=rng,
matrix=random_spd_matrix,
solution_rv=randvars.Normal(np.ones(2), np.eye(2)),
dim=5,
)
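# The tests above rely on an `rng` fixture that is not defined in this module; a
# conftest.py along these lines is assumed (sketch only, names may differ upstream):
#
# @pytest.fixture
# def rng() -> np.random.Generator:
#     return np.random.default_rng(seed=42)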
|
StarcoderdataPython
|
107011
|
from pickle import load
from numpy import array
from numpy import argmax
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
from keras.models import load_model
from nltk.translate.bleu_score import corpus_bleu
import sys
import pika
import os
import urllib.parse
# Parse CLOUDAMQP_URL (fallback to localhost)
url_str = os.environ.get('CLOUDAMQP_URL', 'amqp://guest:guest@localhost//')
url = urllib.parse.urlparse(url_str)
params = pika.ConnectionParameters(host=url.hostname, virtual_host=url.path[1:],
credentials=pika.PlainCredentials(url.username, url.password))
connection = pika.BlockingConnection(params) # Connect to CloudAMQP
#connection = pika.BlockingConnection(pika.ConnectionParameters(host='localhost'))
#connection = pika.BlockingConnection(pika.ConnectionParameters('127.0.0.1'))
channel = connection.channel()
channel.queue_declare(queue='rpc_queue')
# load a clean dataset
def load_clean_sentences(filename):
return load(open(filename, 'rb'))
# fit a tokenizer
def create_tokenizer(lines):
tokenizer = Tokenizer(char_level=False)
tokenizer.fit_on_texts(lines)
return tokenizer
# max sentence length
def max_length(lines):
return max(len(line.split()) for line in lines)
# map an integer to a word
def word_for_id(integer, tokenizer):
for word, index in tokenizer.word_index.items():
if index == integer:
return word
return None
# generate target given source sequence
def predict_sequence(model, tokenizer, source):
prediction = model.predict(source, verbose=0)[0]
integers = [argmax(vector) for vector in prediction]
target = list()
for i in integers:
word = word_for_id(i, tokenizer)
if word is None:
break
target.append(word)
return ' '.join(target)
# translate: answer with the translation of the first encoded source sequence
def translate(model, tokenizer, sources):
    for source in sources:
        # translate encoded source text
        source = source.reshape((1, source.shape[0]))
        translation = predict_sequence(model, tokenizer, source)
        return {'ANSWER': translation}
# load datasets
dataset = load_clean_sentences('both.pkl')
dataset1=dataset.reshape(-1,1)
# prepare tokenizer
all_tokenizer = create_tokenizer(dataset1[:,0])
all_vocab_size = len(all_tokenizer.word_index) + 1
all_length = max_length(dataset1[:, 0])
# load model
model = load_model('model1.h5')
# Setting up the chat
#question = str(sys.argv[1])
#print('arg: %s' % (q))
#question = question.strip().split('\n')
#we tokenize
#X = all_tokenizer.texts_to_sequences(question)
#X = pad_sequences(X, maxlen=all_length, padding='post')
# find reply and print it out
#translate(model, all_tokenizer, X)
def on_request(ch, method, props, body):
question = body.decode("utf-8")
print(" [.] question(%s)" % question)
question = (question.strip().split('\n'))
X = all_tokenizer.texts_to_sequences(question)
X = pad_sequences(X, maxlen=all_length, padding='post')
#response = fib(n)
response = translate(model, all_tokenizer, X)
ch.basic_publish(exchange='',
routing_key=props.reply_to,
properties=pika.BasicProperties(correlation_id = \
props.correlation_id),
body=str(response))
ch.basic_ack(delivery_tag = method.delivery_tag)
channel.basic_qos(prefetch_count=1)
channel.basic_consume(on_request, queue='rpc_queue')
print(" [x] Awaiting RPC requests")
channel.start_consuming()
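# A matching RPC client would follow the usual pika request/reply pattern (sketch only,
# assuming the same pre-1.0 pika API used above and the 'rpc_queue' declared here):
#
# import uuid
# corr_id = str(uuid.uuid4())
# result = channel.queue_declare(exclusive=True)
# callback_queue = result.method.queue
# channel.basic_publish(exchange='',
#                       routing_key='rpc_queue',
#                       properties=pika.BasicProperties(reply_to=callback_queue,
#                                                       correlation_id=corr_id),
#                       body='hello there')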
|
StarcoderdataPython
|
216689
|
#! /usr/bin/python
HOME_PATH = './'
CACHE_PATH = '/var/cache/obmc/'
FLASH_DOWNLOAD_PATH = "/tmp"
GPIO_BASE = 320
SYSTEM_NAME = "Garrison"
## System states
## state can change to next state in 2 ways:
## - a process emits a GotoSystemState signal with state name to goto
## - objects specified in EXIT_STATE_DEPEND have started
SYSTEM_STATES = [
'BASE_APPS',
'BMC_STARTING',
'BMC_READY',
'HOST_POWERING_ON',
'HOST_POWERED_ON',
'HOST_BOOTING',
'HOST_BOOTED',
'HOST_POWERED_OFF',
]
EXIT_STATE_DEPEND = {
'BASE_APPS' : {
'/org/openbmc/sensors': 0,
},
'BMC_STARTING' : {
'/org/openbmc/control/chassis0': 0,
'/org/openbmc/control/power0' : 0,
'/org/openbmc/control/host0' : 0,
'/org/openbmc/control/flash/bios' : 0,
},
}
## method will be called when state is entered
ENTER_STATE_CALLBACK = {
'HOST_POWERED_ON' : {
'boot' : {
'bus_name' : 'org.openbmc.control.Host',
'obj_name' : '/org/openbmc/control/host0',
'interface_name' : 'org.openbmc.control.Host',
},
},
'HOST_POWERED_OFF' : {
'setOff' : {
'bus_name' : 'org.openbmc.control.led',
'obj_name' : '/org/openbmc/control/led/identify',
'interface_name' : 'org.openbmc.Led',
}
},
'BMC_READY' : {
'setOn' : {
'bus_name' : 'org.openbmc.control.led',
'obj_name' : '/org/openbmc/control/led/beep',
'interface_name' : 'org.openbmc.Led',
},
'init' : {
'bus_name' : 'org.openbmc.control.Flash',
'obj_name' : '/org/openbmc/control/flash/bios',
'interface_name' : 'org.openbmc.Flash',
}
}
}
APPS = {
'startup_hacks' : {
'system_state' : 'BASE_APPS',
'start_process' : True,
'monitor_process' : False,
'process_name' : 'startup_hacks.sh',
},
'inventory' : {
'system_state' : 'BMC_STARTING',
'start_process' : True,
'monitor_process' : True,
'process_name' : 'inventory_items.py',
'args' : [ SYSTEM_NAME ]
},
'hwmon' : {
'system_state' : 'BMC_STARTING',
'start_process' : True,
'monitor_process' : True,
'process_name' : 'hwmon.py',
'args' : [ SYSTEM_NAME ]
},
'sensor_manager' : {
'system_state' : 'BASE_APPS',
'start_process' : True,
'monitor_process' : True,
'process_name' : 'sensor_manager2.py',
'args' : [ SYSTEM_NAME ]
},
'host_watchdog' : {
'system_state' : 'BMC_STARTING',
'start_process' : True,
'monitor_process' : True,
'process_name' : 'host_watchdog.exe',
},
'power_control' : {
'system_state' : 'BMC_STARTING',
'start_process' : True,
'monitor_process' : True,
'process_name' : 'power_control.exe',
'args' : [ '3000', '10' ]
},
'power_button' : {
'system_state' : 'BMC_STARTING',
'start_process' : True,
'monitor_process' : True,
'process_name' : 'button_power.exe',
},
'reset_button' : {
'system_state' : 'BMC_STARTING',
'start_process' : True,
'monitor_process' : True,
'process_name' : 'button_reset.exe',
},
'led_control' : {
'system_state' : 'BMC_STARTING',
'start_process' : True,
'monitor_process' : True,
'process_name' : 'led_controller.exe',
},
'flash_control' : {
'system_state' : 'BMC_STARTING',
'start_process' : True,
'monitor_process' : True,
'process_name' : 'flash_bios.exe',
},
'bmc_flash_control' : {
'system_state' : 'BMC_STARTING',
'start_process' : True,
'monitor_process' : True,
'process_name' : 'bmc_update.py',
},
'download_manager' : {
'system_state' : 'BMC_STARTING',
'start_process' : True,
'monitor_process' : True,
'process_name' : 'download_manager.py',
'args' : [ SYSTEM_NAME ]
},
'host_control' : {
'system_state' : 'BMC_STARTING',
'start_process' : True,
'monitor_process' : True,
'process_name' : 'control_host.exe',
},
'chassis_control' : {
'system_state' : 'BMC_STARTING',
'start_process' : True,
'monitor_process' : True,
'process_name' : 'chassis_control.py',
},
'restore' : {
'system_state' : 'BMC_READY',
'start_process' : True,
'monitor_process' : False,
'process_name' : 'discover_system_state.py',
},
'bmc_control' : {
'system_state' : 'BMC_STARTING',
'start_process' : True,
'monitor_process' : True,
'process_name' : 'control_bmc.exe',
},
}
CACHED_INTERFACES = {
"org.openbmc.InventoryItem" : True,
"org.openbmc.control.Chassis" : True,
}
INVENTORY_ROOT = '/org/openbmc/inventory'
FRU_INSTANCES = {
'<inventory_root>/system' : { 'fru_type' : 'SYSTEM','is_fru' : True, 'present' : "True" },
'<inventory_root>/system/bios' : { 'fru_type' : 'SYSTEM','is_fru' : True, 'present' : "True" },
'<inventory_root>/system/misc' : { 'fru_type' : 'SYSTEM','is_fru' : False, },
'<inventory_root>/system/chassis' : { 'fru_type' : 'SYSTEM','is_fru' : True, 'present' : "True" },
'<inventory_root>/system/chassis/motherboard' : { 'fru_type' : 'MAIN_PLANAR','is_fru' : True, },
'<inventory_root>/system/systemevent' : { 'fru_type' : 'SYSTEM_EVENT', 'is_fru' : False, },
'<inventory_root>/system/chassis/motherboard/refclock' : { 'fru_type' : 'MAIN_PLANAR', 'is_fru' : False, },
'<inventory_root>/system/chassis/motherboard/pcieclock': { 'fru_type' : 'MAIN_PLANAR', 'is_fru' : False, },
'<inventory_root>/system/chassis/motherboard/todclock' : { 'fru_type' : 'MAIN_PLANAR', 'is_fru' : False, },
'<inventory_root>/system/chassis/motherboard/apss' : { 'fru_type' : 'MAIN_PLANAR', 'is_fru' : False, },
'<inventory_root>/system/chassis/fan0' : { 'fru_type' : 'FAN','is_fru' : True, },
'<inventory_root>/system/chassis/fan1' : { 'fru_type' : 'FAN','is_fru' : True, },
'<inventory_root>/system/chassis/fan2' : { 'fru_type' : 'FAN','is_fru' : True, },
'<inventory_root>/system/chassis/fan3' : { 'fru_type' : 'FAN','is_fru' : True, },
'<inventory_root>/system/chassis/motherboard/bmc' : { 'fru_type' : 'BMC','is_fru' : False, 'manufacturer' : 'ASPEED' },
'<inventory_root>/system/chassis/motherboard/cpu0' : { 'fru_type' : 'CPU', 'is_fru' : True, },
'<inventory_root>/system/chassis/motherboard/cpu1' : { 'fru_type' : 'CPU', 'is_fru' : True, },
'<inventory_root>/system/chassis/motherboard/cpu0/core0' : { 'fru_type' : 'CORE', 'is_fru' : False, },
'<inventory_root>/system/chassis/motherboard/cpu0/core1' : { 'fru_type' : 'CORE', 'is_fru' : False, },
'<inventory_root>/system/chassis/motherboard/cpu0/core2' : { 'fru_type' : 'CORE', 'is_fru' : False, },
'<inventory_root>/system/chassis/motherboard/cpu0/core3' : { 'fru_type' : 'CORE', 'is_fru' : False, },
'<inventory_root>/system/chassis/motherboard/cpu0/core4' : { 'fru_type' : 'CORE', 'is_fru' : False, },
'<inventory_root>/system/chassis/motherboard/cpu0/core5' : { 'fru_type' : 'CORE', 'is_fru' : False, },
'<inventory_root>/system/chassis/motherboard/cpu0/core6' : { 'fru_type' : 'CORE', 'is_fru' : False, },
'<inventory_root>/system/chassis/motherboard/cpu0/core7' : { 'fru_type' : 'CORE', 'is_fru' : False, },
'<inventory_root>/system/chassis/motherboard/cpu0/core8' : { 'fru_type' : 'CORE', 'is_fru' : False, },
'<inventory_root>/system/chassis/motherboard/cpu0/core9' : { 'fru_type' : 'CORE', 'is_fru' : False, },
'<inventory_root>/system/chassis/motherboard/cpu0/core10': { 'fru_type' : 'CORE', 'is_fru' : False, },
'<inventory_root>/system/chassis/motherboard/cpu0/core11': { 'fru_type' : 'CORE', 'is_fru' : False, },
'<inventory_root>/system/chassis/motherboard/cpu1/core0' : { 'fru_type' : 'CORE', 'is_fru' : False, },
'<inventory_root>/system/chassis/motherboard/cpu1/core1' : { 'fru_type' : 'CORE', 'is_fru' : False, },
'<inventory_root>/system/chassis/motherboard/cpu1/core2' : { 'fru_type' : 'CORE', 'is_fru' : False, },
'<inventory_root>/system/chassis/motherboard/cpu1/core3' : { 'fru_type' : 'CORE', 'is_fru' : False, },
'<inventory_root>/system/chassis/motherboard/cpu1/core4' : { 'fru_type' : 'CORE', 'is_fru' : False, },
'<inventory_root>/system/chassis/motherboard/cpu1/core5' : { 'fru_type' : 'CORE', 'is_fru' : False, },
'<inventory_root>/system/chassis/motherboard/cpu1/core6' : { 'fru_type' : 'CORE', 'is_fru' : False, },
'<inventory_root>/system/chassis/motherboard/cpu1/core7' : { 'fru_type' : 'CORE', 'is_fru' : False, },
'<inventory_root>/system/chassis/motherboard/cpu1/core8' : { 'fru_type' : 'CORE', 'is_fru' : False, },
'<inventory_root>/system/chassis/motherboard/cpu1/core9' : { 'fru_type' : 'CORE', 'is_fru' : False, },
'<inventory_root>/system/chassis/motherboard/cpu1/core10' : { 'fru_type' : 'CORE', 'is_fru' : False, },
'<inventory_root>/system/chassis/motherboard/cpu1/core11' : { 'fru_type' : 'CORE', 'is_fru' : False, },
'<inventory_root>/system/chassis/motherboard/membuf0' : { 'fru_type' : 'MEMORY_BUFFER', 'is_fru' : False, },
'<inventory_root>/system/chassis/motherboard/membuf1' : { 'fru_type' : 'MEMORY_BUFFER', 'is_fru' : False, },
'<inventory_root>/system/chassis/motherboard/membuf2' : { 'fru_type' : 'MEMORY_BUFFER', 'is_fru' : False, },
'<inventory_root>/system/chassis/motherboard/membuf3' : { 'fru_type' : 'MEMORY_BUFFER', 'is_fru' : False, },
'<inventory_root>/system/chassis/motherboard/membuf4' : { 'fru_type' : 'MEMORY_BUFFER', 'is_fru' : False, },
'<inventory_root>/system/chassis/motherboard/membuf5' : { 'fru_type' : 'MEMORY_BUFFER', 'is_fru' : False, },
'<inventory_root>/system/chassis/motherboard/membuf6' : { 'fru_type' : 'MEMORY_BUFFER', 'is_fru' : False, },
'<inventory_root>/system/chassis/motherboard/membuf7' : { 'fru_type' : 'MEMORY_BUFFER', 'is_fru' : False, },
'<inventory_root>/system/chassis/motherboard/dimm0' : { 'fru_type' : 'DIMM', 'is_fru' : True,},
'<inventory_root>/system/chassis/motherboard/dimm1' : { 'fru_type' : 'DIMM', 'is_fru' : True,},
'<inventory_root>/system/chassis/motherboard/dimm2' : { 'fru_type' : 'DIMM', 'is_fru' : True,},
'<inventory_root>/system/chassis/motherboard/dimm3' : { 'fru_type' : 'DIMM', 'is_fru' : True,},
'<inventory_root>/system/chassis/motherboard/dimm4' : { 'fru_type' : 'DIMM', 'is_fru' : True,},
'<inventory_root>/system/chassis/motherboard/dimm5' : { 'fru_type' : 'DIMM', 'is_fru' : True,},
'<inventory_root>/system/chassis/motherboard/dimm6' : { 'fru_type' : 'DIMM', 'is_fru' : True,},
'<inventory_root>/system/chassis/motherboard/dimm7' : { 'fru_type' : 'DIMM', 'is_fru' : True,},
'<inventory_root>/system/chassis/motherboard/dimm8' : { 'fru_type' : 'DIMM', 'is_fru' : True,},
'<inventory_root>/system/chassis/motherboard/dimm9' : { 'fru_type' : 'DIMM', 'is_fru' : True,},
'<inventory_root>/system/chassis/motherboard/dimm10' : { 'fru_type' : 'DIMM', 'is_fru' : True,},
'<inventory_root>/system/chassis/motherboard/dimm11' : { 'fru_type' : 'DIMM', 'is_fru' : True,},
'<inventory_root>/system/chassis/motherboard/dimm12' : { 'fru_type' : 'DIMM', 'is_fru' : True,},
'<inventory_root>/system/chassis/motherboard/dimm13' : { 'fru_type' : 'DIMM', 'is_fru' : True,},
'<inventory_root>/system/chassis/motherboard/dimm14' : { 'fru_type' : 'DIMM', 'is_fru' : True,},
'<inventory_root>/system/chassis/motherboard/dimm15' : { 'fru_type' : 'DIMM', 'is_fru' : True,},
'<inventory_root>/system/chassis/motherboard/dimm16' : { 'fru_type' : 'DIMM', 'is_fru' : True,},
'<inventory_root>/system/chassis/motherboard/dimm17' : { 'fru_type' : 'DIMM', 'is_fru' : True,},
'<inventory_root>/system/chassis/motherboard/dimm18' : { 'fru_type' : 'DIMM', 'is_fru' : True,},
'<inventory_root>/system/chassis/motherboard/dimm19' : { 'fru_type' : 'DIMM', 'is_fru' : True,},
'<inventory_root>/system/chassis/motherboard/dimm20' : { 'fru_type' : 'DIMM', 'is_fru' : True,},
'<inventory_root>/system/chassis/motherboard/dimm21' : { 'fru_type' : 'DIMM', 'is_fru' : True,},
'<inventory_root>/system/chassis/motherboard/dimm22' : { 'fru_type' : 'DIMM', 'is_fru' : True,},
'<inventory_root>/system/chassis/motherboard/dimm23' : { 'fru_type' : 'DIMM', 'is_fru' : True,},
'<inventory_root>/system/chassis/motherboard/dimm24' : { 'fru_type' : 'DIMM', 'is_fru' : True,},
'<inventory_root>/system/chassis/motherboard/dimm25' : { 'fru_type' : 'DIMM', 'is_fru' : True,},
'<inventory_root>/system/chassis/motherboard/dimm26' : { 'fru_type' : 'DIMM', 'is_fru' : True,},
'<inventory_root>/system/chassis/motherboard/dimm27' : { 'fru_type' : 'DIMM', 'is_fru' : True,},
'<inventory_root>/system/chassis/motherboard/dimm28' : { 'fru_type' : 'DIMM', 'is_fru' : True,},
'<inventory_root>/system/chassis/motherboard/dimm29' : { 'fru_type' : 'DIMM', 'is_fru' : True,},
'<inventory_root>/system/chassis/motherboard/dimm30' : { 'fru_type' : 'DIMM', 'is_fru' : True,},
'<inventory_root>/system/chassis/motherboard/dimm31' : { 'fru_type' : 'DIMM', 'is_fru' : True,},
}
ID_LOOKUP = {
'FRU' : {
0x01 : '<inventory_root>/system/chassis/motherboard/cpu0',
0x02 : '<inventory_root>/system/chassis/motherboard/cpu1',
0x03 : '<inventory_root>/system/chassis/motherboard',
0x04 : '<inventory_root>/system/chassis/motherboard/membuf0',
0x05 : '<inventory_root>/system/chassis/motherboard/membuf1',
0x06 : '<inventory_root>/system/chassis/motherboard/membuf2',
0x07 : '<inventory_root>/system/chassis/motherboard/membuf3',
0x08 : '<inventory_root>/system/chassis/motherboard/membuf4',
0x09 : '<inventory_root>/system/chassis/motherboard/membuf5',
0x0c : '<inventory_root>/system/chassis/motherboard/dimm0',
0x0d : '<inventory_root>/system/chassis/motherboard/dimm1',
0x0e : '<inventory_root>/system/chassis/motherboard/dimm2',
0x0f : '<inventory_root>/system/chassis/motherboard/dimm3',
0x10 : '<inventory_root>/system/chassis/motherboard/dimm4',
0x11 : '<inventory_root>/system/chassis/motherboard/dimm5',
0x12 : '<inventory_root>/system/chassis/motherboard/dimm6',
0x13 : '<inventory_root>/system/chassis/motherboard/dimm7',
0x14 : '<inventory_root>/system/chassis/motherboard/dimm8',
0x15 : '<inventory_root>/system/chassis/motherboard/dimm9',
0x16 : '<inventory_root>/system/chassis/motherboard/dimm10',
0x17 : '<inventory_root>/system/chassis/motherboard/dimm11',
0x18 : '<inventory_root>/system/chassis/motherboard/dimm12',
0x19 : '<inventory_root>/system/chassis/motherboard/dimm13',
0x1a : '<inventory_root>/system/chassis/motherboard/dimm14',
0x1b : '<inventory_root>/system/chassis/motherboard/dimm15',
0x1c : '<inventory_root>/system/chassis/motherboard/dimm16',
0x1d : '<inventory_root>/system/chassis/motherboard/dimm17',
0x1e : '<inventory_root>/system/chassis/motherboard/dimm18',
0x1f : '<inventory_root>/system/chassis/motherboard/dimm19',
0x20 : '<inventory_root>/system/chassis/motherboard/dimm20',
0x21 : '<inventory_root>/system/chassis/motherboard/dimm21',
0x22 : '<inventory_root>/system/chassis/motherboard/dimm22',
0x23 : '<inventory_root>/system/chassis/motherboard/dimm23',
0x24 : '<inventory_root>/system/chassis/motherboard/dimm24',
0x25 : '<inventory_root>/system/chassis/motherboard/dimm25',
0x26 : '<inventory_root>/system/chassis/motherboard/dimm26',
0x27 : '<inventory_root>/system/chassis/motherboard/dimm27',
0x28 : '<inventory_root>/system/chassis/motherboard/dimm28',
0x29 : '<inventory_root>/system/chassis/motherboard/dimm29',
0x2a : '<inventory_root>/system/chassis/motherboard/dimm30',
0x2b : '<inventory_root>/system/chassis/motherboard/dimm31',
},
'FRU_STR' : {
'PRODUCT_0' : '<inventory_root>/system/bios',
'BOARD_1' : '<inventory_root>/system/chassis/motherboard/cpu0',
'BOARD_2' : '<inventory_root>/system/chassis/motherboard/cpu1',
'CHASSIS_3' : '<inventory_root>/system/chassis/motherboard',
'BOARD_3' : '<inventory_root>/system/misc',
'BOARD_4' : '<inventory_root>/system/chassis/motherboard/membuf0',
'BOARD_5' : '<inventory_root>/system/chassis/motherboard/membuf1',
'BOARD_6' : '<inventory_root>/system/chassis/motherboard/membuf2',
'BOARD_7' : '<inventory_root>/system/chassis/motherboard/membuf3',
'BOARD_8' : '<inventory_root>/system/chassis/motherboard/membuf4',
'BOARD_9' : '<inventory_root>/system/chassis/motherboard/membuf5',
'BOARD_10' : '<inventory_root>/system/chassis/motherboard/membuf6',
'BOARD_11' : '<inventory_root>/system/chassis/motherboard/membuf7',
'PRODUCT_12' : '<inventory_root>/system/chassis/motherboard/dimm0',
'PRODUCT_13' : '<inventory_root>/system/chassis/motherboard/dimm1',
'PRODUCT_14' : '<inventory_root>/system/chassis/motherboard/dimm2',
'PRODUCT_15' : '<inventory_root>/system/chassis/motherboard/dimm3',
'PRODUCT_16' : '<inventory_root>/system/chassis/motherboard/dimm4',
'PRODUCT_17' : '<inventory_root>/system/chassis/motherboard/dimm5',
'PRODUCT_18' : '<inventory_root>/system/chassis/motherboard/dimm6',
'PRODUCT_19' : '<inventory_root>/system/chassis/motherboard/dimm7',
'PRODUCT_20' : '<inventory_root>/system/chassis/motherboard/dimm8',
'PRODUCT_21' : '<inventory_root>/system/chassis/motherboard/dimm9',
'PRODUCT_22' : '<inventory_root>/system/chassis/motherboard/dimm10',
'PRODUCT_23' : '<inventory_root>/system/chassis/motherboard/dimm11',
'PRODUCT_24' : '<inventory_root>/system/chassis/motherboard/dimm12',
'PRODUCT_25' : '<inventory_root>/system/chassis/motherboard/dimm13',
'PRODUCT_26' : '<inventory_root>/system/chassis/motherboard/dimm14',
'PRODUCT_27' : '<inventory_root>/system/chassis/motherboard/dimm15',
'PRODUCT_28' : '<inventory_root>/system/chassis/motherboard/dimm16',
'PRODUCT_29' : '<inventory_root>/system/chassis/motherboard/dimm17',
'PRODUCT_30' : '<inventory_root>/system/chassis/motherboard/dimm18',
'PRODUCT_31' : '<inventory_root>/system/chassis/motherboard/dimm19',
'PRODUCT_32' : '<inventory_root>/system/chassis/motherboard/dimm20',
'PRODUCT_33' : '<inventory_root>/system/chassis/motherboard/dimm21',
'PRODUCT_34' : '<inventory_root>/system/chassis/motherboard/dimm22',
'PRODUCT_35' : '<inventory_root>/system/chassis/motherboard/dimm23',
'PRODUCT_36' : '<inventory_root>/system/chassis/motherboard/dimm24',
'PRODUCT_37' : '<inventory_root>/system/chassis/motherboard/dimm25',
'PRODUCT_38' : '<inventory_root>/system/chassis/motherboard/dimm26',
'PRODUCT_39' : '<inventory_root>/system/chassis/motherboard/dimm27',
'PRODUCT_40' : '<inventory_root>/system/chassis/motherboard/dimm28',
'PRODUCT_41' : '<inventory_root>/system/chassis/motherboard/dimm29',
'PRODUCT_42' : '<inventory_root>/system/chassis/motherboard/dimm30',
'PRODUCT_43' : '<inventory_root>/system/chassis/motherboard/dimm31',
'PRODUCT_47' : '<inventory_root>/system/misc',
},
'SENSOR' : {
0x04 : '/org/openbmc/sensors/host/HostStatus',
0x05 : '/org/openbmc/sensors/host/BootProgress',
0x08 : '/org/openbmc/sensors/host/cpu0/OccStatus',
0x09 : '/org/openbmc/sensors/host/cpu1/OccStatus',
0x0c : '<inventory_root>/system/chassis/motherboard/cpu0',
0x0e : '<inventory_root>/system/chassis/motherboard/cpu1',
0x1e : '<inventory_root>/system/chassis/motherboard/dimm3',
0x1f : '<inventory_root>/system/chassis/motherboard/dimm2',
0x20 : '<inventory_root>/system/chassis/motherboard/dimm1',
0x21 : '<inventory_root>/system/chassis/motherboard/dimm0',
0x22 : '<inventory_root>/system/chassis/motherboard/dimm7',
0x23 : '<inventory_root>/system/chassis/motherboard/dimm6',
0x24 : '<inventory_root>/system/chassis/motherboard/dimm5',
0x25 : '<inventory_root>/system/chassis/motherboard/dimm4',
0x26 : '<inventory_root>/system/chassis/motherboard/dimm11',
0x27 : '<inventory_root>/system/chassis/motherboard/dimm10',
0x28 : '<inventory_root>/system/chassis/motherboard/dimm9',
0x29 : '<inventory_root>/system/chassis/motherboard/dimm8',
0x2a : '<inventory_root>/system/chassis/motherboard/dimm15',
0x2b : '<inventory_root>/system/chassis/motherboard/dimm14',
0x2c : '<inventory_root>/system/chassis/motherboard/dimm13',
0x2d : '<inventory_root>/system/chassis/motherboard/dimm12',
0x2e : '<inventory_root>/system/chassis/motherboard/dimm19',
0x2f : '<inventory_root>/system/chassis/motherboard/dimm18',
0x30 : '<inventory_root>/system/chassis/motherboard/dimm17',
0x31 : '<inventory_root>/system/chassis/motherboard/dimm16',
0x32 : '<inventory_root>/system/chassis/motherboard/dimm23',
0x33 : '<inventory_root>/system/chassis/motherboard/dimm22',
0x34 : '<inventory_root>/system/chassis/motherboard/dimm21',
0x35 : '<inventory_root>/system/chassis/motherboard/dimm20',
0x36 : '<inventory_root>/system/chassis/motherboard/dimm27',
0x37 : '<inventory_root>/system/chassis/motherboard/dimm26',
0x38 : '<inventory_root>/system/chassis/motherboard/dimm25',
0x39 : '<inventory_root>/system/chassis/motherboard/dimm24',
0x3a : '<inventory_root>/system/chassis/motherboard/dimm31',
0x3b : '<inventory_root>/system/chassis/motherboard/dimm30',
0x3c : '<inventory_root>/system/chassis/motherboard/dimm29',
0x3d : '<inventory_root>/system/chassis/motherboard/dimm28',
0x3e : '<inventory_root>/system/chassis/motherboard/cpu0/core0',
0x3f : '<inventory_root>/system/chassis/motherboard/cpu0/core1',
0x40 : '<inventory_root>/system/chassis/motherboard/cpu0/core2',
0x41 : '<inventory_root>/system/chassis/motherboard/cpu0/core3',
0x42 : '<inventory_root>/system/chassis/motherboard/cpu0/core4',
0x43 : '<inventory_root>/system/chassis/motherboard/cpu0/core5',
0x44 : '<inventory_root>/system/chassis/motherboard/cpu0/core6',
0x45 : '<inventory_root>/system/chassis/motherboard/cpu0/core7',
0x46 : '<inventory_root>/system/chassis/motherboard/cpu0/core8',
0x47 : '<inventory_root>/system/chassis/motherboard/cpu0/core9',
0x48 : '<inventory_root>/system/chassis/motherboard/cpu0/core10',
0x49 : '<inventory_root>/system/chassis/motherboard/cpu0/core11',
0x4a : '<inventory_root>/system/chassis/motherboard/cpu1/core0',
0x4b : '<inventory_root>/system/chassis/motherboard/cpu1/core1',
0x4c : '<inventory_root>/system/chassis/motherboard/cpu1/core2',
0x4d : '<inventory_root>/system/chassis/motherboard/cpu1/core3',
0x4e : '<inventory_root>/system/chassis/motherboard/cpu1/core4',
0x4f : '<inventory_root>/system/chassis/motherboard/cpu1/core5',
0x50 : '<inventory_root>/system/chassis/motherboard/cpu1/core6',
0x51 : '<inventory_root>/system/chassis/motherboard/cpu1/core7',
0x52 : '<inventory_root>/system/chassis/motherboard/cpu1/core8',
0x53 : '<inventory_root>/system/chassis/motherboard/cpu1/core9',
0x54 : '<inventory_root>/system/chassis/motherboard/cpu1/core10',
0x55 : '<inventory_root>/system/chassis/motherboard/cpu1/core11',
0x56 : '<inventory_root>/system/chassis/motherboard/membuf0',
0x57 : '<inventory_root>/system/chassis/motherboard/membuf1',
0x58 : '<inventory_root>/system/chassis/motherboard/membuf2',
0x59 : '<inventory_root>/system/chassis/motherboard/membuf3',
0x5a : '<inventory_root>/system/chassis/motherboard/membuf4',
0x5b : '<inventory_root>/system/chassis/motherboard/membuf5',
0x5c : '<inventory_root>/system/chassis/motherboard/membuf6',
0x5d : '<inventory_root>/system/chassis/motherboard/membuf7',
0x5f : '/org/openbmc/sensors/host/BootCount',
0x60 : '<inventory_root>/system/chassis/motherboard',
0x61 : '<inventory_root>/system/systemevent',
0x62 : '<inventory_root>/system/powerlimit',
0x63 : '<inventory_root>/system/chassis/motherboard/refclock',
0x64 : '<inventory_root>/system/chassis/motherboard/pcieclock',
0xb1 : '<inventory_root>/system/chassis/motherboard/todclock',
0xb2 : '<inventory_root>/system/chassis/motherboard/apss',
0xb3 : '/org/openbmc/sensors/host/powercap',
0xb5 : '/org/openbmc/sensors/host/OperatingSystemStatus',
0xb6 : '<inventory_root>/system/chassis/motherboard/pcielink',
},
'GPIO_PRESENT' : {}
}
GPIO_CONFIG = {}
GPIO_CONFIG['BMC_POWER_UP'] = \
{'gpio_pin': 'D1', 'direction': 'out'}
GPIO_CONFIG['SYS_PWROK_BUFF'] = \
{'gpio_pin': 'D2', 'direction': 'in'}
GPIO_CONFIG['BMC_WD_CLEAR_PULSE_N'] = \
{'gpio_pin': 'N4', 'direction': 'out'}
GPIO_CONFIG['CM1_OE_R_N'] = \
{'gpio_pin': 'Q6', 'direction': 'out'}
GPIO_CONFIG['BMC_CP0_RESET_N'] = \
{'gpio_pin': 'O2', 'direction': 'out'}
GPIO_CONFIG['BMC_CFAM_RESET_N_R'] = \
{'gpio_pin': 'J2', 'direction': 'out'}
GPIO_CONFIG['PEX8718_DEVICES_RESET_N'] = \
{'gpio_pin': 'B6', 'direction': 'out'}
GPIO_CONFIG['CP0_DEVICES_RESET_N'] = \
{'gpio_pin': 'N3', 'direction': 'out'}
GPIO_CONFIG['CP1_DEVICES_RESET_N'] = \
{'gpio_pin': 'N5', 'direction': 'out'}
GPIO_CONFIG['FSI_DATA'] = \
{'gpio_pin': 'A5', 'direction': 'out'}
GPIO_CONFIG['FSI_CLK'] = \
{'gpio_pin': 'A4', 'direction': 'out'}
GPIO_CONFIG['FSI_ENABLE'] = \
{'gpio_pin': 'D0', 'direction': 'out'}
GPIO_CONFIG['CRONUS_SEL'] = \
{'gpio_pin': 'A6', 'direction': 'out'}
GPIO_CONFIG['BMC_THROTTLE'] = \
{'gpio_pin': 'J3', 'direction': 'out'}
GPIO_CONFIG['IDBTN'] = \
{ 'gpio_pin': 'Q7', 'direction': 'out' }
GPIO_CONFIG['POWER_BUTTON'] = \
{'gpio_pin': 'E0', 'direction': 'both'}
GPIO_CONFIG['RESET_BUTTON'] = \
{'gpio_pin': 'E4', 'direction': 'both'}
GPIO_CONFIG['PS0_PRES_N'] = \
{'gpio_pin': 'P7', 'direction': 'in'}
GPIO_CONFIG['PS1_PRES_N'] = \
{'gpio_pin': 'N0', 'direction': 'in'}
GPIO_CONFIG['CARD_PRES_N'] = \
{'gpio_pin': 'J0', 'direction': 'in'}
def convertGpio(name):
name = name.upper()
c = name[0:1]
offset = int(name[1:])
a = ord(c)-65
base = a*8+GPIO_BASE
return base+offset
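# Worked example for convertGpio (arithmetic only, assuming GPIO_BASE = 320):
#   convertGpio('D1') -> c='D', a=ord('D')-65=3, base=3*8+320=344, result=345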
HWMON_CONFIG = {
'4-0050' : {
'names' : {
'caps_curr_powercap' : { 'object_path' : 'powercap/curr_cap','poll_interval' : 10000,'scale' : 1,'units' : 'W' },
'caps_curr_powerreading' : { 'object_path' : 'powercap/system_power','poll_interval' : 10000,'scale' : 1,'units' : 'W' },
'caps_max_powercap' : { 'object_path' : 'powercap/max_cap','poll_interval' : 10000,'scale' : 1,'units' : 'W' },
'caps_min_powercap' : { 'object_path' : 'powercap/min_cap','poll_interval' : 10000,'scale' : 1,'units' : 'W' },
'caps_norm_powercap' : { 'object_path' : 'powercap/n_cap','poll_interval' : 10000,'scale' : 1,'units' : 'W' },
'caps_user_powerlimit' : { 'object_path' : 'powercap/user_cap','poll_interval' : 10000,'scale' : 1,'units' : 'W' },
},
'labels' : {
'176' : { 'object_path' : 'temperature/cpu0/core0','poll_interval' : 5000,'scale' : 1000,'units' : 'C',
'critical_upper' : 100, 'critical_lower' : -100, 'warning_upper' : 90, 'warning_lower' : -99, 'emergency_enabled' : True },
'177' : { 'object_path' : 'temperature/cpu0/core1','poll_interval' : 5000,'scale' : 1000,'units' : 'C',
'critical_upper' : 100, 'critical_lower' : -100, 'warning_upper' : 90, 'warning_lower' : -99, 'emergency_enabled' : True },
'178' : { 'object_path' : 'temperature/cpu0/core2','poll_interval' : 5000,'scale' : 1000,'units' : 'C',
'critical_upper' : 100, 'critical_lower' : -100, 'warning_upper' : 90, 'warning_lower' : -99, 'emergency_enabled' : True },
'179' : { 'object_path' : 'temperature/cpu0/core3','poll_interval' : 5000,'scale' : 1000,'units' : 'C',
'critical_upper' : 100, 'critical_lower' : -100, 'warning_upper' : 90, 'warning_lower' : -99, 'emergency_enabled' : True },
'180' : { 'object_path' : 'temperature/cpu0/core4','poll_interval' : 5000,'scale' : 1000,'units' : 'C',
'critical_upper' : 100, 'critical_lower' : -100, 'warning_upper' : 90, 'warning_lower' : -99, 'emergency_enabled' : True },
'181' : { 'object_path' : 'temperature/cpu0/core5','poll_interval' : 5000,'scale' : 1000,'units' : 'C',
'critical_upper' : 100, 'critical_lower' : -100, 'warning_upper' : 90, 'warning_lower' : -99, 'emergency_enabled' : True },
'182' : { 'object_path' : 'temperature/cpu0/core6','poll_interval' : 5000,'scale' : 1000,'units' : 'C',
'critical_upper' : 100, 'critical_lower' : -100, 'warning_upper' : 90, 'warning_lower' : -99, 'emergency_enabled' : True },
'183' : { 'object_path' : 'temperature/cpu0/core7','poll_interval' : 5000,'scale' : 1000,'units' : 'C',
'critical_upper' : 100, 'critical_lower' : -100, 'warning_upper' : 90, 'warning_lower' : -99, 'emergency_enabled' : True },
'184' : { 'object_path' : 'temperature/cpu0/core8','poll_interval' : 5000,'scale' : 1000,'units' : 'C',
'critical_upper' : 100, 'critical_lower' : -100, 'warning_upper' : 90, 'warning_lower' : -99, 'emergency_enabled' : True },
'185' : { 'object_path' : 'temperature/cpu0/core9','poll_interval' : 5000,'scale' : 1000,'units' : 'C',
'critical_upper' : 100, 'critical_lower' : -100, 'warning_upper' : 90, 'warning_lower' : -99, 'emergency_enabled' : True },
'186' : { 'object_path' : 'temperature/cpu0/core10','poll_interval' : 5000,'scale' : 1000,'units' : 'C',
'critical_upper' : 100, 'critical_lower' : -100, 'warning_upper' : 90, 'warning_lower' : -99, 'emergency_enabled' : True },
'187' : { 'object_path' : 'temperature/cpu0/core11','poll_interval' : 5000,'scale' : 1000,'units' : 'C',
'critical_upper' : 100, 'critical_lower' : -100, 'warning_upper' : 90, 'warning_lower' : -99, 'emergency_enabled' : True },
'102' : { 'object_path' : 'temperature/dimm0','poll_interval' : 5000,'scale' : 1000,'units' : 'C' },
'103' : { 'object_path' : 'temperature/dimm1','poll_interval' : 5000,'scale' : 1000,'units' : 'C' },
'104' : { 'object_path' : 'temperature/dimm2','poll_interval' : 5000,'scale' : 1000,'units' : 'C' },
'105' : { 'object_path' : 'temperature/dimm3','poll_interval' : 5000,'scale' : 1000,'units' : 'C' },
'106' : { 'object_path' : 'temperature/dimm4','poll_interval' : 5000,'scale' : 1000,'units' : 'C' },
'107' : { 'object_path' : 'temperature/dimm5','poll_interval' : 5000,'scale' : 1000,'units' : 'C' },
'108' : { 'object_path' : 'temperature/dimm6','poll_interval' : 5000,'scale' : 1000,'units' : 'C' },
'109' : { 'object_path' : 'temperature/dimm7','poll_interval' : 5000,'scale' : 1000,'units' : 'C' },
'110' : { 'object_path' : 'temperature/dimm8','poll_interval' : 5000,'scale' : 1000,'units' : 'C' },
'111' : { 'object_path' : 'temperature/dimm9','poll_interval' : 5000,'scale' : 1000,'units' : 'C' },
'112' : { 'object_path' : 'temperature/dimm10','poll_interval' : 5000,'scale' : 1000,'units' : 'C' },
'113' : { 'object_path' : 'temperature/dimm11','poll_interval' : 5000,'scale' : 1000,'units' : 'C' },
'114' : { 'object_path' : 'temperature/dimm12','poll_interval' : 5000,'scale' : 1000,'units' : 'C' },
'115' : { 'object_path' : 'temperature/dimm13','poll_interval' : 5000,'scale' : 1000,'units' : 'C' },
'116' : { 'object_path' : 'temperature/dimm14','poll_interval' : 5000,'scale' : 1000,'units' : 'C' },
'117' : { 'object_path' : 'temperature/dimm15','poll_interval' : 5000,'scale' : 1000,'units' : 'C' },
'94' : { 'object_path' : 'temperature/membuf0','poll_interval' : 5000,'scale' : 1000,'units' : 'C' },
'95' : { 'object_path' : 'temperature/membuf1','poll_interval' : 5000,'scale' : 1000,'units' : 'C' },
'96' : { 'object_path' : 'temperature/membuf2','poll_interval' : 5000,'scale' : 1000,'units' : 'C' },
'97' : { 'object_path' : 'temperature/membuf3','poll_interval' : 5000,'scale' : 1000,'units' : 'C' },
}
},
'5-0050' : {
'labels' : {
'188' : { 'object_path' : 'temperature/cpu1/core0','poll_interval' : 5000,'scale' : 1000,'units' : 'C',
'critical_upper' : 100, 'critical_lower' : -100, 'warning_upper' : 90, 'warning_lower' : -99, 'emergency_enabled' : True },
'189' : { 'object_path' : 'temperature/cpu1/core1','poll_interval' : 5000,'scale' : 1000,'units' : 'C',
'critical_upper' : 100, 'critical_lower' : -100, 'warning_upper' : 90, 'warning_lower' : -99, 'emergency_enabled' : True },
'190' : { 'object_path' : 'temperature/cpu1/core2','poll_interval' : 5000,'scale' : 1000,'units' : 'C',
'critical_upper' : 100, 'critical_lower' : -100, 'warning_upper' : 90, 'warning_lower' : -99, 'emergency_enabled' : True },
'191' : { 'object_path' : 'temperature/cpu1/core3','poll_interval' : 5000,'scale' : 1000,'units' : 'C',
'critical_upper' : 100, 'critical_lower' : -100, 'warning_upper' : 90, 'warning_lower' : -99, 'emergency_enabled' : True },
'192' : { 'object_path' : 'temperature/cpu1/core4','poll_interval' : 5000,'scale' : 1000,'units' : 'C',
'critical_upper' : 100, 'critical_lower' : -100, 'warning_upper' : 90, 'warning_lower' : -99, 'emergency_enabled' : True },
'193' : { 'object_path' : 'temperature/cpu1/core5','poll_interval' : 5000,'scale' : 1000,'units' : 'C',
'critical_upper' : 100, 'critical_lower' : -100, 'warning_upper' : 90, 'warning_lower' : -99, 'emergency_enabled' : True },
'194' : { 'object_path' : 'temperature/cpu1/core6','poll_interval' : 5000,'scale' : 1000,'units' : 'C',
'critical_upper' : 100, 'critical_lower' : -100, 'warning_upper' : 90, 'warning_lower' : -99, 'emergency_enabled' : True },
'195' : { 'object_path' : 'temperature/cpu1/core7','poll_interval' : 5000,'scale' : 1000,'units' : 'C',
'critical_upper' : 100, 'critical_lower' : -100, 'warning_upper' : 90, 'warning_lower' : -99, 'emergency_enabled' : True },
'196' : { 'object_path' : 'temperature/cpu1/core8','poll_interval' : 5000,'scale' : 1000,'units' : 'C',
'critical_upper' : 100, 'critical_lower' : -100, 'warning_upper' : 90, 'warning_lower' : -99, 'emergency_enabled' : True },
'197' : { 'object_path' : 'temperature/cpu1/core9','poll_interval' : 5000,'scale' : 1000,'units' : 'C',
'critical_upper' : 100, 'critical_lower' : -100, 'warning_upper' : 90, 'warning_lower' : -99, 'emergency_enabled' : True },
'198' : { 'object_path' : 'temperature/cpu1/core10','poll_interval' : 5000,'scale' : 1000,'units' : 'C',
'critical_upper' : 100, 'critical_lower' : -100, 'warning_upper' : 90, 'warning_lower' : -99, 'emergency_enabled' : True },
'199' : { 'object_path' : 'temperature/cpu1/core11','poll_interval' : 5000,'scale' : 1000,'units' : 'C',
'critical_upper' : 100, 'critical_lower' : -100, 'warning_upper' : 90, 'warning_lower' : -99, 'emergency_enabled' : True },
'118' : { 'object_path' : 'temperature/dimm16','poll_interval' : 5000,'scale' : 1000,'units' : 'C' },
'119' : { 'object_path' : 'temperature/dimm17','poll_interval' : 5000,'scale' : 1000,'units' : 'C' },
'120' : { 'object_path' : 'temperature/dimm18','poll_interval' : 5000,'scale' : 1000,'units' : 'C' },
'121' : { 'object_path' : 'temperature/dimm19','poll_interval' : 5000,'scale' : 1000,'units' : 'C' },
'122' : { 'object_path' : 'temperature/dimm20','poll_interval' : 5000,'scale' : 1000,'units' : 'C' },
'123' : { 'object_path' : 'temperature/dimm21','poll_interval' : 5000,'scale' : 1000,'units' : 'C' },
'124' : { 'object_path' : 'temperature/dimm22','poll_interval' : 5000,'scale' : 1000,'units' : 'C' },
'125' : { 'object_path' : 'temperature/dimm23','poll_interval' : 5000,'scale' : 1000,'units' : 'C' },
'126' : { 'object_path' : 'temperature/dimm24','poll_interval' : 5000,'scale' : 1000,'units' : 'C' },
'127' : { 'object_path' : 'temperature/dimm25','poll_interval' : 5000,'scale' : 1000,'units' : 'C' },
'128' : { 'object_path' : 'temperature/dimm26','poll_interval' : 5000,'scale' : 1000,'units' : 'C' },
'129' : { 'object_path' : 'temperature/dimm27','poll_interval' : 5000,'scale' : 1000,'units' : 'C' },
'130' : { 'object_path' : 'temperature/dimm28','poll_interval' : 5000,'scale' : 1000,'units' : 'C' },
'131' : { 'object_path' : 'temperature/dimm29','poll_interval' : 5000,'scale' : 1000,'units' : 'C' },
'132' : { 'object_path' : 'temperature/dimm30','poll_interval' : 5000,'scale' : 1000,'units' : 'C' },
'133' : { 'object_path' : 'temperature/dimm31','poll_interval' : 5000,'scale' : 1000,'units' : 'C' },
'98' : { 'object_path' : 'temperature/membuf4','poll_interval' : 5000,'scale' : 1000,'units' : 'C' },
'99' : { 'object_path' : 'temperature/membuf5','poll_interval' : 5000,'scale' : 1000,'units' : 'C' },
'100' : { 'object_path' : 'temperature/membuf6','poll_interval' : 5000,'scale' : 1000,'units' : 'C' },
'101' : { 'object_path' : 'temperature/membuf7','poll_interval' : 5000,'scale' : 1000,'units' : 'C' },
}
},
}
# Miscellaneous non-poll sensor with system specific properties.
# The sensor id is the same as those defined in ID_LOOKUP['SENSOR'].
MISC_SENSORS = {
0x5f : { 'class' : 'BootCountSensor' },
0x05 : { 'class' : 'BootProgressSensor' },
0x08 : { 'class' : 'OccStatusSensor',
'os_path' : '/sys/class/i2c-adapter/i2c-3/3-0050/online' },
0x09 : { 'class' : 'OccStatusSensor',
'os_path' : '/sys/class/i2c-adapter/i2c-3/3-0051/online' },
0xb5 : { 'class' : 'OperatingSystemStatusSensor' },
0xb3 : { 'class' : 'PowerCap',
'os_path' : '/sys/class/hwmon/hwmon3/user_powercap' },
}
|
StarcoderdataPython
|
1989034
|
"""Question Class for quora question."""
from quora.content import Content
class Question:
def __init__(self, data_dict):
self.id = data_dict.get("id")
self.qid = data_dict.get("qid")
self.url = "https://www.quora.com" + data_dict.get("url")
self.title = Content(data_dict.get("title"))
def __str__(self):
return self.title.__str__()
def __eq__(self, other):
return self.qid == other.qid
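# Minimal usage sketch (the payload below is hypothetical; `title` is handed to
# quora.content.Content exactly as received, whatever shape the API returns):
#
# question = Question({"id": "1", "qid": "42", "url": "/What-is-Python",
#                      "title": {"text": "What is Python?"}})
# print(question)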
|
StarcoderdataPython
|
6687612
|
<gh_stars>1-10
"""
Compares the tables generated by makeMiniAppTable and the
corresponding table from the original mini app.
"""
import csv
import sys
from tr55.tablelookup import lookup_nlcd
# Generate keys from rows that will be used to match rows in the
# old and new table.
# If transform == True, convert values of row so that
# land and soil_types are in the format of the data table
# from the old mini-app
def get_key(row, transform=False):
P = row['P']
land = row['land']
soil = row['soil']
if transform:
land = str(lookup_nlcd(row['land']))
new_soil_types = ['a', 'b', 'c', 'd']
old_soil_types = ['0', '1', '2', '3']
soil_types_map = dict(zip(new_soil_types, old_soil_types))
soil = soil_types_map[row['soil']]
return (P, land, soil)
def isClose(val1, val2):
val1 = round(float(val1), 1)
val2 = round(float(val2), 1)
return val1 == val2
def rowsMatch(old_row, new_row):
return isClose(old_row['ET'], new_row['ET']) and \
isClose(old_row['I'], new_row['I']) and \
isClose(old_row['R'], new_row['R'])
def test_get_key():
old_row = {'land': '71', 'I': '1.6', 'soil': '2',
'P': '2.0', 'R': '0.2', 'ET': '0.1'}
new_row = {'land': 'grassland', 'I': '1.6100773216841155',
'soil': 'c', 'P': '2.0', 'R': '0.2657226783158845',
'ET': '0.12419999999999999'}
assert get_key(old_row) == get_key(new_row, transform=True) \
== ('2.0', '71', '2')
test_get_key()
if len(sys.argv) != 3:
print ('Usage: python -m tr55.compareMiniAppTable ' +
'old_csv_file_name new_csv_file_name')
sys.exit()
else:
old_csv_fn = sys.argv[1]
new_csv_fn = sys.argv[2]
with open(old_csv_fn, 'rb') as old_csv_file:
with open(new_csv_fn, 'rb') as new_csv_file:
old_reader = csv.DictReader(old_csv_file)
new_reader = csv.DictReader(new_csv_file)
old_row_dict = dict([(get_key(old_row), old_row)
for old_row in old_reader])
new_row_dict = dict([(get_key(new_row, transform=True), new_row)
for new_row in new_reader])
num_in_match_rows = 0
num_in_out_match_rows = 0
for key, new_row in new_row_dict.iteritems():
if key in old_row_dict:
old_row = old_row_dict[key]
if rowsMatch(old_row, new_row):
num_in_out_match_rows += 1
else:
print 'output does not match'
print 'old row: ', old_row
print 'new row: ', new_row
print
num_in_match_rows += 1
print '# rows in new table: ', len(new_row_dict)
print '# rows where input matches: ', num_in_match_rows
print '# rows where input and output match: ', num_in_out_match_rows
|
StarcoderdataPython
|
6513090
|
# __init__.py: Yet Another Bayes Net library
# Contact: <NAME> ( <EMAIL> )
"""
For detailed documentation and examples, see the README.
"""
# Make our dependencies explicit so compiled Cython code won't segfault trying
# to load them.
import networkx, matplotlib.pyplot, scipy
import numpy as np
import os
import pyximport
# Adapted from Cython docs https://github.com/cython/cython/wiki/
# InstallingOnWindows#mingw--numpy--pyximport-at-runtime
if os.name == 'nt':
if 'CPATH' in os.environ:
os.environ['CPATH'] = os.environ['CPATH'] + np.get_include()
else:
os.environ['CPATH'] = np.get_include()
# XXX: we're assuming that MinGW is installed in C:\MinGW (default)
if 'PATH' in os.environ:
        os.environ['PATH'] = os.environ['PATH'] + r';C:\MinGW\bin'
else:
        os.environ['PATH'] = r'C:\MinGW\bin'
mingw_setup_args = { 'options': { 'build_ext': { 'compiler': 'mingw32' } } }
pyximport.install(setup_args=mingw_setup_args)
elif os.name == 'posix':
if 'CFLAGS' in os.environ:
os.environ['CFLAGS'] = os.environ['CFLAGS'] + ' -I' + np.get_include()
else:
os.environ['CFLAGS'] = ' -I' + np.get_include()
pyximport.install()
from yabn import *
__version__ = '0.1.0'
|
StarcoderdataPython
|
4899653
|
<filename>Methods/Machine/LamSquirrelCage/build_geometry.py<gh_stars>1-10
# -*- coding: utf-8 -*-
"""@package build_geometry
@date Created on August 10 10:42 2018
@author franco_i
"""
def build_geometry(self, sym=1, alpha=0, delta=0, is_simplified=False):
"""Build geometry of the LamSquirrelCage
Parameters
----------
self :
LamSquirrelCage Object
sym : int
Symmetry factor (1= full machine, 2= half of the machine...)
alpha : float
Angle for rotation [rad]
delta : complex
Complex value for translation
is_simplified: bool
True to avoid line superposition
Returns
-------
list
surf_list: list of surfaces
"""
surf_list = super(type(self), self).build_geometry(
sym=sym, is_simplified=is_simplified, alpha=alpha, delta=delta
)
# Adapt the label
for surf in surf_list:
if "Wind" in surf.label:
surf.label = surf.label.replace("Wind", "Bare")
return surf_list
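# Usage sketch (hypothetical object; mirrors the signature documented above):
# surf_list = squirrel_cage.build_geometry(sym=2, alpha=0, delta=0, is_simplified=True)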
|
StarcoderdataPython
|
11397238
|
# -*- coding: utf-8 -*-
#
# Copyright © 2009 <NAME>
# Licensed under the terms of the MIT License
"""
MatplotlibWidget
================
Example of matplotlib widget for PyQt4
Copyright © 2009 <NAME>
This software is licensed under the terms of the MIT License
Derived from 'embedding_in_pyqt4.py':
Copyright © 2005 <NAME>, 2006 <NAME>
"""
__version__ = "1.0.0"
from PyQt4.QtGui import QSizePolicy
from PyQt4.QtCore import QSize
from matplotlib.backends.backend_qt4agg import FigureCanvasQTAgg as Canvas
from matplotlib.figure import Figure
from mpl_toolkits.mplot3d import Axes3D
from matplotlib import rcParams
rcParams['font.size'] = 9
class MatplotlibWidget(Canvas):
"""
MatplotlibWidget inherits PyQt4.QtGui.QWidget
and matplotlib.backend_bases.FigureCanvasBase
Options: option_name (default_value)
-------
parent (None): parent widget
title (''): figure title
xlabel (''): X-axis label
ylabel (''): Y-axis label
xlim (None): X-axis limits ([min, max])
ylim (None): Y-axis limits ([min, max])
xscale ('linear'): X-axis scale
yscale ('linear'): Y-axis scale
width (4): width in inches
height (3): height in inches
dpi (100): resolution in dpi
hold (False): if False, figure will be cleared each time plot is called
Widget attributes:
-----------------
figure: instance of matplotlib.figure.Figure
axes: figure axes
Example:
-------
self.widget = MatplotlibWidget(self, yscale='log', hold=True)
from numpy import linspace
x = linspace(-10, 10)
self.widget.axes.plot(x, x**2)
    self.widget.axes.plot(x, x**3)
"""
def __init__(self, parent=None, title='', xlabel='', ylabel='',
xlim=None, ylim=None, xscale='linear', yscale='linear',
width=4, height=3, dpi=100, hold=False):
self.figure = Figure(figsize=(width, height), dpi=dpi)
self.axes = self.figure.add_subplot(111)
self.axes.set_title(title)
self.axes.set_xlabel(xlabel)
self.axes.set_ylabel(ylabel)
if xscale is not None:
self.axes.set_xscale(xscale)
if yscale is not None:
self.axes.set_yscale(yscale)
if xlim is not None:
self.axes.set_xlim(*xlim)
if ylim is not None:
self.axes.set_ylim(*ylim)
#self.axes.hold(hold)
Canvas.__init__(self, self.figure)
self.setParent(parent)
Canvas.setSizePolicy(self, QSizePolicy.Expanding, QSizePolicy.Expanding)
Canvas.updateGeometry(self)
def sizeHint(self):
w, h = self.get_width_height()
return QSize(w, h)
def minimumSizeHint(self):
return QSize(10, 10)
class Matplotlib3DWidget(Canvas):
"""
MatplotlibWidget inherits PyQt4.QtGui.QWidget
and matplotlib.backend_bases.FigureCanvasBase
Options: option_name (default_value)
-------
parent (None): parent widget
title (''): figure title
xlabel (''): X-axis label
ylabel (''): Y-axis label
xlim (None): X-axis limits ([min, max])
ylim (None): Y-axis limits ([min, max])
xscale ('linear'): X-axis scale
yscale ('linear'): Y-axis scale
width (4): width in inches
height (3): height in inches
dpi (100): resolution in dpi
hold (False): if False, figure will be cleared each time plot is called
Widget attributes:
-----------------
figure: instance of matplotlib.figure.Figure
axes: figure axes
Example:
-------
self.widget = MatplotlibWidget(self, yscale='log', hold=True)
from numpy import linspace
x = linspace(-10, 10)
self.widget.axes.plot(x, x**2)
    self.widget.axes.plot(x, x**3)
"""
def __init__(self, parent=None, title='', xlabel='', ylabel='', zlabel='',
xlim=None, ylim=None, zlim=None, xscale='linear', yscale='linear',
width=4, height=3, dpi=100, hold=False):
self.figure = Figure(figsize=(width, height), dpi=dpi)
self.axes = self.figure.add_subplot(111, projection='3d')
self.axes.set_title(title)
self.axes.set_xlabel(xlabel)
self.axes.set_ylabel(ylabel)
self.axes.set_zlabel(zlabel)
#self.axes.mouse_init()
if xscale is not None:
self.axes.set_xscale(xscale)
if yscale is not None:
self.axes.set_yscale(yscale)
if xlim is not None:
self.axes.set_xlim(*xlim)
if ylim is not None:
self.axes.set_ylim(*ylim)
if zlim is not None:
self.axes.set_zlim(*zlim)
#self.axes.hold(hold)
Canvas.__init__(self, self.figure)
self.setParent(parent)
Canvas.setSizePolicy(self, QSizePolicy.Expanding, QSizePolicy.Expanding)
Canvas.updateGeometry(self)
def sizeHint(self):
w, h = self.get_width_height()
return QSize(w, h)
def minimumSizeHint(self):
return QSize(10, 10)
#===============================================================================
# Example
#===============================================================================
if __name__ == '__main__':
import sys
from PyQt4.QtGui import QMainWindow, QApplication
from numpy import linspace
class ApplicationWindow(QMainWindow):
def __init__(self):
QMainWindow.__init__(self)
self.mplwidget = MatplotlibWidget(self, title='Example',
xlabel='Linear scale',
ylabel='Log scale',
hold=True, yscale='log')
self.mplwidget.setFocus()
self.setCentralWidget(self.mplwidget)
self.plot(self.mplwidget.axes)
def plot(self, axes):
x = linspace(-10, 10)
axes.plot(x, x**2)
axes.plot(x, x**3)
app = QApplication(sys.argv)
win = ApplicationWindow()
win.show()
sys.exit(app.exec_())
|
StarcoderdataPython
|
3547852
|
<reponame>loumir/modelinstanceinvot-code
'''
Created on 31 mars 2020
@author: laurentmichel
'''
from astropy.io.votable import parse
from client.inst_builder import logger, table_mapper
from client.translator.instance_from_votable import InstanceFromVotable
from client.translator.json_mapping_builder import JsonMappingBuilder
from client.inst_builder.table_mapper import TableMapper
from client.inst_builder.json_block_extractor import JsonBlockExtractor
class VodmlInstance(object):
'''
    This class manages the transformation of the VOTable mapping blocks into a
    model instance serialized as a Python dict
'''
def __init__(self, votable_path, exit_validation=True):
#
# One table_mapper per TABLE_MAPPING
# table ID or name taken as keys
#
self.table_mappers = {}
self.votable_path = votable_path
#
# Dict translation of the <MODEL_INSTANCE> block
#
self.json_view = {}
# Convert the XML mapping block in a dictionary
self.build_json_view(exit_validation=exit_validation)
# Make the dictionary compliant with JSON mapping syntax
self.build_json_mapping()
# Build the table_mapper
self.build_table_mapper_map()
def build_json_view(self, exit_validation=True):
"""
Convert the XML mapping block into a dictionary (XML2json transform)
"""
logger.info("Extracting the MODEL_INSTANCE block")
instanceFromVotable = InstanceFromVotable(self.votable_path, exit_validation=exit_validation)
instanceFromVotable._extract_vodml_block()
logger.info("Validating the MODEL_INSTANCE block")
instanceFromVotable._validate_vodml_block()
logger.info("Extracting the raw JSON block")
self.json_view = instanceFromVotable.json_block
def build_json_mapping(self):
"""
        Replace, in the XML2json view, the model elements (COLLECTION, INSTANCE, ATTRIBUTE) with their roles.
        The other elements, the parser directives (TABLE_ROW_TEMPLATE...), are kept in place
"""
logger.info("Formating the JSON view")
builder = JsonMappingBuilder(json_dict=self.json_view)
builder.revert_compositions("COLLECTION")
builder.revert_templates()
builder.revert_elements("INSTANCE")
builder.revert_elements("ATTRIBUTE")
self.json_view = builder.json
def build_table_mapper_map(self):
"""
Build one TableMapper for each mapped table (TABLE_MAPPING) and store them
in a map using the table ID (or name if no ID) as keys.
TODO map the first table by default
"""
logger.info("Looking for tables matching TABLE_MAPPING ")
votable = parse(self.votable_path)
for template_key in self.json_view["MODEL_INSTANCE"]["TABLE_MAPPING"].keys():
logger.info("Looking for a table matching TABLE_MAPPING %s", template_key)
name = None
parsed_table = None
for table in votable.iter_tables():
if template_key == table.ID:
logger.info("Table with ID = %s found", template_key)
name = table.ID
parsed_table = table
break
            if name is None:
for table in votable.iter_tables():
if template_key == table.name:
logger.info("Table with name = %s found", template_key)
name = table.name
parsed_table = table
break
            if name is None:
                raise Exception("Cannot find any table whose name or ID matches " + template_key)
else:
logger.info("Add TableMapper for table %s", name)
self.table_mappers[template_key] = TableMapper(
template_key,
self.votable_path,
parsed_table=parsed_table,
json_inst_dict=self.json_view)
def populate_templates(self, resolve_dmrefs=False):
"""
resolve all @ref with values read in the VOTable
        if resolve_dmrefs is True, the INSTANCE references are replaced with copies of the actual objects
"""
for k, v in self.table_mappers.items():
logger.info("populate template %s", k)
v.resolve_refs_and_values(resolve_dmrefs=resolve_dmrefs)
v.map_columns()
def connect_join_iterators(self):
"""
        Connect the table iterators located in the mapping blocks (TABLE_ROW_TEMPLATE)
with the VOTable parser
"""
logger.info("connect join iterators")
parse_tables = {}
for template, table_mapper in self.table_mappers.items():
parse_tables[template] = table_mapper.parsed_table
for template, table_mapper in self.table_mappers.items():
for target, join_iterator in table_mapper.join_iterators.items():
logger.info("join template %s with template %s", template, target)
join_iterator.connect_votable(parse_tables[target])
def get_root_element(self, root_class):
"""
Look for the table mapper (TABLE_MAPPING) having one child matching root_class
        The root element is selected according to one of these criteria
- having no role attribute (should never occur actually)
- having an empty dmrole
- having dmrole = root
:param root_class: dmtype of the root class
:type root_class: string
:return: the table mapper of the table containing the root element (or None)
:rtype: TableMapper instance
"""
for template, table_mapper in self.table_mappers.items():
logger.info("Looking for %s instances in template %s", root_class, template)
json_block_extract = JsonBlockExtractor(table_mapper.json)
retour = json_block_extract.search_subelement_by_type(root_class)
for block in retour:
if "@dmrole" not in block.keys():
logger.info("found (no role)")
return table_mapper
role = block["@dmrole"]
if role == "" or role == "root":
logger.info("found with role=%s", role)
return table_mapper
return None
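# A minimal usage sketch (not part of the original module), assuming an annotated
# VOTable on disk; the file name and the root dmtype "cube:SparseCube" are
# hypothetical placeholders, while the methods called are the ones defined above.
if __name__ == "__main__":
    votable_instance = VodmlInstance("annotated_votable.xml", exit_validation=False)
    votable_instance.populate_templates(resolve_dmrefs=True)
    votable_instance.connect_join_iterators()
    root_mapper = votable_instance.get_root_element("cube:SparseCube")
    print("root element mapper:", root_mapper)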
|
StarcoderdataPython
|
3372576
|
<reponame>guionardo/py-cache-guiosoft
import os
import unittest
from unittest.mock import Mock, patch
from cache_gs.cache_classes.cache_data_file import CacheData, CacheDataFile
from tests.test_tools import raise_test_exception
def force_exception(*args, **kwargs):
raise_test_exception()
class TestCacheDataFile(unittest.TestCase):
def setUp(self):
self.file_name = 'test.json'
def tearDown(self):
if os.path.isfile(self.file_name):
os.unlink(self.file_name)
def test_save(self):
cd = CacheData("test_section", "test_key", "test_value", 0)
cdf = CacheDataFile('test', cd)
self.assertTrue(cdf.save(self.file_name))
cdf2 = CacheDataFile(self.file_name)
self.assertEqual(cdf.data, cdf2.data)
def test_repr(self):
cd = CacheData('sec', 'key', 'value', 0)
cdf = CacheDataFile('test', cd)
self.assertEqual(
repr(cdf),
"CacheDataFile('test',CacheData('sec','key','value',0))")
@patch("os.path.isfile", Mock())
def test_load_exception(self):
os.path.isfile.return_value = True
cdf = CacheDataFile()
self.assertFalse(cdf.load('abcd'))
@patch("json.dumps", force_exception)
def test_save_exception(self):
cd = CacheData("sec", "key", "value", 0)
cdf = CacheDataFile(cache_data=cd)
self.assertFalse(cdf.save('abcd'))
if os.path.isfile('abcd'):
os.unlink('abcd')
|
StarcoderdataPython
|
1858215
|
from forms.registerForm import UserForm
from django.shortcuts import render, redirect
from django.template.loader import render_to_string
from django.contrib.auth import (
authenticate,
login,
logout,
)
from django.views.generic import DetailView, ListView
from forms.loginForm import UserLoginForm
from Store.models.productModel import *
from django.db.models import Min, Max
from django.http import JsonResponse
from django.contrib import messages # import messages to show flash message
from forms.profileForm import *
from Store.models.profileModel import Profile
from django.views.generic import View
from django.contrib.auth.decorators import login_required
from django.utils.decorators import method_decorator
import math
from forms.product_modificationForm import *
from django.http import HttpResponseRedirect
from django.urls import reverse
from django.conf import settings
import smtplib
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
def base(request):
return render(request, 'homepage.html')
def login_view(request):
next = request.GET.get('next')
title = 'Login'
form = UserLoginForm(request.POST or None)
if form.is_valid():
username = form.cleaned_data.get('username')
password = form.cleaned_data.get('password')
user = authenticate(username=username, password=password)
login(request, user)
if next:
return redirect(next)
return redirect('/')
return render(request, 'login.html', {'form': form, 'title': title})
def register(request):
if request.method == 'POST':
form = UserForm(request.POST)
if form.is_valid():
form.save()
return redirect('Store:Base')
else:
form = UserForm()
return render(request, 'register.html', {'form': form})
@method_decorator(login_required(login_url='login'),
                  name='dispatch')  # login_required checks that the current user is logged in.
# dispatch() is the method, present in every class-based view, that handles the request and the response.
class ProfileView(View):
profile = None
def dispatch(self, request, *args, **kwargs):
self.profile, __ = Profile.objects.get_or_create(user=request.user)
return super(ProfileView, self).dispatch(request, *args, **kwargs)
def get(self, request):
context = {'profile': self.profile}
return render(request, 'profile.html', context)
def post(self, request):
form = ProfileForm(request.POST, request.FILES, instance=self.profile)
if form.is_valid():
profile = form.save()
# to save user model info
profile.user.first_name = form.cleaned_data.get('first_name')
profile.user.last_name = form.cleaned_data.get('last_name')
profile.user.email = form.cleaned_data.get('email')
profile.phone = form.cleaned_data.get('phone')
profile.user.save()
messages.success(request, 'Profile saved successfully')
else:
messages.error(request, form_validation_error(form))
return redirect('Store:profile')
def logout_view(request):
logout(request)
return render(request, 'logout.html')
def search_bar(request):
if request.method == 'POST':
        # take whatever was typed into the search bar
        searched = request.POST['searched']
        # query holding all perfumes whose name contains "searched"
        venues = Product.objects.filter(name__contains=searched)
return render(request, 'search_bar.html', {'searched': searched, 'venues': venues})
else:
return render(request, 'search_bar.html')
def product_review(request, id):
if request.method == 'POST' and request.user.is_authenticated:
stars = request.POST.get('stars', 3)
content = request.POST.get('content', '')
product = Product.objects.get(id=id)
review = ProductReviewModel.objects.create(product=product, user=request.user, stars=stars,
content=content)
return render(request, 'review_added.html')
else:
return render(request, 'review_added.html')
class ProductList(DetailView):
model = Product
template_name = 'products.html'
def price(request):
    # dictionary holding the minimum and maximum price over all perfumes
    minMaxPrice = Product.objects.aggregate(Min('price'), Max('price'))
    allProducts = Product.objects.all().order_by('-id').distinct()  # all perfumes ordered by descending id
    allProducts = allProducts.filter(price__gte=minMaxPrice['price__min'])
    allProducts = allProducts.filter(price__lte=minMaxPrice['price__max'])
data = {'minMaxPrice': minMaxPrice, 'allProducts': allProducts}
return render(request, 'price.html', data)
def filter_price(request):
    minPrice = request.GET['minPrice']  # minimum price
    maxPrice = request.GET['maxPrice']  # maximum price set from the interface
    # all perfumes between the minimum and the maximum price
    filtered_products = Product.objects.filter(price__gte=minPrice).filter(price__lte=maxPrice).distinct()
    # return the whole rendered HTML as a string
    t = render_to_string('ajax/filtered_products_price.html', {'data': filtered_products})
return JsonResponse({'data': t})
class MenPerfumes(ListView):
model = Product
template_name = 'men_perfumes.html'
class WomenPerfumes(ListView):
model = Product
template_name = 'women_perfumes.html'
def recommended_products_anonymous_helper(obj):
queryset = ProductReviewModel.objects.all()
products = {}
    for product in obj:  # for each perfume passed in (the "profumi_finali" list)
        stars_splitting = {}
        count = 0  # counts how many reviews this perfume has
        for recensione in queryset:  # for each review
            if recensione.product == product:  # the review belongs to this perfume
                if product not in products.keys():  # perfume not yet in the "products" dict
                    # "products", keyed by the perfume, takes the star rating of this
                    # single review as its value
                    products[product] = recensione.stars
                else:
                    products[product] = products[product] + recensione.stars  # add the stars of the further review
                count += 1
        if count != 0:
            # compute the overall average rating for this perfume
            average_stars = float(products[product]) / count
            frazione, intero = math.modf(average_stars)  # split the fractional part from the integer part
            stars_splitting['intero'] = intero
            stars_splitting['frazione'] = frazione
            # now "products" maps the perfume to "stars_splitting"
            # (e.g. {'intero': 4.0, 'frazione': 0.0})
            products[product] = stars_splitting
            if intero < 3:  # drop every perfume rated below 3 stars
                del products[product]
return products
def recommended_products_view(request):
if request.user.is_authenticated:
        # query that filters the orders placed by this user
        customer_orders = CustomerOrders.objects.filter(user=request.user)
        if customer_orders:  # the user has placed at least one order
            profumi_con_ripetizione = []
            profumi_finali = []
            for order in customer_orders:  # for every order the user has placed
                brand = order.product.brand  # take the brand
                prezzo = order.product.price  # take the price
                # queryset filters every perfume in the database by the order's brand
                # and by a price within -50/+50 of the purchased perfume's price
                queryset = Product.objects.filter(brand=brand, price__lte=prezzo + 50, price__gte=prezzo - 50)
                # list holding all perfumes suggested for each purchased perfume
                profumi_con_ripetizione.append(queryset)
            # this nested loop adds to "profumi_finali" every perfume from
            # "profumi_con_ripetizione", skipping the ones that repeat
for perfume in profumi_con_ripetizione:
for x in perfume:
if x not in profumi_finali:
profumi_finali.append(x)
context = {'products': recommended_products_anonymous_helper(profumi_finali)}
return render(request, 'recommended_products.html', context)
        # if the user has not placed any order, show a list of the perfumes with the
        # highest ratings
else:
context = {'products': recommended_products_anonymous_helper(Product.objects.all())}
else:
context = {'products': recommended_products_anonymous_helper(Product.objects.all())}
template_name = 'recommended_products.html'
return render(request, template_name, context)
def lista_prodotti_amministratore(request):
listaProdotti = Product.objects.all()
context = {'listaProdotti': listaProdotti}
return render(request, 'tabella_prodotti.html', context)
def modifica_prodotto(request, pk):
if request.method == 'POST':
modpro = Product.objects.get(pk=pk)
form = ModificaProdotto(request.POST)
old_qty = modpro.quantity
if form.is_valid():
name = form.cleaned_data['name']
price = form.cleaned_data['price']
brand = form.cleaned_data['brand']
category = form.cleaned_data['category']
description = form.cleaned_data['description']
quantity = form.cleaned_data['quantity']
modpro = Product.objects.filter(pk=pk).update(name=name, price=price, brand=brand, category=category,
description=description, quantity=quantity)
if old_qty == 0 and old_qty != quantity:
return redirect('Store:send-email', id=pk)
return HttpResponseRedirect(reverse('Store:lista-prodotti'))
else:
modpro = Product.objects.filter(pk=pk)
form = ModificaProdotto()
context = {'form': form, 'modpro': modpro}
return render(request, 'modifica_tabella_prodotti.html', context)
def send_email(request, id):
product = Product.objects.get(id=id)
MY_ADDRESS = settings.EMAIL_HOST_USER
PASSWORD = settings.EMAIL_HOST_PASSWORD
s = smtplib.SMTP(host='smtp.gmail.com', port=587) # set up the SMTP server
s.starttls()
s.login(MY_ADDRESS, PASSWORD)
message = f"Il profumo {product.name} è di nuovo disponibile! :)"
waiting_list_users = WaitingListModel.objects.filter(product=product.id)
for wait in waiting_list_users:
user = wait.user
msg = MIMEMultipart() # create a message
# setup the parameters of the message
msg['From'] = MY_ADDRESS
msg['To'] = user.email
msg['Subject'] = f"Profumo {product.name} disponibile!"
# add in the message body
msg.attach(MIMEText(message, 'plain'))
# send the message via the server set up earlier.
s.send_message(msg)
del msg
# Terminate the SMTP session and close the connection
s.quit()
WaitingListModel.objects.all().delete()
return HttpResponseRedirect(reverse('Store:lista-prodotti'))
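# Hedged sketch (not part of the original file) of the Store urls.py entries implied by
# the redirect()/reverse() calls above; the URL paths and the wiring are assumptions,
# only the route names ('Base', 'profile', 'lista-prodotti', 'send-email') come from
# the code.
#
# from django.urls import path
# from . import views
#
# app_name = 'Store'
# urlpatterns = [
#     path('', views.base, name='Base'),
#     path('profile/', views.ProfileView.as_view(), name='profile'),
#     path('prodotti/', views.lista_prodotti_amministratore, name='lista-prodotti'),
#     path('send-email/<int:id>/', views.send_email, name='send-email'),
# ]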
|
StarcoderdataPython
|
9660991
|
# -*- coding: utf-8 -*-
# Define here the models for your spider middleware
#
# See documentation in:
# https://docs.scrapy.org/en/latest/topics/spider-middleware.html
import random
from scrapy import signals
from scrapy.exceptions import NotConfigured
import redis
class XpcSpiderMiddleware:
# Not all methods need to be defined. If a method is not defined,
# scrapy acts as if the spider middleware does not modify the
# passed objects.
@classmethod
def from_crawler(cls, crawler):
# This method is used by Scrapy to create your spiders.
s = cls()
crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
return s
def process_spider_input(self, response, spider):
# Called for each response that goes through the spider
# middleware and into the spider.
# Should return None or raise an exception.
return None
def process_spider_output(self, response, result, spider):
# Called with the results returned from the Spider, after
# it has processed the response.
# Must return an iterable of Request, dict or Item objects.
for i in result:
yield i
def process_spider_exception(self, response, exception, spider):
# Called when a spider or process_spider_input() method
# (from other spider middleware) raises an exception.
# Should return either None or an iterable of Request, dict
# or Item objects.
pass
def process_start_requests(self, start_requests, spider):
# Called with the start requests of the spider, and works
# similarly to the process_spider_output() method, except
# that it doesn’t have a response associated.
# Must return only requests (not items).
for r in start_requests:
yield r
def spider_opened(self, spider):
spider.logger.info('Spider opened: %s' % spider.name)
class XpcDownloaderMiddleware:
# Not all methods need to be defined. If a method is not defined,
# scrapy acts as if the downloader middleware does not modify the
# passed objects.
@classmethod
def from_crawler(cls, crawler):
# This method is used by Scrapy to create your spiders.
s = cls()
crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
return s
def process_request(self, request, spider):
# Called for each request that goes through the downloader
# middleware.
# Must either:
# - return None: continue processing this request
# - or return a Response object
# - or return a Request object
# - or raise IgnoreRequest: process_exception() methods of
# installed downloader middleware will be called
return None
def process_response(self, request, response, spider):
# Called with the response returned from the downloader.
# Must either;
# - return a Response object
# - return a Request object
# - or raise IgnoreRequest
return response
def process_exception(self, request, exception, spider):
# Called when a download handler or a process_request()
# (from other downloader middleware) raises an exception.
# Must either:
# - return None: continue processing this exception
# - return a Response object: stops process_exception() chain
# - return a Request object: stops process_exception() chain
pass
def spider_opened(self, spider):
spider.logger.info('Spider opened: %s' % spider.name)
class RandomProxyMiddleware(object):
def __init__(self, settings):
self.r = redis.Redis(host='127.0.0.1')
self.proxy_key = settings.get('PROXY_REDIS_KEY')
self.proxy_stats_key = self.proxy_key + "_stats"
self.max_failed = 5
@property
def proxies(self):
return [i.decode('utf-8') for i in self.r.lrange(self.proxy_key, 0, -1)]
@classmethod
def from_crawler(cls, crawler):
if not crawler.settings.getbool("HTTPPROXY_ENABLED"):
raise NotConfigured
return cls(crawler.settings)
def process_request(self, request, spider):
if not request.meta.get('proxy') \
and request.url not in spider.start_urls:
request.meta['proxy'] = random.choice(self.proxies)
    def process_response(self, request, response, spider):
        cur_proxy = request.meta.get('proxy')
        if response.status in (401, 403):
            print('{} got wrong code {} times'.
                  format(cur_proxy, self.r.hget(self.proxy_stats_key, cur_proxy)))
            self.r.hincrby(self.proxy_stats_key, cur_proxy, 1)
            failed_times = self.r.hget(self.proxy_stats_key, cur_proxy) or 0
            if int(failed_times) >= self.max_failed:
                print('got wrong http code [{}] when use {}'.format(
                    response.status, cur_proxy))
                # drop the failing proxy and retry the request with a fresh one
                self.removeProxy(proxy=cur_proxy)
                del request.meta['proxy']
                return request
        return response
def process_exception(self, request, exception, spider):
        cur_proxy = request.meta.get('proxy')
from twisted.internet.error import ConnectionRefusedError, TimeoutError
if isinstance(exception, (ConnectionRefusedError, TimeoutError)):
print('error occur when use proxy {}'.format(exception))
self.removeProxy(proxy=cur_proxy)
del request.meta['proxy']
return request
def removeProxy(self, proxy):
if proxy in self.proxies:
            self.r.lrem(self.proxy_key, 0, proxy)  # redis-py >= 3.0 signature: lrem(name, count, value)
self.r.hdel(self.proxy_stats_key, proxy)
print('remove {} from proxy list'.format(proxy))
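# A minimal sketch (not in the original project) of the settings.py entries that would
# enable RandomProxyMiddleware; the module path 'xpc.middlewares' and the priority are
# assumptions, while HTTPPROXY_ENABLED and PROXY_REDIS_KEY are the settings the
# middleware above actually reads.
#
# HTTPPROXY_ENABLED = True
# PROXY_REDIS_KEY = 'proxies'
# DOWNLOADER_MIDDLEWARES = {
#     'xpc.middlewares.RandomProxyMiddleware': 749,
# }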
|
StarcoderdataPython
|
1828798
|
<reponame>zeshinsei/sync-companion
import configparser
import sys
import reddit
### Return the current subreddit name ###
def get_subreddit():
return sys.argv[1]
### Read from the config ###
def get_config():
config = configparser.ConfigParser()
s = reddit.reddit.subreddit(get_subreddit())
configdata = s.wiki['sync_config'].content_md
config.read_string(configdata)
return config
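# Hedged usage sketch (not in the original file): the script is assumed to be invoked
# as `python <script> <subreddit>`, so sys.argv[1] names the subreddit whose
# 'sync_config' wiki page is read and parsed.
if __name__ == '__main__':
    config = get_config()
    print(config.sections())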
|
StarcoderdataPython
|
1719862
|
# -*- coding: utf-8 -*-
"""
@brief: Extract judicial acts from `bsr.sudrf.ru` (update database)
@package: judicial
@file: settings.py
@author: dmryutov (<EMAIL>)
@version: 1.0
@date: 03.11.2017 -- 04.11.2017
"""
import re
import os
# Links to all acts
ALL_ACTS = r'https://bsr.sudrf.ru/bigs/portal.html#%7B%22mode%22:%22QUERY_HISTORY%22,%22historyQueryId%22:%22B6A74295-C7E2-4A42-B3A1-CB790A82DB22%22%7D' # pylint: disable=line-too-long
# Defendant keywords
KEYWORDS = {
'одсудимого', 'одсудимой', 'одсудимая', 'одсудимый',
    'сужденный', 'сужденная', 'сужденного', 'сужденной', 'сужденого', 'сужденой',
'суждённый', 'суждённая', 'суждённого', 'суждённой',
'удимый', 'удимая', 'удимого ранее', 'удимой ранее',
'бвиняемого', 'бвиняемой', 'бвиняемый', 'бвиняемая',
'аявителя', 'в отношении'
}
# List of month names
MONTH_LIST = {
'января', 'февраля', 'марта', 'апреля', 'мая', 'июня', 'июля', 'августа',
'сентября', 'октября', 'ноября', 'декабря'
}
# Name masks
NAME_MASK = re.compile(r'('+ '|'.join(KEYWORDS) +r')[а-яё]*[ -:]*'+
r'(([А-ЯЁ][а-яё]{1,}\s*){3},|'+ # <NAME>,
r'([А-ЯЁ][а-яё]{1,}\s*)([А-ЯЁ]\.\s*){1,2}|'+ # Багнюк Д.С.
r'([А-ЯЁ]\.\s*){1,2}([А-ЯЁ][а-яё]{1,}\s*)|'+ # Д.С. Багнюк
r'([А-ЯЁ]\.\s*){1,3}|'+ # К.Р.Н.
r'[А-ЯЁ0-9]{2,})') # ФИО1
NAME_MASK2 = re.compile(r'(([А-ЯЁ][а-яё]{1,}\s*){3},|'+ # <NAME>,
r'([А-ЯЁ][а-яё]{1,}\s*)([А-ЯЁ]\.\s*){1,2}|'+ # Багнюк Д.С.
r'([А-ЯЁ]\.\s*){1,2}([А-ЯЁ][а-яё]{1,}\s*)|'+ # Д.С. Багнюк
r'([А-ЯЁ]\.\s*){1,3})', # К.Р.Н.
re.I) # pylint: disable=no-member
FIO_N = re.compile(r'(ФИО|Ф\.И\.О\.)')
NAME_AND_WORD = re.compile(r'^[А-ЯЁ]\. [а-яё]{1,}$')
# Bad words and endings
BAD_ENDINGS = [' по ', ' на ', ',']
BAD_WORDS = {'посредством', 'характеризуется', 'избрана', 'меры', 'возбуждено', 'путем'}
BAD_WORDS2 = {
'Судебная', 'Преступления совершены', 'Преступление совершено', 'Судья',
'...', '**', 'по ', 'На основании', 'года. ', 'г. ', 'п. '
}
BAD_WORDS3 = {'ДД.ММ.', 'судья', 'судьей', 'судью', 'судьи'}
# Chrome browser driver path
DRIVER_PATH = os.path.join(os.getcwd(), 'drivers', 'chromedriver')
# Browser loading timeout
TIMEOUT = 20
# Path to database file
DATABASE_PATH = '/Users/dmryutov/Desktop/Judicial/db.sqlite'
# Columns for fast search
FAST_COLUMN_LIST = [
'old_id',
'search_number',
'act_number',
'instance',
'article',
'document_type',
'region',
'court_name',
'result',
'judge',
'lawyer',
'victim_representative',
'defender',
'representative',
'prosecutor',
'receipt_date',
'decision_date',
'entry_date',
'defendant'
]
# Columns for full search
FULL_COLUMN_LIST = [
'search_number',
'act_number',
'proceedings_type',
'instance',
'article',
'document_type',
'region',
'court_name',
'result',
'judge',
'lawyer',
'claimant',
'inquirer',
'debtor',
'another_participants',
'interested_person',
'victim_representative',
'defender',
'applicant',
'plaintiff',
'public_defender',
'inquiry',
'respondent',
'victim',
'representative',
'prosecutor',
'investigative_body_head',
'witness',
'investigator',
'third_party',
'document_registration_year',
'receipt_date',
'decision_date',
'entry_date',
'defendant'
]
|
StarcoderdataPython
|
9766330
|
<filename>gauto/common/protocol.py
# -*- coding: UTF-8 -*-
"""
Tencent is pleased to support the open source community by making GAutomator available.
Copyright (C) 2016 THL A29 Limited, a Tencent company. All rights reserved.
Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at
http://opensource.org/licenses/MIT
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" basis, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
"""
__author__ = 'minhuaxu <EMAIL>,alexkan <EMAIL>'
class Commands(object):
    GET_VERSION = 100              # get the version number
    FIND_ELEMENTS = 101            # find nodes
    FIND_ELEMENT_PATH = 102        # fuzzy search for nodes
    GET_ELEMENTS_BOUND = 103       # get the position info of nodes
    GET_ELEMENT_WORLD_BOUND = 104  # get the world coordinates of a node
    GET_UI_INTERACT_STATUS = 105   # get the game's clickable info: scene, clickable nodes and their positions
    GET_CURRENT_SCENE = 106        # get the name of the current Unity scene
    GET_ELEMENT_TEXT = 107         # get the text content of a node
    GET_ELEMENT_IMAGE = 108        # get the image name of a node
    GET_REGISTERED_HANDLERS = 109  # get the names of the registered handlers
    CALL_REGISTER_HANDLER = 110    # call a registered handler
    SET_INPUT_TEXT = 111           # replace the text of an input control
    GET_OBJECT_FIELD = 112         # get a component field value of a GameObject via reflection
    FIND_ELEMENTS_COMPONENT = 113  # get all GameObjects that contain the given component
    SET_CAMERA_NAME = 114          # set the best camera for rendering
    GET_COMPONENT_METHODS = 115    # get the methods of a component via reflection
    CALL_COMPONENT_MOTHOD = 116    # call a component method via reflection
    LOAD_TEST_LIB = 117            # initialize the testlib service
    PRC_SET_METHOD = 118           # register a Python-side method
    RPC_METHOD = 119               # Python-side method callable from the in-game interface
    #######################/
    HANDLE_TOUCH_EVENTS = 200      # send down/move/up touch events
DUMP_TREE = 300
class TouchEvent(object):
ACTION_DOWN = 0
ACTION_UP = 1
ACTION_MOVE = 2
def __init__(self, x, y, sleeptime, type):
self.x = x
self.y = y
self.sleeptime = sleeptime
self.type = type
|
StarcoderdataPython
|
6609283
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from enum import Enum
class AzureRegionBaseUrl(Enum):
westusapicognitivemicrosoftcom = "westus.api.cognitive.microsoft.com"
westus2apicognitivemicrosoftcom = "westus2.api.cognitive.microsoft.com"
eastusapicognitivemicrosoftcom = "eastus.api.cognitive.microsoft.com"
eastus2apicognitivemicrosoftcom = "eastus2.api.cognitive.microsoft.com"
westcentralusapicognitivemicrosoftcom = "westcentralus.api.cognitive.microsoft.com"
southcentralusapicognitivemicrosoftcom = "southcentralus.api.cognitive.microsoft.com"
westeuropeapicognitivemicrosoftcom = "westeurope.api.cognitive.microsoft.com"
northeuropeapicognitivemicrosoftcom = "northeurope.api.cognitive.microsoft.com"
southeastasiaapicognitivemicrosoftcom = "southeastasia.api.cognitive.microsoft.com"
eastasiaapicognitivemicrosoftcom = "eastasia.api.cognitive.microsoft.com"
australiaeastapicognitivemicrosoftcom = "australiaeast.api.cognitive.microsoft.com"
brazilsouthapicognitivemicrosoftcom = "brazilsouth.api.cognitive.microsoft.com"
contentmoderatortestazure_apinet = "contentmoderatortest.azure-api.net"
|
StarcoderdataPython
|
3368679
|
<reponame>jasonwbarnett/pycal-play
#!/usr/bin/env python3
from datetime import datetime
a_meetings = [['09:00', '10:30'], ['12:00', '13:00'], ['16:00', '18:00']]
a_workday = ['09:00', '20:00']
a_meetings = [['10:00', '11:30'], ['12:30', '14:30'], ['14:30', '15:00'], ['16:00', '17:00']]
a_workday = ['10:00', '18:30']
length = 30
output = [['11:30', '12:00'], ['15:00', '16:00'], ['18:00', '18:30']]
def find_available(workday, meetings):
    # workday and each meeting are [start, end] pairs of zero-padded 'HH:MM' strings,
    # so plain string comparison orders them correctly.
    free_time = []
    num_of_meetings = len(meetings)
    first_meeting = meetings[0]
    last_meeting = meetings[-1]
    # find time between start of day and first meeting
    if workday[0] < first_meeting[0]:
        free_time.append([workday[0], first_meeting[0]])
    # find time between meetings
    for i in range(num_of_meetings - 1):
        if meetings[i][1] < meetings[i + 1][0]:
            free_time.append([meetings[i][1], meetings[i + 1][0]])
    # find time between last meeting and end of day
    if workday[1] > last_meeting[1]:
        free_time.append([last_meeting[1], workday[1]])
    return free_time
def find_match(availability_a, availability_b, duration):
    # 1. remove availability less than duration, in both
    # 2. remove availability
    pass  # body left unfinished in the original snippet
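# Hedged demo (not part of the original sketch): print one person's free slots as
# computed by find_available; find_match above is still a stub.
if __name__ == '__main__':
    print(find_available(a_workday, a_meetings))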
|
StarcoderdataPython
|
1828140
|
<reponame>imranq2/SparkAutoMapper.FHIR
from __future__ import annotations
from spark_auto_mapper_fhir.fhir_types.uri import FhirUri
from spark_auto_mapper_fhir.value_sets.generic_type import GenericTypeCode
from spark_auto_mapper.type_definitions.defined_types import AutoMapperTextInputType
# This file is auto-generated by generate_classes so do not edit manually
# noinspection PyPep8Naming
class TestReportParticipantTypeCode(GenericTypeCode):
"""
TestReportParticipantType
From: http://hl7.org/fhir/report-participant-type in valuesets.xml
The type of participant.
"""
def __init__(self, value: AutoMapperTextInputType):
super().__init__(value=value)
"""
http://hl7.org/fhir/report-participant-type
"""
codeset: FhirUri = "http://hl7.org/fhir/report-participant-type"
class TestReportParticipantTypeCodeValues:
"""
The test execution engine.
From: http://hl7.org/fhir/report-participant-type in valuesets.xml
"""
TestEngine = TestReportParticipantTypeCode("test-engine")
"""
A FHIR Client.
From: http://hl7.org/fhir/report-participant-type in valuesets.xml
"""
Client = TestReportParticipantTypeCode("client")
"""
A FHIR Server.
From: http://hl7.org/fhir/report-participant-type in valuesets.xml
"""
Server = TestReportParticipantTypeCode("server")
|
StarcoderdataPython
|
11353657
|
from setuptools import setup, Extension
import setuptools_scm # noqa Ensure it’s installed
extensions = [
Extension("cutadapt._align", sources=["src/cutadapt/_align.pyx"]),
Extension("cutadapt.qualtrim", sources=["src/cutadapt/qualtrim.pyx"]),
]
setup(ext_modules=extensions)
|
StarcoderdataPython
|
12170
|
import asyncio
import logging
import traceback
import uuid
from typing import Optional, Tuple, Any, Callable
from pesto.ws.core.payload_parser import PayloadParser, PestoConfig
from pesto.ws.core.pesto_feature import PestoFeatures
from pesto.ws.core.utils import load_class, async_exec
from pesto.ws.features.algorithm_wrapper import AlgorithmWrapper
from pesto.ws.features.converter.image.image_roi import ImageROI, DummyImageROI
from pesto.ws.features.payload_converter import PayloadConverter
from pesto.ws.features.payload_debug import PayloadDebug
from pesto.ws.features.response_serializer import ResponseSerializer
from pesto.ws.features.schema_validation import SchemaValidation
from pesto.ws.features.stateful_response import StatefulResponse
from pesto.ws.features.stateless_response import StatelessResponse
from pesto.ws.service.describe import DescribeService
from pesto.ws.service.job_result import ResultType
log = logging.getLogger(__name__)
class ProcessService:
PROCESS_CLASS_NAME = 'algorithm.process.Process'
_algorithm: Optional[Callable] = None
_describe = None
@staticmethod
def init():
if ProcessService._algorithm is not None:
raise ValueError('Process Service already loaded !')
try:
log.info('ProcessService.init() ...')
ProcessService._algorithm = load_class(ProcessService.PROCESS_CLASS_NAME)()
if hasattr(ProcessService._algorithm, 'on_start'):
log.info('ProcessService.on_start() ...')
ProcessService._algorithm.on_start()
log.info('ProcessService.on_start() ... Done !')
log.info('ProcessService.init() ... Done !')
        except Exception:
traceback.print_exc()
log.warning('Algorithm {}.on_start() failure !'.format(ProcessService.PROCESS_CLASS_NAME))
def __init__(self, url_root: str):
self.url_root = url_root
@property
def service_description(self):
if ProcessService._describe is None:
ProcessService._describe = DescribeService(self.url_root).compute_describe()
return ProcessService._describe
def process(self, payload: dict) -> dict:
config = PayloadParser.parse(payload)
image_roi: Optional[ImageROI] = config.get(PestoConfig.roi) # if no ROI: None
active_roi: ImageROI = image_roi or DummyImageROI() # bypass compute crop info and remove margins in pipeline
job_id = str(uuid.uuid4().time_low)
is_stateful = self.service_description['asynchronous'] is True
input_schema = self.service_description['input']
output_schema = self.service_description['output']
common_pipeline = filter(None, [
SchemaValidation(schema=input_schema),
active_roi.compute_crop_infos(),
PayloadConverter(image_roi=image_roi, schema=input_schema),
PayloadDebug(schema=input_schema),
AlgorithmWrapper(ProcessService._algorithm),
active_roi.remove_margin(),
ResponseSerializer(schema=output_schema, job_id=job_id),
])
if is_stateful:
pipeline = [
*common_pipeline,
StatefulResponse(self.url_root, job_id)
]
else:
pipeline = [
*common_pipeline,
StatelessResponse(self.url_root, job_id, output_schema)
]
return PestoFeatures(pipeline).process(payload)
async def async_process(self, request_payload: dict) -> Tuple[Any, ResultType]:
return await asyncio.wait_for(
async_exec(lambda: self.process(request_payload)),
timeout=None
)
|
StarcoderdataPython
|
8038043
|
<filename>Chapter07/original_images_example.py
from tensorflow.examples.tutorials.mnist import input_data
import matplotlib.pyplot as plt
mnist = input_data.read_data_sets('MNIST_data', one_hot = True)
class OriginalImages:
def __init__(self):
pass
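    def standard_scale(self, X_train, X_test):
        # Assumed helper (not present in the original file) so that main() runs:
        # scale pixels to zero mean and unit variance, fitting on the training set only.
        from sklearn import preprocessing
        preprocessor = preprocessing.StandardScaler().fit(X_train)
        return preprocessor.transform(X_train), preprocessor.transform(X_test)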
def main(self):
X_train, X_test = self.standard_scale(mnist.train.images, mnist.test.images)
original_imgs = X_test[:100]
plt.figure(1, figsize=(10, 10))
for i in range(0, 100):
im = original_imgs[i].reshape((28, 28))
ax = plt.subplot(10, 10, i + 1)
for label in (ax.get_xticklabels() + ax.get_yticklabels()):
label.set_fontsize(8)
plt.imshow(im, cmap="gray", clim=(0.0, 1.0))
plt.suptitle(' Original Images', fontsize=15, y=0.95)
plt.savefig('figures/original_images.png')
plt.show()
def main():
auto = OriginalImages()
auto.main()
if __name__ == '__main__':
main()
|
StarcoderdataPython
|