id | content
---|---|
83891
|
import http.client
import json
import logging
import os
import ssl
import threading
import urllib.parse
from base64 import b64encode
from datetime import datetime
from http.client import HTTPConnection, HTTPResponse
from typing import Optional, Dict
from pyctuator.auth import Auth, BasicAuth
# pylint: disable=too-many-instance-attributes
class BootAdminRegistrationHandler:
def __init__(
self,
registration_url: str,
registration_auth: Optional[Auth],
application_name: str,
pyctuator_base_url: str,
start_time: datetime,
service_url: str,
registration_interval_sec: float,
application_metadata: Optional[dict] = None,
ssl_context: Optional[ssl.SSLContext] = None,
) -> None:
self.registration_url = registration_url
self.registration_auth = registration_auth
self.application_name = application_name
self.pyctuator_base_url = pyctuator_base_url
self.start_time = start_time
self.service_url = service_url if service_url.endswith("/") else service_url + "/"
self.registration_interval_sec = registration_interval_sec
self.instance_id = None
self.application_metadata = application_metadata if application_metadata else {}
self.ssl_context = ssl_context
self.should_continue_registration_schedule: bool = False
self.disable_certificate_validation_for_https_registration: bool = \
os.getenv("PYCTUATOR_REGISTRATION_NO_CERT") is not None
def _schedule_next_registration(self, registration_interval_sec: float) -> None:
timer = threading.Timer(
registration_interval_sec,
self._register_with_admin_server,
[]
)
        timer.daemon = True
timer.start()
def _register_with_admin_server(self) -> None:
# When waking up, make sure registration is still needed
if not self.should_continue_registration_schedule:
return
registration_data = {
"name": self.application_name,
"managementUrl": self.pyctuator_base_url,
"healthUrl": f"{self.pyctuator_base_url}/health",
"serviceUrl": self.service_url,
"metadata": {
"startup": self.start_time.isoformat(),
**self.application_metadata
}
}
logging.debug("Trying to post registration data to %s: %s", self.registration_url, registration_data)
conn: Optional[HTTPConnection] = None
try:
headers = {"Content-type": "application/json"}
self.authenticate(headers)
response = self._http_request(self.registration_url, "POST", headers, json.dumps(registration_data))
if response.status < 200 or response.status >= 300:
logging.warning("Failed registering with boot-admin, got %s - %s", response.status, response.read())
else:
self.instance_id = json.loads(response.read().decode('utf-8'))["id"]
except Exception as e: # pylint: disable=broad-except
logging.warning("Failed registering with boot-admin, %s (%s)", e, type(e))
finally:
if conn:
conn.close()
# Schedule the next registration unless asked to abort
if self.should_continue_registration_schedule:
self._schedule_next_registration(self.registration_interval_sec)
def deregister_from_admin_server(self) -> None:
if self.instance_id is None:
return
headers = {}
self.authenticate(headers)
deregistration_url = f"{self.registration_url}/{self.instance_id}"
logging.info("Deregistering from %s", deregistration_url)
conn: Optional[HTTPConnection] = None
try:
response = self._http_request(deregistration_url, "DELETE", headers)
if response.status < 200 or response.status >= 300:
logging.warning("Failed deregistering from boot-admin, got %s - %s", response.status, response.read())
except Exception as e: # pylint: disable=broad-except
logging.warning("Failed deregistering from boot-admin, %s (%s)", e, type(e))
finally:
if conn:
conn.close()
def authenticate(self, headers: Dict) -> None:
if isinstance(self.registration_auth, BasicAuth):
password = self.registration_auth.password if self.registration_auth.password else ""
authorization_string = self.registration_auth.username + ":" + password
encoded_authorization: str = b64encode(bytes(authorization_string, "utf-8")).decode("ascii")
headers["Authorization"] = f"Basic {encoded_authorization}"
    def start(self, initial_delay_sec: Optional[float] = None) -> None:
logging.info("Starting recurring registration of %s with %s",
self.pyctuator_base_url, self.registration_url)
self.should_continue_registration_schedule = True
self._schedule_next_registration(initial_delay_sec or self.registration_interval_sec)
def stop(self) -> None:
logging.info("Stopping recurring registration")
self.should_continue_registration_schedule = False
def _http_request(self, url: str, method: str, headers: Dict[str, str], body: Optional[str] = None) -> HTTPResponse:
url_parts = urllib.parse.urlsplit(url)
if url_parts.scheme == "http":
conn = http.client.HTTPConnection(url_parts.hostname, url_parts.port)
elif url_parts.scheme == "https":
context = self.ssl_context
if not context and self.disable_certificate_validation_for_https_registration:
context = ssl.SSLContext()
context.verify_mode = ssl.CERT_NONE
conn = http.client.HTTPSConnection(url_parts.hostname, url_parts.port, context=context)
else:
raise ValueError(f"Unknown scheme in {url}")
conn.request(
method,
url_parts.path,
body=body,
headers=headers,
)
return conn.getresponse()
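# A minimal usage sketch (not part of the pyctuator module; the URLs, app name,
# interval and BasicAuth credentials below are assumptions): register with a
# Spring Boot Admin server, then deregister on shutdown.
if __name__ == "__main__":
    handler = BootAdminRegistrationHandler(
        registration_url="http://localhost:8080/instances",
        registration_auth=BasicAuth("admin", "secret"),
        application_name="example-service",
        pyctuator_base_url="http://localhost:8000/pyctuator",
        start_time=datetime.now(),
        service_url="http://localhost:8000",
        registration_interval_sec=10,
    )
    handler.start()
    input("Press Enter to stop...")
    handler.stop()
    handler.deregister_from_admin_server()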
|
83908
|
del_items(0x80124F0C)
SetType(0x80124F0C, "void GameOnlyTestRoutine__Fv()")
del_items(0x80124F14)
SetType(0x80124F14, "int vecleny__Fii(int a, int b)")
del_items(0x80124F38)
SetType(0x80124F38, "int veclenx__Fii(int a, int b)")
del_items(0x80124F64)
SetType(0x80124F64, "void GetDamageAmt__FiPiT1(int i, int *mind, int *maxd)")
del_items(0x8012555C)
SetType(0x8012555C, "int CheckBlock__Fiiii(int fx, int fy, int tx, int ty)")
del_items(0x80125644)
SetType(0x80125644, "int FindClosest__Fiii(int sx, int sy, int rad)")
del_items(0x801257E0)
SetType(0x801257E0, "int GetSpellLevel__Fii(int id, int sn)")
del_items(0x80125854)
SetType(0x80125854, "int GetDirection8__Fiiii(int x1, int y1, int x2, int y2)")
del_items(0x80125A70)
SetType(0x80125A70, "void DeleteMissile__Fii(int mi, int i)")
del_items(0x80125AC8)
SetType(0x80125AC8, "void GetMissileVel__Fiiiiii(int i, int sx, int sy, int dx, int dy, int v)")
del_items(0x80125C7C)
SetType(0x80125C7C, "void PutMissile__Fi(int i)")
del_items(0x80125D80)
SetType(0x80125D80, "void GetMissilePos__Fi(int i)")
del_items(0x80125EA8)
SetType(0x80125EA8, "void MoveMissilePos__Fi(int i)")
del_items(0x80126010)
SetType(0x80126010, "unsigned char MonsterTrapHit__FiiiiiUc(int m, int mindam, int maxdam, int dist, int t, int shift)")
del_items(0x80126384)
SetType(0x80126384, "unsigned char MonsterMHit__FiiiiiiUc(int pnum, int m, int mindam, int maxdam, int dist, int t, int shift)")
del_items(0x80126AE4)
SetType(0x80126AE4, "unsigned char PlayerMHit__FiiiiiiUcUc(int pnum, int m, int dist, int mind, int maxd, int mtype, int shift, int earflag)")
del_items(0x80127550)
SetType(0x80127550, "unsigned char Plr2PlrMHit__FiiiiiiUc(int pnum, int p, int mindam, int maxdam, int dist, int mtype, int shift)")
del_items(0x80127D2C)
SetType(0x80127D2C, "void CheckMissileCol__FiiiUciiUc(int i, int mindam, int maxdam, unsigned char shift, int mx, int my, int nodel)")
del_items(0x8012846C)
SetType(0x8012846C, "unsigned char GetTableValue__FUci(unsigned char code, int dir)")
del_items(0x80128500)
SetType(0x80128500, "void SetMissAnim__Fii(int mi, int animtype)")
del_items(0x801285D0)
SetType(0x801285D0, "void SetMissDir__Fii(int mi, int dir)")
del_items(0x80128614)
SetType(0x80128614, "void AddLArrow__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int mienemy, int id, int dam)")
del_items(0x801287F4)
SetType(0x801287F4, "void AddArrow__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int mienemy, int id, int dam)")
del_items(0x801289B0)
SetType(0x801289B0, "void GetVileMissPos__Fiii(int mi, int dx, int dy)")
del_items(0x80128AD4)
SetType(0x80128AD4, "void AddRndTeleport__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int mienemy, int id, int dam)")
del_items(0x80128E44)
SetType(0x80128E44, "void AddFirebolt__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int micaster, int id, int dam)")
del_items(0x801290B0)
SetType(0x801290B0, "void AddMagmaball__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int mienemy, int id, int dam)")
del_items(0x801291C4)
SetType(0x801291C4, "void AddTeleport__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int mienemy, int id, int dam)")
del_items(0x801293BC)
SetType(0x801293BC, "void AddLightball__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int mienemy, int id, int dam)")
del_items(0x80129510)
SetType(0x80129510, "void AddFirewall__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int mienemy, int id, int dam)")
del_items(0x801296F8)
SetType(0x801296F8, "void AddFireball__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int mienemy, int id, int dam)")
del_items(0x80129954)
SetType(0x80129954, "void AddLightctrl__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int mienemy, int id, int dam)")
del_items(0x80129A3C)
SetType(0x80129A3C, "void AddLightning__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int mienemy, int id, int dam)")
del_items(0x80129C04)
SetType(0x80129C04, "void AddMisexp__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int mienemy, int id, int dam)")
del_items(0x80129E10)
SetType(0x80129E10, "unsigned char CheckIfTrig__Fii(int x, int y)")
del_items(0x80129EF4)
SetType(0x80129EF4, "void AddTown__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int mienemy, int id, int dam)")
del_items(0x8012A318)
SetType(0x8012A318, "void AddFlash__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int mienemy, int id, int dam)")
del_items(0x8012A528)
SetType(0x8012A528, "void AddFlash2__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int mienemy, int id, int dam)")
del_items(0x8012A708)
SetType(0x8012A708, "void AddManashield__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int mienemy, int id, int dam)")
del_items(0x8012A7D0)
SetType(0x8012A7D0, "void AddFiremove__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int mienemy, int id, int dam)")
del_items(0x8012A92C)
SetType(0x8012A92C, "void AddGuardian__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int mienemy, int id, int dam)")
del_items(0x8012AD98)
SetType(0x8012AD98, "void AddChain__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int mienemy, int id, int dam)")
del_items(0x8012ADF4)
SetType(0x8012ADF4, "void AddRhino__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int mienemy, int id, int dam)")
del_items(0x8012AFB0)
SetType(0x8012AFB0, "void AddFlare__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int mienemy, int id, int dam)")
del_items(0x8012B2A8)
SetType(0x8012B2A8, "void AddAcid__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int mienemy, int id, int dam)")
del_items(0x8012B3AC)
SetType(0x8012B3AC, "void AddAcidpud__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int mienemy, int id, int dam)")
del_items(0x8012B484)
SetType(0x8012B484, "void AddStone__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int mienemy, int id, int dam)")
del_items(0x8012B77C)
SetType(0x8012B77C, "void AddGolem__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int mienemy, int id, int dam)")
del_items(0x8012B934)
SetType(0x8012B934, "void AddBoom__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int mienemy, int id, int dam)")
del_items(0x8012B9C8)
SetType(0x8012B9C8, "void AddHeal__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int mienemy, int id, int dam)")
del_items(0x8012BBF0)
SetType(0x8012BBF0, "void AddHealOther__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int mienemy, int id, int dam)")
del_items(0x8012BC58)
SetType(0x8012BC58, "void AddElement__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int mienemy, int id, int dam)")
del_items(0x8012BE84)
SetType(0x8012BE84, "void AddIdentify__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int mienemy, int id, int dam)")
del_items(0x8012BF34)
SetType(0x8012BF34, "void AddFirewallC__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int mienemy, int id, int dam)")
del_items(0x8012C1E4)
SetType(0x8012C1E4, "void AddInfra__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int mienemy, int id, int dam)")
del_items(0x8012C2E0)
SetType(0x8012C2E0, "void AddWave__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int mienemy, int id, int dam)")
del_items(0x8012C364)
SetType(0x8012C364, "void AddNova__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int mienemy, int id, int dam)")
del_items(0x8012C57C)
SetType(0x8012C57C, "void AddRepair__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int mienemy, int id, int dam)")
del_items(0x8012C62C)
SetType(0x8012C62C, "void AddRecharge__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int mienemy, int id, int dam)")
del_items(0x8012C6DC)
SetType(0x8012C6DC, "void AddDisarm__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int mienemy, int id, int dam)")
del_items(0x8012C744)
SetType(0x8012C744, "void AddApoca__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int mienemy, int id, int dam)")
del_items(0x8012C980)
SetType(0x8012C980, "void AddFlame__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int mienemy, int id, int seqno)")
del_items(0x8012CB9C)
SetType(0x8012CB9C, "void AddFlamec__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int mienemy, int id, int dam)")
del_items(0x8012CC8C)
SetType(0x8012CC8C, "void AddCbolt__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int micaster, int id, int dam)")
del_items(0x8012CE80)
SetType(0x8012CE80, "void AddHbolt__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int micaster, int id, int dam)")
del_items(0x8012D040)
SetType(0x8012D040, "void AddResurrect__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int mienemy, int id, int dam)")
del_items(0x8012D0B4)
SetType(0x8012D0B4, "void AddResurrectBeam__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int mienemy, int id, int dam)")
del_items(0x8012D13C)
SetType(0x8012D13C, "void AddTelekinesis__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int mienemy, int id, int dam)")
del_items(0x8012D1A4)
SetType(0x8012D1A4, "void AddBoneSpirit__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int mienemy, int id, int dam)")
del_items(0x8012D3A0)
SetType(0x8012D3A0, "void AddRportal__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int mienemy, int id, int dam)")
del_items(0x8012D440)
SetType(0x8012D440, "void AddDiabApoca__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int mienemy, int id, int dam)")
del_items(0x8012D57C)
SetType(0x8012D57C, "int AddMissile__Fiiiiiiciii(int sx, int sy, int v1, int v2, int midir, int mitype, int micaster, int id, int v3, int spllvl)")
del_items(0x8012D8C8)
SetType(0x8012D8C8, "int Sentfire__Fiii(int i, int sx, int sy)")
del_items(0x8012DAAC)
SetType(0x8012DAAC, "void MI_Dummy__Fi(int i)")
del_items(0x8012DAB4)
SetType(0x8012DAB4, "void MI_Golem__Fi(int i)")
del_items(0x8012DD10)
SetType(0x8012DD10, "void MI_SetManashield__Fi(int i)")
del_items(0x8012DD4C)
SetType(0x8012DD4C, "void MI_LArrow__Fi(int i)")
del_items(0x8012E508)
SetType(0x8012E508, "void MI_Arrow__Fi(int i)")
del_items(0x8012E724)
SetType(0x8012E724, "void MI_Firebolt__Fi(int i)")
del_items(0x8012EDF0)
SetType(0x8012EDF0, "void MI_Lightball__Fi(int i)")
del_items(0x8012F078)
SetType(0x8012F078, "void MI_Acidpud__Fi(int i)")
del_items(0x8012F188)
SetType(0x8012F188, "void MI_Firewall__Fi(int i)")
del_items(0x8012F44C)
SetType(0x8012F44C, "void MI_Fireball__Fi(int i)")
del_items(0x8012FE10)
SetType(0x8012FE10, "void MI_Lightctrl__Fi(int i)")
del_items(0x8013037C)
SetType(0x8013037C, "void MI_Lightning__Fi(int i)")
del_items(0x801304F8)
SetType(0x801304F8, "void MI_Town__Fi(int i)")
del_items(0x80130740)
SetType(0x80130740, "void MI_Flash__Fi(int i)")
del_items(0x80130B78)
SetType(0x80130B78, "void MI_Flash2__Fi(int i)")
del_items(0x80130DC0)
SetType(0x80130DC0, "void MI_Manashield__Fi(int i)")
del_items(0x801313C8)
SetType(0x801313C8, "void MI_Firemove__Fi(int i)")
del_items(0x80131804)
SetType(0x80131804, "void MI_Guardian__Fi(int i)")
del_items(0x80131BD0)
SetType(0x80131BD0, "void MI_Chain__Fi(int i)")
del_items(0x80131ECC)
SetType(0x80131ECC, "void MI_Misexp__Fi(int i)")
del_items(0x801321CC)
SetType(0x801321CC, "void MI_Acidsplat__Fi(int i)")
del_items(0x80132368)
SetType(0x80132368, "void MI_Teleport__Fi(int i)")
del_items(0x80132730)
SetType(0x80132730, "void MI_Stone__Fi(int i)")
del_items(0x801328DC)
SetType(0x801328DC, "void MI_Boom__Fi(int i)")
del_items(0x801329D4)
SetType(0x801329D4, "void MI_Rhino__Fi(int i)")
del_items(0x80132D80)
SetType(0x80132D80, "void MI_FirewallC__Fi(int i)")
del_items(0x801330E8)
SetType(0x801330E8, "void MI_Infra__Fi(int i)")
del_items(0x801331A0)
SetType(0x801331A0, "void MI_Apoca__Fi(int i)")
del_items(0x80133434)
SetType(0x80133434, "void MI_Wave__Fi(int i)")
del_items(0x80133930)
SetType(0x80133930, "void MI_Nova__Fi(int i)")
del_items(0x80133BF0)
SetType(0x80133BF0, "void MI_Flame__Fi(int i)")
del_items(0x80133DE8)
SetType(0x80133DE8, "void MI_Flamec__Fi(int i)")
del_items(0x80134070)
SetType(0x80134070, "void MI_Cbolt__Fi(int i)")
del_items(0x80134374)
SetType(0x80134374, "void MI_Hbolt__Fi(int i)")
del_items(0x80134680)
SetType(0x80134680, "void MI_Element__Fi(int i)")
del_items(0x80134D38)
SetType(0x80134D38, "void MI_Bonespirit__Fi(int i)")
del_items(0x80135140)
SetType(0x80135140, "void MI_ResurrectBeam__Fi(int i)")
del_items(0x801351B0)
SetType(0x801351B0, "void MI_Rportal__Fi(int i)")
del_items(0x801353D4)
SetType(0x801353D4, "void ProcessMissiles__Fv()")
del_items(0x801357C8)
SetType(0x801357C8, "void ClearMissileSpot__Fi(int mi)")
del_items(0x80135880)
SetType(0x80135880, "void MoveToScrollTarget__7CBlocks(struct CBlocks *this)")
del_items(0x80135894)
SetType(0x80135894, "void MonstPartJump__Fi(int m)")
del_items(0x80135A28)
SetType(0x80135A28, "void DeleteMonster__Fi(int i)")
del_items(0x80135A60)
SetType(0x80135A60, "int M_GetDir__Fi(int i)")
del_items(0x80135ABC)
SetType(0x80135ABC, "void M_StartDelay__Fii(int i, int len)")
del_items(0x80135B04)
SetType(0x80135B04, "void M_StartRAttack__Fiii(int i, int missile_type, int dam)")
del_items(0x80135C1C)
SetType(0x80135C1C, "void M_StartRSpAttack__Fiii(int i, int missile_type, int dam)")
del_items(0x80135D40)
SetType(0x80135D40, "void M_StartSpAttack__Fi(int i)")
del_items(0x80135E28)
SetType(0x80135E28, "void M_StartEat__Fi(int i)")
del_items(0x80135EF8)
SetType(0x80135EF8, "void M_GetKnockback__Fi(int i)")
del_items(0x801360D0)
SetType(0x801360D0, "void M_StartHit__Fiii(int i, int pnum, int dam)")
del_items(0x801363C8)
SetType(0x801363C8, "void M_DiabloDeath__FiUc(int i, unsigned char sendmsg)")
del_items(0x801366EC)
SetType(0x801366EC, "void M2MStartHit__Fiii(int mid, int i, int dam)")
del_items(0x80136998)
SetType(0x80136998, "void MonstStartKill__FiiUc(int i, int pnum, unsigned char sendmsg)")
del_items(0x80136C6C)
SetType(0x80136C6C, "void M2MStartKill__Fii(int i, int mid)")
del_items(0x80137034)
SetType(0x80137034, "void M_StartKill__Fii(int i, int pnum)")
del_items(0x80137124)
SetType(0x80137124, "void M_StartFadein__FiiUc(int i, int md, unsigned char backwards)")
del_items(0x80137278)
SetType(0x80137278, "void M_StartFadeout__FiiUc(int i, int md, unsigned char backwards)")
del_items(0x801373C0)
SetType(0x801373C0, "void M_StartHeal__Fi(int i)")
del_items(0x80137440)
SetType(0x80137440, "void M_ChangeLightOffset__Fi(int monst)")
del_items(0x801374E0)
SetType(0x801374E0, "int M_DoStand__Fi(int i)")
del_items(0x80137548)
SetType(0x80137548, "int M_DoWalk__Fi(int i)")
del_items(0x801377CC)
SetType(0x801377CC, "int M_DoWalk2__Fi(int i)")
del_items(0x801379B8)
SetType(0x801379B8, "int M_DoWalk3__Fi(int i)")
del_items(0x80137C7C)
SetType(0x80137C7C, "void M_TryM2MHit__Fiiiii(int i, int mid, int hper, int mind, int maxd)")
del_items(0x80137E44)
SetType(0x80137E44, "void M_TryH2HHit__Fiiiii(int i, int pnum, int Hit, int MinDam, int MaxDam)")
del_items(0x80138458)
SetType(0x80138458, "int M_DoAttack__Fi(int i)")
del_items(0x801385FC)
SetType(0x801385FC, "int M_DoRAttack__Fi(int i)")
del_items(0x80138774)
SetType(0x80138774, "int M_DoRSpAttack__Fi(int i)")
del_items(0x80138964)
SetType(0x80138964, "int M_DoSAttack__Fi(int i)")
del_items(0x80138A38)
SetType(0x80138A38, "int M_DoFadein__Fi(int i)")
del_items(0x80138B08)
SetType(0x80138B08, "int M_DoFadeout__Fi(int i)")
del_items(0x80138C1C)
SetType(0x80138C1C, "int M_DoHeal__Fi(int i)")
del_items(0x80138CC8)
SetType(0x80138CC8, "int M_DoTalk__Fi(int i)")
del_items(0x80139134)
SetType(0x80139134, "void M_Teleport__Fi(int i)")
del_items(0x80139368)
SetType(0x80139368, "int M_DoGotHit__Fi(int i)")
del_items(0x801393C8)
SetType(0x801393C8, "void DoEnding__Fv()")
del_items(0x80139474)
SetType(0x80139474, "void PrepDoEnding__Fv()")
del_items(0x80139598)
SetType(0x80139598, "int M_DoDeath__Fi(int i)")
del_items(0x80139768)
SetType(0x80139768, "int M_DoSpStand__Fi(int i)")
del_items(0x8013980C)
SetType(0x8013980C, "int M_DoDelay__Fi(int i)")
del_items(0x801398FC)
SetType(0x801398FC, "int M_DoStone__Fi(int i)")
del_items(0x80139980)
SetType(0x80139980, "void M_WalkDir__Fii(int i, int md)")
del_items(0x80139BA8)
SetType(0x80139BA8, "void GroupUnity__Fi(int i)")
del_items(0x80139F94)
SetType(0x80139F94, "unsigned char M_CallWalk__Fii(int i, int md)")
del_items(0x8013A180)
SetType(0x8013A180, "unsigned char M_PathWalk__Fi(int i, char plr2monst[9], unsigned char (*Check)())")
del_items(0x8013A244)
SetType(0x8013A244, "unsigned char M_CallWalk2__Fii(int i, int md)")
del_items(0x8013A358)
SetType(0x8013A358, "unsigned char M_DumbWalk__Fii(int i, int md)")
del_items(0x8013A3AC)
SetType(0x8013A3AC, "unsigned char M_RoundWalk__FiiRi(int i, int md, int *dir)")
del_items(0x8013A54C)
SetType(0x8013A54C, "void MAI_Zombie__Fi(int i)")
del_items(0x8013A744)
SetType(0x8013A744, "void MAI_SkelSd__Fi(int i)")
del_items(0x8013A8DC)
SetType(0x8013A8DC, "void MAI_Snake__Fi(int i)")
del_items(0x8013ACC0)
SetType(0x8013ACC0, "void MAI_Bat__Fi(int i)")
del_items(0x8013B078)
SetType(0x8013B078, "void MAI_SkelBow__Fi(int i)")
del_items(0x8013B25C)
SetType(0x8013B25C, "void MAI_Fat__Fi(int i)")
del_items(0x8013B40C)
SetType(0x8013B40C, "void MAI_Sneak__Fi(int i)")
del_items(0x8013B7F8)
SetType(0x8013B7F8, "void MAI_Fireman__Fi(int i)")
del_items(0x8013BAF0)
SetType(0x8013BAF0, "void MAI_Fallen__Fi(int i)")
del_items(0x8013BE0C)
SetType(0x8013BE0C, "void MAI_Cleaver__Fi(int i)")
del_items(0x8013BEF4)
SetType(0x8013BEF4, "void MAI_Round__FiUc(int i, unsigned char special)")
del_items(0x8013C360)
SetType(0x8013C360, "void MAI_GoatMc__Fi(int i)")
del_items(0x8013C380)
SetType(0x8013C380, "void MAI_Ranged__FiiUc(int i, int missile_type, unsigned char special)")
del_items(0x8013C5A0)
SetType(0x8013C5A0, "void MAI_GoatBow__Fi(int i)")
del_items(0x8013C5C4)
SetType(0x8013C5C4, "void MAI_Succ__Fi(int i)")
del_items(0x8013C5E8)
SetType(0x8013C5E8, "void MAI_AcidUniq__Fi(int i)")
del_items(0x8013C60C)
SetType(0x8013C60C, "void MAI_Scav__Fi(int i)")
del_items(0x8013CA24)
SetType(0x8013CA24, "void MAI_Garg__Fi(int i)")
del_items(0x8013CC04)
SetType(0x8013CC04, "void MAI_RoundRanged__FiiUciUc(int i, int missile_type, unsigned char checkdoors, int dam, int lessmissiles)")
del_items(0x8013D118)
SetType(0x8013D118, "void MAI_Magma__Fi(int i)")
del_items(0x8013D144)
SetType(0x8013D144, "void MAI_Storm__Fi(int i)")
del_items(0x8013D170)
SetType(0x8013D170, "void MAI_Acid__Fi(int i)")
del_items(0x8013D1A0)
SetType(0x8013D1A0, "void MAI_Diablo__Fi(int i)")
del_items(0x8013D1CC)
SetType(0x8013D1CC, "void MAI_RR2__Fiii(int i, int mistype, int dam)")
del_items(0x8013D6CC)
SetType(0x8013D6CC, "void MAI_Mega__Fi(int i)")
del_items(0x8013D6F0)
SetType(0x8013D6F0, "void MAI_SkelKing__Fi(int i)")
del_items(0x8013DC2C)
SetType(0x8013DC2C, "void MAI_Rhino__Fi(int i)")
del_items(0x8013E0D4)
SetType(0x8013E0D4, "void MAI_Counselor__Fi(int i, unsigned char counsmiss[4], int _mx, int _my)")
del_items(0x8013E5A0)
SetType(0x8013E5A0, "void MAI_Garbud__Fi(int i)")
del_items(0x8013E750)
SetType(0x8013E750, "void MAI_Zhar__Fi(int i)")
del_items(0x8013E948)
SetType(0x8013E948, "void MAI_SnotSpil__Fi(int i)")
del_items(0x8013EB7C)
SetType(0x8013EB7C, "void MAI_Lazurus__Fi(int i)")
del_items(0x8013EDF4)
SetType(0x8013EDF4, "void MAI_Lazhelp__Fi(int i)")
del_items(0x8013EF14)
SetType(0x8013EF14, "void MAI_Lachdanan__Fi(int i)")
del_items(0x8013F0A4)
SetType(0x8013F0A4, "void MAI_Warlord__Fi(int i)")
del_items(0x8013F1F0)
SetType(0x8013F1F0, "void DeleteMonsterList__Fv()")
del_items(0x8013F30C)
SetType(0x8013F30C, "void ProcessMonsters__Fv()")
del_items(0x8013F894)
SetType(0x8013F894, "unsigned char DirOK__Fii(int i, int mdir)")
del_items(0x8013FC7C)
SetType(0x8013FC7C, "unsigned char PosOkMissile__Fii(int x, int y)")
del_items(0x8013FCE4)
SetType(0x8013FCE4, "unsigned char CheckNoSolid__Fii(int x, int y)")
del_items(0x8013FD28)
SetType(0x8013FD28, "unsigned char LineClearF__FPFii_Uciiii(unsigned char (*Clear)(), int x1, int y1, int x2, int y2)")
del_items(0x8013FFB0)
SetType(0x8013FFB0, "unsigned char LineClear__Fiiii(int x1, int y1, int x2, int y2)")
del_items(0x8013FFF0)
SetType(0x8013FFF0, "unsigned char LineClearF1__FPFiii_Uciiiii(unsigned char (*Clear)(), int monst, int x1, int y1, int x2, int y2)")
del_items(0x80140284)
SetType(0x80140284, "void M_FallenFear__Fii(int x, int y)")
del_items(0x80140454)
SetType(0x80140454, "void PrintMonstHistory__Fi(int mt)")
del_items(0x8014067C)
SetType(0x8014067C, "void PrintUniqueHistory__Fv()")
del_items(0x801407A0)
SetType(0x801407A0, "void MissToMonst__Fiii(int i, int x, int y)")
del_items(0x80140C04)
SetType(0x80140C04, "unsigned char PosOkMonst2__Fiii(int i, int x, int y)")
del_items(0x80140E20)
SetType(0x80140E20, "unsigned char PosOkMonst3__Fiii(int i, int x, int y)")
del_items(0x80141114)
SetType(0x80141114, "int M_SpawnSkel__Fiii(int x, int y, int dir)")
del_items(0x8014126C)
SetType(0x8014126C, "void TalktoMonster__Fi(int i)")
del_items(0x8014138C)
SetType(0x8014138C, "void SpawnGolum__Fiiii(int i, int x, int y, int mi)")
del_items(0x801415E4)
SetType(0x801415E4, "unsigned char CanTalkToMonst__Fi(int m)")
del_items(0x8014161C)
SetType(0x8014161C, "unsigned char CheckMonsterHit__FiRUc(int m, unsigned char *ret)")
del_items(0x801416E8)
SetType(0x801416E8, "void MAI_Golum__Fi(int i)")
del_items(0x80141A5C)
SetType(0x80141A5C, "unsigned char MAI_Path__Fi(int i)")
del_items(0x80141BC0)
SetType(0x80141BC0, "void M_StartAttack__Fi(int i)")
del_items(0x80141CA8)
SetType(0x80141CA8, "void M_StartWalk__Fiiiiii(int i, int xvel, int yvel, int xadd, int yadd, int EndDir)")
del_items(0x80141E08)
SetType(0x80141E08, "void FreeInvGFX__Fv()")
del_items(0x80141E10)
SetType(0x80141E10, "void InvDrawSlot__Fiii(int X, int Y, int Frame)")
del_items(0x80141E94)
SetType(0x80141E94, "void InvDrawSlotBack__FiiiiUc(int X, int Y, int W, int H, int Flag)")
del_items(0x801420E8)
SetType(0x801420E8, "void InvDrawItem__FiiiUci(int ItemX, int ItemY, int ItemNo, unsigned char StatFlag, int TransFlag)")
del_items(0x801421B8)
SetType(0x801421B8, "void InvDrawSlots__Fv()")
del_items(0x80142490)
SetType(0x80142490, "void PrintStat__FiiPcUc(int Y, int Txt0, char *Txt1, unsigned char Col)")
del_items(0x8014255C)
SetType(0x8014255C, "void DrawInvStats__Fv()")
del_items(0x801430E8)
SetType(0x801430E8, "void DrawInvBack__Fv()")
del_items(0x80143170)
SetType(0x80143170, "void DrawInvCursor__Fv()")
del_items(0x8014364C)
SetType(0x8014364C, "void DrawInvMsg__Fv()")
del_items(0x80143814)
SetType(0x80143814, "void DrawInvUnique__Fv()")
del_items(0x80143938)
SetType(0x80143938, "void DrawInv__Fv()")
del_items(0x80143978)
SetType(0x80143978, "void DrawInvTSK__FP4TASK(struct TASK *T)")
del_items(0x80143CA8)
SetType(0x80143CA8, "void DoThatDrawInv__Fv()")
del_items(0x80144470)
SetType(0x80144470, "unsigned char AutoPlace__FiiiiUc(int pnum, int ii, int sx, int sy, int saveflag)")
del_items(0x80144790)
SetType(0x80144790, "unsigned char SpecialAutoPlace__FiiiiUc(int pnum, int ii, int sx, int sy, int saveflag)")
del_items(0x80144B2C)
SetType(0x80144B2C, "unsigned char GoldAutoPlace__Fi(int pnum)")
del_items(0x80144FFC)
SetType(0x80144FFC, "unsigned char WeaponAutoPlace__Fi(int pnum)")
del_items(0x80145288)
SetType(0x80145288, "int SwapItem__FP10ItemStructT0(struct ItemStruct *a, struct ItemStruct *b)")
del_items(0x80145384)
SetType(0x80145384, "void CheckInvPaste__Fiii(int pnum, int mx, int my)")
del_items(0x80147070)
SetType(0x80147070, "void CheckInvCut__Fiii(int pnum, int mx, int my)")
del_items(0x80147B20)
SetType(0x80147B20, "void RemoveInvItem__Fii(int pnum, int iv)")
del_items(0x80147DC8)
SetType(0x80147DC8, "void RemoveSpdBarItem__Fii(int pnum, int iv)")
del_items(0x80147EC8)
SetType(0x80147EC8, "void CheckInvScrn__Fv()")
del_items(0x80147F40)
SetType(0x80147F40, "void CheckItemStats__Fi(int pnum)")
del_items(0x80147FC4)
SetType(0x80147FC4, "void CheckBookLevel__Fi(int pnum)")
del_items(0x801480F8)
SetType(0x801480F8, "void CheckQuestItem__Fi(int pnum)")
del_items(0x80148520)
SetType(0x80148520, "void InvGetItem__Fii(int pnum, int ii)")
del_items(0x8014881C)
SetType(0x8014881C, "void AutoGetItem__Fii(int pnum, int ii)")
del_items(0x8014928C)
SetType(0x8014928C, "int FindGetItem__FiUsi(int idx, unsigned short ci, int iseed)")
del_items(0x80149340)
SetType(0x80149340, "void SyncGetItem__FiiiUsi(int x, int y, int idx, unsigned short ci, int iseed)")
del_items(0x801494CC)
SetType(0x801494CC, "unsigned char TryInvPut__Fv()")
del_items(0x80149694)
SetType(0x80149694, "int InvPutItem__Fiii(int pnum, int x, int y)")
del_items(0x80149B3C)
SetType(0x80149B3C, "int SyncPutItem__FiiiiUsiUciiiiiUl(int pnum, int x, int y, int idx, int icreateinfo, int iseed, int Id, int dur, int mdur, int ch, int mch, int ivalue, unsigned long ibuff)")
del_items(0x8014A098)
SetType(0x8014A098, "char CheckInvHLight__Fv()")
del_items(0x8014A3E0)
SetType(0x8014A3E0, "void RemoveScroll__Fi(int pnum)")
del_items(0x8014A5C4)
SetType(0x8014A5C4, "unsigned char UseScroll__Fv()")
del_items(0x8014A82C)
SetType(0x8014A82C, "void UseStaffCharge__FP12PlayerStruct(struct PlayerStruct *ptrplr)")
del_items(0x8014A894)
SetType(0x8014A894, "unsigned char UseStaff__Fv()")
del_items(0x8014A954)
SetType(0x8014A954, "void StartGoldDrop__Fv()")
del_items(0x8014AA50)
SetType(0x8014AA50, "unsigned char UseInvItem__Fii(int pnum, int cii)")
del_items(0x8014AF74)
SetType(0x8014AF74, "void DoTelekinesis__Fv()")
del_items(0x8014B09C)
SetType(0x8014B09C, "long CalculateGold__Fi(int pnum)")
del_items(0x8014B1D4)
SetType(0x8014B1D4, "unsigned char DropItemBeforeTrig__Fv()")
del_items(0x8014B22C)
SetType(0x8014B22C, "void ControlInv__Fv()")
del_items(0x8014B50C)
SetType(0x8014B50C, "void InvGetItemWH__Fi(int Pos)")
del_items(0x8014B600)
SetType(0x8014B600, "void InvAlignObject__Fv()")
del_items(0x8014B7B4)
SetType(0x8014B7B4, "void InvSetItemCurs__Fv()")
del_items(0x8014B948)
SetType(0x8014B948, "void InvMoveCursLeft__Fv()")
del_items(0x8014BB34)
SetType(0x8014BB34, "void InvMoveCursRight__Fv()")
del_items(0x8014BE5C)
SetType(0x8014BE5C, "void InvMoveCursUp__Fv()")
del_items(0x8014C054)
SetType(0x8014C054, "void InvMoveCursDown__Fv()")
del_items(0x8014C37C)
SetType(0x8014C37C, "void DumpMonsters__7CBlocks(struct CBlocks *this)")
del_items(0x8014C3A4)
SetType(0x8014C3A4, "void Flush__4CPad(struct CPad *this)")
del_items(0x8014C3C8)
SetType(0x8014C3C8, "void SetRGB__6DialogUcUcUc(struct Dialog *this, unsigned char R, unsigned char G, unsigned char B)")
del_items(0x8014C3E8)
SetType(0x8014C3E8, "void SetBack__6Dialogi(struct Dialog *this, int Type)")
del_items(0x8014C3F0)
SetType(0x8014C3F0, "void SetBorder__6Dialogi(struct Dialog *this, int Type)")
del_items(0x8014C3F8)
SetType(0x8014C3F8, "int SetOTpos__6Dialogi(struct Dialog *this, int OT)")
del_items(0x8014C404)
SetType(0x8014C404, "void ___6Dialog(struct Dialog *this, int __in_chrg)")
del_items(0x8014C42C)
SetType(0x8014C42C, "struct Dialog *__6Dialog(struct Dialog *this)")
del_items(0x8014C488)
SetType(0x8014C488, "void StartAutomap__Fv()")
del_items(0x8014C4A0)
SetType(0x8014C4A0, "void AutomapUp__Fv()")
del_items(0x8014C4B8)
SetType(0x8014C4B8, "void AutomapDown__Fv()")
del_items(0x8014C4D0)
SetType(0x8014C4D0, "void AutomapLeft__Fv()")
del_items(0x8014C4E8)
SetType(0x8014C4E8, "void AutomapRight__Fv()")
del_items(0x8014C500)
SetType(0x8014C500, "struct LINE_F2 *AMGetLine__FUcUcUc(unsigned char R, unsigned char G, unsigned char B)")
del_items(0x8014C5AC)
SetType(0x8014C5AC, "void AmDrawLine__Fiiii(int x0, int y0, int x1, int y1)")
del_items(0x8014C614)
SetType(0x8014C614, "void AmDrawPlayer__Fiiii(int x0, int y0, int x1, int y1)")
del_items(0x8014C67C)
SetType(0x8014C67C, "void DrawAutomapPlr__Fv()")
del_items(0x8014CA00)
SetType(0x8014CA00, "void DrawAutoMapVertWall__Fiii(int X, int Y, int Length)")
del_items(0x8014CAD4)
SetType(0x8014CAD4, "void DrawAutoMapHorzWall__Fiii(int X, int Y, int Length)")
del_items(0x8014CBA8)
SetType(0x8014CBA8, "void DrawAutoMapVertDoor__Fii(int X, int Y)")
del_items(0x8014CD7C)
SetType(0x8014CD7C, "void DrawAutoMapHorzDoor__Fii(int X, int Y)")
del_items(0x8014CF54)
SetType(0x8014CF54, "void DrawAutoMapVertGrate__Fii(int X, int Y)")
del_items(0x8014D008)
SetType(0x8014D008, "void DrawAutoMapHorzGrate__Fii(int X, int Y)")
del_items(0x8014D0BC)
SetType(0x8014D0BC, "void DrawAutoMapSquare__Fii(int X, int Y)")
del_items(0x8014D204)
SetType(0x8014D204, "void DrawAutoMapStairs__Fii(int X, int Y)")
del_items(0x8014D404)
SetType(0x8014D404, "void DrawAutomap__Fv()")
del_items(0x8014D820)
SetType(0x8014D820, "void PRIM_GetPrim__FPP7LINE_F2(struct LINE_F2 **Prim)")
|
83910
|
from shadowlands.sl_dapp import SLDapp, SLFrame
import pyperclip, os
import schedule
from shadowlands.tui.debug import debug
import pdb
class NetworkConnection(SLDapp):
def initialize(self):
self.add_sl_frame( NetworkStrategies(self, 10, 26, title="Network Options"))
self.connection_strategy = None
    def attempt_connection(self):
        fn = self._interface.node.__getattribute__(self.conn_fn)
        self._interface.node.thread_shutdown = True
        self._interface.node.heartbeat_thread.join()
        self._interface.node.thread_shutdown = False
        try:
            if len(self.args) > 0:
                result = fn(self.args)
            else:
                result = fn()
        except StaleBlockchain:
            self._scene.add_effect( MessageDialog(self._screen, "Stale blockchain on selected Node"))
            return
        # Restart the heartbeat thread once the connection attempt has succeeded.
        self._interface.node.start_heartbeat_thread()
        return result
class NetworkStrategies(SLFrame):
def initialize(self):
options = [
('Local node', 'connect_w3_local'),
('Custom infura', 'connect_w3_custom_infura'),
('Custom http', 'connect_w3_custom_http'),
('Custom websocket', 'connect_w3_custom_websocket'),
('Custom ipc', 'connect_w3_custom_ipc'),
]
self.listbox_value = self.add_listbox(
5, options, on_select=self._select
#default_value=self.dapp.config.connection_strategy
)
self.add_button(self.close, "Cancel")
def _select(self):
connect_fn = self.listbox_value()
self.dapp.connection_strategy = connect_fn
if connect_fn == 'connect_w3_custom_http':
self.dapp.add_sl_frame(CustomHttpUri(self.dapp, 5, 30, title="Custom Http URI"))
elif connect_fn == 'connect_w3_custom_ipc':
self.dapp.add_sl_frame(CustomIpc(self.dapp, 5, 30, title="Custom IPC path"))
elif connect_fn == 'connect_w3_custom_websocket':
self.dapp.add_sl_frame(CustomWebsocket(self.dapp, 5, 30, title="Custom Websocket URI"))
elif connect_fn == 'connect_w3_custom_infura':
self.dapp.add_sl_frame(CustomInfura(self.dapp, 12, 45, title="Custom Infura Credentials"))
self.close()
class CustomInfura(SLFrame):
def initialize(self):
self.add_divider()
self.add_label(" WEB3_INFURA_PROJECT_ID")
self.id_value = self.add_textbox(
'',
default_value=os.environ.get('WEB3_INFURA_PROJECT_ID')
)
self.add_label(" WEB3_INFURA_API_SECRET")
self.secret_value = self.add_textbox(
'',
default_value=os.environ.get('WEB3_INFURA_API_SECRET')
)
self.add_button_row(
[
("Connect", self._connect, 0),
("Cancel", self.close, 3)
]
)
    def _connect(self):
        id_value = self.id_value()
        secret_value = self.secret_value()
        self.dapp.config.connection_args = (id_value, secret_value)
self.dapp.config.connection_strategy = self.dapp.connection_strategy
#debug(); pdb.set_trace()
schedule.once().do(self.dapp.node.poll)
self.close()
class CustomHttpUri(SLFrame):
def initialize(self):
self.add_label("Ex: http://192.168.1.150:8545")
self.text_value = self.add_textbox()
self.add_button(self.close,"Cancel")
class CustomIpc(SLFrame):
def initialize(self):
self.add_label("Ex: http://192.168.1.150:8545")
self.text_value = self.add_textbox()
self.add_button(self.close,"Cancel")
class CustomWebsocket(SLFrame):
def initialize(self):
self.add_label("Ex: http://192.168.1.150:8545")
self.text_value = self.add_textbox()
self.add_button(self.close,"Cancel")
|
83936
|
from math import sqrt, pow, sin, pi, cos
from jmetal.core.problem import FloatProblem
from jmetal.core.solution import FloatSolution
"""
.. module:: ZDT
:platform: Unix, Windows
:synopsis: ZDT problem family of multi-objective problems.
.. moduleauthor:: <NAME> <<EMAIL>>
"""
class ZDT1(FloatProblem):
""" Problem ZDT1.
.. note:: Bi-objective unconstrained problem. The default number of variables is 30.
.. note:: Continuous problem having a convex Pareto front
"""
def __init__(self, number_of_variables: int=30):
""" :param number_of_variables: Number of decision variables of the problem.
"""
super(ZDT1, self).__init__()
self.number_of_variables = number_of_variables
self.number_of_objectives = 2
self.number_of_constraints = 0
self.obj_directions = [self.MINIMIZE, self.MINIMIZE]
self.obj_labels = ['x', 'y']
self.lower_bound = self.number_of_variables * [0.0]
self.upper_bound = self.number_of_variables * [1.0]
def evaluate(self, solution: FloatSolution) -> FloatSolution:
g = self.eval_g(solution)
h = self.eval_h(solution.variables[0], g)
solution.objectives[0] = solution.variables[0]
solution.objectives[1] = h * g
return solution
def eval_g(self, solution: FloatSolution):
g = sum(solution.variables) - solution.variables[0]
constant = 9.0 / (solution.number_of_variables - 1)
return constant * g + 1.0
def eval_h(self, f: float, g: float) -> float:
return 1.0 - sqrt(f / g)
def get_name(self):
return 'ZDT1'
class ZDT1Modified(ZDT1):
""" Problem ZDT1Modified.
.. note:: Version including a loop for increasing the computing time of the evaluation functions.
"""
def __init__(self, number_of_variables = 30):
super(ZDT1Modified, self).__init__(number_of_variables)
def evaluate(self, solution:FloatSolution) -> FloatSolution:
s: float = 0.0
for i in range(1000):
for j in range(10000):
s += i * 0.235 / 1.234 + 1.23525 * j
return super().evaluate(solution)
class ZDT2(ZDT1):
""" Problem ZDT2.
.. note:: Bi-objective unconstrained problem. The default number of variables is 30.
.. note:: Continuous problem having a non-convex Pareto front
"""
def eval_h(self, f: float, g: float) -> float:
return 1.0 - pow(f / g, 2.0)
def get_name(self):
return 'ZDT2'
class ZDT3(ZDT1):
""" Problem ZDT3.
.. note:: Bi-objective unconstrained problem. The default number of variables is 30.
.. note:: Continuous problem having a partitioned Pareto front
"""
def eval_h(self, f: float, g: float) -> float:
return 1.0 - sqrt(f / g) - (f / g) * sin(10.0 * f * pi)
def get_name(self):
return 'ZDT3'
class ZDT4(ZDT1):
""" Problem ZDT4.
.. note:: Bi-objective unconstrained problem. The default number of variables is 10.
.. note:: Continuous multi-modal problem having a convex Pareto front
"""
def __init__(self, number_of_variables: int=10):
""" :param number_of_variables: Number of decision variables of the problem.
"""
super(ZDT4, self).__init__(number_of_variables=number_of_variables)
self.lower_bound = self.number_of_variables * [-5.0]
self.upper_bound = self.number_of_variables * [5.0]
self.lower_bound[0] = 0.0
self.upper_bound[0] = 1.0
def eval_g(self, solution: FloatSolution):
g = 0.0
for i in range(1, solution.number_of_variables):
g += pow(solution.variables[i], 2.0) - 10.0 * cos(4.0 * pi * solution.variables[i])
g += 1.0 + 10.0 * (solution.number_of_variables - 1)
return g
def eval_h(self, f: float, g: float) -> float:
return 1.0 - sqrt(f / g)
def get_name(self):
return 'ZDT4'
class ZDT6(ZDT1):
""" Problem ZDT6.
.. note:: Bi-objective unconstrained problem. The default number of variables is 10.
.. note:: Continuous problem having a non-convex Pareto front
"""
def __init__(self, number_of_variables: int=10):
""" :param number_of_variables: Number of decision variables of the problem.
"""
super(ZDT6, self).__init__(number_of_variables=number_of_variables)
def eval_g(self, solution: FloatSolution):
g = sum(solution.variables) - solution.variables[0]
g = g / (solution.number_of_variables - 1)
g = pow(g, 0.25)
g = 9.0 * g
g = 1.0 + g
return g
def eval_h(self, f: float, g: float) -> float:
return 1.0 - pow(f / g, 2.0)
def get_name(self):
return 'ZDT6'
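# A minimal usage sketch (not part of the module): instantiate a problem and
# evaluate one random solution. It assumes FloatProblem.create_solution() is
# provided by the installed jMetalPy version.
if __name__ == '__main__':
    problem = ZDT1(number_of_variables=30)
    solution = problem.create_solution()
    problem.evaluate(solution)
    print(problem.get_name(), solution.objectives)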
|
83983
|
from invoke import task
from tasks.changelog_check import changelog_check
from tasks.lint import lint
from tasks.test import test
from tasks.typecheck import typecheck
@task(post=[changelog_check, lint, typecheck, test])
def verify(_ctx):
"""Run all verification steps."""
|
84069
|
import matplotlib
matplotlib.use('Agg')
import argparse
import tkinter as tk
import torch
from isegm.utils import exp
from isegm.inference import utils
from interactive_demo.app import InteractiveDemoApp
def main():
args, cfg = parse_args()
torch.backends.cudnn.deterministic = True
checkpoint_path = utils.find_checkpoint(cfg.INTERACTIVE_MODELS_PATH, args.checkpoint)
model = utils.load_is_model(checkpoint_path, args.device, cpu_dist_maps=True)
root = tk.Tk()
root.minsize(960, 480)
app = InteractiveDemoApp(root, args, model)
root.deiconify()
app.mainloop()
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('--checkpoint', type=str, required=True,
help='The path to the checkpoint. '
'This can be a relative path (relative to cfg.INTERACTIVE_MODELS_PATH) '
'or an absolute path. The file extension can be omitted.')
parser.add_argument('--gpu', type=int, default=0,
help='Id of GPU to use.')
parser.add_argument('--cpu', action='store_true', default=False,
help='Use only CPU for inference.')
parser.add_argument('--limit-longest-size', type=int, default=800,
help='If the largest side of an image exceeds this value, '
'it is resized so that its largest side is equal to this value.')
parser.add_argument('--cfg', type=str, default="config.yml",
help='The path to the config file.')
args = parser.parse_args()
if args.cpu:
        args.device = torch.device('cpu')
else:
args.device = torch.device(f'cuda:{args.gpu}')
cfg = exp.load_config_file(args.cfg, return_edict=True)
return args, cfg
if __name__ == '__main__':
main()
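# Example invocation (the script name and checkpoint are placeholders; any
# checkpoint resolvable by utils.find_checkpoint works):
#
#     python3 demo.py --checkpoint <checkpoint-name-or-path> --gpu 0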
|
84085
|
from indexd import get_app
import os
os.environ["INDEXD_SETTINGS"] = "/var/www/indexd/"
application = get_app()
|
84106
|
from typing import Tuple
import torch
from torch import nn
class MixUp(nn.Module):
r"""
Implementation of mixup: BEYOND EMPIRICAL RISK MINIMIZATION (https://arxiv.org/abs/1710.09412)
Official implementation: https://github.com/facebookresearch/mixup-cifar10
Note: Can sit inside a model as a method or be used in the training loop without initialization
"""
def __init__(self, alpha: float) -> None:
super().__init__()
self.alpha = alpha
# Cache
        self._lambda_scale = -1.
        self._y = torch.empty(0)
        self._y_perm = torch.empty(0)
    @property
    def lambda_scale(self) -> float:
        return self._lambda_scale
    @property
    def y(self) -> torch.Tensor:
        return self._y
    @property
    def y_perm(self) -> torch.Tensor:
        return self._y_perm
    def reset_cache(self) -> None:
        self._lambda_scale = -1.
        self._y = torch.empty(0)
        self._y_perm = torch.empty(0)
    def forward(self, x: torch.Tensor, y: torch.Tensor) -> torch.Tensor:
r"""
x (b, ...): a minibatch of input data from dataset
y (b, ...): a minibatch of labels from dataset
"""
        out, self._y, self._y_perm, self._lambda_scale = self.mix(x, y, self.alpha, self.training)
return out
def get_loss(self, criterion: nn.Module, y_logit: torch.Tensor) -> torch.Tensor:
r"""
Don't forget to call this function after forward propagations.
Will reset the cache after getting the loss.
"""
loss = self.loss(criterion, y_logit, self.y, self.y_perm, self.lambda_scale)
self.reset_cache()
return loss
@staticmethod
def mix(x: torch.Tensor, y: torch.Tensor, alpha: float, is_training: bool = True) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, float]:
r"""
x (b, ...): torch.Tensor - A minibatch of input data from dataset
y (b, ...): torch.Tensor - A minibatch of labels from dataset
alpha: float
is_training: bool - Default: True
"""
        if not is_training:
            # No mixing at evaluation time: return the batch unchanged with lambda = 1.
            return (x, y, y, 1.)
        b: int = x.shape[0]
        perm_indices: torch.Tensor = torch.randperm(b, device=x.device)
        # Sample the mixing coefficient from Beta(alpha, alpha); alpha <= 0 disables mixing.
        lambda_scale: float = torch.distributions.Beta(alpha, alpha).sample().item() if alpha > 0. else 1.
x = lambda_scale*x + (1. - lambda_scale)*x.index_select(dim=0, index=perm_indices)
y_perm = y.index_select(dim=0, index=perm_indices)
return (x, y, y_perm, lambda_scale)
@staticmethod
def loss(
criterion: nn.Module,
y_logit: torch.Tensor,
y: torch.Tensor,
y_perm: torch.Tensor,
lambda_scale: float
) -> torch.Tensor:
return lambda_scale*criterion(y_logit, y) + (1. - lambda_scale)*criterion(y_logit, y_perm)
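# A minimal, self-contained usage sketch (the linear model and random batch
# are stand-ins, not part of the module): mix a batch, forward it through a
# model, then compute the mixup loss from the cached targets.
if __name__ == "__main__":
    model = nn.Linear(8, 3)
    criterion = nn.CrossEntropyLoss()
    mixup = MixUp(alpha=1.0)
    mixup.train()  # mixing only happens in training mode
    x = torch.randn(4, 8)
    y = torch.randint(0, 3, (4,))
    x_mixed = mixup(x, y)
    loss = mixup.get_loss(criterion, model(x_mixed))
    loss.backward()
    print(float(loss))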
|
84132
|
from profil3r.app.search import search_get
from bs4 import BeautifulSoup
import time
class Hackernews:
def __init__(self, config, permutations_list):
# 1000 ms
self.delay = config['plateform']['hackernews']['rate_limit'] / 1000
# https://news.ycombinator.com/user?id={username}
self.format = config['plateform']['hackernews']['format']
self.permutations_list = permutations_list
# Forum
self.type = config['plateform']['hackernews']['type']
# Generate all potential hackernews usernames
def possible_usernames(self):
possible_usernames = []
for permutation in self.permutations_list:
possible_usernames.append(self.format.format(
permutation = permutation,
))
return possible_usernames
def search(self):
hackernews_usernames = {
"type": self.type,
"accounts": []
}
possible_usernames_list = self.possible_usernames()
for username in possible_usernames_list:
r = search_get(username)
if not r:
continue
# If the account exists
if r.text.find("No such user.") != 0:
# Account object
account = {}
# Get the username
account["value"] = username
# Parse HTML response content with beautiful soup
soup = BeautifulSoup(r.text, 'html.parser')
# Scrape the user informations
try:
user_creation_date = str(soup.find_all("table")[2].find_all("td")[3].get_text()) if soup.find_all("table") else None
user_karma = str(soup.find_all("table")[2].find_all("td")[5].get_text()) if soup.find_all("table") else None
account["creation_date"] = {"name": "Creation Date", "value": user_creation_date}
account["karma"] = {"name": "Karma", "value": user_karma}
                except Exception:
pass
# Append the account to the accounts table
hackernews_usernames["accounts"].append(account)
time.sleep(self.delay)
return hackernews_usernames
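# A minimal usage sketch (the config dict mirrors only the keys read in
# __init__ and is an assumption, not the real profil3r configuration file):
if __name__ == '__main__':
    config = {
        'plateform': {
            'hackernews': {
                'rate_limit': 1000,
                'format': 'https://news.ycombinator.com/user?id={permutation}',
                'type': 'forum',
            }
        }
    }
    print(Hackernews(config, ['john', 'johndoe']).search())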
|
84156
|
import FWCore.ParameterSet.Config as cms
process = cms.Process("PROD1")
process.source = cms.Source("IntSource")
process.maxEvents = cms.untracked.PSet(
input = cms.untracked.int32(3)
)
process.out = cms.OutputModule("PoolOutputModule",
fileName = cms.untracked.string('testEdmProvDump.root'),
outputCommands = cms.untracked.vstring(
'keep *',
'drop *_intProducerA_*_*'
)
)
process.a1 = cms.EDAnalyzer("TestFindProduct",
inputTags = cms.untracked.VInputTag( cms.InputTag("source") ),
expectedSum = cms.untracked.int32(12),
inputTagsNotFound = cms.untracked.VInputTag(
cms.InputTag("source", processName=cms.InputTag.skipCurrentProcess()),
cms.InputTag("intProducer", processName=cms.InputTag.skipCurrentProcess()),
cms.InputTag("intProducerU", processName=cms.InputTag.skipCurrentProcess())
)
)
process.a2 = cms.EDAnalyzer("TestFindProduct",
inputTags = cms.untracked.VInputTag( cms.InputTag("intProducerA") ),
expectedSum = cms.untracked.int32(300)
)
process.a3 = cms.EDAnalyzer("TestFindProduct",
inputTags = cms.untracked.VInputTag( cms.InputTag("aliasForInt") ),
expectedSum = cms.untracked.int32(300)
)
process.intProducer = cms.EDProducer("IntProducer", ivalue = cms.int32(1))
process.intProducerU = cms.EDProducer("IntProducer", ivalue = cms.int32(10))
process.intProducerA = cms.EDProducer("IntProducer", ivalue = cms.int32(100))
process.aliasForInt = cms.EDAlias(
intProducerA = cms.VPSet(
cms.PSet(type = cms.string('edmtestIntProduct')
)
)
)
process.intVectorProducer = cms.EDProducer("IntVectorProducer",
count = cms.int32(9),
ivalue = cms.int32(11)
)
process.t = cms.Task(process.intProducerU, process.intProducerA, process.intVectorProducer)
process.p = cms.Path(process.intProducer * process.a1 * process.a2 * process.a3, process.t)
process.e = cms.EndPath(process.out)
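# This configuration is intended to be executed with the CMSSW framework
# driver, e.g. (the file name is an assumption):
#
#     cmsRun testEdmProvDump_cfg.py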
|
84180
|
import warnings
warnings.warn("pandas.types.common is deprecated and will be "
"removed in a future version, import "
"from pandas.api.types",
DeprecationWarning, stacklevel=3)
from pandas.core.dtypes.common import * # noqa
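# For code being migrated off this shim, the equivalents live in the public
# pandas.api.types namespace, for example:
#
#     from pandas.api.types import is_numeric_dtype, is_datetime64_dtype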
|
84182
|
from carla_utils import carla
DestroyActor = carla.command.DestroyActor
import weakref
from typing import Dict
from ..system import debug
from ..basic import flatten_list
from .sensor_callback import RawCallback, DefaultCallback
from .sensor_create import create_sensor, create_sensor_command
def createSensorListMaster(core, vehicle, sensors_param_list):
client, world = core.client, core.world
blueprint_library = world.get_blueprint_library()
batch = [create_sensor_command(core, vehicle, blueprint_library, config) for config in sensors_param_list]
sensor_ids = []
for response in client.apply_batch_sync(batch):
if response.error: raise RuntimeError('spawn sensor failed: ' + response.error)
else: sensor_ids.append(response.actor_id)
sensors = world.get_actors(sensor_ids)
sensors_master = CarlaSensorListMaster(core, vehicle)
for sensor, config in zip(sensors, sensors_param_list):
transform, callback = config['transform'], config.get('callback', None)
sensors_master.append(sensor, transform, callback)
return sensors_master
def createSensorListMasters(core, vehicles, sensors_param_lists):
client, world = core.client, core.world
blueprint_library = world.get_blueprint_library()
sensors_master_dict = {vehicle.id: CarlaSensorListMaster(core, vehicle) for vehicle in vehicles}
batch = []
for vehicle, sensors_param_list in zip(vehicles, sensors_param_lists):
batch.extend([create_sensor_command(core, vehicle, blueprint_library, config) for config in sensors_param_list])
sensor_ids = []
for response in client.apply_batch_sync(batch):
if response.error: raise RuntimeError('spawn sensor failed: ' + response.error)
else: sensor_ids.append(response.actor_id)
sensors = world.get_actors(sensor_ids)
sensors_param_list = flatten_list(sensors_param_lists)
for sensor, config in zip(sensors, sensors_param_list):
transform, callback = config['transform'], config.get('callback', None)
sensors_master_dict[sensor.parent.id].append(sensor, transform, callback)
return list(sensors_master_dict.values())
class CarlaSensorListMaster(object):
def __init__(self, core, vehicle):
self.world, self.vehicle = core.world, vehicle
self.sensor_list = []
self.sensor_dict: Dict[tuple, CarlaSensorMaster] = dict()
'''camera'''
self._cameras = []
self.current_camera_index = 0
def append(self, sensor, transform, callback):
sensor_master = CarlaSensorMaster(sensor, transform, callback)
self.sensor_list.append(sensor_master)
self.sensor_dict[(sensor.type_id, sensor.attributes['role_name'])] = sensor_master
if sensor_master.type_id == 'sensor.camera.rgb' \
or sensor_master.type_id == 'sensor.camera.semantic_segmentation':
self._cameras.append(sensor_master)
# ## disable currently
# def reset(self):
# [sensor_master.reset() for sensor_master in self.sensor_dict.values()]
def get_camera(self):
sensor_master = None
try:
sensor_master = self._cameras[self.current_camera_index]
except IndexError:
pass
return sensor_master
def toggle_camera(self):
self.current_camera_index = (self.current_camera_index + 1) % len(self._cameras)
def destroy(self):
for sensor_master in self.sensor_dict.values():
sensor_master.destroy()
def destroy_commands(self):
"""
Note: do not destroy vehicle in this class.
"""
return [sensor_master.destroy_command() for sensor_master in self.sensor_dict.values()]
def __del__(self):
self.destroy()
def __iter__(self):
for sensor_master in self.sensor_dict.values():
yield sensor_master
def get(self, key):
return self.__getitem__(key)
def __getitem__(self, key):
if key in self.sensor_dict:
return self.sensor_dict[key]
        else:
            debug(info='No sensor called ' + str(key), info_type='error')
            raise RuntimeError('[{}] No sensor called {}'.format(self.__class__.__name__, key))
def __setitem__(self, key, value):
if key in self.sensor_dict:
self.sensor_dict[key] = value
return True
else:
debug(info='No sensor called '+ str(key), info_type='error')
return None
class CarlaSensorMaster(object):
def __init__(self, sensor, transform, callback):
self.sensor = sensor
self.transform = transform
self.raw_data, self.data = None, None
self.type_id = sensor.type_id
self.attributes = sensor.attributes
self.frame_id = sensor.type_id.replace('.', '_') + '/' + sensor.attributes['role_name']
weak_self = weakref.ref(self)
        if callback is not None:
self.callback = lambda data: callback(weak_self, data)
else:
'''default callback'''
func_name = sensor.type_id.replace('.', '_')
# print('[CarlaSensorMaster] ', func_name)
func = getattr(DefaultCallback, func_name)
self.callback = lambda data: func(weak_self, data)
if hasattr(sensor, 'listen'):
self.sensor.listen(self.callback)
return
# ## disable currently
# def reset(self):
# if self.sensor.is_listening: self.sensor.stop()
# self.raw_data, self.data = None, None
# self.sensor.listen(self.callback)
def get_transform(self):
'''
transform relative to parent actor
'''
return self.transform
def get_world_transform(self):
return self.sensor.get_transform()
def get_raw_data(self):
return self.raw_data
def get_data(self):
return self.data
def destroy(self):
if self.sensor.is_listening: self.sensor.stop()
self.sensor.destroy()
def destroy_command(self):
if self.sensor.is_listening: self.sensor.stop()
return DestroyActor(self.sensor)
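# A minimal usage sketch (commented out because `core` and `vehicle` come from
# the surrounding carla_utils runtime; the config keys other than 'transform'
# and 'callback' are assumptions about what create_sensor_command consumes):
#
#     sensors_param_list = [{
#         'type_id': 'sensor.camera.rgb',
#         'role_name': 'view',
#         'transform': carla.Transform(carla.Location(x=1.5, z=2.4)),
#         'callback': None,
#     }]
#     sensors_master = createSensorListMaster(core, vehicle, sensors_param_list)
#     image = sensors_master[('sensor.camera.rgb', 'view')].get_data()
#     sensors_master.destroy()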
|
84201
|
from typing import List, Tuple
import numpy as np
from pyrep.objects.shape import Shape
from pyrep.objects.dummy import Dummy
from pyrep.objects.proximity_sensor import ProximitySensor
from rlbench.backend.task import Task
from rlbench.backend.conditions import DetectedCondition, NothingGrasped
from rlbench.backend.spawn_boundary import SpawnBoundary
NUM_SHELVES_IN_SAFE = 3
class PutMoneyInSafe(Task):
def init_task(self) -> None:
self.index_dic = {0: 'bottom', 1: 'middle', 2: 'top'}
self.money = Shape('dollar_stack')
self.money_boundary = Shape('dollar_stack_boundary')
self.register_graspable_objects([self.money])
self.success_conditions = [NothingGrasped(self.robot.gripper)]
self.w1_rel_pos = [-2.7287 * 10 ** (-4), -2.3246 * 10 ** (-6),
+4.5627 * 10 ** (-2)]
self.w1_rel_ori = [-3.1416, 7.2824 * 10 ** (-1), -2.1265 * 10 ** (-2)]
def init_episode(self, index: int) -> List[str]:
self.target_shelf = index
w4 = Dummy('waypoint4')
target_dummy_name = 'dummy_shelf' + str(self.target_shelf)
target_pos_dummy = Dummy(target_dummy_name)
target_pos = target_pos_dummy.get_position()
w4.set_position(target_pos, reset_dynamics=False)
self.success_detector = ProximitySensor(
('success_detector' + str(self.target_shelf))
)
while len(self.success_conditions) > 1:
self.success_conditions.pop()
self.success_conditions.append(
DetectedCondition(self.money, self.success_detector)
)
self.register_success_conditions(self.success_conditions)
b = SpawnBoundary([self.money_boundary])
b.sample(self.money,
min_rotation=(0.00, 0.00, 0.00),
max_rotation=(0.00, 0.00, +0.5 * np.pi))
return ['put the money away in the safe on the %s shelf'
% self.index_dic[index],
'leave the money on the %s shelf on the safe'
% self.index_dic[index],
'place the stack of bank notes on the %s shelf of the safe'
% self.index_dic[index]]
def variation_count(self) -> int:
return NUM_SHELVES_IN_SAFE
def base_rotation_bounds(self) -> Tuple[List[float], List[float]]:
return [0.0, 0.0, 0.0], [0.0, 0.0, +0.5 * np.pi]
|
84219
|
from aerosandbox.common import ExplicitAnalysis
import aerosandbox.numpy as np
import subprocess
from pathlib import Path
from aerosandbox.geometry import Airplane
from aerosandbox.performance import OperatingPoint
from typing import Union, List, Dict
import tempfile
import warnings
class AVL(ExplicitAnalysis):
"""
An interface to AVL, a 3D vortex lattice aerodynamics code developed by <NAME> at MIT.
Requires AVL to be on your computer; AVL is available here: https://web.mit.edu/drela/Public/web/avl/
It is recommended (but not required) that you add AVL to your system PATH environment variable such that it can
be called with the command `avl`. If this is not the case, you need to specify the path to your AVL
executable using the `avl_command` argument of the constructor.
Usage example:
>>>avl = asb.AVL(
>>> airplane=my_airplane,
>>> op_point=asb.OperatingPoint(
>>> velocity=100, # m/s
>>> alpha=5, # deg
>>> beta=4, # deg
>>> p=0.01, # rad/sec
>>> q=0.02, # rad/sec
>>> r=0.03, # rad/sec
>>> )
>>>)
>>>outputs = avl.run()
"""
def __init__(self,
airplane: Airplane,
op_point: OperatingPoint = OperatingPoint(),
avl_command: str = "avl",
verbose: bool = False,
working_directory: str = None,
):
"""
Interface to AVL.
Args:
airplane: The airplane object you wish to analyze.
op_point: The operating point you wish to analyze at.
avl_command: The command-line argument to call AVL.
* If AVL is on your system PATH, then you can just leave this as "avl".
                * If AVL is not on your system PATH, then you should provide a filepath to the AVL
Note that AVL is not on your PATH by default. To tell if AVL is on your system PATH, open up a
terminal and type "avl".
* If the AVL menu appears, it's on your PATH.
* If you get something like "'avl' is not recognized as an internal or external command..." or
"Command 'avl' not found, did you mean...", then it is not on your PATH and you'll need to
specify the location of your AVL executable as a string.
To add AVL to your path, modify your system's environment variables. (Google how to do this for your OS.)
            verbose: If True, print AVL's console output; if False, suppress it.
            working_directory: Directory in which to run AVL and keep its intermediate files (useful for debugging).
                If None, a temporary directory is used.
"""
self.airplane = airplane
self.op_point = op_point
self.avl_command = avl_command
self.verbose = verbose
self.working_directory = working_directory
def run(self) -> Dict:
return self._run_avl()
def _default_keystroke_file_contents(self) -> List[str]:
run_file_contents = []
# Disable graphics
run_file_contents += [
"plop",
"g",
"",
]
# Enter oper mode
run_file_contents += [
"oper",
]
# Set parameters
run_file_contents += [
"m"
f"mn {self.op_point.mach()}",
f"v {self.op_point.velocity}",
f"d {self.op_point.atmosphere.density()}",
"g 9.81",
""
]
# Set analysis state
p_bar = self.op_point.p * self.airplane.b_ref / (2 * self.op_point.velocity)
q_bar = self.op_point.q * self.airplane.c_ref / (2 * self.op_point.velocity)
r_bar = self.op_point.r * self.airplane.b_ref / (2 * self.op_point.velocity)
run_file_contents += [
f"a a {self.op_point.alpha}",
f"b b {self.op_point.beta}",
f"r r {p_bar}",
f"p p {q_bar}",
f"y y {r_bar}"
]
return run_file_contents
def _run_avl(self,
run_command: str = None,
) -> Dict[str, np.ndarray]:
"""
Private function to run AVL.
        Args:
            run_command: A string with any AVL keystroke inputs that you'd like. By default, you start off within
                the OPER menu. All of the inputs indicated in the constructor have been set already, but you can
                override them here (for this run only) if you want.
Returns: A dictionary containing all of your results.
"""
with tempfile.TemporaryDirectory() as directory:
directory = Path(directory)
### Alternatively, work in another directory:
if self.working_directory is not None:
directory = Path(self.working_directory) # For debugging
# Designate an intermediate file for file I/O
output_filename = "output.txt"
with open(directory / output_filename, "w+") as f:
pass
# Handle the airplane file
airplane_file = "airplane.avl"
self.airplane.write_avl(directory / airplane_file)
# Handle the run file
keystroke_file_contents = self._default_keystroke_file_contents()
if run_command is not None:
keystroke_file_contents += [run_command]
keystroke_file_contents += [
"x",
"st",
f"{output_filename}",
"o",
"",
"",
"quit"
]
keystroke_file = "keystroke_file.txt"
with open(directory / keystroke_file, "w+") as f:
f.write(
"\n".join(keystroke_file_contents)
)
command = f'{self.avl_command} {airplane_file} < {keystroke_file}'
### Execute
subprocess.call(
command,
shell=True,
cwd=directory,
stdout=None if self.verbose else subprocess.DEVNULL
)
##### Parse the output file
# Read the file
with open(directory / output_filename, "r") as f:
output_data = f.read()
# Trim off the first few lines that contain name, # of panels, etc.
output_data = "\n".join(output_data.split("\n")[8:])
### Iterate through the string to find all the numeric values, based on where "=" appears.
values = []
index = output_data.find("=")
while index != -1:
output_data = output_data[index + 1:]
number = output_data[:12].split("\n")[0]
number = float(number)
values.append(number)
index = output_data.find("=")
### Record the keys associated with those values:
keys = [
"Sref",
"Cref",
"Bref",
"Xref",
"Yref",
"Zref",
"alpha",
"pb/2V",
"p'b/2V",
"beta",
"qc/2V",
"mach",
"rb/2V",
"r'b/2V",
"CX", # Note: these refer to "CXtot", etc. in AVL, but the "tot" is redundant.
"Cl",
"Cl'",
"CY",
"Cm",
"CZ",
"Cn",
"Cn'",
"CL",
"CD",
"CDvis",
"CDind",
"CLff",
"CDff",
"Cyff",
"e",
"CLa",
"CLb",
"CYa",
"CYb",
"Cla",
"Clb",
"Cma",
"Cmb",
"Cna",
"Cnb",
"CLp",
"CLq",
"CLr",
"CYp",
"CYq",
"CYr",
"Clp",
"Clq",
"Clr",
"Cmp",
"Cmq",
"Cmr",
"Cnp",
"Cnq",
"Cnr",
"Xnp",
"Clb Cnr / Clr Cnb"
]
if len(values) != 57 and len(values) != 56: # Sometimes the spiral mode term is inexplicably not displayed by AVL
raise RuntimeError(
"AVL could not run for some reason!\n"
"Investigate by turning on the `verbose` flag and looking at the output.\n"
"(Common culprit: angular rates too high.)"
)
res = {
k: v
for k, v in zip(
keys, values
)
}
##### Add a few more outputs for ease of use
res["p"] = res["pb/2V"] * (2 * self.op_point.velocity / self.airplane.b_ref)
res["q"] = res["qc/2V"] * (2 * self.op_point.velocity / self.airplane.c_ref)
res["r"] = res["rb/2V"] * (2 * self.op_point.velocity / self.airplane.b_ref)
return res
if __name__ == '__main__':
### Import Vanilla Airplane
import aerosandbox as asb
from pathlib import Path
geometry_folder = Path(asb.__file__).parent.parent / "tutorial" / "04 - Geometry" / "example_geometry"
import sys
sys.path.insert(0, str(geometry_folder))
from vanilla import airplane as vanilla
### Do the AVL run
avl = AVL(
airplane=vanilla,
op_point=OperatingPoint(
atmosphere=asb.Atmosphere(altitude=0),
velocity=1,
alpha=0.433476,
beta=0,
p=0,
q=0,
r=0,
),
)
res = avl.run()
for k, v in res.items():
print(f"{str(k).rjust(10)} : {v}")
|
84239
|
from qiskit import QuantumRegister, ClassicalRegister, QuantumCircuit
from math import pi
def circuit1(qc,qr,theta,L,repeat):
#circuit 1
#theta is list of the parameters
#theta length is 8L
    #L is the number of repetitions
    # repeat conjugates the first part and appends it to the circuit, for expressibility
# 0:No, 1: Repeat
count=0
for l in range(L):
for i in range(4):
qc.rx(theta[count],qr[i])
count=count+1
for i in range(4):
qc.rz(theta[count],qr[i])
count=count+1
if repeat!=0:
qc.barrier(qr)
for l in range(L):
for i in range(4):
qc.rz(theta[count],qr[i])
count=count+1
for i in range(4):
qc.rx(theta[count],qr[i])
count=count+1
return qc
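# --- Added usage sketch (not in the original file) ---
# A minimal, hypothetical example of how circuit1 is expected to be called:
# with repeat=0 it consumes 8*L parameters (4 rx + 4 rz angles per repetition);
# with repeat=1 the conjugated second half consumes another 8*L.
def _demo_circuit1(L=2):
    qr = QuantumRegister(4, 'q')
    qc = QuantumCircuit(qr)
    theta = [0.1 * k for k in range(8 * L)]  # placeholder angles
    return circuit1(qc, qr, theta, L, repeat=0)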
def circuit2(qc,qr,theta,L,repeat):
#circuit 2
#theta is list of the parameters
#theta length is 8L
    #L is the number of repetitions
    # repeat conjugates the first part and appends it to the circuit, for expressibility
# 0:No, 1: Repeat
count=0
for l in range(L):
for i in range(4):
qc.rx(theta[count],qr[i])
count=count+1
for i in range(4):
qc.rz(theta[count],qr[i])
count=count+1
qc.cx(qr[3],qr[2])
qc.cx(qr[2],qr[1])
qc.cx(qr[1],qr[0])
if repeat!=0:
qc.barrier(qr)
for l in range(L):
qc.cx(qr[1],qr[0])
qc.cx(qr[2],qr[1])
qc.cx(qr[3],qr[2])
for i in range(4):
qc.rz(theta[count],qr[i])
count=count+1
for i in range(4):
qc.rx(theta[count],qr[i])
count=count+1
return qc
def circuit3(qc,qr,theta,L,repeat):
#circuit 3
#theta is list of the parameters
#theta length is (11)L
    #L is the number of repetitions
    # repeat conjugates the first part and appends it to the circuit, for expressibility
# 0:No, 1: Repeat
count=0
for l in range(L):
for i in range(4):
qc.rx(theta[count],qr[i])
count=count+1
for i in range(4):
qc.rz(theta[count],qr[i])
count=count+1
qc.crz(theta[count],qr[3],qr[2])
count=count+1
qc.crz(theta[count],qr[2],qr[1])
count=count+1
qc.crz(theta[count],qr[1],qr[0])
count=count+1
if repeat!=0:
qc.barrier(qr)
for l in range(L):
qc.crz(theta[count],qr[1],qr[0])
count=count+1
qc.crz(theta[count],qr[2],qr[1])
count=count+1
qc.crz(theta[count],qr[3],qr[2])
count=count+1
for i in range(4):
qc.rz(theta[count],qr[i])
count=count+1
for i in range(4):
qc.rx(theta[count],qr[i])
count=count+1
return qc
def circuit4(qc,qr,theta,L,repeat):
#circuit 4
#theta is list of the parameters
#theta length is (11)L
    #L is the number of repetitions
    # repeat conjugates the first part and appends it to the circuit, for expressibility
# 0:No, 1: Repeat
count=0
for l in range(L):
for i in range(4):
qc.rx(theta[count],qr[i])
count=count+1
for i in range(4):
qc.rz(theta[count],qr[i])
count=count+1
qc.crx(theta[count],qr[3],qr[2])
count=count+1
qc.crx(theta[count],qr[2],qr[1])
count=count+1
qc.crx(theta[count],qr[1],qr[0])
count=count+1
if repeat!=0:
qc.barrier(qr)
for l in range(L):
qc.crx(theta[count],qr[1],qr[0])
count=count+1
qc.crx(theta[count],qr[2],qr[1])
count=count+1
qc.crx(theta[count],qr[3],qr[2])
count=count+1
for i in range(4):
qc.rz(theta[count],qr[i])
count=count+1
for i in range(4):
qc.rx(theta[count],qr[i])
count=count+1
return qc
def circuit5(qc,qr,theta,L,repeat):
#circuit 5
#theta is list of the parameters
#theta length is (28)L
    #L is the number of repetitions
    # repeat conjugates the first part and appends it to the circuit, for expressibility
# 0:No, 1: Repeat
count=0
for l in range(L):
for i in range(4):
qc.rx(theta[count],qr[i])
count=count+1
for i in range(4):
qc.rz(theta[count],qr[i])
count=count+1
for j in range(4):
for i in range(4):
if i!=j:
qc.crz(theta[count],qr[3-j],qr[3-i])
count=count+1
for i in range(4):
qc.rx(theta[count],qr[i])
count=count+1
for i in range(4):
qc.rz(theta[count],qr[i])
count=count+1
if repeat!=0:
qc.barrier(qr)
for l in range(L):
for i in range(4):
qc.rz(theta[count],qr[i])
count=count+1
for i in range(4):
qc.rx(theta[count],qr[i])
count=count+1
for j in range(4):
for i in range(4):
if i!=j:
qc.crz(theta[count],qr[j],qr[i])
count=count+1
for i in range(4):
qc.rz(theta[count],qr[i])
count=count+1
for i in range(4):
qc.rx(theta[count],qr[i])
count=count+1
return qc
def circuit6(qc,qr,theta,L,repeat):
#circuit 6
#theta is list of the parameters
#theta length is (28)L
    #L is the number of repetitions
    # repeat conjugates the first part and appends it to the circuit, for expressibility
# 0:No, 1: Repeat
count=0
for l in range(L):
for i in range(4):
qc.rx(theta[count],qr[i])
count=count+1
for i in range(4):
qc.rz(theta[count],qr[i])
count=count+1
for j in range(4):
for i in range(4):
if i!=j:
qc.crx(theta[count],qr[3-j],qr[3-i])
count=count+1
for i in range(4):
qc.rx(theta[count],qr[i])
count=count+1
for i in range(4):
qc.rz(theta[count],qr[i])
count=count+1
if repeat!=0:
qc.barrier(qr)
for l in range(L):
for i in range(4):
qc.rz(theta[count],qr[i])
count=count+1
for i in range(4):
qc.rx(theta[count],qr[i])
count=count+1
for j in range(4):
for i in range(4):
if i!=j:
qc.crx(theta[count],qr[j],qr[i])
count=count+1
for i in range(4):
qc.rz(theta[count],qr[i])
count=count+1
for i in range(4):
qc.rx(theta[count],qr[i])
count=count+1
return qc
def circuit7(qc,qr,theta,L,repeat):
#circuit 7
#theta is list of the parameters
#theta length is (19)L
    #L is the number of repetitions
    # repeat conjugates the first part and appends it to the circuit, for expressibility
# 0:No, 1: Repeat
count=0
for l in range(L):
for i in range(4):
qc.rx(theta[count],qr[i])
count=count+1
for i in range(4):
qc.rz(theta[count],qr[i])
count=count+1
qc.crz(theta[count],qr[1],qr[0])
count=count+1
qc.crz(theta[count],qr[3],qr[2])
count=count+1
for i in range(4):
qc.rx(theta[count],qr[i])
count=count+1
for i in range(4):
qc.rz(theta[count],qr[i])
count=count+1
qc.crz(theta[count],qr[2],qr[1])
count=count+1
if repeat!=0:
qc.barrier(qr)
for l in range(L):
qc.crz(theta[count],qr[2],qr[1])
count=count+1
for i in range(4):
qc.rz(theta[count],qr[i])
count=count+1
for i in range(4):
qc.rx(theta[count],qr[i])
count=count+1
qc.crz(theta[count],qr[3],qr[2])
count=count+1
qc.crz(theta[count],qr[1],qr[0])
count=count+1
for i in range(4):
qc.rz(theta[count],qr[i])
count=count+1
for i in range(4):
qc.rx(theta[count],qr[i])
count=count+1
return qc
def circuit8(qc,qr,theta,L,repeat):
#circuit 8
#theta is list of the parameters
#theta length is (19)L
    #L is the number of repetitions
    # repeat conjugates the first part and appends it to the circuit, for expressibility
# 0:No, 1: Repeat
count=0
for l in range(L):
for i in range(4):
qc.rx(theta[count],qr[i])
count=count+1
for i in range(4):
qc.rz(theta[count],qr[i])
count=count+1
qc.crx(theta[count],qr[1],qr[0])
count=count+1
qc.crx(theta[count],qr[3],qr[2])
count=count+1
for i in range(4):
qc.rx(theta[count],qr[i])
count=count+1
for i in range(4):
qc.rz(theta[count],qr[i])
count=count+1
qc.crx(theta[count],qr[2],qr[1])
count=count+1
if repeat!=0:
qc.barrier(qr)
for l in range(L):
qc.crx(theta[count],qr[2],qr[1])
count=count+1
for i in range(4):
qc.rz(theta[count],qr[i])
count=count+1
for i in range(4):
qc.rx(theta[count],qr[i])
count=count+1
qc.crx(theta[count],qr[3],qr[2])
count=count+1
qc.crx(theta[count],qr[1],qr[0])
count=count+1
for i in range(4):
qc.rz(theta[count],qr[i])
count=count+1
for i in range(4):
qc.rx(theta[count],qr[i])
count=count+1
return qc
def circuit9(qc,qr,theta,L,repeat):
#circuit 9
#theta is list of the parameters
#theta length is (4)L
    #L is the number of repetitions
    # repeat conjugates the first part and appends it to the circuit, for expressibility
# 0:No, 1: Repeat
count=0
for l in range(L):
for i in range(4):
qc.h(qr[i])
qc.cz(qr[3],qr[2])
qc.cz(qr[2],qr[1])
qc.cz(qr[1],qr[0])
for i in range(4):
qc.rx(theta[count],qr[i])
count=count+1
if repeat!=0:
qc.barrier(qr)
for l in range(L):
for i in range(4):
qc.rx(theta[count],qr[i])
count=count+1
qc.cz(qr[1],qr[0])
qc.cz(qr[2],qr[1])
qc.cz(qr[3],qr[2])
for i in range(4):
qc.h(qr[i])
return qc
def circuit10(qc,qr,theta,L,repeat):
#circuit 10
#theta is list of the parameters
#theta length is (4)L+4
    #L is the number of repetitions
    # repeat conjugates the first part and appends it to the circuit, for expressibility
# 0:No, 1: Repeat
count=0
for i in range(4):
qc.ry(theta[count],qr[i])
count=count+1
for l in range(L):
qc.cz(qr[3],qr[2])
qc.cz(qr[2],qr[1])
qc.cz(qr[1],qr[0])
qc.cz(qr[3],qr[0])
for i in range(4):
qc.ry(theta[count],qr[i])
count=count+1
if repeat!=0:
qc.barrier(qr)
for l in range(L):
for i in range(4):
qc.ry(theta[count],qr[i])
count=count+1
qc.cz(qr[3],qr[0])
qc.cz(qr[1],qr[0])
qc.cz(qr[2],qr[1])
qc.cz(qr[3],qr[2])
for i in range(4):
qc.ry(theta[count],qr[i])
count=count+1
return qc
def circuit11(qc,qr,theta,L,repeat):
#circuit 11
#theta is list of the parameters
#theta length is (12)L
    #L is the number of repetitions
    # repeat conjugates the first part and appends it to the circuit, for expressibility
# 0:No, 1: Repeat
count=0
for l in range(L):
for i in range(4):
qc.ry(theta[count],qr[i])
count=count+1
for i in range(4):
qc.rz(theta[count],qr[i])
count=count+1
qc.cx(qr[1],qr[0])
qc.cx(qr[3],qr[2])
qc.ry(theta[count],qr[1])
count=count+1
qc.ry(theta[count],qr[2])
count=count+1
qc.rz(theta[count],qr[1])
count=count+1
qc.rz(theta[count],qr[2])
count=count+1
qc.cx(qr[2],qr[1])
if repeat!=0:
qc.barrier(qr)
for l in range(L):
qc.cx(qr[2],qr[1])
qc.rz(theta[count],qr[2])
count=count+1
qc.rz(theta[count],qr[1])
count=count+1
qc.ry(theta[count],qr[2])
count=count+1
qc.ry(theta[count],qr[1])
count=count+1
qc.cx(qr[3],qr[2])
qc.cx(qr[1],qr[0])
for i in range(4):
qc.rz(theta[count],qr[i])
count=count+1
for i in range(4):
qc.ry(theta[count],qr[i])
count=count+1
return qc
def circuit12(qc,qr,theta,L,repeat):
#circuit 12
#theta is list of the parameters
#theta length is (12)L
    #L is the number of repetitions
    # repeat conjugates the first part and appends it to the circuit, for expressibility
# 0:No, 1: Repeat
count=0
for l in range(L):
for i in range(4):
qc.ry(theta[count],qr[i])
count=count+1
for i in range(4):
qc.rz(theta[count],qr[i])
count=count+1
qc.cz(qr[1],qr[0])
qc.cz(qr[3],qr[2])
qc.ry(theta[count],qr[1])
count=count+1
qc.ry(theta[count],qr[2])
count=count+1
qc.rz(theta[count],qr[1])
count=count+1
qc.rz(theta[count],qr[2])
count=count+1
qc.cz(qr[2],qr[1])
if repeat!=0:
qc.barrier(qr)
for l in range(L):
qc.cz(qr[2],qr[1])
qc.rz(theta[count],qr[2])
count=count+1
qc.rz(theta[count],qr[1])
count=count+1
qc.ry(theta[count],qr[2])
count=count+1
qc.ry(theta[count],qr[1])
count=count+1
qc.cz(qr[3],qr[2])
qc.cz(qr[1],qr[0])
for i in range(4):
qc.rz(theta[count],qr[i])
count=count+1
for i in range(4):
qc.ry(theta[count],qr[i])
count=count+1
return qc
def circuit13(qc,qr,theta,L,repeat):
#circuit 13
#theta is list of the parameters
#theta length is (16)L
    #L is the number of repetitions
    # repeat conjugates the first part and appends it to the circuit, for expressibility
# 0:No, 1: Repeat
count=0
for l in range(L):
for i in range(4):
qc.ry(theta[count],qr[i])
count=count+1
qc.crz(theta[count],qr[3],qr[0])
count=count+1
qc.crz(theta[count],qr[2],qr[3])
count=count+1
qc.crz(theta[count],qr[1],qr[2])
count=count+1
qc.crz(theta[count],qr[0],qr[1])
count=count+1
for i in range(4):
qc.ry(theta[count],qr[i])
count=count+1
qc.crz(theta[count],qr[3],qr[2])
count=count+1
qc.crz(theta[count],qr[0],qr[3])
count=count+1
qc.crz(theta[count],qr[1],qr[0])
count=count+1
qc.crz(theta[count],qr[2],qr[1])
count=count+1
if repeat!=0:
qc.barrier(qr)
for l in range(L):
qc.crz(theta[count],qr[2],qr[1])
count=count+1
qc.crz(theta[count],qr[1],qr[0])
count=count+1
qc.crz(theta[count],qr[0],qr[3])
count=count+1
qc.crz(theta[count],qr[3],qr[2])
count=count+1
for i in range(4):
qc.ry(theta[count],qr[i])
count=count+1
qc.crz(theta[count],qr[0],qr[1])
count=count+1
qc.crz(theta[count],qr[1],qr[2])
count=count+1
qc.crz(theta[count],qr[2],qr[3])
count=count+1
qc.crz(theta[count],qr[3],qr[0])
count=count+1
for i in range(4):
qc.ry(theta[count],qr[i])
count=count+1
return qc
def circuit14(qc,qr,theta,L,repeat):
#circuit 14
#theta is list of the parameters
#theta length is (16)L
    #L is the number of repetitions
    # repeat conjugates the first part and appends it to the circuit, for expressibility
# 0:No, 1: Repeat
count=0
for l in range(L):
for i in range(4):
qc.ry(theta[count],qr[i])
count=count+1
qc.crx(theta[count],qr[3],qr[0])
count=count+1
qc.crx(theta[count],qr[2],qr[3])
count=count+1
qc.crx(theta[count],qr[1],qr[2])
count=count+1
qc.crx(theta[count],qr[0],qr[1])
count=count+1
for i in range(4):
qc.ry(theta[count],qr[i])
count=count+1
qc.crx(theta[count],qr[3],qr[2])
count=count+1
qc.crx(theta[count],qr[0],qr[3])
count=count+1
qc.crx(theta[count],qr[1],qr[0])
count=count+1
qc.crx(theta[count],qr[2],qr[1])
count=count+1
if repeat!=0:
qc.barrier(qr)
for l in range(L):
qc.crx(theta[count],qr[2],qr[1])
count=count+1
qc.crx(theta[count],qr[1],qr[0])
count=count+1
qc.crx(theta[count],qr[0],qr[3])
count=count+1
qc.crx(theta[count],qr[3],qr[2])
count=count+1
for i in range(4):
qc.ry(theta[count],qr[i])
count=count+1
qc.crx(theta[count],qr[0],qr[1])
count=count+1
qc.crx(theta[count],qr[1],qr[2])
count=count+1
qc.crx(theta[count],qr[2],qr[3])
count=count+1
qc.crx(theta[count],qr[3],qr[0])
count=count+1
for i in range(4):
qc.ry(theta[count],qr[i])
count=count+1
return qc
def circuit15(qc,qr,theta,L,repeat):
#circuit 15
#theta is list of the parameters
#theta length is (8)L
    #L is the number of repetitions
    # repeat conjugates the first part and appends it to the circuit, for expressibility
# 0:No, 1: Repeat
count=0
for l in range(L):
for i in range(4):
qc.ry(theta[count],qr[i])
count=count+1
qc.cx(qr[3],qr[0])
qc.cx(qr[2],qr[3])
qc.cx(qr[1],qr[2])
qc.cx(qr[0],qr[1])
for i in range(4):
qc.ry(theta[count],qr[i])
count=count+1
qc.cx(qr[3],qr[2])
qc.cx(qr[0],qr[3])
qc.cx(qr[1],qr[0])
qc.cx(qr[2],qr[1])
if repeat!=0:
qc.barrier(qr)
for l in range(L):
qc.cx(qr[2],qr[1])
qc.cx(qr[1],qr[0])
qc.cx(qr[0],qr[3])
qc.cx(qr[3],qr[2])
for i in range(4):
qc.ry(theta[count],qr[i])
count=count+1
qc.cx(qr[0],qr[1])
qc.cx(qr[1],qr[2])
qc.cx(qr[2],qr[3])
qc.cx(qr[3],qr[0])
for i in range(4):
qc.ry(theta[count],qr[i])
count=count+1
return qc
def circuit16(qc,qr,theta,L,repeat):
#circuit 16
#theta is list of the parameters
#theta length is (11)L
    #L is the number of repetitions
    # repeat conjugates the first part and appends it to the circuit, for expressibility
# 0:No, 1: Repeat
count=0
for l in range(L):
for i in range(4):
qc.rx(theta[count],qr[i])
count=count+1
for i in range(4):
qc.rz(theta[count],qr[i])
count=count+1
qc.crz(theta[count],qr[1],qr[0])
count=count+1
qc.crz(theta[count],qr[3],qr[2])
count=count+1
qc.crz(theta[count],qr[2],qr[1])
count=count+1
if repeat!=0:
qc.barrier(qr)
for l in range(L):
qc.crz(theta[count],qr[2],qr[1])
count=count+1
qc.crz(theta[count],qr[3],qr[2])
count=count+1
qc.crz(theta[count],qr[1],qr[0])
count=count+1
for i in range(4):
qc.rz(theta[count],qr[i])
count=count+1
for i in range(4):
qc.rx(theta[count],qr[i])
count=count+1
return qc
def circuit17(qc,qr,theta,L,repeat):
#circuit 17
#theta is list of the parameters
#theta length is (11)L
    #L is the number of repetitions
    # repeat conjugates the first part and appends it to the circuit, for expressibility
# 0:No, 1: Repeat
count=0
for l in range(L):
for i in range(4):
qc.rx(theta[count],qr[i])
count=count+1
for i in range(4):
qc.rz(theta[count],qr[i])
count=count+1
qc.crx(theta[count],qr[1],qr[0])
count=count+1
qc.crx(theta[count],qr[3],qr[2])
count=count+1
qc.crx(theta[count],qr[2],qr[1])
count=count+1
if repeat!=0:
qc.barrier(qr)
for l in range(L):
qc.crx(theta[count],qr[2],qr[1])
count=count+1
qc.crx(theta[count],qr[3],qr[2])
count=count+1
qc.crx(theta[count],qr[1],qr[0])
count=count+1
for i in range(4):
qc.rz(theta[count],qr[i])
count=count+1
for i in range(4):
qc.rx(theta[count],qr[i])
count=count+1
return qc
def circuit18(qc,qr,theta,L,repeat):
#circuit 18
#theta is list of the parameters
#theta length is (12)L
    #L is the number of repetitions
    # repeat conjugates the first part and appends it to the circuit, for expressibility
# 0:No, 1: Repeat
count=0
for l in range(L):
for i in range(4):
qc.rx(theta[count],qr[i])
count=count+1
for i in range(4):
qc.rz(theta[count],qr[i])
count=count+1
qc.crz(theta[count],qr[3],qr[0])
count=count+1
qc.crz(theta[count],qr[2],qr[3])
count=count+1
qc.crz(theta[count],qr[1],qr[2])
count=count+1
qc.crz(theta[count],qr[0],qr[1])
count=count+1
if repeat!=0:
qc.barrier(qr)
for l in range(L):
qc.crz(theta[count],qr[0],qr[1])
count=count+1
qc.crz(theta[count],qr[1],qr[2])
count=count+1
qc.crz(theta[count],qr[2],qr[3])
count=count+1
qc.crz(theta[count],qr[3],qr[0])
count=count+1
for i in range(4):
qc.rz(theta[count],qr[i])
count=count+1
for i in range(4):
qc.rx(theta[count],qr[i])
count=count+1
return qc
def circuit19(qc,qr,theta,L,repeat):
    #circuit 19
    #theta is list of the parameters
    #theta length is (12)L
    #L is the number of repetitions
    # repeat conjugates the first part and appends it to the circuit, for expressibility
# 0:No, 1: Repeat
count=0
for l in range(L):
for i in range(4):
qc.rx(theta[count],qr[i])
count=count+1
for i in range(4):
qc.rz(theta[count],qr[i])
count=count+1
qc.crx(theta[count],qr[3],qr[0])
count=count+1
qc.crx(theta[count],qr[2],qr[3])
count=count+1
qc.crx(theta[count],qr[1],qr[2])
count=count+1
qc.crx(theta[count],qr[0],qr[1])
count=count+1
if repeat!=0:
qc.barrier(qr)
for l in range(L):
qc.crx(theta[count],qr[0],qr[1])
count=count+1
qc.crx(theta[count],qr[1],qr[2])
count=count+1
qc.crx(theta[count],qr[2],qr[3])
count=count+1
qc.crx(theta[count],qr[3],qr[0])
count=count+1
for i in range(4):
qc.rz(theta[count],qr[i])
count=count+1
for i in range(4):
qc.rx(theta[count],qr[i])
count=count+1
return qc
|
84309
|
import os
import time
import unittest
import pytest
from kafka.admin_client import AdminClient, NewTopic, NewPartitionsInfo
from kafka.protocol.metadata import MetadataRequest
from test.fixtures import ZookeeperFixture, KafkaFixture
from test.testutil import KafkaIntegrationTestCase, env_kafka_version
KAFKA_ADMIN_TIMEOUT_SECONDS = 5
class TestKafkaAdminClientIntegration(KafkaIntegrationTestCase):
@classmethod
def setUpClass(cls):
if not os.environ.get('KAFKA_VERSION'):
return
cls.zk = ZookeeperFixture.instance()
cls.server = KafkaFixture.instance(0, cls.zk)
@classmethod
def tearDownClass(cls):
if not os.environ.get('KAFKA_VERSION'):
return
cls.server.close()
cls.zk.close()
@pytest.mark.skipif(env_kafka_version() < (0, 10, 1), reason='Unsupported Kafka Version')
def test_create_delete_topics(self):
admin = AdminClient(self.client_async)
topic = NewTopic(
name='topic',
num_partitions=1,
replication_factor=1,
)
metadata_request = MetadataRequest[1]()
response = admin.create_topics(topics=[topic], timeout=KAFKA_ADMIN_TIMEOUT_SECONDS)
# Error code 7 means that RequestTimedOut but we can safely assume
# that topic is created or will be created eventually.
# see this https://cwiki.apache.org/confluence/display/KAFKA/
# KIP-4+-+Command+line+and+centralized+administrative+operations
self.assertTrue(
response[0].topic_errors[0][1] == 0 or
response[0].topic_errors[0][1] == 7
)
time.sleep(1) # allows the topic to be created
delete_response = admin.delete_topics(['topic'], timeout=1)
self.assertTrue(
            delete_response[0].topic_errors[0][1] == 0 or
            delete_response[0].topic_errors[0][1] == 7
)
@pytest.mark.skipif(env_kafka_version() < (1, 0, 0), reason='Unsupported Kafka Version')
def test_create_partitions(self):
admin = AdminClient(self.client_async)
topic = NewTopic(
name='topic',
num_partitions=1,
replication_factor=1,
)
metadata_request = MetadataRequest[1]()
admin.create_topics(topics=[topic], timeout=KAFKA_ADMIN_TIMEOUT_SECONDS)
time.sleep(1) # allows the topic to be created
new_partitions_info = NewPartitionsInfo('topic', 2, [[0]])
response = admin.create_partitions([new_partitions_info], timeout=1, validate_only=False)
self.assertTrue(
response[0].topic_errors[0][1] == 0 or
response[0].topic_errors[0][1] == 7
)
|
84330
|
import copy
from joblib import Parallel
import numpy as np
import time
import numbers
from itertools import product
from collections import defaultdict
from sklearn import clone
from sklearn.pipeline import Pipeline
from sklearn.model_selection import check_cv, GridSearchCV, RandomizedSearchCV
from sklearn.model_selection._validation import _fit_and_score, _insert_error_scores, _aggregate_score_dicts, _normalize_score_results, _translate_train_sizes, _incremental_fit_estimator
from sklearn.utils.validation import indexable, check_random_state, _check_fit_params
from sklearn.metrics import check_scoring
from sklearn.metrics._scorer import _check_multimetric_scoring
from sklearn.base import is_classifier
from sklearn.utils.fixes import delayed
def init_eval_set(src_eval_set_selection, src_fit_params, X, y):
"""
    Handle the case where `eval_metric` is given in fit_params but `eval_set` is not.
    Parameters
    ----------
    src_eval_set_selection : {'all', 'test', 'train', 'original', 'original_transformed'}, optional
        How to choose the data passed to eval_set ('all': X, 'test': X[test], 'train': X[train], 'original': the input as-is, 'original_transformed': the input as-is, transformed by every pipeline step except the final estimator when the estimator is a pipeline)
    src_fit_params : Dict
        Fit-time parameters before processing
"""
fit_params = copy.deepcopy(src_fit_params)
eval_set_selection = src_eval_set_selection
    # Only apply the following when eval_metric is set in fit_params
if 'eval_metric' in src_fit_params and src_fit_params['eval_metric'] is not None:
        # If eval_set is not in fit_params, add the input data as-is
if 'eval_set' not in src_fit_params:
print('There is no "eval_set" in fit_params, so "eval_set" is set to (self.X, self.y)')
fit_params['eval_set'] = [(X, y)]
            if src_eval_set_selection is None:  # If eval_set_selection is unspecified and no eval_set was given, default to 'test'
eval_set_selection = 'test'
            if eval_set_selection not in ['all', 'train', 'test']:  # Raise an error if eval_set_selection is invalid
raise ValueError('The `eval_set_selection` argument should be "all", "train", or "test" when `eval_set` is not in `fit_params`')
        # If eval_set exists in src_fit_params and eval_set_selection is unspecified, default to 'original_transformed'
else:
if src_eval_set_selection is None:
eval_set_selection = 'original_transformed'
return fit_params, eval_set_selection
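# --- Added usage sketch (not in the original module) ---
# Illustrates the fallback behaviour of init_eval_set when fit_params contains
# 'eval_metric' but no 'eval_set': the input data is used as eval_set and
# eval_set_selection defaults to 'test'. X_sample/y_sample are placeholders.
def _example_init_eval_set(X_sample, y_sample):
    fit_params_in = {'eval_metric': 'rmse'}  # hypothetical metric name
    fit_params_out, selection = init_eval_set(None, fit_params_in, X_sample, y_sample)
    assert selection == 'test'
    assert fit_params_out['eval_set'][0][0] is X_sample
    return fit_params_out, selection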
def _transform_except_last_estimator(transformer, X_src, X_train):
"""パイプラインのとき、最終学習器以外のtransformを適用"""
if transformer is not None:
transformer.fit(X_train)
X_dst = transformer.transform(X_src)
return X_dst
else:
return X_src
def _eval_set_selection(eval_set_selection, transformer,
fit_params, train, test):
"""eval_setの中から学習データ or テストデータのみを抽出"""
fit_params_modified = copy.deepcopy(fit_params)
    # If eval_set does not exist or is None, return fit_params unchanged
eval_sets = [v for v in fit_params.keys() if 'eval_set' in v]
if len(eval_sets) == 0 or fit_params[eval_sets[0]] is None:
return fit_params_modified
    eval_set_name = eval_sets[0]  # key name of eval_set (the key name changes when a pipeline is used)
    # Get X and y from the original eval_set
X_fit = fit_params[eval_set_name][0][0]
y_fit = fit_params[eval_set_name][0][1]
    # Re-populate eval_set with the selected data
if eval_set_selection == 'train':
fit_params_modified[eval_set_name] = [(_transform_except_last_estimator(transformer, X_fit[train], X_fit[train])\
, y_fit[train])]
elif eval_set_selection == 'test':
fit_params_modified[eval_set_name] = [(_transform_except_last_estimator(transformer, X_fit[test], X_fit[train])\
, y_fit[test])]
elif eval_set_selection == 'all':
fit_params_modified[eval_set_name] = [(_transform_except_last_estimator(transformer, X_fit, X_fit[train])\
, y_fit)]
else:
fit_params_modified[eval_set_name] = [(_transform_except_last_estimator(transformer, X_fit, X_fit)\
, y_fit)]
return fit_params_modified
def _fit_and_score_eval_set(eval_set_selection, transformer,
estimator, X, y, scorer, train, test, verbose,
parameters, fit_params, return_train_score=False,
return_parameters=False, return_n_test_samples=False,
return_times=False, return_estimator=False,
split_progress=None, candidate_progress=None,
error_score=np.nan,
print_message=None):
"""Fit estimator and compute scores for a given dataset split."""
    # Extract only the training or test data from eval_set
fit_params_modified = _eval_set_selection(eval_set_selection, transformer,
fit_params, train, test)
if print_message is not None:
print(print_message)
    # Fit the estimator and compute scores
result = _fit_and_score(estimator, X, y, scorer, train, test, verbose, parameters,
fit_params_modified,
return_train_score=return_train_score,
return_parameters=return_parameters, return_n_test_samples=return_n_test_samples,
return_times=return_times, return_estimator=return_estimator,
split_progress=split_progress, candidate_progress=candidate_progress,
error_score=error_score)
return result
def _make_transformer(eval_set_selection, estimator):
"""estimatorがパイプラインのとき、最終学習器以外の変換器(前処理クラスのリスト)を作成"""
if isinstance(estimator, Pipeline) and eval_set_selection != 'original':
transformer = Pipeline([step for i, step in enumerate(estimator.steps) if i < len(estimator) - 1])
return transformer
else:
return None
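# --- Added usage sketch (not in the original module) ---
# Shows that for a pipeline, _make_transformer keeps every step except the
# final estimator, so eval_set data can be transformed consistently with X.
# The 'scaler'/'model' step names are hypothetical.
def _example_make_transformer():
    from sklearn.preprocessing import StandardScaler
    from sklearn.linear_model import LinearRegression
    pipe = Pipeline([('scaler', StandardScaler()), ('model', LinearRegression())])
    transformer = _make_transformer('test', pipe)
    assert [name for name, _ in transformer.steps] == ['scaler']
    return transformer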
def cross_validate_eval_set(eval_set_selection,
estimator, X, y=None, groups=None, scoring=None, cv=None,
n_jobs=None, verbose=0, fit_params=None,
pre_dispatch='2*n_jobs', return_train_score=False,
return_estimator=False, error_score=np.nan):
"""
    Evaluate scores by cross-validation with the `eval_set` argument in `fit_params`
This method is suitable for calculating cross validation scores with `early_stopping_round` in XGBoost or LightGBM.
Parameters
----------
eval_set_selection : {'all', 'train', 'test', 'original', 'original_transformed'}
Select data passed to `eval_set` in `fit_params`. Available only if "estimator" is LightGBM or XGBoost.
If "all", use all data in `X` and `y`.
If "train", select train data from `X` and `y` using cv.split().
If "test", select test data from `X` and `y` using cv.split().
If "original", use raw `eval_set`.
If "original_transformed", use `eval_set` transformed by fit_transform() of pipeline if `estimater` is pipeline.
estimator : estimator object implementing 'fit'
The object to use to fit the data.
X : array-like of shape (n_samples, n_features)
The data to fit. Can be for example a list, or an array.
y : array-like of shape (n_samples,) or (n_samples, n_outputs), \
default=None
The target variable to try to predict in the case of
supervised learning.
groups : array-like of shape (n_samples,), default=None
Group labels for the samples used while splitting the dataset into
train/test set. Only used in conjunction with a "Group" :term:`cv`
instance (e.g., :class:`GroupKFold`).
scoring : str, callable, list, tuple, or dict, default=None
Strategy to evaluate the performance of the cross-validated model on
the test set.
If `scoring` represents a single score, one can use:
- a single string (see :ref:`scoring_parameter`);
- a callable (see :ref:`scoring`) that returns a single value.
If `scoring` represents multiple scores, one can use:
- a list or tuple of unique strings;
- a callable returning a dictionary where the keys are the metric
names and the values are the metric scores;
        - a dictionary with metric names as keys and callables as values.
See :ref:`multimetric_grid_search` for an example.
cv : int, cross-validation generator or an iterable, default=None
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 5-fold cross validation,
- int, to specify the number of folds in a `(Stratified)KFold`,
- :term:`CV splitter`,
- An iterable yielding (train, test) splits as arrays of indices.
For int/None inputs, if the estimator is a classifier and ``y`` is
either binary or multiclass, :class:`StratifiedKFold` is used. In all
        other cases, :class:`KFold` is used. These splitters are instantiated
with `shuffle=False` so the splits will be the same across calls.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
.. versionchanged:: 0.22
``cv`` default value if None changed from 3-fold to 5-fold.
n_jobs : int, default=None
Number of jobs to run in parallel. Training the estimator and computing
the score are parallelized over the cross-validation splits.
``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
``-1`` means using all processors. See :term:`Glossary <n_jobs>`
for more details.
verbose : int, default=0
The verbosity level.
fit_params : dict, default=None
Parameters to pass to the fit method of the estimator.
pre_dispatch : int or str, default='2*n_jobs'
Controls the number of jobs that get dispatched during parallel
execution. Reducing this number can be useful to avoid an
explosion of memory consumption when more jobs get dispatched
than CPUs can process. This parameter can be:
- None, in which case all the jobs are immediately
created and spawned. Use this for lightweight and
fast-running jobs, to avoid delays due to on-demand
spawning of the jobs
- An int, giving the exact number of total jobs that are
spawned
- A str, giving an expression as a function of n_jobs,
as in '2*n_jobs'
return_train_score : bool, default=False
Whether to include train scores.
Computing training scores is used to get insights on how different
parameter settings impact the overfitting/underfitting trade-off.
However computing the scores on the training set can be computationally
expensive and is not strictly required to select the parameters that
yield the best generalization performance.
.. versionadded:: 0.19
.. versionchanged:: 0.21
Default value was changed from ``True`` to ``False``
return_estimator : bool, default=False
Whether to return the estimators fitted on each split.
.. versionadded:: 0.20
error_score : 'raise' or numeric, default=np.nan
Value to assign to the score if an error occurs in estimator fitting.
If set to 'raise', the error is raised.
If a numeric value is given, FitFailedWarning is raised.
.. versionadded:: 0.20
Returns
-------
scores : dict of float arrays of shape (n_splits,)
Array of scores of the estimator for each run of the cross validation.
"""
X, y, groups = indexable(X, y, groups)
cv = check_cv(cv, y, classifier=is_classifier(estimator))
if callable(scoring):
scorers = scoring
elif scoring is None or isinstance(scoring, str):
scorers = check_scoring(estimator, scoring)
else:
scorers = _check_multimetric_scoring(estimator, scoring)
    # Build the preprocessing transformer (all pipeline steps except the final estimator)
transformer = _make_transformer(eval_set_selection, estimator)
# We clone the estimator to make sure that all the folds are
# independent, and that it is pickle-able.
parallel = Parallel(n_jobs=n_jobs, verbose=verbose,
pre_dispatch=pre_dispatch)
results = parallel(
delayed(_fit_and_score_eval_set)(
eval_set_selection, transformer,
clone(estimator), X, y, scorers, train, test, verbose, None,
fit_params, return_train_score=return_train_score,
return_times=True, return_estimator=return_estimator,
error_score=error_score)
for train, test in cv.split(X, y, groups))
    # For callable scoring, the return type is only known after calling. If the
# return type is a dictionary, the error scores can now be inserted with
# the correct key.
if callable(scoring):
_insert_error_scores(results, error_score)
results = _aggregate_score_dicts(results)
ret = {}
ret['fit_time'] = results["fit_time"]
ret['score_time'] = results["score_time"]
if return_estimator:
ret['estimator'] = results["estimator"]
test_scores_dict = _normalize_score_results(results["test_scores"])
if return_train_score:
train_scores_dict = _normalize_score_results(results["train_scores"])
for name in test_scores_dict:
ret['test_%s' % name] = test_scores_dict[name]
if return_train_score:
key = 'train_%s' % name
ret[key] = train_scores_dict[name]
return ret
def cross_val_score_eval_set(eval_set_selection,
estimator, X, y=None, groups=None, scoring=None,
cv=None, n_jobs=None, verbose=0, fit_params=None,
pre_dispatch='2*n_jobs', error_score=np.nan):
"""
Evaluate a score by cross-validation with `eval_set` argument in `fit_params`
This method is suitable for calculating cross validation score with `early_stopping_round` in XGBoost or LightGBM.
Parameters
----------
eval_set_selection : {'all', 'train', 'test', 'original', 'original_transformed'}
Select data passed to `eval_set` in `fit_params`. Available only if "estimator" is LightGBM or XGBoost.
If "all", use all data in `X` and `y`.
If "train", select train data from `X` and `y` using cv.split().
If "test", select test data from `X` and `y` using cv.split().
If "original", use raw `eval_set`.
If "original_transformed", use `eval_set` transformed by fit_transform() of pipeline if `estimater` is pipeline.
estimator : estimator object implementing 'fit'
The object to use to fit the data.
X : array-like of shape (n_samples, n_features)
The data to fit. Can be for example a list, or an array.
y : array-like of shape (n_samples,) or (n_samples, n_outputs), \
default=None
The target variable to try to predict in the case of
supervised learning.
groups : array-like of shape (n_samples,), default=None
Group labels for the samples used while splitting the dataset into
train/test set. Only used in conjunction with a "Group" :term:`cv`
instance (e.g., :class:`GroupKFold`).
scoring : str or callable, default=None
A str (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)`` which should return only
a single value.
Similar to :func:`cross_validate`
but only a single metric is permitted.
If None, the estimator's default scorer (if available) is used.
cv : int, cross-validation generator or an iterable, default=None
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 5-fold cross validation,
- int, to specify the number of folds in a `(Stratified)KFold`,
- :term:`CV splitter`,
- An iterable yielding (train, test) splits as arrays of indices.
For int/None inputs, if the estimator is a classifier and ``y`` is
either binary or multiclass, :class:`StratifiedKFold` is used. In all
other cases, :class:`KFold` is used. These splitters are instantiated
with `shuffle=False` so the splits will be the same across calls.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
.. versionchanged:: 0.22
``cv`` default value if None changed from 3-fold to 5-fold.
n_jobs : int, default=None
Number of jobs to run in parallel. Training the estimator and computing
the score are parallelized over the cross-validation splits.
``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
``-1`` means using all processors. See :term:`Glossary <n_jobs>`
for more details.
verbose : int, default=0
The verbosity level.
fit_params : dict, default=None
Parameters to pass to the fit method of the estimator.
pre_dispatch : int or str, default='2*n_jobs'
Controls the number of jobs that get dispatched during parallel
execution. Reducing this number can be useful to avoid an
explosion of memory consumption when more jobs get dispatched
than CPUs can process. This parameter can be:
- None, in which case all the jobs are immediately
created and spawned. Use this for lightweight and
fast-running jobs, to avoid delays due to on-demand
spawning of the jobs
- An int, giving the exact number of total jobs that are
spawned
- A str, giving an expression as a function of n_jobs,
as in '2*n_jobs'
error_score : 'raise' or numeric, default=np.nan
Value to assign to the score if an error occurs in estimator fitting.
If set to 'raise', the error is raised.
If a numeric value is given, FitFailedWarning is raised.
.. versionadded:: 0.20
Returns
-------
scores : ndarray of float of shape=(len(list(cv)),)
Array of scores of the estimator for each run of the cross validation.
"""
# To ensure multimetric format is not supported
scorer = check_scoring(estimator, scoring=scoring)
cv_results = cross_validate_eval_set(eval_set_selection=eval_set_selection,
estimator=estimator, X=X, y=y, groups=groups,
scoring={'score': scorer}, cv=cv,
n_jobs=n_jobs, verbose=verbose,
fit_params=fit_params,
pre_dispatch=pre_dispatch,
error_score=error_score)
return cv_results['test_score']
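# --- Added usage sketch (not in the original module) ---
# A minimal sketch, assuming LightGBM and scikit-learn are installed. The raw
# eval_set below is a placeholder; with eval_set_selection='test' it is replaced
# by each fold's test split before fitting. Early stopping can be layered on top
# depending on the installed LightGBM version.
def _example_cross_val_score_eval_set():
    from sklearn.datasets import load_diabetes
    from lightgbm import LGBMRegressor  # assumed to be installed
    X, y = load_diabetes(return_X_y=True)
    fit_params = {'eval_metric': 'rmse', 'eval_set': [(X, y)]}
    scores = cross_val_score_eval_set(
        'test', LGBMRegressor(), X, y,
        scoring='neg_root_mean_squared_error', cv=3, fit_params=fit_params)
    return scores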
def validation_curve_eval_set(eval_set_selection,
estimator, X, y, param_name, param_range, groups=None,
cv=None, scoring=None, n_jobs=None, pre_dispatch="all",
verbose=0, error_score=np.nan, fit_params=None):
"""Validation curve.
Determine training and test scores for varying parameter values with `eval_set` argument in `fit_params`
Parameters
----------
eval_set_selection : {'all', 'train', 'test', 'original', 'original_transformed'}
Select data passed to `eval_set` in `fit_params`. Available only if "estimator" is LightGBM or XGBoost.
If "all", use all data in `X` and `y`.
If "train", select train data from `X` and `y` using cv.split().
If "test", select test data from `X` and `y` using cv.split().
If "original", use raw `eval_set`.
If "original_transformed", use `eval_set` transformed by fit_transform() of pipeline if `estimater` is pipeline.
estimator : object type that implements the "fit" and "predict" methods
An object of that type which is cloned for each validation.
X : array-like of shape (n_samples, n_features)
Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like of shape (n_samples,) or (n_samples, n_outputs) or None
Target relative to X for classification or regression;
None for unsupervised learning.
param_name : str
Name of the parameter that will be varied.
param_range : array-like of shape (n_values,)
The values of the parameter that will be evaluated.
groups : array-like of shape (n_samples,), default=None
Group labels for the samples used while splitting the dataset into
train/test set. Only used in conjunction with a "Group" :term:`cv`
instance (e.g., :class:`GroupKFold`).
cv : int, cross-validation generator or an iterable, default=None
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 5-fold cross validation,
- int, to specify the number of folds in a `(Stratified)KFold`,
- :term:`CV splitter`,
- An iterable yielding (train, test) splits as arrays of indices.
For int/None inputs, if the estimator is a classifier and ``y`` is
either binary or multiclass, :class:`StratifiedKFold` is used. In all
other cases, :class:`KFold` is used. These splitters are instantiated
with `shuffle=False` so the splits will be the same across calls.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
.. versionchanged:: 0.22
``cv`` default value if None changed from 3-fold to 5-fold.
scoring : str or callable, default=None
A str (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
n_jobs : int, default=None
Number of jobs to run in parallel. Training the estimator and computing
the score are parallelized over the combinations of each parameter
value and each cross-validation split.
``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
``-1`` means using all processors. See :term:`Glossary <n_jobs>`
for more details.
pre_dispatch : int or str, default='all'
Number of predispatched jobs for parallel execution (default is
all). The option can reduce the allocated memory. The str can
be an expression like '2*n_jobs'.
verbose : int, default=0
Controls the verbosity: the higher, the more messages.
fit_params : dict, default=None
Parameters to pass to the fit method of the estimator.
.. versionadded:: 0.24
error_score : 'raise' or numeric, default=np.nan
Value to assign to the score if an error occurs in estimator fitting.
If set to 'raise', the error is raised.
If a numeric value is given, FitFailedWarning is raised.
.. versionadded:: 0.20
Returns
-------
train_scores : array of shape (n_ticks, n_cv_folds)
Scores on training sets.
test_scores : array of shape (n_ticks, n_cv_folds)
Scores on test set.
"""
X, y, groups = indexable(X, y, groups)
cv = check_cv(cv, y, classifier=is_classifier(estimator))
scorer = check_scoring(estimator, scoring=scoring)
    # Build the preprocessing transformer (all pipeline steps except the final estimator)
transformer = _make_transformer(eval_set_selection, estimator)
parallel = Parallel(n_jobs=n_jobs, pre_dispatch=pre_dispatch,
verbose=verbose)
results = parallel(delayed(_fit_and_score_eval_set)(
eval_set_selection, transformer,
clone(estimator), X, y, scorer, train, test, verbose,
parameters={param_name: v}, fit_params=fit_params,
return_train_score=True, error_score=error_score,
        print_message=f'Calculating score. {param_name}={v}')
# NOTE do not change order of iteration to allow one time cv splitters
for train, test in cv.split(X, y, groups) for v in param_range)
n_params = len(param_range)
results = _aggregate_score_dicts(results)
train_scores = results["train_scores"].reshape(-1, n_params).T
test_scores = results["test_scores"].reshape(-1, n_params).T
return train_scores, test_scores
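# --- Added usage sketch (not in the original module) ---
# A minimal sketch, assuming LightGBM is installed: sweep a single hyperparameter
# while pinning eval_set to each fold's test split. The parameter name and range
# below are arbitrary placeholders.
def _example_validation_curve_eval_set():
    from sklearn.datasets import load_diabetes
    from lightgbm import LGBMRegressor  # assumed to be installed
    X, y = load_diabetes(return_X_y=True)
    train_scores, test_scores = validation_curve_eval_set(
        'test', LGBMRegressor(), X, y,
        param_name='n_estimators', param_range=[50, 100],
        cv=3, scoring='neg_root_mean_squared_error',
        fit_params={'eval_metric': 'rmse', 'eval_set': [(X, y)]})
    return train_scores, test_scores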
def learning_curve_eval_set(eval_set_selection,
estimator, X, y, groups=None,
train_sizes=np.linspace(0.1, 1.0, 5), cv=None,
scoring=None, exploit_incremental_learning=False,
n_jobs=None, pre_dispatch="all", verbose=0, shuffle=False,
random_state=None, error_score=np.nan, return_times=False,
fit_params=None):
"""Learning curve.
Determines cross-validated training and test scores for different training set sizes with `eval_set` argument in `fit_params`
Parameters
----------
eval_set_selection : {'all', 'train', 'test', 'original', 'original_transformed'}
Select data passed to `eval_set` in `fit_params`. Available only if "estimator" is LightGBM or XGBoost.
If "all", use all data in `X` and `y`.
If "train", select train data from `X` and `y` using cv.split().
If "test", select test data from `X` and `y` using cv.split().
If "original", use raw `eval_set`.
If "original_transformed", use `eval_set` transformed by fit_transform() of pipeline if `estimater` is pipeline.
estimator : object type that implements the "fit" and "predict" methods
An object of that type which is cloned for each validation.
X : array-like of shape (n_samples, n_features)
Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like of shape (n_samples,) or (n_samples, n_outputs)
Target relative to X for classification or regression;
None for unsupervised learning.
groups : array-like of shape (n_samples,), default=None
Group labels for the samples used while splitting the dataset into
train/test set. Only used in conjunction with a "Group" :term:`cv`
instance (e.g., :class:`GroupKFold`).
train_sizes : array-like of shape (n_ticks,), \
default=np.linspace(0.1, 1.0, 5)
Relative or absolute numbers of training examples that will be used to
generate the learning curve. If the dtype is float, it is regarded as a
fraction of the maximum size of the training set (that is determined
by the selected validation method), i.e. it has to be within (0, 1].
Otherwise it is interpreted as absolute sizes of the training sets.
        Note that for classification the number of samples usually has to
be big enough to contain at least one sample from each class.
cv : int, cross-validation generator or an iterable, default=None
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 5-fold cross validation,
- int, to specify the number of folds in a `(Stratified)KFold`,
- :term:`CV splitter`,
- An iterable yielding (train, test) splits as arrays of indices.
For int/None inputs, if the estimator is a classifier and ``y`` is
either binary or multiclass, :class:`StratifiedKFold` is used. In all
other cases, :class:`KFold` is used. These splitters are instantiated
with `shuffle=False` so the splits will be the same across calls.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
.. versionchanged:: 0.22
``cv`` default value if None changed from 3-fold to 5-fold.
scoring : str or callable, default=None
A str (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
exploit_incremental_learning : bool, default=False
If the estimator supports incremental learning, this will be
used to speed up fitting for different training set sizes.
n_jobs : int, default=None
Number of jobs to run in parallel. Training the estimator and computing
the score are parallelized over the different training and test sets.
``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
``-1`` means using all processors. See :term:`Glossary <n_jobs>`
for more details.
pre_dispatch : int or str, default='all'
Number of predispatched jobs for parallel execution (default is
all). The option can reduce the allocated memory. The str can
be an expression like '2*n_jobs'.
verbose : int, default=0
Controls the verbosity: the higher, the more messages.
shuffle : bool, default=False
Whether to shuffle training data before taking prefixes of it
        based on ``train_sizes``.
random_state : int, RandomState instance or None, default=None
Used when ``shuffle`` is True. Pass an int for reproducible
output across multiple function calls.
See :term:`Glossary <random_state>`.
error_score : 'raise' or numeric, default=np.nan
Value to assign to the score if an error occurs in estimator fitting.
If set to 'raise', the error is raised.
If a numeric value is given, FitFailedWarning is raised.
.. versionadded:: 0.20
return_times : bool, default=False
Whether to return the fit and score times.
fit_params : dict, default=None
Parameters to pass to the fit method of the estimator.
.. versionadded:: 0.24
Returns
-------
train_sizes_abs : array of shape (n_unique_ticks,)
        Numbers of training examples that have been used to generate the
learning curve. Note that the number of ticks might be less
than n_ticks because duplicate entries will be removed.
train_scores : array of shape (n_ticks, n_cv_folds)
Scores on training sets.
test_scores : array of shape (n_ticks, n_cv_folds)
Scores on test set.
fit_times : array of shape (n_ticks, n_cv_folds)
Times spent for fitting in seconds. Only present if ``return_times``
is True.
score_times : array of shape (n_ticks, n_cv_folds)
Times spent for scoring in seconds. Only present if ``return_times``
is True.
"""
if exploit_incremental_learning and not hasattr(estimator, "partial_fit"):
raise ValueError("An estimator must support the partial_fit interface "
"to exploit incremental learning")
X, y, groups = indexable(X, y, groups)
cv = check_cv(cv, y, classifier=is_classifier(estimator))
# Store it as list as we will be iterating over the list multiple times
cv_iter = list(cv.split(X, y, groups))
scorer = check_scoring(estimator, scoring=scoring)
n_max_training_samples = len(cv_iter[0][0])
# Because the lengths of folds can be significantly different, it is
# not guaranteed that we use all of the available training data when we
# use the first 'n_max_training_samples' samples.
train_sizes_abs = _translate_train_sizes(train_sizes,
n_max_training_samples)
n_unique_ticks = train_sizes_abs.shape[0]
if verbose > 0:
print("[learning_curve] Training set sizes: " + str(train_sizes_abs))
    # Build the preprocessing transformer (all pipeline steps except the final estimator)
transformer = _make_transformer(eval_set_selection, estimator)
parallel = Parallel(n_jobs=n_jobs, pre_dispatch=pre_dispatch,
verbose=verbose)
if shuffle:
rng = check_random_state(random_state)
cv_iter = ((rng.permutation(train), test) for train, test in cv_iter)
if exploit_incremental_learning:
classes = np.unique(y) if is_classifier(estimator) else None
out = parallel(delayed(_incremental_fit_estimator)(
clone(estimator), X, y, classes, train, test, train_sizes_abs,
scorer, verbose, return_times, error_score=error_score,
fit_params=fit_params)
for train, test in cv_iter
)
out = np.asarray(out).transpose((2, 1, 0))
else:
train_test_proportions = []
for train, test in cv_iter:
for n_train_samples in train_sizes_abs:
train_test_proportions.append((train[:n_train_samples], test))
results = parallel(delayed(_fit_and_score_eval_set)(
eval_set_selection, transformer,
clone(estimator), X, y, scorer, train, test, verbose,
parameters=None, fit_params=fit_params, return_train_score=True,
error_score=error_score, return_times=return_times)
for train, test in train_test_proportions
)
results = _aggregate_score_dicts(results)
train_scores = results["train_scores"].reshape(-1, n_unique_ticks).T
test_scores = results["test_scores"].reshape(-1, n_unique_ticks).T
out = [train_scores, test_scores]
if return_times:
fit_times = results["fit_time"].reshape(-1, n_unique_ticks).T
score_times = results["score_time"].reshape(-1, n_unique_ticks).T
out.extend([fit_times, score_times])
ret = train_sizes_abs, out[0], out[1]
if return_times:
ret = ret + (out[2], out[3])
return ret
class GridSearchCVEvalSet(GridSearchCV):
"""
Exhaustive search over specified parameter values for an estimator with `eval_set` argument in `fit_params`.
"""
def fit(self, eval_set_selection,
X, y=None, groups=None, **fit_params):
"""Run fit with all sets of parameters.
Parameters
----------
eval_set_selection : {'all', 'train', 'test', 'original', 'original_transformed'}
Select data passed to `eval_set` in `fit_params`. Available only if "estimator" is LightGBM or XGBoost.
If "all", use all data in `X` and `y`.
If "train", select train data from `X` and `y` using cv.split().
If "test", select test data from `X` and `y` using cv.split().
If "original", use raw `eval_set`.
If "original_transformed", use `eval_set` transformed by fit_transform() of pipeline if `estimater` is pipeline.
X : array-like of shape (n_samples, n_features)
Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like of shape (n_samples, n_output) \
or (n_samples,), default=None
Target relative to X for classification or regression;
None for unsupervised learning.
groups : array-like of shape (n_samples,), default=None
Group labels for the samples used while splitting the dataset into
train/test set. Only used in conjunction with a "Group" :term:`cv`
instance (e.g., :class:`~sklearn.model_selection.GroupKFold`).
**fit_params : dict of str -> object
Parameters passed to the ``fit`` method of the estimator
"""
estimator = self.estimator
refit_metric = "score"
if callable(self.scoring):
scorers = self.scoring
elif self.scoring is None or isinstance(self.scoring, str):
scorers = check_scoring(self.estimator, self.scoring)
else:
scorers = _check_multimetric_scoring(self.estimator, self.scoring)
self._check_refit_for_multimetric(scorers)
refit_metric = self.refit
X, y, groups = indexable(X, y, groups)
fit_params = _check_fit_params(X, fit_params)
cv_orig = check_cv(self.cv, y, classifier=is_classifier(estimator))
n_splits = cv_orig.get_n_splits(X, y, groups)
base_estimator = clone(self.estimator)
        # Build the preprocessing transformer (all pipeline steps except the final estimator)
transformer = _make_transformer(eval_set_selection, estimator)
parallel = Parallel(n_jobs=self.n_jobs,
pre_dispatch=self.pre_dispatch)
fit_and_score_kwargs = dict(scorer=scorers,
fit_params=fit_params,
return_train_score=self.return_train_score,
return_n_test_samples=True,
return_times=True,
return_parameters=False,
error_score=self.error_score,
verbose=self.verbose)
results = {}
with parallel:
all_candidate_params = []
all_out = []
all_more_results = defaultdict(list)
def evaluate_candidates(candidate_params, cv=None,
more_results=None):
cv = cv or cv_orig
candidate_params = list(candidate_params)
n_candidates = len(candidate_params)
if self.verbose > 0:
print("Fitting {0} folds for each of {1} candidates,"
" totalling {2} fits".format(
n_splits, n_candidates, n_candidates * n_splits))
out = parallel(delayed(_fit_and_score_eval_set)(
eval_set_selection, transformer,
clone(base_estimator),
X, y,
train=train, test=test,
parameters=parameters,
split_progress=(
split_idx,
n_splits),
candidate_progress=(
cand_idx,
n_candidates),
print_message=f'cand={cand_idx}/{n_candidates}, cv={split_idx}: {parameters}',
**fit_and_score_kwargs)
for (cand_idx, parameters),
(split_idx, (train, test)) in product(
enumerate(candidate_params),
enumerate(cv.split(X, y, groups))))
if len(out) < 1:
raise ValueError('No fits were performed. '
'Was the CV iterator empty? '
'Were there no candidates?')
elif len(out) != n_candidates * n_splits:
raise ValueError('cv.split and cv.get_n_splits returned '
'inconsistent results. Expected {} '
'splits, got {}'
.format(n_splits,
len(out) // n_candidates))
                # For callable self.scoring, the return type is only known after
# calling. If the return type is a dictionary, the error scores
# can now be inserted with the correct key. The type checking
# of out will be done in `_insert_error_scores`.
if callable(self.scoring):
_insert_error_scores(out, self.error_score)
all_candidate_params.extend(candidate_params)
all_out.extend(out)
if more_results is not None:
for key, value in more_results.items():
all_more_results[key].extend(value)
nonlocal results
results = self._format_results(
all_candidate_params, n_splits, all_out,
all_more_results)
return results
self._run_search(evaluate_candidates)
# multimetric is determined here because in the case of a callable
# self.scoring the return type is only known after calling
first_test_score = all_out[0]['test_scores']
self.multimetric_ = isinstance(first_test_score, dict)
        # check refit_metric now for a callable scorer that is multimetric
if callable(self.scoring) and self.multimetric_:
self._check_refit_for_multimetric(first_test_score)
refit_metric = self.refit
# For multi-metric evaluation, store the best_index_, best_params_ and
# best_score_ iff refit is one of the scorer names
# In single metric evaluation, refit_metric is "score"
if self.refit or not self.multimetric_:
# If callable, refit is expected to return the index of the best
# parameter set.
if callable(self.refit):
self.best_index_ = self.refit(results)
if not isinstance(self.best_index_, numbers.Integral):
raise TypeError('best_index_ returned is not an integer')
if (self.best_index_ < 0 or
self.best_index_ >= len(results["params"])):
raise IndexError('best_index_ index out of range')
else:
self.best_index_ = results["rank_test_%s"
% refit_metric].argmin()
self.best_score_ = results["mean_test_%s" % refit_metric][
self.best_index_]
self.best_params_ = results["params"][self.best_index_]
if self.refit:
# we clone again after setting params in case some
# of the params are estimators as well.
self.best_estimator_ = clone(clone(base_estimator).set_params(
**self.best_params_))
refit_start_time = time.time()
if y is not None:
self.best_estimator_.fit(X, y, **fit_params)
else:
self.best_estimator_.fit(X, **fit_params)
refit_end_time = time.time()
self.refit_time_ = refit_end_time - refit_start_time
# Store the only scorer not as a dict for single metric evaluation
self.scorer_ = scorers
self.cv_results_ = results
self.n_splits_ = n_splits
return self
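# --- Added usage sketch (not part of the original module) ---------------------
# Minimal illustration of how GridSearchCVEvalSet might be driven; the use of
# LGBMRegressor, the parameter grid and the synthetic data are assumptions made
# for the example only.
def _example_grid_search_eval_set():
    from sklearn.datasets import make_regression
    from lightgbm import LGBMRegressor  # assumption: lightgbm is installed

    X, y = make_regression(n_samples=200, n_features=5, random_state=0)
    gs = GridSearchCVEvalSet(
        LGBMRegressor(n_estimators=50),
        param_grid={"num_leaves": [15, 31]},
        cv=3,
    )
    # eval_set_selection='test' replaces the supplied eval_set with the CV test
    # fold of each split before fitting.
    gs.fit("test", X, y, eval_set=[(X, y)], eval_metric="rmse")
    return gs.best_params_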
class RandomizedSearchCVEvalSet(RandomizedSearchCV):
"""
Randomized search on hyper parameters with `eval_set` argument in `fit_params`.
"""
def fit(self, eval_set_selection,
X, y=None, groups=None, **fit_params):
"""Run fit with all sets of parameters.
Parameters
----------
eval_set_selection : {'all', 'train', 'test', 'original', 'original_transformed'}
Select data passed to `eval_set` in `fit_params`. Available only if "estimator" is LightGBM or XGBoost.
If "all", use all data in `X` and `y`.
If "train", select train data from `X` and `y` using cv.split().
If "test", select test data from `X` and `y` using cv.split().
If "original", use raw `eval_set`.
If "original_transformed", use `eval_set` transformed by fit_transform() of pipeline if `estimater` is pipeline.
X : array-like of shape (n_samples, n_features)
Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like of shape (n_samples, n_output) \
or (n_samples,), default=None
Target relative to X for classification or regression;
None for unsupervised learning.
groups : array-like of shape (n_samples,), default=None
Group labels for the samples used while splitting the dataset into
train/test set. Only used in conjunction with a "Group" :term:`cv`
instance (e.g., :class:`~sklearn.model_selection.GroupKFold`).
**fit_params : dict of str -> object
Parameters passed to the ``fit`` method of the estimator
"""
estimator = self.estimator
refit_metric = "score"
if callable(self.scoring):
scorers = self.scoring
elif self.scoring is None or isinstance(self.scoring, str):
scorers = check_scoring(self.estimator, self.scoring)
else:
scorers = _check_multimetric_scoring(self.estimator, self.scoring)
self._check_refit_for_multimetric(scorers)
refit_metric = self.refit
X, y, groups = indexable(X, y, groups)
fit_params = _check_fit_params(X, fit_params)
cv_orig = check_cv(self.cv, y, classifier=is_classifier(estimator))
n_splits = cv_orig.get_n_splits(X, y, groups)
base_estimator = clone(self.estimator)
        # Build the preprocessing transformer (every pipeline step except the final estimator)
transformer = _make_transformer(eval_set_selection, estimator)
parallel = Parallel(n_jobs=self.n_jobs,
pre_dispatch=self.pre_dispatch)
fit_and_score_kwargs = dict(scorer=scorers,
fit_params=fit_params,
return_train_score=self.return_train_score,
return_n_test_samples=True,
return_times=True,
return_parameters=False,
error_score=self.error_score,
verbose=self.verbose)
results = {}
with parallel:
all_candidate_params = []
all_out = []
all_more_results = defaultdict(list)
def evaluate_candidates(candidate_params, cv=None,
more_results=None):
cv = cv or cv_orig
candidate_params = list(candidate_params)
n_candidates = len(candidate_params)
if self.verbose > 0:
print("Fitting {0} folds for each of {1} candidates,"
" totalling {2} fits".format(
n_splits, n_candidates, n_candidates * n_splits))
out = parallel(delayed(_fit_and_score_eval_set)(
eval_set_selection, transformer,
clone(base_estimator),
X, y,
train=train, test=test,
parameters=parameters,
split_progress=(
split_idx,
n_splits),
candidate_progress=(
cand_idx,
n_candidates),
print_message=f'cand={cand_idx}/{n_candidates}, cv={split_idx}: {parameters}',
**fit_and_score_kwargs)
for (cand_idx, parameters),
(split_idx, (train, test)) in product(
enumerate(candidate_params),
enumerate(cv.split(X, y, groups))))
if len(out) < 1:
raise ValueError('No fits were performed. '
'Was the CV iterator empty? '
'Were there no candidates?')
elif len(out) != n_candidates * n_splits:
raise ValueError('cv.split and cv.get_n_splits returned '
'inconsistent results. Expected {} '
'splits, got {}'
.format(n_splits,
len(out) // n_candidates))
                # For callable self.scoring, the return type is only known after
# calling. If the return type is a dictionary, the error scores
# can now be inserted with the correct key. The type checking
# of out will be done in `_insert_error_scores`.
if callable(self.scoring):
_insert_error_scores(out, self.error_score)
all_candidate_params.extend(candidate_params)
all_out.extend(out)
if more_results is not None:
for key, value in more_results.items():
all_more_results[key].extend(value)
nonlocal results
results = self._format_results(
all_candidate_params, n_splits, all_out,
all_more_results)
return results
self._run_search(evaluate_candidates)
# multimetric is determined here because in the case of a callable
# self.scoring the return type is only known after calling
first_test_score = all_out[0]['test_scores']
self.multimetric_ = isinstance(first_test_score, dict)
        # check refit_metric now for a callable scorer that is multimetric
if callable(self.scoring) and self.multimetric_:
self._check_refit_for_multimetric(first_test_score)
refit_metric = self.refit
# For multi-metric evaluation, store the best_index_, best_params_ and
# best_score_ iff refit is one of the scorer names
# In single metric evaluation, refit_metric is "score"
if self.refit or not self.multimetric_:
# If callable, refit is expected to return the index of the best
# parameter set.
if callable(self.refit):
self.best_index_ = self.refit(results)
if not isinstance(self.best_index_, numbers.Integral):
raise TypeError('best_index_ returned is not an integer')
if (self.best_index_ < 0 or
self.best_index_ >= len(results["params"])):
raise IndexError('best_index_ index out of range')
else:
self.best_index_ = results["rank_test_%s"
% refit_metric].argmin()
self.best_score_ = results["mean_test_%s" % refit_metric][
self.best_index_]
self.best_params_ = results["params"][self.best_index_]
if self.refit:
# we clone again after setting params in case some
# of the params are estimators as well.
self.best_estimator_ = clone(clone(base_estimator).set_params(
**self.best_params_))
refit_start_time = time.time()
if y is not None:
self.best_estimator_.fit(X, y, **fit_params)
else:
self.best_estimator_.fit(X, **fit_params)
refit_end_time = time.time()
self.refit_time_ = refit_end_time - refit_start_time
# Store the only scorer not as a dict for single metric evaluation
self.scorer_ = scorers
self.cv_results_ = results
self.n_splits_ = n_splits
return self
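# --- Added usage sketch (not part of the original module) ---------------------
# Matching illustration for RandomizedSearchCVEvalSet; the XGBRegressor import
# and the sampled distribution are assumptions made for the example only.
def _example_randomized_search_eval_set(X, y):
    from scipy.stats import randint
    from xgboost import XGBRegressor  # assumption: xgboost is installed

    rs = RandomizedSearchCVEvalSet(
        XGBRegressor(n_estimators=50),
        param_distributions={"max_depth": randint(2, 8)},
        n_iter=5,
        cv=3,
        random_state=0,
    )
    # eval_set_selection='train' passes each CV training fold as the eval_set.
    rs.fit("train", X, y, eval_set=[(X, y)], verbose=False)
    return rs.best_params_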
|
84337
|
import copy
from mock import patch
from ruskit.failover import FastAddMachineManager
from ruskit.distribute import RearrangeSlaveManager, NodeWrapper
from test_base import TestCaseBase
class DummyCluster(object):
def __init__(self):
self.nodes = []
def dummy_gen_distribution_for_move_masters(nodes, new_nodes):
hosts = ['host1', 'host2', 'host3', 'host4']
masters = [
[NodeWrapper(i, 'm{}'.format(i), 0) for i in range(1, 4)],
[NodeWrapper(i, 'm{}'.format(i), 1) for i in range(4, 9)],
[NodeWrapper(i, 'm{}'.format(i), 2) for i in range(9, 11)],
[],
]
slaves = [
[NodeWrapper(None, 's1', 1)],
[NodeWrapper(None, 's2', 2)],
[NodeWrapper(None, 's3', 3)],
[],
]
frees = [[], [], [], [NodeWrapper(i, 'f{}'.format(i), 3) for i in range(1, 3)]]
return {
'hosts': hosts,
'masters': masters,
'slaves': slaves,
'frees': frees,
}
class MoveSlavesMockData(object):
hosts = ['host1', 'host2', 'host3', 'host4']
masters = [
[NodeWrapper(i, 'm{}'.format(i), 0) for i in range(1, 4)],
[NodeWrapper(i, 'm{}'.format(i), 1) for i in range(4, 8)],
[NodeWrapper(i, 'm{}'.format(i), 2) for i in range(8, 11)],
[NodeWrapper(i, 'm{}'.format(i), 3) for i in range(11, 14)],
]
m1 = masters[0][0]
m2 = masters[0][1]
m5 = masters[1][1]
m7 = masters[1][3]
m8 = masters[2][0]
m9 = masters[2][1]
m11 = masters[3][0]
s1, s2, s3, s4, s5, s6, s7 = (NodeWrapper(None, 's{}'.format(i), 1) \
for i in range(1, 8))
s1.host_index = 0
s6.host_index = 2
s7.host_index = 2
slaves = [
[s1], [s2, s3, s4, s5], [s6, s7], []
]
s1.master, s2.master, s3.master, s4.master = m7, m1, m9, m11
s5.master, s6.master, s7.master = m2, m8, m5
m1.slaves.append(s2)
m2.slaves.append(s5)
m5.slaves.append(s7)
m7.slaves.append(s1)
m8.slaves.append(s6)
m9.slaves.append(s3)
m11.slaves.append(s4)
frees = [
[NodeWrapper(None, 'f{}'.format(i), 0) for i in range(1, 5)],
[NodeWrapper(None, 'f{}'.format(i), 1) for i in range(5, 8)],
[NodeWrapper(None, 'f{}'.format(i), 2) for i in range(8, 12)],
[NodeWrapper(None, 'f{}'.format(i), 3) for i in range(12, 16)],
]
@classmethod
def dummy_gen_distribution(cls, nodes, new_nodes):
return {
'hosts': map(copy.copy, cls.hosts),
'masters': map(copy.copy, cls.masters),
'slaves': map(copy.copy, cls.slaves),
'frees': map(copy.copy, cls.frees),
}
class TestMoveNode(TestCaseBase):
@patch('ruskit.failover.gen_distribution',
dummy_gen_distribution_for_move_masters)
def test_move_master(self):
dummy_new_nodes = []
manager = FastAddMachineManager(DummyCluster(), dummy_new_nodes)
result = manager.peek_result()
plan = result['plan']
p1 = plan[0]
p2 = plan[1]
if p1['master'].tag != 'm8':
p1, p2 = p2, p1
slaves = ('f1', 'f2')
self.assertEqual(p1['master'].tag, 'm8')
self.assertTrue(p1['slave'].tag in slaves)
self.assertEqual(p2['master'].tag, 'm7')
self.assertTrue(p2['slave'].tag in slaves)
dis = manager.get_distribution()
masters = dis['masters']
masters_num = list(set(map(len, masters)))
self.assertTrue(len(masters_num) <= 2)
if len(masters_num) == 2:
self.assertTrue(abs(masters_num[0] - masters_num[1]) <= 1)
@patch('ruskit.distribute.gen_distribution',
MoveSlavesMockData.dummy_gen_distribution)
def test_move_slave(self):
dummy_new_nodes = []
manager = RearrangeSlaveManager(DummyCluster(), dummy_new_nodes)
result = manager.gen_plan()
add_plan = result['add_plan']
delete_plan = result['delete_plan']
# slaves_num == masters_num
self.assertEqual(7 - len(delete_plan) + len(add_plan), 13)
slaves_of_master = [1, 1, 0, 0, 1, 0, 1, 1, 1, 0, 1, 0, 0]
for slave in delete_plan:
slaves_of_master[slave.master.node-1] -= 1
for p in add_plan:
slave = p['slave']
master = p['master']
slaves_of_master[master.node-1] += 1
# All masters should have exactly one slave
self.assertTrue(all(map(lambda s: s == 1, slaves_of_master)))
dis = MoveSlavesMockData.dummy_gen_distribution(None, None)
masters_per_host = [len(m) for m in dis['masters']]
slaves_per_host = [len(set(s) - set(delete_plan)) \
for s in dis['slaves']]
new_slaves_per_host = [0 for _ in range(len(dis['hosts']))]
for p in add_plan:
new_slaves_per_host[p['slave'].host_index] += 1
nodes_per_host = map(sum, zip(
masters_per_host, slaves_per_host, new_slaves_per_host))
# All hosts should contains almost the same number of nodes
nodes_num = list(set(nodes_per_host))
self.assertTrue(len(nodes_num) <= 2)
if (len(nodes_num) == 2):
self.assertTrue(abs(nodes_num[0] - nodes_num[1]) <= 1)
|
84351
|
from .prs import PRS, SubStream_Container
import random
import torch
import numpy as np
from collections import deque
class DelayBuffer(PRS):
"""
    Delayed buffer for new data samples that need to be learned in chunks,
    used later to decide whether each sample should enter the main buffer or not.
"""
def reset(self):
"""reset the buffer.
"""
self.rsvr = dict()
self.rsvr_available_idx = deque(range(self.rsvr_total_size))
self.substreams = SubStream_Container(self.rsvr_total_size)
self.n = 0
np.random.seed(self.config['random_seed'])
random.seed(self.config['random_seed'])
torch.manual_seed(self.config['random_seed'])
return
|
84380
|
from flask import Blueprint
from flask import flash, session, redirect, render_template, request, \
url_for
from flask_babel import _
from app import constants
from app.decorators import require_role
from app.exceptions.base import DuplicateResourceException
from app.forms.examination import EducationForm
from app.roles import Roles
from app.service import examination_service
blueprint = Blueprint('education', __name__, url_prefix='/education')
REDIR_PAGES = {'view': 'examination.view_examination',
'add': 'examination.add',
'educations': 'education.view_educations',
'courses': 'course.view_courses'
}
DATE_FORMAT = constants.DATE_FORMAT
@blueprint.route('/', methods=['GET'])
@require_role(Roles.EXAMINATION_WRITE)
def view_educations():
return render_template('education/view.htm')
@blueprint.route('/add/', methods=['GET', 'POST'])
@require_role(Roles.EXAMINATION_WRITE)
def add_education():
r = request.args.get('redir', True)
if r in REDIR_PAGES:
session['origin'] = url_for(REDIR_PAGES[r])
elif r == 'edit' and 'examination_edit_id' in session:
session['origin'] = '/examination/edit/{}'.format(
session['examination_edit_id'])
form = EducationForm(request.form)
if form.validate_on_submit():
name = form.title.data
try:
examination_service.add_education(name)
flash("'%s': " % name + _('Education successfully added.'),
'success')
except DuplicateResourceException as e:
flash("'%s': " % name + _('Already exists in the database'),
'danger')
if 'origin' in session:
redir = session['origin']
else:
redir = url_for('examination.add')
return redirect(redir)
return render_template('education/edit.htm',
form=form, new=True)
@blueprint.route('/edit/<int:education_id>', methods=['GET', 'POST'])
@require_role(Roles.EXAMINATION_WRITE)
def edit_education(education_id):
r = request.args.get('redir')
if r in REDIR_PAGES:
session['origin'] = url_for(REDIR_PAGES[r])
elif r == 'edit' and 'examination_edit_id' in session:
session['origin'] = '/examination/edit/{}'.format(
session['examination_edit_id'])
education = examination_service.get_education_by_id(education_id)
exam_count = examination_service. \
count_examinations_by_education(education_id)
form = EducationForm(request.form)
if form.validate_on_submit():
name = form.title.data
try:
examination_service.update_education(education_id, name)
flash("'%s': " % name + _('Education successfully saved.'),
'success')
except DuplicateResourceException as e:
flash("%s: " % e, 'danger')
if 'origin' in session:
redir = session['origin']
else:
redir = url_for('education.view_educations')
return redirect(redir)
else:
form = EducationForm(title=education.name)
return render_template('education/edit.htm', new=False,
form=form, redir=r, exam_count=exam_count,
education=education)
|
84392
|
import os
import pytest
from types import SimpleNamespace
import git
from mathlibtools.git_helpers import visit_ancestors
@pytest.fixture
def dummy_repo(tmp_path):
r"""
A -- B -- E -- I -- J -- L
\ / /
C --- F -- H
\ /
D ---- G --- K
"""
d = tmp_path / "repo"
d.mkdir()
repo = git.Repo.init(d)
with repo.config_writer() as cw:
cw.set_value("user", "name", "pytest")
cw.set_value("user", "email", "<>")
# workaround for https://github.com/gitpython-developers/GitPython/pull/1314
os.environ['USER'] = 'gitpython needs this to be here so it can ignore it'
A = repo.index.commit("A")
B = repo.index.commit("B", parent_commits=(A,))
C = repo.index.commit("C", parent_commits=(B,))
D = repo.index.commit("D", parent_commits=(C,))
E = repo.index.commit("E", parent_commits=(B,))
F = repo.index.commit("F", parent_commits=(C,))
G = repo.index.commit("G", parent_commits=(D,))
I = repo.index.commit("I", parent_commits=(E, F))
H = repo.index.commit("H", parent_commits=(F, G))
J = repo.index.commit("J", parent_commits=(I, H))
K = repo.index.commit("K", parent_commits=(G,))
L = repo.index.commit("L", parent_commits=(J,))
return repo
@pytest.mark.parametrize(['match', 'exp_found', 'exp_visited'], [
('L', 'L', ''), # finding the root prunes everything else
('BFG', 'GF', 'LJHIE'), # B is pruned
('K', '', 'LJHGDIFCEBA'), # no match, all iterated
])
def test_visit_ancestors(dummy_repo, match, exp_found, exp_visited):
assert dummy_repo.head.commit.message == 'L'
found = []
visited = []
for c, prune in visit_ancestors(dummy_repo.head.commit):
if c.message in list(match):
prune()
found.append(c.message)
else:
visited.append(c.message)
assert visited == list(exp_visited)
assert found == list(exp_found)
|
84396
|
import os
import sys
import json
import time
from distutils.version import LooseVersion
import importlib
import pip
from enum import Enum
import logging
import csv
import subprocess
try:
main = pip.main
except AttributeError:
# module 'pip' has no attribute 'main'
from pip._internal import main
apikey = ''
password = ''
username = ''
bkey = ''
buy_trades = {}
sell_trades = {}
items = {}
key_price = 0
bud_price = 0
escrow = None
whitelist = []
currencies = {'bud':'Earbuds', 'ref':'Refined Metal', 'rec':'Reclaimed Metal', 'scrap':'Scrap Metal', 'key':'Mann Co. Supply Crate Key'}
packages = ['steampy', 'requests']
declined_trades = None
past_time = time.time()
start_time = time.time()
logging.basicConfig(filename='trade.log', level=logging.DEBUG,
format='[%(asctime)s][%(levelname)s][%(name)s]: %(message)s', datefmt='%m/%d/%Y %I:%M:%S %p')
start_text = """
_____ _____ ____ _____ ____ _ ____ U _____ u ____ U ___ u _____
|_ " _| |" ___||___"\ |_ " _|U | _"\ u U /"\ u | _"\ \| ___"|/ U | __")u \/"_ \/|_ " _|
| | U| |_ uU __) | | | \| |_) |/ \/ _ \/ /| | | | | _|" \| _ \/ | | | | | |
/| |\ \| _|/ \/ __/ \ /| |\ | _ < / ___ \ U| |_| |\| |___ | |_) |.-,_| |_| | /| |\
u |_|U |_| |_____|u u |_|U |_| \_\ /_/ \_\ |____/ u|_____| |____/ \_)-\___/ u |_|U
_// \\\_ )(\\\,- << // _// \\\_ // \\\_ \\\ >> |||_ << >> _|| \\\_ \\\ _// \\\_
(__) (__)(__)(_/(__)(__) (__) (__)(__) (__)(__) (__)(__)_) (__) (__) (__) (__) (__) (__) (__)
Created by: Zwork101 Github: https://github.com/Zwork101 Steam: https://steamcommunity.com/id/ZWORK101
THIS VERSION IS NO LONGER UNDER DEVELOPMENT AND BUGS WILL NOT BE FIXED. IT IS HIGHLY RECOMMENDED TO SWITCH
TO THE NEW VERSION. YOU CAN FIND THIS AT: https://github.com/mninc/tf2-trade-bot-2\n
"""
class TradeOfferStatus(Enum):
INVALID = 1
ACTIVE = 2
ACCEPTED = 3
EXPIRED = 4
CANCELED = 6
INVALID_ITEMS = 8
WAIT_CONF = 9
WAIT_SFAC = 10
ESCROW = 11
class TradeManager:
"""
The manager for trades. This will be used to organize trades and keep everything from falling apart.
    Params: client (steampy.client.SteamClient object) and conf (steampy.confirmation.ConfirmationExecutor)
Public values: client and conf (see above)
Public functions: accept, check_trades_content, get_new_trades, check_good_trades, check_bad_trades
"""
def __init__(self, client, conf):
self._trades = []
self._pending_trades = []
self._try_confs = []
self._declined_trades = []
self.client = client
self.conf = conf
def decline(self, trade):
if decline_trades:
self.client.decline_trade_offer(trade.id)
if trade.id not in self._declined_trades:
self._declined_trades.append(trade.id)
def accept(self, trade):
"""
        Handles accepting a trade offer; this is kept separate because accepting can fail in several ways.
        Params: (self), trade (Trade object)
        Output: bool (True if the offer was accepted, False if it was queued for another attempt)
"""
try:
self.client.accept_trade_offer(trade.id)
return True
except BaseException as BE:
if BE.__class__ == KeyError:
print(f'ERROR: Issue confirming trade: {trade.id}, trying again')
#self._trades.remove(trade)
self._pending_trades.append(trade)
logging.warning(f'TRADE ACCEPT ERROR: {type(BE).__name__}: {BE}')
return False
def check_trades_content(self):
"""
        Checks the trades currently in self._pending_trades and decides whether each one is acceptable.
        Good trades are accepted and moved to self._trades; bad ones are declined and tracked in
        self._declined_trades.
        Params: (self)
        Output: None
"""
for trade in range(len(self._pending_trades)-1,-1,-1):
trade = self._pending_trades[trade]
sell_value = 0
buy_value = 0
extra_sell = []
extra_buy = []
if not trade.items_to_give:
self._pending_trades.remove(trade)
self._trades.append(trade)
self.accept(trade)
continue
exit_trade = False
for item in trade.items_to_give:
if not exit_trade:
if item not in sell_trades:
if item in currencies.values():
extra_sell.append(item)
else:
print('[TRADE]: Unknown item we\'re giving, declining')
self.decline(trade)
self._pending_trades.remove(trade)
logging.info("DECLINING TRADE WITH UN-KNOWN ITEM")
exit_trade = True
else:
sell_value = add_values(float(sell_trades[item]), float(sell_value))
if exit_trade:
continue
for item in trade.items_to_receive:
if item in buy_trades:
buy_value = add_values(float(buy_trades[item]), float(buy_value))
elif item in currencies.values():
extra_buy.append(item)
sell_curr = sort(extra_sell)
buy_curr = sort(extra_buy)
sell_value += calculate(sell_curr[0], sell_curr[1], sell_curr[2], sell_curr[3], sell_curr[4])
buy_value += calculate(buy_curr[0], buy_curr[1], buy_curr[2], buy_curr[3], buy_curr[4])
if sell_value <= buy_value:
print(f'[TRADE]: Looks good! They gave us:\n{str(trade.items_to_receive)}')
print(f'[TRADE]: We gave them:\n{str(trade.items_to_give)}')
print('[TRADE]: Attempting to accept offer')
try:
logging.info(f"ATTEMPTING TRADE: {trade.id}\nSELL: {sell_value} BUY:{buy_value}\n{trade.trade}")
self._trades.append(trade)
self._pending_trades.remove(trade)
self.accept(trade)
except ConfirmationExpected:
logging.warning(f'FAILED TO CONFIRM TRADE: {trade.id} (FIRST TRY)')
self._try_confs.append(trade.id)
else:
print(f'[TRADE]: No good! They offered us:\n{str(trade.items_to_receive)}')
print(f'[TRADE]: For our:\n{str(trade.items_to_give)}')
print('[TRADE]: Declining offer')
logging.info(f"DECLINING INVALID TRADE: {trade.id}\nSELL: {sell_value} BUY:{buy_value}\n{trade.trade}")
self.decline(trade)
self._pending_trades.remove(trade)
def get_new_trades(self):
"""
        Collects new trade offers and compares them to the known ones to make sure they are new. Accepts an
        offer if the sender is whitelisted, declines it if the user is a scammer or the trade is escrow,
        and otherwise moves it to self._pending_trades (list).
        Params: (self)
        Output: None
"""
new_trades = client.get_trade_offers()['response']
#logging.debug(new_trades)
for new_trade in new_trades['trade_offers_received']:
if (not new_trade['tradeofferid'] in [t.id for t in self._trades]) \
or (new_trade['tradeofferid'] in self._declined_trades):
id64 = 76561197960265728 + new_trade['accountid_other']
trade = Trade(new_trade, id64)
logging.info(f"FOUND NEW TRADE: {trade.id}")
if str(id64) in whitelist:
print(f"[WHITELIST]: Neat! The user sending this trade is whitelisted! Attempting confirmation (STEAM ID:{id64})")
logging.info(f'TRADE WHITELISTED ATTEMPTING TRADE: {trade.id}')
self.accept(trade)
self._trades.append(trade)
continue
print(f'[TRADE]: Found trade (ID: {trade.id})')
if self._check_partner(trade):
if not accept_escrow and trade.escrow:
print("[TRADE]: Trade is escrow, declining")
logging.info(f'DECLINING ESCROW TRADE: {trade.trade}')
self.decline(trade)
else:
self._pending_trades.append(trade)
def _check_partner(self, trade):
"""
        Checks whether the trade partner is flagged as a scammer, using the backpack.tf API (which also
        exposes the partner's steamrep status). If the user is a scammer, the trade is declined and
        tracked in self._declined_trades.
        Params: (self), trade (Trade object)
        Output: bool (True if the partner is clean, False if the trade was declined)
"""
print("[TRADE]: Checking for trade bans on backpack.tf and steamrep.com")
rJson = requests.get(f"https://backpack.tf/api/users/info/v1?",
data={'key':bkey, 'steamids':trade.other_steamid}).json()
logging.debug(str(rJson))
if "bans" in rJson['users'][trade.other_steamid].keys():
if "steamrep_caution" in rJson['users'][trade.other_steamid]['bans'] or \
"steamrep_scammer" in rJson['users'][trade.other_steamid]['bans']:
print("[steamrep.com]: SCAMMER")
print('[TRADE]: Ending trade...')
logging.info(f"DECLINED SCAMMER (ID:{trade.other_steamid})")
self.decline(trade)
return False
print('[steamrep.com]: User is not banned')
if "all" in rJson['users'][trade.other_steamid]['bans']:
print('[backpack.tf]: SCAMMER')
print('[TRADE]: Ending trade...')
logging.info(f"DECLINED SCAMMER (ID:{trade.other_steamid})")
self.decline(trade)
return False
print('[backpack.tf]: User is clean')
print("[backpack.tf/steamrep.com]: User is clean")
return True
def check_bad_trades(self):
"""
        Looks at the current trades in self._trades and checks whether any of them has gone bad (e.g.
        the trade was cancelled or expired). Such trades are removed and the reason is reported to the user.
        Params: (self)
Output: None
"""
for trade_index in range(len(self._trades)-1, -1, -1):
trade = self._trades[trade_index]
status = trade.status()
if status == TradeOfferStatus.INVALID.value:
print(f'[ERROR]: Trade offer id {trade.id} seems to be invalid')
self._trades.remove(trade)
logging.warning(f'TRADE {trade.id} BECAME invalid')
elif status == TradeOfferStatus.CANCELED.value:
print(f'[TRADE]: Trade {trade.id} was canceled.')
self._trades.remove(trade)
logging.warning(f'TRADE {trade.id} BECAME canceled')
elif status == TradeOfferStatus.EXPIRED.value:
print(f'[TRADE]: Trade {trade.id} has expired... How did that happen?')
self._trades.remove(trade)
logging.warning(f'TRADE {trade.id} BECAME expired')
elif status == TradeOfferStatus.INVALID_ITEMS.value:
print(f'[TRADE]: Items attempting to trade became invalid. {trade.id}')
self._trades.remove(trade)
logging.warning(f'TRADE {trade.id} BECAME invalid_items')
elif status == TradeOfferStatus.ESCROW.value and not accept_escrow:
print('[ERROR]: Whoops, escrow trade was confirmed. Sorry about that')
self._trades.remove(trade)
logging.fatal(f'ACCEPTED ESCROW TRADE')
def check_good_trades(self):
"""
        Checks whether any trades in self._trades have been accepted; accepted trades are removed from
        the list and reported to the user. (Retrying confirmations that previously failed is handled
        separately by confirm_check, which works through self._try_confs.)
        Params: (self)
Output: None
"""
for trade_index in range(len(self._trades) - 1, -1, -1):
trade = self._trades[trade_index]
status = trade.status()
if status == TradeOfferStatus.ACCEPTED.value:
print(f'[TRADE]: Accepted trade {trade.id}')
self._trades.remove(trade)
logging.info(f'TRADE {trade.id} WAS ACCEPTED')
def confirm_check(self):
if confirm_settings == 'all':
logging.debug('ACCEPTING EVERYTHING')
for confirmation in self.conf._get_confirmations():
self.conf._send_confirmation(confirmation)
logging.info(f'SENT CONFIRMATION FOR CONF WITH ID OF {confirmation.id}')
elif confirm_settings == 'trade':
for tradeid in self._try_confs:
try:
self.conf.send_trade_allow_request(tradeid)
print(f'[TRADE]: Accepted trade {tradeid}')
logging.info(f'TRADE {tradeid} WAS ACCEPTED (after manual confirmation)')
self._try_confs.remove(tradeid)
except ConfirmationExpected:
logging.debug(f'CONFIRMATION FAILED ON {tradeid}')
class Trade:
"""
    This object mainly stores data about a trade and makes it easy to access. It can also sort
    the currency in the trade and fetch the trade's live status.
    Params: trade_json (dict), other_steamid (str)
Public values: self.trade (dict), self.escrow (int), self.items_to_give (list), self.items_to_receive (list),
self.id (int/str), self.other_steamid (str)
Public functions: sort, status
"""
def __init__(self, trade_json:dict, other_steamid:int):
self.trade = trade_json
self.escrow = int(trade_json['escrow_end_date'])
self.items_to_give = self._items_to_give()
self.items_to_receive = self._items_to_receive()
self.id = trade_json["tradeofferid"]
self.other_steamid = str(other_steamid)
def _items_to_receive(self):
"""
Adds all items to self.items_to_receive as their market name. Should only be used in initialization.
        Params: (self)
Output: item_names (list)
"""
item_names = []
for assetID in self.trade['items_to_receive']:
item_names.append(self.trade['items_to_receive'][assetID]['market_name'])
return item_names
def _items_to_give(self):
"""
Adds all items to self.items_to_give as their market name. Should only be used in initialization.
        Params: (self)
Output: item_names (list)
"""
item_names = []
for assetID in self.trade['items_to_give']:
item_names.append(self.trade['items_to_give'][assetID]['market_name'])
return item_names
def sort(self, typ):
"""
Counts the amount of a currency type there is in one side of the trade. "sort" is sort
of misleading (see what I did there), it just counts how many scraps, recs, ref, keys and
buds there are.
        Params: (self), typ (str)
Output: curr (list)
"""
if typ == 'sell':
return sort(self.items_to_receive)
else:
return sort(self.items_to_give)
def status(self):
"""
Fetches the status of the trade from steam. This way we can get live data.
        Params: (self)
Output: trade_json['trade_offer_state'] (int/str)
"""
try:
trade_json = client.get_trade_offer(self.id)['response']['offer']
except KeyError:
#If key error, the trade doesn't exist anymore. If so, it's invalid
trade_json = {'trade_offer_state':1}
return trade_json['trade_offer_state']
def add_values(v1, v2):
v1_rem, v2_rem = int(str(v1).split('.')[1]), int(str(v2).split('.')[1])
ref = int(v1) + int(v2)
v1_rec, v2_rec = v1_rem // 33, v2_rem // 33
v1_rem, v2_rem = v1_rem - v1_rec * 33, v2_rem - v2_rec * 33
srp_added = v1_rem + v2_rem
v1_rec += srp_added // 33
srp_added -= (srp_added // 33) * 33
rec_added = v1_rec + v2_rec
ref += rec_added // 3
rec_added -= (rec_added // 3) * 3
return float(str(ref) + '.' + str(rec_added*33 + srp_added))
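# Worked examples (added for clarity): metal values are encoded as
# "ref.<rec*33 + scrap*11>", so combining currencies carries over correctly:
#   add_values(0.33, 0.11) -> 0.44   (1 rec + 1 scrap)
#   add_values(0.66, 0.66) -> 1.33   (2 rec + 2 rec = 1 ref 1 rec)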
def sort(items:list):
curr = [0,0,0,0,0]
for item in items:
if item == currencies['scrap']:
curr[0] += 1
elif item == currencies['rec']:
curr[1] += 1
elif item == currencies['ref']:
curr[2] += 1
elif item == currencies['key']:
curr[3] += 1
elif item == currencies['bud']:
curr[4] += 1
return curr
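# Example (added for clarity): sort() returns counts as [scrap, rec, ref, key, bud], e.g.
#   sort(['Refined Metal', 'Scrap Metal', 'Scrap Metal']) -> [2, 0, 1, 0, 0]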
def check_for_updates():
with open('__version__', 'r') as file:
curr_version = file.read()
r = requests.get('https://raw.githubusercontent.com/Zwork101/tf2-trade-bot/master/__version__')
new_version = r.text
if LooseVersion(new_version) > LooseVersion(curr_version):
print('[PROGRAM]: A new version is available, would you like to install?')
yn = input('[y/n]: ')
if yn[0].lower() == 'y':
print('[Installer]: Starting installation...', end='')
bot_update = requests.get('https://raw.githubusercontent.com/Zwork101/tf2-trade-bot/master/bot.py')
with open('__version__', 'w') as file:
file.write(new_version)
print('.', end='')
with open('bot.py', 'w') as file:
file.write(bot_update.text)
print('.')
print('Update complete! Restart now.')
input('press enter to close program...\n')
os._exit(0)
def calculate(scrapC, recC, refC, keyC, budC):
#For each currency, add it using add_values function
total_value = 0.0
for scrap in range(scrapC):
total_value = add_values(total_value, .11)
for rec in range(recC):
total_value = add_values(total_value, .33)
for ref in range(refC):
total_value = add_values(total_value, 1.0)
for key in range(keyC):
total_value = add_values(total_value, float(key_price))
for bud in range(budC):
total_value = add_values(total_value, float(bud_price))
return total_value
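# Example (added for clarity): calculate() folds the currency counts together with
# add_values, so one scrap + one rec + one ref gives:
#   calculate(1, 1, 1, 0, 0) -> 1.44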
def check_install(pkg, c, imp=''):
try:
importlib.import_module(pkg)
print(f'[PROGRAM]: Required package is installed {c}/{len(packages)}')
logging.debug(f"MODULE {pkg} IS INSTALLED")
except:
logging.info(f"MODULE {pkg} IS NOT INSTALLED")
if imp:
pkg = imp
print('[PROGRAM]: A required package is not installed, installing...')
main(['install', pkg])
print('[PROGRAM]: Installed package! Please restart this program to continue.')
input('press enter to close program...\n')
os._exit(0)
# def check_trade(trade_obj, items_value, typ):
# curr = trade_obj.sort(typ)
# value = calculate(curr[0], curr[1], curr[2], curr[3], curr[4])
# if typ == 'sell':
# b_curr = trade_obj.sort('buy')
# items_value += calculate(b_curr[0], b_curr[1], b_curr[2], b_curr[3], b_curr[4])
# else:
# s_curr = trade_obj.sort('sell')
# items_value += calculate(s_curr[0], s_curr[1], s_curr[2], s_curr[3], s_curr[4])
#
# logging.debug(f"TRADE {trade_obj.id} is a {typ} trade, and is worth {value}, with items being {items_value}")
# if typ == 'sell':
# if value >= items_value:
# return True
# else:
# return False
# else:
# if value <= items_value:
# return True
# else:
# return False
def heartbeat():
global past_time
print(f"[HEARTBEAT]: ~{90 - int(time.time() - past_time)} seconds until next heartbeat")
if int(time.time() - past_time) >= 90:
p = requests.post(f"https://backpack.tf/api/aux/heartbeat/v1?", data={"token": token, "automatic": "all"})
if p.status_code != 200:
print(f'[HEARTBEAT]: Error when sending heartbeat: {p.json()["message"]}')
logging.warning(f"ERROR SENDING HEARTBEAT: {p.json()['message']}")
else:
print("[HEARTBEAT]: Sent heartbeat to backpack.tf")
logging.info("HEARTBEAT SENT")
past_time = time.time()
if __name__ == '__main__':
print(start_text)
for pkg in packages:
check_install(pkg, packages.index(pkg)+1, '' if pkg!='backpackpy' else 'backpack.py')
from steampy.client import SteamClient
from steampy import confirmation
from steampy.exceptions import InvalidCredentials, ConfirmationExpected
#from backpackpy import listings
import requests
check_for_updates()
try:
with open('settings.json', 'r') as cfg:
try:
data = json.load(cfg)
try:
apikey, password, username, bkey, accept_escrow = data['apikey'],\
data['password'], data['username'], data['bkey'], data['accept_escrow']
token = requests.get(f"https://backpack.tf/api/aux/token/v1?key={bkey}").json()['token']
decline_trades = data.get('decline_trades', 1)
confirm_settings = data.get('confirm_options', 'trades')
except KeyError as k:
logging.warning(f'SETTINGS FILE MISSING {k} VALUE')
print(f'[settings.json]: Whoops! You are missing the {k} value')
input('Press enter to close program...\n')
os._exit(1)
except json.JSONDecodeError:
logging.warning('INVALID SETTINGS FILE')
                print('[PROGRAM]: Whoops! It would seem that your settings.json file is invalid!')
input('press enter to close program...\n')
os._exit(1)
logging.debug("LOADED SETTINGS")
except FileNotFoundError:
logging.warning("SETTINGS NOT FOUND, CREATING")
print('[PROGRAM]: File settings.json not found! Would you like to make one?')
yn = input('[y/n]: ')
if yn[0].lower() == 'y':
apikey = input('[settings.json]: Enter your steam API key. (https://steamcommunity.com/dev/apikey)\n')
password = input('[settings.json]: Enter your password. \n')
username = input('[settings.json]: Enter your username. \n')
bkey = input('[settings.json]: Enter your backpack.tf API key. (https://backpack.tf/api/register)\n')
accept_escrow = input('[settings.json]: Accept escrow trades? (0 for no, 1 for yes)\n')
print('[PROGRAM]: Writing data to file...')
with open('settings.json', 'w') as file:
json.dump({'apikey':apikey, 'password':password, 'username':username, 'bkey':bkey,
"accept_escrow":accept_escrow}, file)
print('[PROGRAM]: Wrote to file')
else:
print("[PROGRAM]: Can't run without user information.")
input('Press enter to close program...\n')
os._exit(1)
client = SteamClient(apikey)
conf = None
print('[PROGRAM]: Obtaining bud and key values from backpack.tf...')
rJson = requests.get(f'https://backpack.tf/api/IGetCurrencies/v1?key={bkey}').json()['response']
logging.debug(f"KEY VALUE RESPONSE: {rJson}")
if rJson['success']:
key_price = rJson['currencies']['keys']['price']['value']
bud_price = rJson['currencies']['earbuds']['price']['value']
print(f'[PROGRAM]: Obtained values! KEY <{key_price} ref>, BUD <{bud_price} keys>.')
logging.debug("OBTAINED KEY AND BUD VALUES")
else:
logging.fatal("FAILED TO OBTAIN KEY AND BUG VALUES")
print(f'[backpack.tf]: {rJson["message"]}')
input('Press enter to close program...\n')
os._exit(1)
try:
client.login(username, password, '<PASSWORD>')
except json.decoder.JSONDecodeError:
logging.warning("STEAMGUARD FILE INVALID")
print('[steamguard.json]: Unable to read file.')
input('Press enter to close program...\n')
os._exit(1)
except FileNotFoundError:
logging.warning("UNABLE TO FIND STEAMGAURD FILE")
print('[steamguard.json]: Unable to find file.')
input('Press enter to close program...\n')
os._exit(1)
except InvalidCredentials:
logging.info("CREDENTIALS INVALID")
print('[PROGRAM]: Your username, password, ID and/or secrets are invalid.')
input('Press enter to close program...\n')
os._exit(1)
else:
conf = confirmation.ConfirmationExecutor(
client.steam_guard['identity_secret'],
client.steam_guard['steamid'],
client._session)
logging.info("CREATED CLIENT AND CONFIRMATION MANAGER")
print(f'[PROGRAM]: Connected to steam! Logged in as {username}')
try:
with open('trade.csv', 'r') as file:
reader = csv.DictReader(file)
count = 1
fails = []
for row in reader:
count += 1
try:
if row['type'].strip()[0].lower() == 's':
p = row['price'].split('.')
p = [int(i) for i in p]
price = calculate(p[0], p[1], p[2], p[3], p[4])
sell_trades[row['item_name'].strip().replace("$$", ",")] = price
elif row['type'].strip()[0].lower() == 'b':
p = row['price'].split('.')
p = [int(i) for i in p]
price = calculate(p[0], p[1], p[2], p[3], p[4])
buy_trades[row['item_name'].strip().replace("$$", ",")] = price
except AttributeError:
fails.append(count)
logging.info(f'LOADED TRADE DATA: BUY: {buy_trades} SELL: {sell_trades}')
except FileNotFoundError:
logging.warning("TRADE FILE NOT FOUND")
print('[trade.data]: Unable to find file.')
input('Press enter to close program...\n')
os._exit(1)
print(f'[CSV]: Failed to load these lines: {fails}')
print('[PROGRAM]: Finished loading trading data.')
# yn = input("Would you like to sync to backpack.tf listings?\n[y/n]: ")
# if yn[0].lower() == 'y':
# steamid = client.steam_guard['steamid']
# steam_inv = requests.get(f'http://steamcommunity.com/inventory/{steamid}/440/2?l=english&count=5000').json()
# bp_listings = requests.get("https://backpack.tf/api/classifieds/listings/v1?", data={'token':token}).json()
# class_id = False
# for classified in bp_listings["listings"]:
# asset_id = classified['id']
# for item in steam_inv['assets']:
# if item['assetid'] == classified['id']:
# class_id = item['classid']
# if class_id:
# for item in steam_inv['descriptions']:
# if item['classid'] == class_id:
# market_name = item['market_name']
# market_type = classified['intent']
# ref, keys = classified['currencies']['metal'], classified['currencies']['keys']
# sep = str(ref).split('.')
# if len(sep) == 2:
# price = calculate(int(sep[0])/11, 0, int(sep[0]), keys, 0)
# else:
# price = calculate(0, 0, int(ref), keys, 0)
# if market_type:
# sell_trades[market_name] = price
# else:
# buy_trades[market_name] = price
# print(buy_trades)
# print(sell_trades)
# os._exit(0)
try:
with open('whitelist.data', 'r') as file:
steam_ids = file.read()
if steam_ids:
for steam_id in steam_ids.split(','):
whitelist.append(steam_id)
print(f'[WHITELIST]: Whitelist created with the following ids: {whitelist}')
logging.info(f"LOADED WHITELIST: {whitelist}")
except FileNotFoundError:
logging.debug("WHITELIST NOT FOUND")
print('[PROGRAM]: Everything ready, starting trading.')
print('[PROGRAM]: Press Ctrl+C to close at any time.')
manager = TradeManager(client, conf)
while True:
if time.time() - start_time >= 3600:
pass
#subprocess.call(["python", os.path.join(sys.path[0], __file__)] + sys.argv[1:])
try:
heartbeat()
try:
manager.get_new_trades()
print('[TRADE-MANAGER] STEP 1 (get new trades) COMPLETE')
logging.debug("(STEP 1 COMPLETE)")
except json.decoder.JSONDecodeError:
print("[PROGRAM]: Unexpected error, taking a break (10 seconds).")
time.sleep(10)
print('Starting again...')
continue
manager.check_trades_content()
print('[TRADE-MANAGER]: STEP 2 (check new trades) COMPLETE')
logging.debug("(STEP 2 COMPLETE)")
manager.check_bad_trades()
print('[TRADE-MANAGER]: STEP 3 (check for trades gone bad) COMPLETE')
logging.debug("(STEP 3 COMPLETE)")
manager.check_good_trades()
print('[TRADE-MANAGER]: STEP 4 (check for successful trades) COMPLETE')
logging.debug("(STEP 4 COMPLETE)")
manager.confirm_check()
print('[TRADE-MANAGER]: STEP 5 (check confirmations) COMPLETE')
logging.debug("(STEP 5 COMPLETE)")
print('[PROGRAM]: Cooling down... (10)')
except InterruptedError:
os._exit(0)
except BaseException as BE:
print(f'[ERROR]: {type(BE).__name__}: {BE}')
logging.warning(f"UNEXPECTED ERROR: {type(BE).__name__}: {BE}")
time.sleep(10)
|
84420
|
import unittest.mock
from programy.clients.config import ClientConfigurationData
from programy.clients.events.client import EventBotClient
from programytest.clients.arguments import MockArgumentParser
class MockEventBotClient(EventBotClient):
def __init__(self, id, argument_parser=None):
EventBotClient.__init__(self, id, argument_parser)
def get_client_configuration(self):
return ClientConfigurationData("events")
def load_license_keys(self):
pass
class MockRunningEventBotClient(EventBotClient):
def __init__(self, id, argument_parser=None):
EventBotClient.__init__(self, id, argument_parser)
self.prior = False
self.ran = False
self.post = False
def get_client_configuration(self):
return ClientConfigurationData("events")
def load_license_keys(self):
pass
def prior_to_run_loop(self):
self.prior = True
def wait_and_answer(self):
self.ran = True
def post_run_loop(self):
self.post = True
class EventBotClientTests(unittest.TestCase):
def test_init_raw(self):
arguments = MockArgumentParser()
with self.assertRaises(NotImplementedError):
client = EventBotClient("testevents", arguments)
def test_init_actual(self):
arguments = MockArgumentParser()
client = MockEventBotClient("testevents", arguments)
self.assertIsNotNone(client)
with self.assertRaises(NotImplementedError):
client.wait_and_answer()
def test_init_running(self):
arguments = MockArgumentParser()
client = MockRunningEventBotClient("testevents", arguments)
self.assertIsNotNone(client)
client.run()
self.assertTrue(client.prior)
self.assertTrue(client.ran)
self.assertTrue(client.post)
|
84422
|
import torch
from torch import nn as nn
from basicsr.archs.arch_util import ResidualBlockNoBN, Upsample, make_layer
from basicsr.utils.registry import ARCH_REGISTRY
@ARCH_REGISTRY.register()
class EDSR(nn.Module):
"""EDSR network structure.
Paper: Enhanced Deep Residual Networks for Single Image Super-Resolution.
Ref git repo: https://github.com/thstkdgus35/EDSR-PyTorch
Args:
num_in_ch (int): Channel number of inputs.
num_out_ch (int): Channel number of outputs.
num_feat (int): Channel number of intermediate features.
Default: 64.
num_block (int): Block number in the trunk network. Default: 16.
upscale (int): Upsampling factor. Support 2^n and 3.
Default: 4.
res_scale (float): Used to scale the residual in residual block.
Default: 1.
img_range (float): Image range. Default: 255.
rgb_mean (tuple[float]): Image mean in RGB orders.
Default: (0.4488, 0.4371, 0.4040), calculated from DIV2K dataset.
"""
def __init__(self,
num_in_ch,
num_out_ch,
num_feat=64,
num_block=16,
upscale=4,
res_scale=1,
img_range=255.,
rgb_mean=(0.4488, 0.4371, 0.4040)):
super(EDSR, self).__init__()
self.img_range = img_range
self.mean = torch.Tensor(rgb_mean).view(1, 3, 1, 1)
self.conv_first = nn.Conv2d(num_in_ch, num_feat, 3, 1, 1)
self.body = make_layer(ResidualBlockNoBN, num_block, num_feat=num_feat, res_scale=res_scale, pytorch_init=True)
self.conv_after_body = nn.Conv2d(num_feat, num_feat, 3, 1, 1)
self.upsample = Upsample(upscale, num_feat)
self.conv_last = nn.Conv2d(num_feat, num_out_ch, 3, 1, 1)
def forward(self, x):
self.mean = self.mean.type_as(x)
x = (x - self.mean) * self.img_range
x = self.conv_first(x)
res = self.conv_after_body(self.body(x))
res += x
x = self.conv_last(self.upsample(res))
x = x / self.img_range + self.mean
return x
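# --- Added smoke-test sketch (not part of basicsr) ----------------------------
# Quick illustration of the expected tensor shapes; the 32x32 patch size below
# is an arbitrary assumption.
def _edsr_smoke_test():
    model = EDSR(num_in_ch=3, num_out_ch=3, num_feat=64, num_block=16, upscale=4)
    lr = torch.randn(1, 3, 32, 32)  # one random 32x32 RGB "low-resolution" patch
    with torch.no_grad():
        sr = model(lr)
    # With upscale=4 the spatial size grows 4x: sr.shape == (1, 3, 128, 128)
    return sr.shape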
|
84445
|
import os
NORMALIZATION_COEF = 7
PLAYER_CIRCLE_SIZE = 12 / NORMALIZATION_COEF
INTERVAL = 10
DIFF = 6
X_MIN = 0
X_MAX = 100
Y_MIN = 0
Y_MAX = 50
COL_WIDTH = 0.3
SCALE = 1.65
FONTSIZE = 6
X_CENTER = X_MAX / 2 - DIFF / 1.5 + 0.10
Y_CENTER = Y_MAX - DIFF / 1.5 - 0.35
BALL_COLOR = "#ff8c00"
(COURT_WIDTH, COURT_LENGTH) = (50, 94)
TEAM_ID2PROPS = {
1610612737: {"color": "#E13A3E", "abbreviation": "ATL"},
1610612738: {"color": "#008348", "abbreviation": "BOS"},
1610612751: {"color": "#061922", "abbreviation": "BKN"},
1610612766: {"color": "#1D1160", "abbreviation": "CHA"},
1610612741: {"color": "#CE1141", "abbreviation": "CHI"},
1610612739: {"color": "#860038", "abbreviation": "CLE"},
1610612742: {"color": "#007DC5", "abbreviation": "DAL"},
1610612743: {"color": "#4D90CD", "abbreviation": "DEN"},
1610612765: {"color": "#006BB6", "abbreviation": "DET"},
1610612744: {"color": "#FDB927", "abbreviation": "GSW"},
1610612745: {"color": "#CE1141", "abbreviation": "HOU"},
1610612754: {"color": "#00275D", "abbreviation": "IND"},
1610612746: {"color": "#ED174C", "abbreviation": "LAC"},
1610612747: {"color": "#552582", "abbreviation": "LAL"},
1610612763: {"color": "#0F586C", "abbreviation": "MEM"},
1610612748: {"color": "#98002E", "abbreviation": "MIA"},
1610612749: {"color": "#00471B", "abbreviation": "MIL"},
1610612750: {"color": "#005083", "abbreviation": "MIN"},
1610612740: {"color": "#002B5C", "abbreviation": "NOP"},
1610612752: {"color": "#006BB6", "abbreviation": "NYK"},
1610612760: {"color": "#007DC3", "abbreviation": "OKC"},
1610612753: {"color": "#007DC5", "abbreviation": "ORL"},
1610612755: {"color": "#006BB6", "abbreviation": "PHI"},
1610612756: {"color": "#1D1160", "abbreviation": "PHX"},
1610612757: {"color": "#E03A3E", "abbreviation": "POR"},
1610612758: {"color": "#724C9F", "abbreviation": "SAC"},
1610612759: {"color": "#BAC3C9", "abbreviation": "SAS"},
1610612761: {"color": "#CE1141", "abbreviation": "TOR"},
1610612762: {"color": "#00471B", "abbreviation": "UTA"},
1610612764: {"color": "#002B5C", "abbreviation": "WAS"},
}
EVENTS_DIR = os.environ["EVENTS_DIR"]
TRACKING_DIR = os.environ["TRACKING_DIR"]
GAMES_DIR = os.environ["GAMES_DIR"]
DATA_DIR = os.environ["DATA_DIR"]
EXPERIMENTS_DIR = os.environ["EXPERIMENTS_DIR"]
|
84447
|
from microbit import *
import neopixel
np = neopixel.NeoPixel(pin1, 32)
def np_rainbow(np, num, bright=32, offset = 0):
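    """Fill `num` pixels with a rainbow: each pixel's colour is linearly
    interpolated between neighbouring anchor colours in `rb`, scaled by
    `bright`/256 and rotated along the strip by `offset` positions."""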
rb = ((255,0,0), (255,127,0), (255,255,0), (0,255,0), (0,255,255),(0,0,255),(136,0,255), (255,0,0))
for i in range(num):
t = 7*i/num
t0 = int(t)
r = round((rb[t0][0] + (t-t0)*(rb[t0+1][0]-rb[t0][0]))*bright)>>8
g = round((rb[t0][1] + (t-t0)*(rb[t0+1][1]-rb[t0][1]))*bright)>>8
b = round((rb[t0][2] + (t-t0)*(rb[t0+1][2]-rb[t0][2]))*bright)>>8
np[(i+offset)%num] = (r, g, b)
np_rainbow(np, 32, bright=16, offset=0)
np.show()
|
84470
|
from __future__ import annotations
import ast
from typing import (
TYPE_CHECKING,
Any,
Awaitable,
Callable,
List,
Optional,
Type,
Union,
cast,
)
from ....utils.logging import LoggingDescriptor
from ....utils.uri import Uri
from ...common.language import language_id
from ...common.text_document import TextDocument
from ...common.types import Location, LocationLink, Position
from ..utils.ast import (
HasTokens,
Token,
get_nodes_at_position,
get_tokens_at_position,
range_from_token,
range_from_token_or_node,
tokenize_variables,
)
if TYPE_CHECKING:
from ..protocol import RobotLanguageServerProtocol
from .model_helper import ModelHelperMixin
from .protocol_part import RobotLanguageServerProtocolPart
_DefinitionMethod = Callable[
[ast.AST, TextDocument, Position],
Awaitable[Union[Location, List[Location], List[LocationLink], None]],
]
class RobotGotoProtocolPart(RobotLanguageServerProtocolPart, ModelHelperMixin):
_logger = LoggingDescriptor()
def __init__(self, parent: RobotLanguageServerProtocol) -> None:
super().__init__(parent)
parent.definition.collect.add(self.collect)
parent.implementation.collect.add(self.collect)
def _find_method(self, cls: Type[Any]) -> Optional[_DefinitionMethod]:
if cls is ast.AST:
return None
method_name = "definition_" + cls.__name__
if hasattr(self, method_name):
method = getattr(self, method_name)
if callable(method):
return cast(_DefinitionMethod, method)
for base in cls.__bases__:
method = self._find_method(base)
if method:
return cast(_DefinitionMethod, method)
return None
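    # Illustrative note (added): for a robot.parsing KeywordCall node, _find_method first looks
    # for a handler named "definition_KeywordCall"; if a node class has no dedicated handler,
    # the walk over __bases__ falls back to a handler registered for one of its base classes,
    # and collect() finally falls back to the generic variable lookup in _definition_default.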
@language_id("robotframework")
async def collect(
self, sender: Any, document: TextDocument, position: Position
) -> Union[Location, List[Location], List[LocationLink], None]:
result_nodes = await get_nodes_at_position(await self.parent.documents_cache.get_model(document), position)
if not result_nodes:
return None
result_node = result_nodes[-1]
if result_node is None:
return None
method = self._find_method(type(result_node))
if method is not None:
result = await method(result_node, document, position)
if result is not None:
return result
return await self._definition_default(result_nodes, document, position)
async def _definition_default(
self, nodes: List[ast.AST], document: TextDocument, position: Position
) -> Union[Location, List[Location], List[LocationLink], None]:
from robot.api.parsing import Token as RobotToken
namespace = await self.parent.documents_cache.get_namespace(document)
if namespace is None:
return None
if not nodes:
return None
node = nodes[-1]
if not isinstance(node, HasTokens):
return None
tokens = get_tokens_at_position(node, position)
for token in tokens:
try:
for sub_token in filter(
lambda s: s.type == RobotToken.VARIABLE, tokenize_variables(token, ignore_errors=True)
):
range = range_from_token(sub_token)
if position.is_in_range(range):
variable = await namespace.find_variable(sub_token.value, nodes, position)
if variable is not None and variable.source:
return [
LocationLink(
origin_selection_range=range_from_token_or_node(node, sub_token),
target_uri=str(Uri.from_path(variable.source)),
target_range=variable.range(),
target_selection_range=range_from_token(variable.name_token)
if variable.name_token
else variable.range(),
)
]
except BaseException:
pass
return None
async def definition_KeywordCall( # noqa: N802
self, node: ast.AST, document: TextDocument, position: Position
) -> Union[Location, List[Location], List[LocationLink], None]:
from robot.parsing.lexer.tokens import Token as RobotToken
from robot.parsing.model.statements import KeywordCall
namespace = await self.parent.documents_cache.get_namespace(document)
if namespace is None:
return None
kw_node = cast(KeywordCall, node)
result = await self.get_keyworddoc_and_token_from_position(
kw_node.keyword,
cast(Token, kw_node.get_token(RobotToken.KEYWORD)),
[cast(Token, t) for t in kw_node.get_tokens(RobotToken.ARGUMENT)],
namespace,
position,
)
if result is not None and result[0] is not None:
source = result[0].source
if source is not None:
return [
LocationLink(
origin_selection_range=range_from_token_or_node(node, result[1]),
target_uri=str(Uri.from_path(source)),
target_range=result[0].range,
target_selection_range=result[0].range,
)
]
return None
async def definition_Fixture( # noqa: N802
self, node: ast.AST, document: TextDocument, position: Position
) -> Union[Location, List[Location], List[LocationLink], None]:
from robot.parsing.lexer.tokens import Token as RobotToken
from robot.parsing.model.statements import Fixture
namespace = await self.parent.documents_cache.get_namespace(document)
if namespace is None:
return None
fixture_node = cast(Fixture, node)
result = await self.get_keyworddoc_and_token_from_position(
fixture_node.name,
cast(Token, fixture_node.get_token(RobotToken.NAME)),
[cast(Token, t) for t in fixture_node.get_tokens(RobotToken.ARGUMENT)],
namespace,
position,
)
if result is not None and result[0] is not None:
source = result[0].source
if source is not None:
return [
LocationLink(
origin_selection_range=range_from_token_or_node(node, result[1]),
target_uri=str(Uri.from_path(source)),
target_range=result[0].range,
target_selection_range=result[0].range,
)
]
return None
async def _definition_Template_or_TestTemplate( # noqa: N802
self, node: ast.AST, document: TextDocument, position: Position
) -> Union[Location, List[Location], List[LocationLink], None]:
from robot.parsing.lexer.tokens import Token as RobotToken
from robot.parsing.model.statements import Template, TestTemplate
node = cast(Union[Template, TestTemplate], node)
if node.value:
keyword_token = cast(RobotToken, node.get_token(RobotToken.NAME))
if keyword_token is None:
return None
if position.is_in_range(range_from_token(keyword_token)):
namespace = await self.parent.documents_cache.get_namespace(document)
if namespace is None:
return None
result = await namespace.find_keyword(node.value)
if result is not None and result.source is not None:
return [
LocationLink(
origin_selection_range=range_from_token_or_node(node, keyword_token),
target_uri=str(Uri.from_path(result.source)),
target_range=result.range,
target_selection_range=result.range,
)
]
return None
async def definition_TestTemplate( # noqa: N802
self, node: ast.AST, document: TextDocument, position: Position
) -> Union[Location, List[Location], List[LocationLink], None]:
return await self._definition_Template_or_TestTemplate(node, document, position)
async def definition_Template( # noqa: N802
self, node: ast.AST, document: TextDocument, position: Position
) -> Union[Location, List[Location], List[LocationLink], None]:
return await self._definition_Template_or_TestTemplate(node, document, position)
async def definition_LibraryImport( # noqa: N802
self, node: ast.AST, document: TextDocument, position: Position
) -> Union[Location, List[Location], List[LocationLink], None]:
from robot.parsing.lexer.tokens import Token as RobotToken
from robot.parsing.model.statements import LibraryImport
library_node = cast(LibraryImport, node)
if library_node.name:
name_token = cast(RobotToken, library_node.get_token(RobotToken.NAME))
if name_token is None:
return None
if position.is_in_range(range_from_token(name_token)):
namespace = await self.parent.documents_cache.get_namespace(document)
if namespace is None:
return None
try:
libdoc = await namespace.imports_manager.get_libdoc_for_library_import(
library_node.name, library_node.args, str(document.uri.to_path().parent)
)
python_source = libdoc.source_or_origin
if python_source is not None:
return [
LocationLink(
origin_selection_range=range_from_token_or_node(library_node, name_token),
target_uri=str(Uri.from_path(python_source)),
target_range=libdoc.range,
target_selection_range=libdoc.range,
)
]
except BaseException:
pass
return None
async def definition_ResourceImport( # noqa: N802
self, node: ast.AST, document: TextDocument, position: Position
) -> Union[Location, List[Location], List[LocationLink], None]:
from robot.parsing.lexer.tokens import Token as RobotToken
from robot.parsing.model.statements import ResourceImport
resource_node = cast(ResourceImport, node)
if resource_node.name:
name_token = cast(RobotToken, resource_node.get_token(RobotToken.NAME))
if name_token is None:
return None
if position.is_in_range(range_from_token(name_token)):
namespace = await self.parent.documents_cache.get_namespace(document)
if namespace is None:
return None
try:
libdoc = await namespace.imports_manager.get_libdoc_for_resource_import(
resource_node.name, str(document.uri.to_path().parent)
)
python_source = libdoc.source_or_origin
if python_source is not None:
return [
LocationLink(
origin_selection_range=range_from_token_or_node(resource_node, name_token),
target_uri=str(Uri.from_path(python_source)),
target_range=libdoc.range,
target_selection_range=libdoc.range,
)
]
except BaseException:
pass
return None
|
84478
|
import tensorflow as tf
def saturation(img):
mean = tf.keras.backend.mean(img, axis=-1, keepdims = True)
mul = tf.constant([1,1,1,3], tf.int32)
mean = tf.tile(mean, mul)
img = tf.math.subtract(img, mean)
sat = tf.einsum('aijk,aijk->aij', img, img)
sat = tf.math.scalar_mul((1.0/3.0),sat)
sat = tf.math.add(sat, tf.math.scalar_mul(1e-7, tf.ones_like(sat)))
sat = tf.math.sqrt(sat)
return sat
def get_exp(img,c):
#cimg = tf.slice(img,[0,0,0,c],[img.get_shape()[0],img.get_shape()[1],img.get_shape()[2],1])
cimg = tf.squeeze(img,axis=-1)
m = tf.math.scalar_mul(0.5, tf.ones_like(cimg))
cimg = tf.math.subtract(cimg,m)
cimg = tf.math.multiply(cimg,cimg)
cimg = tf.math.scalar_mul(-12.5,cimg)
return cimg
def exposure(img):
rimg, gimg, bimg = tf.split(img, num_or_size_splits=3, axis=-1)
rimg = get_exp(rimg,0)
gimg = get_exp(gimg,1)
bimg = get_exp(bimg,2)
img = tf.math.add(rimg,gimg)
img = tf.math.add(img,bimg)
exp = tf.math.exp(img)
return exp
def contrast(img):
mean = tf.keras.backend.mean(img, axis=-1, keepdims=True)
lap_fil = [[0.0,-1.0,0.0],[-1.0,4.0,-1.0],[0.0,-1.0,0.0]]
lap_fil = tf.expand_dims(lap_fil,-1)
lap_fil = tf.expand_dims(lap_fil,-1)
con = tf.nn.convolution(mean, lap_fil, padding='SAME')
con = tf.math.abs(con)
con = tf.squeeze(con,axis=-1)
return con
def exp_map(img,pc,ps,pe):
con = contrast(img)
sat = saturation(img)
exp = exposure(img)
if pc!=1 or pe!=1 or ps!=1:
pc = tf.math.scalar_mul(pc, tf.ones_like(con))
ps = tf.math.scalar_mul(ps, tf.ones_like(con))
pe = tf.math.scalar_mul(pe, tf.ones_like(con))
        # raise each measure to its own exponent (pc: contrast, ps: saturation, pe: exposure)
        con = tf.math.pow(con, pc)
        sat = tf.math.pow(sat, ps)
        exp = tf.math.pow(exp, pe)
wt_map = tf.math.multiply(con,sat)
wt_map = tf.math.multiply(wt_map,exp)
return wt_map
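# Usage sketch for the weight map above (an assumption-laden example: inputs are float
# RGB batches in [0, 1] with shape [batch, height, width, 3], and unit exponents
# pc = ps = pe = 1 leave the three quality measures unweighted).
if __name__ == "__main__":
    batch = tf.random.uniform((2, 64, 64, 3))
    weights = exp_map(batch, 1, 1, 1)
    print(weights.shape)  # expected: (2, 64, 64), one weight per pixel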
|
84513
|
import os
import numpy as np
import torch
import torch.nn.functional as F
from tqdm import tqdm
from test import test
import torchvision
class TrainLoop(object):
def __init__(self, model, optimizer, source_loader, test_source_loader, target_loader, patience, l2, penalty_weight, penalty_anneal_epochs, checkpoint_path=None, checkpoint_epoch=None, cuda=True, logging=False):
if checkpoint_path is None:
# Save to current directory
self.checkpoint_path = os.getcwd()
else:
self.checkpoint_path = checkpoint_path
if not os.path.isdir(self.checkpoint_path):
os.mkdir(self.checkpoint_path)
self.save_epoch_fmt = os.path.join(self.checkpoint_path, 'IRM_{}ep.pt')
self.cuda_mode = cuda
self.model = model
self.device = next(self.model.parameters()).device
self.optimizer = optimizer
self.scheduler = torch.optim.lr_scheduler.StepLR(self.optimizer, step_size=patience)
self.source_loader = source_loader
self.test_source_loader = test_source_loader
self.target_loader = target_loader
self.history = {'loss': [], 'accuracy_source':[], 'accuracy_target':[]}
self.cur_epoch = 0
self.dummy = torch.tensor(1.).to(self.device).requires_grad_()
self.l2 = l2
self.penalty_weight = penalty_weight
self.penalty_anneal_epochs = penalty_anneal_epochs
self.total_iter = 0
if checkpoint_epoch is not None:
self.load_checkpoint(checkpoint_epoch)
self.logging = logging
if self.logging:
from torch.utils.tensorboard import SummaryWriter
self.writer = SummaryWriter()
def train(self, n_epochs=1, save_every=1):
while self.cur_epoch < n_epochs:
print('Epoch {}/{}'.format(self.cur_epoch + 1, n_epochs))
cur_loss = 0
source_iter = tqdm(enumerate(self.source_loader))
for t, batch in source_iter:
loss_it = self.train_step(batch)
self.total_iter += 1
cur_loss += loss_it
self.history['loss'].append(cur_loss/(t+1))
print('Current loss: {}.'.format(cur_loss/(t+1)))
print('Current LR: {}'.format(self.optimizer.state_dict()['param_groups'][0]['lr']))
if self.logging:
                self.writer.add_scalar('train/loss', cur_loss / (t + 1), self.total_iter)
                self.writer.add_scalar('misc/LR', self.optimizer.param_groups[0]['lr'], self.total_iter)
self.history['accuracy_source'].append(test(self.test_source_loader, self.model, self.device, source_target = 'source', epoch = self.cur_epoch, tb_writer = self.writer if self.logging else None))
self.history['accuracy_target'].append(test(self.target_loader, self.model, self.device, source_target = 'target', epoch = self.cur_epoch, tb_writer = self.writer if self.logging else None))
print('Valid. on SOURCE data - Current acc., best acc., and epoch: {:0.4f}, {:0.4f}, {}'.format(self.history['accuracy_source'][-1], np.max(self.history['accuracy_source']), 1+np.argmax(self.history['accuracy_source'])))
print('Valid. on TARGET data - Current acc., best acc., and epoch: {:0.4f}, {:0.4f}, {}'.format(self.history['accuracy_target'][-1], np.max(self.history['accuracy_target']), 1+np.argmax(self.history['accuracy_target'])))
if self.cur_epoch % save_every == 0 or self.history['accuracy_target'][-1] > np.max([-np.inf]+self.history['accuracy_target'][:-1]):
self.checkpointing()
self.cur_epoch += 1
self.scheduler.step()
# saving final models
print('Saving final model...')
self.checkpointing()
return 1. - np.max(self.history['accuracy_target'])
def train_step(self, batch):
self.model.train()
loss_acc = 0
penalty = 0
for domain in range(3):
x = batch[domain].to(self.device)
y_task = batch[domain+3].to(self.device)
out = self.model(x)
loss_current = torch.nn.CrossEntropyLoss()(out*self.dummy, y_task)
penalty += self.penalty(loss_current, self.dummy)
loss_acc += loss_current
weight_norm = torch.tensor(0.).to(self.device)
for w in self.model.parameters():
weight_norm += w.norm().pow(2)
loss = loss_acc / 3
#penalty = penalty / 3
loss += self.l2 * weight_norm
penalty_weight = (self.penalty_weight if self.cur_epoch >= self.penalty_anneal_epochs else 1.0)
loss += penalty_weight * penalty
if penalty_weight > 1.0:
# Rescale the entire loss to keep gradients in a reasonable range
loss /= penalty_weight
self.optimizer.zero_grad()
loss.backward()
self.optimizer.step()
return loss.item()
def checkpointing(self):
# Checkpointing
print('Checkpointing...')
ckpt = {'model_state': self.model.state_dict(),
'history': self.history,
'cur_epoch': self.cur_epoch,
'optimizer_state': self.optimizer.state_dict(),
'scheduler_state': self.scheduler.state_dict()}
torch.save(ckpt, self.save_epoch_fmt.format(self.cur_epoch))
def load_checkpoint(self, epoch):
        ckpt = self.save_epoch_fmt.format(epoch)
if os.path.isfile(ckpt):
ckpt = torch.load(ckpt)
# Load model state
self.model.load_state_dict(ckpt['model_state'])
# Load optimizer state
self.optimizer.load_state_dict(ckpt['optimizer_state'])
# Load scheduler state
self.scheduler.load_state_dict(ckpt['scheduler_state'])
# Load history
self.history = ckpt['history']
self.cur_epoch = ckpt['cur_epoch']
else:
print('No checkpoint found at: {}'.format(ckpt))
def print_grad_norms(self, model):
norm = 0.0
for params in list(filter(lambda p: p.grad is not None, model.parameters())):
norm += params.grad.norm(2).item()
print('Sum of grads norms: {}'.format(norm))
def penalty(self, loss, dummy):
grad = torch.autograd.grad(loss, [dummy], create_graph=True)[0]
return torch.sum(grad**2)
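# Minimal sketch of the IRM penalty used in train_step (assumption: toy tensors stand in
# for a real batch). Scaling the logits by a dummy multiplier fixed at 1.0 and taking the
# squared gradient of the loss w.r.t. that multiplier yields the penalty term.
if __name__ == "__main__":
    torch.manual_seed(0)
    logits = torch.randn(8, 5)
    labels = torch.randint(0, 5, (8,))
    dummy = torch.tensor(1.).requires_grad_()
    toy_loss = torch.nn.CrossEntropyLoss()(logits * dummy, labels)
    grad = torch.autograd.grad(toy_loss, [dummy], create_graph=True)[0]
    print('IRM penalty:', torch.sum(grad ** 2).item())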
|
84554
|
import json
import pytest
from indy import ledger, error
@pytest.mark.asyncio
async def test_build_attrib_request_works_for_raw_value():
identifier = "Th7MpTaRZVRYnPiabds81Y"
destination = "Th7MpTaRZVRYnPiabds81Y"
raw = '{"endpoint":{"ha":"127.0.0.1:5555"}}'
expected_response = {
"identifier": identifier,
"operation": {
"type": "100",
"dest": destination,
"raw": raw
}
}
response = json.loads(await ledger.build_attrib_request(identifier, destination, None, raw, None))
assert expected_response.items() <= response.items()
@pytest.mark.asyncio
async def test_build_attrib_request_works_for_hash_value():
identifier = "Th7MpTaRZVRYnPiabds81Y"
destination = "Th7MpTaRZVRYnPiabds81Y"
xhash = "83d907821df1c87db829e96569a11f6fc2e7880acba5e43d07ab786959e13bd3"
expected_response = {
"identifier": identifier,
"operation": {
"type": "100",
"dest": destination,
"hash": xhash
}
}
response = json.loads(await ledger.build_attrib_request(identifier, destination, xhash, None, None))
assert expected_response.items() <= response.items()
@pytest.mark.asyncio
async def test_build_attrib_request_works_for_enc_value():
identifier = "Th7MpTaRZVRYnPiabds81Y"
destination = "Th7MpTaRZVRYnPiabds81Y"
enc = "aa3f41f619aa7e5e6b6d0de555e05331787f9bf9aa672b94b57ab65b9b66c3ea960b18a98e3834b1fc6cebf49f463b81fd6e3181"
expected_response = {
"identifier": identifier,
"operation": {
"type": "100",
"dest": destination,
"enc": enc
}
}
response = json.loads(await ledger.build_attrib_request(identifier, destination, None, None, enc))
assert expected_response.items() <= response.items()
@pytest.mark.asyncio
async def test_build_attrib_request_works_for_missed_attribute():
identifier = "Th7MpTaRZVRYnPiabds81Y"
destination = "Th7MpTaRZVRYnPiabds81Y"
with pytest.raises(error.CommonInvalidStructure):
await ledger.build_attrib_request(identifier, destination, None, None, None)
|
84559
|
import warnings
from typing import Tuple, Callable, Any, List
import numpy as np
from nebullvm.base import QuantizationType
from nebullvm.inference_learners.base import BaseInferenceLearner
from nebullvm.measure import compute_relative_difference
def check_precision(
optimized_learner: BaseInferenceLearner,
input_data: List[Tuple[Any, ...]],
base_outputs_list: List[Tuple[Any, ...]],
perf_loss_ths: float,
metric_func: Callable = None,
ys: List = None,
aggregation_func: Callable = np.mean,
) -> bool:
metric_func = metric_func or compute_relative_difference
relative_differences = []
if ys is None:
ys = [None] * len(input_data)
for inputs, base_outputs, y in zip(input_data, base_outputs_list, ys):
opt_outputs = optimized_learner(*inputs)
relative_difference = max(
metric_func(base_output, opt_output, y)
for base_output, opt_output in zip(base_outputs, opt_outputs)
)
relative_differences.append(relative_difference)
relative_difference = aggregation_func(relative_differences)
return relative_difference <= perf_loss_ths
def check_quantization(
quantization_type: QuantizationType, perf_loss_ths: float
):
if quantization_type is None and perf_loss_ths is not None:
raise ValueError(
"When a quantization threshold is given it is necessary to "
"specify the quantization algorithm too."
)
if quantization_type is not None and perf_loss_ths is None:
warnings.warn(
"Got a valid quantization type without any given quantization "
"threshold. The quantization step will be ignored."
)
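# Small usage sketch of the guard above (it assumes only that this module imports cleanly).
# A precision threshold without a quantization algorithm is rejected, while the reverse
# case merely emits a warning, as implemented in check_quantization.
if __name__ == "__main__":
    try:
        check_quantization(None, 0.1)
    except ValueError as err:
        print("rejected:", err)
    check_quantization(None, None)  # nothing given, nothing to check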
|
84560
|
import unittest
import numpy as np
from .bvgamma import *
from flavio.physics.bdecays.formfactors.b_v import bsz_parameters
from flavio.physics.eft import WilsonCoefficients
from flavio.physics.bdecays.wilsoncoefficients import wctot_dict
from flavio.parameters import default_parameters
import flavio
wc = WilsonCoefficients()
par = default_parameters
class TestBVgamma(unittest.TestCase):
def test_bksgamma(self):
# just check if this works
flavio.Observable["ACP(B0->K*gamma)"].prediction_central(par, wc)
flavio.Observable["S_K*gamma"].prediction_central(par, wc)
# numerical comparison to David's old Mathematica code
self.assertAlmostEqual(
flavio.Observable["BR(B0->K*gamma)"].prediction_central(par, wc)*1e5/3.91526,
1, places=1)
self.assertAlmostEqual(
flavio.Observable["BR(B+->K*gamma)"].prediction_central(par, wc)*1e5/4.11625,
1, places=1)
    def test_bsphigamma(self):
# just check if this works
flavio.Observable["ACP(Bs->phigamma)"].prediction_central(par, wc)
flavio.Observable["S_phigamma"].prediction_central(par, wc)
flavio.Observable["BR(Bs->phigamma)"].prediction_central(par, wc)
flavio.Observable["ADeltaGamma(Bs->phigamma)"].prediction_central(par, wc)
|
84565
|
from midca import goals, base
from midca import midcatime
from ._goalgen import tf_fire
from ._goalgen import tf_3_scen
from midca.domains.logistics import deliverstate
from midca.domains.blocksworld import blockstate
from midca.worldsim import stateread
import copy,csv
import random
from midca.modules.monitors import Monitor
class TFFire(base.BaseModule):
'''
    MIDCA module that generates goals to put out fires using Michael Maynord's
    TF-Trees. The behavior is as follows: if any fires exist, a single goal will
    be generated to put out a fire on some block that is currently burning.
    Otherwise no goal will be generated.
'''
def __init__(self):
self.tree = tf_fire.Tree()
def fireGoalExists(self):
graph = self.mem.get(self.mem.GOAL_GRAPH)
for goal in graph.getAllGoals():
if goal['predicate'] == "onfire":
return True
return False
def run(self, cycle, verbose = 2):
world = self.mem.get(self.mem.STATES)[-1]
blocks = blockstate.get_block_list(world)
goal = self.tree.givegoal(blocks)
if goal:
inserted = self.mem.get(self.mem.GOAL_GRAPH).insert(goal)
if verbose >= 2:
print("TF-Tree goal generated:", goal, end=' ')
if inserted:
print()
else:
print(". This goal was already in the graph.")
|
84568
|
import unittest
import mock
from copy import copy
from tests import BaseTest
import logging
# Units under test
import cadquery
from cadquery.freecad_impl import console_logging
class TestLogging(BaseTest):
def setUp(self):
# save root logger's state
root_logger = logging.getLogger()
self._initial_level = root_logger.level
self._initial_logging_handlers = copy(root_logger.handlers)
def tearDown(self):
# forcefully re-establish original log state
root_logger = logging.getLogger()
root_logger.level = self._initial_level
root_logger.handlers = self._initial_logging_handlers
# reset console_logging's global state
cadquery.freecad_impl.console_logging._logging_handler = None
@mock.patch('cadquery.freecad_impl.console_logging.FreeCAD')
def testConsoleMessage(self, mock_freecad):
console_logging.enable()
log = logging.getLogger('test')
log.info('foo')
mock_freecad.Console.PrintMessage.assert_called_once_with('foo\n')
mock_freecad.Console.PrintWarning.assert_not_called()
mock_freecad.Console.PrintError.assert_not_called()
@mock.patch('cadquery.freecad_impl.console_logging.FreeCAD')
def testConsoleWarning(self, mock_freecad):
console_logging.enable()
log = logging.getLogger('test')
log.warning('bar')
mock_freecad.Console.PrintMessage.assert_not_called()
mock_freecad.Console.PrintWarning.assert_called_once_with('bar\n')
mock_freecad.Console.PrintError.assert_not_called()
@mock.patch('cadquery.freecad_impl.console_logging.FreeCAD')
def testConsoleError(self, mock_freecad):
console_logging.enable()
log = logging.getLogger('test')
log.error('roo')
mock_freecad.Console.PrintMessage.assert_not_called()
mock_freecad.Console.PrintWarning.assert_not_called()
mock_freecad.Console.PrintError.assert_called_once_with('roo\n')
@mock.patch('cadquery.freecad_impl.console_logging.FreeCAD')
def testConsoleDebugOffDefault(self, mock_freecad):
console_logging.enable()
log = logging.getLogger('test')
log.debug('no show')
mock_freecad.Console.PrintMessage.assert_not_called()
mock_freecad.Console.PrintWarning.assert_not_called()
mock_freecad.Console.PrintError.assert_not_called()
@mock.patch('cadquery.freecad_impl.console_logging.FreeCAD')
def testConsoleSetLevelDebug(self, mock_freecad):
console_logging.enable(level=logging.DEBUG)
log = logging.getLogger('test')
log.debug('now showing')
mock_freecad.Console.PrintMessage.assert_called_once_with('now showing\n')
@mock.patch('cadquery.freecad_impl.console_logging.FreeCAD')
def testConsoleSetLevelWarning(self, mock_freecad):
console_logging.enable(level=logging.WARNING)
log = logging.getLogger('test')
log.info('no show')
log.warning('be warned')
mock_freecad.Console.PrintMessage.assert_not_called()
mock_freecad.Console.PrintWarning.assert_called_once_with('be warned\n')
@mock.patch('cadquery.freecad_impl.console_logging.FreeCAD')
def testConsoleLogFormat(self, mock_freecad):
console_logging.enable(format=">> %(message)s <<")
log = logging.getLogger('test')
log.info('behold brackets!')
mock_freecad.Console.PrintMessage.assert_called_once_with('>> behold brackets! <<\n')
@mock.patch('cadquery.freecad_impl.console_logging.FreeCAD')
def testConsoleEnableDisable(self, mock_freecad):
console_logging.enable()
console_logging.disable()
log = logging.getLogger('test')
log.error('nope, disabled')
mock_freecad.Console.PrintError.assert_not_called()
|
84616
|
from . import datahandler
from .backtester import Backtest
from .enums import Stock, Type, Direction
__all__ = ['datahandler', 'Backtest', 'Stock', 'Type', 'Direction']
|
84617
|
expected_output = {
"key_chains": {
"bla": {
"keys": {
1: {
"accept_lifetime": {
"end": "always valid",
"is_valid": True,
"start": "always valid",
},
"key_string": "cisco123",
"send_lifetime": {
"end": "always valid",
"is_valid": True,
"start": "always valid",
},
},
2: {
"accept_lifetime": {
"end": "06:01:00 UTC Jan 1 2010",
"is_valid": False,
"start": "10:10:10 UTC Jan 1 2002",
},
"key_string": "blabla",
"send_lifetime": {
"end": "06:01:00 UTC Jan 1 2010",
"is_valid": False,
"start": "10:10:10 UTC Jan 1 2002",
},
},
},
},
"cisco": {
"keys": {
1: {
"accept_lifetime": {
"end": "infinite",
"is_valid": True,
"start": "11:11:11 UTC Mar 1 2001",
},
"key_string": "cisco123",
"send_lifetime": {
"end": "infinite",
"is_valid": True,
"start": "11:11:11 UTC Mar 1 2001",
},
},
2: {
"accept_lifetime": {
"end": "22:11:11 UTC Dec 20 2030",
"is_valid": True,
"start": "11:22:11 UTC Jan 1 2001",
},
"key_string": "cisco234",
"send_lifetime": {
"end": "always valid",
"is_valid": True,
"start": "always valid",
},
},
3: {
"accept_lifetime": {
"end": "always valid",
"is_valid": True,
"start": "always valid",
},
"key_string": "cisco",
"send_lifetime": {
"end": "always valid",
"is_valid": True,
"start": "always valid",
},
},
},
},
},
}
|
84692
|
import argparse
import os
import json
import sys
def copyright_main():
parser = argparse.ArgumentParser(
description="automatically set the copyright for you"
)
parser.add_argument("-p", "--path",
help="choose the path you want to add the copyright")
parser.add_argument("-t", "--title",
help="add the copyright title")
parser.add_argument("-l", "--license",
help="add the license name for the copyright")
parser.add_argument("-y", "--year",
help="add the year the production was made")
parser.add_argument("-o", "--owner",
help="add the owner of the production")
parser.add_argument("--config",
help="add the config file")
parser.add_argument("-d", "--description",
help="add description of the program")
parser.add_argument("-c", "--cversion",
help="add the version of the production")
parser.add_argument("-u", "--update",
help="add the latest time that you updated")
parser.add_argument("-f", "--file",
help="add the file name of the program")
args = parser.parse_args()
if args.config:
try:
with open(args.config, "r", encoding="utf-8") as f:
content = f.read()
info = json.loads(content)
try:
args.path = info["path"]
args.title = info["title"]
args.license = info["license"]
args.year = info["year"]
args.owner = info["owner"]
except Exception as e:
                    print("Argument not found!")
sys.exit()
try:
args.description = info["description"]
except Exception as e:
pass
try:
args.cversion = info["cversion"]
except Exception as e:
pass
try:
args.update = info["update"]
except Exception as e:
pass
try:
args.file = info["file"]
except Exception as e:
pass
except Exception as e:
            print("Config file does not exist!")
sys.exit()
args_list = [args.year, args.license, args.owner, args.title]
if not all(args_list):
print("Error: lack of args!")
sys.exit()
data = '"""\n'
data += "Copyright: Copyright (c) " + args.year + "\n"
data += "License : " + args.license + "\n"
data += "owner : " + args.owner + "\n"
data += "title : " + args.title + "\n"
if args.description:
data += "description : " + args.description + "\n"
if args.cversion:
data += "version : " + args.cversion + "\n"
if args.update:
data += "time : " + args.update + "\n"
if args.file:
data += "file : " + args.file + "\n"
data += '"""\n\n'
for root, dirs, files in os.walk(args.path):
for file in files:
with open(root + "/" + file, "r+", encoding="utf-8") as f:
old = f.read()
f.seek(0)
f.write(data)
f.write(old)
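# Example of a --config file this tool can consume (a sketch; the values are illustrative).
# path, title, license, year and owner are required by the parsing above; description,
# cversion, update and file are optional.
EXAMPLE_CONFIG = {
    "path": "./src",
    "title": "my-project",
    "license": "MIT",
    "year": "2021",
    "owner": "Jane Doe",
    "description": "adds a copyright header to every file under path",
}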
if __name__ == "__main__":
copyright_main()
|
84695
|
import tinyapi
from unittest import TestCase
from getpass import getpass
from nose.tools import raises
import os
DEFAULT_USERNAME = "tinyapi-test-account"
USERNAME = os.environ.get("TINYAPI_TEST_USERNAME") or DEFAULT_USERNAME
PASSWORD = os.environ.get("TINYAPI_TEST_PASSWORD") or getpass()
class TestDraft(TestCase):
def setUp(self):
self.session = tinyapi.Session(USERNAME, PASSWORD)
def test_basic(self):
draft = self.session.create_draft()
repr(draft)
draft.subject = "testing"
draft.body = "this is the body"
draft.public_message = True
message_id = draft.save().message_id
fetched_draft = self.session.edit_draft(message_id)
assert(fetched_draft.message_id == draft.message_id)
assert(fetched_draft.subject == draft.subject)
assert(fetched_draft.body == draft.body)
assert(fetched_draft.public_message == draft.public_message)
fetched_draft.delete()
@raises(Exception)
def test_bad_id(self):
draft = self.session.edit_draft(1)
draft.fetch()
@raises(Exception)
def test_unsaved_fetch(self):
draft = self.session.create_draft()
draft.fetch()
@raises(Exception)
def test_bad_public_value(self):
draft = self.session.create_draft()
draft.public_message = "a string"
|
84702
|
from __future__ import unicode_literals
import io
import json
import os
import re
from collections import OrderedDict
VALID_COUNTRY_CODE = re.compile(r"^\w{2,3}$")
VALIDATION_DATA_DIR = os.path.join(os.path.dirname(__file__), "data")
VALIDATION_DATA_PATH = os.path.join(VALIDATION_DATA_DIR, "%s.json")
FIELD_MAPPING = {
"A": "street_address",
"C": "city",
"D": "city_area",
"N": "name",
"O": "company_name",
"S": "country_area",
"X": "sorting_code",
"Z": "postal_code",
}
KNOWN_FIELDS = set(FIELD_MAPPING.values()) | {"country_code"}
def load_validation_data(country_code="all"):
if not VALID_COUNTRY_CODE.match(country_code):
raise ValueError("%r is not a valid country code" % (country_code,))
country_code = country_code.lower()
try:
# VALIDATION_DATA_PATH may have '%' symbols
        # for backwards compatibility if VALIDATION_DATA_PATH is imported
# by consumers of this package.
path = VALIDATION_DATA_PATH % (country_code,)
except TypeError:
path = os.path.join(VALIDATION_DATA_DIR, "%s.json" % country_code)
if not os.path.exists(path):
raise ValueError("%r is not a valid country code" % (country_code,))
with io.open(path, encoding="utf-8") as data:
return json.load(data)
class ValidationRules(object):
__slots__ = [
"country_code",
"country_name",
"address_format",
"address_latin_format",
"allowed_fields",
"required_fields",
"upper_fields",
"country_area_type",
"country_area_choices",
"city_type",
"city_choices",
"city_area_type",
"city_area_choices",
"postal_code_type",
"postal_code_matchers",
"postal_code_examples",
"postal_code_prefix",
]
def __init__(
self,
country_code,
country_name,
address_format,
address_latin_format,
allowed_fields,
required_fields,
upper_fields,
country_area_type,
country_area_choices,
city_type,
city_choices,
city_area_type,
city_area_choices,
postal_code_type,
postal_code_matchers,
postal_code_examples,
postal_code_prefix,
):
self.country_code = country_code
self.country_name = country_name
self.address_format = address_format
self.address_latin_format = address_latin_format
self.allowed_fields = allowed_fields
self.required_fields = required_fields
self.upper_fields = upper_fields
self.country_area_type = country_area_type
self.country_area_choices = country_area_choices
self.city_type = city_type
self.city_choices = city_choices
self.city_area_type = city_area_type
self.city_area_choices = city_area_choices
self.postal_code_type = postal_code_type
self.postal_code_matchers = postal_code_matchers
self.postal_code_examples = postal_code_examples
self.postal_code_prefix = postal_code_prefix
def __repr__(self):
return (
"ValidationRules("
"country_code=%r, "
"country_name=%r, "
"address_format=%r, "
"address_latin_format=%r, "
"allowed_fields=%r, "
"required_fields=%r, "
"upper_fields=%r, "
"country_area_type=%r, "
"country_area_choices=%r, "
"city_type=%r, "
"city_choices=%r, "
"city_area_type=%r, "
"city_area_choices=%r, "
"postal_code_type=%r, "
"postal_code_matchers=%r, "
"postal_code_examples=%r, "
"postal_code_prefix=%r)"
% (
self.country_code,
self.country_name,
self.address_format,
self.address_latin_format,
self.allowed_fields,
self.required_fields,
self.upper_fields,
self.country_area_type,
self.country_area_choices,
self.city_type,
self.city_choices,
self.city_area_type,
self.city_area_choices,
self.postal_code_type,
self.postal_code_matchers,
self.postal_code_examples,
self.postal_code_prefix,
)
)
def _make_choices(rules, translated=False):
sub_keys = rules.get("sub_keys")
if not sub_keys:
return []
choices = []
sub_keys = sub_keys.split("~")
sub_names = rules.get("sub_names")
if sub_names:
choices += [
(key, value) for key, value in zip(sub_keys, sub_names.split("~")) if value
]
else:
if not translated:
choices += [(key, key) for key in sub_keys]
if not translated:
sub_lnames = rules.get("sub_lnames")
if sub_lnames:
choices += [
(key, value)
for key, value in zip(sub_keys, sub_lnames.split("~"))
if value
]
sub_lfnames = rules.get("sub_lfnames")
if sub_lfnames:
choices += [
(key, value)
for key, value in zip(sub_keys, sub_lfnames.split("~"))
if value
]
return choices
def _compact_choices(choices):
value_map = OrderedDict()
for key, value in choices:
        if key not in value_map:
value_map[key] = set()
value_map[key].add(value)
return [
(key, value) for key, values in value_map.items() for value in sorted(values)
]
def _match_choices(value, choices):
if value:
value = value.strip().lower()
for name, label in choices:
if name.lower() == value:
return name
if label.lower() == value:
return name
def _load_country_data(country_code):
database = load_validation_data("zz")
country_data = database["ZZ"]
if country_code:
country_code = country_code.upper()
if country_code.lower() == "zz":
raise ValueError("%r is not a valid country code" % (country_code,))
database = load_validation_data(country_code.lower())
country_data.update(database[country_code])
return country_data, database
def get_validation_rules(address):
country_code = address.get("country_code", "").upper()
country_data, database = _load_country_data(country_code)
country_name = country_data.get("name", "")
address_format = country_data["fmt"]
address_latin_format = country_data.get("lfmt", address_format)
format_fields = re.finditer(r"%([ACDNOSXZ])", address_format)
allowed_fields = {FIELD_MAPPING[m.group(1)] for m in format_fields}
required_fields = {FIELD_MAPPING[f] for f in country_data["require"]}
upper_fields = {FIELD_MAPPING[f] for f in country_data["upper"]}
languages = [None]
if "languages" in country_data:
languages = country_data["languages"].split("~")
postal_code_matchers = []
if "postal_code" in allowed_fields:
if "zip" in country_data:
postal_code_matchers.append(re.compile("^" + country_data["zip"] + "$"))
postal_code_examples = []
if "zipex" in country_data:
postal_code_examples = country_data["zipex"].split(",")
country_area_choices = []
city_choices = []
city_area_choices = []
country_area_type = country_data["state_name_type"]
city_type = country_data["locality_name_type"]
city_area_type = country_data["sublocality_name_type"]
postal_code_type = country_data["zip_name_type"]
postal_code_prefix = country_data.get("postprefix", "")
# second level of data is for administrative areas
country_area = None
city = None
city_area = None
if country_code in database:
if "sub_keys" in country_data:
for language in languages:
is_default_language = (
language is None or language == country_data["lang"]
)
matched_country_area = None
matched_city = None
if is_default_language:
localized_country_data = database[country_code]
else:
localized_country_data = database[
"%s--%s" % (country_code, language)
]
localized_country_area_choices = _make_choices(localized_country_data)
country_area_choices += localized_country_area_choices
existing_choice = country_area is not None
matched_country_area = country_area = _match_choices(
address.get("country_area"), localized_country_area_choices
)
if matched_country_area:
# third level of data is for cities
if is_default_language:
country_area_data = database[
"%s/%s" % (country_code, country_area)
]
else:
country_area_data = database[
"%s/%s--%s" % (country_code, country_area, language)
]
if not existing_choice:
if "zip" in country_area_data:
postal_code_matchers.append(
re.compile("^" + country_area_data["zip"])
)
if "zipex" in country_area_data:
postal_code_examples = country_area_data["zipex"].split(",")
if "sub_keys" in country_area_data:
localized_city_choices = _make_choices(country_area_data)
city_choices += localized_city_choices
existing_choice = city is not None
matched_city = city = _match_choices(
address.get("city"), localized_city_choices
)
if matched_city:
# fourth level of data is for dependent sublocalities
if is_default_language:
city_data = database[
"%s/%s/%s" % (country_code, country_area, city)
]
else:
city_data = database[
"%s/%s/%s--%s"
% (country_code, country_area, city, language)
]
if not existing_choice:
if "zip" in city_data:
postal_code_matchers.append(
re.compile("^" + city_data["zip"])
)
if "zipex" in city_data:
postal_code_examples = city_data["zipex"].split(",")
if "sub_keys" in city_data:
localized_city_area_choices = _make_choices(city_data)
city_area_choices += localized_city_area_choices
existing_choice = city_area is not None
matched_city_area = city_area = _match_choices(
address.get("city_area"), localized_city_area_choices
)
if matched_city_area:
if is_default_language:
city_area_data = database[
"%s/%s/%s/%s"
% (country_code, country_area, city, city_area)
]
else:
city_area_data = database[
"%s/%s/%s/%s--%s"
% (
country_code,
country_area,
city,
city_area,
language,
)
]
if not existing_choice:
if "zip" in city_area_data:
postal_code_matchers.append(
re.compile("^" + city_area_data["zip"])
)
if "zipex" in city_area_data:
postal_code_examples = city_area_data[
"zipex"
].split(",")
country_area_choices = _compact_choices(country_area_choices)
city_choices = _compact_choices(city_choices)
city_area_choices = _compact_choices(city_area_choices)
return ValidationRules(
country_code,
country_name,
address_format,
address_latin_format,
allowed_fields,
required_fields,
upper_fields,
country_area_type,
country_area_choices,
city_type,
city_choices,
city_area_type,
city_area_choices,
postal_code_type,
postal_code_matchers,
postal_code_examples,
postal_code_prefix,
)
class InvalidAddress(ValueError):
def __init__(self, message, errors):
super(InvalidAddress, self).__init__(message)
self.errors = errors
def _normalize_field(name, rules, data, choices, errors):
value = data.get(name)
if name in rules.upper_fields and value is not None:
value = value.upper()
data[name] = value
if name not in rules.allowed_fields:
data[name] = ""
elif not value and name in rules.required_fields:
errors[name] = "required"
elif choices:
if value or name in rules.required_fields:
value = _match_choices(value, choices)
if value is not None:
data[name] = value
else:
errors[name] = "invalid"
if not value:
data[name] = ""
def normalize_address(address):
errors = {}
try:
rules = get_validation_rules(address)
except ValueError:
errors["country_code"] = "invalid"
else:
cleaned_data = address.copy()
country_code = cleaned_data.get("country_code")
if not country_code:
errors["country_code"] = "required"
else:
cleaned_data["country_code"] = country_code.upper()
_normalize_field(
"country_area", rules, cleaned_data, rules.country_area_choices, errors
)
_normalize_field("city", rules, cleaned_data, rules.city_choices, errors)
_normalize_field(
"city_area", rules, cleaned_data, rules.city_area_choices, errors
)
_normalize_field("postal_code", rules, cleaned_data, [], errors)
postal_code = cleaned_data.get("postal_code", "")
if rules.postal_code_matchers and postal_code:
for matcher in rules.postal_code_matchers:
if not matcher.match(postal_code):
errors["postal_code"] = "invalid"
break
_normalize_field("street_address", rules, cleaned_data, [], errors)
_normalize_field("sorting_code", rules, cleaned_data, [], errors)
if errors:
raise InvalidAddress("Invalid address", errors)
return cleaned_data
def _format_address_line(line_format, address, rules):
def _get_field(name):
value = address.get(name, "")
if name in rules.upper_fields:
value = value.upper()
return value
replacements = {
"%%%s" % code: _get_field(field_name)
for code, field_name in FIELD_MAPPING.items()
}
fields = re.split("(%.)", line_format)
fields = [replacements.get(f, f) for f in fields]
return "".join(fields).strip()
def get_field_order(address, latin=False):
"""
Returns expected order of address form fields as a list of lists.
Example for PL:
>>> get_field_order({'country_code': 'PL'})
[[u'name'], [u'company_name'], [u'street_address'], [u'postal_code', u'city']]
"""
rules = get_validation_rules(address)
address_format = rules.address_latin_format if latin else rules.address_format
address_lines = address_format.split("%n")
replacements = {
"%%%s" % code: field_name for code, field_name in FIELD_MAPPING.items()
}
all_lines = []
for line in address_lines:
fields = re.split("(%.)", line)
single_line = [replacements.get(field) for field in fields]
single_line = list(filter(None, single_line))
all_lines.append(single_line)
return all_lines
def format_address(address, latin=False):
rules = get_validation_rules(address)
address_format = rules.address_latin_format if latin else rules.address_format
address_line_formats = address_format.split("%n")
address_lines = [
_format_address_line(lf, address, rules) for lf in address_line_formats
]
address_lines.append(rules.country_name)
address_lines = filter(None, address_lines)
return "\n".join(address_lines)
def latinize_address(address, normalized=False):
if not normalized:
address = normalize_address(address)
cleaned_data = address.copy()
country_code = address.get("country_code", "").upper()
dummy_country_data, database = _load_country_data(country_code)
if country_code:
country_area = address["country_area"]
if country_area:
key = "%s/%s" % (country_code, country_area)
country_area_data = database.get(key)
if country_area_data:
cleaned_data["country_area"] = country_area_data.get(
"lname", country_area_data.get("name", country_area)
)
city = address["city"]
key = "%s/%s/%s" % (country_code, country_area, city)
city_data = database.get(key)
if city_data:
cleaned_data["city"] = city_data.get(
"lname", city_data.get("name", city)
)
city_area = address["city_area"]
key = "%s/%s/%s/%s" % (country_code, country_area, city, city_area)
city_area_data = database.get(key)
if city_area_data:
cleaned_data["city_area"] = city_area_data.get(
"lname", city_area_data.get("name", city_area)
)
return cleaned_data
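# Usage sketch (assumption: the per-country JSON files are present in VALIDATION_DATA_DIR).
# normalize_address() validates and canonicalizes the fields; format_address() renders
# them with the country's "fmt" template plus the country name.
if __name__ == "__main__":
    cleaned = normalize_address({
        "country_code": "US",
        "country_area": "CA",
        "city": "Mountain View",
        "postal_code": "94043",
        "street_address": "1600 Amphitheatre Pkwy",
    })
    print(format_address(cleaned))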
|
84709
|
from MDRSREID.Loss_Meter import Loss
import torch.nn as nn
import torch
from MDRSREID.utils.meter import RecentAverageMeter as Meter
class IDLoss(Loss):
def __init__(self, cfg, tb_writer=None):
super(IDLoss, self).__init__(cfg, tb_writer=tb_writer)
self.criterion = nn.CrossEntropyLoss(reduction='none') # 'none' | 'mean' | 'sum'.
self.part_fmt = '#{}'
def __call__(self, item, pred, step=0, **kwargs):
loss_list = [self.criterion(logits, item['label']).mean() for logits in pred['cls_feat_list']]
# New version of pytorch allow stacking 0-dim tensors, but not concatenating.
loss = torch.stack(loss_list).mean() # sum()
# Meter: stores and computes the average of recent values
self.store_calculate_loss(loss)
# May calculate part loss separately
self.may_calculate_part_loss(loss_list)
# May record losses.
self.may_record_loss(loss_list, step)
# Scale by loss weight
loss *= self.cfg.weight
return {'loss': loss}
def store_calculate_loss(self, loss):
"""
        :param loss: torch.stack(loss_list).mean()
:return:
Meter: stores and computes the average of recent values.
"""
if self.cfg.name not in self.meter_dict:
# Here use RecentAverageMeter as Meter
self.meter_dict[self.cfg.name] = Meter(name=self.cfg.name)
# Update the meter, store the current whole loss.
self.meter_dict[self.cfg.name].update(loss.item())
def may_calculate_part_loss(self, loss_list):
"""
:param loss_list: each part loss
:return:
Meter: stores and computes the average of recent values.
For each part loss, calculate the loss separately.
"""
if len(loss_list) > 1:
# stores and computes each part average of recent values
for i in range(len(loss_list)):
# if there is not the meter of the part, create a new one.
if self.part_fmt.format(i + 1) not in self.meter_dict:
self.meter_dict[self.part_fmt.format(i + 1)] = Meter(name=self.part_fmt.format(i + 1))
# Update the meter, store the current part loss
self.meter_dict[self.part_fmt.format(i + 1)].update(loss_list[i].item())
def may_record_loss(self, loss_list, step):
"""
:param loss_list:
:param step:
:return:
Use TensorBoard to record the losses.
"""
if self.tb_writer is not None:
self.tb_writer.add_scalars(main_tag=self.cfg.name,
tag_scalar_dict={self.cfg.name: self.meter_dict[self.cfg.name].avg},
global_step=step
)
# Record each part loss
if len(loss_list) > 1:
self.tb_writer.add_scalars(main_tag='Part ID Losses',
tag_scalar_dict={self.part_fmt.format(i + 1): self.meter_dict[self.part_fmt.format(i + 1)].avg
for i in range(len(loss_list))},
global_step=step
)
|
84737
|
import torch
import torch.nn as nn
from ever import registry
import ever as er
from ever import module as erm
class Deeplabv3pDecoder(nn.Module):
"""
This module is a reimplemented version in the following paper.
<NAME>, <NAME>, <NAME>, et al. Encoder-decoder with atrous
separable convolution for semantic image segmentation[J],
"""
def __init__(self,
os4_feature_channels=256,
os16_feature_channels=2048,
aspp_channels=256,
aspp_atrous=(6, 12, 18),
reduction_dim=48,
out_channels=256,
num_3x3_convs=2,
scale_factor=4.0,
):
super(Deeplabv3pDecoder, self).__init__()
self.scale_factor = scale_factor
# 3x3 conv is better
self.os4_transform = erm.ConvBlock(os4_feature_channels, reduction_dim, 3, 1, 1, bias=False)
# 3x3 conv is better
self.os16_transform = nn.Sequential(
erm.AtrousSpatialPyramidPool(os16_feature_channels, aspp_channels, aspp_atrous),
erm.ConvBlock(aspp_channels, aspp_channels, 3, 1, 1, bias=False)
)
layers = [erm.SeparableConvBlock(aspp_channels + reduction_dim, out_channels, 3, 1, 1, bias=False)]
for i in range(num_3x3_convs - 1):
layers.append(erm.SeparableConvBlock(out_channels, out_channels, 3, 1, 1, bias=False))
self.upsample = nn.UpsamplingBilinear2d(scale_factor=scale_factor)
self.stack_conv3x3 = nn.Sequential(*layers)
def forward(self, feat_list):
os4_feat, os16_feat = feat_list
os4_feat = self.os4_transform(os4_feat)
os16_feat = self.os16_transform(os16_feat)
feat_upx = self.upsample(os16_feat)
concat_feat = torch.cat([os4_feat, feat_upx], dim=1)
out = self.stack_conv3x3(concat_feat)
return out
@registry.MODEL.register()
class Deeplabv3pHead(er.ERModule):
def __init__(self, config):
super(Deeplabv3pHead, self).__init__(config)
self.head = nn.Sequential(
Deeplabv3pDecoder(**self.config.deeplabv3p_decoder),
nn.Conv2d(self.config.deeplabv3p_decoder.out_channels, self.config.num_classes, 1),
nn.UpsamplingBilinear2d(scale_factor=self.config.upsample_scale)
)
def forward(self, x):
x = self.head(x)
return x
def set_default_config(self):
self.config.update(dict(
deeplabv3p_decoder=dict(
os4_feature_channels=256,
os16_feature_channels=2048,
aspp_channels=256,
aspp_atrous=(6, 12, 18),
reduction_dim=48,
out_channels=256,
num_3x3_convs=2,
scale_factor=4.0,
),
num_classes=3,
upsample_scale=4.0
))
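# Shape sketch for the decoder above (assumption: random tensors stand in for backbone
# features and the `ever` modules are importable). With the defaults, the 2048-channel
# OS=16 map is ASPP-processed, upsampled 4x, fused with the 256-channel OS=4 map and
# projected to 256 channels at OS=4 resolution.
if __name__ == "__main__":
    decoder = Deeplabv3pDecoder()
    os4_feat = torch.randn(1, 256, 64, 64)
    os16_feat = torch.randn(1, 2048, 16, 16)
    print(decoder([os4_feat, os16_feat]).shape)  # expected: torch.Size([1, 256, 64, 64])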
|
84748
|
def extractLizonkanovelsWordpressCom(item):
'''
Parser for 'lizonkanovels.wordpress.com'
'''
vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
if not (chp or vol) or "preview" in item['title'].lower():
return None
tagmap = [
('bestial blade by priest', 'bestial blade', 'translated'),
('creatures of habit by meat in the shell', 'creatures of habit', 'translated'),
('seal cultivation for self-improvement by mo xiao xian', 'seal cultivation for self-improvement', 'translated'),
('PRC', 'PRC', 'translated'),
('Loiterous', 'Loiterous', 'oel'),
]
for tagname, name, tl_type in tagmap:
if tagname in item['tags']:
return buildReleaseMessageWithType(item, name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)
return False
|
84777
|
from __future__ import print_function
import os
import argparse
import torch
import torch.backends.cudnn as cudnn
import numpy as np
from data import cfg_mnet, cfg_slim, cfg_rfb
from layers.functions.prior_box import PriorBox
from utils.nms.py_cpu_nms import py_cpu_nms
import cv2
from thop import profile
from thop import clever_format
from models.retinaface import RetinaFace
from models.net_slim import Slim
from models.net_rfb import RFB
from utils.box_utils import decode, decode_landm
from utils.timer import Timer
parser = argparse.ArgumentParser(description='Test')
parser.add_argument('--network', default='mobile0.25', help='Backbone network mobile0.25 or slim or RFB')
parser.add_argument('--long_side', default=320, help='when origin_size is false, long_side is scaled size(320 or 640 for long side)')
args = parser.parse_args()
if __name__ == '__main__':
torch.set_grad_enabled(False)
cfg = None
net = None
if args.network == "mobile0.25":
cfg = cfg_mnet
net = RetinaFace(cfg = cfg, phase = 'test')
elif args.network == "slim":
cfg = cfg_slim
net = Slim(cfg = cfg, phase = 'test')
elif args.network == "RFB":
cfg = cfg_rfb
net = RFB(cfg = cfg, phase = 'test')
else:
print("Don't support network!")
exit(0)
long_side = int(args.long_side)
    short_side = int(long_side / 4 * 3)
img = torch.randn(1, 3, long_side, short_side)
flops, params = profile(net, inputs=(img, ))
flops, params = clever_format([flops, params], "%.3f")
print("param:", params, "flops:", flops)
|
84799
|
from beetl.task_datasets import BeetlSleepTutorial, BeetlSleepSource
ds = BeetlSleepTutorial()
path = ds.download()
# Load all subject data
X, y, info = ds.get_data()
# Assume source group is subject 0-4, target group is subject 5-7,
# and subject 8,9 are from target group for testing.
X_source_train, y_source_train, info = ds.get_data(subjects=range(5))
X_target_train, y_target_train, info = ds.get_data(subjects=range(5, 8))
X_target_test, y_target_test, _ = ds.get_data(subjects=range(8, 10))
################################
# For Sleep Source
ds = BeetlSleepSource()
path = ds.download()
# Load all subject data
X, y, info = ds.get_data()
|
84841
|
import demistomock as demisto # noqa: F401
from CommonServerPython import * # noqa: F401
COLORS = {
'1 - New': '#00CD33', # (success green)
'2 - In Progress': '#7995D4', # (royal blue)
'3 - On Hold': '#FF9000', # (warning orange)
'4 - Awaiting Caller': '#FF9000', # (warning orange)
'5 - Awaiting Evidence': '#FF9000', # (warning orange)
'6 - Resolved': '#89A5C1', # (polo)
'7 - Closed': '#9AA0A3', # (natural grey)
'8 - Canceled': '#FF1744' # (alert-red)
}
TEXT = {
'1 - New': 'New',
'2 - In Progress': 'In Progress',
'3 - On Hold': 'On-Hold',
'4 - Awaiting Caller': 'Awaiting Caller',
'5 - Awaiting Evidence': 'Awaiting Evidence',
'6 - Resolved': 'Resolved',
'7 - Closed': 'Closed',
'8 - Canceled': 'Canceled'
}
incident = demisto.incidents()
service_now_state = (incident[0].get('CustomFields', {}).get('servicenowstate'))
try:
text_color = COLORS[service_now_state]
text_content = TEXT[service_now_state]
except Exception as e:
demisto.debug(f'SnowIncidentStatus debug - state is: {service_now_state}\n{e}')
text_color = '#000000'
text_content = 'Pending Update'
html = f"<div style='color:{text_color};text-align:center;'><h2>{text_content}</h2></div>"
demisto.results({
'ContentsFormat': formats['html'],
'Type': entryTypes['note'],
'Contents': html
})
|
84885
|
import pytest
from lightbus import Api, Event
from lightbus.api import ApiRegistry
from lightbus.exceptions import (
MisconfiguredApiOptions,
InvalidApiEventConfiguration,
InvalidApiRegistryEntry,
UnknownApi,
)
pytestmark = pytest.mark.unit
@pytest.fixture()
def SimpleApi():
class SimpleApi(Api):
class Meta:
name = "simple.api"
return SimpleApi
@pytest.fixture()
def registry():
return ApiRegistry()
def test_api_named_default():
# Apis can not start with the name 'default'
with pytest.raises(MisconfiguredApiOptions):
class BadApi(Api):
class Meta:
name = "default"
def test_api_named_default_dot_something():
# Apis can not start with the name 'default'
with pytest.raises(MisconfiguredApiOptions):
class BadApi(Api):
class Meta:
name = "default.foo"
def test_pass_string_as_event_params():
# Check we cannot accidentally pass a string to Event in the
# case that we omit a ',' when specifying a parameters tuple
with pytest.raises(InvalidApiEventConfiguration):
Event(parameters=("foo"))
def test_api_registry_add_ok(SimpleApi, registry):
registry.add(SimpleApi())
assert "simple.api" in registry._apis
def test_api_registry_add_class(SimpleApi, registry):
with pytest.raises(InvalidApiRegistryEntry):
registry.add(SimpleApi)
def test_api_registry_get_ok(SimpleApi, registry):
api = SimpleApi()
registry.add(api)
assert registry.get("simple.api") == api
def test_api_registry_get_unknown(SimpleApi, registry):
with pytest.raises(UnknownApi):
registry.get("unknown.api")
def test_api_registry_remove_ok(SimpleApi, registry):
registry.add(SimpleApi())
registry.remove("simple.api")
assert not registry._apis
def test_api_registry_remove_unknown(SimpleApi, registry):
with pytest.raises(UnknownApi):
registry.remove("unknown.api")
def test_api_registry_internal(registry):
class InternalApi(Api):
class Meta:
name = "internal.api"
internal = True
api = InternalApi()
registry.add(api)
assert registry.internal() == [api]
assert registry.public() == []
def test_api_registry_public(SimpleApi, registry):
api = SimpleApi()
registry.add(api)
assert registry.public() == [api]
assert registry.internal() == []
def test_api_registry_all(SimpleApi, registry):
api = SimpleApi()
registry.add(api)
assert registry.all() == [api]
def test_api_registry_names(SimpleApi, registry):
api = SimpleApi()
registry.add(api)
assert registry.names() == ["simple.api"]
|
84908
|
import torch
from torch import nn
import torch.nn.functional as F
from .utils import PI, rfft, irfft
def ramp_filter(size):
image_n = torch.cat([
torch.arange(1, size / 2 + 1, 2, dtype=torch.int),
torch.arange(size / 2 - 1, 0, -2, dtype=torch.int),
])
image_filter = torch.zeros(size, dtype=torch.double)
image_filter[0] = 0.25
image_filter[1::2] = -1 / (PI * image_n) ** 2
fourier_filter = torch.rfft(image_filter, 1, onesided=False)
fourier_filter[:, 1] = fourier_filter[:, 0]
return 2*fourier_filter
class AbstractFilter(nn.Module):
def forward(self, x):
input_size = x.shape[2]
projection_size_padded = \
max(64, int(2**(2*torch.tensor(input_size)).float().log2().ceil()))
pad_width = projection_size_padded - input_size
padded_tensor = F.pad(x, (0, 0, 0, pad_width))
fourier_filter = ramp_filter(padded_tensor.shape[2]).to(x.device)
fourier_filter = self.create_filter(fourier_filter)
fourier_filter = fourier_filter.unsqueeze(-2)
projection = rfft(padded_tensor, axis=2)*fourier_filter
return irfft(projection, axis=2)[:, :, :input_size, :].to(x.dtype)
def create_filter(self, fourier_ramp):
raise NotImplementedError
class RampFilter(AbstractFilter):
def create_filter(self, fourier_ramp):
return fourier_ramp
class HannFilter(AbstractFilter):
def create_filter(self, fourier_ramp):
n = torch.arange(0, fourier_ramp.shape[0])
hann = (0.5 - 0.5*(2.0*PI*n/(fourier_ramp.shape[0]-1)).cos()).to(fourier_ramp.device)
return fourier_ramp*hann.roll(hann.shape[0]//2, 0).unsqueeze(-1)
class LearnableFilter(AbstractFilter):
def __init__(self, filter_size):
super(LearnableFilter, self).__init__()
self.filter = nn.Parameter(ramp_filter(filter_size)[..., 0].view(-1, 1))
def forward(self, x):
fourier_filter = self.filter.unsqueeze(-1).repeat(1, 1, 2).to(x.device)
projection = rfft(x, axis=2) * fourier_filter
return irfft(projection, axis=2).to(x.dtype)
def create_filter(self, fourier_ramp):
raise NotImplementedError
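# Quick check of the ramp ("Ram-Lak") frequency response built above (a sketch; it assumes
# a PyTorch version that still provides torch.rfft, which ramp_filter already relies on).
# The last dimension holds interleaved real/imaginary pairs.
if __name__ == "__main__":
    response = ramp_filter(16)
    print(response.shape)  # expected: torch.Size([16, 2])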
|
84967
|
from rply.token import SourcePosition
from hippy.sourceparser import SourceParser, LexerWrapper, ParseError, get_lexer
from hippy.astcompiler import compile_ast
from rpython.rlib.objectmodel import we_are_translated
MODE_LITERAL = 0
MODE_EQUALSIGN = 1
MODE_PHPCODE = 2
class PHPLexerWrapper(LexerWrapper):
def __init__(self, source, filename="", interp=None):
self.lexer = get_lexer(we_are_translated())
self.source = source
self.startlineno = 0
self.startindex = 0
self.mode = MODE_LITERAL
self.filename = filename
self.heredoc_term = None
self.interp = interp
def next(self):
mode = self.mode
if mode == MODE_PHPCODE:
return self.next_phpcode()
elif mode == MODE_LITERAL:
return self.next_literal_mode()
elif mode == MODE_EQUALSIGN:
return self.next_equal_sign()
else:
assert 0
def next_literal_mode(self):
# "literal" mode, i.e. outside "<?php ?>" tags: generates
# one B_LITERAL_BLOCK until the next opening "<?php" tag
self.mode = MODE_PHPCODE
source = self.source
index = self.startindex
assert index >= 0
tagindex = source.find('<?', index)
if tagindex == -1:
tagindex = len(source)
assert tagindex >= 0
startindex = self.startindex
assert startindex >= 0
block_of_text = source[startindex:tagindex] # may be empty
source_pos = SourcePosition(self.startindex, self.startlineno + 1, 0)
tok = self.lexer.token_class('B_LITERAL_BLOCK', block_of_text, source_pos)
self.startlineno += block_of_text.count('\n')
if source[tagindex:tagindex+5].lower() == '<?php':
pos = tagindex + 5
elif source[tagindex:tagindex+3] == '<?=':
pos = tagindex + 3
self.mode = MODE_EQUALSIGN
else:
pos = tagindex + 2
self.lexer.input(self.source, pos, self.startlineno)
return tok
def next_equal_sign(self):
self.mode = MODE_PHPCODE
source_pos = SourcePosition(self.startindex, self.startlineno + 1, 0)
return self.lexer.token_class("T_ECHO", "echo", source_pos)
def next_phpcode(self):
for tok in self.lexer.token():
            if tok is None:
                return None  # end of file
            # Lexer indexes lines from 0, humans from 1
            tok.source_pos.lineno += 1
            if tok.name == 'H_NEW_LINE':
continue # ignore these and continue
elif tok.name == 'H_TABULATURE':
continue # ignore these and continue
elif tok.name == 'H_WHITESPACE':
continue # ignore these and continue
elif tok.name == 'T_COMMENT':
# look for "?>" inside single-line comments too
if not tok.getstr().startswith('/*'):
i = tok.getstr().find('?>')
if i >= 0:
endpos = self.lexer.pos - len(tok.getstr()) + i + 2
return self.end_current_block(tok, endpos)
continue
elif tok.name == 'B_END_OF_CODE_BLOCK':
return self.end_current_block(tok, self.lexer.pos)
elif tok.name == 'T_HALT_COMPILER':
return self.do_halt_compiler()
else:
return tok # a normal php token
def end_current_block(self, tok, endpos):
# a "?>" marker that ends the current block of code
# generates a ";" token followed by a B_LITERAL_BLOCK
lineno = tok.source_pos.lineno
self.startlineno = lineno
self.startindex = endpos + 1
self.mode = MODE_LITERAL
if (self.startindex < len(self.source) and
self.source[self.startindex] == '\n'):
# self.startlineno += 1 # consume \n if immediately following
self.startindex += 1
return self.lexer.token_class(";", ";", SourcePosition(endpos, lineno, 0))
def do_halt_compiler(self):
for expecting in ['(', ')', ';']:
token = self.next()
if token is None or token.name != expecting:
raise ParseError('"__halt_compiler" not followed by "();"',
token.source_pos.lineno)
#
# hack: copies the end position to a constant
if self.interp is not None:
if self.interp.lookup_constant('__COMPILER_HALT_OFFSET__') is None:
if self.mode == MODE_LITERAL:
endpos = self.startindex
else:
endpos = self.lexer.pos + 1
w_end = self.interp.space.newint(endpos)
self.interp.declare_new_constant('__COMPILER_HALT_OFFSET__',
w_end)
#
return None
DEBUG = False
def compile_php(filename, source, space, interp=None):
"""Parse and compile a PHP file, starting in literal mode (i.e.
dumping all input directly) until the first '<?' or '<?php'.
Supports a mixture of blocks of code between the blocks of texts."""
phplexerwrapper = PHPLexerWrapper(source, filename, interp)
if DEBUG:
lst = []
while True:
tok = phplexerwrapper.next()
if tok is None:
break
else:
lst.append(tok)
print [x.__dict__ for x in lst]
phplexerwrapper = iter(lst + [None])
parser = SourceParser(space, None, filename=filename)
tokens = parser.parser.parse(phplexerwrapper, state=parser)
bc = compile_ast(filename, source, tokens, space)
return bc
|
84987
|
from sys import platform
import unittest
import checksieve
class TestVariables(unittest.TestCase):
def test_set(self):
sieve = '''
require "variables";
set "honorific" "Mr";
'''
self.assertFalse(checksieve.parse_string(sieve, False))
def test_mod_length(self):
sieve = '''
require "variables";
set :length "b" "${a}";
'''
self.assertFalse(checksieve.parse_string(sieve, False))
def test_wrong_tag(self):
sieve = '''
require "variables";
set :mime "b" "c";
'''
self.assertTrue(checksieve.parse_string(sieve, True))
def test_set_wrong_arg(self):
sieve = '''
require "variables";
set "a" "b" "c";
'''
self.assertTrue(checksieve.parse_string(sieve, True))
def test_too_many_args(self):
sieve = '''
require "variables";
set "a" "b" "c" "d";
'''
self.assertTrue(checksieve.parse_string(sieve, True))
def test_test_string(self):
sieve = '''
require "variables";
set "state" "${state} pending";
if string :matches " ${state} " "* pending *" {
# the above test always succeeds
stop;
}
'''
self.assertFalse(checksieve.parse_string(sieve, False))
def test_numeral_varname(self):
sieve = '''
require "variables";
set "1" "${state} pending";
'''
self.assertFalse(checksieve.parse_string(sieve, False))
def test_bad_varname(self):
sieve = '''
require "variables";
set "bad-variable" "no dashes allowed!";
'''
self.assertTrue(checksieve.parse_string(sieve, True))
if __name__ == '__main__':
unittest.main()
|
85023
|
import unittest
import pytest
from django.test import override_settings
from channels import DEFAULT_CHANNEL_LAYER
from channels.exceptions import InvalidChannelLayerError
from channels.layers import InMemoryChannelLayer, channel_layers, get_channel_layer
class TestChannelLayerManager(unittest.TestCase):
@override_settings(
CHANNEL_LAYERS={"default": {"BACKEND": "channels.layers.InMemoryChannelLayer"}}
)
def test_config_error(self):
"""
If channel layer doesn't specify TEST_CONFIG, `make_test_backend`
should result into error.
"""
with self.assertRaises(InvalidChannelLayerError):
channel_layers.make_test_backend(DEFAULT_CHANNEL_LAYER)
@override_settings(
CHANNEL_LAYERS={
"default": {
"BACKEND": "channels.layers.InMemoryChannelLayer",
"TEST_CONFIG": {"expiry": 100500},
}
}
)
def test_config_instance(self):
"""
If channel layer provides TEST_CONFIG, `make_test_backend` should
return channel layer instance appropriate for testing.
"""
layer = channel_layers.make_test_backend(DEFAULT_CHANNEL_LAYER)
self.assertEqual(layer.expiry, 100500)
def test_override_settings(self):
"""
The channel layers cache is reset when the CHANNEL_LAYERS setting
changes.
"""
with override_settings(
CHANNEL_LAYERS={
"default": {"BACKEND": "channels.layers.InMemoryChannelLayer"}
}
):
self.assertEqual(channel_layers.backends, {})
get_channel_layer()
self.assertNotEqual(channel_layers.backends, {})
self.assertEqual(channel_layers.backends, {})
# In-memory layer tests
@pytest.mark.asyncio
async def test_send_receive():
layer = InMemoryChannelLayer()
message = {"type": "test.message"}
await layer.send("test.channel", message)
assert message == await layer.receive("test.channel")
|
85053
|
import math
import numpy as np
import tvm
from tvm.tir import IterVar
from .hw_abs_dag import construct_dag
from itertools import permutations, product
from functools import reduce
from . import _ffi_api
####################################################
# schedule parameter functions
####################################################
def get_factor_lst(value):
assert isinstance(value, int)
ret = []
end = math.sqrt(value)
for i in range(1, math.ceil(end)):
if value % i == 0:
ret.append(i)
ret.append(value // i)
if end - int(end) < 1e-10 and value % int(end) == 0:
ret.append(int(end))
return ret
def powerx_lst(x, left, right):
ret = []
beg = 1
while beg < left:
beg *= x
while beg < right:
ret.append(beg)
beg = beg * x
return ret
def any_factor_split(value, number, allow_non_divisible="off"):
assert allow_non_divisible in ["off", "power2", "continuous"]
ret = []
assert isinstance(number, int)
recursive_factor_split(value, [], number, ret, allow_non_divisible)
return ret
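# Illustrative example (not part of the original source): with the default "off"
# policy, splitting 12 into 2 factors enumerates every ordered divisor pair:
#   any_factor_split(12, 2) -> [[1, 12], [2, 6], [3, 4], [4, 3], [6, 2], [12, 1]]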
def recursive_factor_split(left, cur, number, ret, policy):
if number == 1:
ret.append(cur + [left])
return
if policy == "power2":
f_lst = get_factor_lst(left)
f_lst.extend(powerx_lst(2, 1, left))
f_lst = list(set(f_lst))
elif policy == "continuous":
f_lst = list(range(1, left + 1))
else:
f_lst = get_factor_lst(left)
f_lst = sorted(f_lst)
for f in f_lst:
recursive_factor_split(left // f, cur + [f], number - 1, ret, policy)
def remap_factors(factor_lst):
assert isinstance(factor_lst, (list, tuple))
assert len(factor_lst) > 0
sample = factor_lst[0]
assert isinstance(sample, (list, tuple))
assert len(sample) > 0
dim = len(sample) - 1
number_count = {i: set() for i in range(dim + 1)}
# check the factor list
for v in factor_lst:
assert isinstance(v, (list, tuple))
assert len(v) == dim + 1, dim
for i, ele in enumerate(v):
number_count[i].add(ele)
num_factors = len(number_count[0])
for k, v in number_count.items():
assert len(v) == num_factors
# remap the factor list
sorted_factors = sorted(number_count[0])
factor_map = {x: i for i, x in enumerate(sorted_factors)}
reverse_map = {i: x for i, x in enumerate(sorted_factors)}
ret = list(map(lambda factors: [factor_map[x] for x in factors], factor_lst))
return ret, reverse_map, dim, num_factors - 1
def get_directions(dim):
return list(product([-1, 0, 1], repeat=dim))
def get_partial_directions(dim):
def worker(v):
def set_value(i):
d = [0 for _ in range(dim)]
d[i] = v
return d
return set_value
return list(map(worker(1), range(dim))) + list(map(worker(-1), range(dim)))
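# Illustrative example: get_partial_directions(2) yields the four unit moves
#   [[1, 0], [0, 1], [-1, 0], [0, -1]]
# while get_directions(2) enumerates all 3**2 = 9 direction tuples, including (0, 0).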
def bi_product(repeat):
return list(product([0, 1], repeat=repeat))
def softmax(x):
e_x = np.exp(x - np.max(x))
return (e_x / (e_x.sum() + 1e-5)).tolist()
####################################################
# schedule helper tools
####################################################
def substitute_inputs(org_dag, op_map):
"""Infer ranges for expressions
Parameters
----------
org_dag: ComputeDAG
op_map: dict of {Operation: Operation}
Returns
-------
ComputeDAG
"""
n = _ffi_api.SubstituteInputs(org_dag, op_map)
return n
def reconstruct_dag_as_intrin(target_dag, main_op, hw_abs_dag, compute_key, shape_key):
inputs = list(main_op.input_tensors)
outputs = [main_op.output(0)]
# TODO: consider elem op in dag construction
input_names, output_names, nodes, read_graph, feed_graph = construct_dag(
hw_abs_dag, compute_key, shape_key, inputs, outputs, [], outputs
)
output_tensors = reduce(lambda x, y: x + y, [nodes[x] for x in output_names], [])
output = output_tensors[0]
replace_map = {main_op: output.op}
result_dag = substitute_inputs(target_dag, replace_map)
return (result_dag, (input_names, output_names, nodes, read_graph, feed_graph))
def can_inline(op, dag):
"""
op: tvm.te.Operation
dag: ComputeDAG
"""
if op not in dag.feed_graph:
return False
if not isinstance(op, tvm.te.ComputeOp):
return False
if len(op.reduce_axis) > 0:
return False
return True
def is_reduce_axis(iv: IterVar):
return int(iv.iter_type) == IterVar.CommReduce
def is_heavy_reduce_op(op):
re_exts = [int(iv.dom.extent) for iv in getattr(op, "reduce_axis", [])]
re_time = reduce(lambda a, b: a * b, re_exts, 1)
return re_time >= 64
def is_vectorized(iv: IterVar):
return int(iv.iter_type) == IterVar.Vectorized
|
85117
|
import pytest
@pytest.fixture()
def login(request):
name = request.param
print(f"== 账号是:{name} ==")
return name
data = ["pyy1", "polo"]
ids = [f"login_test_name is:{name}" for name in data]
# indirect=True tells pytest to treat "login" as a fixture to execute rather than a plain parameter, passing each item of data into the fixture via request.param
@pytest.mark.parametrize("login", data, ids=ids, indirect=True)
def test_name(login):  # the `login` argument here receives the value returned by the fixture
print(f" the test case's login account is: {login} ")
# Multiple parameters
# To pass several parameters at once, pass them through a dict
@pytest.fixture()
def logins(request):
param = request.param
print(f"账号是:{param['username']},密码是:{param['pwd']}")
return param
data = [
{"username": "name1", "pwd": "<PASSWORD>"},
{"username": "name2", "pwd": "<PASSWORD>"},
]
@pytest.mark.parametrize("logins", data, indirect=True)
def test_name_pwd(logins):
print(f"账号是:{logins['username']},密码是:{logins['pwd']}")
# 多个fixture
@pytest.fixture(scope="module")
def input_user(request):
user = request.param
print("登录账户:%s" % user)
return user
@pytest.fixture(scope="module")
def input_psw(request):
psw = request.param
print("登录密码:%s" % psw)
return psw
data = [
("name1", "pwd1"),
("name2", "<PASSWORD>")
]
@pytest.mark.parametrize("input_user,input_psw", data, indirect=True)
def test_more_fixture(input_user, input_psw):
print("fixture返回的内容:", input_user, input_psw)
# 多个fixture
@pytest.fixture(scope="function")
def input_user(request):
user = request.param
print("登录账户:%s" % user)
return user
@pytest.fixture(scope="function")
def input_psw(request):
psw = request.param
print("登录密码:%s" % psw)
return psw
name = ["name1", "name2"]
pwd = ["<PASSWORD>", "<PASSWORD>"]
@pytest.mark.parametrize("input_user", name, indirect=True)
@pytest.mark.parametrize("input_psw", pwd, indirect=True)
def test_more_fixture(input_user, input_psw):
print("fixture返回的内容:", input_user, input_psw)
|
85157
|
import pymongo
import datetime
import os
import numpy as np
import struct
from array import array
from pymongo import MongoClient
from mspasspy.ccore.seismic import (
Seismogram,
TimeReferenceType,
TimeSeries,
DoubleVector,
)
def find_channel(collection):
st = datetime.datetime(1990, 1, 1, 6)
et = datetime.datetime(1990, 1, 4, 6)
for cr in collection.find({"st": {"$gte": st}, "et": {"$lte": et}}):
print(cr)
# net channel station scheme
def save_data(d):
di = d.get_string("dir")
dfile = d.get_string("dfile")
fname = os.path.join(di, dfile)
os.makedirs(os.path.dirname(fname), exist_ok=True)
with open(fname, mode="a+b") as fh:
foff = fh.seek(0, 2)
float_array = array("d", d.data)
d.put("nofbytes", float_array.itemsize * float_array.buffer_info()[1])
float_array.tofile(fh)
di = os.path.dirname(os.path.realpath(fname))
dfile = os.path.basename(os.path.realpath(fname))
d.put("dir", di)
d.put("dfile", dfile)
d.put("foff", foff)
def read_data(d):
di = d.get_string("dir")
dfile = d.get_string("dfile")
foff = d.get("foff")
fname = os.path.join(di, dfile)
with open(fname, mode="rb") as fh:
fh.seek(foff)
float_array = array("d")
float_array.frombytes(fh.read(d.get("nofbytes")))
d.data = DoubleVector(float_array)
if __name__ == "__main__":
s = TimeSeries()
s.data = DoubleVector(np.random.rand(255))
s["dir"] = "./"
s["dfile"] = "test_op"
save_data(s)
s2 = TimeSeries()
for k in s:
s2[k] = s[k]
s2.data = DoubleVector([])
print(len(s2.data))
read_data(s2)
print(len(s2.data))
assert all(a == b for a, b in zip(s.data, s2.data))
# client = MongoClient('localhost', 27017)
# db = client.mspass
# channels = db.channels
# find_channel(channels)
|
85158
|
from .. import Provider as PhoneNumberProvider
class Provider(PhoneNumberProvider):
# Source:
# https://en.wikipedia.org/wiki/Telephone_numbers_in_the_United_Kingdom
# Fake phone numbers should be fake - this provider has been rewritten to
# use numbers reserved for dramatic use by Ofcom. See the following:
# https://en.wikipedia.org/wiki/Fictitious_telephone_number#United_Kingdom
# This ensures no genuine numbers are generated at random.
#
# It's worth noting that the following examples include incorrect notation
# of British phone numbers. +44(0)xxx is incorrect and the '(0)' should
# be omitted. However, numbers are commonly written this way by the public,
# so the form is included here: it makes the generated data more realistic
# and benefits those developing data-cleansing tools etc. All possible
# official fake numbers are covered below.
cellphone_formats = (
'07700 900 ###',
'07700 900###',
'07700900###',
'(07700) 900 ###',
'(07700) 900###',
'(07700)900###',
'+447700 900 ###',
'+447700 900###',
'+447700900###',
'+44(0)7700 900 ###',
'+44(0)7700 900###',
'+44(0)7700900###',
)
formats = (
'0113 496 0###',
'0113 4960###',
'01134960###',
'(0113) 496 0###',
'(0113) 4960###',
'(0113)4960###',
'+44113 496 0###',
'+44113 4960###',
'+441134960###',
'+44(0)113 496 0###',
'+44(0)113 4960###',
'+44(0)1134960###',
'0114 496 0###',
'0114 4960###',
'01144960###',
'(0114) 496 0###',
'(0114) 4960###',
'(0114)4960###',
'+44114 496 0###',
'+44114 4960###',
'+441144960###',
'+44(0)114 496 0###',
'+44(0)114 4960###',
'+44(0)1144960###',
'0115 496 0###',
'0115 4960###',
'01154960###',
'(0115) 496 0###',
'(0115) 4960###',
'(0115)4960###',
'+44115 496 0###',
'+44115 4960###',
'+441154960###',
'+44(0)115 496 0###',
'+44(0)115 4960###',
'+44(0)1154960###',
'0116 496 0###',
'0116 4960###',
'01164960###',
'(0116) 496 0###',
'(0116) 4960###',
'(0116)4960###',
'+44116 496 0###',
'+44116 4960###',
'+441164960###',
'+44(0)116 496 0###',
'+44(0)116 4960###',
'+44(0)1164960###',
'0117 496 0###',
'0117 4960###',
'01174960###',
'(0117) 496 0###',
'(0117) 4960###',
'(0117)4960###',
'+44117 496 0###',
'+44117 4960###',
'+441174960###',
'+44(0)117 496 0###',
'+44(0)117 4960###',
'+44(0)1174960###',
'0118 496 0###',
'0118 4960###',
'01184960###',
'(0118) 496 0###',
'(0118) 4960###',
'(0118)4960###',
'+44118 496 0###',
'+44118 4960###',
'+441184960###',
'+44(0)118 496 0###',
'+44(0)118 4960###',
'+44(0)1184960###',
'0121 496 0###',
'0121 4960###',
'01214960###',
'(0121) 496 0###',
'(0121) 4960###',
'(0121)4960###',
'+44121 496 0###',
'+44121 4960###',
'+441214960###',
'+44(0)121 496 0###',
'+44(0)121 4960###',
'+44(0)1214960###',
'0131 496 0###',
'0131 4960###',
'01314960###',
'(0131) 496 0###',
'(0131) 4960###',
'(0131)4960###',
'+44131 496 0###',
'+44131 4960###',
'+441314960###',
'+44(0)131 496 0###',
'+44(0)131 4960###',
'+44(0)1314960###',
'0141 496 0###',
'0141 4960###',
'01414960###',
'(0141) 496 0###',
'(0141) 4960###',
'(0141)4960###',
'+44141 496 0###',
'+44141 4960###',
'+441414960###',
'+44(0)141 496 0###',
'+44(0)141 4960###',
'+44(0)1414960###',
'0151 496 0###',
'0151 4960###',
'01514960###',
'(0151) 496 0###',
'(0151) 4960###',
'(0151)4960###',
'+44151 496 0###',
'+44151 4960###',
'+441514960###',
'+44(0)151 496 0###',
'+44(0)151 4960###',
'+44(0)1514960###',
'0161 496 0###',
'0161 4960###',
'01614960###',
'(0161) 496 0###',
'(0161) 4960###',
'(0161)4960###',
'+44161 496 0###',
'+44161 4960###',
'+441614960###',
'+44(0)161 496 0###',
'+44(0)161 4960###',
'+44(0)1614960###',
'0191 498 0###',
'0191 4960###',
'01914960###',
'(0191) 496 0###',
'(0191) 4960###',
'(0191)4960###',
'+44191 496 0###',
'+44191 4960###',
'+441914960###',
'+44(0)191 496 0###',
'+44(0)191 4960###',
'+44(0)1914960###',
'020 7946 0###',
'020 74960###',
'02074960###',
'(020) 7496 0###',
'(020) 74960###',
'(020)74960###',
'+4420 7496 0###',
'+4420 74960###',
'+442074960###',
'+44(0)20 7496 0###',
'+44(0)20 74960###',
'+44(0)2074960###',
'028 9018 0###',
'028 9018###',
'0289018###',
'(028) 9018 0###',
'(028) 9018###',
'(028)9018###',
'+4428 9018 0###',
'+4428 9018###',
'+44289018###',
'+44(0)28 9018 0###',
'+44(0)28 9018###',
'+44(0)289018###',
'029 2018 0###',
'029 2018###',
'0292018###',
'(029) 2018 0###',
'(029) 2018###',
'(029)2018###',
'+4429 2018 0###',
'+4429 2018###',
'+44292018###',
'+44(0)29 2018 0###',
'+44(0)29 2018###',
'+44(0)292018###',
'01632 960 ###',
'01632 960###',
'01632960###',
'(01632) 960 ###',
'(01632) 960###',
'(01632)960###',
'+441632 960 ###',
'+441632 960###',
'+441632960###',
'+44(0)1632 960 ###',
'+44(0)1632 960###',
'+44(0)1632960###',
'0306 999 0###',
'0306 9990###',
'03069990###',
'(0306) 999 0###',
'(0306) 9990###',
'(0306)9990###',
'+44306 999 0###',
'+44306 9990###',
'+443069990###',
'+44(0)306 999 0###',
'+44(0)306 9990###',
'+44(0)3069990###',
'0808 157 0###',
'0808 1570###',
'08081570###',
'(0808) 157 0###',
'(0808) 1570###',
'(0808)1570###',
'+44808 157 0###',
'+44808 1570###',
'+448081570###',
'+44(0)808 157 0###',
'+44(0)808 1570###',
'+44(0)8081570###',
'0909 879 0###',
'0909 8790###',
'09098790###',
'(0909) 879 0###',
'(0909) 8790###',
'(0909)8790###',
'+44909 879 0###',
'+44909 8790###',
'+449098790###',
'+44(0)909 879 0###',
'+44(0)909 8790###',
'+44(0)9098790###',
)
def cellphone_number(self) -> str:
pattern: str = self.random_element(self.cellphone_formats)
return self.numerify(self.generator.parse(pattern))
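# Usage sketch (assumption: this module is wired up as faker's en_GB phone_number provider):
#   from faker import Faker
#   fake = Faker("en_GB")
#   fake.cellphone_number()   # e.g. '07700 900 123' - always an Ofcom drama-reserved range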
|
85192
|
import torch
from torch.distributions import Categorical
from survae.distributions.conditional import ConditionalDistribution
from survae.utils import sum_except_batch
class ConditionalCategorical(ConditionalDistribution):
"""A Categorical distribution with conditional logits."""
def __init__(self, net):
super(ConditionalCategorical, self).__init__()
self.net = net
def cond_dist(self, context):
logits = self.net(context)
return Categorical(logits=logits)
def log_prob(self, x, context):
dist = self.cond_dist(context)
return sum_except_batch(dist.log_prob(x))
def sample(self, context):
dist = self.cond_dist(context)
return dist.sample()
def sample_with_log_prob(self, context):
dist = self.cond_dist(context)
z = dist.sample()
log_prob = dist.log_prob(z)
log_prob = sum_except_batch(log_prob)
return z, log_prob
def logits(self, context):
return self.cond_dist(context).logits
def probs(self, context):
return self.cond_dist(context).probs
def mode(self, context):
return self.cond_dist(context).logits.argmax(-1)
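# Minimal usage sketch (illustrative; any nn.Module mapping context -> logits works as `net`):
#   net = torch.nn.Linear(16, 10)                     # context dim 16, 10 categories
#   dist = ConditionalCategorical(net)
#   context = torch.randn(4, 16)
#   z, log_prob = dist.sample_with_log_prob(context)  # z: (4,), log_prob: (4,)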
|
85238
|
from StringIO import StringIO
import pandas as pd
from harvest import Harvest
from domain import Domain
from .stream import init_plot
from django.conf import settings
ENABLE_STREAM_VIZ = settings.ENABLE_STREAM_VIZ
class PlotsNotReadyException(Exception):
pass
class AcheDashboard(object):
def __init__(self, crawl):
self.crawl = crawl
if self.crawl.crawler != "ache":
raise ValueError("Crawl must be using the Ache crawler.")
self.harvest = Harvest(crawl)
self.domain = Domain(crawl)
def get_harvest_plot(self):
# TODO: Remove Pokemon exception catching
try:
script, div = self.harvest.create()
except Exception:
return [None, None]
return [script, div]
def get_domain_plot(self):
# TODO: Remove Pokemon exception catching
try:
script, div = self.domain.create()
except Exception:
return [None, None]
return [script, div]
def get_relevant_seeds(self):
# Converts string to StringIO to allow pandas to read it as a file
seeds = pd.read_csv(StringIO(self.domain.get_relevant_data()),
delimiter='\t', header=None,
names=['url', 'timestamp'])
return seeds['url'].to_dict().values()
def get_plots(self):
harvest_plot = self.get_harvest_plot()
domain_plot = self.get_domain_plot()
if harvest_plot != [None, None]:
return {
'scripts': [domain_plot[0], harvest_plot[0]],
'divs': [domain_plot[1], harvest_plot[1]],
}
else:
return {
'scripts': None,
'divs': None,
}
class NutchDashboard(object):
def __init__(self, crawl):
self.crawl = crawl
if self.crawl.crawler != "nutch":
raise ValueError("Crawl must be using the Nutch crawler.")
def get_plots(self):
# TODO: For simultaneous crawl monitoring need to use unique crawl ids
if ENABLE_STREAM_VIZ:
script = init_plot(self.crawl.name)
else:
script = None
return {
'scripts': [script],
'divs': [],
}
|
85277
|
import numpy as np
from typing import Tuple
from typing import List
from typing import Any
import matplotlib.pyplot as plt
import cv2
from GroundedScan.gym_minigrid.minigrid import DIR_TO_VEC
# TODO faster
def topo_sort(items, constraints):
if not constraints:
return items
items = list(items)
constraints = list(constraints)
out = []
while len(items) > 0:
roots = [
i for i in items
if not any(c[1] == i for c in constraints)
]
assert len(roots) > 0, (items, constraints)
to_pop = roots[0]
items.remove(to_pop)
constraints = [c for c in constraints if c[0] != to_pop]
out.append(to_pop)
return out
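# Illustrative example: constraints are (before, after) pairs, so
#   topo_sort(['b', 'a', 'c'], [('a', 'b'), ('b', 'c')]) -> ['a', 'b', 'c']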
def random_weights(size: int) -> np.ndarray:
return 2 * (np.random.random(size) - 0.5)
def accept_weights(size: int) -> np.ndarray:
return np.ones(size)
def plan_step(position: Tuple[int, int], move_direction: int):
"""
:param position: current position of form (x-axis, y-axis) (i.e. column, row)
:param move_direction: East is 0, south is 1, west is 2, north is 3.
:return: next position of form (x-axis, y-axis) (i.e. column, row)
"""
assert 0 <= move_direction < 4
dir_vec = DIR_TO_VEC[move_direction]
return position + dir_vec
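# Illustrative example (assuming gym_minigrid's DIR_TO_VEC order east/south/west/north):
#   plan_step((2, 3), 0) -> array([3, 3])   # one step east
#   plan_step((2, 3), 3) -> array([2, 2])   # one step north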
def one_hot(size: int, idx: int) -> np.ndarray:
one_hot_vector = np.zeros(size, dtype=int)
one_hot_vector[idx] = 1
return one_hot_vector
def generate_possible_object_names(color: str, shape: str) -> List[str]:
# TODO: does this still make sense when size is not small or large
names = [shape, ' '.join([color, shape])]
return names
def save_counter(description, counter, file):
file.write(description + ": \n")
for key, occurrence_count in counter.items():
file.write(" {}: {}\n".format(key, occurrence_count))
def bar_plot(values: dict, title: str, save_path: str, errors={}, y_axis_label="Occurrence"):
sorted_values = list(values.items())
sorted_values = [(y, x) for x, y in sorted_values]
sorted_values.sort()
values_per_label = [value[0] for value in sorted_values]
if len(errors) > 0:
sorted_errors = [errors[value[1]] for value in sorted_values]
else:
sorted_errors = None
labels = [value[1] for value in sorted_values]
assert len(labels) == len(values_per_label)
y_pos = np.arange(len(labels))
plt.bar(y_pos, values_per_label, yerr=sorted_errors, align='center', alpha=0.5)
plt.gcf().subplots_adjust(bottom=0.2, )
plt.xticks(y_pos, labels, rotation=90, fontsize="xx-small")
plt.ylabel(y_axis_label)
plt.title(title)
plt.savefig(save_path)
plt.close()
def grouped_bar_plot(values: dict, group_one_key: Any, group_two_key: Any, title: str, save_path: str,
errors_group_one={}, errors_group_two={}, y_axis_label="Occurrence", sort_on_key=True):
sorted_values = list(values.items())
if sort_on_key:
sorted_values.sort()
values_group_one = [value[1][group_one_key] for value in sorted_values]
values_group_two = [value[1][group_two_key] for value in sorted_values]
if len(errors_group_one) > 0:
sorted_errors_group_one = [errors_group_one[value[0]] for value in sorted_values]
sorted_errors_group_two = [errors_group_two[value[0]] for value in sorted_values]
else:
sorted_errors_group_one = None
sorted_errors_group_two = None
labels = [value[0] for value in sorted_values]
assert len(labels) == len(values_group_one)
assert len(labels) == len(values_group_two)
y_pos = np.arange(len(labels))
fig, ax = plt.subplots()
width = 0.35
p1 = ax.bar(y_pos, values_group_one, width, yerr=sorted_errors_group_one, align='center', alpha=0.5)
p2 = ax.bar(y_pos + width, values_group_two, width, yerr=sorted_errors_group_two, align='center', alpha=0.5)
plt.gcf().subplots_adjust(bottom=0.2, )
plt.xticks(y_pos, labels, rotation=90, fontsize="xx-small")
plt.ylabel(y_axis_label)
plt.title(title)
ax.legend((p1[0], p2[0]), (group_one_key, group_two_key))
plt.savefig(save_path)
plt.close()
def numpy_array_to_image(numpy_array, image_name):
plt.imsave(image_name, numpy_array)
def image_to_numpy_array(image_path):
im = cv2.imread(image_path)
return np.flip(im, 2) # cv2 returns image in BGR order
|
85312
|
import bpy
import os
bl_info = {
"name" : "plantFEM_export", # プラグイン名
"author" : "<NAME>", # 作者
"version" : (0,1), # プラグインのバージョン
"blender" : (2, 80, 0), # プラグインが動作するBlenderのバージョン
"location" : "File > Export > plantFEM_export", # Blender内部でのプラグインの位置づけ
"description" : "File export for plantFEM.", # プラグインの説明
"warning" : "",
"wiki_url" : "", # プラグインの説明が存在するWikiページのURL
"tracker_url" : "", # Blender Developer OrgのスレッドURL
"category" : "Import-Export" # プラグインのカテゴリ名
}
def write_some_data(context, filepath, use_some_setting):
print("running write_plantFEM_data...")
#f = open(filepath, 'w', encoding='utf-8')
num_wa = 0
#objs = [obj for obj in scene.objects if obj.name.startswith("Seed_")]
#f.write(str(objs) )
sname = filepath
#sname = sname+".f90"
sf = open(str(sname), 'w')
sf.write("program main\n")
sf.write(" use SeedClass\n")
sf.write(" implicit none\n")
seedlist = []
# detect seed objects
for i in range(len(bpy.context.editable_objects)):
if str(bpy.context.editable_objects[i].data ).find('Mesh("') == -1:
continue
st = str(bpy.context.editable_objects[i].name )
st = st.replace("bpy_struct,", " ")
st = st.replace('Mesh("', " ")
st = st.replace('")>', " ")
st = st.replace('<', " ")
st = st.replace(' ', "")
st = st.replace('_RNA_UI',"")
st = st.replace('_RNA_UI',"")
# import the object is Seed_ object to script
if str(st).find("Seed_") >= 0 :
sf.write("type(Seed_) :: seed"+str(i)+"\n")
# detect seed objects
for i in range(len(bpy.context.editable_objects)):
#f.write( str(bpy.context.editable_objects[i].display_bounds_type ))
#f.write("\n")
if str(bpy.context.editable_objects[i].data ).find('Mesh("') == -1:
continue
#f.write("\n")
#f.write( str(bpy.context.editable_objects[i].name ))
#f.write("\n")
st = str(bpy.context.editable_objects[i].name )
st = st.replace("bpy_struct,", " ")
st = st.replace('Mesh("', " ")
st = st.replace('")>', " ")
st = st.replace('<', " ")
st = st.replace(' ', "")
st = st.replace('_RNA_UI',"")
st = st.replace('_RNA_UI',"")
#f.write(st)
#f.write("\n")
# import the object is Seed_ object to script
if str(st).find("Seed_") >= 0 :
sf.write("call seed"+str(i)+"%create(Name='seed"+str(i)+"'")
seedlist.append("seed"+str(i))
st = str(bpy.context.editable_objects[i].data )
st = st.replace("bpy_struct,", " ")
st = st.replace('Mesh("', " ")
st = st.replace('")>', " ")
st = st.replace('<', " ")
st = st.replace(' ', "")
st = st.replace('_RNA_UI',"")
#f.write(st)
#f.write("\n")
if str(st).lower() == "sphere":
sf.write(",MeshType='Sphere3D'")
elif str(st).lower() == "cube":
sf.write(",MeshType='Cube'")
n=0
m=0
#f.write("\n")
for mykey, myvalue in bpy.context.editable_objects[i].items():
n=n+1
m=1
#f.write(str(n-m)+"\n")
n=0
m=0
for mykey, myvalue in bpy.context.editable_objects[i].items():
n=n+1
if n == 1:
continue
#f.write( str(mykey) + " " + str(myvalue) )
if str(mykey).lower() == "x_num" or str(mykey).lower() == "xnum" :
sf.write(",x_num="+str(myvalue) )
if str(mykey).lower() == "y_num" or str(mykey).lower() == "ynum" :
sf.write(",y_num="+str(myvalue)+"&\n" )
if str(mykey).lower() == "z_num" or str(mykey).lower() == "znum" :
sf.write(",z_num="+str(myvalue)+"&\n" )
if str(mykey).lower() == "youngsmodulus" or str(mykey).lower() == "youngmodulus" :
sf.write(",YoungsModulus="+str(myvalue)+"d0 &\n" )
if str(mykey).lower() == "poissonratio" or str(mykey).lower() == "poissonsratio" :
sf.write(",PoissonRatio="+str(myvalue)+"d0 &\n")
if str(mykey).lower() == "permiability" or str(mykey).lower() == "conductance" :
sf.write(",Permiability=dble("+str( float(myvalue) )+") &\n" )
if str(mykey).lower() == "a_psi" or str(mykey).lower() == "apsi" :
sf.write(",a_Psi="+str(myvalue)+"d0 &\n" )
if str(mykey).lower() == "a_p" or str(mykey).lower() == "ap" :
sf.write(",a_P="+str(myvalue)+"d0 &\n" )
if str(mykey).lower() == "theta_eq" or str(mykey).lower() == "thetaeq" :
sf.write(",theta_eq="+str(myvalue)+"d0 &\n" )
if str(mykey).lower() == "psi_eq" or str(mykey).lower() == "psieq" :
sf.write(",Psi_eq="+str(myvalue)+"d0 &\n" )
if str(mykey).lower() == "a_e" or str(mykey).lower() == "ae" :
sf.write(",a_E="+str(myvalue)+"d0 &\n" )
if str(mykey).lower() == "a_v" or str(mykey).lower() == "av" :
sf.write(",a_v="+str(myvalue)+"d0 &\n" )
if str(mykey).lower() == "e_eq" or str(mykey).lower() == "eeq" :
sf.write(",E_eq="+str(myvalue)+"d0 &\n" )
if str(mykey).lower() == "v_eq" or str(mykey).lower() == "veq" :
sf.write(",v_eq="+str(myvalue)+"d0 &\n" )
#if str(mykey) == "WaterAbsorption" :
# m=1
#elif str(mykey) == "waterabsorption" :
# m=1
#elif str(mykey) == "Waterabsorption" :
# m=1
#elif str(mykey) == "waterAbsorption" :
# m=1
#else:
#f.write("\n")
#num_wa=num_wa+m
#f.write( str(bpy.context.editable_objects[i].active_material ))
#f.write("scale\n")
for j in range(len(bpy.context.editable_objects[i].scale)):
#f.write( str(bpy.context.editable_objects[i].scale[j]) )
#f.write("\n")
if j == 0:
sf.write(",x_len="+str( float(2.0)*float(bpy.context.editable_objects[i].scale[j]))+"d0&\n")
if j == 1:
sf.write(",y_len="+str( float(2.0)*float(bpy.context.editable_objects[i].scale[j]))+"d0&\n")
if j == 2:
sf.write(",z_len="+str( float(2.0)*float(bpy.context.editable_objects[i].scale[j]))+"d0)\n")
sf.write("\n")
#f.write("\n")
#f.write("location\n")
#f.write("\n")
sf.write("call seed"+str(i)+"%move(")
for j in range(len(bpy.context.editable_objects[i].location)):
#f.write( str(bpy.context.editable_objects[i].location[j]) )
if j == 0:
sf.write("x="+str( float(bpy.context.editable_objects[i].location[j]))+"d0&\n")
if j == 1:
sf.write(",y="+str( float(bpy.context.editable_objects[i].location[j]))+"d0&\n")
if j == 2:
sf.write(",z="+str( float(bpy.context.editable_objects[i].location[j]))+"d0)\n")
sf.write("\n")
#f.write("\n")
#f.write("rotation\n")
sf.write("call seed"+str(i)+"%rotate(")
for j in range(len(bpy.context.editable_objects[i].delta_rotation_euler)):
#f.write( str(bpy.context.editable_objects[i].delta_rotation_euler[j]) )
#f.write("\n")
if j == 0:
sf.write("x="+str( float(bpy.context.editable_objects[i].delta_rotation_euler[j]))+"d0&\n")
if j == 1:
sf.write(",y="+str( float(bpy.context.editable_objects[i].delta_rotation_euler[j]))+"d0&\n")
if j == 2:
sf.write(",z="+str( float(bpy.context.editable_objects[i].delta_rotation_euler[j]))+"d0)\n")
sf.write("\n")
#f.write("\n")
sf.write("call seed"+str(i)+"%gmsh(Name='seed"+str(i)+"')")
sf.write("\n")
for i in range(len(bpy.context.editable_objects)):
#f.write( str(bpy.context.editable_objects[i].display_bounds_type ))
#f.write("\n")
if str(bpy.context.editable_objects[i].data ).find('Mesh("') == -1:
continue
#f.write("\n")
#f.write( str(bpy.context.editable_objects[i].name ))
#f.write("\n")
st = str(bpy.context.editable_objects[i].name )
st = st.replace("bpy_struct,", " ")
st = st.replace('Mesh("', " ")
st = st.replace('")>', " ")
st = st.replace('<', " ")
st = st.replace(' ', "")
st = st.replace('_RNA_UI',"")
st = st.replace('_RNA_UI',"")
#f.write(st)
#f.write("\n")
# import the object is Seed_ object to script
if str(st).find("BC_") >= 0 :
# Boundary conditions >> for all seed objects
for seed in seedlist:
n=0
m=0
for mykey, myvalue in bpy.context.editable_objects[i].items():
n=n+1
if n == 1:
continue
m=0
if str(mykey).lower() == "disp_x" or str(mykey).lower() == "dispx" :
m=1
if str(mykey).lower() == "disp_y" or str(mykey).lower() == "dispy" :
m=2
if str(mykey).lower() == "disp_z" or str(mykey).lower() == "dispz" :
m=3
if str(mykey).lower() == "watercontent" or str(mykey).lower() == "water_content" :
m=4
if m==1:
sf.write("call "+str(seed)+"%env(")
sf.write("disp_x="+str(myvalue)+"d0" )
elif m==2:
sf.write("call "+str(seed)+"%env(")
sf.write("disp_y="+str(myvalue)+"d0" )
elif m==3:
sf.write("call "+str(seed)+"%env(")
sf.write("disp_z="+str(myvalue)+"d0" )
elif m==4:
sf.write("call "+str(seed)+"%env(")
sf.write("WaterContent="+str(myvalue)+"d0" )
else:
continue
x_max = float(bpy.context.editable_objects[i].scale[0]) \
+ float(bpy.context.editable_objects[i].location[0])
x_min = -float(bpy.context.editable_objects[i].scale[0]) \
+ float(bpy.context.editable_objects[i].location[0])
y_max = float(bpy.context.editable_objects[i].scale[1]) \
+ float(bpy.context.editable_objects[i].location[1])
y_min = -float(bpy.context.editable_objects[i].scale[1]) \
+ float(bpy.context.editable_objects[i].location[1])
z_max = float(bpy.context.editable_objects[i].scale[2]) \
+ float(bpy.context.editable_objects[i].location[2])
z_min = -float(bpy.context.editable_objects[i].scale[2]) \
+ float(bpy.context.editable_objects[i].location[2])
#f.write("scale\n")
sf.write(",x_max="+str(x_max)+"d0 &\n" )
sf.write(",x_min="+str(x_min)+"d0 &\n" )
sf.write(",y_max="+str(y_max)+"d0 &\n" )
sf.write(",y_min="+str(y_min)+"d0 &\n" )
sf.write(",z_max="+str(z_max)+"d0 &\n" )
sf.write(",z_min="+str(z_min)+"d0" )
sf.write(")")
sf.write("\n")
for seed in seedlist:
sf.write("\n" )
sf.write("call "+str(seed)+"%grow(" )
m=0
for mykey, myvalue in bpy.context.scene.world.items():
if str(mykey).lower() == "timestep" or str(mykey).lower() == "time_step" :
sf.write("timestep="+str(int(myvalue) )+"&\n")
m=1
if m==0:
sf.write("timestep="+str(1)+"&\n")
for mykey, myvalue in bpy.context.scene.world.items():
if str(mykey).lower() == "dt" or str(mykey).lower() == "d_t" :
sf.write(",dt="+str(float(myvalue) )+"d0"+"&\n")
for mykey, myvalue in bpy.context.scene.world.items():
if str(mykey).lower() == "display" or str(mykey).lower() == "export" :
sf.write(",Display=."+str(myvalue)+".&\n")
for mykey, myvalue in bpy.context.scene.world.items():
if str(mykey).lower() == "nr_tol" or str(mykey).lower() == "nrtol" :
sf.write(",nr_tol="+str(float(myvalue))+"d0"+"&\n")
for mykey, myvalue in bpy.context.scene.world.items():
if str(mykey).lower() == "interval" or str(mykey).lower() == "inter" :
sf.write(",interval="+str(int(myvalue) )+"&\n")
sf.write(")" )
sf.write("\n" )
#f.write("Hello World %s" % use_some_setting)
#sf.write(str(bpy.context.scene.world.items() ) )
#f.write("EOF")
#f.close()
#f = open(filepath, 'r', encoding='utf-8')
#f.close()
sf.write("end program")
sf.close()
#create .f90 script
return {'FINISHED'}
# ExportHelper is a helper class that defines the filename and an
# invoke() function which calls the file selector.
from bpy_extras.io_utils import ExportHelper
from bpy.props import StringProperty, BoolProperty, EnumProperty
from bpy.types import Operator
class ExportSomeData(Operator, ExportHelper):
"""This appears in the tooltip of the operator and in the generated docs"""
bl_idname = "export_test.some_data" # important since its how bpy.ops.import_test.some_data is constructed
bl_label = "Export plantFEM Data"
# ExportHelper mixin class uses this
filename_ext = ".f90"
filter_glob: StringProperty(
default="*.f90",
options={'HIDDEN'},
maxlen=255, # Max internal buffer length, longer would be clamped.
)
# List of operator properties, the attributes will be assigned
# to the class instance from the operator settings before calling.
use_setting: BoolProperty(
name="Example Boolean",
description="Example Tooltip",
default=True,
)
type: EnumProperty(
name="Example Enum",
description="Choose between two items",
items=(
('OPT_A', "First Option", "Description one"),
('OPT_B', "Second Option", "Description two"),
),
default='OPT_A',
)
def execute(self, context):
return write_some_data(context, self.filepath, self.use_setting)
# Only needed if you want to add into a dynamic menu
def menu_func_export(self, context):
self.layout.operator(ExportSomeData.bl_idname, text="Export plantFEM objects and world")
def register():
bpy.utils.register_class(ExportSomeData)
bpy.types.TOPBAR_MT_file_export.append(menu_func_export)
def unregister():
bpy.utils.unregister_class(ExportSomeData)
bpy.types.TOPBAR_MT_file_export.remove(menu_func_export)
if __name__ == "__main__":
register()
# test call
bpy.ops.export_test.some_data('INVOKE_DEFAULT')
#messagebox.askquestion("Run this simulation?", "Run this simulation?")
|
85322
|
from abc import ABC, abstractmethod
import logging
import selectors
import multiprocessing
from defusedxml import ElementTree as etree
from FreeTAKServer.controllers.DatabaseControllers.DatabaseController import DatabaseController
from FreeTAKServer.controllers.services.service_abstracts import ServerServiceInterface, ServiceBase
from FreeTAKServer.controllers.serializers.protobuf_serializer import ProtobufSerializer
from FreeTAKServer.controllers.serializers.xml_serializer import XmlSerializer
from FreeTAKServer.controllers.XMLCoTController import XMLCoTController
from FreeTAKServer.model.ClientInformation import ClientInformation
from FreeTAKServer.model.SpecificCoT.SendDisconnect import SendDisconnect
from FreeTAKServer.model.protobufModel.fig_pb2 import FederatedEvent
from FreeTAKServer.model.federate import Federate
class FederationServiceBase(ServerServiceInterface, ServiceBase):
def __init__(self):
self.federates: {str: Federate}
self.logger: logging.Logger
self.sel: selectors.select
def _process_protobuff_to_object(self, protobuf_object: FederatedEvent):
""" this method will convert the protobuf object to a FTS model object and xml string
it will also add the remarks to indicate that the client or cot is comming from a federate
Args:
protobuf_object:
Returns:
"""
model_object, fts_object = XMLCoTController().determine_model_object_type(protobuf_object.event.type) # pylint: disable=no-member; member does exist
fts_object = fts_object()
model_object = ProtobufSerializer().from_format_to_fts_object(protobuf_object, model_object())
xml_object = XmlSerializer().from_fts_object_to_format(model_object)
fts_object.setModelObject(model_object)
fts_object.setXmlString(etree.tostring(xml_object))
"""xmlstring = event
if xmlstring.find('detail') and xmlstring.find('detail').
xmlstring.find('detail').remove(xmlstring.find('detail').find('remarks'))
xmlstring.find('detail').extend([child for child in xmlstring.find('detail')])"""
return fts_object
def _get_header_length(self, header):
return int.from_bytes(header, 'big')
def _generate_header(self, contentlength):
return contentlength.to_bytes(4, byteorder="big")
def check_dest_user(self, data):
""" this method is responsible for validating that the federate has
any of the intended recipients for the CoT
Args:
data: a CoT object
Returns boolean: True if the federate has any dest client otherwise false
"""
def send_data_to_clients(self, data):
from defusedxml import ElementTree as etree
try:
if self.federates:
xmlstring = data.xmlString
detail = etree.fromstring(xmlstring).find('detail')
if detail:
protobuf = ProtobufSerializer().from_fts_object_to_format(data.modelObject)
try:
protobuf.event.other = etree.tostring(detail) # pylint: disable=no-member; member does exist
protobufstring = protobuf.SerializeToString()
header = self._generate_header(len(protobufstring))
protobufstring = header + protobufstring
print(protobufstring)
except Exception as e:
self.logger.warning("creating protobuf message failed " + str(e))
return None
for client in self.federates.values():
client.conn.send(protobufstring)
else:
return None
else:
return None
except Exception as e:
import traceback
trace = traceback.format_exc()
self.logger.warning("sending data to federates failed " + str(e))
def send_connection_data(self, CoT: ClientInformation) -> None:
try:
if self.federates:
proto_obj = FederatedEvent()
proto_obj.contact.uid = str(CoT.modelObject.uid) # pylint: disable=no-member; member does exist
proto_obj.contact.callsign = str(CoT.modelObject.detail.contact.callsign) # pylint: disable=no-member; member does exist
proto_obj.contact.operation = 1 # pylint: disable=no-member; member does exist
proto_str = proto_obj.SerializeToString()
header = self._generate_header(len(proto_str))
for fed in self.federates.values():
fed.conn.send(header + proto_str)
return None
else:
return None
except Exception as e:
self.logger.warning("exception throw sending new connection data to federates " + str(e))
return None
def _send_connected_clients(self, connection):
try:
clients = self.db.query_user()
except Exception as e:
self.logger.warning("error thrown in getting clients from DataBase to send to federates " + str(e))
return None
for client in clients:
try:
proto_obj = FederatedEvent()
proto_obj.contact.uid = str(client.uid) # pylint: disable=no-member; member does exist
proto_obj.contact.callsign = str(client.CoT.detail.contact.callsign) # pylint: disable=no-member; member does exist
proto_obj.contact.operation = 1 # pylint: disable=no-member; member does exist
proto_str = proto_obj.SerializeToString()
header = self._generate_header(len(proto_str))
connection.send(header + proto_str)
except Exception as e:
self.logger.warning("error thrown sending federate data to newly connected federate " + str(e))
continue
def disconnect_client(self, id: str) -> None:
try:
self.logger.info("disconnecting client")
try:
federate = self.federates[id]
except Exception as e:
self.logger.warning("federate array has no item with uid " + str(id) + " federates array is len " + str(
len(self.federates)))
return None
try:
federate.conn.close()
self.sel.unregister(federate.conn)
del (self.federates[federate.uid])
except Exception as e:
self.logger.warning("exception thrown disconnecting client " + str(e))
try:
self.db.remove_ActiveFederation(f'id == "{federate.uid}"')
except Exception as e:
self.logger.warning("exception thrown removing outgoing federation from DB " + str(e))
return None
except Exception as e:
self.logger.warning("exception thrown accessing client for disconnecting client " + str(e))
def send_disconnection_data(self, CoT: SendDisconnect) -> None:
if self.federates:
proto_obj = FederatedEvent()
proto_obj.contact.uid = str(CoT.modelObject.detail.link.uid) # pylint: disable=no-member; member does exist
proto_obj.contact.callsign = str(CoT.modelObject.detail.link.type) # pylint: disable=no-member; member does exist
proto_obj.contact.operation = 4 # pylint: disable=no-member; member does exist
proto_str = proto_obj.SerializeToString()
header = self._generate_header(len(proto_str))
for fed in self.federates.values():
fed.conn.send(header + proto_str)
return None
else:
return None
def start(self, pipe):
"""this is an abstract start method, and should be implemented by any child classes.
the following hinted vars should be implemented and create_context and main methods
should be called."""
self.db: DatabaseController
self.pipe: multiprocessing.Pipe
|
85398
|
import numpy as np
from py_diff_stokes_flow.env.env_base import EnvBase
from py_diff_stokes_flow.common.common import ndarray
from py_diff_stokes_flow.core.py_diff_stokes_flow_core import ShapeComposition2d, StdIntArray2d
class FluidicTwisterEnv3d(EnvBase):
def __init__(self, seed, folder):
np.random.seed(seed)
cell_nums = (32, 32, 16)
E = 100
nu = 0.499
vol_tol = 1e-2
edge_sample_num = 2
EnvBase.__init__(self, cell_nums, E, nu, vol_tol, edge_sample_num, folder)
# Initialize the parametric shapes.
self._parametric_shape_info = [ ('polar_bezier-6', 51)]
# Initialize the node conditions.
self._node_boundary_info = []
inlet_radius = 0.3
outlet_radius = 0.3
inlet_velocity = 1.0
outlet_velocity = 2.0
cx, cy, _ = self.cell_nums()
assert cx == cy
nx, ny, nz = self.node_nums()
def get_bezier(radius):
bezier = ShapeComposition2d()
params = np.concatenate([
np.full(8, radius) * cx,
ndarray([0.5 * cx, 0.5 * cy, 0])
])
bezier.AddParametricShape('polar_bezier', params.size)
cxy = StdIntArray2d((int(cx), int(cy)))
bezier.Initialize(cxy, params, True)
return bezier
inlet_bezier = get_bezier(inlet_radius)
outlet_bezier = get_bezier(outlet_radius)
for i in range(nx):
for j in range(ny):
if inlet_bezier.signed_distance((i, j)) > 0:
self._node_boundary_info.append(((i, j, 0, 0), 0))
self._node_boundary_info.append(((i, j, 0, 1), 0))
self._node_boundary_info.append(((i, j, 0, 2), inlet_velocity))
# Initialize the interface.
self._interface_boundary_type = 'free-slip'
# Compute the target velocity field (for rendering purposes only)
desired_omega = 2 * outlet_velocity / (cx * outlet_radius)
target_velocity_field = np.zeros((nx, ny, 3))
for i in range(nx):
for j in range(ny):
if outlet_bezier.signed_distance((i, j)) > 0:
x, y = i / cx, j / cy
# u = (-(j - ny / 2), (i - nx / 2), 0) * c.
# ux_pos = (-j, i + 1, 0) * c.
# uy_pos = (-j - 1, i, 0) * c.
# curl = (i + 1) * c + (j + 1) * c - i * c - j * c.
# = (i + j + 2 - i - j) * c = 2 * c.
# c = outlet_vel / (num_cells[0] * outlet_radius).
c = desired_omega / 2
target_velocity_field[i, j] = ndarray([
-(y - 0.5) * c,
(x - 0.5) * c,
0
])
# Other data members.
self._inlet_radius = inlet_radius
self._outlet_radius = outlet_radius
self._inlet_velocity = inlet_velocity
self._target_velocity_field = target_velocity_field
self._inlet_bezier = inlet_bezier
self._outlet_bezier = outlet_bezier
self._desired_omega = desired_omega
def _variables_to_shape_params(self, x):
x = ndarray(x).copy().ravel()
assert x.size == 32
cx, cy, _ = self._cell_nums
assert cx == cy
params = np.concatenate([
np.full(8, self._inlet_radius),
x,
np.full(8, self._outlet_radius),
ndarray([0.5, 0.5, 0]),
])
params[:-1] *= cx
# Jacobian.
J = np.zeros((params.size, x.size))
for i in range(x.size):
J[8 + i, i] = cx
return ndarray(params).copy(), ndarray(J).copy()
def _loss_and_grad_on_velocity_field(self, u):
u_field = self.reshape_velocity_field(u)
grad = np.zeros(u_field.shape)
nx, ny, nz = self.node_nums()
assert nx == ny
loss = 0
cnt = 0
for i in range(nx):
for j in range(ny):
if self._outlet_bezier.signed_distance((i, j)) > 0:
cnt += 1
uxy = u_field[i, j, nz - 1, :2]
ux_pos = u_field[i + 1, j, nz - 1, :2]
uy_pos = u_field[i, j + 1, nz - 1, :2]
# Compute the curl.
curl = ux_pos[1] - uy_pos[0] - uxy[1] + uxy[0]
loss += (curl - self._desired_omega) ** 2
# ux_pos[1]
grad[i + 1, j, nz - 1, 1] += 2 * (curl - self._desired_omega)
grad[i, j + 1, nz - 1, 0] += -2 * (curl - self._desired_omega)
grad[i, j, nz - 1, 1] += -2 * (curl - self._desired_omega)
grad[i, j, nz - 1, 0] += 2 * (curl - self._desired_omega)
loss /= cnt
grad /= cnt
return loss, ndarray(grad).ravel()
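# Note (illustrative): `curl` above is the forward-difference discretization
#   curl ~= d(u_y)/dx - d(u_x)/dy = (ux_pos[1] - uxy[1]) - (uy_pos[0] - uxy[0])
# evaluated on the top z-slice; the loss is its mean squared deviation from
# self._desired_omega over the outlet region.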
def _color_velocity(self, u):
return float(np.linalg.norm(u) / 2)
def sample(self):
return np.random.uniform(low=self.lower_bound(), high=self.upper_bound())
def lower_bound(self):
return np.full(32, 0.1)
def upper_bound(self):
return np.full(32, 0.4)
|
85402
|
import time
from azureml.core import (
Workspace, Datastore, Dataset,
ComputeTarget, Environment,
Experiment
)
from azureml.pipeline.steps import PythonScriptStep
from azureml.pipeline.core import Pipeline, PipelineData
from azureml.data import OutputFileDatasetConfig
from azureml.core.runconfig import RunConfiguration
from azureml.core.authentication import InteractiveLoginAuthentication
# import ngrok_utils
# Run params
dataset_name = "test-features"
compute_target_name = "mev-compute"
source_dir = "./src_train"
script_name = "train.py"
# Auth
tenant_id = "<PASSWORD>"
interactive_auth = InteractiveLoginAuthentication(tenant_id)
# Fetch workspace
ws = Workspace.from_config(auth=interactive_auth)
# Initialize conda environment from config file
env = Environment.from_conda_specification(
name='env',
file_path='./.azureml/env_train.yml'
)
# Fetch compute
compute_target = ComputeTarget(workspace=ws, name=compute_target_name)
# # Start ngrok tunnel
# host, port = ngrok_utils.start_tunnel("8000")
#
# # Pass pycharm debug host and port as environment variables
# env.environment_variables = {
# 'PYCHARM_DEBUG_PORT': port,
# 'PYCHARM_DEBUG_HOST': host
# }
#
# Setup run config
run_config = RunConfiguration()
run_config.target = compute_target
run_config.environment = env
run_config.docker.use_docker = True
# Load dataset from default datastore
datastore = Datastore.get_default(ws)
# try:
# dataset = Dataset.get_by_name(ws, dataset_name)
# except:
dataset_path = [(datastore, dataset_name)]
dataset = Dataset.File.from_files(dataset_path).register(ws, dataset_name)
# Define names/inputs/outputs
dataset_input_name = 'features'
dataset_input = dataset.as_named_input(dataset_input_name)
output = OutputFileDatasetConfig(
name='report',
destination=(datastore, "report")
).as_upload(overwrite=True).register_on_complete('report')
# Define pipeline
pipeline_step = PythonScriptStep(
script_name=script_name,
source_directory=source_dir,
arguments=[
"--input_name", dataset_input_name,
"--output", output,
],
inputs=[dataset_input],
outputs=[output],
compute_target=compute_target,
runconfig=run_config,
allow_reuse=False
)
pipeline = Pipeline(
workspace=ws,
steps=[pipeline_step]
)
experiment = Experiment(workspace=ws, name='automl')
pipeline_run = experiment.submit(pipeline)
# # Kill ngrok on run end
# kill_status = ["Finished", "Failed"]
# while pipeline_run.get_detailed_status()['status'] not in kill_status:
# try:
# time.sleep(10)
# except KeyboardInterrupt:
# pipeline_run.cancel()
#
# ngrok_utils.kill_all()
|
85415
|
import torch
from torch import nn
from .utils import EarlyStopping, appendabledict, \
calculate_multiclass_accuracy, calculate_multiclass_f1_score,\
append_suffix, compute_dict_average
from copy import deepcopy
import numpy as np
from torch.utils.data import RandomSampler, BatchSampler
from .categorization import summary_key_dict
class LinearProbe(nn.Module):
def __init__(self, input_dim, num_classes=255):
super().__init__()
self.model = nn.Linear(in_features=input_dim, out_features=num_classes)
def forward(self, feature_vectors):
return self.model(feature_vectors)
class FullySupervisedLinearProbe(nn.Module):
def __init__(self, encoder, num_classes=255):
super().__init__()
self.encoder = deepcopy(encoder)
self.probe = LinearProbe(input_dim=self.encoder.hidden_size,
num_classes=num_classes)
def forward(self, x):
feature_vec = self.encoder(x)
return self.probe(feature_vec)
class ProbeTrainer():
def __init__(self,
encoder=None,
method_name="my_method",
wandb=None,
patience=15,
num_classes=256,
fully_supervised=False,
save_dir=".models",
device=torch.device("cuda" if torch.cuda.is_available() else "cpu"),
lr=5e-4,
epochs=100,
batch_size=64,
representation_len=256):
self.encoder = encoder
self.wandb = wandb
self.device = device
self.fully_supervised = fully_supervised
self.save_dir = save_dir
self.num_classes = num_classes
self.epochs = epochs
self.lr = lr
self.batch_size = batch_size
self.patience = patience
self.method = method_name
self.feature_size = representation_len
self.loss_fn = nn.CrossEntropyLoss()
# bad convention, but these get set in "create_probes"
self.probes = self.early_stoppers = self.optimizers = self.schedulers = None
def create_probes(self, sample_label):
if self.fully_supervised:
assert self.encoder != None, "for fully supervised you must provide an encoder!"
self.probes = {k: FullySupervisedLinearProbe(encoder=self.encoder,
num_classes=self.num_classes).to(self.device) for k in
sample_label.keys()}
else:
self.probes = {k: LinearProbe(input_dim=self.feature_size,
num_classes=self.num_classes).to(self.device) for k in sample_label.keys()}
self.early_stoppers = {
k: EarlyStopping(patience=self.patience, verbose=False, name=k + "_probe", save_dir=self.save_dir)
for k in sample_label.keys()}
self.optimizers = {k: torch.optim.Adam(list(self.probes[k].parameters()),
eps=1e-5, lr=self.lr) for k in sample_label.keys()}
self.schedulers = {
k: torch.optim.lr_scheduler.ReduceLROnPlateau(self.optimizers[k], patience=5, factor=0.2, verbose=True,
mode='max', min_lr=1e-5) for k in sample_label.keys()}
def generate_batch(self, episodes, episode_labels):
total_steps = sum([len(e) for e in episodes])
assert total_steps > self.batch_size
print('Total Steps: {}'.format(total_steps))
# Episode sampler
# Sample `num_samples` episodes then batchify them with `self.batch_size` episodes per batch
sampler = BatchSampler(RandomSampler(range(len(episodes)),
replacement=True, num_samples=total_steps),
self.batch_size, drop_last=True)
for indices in sampler:
episodes_batch = [episodes[x] for x in indices]
episode_labels_batch = [episode_labels[x] for x in indices]
xs, labels = [], appendabledict()
for ep_ind, episode in enumerate(episodes_batch):
# Get one sample from this episode
t = np.random.randint(len(episode))
xs.append(episode[t])
labels.append_update(episode_labels_batch[ep_ind][t])
yield torch.stack(xs).float().to(self.device) / 255., labels
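# Descriptive note (added): each yielded batch is a tensor of stacked frames divided by 255
# together with an appendabledict of labels, using one randomly chosen timestep per sampled episode.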
def probe(self, batch, k):
probe = self.probes[k]
probe.to(self.device)
if self.fully_supervised:
# if method is supervised batch is a batch of frames and probe is a full encoder + linear or nonlinear probe
preds = probe(batch)
elif not self.encoder:
# if encoder is None then inputs are vectors
f = batch.detach()
assert len(f.squeeze().shape) == 2, "if input is not a batch of vectors you must specify an encoder!"
preds = probe(f)
else:
with torch.no_grad():
self.encoder.to(self.device)
f = self.encoder(batch).detach()
preds = probe(f)
return preds
def do_one_epoch(self, episodes, label_dicts):
sample_label = label_dicts[0][0]
epoch_loss, accuracy = {k + "_loss": [] for k in sample_label.keys() if
not self.early_stoppers[k].early_stop}, \
{k + "_acc": [] for k in sample_label.keys() if
not self.early_stoppers[k].early_stop}
data_generator = self.generate_batch(episodes, label_dicts)
for step, (x, labels_batch) in enumerate(data_generator):
for k, label in labels_batch.items():
if self.early_stoppers[k].early_stop:
continue
optim = self.optimizers[k]
optim.zero_grad()
label = torch.tensor(label).long().to(self.device)
preds = self.probe(x, k)
loss = self.loss_fn(preds, label)
epoch_loss[k + "_loss"].append(loss.detach().item())
preds = preds.cpu().detach().numpy()
preds = np.argmax(preds, axis=1)
label = label.cpu().detach().numpy()
accuracy[k + "_acc"].append(calculate_multiclass_accuracy(preds,
label))
if self.probes[k].training:
loss.backward()
optim.step()
epoch_loss = {k: np.mean(loss) for k, loss in epoch_loss.items()}
accuracy = {k: np.mean(acc) for k, acc in accuracy.items()}
return epoch_loss, accuracy
def do_test_epoch(self, episodes, label_dicts):
sample_label = label_dicts[0][0]
accuracy_dict, f1_score_dict = {}, {}
pred_dict, all_label_dict = {k: [] for k in sample_label.keys()}, \
{k: [] for k in sample_label.keys()}
# collect all predictions first
data_generator = self.generate_batch(episodes, label_dicts)
for step, (x, labels_batch) in enumerate(data_generator):
for k, label in labels_batch.items():
label = torch.tensor(label).long().cpu()
all_label_dict[k].append(label)
preds = self.probe(x, k).detach().cpu()
pred_dict[k].append(preds)
for k in all_label_dict.keys():
preds, labels = torch.cat(pred_dict[k]).cpu().detach().numpy(),\
torch.cat(all_label_dict[k]).cpu().detach().numpy()
preds = np.argmax(preds, axis=1)
accuracy = calculate_multiclass_accuracy(preds, labels)
f1score = calculate_multiclass_f1_score(preds, labels)
accuracy_dict[k] = accuracy
f1_score_dict[k] = f1score
return accuracy_dict, f1_score_dict
def train(self, tr_eps, val_eps, tr_labels, val_labels):
# if not self.encoder:
# assert len(tr_eps[0][0].squeeze().shape) == 2, "if input is a batch of vectors you must specify an encoder!"
sample_label = tr_labels[0][0]
self.create_probes(sample_label)
e = 0
all_probes_stopped = np.all([early_stopper.early_stop for early_stopper in self.early_stoppers.values()])
while (not all_probes_stopped) and e < self.epochs:
epoch_loss, accuracy = self.do_one_epoch(tr_eps, tr_labels)
self.log_results(e, epoch_loss, accuracy)
val_loss, val_accuracy = self.evaluate(val_eps, val_labels, epoch=e)
# update all early stoppers
for k in sample_label.keys():
if not self.early_stoppers[k].early_stop:
self.early_stoppers[k](val_accuracy["val_" + k + "_acc"], self.probes[k])
for k, scheduler in self.schedulers.items():
if not self.early_stoppers[k].early_stop:
scheduler.step(val_accuracy['val_' + k + '_acc'])
e += 1
all_probes_stopped = np.all([early_stopper.early_stop for early_stopper in self.early_stoppers.values()])
print("All probes early stopped!")
def evaluate(self, val_episodes, val_label_dicts, epoch=None):
for k, probe in self.probes.items():
probe.eval()
epoch_loss, accuracy = self.do_one_epoch(val_episodes, val_label_dicts)
epoch_loss = {"val_" + k: v for k, v in epoch_loss.items()}
accuracy = {"val_" + k: v for k, v in accuracy.items()}
self.log_results(epoch, epoch_loss, accuracy)
for k, probe in self.probes.items():
probe.train()
return epoch_loss, accuracy
def test(self, test_episodes, test_label_dicts, epoch=None):
for k in self.early_stoppers.keys():
self.early_stoppers[k].early_stop = False
for k, probe in self.probes.items():
probe.eval()
acc_dict, f1_dict = self.do_test_epoch(test_episodes, test_label_dicts)
acc_dict, f1_dict = postprocess_raw_metrics(acc_dict, f1_dict)
print("""In our paper, we report F1 scores and accuracies averaged across each category.
That is, we take a mean across all state variables in a category to get the average score for that category.
Then we average all the category averages to get the final score that we report per game for each method.
These scores are called \'across_categories_avg_acc\' and \'across_categories_avg_f1\' respectively
We do this to prevent categories with large number of state variables dominating the mean F1 score.
""")
self.log_results("Test", acc_dict, f1_dict)
return acc_dict, f1_dict
def log_results(self, epoch_idx, *dictionaries):
print("Epoch: {}".format(epoch_idx))
for dictionary in dictionaries:
for k, v in dictionary.items():
print("\t {}: {:8.4f}".format(k, v))
print("\t --")
def postprocess_raw_metrics(acc_dict, f1_dict):
acc_overall_avg, f1_overall_avg = compute_dict_average(acc_dict), \
compute_dict_average(f1_dict)
acc_category_avgs_dict, f1_category_avgs_dict = compute_category_avgs(acc_dict), \
compute_category_avgs(f1_dict)
acc_avg_across_categories, f1_avg_across_categories = compute_dict_average(acc_category_avgs_dict), \
compute_dict_average(f1_category_avgs_dict)
acc_dict.update(acc_category_avgs_dict)
f1_dict.update(f1_category_avgs_dict)
acc_dict["overall_avg"], f1_dict["overall_avg"] = acc_overall_avg, f1_overall_avg
acc_dict["across_categories_avg"], f1_dict["across_categories_avg"] = [acc_avg_across_categories,
f1_avg_across_categories]
acc_dict = append_suffix(acc_dict, "_acc")
f1_dict = append_suffix(f1_dict, "_f1")
return acc_dict, f1_dict
def compute_category_avgs(metric_dict):
category_dict = {}
for category_name, category_keys in summary_key_dict.items():
category_values = [v for k, v in metric_dict.items() if k in category_keys]
if len(category_values) < 1:
continue
category_mean = np.mean(category_values)
category_dict[category_name + "_avg"] = category_mean
return category_dict
|
85441
|
import argparse
import json
import os
from os import listdir
from os.path import isfile
import shutil
from genson import SchemaBuilder
from enum import Enum
import copy
import flatdict
import pandas as pd
import numpy as np
from collections import OrderedDict
from functools import reduce # forward compatibility for Python 3
import operator
import sys
from echr.utils.folders import make_build_folder
from echr.utils.cli import TAB
from echr.utils.logger import getlogger
from rich.markdown import Markdown
from rich.console import Console
log = getlogger()
__console = Console(record=True)
DELIMITER = '.'
type_priority = OrderedDict([
('number', float),
('integer', int),
('string', str)
])
class COL_HINT(str, Enum):
HOT_ONE = 'hot_one'
POSITIONAL = 'positional'
def format_structured_json(cases_list):
res = []
representents = {}
extractedapp = {}
scl = {}
decision_body = {}
for name in cases_list:
with open(name, 'r') as f:
c = json.load(f)
c['representedby'] = [r for r in c['representedby'] if r != 'N/A']
representents[c['appno']] = {'representedby': c['representedby']}
extractedapp[c['appno']] = {'appnos': c['extractedappno']}
decision_body[c['appno']] = {
'name': [e['name'] for e in c['decision_body']],
'role': {e['name']: e['role'] for e in c['decision_body'] if 'role' in e}
}
scl[c['appno']] = {'scl': c['scl']}
c['respondent'] = c['respondent'].split(';') #
c['applicability'] = c['applicability'].strip().split(';')
c['appno'] = c['appno'].split(';')[0]
c['decisiondate'] = c['decisiondate'].split(' ')[0]
c['judgementdate'] = c['judgementdate'].split(' ')[0]
c['introductiondate'] = c['introductiondate'].split(' ')[0]
c['kpdate'] = c['kpdate'].split(' ')[0]
c['separateopinion'] = True if c['separateopinion'] == 'TRUE' else False
del c['representedby']
del c['extractedappno']
del c['decision_body']
del c['scl']
del c['documents']
del c['content']
del c['externalsources']
del c['kpthesaurus']
del c['__conclusion']
del c['__articles']
if not len(c['issue']):
del c['issue']
else:
c['issue'] = sorted(c['issue'])
if not len(c['applicability']):
del c['applicability']
res.append(c)
return res, representents, extractedapp, scl, decision_body
def get_by_path(root, items):
return reduce(operator.getitem, items, root)
def set_by_path(root, items, value):
get_by_path(root, items[:-1])[items[-1]] = value
def determine_schema(X):
builder = SchemaBuilder()
for x in X:
builder.add_object(x)
schema = builder
return schema
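# Illustrative example: genson's SchemaBuilder infers a JSON Schema from sample objects,
# so determine_schema([{'a': 1}]).to_schema() returns roughly
#   {'type': 'object', 'properties': {'a': {'type': 'integer'}}, 'required': ['a'], ...}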
def get_flat_type_mapping(flat_schema):
flat_type_mapping = {}
for k in flat_schema.keys():
if k.endswith(DELIMITER + 'type'):
key = k.replace('properties' + DELIMITER, '').replace(DELIMITER + 'type', '')
flat_type_mapping[key] = flat_schema[k]
return flat_type_mapping
def get_flat_domain_mapping(X, flat_type_mapping):
flat_domain_mapping = {}
for x in X:
flat = flatdict.FlatterDict(x, delimiter='.')
for k in flat_type_mapping.keys():
v = flat.get(k)
if v is not None:
if k not in flat_domain_mapping:
flat_domain_mapping[k] = set()
type_ = flat_type_mapping[k]
try:
if type_ == 'array':
flat_domain_mapping[k].update(get_by_path(x, k.split('.')))
else:
flat_domain_mapping[k].add(get_by_path(x, k.split('.')))
                except Exception:
if not flat_domain_mapping[k]:
del flat_domain_mapping[k]
for k in flat_domain_mapping:
flat_domain_mapping[k] = list(flat_domain_mapping[k])
return flat_domain_mapping
def flatten_dataset(X, flat_type_mapping, schema_hints=None):
if schema_hints is None:
schema_hints = {}
flat_X = []
for x in X:
flat = flatdict.FlatterDict(x, delimiter=DELIMITER)
c_x = copy.deepcopy(x)
for k in flat_type_mapping.keys():
col_type = schema_hints.get(k, {}).get('col_type')
if col_type not in [None, COL_HINT.POSITIONAL]:
continue
v = flat.get(k)
if v is not None:
sort = schema_hints.get(k, {}).get('sort', False)
if sort:
type_ = flat_type_mapping[k]
if type_ == 'array':
item_types = flat_type_mapping.get(k + '.items')
a = get_by_path(c_x, k.split('.'))
if isinstance(item_types, list):
try:
a = sorted(a)
                            except Exception:
                                print('# Warning: mixed-type array with types: {}'.format(', '.join(item_types)))
                                print('# Warning: no comparison operator provided. Trying to assess the proper cast...')
for t in type_priority:
try:
a = list(map(type_priority[t], a))
print('# Casting \'{}\' to {}'.format(k, t))
break
                                    except Exception:
log.error('Could not cast \'{}\' to {}'.format(k, t))
else:
print('# Error: Could not find any way to sort {}'.format(k))
raise Exception('Could not find any way to sort {}'.format(k))
set_by_path(c_x, k.split('.'), sorted(a))
flat = flatdict.FlatterDict(c_x, delimiter=DELIMITER)
flat_X.append(flat)
return flat_X
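# Vectorized one-hot encoding for a column whose cells are lists: factorize all list
# elements, count occurrences per row with bincount, and join the resulting indicator
# columns (named '<column>=<value>') back onto the dataframe.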
def hot_one_encoder_on_list(df, column):
v = [x if isinstance(x, list) else [] for x in df[column].values]
l = [len(x) for x in v]
f, u = pd.factorize(np.concatenate(v))
n, m = len(v), u.size
i = np.arange(n).repeat(l)
dummies = pd.DataFrame(
np.bincount(i * m + f, minlength=n * m).reshape(n, m),
df.index, map(lambda x: str(column) + '=' + str(x), u)
)
    return df.drop(column, axis=1).join(dummies)
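# normalize() ties the pieces together: infer a JSON schema for the documents, flatten
# them into dotted columns, then one-hot encode every column flagged as COL_HINT.HOT_ONE
# in schema_hints (with a special case for the 'conclusion' field).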
def normalize(X, schema_hints=None):
if schema_hints is None:
schema_hints = {}
def hot_one_encoder(df, columns):
return pd.get_dummies(df, prefix_sep="=", columns=columns)
schema = determine_schema(X)
flat_schema = flatdict.FlatDict(schema.to_schema(), delimiter=DELIMITER)
flat_type_mapping = get_flat_type_mapping(flat_schema)
flat_domain_mapping = get_flat_domain_mapping(X, flat_type_mapping)
flat_X = flatten_dataset(X, flat_type_mapping, schema_hints)
columns_to_encode = [k for k, v in schema_hints.items() if v['col_type'] == COL_HINT.HOT_ONE]
df = pd.DataFrame(flat_X)
for c in df.columns:
f = next((k for k in columns_to_encode if c.startswith(k)), None)
if f:
            df = df.drop(c, axis=1)
encoded = []
for c in columns_to_encode:
type_ = flat_type_mapping[c]
if type_ == 'array':
if c == 'conclusion':
articles = set()
for x in X:
for e in x[c]:
if 'article' in e:
articles.add(e['article'])
articles = sorted(articles)
df2 = []
for x in X:
e = []
xart = {v['article']: v['type'] for v in x['conclusion'] if 'article' in v}
for a in articles:
v = 0
if a in xart:
if xart[a] == 'violation':
v = 1
else:
v = -1
e.append(v)
df2.append(e)
df2 = pd.DataFrame(df2, columns=list(map(lambda x: 'ccl_article={}'.format(x), articles)))
encoded.append(df2)
else:
df2 = pd.DataFrame(X)[[c]]
e = hot_one_encoder_on_list(df2, c)
encoded.append(e)
else:
df2 = pd.DataFrame(X)[c]
e = hot_one_encoder(df2, [c])
encoded.append(e)
df = pd.concat([df] + encoded, axis=1)
return df, schema, flat_schema, flat_type_mapping, flat_domain_mapping
def run(console, build, title, output_prefix='cases', force=False):
__console = console
global print
print = __console.print
print(Markdown("- **Step configuration**"))
print(TAB + "> Prepare release folder structure")
paths = ['unstructured', 'structured', 'raw']
for p in paths:
make_build_folder(console, os.path.join(build, p), force, strict=False)
print(Markdown("- **Normalize database**"))
input_folder = os.path.join(build, 'raw', 'preprocessed_documents')
cases_files = [os.path.join(input_folder, f) for f in listdir(input_folder)
if isfile(os.path.join(input_folder, f)) and '.json' in f]
print(TAB + "> Prepare unstructured cases [green][DONE]")
# Unstructured
with open(os.path.join(build, 'unstructured', 'cases.json'), 'w') as outfile:
outfile.write('[\n')
for i, f in enumerate(cases_files):
with open(f) as json_file:
data = json.load(json_file)
json.dump(data, outfile, indent=4)
if i != len(cases_files) - 1:
outfile.write(',\n')
outfile.write('\n]')
# Structured
print(TAB + "> Generate flat cases [green][DONE]")
    flat_cases, representatives, extractedapp, scl, decision_body = format_structured_json(cases_files)
    print(TAB + "> Flat cases size: {}MiB".format(sys.getsizeof(flat_cases) / (1024 * 1024)))
schema_hints = {
'article': {
'col_type': COL_HINT.HOT_ONE
},
'documentcollectionid': {
'col_type': COL_HINT.HOT_ONE
},
'applicability': {
'col_type': COL_HINT.HOT_ONE
},
'paragraphs': {
'col_type': COL_HINT.HOT_ONE
},
'conclusion': {
'col_type': COL_HINT.HOT_ONE,
'sub_element': 'flatten'
}
}
output_path = os.path.join(build, 'structured')
with open(os.path.join(output_path, 'flat_cases.json'), 'w') as outfile:
json.dump(flat_cases, outfile, indent=4)
with open(os.path.join(output_path, 'schema_hint.json'), 'w') as outfile:
json.dump(schema_hints, outfile, indent=4)
X = flat_cases
df, schema, flat_schema, flat_type_mapping, flat_domain_mapping = normalize(X, schema_hints)
df.to_json(os.path.join(output_path, '{}.json'.format(output_prefix)), orient='records')
df.to_csv(os.path.join(output_path, '{}.csv'.format(output_prefix)))
json_files = [
('schema', schema.to_schema()),
('flat_schema', flat_schema.as_dict()),
('flat_type_mapping', flat_type_mapping),
('flat_domain_mapping', flat_domain_mapping)
]
for f in json_files:
with open(os.path.join(output_path, '{}_{}.json'.format(output_prefix, f[0])), 'w') as outfile:
json.dump(f[1], outfile, indent=4)
os.remove(os.path.join(output_path, 'flat_cases.json'))
os.remove(os.path.join(output_path, 'cases_flat_schema.json'))
os.remove(os.path.join(output_path, 'cases_flat_type_mapping.json'))
print(TAB + '> Generate appnos matrice [green][DONE]')
matrice_appnos = {}
for k, v in extractedapp.items():
matrice_appnos[k] = {e:1 for e in v['appnos']}
with open(os.path.join(output_path, 'matrice_appnos.json'), 'w') as outfile:
json.dump(matrice_appnos, outfile, indent=4)
print(TAB + '> Generate scl matrice [green][DONE]')
matrice_scl = {}
for k, v in scl.items():
matrice_scl[k] = {e: 1 for e in v['scl']}
with open(os.path.join(output_path, 'matrice_scl.json'), 'w') as outfile:
json.dump(matrice_scl, outfile, indent=4)
print(TAB + '> Generate representatives matrice [green][DONE]')
matrice_representedby = {}
for k, v in representatives.items():
matrice_representedby[k] = {e: 1 for e in v['representedby']}
with open(os.path.join(output_path, 'matrice_representatives.json'), 'w') as outfile:
json.dump(matrice_representedby, outfile, indent=4)
print(TAB + '> Generate decision body matrice [green][DONE]')
matrice_decision_body = {}
for k, v in decision_body.items():
        matrice_decision_body[k] = dict(v['role'])
with open(os.path.join(output_path, 'matrice_decision_body.json'), 'w') as outfile:
json.dump(matrice_decision_body, outfile, indent=4)
print(TAB + '> Create archives [green][DONE]')
# Raw
shutil.make_archive(os.path.join(build, 'raw', 'judgments'), 'zip',
os.path.join(build, 'raw', 'judgments'))
# All
from zipfile import ZipFile
with ZipFile(os.path.join(build, 'all.zip'), 'w') as zipObj:
# Iterate over all the files in directory
folders = ['unstructured', 'raw', 'structured']
for f in folders:
for folderName, _, filenames in os.walk(os.path.join(build, f)):
for filename in filenames:
if not filename.endswith('.zip'):
filePath = os.path.join(folderName, filename)
zipObj.write(filePath)
def main(args):
console = Console(record=True)
run(console,
build=args.build,
title=args.title,
force=args.f)
def parse_args(parser):
args = parser.parse_args()
# Check path
return args
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='Normalize any database of arbitrarily nested documents.')
parser.add_argument('--build', type=str, default="./build/echr_database/")
parser.add_argument('--title', type=str)
parser.add_argument('--schema_hints', type=str)
parser.add_argument('--output_prefix', type=str)
parser.add_argument('-f', action='store_true')
parser.add_argument('-u', action='store_true')
args = parse_args(parser)
main(args)
|
85491
|
class InternalError(Exception):
pass
class InvalidAccessError(Exception):
pass
class InvalidStateError(Exception):
pass
|
85506
|
import os
from testglobals import config
import subprocess
import re
# Feature names generally follow the naming used by Linux's /proc/cpuinfo.
SUPPORTED_CPU_FEATURES = {
# These aren't comprehensive; they are only CPU features that we care about
# x86:
'sse', 'sse2', 'sse3', 'ssse3', 'sse4_1', 'sse4_2',
'avx1', 'avx2',
'popcnt', 'bmi1', 'bmi2'
}
cpu_feature_cache = None
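# Lazily populated by have_cpu_feature() on first use with the set of feature flags
# detected on the current machine.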
def get_cpu_features():
if config.os in ['mingw32', 'linux'] and os.path.exists('/proc/cpuinfo'):
f = open('/proc/cpuinfo').read()
flags = re.search(r'flags\s*:\s*.*$', f, re.M)
if flags is None:
print('get_cpu_features: failed to find cpu features')
return {}
flags = set(flags.group(0).split())
if 'pni' in flags:
flags.add('sse3')
flags.remove('pni')
return flags
elif config.os == 'darwin':
out = subprocess.check_output(['sysctl', 'hw']).decode('UTF-8')
features = set()
def check_feature(darwin_name, our_name=None):
if re.search(r'hw\.optional.%s:\s*1' % darwin_name, out) is not None:
features.add(darwin_name if our_name is None else our_name)
for feature in SUPPORTED_CPU_FEATURES:
check_feature(feature)
# A few annoying cases
check_feature('avx1_0', 'avx1')
check_feature('avx2_0', 'avx2')
return features
else:
# TODO: Add {Open,Free}BSD support
print('get_cpu_features: Lacking support for your platform')
return {}
def have_cpu_feature(feature):
"""
A testsuite predicate for testing the availability of CPU features.
"""
assert feature in SUPPORTED_CPU_FEATURES
    global cpu_feature_cache
    if cpu_feature_cache is None:
cpu_feature_cache = get_cpu_features()
print('Found CPU features:', ' '.join(cpu_feature_cache))
# Sanity checking
assert all(feat in SUPPORTED_CPU_FEATURES
for feat in cpu_feature_cache)
return feature in cpu_feature_cache
if __name__ == '__main__':
import sys
config.os = sys.argv[1]
print(get_cpu_features())
|
85519
|
import json
import logging
import random
from typing import Any, Callable, Dict, List, Literal, Optional, Sequence, Tuple, Union, overload
from urllib.parse import urlparse
import requests
from ens import ENS
from ens.abis import ENS as ENS_ABI, RESOLVER as ENS_RESOLVER_ABI
from ens.exceptions import InvalidName
from ens.main import ENS_MAINNET_ADDR
from ens.utils import (
address_to_reverse_domain,
is_none_or_zero_address,
normal_name_to_hash,
normalize_name,
)
from eth_typing import BlockNumber, HexStr
from web3 import HTTPProvider, Web3
from web3._utils.abi import get_abi_output_types
from web3._utils.contracts import find_matching_event_abi
from web3._utils.filters import construct_event_filter_params
from web3.datastructures import MutableAttributeDict
from web3.exceptions import (
BadFunctionCallOutput,
BadResponseFormat,
BlockNotFound,
TransactionNotFound,
)
from web3.types import BlockIdentifier, FilterParams
from rotkehlchen.chain.constants import DEFAULT_EVM_RPC_TIMEOUT
from rotkehlchen.chain.ethereum.contracts import EthereumContract
from rotkehlchen.chain.ethereum.graph import Graph
from rotkehlchen.chain.ethereum.modules.eth2.constants import ETH2_DEPOSIT
from rotkehlchen.chain.ethereum.types import EnsContractParams, string_to_ethereum_address
from rotkehlchen.chain.ethereum.utils import multicall, multicall_2
from rotkehlchen.constants.ethereum import ERC20TOKEN_ABI, ETH_SCAN, UNIV1_LP_ABI
from rotkehlchen.errors.misc import (
BlockchainQueryError,
InputError,
RemoteError,
UnableToDecryptRemoteData,
)
from rotkehlchen.errors.serialization import DeserializationError
from rotkehlchen.externalapis.etherscan import Etherscan
from rotkehlchen.fval import FVal
from rotkehlchen.greenlets import GreenletManager
from rotkehlchen.logging import RotkehlchenLogsAdapter
from rotkehlchen.serialization.deserialize import (
deserialize_ethereum_address,
deserialize_ethereum_transaction,
deserialize_int_from_hex,
)
from rotkehlchen.serialization.serialize import process_result
from rotkehlchen.types import (
ChecksumEthAddress,
EthereumTransaction,
EVMTxHash,
SupportedBlockchain,
Timestamp,
)
from rotkehlchen.user_messages import MessagesAggregator
from rotkehlchen.utils.misc import from_wei, hex_or_bytes_to_str
from rotkehlchen.utils.network import request_get_dict
from .types import NodeName
from .utils import ENS_RESOLVER_ABI_MULTICHAIN_ADDRESS
logger = logging.getLogger(__name__)
log = RotkehlchenLogsAdapter(logger)
def _is_synchronized(current_block: int, latest_block: int) -> Tuple[bool, str]:
""" Validate that the ethereum node is synchronized
within 20 blocks of latest block
Returns a tuple (results, message)
- result: Boolean for confirmation of synchronized
- message: A message containing information on what the status is.
"""
message = ''
if current_block < (latest_block - 20):
message = (
f'Found ethereum node but it is out of sync. {current_block} / '
f'{latest_block}. Will use etherscan.'
)
log.warning(message)
return False, message
return True, message
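# Maximum block span per eth_getLogs query; _query_web3_get_logs halves the range and
# retries whenever the node reports too many results or a query timeout.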
WEB3_LOGQUERY_BLOCK_RANGE = 250000
def _query_web3_get_logs(
web3: Web3,
filter_args: FilterParams,
from_block: int,
to_block: Union[int, Literal['latest']],
contract_address: ChecksumEthAddress,
event_name: str,
argument_filters: Dict[str, Any],
) -> List[Dict[str, Any]]:
until_block = web3.eth.block_number if to_block == 'latest' else to_block
events: List[Dict[str, Any]] = []
start_block = from_block
    # we know that in most of its early life the Eth2 contract address returns
    # a lot of results. So limit the query range to not hit the infura limits every time
    # suppress https://lgtm.com/rules/1507386916281/ since it does not apply here
infura_eth2_log_query = (
'infura.io' in web3.manager.provider.endpoint_uri and # type: ignore # noqa: E501 lgtm [py/incomplete-url-substring-sanitization]
contract_address == ETH2_DEPOSIT.address
)
block_range = initial_block_range = WEB3_LOGQUERY_BLOCK_RANGE
if infura_eth2_log_query:
block_range = initial_block_range = 75000
while start_block <= until_block:
filter_args['fromBlock'] = start_block
end_block = min(start_block + block_range, until_block)
filter_args['toBlock'] = end_block
log.debug(
'Querying web3 node for contract event',
contract_address=contract_address,
event_name=event_name,
argument_filters=argument_filters,
from_block=filter_args['fromBlock'],
to_block=filter_args['toBlock'],
)
# As seen in https://github.com/rotki/rotki/issues/1787, the json RPC, if it
# is infura can throw an error here which we can only parse by catching the exception
try:
new_events_web3: List[Dict[str, Any]] = [dict(x) for x in web3.eth.get_logs(filter_args)] # noqa: E501
except (ValueError, KeyError) as e:
if isinstance(e, ValueError):
try:
decoded_error = json.loads(str(e).replace("'", '"'))
except json.JSONDecodeError:
# reraise the value error if the error is not json
raise e from None
msg = decoded_error.get('message', '')
else: # temporary hack for key error seen from pokt
msg = 'query returned more than 10000 results'
# errors from: https://infura.io/docs/ethereum/json-rpc/eth-getLogs
if msg in ('query returned more than 10000 results', 'query timeout exceeded'):
block_range = block_range // 2
if block_range < 50:
raise # stop retrying if block range gets too small
# repeat the query with smaller block range
continue
# else, well we tried .. reraise the error
raise e
# Turn all HexBytes into hex strings
for e_idx, event in enumerate(new_events_web3):
new_events_web3[e_idx]['blockHash'] = event['blockHash'].hex()
new_topics = []
for topic in event['topics']:
new_topics.append(topic.hex())
new_events_web3[e_idx]['topics'] = new_topics
new_events_web3[e_idx]['transactionHash'] = event['transactionHash'].hex()
start_block = end_block + 1
events.extend(new_events_web3)
# end of the loop, end of 1 query. Reset the block range to max
block_range = initial_block_range
return events
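# ENS reverse-lookup helpers: build the namehash argument for an address's reverse
# record and encode/decode the corresponding resolver contract calls.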
def _prepare_ens_call_arguments(addr: ChecksumEthAddress) -> List[Any]:
try:
reversed_domain = address_to_reverse_domain(addr)
except (TypeError, ValueError) as e:
raise InputError(f'Address {addr} has incorrect format or type. {str(e)}') from e
normalized_domain_name = normalize_name(reversed_domain)
arguments = [normal_name_to_hash(normalized_domain_name)]
return arguments
def _encode_ens_contract(params: EnsContractParams) -> str:
contract = EthereumContract(address=params.address, abi=params.abi, deployed_block=0)
return contract.encode(method_name=params.method_name, arguments=params.arguments)
def _decode_ens_contract(params: EnsContractParams, result_encoded: Any) -> ChecksumEthAddress:
contract = EthereumContract(address=params.address, abi=params.abi, deployed_block=0)
result = contract.decode( # pylint: disable=E1136
result=result_encoded,
method_name=params.method_name,
arguments=params.arguments,
)[0]
return string_to_ethereum_address(result)
# TODO: Ideally all these should become configurable
# Taking LINKPOOL out since it's just really too slow and seems to not
# respond to the batched calls almost at all. Combined with web3.py retries
# this makes the tokens balance queries super slow.
OPEN_NODES = (
NodeName.MYCRYPTO,
NodeName.BLOCKSCOUT,
NodeName.AVADO_POOL,
NodeName.ONEINCH,
NodeName.MYETHERWALLET,
# NodeName.LINKPOOL,
NodeName.CLOUDFLARE_ETH,
NodeName.ETHERSCAN,
)
ETHEREUM_NODES_TO_CONNECT_AT_START = (
NodeName.OWN,
NodeName.MYCRYPTO,
NodeName.BLOCKSCOUT,
NodeName.ONEINCH,
NodeName.AVADO_POOL,
NodeName.MYETHERWALLET,
# NodeName.LINKPOOL,
NodeName.CLOUDFLARE_ETH,
)
OPEN_NODES_WEIGHT_MAP = { # Probability with which to select each node
NodeName.ETHERSCAN: 0.3,
NodeName.MYCRYPTO: 0.15,
NodeName.BLOCKSCOUT: 0.1,
NodeName.AVADO_POOL: 0.05,
NodeName.ONEINCH: 0.15,
NodeName.MYETHERWALLET: 0.15,
# NodeName.LINKPOOL: 0.05,
NodeName.CLOUDFLARE_ETH: 0.1,
}
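# Note: random.choices() treats these as relative weights, so they do not need to sum to 1.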
class EthereumManager():
def __init__(
self,
ethrpc_endpoint: str,
etherscan: Etherscan,
msg_aggregator: MessagesAggregator,
greenlet_manager: GreenletManager,
connect_at_start: Sequence[NodeName],
eth_rpc_timeout: int = DEFAULT_EVM_RPC_TIMEOUT,
) -> None:
log.debug(f'Initializing Ethereum Manager with own rpc endpoint: {ethrpc_endpoint}')
self.greenlet_manager = greenlet_manager
self.web3_mapping: Dict[NodeName, Web3] = {}
self.own_rpc_endpoint = ethrpc_endpoint
self.etherscan = etherscan
self.msg_aggregator = msg_aggregator
self.eth_rpc_timeout = eth_rpc_timeout
self.archive_connection = False
self.queried_archive_connection = False
for node in connect_at_start:
self.greenlet_manager.spawn_and_track(
after_seconds=None,
task_name=f'Attempt connection to {str(node)} ethereum node',
exception_is_error=True,
method=self.attempt_connect,
name=node,
ethrpc_endpoint=node.endpoint(self.own_rpc_endpoint),
mainnet_check=True,
)
self.blocks_subgraph = Graph(
'https://api.thegraph.com/subgraphs/name/blocklytics/ethereum-blocks',
)
# A cache for the erc20 contract info to not requery same one
self.contract_info_cache: Dict[ChecksumEthAddress, Dict[str, Any]] = {}
def connected_to_any_web3(self) -> bool:
return (
NodeName.OWN in self.web3_mapping or
NodeName.MYCRYPTO in self.web3_mapping or
NodeName.BLOCKSCOUT in self.web3_mapping or
NodeName.AVADO_POOL in self.web3_mapping
)
def default_call_order(self, skip_etherscan: bool = False) -> List[NodeName]:
"""Default call order for ethereum nodes
Own node always has preference. Then all other node types are randomly queried
in sequence depending on a weighted probability.
Some benchmarks on weighted probability based random selection when compared
to simple random selection. Benchmark was on blockchain balance querying with
29 ethereum accounts and at the time 1010 different ethereum tokens.
With weights: etherscan: 0.5, mycrypto: 0.25, blockscout: 0.2, avado: 0.05
===> Runs: 66, 58, 60, 68, 58 seconds
---> Average: 62 seconds
- Without weights
===> Runs: 66, 82, 72, 58, 72 seconds
---> Average: 70 seconds
"""
result = []
if NodeName.OWN in self.web3_mapping:
result.append(NodeName.OWN)
selection = list(OPEN_NODES)
if skip_etherscan:
selection.remove(NodeName.ETHERSCAN)
ordered_list = []
while len(selection) != 0:
weights = []
for entry in selection:
weights.append(OPEN_NODES_WEIGHT_MAP[entry])
node = random.choices(selection, weights, k=1)
ordered_list.append(node[0])
selection.remove(node[0])
return result + ordered_list
def attempt_connect(
self,
name: NodeName,
ethrpc_endpoint: str,
mainnet_check: bool = True,
) -> Tuple[bool, str]:
"""Attempt to connect to a particular node type
For our own node if the given rpc endpoint is not the same as the saved one
the connection is re-attempted to the new one
"""
message = ''
node_connected = self.web3_mapping.get(name, None) is not None
own_node_already_connected = (
name == NodeName.OWN and
self.own_rpc_endpoint == ethrpc_endpoint and
node_connected
)
if own_node_already_connected or (node_connected and name != NodeName.OWN):
return True, 'Already connected to an ethereum node'
try:
parsed_eth_rpc_endpoint = urlparse(ethrpc_endpoint)
if not parsed_eth_rpc_endpoint.scheme:
ethrpc_endpoint = f"http://{ethrpc_endpoint}"
provider = HTTPProvider(
endpoint_uri=ethrpc_endpoint,
request_kwargs={'timeout': self.eth_rpc_timeout},
)
ens = ENS(provider)
web3 = Web3(provider, ens=ens)
except requests.exceptions.RequestException:
message = f'Failed to connect to ethereum node {name} at endpoint {ethrpc_endpoint}'
log.warning(message)
return False, message
try:
is_connected = web3.isConnected()
except AssertionError:
# Terrible, terrible hack but needed due to https://github.com/rotki/rotki/issues/1817
is_connected = False
if is_connected:
# Also make sure we are actually connected to the Ethereum mainnet
synchronized = True
msg = ''
try:
if mainnet_check:
network_id = int(web3.net.version)
if network_id != 1:
message = (
f'Connected to ethereum node {name} at endpoint {ethrpc_endpoint} but '
f'it is not on the ethereum mainnet. The chain id '
f'the node is in is {network_id}.'
)
log.warning(message)
return False, message
try:
current_block = web3.eth.block_number # pylint: disable=no-member
latest_block = self.query_eth_highest_block()
except (requests.exceptions.RequestException, RemoteError) as e:
msg = f'Could not query latest block due to {str(e)}'
log.warning(msg)
synchronized = False
else:
synchronized, msg = _is_synchronized(current_block, latest_block)
except ValueError as e:
message = (
f'Failed to connect to ethereum node {name} at endpoint '
f'{ethrpc_endpoint} due to {str(e)}'
)
return False, message
if not synchronized:
self.msg_aggregator.add_warning(
f'We could not verify that ethereum node {name} is '
'synchronized with the ethereum mainnet. Balances and other queries '
'may be incorrect.',
)
log.info(f'Connected ethereum node {name} at {ethrpc_endpoint}')
self.web3_mapping[name] = web3
return True, ''
# else
message = f'Failed to connect to ethereum node {name} at endpoint {ethrpc_endpoint}'
log.warning(message)
return False, message
def set_rpc_endpoint(self, endpoint: str) -> Tuple[bool, str]:
""" Attempts to set the RPC endpoint for the user's own ethereum node
Returns a tuple (result, message)
- result: Boolean for success or failure of changing the rpc endpoint
- message: A message containing information on what happened. Can
be populated both in case of success or failure"""
if endpoint == '':
self.web3_mapping.pop(NodeName.OWN, None)
self.own_rpc_endpoint = ''
return True, ''
# else
result, message = self.attempt_connect(name=NodeName.OWN, ethrpc_endpoint=endpoint)
if result:
log.info('Setting own node ETH RPC endpoint', endpoint=endpoint)
self.own_rpc_endpoint = endpoint
return result, message
def query(self, method: Callable, call_order: Sequence[NodeName], **kwargs: Any) -> Any:
"""Queries ethereum related data by performing the provided method to all given nodes
        The first node in the call order that gets a successful response returns.
If none get a result then a remote error is raised
"""
for node in call_order:
web3 = self.web3_mapping.get(node, None)
if web3 is None and node != NodeName.ETHERSCAN:
continue
try:
result = method(web3, **kwargs)
except (
RemoteError,
requests.exceptions.RequestException,
BlockchainQueryError,
TransactionNotFound,
BlockNotFound,
BadResponseFormat,
ValueError, # Yabir saw this happen with mew node for unavailable method at node. Since it's generic we should replace if web3 implements https://github.com/ethereum/web3.py/issues/2448 # noqa: E501
) as e:
log.warning(f'Failed to query {node} for {str(method)} due to {str(e)}')
# Catch all possible errors here and just try next node call
continue
return result
        # no node in the call order list was successfully queried
raise RemoteError(
f'Failed to query {str(method)} after trying the following '
f'nodes: {[str(x) for x in call_order]}. Check logs for details.',
)
def _get_latest_block_number(self, web3: Optional[Web3]) -> int:
if web3 is not None:
return web3.eth.block_number
# else
return self.etherscan.get_latest_block_number()
def get_latest_block_number(self, call_order: Optional[Sequence[NodeName]] = None) -> int:
return self.query(
method=self._get_latest_block_number,
call_order=call_order if call_order is not None else self.default_call_order(),
)
def get_historical_eth_balance(
self,
address: ChecksumEthAddress,
block_number: int,
) -> Optional[FVal]:
"""Attempts to get a historical eth balance from the local own node only.
If there is no node or the node can't query historical balance (not archive) then
returns None"""
web3 = self.web3_mapping.get(NodeName.OWN)
if web3 is None:
return None
try:
result = web3.eth.get_balance(address, block_identifier=block_number)
except (
requests.exceptions.RequestException,
BlockchainQueryError,
KeyError, # saw this happen inside web3.py if resulting json contains unexpected key. Happened with mycrypto's node # noqa: E501
):
return None
try:
balance = from_wei(FVal(result))
except ValueError:
return None
return balance
def have_archive(self, requery: bool = False) -> bool:
"""Checks to see if our own connected node is an archive node
If requery is True it always queries the node. Otherwise it remembers last query.
"""
if self.queried_archive_connection and requery is False:
return self.archive_connection
balance = self.get_historical_eth_balance(
address=string_to_ethereum_address('0x50532e4Be195D1dE0c2E6DfA46D9ec0a4Fee6861'),
block_number=87042,
)
self.archive_connection = balance is not None and balance == FVal('5.1063307')
self.queried_archive_connection = True
return self.archive_connection
def query_eth_highest_block(self) -> BlockNumber:
""" Attempts to query an external service for the block height
Returns the highest blockNumber
May Raise RemoteError if querying fails
"""
url = 'https://api.blockcypher.com/v1/eth/main'
log.debug('Querying blockcypher for ETH highest block', url=url)
eth_resp: Optional[Dict[str, str]]
try:
eth_resp = request_get_dict(url)
except (RemoteError, UnableToDecryptRemoteData, requests.exceptions.RequestException):
eth_resp = None
block_number: Optional[int]
if eth_resp and 'height' in eth_resp:
block_number = int(eth_resp['height'])
log.debug('ETH highest block result', block=block_number)
else:
block_number = self.etherscan.get_latest_block_number()
log.debug('ETH highest block result', block=block_number)
return BlockNumber(block_number)
def get_eth_balance(self, account: ChecksumEthAddress) -> FVal:
"""Gets the balance of the given account in ETH
May raise:
- RemoteError if Etherscan is used and there is a problem querying it or
parsing its response
"""
result = self.get_multieth_balance([account])
return result[account]
def get_multieth_balance(
self,
accounts: List[ChecksumEthAddress],
call_order: Optional[Sequence[NodeName]] = None,
) -> Dict[ChecksumEthAddress, FVal]:
"""Returns a dict with keys being accounts and balances in ETH
May raise:
- RemoteError if an external service such as Etherscan is queried and
there is a problem with its query.
"""
balances: Dict[ChecksumEthAddress, FVal] = {}
log.debug(
'Querying ethereum chain for ETH balance',
eth_addresses=accounts,
)
result = ETH_SCAN.call(
ethereum=self,
method_name='etherBalances',
arguments=[accounts],
call_order=call_order if call_order is not None else self.default_call_order(),
)
balances = {}
for idx, account in enumerate(accounts):
balances[account] = from_wei(result[idx])
return balances
def get_block_by_number(
self,
num: int,
call_order: Optional[Sequence[NodeName]] = None,
) -> Dict[str, Any]:
return self.query(
method=self._get_block_by_number,
call_order=call_order if call_order is not None else self.default_call_order(),
num=num,
)
def _get_block_by_number(self, web3: Optional[Web3], num: int) -> Dict[str, Any]:
"""Returns the block object corresponding to the given block number
May raise:
- RemoteError if an external service such as Etherscan is queried and
there is a problem with its query.
- BlockNotFound if number used to lookup the block can't be found. Raised
by web3.eth.get_block().
"""
if web3 is None:
return self.etherscan.get_block_by_number(num)
block_data: MutableAttributeDict = MutableAttributeDict(web3.eth.get_block(num)) # type: ignore # pylint: disable=no-member # noqa: E501
block_data['hash'] = hex_or_bytes_to_str(block_data['hash'])
return dict(block_data)
def get_code(
self,
account: ChecksumEthAddress,
call_order: Optional[Sequence[NodeName]] = None,
) -> str:
return self.query(
method=self._get_code,
call_order=call_order if call_order is not None else self.default_call_order(),
account=account,
)
def _get_code(self, web3: Optional[Web3], account: ChecksumEthAddress) -> str:
"""Gets the deployment bytecode at the given address
May raise:
- RemoteError if Etherscan is used and there is a problem querying it or
parsing its response
"""
if web3 is None:
return self.etherscan.get_code(account)
return hex_or_bytes_to_str(web3.eth.getCode(account))
def ens_reverse_lookup(self, reversed_addresses: List[ChecksumEthAddress]) -> Dict[ChecksumEthAddress, Optional[str]]: # noqa: E501
"""Performs a reverse ENS lookup on a list of addresses
Because a multicall is used, no exceptions are raised.
If any exceptions occur, they are logged and None is returned for that
"""
human_names: Dict[ChecksumEthAddress, Optional[str]] = {}
# Querying resolvers' addresses
resolver_params = [
EnsContractParams(address=addr, abi=ENS_ABI, method_name='resolver', arguments=_prepare_ens_call_arguments(addr)) # noqa: E501
for addr in reversed_addresses
]
resolvers_output = multicall(
ethereum=self,
calls=[(ENS_MAINNET_ADDR, _encode_ens_contract(params=params)) for params in resolver_params], # noqa: E501
)
resolvers = []
# We need a new list for reversed_addresses because not all addresses have resolver
filtered_reversed_addresses = []
# Processing resolvers query output
for reversed_addr, params, resolver_output in zip(reversed_addresses, resolver_params, resolvers_output): # noqa: E501
decoded_resolver = _decode_ens_contract(params=params, result_encoded=resolver_output)
if is_none_or_zero_address(decoded_resolver):
human_names[reversed_addr] = None
continue
try:
deserialized_resolver = deserialize_ethereum_address(decoded_resolver)
except DeserializationError:
log.error(
f'Error deserializing address {decoded_resolver} while doing reverse ens lookup', # noqa: E501
)
human_names[reversed_addr] = None
continue
resolvers.append(deserialized_resolver)
filtered_reversed_addresses.append(reversed_addr)
# Querying human names
human_names_params = [
EnsContractParams(address=resolver, abi=ENS_RESOLVER_ABI, method_name='name', arguments=_prepare_ens_call_arguments(addr)) # noqa: E501
for addr, resolver in zip(filtered_reversed_addresses, resolvers)]
human_names_output = multicall(
ethereum=self,
calls=[(params.address, _encode_ens_contract(params=params)) for params in human_names_params], # noqa: E501
)
# Processing human names query output
for addr, params, human_name_output in zip(filtered_reversed_addresses, human_names_params, human_names_output): # noqa: E501
human_names[addr] = _decode_ens_contract(params=params, result_encoded=human_name_output) # noqa: E501
return human_names
@overload
def ens_lookup(
self,
name: str,
blockchain: Literal[SupportedBlockchain.ETHEREUM] = SupportedBlockchain.ETHEREUM,
call_order: Optional[Sequence[NodeName]] = None,
) -> Optional[ChecksumEthAddress]:
...
@overload
def ens_lookup(
self,
name: str,
blockchain: Literal[
SupportedBlockchain.BITCOIN,
SupportedBlockchain.KUSAMA,
SupportedBlockchain.POLKADOT,
],
call_order: Optional[Sequence[NodeName]] = None,
) -> Optional[HexStr]:
...
def ens_lookup(
self,
name: str,
blockchain: SupportedBlockchain = SupportedBlockchain.ETHEREUM,
call_order: Optional[Sequence[NodeName]] = None,
) -> Optional[Union[ChecksumEthAddress, HexStr]]:
return self.query(
method=self._ens_lookup,
call_order=call_order if call_order is not None else self.default_call_order(),
name=name,
blockchain=blockchain,
)
@overload
def _ens_lookup(
self,
web3: Optional[Web3],
name: str,
blockchain: Literal[SupportedBlockchain.ETHEREUM],
) -> Optional[ChecksumEthAddress]:
...
@overload
def _ens_lookup(
self,
web3: Optional[Web3],
name: str,
blockchain: Literal[
SupportedBlockchain.BITCOIN,
SupportedBlockchain.KUSAMA,
SupportedBlockchain.POLKADOT,
],
) -> Optional[HexStr]:
...
def _ens_lookup(
self,
web3: Optional[Web3],
name: str,
blockchain: SupportedBlockchain = SupportedBlockchain.ETHEREUM,
) -> Optional[Union[ChecksumEthAddress, HexStr]]:
"""Performs an ENS lookup and returns address if found else None
TODO: currently web3.py 5.15.0 does not support multichain ENS domains
(EIP-2304), therefore requesting a non-Ethereum address won't use the
        web3 ens library and will require extending the library resolver ABI.
An issue in their repo (#1839) reporting the lack of support has been
created. This function will require refactoring once they include
support for EIP-2304.
https://github.com/ethereum/web3.py/issues/1839
May raise:
- RemoteError if Etherscan is used and there is a problem querying it or
parsing its response
- InputError if the given name is not a valid ENS name
"""
try:
normal_name = normalize_name(name)
except InvalidName as e:
raise InputError(str(e)) from e
resolver_addr = self._call_contract(
web3=web3,
contract_address=ENS_MAINNET_ADDR,
abi=ENS_ABI,
method_name='resolver',
arguments=[normal_name_to_hash(normal_name)],
)
if is_none_or_zero_address(resolver_addr):
return None
ens_resolver_abi = ENS_RESOLVER_ABI.copy()
arguments = [normal_name_to_hash(normal_name)]
if blockchain != SupportedBlockchain.ETHEREUM:
ens_resolver_abi.extend(ENS_RESOLVER_ABI_MULTICHAIN_ADDRESS)
arguments.append(blockchain.ens_coin_type())
try:
deserialized_resolver_addr = deserialize_ethereum_address(resolver_addr)
except DeserializationError:
log.error(
                f'Error deserializing address {resolver_addr} while doing '
                f'ens lookup',
)
return None
address = self._call_contract(
web3=web3,
contract_address=deserialized_resolver_addr,
abi=ens_resolver_abi,
method_name='addr',
arguments=arguments,
)
if is_none_or_zero_address(address):
return None
if blockchain != SupportedBlockchain.ETHEREUM:
return HexStr(address.hex())
try:
return deserialize_ethereum_address(address)
except DeserializationError:
log.error(f'Error deserializing address {address}')
return None
def _call_contract_etherscan(
self,
contract_address: ChecksumEthAddress,
abi: List,
method_name: str,
arguments: Optional[List[Any]] = None,
) -> Any:
"""Performs an eth_call to an ethereum contract via etherscan
May raise:
- RemoteError if there is a problem with
reaching etherscan or with the returned result
"""
web3 = Web3()
contract = web3.eth.contract(address=contract_address, abi=abi)
input_data = contract.encodeABI(method_name, args=arguments if arguments else [])
result = self.etherscan.eth_call(
to_address=contract_address,
input_data=input_data,
)
if result == '0x':
raise BlockchainQueryError(
f'Error doing call on contract {contract_address} for {method_name} '
f'with arguments: {str(arguments)} via etherscan. Returned 0x result',
)
fn_abi = contract._find_matching_fn_abi(
fn_identifier=method_name,
args=arguments,
)
output_types = get_abi_output_types(fn_abi)
output_data = web3.codec.decode_abi(output_types, bytes.fromhex(result[2:]))
if len(output_data) == 1:
# due to https://github.com/PyCQA/pylint/issues/4114
return output_data[0] # pylint: disable=unsubscriptable-object
return output_data
def _get_transaction_receipt(
self,
web3: Optional[Web3],
tx_hash: EVMTxHash,
) -> Dict[str, Any]:
if web3 is None:
tx_receipt = self.etherscan.get_transaction_receipt(tx_hash)
try:
# Turn hex numbers to int
block_number = int(tx_receipt['blockNumber'], 16)
tx_receipt['blockNumber'] = block_number
tx_receipt['cumulativeGasUsed'] = int(tx_receipt['cumulativeGasUsed'], 16)
tx_receipt['gasUsed'] = int(tx_receipt['gasUsed'], 16)
tx_receipt['status'] = int(tx_receipt.get('status', '0x1'), 16)
tx_index = int(tx_receipt['transactionIndex'], 16)
tx_receipt['transactionIndex'] = tx_index
for receipt_log in tx_receipt['logs']:
receipt_log['blockNumber'] = block_number
receipt_log['logIndex'] = deserialize_int_from_hex(
symbol=receipt_log['logIndex'],
location='etherscan tx receipt',
)
receipt_log['transactionIndex'] = tx_index
except (DeserializationError, ValueError, KeyError) as e:
msg = str(e)
if isinstance(e, KeyError):
msg = f'missing key {msg}'
log.error(
f'Couldnt deserialize transaction receipt {tx_receipt} data from '
f'etherscan due to {msg}',
)
raise RemoteError(
f'Couldnt deserialize transaction receipt data from etherscan '
f'due to {msg}. Check logs for details',
) from e
return tx_receipt
# Can raise TransactionNotFound if the user's node is pruned and transaction is old
tx_receipt = web3.eth.get_transaction_receipt(tx_hash) # type: ignore
return process_result(tx_receipt)
def get_transaction_receipt(
self,
tx_hash: EVMTxHash,
call_order: Optional[Sequence[NodeName]] = None,
) -> Dict[str, Any]:
return self.query(
method=self._get_transaction_receipt,
call_order=call_order if call_order is not None else self.default_call_order(),
tx_hash=tx_hash,
)
def _get_transaction_by_hash(
self,
web3: Optional[Web3],
tx_hash: EVMTxHash,
) -> EthereumTransaction:
if web3 is None:
tx_data = self.etherscan.get_transaction_by_hash(tx_hash=tx_hash)
else:
tx_data = web3.eth.get_transaction(tx_hash) # type: ignore
try:
transaction = deserialize_ethereum_transaction(data=tx_data, internal=False, ethereum=self) # noqa: E501
except (DeserializationError, ValueError) as e:
raise RemoteError(
f'Couldnt deserialize ethereum transaction data from {tx_data}. Error: {str(e)}',
) from e
return transaction
def get_transaction_by_hash(
self,
tx_hash: EVMTxHash,
call_order: Optional[Sequence[NodeName]] = None,
) -> EthereumTransaction:
return self.query(
method=self._get_transaction_by_hash,
call_order=call_order if call_order is not None else self.default_call_order(),
tx_hash=tx_hash,
)
def call_contract(
self,
contract_address: ChecksumEthAddress,
abi: List,
method_name: str,
arguments: Optional[List[Any]] = None,
call_order: Optional[Sequence[NodeName]] = None,
block_identifier: BlockIdentifier = 'latest',
) -> Any:
return self.query(
method=self._call_contract,
call_order=call_order if call_order is not None else self.default_call_order(),
contract_address=contract_address,
abi=abi,
method_name=method_name,
arguments=arguments,
block_identifier=block_identifier,
)
def _call_contract(
self,
web3: Optional[Web3],
contract_address: ChecksumEthAddress,
abi: List,
method_name: str,
arguments: Optional[List[Any]] = None,
block_identifier: BlockIdentifier = 'latest',
) -> Any:
"""Performs an eth_call to an ethereum contract
May raise:
- RemoteError if etherscan is used and there is a problem with
reaching it or with the returned result
- BlockchainQueryError if web3 is used and there is a VM execution error
"""
if web3 is None:
return self._call_contract_etherscan(
contract_address=contract_address,
abi=abi,
method_name=method_name,
arguments=arguments,
)
contract = web3.eth.contract(address=contract_address, abi=abi)
try:
method = getattr(contract.caller(block_identifier=block_identifier), method_name)
result = method(*arguments if arguments else [])
except (ValueError, BadFunctionCallOutput) as e:
raise BlockchainQueryError(
f'Error doing call on contract {contract_address}: {str(e)}',
) from e
return result
def get_logs(
self,
contract_address: ChecksumEthAddress,
abi: List,
event_name: str,
argument_filters: Dict[str, Any],
from_block: int,
to_block: Union[int, Literal['latest']] = 'latest',
call_order: Optional[Sequence[NodeName]] = None,
) -> List[Dict[str, Any]]:
if call_order is None: # Default call order for logs
call_order = (NodeName.OWN, NodeName.ETHERSCAN)
return self.query(
method=self._get_logs,
call_order=call_order,
contract_address=contract_address,
abi=abi,
event_name=event_name,
argument_filters=argument_filters,
from_block=from_block,
to_block=to_block,
)
def _get_logs(
self,
web3: Optional[Web3],
contract_address: ChecksumEthAddress,
abi: List,
event_name: str,
argument_filters: Dict[str, Any],
from_block: int,
to_block: Union[int, Literal['latest']] = 'latest',
) -> List[Dict[str, Any]]:
"""Queries logs of an ethereum contract
May raise:
- RemoteError if etherscan is used and there is a problem with
reaching it or with the returned result
"""
event_abi = find_matching_event_abi(abi=abi, event_name=event_name)
_, filter_args = construct_event_filter_params(
event_abi=event_abi,
abi_codec=Web3().codec,
contract_address=contract_address,
argument_filters=argument_filters,
fromBlock=from_block,
toBlock=to_block,
)
if event_abi['anonymous']:
# web3.py does not handle the anonymous events correctly and adds the first topic
filter_args['topics'] = filter_args['topics'][1:]
events: List[Dict[str, Any]] = []
start_block = from_block
if web3 is not None:
events = _query_web3_get_logs(
web3=web3,
filter_args=filter_args,
from_block=from_block,
to_block=to_block,
contract_address=contract_address,
event_name=event_name,
argument_filters=argument_filters,
)
else: # etherscan
until_block = (
self.etherscan.get_latest_block_number() if to_block == 'latest' else to_block
)
blocks_step = 300000
while start_block <= until_block:
                while True:  # loop to continuously reduce block range if need be
end_block = min(start_block + blocks_step, until_block)
try:
new_events = self.etherscan.get_logs(
contract_address=contract_address,
topics=filter_args['topics'], # type: ignore
from_block=start_block,
to_block=end_block,
)
except RemoteError as e:
if 'Please select a smaller result dataset' in str(e):
blocks_step = blocks_step // 2
if blocks_step < 100:
raise # stop trying
# else try with the smaller step
continue
# else some other error
raise
break # we must have a result
# Turn all Hex ints to ints
for e_idx, event in enumerate(new_events):
try:
block_number = deserialize_int_from_hex(
symbol=event['blockNumber'],
location='etherscan log query',
)
log_index = deserialize_int_from_hex(
symbol=event['logIndex'],
location='etherscan log query',
)
# Try to see if the event is a duplicate that got returned
# in the previous iteration
for previous_event in reversed(events):
if previous_event['blockNumber'] < block_number:
break
same_event = (
previous_event['logIndex'] == log_index and
previous_event['transactionHash'] == event['transactionHash']
)
if same_event:
events.pop()
new_events[e_idx]['address'] = deserialize_ethereum_address(
event['address'],
)
new_events[e_idx]['blockNumber'] = block_number
new_events[e_idx]['timeStamp'] = deserialize_int_from_hex(
symbol=event['timeStamp'],
location='etherscan log query',
)
new_events[e_idx]['gasPrice'] = deserialize_int_from_hex(
symbol=event['gasPrice'],
location='etherscan log query',
)
new_events[e_idx]['gasUsed'] = deserialize_int_from_hex(
symbol=event['gasUsed'],
location='etherscan log query',
)
new_events[e_idx]['logIndex'] = log_index
new_events[e_idx]['transactionIndex'] = deserialize_int_from_hex(
symbol=event['transactionIndex'],
location='etherscan log query',
)
except DeserializationError as e:
raise RemoteError(
                            f'Couldnt decode an etherscan event due to {str(e)}',
) from e
# etherscan will only return 1000 events in one go. If more than 1000
# are returned such as when no filter args are provided then continue
# the query from the last block
if len(new_events) == 1000:
start_block = new_events[-1]['blockNumber']
else:
start_block = end_block + 1
events.extend(new_events)
return events
def get_event_timestamp(self, event: Dict[str, Any]) -> Timestamp:
"""Reads an event returned either by etherscan or web3 and gets its timestamp
Etherscan events contain a timestamp. Normal web3 events don't so it needs to
be queried from the block number
        We could also add this to the get_logs() call but that would add unnecessary
rpc calls for get_block_by_number() for each log entry. Better have it
lazy queried like this.
TODO: Perhaps better approach would be a log event class for this
"""
if 'timeStamp' in event:
# event from etherscan
return Timestamp(event['timeStamp'])
# event from web3
block_number = event['blockNumber']
block_data = self.get_block_by_number(block_number)
return Timestamp(block_data['timestamp'])
def _get_blocknumber_by_time_from_subgraph(self, ts: Timestamp) -> int:
"""Queries Ethereum Blocks Subgraph for closest block at or before given timestamp"""
response = self.blocks_subgraph.query(
f"""
{{
blocks(
first: 1, orderBy: timestamp, orderDirection: desc,
where: {{timestamp_lte: "{ts}"}}
) {{
id
number
timestamp
}}
}}
""",
)
try:
result = int(response['blocks'][0]['number'])
except (IndexError, KeyError) as e:
raise RemoteError(
f'Got unexpected ethereum blocks subgraph response: {response}',
) from e
else:
return result
def get_blocknumber_by_time(self, ts: Timestamp, etherscan: bool = True) -> int:
"""Searches for the blocknumber of a specific timestamp
- Performs the etherscan api call by default first
- If RemoteError raised or etherscan flag set to false
-> queries blocks subgraph
"""
if etherscan:
try:
return self.etherscan.get_blocknumber_by_time(ts)
except RemoteError:
pass
return self._get_blocknumber_by_time_from_subgraph(ts)
def get_basic_contract_info(self, address: ChecksumEthAddress) -> Dict[str, Any]:
"""
Query a contract address and return basic information as:
- Decimals
- name
- symbol
if it is provided in the contract. This method may raise:
- BadFunctionCallOutput: If there is an error calling a bad address
"""
cache = self.contract_info_cache.get(address)
if cache is not None:
return cache
properties = ('decimals', 'symbol', 'name')
info: Dict[str, Any] = {}
contract = EthereumContract(address=address, abi=ERC20TOKEN_ABI, deployed_block=0)
try:
# Output contains call status and result
output = multicall_2(
ethereum=self,
require_success=False,
calls=[(address, contract.encode(method_name=prop)) for prop in properties],
)
except RemoteError:
# If something happens in the connection the output should have
# the same length as the tuple of properties
output = [(False, b'')] * len(properties)
try:
decoded = [
contract.decode(x[1], method_name)[0] # pylint: disable=E1136
if x[0] and len(x[1]) else None
for (x, method_name) in zip(output, properties)
]
except OverflowError as e:
# This can happen when contract follows the ERC20 standard methods
# but name and symbol return bytes instead of string. UNIV1 LP is in this case
log.error(
f'{address} failed to decode as ERC20 token. Trying UNIV1 LP token. {str(e)}',
)
contract = EthereumContract(address=address, abi=UNIV1_LP_ABI, deployed_block=0)
decoded = [
contract.decode(x[1], method_name)[0] # pylint: disable=E1136
if x[0] and len(x[1]) else None
for (x, method_name) in zip(output, properties)
]
            log.debug(f'{address} was successfully decoded as a UNIV1 LP token')
for prop, value in zip(properties, decoded):
if isinstance(value, bytes):
value = value.rstrip(b'\x00').decode()
info[prop] = value
self.contract_info_cache[address] = info
return info
|
85532
|
import torch
import torch.nn as nn
from ....ops.pointnet2.pointnet2_batch import pointnet2_modules
from ....ops.pointnet2.pointnet2_batch import pointnet2_modules as pointnet2_batch_modules
from ....utils import common_utils
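# Vote-based center prediction head (in the spirit of VoteNet / 3DSSD): each seed point
# regresses an xyz offset (optionally plus a residual feature), the shifted points are
# aggregated with a multi-scale-grouping PointNet++ SA module, and the results are packed
# back into batch_dict ('centers', 'ctr_offsets', 'centers_features', ...).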
class VoteModule(nn.Module):
def __init__(self, model_cfg, voxel_size=None, point_cloud_range=None, num_bev_features=None,
num_rawpoint_features=None, **kwargs):
super().__init__()
self.model_cfg = model_cfg
self.num_points = model_cfg.NUM_POINTS
self.in_channels = model_cfg.NUM_INPUT_FEATURES
self.vote_xyz_range = model_cfg.VOTE_RANGE
self.with_res_feat = model_cfg.WITH_RES_FEATURE
self.aggre_mlps = model_cfg.AGGREGATION_MLPS
self.aggre_radius = model_cfg.AGGREGATION_RADIUS
self.aggre_samples = model_cfg.AGGREGATION_NSAMPLES
# self.skip_channels = model_cfg.SKIP_CHANNELS
# vote mlps
mlp = model_cfg.MLPS
mlp = [self.in_channels] + mlp
vote_conv_list = list()
for k in range(len(mlp) - 1):
vote_conv_list.extend([
nn.Conv1d(mlp[k], mlp[k + 1], kernel_size=1, bias=True),
nn.BatchNorm1d(mlp[k + 1]),
nn.ReLU()
])
if self.with_res_feat:
out_channel = 3 + self.in_channels
else:
out_channel = 3
vote_conv_list.extend([
nn.Conv1d(mlp[-1], out_channels=out_channel, kernel_size=1),
])
self.vote_mlp = nn.Sequential(*vote_conv_list)
# aggregation
self.vote_aggregation = pointnet2_batch_modules.PointnetSAModuleMSG_FPS(npoint=self.num_points,
radii=self.aggre_radius,
nsamples=self.aggre_samples,
mlps=self.aggre_mlps,
use_xyz=True)
sa_channel_out = 0
for aggre_mlp in self.aggre_mlps:
sa_channel_out += aggre_mlp[-1]
self.conv_out = nn.Sequential(
nn.Conv1d(sa_channel_out, self.model_cfg.AGGREGATION_OUT, kernel_size=1, bias=False),
nn.BatchNorm1d(self.model_cfg.AGGREGATION_OUT),
nn.ReLU())
# TODO: optional FP module for PointRCNN compatibility
'''
self.FP_modules = nn.ModuleList()
channel_out = self.model_cfg.AGGREGATION_OUT
for k in range(self.model_cfg.FP_MLPS.__len__()):
pre_channel = self.model_cfg.FP_MLPS[k + 1][-1] if k + 1 < len(self.model_cfg.FP_MLPS) else channel_out
self.FP_modules.append(
pointnet2_modules.PointnetFPModule(
mlp=[pre_channel + self.skip_channel_list[k]] + self.model_cfg.FP_MLPS[k]
)
)
'''
def extract_input(self, batch_dict):
batch_size = batch_dict['batch_size']
xyz = batch_dict['point_coords'].view(batch_size, -1, 4)[..., 1:].contiguous()
features = batch_dict['point_features'].view(batch_size, -1, batch_dict['point_features'].shape[-1]).contiguous()
return xyz, features
def forward(self, batch_dict):
"""
Args:
batch_dict:
batch_size:
keypoints: (B, num_keypoints, 3)
multi_scale_3d_features: {
'x_conv4': ...
}
points: optional (N, 1 + 3 + C) [bs_idx, x, y, z, ...]
spatial_features: optional
spatial_features_stride: optional
Returns:
point_features: (N, C)
point_coords: (N, 4)
"""
batch_size = batch_dict['batch_size']
xyz, features = self.extract_input(batch_dict)
features = features.permute(0, 2, 1).contiguous()
if isinstance(self.num_points, list):
self.num_points = self.num_points[0]
seed_points = xyz[:, :self.num_points, :].contiguous() # (B, N, 3)
seed_features = features[:, :, :self.num_points].contiguous() # (B, C, N)
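        # The first num_points keypoints act as vote seeds; the offsets predicted by
        # vote_mlp are clamped per-axis to vote_xyz_range before shifting the seeds.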
# generate vote points
votes = self.vote_mlp(seed_features) # (B, 3+C, N)
votes = votes.transpose(2, 1)
seed_offset = votes[:, :, :3]
limited_offset_list = []
for axis in range(len(self.vote_xyz_range)):
limited_offset_list.append(
seed_offset[..., axis].clamp(min=-self.vote_xyz_range[axis],
max=self.vote_xyz_range[axis])
)
limited_offset = torch.stack(limited_offset_list, dim=-1) # (B, N, 3)
vote_points = (seed_points + limited_offset).contiguous()
# generate shifted features
if self.with_res_feat:
res_features = votes[:, :, 3:]
vote_features = res_features + seed_features.transpose(2, 1).contiguous()
else:
vote_features = seed_features.transpose(2, 1).contiguous()
# aggregation
aggregated_points, aggregated_features = self.vote_aggregation(xyz=xyz,
features=features,
new_xyz=vote_points)
aggregated_features = self.conv_out(aggregated_features)
# FP forward
# pack output
ctr_batch_idx = torch.arange(batch_size, device=seed_offset.device).view(-1, 1).repeat(1, seed_offset.shape[1]).view(-1)
batch_dict['ctr_offsets'] = torch.cat((ctr_batch_idx[:, None].float(), seed_offset.contiguous().view(-1, 3)), dim=1)
batch_dict['centers'] = torch.cat((ctr_batch_idx[:, None].float(), aggregated_points.contiguous().view(-1, 3)), dim=1)
batch_dict['centers_origin'] = torch.cat((ctr_batch_idx[:, None].float(), seed_points.contiguous().view(-1, 3)), dim=1)
center_features = aggregated_features.permute(0, 2, 1).contiguous().view(-1, aggregated_features.shape[1])
batch_dict['centers_features'] = center_features
batch_dict['ctr_batch_idx'] = ctr_batch_idx
return batch_dict
|
85541
|
from keras.datasets import boston_housing
from keras.models import Sequential
from keras.layers import Activation, Dense
from keras import optimizers
(X_train, y_train), (X_test, y_test) = boston_housing.load_data()
model = Sequential()
# Keras model with two hidden layers with 10 neurons each
model.add(Dense(10, input_shape = (13,))) # Input layer => input_shape should be explicitly designated
model.add(Activation('sigmoid'))
model.add(Dense(10)) # Hidden layer => only output dimension should be designated
model.add(Activation('sigmoid'))
model.add(Dense(10)) # Hidden layer => only output dimension should be designated
model.add(Activation('sigmoid'))
model.add(Dense(1)) # Output layer => output dimension = 1 since it is regression problem
'''
This is equivalent to the above code block
>> model.add(Dense(10, input_shape = (13,), activation = 'sigmoid'))
>> model.add(Dense(10, activation = 'sigmoid'))
>> model.add(Dense(10, activation = 'sigmoid'))
>> model.add(Dense(1))
'''
sgd = optimizers.SGD(lr = 0.01) # stochastic gradient descent optimizer
model.compile(optimizer = sgd, loss = 'mean_squared_error', metrics = ['mse']) # for regression problems, mean squared error (MSE) is often employed
model.fit(X_train, y_train, batch_size = 50, epochs = 100, verbose = 1)
results = model.evaluate(X_test, y_test)
print('loss: ', results[0])
print('mse: ', results[1])
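# A minimal follow-up sketch (not part of the original script): predictions for new
# samples can be obtained with
#   y_pred = model.predict(X_test)   # shape (num_test_samples, 1)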
|
85549
|
from psutil import virtual_memory
def mock_cluster(n_workers=1,
threads_per_worker=1,
diagnostics_port=8787,
memory_limit=None,
**dask_kwarg):
return (n_workers, threads_per_worker, diagnostics_port, memory_limit)
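# Minimal test doubles, presumably standing in for dask.distributed's Client and
# LocalCluster: the mock client reports a single worker whose memory limit equals the
# machine's total RAM.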
class MockClient():
def __init__(self, cluster):
self.cluster = cluster
def scheduler_info(self):
return {'workers': {'worker 1': {'memory_limit': virtual_memory().total}}}
def get_mock_client_cluster():
return MockClient, mock_cluster
|
85565
|
from django.conf.urls import *
from . import views
from django.urls import include, path
urlpatterns = [
path('', views.index, name='variant_index'),
path('view/<int:variant_id>/', views.view, name='variant_view'),
]
|
85568
|
import sys
import os
from collections import OrderedDict
from ttfautohint._compat import (
ensure_binary, ensure_text, basestring, open, IntEnum,
)
USER_OPTIONS = dict(
in_file=None,
in_buffer=None,
out_file=None,
control_file=None,
control_buffer=None,
reference_file=None,
reference_buffer=None,
reference_index=0,
reference_name=None,
hinting_range_min=8,
hinting_range_max=50,
hinting_limit=200,
hint_composites=False,
adjust_subglyphs=False,
increase_x_height=14,
x_height_snapping_exceptions="",
windows_compatibility=False,
default_script="latn",
fallback_script="none",
fallback_scaling=False,
symbol=False,
fallback_stem_width=0,
ignore_restrictions=False,
family_suffix=None,
detailed_info=False,
no_info=False,
TTFA_info=False,
dehint=False,
epoch=None,
debug=False,
verbose=False,
)
StemWidthMode = IntEnum("StemWidthMode",
[
"NATURAL", # -1
"QUANTIZED", # 0
"STRONG", # 1
],
start=-1)
STEM_WIDTH_MODE_OPTIONS = OrderedDict([
("gray_stem_width_mode", StemWidthMode.QUANTIZED),
("gdi_cleartype_stem_width_mode", StemWidthMode.STRONG),
("dw_cleartype_stem_width_mode", StemWidthMode.QUANTIZED),
])
USER_OPTIONS.update(STEM_WIDTH_MODE_OPTIONS)
# Deprecated; use stem width mode options
STRONG_STEM_WIDTH_OPTIONS = dict(
gdi_cleartype_strong_stem_width=True,
gray_strong_stem_width=False,
dw_cleartype_strong_stem_width=False,
)
PRIVATE_OPTIONS = frozenset([
"in_buffer_len",
"control_buffer_len",
"reference_buffer_len",
"out_buffer",
"out_buffer_len",
"error_string",
"alloc_func",
"free_func",
"info_callback",
"info_post_callback",
"info_callback_data",
"progress_callback",
"progress_callback_data",
"error_callback",
"error_callback_data",
])
ALL_OPTIONS = frozenset(USER_OPTIONS) | PRIVATE_OPTIONS
# used when the control file does not have a name on the filesystem
CONTROL_NAME_FALLBACK = u"<control-instructions>"
def validate_options(kwargs):
opts = {k: kwargs.pop(k, USER_OPTIONS[k]) for k in USER_OPTIONS}
if kwargs:
raise TypeError(
"unknown keyword argument%s: %s" % (
"s" if len(kwargs) > 1 else "",
", ".join(repr(k) for k in kwargs)))
if opts["no_info"] and opts["detailed_info"]:
raise ValueError("no_info and detailed_info are mutually exclusive")
in_file, in_buffer = opts.pop("in_file"), opts.pop("in_buffer")
if in_file is None and in_buffer is None:
raise ValueError("No input file or buffer provided")
elif in_file is not None and in_buffer is not None:
raise ValueError("in_file and in_buffer are mutually exclusive")
if in_file is not None:
try:
in_buffer = in_file.read()
except AttributeError:
with open(in_file, "rb") as f:
in_buffer = f.read()
if not isinstance(in_buffer, bytes):
raise TypeError("in_buffer type must be bytes, not %s"
% type(in_buffer).__name__)
opts['in_buffer'] = in_buffer
opts['in_buffer_len'] = len(in_buffer)
control_file = opts.pop('control_file')
control_buffer = opts.pop('control_buffer')
if control_file is not None:
if control_buffer is not None:
raise ValueError(
"control_file and control_buffer are mutually exclusive")
try:
control_buffer = control_file.read()
except AttributeError:
with open(control_file, "rt", encoding="utf-8") as f:
control_buffer = f.read()
opts["control_name"] = control_file
else:
try:
opts["control_name"] = control_file.name
except AttributeError:
pass
if control_buffer is not None:
opts['control_buffer'] = ensure_binary(control_buffer, "utf-8")
opts['control_buffer_len'] = len(control_buffer)
if "control_name" in opts:
opts["control_name"] = ensure_text(
opts["control_name"], encoding=sys.getfilesystemencoding())
else:
opts["control_name"] = CONTROL_NAME_FALLBACK
reference_file = opts.pop('reference_file')
reference_buffer = opts.pop('reference_buffer')
if reference_file is not None:
if reference_buffer is not None:
raise ValueError(
"reference_file and reference_buffer are mutually exclusive")
try:
reference_buffer = reference_file.read()
except AttributeError:
with open(reference_file, "rb") as f:
reference_buffer = f.read()
if opts["reference_name"] is None:
opts["reference_name"] = reference_file
else:
if opts["reference_name"] is None:
try:
opts["reference_name"] = reference_file.name
except AttributeError:
pass
if reference_buffer is not None:
if not isinstance(reference_buffer, bytes):
raise TypeError("reference_buffer type must be bytes, not %s"
% type(reference_buffer).__name__)
opts['reference_buffer'] = reference_buffer
opts['reference_buffer_len'] = len(reference_buffer)
if opts["reference_name"] is not None:
opts["reference_name"] = ensure_binary(
opts["reference_name"], encoding=sys.getfilesystemencoding())
for key in ('default_script', 'fallback_script',
'x_height_snapping_exceptions'):
opts[key] = ensure_binary(opts[key])
if opts['epoch'] is not None:
from ctypes import c_ulonglong
opts['epoch'] = c_ulonglong(opts['epoch'])
if opts["family_suffix"] is not None:
opts["family_suffix"] = ensure_text(opts["family_suffix"])
for mode_option in STEM_WIDTH_MODE_OPTIONS:
# raises ValueError if integer value is not a valid stem width mode
opts[mode_option] = StemWidthMode(opts[mode_option])
return opts
def format_varargs(**options):
items = sorted((k, v) for k, v in options.items()
if k in ALL_OPTIONS and v is not None)
format_string = b", ".join(ensure_binary(k.replace("_", "-"))
for k, v in items)
values = tuple(v for k, v in items)
return format_string, values
def strong_stem_width(s):
if len(s) > 3:
import argparse
raise argparse.ArgumentTypeError(
"string can only contain up to 3 letters")
valid = {
"g": "gray_stem_width_mode",
"G": "gdi_cleartype_stem_width_mode",
"D": "dw_cleartype_stem_width_mode"}
chars = set(s)
invalid = chars - set(valid)
if invalid:
import argparse
raise argparse.ArgumentTypeError(
"invalid value: %s" % ", ".join(
repr(v) for v in sorted(invalid)))
result = {}
for char, opt_name in valid.items():
is_strong = char in chars
result[opt_name] = (StemWidthMode.STRONG if is_strong
else StemWidthMode.QUANTIZED)
return result
def stem_width_mode(s):
if len(s) != 3:
import argparse
raise argparse.ArgumentTypeError(
"Stem width mode string must consist of exactly three letters")
modes = {k[0].lower(): v
for k, v in StemWidthMode.__members__.items()}
result = {}
for i, option in enumerate(STEM_WIDTH_MODE_OPTIONS):
m = s[i]
if m not in modes:
import argparse
letters = sorted(repr(k) for k in modes)
raise argparse.ArgumentTypeError(
"Stem width mode letter for %s must be %s, or %s"
% (option, ", ".join(letters[:-1]), letters[-1]))
result[option] = modes[m]
return result
def stdin_or_input_path_type(s):
# the special argument "-" means sys.stdin
if s == "-":
try:
if sys.stdin.isatty(): # ignore if interactive
return None
return open(sys.stdin.fileno(), mode="rb", closefd=False)
except (AttributeError, IOError):
# if stdout was redirected (e.g. inside pytest), fileno may raise
# io.UnsupportedOperation
return None
return s
def stdout_or_output_path_type(s):
# the special argument "-" means sys.stdout
if s == "-":
try:
if sys.stdout.isatty(): # ignore if interactive
return None
return open(sys.stdout.fileno(), mode="wb", closefd=False)
except (AttributeError, IOError):
# if stdout was redirected (e.g. inside pytest), fileno may raise
# io.UnsupportedOperation
return None
return s
def parse_args(args=None):
"""Parse command line arguments and return a dictionary of options
for ttfautohint.ttfautohint function.
`args` can be either None, a list of strings, or a single string,
that is split into individual options with `shlex.split`.
When `args` is None, the console's default sys.argv are used, and any
SystemExit exceptions raised by argparse are propagated.
If args is a string list or a string, it is assumed that the function
was not called from a console script's `main` entry point, but from
other client code, and thus the SystemExit exceptions are muted and
a `None` value is returned.
"""
import argparse
from ttfautohint import __version__, libttfautohint
from ttfautohint.cli import USAGE, DESCRIPTION, EPILOG
version_string = "ttfautohint-py %s (libttfautohint %s)" % (
__version__, libttfautohint.version_string)
if args is None:
capture_sys_exit = False
else:
capture_sys_exit = True
if isinstance(args, basestring):
import shlex
args = shlex.split(args)
parser = argparse.ArgumentParser(
prog="ttfautohint",
usage=USAGE,
description=DESCRIPTION,
epilog=EPILOG,
formatter_class=argparse.RawDescriptionHelpFormatter,
)
parser.add_argument(
"in_file", nargs="?", metavar="IN-FILE", default="-",
type=stdin_or_input_path_type,
help="input file (default: standard input)")
parser.add_argument(
"out_file", nargs="?", metavar="OUT-FILE", default="-",
type=stdout_or_output_path_type,
help="output file (default: standard output)")
parser.add_argument(
"--debug", action="store_true", help="print debugging information")
stem_width_group = parser.add_mutually_exclusive_group(required=False)
stem_width_group.add_argument(
"-a", "--stem-width-mode", type=stem_width_mode, metavar="S",
default=STEM_WIDTH_MODE_OPTIONS,
help=("select stem width mode for grayscale, GDI ClearType, and DW "
"ClearType, where S is a string of three letters with possible "
"values 'n' for natural, 'q' for quantized, and 's' for strong "
"(default: qsq)"))
stem_width_group.add_argument( # deprecated
"-w", "--strong-stem-width", type=strong_stem_width, metavar="S",
help=argparse.SUPPRESS)
parser.add_argument(
"-c", "--composites", dest="hint_composites", action="store_true",
help="hint glyph composites also")
parser.add_argument(
"-d", "--dehint", action="store_true", help="remove all hints")
parser.add_argument(
"-D", "--default-script", metavar="SCRIPT",
default=USER_OPTIONS["default_script"],
help="set default OpenType script (default: %(default)s)")
parser.add_argument(
"-f", "--fallback-script", metavar="SCRIPT",
default=USER_OPTIONS["fallback_script"],
help="set fallback script (default: %(default)s)")
parser.add_argument(
"-F", "--family-suffix", metavar="SUFFIX",
help="append SUFFIX to the family name string(s) in the `name' table")
parser.add_argument(
"-G", "--hinting-limit", type=int, metavar="PPEM",
default=USER_OPTIONS["hinting_limit"],
help=("switch off hinting above this PPEM value (default: "
"%(default)s); value 0 means no limit"))
parser.add_argument(
"-H", "--fallback-stem-width", type=int, metavar="UNITS",
default=USER_OPTIONS["fallback_stem_width"],
help=("set fallback stem width (default: %(default)s font units at "
"2048 UPEM)"))
parser.add_argument(
"-i", "--ignore-restrictions", action="store_true",
help="override font license restrictions")
parser.add_argument(
"-I", "--detailed-info", action="store_true",
help=("add detailed ttfautohint info to the version string(s) in "
"the `name' table"))
parser.add_argument(
"-l", "--hinting-range-min", type=int, metavar="PPEM",
default=USER_OPTIONS["hinting_range_min"],
help="the minimum PPEM value for hint sets (default: %(default)s)")
parser.add_argument(
"-m", "--control-file", metavar="FILE",
help="get control instructions from FILE")
parser.add_argument(
"-n", "--no-info", action="store_true",
help=("don't add ttfautohint info to the version string(s) in the "
"`name' table"))
parser.add_argument(
"-p", "--adjust-subglyphs", action="store_true",
help="handle subglyph adjustments in exotic fonts")
parser.add_argument(
"-r", "--hinting-range-max", type=int, metavar="PPEM",
default=USER_OPTIONS["hinting_range_max"],
help="the maximum PPEM value for hint sets (default: %(default)s)")
parser.add_argument(
"-R", "--reference", dest="reference_file", metavar="FILE",
help="derive blue zones from reference font FILE")
parser.add_argument(
"-s", "--symbol", action="store_true",
help="input is symbol font")
parser.add_argument(
"-S", "--fallback-scaling", action="store_true",
help="use fallback scaling, not hinting")
parser.add_argument(
"-t", "--ttfa-table", action="store_true", dest="TTFA_info",
help="add TTFA information table")
parser.add_argument(
"-T", "--ttfa-info", dest="show_TTFA_info", action="store_true",
help="display TTFA table in IN-FILE and exit")
parser.add_argument(
"-v", "--verbose", action="store_true",
help="show progress information")
parser.add_argument(
"-V", "--version", action="version",
version=version_string,
help="print version information and exit")
parser.add_argument(
"-W", "--windows-compatibility", action="store_true",
help=("add blue zones for `usWinAscent' and `usWinDescent' to avoid "
"clipping"))
parser.add_argument(
"-x", "--increase-x-height", type=int, metavar="PPEM",
default=USER_OPTIONS["increase_x_height"],
help=("increase x height for sizes in the range 6<=PPEM<=N; value "
"0 switches off this feature (default: %(default)s)"))
parser.add_argument(
"-X", "--x-height-snapping-exceptions", metavar="STRING",
default=USER_OPTIONS["x_height_snapping_exceptions"],
help=('specify a comma-separated list of x-height snapping exceptions'
', for example "-9, 13-17, 19" (default: "%(default)s")'))
parser.add_argument(
"-Z", "--reference-index", type=int, metavar="NUMBER",
default=USER_OPTIONS["reference_index"],
help="face index of reference font (default: %(default)s)")
try:
options = vars(parser.parse_args(args))
except SystemExit:
if capture_sys_exit:
return None
raise
# if either input/output are interactive, print help and exit
if (not capture_sys_exit and
(options["in_file"] is None or options["out_file"] is None)):
parser.print_help()
parser.exit(1)
# check SOURCE_DATE_EPOCH environment variable
source_date_epoch = os.environ.get("SOURCE_DATE_EPOCH")
if source_date_epoch:
try:
options["epoch"] = int(source_date_epoch)
except ValueError:
import warnings
warnings.warn(
UserWarning("invalid SOURCE_DATE_EPOCH: %r" % source_date_epoch))
if options.pop("show_TTFA_info"):
# TODO use fonttools to dump TTFA table?
raise NotImplementedError()
stem_width_options = options.pop("stem_width_mode")
strong_stem_width_options = options.pop("strong_stem_width")
if strong_stem_width_options:
import warnings
warnings.warn(
UserWarning("Option '-w' is deprecated! Use option '-a' instead"))
stem_width_options = strong_stem_width_options
options.update(stem_width_options)
return options
|
85569
|
from datetime import datetime, timezone
from .enums import StatisticTypeEnum
def convert_timestamp_to_datetime(timestamp: float) -> datetime:
"""
Convert timestamp date format to datetime.
Arguments:
timestamp {float} -- Input timestamp.
Returns:
datetime -- Datetime formatted object which represents the
same information as timestamp.
"""
return datetime.fromtimestamp(timestamp, timezone.utc)
def get_enum_type(statistic_type: str) -> StatisticTypeEnum:
"""
Convert string object to enum.
Arguments:
statistic_type {str} -- Input string.
Returns:
StatisticTypeEnum -- Enum corresponding to statistic_type.
"""
return (
StatisticTypeEnum.ALERT
if statistic_type.lower() == "alerts"
else StatisticTypeEnum.REPORT
)
|
85618
|
from braintree.exceptions.unexpected_error import UnexpectedError
class ConnectionError(UnexpectedError):
pass
|
85643
|
from abc import ABCMeta, abstractmethod
import numpy as np
import scipy as sp
from sklearn import linear_model
import sklearn.metrics.pairwise
###############################
## Random Explainer
###############################
class RandomExplainer:
def __init__(self):
pass
def reset(self):
pass
def explain_instance(self,
instance_vector,
label,
classifier,
num_features,
dataset):
nonzero = instance_vector.nonzero()[1]
explanation = np.random.choice(nonzero, num_features)
return [(x, 1) for x in explanation]
def explain(self,
train_vectors,
train_labels,
classifier,
num_features,
dataset):
i = np.random.randint(0, train_vectors.shape[0])
explanation = self.explain_instance(train_vectors[i], None, None,
num_features, dataset)
return i, explanation
###############################
## Standalone Explainers
###############################
def most_important_word(classifier, v, class_):
# Returns the word w that moves P(Y) - P(Y|NOT w) the most for class Y.
max_index = 0
max_change = -1
orig = classifier.predict_proba(v)[0][class_]
for i in v.nonzero()[1]:
val = v[0,i]
v[0,i] = 0
pred = classifier.predict_proba(v)[0][class_]
change = orig - pred
if change > max_change:
max_change = change
max_index = i
v[0,i] = val
if max_change < 0:
return -1
return max_index
def explain_greedy(instance_vector,
label,
classifier,
num_features,
dataset=None):
explanation = []
z = instance_vector.copy()
while len(explanation) < num_features:
i = most_important_word(classifier, z, label)
if i == -1:
break
z[0,i] = 0
explanation.append(i)
return [(x, 1) for x in explanation]
def most_important_word_martens(predict_fn, v, class_):
# Returns the word w that moves P(Y) - P(Y|NOT w) the most for class Y.
max_index = 0
max_change = -1
orig = predict_fn(v)[0,class_]
for i in v.nonzero()[1]:
val = v[0,i]
v[0,i] = 0
pred = predict_fn(v)[0,class_]
change = orig - pred
if change > max_change:
max_change = change
max_index = i
v[0,i] = val
if max_change < 0:
return -1, max_change
return max_index, max_change
def explain_greedy_martens(instance_vector,
label,
predict_fn,
num_features,
dataset=None):
if not hasattr(predict_fn, '__call__'):
predict_fn = predict_fn.predict_proba
explanation = []
z = instance_vector.copy()
cur_score = predict_fn(instance_vector)[0, label]
while len(explanation) < num_features:
i, change = most_important_word_martens(predict_fn, z, label)
cur_score -= change
if i == -1:
break
explanation.append(i)
if cur_score < .5:
break
z[0,i] = 0
return [(x, 1) for x in explanation]
def data_labels_distances_mapping_text(x, classifier_fn, num_samples):
distance_fn = lambda x : sklearn.metrics.pairwise.cosine_distances(x[0],x)[0] * 100
features = x.nonzero()[1]
vals = np.array(x[x.nonzero()])[0]
doc_size = len(sp.sparse.find(x)[2])
sample = np.random.randint(1, doc_size, num_samples - 1)
data = np.zeros((num_samples, len(features)))
inverse_data = np.zeros((num_samples, len(features)))
data[0] = np.ones(doc_size)
inverse_data[0] = vals
features_range = range(len(features))
for i, s in enumerate(sample, start=1):
active = np.random.choice(features_range, s, replace=False)
data[i, active] = 1
for j in active:
inverse_data[i, j] = 1
sparse_inverse = sp.sparse.lil_matrix((inverse_data.shape[0], x.shape[1]))
sparse_inverse[:, features] = inverse_data
sparse_inverse = sp.sparse.csr_matrix(sparse_inverse)
mapping = features
labels = classifier_fn(sparse_inverse)
distances = distance_fn(sparse_inverse)
return data, labels, distances, mapping
# This is LIME
class GeneralizedLocalExplainer:
def __init__(self,
kernel_fn,
data_labels_distances_mapping_fn,
num_samples=5000,
lasso=True,
mean=None,
return_mean=False,
return_mapped=False,
lambda_=None,
verbose=True,
positive=False):
# Transform_classifier, transform_explainer,
# transform_explainer_to_classifier all take raw data in, whatever that is.
# perturb(x, num_samples) returns data (perturbed data in f'(x) form),
# inverse_data (perturbed data in x form) and mapping, where mapping is such
# that mapping[i] = j, where j is an index for x form.
# distance_fn takes raw data in. what we're calling raw data is just x
self.lambda_ = lambda_
self.kernel_fn = kernel_fn
self.data_labels_distances_mapping_fn = data_labels_distances_mapping_fn
self.num_samples = num_samples
self.lasso = lasso
self.mean = mean
self.return_mapped=return_mapped
self.return_mean = return_mean
self.verbose = verbose
self.positive=positive;
def reset(self):
pass
def data_labels_distances_mapping(self, raw_data, classifier_fn):
data, labels, distances, mapping = self.data_labels_distances_mapping_fn(raw_data, classifier_fn, self.num_samples)
return data, labels, distances, mapping
def generate_lars_path(self, weighted_data, weighted_labels):
X = weighted_data
alphas, active, coefs = linear_model.lars_path(X, weighted_labels, method='lasso', verbose=False, positive=self.positive)
return alphas, coefs
def explain_instance_with_data(self, data, labels, distances, label, num_features):
weights = self.kernel_fn(distances)
weighted_data = data * weights[:, np.newaxis]
if self.mean is None:
mean = np.mean(labels[:, label])
else:
mean = self.mean
shifted_labels = labels[:, label] - mean
if self.verbose:
print 'mean', mean
weighted_labels = shifted_labels * weights
used_features = range(weighted_data.shape[1])
nonzero = used_features
alpha = 1
if self.lambda_:
classif = linear_model.Lasso(alpha=self.lambda_, fit_intercept=False, positive=self.positive)
classif.fit(weighted_data, weighted_labels)
used_features = classif.coef_.nonzero()[0]
if used_features.shape[0] == 0:
if self.return_mean:
return [], mean
else:
return []
elif self.lasso:
alphas, coefs = self.generate_lars_path(weighted_data, weighted_labels)
for i in range(len(coefs.T) - 1, 0, -1):
nonzero = coefs.T[i].nonzero()[0]
if len(nonzero) <= num_features:
chosen_coefs = coefs.T[i]
alpha = alphas[i]
break
used_features = nonzero
debiased_model = linear_model.Ridge(alpha=0, fit_intercept=False)
debiased_model.fit(weighted_data[:, used_features], weighted_labels)
if self.verbose:
print 'Prediction_local', debiased_model.predict(data[0, used_features].reshape(1, -1)) + mean, 'Right:', labels[0, label]
if self.return_mean:
return sorted(zip(used_features,
debiased_model.coef_),
key=lambda x:np.abs(x[1]), reverse=True), mean
else:
return sorted(zip(used_features,
debiased_model.coef_),
key=lambda x:np.abs(x[1]), reverse=True)
def explain_instance(self,
raw_data,
label,
classifier_fn,
num_features, dataset=None):
if not hasattr(classifier_fn, '__call__'):
classifier_fn = classifier_fn.predict_proba
data, labels, distances, mapping = self.data_labels_distances_mapping(raw_data, classifier_fn)
if self.return_mapped:
if self.return_mean:
exp, mean = self.explain_instance_with_data(data, labels, distances, label, num_features)
else:
exp = self.explain_instance_with_data(data, labels, distances, label, num_features)
exp = [(mapping[x[0]], x[1]) for x in exp]
if self.return_mean:
return exp, mean
else:
return exp
return self.explain_instance_with_data(data, labels, distances, label, num_features), mapping
|
85689
|
EXECUTE_MODE_AUTO = "auto"
EXECUTE_MODE_ASYNC = "async"
EXECUTE_MODE_SYNC = "sync"
EXECUTE_MODE_OPTIONS = frozenset([
EXECUTE_MODE_AUTO,
EXECUTE_MODE_ASYNC,
EXECUTE_MODE_SYNC,
])
EXECUTE_CONTROL_OPTION_ASYNC = "async-execute"
EXECUTE_CONTROL_OPTION_SYNC = "sync-execute"
EXECUTE_CONTROL_OPTIONS = frozenset([
EXECUTE_CONTROL_OPTION_ASYNC,
EXECUTE_CONTROL_OPTION_SYNC,
])
EXECUTE_RESPONSE_RAW = "raw"
EXECUTE_RESPONSE_DOCUMENT = "document"
EXECUTE_RESPONSE_OPTIONS = frozenset([
EXECUTE_RESPONSE_RAW,
EXECUTE_RESPONSE_DOCUMENT,
])
EXECUTE_TRANSMISSION_MODE_VALUE = "value"
EXECUTE_TRANSMISSION_MODE_REFERENCE = "reference"
EXECUTE_TRANSMISSION_MODE_OPTIONS = frozenset([
EXECUTE_TRANSMISSION_MODE_VALUE,
EXECUTE_TRANSMISSION_MODE_REFERENCE,
])
|
85714
|
from .common import get_tfvars_file, replace_tfvars, passwd_generator
def configure_sonarqube_container():
"""
Configure a containerized Sonar server.
"""
replace_tfvars("dockerizedSonarqube", "true", get_tfvars_file(), False)
replace_tfvars('sonar_username', "admin", get_tfvars_file())
replace_tfvars('sonar_passwd', passwd_generator(), get_tfvars_file())
replace_tfvars('codequality_type', 'sonarqube', get_tfvars_file())
replace_tfvars('codeq', 1, get_tfvars_file())
|
85715
|
from typing import Optional, Dict
import falcon
class Request:
def __init__(self, request: falcon.Request, request_number: int):
self.cookies: Optional[Dict[str, str]] = request.cookies
self.body: Optional[bytes] = request.bounded_stream.read()
self.content_type: Optional[str] = request.content_type
self.files: Optional[Dict[str, bytes]] = self._get_files(request)
self.headers: Optional[Dict[str, str]] = request.headers
self.query_params: Optional[Dict[str, str]] = request.params
self.number = request_number
@staticmethod
def _get_files(request: falcon.Request) -> Optional[Dict[str, bytes]]:
files = {
param_name: param_value.file.read()
for param_name, param_value in request.params.items()
if hasattr(param_value, "file")
}
return files if files else None
|
85764
|
from bap.utils.bap_comment import parse, dumps, is_valid
def test_parse():
assert parse('hello') is None
assert parse('BAP: hello') == {'hello': []}
assert parse('BAP: hello,world') == {'hello': [], 'world': []}
assert parse('BAP: hello=cruel,world') == {'hello': ['cruel', 'world']}
assert parse('BAP: hello="hello, world"') == {'hello': ['hello, world']}
assert parse('BAP: hello=cruel,world goodbye=real,life') == {
'hello': ['cruel', 'world'],
'goodbye': ['real', 'life']
}
assert parse('BAP: hello="f\'"') == {'hello': ["f'"]}
def test_dumps():
assert 'BAP:' in dumps({'hello': []})
assert dumps({'hello': ['cruel', 'world'], 'nice': [], 'thing': []}) == \
'BAP: nice,thing hello=cruel,world'
assert dumps({'hello': ["world'"]}) == 'BAP: hello="world\'"'
def test_is_valid():
assert is_valid('BAP: hello')
assert is_valid('BAP: hello,world')
assert not is_valid('some comment')
def test_roundup():
comm = {
'x': [], 'y': [], 'z': [],
'a': ['1', '2', '3'],
'b': ['thing\''],
'c': ['many things'],
'd': ['strange \\ things'],
}
assert parse(dumps(parse(dumps(comm)))) == comm
def test_quotation():
data = 'BAP: chars="{\\\"a\\\", \\\"b\\\", \\\"c\\\"}"'
assert parse(data) == {'chars': ['{"a", "b", "c"}']}
assert parse(data) == parse(dumps(parse(data)))
def test_single_quote():
data = 'BAP: key="{can\\\'t do}"'
assert parse(data) == {'key': ["{can\\'t do}"]}
|
85786
|
from spikex.defaults import spacy_version
from spikex.pipes import SentX
SENTS = [
"This is a bullet list that we want to be a unique sentence:\n"
"\ta) the first bullet;\n"
"\tb) the second bullet;\n"
"\tc) a bullet with nested bullets:\n"
"\t\t1) first nested bullet;"
"\t\t2) second nested bullet."
"\td) last bullet.\n",
"Paragraph title ",
"The title was misformatted with the text. ",
"Now we try to split on abbreviations like Figs. 1 or Fig. 2. ",
"They can create confusion, like No.42 or eg. Num. 42 or U.S.; ",
"these are some cases, but there could it be more out there.",
]
def test_splitta(nlp):
sentx_pipe = SentX() if spacy_version < 3 else "sentx"
nlp.add_pipe(sentx_pipe, before="parser")
doc = nlp("".join(SENTS))
assert len([s for s in doc.sents]) == len(SENTS)
|
85839
|
from kafka import KafkaProducer
producer = KafkaProducer(bootstrap_servers="127.0.0.1:9092")
for _ in range(10000):
producer.send("my_topic", b"message")
# producer.flush()
|
85885
|
from poethepoet.envfile import parse_env_file
import pytest
valid_examples = [
(
"""
# empty
""",
{},
),
(
"""
# single word values
WORD=something
WORD_WITH_HASH=some#thing
NUMBER=0
EMOJI=😃😃
DOUBLE_QUOTED_WORD="something"
SINGLE_QUOTED_WORD='something'
""",
{
"WORD": "something",
"WORD_WITH_HASH": "some#thing",
"NUMBER": "0",
"EMOJI": "😃😃",
"DOUBLE_QUOTED_WORD": "something",
"SINGLE_QUOTED_WORD": "something",
},
),
(
"""
# multiword values
WORD=some\\ thing # and trailing comments
DOUBLE_QUOTED_WORD="some thing"
SINGLE_QUOTED_WORD='some thing'
""",
{
"WORD": r"some thing",
"DOUBLE_QUOTED_WORD": "some thing",
"SINGLE_QUOTED_WORD": "some thing",
},
),
(
"""
# values with line breaks
WORD=some\\
thing
DOUBLE_QUOTED_WORD="some
thing"
SINGLE_QUOTED_WORD='some
thing'
""",
{
"WORD": "some\nthing",
"DOUBLE_QUOTED_WORD": "some\n thing",
"SINGLE_QUOTED_WORD": "some\n thing",
},
),
(
"""
# without linebreak between vars
FOO=BAR BAR=FOO
""",
{"FOO": "BAR", "BAR": "FOO"},
),
(
"""
# with semicolons
; FOO=BAR;BAR=FOO ;
;
BAZ="2;'2"#;
\tQUX=3\t;
""",
{"FOO": "BAR", "BAR": "FOO", "BAZ": "2;'2#", "QUX": "3"},
),
(
r"""
# with extra backslashes
FOO=a\\\ b
BAR='a\\\ b'
BAZ="a\\\ b"
""",
{"FOO": r"a\ b", "BAR": r"a\\\ b", "BAZ": r"a\ b"},
),
( # a value with many parts and some empty vars
r"""FOO=a\\\ b'a\\\ b'"a\\\ b"#"#"'\'' ;'#;\t
BAR=
BAZ= # still empty
QUX=""",
{"FOO": r"a\ ba\\\ ba\ b##\ ;#", "BAR": "", "BAZ": "", "QUX": ""},
),
# export keyword is allowed
(
"""export answer=42
export \t question=undefined
export\tdinner=chicken
""",
{"answer": "42", "question": "undefined", "dinner": "chicken"},
),
]
invalid_examples = [
"foo = bar",
"foo =bar",
"foo= bar",
"foo\t=\tbar",
"foo\t=bar",
"foo=\tbar",
"foo= 'bar",
'foo= "bar"',
"foo",
"foo;",
"8oo=bar",
"foo@=bar",
'"foo@"=bar',
"'foo@'=bar",
r"foo\=bar",
r"foo\==bar",
r"export;foo=bar",
r"export\nfoo=bar",
]
@pytest.mark.parametrize("example", valid_examples)
def test_parse_valid_env_files(example):
assert parse_env_file(example[0]) == example[1]
@pytest.mark.parametrize("example", invalid_examples)
def test_parse_invalid_env_files(example):
with pytest.raises(ValueError):
parse_env_file(example)
|
85907
|
import unittest
import logging
from bsn_sdk_py.until.bsn_logger import log_info
class TestLogger(unittest.TestCase):
def setUp(self):
FORMAT = "%(asctime)s %(thread)d %(message)s"
logging.basicConfig(level=logging.INFO, format=FORMAT, datefmt="[%Y-%m-%d %H:%M:%S]")
def test_log_info(self):
log_info('1111111111')
if __name__ == '__main__':
unittest.main()
|
85936
|
import pandas as pd
import numpy as np
from .basecomparison import BaseTwoSorterComparison
from .comparisontools import (do_score_labels, make_possible_match,
make_best_match, make_hungarian_match, do_confusion_matrix, do_count_score,
compute_performance)
class GroundTruthComparison(BaseTwoSorterComparison):
"""
Compares a sorter to a ground truth.
This class can:
* compute a "match between gt_sorting and tested_sorting
* compute optionally the score label (TP, FN, CL, FP) for each spike
* count by unit of GT the total of each (TP, FN, CL, FP) into a Dataframe
GroundTruthComparison.count
* compute the confusion matrix .get_confusion_matrix()
* compute some performance metric with several strategy based on
the count score by unit
* count well detected units
* count false positive detected units
* count redundant units
* count overmerged units
* summary all this
Parameters
----------
gt_sorting: SortingExtractor
The first sorting for the comparison
tested_sorting: SortingExtractor
The second sorting for the comparison
gt_name: str
The name of sorter 1
tested_name: : str
The name of sorter 2
delta_time: float
Number of ms to consider coincident spikes (default 0.4 ms) match_score: float
Minimum agreement score to match units (default 0.5)
chance_score: float
Minimum agreement score to for a possible match (default 0.1)
redundant_score: float
Agreement score above which units are redundant (default 0.2)
overmerged_score: float
Agreement score above which units can be overmerged (default 0.2)
well_detected_score: float
Agreement score above which units are well detected (default 0.8)
exhaustive_gt: bool (default True)
Tell if the ground true is "exhaustive" or not. In other world if the
GT have all possible units. It allows more performance measurement.
For instance, MEArec simulated dataset have exhaustive_gt=True
match_mode: 'hungarian', or 'best'
What is match used for counting : 'hungarian' or 'best match'.
n_jobs: int
Number of cores to use in parallel. Uses all available if -1
compute_labels: bool
If True, labels are computed at instantiation (default False)
compute_misclassifications: bool
If True, misclassifications are computed at instantiation (default False)
verbose: bool
If True, output is verbose
Returns
-------
sorting_comparison: SortingComparison
The SortingComparison object
"""
def __init__(self, gt_sorting, tested_sorting, gt_name=None, tested_name=None,
delta_time=0.4, sampling_frequency=None, match_score=0.5, well_detected_score=0.8,
redundant_score=0.2, overmerged_score=0.2, chance_score=0.1, exhaustive_gt=False, n_jobs=-1,
match_mode='hungarian', compute_labels=False, compute_misclassifications=False, verbose=False):
if gt_name is None:
gt_name = 'ground truth'
if tested_name is None:
tested_name = 'tested'
BaseTwoSorterComparison.__init__(self, gt_sorting, tested_sorting, sorting1_name=gt_name,
sorting2_name=tested_name, delta_time=delta_time,
match_score=match_score, # sampling_frequency=sampling_frequency,
chance_score=chance_score, n_jobs=n_jobs,
verbose=verbose)
self.exhaustive_gt = exhaustive_gt
self._compute_misclassifications = compute_misclassifications
self.redundant_score = redundant_score
self.overmerged_score = overmerged_score
self.well_detected_score = well_detected_score
assert match_mode in ['hungarian', 'best']
self.match_mode = match_mode
self._compute_labels = compute_labels
self._do_count()
self._labels_st1 = None
self._labels_st2 = None
if self._compute_labels:
self._do_score_labels()
# confusion matrix is compute on demand
self._confusion_matrix = None
def get_labels1(self, unit_id):
if self._labels_st1 is None:
self._do_score_labels()
if unit_id in self.sorting1.get_unit_ids():
return self._labels_st1[unit_id]
else:
raise Exception("Unit_id is not a valid unit")
def get_labels2(self, unit_id):
if self._labels_st1 is None:
self._do_score_labels()
if unit_id in self.sorting2.get_unit_ids():
return self._labels_st2[unit_id]
else:
raise Exception("Unit_id is not a valid unit")
def _do_matching(self):
if self._verbose:
print("Matching...")
self.possible_match_12, self.possible_match_21 = make_possible_match(self.agreement_scores, self.chance_score)
self.best_match_12, self.best_match_21 = make_best_match(self.agreement_scores, self.chance_score)
self.hungarian_match_12, self.hungarian_match_21 = make_hungarian_match(self.agreement_scores,
self.match_score)
def _do_count(self):
"""
Do raw count into a dataframe.
Internally use hungarian match or best match.
"""
if self.match_mode == 'hungarian':
match_12 = self.hungarian_match_12
elif self.match_mode == 'best':
match_12 = self.best_match_12
self.count_score = do_count_score(self.event_counts1, self.event_counts2,
match_12, self.match_event_count)
def _do_confusion_matrix(self):
if self._verbose:
print("Computing confusion matrix...")
if self.match_mode == 'hungarian':
match_12 = self.hungarian_match_12
elif self.match_mode == 'best':
match_12 = self.best_match_12
self._confusion_matrix = do_confusion_matrix(self.event_counts1, self.event_counts2, match_12,
self.match_event_count)
def get_confusion_matrix(self):
"""
Computes the confusion matrix.
Returns
-------
confusion_matrix: pandas.DataFrame
The confusion matrix
"""
if self._confusion_matrix is None:
self._do_confusion_matrix()
return self._confusion_matrix
def _do_score_labels(self):
assert self.match_mode == 'hungarian', \
'Labels (TP, FP, FN) can be computed only with hungarian match'
if self._verbose:
print("Adding labels...")
self._labels_st1, self._labels_st2 = do_score_labels(self.sorting1, self.sorting2,
self.delta_frames, self.hungarian_match_12,
self._compute_misclassifications)
def get_performance(self, method='by_unit', output='pandas'):
"""
Get performance rate with several method:
* 'raw_count' : just render the raw count table
* 'by_unit' : render perf as rate unit by unit of the GT
* 'pooled_with_average' : compute rate unit by unit and average
Parameters
----------
method: str
'by_unit', or 'pooled_with_average'
output: str
'pandas' or 'dict'
Returns
-------
perf: pandas dataframe/series (or dict)
dataframe/series (based on 'output') with performance entries
"""
possibles = ('raw_count', 'by_unit', 'pooled_with_average')
if method not in possibles:
raise Exception("'method' can be " + ' or '.join(possibles))
if method == 'raw_count':
perf = self.count_score
elif method == 'by_unit':
perf = compute_performance(self.count_score)
elif method == 'pooled_with_average':
perf = self.get_performance(method='by_unit').mean(axis=0)
if output == 'dict' and isinstance(perf, pd.Series):
perf = perf.to_dict()
return perf
def print_performance(self, method='pooled_with_average'):
"""
Print performance with the selected method
"""
template_txt_performance = _template_txt_performance
if method == 'by_unit':
perf = self.get_performance(method=method, output='pandas')
perf = perf * 100
# ~ print(perf)
d = {k: perf[k].tolist() for k in perf.columns}
txt = template_txt_performance.format(method=method, **d)
print(txt)
elif method == 'pooled_with_average':
perf = self.get_performance(method=method, output='pandas')
perf = perf * 100
txt = template_txt_performance.format(method=method, **perf.to_dict())
print(txt)
def print_summary(self, well_detected_score=None, redundant_score=None, overmerged_score=None):
"""
Print a global performance summary that depend on the context:
* exhaustive= True/False
* how many gt units (one or several)
This summary mix several performance metrics.
"""
txt = _template_summary_part1
d = dict(
num_gt=len(self.unit1_ids),
num_tested=len(self.unit2_ids),
num_well_detected=self.count_well_detected_units(well_detected_score),
num_redundant=self.count_redundant_units(redundant_score),
num_overmerged=self.count_overmerged_units(overmerged_score),
)
if self.exhaustive_gt:
txt = txt + _template_summary_part2
d['num_false_positive_units'] = self.count_false_positive_units()
d['num_bad'] = self.count_bad_units()
txt = txt.format(**d)
print(txt)
def get_well_detected_units(self, well_detected_score=None):
"""
Return units list of "well detected units" from tested_sorting.
"well detected units" are defined as units in tested that
are well matched to GT units.
Parameters
----------
well_detected_score: float (default 0.8)
The agreement score above which tested units
are counted as "well detected".
"""
if well_detected_score is not None:
self.well_detected_score = well_detected_score
matched_units2 = self.hungarian_match_12
well_detected_ids = []
for u2 in self.unit2_ids:
if u2 in list(matched_units2.values):
u1 = self.hungarian_match_21[u2]
score = self.agreement_scores.at[u1, u2]
if score >= self.well_detected_score:
well_detected_ids.append(u2)
return well_detected_ids
def count_well_detected_units(self, well_detected_score):
"""
Count how many well detected units.
kwargs are the same as get_well_detected_units.
"""
return len(self.get_well_detected_units(well_detected_score=well_detected_score))
def get_false_positive_units(self, redundant_score=None):
"""
Return units list of "false positive units" from tested_sorting.
"false positive units" are defined as units in tested that
are not matched at all in GT units.
Need exhaustive_gt=True
Parameters
----------
redundant_score: float (default 0.2)
The agreement score below which tested units
are counted as "false positive"" (and not "redundant").
"""
assert self.exhaustive_gt, 'false_positive_units list is valid only if exhaustive_gt=True'
if redundant_score is not None:
self.redundant_score = redundant_score
matched_units2 = list(self.hungarian_match_12.values)
false_positive_ids = []
for u2 in self.unit2_ids:
if u2 not in matched_units2:
if self.best_match_21[u2] == -1:
false_positive_ids.append(u2)
else:
u1 = self.best_match_21[u2]
score = self.agreement_scores.at[u1, u2]
if score < self.redundant_score:
false_positive_ids.append(u2)
return false_positive_ids
def count_false_positive_units(self, redundant_score=None):
"""
See get_false_positive_units().
"""
return len(self.get_false_positive_units(redundant_score))
def get_redundant_units(self, redundant_score=None):
"""
Return "redundant units"
"redundant units" are defined as units in tested
that match a GT units with a big agreement score
but it is not the best match.
In other world units in GT that detected twice or more.
Parameters
----------
redundant_score=None: float (default 0.2)
The agreement score above which tested units
are counted as "redundant" (and not "false positive" ).
"""
assert self.exhaustive_gt, 'redundant_units list is valid only if exhaustive_gt=True'
if redundant_score is not None:
self.redundant_score = redundant_score
matched_units2 = list(self.hungarian_match_12.values)
redundant_ids = []
for u2 in self.unit2_ids:
if u2 not in matched_units2 and self.best_match_21[u2] != -1:
u1 = self.best_match_21[u2]
if u2 != self.best_match_12[u1]:
score = self.agreement_scores.at[u1, u2]
if score >= self.redundant_score:
redundant_ids.append(u2)
return redundant_ids
def count_redundant_units(self, redundant_score=None):
"""
See get_redundant_units().
"""
return len(self.get_redundant_units(redundant_score=redundant_score))
def get_overmerged_units(self, overmerged_score=None):
"""
Return "overmerged units"
"overmerged units" are defined as units in tested
that match more than one GT unit with an agreement score larger than overmerged_score.
Parameters
----------
overmerged_score: float (default 0.4)
Tested units with 2 or more agreement scores above 'overmerged_score'
are counted as "overmerged".
"""
assert self.exhaustive_gt, 'overmerged_units list is valid only if exhaustive_gt=True'
if overmerged_score is not None:
self.overmerged_score = overmerged_score
overmerged_ids = []
for u2 in self.unit2_ids:
scores = self.agreement_scores.loc[:, u2]
if len(np.where(scores > self.overmerged_score)[0]) > 1:
overmerged_ids.append(u2)
return overmerged_ids
def count_overmerged_units(self, overmerged_score=None):
"""
See get_overmerged_units().
"""
return len(self.get_overmerged_units(overmerged_score=overmerged_score))
def get_bad_units(self):
"""
Return units list of "bad units".
"bad units" are defined as units in tested that are not
in the best match list of GT units.
So it is the union of "false positive units" + "redundant units".
Need exhaustive_gt=True
"""
assert self.exhaustive_gt, 'bad_units list is valid only if exhaustive_gt=True'
matched_units2 = list(self.hungarian_match_12.values)
bad_ids = []
for u2 in self.unit2_ids:
if u2 not in matched_units2:
bad_ids.append(u2)
return bad_ids
def count_bad_units(self):
"""
See get_bad_units
"""
return len(self.get_bad_units())
# usefull also for gathercomparison
_template_txt_performance = """PERFORMANCE ({method})
-----------
ACCURACY: {accuracy}
RECALL: {recall}
PRECISION: {precision}
FALSE DISCOVERY RATE: {false_discovery_rate}
MISS RATE: {miss_rate}
"""
_template_summary_part1 = """SUMMARY
-------
GT num_units: {num_gt}
TESTED num_units: {num_tested}
num_well_detected: {num_well_detected}
num_redundant: {num_redundant}
num_overmerged: {num_overmerged}
"""
_template_summary_part2 = """num_false_positive_units {num_false_positive_units}
num_bad: {num_bad}
"""
def compare_sorter_to_ground_truth(*args, **kwargs):
return GroundTruthComparison(*args, **kwargs)
compare_sorter_to_ground_truth.__doc__ = GroundTruthComparison.__doc__
|
85968
|
from Interprete.NodoAST import NodoArbol
from Interprete.Tabla_de_simbolos import Tabla_de_simbolos
from Interprete.Arbol import Arbol
from Interprete.Valor.Valor import Valor
from Interprete.Primitivos.TIPO import TIPO
class BOOLEANO(NodoArbol):
def __init__(self, data, line, column):
super().__init__(line, column)
self.data = data
def execute(self, entorno:Tabla_de_simbolos, arbol:Arbol):
value:Valor = Valor(TIPO.BOOLEAN, self.data)
return value
|
85987
|
import os
import traceback
import networkx as nx
from nasbench import api
from nord.utils import pdownload
NASBENCH_TFRECORD = './data/nasbench_only108.tfrecord'
file_url = 'https://storage.googleapis.com/nasbench/nasbench_only108.tfrecord'
INPUT = 'input'
OUTPUT = 'output'
CONV1X1 = 'conv1x1-bn-relu'
CONV3X3 = 'conv3x3-bn-relu'
MAXPOOL3X3 = 'maxpool3x3'
class BenchmarkEvaluator():
"""A class to evaluate a network on a benchmark
NAS dataset.
"""
def get_available_ops(self) -> list:
return [CONV1X1, CONV3X3, MAXPOOL3X3]
def __init__(self):
if not os.path.isfile(NASBENCH_TFRECORD):
print('Downloading NASBench-101 Data.')
pdownload(file_url, NASBENCH_TFRECORD)
print('Downloaded')
self.dataset = api.NASBench(NASBENCH_TFRECORD)
self.checked_models = {}
def __descriptor_to_spec(self, descriptor):
matrix, ops = BenchmarkEvaluator.descriptor_to_matrix(descriptor)
try:
model_spec = api.ModelSpec(
# Adjacency matrix of the module
matrix=matrix,
# Operations at the vertices of the module, matches order of matrix
ops=ops)
except Exception:
print(matrix)
print(ops)
print(descriptor)
input('PROLBEM')
traceback.print_exc()
return None
return model_spec
def has_been_evaluated(self, descriptor):
model_spec = self.__descriptor_to_spec(descriptor)
return model_spec in self.checked_models
def descriptor_evaluate(self, descriptor, acc='validation_accuracy'):
model_spec = self.__descriptor_to_spec(descriptor)
data = 0, 0
try:
hash_ = model_spec.hash_spec(self.dataset.config['available_ops'])
if hash_ in self.checked_models:
data = self.checked_models[hash_]
else:
data = self.dataset.query(model_spec)
self.checked_models[hash_] = data
except:
traceback.print_exc()
return 0, 0
return data[acc], data['training_time']
@staticmethod
def descriptor_to_matrix(descriptor):
graph = nx.DiGraph()
for origin in descriptor.connections:
for destination in descriptor.connections[origin]:
graph.add_edge(origin, destination)
node_lvls = {}
first_node = min(list(descriptor.layers.keys()))
last_node = max(list(descriptor.layers.keys()))
nodes_no = len(list(descriptor.layers.keys()))
for node in list(descriptor.layers.keys()):
if node in (first_node, last_node):
continue
paths = nx.all_simple_paths(graph, source=node, target=last_node)
lengths = [len(p) for p in paths]
lengths.append(0)
node_lvl = nodes_no - max(lengths)
if node_lvl not in node_lvls:
node_lvls[node_lvl] = [node]
else:
node_lvls[node_lvl].append(node)
nodes_ordered = []
ops = []
first_lvl = -1
last_lvl = nodes_no + 1
try:
node_lvls[first_lvl] = [first_node]
node_lvls[last_lvl] = [last_node]
for node_lvl in sorted(node_lvls):
nodelist = node_lvls[node_lvl]
nodes_ordered.extend(nodelist)
for node in nodelist:
ops.append(descriptor.layers[node][0])
matrix = nx.linalg.adjacency_matrix(
graph, nodelist=nodes_ordered).todense().tolist()
except Exception:
print(nodes_ordered)
print(descriptor)
traceback.print_exc()
return None
return matrix, ops
|
86006
|
from urllib.parse import urlparse
from arekit.processing.text.token import Token
# TODO. Leave it here but provide the base (BaseTokens) type.
# TODO. With the related API at BaseTokens.
class Tokens:
"""
Tokens used to describe a non-word text units, such as punctuation,
uknown words/chars, smiles, etc.
"""
_wrapper = "<[{}]>"
COMMA = _wrapper.format(',')
SEMICOLON = _wrapper.format(';')
COLON = _wrapper.format(':')
QUOTE = _wrapper.format('QUOTE')
DASH = _wrapper.format('-')
LONG_DASH = _wrapper.format('long_dash')
DOT = _wrapper.format('.')
TRIPLE_DOTS = _wrapper.format('…')
EXC_SIGN = _wrapper.format('!')
QUESTION_SIGN = _wrapper.format('?')
OPEN_BRACKET = _wrapper.format('OPEN_BRACKET')
CLOSED_BRACKET = _wrapper.format('CLOSED_BRACKET')
NUMBER = _wrapper.format('NUMBER')
NEW_LINE = _wrapper.format("NEW_LINE")
UNKNOWN_CHAR = _wrapper.format('UNKNOWN_CHAR')
UNKNOWN_WORD = _wrapper.format('UNKNOWN_WORD')
URL = _wrapper.format("URL")
__token_mapping = {
',': COMMA,
'.': DOT,
'…': TRIPLE_DOTS,
':': COLON,
';': SEMICOLON,
'-': DASH,
'—': LONG_DASH,
'?': QUESTION_SIGN,
'!': EXC_SIGN,
'(': OPEN_BRACKET,
')': CLOSED_BRACKET,
'{': OPEN_BRACKET,
'}': CLOSED_BRACKET,
'[': OPEN_BRACKET,
']': CLOSED_BRACKET,
'\n': NEW_LINE,
'«': QUOTE,
'»': QUOTE,
'"': QUOTE,
}
__supported_tokens = {
COMMA,
SEMICOLON,
COLON,
QUOTE,
DASH,
DOT,
LONG_DASH,
TRIPLE_DOTS,
EXC_SIGN,
QUESTION_SIGN,
OPEN_BRACKET,
CLOSED_BRACKET,
NUMBER,
URL,
NEW_LINE,
UNKNOWN_CHAR,
UNKNOWN_WORD}
@staticmethod
def try_create(subterm):
"""
Trying create a token by given 'term' parameter
subterm: unicode
I.e. term ending, so means a part of original term
"""
assert(isinstance(subterm, str))
if subterm not in Tokens.__token_mapping:
return None
return Token(term=subterm, token_value=Tokens.__token_mapping[subterm])
@staticmethod
def try_create_number(term):
assert(isinstance(term, str))
if not term.isdigit():
return None
return Token(term=term, token_value=Tokens.NUMBER)
@staticmethod
def try_create_url(term):
assert(isinstance(term, str))
result = urlparse(term)
is_correct = result.scheme and result.netloc and result.path
if not is_correct:
return None
return Token(term=term, token_value=Tokens.URL)
@staticmethod
def is_token(term):
assert(isinstance(term, str))
return term in Tokens.__supported_tokens
@staticmethod
def iter_chars_by_token(term):
"""
Iterate through charts that is related to term
token: char
"""
assert(isinstance(term, str))
for char, token in Tokens.__token_mapping.items():
if term == token:
yield char
@staticmethod
def iter_supported_tokens():
for token in Tokens.__supported_tokens:
yield token
|
86011
|
import glob
import os
import time
import cv2
import numpy as np
class GetLoadouts():
def __init__(self):
self.weapon_templates = self.generate_weapon_templates()
def generate_weapon_templates(self):
generated_weapon_templates = []
template_directory = os.path.abspath(os.path.join(
__file__, "../../templates/weapon_templates"))
all_templates = [image for image in glob.glob(
template_directory+"/*_template.png")]
for template in all_templates:
weapon = ((template.split("\weapon_templates"))
[1]).split("_template")[0][1:]
generated_weapon_templates.append({
"weapon": weapon,
"gray": cv2.imread(template, 0)[0:100, 0:139]
})
return generated_weapon_templates
def process_loadouts_frame(self, image):
identified_weapon = None
for template in self.weapon_templates:
current_template = template["gray"]
result = cv2.matchTemplate(
image, current_template, cv2.TM_CCOEFF_NORMED)
max_location = cv2.minMaxLoc(result)[1]
if max_location > 0.75:
identified_weapon = template["weapon"]
return identified_weapon
def identify_weapons(self, frame, side):
all_identified_weapons = []
if side == "top":
y_start = 380
y_end = 410
else:
y_start = 380 # fix this
y_end = 410 # fix this
for agent_loadout in range(0, 5):
resized_frame = frame[y_start:y_end, 1065:1204]
identified_weapon = self.process_loadouts_frame(resized_frame)
all_identified_weapons.append(identified_weapon)
y_start = y_start + 38
y_end = y_end + 38
return all_identified_weapons
def get_loadouts(self, frame):
all_identified_weapons = {}
main_frame_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
all_identified_weapons["top"] = self.identify_weapons(
main_frame_gray, "top")
return all_identified_weapons
if __name__ == "__main__":
get_all_loadouts = GetLoadouts()
tab_images_directory = os.path.abspath(
os.path.join(__file__, "../../test_images/Tab Images/"))
for i in range(2, 10):
start = time.time()
image = cv2.imread('{}/{}.png'.format(tab_images_directory, i))
print("===================Image No. {}===================".format(i))
identified_weapons = get_all_loadouts.get_loadouts(image)
print("Identified Weapons", identified_weapons)
end = time.time()
print("Time elapsed", end - start)
|
86016
|
import setuptools
setuptools.setup(
name="itanium_demangler",
version="1.0",
author="whitequark",
author_email="<EMAIL>",
description="Pure Python parser for mangled itanium symbols",
long_description=open("README.md").read(),
long_description_content_type="text/markdown",
url="https://github.com/whitequark/python-itanium_demangler",
packages=setuptools.find_packages(),
classifiers=[
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 2",
"Operating System :: OS Independent",
],
)
|
86066
|
class Listlike:
def __init__(self):
self.list = [1, 2, 3, 4, 5]
def __getitem__(self, index):
return self.list[index]
def __len__(self):
return len(self.list)
>>> listlike = Listlike()
>>> list(listlike)
[1, 2, 3, 4, 5]
|
86080
|
import torch
from torch.autograd import Variable
from ptstat.core import RandomVariable, _to_v
# TODO: Implement Uniform(a, b) constructor.
class Uniform(RandomVariable):
"""
Uniform(0, 1) iid rv.
"""
def __init__(self, size, cuda=False):
super(Uniform, self).__init__()
assert len(size) == 2, str(size)
self._cuda = cuda
self._p_size = size
def _size(self):
return self._p_size
def _log_pdf(self, x):
return self._entropy()
def _sample(self):
# TODO: Use CUDA random_ when implemented.
y = Variable(torch.FloatTensor(*self._p_size).uniform_())
if self._cuda:
y = y.cuda()
return y
def _entropy(self):
return _to_v(0, self._p_size[0], self._cuda)
|
86082
|
from ir_sim.world import obs_circle
from math import pi, cos, sin
import numpy as np
from collections import namedtuple
from ir_sim.util import collision_cir_cir, collision_cir_matrix, collision_cir_seg, reciprocal_vel_obs
class env_obs_cir:
def __init__(self, obs_cir_class=obs_circle, obs_model='static', obs_cir_num=1, dist_mode = 0, step_time=0.1, components=[], **kwargs):
self.obs_cir_class = obs_cir_class
self.obs_num = obs_cir_num
self.dist_mode = dist_mode
self.obs_cir_list = []
self.components = components
self.obs_model = obs_model # 'static' 'dynamic'
self.obs_square = kwargs.get('obs_square', [0, 0, 10, 10])
self.obs_interval = kwargs.get('obs_interval', 1)
if self.obs_num > 0:
if self.dist_mode == 0:
assert 'obs_radius_list' and 'obs_state_list' in kwargs.keys()
obs_radius_list = kwargs['obs_radius_list']
obs_state_list = kwargs['obs_state_list']
obs_goal_list = kwargs.get('obs_goal_list', [0]*self.obs_num)
if len(obs_radius_list) < self.obs_num:
temp_end = obs_radius_list[-1]
obs_radius_list += [temp_end for i in range(self.obs_num - len(obs_radius_list))]
else:
obs_radius_list = kwargs.get('obs_radius_list', [0.2])
obs_state_list, obs_goal_list, obs_radius_list = self.obs_state_dis(obs_init_mode=self.dist_mode, radius=obs_radius_list[0], **kwargs)
if self.obs_model == 'dynamic':
self.rvo = reciprocal_vel_obs(vxmax = 1.5, vymax = 1.5, **kwargs)
for i in range(self.obs_num):
obs_cir = self.obs_cir_class(id=i, state=obs_state_list[i], radius=obs_radius_list[i], step_time=step_time, obs_model=obs_model, goal=obs_goal_list[i], **kwargs)
self.obs_cir_list.append(obs_cir)
def step_wander(self, **kwargs):
ts = self.obs_total_states()
rvo_vel_list = list(map(lambda agent_s: self.rvo.cal_vel(agent_s, nei_state_list=ts[1]), ts[0]))
arrive_flag = False
for i, obs_cir in enumerate(self.obs_cir_list):
obs_cir.move_forward(rvo_vel_list[i], **kwargs)
if obs_cir.arrive():
arrive_flag = True
if arrive_flag:
goal_list = self.random_goal(**kwargs)
for i, obs_cir in enumerate(self.obs_cir_list):
obs_cir.goal = goal_list[i]
def obs_state_dis(self, obs_init_mode=1, radius=0.2, circular=[5, 5, 4], min_radius=0.2, max_radius=1, **kwargs):
# init_mode: 1 single row
# 2 random
# 3 circular
# square area: x_min, y_min, x_max, y_max
# circular area: x, y, radius
self.random_bear = kwargs.get('random_bear', False)
random_radius = kwargs.get('random_radius', False)
num = self.obs_num
state_list, goal_list = [], []
if obs_init_mode == 1:
# single row
state_list = [np.array([ [i * self.obs_interval], [self.obs_square[1]]]) for i in range(int(self.obs_square[0]), int(self.obs_square[0])+num)]
goal_list = [np.array([ [i * self.obs_interval], [self.obs_square[3]] ]) for i in range(int(self.obs_square[0]), int(self.obs_square[0])+num)]
goal_list.reverse()
elif obs_init_mode == 2:
# random
state_list, goal_list = self.random_start_goal(**kwargs)
elif obs_init_mode == 3:
# circular
circle_point = np.array(circular)
theta_step = 2*pi / num
theta = 0
while theta < 2*pi:
state = circle_point + np.array([ cos(theta) * circular[2], sin(theta) * circular[2], theta + pi- circular[2] ])
goal = circle_point[0:2] + np.array([cos(theta+pi), sin(theta+pi)]) * circular[2]
theta = theta + theta_step
state_list.append(state[:, np.newaxis])
goal_list.append(goal[:, np.newaxis])
if random_radius:
radius_list = np.random.uniform(low = min_radius, high = max_radius, size = (num,))
else:
radius_list = [radius for i in range(num)]
return state_list, goal_list, radius_list
def random_start_goal(self, **kwargs):
num = self.obs_num
random_list = []
goal_list = []
while len(random_list) < 2*num:
new_point = np.random.uniform(low = self.obs_square[0:2], high = self.obs_square[2:4], size = (1, 2)).T
if not self.check_collision(new_point, random_list, self.components, self.obs_interval):
random_list.append(new_point)
start_list = random_list[0 : num]
goal_list = random_list[num : 2 * num]
return start_list, goal_list
def random_goal(self, **kwargs):
num = self.obs_num
random_list = []
while len(random_list) < num:
new_point = np.random.uniform(low = self.obs_square[0:2], high = self.obs_square[2:4], size = (1, 2)).T
if not self.check_collision(new_point, random_list, self.components, self.obs_interval):
random_list.append(new_point)
return random_list
def check_collision(self, check_point, point_list, components, range):
circle = namedtuple('circle', 'x y r')
point = namedtuple('point', 'x y')
self_circle = circle(check_point[0, 0], check_point[1, 0], range/2)
# check collision with map
if collision_cir_matrix(self_circle, components['map_matrix'], components['xy_reso'], components['offset']):
return True
# check collision with line obstacles
for line in components['obs_lines'].obs_line_states:
segment = [point(line[0], line[1]), point(line[2], line[3])]
if collision_cir_seg(self_circle, segment):
return True
for point in point_list:
if self.distance(check_point, point) < range:
return True
return False
def distance(self, point1, point2):
diff = point2[0:2] - point1[0:2]
return np.linalg.norm(diff)
def obs_total_states(self):
agent_state_list = list(map(lambda a: np.squeeze( a.omni_state()), self.obs_cir_list))
nei_state_list = list(map(lambda a: np.squeeze( a.omni_obs_state()), self.obs_cir_list))
return agent_state_list, nei_state_list
|
86106
|
def remove_whilespace_nodes(node, unlink=False):
"""Removes all of the whitespace-only text decendants of a DOM node.
When creating a DOM from an XML source, XML parsers are required to
consider several conditions when deciding whether to include
whitespace-only text nodes. This function ignores all of those
conditions and removes all whitespace-only text decendants of the
specified node. If the unlink flag is specified, the removed text
nodes are unlinked so that their storage can be reclaimed. If the
specified node is a whitespace-only text node then it is left
unmodified."""
remove_list = []
for child in node.childNodes:
if child.nodeType == dom.Node.TEXT_NODE and \
not child.data.strip():
remove_list.append(child)
elif child.hasChildNodes():
remove_whilespace_nodes(child, unlink)
for node in remove_list:
node.parentNode.removeChild(node)
if unlink:
node.unlink()
|
86116
|
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('adverts', '0006_auto_20150303_0009'),
]
operations = [
migrations.AlterField(
model_name='adchannel',
name='ad_formats',
field=models.ManyToManyField(
to='adverts.AdFormat', help_text='size and shape of ad'
),
),
migrations.AlterField(
model_name='advert',
name='ad_channels',
field=models.ManyToManyField(
blank=True,
to='adverts.AdChannel',
help_text='Where to show the ad'
),
),
]
|
86135
|
from math import sqrt, atan
import pytest
from pytest import approx
import ts2vg
import numpy as np
@pytest.fixture
def empty_ts():
return []
@pytest.fixture
def sample_ts():
return [3.0, 4.0, 2.0, 1.0]
def test_basic(sample_ts):
out_got = ts2vg.NaturalVG().build(sample_ts).edges
out_truth = [
(0, 1),
(1, 2),
(1, 3),
(2, 3),
]
assert sorted(sorted(e) for e in out_got) == sorted(sorted(e) for e in out_truth)
def test_left_to_right(sample_ts):
out_got = ts2vg.NaturalVG(directed='left_to_right').build(sample_ts).edges
out_truth = [
(0, 1),
(1, 2),
(1, 3),
(2, 3),
]
assert sorted(out_got) == sorted(out_truth)
def test_left_to_right_distance(sample_ts):
out_got = ts2vg.NaturalVG(directed='left_to_right', weighted='distance').build(sample_ts).edges
out_truth = [
(0, 1, approx(sqrt(2.))),
(1, 2, approx(sqrt(5.))),
(1, 3, approx(sqrt(13.))),
(2, 3, approx(sqrt(2.))),
]
assert sorted(out_got) == sorted(out_truth)
def test_left_to_right_sq_distance(sample_ts):
out_got = ts2vg.NaturalVG(directed='left_to_right', weighted='sq_distance').build(sample_ts).edges
out_truth = [
(0, 1, approx(2.)),
(1, 2, approx(5.)),
(1, 3, approx(13.)),
(2, 3, approx(2.)),
]
assert sorted(out_got) == sorted(out_truth)
def test_left_to_right_v_distance(sample_ts):
out_got = ts2vg.NaturalVG(directed='left_to_right', weighted='v_distance').build(sample_ts).edges
out_truth = [
(0, 1, approx(1.)),
(1, 2, approx(-2.)),
(1, 3, approx(-3.)),
(2, 3, approx(-1.)),
]
assert sorted(out_got) == sorted(out_truth)
def test_left_to_right_abs_v_distance(sample_ts):
out_got = ts2vg.NaturalVG(directed='left_to_right', weighted='abs_v_distance').build(sample_ts).edges
out_truth = [
(0, 1, approx(1.)),
(1, 2, approx(2.)),
(1, 3, approx(3.)),
(2, 3, approx(1.)),
]
assert sorted(out_got) == sorted(out_truth)
def test_left_to_right_h_distance(sample_ts):
out_got = ts2vg.NaturalVG(directed='left_to_right', weighted='h_distance').build(sample_ts).edges
out_truth = [
(0, 1, approx(1.)),
(1, 2, approx(1.)),
(1, 3, approx(2.)),
(2, 3, approx(1.)),
]
assert sorted(out_got) == sorted(out_truth)
def test_left_to_right_abs_h_distance(sample_ts):
out_got = ts2vg.NaturalVG(directed='left_to_right', weighted='abs_h_distance').build(sample_ts).edges
out_truth = [
(0, 1, approx(1.)),
(1, 2, approx(1.)),
(1, 3, approx(2.)),
(2, 3, approx(1.)),
]
assert sorted(out_got) == sorted(out_truth)
def test_left_to_right_slope(sample_ts):
out_got = ts2vg.NaturalVG(directed='left_to_right', weighted='slope').build(sample_ts).edges
out_truth = [
(0, 1, approx(1.)),
(1, 2, approx(-2.)),
(1, 3, approx(-1.5)),
(2, 3, approx(-1.)),
]
assert sorted(out_got) == sorted(out_truth)
def test_left_to_right_abs_slope(sample_ts):
out_got = ts2vg.NaturalVG(directed='left_to_right', weighted='abs_slope').build(sample_ts).edges
out_truth = [
(0, 1, approx(1.)),
(1, 2, approx(2.)),
(1, 3, approx(1.5)),
(2, 3, approx(1.)),
]
assert sorted(out_got) == sorted(out_truth)
def test_left_to_right_angle(sample_ts):
out_got = ts2vg.NaturalVG(directed='left_to_right', weighted='angle').build(sample_ts).edges
out_truth = [
(0, 1, approx(atan(1.))),
(1, 2, approx(atan(-2.))),
(1, 3, approx(atan(-1.5))),
(2, 3, approx(atan(-1.))),
]
assert sorted(out_got) == sorted(out_truth)
def test_left_to_right_abs_angle(sample_ts):
out_got = ts2vg.NaturalVG(directed='left_to_right', weighted='abs_angle').build(sample_ts).edges
out_truth = [
(0, 1, approx(atan(1.))),
(1, 2, approx(atan(2.))),
(1, 3, approx(atan(1.5))),
(2, 3, approx(atan(1.))),
]
assert sorted(out_got) == sorted(out_truth)
def test_top_to_bottom(sample_ts):
out_got = ts2vg.NaturalVG(directed='top_to_bottom').build(sample_ts).edges
out_truth = [
(1, 0),
(1, 2),
(1, 3),
(2, 3),
]
assert sorted(out_got) == sorted(out_truth)
def test_top_to_bottom_distance(sample_ts):
out_got = ts2vg.NaturalVG(directed='top_to_bottom', weighted='distance').build(sample_ts).edges
out_truth = [
(1, 0, approx(sqrt(2.))),
(1, 2, approx(sqrt(5.))),
(1, 3, approx(sqrt(13.))),
(2, 3, approx(sqrt(2.))),
]
assert sorted(out_got) == sorted(out_truth)
def test_top_to_bottom_sq_distance(sample_ts):
out_got = ts2vg.NaturalVG(directed='top_to_bottom', weighted='sq_distance').build(sample_ts).edges
out_truth = [
(1, 0, approx(2.)),
(1, 2, approx(5.)),
(1, 3, approx(13.)),
(2, 3, approx(2.)),
]
assert sorted(out_got) == sorted(out_truth)
def test_top_to_bottom_v_distance(sample_ts):
out_got = ts2vg.NaturalVG(directed='top_to_bottom', weighted='v_distance').build(sample_ts).edges
out_truth = [
(1, 0, approx(-1.)),
(1, 2, approx(-2.)),
(1, 3, approx(-3.)),
(2, 3, approx(-1.)),
]
assert sorted(out_got) == sorted(out_truth)
def test_top_to_bottom_abs_v_distance(sample_ts):
out_got = ts2vg.NaturalVG(directed='top_to_bottom', weighted='abs_v_distance').build(sample_ts).edges
out_truth = [
(1, 0, approx(1.)),
(1, 2, approx(2.)),
(1, 3, approx(3.)),
(2, 3, approx(1.)),
]
assert sorted(out_got) == sorted(out_truth)
def test_top_to_bottom_h_distance(sample_ts):
out_got = ts2vg.NaturalVG(directed='top_to_bottom', weighted='h_distance').build(sample_ts).edges
out_truth = [
(1, 0, approx(-1.)),
(1, 2, approx(1.)),
(1, 3, approx(2.)),
(2, 3, approx(1.)),
]
assert sorted(out_got) == sorted(out_truth)
def test_top_to_bottom_abs_h_distance(sample_ts):
out_got = ts2vg.NaturalVG(directed='top_to_bottom', weighted='abs_h_distance').build(sample_ts).edges
out_truth = [
(1, 0, approx(1.)),
(1, 2, approx(1.)),
(1, 3, approx(2.)),
(2, 3, approx(1.)),
]
assert sorted(out_got) == sorted(out_truth)
def test_top_to_bottom_slope(sample_ts):
out_got = ts2vg.NaturalVG(directed='top_to_bottom', weighted='slope').build(sample_ts).edges
out_truth = [
(1, 0, approx(1.)),
(1, 2, approx(-2.)),
(1, 3, approx(-1.5)),
(2, 3, approx(-1.)),
]
assert sorted(out_got) == sorted(out_truth)
def test_top_to_bottom_abs_slope(sample_ts):
out_got = ts2vg.NaturalVG(directed='top_to_bottom', weighted='abs_slope').build(sample_ts).edges
out_truth = [
(1, 0, approx(1.)),
(1, 2, approx(2.)),
(1, 3, approx(1.5)),
(2, 3, approx(1.)),
]
assert sorted(out_got) == sorted(out_truth)
def test_top_to_bottom_angle(sample_ts):
out_got = ts2vg.NaturalVG(directed='top_to_bottom', weighted='angle').build(sample_ts).edges
out_truth = [
(1, 0, approx(atan(1.))),
(1, 2, approx(atan(-2.))),
(1, 3, approx(atan(-1.5))),
(2, 3, approx(atan(-1.))),
]
assert sorted(out_got) == sorted(out_truth)
def test_top_to_bottom_abs_angle(sample_ts):
out_got = ts2vg.NaturalVG(directed='top_to_bottom', weighted='abs_angle').build(sample_ts).edges
out_truth = [
(1, 0, approx(atan(1.))),
(1, 2, approx(atan(2.))),
(1, 3, approx(atan(1.5))),
(2, 3, approx(atan(1.))),
]
assert sorted(out_got) == sorted(out_truth)
def test_adjacency_matrix(sample_ts):
out_got = ts2vg.NaturalVG().build(sample_ts).adjacency_matrix(triangle='upper')
out_truth = [
[0, 1, 0, 0],
[0, 0, 1, 1],
[0, 0, 0, 1],
[0, 0, 0, 0],
]
np.testing.assert_array_equal(out_got, out_truth)
def test_degrees(sample_ts):
out_got = ts2vg.NaturalVG().build(sample_ts).degrees
out_truth = [1, 3, 2, 2]
np.testing.assert_array_equal(out_got, out_truth)
def test_not_built():
with pytest.raises(ts2vg.graph.base.NotBuiltError):
ts2vg.NaturalVG().edges
def test_empty_ts(empty_ts):
out_got = ts2vg.NaturalVG().build(empty_ts).edges
out_truth = []
assert out_got == out_truth
def test_with_xs(sample_ts):
xs = [0., 1., 2., 2.1]
out_got = ts2vg.NaturalVG().build(sample_ts, xs=xs).edges
out_truth = [
(0, 1),
(1, 2),
(2, 3),
]
assert sorted(sorted(e) for e in out_got) == sorted(sorted(e) for e in out_truth)
def test_with_incompatible_xs(sample_ts):
xs = [0., 1., 2., 3., 4., 5., 6.]
with pytest.raises(ValueError):
ts2vg.NaturalVG().build(sample_ts, xs=xs)
def test_with_non_monotonic_increasing_xs(sample_ts):
xs = [0., 4., 2., 3.]
with pytest.raises(ValueError):
ts2vg.NaturalVG().build(sample_ts, xs=xs)
|
86160
|
from __future__ import absolute_import, division, print_function
from six.moves import range
def intify(a):
return tuple([int(round(val)) for val in a])
def reference_map(sg, mi):
from cctbx import sgtbx
asu = sgtbx.reciprocal_space_asu(sg.type())
isym_ = []
mi_ = []
for hkl in mi:
found = False
for i_inv in range(sg.f_inv()):
for i_smx in range(sg.n_smx()):
rt_mx = sg(0, i_inv, i_smx)
hkl_ = intify(hkl * rt_mx.r())
if asu.is_inside(hkl_):
mi_.append(hkl_)
if i_inv:
isym_.append(- i_smx)
else:
isym_.append(i_smx)
found = True
break
if found:
continue
else:
assert(not sg.is_centric())
for i_inv in range(sg.f_inv()):
for i_smx in range(sg.n_smx()):
rt_mx = sg(0, i_inv, i_smx)
_hkl = [-h for h in hkl]
mhkl_ = intify(_hkl * rt_mx.r())
if asu.is_inside(mhkl_):
mi_.append(mhkl_)
isym_.append(- i_smx)
found = True
break
return mi_, isym_
def tst_map_to_asu_isym(anomalous_flag):
from cctbx import sgtbx
from cctbx.miller import map_to_asu_isym
from cctbx.array_family import flex
mi = flex.miller_index()
i = flex.int()
import random
nhkl = 1000
for j in range(nhkl):
hkl = [random.randint(-10, 10) for j in range(3)]
mi.append(hkl)
i.append(0)
spacegroup = sgtbx.space_group_symbols(195).hall()
sg = sgtbx.space_group(spacegroup)
mi_, isym_ = reference_map(sg, mi)
map_to_asu_isym(sg.type(), anomalous_flag, mi, i)
for j in range(nhkl):
assert(i[j] == isym_[j])
if __name__ == '__main__':
tst_map_to_asu_isym(True)
tst_map_to_asu_isym(False)
print('OK')
|
86170
|
import foo
class testing():
def tester(self):
return self.blah
    def tester2(self):
        print("bleh")
|
86173
|
from random import *
import subprocess
import sys
import warnings
warnings.filterwarnings("ignore")
nb_prob = 50
open_prob = 100
prob_end = 5
num_range = [0, 100]
expr = ""
operators = ["+","-","*","/", "%"]
opened_p = 0
min_expr_len = 5
max_expr_len = 30
no_overflow = False
error = False
def append_number():
global expr
if (len(expr) > 0 and expr[len(expr) - 1] in operators):
expr += str(randint(num_range[0],num_range[1]))
def append_ope():
global expr
if (len(expr) > 0 and (expr[len(expr) - 1].isnumeric() or expr[len(expr) - 1] == ")")):
expr += operators[randint(0,4)]
def str_to_stdout(s):
ex = subprocess.Popen(['echo', s],stdout=subprocess.PIPE)
return ex.stdout
while not error:
while not no_overflow:
while (len(expr) < min_expr_len or randint(0,100) > prob_end) and len(expr) < max_expr_len:
if randint(0,100) < nb_prob:
append_ope()
expr += str(randint(num_range[0],num_range[1]))
expr += operators[randint(0,4)]
nb_prob = 50
if (opened_p > 0):
open_prob = 25
else:
open_prob = 100
else:
if (randint(0,100) < open_prob):
expr += "("
nb_prob = 100
opened_p += 1
if (opened_p > 0):
open_prob = 0
else:
open_prob = 100
else:
append_number()
opened_p += -1
expr += ")"
if (opened_p > 0):
open_prob = 25
else:
open_prob = 100
append_number()
        while expr[len(expr) - 1] == "(":
expr = expr[:-1]
opened_p+= -1
while opened_p > 0:
expr += ")"
opened_p+= -1
expr = expr.replace("()","1")
try:
ex = subprocess.Popen('bc',stdin=str_to_stdout(expr),stdout=subprocess.PIPE, stderr=subprocess.PIPE)
tmp_res, err = ex.communicate()
tmp_res = int(tmp_res.decode('ascii').replace("\n",""))
no_overflow = True
if tmp_res > 1000000 or tmp_res < -1000000:
raise Exception()
except:
expr = ""
open_prob = 100
nb_prob = 50
no_overflow = False
ex = subprocess.Popen(['./eval_expr',expr],stdout=subprocess.PIPE, stderr=subprocess.PIPE)
result, err = ex.communicate()
ex = subprocess.Popen('bc',stdin=str_to_stdout(expr),stdout=subprocess.PIPE, stderr=subprocess.PIPE)
solution, err = ex.communicate()
solution = int(solution.decode('ascii').replace("\n",""))
print("With expr : \"" +expr + "\"\nGot : " + result.decode('ascii').replace("\n","") + "\nExpected : "+ str(solution))
if int(result) != solution:
print("ERROR\n\n")
error = True
else:
print("PASS\n\n")
expr = ""
open_prob = 100
nb_prob = 50
no_overflow = False
|
86224
|
class Cell:
def __init__(self, c=' '):
self.c = c
self.highlight = {}
def __mul__(self, n):
return [Cell(self.c) for i in range(n)]
def __str__(self):
return self.c
class Highlight:
def __init__(self, line, highlight):
self.line = line
self.highlight = highlight
self.start = 0
self.end = 0
def s(self):
return (self.line, self.start, self.end, tuple(self.highlight.items()))
def __eq__(self, h):
return self.s() == h.s()
def __hash__(self):
return hash((self.line, self.start, self.end, tuple(self.highlight.items())))
class Screen:
def __init__(self):
self.x = 0
self.y = 0
self.resize(1, 1)
self.highlight = {}
self.changes = 0
def resize(self, w, h):
self.w = w
self.h = h
# TODO: should resize clear?
self.screen = [Cell() * w for i in range(h)]
self.scroll_region = [0, self.h, 0, self.w]
# clamp cursor
self.x = min(self.x, w - 1)
self.y = min(self.y, h - 1)
def clear(self):
self.resize(self.w, self.h)
def scroll(self, dy):
ya, yb = self.scroll_region[0:2]
xa, xb = self.scroll_region[2:4]
yi = (ya, yb)
if dy < 0:
yi = (yb, ya - 1)
for y in range(yi[0], yi[1], int(dy / abs(dy))):
if ya <= y + dy < yb:
self.screen[y][xa:xb] = self.screen[y + dy][xa:xb]
else:
self.screen[y][xa:xb] = Cell() * (xb - xa)
def redraw(self, updates):
blacklist = [
'mode_change',
'bell', 'mouse_on', 'highlight_set',
'update_fb', 'update_bg', 'update_sp', 'clear',
]
changed = False
for cmd in updates:
if not cmd:
continue
name, args = cmd[0], cmd[1:]
if name == 'cursor_goto':
self.y, self.x = args[0]
elif name == 'eol_clear':
changed = True
self.screen[self.y][self.x:] = Cell() * (self.w - self.x)
elif name == 'put':
changed = True
for cs in args:
for c in cs:
cell = self.screen[self.y][self.x]
cell.c = c
cell.highlight = self.highlight
self.x += 1
# TODO: line wrap is not specified, neither is wrapping off the end. semi-sane defaults.
if self.x >= self.w:
self.x = 0
self.y += 1
if self.y >= self.h:
self.y = 0
elif name == 'resize':
changed = True
self.resize(*args[0])
elif name == 'highlight_set':
self.highlight = args[0][0]
elif name == 'set_scroll_region':
self.scroll_region = args[0]
elif name == 'scroll':
changed = True
self.scroll(args[0][0])
elif name in blacklist:
pass
# else:
# print('unknown update cmd', name)
if changed:
self.changes += 1
def highlights(self):
hlset = []
for y, line in enumerate(self.screen):
cur = {}
h = None
for x, cell in enumerate(line):
if h and cur and cell.highlight == cur:
h.end = x + 1
else:
cur = cell.highlight
if cur:
h = Highlight(y, cur)
h.start = x
h.end = x + 1
hlset.append(h)
return hlset
def p(self):
print('-' * self.w)
print(str(self))
print('-' * self.w)
def __setitem__(self, xy, c):
x, y = xy
try:
cell = self.screen[y][x]
cell.c = c
cell.highlight = self.highlight
except IndexError:
pass
def __getitem__(self, y):
if isinstance(y, tuple):
return self.screen[y[1]][y[0]]
return ''.join(str(c) for c in self.screen[y])
def __str__(self):
return '\n'.join([self[y] for y in range(self.h)])
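# Hypothetical usage sketch (not part of the original module): feed a few
# nvim-style redraw events through Screen and print the resulting text grid.
if __name__ == '__main__':
    s = Screen()
    s.redraw([
        ['resize', (10, 2)],      # 10 columns by 2 rows
        ['cursor_goto', (0, 0)],  # row 0, column 0
        ['put', ['h'], ['i']],    # write "hi" at the cursor
    ])
    s.p()  # prints the framed 2x10 grid with "hi" on the first line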
|
86265
|
from collections import namedtuple
Param = namedtuple('Param', ['name', 'display_name', 'type', 'required', 'default', 'choices'])
Param.__new__.__defaults__ = (None,) * len(Param._fields)
_FORMATTERS = {}
def formatter(name=None, params=None):
    def deco_func(func):
        # register the formatter under the explicit name or the function's own name
        func_name = name if name else func.__name__
        _FORMATTERS[func_name] = (params or [], func)
        return func
    return deco_func
def format(value, formatter, params=None):
    if formatter not in _FORMATTERS:
        return value
    func = _FORMATTERS[formatter][1]
    # TODO: validate the given params against the registered Param specs
    return func(value, **(params or {}))
@formatter(name='prefix', params=[
Param(name="prefix", display_name='前缀', type='str', required=True)
])
def prefix_formatter(value, prefix):
return f'{prefix}{str(value)}'
@formatter(name='suffix', params=[
Param(name="suffix", display_name='后缀', type='str', required=True)
])
def suffix_formatter(value, suffix):
return f'{str(value)}{suffix}'
@formatter(name='bothfix', params=[
Param(name="prefix", display_name='前缀', type='str'),
Param(name="suffix", display_name='后缀', type='str')
])
def bothfix_formatter(value, prefix='', suffix=''):
result = value
if prefix:
result = prefix_formatter(result, prefix)
if suffix:
result = suffix_formatter(result, suffix)
return result
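# Hypothetical usage sketch (not part of the original module): apply the
# registered formatters through the generic format() entry point.
if __name__ == '__main__':
    print(format('42', 'prefix', {'prefix': '$'}))                  # -> $42
    print(format('42', 'bothfix', {'prefix': '[', 'suffix': ']'}))  # -> [42]
    print(format('42', 'unknown'))                                  # unknown formatter, value returned unchanged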
|
86304
|
from chainer.links import BatchNormalization, GroupNormalization
from chainermn.links import MultiNodeBatchNormalization
from chainer.functions import softmax_cross_entropy
from chainer.optimizers import Adam, SGD, MomentumSGD, CorrectedMomentumSGD, RMSprop
from chainer.iterators import MultiprocessIterator, SerialIterator
from chainer.optimizer import WeightDecay
from chainer import serializers
from chainer import training
from chainer.training import extensions
from chainer.backends.cuda import get_device_from_id
import chainermn
from src.datasets.msd_bound import MSDBoundDataset
from src.links.model.vaeseg import BoundaryStream, CPCPredictor, Decoder, Encoder, VAE, VD
from src.training.updaters.vaeseg_updater import VAESegUpdater
from src.training.extensions.vaeseg_evaluator import VAESegEvaluator
from src.training.updaters.encdec_seg_updater import EncDecSegUpdater
from src.training.extensions.encdec_seg_evaluator import EncDecSegEvaluator
from src.training.updaters.boundseg_updater import BoundSegUpdater
from src.training.extensions.boundseg_evaluator import BoundSegEvaluator
from src.training.updaters.cpcseg_updater import CPCSegUpdater
from src.training.extensions.cpcseg_evaluator import CPCSegEvaluator
def _setup_communicator(config, gpu_start_id=0):
if config['mn']:
comm = chainermn.create_communicator('pure_nccl')
is_master = (comm.rank == 0)
device = comm.intra_rank + gpu_start_id
else:
comm = None
is_master = True
device = gpu_start_id
return comm, is_master, device
def _setup_datasets(config, comm, is_master):
if is_master:
if config['dataset_name'] == 'msd_bound':
train_data = MSDBoundDataset(config, config['train_list_path'])
validation_data = MSDBoundDataset(config, config['validation_list_path'])
test_data = MSDBoundDataset(config, config['test_list_path'])
validation_data.random_scale = False
test_data.random_scale = False
validation_data.shift_intensity = 0
test_data.shift_intensity = 0
validation_data.random_flip = False
test_data.random_flip = False
validation_data.nb_copies = 1
test_data.nb_copies = 1
validation_data.training = False
test_data.training = False
else:
raise ValueError('Unknown dataset_name: {}'.format(config['dataset_name']))
print('Training dataset size: {}'.format(len(train_data)))
print('Validation dataset size: {}'.format(len(validation_data)))
print('Test dataset size: {}'.format(len(test_data)))
else:
train_data = None
validation_data = None
test_data = None
# scatter dataset
if comm is not None:
train_data = chainermn.scatter_dataset(train_data, comm, shuffle=True)
validation_data = chainermn.scatter_dataset(validation_data, comm, shuffle=True)
test_data = chainermn.scatter_dataset(test_data, comm, shuffle=True)
return train_data, validation_data, test_data
def _setup_vae_segmentor(config, comm=None):
in_channels = config['in_channels']
base_channels = config['base_channels']
out_channels = config['nb_labels']
nested_label = config['nested_label']
norm = eval(config['vaeseg_norm'])
bn_first = config['vaeseg_bn_first']
ndim_latent = config['vaeseg_ndim_latent']
mode = config['vaeseg_skip_connect_mode']
input_shape = eval(config['crop_size'])
if nested_label:
out_channels = 2 * (out_channels - 1)
encoder = Encoder(
base_channels=base_channels,
norm=norm,
bn_first=bn_first,
ndim_latent=ndim_latent,
comm=comm
)
embedder = VD(
channels=8*base_channels,
norm=norm,
bn_first=bn_first,
ndim_latent=ndim_latent,
comm=comm
)
decoder = Decoder(
base_channels=base_channels,
out_channels=out_channels,
norm=norm,
bn_first=bn_first,
mode=mode,
comm=comm
)
vae = VAE(
in_channels=in_channels,
base_channels=base_channels,
norm=norm,
bn_first=bn_first,
input_shape=input_shape,
comm=comm
)
return encoder, embedder, decoder, vae
def _setup_vae_segmentor_only(config, comm=None):
base_channels = config['base_channels']
out_channels = config['nb_labels']
nested_label = config['nested_label']
norm = eval(config['vaeseg_norm'])
bn_first = config['vaeseg_bn_first']
ndim_latent = config['vaeseg_ndim_latent']
mode = config['vaeseg_skip_connect_mode']
if nested_label:
out_channels = 2 * (out_channels - 1)
encoder = Encoder(
base_channels=base_channels,
norm=norm,
bn_first=bn_first,
ndim_latent=ndim_latent,
comm=comm
)
decoder = Decoder(
base_channels=base_channels,
out_channels=out_channels,
norm=norm,
bn_first=bn_first,
mode=mode,
comm=comm
)
return encoder, decoder
def _setup_cpc_segmentor(config, comm=None):
base_channels = config['base_channels']
out_channels = config['nb_labels']
nested_label = config['nested_label']
norm = eval(config['vaeseg_norm'])
bn_first = config['vaeseg_bn_first']
ndim_latent = config['vaeseg_ndim_latent']
mode = config['vaeseg_skip_connect_mode']
input_shape = eval(config['crop_size'])
grid_size = config['grid_size']
cpc_pattern = config['cpc_pattern']
if nested_label:
out_channels = 2 * (out_channels - 1)
encoder = Encoder(
base_channels=base_channels,
norm=norm,
bn_first=bn_first,
ndim_latent=ndim_latent,
comm=comm
)
decoder = Decoder(
base_channels=base_channels,
out_channels=out_channels,
norm=norm,
bn_first=bn_first,
mode=mode,
comm=comm
)
cpcpred1 = CPCPredictor(
base_channels=base_channels*8,
norm=norm,
bn_first=bn_first,
grid_size=grid_size,
input_shape=input_shape,
upper=True,
cpc_pattern=cpc_pattern,
comm=comm
)
return encoder, decoder, cpcpred1
def _setup_bound_segmentor(config, comm=None):
base_channels = config['base_channels']
out_channels = config['nb_labels']
nested_label = config['nested_label']
norm = eval(config['vaeseg_norm'])
bn_first = config['vaeseg_bn_first']
mode = config['vaeseg_skip_connect_mode']
ndim_latent = config['vaeseg_ndim_latent']
if nested_label:
out_channels = 2 * (out_channels - 1)
encoder = Encoder(
base_channels=base_channels,
norm=norm,
bn_first=bn_first,
ndim_latent=ndim_latent,
comm=comm
)
decoder = Decoder(
base_channels=base_channels,
out_channels=out_channels,
norm=norm,
bn_first=bn_first,
mode=mode,
comm=comm
)
boundary = BoundaryStream(
base_channels=base_channels,
out_channels=out_channels,
norm=norm,
comm=comm
)
return encoder, decoder, boundary
def _setup_iterators(config, batch_size, train_data, validation_data, test_data):
if isinstance(config['loaderjob'], int) and config['loaderjob'] > 1:
train_iterator = MultiprocessIterator(
train_data, batch_size, n_processes=config['loaderjob'])
validation_iterator = MultiprocessIterator(
validation_data, batch_size, n_processes=config['loaderjob'],
repeat=False, shuffle=False)
test_iterator = MultiprocessIterator(
test_data, batch_size, n_processes=config['loaderjob'],
repeat=False, shuffle=False)
else:
train_iterator = SerialIterator(train_data, batch_size)
validation_iterator = SerialIterator(
validation_data, batch_size, repeat=False, shuffle=False)
test_iterator = SerialIterator(
test_data, batch_size, repeat=False, shuffle=False)
return train_iterator, validation_iterator, test_iterator
# Optimizer
def _setup_optimizer(config, model, comm):
optimizer_name = config['optimizer']
lr = float(config['init_lr'])
weight_decay = float(config['weight_decay'])
if optimizer_name == 'Adam':
optimizer = Adam(alpha=lr, weight_decay_rate=weight_decay)
elif optimizer_name in \
('SGD', 'MomentumSGD', 'CorrectedMomentumSGD', 'RMSprop'):
optimizer = eval(optimizer_name)(lr=lr)
if weight_decay > 0.:
optimizer.add_hook(WeightDecay(weight_decay))
else:
raise ValueError('Invalid optimizer: {}'.format(optimizer_name))
if comm is not None:
optimizer = chainermn.create_multi_node_optimizer(optimizer, comm)
optimizer.setup(model)
return optimizer
# Updater
def _setup_updater(config, device, train_iterator, optimizers):
updater_kwargs = dict()
updater_kwargs['iterator'] = train_iterator
updater_kwargs['optimizer'] = optimizers
updater_kwargs['device'] = device
if config['segmentor_name'] == 'vaeseg':
return VAESegUpdater(config, **updater_kwargs)
elif config['segmentor_name'] == 'encdec_seg':
return EncDecSegUpdater(config, **updater_kwargs)
elif config['segmentor_name'] == 'boundseg':
return BoundSegUpdater(config, **updater_kwargs)
elif config['segmentor_name'] == 'cpcseg':
return CPCSegUpdater(config, **updater_kwargs)
else:
return training.StandardUpdater(**updater_kwargs)
def _setup_extensions(config, trainer, optimizers, logging_counts, logging_attributes):
if config['segmentor_name'] == 'vaeseg':
trainer.extend(extensions.dump_graph('loss/total', out_name="segmentor.dot"))
elif config['segmentor_name'] == 'encdec_seg':
trainer.extend(extensions.dump_graph('loss/seg', out_name="segmentor.dot"))
elif config['segmentor_name'] == 'boundseg':
trainer.extend(extensions.dump_graph('loss/seg', out_name="segmentor.dot"))
elif config['segmentor_name'] == 'cpcseg':
trainer.extend(extensions.dump_graph('loss/total', out_name="segmentor.dot"))
else:
trainer.extend(extensions.dump_graph('main/loss', out_name="segmentor.dot"))
# Report
repo_trigger = (config['report_interval'], 'iteration')
trainer.extend(
extensions.LogReport(
trigger=repo_trigger
)
)
trainer.extend(
extensions.PrintReport(logging_counts + logging_attributes),
trigger=repo_trigger
)
trainer.extend(
extensions.ProgressBar()
)
snap_trigger = (config['snapshot_interval'], 'epoch')
trainer.extend(
extensions.snapshot(filename='snapshot_epoch_{.updater.epoch}'),
trigger=snap_trigger
)
for k, v in optimizers.items():
trainer.extend(
extensions.snapshot_object(v.target, k+'_epoch_{.updater.epoch}'),
trigger=snap_trigger
)
for attr in logging_attributes:
trainer.extend(
extensions.PlotReport([attr, 'validation/' + attr], 'epoch',
file_name=attr.replace('/', '_') + '.png')
)
# Trainer
def setup_trainer(config, out, batch_size, epoch, gpu_start_id):
comm, is_master, device = _setup_communicator(config, gpu_start_id)
train_data, validation_data, test_data = _setup_datasets(config, comm, is_master)
if config['segmentor_name'] == 'vaeseg':
encoder, embedder, decoder, vae = _setup_vae_segmentor(config, comm)
# load weights
if config['init_encoder'] is not None:
serializers.load_npz(config['init_encoder'], encoder)
if config['init_embedder'] is not None:
serializers.load_npz(config['init_embedder'], embedder)
if config['init_decoder'] is not None:
serializers.load_npz(config['init_decoder'], decoder)
if config['init_vae'] is not None:
serializers.load_npz(config['init_vae'], vae)
if device is not None:
get_device_from_id(device).use()
encoder.to_gpu()
embedder.to_gpu()
decoder.to_gpu()
vae.to_gpu()
opt_enc = _setup_optimizer(config, encoder, comm)
opt_emb = _setup_optimizer(config, embedder, comm)
opt_dec = _setup_optimizer(config, decoder, comm)
opt_vae = _setup_optimizer(config, vae, comm)
optimizers = {'enc': opt_enc, 'emb': opt_emb, 'dec': opt_dec, 'vae': opt_vae}
elif config['segmentor_name'] == 'cpcseg':
encoder, decoder, cpcpred1 = _setup_cpc_segmentor(config, comm)
# load weights
if config['init_encoder'] is not None:
serializers.load_npz(config['init_encoder'], encoder)
if config['init_decoder'] is not None:
serializers.load_npz(config['init_decoder'], decoder)
if config['init_cpcpred'] is not None:
serializers.load_npz(config['init_cpcpred'], cpcpred1)
if device is not None:
get_device_from_id(device).use()
encoder.to_gpu()
decoder.to_gpu()
cpcpred1.to_gpu()
opt_enc = _setup_optimizer(config, encoder, comm)
opt_dec = _setup_optimizer(config, decoder, comm)
opt_p1 = _setup_optimizer(config, cpcpred1, comm)
optimizers = {'enc': opt_enc, 'dec': opt_dec, 'cpcpred1': opt_p1}
elif config['segmentor_name'] == 'encdec_seg':
encoder, decoder = _setup_vae_segmentor_only(config, comm)
# load weights
if config['init_encoder'] is not None:
serializers.load_npz(config['init_encoder'], encoder)
if config['init_decoder'] is not None:
serializers.load_npz(config['init_decoder'], decoder)
if device is not None:
get_device_from_id(device).use()
encoder.to_gpu()
decoder.to_gpu()
opt_enc = _setup_optimizer(config, encoder, comm)
opt_dec = _setup_optimizer(config, decoder, comm)
optimizers = {'enc': opt_enc, 'dec': opt_dec}
elif config['segmentor_name'] == 'boundseg':
encoder, decoder, boundary = _setup_bound_segmentor(config, comm)
# load weights
if config['init_encoder'] is not None:
serializers.load_npz(config['init_encoder'], encoder)
if config['init_decoder'] is not None:
serializers.load_npz(config['init_decoder'], decoder)
if device is not None:
get_device_from_id(device).use()
encoder.to_gpu()
decoder.to_gpu()
boundary.to_gpu()
opt_enc = _setup_optimizer(config, encoder, comm)
opt_dec = _setup_optimizer(config, decoder, comm)
opt_bound = _setup_optimizer(config, boundary, comm)
optimizers = {'enc': opt_enc, 'dec': opt_dec, 'bound': opt_bound}
train_iterator, validation_iterator, test_iterator = \
_setup_iterators(config, batch_size, train_data, validation_data, test_data)
logging_counts = ['epoch', 'iteration']
if config['segmentor_name'] == 'vaeseg':
logging_attributes = \
['loss/rec', 'loss/kl', 'loss/total', 'acc',
'mean_dc', 'val/mean_dc', 'test/mean_dc']
if config['print_each_dc']:
for i in range(0, config['nb_labels']):
logging_attributes.append('dc_{}'.format(i))
logging_attributes.append('val/dc_{}'.format(i))
logging_attributes.append('test/dc_{}'.format(i))
elif config['segmentor_name'] == 'cpcseg':
logging_attributes = \
['loss/total', 'acc', 'loss/cpc']
for i in range(0, config['nb_labels']):
logging_attributes.append('dc_{}'.format(i))
logging_attributes.append('val/dc_{}'.format(i))
logging_attributes.append('test/dc_{}'.format(i))
elif config['segmentor_name'] == 'encdec_seg':
logging_attributes = \
['loss/seg', 'loss/total', 'acc']
if config['print_each_dc']:
for i in range(0, config['nb_labels']):
logging_attributes.append('dc_{}'.format(i))
logging_attributes.append('val/dc_{}'.format(i))
logging_attributes.append('test/dc_{}'.format(i))
elif config['segmentor_name'] == 'boundseg':
logging_attributes = \
['loss/seg', 'loss/total', 'acc', 'loss/bound', 'loss/bce']
if config['print_each_dc']:
for i in range(0, config['nb_labels']):
logging_attributes.append('dc_{}'.format(i))
logging_attributes.append('val/dc_{}'.format(i))
logging_attributes.append('test/dc_{}'.format(i))
else:
logging_attributes = ['main/loss', 'main/acc']
for i in range(1, config['nb_labels']):
logging_attributes.append('main/dc_{}'.format(i))
logging_attributes.append('val/main/dc_{}'.format(i))
logging_attributes.append('test/main/dc_{}'.format(i))
updater = _setup_updater(config, device, train_iterator, optimizers)
trainer = training.Trainer(updater, (epoch, 'epoch'), out=out)
if is_master:
_setup_extensions(config, trainer, optimizers, logging_counts, logging_attributes)
if config['segmentor_name'] == 'vaeseg':
targets = {'enc': encoder, 'emb': embedder, 'dec': decoder, 'vae': vae}
val_evaluator = VAESegEvaluator(config, validation_iterator, targets, device=device)
test_evaluator = VAESegEvaluator(config, test_iterator, targets, device=device)
elif config['segmentor_name'] == 'cpcseg':
targets = {'enc': encoder, 'dec': decoder, 'cpcpred1': cpcpred1}
val_evaluator = CPCSegEvaluator(config, validation_iterator, targets, device=device)
test_evaluator = CPCSegEvaluator(config, test_iterator, targets, device=device)
elif config['segmentor_name'] == 'encdec_seg':
targets = {'enc': encoder, 'dec': decoder}
val_evaluator = EncDecSegEvaluator(config, validation_iterator, targets, device=device)
test_evaluator = EncDecSegEvaluator(config, test_iterator, targets, device=device)
elif config['segmentor_name'] == 'boundseg':
targets = {'enc': encoder, 'dec': decoder, 'bound': boundary}
val_evaluator = BoundSegEvaluator(config, validation_iterator, targets, device=device)
test_evaluator = BoundSegEvaluator(config, test_iterator, targets, device=device)
val_evaluator.default_name = 'val'
test_evaluator.default_name = 'test'
if comm is not None:
val_evaluator = chainermn.create_multi_node_evaluator(val_evaluator, comm)
test_evaluator = chainermn.create_multi_node_evaluator(test_evaluator, comm)
trainer.extend(val_evaluator, trigger=(config['eval_interval'], 'epoch'))
trainer.extend(test_evaluator, trigger=(config['eval_interval'], 'epoch'))
# Resume
if config['resume'] is not None:
serializers.load_npz(config['resume'], trainer)
return trainer
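# Hypothetical entry point (not part of the original module): load a config and
# build the trainer. The YAML file name and loading step are assumptions; the
# config must provide the keys referenced throughout this module.
if __name__ == '__main__':
    import yaml  # assumed dependency for reading the config
    with open('config.yml') as f:
        config = yaml.safe_load(f)
    trainer = setup_trainer(config, out='results', batch_size=2, epoch=300, gpu_start_id=0)
    trainer.run()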
|