max_stars_repo_path (stringlengths 4-245) | max_stars_repo_name (stringlengths 7-115) | max_stars_count (int64 101-368k) | id (stringlengths 2-8) | content (stringlengths 6-1.03M) |
---|---|---|---|---
services/core/PlatformDriverAgent/platform_driver/interfaces/modbus_tk/tests/modbus_server.py | cloudcomputingabc/volttron | 406 | 12639420 |
from platform_driver.interfaces.modbus_tk.server import Server
from platform_driver.interfaces.modbus_tk import helpers
from platform_driver.interfaces.modbus_tk.client import Client, Field
from platform_driver.interfaces.modbus_tk.maps import Map, Catalog
import serial
from struct import pack, unpack
import logging
logger = logging.getLogger(__name__)
class ModbusTkClient (Client):
"""
Testing client for TCP transport
"""
byte_order = helpers.BIG_ENDIAN
addressing = helpers.ADDRESS_OFFSET
unsigned_short = Field("unsigned_short", 0, helpers.USHORT, 'PPM', 0, helpers.no_op, helpers.REGISTER_READ_WRITE, helpers.OP_MODE_READ_WRITE)
unsigned_int = Field("unsigned_int", 1, helpers.UINT, 'PPM', 0, helpers.no_op, helpers.REGISTER_READ_WRITE, helpers.OP_MODE_READ_WRITE)
unsigned_long = Field("unsigned_long", 3, helpers.UINT64, 'PPM', 0, helpers.no_op, helpers.REGISTER_READ_WRITE, helpers.OP_MODE_READ_WRITE)
sample_short = Field("sample_short", 7, helpers.SHORT, 'PPM', 0, helpers.no_op, helpers.REGISTER_READ_WRITE, helpers.OP_MODE_READ_WRITE)
sample_int = Field("sample_int", 8, helpers.INT, 'PPM', 0, helpers.no_op, helpers.REGISTER_READ_WRITE, helpers.OP_MODE_READ_WRITE)
sample_float = Field("sample_float", 10, helpers.FLOAT, 'PPM', 2, helpers.no_op, helpers.REGISTER_READ_WRITE, helpers.OP_MODE_READ_WRITE)
sample_long = Field("sample_long", 12, helpers.INT64, 'PPM', 0, helpers.no_op, helpers.REGISTER_READ_WRITE, helpers.OP_MODE_READ_WRITE)
sample_bool = Field('sample_bool', 16, helpers.BOOL, 'PPM', 0, helpers.no_op, helpers.COIL_READ_WRITE, helpers.OP_MODE_READ_WRITE)
sample_str = Field("sample_str", 17, helpers.string(4), "bytes", 4, helpers.no_op, helpers.REGISTER_READ_WRITE, helpers.OP_MODE_READ_WRITE)
class WattsOn (Client):
"""
Testing stub for the WattsOn Modbus device.
"""
byte_order = helpers.BIG_ENDIAN
addressing = helpers.ADDRESS_OFFSET
active_power_total = Field.default_holding_register('Active Power Total', 0x200, helpers.FLOAT, "kW", helpers.no_op)
active_power_A = Field.default_holding_register('Active Power A', 0x232, helpers.FLOAT, "kW", helpers.no_op)
apparent_power_A = Field.default_holding_register('Apparent Power A', 0x23E, helpers.FLOAT, "kW", helpers.no_op)
net_total_energy = Field.default_holding_register('Net Total Energy', 0x1100, helpers.FLOAT, "kWh", helpers.no_op)
voltage_A = Field.default_holding_register('Voltage A', 0x220, helpers.FLOAT, "V", helpers.no_op)
current_A = Field.default_holding_register('Current A', 0x22C, helpers.FLOAT, "A", helpers.no_op)
class PPSPi32Client (Client):
"""
Define some registers for PPSPi32Client
"""
def __init__(self, *args, **kwargs):
super(PPSPi32Client, self).__init__(*args, **kwargs)
byte_order = helpers.BIG_ENDIAN
addressing = helpers.ADDRESS_OFFSET
BigUShort = Field("BigUShort", 0, helpers.USHORT, 'PPM', 2, helpers.no_op, helpers.REGISTER_READ_WRITE, helpers.OP_MODE_READ_WRITE)
BigUInt = Field("BigUInt", 1, helpers.UINT, 'PPM', 2, helpers.no_op, helpers.REGISTER_READ_WRITE, helpers.OP_MODE_READ_WRITE)
BigULong = Field("BigULong", 3, helpers.UINT64, 'PPM', 2, helpers.no_op, helpers.REGISTER_READ_WRITE, helpers.OP_MODE_READ_WRITE)
BigShort = Field("BigShort", 7, helpers.SHORT, 'PPM', 2, helpers.no_op, helpers.REGISTER_READ_WRITE, helpers.OP_MODE_READ_WRITE)
BigInt = Field("BigInt", 8, helpers.INT, 'PPM', 2, helpers.no_op, helpers.REGISTER_READ_WRITE, helpers.OP_MODE_READ_WRITE)
BigFloat = Field("BigFloat", 10, helpers.FLOAT, 'PPM', 2, helpers.no_op, helpers.REGISTER_READ_WRITE, helpers.OP_MODE_READ_WRITE)
BigLong = Field("BigLong", 12, helpers.INT64, 'PPM', 2, helpers.no_op, helpers.REGISTER_READ_WRITE, helpers.OP_MODE_READ_WRITE)
LittleUShort = Field(
"LittleUShort", 100, helpers.USHORT, 'PPM', 2, helpers.no_op, helpers.REGISTER_READ_WRITE, helpers.OP_MODE_READ_WRITE)
LittleUInt = Field(
"LittleUInt", 101, helpers.UINT, 'PPM', 2, helpers.no_op, helpers.REGISTER_READ_WRITE, helpers.OP_MODE_READ_WRITE)
LittleULong = Field(
"LittleULong", 103, helpers.UINT64, 'PPM', 2, helpers.no_op, helpers.REGISTER_READ_WRITE, helpers.OP_MODE_READ_WRITE)
LittleShort = Field(
"LittleShort", 107, helpers.SHORT, 'PPM', 2, helpers.no_op, helpers.REGISTER_READ_WRITE, helpers.OP_MODE_READ_WRITE)
LittleInt = Field(
"LittleInt", 108, helpers.INT, 'PPM', 2, helpers.no_op, helpers.REGISTER_READ_WRITE, helpers.OP_MODE_READ_WRITE)
LittleFloat = Field(
"LittleFloat", 110, helpers.FLOAT, 'PPM', 2, helpers.no_op, helpers.REGISTER_READ_WRITE, helpers.OP_MODE_READ_WRITE)
LittleLong = Field(
"LittleLong", 112, helpers.INT64, 'PPM', 2, helpers.no_op, helpers.REGISTER_READ_WRITE, helpers.OP_MODE_READ_WRITE)
def watts_on_server():
# For RTU transport: connect to the Elkor WattsOn meter over USB via the RS-485 interface.
# ModbusClient2 can be built from a Map (see the commented example below) or defined directly as a Client subclass.
# modbus_map = Map(
# map_dir='/Users/anhnguyen/repos/kisensum-volttron/volttron/services/core/PlatformDriverAgent/platform_driver/interfaces/modbus_tk/maps',
# addressing='offset', name='watts_on', file='watts_on.csv', endian='big')
# ModbusClient2 = modbus_map.get_class()
ModbusClient2 = Catalog()['watts_on'].get_class()
client = ModbusClient2(slave_address=2, verbose=True)
client.set_transport_rtu('/dev/tty.usbserial-AL00IEEY',
115200,
serial.EIGHTBITS,
serial.PARITY_NONE,
serial.STOPBITS_ONE,
False)
# Get reading values for defined registers
print(dict((field.name, value) for field, value, timestamp in client.dump_all()))
setattr(client, "serial_baud_rate", 115)
client.write_all()
print(getattr(client, "serial_baud_rate"))
if __name__ == '__main__':
# For tcp transport
ModbusClient = Catalog()['modbus_tk_test'].get_class()
server_process = Server(address='127.0.0.1', port=5020)
server_process.define_slave(1, ModbusClient, unsigned=True)
server_process.start()
# For rtu transport
# watts_on_server()
|
bot/exts/holidays/hanukkah/hanukkah_embed.py | ragr07/sir-lancebot | 122 | 12639437 | import datetime
import logging
from discord import Embed
from discord.ext import commands
from bot.bot import Bot
from bot.constants import Colours, Month
from bot.utils.decorators import in_month
log = logging.getLogger(__name__)
HEBCAL_URL = (
"https://www.hebcal.com/hebcal/?v=1&cfg=json&maj=on&min=on&mod=on&nx=on&"
"year=now&month=x&ss=on&mf=on&c=on&geo=geoname&geonameid=3448439&m=50&s=on"
)
class HanukkahEmbed(commands.Cog):
"""A cog that returns information about Hanukkah festival."""
def __init__(self, bot: Bot):
self.bot = bot
self.hanukkah_dates: list[datetime.date] = []
def _parse_time_to_datetime(self, date: list[str]) -> datetime.datetime:
"""Format the times provided by the api to datetime forms."""
try:
return datetime.datetime.strptime(date, "%Y-%m-%dT%H:%M:%S%z")
except ValueError:
# there is a possibility of an event not having a time, just a day
# to catch this, we try again without time information
return datetime.datetime.strptime(date, "%Y-%m-%d")
async def fetch_hanukkah_dates(self) -> list[datetime.date]:
"""Gets the dates for hanukkah festival."""
# clear the datetime objects to prevent a memory link
self.hanukkah_dates = []
async with self.bot.http_session.get(HEBCAL_URL) as response:
json_data = await response.json()
festivals = json_data["items"]
for festival in festivals:
if festival["title"].startswith("Chanukah"):
date = festival["date"]
self.hanukkah_dates.append(self._parse_time_to_datetime(date).date())
return self.hanukkah_dates
@in_month(Month.NOVEMBER, Month.DECEMBER)
@commands.command(name="hanukkah", aliases=("chanukah",))
async def hanukkah_festival(self, ctx: commands.Context) -> None:
"""Tells you about the Hanukkah Festivaltime of festival, festival day, etc)."""
hanukkah_dates = await self.fetch_hanukkah_dates()
start_day = hanukkah_dates[0]
end_day = hanukkah_dates[-1]
today = datetime.date.today()
embed = Embed(title="Hanukkah", colour=Colours.blue)
if start_day <= today <= end_day:
if start_day == today:
now = datetime.datetime.utcnow()
hours = now.hour + 4 # using only hours
hanukkah_start_hour = 18
if hours < hanukkah_start_hour:
embed.description = (
"Hanukkah hasnt started yet, "
f"it will start in about {hanukkah_start_hour - hours} hour/s."
)
await ctx.send(embed=embed)
return
elif hours > hanukkah_start_hour:
embed.description = (
"It is the starting day of Hanukkah! "
f"Its been {hours - hanukkah_start_hour} hours hanukkah started!"
)
await ctx.send(embed=embed)
return
festival_day = hanukkah_dates.index(today)
number_suffixes = ["st", "nd", "rd", "th"]
suffix = number_suffixes[festival_day - 1 if festival_day <= 3 else 3]
message = ":menorah:" * festival_day
embed.description = (
f"It is the {festival_day}{suffix} day of Hanukkah!\n{message}"
)
elif today < start_day:
format_start = start_day.strftime("%d of %B")
embed.description = (
"Hanukkah has not started yet. "
f"Hanukkah will start at sundown on {format_start}."
)
else:
format_end = end_day.strftime("%d of %B")
embed.description = (
"Looks like you missed Hanukkah! "
f"Hanukkah ended on {format_end}."
)
await ctx.send(embed=embed)
def setup(bot: Bot) -> None:
"""Load the Hanukkah Embed Cog."""
bot.add_cog(HanukkahEmbed(bot))
|
python/app/thirdparty/oneforall/modules/datasets/qianxun.py | taomujian/linbing | 351 | 12639438 |
from app.thirdparty.oneforall.common.query import Query
class QianXun(Query):
def __init__(self, domain):
Query.__init__(self)
self.domain = domain
self.module = 'Query'
self.source = 'QianXunQuery'
def query(self):
"""
Query the interface for subdomains and perform subdomain matching
"""
self.header = self.get_header()
self.proxy = self.get_proxy(self.source)
num = 1
while True:
data = {'ecmsfrom': '',
'show': '',
'num': '',
'classid': '0',
'keywords': self.domain}
url = f'https://www.dnsscan.cn/dns.html?' \
f'keywords={self.domain}&page={num}'
resp = self.post(url, data)
subdomains = self.match_subdomains(resp)
if not subdomains:  # stop querying if no subdomains were found
break
self.subdomains.update(subdomains)
if '<div id="page" class="pagelist">' not in resp.text:
break
if '<li class="disabled"><span>»</span></li>' in resp.text:
break
num += 1
def run(self):
"""
Class execution entry point
"""
self.begin()
self.query()
self.finish()
self.save_json()
self.gen_result()
self.save_db()
def run(domain):
"""
Unified invocation entry point for the class
:param str domain: the domain name
"""
query = QianXun(domain)
query.run()
if __name__ == '__main__':
run('example.com')
|
optunity/tests/test_solvers.py | xrounder/optunity | 401 | 12639459 |
#!/usr/bin/env python
# A simple smoke test for all available solvers.
import optunity
def f(x, y):
return x + y
solvers = optunity.available_solvers()
for solver in solvers:
# simple API
opt, _, _ = optunity.maximize(f, 100,
x=[0, 5], y=[-5, 5],
solver_name=solver)
# expert API
suggestion = optunity.suggest_solver(num_evals=100, x=[0, 5], y=[-5, 5],
solver_name=solver)
s = optunity.make_solver(**suggestion)
# without parallel evaluations
opt, _ = optunity.optimize(s, f)
# with parallel evaluations
opt, _ = optunity.optimize(s, f, pmap=optunity.pmap)
|
python/definitions.py | netcookies/vim-hyperstyle | 132 | 12639469 | """
A list of CSS properties to expand.
Format:
(property, [aliases], unit, [values])
- property : (String) the full CSS property name.
- aliases : (String list) a list of aliases for this shortcut.
- unit : (String) when defined, assumes that the property has a value with a
unit. When the value is numberless (eg, `margin:12`), the given unit will
be assumed (`12px`). Set this to `_` for unitless numbers (eg, line-height).
- values : (String list) possible values for this property.
Each property will be accessible through these ways:
- fuzzy matches of the aliases defined
- fuzzy matches of the property name, no dashes (eg: mi, min, minh, minhe,
minhei...)
- fuzzy matches with dashes (eg: min-h, min-hei, min-heig...)
"""
properties = [
("margin", [], "px", ["auto"]),
("width", [], "px", ["auto"]),
("height", [], "px", ["auto"]),
("padding", [], "px", ["auto"]),
("border", [], None, None),
("outline", [], None, None),
("left", [], "px", None),
("top", [], "px", None),
("bottom", [], "px", None),
("right", [], "px", None),
("background", ["bground"], None, ["transparent"]),
("min-height", ["mheight"], "px", ["auto"]),
("min-width", ["mwidth"], "px", ["auto"]),
("max-height", ["xheight","mxheight"], "px", ["auto"]),
("max-width", ["xwidth","mxheight"], "px", ["auto"]),
("margin-left", ["mleft","marleft"], "px", ["auto"]),
("margin-right", ["mright","marright"], "px", ["auto"]),
("margin-top", ["mtop","martop"], "px", ["auto"]),
("margin-bottom", ["mbottom","marbottom"], "px", ["auto"]),
("padding-left", ["pleft","padleft"], "px", None),
("padding-right", ["pright","padright"], "px", None),
("padding-top", ["ptop","padtop"], "px", None),
("padding-bottom", ["pbottom","padbottom"], "px", None),
("z-index", [], "_", None),
("display", [], None, ["none", "block", "inline", "inline-block", "table", "table-cell", "table-row"]),
("text-align", ["talign"], None, ["left", "right", "justify", "center", "inherit"]),
("overflow", ["oflow"], None, ["visible", "scroll", "hidden", "auto", "inherit"]),
("overflow-x", ["ox"], None, ["visible", "scroll", "hidden", "auto", "inherit"]),
("overflow-y", ["oy"], None, ["visible", "scroll", "hidden", "auto", "inherit"]),
("font", [], None, None),
("font-size", ["fsize", "fosize"], "em", None),
("font-style", ["fstyle", "fostyle"], None, ["italic","normal","inherit"]),
("font-weight", ["fweight", "foweight"], None, ["100","200","300","400","500","600","700","800","900","bold","normal","inherit"]),
("font-variant", ["fvariant", "fovariant"], None, None),
("font-family", ["ffamily", "family"], None, None),
("line-height", ["lheight", "liheight"], "_", None),
("letter-spacing", ["lspacing", "lespacing"], "px", None),
("transition", ["trans", "tn", "tsition"], None, None),
("transform", ["tform", "xform"], None, None),
("text-transform", ["ttransform"], None, ["uppercase", "lowercase", "capitalize", "none", "full-width", "inherit"]),
("text-decoration", ["tdecoration"], None, ["underline", "none", "line-through", "overline", "inherit", "initial"]),
("text-decoration-line", ["tdline"], None, ["underline", "none", "line-through", "overline", "inherit", "initial"]),
("text-indent", ["tindent"], "px", None),
("text-shadow", ["tshadow", "teshadow"], None, ["none"]),
("table-layout", ["tlayout", "talayout"], None, ["fixed", "auto", "inherit"]),
("vertical-align", ["valign"], None, ["middle","top","bottom","baseline","text-top","text-bottom","sub","super"]),
("transition-duration", ["tduration"], "ms", None),
("float", [], None, ["left", "right", "none", "inherit"]),
("color", [], None, None),
("opacity", [], "_", None),
("border-right", ["bright", "borright"], None, None),
("border-left", ["bleft", "borleft"], None, None),
("border-top", ["btop", "bortop"], None, None),
("border-bottom", ["bbottom", "borbottom"], None, None),
("border-width", ["bwidth"], "px", None),
("border-right-width", ["brwidth"], "px", None),
("border-left-width", ["blwidth"], "px", None),
("border-top-width", ["btwidth"], "px", None),
("border-bottom-width", ["bbwidth"], "px", None),
("border-image", ["borimage"], None, None),
("cursor", [], None, ["wait", "pointer", "auto", "default", "help", "progress", "cell", "crosshair", "text", "vertical-text", "alias", "copy", "move", "not-allowed", "no-drop", "all-scroll", "col-resize", "row-resize", "n-resize", "e-resize", "s-resize", "w-resize", "nw-resize", "ne-resize", "sw-resize", "se-resize", "ew-resize", "ns-resize", "zoom-in", "zoom-out", "grab", "grabbing" ]),
("animation", [], None, None),
("background-image", ["bgimage", "backimage", "bimage"], None, None),
("background-color", ["bgcolor", "backcolor", "bcolor"], None, None),
("background-size", ["bgsize", "backsize"], None, None),
("background-position", ["bgposition", "backposition", "bposition"], None, ["center", "top", "left", "middle", "bottom", "right"]),
("background-repeat", ["bgrepeat", "backrepeat", "brepeat"], None, ["repeat-x", "repeat-y", "no-repeat"]),
("border-radius", ["bradius", "boradius"], "px", None),
("border-color", ["bcolor", "bocolor", "borcolor"], "px", None),
("border-collapse", ["bcollapse", "borcollapse", "collapse"], None, ["collapse","auto","inherit"]),
("box-shadow", ["bshadow", "boshadow"], None, ["none"]),
("box-sizing", ["bsizing", "bsize", "boxsize"], None, ["border-box", "content-box", "padding-box"]),
("position", [], None, ["absolute", "relative", "fixed", "static", "inherit"]),
("flex", [], None, None),
("white-space", ["wspace", "whispace", "whspace", "wispace"], None, ["nowrap", "normal", "pre", "pre-wrap", "pre-line", "inherit"]),
("visibility", [], None, ["visible", "hidden", "collapse", "inherit"]),
("flex-grow", ["fgrow", "flgrow", "flegrow"], "_", None),
("flex-shrink", ["fshrink", "flshrink", "fleshrink"], "_", None),
("flex-direction", ["fdirection", "fldirection", "fledirection"], None, None),
("flex-wrap", ["fwrap", "flwrap", "flewrap"], None, None),
("align-items", ["aitems", "alitems"], None, ["flex-start", "flex-end", "center", "baseline", "stretch", "inherit"]),
("justify-content", ["jcontent", "jucontent", "juscontent", "justcontent"], None, ["flex-start", "flex-end", "center", "space-around", "space-between", "inherit"]),
("order", [], "_", None),
("page-break-after", ["pbafter"], None, ["always", "auto", "avoid", "left", "right", "inherit"]),
("page-break-before", ["pbbefore"], None, ["always", "auto", "avoid", "left", "right", "inherit"]),
("perspective", [], None, None),
("perspective-origin", ["porigin"], None, None),
("word-break", ["wbreak"], None, []),
("quotes", [], None, None),
("content", [], None, None),
("clear", [], None, ["left", "right", "both", "inherit"]),
("zoom", [], "_", None),
("direction", [], None, ["ltr", "rtl", "inherit"]),
("list-style", ["lstyle"], None, ["none", "square", "disc", "inside", "outside", "inherit", "initial", "unset", "decimal", "georgian"]),
]
"""
A list of CSS statements to expand.
This differs from `properties` as this defines shortcuts for an entire statement.
For instance, `dib<Enter>` will expand to `display: inline-block`.
Each line is in this format:
(property, value, alias)
The following options are available:
- alias : (String list) see `property_list` on how aliases work.
"""
statements = [
("display", "block", ["dblock"]),
("display", "inline", ["dinline"]),
("display", "inline-block", ["diblock"]),
("display", "inline-flex", ["diflex"]),
("display", "table", ["dtable", "table"]),
("display", "table-cell", ["dtcell","cell","tablecell","table-cell"]),
("display", "table-row", ["dtrow","row","tablerow","table-row"]),
("float", "left", ["fleft", "flleft", "floleft"]),
("float", "right", ["fright", "flright", "floright"]),
("float", "none", ["fnone", "flnone", "flonone"]),
("display", "none", ["dnone"]),
("display", "flex", ["dflex", "flex"]),
("font-weight", "normal", ["fwnormal"]),
("font-weight", "bold", ["fwbold", "bold"]),
("font-style", "italic", ["fsitalic", "italic"]),
("font-style", "normal", ["fnormal"]),
("border", "0", ["b0"]),
("padding", "0", ["p0","po"]),
("margin", "0", ["m0","mo"]),
("margin", "0 auto", ["m0a", "moa"]),
("overflow", "hidden", ["ohidden"]),
("overflow", "scroll", ["oscroll"]),
("overflow", "auto", ["oauto"]),
("overflow", "visible", ["ovisible"]),
("overflow-x", "hidden", ["oxhidden"]),
("overflow-x", "scroll", ["oxscroll"]),
("overflow-x", "auto", ["oxauto"]),
("overflow-x", "visible", ["oxvisible"]),
("overflow-y", "hidden", ["oyhidden"]),
("overflow-y", "scroll", ["oyscroll"]),
("overflow-y", "auto", ["oyauto"]),
("overflow-y", "visible", ["oyvisible"]),
("font-weight", "100", ["f100", "fw100"]),
("font-weight", "200", ["f200", "fw200"]),
("font-weight", "300", ["f300", "fw300"]),
("font-weight", "400", ["f400", "fw400"]),
("font-weight", "500", ["f500", "fw500"]),
("font-weight", "600", ["f600", "fw600"]),
("font-weight", "700", ["f700", "fw700"]),
("font-weight", "800", ["f800", "fw800"]),
("font-weight", "900", ["f900", "fw900"]),
("border", "0", ["b0"]),
("border-collapse", "collapse", ["bccollapse"]),
("border-collapse", "separate", ["bcseparate"]),
("background-repeat", "repeat-x", [ "brx", "rx", "bgrx", "repeatx" ]),
("background-repeat", "repeat-y", [ "bry", "ry", "bgry", "repeaty" ]),
("background-repeat", "no-repeat", [ "brnorepeat", "norepeat"]),
("background-size", "cover", ["cover"]),
("background-size", "contain", ["contain"]),
("cursor", "pointer", ["cupointer", "curpointer"]),
("cursor", "wait", ["cuwait", "curwait"]),
("cursor", "busy", ["cubusy", "curbusy"]),
("cursor", "text", ["cutext", "curtext"]),
("vertical-align", "middle", ["vamiddle"]),
("vertical-align", "top", ["vatop"]),
("vertical-align", "bottom", ["vabottom"]),
("vertical-align", "sub", ["vasub"]),
("vertical-align", "super", ["vasuper"]),
("vertical-align", "baseline", ["vabline", "vabaseline", "baseline"]),
("vertical-align", "text-top", ["vattop"]),
("vertical-align", "text-bottom", ["vattbottom"]),
("visibility", "visible", ["vvisible","visible"]),
("visibility", "hidden", ["vhidden", "vishidden", "vihidden", "hidden", "hide"]),
("visibility", "collapse", ["vcollapse", "viscollapse", "vicollapse"]),
("clear", "both", ["cboth"]),
("clear", "right", ["cright"]),
("clear", "left", ["cleft"]),
("content", "''", ["content"]),
("text-transform", "uppercase", ["ttupper", "uppercase"]),
("text-transform", "lowercase", ["ttlower"]),
("text-transform", "none", ["ttnone"]),
("text-transform", "capitalize", ["ttcap"]),
("text-transform", "full-width", ["ttfull"]),
("text-align", "left", ["taleft"]),
("text-align", "right", ["taright"]),
("text-align", "center", ["tacenter", "center"]),
("text-align", "justify", ["tajustify", "justify"]),
("text-decoration", "underline", ["tdunderline", "underline"]),
("text-decoration", "none", ["tdnone"]),
("box-sizing", "border-box", ["bsbox"]),
("box-sizing", "padding-box", ["bspadding"]),
("box-sizing", "content-box", ["bscontent"]),
("margin", "auto", ["mauto"]),
("margin-left", "auto", ["mlauto"]),
("margin-right", "auto", ["mrauto"]),
("width", "auto", ["wauto"]),
("height", "auto", ["hauto"]),
("position", "relative", ["porelative", "prelative", "relative"]),
("position", "fixed", ["pofixed", "pfixed", "fixed"]),
("position", "static", ["postatic", "pstatic", "static"]),
("position", "absolute", ["poabsolute", "pabsolute", "absolute"]),
("white-space", "nowrap", ["nowrap"]),
("text-overflow", "ellipsis", ["ellipsis"]),
("flex", "auto", ["flauto"]),
("align-items", "flex-start", ["aistart"]),
("align-items", "flex-end", ["aiend"]),
("align-items", "center", ["aicenter"]),
("align-items", "stretch", ["aistretch"]),
("text-overflow", "ellipsis", ["elip", "ellipsis", "toellipsis"]),
("flex-wrap", "wrap", ["fwrap","flexwrap"]),
("flex-wrap", "nowrap", ["fnowrap"]),
("flex-direction", "row", ["fdrow"]),
("flex-direction", "row-reverse", ["fdrreverse"]),
("flex-direction", "column", ["fdcolumn"]),
("flex-direction", "column-reverse", ["fdcreverse"]),
("justify-content", "center", ["jccenter"]),
("justify-content", "flex-start", ["jcstart"]),
("justify-content", "flex-end", ["jcend"]),
("direction", "ltr", ["ltr","dirltr"]),
("direction", "rtl", ["rtl","dirrtl"]),
("text-shadow", "none", ["tsnone", "teshnone"]),
("table-layout", "fixed", ["tlfixed"]),
("table-layout", "auto", ["tlauto"]),
("list-style", "none", ["lsnone"]),
("list-style-type", "none", ["lstnone"]),
]
definitions = { "properties": properties, "statements": statements }
|
sponsors/migrations/0009_auto_20201103_1259.py | ewjoachim/pythondotorg | 911 | 12639474 |
# Generated by Django 2.0.13 on 2020-11-03 12:59
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
("sponsors", "0008_auto_20201028_1814"),
]
operations = [
migrations.RemoveField(
model_name="sponsor",
name="_content_rendered",
),
migrations.RemoveField(
model_name="sponsor",
name="company",
),
migrations.RemoveField(
model_name="sponsor",
name="content",
),
migrations.RemoveField(
model_name="sponsor",
name="content_markup_type",
),
migrations.RemoveField(
model_name="sponsor",
name="featured",
),
migrations.RemoveField(
model_name="sponsor",
name="is_published",
),
migrations.AlterField(
model_name="sponsorcontact",
name="sponsor",
field=models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
related_name="contacts",
to="sponsors.Sponsor",
),
),
migrations.AlterField(
model_name="sponsorship",
name="level_name",
field=models.CharField(default="", max_length=64),
),
]
|
Chapter 03/fare_alerter.py | mdlll/Python-Machine-Learning-Blueprints-master | 352 | 12639485 |
import sys
import pandas as pd
import numpy as np
import requests
from selenium import webdriver
from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from bs4 import BeautifulSoup
from sklearn.cluster import DBSCAN
from sklearn.preprocessing import StandardScaler
import schedule
import time
def check_flights():
URL="https://www.google.com/flights/explore/#explore;f=JFK,EWR,LGA;t=HND,NRT,TPE,HKG,KIX;s=1;li=8;lx=12;d=2017-06-01"
driver = webdriver.PhantomJS(PJS_PATH)
dcap = dict(DesiredCapabilities.PHANTOMJS)
dcap["phantomjs.page.settings.userAgent"] = ("Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/46.0.2490.80 Safari/537.36")
driver = webdriver.PhantomJS(desired_capabilities=dcap, executable_path=PJS_PATH)
driver.implicitly_wait(20)
driver.get(URL)
wait = WebDriverWait(driver, 20)
wait.until(EC.visibility_of_element_located((By.CSS_SELECTOR, "span.FTWFGDB-v-c")))
s = BeautifulSoup(driver.page_source, "lxml")
best_price_tags = s.findAll('div', 'FTWFGDB-w-e')
# check if scrape worked - alert if it fails and shutdown
if len(best_price_tags) < 4:
print('Failed to Load Page Data')
requests.post('https://maker.ifttt.com/trigger/fare_alert/with/key/MY_SECRET_KEY',\
data={ "value1" : "script", "value2" : "failed", "value3" : "" })
sys.exit(0)
else:
print('Successfully Loaded Page Data')
best_prices = []
for tag in best_price_tags:
best_prices.append(int(tag.text.replace('$','')))
best_price = best_prices[0]
best_height_tags = s.findAll('div', 'FTWFGDB-w-f')
best_heights = []
for t in best_height_tags:
best_heights.append(float(t.attrs['style']\
.split('height:')[1].replace('px;','')))
best_height = best_heights[0]
# price per pixel of height
pph = np.array(best_price)/np.array(best_height)
cities = s.findAll('div', 'FTWFGDB-w-o')
hlist=[]
for bar in cities[0]\
.findAll('div', 'FTWFGDB-w-x'):
hlist.append(float(bar['style']\
.split('height: ')[1].replace('px;','')) * pph)
fares = pd.DataFrame(hlist, columns=['price'])
px = [x for x in fares['price']]
ff = pd.DataFrame(px, columns=['fare']).reset_index()
# begin the clustering
X = StandardScaler().fit_transform(ff)
db = DBSCAN(eps=1.5, min_samples=1).fit(X)
labels = db.labels_
clusters = len(set(labels))
pf = pd.concat([ff,pd.DataFrame(db.labels_,
columns=['cluster'])], axis=1)
rf = pf.groupby('cluster')['fare'].agg(['min','count']).sort_values('min', ascending=True)
# set up our rules
# must have more than one cluster
# cluster min must be equal to lowest price fare
# cluster size must be less than 10th percentile
# cluster must be $100 less the next lowest-priced cluster
if clusters > 1 and ff['fare'].min() == rf.iloc[0]['min']\
and rf.iloc[0]['count'] < rf['count'].quantile(.10)\
and rf.iloc[0]['fare'] + 100 < rf.iloc[1]['fare']:
city = s.find('span','FTWFGDB-v-c').text
fare = s.find('div','FTWFGDB-w-e').text
r = requests.post('https://maker.ifttt.com/trigger/fare_alert/with/key/MY_SECRET_KEY',\
data={ "value1" : city, "value2" : fare, "value3" : "" })
else:
print('no alert triggered')
# set up the scheduler to run our code every 60 min
schedule.every(60).minutes.do(check_flights)
while 1:
schedule.run_pending()
time.sleep(1)
|
tests/PySys/log_request_end_to_end/test_log_generator.py | itsyitsy/thin-edge.io | 102 | 12639509 |
import os
from datetime import datetime, timedelta
from random import randint, shuffle, seed
from typing import Optional
from retry import retry
# this test will look at the date of current files in /var/log/tedge/agent/
# and create example files with the same date.
ERROR_MESSAGES = [
"Error: in line 1000.",
"Error: No such file or directory: /home/some/file",
"Error: Connection timed out. OS error 111.",
"Error: Is MQTT running?",
"Error: missing dependency mosquitto-clients.",
"thunderbird-gnome-support 1:78.14.0+build1-0ubuntu0.20.04.2",
"thunderbird-locale-en-us 1:78.14.0+build1-0ubuntu0.20.04.2",
"fonts-kacst-one 5.0+svn11846-10",
"fonts-khmeros-core 5.0-7ubuntu1",
"fonts-lao 0.0.20060226-9ubuntu1",
]
def create_fake_logs(num_lines=100) -> str:
num_loops = int(num_lines/10)
output = "\n"
for _ in range(num_loops):
output += '\n'.join(map(str, ERROR_MESSAGES))
output += '\n'
return output
class FailedToCreateLogs(Exception):
pass
@retry(FailedToCreateLogs, tries=20, delay=1)
def check_files_created():
if len(os.listdir("/tmp/sw_logs")) == 3:
return True
else:
raise FailedToCreateLogs
def create_example_logs():
file_names = ["example-log1", "example-log2", "example-log3"]
file_sizes = [50, 100, 250]
time_stamps = ["2021-11-18T13:15:10Z",
"2021-11-19T21:15:10Z", "2021-11-20T13:15:10Z"]
os.mkdir("/tmp/sw_logs")
for idx, file_name in enumerate(file_names):
with open(f"/tmp/sw_logs/{file_name}-{time_stamps[idx]}.log", "w") as handle:
fake_log = create_fake_logs(num_lines=file_sizes[idx])
handle.write(fake_log)
check_files_created()
|
notebook/itertools_repeat.py | vhn0912/python-snippets | 174 | 12639510 | import itertools
sum_value = 0
for i in itertools.repeat(10):
print(i)
sum_value += i
if sum_value > 40:
break
# 10
# 10
# 10
# 10
# 10
for i in itertools.repeat(10, 3):
print(i)
# 10
# 10
# 10
for l in itertools.repeat([0, 1, 2], 3):
print(l)
# [0, 1, 2]
# [0, 1, 2]
# [0, 1, 2]
for func in itertools.repeat(len, 3):
print(func('abc'))
# 3
# 3
# 3
l = [0, 1, 2, 3]
print(list(zip(itertools.repeat(10), l)))
# [(10, 0), (10, 1), (10, 2), (10, 3)]
|
Lib/test/test_import/data/circular_imports/rebinding2.py | shawwn/cpython | 52,316 | 12639514 | from .subpkg import util
from . import rebinding
util = util.util
|
tests/test_loading.py | ZSD-tim/dayu_widgets | 157 | 12639545 | """
Test the MLoading and MLoadingWrapper classes
"""
import pytest
from dayu_widgets import dayu_theme
from dayu_widgets.loading import MLoading, MLoadingWrapper
from dayu_widgets.qt import QLabel, QSize
@pytest.mark.parametrize('cls, size', (
(MLoading.tiny, dayu_theme.tiny),
(MLoading.small, dayu_theme.small),
(MLoading.medium, dayu_theme.medium),
(MLoading.large, dayu_theme.large),
(MLoading.huge, dayu_theme.huge),
))
@pytest.mark.parametrize('color', (
None,
'#13c2c2'
))
def test_loading_class_method(qtbot, cls, size, color):
"""Test for MLoading class methods"""
if color:
widget = cls(color=color)
else:
widget = cls()
qtbot.addWidget(widget)
assert widget.height() == size
assert widget.width() == size
pix = widget.pix
assert pix is not None
assert not pix.isNull()
assert pix.width() == size
assert pix.width() == size
def test_loading_wrapper(qtbot):
"""Test for MLoadingWrapper class methods"""
label = QLabel('test')
label.setFixedSize(QSize(100, 100))
widget = MLoadingWrapper(label, loading=False)
widget.show()
qtbot.addWidget(widget)
assert not widget._loading_widget.isVisible()
assert not widget._mask_widget.isVisible()
assert not widget.get_dayu_loading()
widget.set_dayu_loading(True)
def check_loading_visible():
assert widget.get_dayu_loading()
assert widget._loading_widget.isVisible()
assert widget._mask_widget.isVisible()
qtbot.waitUntil(check_loading_visible)
|
nsot/models/change.py | comerford/nsot | 387 | 12639564 | from __future__ import unicode_literals
from __future__ import absolute_import
from calendar import timegm
import difflib
import json
from django.apps import apps
from django.conf import settings
from django.db import models
from .. import exc, fields
from . import constants
from .site import Site
class Change(models.Model):
"""Record of all changes in NSoT."""
site = models.ForeignKey(
'Site', db_index=True, related_name='changes', verbose_name='Site',
help_text='Unique ID of the Site this Change is under.'
)
user = models.ForeignKey(
settings.AUTH_USER_MODEL, related_name='changes', db_index=True,
help_text='The User that initiated this Change.'
)
change_at = models.DateTimeField(
auto_now_add=True, db_index=True, null=False,
help_text='The timestamp of this Change.'
)
event = models.CharField(
max_length=10, null=False, choices=constants.EVENT_CHOICES,
help_text='The type of event this Change represents.'
)
resource_id = models.IntegerField(
'Resource ID', null=False,
help_text='The unique ID of the Resource for this Change.'
)
resource_name = models.CharField(
'Resource Type', max_length=20, null=False, db_index=True,
choices=constants.CHANGE_RESOURCE_CHOICES,
help_text='The name of the Resource for this Change.'
)
_resource = fields.JSONField(
'Resource', null=False, blank=True,
help_text='Local cache of the changed Resource. (Internal use only)'
)
def __init__(self, *args, **kwargs):
self._obj = kwargs.pop('obj', None)
super(Change, self).__init__(*args, **kwargs)
class Meta:
get_latest_by = 'change_at'
index_together = (
('resource_name', 'resource_id'),
('resource_name', 'event'),
)
def __unicode__(self):
return u'%s %s(%s)' % (self.event, self.resource_name,
self.resource_id)
@property
def resource(self):
return self._resource
def get_change_at(self):
return timegm(self.change_at.timetuple())
get_change_at.short_description = 'Change At'
@classmethod
def get_serializer_for_resource(cls, resource_name):
from ..api import serializers
serializer_class = resource_name + 'Serializer'
return getattr(serializers, serializer_class)
def clean_event(self, value):
if value not in constants.CHANGE_EVENTS:
raise exc.ValidationError('Invalid change event: %r.' % value)
return value
def clean_resource_name(self, value):
if value not in constants.VALID_CHANGE_RESOURCES:
raise exc.ValidationError('Invalid resource name: %r.' % value)
return value
def clean_site(self, obj):
"""value in this case is an instance of a model object."""
# Site doesn't have an id to itself, so if obj is a Site, use it.
# Otherwise get the value of the `.site`
return obj if isinstance(obj, Site) else getattr(obj, 'site')
def clean_fields(self, exclude=None):
"""This will populate the change fields from the incoming object."""
obj = self._obj
if obj is None:
return None
self.event = self.clean_event(self.event)
self.resource_name = self.clean_resource_name(obj.__class__.__name__)
self.resource_id = obj.id
self.site = self.clean_site(obj)
serializer_class = self.get_serializer_for_resource(self.resource_name)
serializer = serializer_class(obj)
self._resource = serializer.data
def save(self, *args, **kwargs):
self.full_clean() # First validate fields are correct
super(Change, self).save(*args, **kwargs)
def to_dict(self):
resource = None
if self.resource is not None:
resource = self.resource
return {
'id': self.id,
'site': self.site.to_dict(),
'user': self.user.to_dict(),
'change_at': timegm(self.change_at.timetuple()),
'event': self.event,
'resource_name': self.resource_name,
'resource_id': self.resource_id,
'resource': resource,
}
@property
def diff(self):
"""
Return the diff of the JSON representation of the cached copy of a
Resource with its current instance
"""
if self.event == 'Create':
old = ''
else:
# Get the Change just ahead of _this_ change because that has the
# state of the Resource before this Change occurred.
# TODO(nickpegg): Get rid of this if we change the behavior of
# Change to store the previous version of the object
old_change = Change.objects.filter(
change_at__lt=self.change_at,
resource_id=self.resource_id,
resource_name=self.resource_name
).order_by(
'-change_at'
).first()
old = json.dumps(old_change._resource, indent=2, sort_keys=True)
if self.event == 'Delete':
current = ''
else:
resource = apps.get_model(self._meta.app_label, self.resource_name)
obj = resource.objects.get(pk=self.resource_id)
serializer_class = self.get_serializer_for_resource(
self.resource_name)
serializer = serializer_class(obj)
current = json.dumps(serializer.data, indent=2, sort_keys=True)
diff = "\n".join(difflib.ndiff(
old.splitlines(),
current.splitlines()
))
return diff
|
codes/models/optim/__init__.py | sanchitvohra/EGVSR | 709 | 12639579 |
import torch.nn as nn
import torch.optim as optim
def define_criterion(criterion_opt):
if criterion_opt is None:
return None
# parse
if criterion_opt['type'] == 'MSE':
criterion = nn.MSELoss(reduction=criterion_opt['reduction'])
elif criterion_opt['type'] == 'L1':
criterion = nn.L1Loss(reduction=criterion_opt['reduction'])
elif criterion_opt['type'] == 'CB':
from .losses import CharbonnierLoss
criterion = CharbonnierLoss(reduction=criterion_opt['reduction'])
elif criterion_opt['type'] == 'CosineSimilarity':
from .losses import CosineSimilarityLoss
criterion = CosineSimilarityLoss()
elif criterion_opt['type'] == 'GAN':
from .losses import VanillaGANLoss
criterion = VanillaGANLoss(reduction=criterion_opt['reduction'])
elif criterion_opt['type'] == 'LSGAN':
from .losses import LSGANLoss
criterion = LSGANLoss(reduction=criterion_opt['reduction'])
else:
raise ValueError('Unrecognized criterion: {}'.format(
criterion_opt['type']))
return criterion
def define_lr_schedule(schedule_opt, optimizer):
if schedule_opt is None:
return None
# parse
if schedule_opt['type'] == 'FixedLR':
schedule = None
elif schedule_opt['type'] == 'MultiStepLR':
schedule = optim.lr_scheduler.MultiStepLR(
optimizer,
milestones=schedule_opt['milestones'],
gamma=schedule_opt['gamma']
)
elif schedule_opt['type'] == 'CosineAnnealingLR_Restart':
from .lr_schedules import CosineAnnealingLR_Restart
schedule = CosineAnnealingLR_Restart(
optimizer, schedule_opt['periods'],
eta_min=schedule_opt['eta_min'],
restarts=schedule_opt['restarts'],
weights=schedule_opt['restart_weights']
)
else:
raise ValueError('Unrecognized lr schedule: {}'.format(
schedule_opt['type']))
return schedule
|
data_generation/preprocess_data.py | Gkud/deep-regex | 438 | 12639580 |
import re
import random
import sys
import argparse
import os
import csv
def main(arguments):
parser = argparse.ArgumentParser(
description=__doc__,
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--data_dir',
help='Data directory', required=True, type=str)
parser.add_argument('--targ_separate',
help='Data directory', type=int)
args = parser.parse_args(arguments)
data_dir = args.data_dir
print(args)
targ_separate = True
if args.targ_separate == 0:
targ_separate = False
process_data(data_dir, targ_separate)
output_kushman_format(data_dir)
def process_data(data_dir, targ_separate=True):
print(targ_separate)
desc_lines, regex_lines = process_tokens(data_dir, targ_separate)
split_data_and_save(desc_lines, regex_lines, data_dir, ratio=0.65)
def process_tokens(data_dir, targ_separate=True):
desc_file_name = "{}/{}".format(data_dir, "src")
regex_file_name = "{}/{}".format(data_dir, "targ")
if targ_separate:
regex_lines = [" ".join(line.rstrip('\n')) for line in open('{}{}'.format(regex_file_name, '.txt'))]
else:
regex_lines = ["".join(line.rstrip('\n')) for line in open('{}{}'.format(regex_file_name, '.txt'))]
desc_lines = [" " + line.rstrip('\n') + " " for line in open('{}{}'.format(desc_file_name, '.txt'))]
desc_lines = [line.lower() for line in desc_lines]
punc = [',', '.', '!', ';']
for p in punc:
p_space = '{} '.format(p)
p_2_space = ' {} '.format(p)
desc_lines = [line.replace(p_space, p) for line in desc_lines]
desc_lines = [line.replace(p, p_2_space) for line in desc_lines]
num_pairs = [(' one ', ' 1 '), (' two ', ' 2 '), (' three ', ' 3 '), (' four ', ' 4 '),
(' five ', ' 5 '), (' six ', ' 6 '), (' seven ', ' 7 '), (' eight ', ' 8 '), (' nine ', ' 9 '), (' ten ', ' 10 ')]
for pair in num_pairs:
desc_lines = [line.replace(pair[0], pair[1]) for line in desc_lines]
single_quot_regex = re.compile("((?<=\s)'([^']+)'(?=\s))")
desc_lines = [re.sub(single_quot_regex, r'"\2"', line) for line in desc_lines]
num_lines = len(regex_lines)
reps_words = ["dog", "truck", "ring", "lake"]
reps_tags = ["<M0>", "<M1>", "<M2>", "<M3>"]
new_regex_lines = ["" for i in range(len(regex_lines))]
new_desc_lines = ["" for i in range(len(desc_lines))]
cool = False
for l_i in range(num_lines):
desc_line = desc_lines[l_i]
old_desc = desc_line
temp_desc_line = ''.join([c for c in desc_line])
words_replaced = []
for j in range(4):
double_quot = re.compile('.*\s"([^"]*)"\s.*')
double_quot_out = double_quot.match(temp_desc_line)
if double_quot_out:
word = double_quot_out.groups()[-1]
words_replaced.insert(0, word)
print(words_replaced)
temp_desc_line = temp_desc_line.replace('"{}"'.format(word), reps_tags[j])
for j in range(len(words_replaced)):
desc_line = desc_line.replace('"{}"'.format(words_replaced[j]), reps_tags[j])
new_desc_lines[l_i] = desc_line
regex_line = regex_lines[l_i]
print(regex_line)
# regex_line = regex_line.replace(" ".join('[AEIOUaeiou]'), "<VOW>")
# regex_line = regex_line.replace(" ".join('[aeiouAEIOU]'), "<VOW>")
# regex_line = regex_line.replace(" ".join('[0-9]'), "<NUM>")
# regex_line = regex_line.replace(" ".join('[A-Za-z]'), "<LET>")
# regex_line = regex_line.replace(" ".join('[A-Z]'), "<CAP>")
# regex_line = regex_line.replace(" ".join('[a-z]'), "<LOW>")
regex_line = regex_line.replace(" ".join('AEIOUaeiou'), "<VOW>")
regex_line = regex_line.replace(" ".join('aeiouAEIOU'), "<VOW>")
regex_line = regex_line.replace(" ".join('0-9'), "<NUM>")
regex_line = regex_line.replace(" ".join('A-Za-z'), "<LET>")
regex_line = regex_line.replace(" ".join('A-Z'), "<CAP>")
regex_line = regex_line.replace(" ".join('a-z'), "<LOW>")
for i in range(len(words_replaced)):
match = re.compile(re.escape(" ".join(words_replaced[i])), re.IGNORECASE)
print(match)
print(match.sub(" ".join(reps_tags[i]), regex_line))
regex_line = match.sub(reps_tags[i], regex_line)
for r_i in range(len(reps_words)):
r_word = reps_words[r_i]
regex_line = regex_line.replace(" ".join(r_word), reps_tags[r_i])
new_regex_lines[l_i] = regex_line
new_desc_lines = [line.strip(" ") for line in new_desc_lines]
new_regex_lines = [line.strip(" ") for line in new_regex_lines]
return new_desc_lines, new_regex_lines
def split_data_and_save(desc_lines, regex_lines, data_dir, ratio=0.65):
regex_lines = [line.rstrip('\n') for line in regex_lines]
# desc_lines = [line.rstrip('\n') + " <HALF> " + line.rstrip('\n') for line in desc_lines]
desc_lines = [line.rstrip('\n') for line in desc_lines]
zipped = zip(regex_lines, desc_lines)
random.seed(0)
random.shuffle(zipped)
regex_lines_shuffled, desc_lines_shuffled = zip(*zipped)
regex_train, regex_val, regex_test = split_train_test_val(regex_lines_shuffled, ratio)
desc_train, desc_val, desc_test = split_train_test_val(desc_lines_shuffled, ratio)
with open('{}{}{}.txt'.format(data_dir, "/", "src-train"), "w") as out_file:
out_file.write("\n".join(desc_train))
with open('{}{}{}.txt'.format(data_dir, "/", "src-val"), "w") as out_file:
out_file.write("\n".join(desc_val))
with open('{}{}{}.txt'.format(data_dir, "/", "src-test"), "w") as out_file:
out_file.write("\n".join(desc_test))
with open('{}{}{}.txt'.format(data_dir, "/", "targ-train"), "w") as out_file:
out_file.write("\n".join(regex_train))
with open('{}{}{}.txt'.format(data_dir, "/", "targ-val"), "w") as out_file:
out_file.write("\n".join(regex_val))
with open('{}{}{}.txt'.format(data_dir, "/", "targ-test"), "w") as out_file:
out_file.write("\n".join(regex_test))
print("Done!")
def split_train_test_val(ar, ratio):
train_set = ar[:int(len(ar)*ratio)]
not_train_set = ar[int(len(ar)*ratio):]
val_set = not_train_set[int(len(not_train_set)*(5.0/7.0)):]
test_set = not_train_set[:int(len(not_train_set)*(5.0/7.0))]
return train_set, val_set, test_set
def output_kushman_format(data_dir):
desc_lines = [line.rstrip('\n') for line in open('{}/{}'.format(data_dir, 'src.txt'))]
regex_lines = [line.rstrip('\n') for line in open('{}/{}'.format(data_dir, 'targ.txt'))]
# desc_lines = [line.replace('"', '""') for line in desc_lines]
csv_lines = ['"{}","{}","{}","p","p","p","p","p","n","n","n","n","n"'.format(str(i+1), desc_lines[i], regex_lines[i]) for i in range(len(regex_lines))]
with open('{}/{}'.format(data_dir, "data_kushman_format.csv"), "w") as out_file:
out_file.write("\n".join(csv_lines))
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
|
autogl/module/nas/space/graph_nas.py | THUMNLab/AutoGL | 824 | 12639583 |
# Code in this file is reproduced from https://github.com/GraphNAS/GraphNAS with some changes.
import typing as _typ
import torch
import torch.nn.functional as F
from nni.nas.pytorch import mutables
from . import register_nas_space
from .base import BaseSpace
from ...model import BaseModel
from torch import nn
from .operation import act_map, gnn_map
GRAPHNAS_DEFAULT_GNN_OPS = [
"gat_8", # GAT with 8 heads
"gat_6", # GAT with 6 heads
"gat_4", # GAT with 4 heads
"gat_2", # GAT with 2 heads
"gat_1", # GAT with 1 heads
"gcn", # GCN
"cheb", # chebnet
"sage", # sage
"arma",
"sg", # simplifying gcn
"linear", # skip connection
"zero", # skip connection
]
GRAPHNAS_DEFAULT_ACT_OPS = [
# "sigmoid", "tanh", "relu", "linear",
# "softplus", "leaky_relu", "relu6", "elu"
"sigmoid",
"tanh",
"relu",
"linear",
"elu",
]
class LambdaModule(nn.Module):
def __init__(self, lambd):
super().__init__()
self.lambd = lambd
def forward(self, x):
return self.lambd(x)
def __repr__(self):
return "{}({})".format(self.__class__.__name__, self.lambd)
class StrModule(nn.Module):
def __init__(self, lambd):
super().__init__()
self.str = lambd
def forward(self, *args, **kwargs):
return self.str
def __repr__(self):
return "{}({})".format(self.__class__.__name__, self.str)
def act_map_nn(act):
return LambdaModule(act_map(act))
def map_nn(l):
return [StrModule(x) for x in l]
@register_nas_space("graphnas")
class GraphNasNodeClassificationSpace(BaseSpace):
def __init__(
self,
hidden_dim: _typ.Optional[int] = 64,
layer_number: _typ.Optional[int] = 2,
dropout: _typ.Optional[float] = 0.9,
input_dim: _typ.Optional[int] = None,
output_dim: _typ.Optional[int] = None,
gnn_ops: _typ.Sequence[_typ.Union[str, _typ.Any]] = GRAPHNAS_DEFAULT_GNN_OPS,
act_ops: _typ.Sequence[_typ.Union[str, _typ.Any]] = GRAPHNAS_DEFAULT_ACT_OPS,
):
super().__init__()
self.layer_number = layer_number
self.hidden_dim = hidden_dim
self.input_dim = input_dim
self.output_dim = output_dim
self.gnn_ops = gnn_ops
self.act_ops = act_ops
self.dropout = dropout
def instantiate(
self,
hidden_dim: _typ.Optional[int] = None,
layer_number: _typ.Optional[int] = None,
dropout: _typ.Optional[float] = None,
input_dim: _typ.Optional[int] = None,
output_dim: _typ.Optional[int] = None,
gnn_ops: _typ.Sequence[_typ.Union[str, _typ.Any]] = None,
act_ops: _typ.Sequence[_typ.Union[str, _typ.Any]] = None,
):
super().instantiate()
self.dropout = dropout or self.dropout
self.hidden_dim = hidden_dim or self.hidden_dim
self.layer_number = layer_number or self.layer_number
self.input_dim = input_dim or self.input_dim
self.output_dim = output_dim or self.output_dim
self.gnn_ops = gnn_ops or self.gnn_ops
self.act_ops = act_ops or self.act_ops
self.preproc0 = nn.Linear(self.input_dim, self.hidden_dim)
self.preproc1 = nn.Linear(self.input_dim, self.hidden_dim)
node_labels = [mutables.InputChoice.NO_KEY, mutables.InputChoice.NO_KEY]
for layer in range(2, self.layer_number + 2):
node_labels.append(f"op_{layer}")
setattr(
self,
f"in_{layer}",
self.setInputChoice(
layer,
choose_from=node_labels[:-1],
n_chosen=1,
return_mask=False,
key=f"in_{layer}",
),
)
setattr(
self,
f"op_{layer}",
self.setLayerChoice(
layer,
[
gnn_map(op, self.hidden_dim, self.hidden_dim)
for op in self.gnn_ops
],
key=f"op_{layer}",
),
)
setattr(
self,
"act",
self.setLayerChoice(
2 * layer, [act_map_nn(a) for a in self.act_ops], key="act"
),
)
setattr(
self,
"concat",
self.setLayerChoice(
2 * layer + 1, map_nn(["add", "product", "concat"]), key="concat"
),
)
self._initialized = True
self.classifier1 = nn.Linear(
self.hidden_dim * self.layer_number, self.output_dim
)
self.classifier2 = nn.Linear(self.hidden_dim, self.output_dim)
def forward(self, data):
x, edges = data.x, data.edge_index # x [2708,1433] ,[2, 10556]
x = F.dropout(x, p=self.dropout, training=self.training)
pprev_, prev_ = self.preproc0(x), self.preproc1(x)
prev_nodes_out = [pprev_, prev_]
for layer in range(2, self.layer_number + 2):
node_in = getattr(self, f"in_{layer}")(prev_nodes_out)
node_out = getattr(self, f"op_{layer}")(node_in, edges)
prev_nodes_out.append(node_out)
act = getattr(self, "act")
con = getattr(self, "concat")()
states = prev_nodes_out
if con == "concat":
x = torch.cat(states[2:], dim=1)
else:
tmp = states[2]
for i in range(2, len(states)):
if con == "add":
tmp = torch.add(tmp, states[i])
elif con == "product":
tmp = torch.mul(tmp, states[i])
x = tmp
x = act(x)
if con == "concat":
x = self.classifier1(x)
else:
x = self.classifier2(x)
return F.log_softmax(x, dim=1)
def parse_model(self, selection, device) -> BaseModel:
# return AutoGCN(self.input_dim, self.output_dim, device)
return self.wrap(device).fix(selection)
|
running_modes/reinforcement_learning/logging/link_logging/local_bond_link_reinforcement_logger.py | lilleswing/Reinvent-1 | 183 | 12639590 |
import numpy as np
import torch
from reinvent_chemistry.logging import fraction_valid_smiles, padding_with_invalid_smiles, \
check_for_invalid_mols_and_create_legend, find_matching_pattern_in_smiles, add_mols
from reinvent_scoring.scoring.diversity_filters.lib_invent.base_diversity_filter import BaseDiversityFilter
from reinvent_scoring.scoring.enums.scoring_function_component_enum import ScoringFunctionComponentNameEnum
from reinvent_scoring.scoring.score_summary import FinalSummary
from torch.utils.tensorboard import SummaryWriter
from running_modes.configurations import ReinforcementLoggerConfiguration, GeneralConfigurationEnvelope
from running_modes.reinforcement_learning.logging.link_logging.base_reinforcement_logger import BaseReinforcementLogger
from running_modes.reinforcement_learning.logging.link_logging.console_message import ConsoleMessage
class LocalBondLinkReinforcementLogger(BaseReinforcementLogger):
def __init__(self, configuration: GeneralConfigurationEnvelope, log_config: ReinforcementLoggerConfiguration):
super().__init__(configuration, log_config)
self._summary_writer = SummaryWriter(log_dir=self._log_config.logging_path)
# _rows and _columns define the shape of the output grid of molecule images in tensorboard.
self._rows = 4
self._columns = 4
self._sample_size = self._rows * self._columns
self._sf_component_enum = ScoringFunctionComponentNameEnum()
self._console_message_formatter = ConsoleMessage()
def log_message(self, message: str):
self._logger.info(message)
def timestep_report(self, start_time, n_steps, step, score_summary: FinalSummary,
agent_likelihood: torch.tensor, prior_likelihood: torch.tensor,
augmented_likelihood: torch.tensor, diversity_filter):
message = self._console_message_formatter.create(start_time, n_steps, step, score_summary,
agent_likelihood, prior_likelihood,
augmented_likelihood)
self._logger.info(message)
self._tensorboard_report(step, score_summary, agent_likelihood, prior_likelihood, augmented_likelihood,
diversity_filter)
def _tensorboard_report(self, step, score_summary: FinalSummary, agent_likelihood, prior_likelihood,
augmented_likelihood, diversity_filter: BaseDiversityFilter):
self._summary_writer.add_scalars("nll/avg", {
"prior": prior_likelihood.mean(),
"augmented": augmented_likelihood.mean(),
"agent": agent_likelihood.mean()
}, step)
self._summary_writer.add_scalars("nll/variance", {
"prior": prior_likelihood.var(),
"augmented": augmented_likelihood.var(),
"agent": agent_likelihood.var()
}, step)
mean_score = np.mean(score_summary.total_score)
for i, log in enumerate(score_summary.profile):
self._summary_writer.add_scalar(score_summary.profile[i].name, np.mean(score_summary.profile[i].score),
step)
self._summary_writer.add_scalar("Valid SMILES", fraction_valid_smiles(score_summary.scored_smiles), step)
self._summary_writer.add_scalar("Number of SMILES found", diversity_filter.number_of_smiles_in_memory(), step)
self._summary_writer.add_scalar("Average score", mean_score, step)
if step % 1 == 0:
self._log_out_smiles_sample(score_summary.scored_smiles, score_summary.total_score, step, score_summary)
def _log_out_smiles_sample(self, smiles, score, step, score_summary: FinalSummary):
self._visualize_structures(smiles, score, step, score_summary)
def _visualize_structures(self, smiles, score, step, score_summary: FinalSummary):
list_of_mols, legends, pattern = self._check_for_invalid_mols_and_create_legends(smiles, score, score_summary)
try:
add_mols(self._summary_writer, "Molecules from epoch", list_of_mols[:self._sample_size], self._rows,
[x for x in legends], global_step=step, size_per_mol=(320, 320), pattern=pattern)
except:
raise Exception(f"Error in RDKit has occurred, skipping report for step {step}.")
def _check_for_invalid_mols_and_create_legends(self, smiles, score, score_summary: FinalSummary):
smiles = padding_with_invalid_smiles(smiles, self._sample_size)
list_of_mols, legend = check_for_invalid_mols_and_create_legend(smiles, score, self._sample_size)
smarts_pattern = self._get_matching_substructure_from_config(score_summary)
pattern = find_matching_pattern_in_smiles(list_of_mols=list_of_mols, smarts_pattern=smarts_pattern)
return list_of_mols, legend, pattern
def _get_matching_substructure_from_config(self, score_summary: FinalSummary):
smarts_pattern = ""
for summary_component in score_summary.scaffold_log:
if summary_component.parameters.component_type == self._sf_component_enum.MATCHING_SUBSTRUCTURE:
smarts = summary_component.parameters.specific_parameters.get('smiles', [])
if len(smarts) > 0:
smarts_pattern = smarts[0]
return smarts_pattern
|
paperboy/scheduler/remote.py | datalayer-externals/papermill-paperboy | 233 | 12639650 |
import requests
from .base import BaseScheduler
class RemoteScheduler(BaseScheduler):
'''Proxy methods to a remote worker instance'''
def __init__(self, *args, **kwargs):
super(RemoteScheduler, self).__init__(*args, **kwargs)
def status(self, user, params, session, *args, **kwargs):
# FIXME async/celery
return requests.get(self.config.scheduler.status_url, params=params).json()
def schedule(self, user, notebook, job, reports, *args, **kwargs):
# FIXME async/celery
params = {'user': user.to_json(), 'notebook': notebook.to_json(), 'job': job.to_json(), 'reports': [r.to_json() for r in reports]}
return requests.post(self.config.scheduler.schedule_url, params=params).json()
def unschedule(self, user, notebook, job, reports, *args, **kwargs):
return self.schedule(user, notebook, job, reports, *args, **kwargs)
|
codes/models/modules/FlowNet_SR_x4.py | CJWBW/HCFlow | 123 | 12639677 | import numpy as np
import torch
from torch import nn as nn
import torch.nn.functional as F
from utils.util import opt_get
from models.modules import Basic
from models.modules.FlowStep import FlowStep
from models.modules.ConditionalFlow import ConditionalFlow
class FlowNet(nn.Module):
def __init__(self, image_shape, opt=None):
assert image_shape[2] == 1 or image_shape[2] == 3
super().__init__()
H, W, self.C = image_shape
self.opt = opt
self.L = opt_get(opt, ['network_G', 'flowDownsampler', 'L'])
self.K = opt_get(opt, ['network_G', 'flowDownsampler', 'K'])
if isinstance(self.K, int): self.K = [self.K] * (self.L + 1)
n_additionalFlowNoAffine = opt_get(self.opt, ['network_G', 'flowDownsampler', 'additionalFlowNoAffine'], 0)
flow_permutation = opt_get(self.opt, ['network_G', 'flowDownsampler', 'flow_permutation'], 'invconv')
flow_coupling = opt_get(self.opt, ['network_G', 'flowDownsampler', 'flow_coupling'], 'Affine')
cond_channels = opt_get(self.opt, ['network_G', 'flowDownsampler', 'cond_channels'], None)
enable_splitOff = opt_get(opt, ['network_G', 'flowDownsampler', 'splitOff', 'enable'], False)
after_splitOff_flowStep = opt_get(opt, ['network_G', 'flowDownsampler', 'splitOff', 'after_flowstep'], 0)
if isinstance(after_splitOff_flowStep, int): after_splitOff_flowStep = [after_splitOff_flowStep] * (self.L + 1)
# construct flow
self.layers = nn.ModuleList()
self.output_shapes = []
for level in range(self.L):
# 1. Squeeze
self.layers.append(Basic.SqueezeLayer(factor=2)) # may need a better way for squeezing
self.C, H, W = self.C * 4, H // 2, W // 2
self.output_shapes.append([-1, self.C, H, W])
# 2. main FlowSteps (unconditional flow)
for k in range(self.K[level]-after_splitOff_flowStep[level]):
self.layers.append(FlowStep(in_channels=self.C, cond_channels=cond_channels,
flow_permutation=flow_permutation,
flow_coupling=flow_coupling,
opt=opt['network_G']['flowDownsampler']))
self.output_shapes.append([-1, self.C, H, W])
# 3. additional FlowSteps (split + conditional flow)
if enable_splitOff:
if level == 0:
self.layers.append(Basic.Split(num_channels_split=self.C // 2 if level < self.L-1 else 3, level=level))
self.level0_condFlow = ConditionalFlow(num_channels=self.C,
num_channels_split=self.C // 2 if level < self.L-1 else 3,
n_flow_step=after_splitOff_flowStep[level],
opt=opt['network_G']['flowDownsampler']['splitOff'],
num_levels_condition=1, SR=True)
elif level == 1:
self.layers.append(Basic.Split(num_channels_split=self.C // 2 if level < self.L-1 else 3, level=level))
self.level1_condFlow = ConditionalFlow(num_channels=self.C,
num_channels_split=self.C // 2 if level < self.L-1 else 3,
n_flow_step=after_splitOff_flowStep[level],
opt=opt['network_G']['flowDownsampler']['splitOff'],
num_levels_condition=0, SR=True)
self.C = self.C // 2 if level < self.L-1 else 3
self.output_shapes.append([-1, self.C, H, W])
self.H = H
self.W = W
self.scaleH = image_shape[0] / H
self.scaleW = image_shape[1] / W
print('shapes:', self.output_shapes)
# nodetach version; 0.05 better than detach version, 0.30 better when using only nll loss
def forward(self, hr=None, z=None, u=None, eps_std=None, logdet=None, reverse=False, training=True):
if not reverse:
return self.normal_flow(hr, u=u, logdet=logdet, training=training)
else:
return self.reverse_flow(z, u=u, eps_std=eps_std, training=training)
'''
hr->y1+z1->y2+z2
'''
def normal_flow(self, z, u=None, logdet=None, training=True):
for layer, shape in zip(self.layers, self.output_shapes):
if isinstance(layer, FlowStep):
z, logdet = layer(z, u, logdet=logdet, reverse=False)
elif isinstance(layer, Basic.SqueezeLayer):
z, logdet = layer(z, logdet=logdet, reverse=False)
elif isinstance(layer, Basic.Split):
if layer.level == 0:
z, a1 = layer(z, reverse=False)
y1 = z.clone()
elif layer.level == 1:
z, a2 = layer(z, reverse=False)
logdet, conditional_feature2 = self.level1_condFlow(a2, z, logdet=logdet, reverse=False, training=training)
conditional_feature1 = torch.cat([y1, F.interpolate(conditional_feature2, scale_factor=2, mode='nearest')],1)
logdet, _ = self.level0_condFlow(a1, conditional_feature1, logdet=logdet, reverse=False, training=training)
return z, logdet
'''
y2+z2->y1+z1->hr
'''
def reverse_flow(self, z, u=None, eps_std=None, training=True):
for layer, shape in zip(reversed(self.layers), reversed(self.output_shapes)):
if isinstance(layer, FlowStep):
z, _ = layer(z, u, reverse=True)
elif isinstance(layer, Basic.SqueezeLayer):
z, _ = layer(z, reverse=True)
elif isinstance(layer, Basic.Split):
if layer.level == 1:
a2, _, conditional_feature2 = self.level1_condFlow(None, z, eps_std=eps_std, reverse=True, training=training)
z = layer(z, a2, reverse=True)
elif layer.level == 0:
conditional_feature1 = torch.cat([z, F.interpolate(conditional_feature2, scale_factor=2, mode='nearest')],1)
a1, _, _ = self.level0_condFlow(None, conditional_feature1, eps_std=eps_std, reverse=True, training=training)
z = layer(z, a1, reverse=True)
return z
|
models/rank/logistic_regression/static_model.py | ziyoujiyi/PaddleRec | 2,739 | 12639689 | # Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
import paddle
from net import LRLayer
class StaticModel():
def __init__(self, config):
self.cost = None
self.config = config
self._init_hyper_parameters()
def _init_hyper_parameters(self):
self.sparse_feature_number = self.config.get(
"hyper_parameters.sparse_feature_number", None)
self.num_field = self.config.get("hyper_parameters.num_field", None)
self.learning_rate = self.config.get(
"hyper_parameters.optimizer.learning_rate")
self.reg = self.config.get("hyper_parameters.reg", 1e-4)
def create_feeds(self, is_infer=False):
dense_input = paddle.static.data(
name="dense_input", shape=[None, self.num_field], dtype="float32")
sparse_input_ids = paddle.static.data(
name="sparse_input", shape=[None, self.num_field], dtype="int64")
label = paddle.static.data(
name="label", shape=[None, 1], dtype="int64")
feeds_list = [label] + [sparse_input_ids] + [dense_input]
return feeds_list
def net(self, inputs, is_infer=False):
init_value_ = 0.1
# ------------------------- network input --------------------------
self.label = inputs[0]
feat_idx = inputs[1]
feat_value = inputs[2]
#feat_value = paddle.reshape(
# raw_feat_value, [-1, self.num_field]) # None * num_field * 1
LR_model = LRLayer(self.sparse_feature_number, init_value_, self.reg,
self.num_field)
self.predict = LR_model(feat_idx, feat_value)
predict_2d = paddle.concat(x=[1 - self.predict, self.predict], axis=1)
label_int = paddle.cast(self.label, 'int64')
auc, batch_auc_var, _ = paddle.static.auc(input=predict_2d,
label=label_int,
slide_steps=0)
self.inference_target_var = auc
if is_infer:
fetch_dict = {'auc': auc}
return fetch_dict
cost = paddle.nn.functional.log_loss(
input=self.predict, label=paddle.cast(self.label, "float32"))
avg_cost = paddle.sum(x=cost)
self._cost = avg_cost
fetch_dict = {'cost': avg_cost, 'auc': auc}
return fetch_dict
def create_optimizer(self, strategy=None):
optimizer = paddle.optimizer.Adam(
learning_rate=self.learning_rate, lazy_mode=True)
if strategy != None:
import paddle.distributed.fleet as fleet
optimizer = fleet.distributed_optimizer(optimizer, strategy)
optimizer.minimize(self._cost)
def infer_net(self, input):
return self.net(input, is_infer=True)
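# Minimal usage sketch: builds the static program once to show how create_feeds()
# and net() fit together. The dotted-key config dict is a placeholder for the
# values PaddleRec normally loads from YAML, and `net.LRLayer` must be importable.
if __name__ == "__main__":
    paddle.enable_static()
    placeholder_config = {
        "hyper_parameters.sparse_feature_number": 1000001,
        "hyper_parameters.num_field": 39,
        "hyper_parameters.optimizer.learning_rate": 0.001,
        "hyper_parameters.reg": 1e-4,
    }
    model = StaticModel(placeholder_config)
    feeds = model.create_feeds()
    fetch_dict = model.net(feeds)
    print(fetch_dict)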
|
applications/pytorch/cnns/utils/import_helper.py | payoto/graphcore_examples | 260 | 12639696 | # Copyright (c) 2021 Graphcore Ltd. All rights reserved.
import sys
from pathlib import Path
root_folder = str(Path(__file__).parent.parent.absolute())
sys.path.insert(0, root_folder)
|
prince/svd.py | gkbharathy/FactorAnalysisMixedData | 749 | 12639765 | """Singular Value Decomposition (SVD)"""
try:
import fbpca
FBPCA_INSTALLED = True
except ImportError:
FBPCA_INSTALLED = False
from sklearn.utils import extmath
def compute_svd(X, n_components, n_iter, random_state, engine):
"""Computes an SVD with k components."""
# Determine what SVD engine to use
if engine == 'auto':
engine = 'sklearn'
# Compute the SVD
if engine == 'fbpca':
if FBPCA_INSTALLED:
U, s, V = fbpca.pca(X, k=n_components, n_iter=n_iter)
else:
raise ValueError('fbpca is not installed; please install it if you want to use it')
elif engine == 'sklearn':
U, s, V = extmath.randomized_svd(
X,
n_components=n_components,
n_iter=n_iter,
random_state=random_state
)
else:
raise ValueError("engine has to be one of ('auto', 'fbpca', 'sklearn')")
U, V = extmath.svd_flip(U, V)
return U, s, V
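# Minimal usage sketch: random data with the sklearn engine so the optional
# fbpca dependency is not required; shapes follow the randomized_svd convention.
if __name__ == '__main__':
    import numpy as np
    X = np.random.RandomState(42).rand(100, 10)
    U, s, V = compute_svd(X, n_components=3, n_iter=5, random_state=42, engine='sklearn')
    print(U.shape, s.shape, V.shape)  # (100, 3) (3,) (3, 10)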
|
third_party/harfbuzz/contrib/tables/grapheme-break-parse.py | quanganh2627/bytm-x64-L-w05-2015_external_chromium_org_third_party_skia | 264 | 12639786 | import sys
from unicode_parse_common import *
# http://www.unicode.org/Public/UNIDATA/auxiliary/GraphemeBreakProperty.txt
property_to_harfbuzz = {
'CR': 'HB_Grapheme_CR',
'LF': 'HB_Grapheme_LF',
'Control': 'HB_Grapheme_Control',
'Extend': 'HB_Grapheme_Extend',
'Prepend': 'HB_Grapheme_Other',
'SpacingMark': 'HB_Grapheme_Other',
'L': 'HB_Grapheme_L',
'V': 'HB_Grapheme_V',
'T': 'HB_Grapheme_T',
'LV': 'HB_Grapheme_LV',
'LVT': 'HB_Grapheme_LVT',
}
def main(infile, outfile):
ranges = unicode_file_parse(infile, property_to_harfbuzz)
ranges.sort()
print >>outfile, '// Generated from Unicode Grapheme break tables\n'
print >>outfile, '#ifndef GRAPHEME_BREAK_PROPERTY_H_'
print >>outfile, '#define GRAPHEME_BREAK_PROPERTY_H_\n'
print >>outfile, '#include <stdint.h>'
print >>outfile, '#include "harfbuzz-external.h"\n'
print >>outfile, 'struct grapheme_break_property {'
print >>outfile, ' uint32_t range_start;'
print >>outfile, ' uint32_t range_end;'
print >>outfile, ' HB_GraphemeClass klass;'
print >>outfile, '};\n'
print >>outfile, 'static const struct grapheme_break_property grapheme_break_properties[] = {'
for (start, end, value) in ranges:
print >>outfile, ' {0x%x, 0x%x, %s},' % (start, end, value)
print >>outfile, '};\n'
print >>outfile, 'static const unsigned grapheme_break_properties_count = %d;\n' % len(ranges)
print >>outfile, '#endif // GRAPHEME_BREAK_PROPERTY_H_'
if __name__ == '__main__':
if len(sys.argv) != 3:
print 'Usage: %s <input .txt> <output .h>' % sys.argv[0]
else:
main(file(sys.argv[1], 'r'), file(sys.argv[2], 'w+'))
|
tests/test_loggers/test_console_logger.py | martins0n/etna | 326 | 12639801 | <reponame>martins0n/etna
from tempfile import NamedTemporaryFile
from typing import Sequence
import pytest
from loguru import logger as _logger
from etna.datasets import TSDataset
from etna.loggers import ConsoleLogger
from etna.loggers import tslogger
from etna.metrics import MAE
from etna.metrics import MSE
from etna.metrics import SMAPE
from etna.metrics import Metric
from etna.models import CatBoostModelMultiSegment
from etna.models import LinearMultiSegmentModel
from etna.models import LinearPerSegmentModel
from etna.models import ProphetModel
from etna.pipeline import Pipeline
from etna.transforms import AddConstTransform
from etna.transforms import DateFlagsTransform
from etna.transforms import LagTransform
from etna.transforms import Transform
def check_logged_transforms(log_file: str, transforms: Sequence[Transform]):
"""Check that transforms are logged into the file."""
with open(log_file, "r") as in_file:
lines = in_file.readlines()
assert len(lines) == len(transforms)
for line, transform in zip(lines, transforms):
assert transform.__class__.__name__ in line
def test_tsdataset_transform_logging(example_tsds: TSDataset):
"""Check working of logging inside `TSDataset.transform`."""
transforms = [LagTransform(lags=5, in_column="target"), AddConstTransform(value=5, in_column="target")]
file = NamedTemporaryFile()
_logger.add(file.name)
example_tsds.fit_transform(transforms=transforms)
idx = tslogger.add(ConsoleLogger())
example_tsds.transform(transforms=example_tsds.transforms)
check_logged_transforms(log_file=file.name, transforms=transforms)
tslogger.remove(idx)
def test_tsdataset_fit_transform_logging(example_tsds: TSDataset):
"""Check working of logging inside `TSDataset.fit_transform`."""
transforms = [LagTransform(lags=5, in_column="target"), AddConstTransform(value=5, in_column="target")]
file = NamedTemporaryFile()
_logger.add(file.name)
idx = tslogger.add(ConsoleLogger())
example_tsds.fit_transform(transforms=transforms)
check_logged_transforms(log_file=file.name, transforms=transforms)
tslogger.remove(idx)
def test_tsdataset_make_future_logging(example_tsds: TSDataset):
"""Check working of logging inside `TSDataset.make_future`."""
transforms = [LagTransform(lags=5, in_column="target"), AddConstTransform(value=5, in_column="target")]
file = NamedTemporaryFile()
_logger.add(file.name)
example_tsds.fit_transform(transforms=transforms)
idx = tslogger.add(ConsoleLogger())
_ = example_tsds.make_future(5)
check_logged_transforms(log_file=file.name, transforms=transforms)
tslogger.remove(idx)
def test_tsdataset_inverse_transform_logging(example_tsds: TSDataset):
"""Check working of logging inside `TSDataset.inverse_transform`."""
transforms = [LagTransform(lags=5, in_column="target"), AddConstTransform(value=5, in_column="target")]
file = NamedTemporaryFile()
_logger.add(file.name)
example_tsds.fit_transform(transforms=transforms)
idx = tslogger.add(ConsoleLogger())
example_tsds.inverse_transform()
check_logged_transforms(log_file=file.name, transforms=transforms[::-1])
tslogger.remove(idx)
@pytest.mark.parametrize("metric", [MAE(), MSE(), MAE(mode="macro")])
def test_metric_logging(example_tsds: TSDataset, metric: Metric):
"""Check working of logging inside `Metric.__call__`."""
file = NamedTemporaryFile()
_logger.add(file.name)
horizon = 10
ts_train, ts_test = example_tsds.train_test_split(test_size=horizon)
pipeline = Pipeline(model=ProphetModel(), horizon=horizon)
pipeline.fit(ts_train)
ts_forecast = pipeline.forecast()
idx = tslogger.add(ConsoleLogger())
_ = metric(y_true=ts_test, y_pred=ts_forecast)
with open(file.name, "r") as in_file:
lines = in_file.readlines()
assert len(lines) == 1
assert repr(metric) in lines[0]
tslogger.remove(idx)
def test_backtest_logging(example_tsds: TSDataset):
"""Check working of logging inside backtest."""
file = NamedTemporaryFile()
_logger.add(file.name)
idx = tslogger.add(ConsoleLogger())
metrics = [MAE(), MSE(), SMAPE()]
metrics_str = ["MAE", "MSE", "SMAPE"]
date_flags = DateFlagsTransform(day_number_in_week=True, day_number_in_month=True)
pipe = Pipeline(model=CatBoostModelMultiSegment(), horizon=10, transforms=[date_flags])
n_folds = 5
pipe.backtest(ts=example_tsds, metrics=metrics, n_jobs=1, n_folds=n_folds)
with open(file.name, "r") as in_file:
lines = in_file.readlines()
    # keep only the lines about backtest
lines = [line for line in lines if "backtest" in line]
assert len(lines) == len(metrics) * n_folds * len(example_tsds.segments)
assert all([any([metric_str in line for metric_str in metrics_str]) for line in lines])
tslogger.remove(idx)
def test_backtest_logging_no_tables(example_tsds: TSDataset):
"""Check working of logging inside backtest with `table=False`."""
file = NamedTemporaryFile()
_logger.add(file.name)
idx = tslogger.add(ConsoleLogger(table=False))
metrics = [MAE(), MSE(), SMAPE()]
date_flags = DateFlagsTransform(day_number_in_week=True, day_number_in_month=True)
pipe = Pipeline(model=CatBoostModelMultiSegment(), horizon=10, transforms=[date_flags])
n_folds = 5
pipe.backtest(ts=example_tsds, metrics=metrics, n_jobs=1, n_folds=n_folds)
with open(file.name, "r") as in_file:
lines = in_file.readlines()
    # keep only the lines about backtest
lines = [line for line in lines if "backtest" in line]
assert len(lines) == 0
tslogger.remove(idx)
@pytest.mark.parametrize("model", [LinearPerSegmentModel(), LinearMultiSegmentModel()])
def test_model_logging(example_tsds, model):
"""Check working of logging in fit/forecast of model."""
horizon = 7
lags = LagTransform(in_column="target", lags=[i + horizon for i in range(1, 5 + 1)])
example_tsds.fit_transform([lags])
file = NamedTemporaryFile()
_logger.add(file.name)
idx = tslogger.add(ConsoleLogger())
model.fit(example_tsds)
to_forecast = example_tsds.make_future(horizon)
model.forecast(to_forecast)
with open(file.name, "r") as in_file:
lines = in_file.readlines()
# filter out logs related to transforms
lines = [line for line in lines if lags.__class__.__name__ not in line]
assert len(lines) == 2
assert "fit" in lines[0]
assert "forecast" in lines[1]
tslogger.remove(idx)
|
tests/errors_test.py | EdwardBetts/riprova | 114 | 12639818 | # -*- coding: utf-8 -*-
import pytest
from riprova import (ErrorWhitelist, ErrorBlacklist,
NotRetriableError, add_whitelist_error)
def test_error_whitelist():
whitelist = ErrorWhitelist()
assert type(ErrorWhitelist.WHITELIST) is set
assert len(whitelist._list) > 4
assert type(whitelist._list) is set
assert whitelist._list is not ErrorWhitelist.WHITELIST
# Test setter
whitelist.errors = (Exception, RuntimeError)
# Test getter
assert whitelist.errors == set([Exception, RuntimeError])
# Test add()
whitelist.add(BaseException, SystemExit)
assert whitelist.errors == set([Exception, RuntimeError,
BaseException, SystemExit])
def test_error_whitelist_invalid():
whitelist = ErrorWhitelist()
with pytest.raises(TypeError):
whitelist.errors = dict()
with pytest.raises(TypeError):
whitelist.errors = None
with pytest.raises(TypeError):
whitelist.add(None)
with pytest.raises(TypeError):
whitelist.add(dict())
class NoRetryError(NotRetriableError):
pass
class RetryError(NotRetriableError):
__retry__ = True
@pytest.mark.parametrize("error,expected", [
(SystemExit(), False),
(ImportError(), False),
(ReferenceError(), False),
(SyntaxError(), False),
(KeyboardInterrupt(), False),
(NotRetriableError(), False),
(NoRetryError(), False),
(ReferenceError(), False),
(RetryError(), True),
(Exception(), True),
(RuntimeError(), True),
(TypeError(), True),
(ValueError(), True),
])
def test_error_whitelist_isretry(error, expected):
assert ErrorWhitelist().isretry(error) is expected
def test_error_blacklist():
blacklist = ErrorBlacklist()
assert type(ErrorBlacklist.WHITELIST) is set
assert len(blacklist._list) > 4
assert type(blacklist._list) is set
assert blacklist._list is not ErrorWhitelist.WHITELIST
# Test setter
blacklist.errors = (Exception, RuntimeError)
# Test getter
assert blacklist.errors == set([Exception, RuntimeError])
# Test add()
blacklist.add(BaseException, SystemExit)
assert blacklist.errors == set([Exception, RuntimeError,
BaseException, SystemExit])
@pytest.mark.parametrize("error,expected", [
(SystemExit(), True),
(ImportError(), True),
(ReferenceError(), True),
(SyntaxError(), True),
(KeyboardInterrupt(), True),
(NotRetriableError(), True),
(NoRetryError(), True),
(ReferenceError(), True),
(RetryError(), False),
(Exception(), False),
(RuntimeError(), False),
(TypeError(), False),
(ValueError(), False),
])
def test_error_blacklist_isretry(error, expected):
assert ErrorBlacklist().isretry(error) is expected
def test_add_whitelist_error():
whitelist = ErrorWhitelist.WHITELIST.copy()
assert len(ErrorWhitelist.WHITELIST) == len(whitelist)
add_whitelist_error(AttributeError, EnvironmentError)
assert len(ErrorWhitelist.WHITELIST) == len(whitelist) + 2
def test_add_whitelist_error_invalid():
with pytest.raises(TypeError):
add_whitelist_error(None)
with pytest.raises(TypeError):
add_whitelist_error(dict())
|
prody/apps/evol_apps/evol_filter.py | grandevelia/ProDy | 210 | 12639857 | """Filter MSA application."""
from ..apptools import DevelApp
__all__ = ['evol_filter']
APP = DevelApp('filter', 'filter an MSA using sequence labels')
APP.setExample(
"""Filter sequences in an MSA based on label data.
The following example will filter human sequences:
$ evol filter piwi_seed.slx HUMAN -e""", [])
APP.addArgument('msa',
help='MSA filename to be filtered')
APP.addArgument('word',
help='word to be compared to sequence label', nargs='+')
APP.addGroup('filter', 'filtering method (required)', True, True)
APP.addArgument('-s', '--startswith',
dest='startswith',
help='sequence label starts with given words',
action='store_true',
group='filter')
APP.addArgument('-e', '--endswith',
dest='endswith',
help='sequence label ends with given words',
action='store_true',
group='filter')
APP.addArgument('-c', '--contains',
dest='contains',
    help='sequence label contains given words',
action='store_true',
group='filter')
APP.addGroup('filter2', 'filter option')
APP.addArgument('-F', '--full-label',
dest='filter_full',
help='compare full label with word(s)',
action='store_true',
group='filter2')
APP.addGroup('output', 'output options')
APP.addArgument('-o', '--outname',
dest='outname',
help='output filename, default is msa filename with _refined suffix',
type=str,
metavar='STR',
group='output')
APP.addArgument('-f', '--format',
dest='format',
type=str,
metavar='STR',
help='output MSA file format, default is same as input',
group='output')
APP.addArgument('-z', '--compressed',
dest='compressed',
action='store_true',
help='gzip refined MSA output',
group='output')
def evol_filter(msa, *word, **kwargs):
import prody
from prody import MSAFile, writeMSA, LOGGER
from os.path import splitext
outname = kwargs.get('outname')
if outname is None:
outname, ext = splitext(msa)
if ext.lower() == '.gz':
outname, _ = splitext(msa)
outname += '_filtered' + ext
single = len(word) == 1
if single:
word = word[0]
    if kwargs.get('startswith', False):
        if single:
            filter = lambda label, seq, word=word: label.startswith(word)
        else:
            # str.startswith/endswith accept a tuple of words
            filter = lambda label, seq, words=word: label.startswith(words)
    elif kwargs.get('endswith', False):
        if single:
            filter = lambda label, seq, word=word: label.endswith(word)
        else:
            filter = lambda label, seq, words=word: label.endswith(words)
    elif kwargs.get('contains', False):
        if single:
            filter = lambda label, seq, word=word: word in label
        else:
            filter = lambda label, seq, words=word: any(w in label for w in words)
elif kwargs.get('equals', False):
if single:
filter = lambda label, seq, word=word: word == label
else:
filter = lambda label, seq, word=set(word): label in word
else:
raise TypeError('one of startswith, endswith, contains, or equals '
'must be specified')
msa = MSAFile(msa, filter=filter,
filter_full=kwargs.get('filter_full', False))
LOGGER.info('Filtered MSA is written in file: ' +
writeMSA(outname, msa, **kwargs))
APP.setFunction(evol_filter)
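# Minimal usage sketch: programmatic equivalent of `evol filter piwi_seed.slx HUMAN -e`;
# the MSA filename below is a placeholder and must point to an existing file.
if __name__ == '__main__':
    evol_filter('piwi_seed.slx', 'HUMAN', endswith=True)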
|
lib/cherrypyscheduler.py | 0x20Man/Watcher3 | 320 | 12639871 | import logging
from datetime import datetime, timedelta
from threading import Timer, Lock
import os
import json
from cherrypy.process import plugins
logging = logging.getLogger('CPTaskScheduler')
class _Record(object):
''' Default tasks record handler
Will read/write from/to ./tasks.json
'''
lock = Lock()
file = os.path.join(os.path.split(os.path.realpath(__file__))[0], 'tasks.json')
@staticmethod
def read():
''' Reads persistence record to dict
Returns dict
'''
with _Record.lock:
with open(_Record.file, 'a+') as f:
try:
f.seek(0)
r = json.load(f)
except Exception as _: # noqa
r = {}
json.dump(r, f)
return r
@staticmethod
def write(name, le):
''' Writes record to record_handler
name (str): name of task
le (str): str() cast of datetime.datetime object for last execution
Does not return
'''
with _Record.lock:
with open(_Record.file, 'r+') as f:
r = json.load(f)
r[name] = {'last_execution': le}
f.seek(0)
json.dump(r, f, indent=4, sort_keys=True)
f.seek(0)
class SchedulerPlugin(plugins.SimplePlugin):
'''
CherryPy plugin that schedules events at a specific time of day,
repeating at a certain interval.
Class Methods:
__init__
bus (obj): instance of Cherrypy engine
Class Vars:
task_list (dict): {'task_name': <instance of ScheduledTask>}.
record (dict): {'task_name': {'last_execution': '2017-01-01 23:28:00'}}
record_handler (class): class with staticmethods read() and write(); see _Record
Requires that each ScheduledTask instance be appended to task_list
On stop, terminates Timer for each task in task_list
'''
task_list = {}
record = None
record_handler = _Record
def __init__(self, bus, record_handler=_Record, record_file=None):
'''
bus (obj): instance of cherrypy.engine
record_handler (object): class to handle read/write of record data <optional, default _Record>
record_file (str): path to task record file. Only effective with default record_handler. <default './tasks.json'>
record_handler MUST be a class with staticmethods of 'read' and 'write'.
See _Record class for method descriptions and requirements.
If record_file is not specified writes to this script's directory as 'tasks.json'
'''
_Record.file = record_file or _Record.file
plugins.SimplePlugin.__init__(self, bus)
SchedulerPlugin.record_handler = record_handler or SchedulerPlugin.record_handler
SchedulerPlugin.record = record_handler.read()
def start(self):
for t in self.task_list.values():
if t.auto_start:
t.start()
return
start.priority = 75
def stop(self):
''' Calls task.stop for all tasks in task_list '''
for name, task in self.task_list.items():
task.stop()
def restart(self):
''' Calls task.restart for all tasks in task_list '''
for name, task in self.task_list.items():
task.restart()
@staticmethod
def ScheduledTask(hour, minute, interval, task, auto_start=True, name=None):
''' Creates and returns instance of __ScheduledTask '''
return SchedulerPlugin.__ScheduledTask(hour, minute, interval, task, auto_start, name)
class __ScheduledTask(object):
''' Class that creates a new scheduled task.
__init__:
hour (int): hour to first execute function (24hr time)
minute (int): minute to first execute function
interval (int): how many *seconds* to wait between executions
task (function): function to execute on timer
auto_start (bool): if timer should start on creation <optional - default True>
            name (str): name of scheduled task <optional - default None>
Creates a persistence_file that records the last execution of the
scheduled task. This file is updated immediately before executing
'task' and records the current time.
If a task does not have a persistence record, adds one using current
time. This ensures that long-interval tasks are not perpetually
delayed if the server updates or restarts often.
Stores persistence record in SchedulerPlugin.record as dict
Executes a given 'task' function on a scheduled basis
First execution occurs at hr:min
Subsequent executions occur at regularly afterwards, at
intervals of 'interval' seconds
'task' will always run in a separate thread
Class Methods:
start() Starts countdown to initial execution
stop() Stops countdown. Allows any in-process tasks to finish
reload() Cancels timer then calls __init__() and start(), takes same args as
__init__ and just passes them along to __init__
now() Bypasses timer and executes function now. Starts a new timer
based off the current time
Class Vars: (there are more, but interact with them at your own risk)
name (str): passed arg 'name' or name of 'task' function if not passed
interval (int): interval between executions
running (bool): if assigned task is currently being executed
last_execution (str): timestamp of last execution formatted as '%Y-%m-%d %H:%M:%S' ('2017-07-23 17:31:40')
next_execution (obj): datetime.datetime obj of next execution
Does not return
'''
def __init__(self, hour, minute, interval, task, auto_start, name):
self._init_args = [hour, minute, interval, task, auto_start, name]
self.task = task
self.name = name or task.__name__
self.interval = interval
self.running = False
self.auto_start = auto_start
self.timer = Timer(0, None)
self.next_execution = None
record = SchedulerPlugin.record.get(self.name, {}).get('last_execution')
if record:
self.last_execution = record
le = datetime.strptime(record, '%Y-%m-%d %H:%M:%S')
hour, minute = le.hour, le.minute
else:
le = datetime.today().replace(microsecond=0)
self.last_execution = str(le)
SchedulerPlugin.record_handler.write(self.name, str(le))
SchedulerPlugin.task_list[self.name] = self
def _calc_delay(self, hour, minute, interval):
''' Calculates the next possible time to run an iteration based off
                last_execution as described in the record_handler
hour (int): hour of day (24hr time)
minute (int): minute of hour
interval (int): seconds between iterations
            Hour and minute represent the time of day at which the interval begins.
This time can be in the future or past.
Calculates the shortest delay from now that the interval can happen.
If the difference between now and the delay is 0, the delay is
equal to the interval and will not execute immediately.
            If minute is greater than 60, it is rolled over into hours.
Returns int seconds until next interval
'''
rollover_hrs = 0
while minute >= 60:
minute -= 60
rollover_hrs += 1
rollover = timedelta(hours=rollover_hrs)
now = datetime.today().replace(second=0, microsecond=0)
next = now.replace(hour=hour, minute=minute) + rollover
while next < now:
next += timedelta(seconds=interval)
delay = (next - now).seconds
while delay > interval:
delay -= interval
# this prevents infinite loops if the task restarts the plugin
if delay == 0:
delay = interval
return delay
def _task(self, manual=False, restart=True):
''' Executes the task fn
manual (bool): if task is being ran by user command <optional - default False>
restart (bool): if the timer should be restarted <optional - default True>
manual flag only affects logging
Starts new timer based on self.interval
Gets current time as 'le'
Sets self.running to True, runs task, sets as False
After task is finished, le is written to record. This way tasks can have
access to their last execution time while running.
Does not return
'''
now = datetime.today()
ms_offset = now.microsecond / 1000000
now = now.replace(microsecond=0)
if manual:
logging.info('== Executing Task: {} Per User Command =='.format(self.name))
else:
logging.info('== Executing Scheduled Task: {} =='.format(self.name))
if self.running:
                logging.warning('Task {} is already running, cancelling execution.'.format(self.name))
return
self.running = True
if restart:
self.next_execution = now.replace(microsecond=0) + timedelta(seconds=self.interval)
self.timer = Timer(self.interval - ms_offset, self._task)
self.timer.start()
try:
self.task()
except Exception as _: # noqa
logging.warning('Scheduled Task {} Failed:'.format(self.name), exc_info=True)
self.running = False
self.last_execution = str(now)
SchedulerPlugin.record[self.name] = {'last_execution': str(now)}
SchedulerPlugin.record_handler.write(self.name, str(now))
if manual:
logging.info('== Finished Task: {} =='.format(self.name))
else:
logging.info('== Finished Scheduled Task: {} =='.format(self.name))
def start(self):
''' Starts timer Thread for task '''
delay = self._calc_delay(*self._init_args[:3])
self.next_execution = datetime.now().replace(microsecond=0) + timedelta(seconds=delay)
self.timer = Timer(delay, self._task)
self.timer.start()
def stop(self):
''' Stops Timer if currently running
Logs and prints task name being cancelled
Cancels timer for next task
Will not stop a task currently being executed
Allows in-process tasks to finish, will not kill thread.
'''
if self.timer and self.timer.is_alive():
logging.info('Stopping scheduled task {}.'.format(self.name))
print('Stopping scheduled task: {}'.format(self.name))
self.timer.cancel()
def reload(self, hr, min, interval, auto_start=True):
''' Reloads scheduled task to change time or interval
See self.__init__ for param descriptions.
Does not require 'task', or 'name' args since that is
stored in the class instance as self.task & self.name
Stops current timer (allowing in-process tasks to finish, same as self.stop())
Calls self.__init__ with passed args.
Starts timer.
Use to change start time or interval.
Returns bool
'''
logging.info('Reloading scheduler for {}'.format(self.name))
self.stop()
try:
self.__init__(hr, min, interval, self.task, auto_start=auto_start, name=self.name)
if auto_start:
self.start()
return True
except Exception as _: # noqa
logging.error('Unable to start task', exc_info=True)
return False
def restart(self):
''' Restarts stopped task using initial parameters
Unlike self.reload(), does not change any timing information.
Calls self.stop() to ensure timer is stopped before restarting.
Uses self._init_args to start timer based on original params.
Does not return
'''
            logging.info('Restarting scheduled task {}'.format(self.name))
self.stop()
self.__init__(*self._init_args)
if self.auto_start:
self.start()
def now(self):
''' Skips Thread timer and executes task now
If _task isn't being executed currently, stops timer and runs task.
Will restart timer if it was running when now() was called. New timer
will start from the current time
            Raises TimerConflictError if task is already running
Returns str last execution timestamp
'''
if self.running:
raise TimerConflictError('The task {} is currently being executed.'.format(self.name))
else:
restart = False
if self.timer:
self.timer.cancel()
restart = self.timer.is_alive()
self._task(manual=True, restart=restart)
return self.last_execution
class TimerConflictError(Exception):
''' Raised when a timed task is in conflict with itself '''
def __init__(self, msg=None):
        self.msg = msg if msg else 'An error occurred with the timer for a scheduled task.'
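# Minimal usage sketch: assumes CherryPy is installed and uses the default JSON
# record handler; the task, time of day and interval below are placeholders.
if __name__ == '__main__':
    import cherrypy
    def heartbeat():
        print('heartbeat at', datetime.today())
    scheduler_plugin = SchedulerPlugin(cherrypy.engine)
    SchedulerPlugin.ScheduledTask(hour=0, minute=0, interval=3600, task=heartbeat, name='heartbeat')
    scheduler_plugin.subscribe()
    cherrypy.engine.start()
    cherrypy.engine.block()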
|
torchbench/datasets/ade20k.py | xperience-ai/torchbench | 143 | 12639882 | import os
from collections import namedtuple
from PIL import Image
from torchvision.datasets.vision import VisionDataset
from torchbench.datasets.utils import download_and_extract_archive
ARCHIVE_DICT = {
"trainval": {
"url": (
"http://data.csail.mit.edu/places/ADEchallenge/"
"ADEChallengeData2016.zip"
),
"md5": "7328b3957e407ddae1d3cbf487f149ef",
"base_dir": "ADEChallengeData2016",
}
}
class ADE20K(VisionDataset):
"""`ADE20K Dataset.
ADE20K <https://groups.csail.mit.edu/vision/datasets/ADE20K/>`_
Args:
root (string): Root directory of the ADE20K dataset
split (string, optional): The image split to use, ``train`` or ``val``
download (bool, optional): If true, downloads the dataset from the
internet and puts it in root directory. If dataset is already
downloaded, it is not downloaded again.
transform (callable, optional): A function/transform that takes in a
PIL image and returns a transformed version. E.g,
``transforms.RandomCrop``
target_transform (callable, optional): A function/transform that takes
in the PIL image target and transforms it.
transforms (callable, optional): A function/transform that takes input
sample and its target as entry and returns a transformed version.
Examples:
Get dataset for training and download from internet
.. code-block:: python
dataset = ADE20K('./data/ade20k', split='train', download=True)
img, target = dataset[0]
Get dataset for validation and download from internet
.. code-block:: python
dataset = ADE20K('./data/ade20k', split='val', download=True)
img, target = dataset[0]
"""
ADE20KClass = namedtuple("ADE20KClass", ["name", "id", "color"])
classes = [
ADE20KClass("wall", 1, (120, 120, 120)),
ADE20KClass("building;edifice", 2, (180, 120, 120)),
ADE20KClass("sky", 3, (6, 230, 230)),
ADE20KClass("floor;flooring", 4, (80, 50, 50)),
ADE20KClass("tree", 5, (4, 200, 3)),
ADE20KClass("ceiling", 6, (120, 120, 80)),
ADE20KClass("road;route", 7, (140, 140, 140)),
ADE20KClass("bed", 8, (204, 5, 255)),
ADE20KClass("windowpane;window", 9, (230, 230, 230)),
ADE20KClass("grass", 10, (4, 250, 7)),
ADE20KClass("cabinet", 11, (224, 5, 255)),
ADE20KClass("sidewalk;pavement", 12, (235, 255, 7)),
ADE20KClass("person", 13, (150, 5, 61)),
ADE20KClass("earth;ground", 14, (120, 120, 70)),
ADE20KClass("door;double;door", 15, (8, 255, 51)),
ADE20KClass("table", 16, (255, 6, 82)),
ADE20KClass("mountain;mount", 17, (143, 255, 140)),
ADE20KClass("plant;flora;plant;life", 18, (204, 255, 4)),
ADE20KClass("curtain;drape;drapery;mantle;pall", 19, (255, 51, 7)),
ADE20KClass("chair", 20, (204, 70, 3)),
ADE20KClass("car;auto;automobile;machine;motorcar", 21, (0, 102, 200)),
ADE20KClass("water", 22, (61, 230, 250)),
ADE20KClass("painting;picture", 23, (255, 6, 51)),
ADE20KClass("sofa;couch;lounge", 24, (11, 102, 255)),
ADE20KClass("shelf", 25, (255, 7, 71)),
ADE20KClass("house", 26, (255, 9, 224)),
ADE20KClass("sea", 27, (9, 7, 230)),
ADE20KClass("mirror", 28, (220, 220, 220)),
ADE20KClass("rug;carpet;carpeting", 29, (255, 9, 92)),
ADE20KClass("field", 30, (112, 9, 255)),
ADE20KClass("armchair", 31, (8, 255, 214)),
ADE20KClass("seat", 32, (7, 255, 224)),
ADE20KClass("fence;fencing", 33, (255, 184, 6)),
ADE20KClass("desk", 34, (10, 255, 71)),
ADE20KClass("rock;stone", 35, (255, 41, 10)),
ADE20KClass("wardrobe;closet;press", 36, (7, 255, 255)),
ADE20KClass("lamp", 37, (224, 255, 8)),
ADE20KClass("bathtub;bathing;tub;bath;tub", 38, (102, 8, 255)),
ADE20KClass("railing;rail", 39, (255, 61, 6)),
ADE20KClass("cushion", 40, (255, 194, 7)),
ADE20KClass("base;pedestal;stand", 41, (255, 122, 8)),
ADE20KClass("box", 42, (0, 255, 20)),
ADE20KClass("column;pillar", 43, (255, 8, 41)),
ADE20KClass("signboard;sign", 44, (255, 5, 153)),
ADE20KClass("chest;of;drawers;chest;bureau;dresser", 45, (6, 51, 255)),
ADE20KClass("counter", 46, (235, 12, 255)),
ADE20KClass("sand", 47, (160, 150, 20)),
ADE20KClass("sink", 48, (0, 163, 255)),
ADE20KClass("skyscraper", 49, (140, 140, 140)),
ADE20KClass("fireplace;hearth;open;fireplace", 50, (250, 10, 15)),
ADE20KClass("refrigerator;icebox", 51, (20, 255, 0)),
ADE20KClass("grandstand;covered;stand", 52, (31, 255, 0)),
ADE20KClass("path", 53, (255, 31, 0)),
ADE20KClass("stairs;steps", 54, (255, 224, 0)),
ADE20KClass("runway", 55, (153, 255, 0)),
ADE20KClass("case;display;case;showcase;vitrine", 56, (0, 0, 255)),
ADE20KClass(
"pool;table;billiard;table;snooker;table", 57, (255, 71, 0)
),
ADE20KClass("pillow", 58, (0, 235, 255)),
ADE20KClass("screen;door;screen", 59, (0, 173, 255)),
ADE20KClass("stairway;staircase", 60, (31, 0, 255)),
ADE20KClass("river", 61, (11, 200, 200)),
ADE20KClass("bridge;span", 62, (255, 82, 0)),
ADE20KClass("bookcase", 63, (0, 255, 245)),
ADE20KClass("blind;screen", 64, (0, 61, 255)),
ADE20KClass("coffee;table;cocktail;table", 65, (0, 255, 112)),
ADE20KClass(
"toilet;can;commode;crapper;pot;potty;stool", 66, (0, 255, 133)
),
ADE20KClass("flower", 67, (255, 0, 0)),
ADE20KClass("book", 68, (255, 163, 0)),
ADE20KClass("hill", 69, (255, 102, 0)),
ADE20KClass("bench", 70, (194, 255, 0)),
ADE20KClass("countertop", 71, (0, 143, 255)),
ADE20KClass(
"stove;kitchen;stove;range;kitchen;cooking;stove", 72, (51, 255, 0)
),
ADE20KClass("palm;palm;tree", 73, (0, 82, 255)),
ADE20KClass("kitchen;island", 74, (0, 255, 41)),
ADE20KClass("computer", 75, (0, 255, 173)),
ADE20KClass("swivel;chair", 76, (10, 0, 255)),
ADE20KClass("boat", 77, (173, 255, 0)),
ADE20KClass("bar", 78, (0, 255, 153)),
ADE20KClass("arcade;machine", 79, (255, 92, 0)),
ADE20KClass("hovel;hut;hutch;shack;shanty", 80, (255, 0, 255)),
ADE20KClass(
"bus;coach;double-decker;passenger;vehicle", 81, (255, 0, 245)
),
ADE20KClass("towel", 82, (255, 0, 102)),
ADE20KClass("light;light;source", 83, (255, 173, 0)),
ADE20KClass("truck;motortruck", 84, (255, 0, 20)),
ADE20KClass("tower", 85, (255, 184, 184)),
ADE20KClass("chandelier;pendant;pendent", 86, (0, 31, 255)),
ADE20KClass("awning;sunshade;sunblind", 87, (0, 255, 61)),
ADE20KClass("streetlight;street;lamp", 88, (0, 71, 255)),
ADE20KClass("booth;cubicle;stall;kiosk", 89, (255, 0, 204)),
ADE20KClass("television", 90, (0, 255, 194)),
ADE20KClass("airplane;aeroplane;plane", 91, (0, 255, 82)),
ADE20KClass("dirt;track", 92, (0, 10, 255)),
ADE20KClass(
"apparel;wearing;apparel;dress;clothes", 93, (0, 112, 255)
),
ADE20KClass("pole", 94, (51, 0, 255)),
ADE20KClass("land;ground;soil", 95, (0, 194, 255)),
ADE20KClass(
"bannister;banister;balustrade;balusters;handrail",
96,
(0, 122, 255),
),
ADE20KClass(
"escalator;moving;staircase;moving;stairway", 97, (0, 255, 163)
),
ADE20KClass("ottoman;pouf;pouffe;puff;hassock", 98, (255, 153, 0)),
ADE20KClass("bottle", 99, (0, 255, 10)),
ADE20KClass("buffet;counter;sideboard", 100, (255, 112, 0)),
ADE20KClass(
"poster;posting;placard;notice;bill;card", 101, (143, 255, 0)
),
ADE20KClass("stage", 102, (82, 0, 255)),
ADE20KClass("van", 103, (163, 255, 0)),
ADE20KClass("ship", 104, (255, 235, 0)),
ADE20KClass("fountain", 105, (8, 184, 170)),
ADE20KClass(
"conveyer;belt;conveyor;belt;conveyor;transporter",
106,
(133, 0, 255),
),
ADE20KClass("canopy", 107, (0, 255, 92)),
ADE20KClass(
"washer;automatic;washer;washing;machine", 108, (184, 0, 255)
),
ADE20KClass("plaything;toy", 109, (255, 0, 31)),
ADE20KClass(
"swimming;pool;swimming;bath;natatorium", 110, (0, 184, 255)
),
ADE20KClass("stool", 111, (0, 214, 255)),
ADE20KClass("barrel;cask", 112, (255, 0, 112)),
ADE20KClass("basket;handbasket", 113, (92, 255, 0)),
ADE20KClass("waterfall;falls", 114, (0, 224, 255)),
ADE20KClass("tent;collapsible;shelter", 115, (112, 224, 255)),
ADE20KClass("bag", 116, (70, 184, 160)),
ADE20KClass("minibike;motorbike", 117, (163, 0, 255)),
ADE20KClass("cradle", 118, (153, 0, 255)),
ADE20KClass("oven", 119, (71, 255, 0)),
ADE20KClass("ball", 120, (255, 0, 163)),
ADE20KClass("food;solid;food", 121, (255, 204, 0)),
ADE20KClass("step;stair", 122, (255, 0, 143)),
ADE20KClass("tank;storage;tank", 123, (0, 255, 235)),
ADE20KClass("trade;name;brand;name;brand;marque", 124, (133, 255, 0)),
ADE20KClass("microwave;microwave;oven", 125, (255, 0, 235)),
ADE20KClass("pot;flowerpot", 126, (245, 0, 255)),
ADE20KClass(
"animal;animate;being;beast;brute;creature;fauna",
127,
(255, 0, 122),
),
ADE20KClass("bicycle;bike;wheel;cycle", 128, (255, 245, 0)),
ADE20KClass("lake", 129, (10, 190, 212)),
ADE20KClass(
"dishwasher;dish;washer;dishwashing;machine", 130, (214, 255, 0)
),
ADE20KClass(
"screen;silver;screen;projection;screen", 131, (0, 204, 255)
),
ADE20KClass("blanket;cover", 132, (20, 0, 255)),
ADE20KClass("sculpture", 133, (255, 255, 0)),
ADE20KClass("hood;exhaust;hood", 134, (0, 153, 255)),
ADE20KClass("sconce", 135, (0, 41, 255)),
ADE20KClass("vase", 136, (0, 255, 204)),
ADE20KClass(
"traffic;light;traffic;signal;stoplight", 137, (41, 0, 255)
),
ADE20KClass("tray", 138, (41, 255, 0)),
ADE20KClass(
"trash;can;garbage;wastebin;bin;ashbin;dustbin;barrel;bin",
139,
(173, 0, 255),
),
ADE20KClass("fan", 140, (0, 245, 255)),
ADE20KClass("pier;wharf;wharfage;dock", 141, (71, 0, 255)),
ADE20KClass("crt;screen", 142, (122, 0, 255)),
ADE20KClass("plate", 143, (0, 255, 184)),
ADE20KClass("monitor;monitoring;device", 144, (0, 92, 255)),
ADE20KClass("bulletin;board;notice;board", 145, (184, 255, 0)),
ADE20KClass("shower", 146, (0, 133, 255)),
ADE20KClass("radiator", 147, (255, 214, 0)),
ADE20KClass("glass;drinking;glass", 148, (25, 194, 194)),
ADE20KClass("clock", 149, (102, 255, 0)),
ADE20KClass("flag", 150, (92, 0, 255)),
]
def __init__(
self,
root,
split="train",
download=False,
transform=None,
target_transform=None,
transforms=None,
):
super(ADE20K, self).__init__(
root, transforms, transform, target_transform
)
base_dir = ARCHIVE_DICT["trainval"]["base_dir"]
if split not in ["train", "val"]:
raise ValueError(
'Invalid split! Please use split="train" or split="val"'
)
if split == "train":
self.images_dir = os.path.join(
self.root, base_dir, "images", "training"
)
self.targets_dir = os.path.join(
self.root, base_dir, "annotations", "training"
)
elif split == "val":
self.images_dir = os.path.join(
self.root, base_dir, "images", "validation"
)
self.targets_dir = os.path.join(
self.root, base_dir, "annotations", "validation"
)
self.split = split
if download:
self.download()
self.images = []
self.targets = []
for file_name in os.listdir(self.images_dir):
self.images.append(os.path.join(self.images_dir, file_name))
self.targets.append(
os.path.join(self.targets_dir, file_name.replace("jpg", "png"))
)
def download(self):
if not os.path.isdir(self.images_dir) or not os.path.isdir(
self.targets_dir
):
archive_dict = ARCHIVE_DICT["trainval"]
download_and_extract_archive(
archive_dict["url"],
self.root,
extract_root=self.root,
md5=archive_dict["md5"],
)
else:
            msg = (
                "You set download=True, but the ADEChallengeData2016 folder "
                "already exists in the root directory. If you want to "
                "re-download or re-extract the archive, delete the folder."
            )
print(msg)
def __getitem__(self, index):
"""Getitem special method.
Args:
index (int): Index
Returns:
tuple: (image, target)
"""
image = Image.open(self.images[index]).convert("RGB")
target = Image.open(self.targets[index])
if self.transforms is not None:
image, target = self.transforms(image, target)
return image, target
def __len__(self):
return len(self.images)
def extra_repr(self):
lines = ["Split: {split}"]
return "\n".join(lines).format(**self.__dict__)
|
Python/tdw/FBOutput/Raycast.py | felixbinder/tdw | 307 | 12639897 | <reponame>felixbinder/tdw<filename>Python/tdw/FBOutput/Raycast.py
# automatically generated by the FlatBuffers compiler, do not modify
# namespace: FBOutput
import tdw.flatbuffers
class Raycast(object):
__slots__ = ['_tab']
@classmethod
def GetRootAsRaycast(cls, buf, offset):
n = tdw.flatbuffers.encode.Get(tdw.flatbuffers.packer.uoffset, buf, offset)
x = Raycast()
x.Init(buf, n + offset)
return x
# Raycast
def Init(self, buf, pos):
self._tab = tdw.flatbuffers.table.Table(buf, pos)
# Raycast
def Hit(self):
o = tdw.flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
if o != 0:
return bool(self._tab.Get(tdw.flatbuffers.number_types.BoolFlags, o + self._tab.Pos))
return False
# Raycast
def HitObject(self):
o = tdw.flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
if o != 0:
return bool(self._tab.Get(tdw.flatbuffers.number_types.BoolFlags, o + self._tab.Pos))
return False
# Raycast
def RaycastId(self):
o = tdw.flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
if o != 0:
return self._tab.Get(tdw.flatbuffers.number_types.Int32Flags, o + self._tab.Pos)
return 0
# Raycast
def ObjectId(self):
o = tdw.flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10))
if o != 0:
return self._tab.Get(tdw.flatbuffers.number_types.Int32Flags, o + self._tab.Pos)
return 0
# Raycast
def Normal(self):
o = tdw.flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(12))
if o != 0:
x = o + self._tab.Pos
from .Vector3 import Vector3
obj = Vector3()
obj.Init(self._tab.Bytes, x)
return obj
return None
# Raycast
def Point(self):
o = tdw.flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(14))
if o != 0:
x = o + self._tab.Pos
from .Vector3 import Vector3
obj = Vector3()
obj.Init(self._tab.Bytes, x)
return obj
return None
def RaycastStart(builder): builder.StartObject(6)
def RaycastAddHit(builder, hit): builder.PrependBoolSlot(0, hit, 0)
def RaycastAddHitObject(builder, hitObject): builder.PrependBoolSlot(1, hitObject, 0)
def RaycastAddRaycastId(builder, raycastId): builder.PrependInt32Slot(2, raycastId, 0)
def RaycastAddObjectId(builder, objectId): builder.PrependInt32Slot(3, objectId, 0)
def RaycastAddNormal(builder, normal): builder.PrependStructSlot(4, tdw.flatbuffers.number_types.UOffsetTFlags.py_type(normal), 0)
def RaycastAddPoint(builder, point): builder.PrependStructSlot(5, tdw.flatbuffers.number_types.UOffsetTFlags.py_type(point), 0)
def RaycastEnd(builder): return builder.EndObject()
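# Minimal usage sketch: builds a tiny Raycast table with the module-level helpers
# and reads it back; assumes the vendored tdw.flatbuffers runtime exposes Builder
# like upstream flatbuffers.
if __name__ == '__main__':
    builder = tdw.flatbuffers.Builder(64)
    RaycastStart(builder)
    RaycastAddHit(builder, True)
    RaycastAddRaycastId(builder, 7)
    builder.Finish(RaycastEnd(builder))
    ray = Raycast.GetRootAsRaycast(builder.Output(), 0)
    print(ray.Hit(), ray.RaycastId())  # True 7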
|
readthedocs/subscriptions/managers.py | rtfd/readthedocs.org | 4,054 | 12639936 | <reponame>rtfd/readthedocs.org
"""Subscriptions managers."""
from datetime import datetime
import stripe
import structlog
from django.conf import settings
from django.db import models
from django.utils import timezone
from readthedocs.core.history import set_change_reason
from readthedocs.subscriptions.utils import get_or_create_stripe_customer
log = structlog.get_logger(__name__)
class SubscriptionManager(models.Manager):
"""Model manager for Subscriptions."""
def get_or_create_default_subscription(self, organization):
"""
Get or create a trialing subscription for `organization`.
If the organization doesn't have a subscription attached,
the following steps are executed.
- If the organization doesn't have a stripe customer, one is created.
- A new stripe subscription is created using the default plan.
- A new subscription object is created in our database
with the information from the stripe subscription.
"""
if hasattr(organization, 'subscription'):
return organization.subscription
from readthedocs.subscriptions.models import Plan
plan = Plan.objects.filter(slug=settings.ORG_DEFAULT_SUBSCRIPTION_PLAN_SLUG).first()
# This should happen only on development.
if not plan:
log.warning(
'No default plan found, not creating a subscription.',
organization_slug=organization.slug,
)
return None
stripe_customer = get_or_create_stripe_customer(organization)
stripe_subscription = stripe.Subscription.create(
customer=stripe_customer.id,
items=[{"price": plan.stripe_id}],
trial_period_days=plan.trial,
)
# Stripe renamed ``start`` to ``start_date``,
# our API calls will return the new object,
# but webhooks will still return the old object
# till we change the default version.
# TODO: use stripe_subscription.start_date after the webhook version has been updated.
start_date = getattr(
stripe_subscription, "start", getattr(stripe_subscription, "start_date")
)
return self.create(
plan=plan,
organization=organization,
stripe_id=stripe_subscription.id,
status=stripe_subscription.status,
start_date=timezone.make_aware(
datetime.fromtimestamp(int(start_date)),
),
end_date=timezone.make_aware(
datetime.fromtimestamp(int(stripe_subscription.current_period_end)),
),
trial_end_date=timezone.make_aware(
datetime.fromtimestamp(int(stripe_subscription.trial_end)),
),
)
def update_from_stripe(self, *, rtd_subscription, stripe_subscription):
"""
Update the RTD subscription object with the information of the stripe subscription.
:param subscription: Subscription object to update.
:param stripe_subscription: Stripe subscription object from API
:type stripe_subscription: stripe.Subscription
"""
# Documentation doesn't say what will be this value once the
# subscription is ``canceled``. I'm assuming that ``current_period_end``
# will have the same value than ``ended_at``
# https://stripe.com/docs/api/subscriptions/object?lang=python#subscription_object-current_period_end
start_date = getattr(stripe_subscription, 'current_period_start', None)
end_date = getattr(stripe_subscription, 'current_period_end', None)
try:
start_date = timezone.make_aware(
datetime.fromtimestamp(start_date),
)
end_date = timezone.make_aware(
datetime.fromtimestamp(end_date),
)
except TypeError:
log.error(
'Stripe subscription invalid date.',
start_date=start_date,
end_date=end_date,
stripe_subscription=stripe_subscription.id,
)
start_date = None
end_date = None
trial_end_date = None
rtd_subscription.status = stripe_subscription.status
# This should only happen if an existing user creates a new subscription,
# after their previous subscription was cancelled.
if stripe_subscription.id != rtd_subscription.stripe_id:
log.info(
'Replacing stripe subscription.',
old_stripe_subscription=rtd_subscription.stripe_id,
new_stripe_subscription=stripe_subscription.id,
)
rtd_subscription.stripe_id = stripe_subscription.id
# Update trial end date if it's present
trial_end_date = getattr(stripe_subscription, 'trial_end', None)
if trial_end_date:
try:
trial_end_date = timezone.make_aware(
datetime.fromtimestamp(trial_end_date),
)
rtd_subscription.trial_end_date = trial_end_date
except TypeError:
log.error(
'Stripe subscription trial end date invalid. ',
trial_end_date=trial_end_date,
stripe_subscription=stripe_subscription.id,
)
# Update the plan in case it was changed from the Portal.
# Try our best to match a plan that is not custom. This mostly just
# updates the UI now that we're using the Stripe Portal. A miss here
# just won't update the UI, but this shouldn't happen for most users.
from readthedocs.subscriptions.models import Plan
try:
plan = (
Plan.objects
# Exclude "custom" here, as we historically reused Stripe plan
# id for custom plans. We don't have a better attribute to
# filter on here.
.exclude(slug__contains='custom')
.exclude(name__icontains='Custom')
.get(stripe_id=stripe_subscription.plan.id)
)
rtd_subscription.plan = plan
except (Plan.DoesNotExist, Plan.MultipleObjectsReturned):
log.error(
'Plan lookup failed, skipping plan update.',
stripe_subscription=stripe_subscription.id,
stripe_plan=stripe_subscription.plan.id,
)
if stripe_subscription.status == 'canceled':
# Remove ``stripe_id`` when canceled so the customer can
# re-subscribe using our form.
rtd_subscription.stripe_id = None
elif stripe_subscription.status == 'active' and end_date:
# Save latest active date (end_date) to notify owners about their subscription
# is ending and disable this organization after N days of unpaid. We check for
# ``active`` here because Stripe will continue sending updates for the
# subscription, with a new ``end_date``, even after the subscription enters
# an unpaid state.
rtd_subscription.end_date = end_date
elif stripe_subscription.status == 'past_due' and start_date:
# When Stripe marks the subscription as ``past_due``,
# it means the usage of RTD service for the current period/month was not paid at all.
# At this point, we need to update our ``end_date`` to the last period the customer paid
# (which is the start date of the current ``past_due`` period --it could be the end date
# of the trial or the end date of the last paid period).
rtd_subscription.end_date = start_date
klass = self.__class__.__name__
change_reason = f'origin=stripe-subscription class={klass}'
# Ensure that the organization is in the correct state.
# We want to always ensure the organization is never disabled
# if the subscription is valid.
organization = rtd_subscription.organization
if stripe_subscription.status == 'active' and organization.disabled:
log.warning(
'Re-enabling organization with valid subscription.',
organization_slug=organization.slug,
stripe_subscription=rtd_subscription.id,
)
organization.disabled = False
set_change_reason(organization, change_reason)
organization.save()
set_change_reason(rtd_subscription, change_reason)
rtd_subscription.save()
return rtd_subscription
class PlanFeatureManager(models.Manager):
"""Model manager for PlanFeature."""
# pylint: disable=redefined-builtin
def get_feature(self, obj, type):
"""
Get feature `type` for `obj`.
:param obj: An organization or project instance.
:param type: The type of the feature (PlanFeature.TYPE_*).
:returns: A PlanFeature object or None.
"""
# Avoid circular imports.
from readthedocs.organizations.models import Organization
from readthedocs.projects.models import Project
if isinstance(obj, Project):
organization = obj.organizations.first()
elif isinstance(obj, Organization):
organization = obj
else:
raise TypeError
feature = self.filter(
feature_type=type,
plan__subscriptions__organization=organization,
)
return feature.first()
|
examples/app-event-loop.py | Frekby/glumpy | 1,074 | 12639967 | # -----------------------------------------------------------------------------
# Copyright (c) 2009-2016 <NAME>. All rights reserved.
# Distributed under the (new) BSD License.
# -----------------------------------------------------------------------------
""" This example shows how to run the event loop manually """
from glumpy import app
window = app.Window()
@window.event
def on_draw(dt):
window.clear()
backend = app.__backend__
clock = app.__init__(backend=backend)
count = len(backend.windows())
while count:
count = backend.process(clock.tick())
|
src/rust/iced-x86-py/src/iced_x86/OpCodeOperandKind.py | clayne/iced | 1,018 | 12639975 | <gh_stars>1000+
# SPDX-License-Identifier: MIT
# Copyright (C) 2018-present iced project and contributors
# ⚠️This file was generated by GENERATOR!🦹♂️
# pylint: disable=invalid-name
# pylint: disable=line-too-long
# pylint: disable=too-many-lines
"""
Operand kind
"""
import typing
if typing.TYPE_CHECKING:
from ._iced_x86_py import OpCodeOperandKind
else:
OpCodeOperandKind = int
NONE: OpCodeOperandKind = 0 # type: ignore
"""
No operand
"""
FARBR2_2: OpCodeOperandKind = 1 # type: ignore
"""
Far branch 16-bit offset, 16-bit segment/selector
"""
FARBR4_2: OpCodeOperandKind = 2 # type: ignore
"""
Far branch 32-bit offset, 16-bit segment/selector
"""
MEM_OFFS: OpCodeOperandKind = 3 # type: ignore
"""
Memory offset without a modrm byte (eg. ``MOV AL,[offset]``)
"""
MEM: OpCodeOperandKind = 4 # type: ignore
"""
Memory (modrm)
"""
MEM_MPX: OpCodeOperandKind = 5 # type: ignore
"""
: Memory (modrm), MPX:
16/32-bit mode: must be 32-bit addressing
64-bit mode: 64-bit addressing is forced and must not be RIP relative
"""
MEM_MIB: OpCodeOperandKind = 6 # type: ignore
"""
: Memory (modrm), MPX:
16/32-bit mode: must be 32-bit addressing
64-bit mode: 64-bit addressing is forced and must not be RIP relative
"""
MEM_VSIB32X: OpCodeOperandKind = 7 # type: ignore
"""
Memory (modrm), vsib32, ``XMM`` registers
"""
MEM_VSIB64X: OpCodeOperandKind = 8 # type: ignore
"""
Memory (modrm), vsib64, ``XMM`` registers
"""
MEM_VSIB32Y: OpCodeOperandKind = 9 # type: ignore
"""
Memory (modrm), vsib32, ``YMM`` registers
"""
MEM_VSIB64Y: OpCodeOperandKind = 10 # type: ignore
"""
Memory (modrm), vsib64, ``YMM`` registers
"""
MEM_VSIB32Z: OpCodeOperandKind = 11 # type: ignore
"""
Memory (modrm), vsib32, ``ZMM`` registers
"""
MEM_VSIB64Z: OpCodeOperandKind = 12 # type: ignore
"""
Memory (modrm), vsib64, ``ZMM`` registers
"""
R8_OR_MEM: OpCodeOperandKind = 13 # type: ignore
"""
8-bit GPR or memory
"""
R16_OR_MEM: OpCodeOperandKind = 14 # type: ignore
"""
16-bit GPR or memory
"""
R32_OR_MEM: OpCodeOperandKind = 15 # type: ignore
"""
32-bit GPR or memory
"""
R32_OR_MEM_MPX: OpCodeOperandKind = 16 # type: ignore
"""
: 32-bit GPR or memory, MPX: 16/32-bit mode: must be 32-bit addressing, 64-bit mode: 64-bit addressing is forced
"""
R64_OR_MEM: OpCodeOperandKind = 17 # type: ignore
"""
64-bit GPR or memory
"""
R64_OR_MEM_MPX: OpCodeOperandKind = 18 # type: ignore
"""
64-bit GPR or memory, MPX: 16/32-bit mode: must be 32-bit addressing, 64-bit mode: 64-bit addressing is forced
"""
MM_OR_MEM: OpCodeOperandKind = 19 # type: ignore
"""
``MM`` register or memory
"""
XMM_OR_MEM: OpCodeOperandKind = 20 # type: ignore
"""
``XMM`` register or memory
"""
YMM_OR_MEM: OpCodeOperandKind = 21 # type: ignore
"""
``YMM`` register or memory
"""
ZMM_OR_MEM: OpCodeOperandKind = 22 # type: ignore
"""
``ZMM`` register or memory
"""
BND_OR_MEM_MPX: OpCodeOperandKind = 23 # type: ignore
"""
``BND`` register or memory, MPX: 16/32-bit mode: must be 32-bit addressing, 64-bit mode: 64-bit addressing is forced
"""
K_OR_MEM: OpCodeOperandKind = 24 # type: ignore
"""
``K`` register or memory
"""
R8_REG: OpCodeOperandKind = 25 # type: ignore
"""
8-bit GPR encoded in the ``reg`` field of the modrm byte
"""
R8_OPCODE: OpCodeOperandKind = 26 # type: ignore
"""
8-bit GPR encoded in the low 3 bits of the opcode
"""
R16_REG: OpCodeOperandKind = 27 # type: ignore
"""
16-bit GPR encoded in the ``reg`` field of the modrm byte
"""
R16_REG_MEM: OpCodeOperandKind = 28 # type: ignore
"""
16-bit GPR encoded in the ``reg`` field of the modrm byte. This is a memory operand and it uses the address size prefix (``67h``) not the operand size prefix (``66h``).
"""
R16_RM: OpCodeOperandKind = 29 # type: ignore
"""
16-bit GPR encoded in the ``mod + r/m`` fields of the modrm byte
"""
R16_OPCODE: OpCodeOperandKind = 30 # type: ignore
"""
16-bit GPR encoded in the low 3 bits of the opcode
"""
R32_REG: OpCodeOperandKind = 31 # type: ignore
"""
32-bit GPR encoded in the ``reg`` field of the modrm byte
"""
R32_REG_MEM: OpCodeOperandKind = 32 # type: ignore
"""
32-bit GPR encoded in the ``reg`` field of the modrm byte. This is a memory operand and it uses the address size prefix (``67h``) not the operand size prefix (``66h``).
"""
R32_RM: OpCodeOperandKind = 33 # type: ignore
"""
32-bit GPR encoded in the ``mod + r/m`` fields of the modrm byte
"""
R32_OPCODE: OpCodeOperandKind = 34 # type: ignore
"""
32-bit GPR encoded in the low 3 bits of the opcode
"""
R32_VVVV: OpCodeOperandKind = 35 # type: ignore
"""
32-bit GPR encoded in the ``V'vvvv`` field (VEX/EVEX/XOP)
"""
R64_REG: OpCodeOperandKind = 36 # type: ignore
"""
64-bit GPR encoded in the ``reg`` field of the modrm byte
"""
R64_REG_MEM: OpCodeOperandKind = 37 # type: ignore
"""
64-bit GPR encoded in the ``reg`` field of the modrm byte. This is a memory operand and it uses the address size prefix (``67h``) not the operand size prefix (``66h``).
"""
R64_RM: OpCodeOperandKind = 38 # type: ignore
"""
64-bit GPR encoded in the ``mod + r/m`` fields of the modrm byte
"""
R64_OPCODE: OpCodeOperandKind = 39 # type: ignore
"""
64-bit GPR encoded in the low 3 bits of the opcode
"""
R64_VVVV: OpCodeOperandKind = 40 # type: ignore
"""
64-bit GPR encoded in the ``V'vvvv`` field (VEX/EVEX/XOP)
"""
SEG_REG: OpCodeOperandKind = 41 # type: ignore
"""
Segment register encoded in the ``reg`` field of the modrm byte
"""
K_REG: OpCodeOperandKind = 42 # type: ignore
"""
``K`` register encoded in the ``reg`` field of the modrm byte
"""
KP1_REG: OpCodeOperandKind = 43 # type: ignore
"""
``K`` register (+1) encoded in the ``reg`` field of the modrm byte
"""
K_RM: OpCodeOperandKind = 44 # type: ignore
"""
``K`` register encoded in the ``mod + r/m`` fields of the modrm byte
"""
K_VVVV: OpCodeOperandKind = 45 # type: ignore
"""
``K`` register encoded in the ``V'vvvv`` field (VEX/EVEX/MVEX/XOP)
"""
MM_REG: OpCodeOperandKind = 46 # type: ignore
"""
``MM`` register encoded in the ``reg`` field of the modrm byte
"""
MM_RM: OpCodeOperandKind = 47 # type: ignore
"""
``MM`` register encoded in the ``mod + r/m`` fields of the modrm byte
"""
XMM_REG: OpCodeOperandKind = 48 # type: ignore
"""
``XMM`` register encoded in the ``reg`` field of the modrm byte
"""
XMM_RM: OpCodeOperandKind = 49 # type: ignore
"""
``XMM`` register encoded in the ``mod + r/m`` fields of the modrm byte
"""
XMM_VVVV: OpCodeOperandKind = 50 # type: ignore
"""
``XMM`` register encoded in the ``V'vvvv`` field (VEX/EVEX/XOP)
"""
XMMP3_VVVV: OpCodeOperandKind = 51 # type: ignore
"""
``XMM`` register (+3) encoded in the ``V'vvvv`` field (VEX/EVEX/XOP)
"""
XMM_IS4: OpCodeOperandKind = 52 # type: ignore
"""
``XMM`` register encoded in the high 4 bits of the last 8-bit immediate (VEX/XOP only so only ``XMM0``-``XMM15``)
"""
XMM_IS5: OpCodeOperandKind = 53 # type: ignore
"""
``XMM`` register encoded in the high 4 bits of the last 8-bit immediate (VEX/XOP only so only ``XMM0``-``XMM15``)
"""
YMM_REG: OpCodeOperandKind = 54 # type: ignore
"""
``YMM`` register encoded in the ``reg`` field of the modrm byte
"""
YMM_RM: OpCodeOperandKind = 55 # type: ignore
"""
``YMM`` register encoded in the ``mod + r/m`` fields of the modrm byte
"""
YMM_VVVV: OpCodeOperandKind = 56 # type: ignore
"""
``YMM`` register encoded in the ``V'vvvv`` field (VEX/EVEX/XOP)
"""
YMM_IS4: OpCodeOperandKind = 57 # type: ignore
"""
``YMM`` register encoded in the high 4 bits of the last 8-bit immediate (VEX/XOP only so only ``YMM0``-``YMM15``)
"""
YMM_IS5: OpCodeOperandKind = 58 # type: ignore
"""
``YMM`` register encoded in the high 4 bits of the last 8-bit immediate (VEX/XOP only so only ``YMM0``-``YMM15``)
"""
ZMM_REG: OpCodeOperandKind = 59 # type: ignore
"""
``ZMM`` register encoded in the ``reg`` field of the modrm byte
"""
ZMM_RM: OpCodeOperandKind = 60 # type: ignore
"""
``ZMM`` register encoded in the ``mod + r/m`` fields of the modrm byte
"""
ZMM_VVVV: OpCodeOperandKind = 61 # type: ignore
"""
``ZMM`` register encoded in the ``V'vvvv`` field (VEX/EVEX/MVEX/XOP)
"""
ZMMP3_VVVV: OpCodeOperandKind = 62 # type: ignore
"""
``ZMM`` register (+3) encoded in the ``V'vvvv`` field (VEX/EVEX/XOP)
"""
CR_REG: OpCodeOperandKind = 63 # type: ignore
"""
``CR`` register encoded in the ``reg`` field of the modrm byte
"""
DR_REG: OpCodeOperandKind = 64 # type: ignore
"""
``DR`` register encoded in the ``reg`` field of the modrm byte
"""
TR_REG: OpCodeOperandKind = 65 # type: ignore
"""
``TR`` register encoded in the ``reg`` field of the modrm byte
"""
BND_REG: OpCodeOperandKind = 66 # type: ignore
"""
``BND`` register encoded in the ``reg`` field of the modrm byte
"""
ES: OpCodeOperandKind = 67 # type: ignore
"""
``ES`` register
"""
CS: OpCodeOperandKind = 68 # type: ignore
"""
``CS`` register
"""
SS: OpCodeOperandKind = 69 # type: ignore
"""
``SS`` register
"""
DS: OpCodeOperandKind = 70 # type: ignore
"""
``DS`` register
"""
FS: OpCodeOperandKind = 71 # type: ignore
"""
``FS`` register
"""
GS: OpCodeOperandKind = 72 # type: ignore
"""
``GS`` register
"""
AL: OpCodeOperandKind = 73 # type: ignore
"""
``AL`` register
"""
CL: OpCodeOperandKind = 74 # type: ignore
"""
``CL`` register
"""
AX: OpCodeOperandKind = 75 # type: ignore
"""
``AX`` register
"""
DX: OpCodeOperandKind = 76 # type: ignore
"""
``DX`` register
"""
EAX: OpCodeOperandKind = 77 # type: ignore
"""
``EAX`` register
"""
RAX: OpCodeOperandKind = 78 # type: ignore
"""
``RAX`` register
"""
ST0: OpCodeOperandKind = 79 # type: ignore
"""
``ST(0)`` register
"""
STI_OPCODE: OpCodeOperandKind = 80 # type: ignore
"""
``ST(i)`` register encoded in the low 3 bits of the opcode
"""
IMM4_M2Z: OpCodeOperandKind = 81 # type: ignore
"""
4-bit immediate (m2z field, low 4 bits of the /is5 immediate, eg. ``VPERMIL2PS``)
"""
IMM8: OpCodeOperandKind = 82 # type: ignore
"""
8-bit immediate
"""
IMM8_CONST_1: OpCodeOperandKind = 83 # type: ignore
"""
Constant 1 (8-bit immediate)
"""
IMM8SEX16: OpCodeOperandKind = 84 # type: ignore
"""
8-bit immediate sign extended to 16 bits
"""
IMM8SEX32: OpCodeOperandKind = 85 # type: ignore
"""
8-bit immediate sign extended to 32 bits
"""
IMM8SEX64: OpCodeOperandKind = 86 # type: ignore
"""
8-bit immediate sign extended to 64 bits
"""
IMM16: OpCodeOperandKind = 87 # type: ignore
"""
16-bit immediate
"""
IMM32: OpCodeOperandKind = 88 # type: ignore
"""
32-bit immediate
"""
IMM32SEX64: OpCodeOperandKind = 89 # type: ignore
"""
32-bit immediate sign extended to 64 bits
"""
IMM64: OpCodeOperandKind = 90 # type: ignore
"""
64-bit immediate
"""
SEG_RSI: OpCodeOperandKind = 91 # type: ignore
"""
``seg:[rSI]`` memory operand (string instructions)
"""
ES_RDI: OpCodeOperandKind = 92 # type: ignore
"""
``es:[rDI]`` memory operand (string instructions)
"""
SEG_RDI: OpCodeOperandKind = 93 # type: ignore
"""
``seg:[rDI]`` memory operand (``(V)MASKMOVQ`` instructions)
"""
SEG_RBX_AL: OpCodeOperandKind = 94 # type: ignore
"""
``seg:[rBX+al]`` memory operand (``XLATB`` instruction)
"""
BR16_1: OpCodeOperandKind = 95 # type: ignore
"""
16-bit branch, 1-byte signed relative offset
"""
BR32_1: OpCodeOperandKind = 96 # type: ignore
"""
32-bit branch, 1-byte signed relative offset
"""
BR64_1: OpCodeOperandKind = 97 # type: ignore
"""
64-bit branch, 1-byte signed relative offset
"""
BR16_2: OpCodeOperandKind = 98 # type: ignore
"""
16-bit branch, 2-byte signed relative offset
"""
BR32_4: OpCodeOperandKind = 99 # type: ignore
"""
32-bit branch, 4-byte signed relative offset
"""
BR64_4: OpCodeOperandKind = 100 # type: ignore
"""
64-bit branch, 4-byte signed relative offset
"""
XBEGIN_2: OpCodeOperandKind = 101 # type: ignore
"""
``XBEGIN``, 2-byte signed relative offset
"""
XBEGIN_4: OpCodeOperandKind = 102 # type: ignore
"""
``XBEGIN``, 4-byte signed relative offset
"""
BRDISP_2: OpCodeOperandKind = 103 # type: ignore
"""
2-byte branch offset (``JMPE`` instruction)
"""
BRDISP_4: OpCodeOperandKind = 104 # type: ignore
"""
4-byte branch offset (``JMPE`` instruction)
"""
SIBMEM: OpCodeOperandKind = 105 # type: ignore
"""
Memory (modrm) and the sib byte must be present
"""
TMM_REG: OpCodeOperandKind = 106 # type: ignore
"""
``TMM`` register encoded in the ``reg`` field of the modrm byte
"""
TMM_RM: OpCodeOperandKind = 107 # type: ignore
"""
``TMM`` register encoded in the ``mod + r/m`` fields of the modrm byte
"""
TMM_VVVV: OpCodeOperandKind = 108 # type: ignore
"""
``TMM`` register encoded in the ``V'vvvv`` field (VEX/EVEX/XOP)
"""
|
kashgari/tasks/classification/__init__.py | SharpKoi/Kashgari | 2,422 | 12639979 | # encoding: utf-8
# author: BrikerMan
# contact: <EMAIL>
# blog: https://eliyar.biz
# file: __init__.py
# time: 4:05 PM
from .abc_model import ABCClassificationModel
from .bi_gru_model import BiGRU_Model
from .bi_lstm_model import BiLSTM_Model
from .cnn_attention_model import CNN_Attention_Model
from .cnn_gru_model import CNN_GRU_Model
from .cnn_lstm_model import CNN_LSTM_Model
from .cnn_model import CNN_Model
ALL_MODELS = [
BiGRU_Model,
BiLSTM_Model,
CNN_Attention_Model,
CNN_GRU_Model,
CNN_LSTM_Model,
CNN_Model
]
if __name__ == "__main__":
pass
|
datashape/type_symbol_table.py | quantopian-enterprise/datashape | 140 | 12639980 | """
A symbol table object to hold types for the parser.
"""
from __future__ import absolute_import, division, print_function
import ctypes
from itertools import chain
from . import coretypes as ct
__all__ = ['TypeSymbolTable', 'sym']
_is_64bit = (ctypes.sizeof(ctypes.c_void_p) == 8)
def _complex(tp):
"""Simple temporary type constructor for complex"""
if tp == ct.DataShape(ct.float32):
return ct.complex_float32
elif tp == ct.DataShape(ct.float64):
return ct.complex_float64
else:
raise TypeError(
'Cannot construct a complex type with real component %s' % tp)
def _struct(names, dshapes):
"""Simple temporary type constructor for struct"""
return ct.Record(list(zip(names, dshapes)))
def _funcproto(args, ret):
"""Simple temporary type constructor for funcproto"""
return ct.Function(*chain(args, (ret,)))
def _typevar_dim(name):
"""Simple temporary type constructor for typevar as a dim"""
# Note: Presently no difference between dim and dtype typevar
return ct.TypeVar(name)
def _typevar_dtype(name):
"""Simple temporary type constructor for typevar as a dtype"""
# Note: Presently no difference between dim and dtype typevar
return ct.TypeVar(name)
def _ellipsis(name):
return ct.Ellipsis(ct.TypeVar(name))
# data types with no type constructor
no_constructor_types = [
('bool', ct.bool_),
('int8', ct.int8),
('int16', ct.int16),
('int32', ct.int32),
('int64', ct.int64),
('intptr', ct.int64 if _is_64bit else ct.int32),
('int', ct.int32),
('uint8', ct.uint8),
('uint16', ct.uint16),
('uint32', ct.uint32),
('uint64', ct.uint64),
('uintptr', ct.uint64 if _is_64bit else ct.uint32),
('float16', ct.float16),
('float32', ct.float32),
('float64', ct.float64),
('complex64', ct.complex64),
('complex128', ct.complex128),
('real', ct.float64),
('complex', ct.complex_float64),
('string', ct.string),
('json', ct.json),
('date', ct.date_),
('time', ct.time_),
('datetime', ct.datetime_),
('timedelta', ct.timedelta_),
('null', ct.null),
('void', ct.void),
('object', ct.object_),
]
# data types with a type constructor
constructor_types = [
('complex', _complex),
('string', ct.String),
('struct', _struct),
('tuple', ct.Tuple),
('funcproto', _funcproto),
('typevar', _typevar_dtype),
('option', ct.Option),
('map', ct.Map),
('time', ct.Time),
('datetime', ct.DateTime),
('timedelta', ct.TimeDelta),
('units', ct.Units),
('decimal', ct.Decimal),
('categorical', ct.Categorical),
]
# dim types with no type constructor
dim_no_constructor = [
('var', ct.Var()),
('ellipsis', ct.Ellipsis()),
]
# dim types with a type constructor
dim_constructor = [
('fixed', ct.Fixed),
('typevar', _typevar_dim),
('ellipsis', _ellipsis),
]
class TypeSymbolTable(object):
"""
This is a class which holds symbols for types and type constructors,
and is used by the datashape parser to build types during its parsing.
A TypeSymbolTable sym has four tables, as follows:
sym.dtype
Data type symbols with no type constructor.
sym.dtype_constr
Data type symbols with a type constructor. This may contain
symbols also in sym.dtype, e.g. for 'complex' and 'complex[float64]'.
sym.dim
Dimension symbols with no type constructor.
sym.dim_constr
Dimension symbols with a type constructor.
"""
__slots__ = ['dtype', 'dtype_constr', 'dim', 'dim_constr']
def __init__(self, bare=False):
# Initialize all the symbol tables to empty dicts
self.dtype = {}
self.dtype_constr = {}
self.dim = {}
self.dim_constr = {}
if not bare:
self.add_default_types()
def add_default_types(self):
"""
Adds all the default datashape types to the symbol table.
"""
self.dtype.update(no_constructor_types)
self.dtype_constr.update(constructor_types)
self.dim.update(dim_no_constructor)
self.dim_constr.update(dim_constructor)
# Create the default global type symbol table
sym = TypeSymbolTable()
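# Illustrative usage (editor's sketch, not part of the original module): the
# parser resolves plain names through ``sym.dtype``/``sym.dim`` and
# parameterized types through the constructor tables, e.g.:
#
#   sym.dtype['int32']                                   # -> ct.int32
#   sym.dim['var']                                        # -> ct.Var()
#   sym.dtype_constr['complex'](ct.DataShape(ct.float64)) # -> ct.complex_float64
#   sym.dim_constr['fixed'](3)                             # -> ct.Fixed(3)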
|
examples/custom_bootloader/bootloader_override/example_test.py | lovyan03/esp-idf | 8,747 | 12640007 | from __future__ import print_function
import ttfw_idf
@ttfw_idf.idf_example_test(env_tag='Example_GENERIC', target=['esp32', 'esp32s2', 'esp32c3'])
def test_custom_bootloader_impl_example(env, _): # type: ignore
# Test with default build configurations
dut = env.get_dut('main', 'examples/custom_bootloader/bootloader_override')
dut.start_app()
# Expect to read a message from the custom bootloader
# This message is defined in the Kconfig file, retrieve it while deleting
# leading and trailing quotes (")
welcome_message = dut.app.get_sdkconfig()['CONFIG_EXAMPLE_BOOTLOADER_WELCOME_MESSAGE'].strip("\"")
dut.expect(welcome_message)
# Expect to read a message from the user application
dut.expect('Application started!')
if __name__ == '__main__':
test_custom_bootloader_impl_example()
|
misc/update/python/nntpproxy.py | sy3kic/nZEDb | 472 | 12640011 | <reponame>sy3kic/nZEDb<gh_stars>100-1000
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
import time
import random
import socket
import SocketServer
import socketpool
from lib.pynntp.nntp import nntp
from lib.info import bcolors
from socketpool import util
class NNTPClientConnector(socketpool.Connector, nntp.NNTPClient):
def __init__(self, host, port, backend_mod, pool=None,
username="anonymous", password="<PASSWORD>", timeout=60, use_ssl=False):
self.host = host
self.port = port
self.backend_mod = backend_mod
self._pool = pool
self._connected = False
self._life = time.time() - random.randint(0, 10)
self._start_time = time.time()
if backend_mod.Socket != socket.socket:
raise ValueError("Bad backend")
nntp.NNTPClient.__init__(self, self.host, self.port, username, password, timeout=timeout, use_ssl=use_ssl)
self.id = self.socket.getsockname()[1]
print(bcolors.PRIMARY + "New NNTP connection to %s established with ID #%5d" %
(self.host, self.id) + bcolors.ENDC)
self._connected = True
self.xfeature_compress_gzip()
def __del__(self):
self.release()
def matches(self, **match_options):
target_host = match_options.get('host')
target_port = match_options.get('port')
return target_host == self.host and target_port == self.port
def is_connected(self):
if self._connected:
return socketpool.util.is_connected(self.socket)
return False
def handle_exception(self, exception):
print(bcolors.ERROR + str(exception) + bcolors.ENDC)
self.release()
self.invalidate()
def get_lifetime(self):
return self._life
def invalidate(self):
print(bcolors.PRIMARY + "Disconnecting from NNTP connection ID #%5d after %d seconds." %
(self.id, (time.time() - self._start_time)) + bcolors.ENDC)
self.close()
self._connected = False
self._life = -1
def release(self):
if self._pool is not None:
if self._connected:
self._pool.release_connection(self)
else:
self._pool = None
# NNTP proxy request handler for nZEDb
class NNTPProxyRequestHandler(SocketServer.StreamRequestHandler):
def handle(self):
with self.server.nntp_client_pool.connection() as nntp_client:
self.wfile.write("200 localhost NNRP Service Ready.\r\n")
for line in self.rfile:
data = line.strip()
if not data.startswith("POST"):
if data.startswith("GROUP"):
print(bcolors.ALTERNATE + "%5d %s" % (nntp_client.id, data))
else:
print(bcolors.HEADER + "%5d " % nntp_client.id) + (bcolors.PRIMARY + "%s" % data)
if data.startswith("XOVER"):
try:
rng = data.split(None, 1)[1]
rng = tuple(map(int, rng.split("-")))
xover_gen = nntp_client.xover_gen(rng)
self.wfile.write("224 data follows\r\n")
for entry in xover_gen:
self.wfile.write("\t".join(entry) + "\r\n")
self.wfile.write(".\r\n")
except Exception as ex:
print(bcolors.ERROR + str(ex.message) + bcolors.ENDC)
self.wfile.write("503 internal server error\r\n")
elif data.startswith("BODY"):
msgid = data.split(None, 1)[1]
try:
body = nntp_client.body(msgid)
self.wfile.write("222 %s\r\n" % msgid)
self.wfile.write(body.replace("\r\n.", "\r\n.."))
self.wfile.write(".\r\n")
except Exception as ex:
print(bcolors.ERROR + str(ex.message) + bcolors.ENDC)
self.wfile.write("430 no such article\r\n")
elif data.startswith("GROUP"):
try:
total, first, last, group = nntp_client.group(data.split(None, 1)[1])
self.wfile.write("211 %d %d %d %s\r\n" % (total, first, last, group))
except Exception as ex:
print(bcolors.ERROR + str(ex.message) + bcolors.ENDC)
self.wfile.write("411 no such news group\r\n")
elif data.startswith("LIST OVERVIEW.FMT"):
try:
fmt = nntp_client.list_overview_fmt()
self.wfile.write("215 Order of fields in overview database.\r\n")
fmt = "\r\n".join(["%s:%s" % (f[0], "full" if f[1] else "") for f in fmt]) + "\r\n"
self.wfile.write(fmt)
self.wfile.write(".\r\n")
except Exception as ex:
print(bcolors.ERROR + str(ex.message) + bcolors.ENDC)
self.wfile.write("503 internal server error\r\n")
elif data.startswith("HEAD"):
msgid = data.split(None, 1)[1]
try:
head = nntp_client.head(msgid)
self.wfile.write("221 %s\r\n" % msgid)
head = "\r\n".join([": ".join(item) for item in head.items()]) + "\r\n\r\n"
self.wfile.write(head)
self.wfile.write(".\r\n")
except Exception as ex:
print(bcolors.ERROR + str(ex.message) + bcolors.ENDC)
self.wfile.write("430 no such article\r\n")
elif data.startswith("ARTICLE"):
msgid = data.split(None, 1)[1]
try:
article = nntp_client.article(msgid, False)
# check no of return values for compatibility with pynntp<=0.8.3
if len(article) == 2:
articleno, head, body = 0, article[0], article[1]
else:
articleno, head, body = article
self.wfile.write("220 %d %s\r\n" % (articleno, msgid))
head = "\r\n".join([": ".join(item) for item in head.items()]) + "\r\n\r\n"
self.wfile.write(head)
self.wfile.write(body.replace("\r\n.", "\r\n.."))
self.wfile.write(".\r\n")
except Exception as ex:
print(bcolors.ERROR + str(ex.message) + bcolors.ENDC)
self.wfile.write("430 no such article\r\n")
elif data == "LIST":
try:
list_gen = nntp_client.list_gen()
self.wfile.write("215 list of newsgroups follows\r\n")
for entry in list_gen:
self.wfile.write("%s %d %d %s\r\n" % entry)
self.wfile.write(".\r\n")
except Exception as ex:
print(bcolors.ERROR + str(ex.message) + bcolors.ENDC)
self.wfile.write("503 internal server error\r\n")
elif data.startswith("LIST ACTIVE") and not data.startswith("LIST ACTIVE.TIMES"):
try:
pattern = data[11:].strip() or None
active_gen = nntp_client.list_active_gen(pattern)
self.wfile.write("215 list of newsgroups follows\r\n")
for entry in active_gen:
self.wfile.write("%s %d %d %s\r\n" % entry)
self.wfile.write(".\r\n")
except Exception as ex:
print(bcolors.ERROR + str(ex.message) + bcolors.ENDC)
self.wfile.write("503 internal server error\r\n")
elif data.startswith("AUTHINFO user") or data.startswith("AUTHINFO pass"):
self.wfile.write("281 Ok\r\n")
elif data.startswith("XFEATURE"):
self.wfile.write("290 feature enabled\r\n")
elif data.startswith("QUIT"):
self.wfile.write("205 Connection closing\r\n")
break
else:
self.wfile.write("500 What?\r\n")
# NNTP proxy server for nZEDb
class NNTPProxyServer(SocketServer.ThreadingMixIn, SocketServer.TCPServer):
allow_reuse_address = True
def __init__(self, server_address, request_handler, nntp_client_pool_obj, bind_and_activate=True):
SocketServer.TCPServer.__init__(self, server_address, request_handler, bind_and_activate=bind_and_activate)
self.nntp_client_pool = nntp_client_pool_obj
if __name__ == "__main__":
import sys
try:
if len(sys.argv) == 1:
import os
pathname = os.path.abspath(os.path.dirname(sys.argv[0]))
with open(pathname+"/lib/nntpproxy.conf", "rb") as fd:
config = json.load(fd)
else:
with open(sys.argv[1], "rb") as fd:
config = json.load(fd)
except IndexError:
sys.stderr.write("Usage: %s configfile\n" % sys.argv[0])
sys.exit(1)
except IOError as e:
sys.stderr.write("Failed to open config file (%s)\n" % e)
sys.exit(1)
except ValueError as e:
sys.stderr.write("Failed to parse config file (%s)\n" % e)
sys.exit(1)
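# Expected shape of the JSON config loaded above (key names inferred from the
# lookups in this block; hosts, credentials and sizes are placeholder values):
#
# {
#   "proxy":  {"host": "127.0.0.1", "port": 1119},
#   "usenet": {"host": "news.example.com", "port": 563, "use_ssl": true,
#              "username": "user", "password": "pass", "timeout": 60},
#   "pool":   {"size": 10}
# }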
nntp_client_pool = socketpool.ConnectionPool(
NNTPClientConnector,
retry_max=3,
retry_delay=1,
timeout=-1,
max_lifetime=30000.,
max_size=int(config["pool"]["size"]),
options=config["usenet"]
)
proxy = NNTPProxyServer((config["proxy"]["host"], config["proxy"]["port"]),
NNTPProxyRequestHandler, nntp_client_pool)
remote = (config["usenet"]["host"], config["usenet"]["port"])
print(bcolors.PRIMARY +
"NNTP proxy server started on: %s:%d, using a maximum pool size of %d." %
(config["proxy"]["host"], config["proxy"]["port"], config["pool"]["size"]))
proxy.serve_forever()
|
hoomd/data/__init__.py | XT-Lee/hoomd-blue | 204 | 12640017 | """Particle data local access."""
from .array import HOOMDArray, HOOMDGPUArray
from .local_access import (AngleLocalAccessBase, BondLocalAccessBase,
ConstraintLocalAccessBase, DihedralLocalAccessBase,
ImproperLocalAccessBase, PairLocalAccessBase,
ParticleLocalAccessBase)
from .local_access_cpu import LocalSnapshot
from .local_access_gpu import LocalSnapshotGPU
|
linepx/datasets/dataloader.py | moliushang/wireframe_ | 148 | 12640020 | import sys
sys.path.append("..")
import datasets.init_dataset as init_dataset
from torch.utils.data.dataloader import *
class myDataLoader(DataLoader):
def __init__(self, dataset, batch_size=1, shuffle=False, sampler=None, batch_sampler=None, num_workers=0, collate_fn=default_collate, pin_memory=False, drop_last=False):
DataLoader.__init__(self, dataset, batch_size, shuffle, sampler, batch_sampler, num_workers, collate_fn, pin_memory, drop_last)
def create(opt):
loaders = []
for split in ['train', 'val']:
dataset = init_dataset.create(opt, split)
if split == 'train':
loaders.append(myDataLoader(dataset=dataset, batch_size=opt.batchSize, shuffle=True, num_workers=opt.nThreads))
elif split == 'val':
loaders.append(myDataLoader(dataset=dataset, batch_size=opt.batchSize, shuffle=False, num_workers=opt.nThreads))
return loaders[0], loaders[1]
|
src/confluent/azext_confluent/tests/latest/test_confluent_term_accept_flow.py | haroonf/azure-cli-extensions | 207 | 12640022 | <filename>src/confluent/azext_confluent/tests/latest/test_confluent_term_accept_flow.py
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from unittest import mock
import time
from azure.cli.testsdk import ScenarioTest, ResourceGroupPreparer
from azure.cli.testsdk.scenario_tests import AllowLargeResponse
from .test_confluent_scenario import mock_jwt_decode, mock_list_role_assignments
class TestTermAcceptFlow(ScenarioTest):
@ResourceGroupPreparer(name_prefix='cli_test_term_accept_basic_flow', location='eastus2euap')
@AllowLargeResponse()
def test_term_accept_basic_flow(self, resource_group):
self.kwargs.update({
'rg': resource_group,
'offerId': 'confluent-cloud-azure-stag',
'planId': 'confluent-cloud-azure-payg-stag',
'publisherId': 'confluentinc',
'namespace': 'Microsoft.Confluent',
'organizationName': 'cliTestOrg-1'
})
self.cmd('az confluent offer-detail show '
'--publisher-id {publisherId} '
'--offer-id {offerId}')
self.cmd('az term accept '
'--product "{offerId}" '
'--plan "{planId}" '
'--publisher "{publisherId}"')
self.cmd('provider register -n {namespace} ')
result = self.cmd('provider show -n {namespace}').get_output_in_json()
while result['registrationState'] != 'Registered':
time.sleep(5)
result = self.cmd('provider show -n {namespace}').get_output_in_json()
with mock.patch('jwt.decode', mock_jwt_decode):
with mock.patch('azure.cli.command_modules.role.custom.list_role_assignments', mock_list_role_assignments):
self.cmd('az confluent organization create '
'--location "eastus2euap" '
'--offer-id "{offerId}" '
'--plan-id "{planId}" '
'--plan-name "Confluent Cloud - Pay as you Go" '
'--publisher-id "{publisherId}" '
'--term-unit "P1M" '
'--tags environment="Dev" '
'--name "{organizationName}" '
'--resource-group "{rg}"')
|
app/grandchallenge/participants/apps.py | njmhendrix/grand-challenge.org | 101 | 12640025 | from django.apps import AppConfig
class ParticipantsConfig(AppConfig):
name = "grandchallenge.participants"
def ready(self):
# noinspection PyUnresolvedReferences
import grandchallenge.participants.signals # noqa: F401
|
facebook_business/test/docs_utils.py | MyrikLD/facebook-python-business-sdk | 576 | 12640029 | # Copyright 2014 Facebook, Inc.
# You are hereby granted a non-exclusive, worldwide, royalty-free license to
# use, copy, modify, and distribute this software in source code or binary
# form for use in connection with the web services and APIs provided by
# Facebook.
# As with any software that integrates with the Facebook platform, your use
# of this software is subject to the Facebook Developer Principles and
# Policies [http://developers.facebook.com/policy/]. This copyright notice
# shall be included in all copies or substantial portions of the software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
import sys
import unittest
import inspect
import re
from ..objects import *
from ..specs import *
from ..exceptions import *
class DocsDataStore(object):
_data = {}
@classmethod
def set(self, key, value):
self._data[key] = value
handle = open(DocsDataStore.get('filename'), 'a')
handle.write('docs_data#' + key + "\n" + value + "\n\n")
handle.close()
@classmethod
def get(self, key):
return self._data[key]
linted_classes = []
class DocsTestCase(unittest.TestCase):
def __init__(self, *args, **kwargs):
super(DocsTestCase, self).__init__(*args, **kwargs)
def get_aco_methods():
sdk_obj = getattr(sys.modules[__name__], 'AbstractCrudObject')
members = inspect.getmembers(sdk_obj)
member_names = [m[0] for m in members]
return member_names
errors = []
warnings = []
sdk_class_name = re.sub(r'DocsTestCase$', '', self.__class__.__name__)
if sdk_class_name not in linted_classes:
sdk_obj = getattr(sys.modules[__name__], sdk_class_name)
sdk_members = inspect.getmembers(sdk_obj, inspect.ismethod)
sdk_members = [m[0] for m in sdk_members
if m[0] not in get_aco_methods() and
not m[0].startswith('remote_')]
members = inspect.getmembers(self, inspect.ismethod)
members = [m for m in members
if (m[0].startswith('test_'))]
for member in members:
expected_string = re.sub(r'^test_', '', member[0]) + "("
sourcelines = inspect.getsourcelines(member[1])[0]
sourcelines.pop(0)
source = "".join(sourcelines).strip()
if expected_string not in source and source != "pass":
errors.append(
"Error: Expected method call to " + expected_string +
") not used in " + self.__class__.__name__ + "::" +
member[0],
)
member_names = [m[0] for m in members]
for sdk_member in sdk_members:
if "test_" + sdk_member not in member_names:
warnings.append(
"Warning: Method defined in SDK not defined in " +
"test - " + sdk_class_name + "::" + sdk_member + "()",
)
if len(warnings) > 0:
print("\n".join(warnings))
if len(errors) > 0:
print("\n".join(errors))
sys.exit()
linted_classes.append(sdk_class_name)
def tearDown(self):
account = AdAccount(DocsDataStore.get('adaccount_id'))
campaigns = account.get_campaigns()
for campaign in campaigns:
campaign.remote_delete()
def verify(self, obj, output):
def strip_spacing(content):
content = str(content)
content = re.sub(r'\s+', ' ', content)
content = re.sub(r'\n|\r', '', content)
return content
return strip_spacing(obj) == strip_spacing(output)
def create_campaign(self, counter):
campaign = Campaign(parent_id=DocsDataStore.get('adaccount_id'))
campaign['name'] = "Campaign " + str(counter)
campaign['status'] = "PAUSED"
campaign.remote_create()
return campaign
def create_adset(self, counter, campaign):
adset = AdSet(parent_id=DocsDataStore.get('adaccount_id'))
adset['name'] = "Ad Set " + str(counter)
adset['campaign_id'] = campaign['id']
adset['daily_budget'] = 1000
adset['bid_amount'] = 2
adset['billing_event'] = 'LINK_CLICKS'
adset['optimization_goal'] = 'LINK_CLICKS'
adset['status'] = 'PAUSED'
adset['daily_budget'] = 1000
adset['targeting'] = {
'geo_locations': {
'countries': ['US'],
},
'interests': [
{
"id": "6003232518610",
"name": "Parenting",
},
],
}
adset.remote_create()
return adset
def create_ad(self, counter, adset, creative):
adgroup = Ad(parent_id=DocsDataStore.get('adaccount_id'))
adgroup['name'] = "Ad " + str(counter)
adgroup['adset_id'] = adset['id']
adgroup['creative'] = {'creative_id': creative.get_id()}
adgroup['status'] = 'PAUSED'
adgroup.remote_create()
return adgroup
def create_creative(self, counter):
creative = AdCreative(parent_id=DocsDataStore.get('adaccount_id'))
creative['title'] = "My Creative " + str(counter)
creative['body'] = "This is my creative's body"
creative['object_url'] = "https://internet.org"
creative['image_hash'] = self.create_image()['hash']
creative.remote_create()
return creative
def create_creative_leads(self, counter):
image_hash = self.create_image()['hash']
link_data = LinkData()
link_data[LinkData.Field.message] = 'try it out'
link_data[LinkData.Field.link] = "www.wikipedia.com"
link_data[LinkData.Field.caption] = 'Caption'
link_data[LinkData.Field.image_hash] = image_hash
object_story_spec = ObjectStorySpec()
page_id = DocsDataStore.get('page_id')
object_story_spec[ObjectStorySpec.Field.page_id] = page_id
object_story_spec[ObjectStorySpec.Field.link_data] = link_data
creative = AdCreative(parent_id=DocsDataStore.get('adaccount_id'))
creative[AdCreative.Field.name] = 'Test Creative'
creative[AdCreative.Field.object_story_spec] = object_story_spec
creative.remote_create()
return creative
def create_image(self):
image = AdImage(parent_id=DocsDataStore.get('adaccount_id'))
image['filename'] = './facebook_business/test/misc/image.png'
image.remote_create()
return image
def create_adlabel(self):
label = AdLabel(parent_id=DocsDataStore.get('adaccount_id'))
label[AdLabel.Field.name] = 'AdLabel name'
label.remote_create()
return label
def create_custom_audience(self):
audience = CustomAudience(parent_id=DocsDataStore.get('adaccount_id'))
audience[CustomAudience.Field.subtype] = CustomAudience.Subtype.custom
audience[CustomAudience.Field.name] = 'Test Audience'
audience[CustomAudience.Field.description] = 'Autogen-docs example'
audience.remote_create()
return audience
def create_reach_frequency_prediction(self):
act_id = DocsDataStore.get('adaccount_id')
rfp = ReachFrequencyPrediction(parent_id=act_id)
rfp['frequency_cap'] = 2
rfp['start_time'] = 1449080260
rfp['stop_time'] = 1449083860
rfp['reach'] = 20
rfp['story_event_type'] = 0
rfp['prediction_mode'] = 0
rfp['target_spec'] = {
'geo_locations': {
'countries': ['US'],
},
}
rfp.remote_create()
return rfp
def create_ads_pixel(self):
account = AdAccount(DocsDataStore.get('adaccount_id'))
pixel = account.get_ads_pixels([AdsPixel.Field.code])
if pixel is None:
pixel = AdsPixel(parent_id=DocsDataStore.get('adaccount_id'))
pixel[AdsPixel.Field.name] = unique_name('Test Pixel')
pixel.remote_create()
return pixel
def create_product_catalog(self):
params = {}
params['name'] = 'Test Catalog'
product_catalog = ProductCatalog(
parent_id=DocsDataStore.get('business_id')
)
product_catalog.update(params)
product_catalog.remote_create()
return product_catalog
def create_product_set(self, product_catalog_id):
params = {}
params['name'] = 'Test Product Set'
product_set = ProductSet(parent_id=product_catalog_id)
product_set.update(params)
product_set.remote_create()
return product_set
def create_product_feed(self, product_catalog_id):
product_feed = ProductFeed(parent_id=product_catalog_id)
product_feed[ProductFeed.Field.name] = 'Test Feed'
product_feed[ProductFeed.Field.schedule] = {
'interval': 'DAILY',
'url': 'http://www.example.com/sample_feed.tsv',
'hour': 22,
}
product_feed.remote_create()
return product_feed
def store_response(self, obj):
class_name = re.sub(r'DocsTestCase$', '', self.__class__.__name__)
method = inspect.stack()[1][3]
handle = open(DocsDataStore.get('filename'), 'a')
obj_str = str(obj)
obj_str = re.sub('<', '&lt;', obj_str)
obj_str = re.sub('>', '&gt;', obj_str)
handle.write(class_name + '#' + method + "\n" + obj_str + "\n\n")
handle.close()
DocsDataStore.set('filename', '/tmp/python_sdk_docs.nlsv')
|
ga/Life.py | zhong110020/TSP | 121 | 12640053 | # -*- encoding: utf-8 -*-
SCORE_NONE = -1
class Life(object):
"""个体类"""
def __init__(self, aGene = None):
self.gene = aGene
self.score = SCORE_NONE
|
chrome/test/functional/chromeos_basic.py | nagineni/chromium-crosswalk | 231 | 12640054 | <reponame>nagineni/chromium-crosswalk
#!/usr/bin/env python
# Copyright (c) 2011 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import pyauto_functional
import pyauto
class ChromeosBasic(pyauto.PyUITest):
"""Basic tests for ChromeOS.
Requires ChromeOS to be logged in.
"""
def testAppendTabs(self):
"""Basic test for primary chrome on ChromeOS (named testing interface)."""
self.AppendTab(pyauto.GURL('about:version'))
self.assertEqual(self.GetTabCount(), 2, msg='Expected 2 tabs')
def testRestart(self):
"""Basic test which involves restarting chrome on ChromeOS."""
file_url = self.GetFileURLForDataPath('title2.html')
self.NavigateToURL(file_url)
self.assertEqual(1, len(self.GetHistoryInfo().History()))
self.RestartBrowser(clear_profile=False)
self.assertEqual(1, len(self.GetHistoryInfo().History()))
def testSetDownloadShelfVisible(self):
self.assertFalse(self.IsDownloadShelfVisible())
self.SetDownloadShelfVisible(True)
self.assertTrue(self.IsDownloadShelfVisible())
self.SetDownloadShelfVisible(False)
self.assertFalse(self.IsDownloadShelfVisible())
def testSetVolume(self):
"""Basic test for setting and getting the volume and mute state."""
volume_info = self.GetVolumeInfo()
for mute_setting in (False, True, False):
self.SetMute(mute_setting)
self.assertEqual(mute_setting, self.GetVolumeInfo()['is_mute'])
for volume_setting in (40, 0, 100, 70):
self.SetVolume(volume_setting)
self.assertEqual(volume_setting, round(self.GetVolumeInfo()['volume']))
self.SetVolume(volume_info['volume'])
self.SetMute(volume_info['is_mute'])
self.assertEqual(volume_info, self.GetVolumeInfo())
if __name__ == '__main__':
pyauto_functional.Main()
|
examples/pytorch/hilander/utils/deduce.py | ketyi/dgl | 9,516 | 12640095 | <gh_stars>1000+
"""
This file re-uses implementation from https://github.com/yl-1993/learn-to-cluster
"""
import numpy as np
from sklearn import mixture
import torch
import dgl
from .density import density_to_peaks_vectorize, density_to_peaks
__all__ = ['peaks_to_labels', 'edge_to_connected_graph', 'decode', 'build_next_level']
def _find_parent(parent, u):
idx = []
# parent is a fixed point
while (u != parent[u]):
idx.append(u)
u = parent[u]
for i in idx:
parent[i] = u
return u
def edge_to_connected_graph(edges, num):
parent = list(range(num))
for u, v in edges:
p_u = _find_parent(parent, u)
p_v = _find_parent(parent, v)
parent[p_u] = p_v
for i in range(num):
parent[i] = _find_parent(parent, i)
remap = {}
uf = np.unique(np.array(parent))
for i, f in enumerate(uf):
remap[f] = i
cluster_id = np.array([remap[f] for f in parent])
return cluster_id
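# Worked example (illustrative, not from the original file): with 5 nodes and
# edges [(0, 1), (2, 3)], union-find links 0-1 and 2-3 while node 4 stays on
# its own, so edge_to_connected_graph([(0, 1), (2, 3)], 5) returns
# array([0, 0, 1, 1, 2]) -- one connected-component label per node.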
def peaks_to_edges(peaks, dist2peak, tau):
edges = []
for src in peaks:
dsts = peaks[src]
dists = dist2peak[src]
for dst, dist in zip(dsts, dists):
if src == dst or dist >= 1 - tau:
continue
edges.append([src, dst])
return edges
def peaks_to_labels(peaks, dist2peak, tau, inst_num):
edges = peaks_to_edges(peaks, dist2peak, tau)
pred_labels = edge_to_connected_graph(edges, inst_num)
return pred_labels, edges
def get_dists(g, nbrs, use_gt):
k = nbrs.shape[1]
src_id = nbrs[:,1:].reshape(-1)
dst_id = nbrs[:,0].repeat(k - 1)
eids = g.edge_ids(src_id, dst_id)
if use_gt:
new_dists = (1 - g.edata['labels_edge'][eids]).reshape(-1, k - 1).float()
else:
new_dists = g.edata['prob_conn'][eids, 0].reshape(-1, k - 1)
ind = torch.argsort(new_dists, 1)
offset = torch.LongTensor((nbrs[:, 0] * (k - 1)).repeat(k - 1).reshape(-1, k - 1)).to(g.device)
ind = ind + offset
nbrs = torch.LongTensor(nbrs).to(g.device)
new_nbrs = torch.take(nbrs[:,1:], ind)
new_dists = torch.cat([torch.zeros((new_dists.shape[0], 1)).to(g.device), new_dists], dim=1)
new_nbrs = torch.cat([torch.arange(new_nbrs.shape[0]).view(-1, 1).to(g.device), new_nbrs], dim=1)
return new_nbrs.cpu().detach().numpy(), new_dists.cpu().detach().numpy()
def get_edge_dist(g, threshold):
if threshold == 'prob':
return g.edata['prob_conn'][:,0]
return 1 - g.edata['raw_affine']
def tree_generation(ng):
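# For every node, keep only the incoming edge with the smallest edge_dist, so
# the surviving edges form a forest whose roots are the density peaks.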
ng.ndata['keep_eid'] = torch.zeros(ng.number_of_nodes()).long() - 1
def message_func(edges):
return {'mval': edges.data['edge_dist'],
'meid': edges.data[dgl.EID]}
def reduce_func(nodes):
ind = torch.min(nodes.mailbox['mval'], dim=1)[1]
keep_eid = nodes.mailbox['meid'].gather(1, ind.view(-1, 1))
return {'keep_eid': keep_eid[:, 0]}
node_order = dgl.traversal.topological_nodes_generator(ng)
ng.prop_nodes(node_order, message_func, reduce_func)
eids = ng.ndata['keep_eid']
eids = eids[eids > -1]
edges = ng.find_edges(eids)
treeg = dgl.graph(edges, num_nodes=ng.number_of_nodes())
return treeg
def peak_propogation(treeg):
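# Nodes without incoming edges are the density peaks; each peak gets its own
# label, which is then pushed down the tree so every node inherits the label
# of the peak it descends from.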
treeg.ndata['pred_labels'] = torch.zeros(treeg.number_of_nodes()).long() - 1
peaks = torch.where(treeg.in_degrees() == 0)[0].cpu().numpy()
treeg.ndata['pred_labels'][peaks] = torch.arange(peaks.shape[0])
def message_func(edges):
return {'mlb': edges.src['pred_labels']}
def reduce_func(nodes):
return {'pred_labels': nodes.mailbox['mlb'][:, 0]}
node_order = dgl.traversal.topological_nodes_generator(treeg)
treeg.prop_nodes(node_order, message_func, reduce_func)
pred_labels = treeg.ndata['pred_labels'].cpu().numpy()
return peaks, pred_labels
def decode(g, tau, threshold, use_gt,
ids=None, global_edges=None, global_num_nodes=None, global_peaks=None):
# Edge filtering with tau and density
den_key = 'density' if use_gt else 'pred_den'
g = g.local_var()
g.edata['edge_dist'] = get_edge_dist(g, threshold)
g.apply_edges(lambda edges: {'keep': (edges.src[den_key] > edges.dst[den_key]).long() * \
(edges.data['edge_dist'] < 1 - tau).long()})
eids = torch.where(g.edata['keep'] == 0)[0]
ng = dgl.remove_edges(g, eids)
# Tree generation
ng.edata[dgl.EID] = torch.arange(ng.number_of_edges())
treeg = tree_generation(ng)
# Label propagation
peaks, pred_labels = peak_propogation(treeg)
if ids is None:
return pred_labels, peaks
# Merge with previous layers
src, dst = treeg.edges()
new_global_edges = (global_edges[0] + ids[src.numpy()].tolist(),
global_edges[1] + ids[dst.numpy()].tolist())
global_treeg = dgl.graph(new_global_edges, num_nodes=global_num_nodes)
global_peaks, global_pred_labels = peak_propogation(global_treeg)
return pred_labels, peaks, new_global_edges, global_pred_labels, global_peaks
def build_next_level(features, labels, peaks,
global_features, global_pred_labels, global_peaks):
global_peak_to_label = global_pred_labels[global_peaks]
global_label_to_peak = np.zeros_like(global_peak_to_label)
for i, pl in enumerate(global_peak_to_label):
global_label_to_peak[pl] = i
cluster_ind = np.split(np.argsort(global_pred_labels),
np.unique(np.sort(global_pred_labels), return_index=True)[1][1:])
cluster_features = np.zeros((len(peaks), global_features.shape[1]))
for pi in range(len(peaks)):
cluster_features[global_label_to_peak[pi],:] = np.mean(global_features[cluster_ind[pi],:], axis=0)
features = features[peaks]
labels = labels[peaks]
return features, labels, cluster_features
|
npm/templates/deploy.py | PaulLiang1/bazel-distribution | 275 | 12640101 | <reponame>PaulLiang1/bazel-distribution<filename>npm/templates/deploy.py
#!/usr/bin/env python
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import argparse
import os
import subprocess
import shutil
import re
# usual importing is not possible because
# this script and module with common functions
# are at different directory levels in sandbox
import tempfile
parser = argparse.ArgumentParser()
parser.add_argument('repo_type')
args = parser.parse_args()
repo_type = args.repo_type
npm_repositories = {
"snapshot": "{snapshot}",
"release": "{release}",
}
npm_registry = npm_repositories[repo_type]
npm_username, npm_password, npm_email = (
os.getenv('DEPLOY_NPM_USERNAME'),
os.getenv('DEPLOY_NPM_PASSWORD'),
os.getenv('DEPLOY_NPM_EMAIL'),
)
if not npm_username:
raise Exception(
'username should be passed via '
'$DEPLOY_NPM_USERNAME env variable'
)
if not npm_password:
raise Exception(
'password should be passed via '
'$DEPLOY_NPM_PASSWORD env variable'
)
if not npm_email:
raise Exception(
'email should be passed via '
'$DEPLOY_NPM_EMAIL env variable'
)
expect_input_tmpl = '''spawn npm adduser --registry={registry}
expect {{
"Username:" {{send "{username}\r"; exp_continue}}
"Password:" {{send "$env(PASSWORD)\r"; exp_continue}}
"Email: (this IS public)" {{send "{email}\r"; exp_continue}}
}}'''
with tempfile.NamedTemporaryFile('wt', delete=False) as expect_input_file:
expect_input_file.write(expect_input_tmpl.format(
registry=npm_registry,
username=npm_username,
email=npm_email,
))
node_path = ':'.join([
'/usr/bin/',
'/bin/',
os.path.realpath('external/nodejs/bin/nodejs/bin/'),
os.path.realpath('external/nodejs_darwin_amd64/bin/'),
os.path.realpath('external/nodejs_linux_amd64/bin/'),
os.path.realpath('external/nodejs_windows_amd64/bin/'),
])
with open(expect_input_file.name) as expect_input:
subprocess.check_call([
'/usr/bin/expect',
], stdin=expect_input, env={
'PATH': node_path,
'PASSWORD': <PASSWORD>
})
subprocess.check_call([
'npm',
'publish',
'--registry={}'.format(npm_registry),
'deploy_npm.tgz'
], env={
'PATH': node_path
})
|
capstone/capdb/migrations/0093_auto_20200226_1948.py | rachelaus/capstone | 134 | 12640106 | <reponame>rachelaus/capstone<filename>capstone/capdb/migrations/0093_auto_20200226_1948.py
# Generated by Django 2.2.10 on 2020-02-26 19:48
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('capdb', '0092_auto_20200225_1511'),
]
operations = [
migrations.AddField(
model_name='casemetadata',
name='first_page_order',
field=models.SmallIntegerField(blank=True, help_text='1-based page order of first page', null=True),
),
migrations.AddField(
model_name='casemetadata',
name='last_page_order',
field=models.SmallIntegerField(blank=True, help_text='1-based page order of last page', null=True),
),
migrations.AddField(
model_name='historicalcasemetadata',
name='first_page_order',
field=models.SmallIntegerField(blank=True, help_text='1-based page order of first page', null=True),
),
migrations.AddField(
model_name='historicalcasemetadata',
name='last_page_order',
field=models.SmallIntegerField(blank=True, help_text='1-based page order of last page', null=True),
),
migrations.AlterField(
model_name='casemetadata',
name='first_page',
field=models.CharField(blank=True, help_text='Label of first page', max_length=255, null=True),
),
migrations.AlterField(
model_name='casemetadata',
name='last_page',
field=models.CharField(blank=True, help_text='Label of last page', max_length=255, null=True),
),
migrations.AlterField(
model_name='historicalcasemetadata',
name='first_page',
field=models.CharField(blank=True, help_text='Label of first page', max_length=255, null=True),
),
migrations.AlterField(
model_name='historicalcasemetadata',
name='last_page',
field=models.CharField(blank=True, help_text='Label of last page', max_length=255, null=True),
),
]
|
support/project.py | rknop/amuse | 131 | 12640108 | <gh_stars>100-1000
DIRECTORIES = ['doc', 'src', 'test', 'examples/applications']
|
compile/make_compile_test.py | fuz-woo/gpython | 520 | 12640121 | #!/usr/bin/env python3.4
# Copyright 2018 The go-python Authors. All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.
"""
Write compile_data_test.go
"""
import sys
import ast
import subprocess
inp = [
# Constants
('''1''', "eval"),
('''"hello"''', "eval"),
('''a''', "eval"),
('''b"hello"''', "eval"),
# BinOps - strange operations to defeat constant optimizer!
('''"a"+1''', "eval"),
('''"a"-1''', "eval"),
('''"a"*"b"''', "eval"),
('''"a"/1''', "eval"),
('''"a"%1''', "eval"),
('''"a"**1''', "eval"),
('''"a"<<1''', "eval"),
('''"a">>1''', "eval"),
('''"a"|1''', "eval"),
('''"a"^1''', "eval"),
('''"a"&1''', "eval"),
('''"a"//1''', "eval"),
('''a+a''', "eval"),
('''"a"*"a"''', "eval"),
('''1''', "exec"),
('''1\n"hello"''', "exec"),
('''a+a''', "exec"),
# UnaryOps
('''~ "a"''', "eval"),
('''not "a"''', "eval"),
('''+"a"''', "eval"),
('''-"a"''', "eval"),
# Bool Ops
('''1 and 2''', "eval"),
('''1 and 2 and 3 and 4''', "eval"),
('''1 and 2''', "eval"),
('''1 or 2''', "eval"),
('''1 or 2 or 3 or 4''', "eval"),
# With brackets
('''"1"+"2"*"3"''', "eval"),
('''"1"+("2"*"3")''', "eval"),
('''(1+"2")*"3"''', "eval"),
# If expression
('''(a if b else c)+0''', "eval"),
# Compare
('''a == b''', "eval"),
('''a != b''', "eval"),
('''a < b''', "eval"),
('''a <= b''', "eval"),
('''a > b''', "eval"),
('''a >= b''', "eval"),
('''a is b''', "eval"),
('''a is not b''', "eval"),
('''a in b''', "eval"),
('''a not in b''', "eval"),
('''(a < b < c)+0''', "eval"),
('''(a < b < c < d)+0''', "eval"),
('''(a < b < c < d < e)+0''', "eval"),
# tuple
('''()''', "eval"),
#('''(1,)''', "eval"),
#('''(1,1)''', "eval"),
#('''(1,1,3,1)''', "eval"),
('''(a,)''', "eval"),
('''(a,b)''', "eval"),
('''(a,b,c,d)''', "eval"),
# list
('''[]''', "eval"),
('''[1]''', "eval"),
('''[1,1]''', "eval"),
('''[1,1,3,1]''', "eval"),
('''[a]''', "eval"),
('''[a,b]''', "eval"),
('''[a,b,c,d]''', "eval"),
# named constant
('''True''', "eval"),
('''False''', "eval"),
('''None''', "eval"),
# attribute
('''a.b''', "eval"),
('''a.b.c''', "eval"),
('''a.b.c.d''', "eval"),
('''a.b = 1''', "exec"),
('''a.b.c.d = 1''', "exec"),
('''a.b += 1''', "exec"),
('''a.b.c.d += 1''', "exec"),
('''del a.b''', "exec"),
('''del a.b.c.d''', "exec"),
# dict
('''{}''', "eval"),
('''{1:2,a:b}''', "eval"),
# set
# ('''set()''', "eval"),
('''{1}''', "eval"),
('''{1,2,a,b}''', "eval"),
# lambda
('''lambda: 0''', "eval"),
('''lambda x: 2*x''', "eval"),
('''lambda a,b=42,*args,**kw: a*b*args*kw''', "eval"),
# pass statment
('''pass''', "exec"),
# expr statement
('''(a+b)''', "exec"),
('''(a+\nb+\nc)\n''', "exec"),
# assert
('''assert a, "hello"''', "exec"),
('''assert 1, 2''', "exec"),
('''assert a''', "exec"),
('''assert 1''', "exec"),
# assign
('''a = 1''', "exec"),
('''a = b = c = 1''', "exec"),
('''a[1] = 1''', "exec"),
('''f() = 1''', "exec", SyntaxError),
# aug assign
('''a+=1''', "exec"),
('''a-=1''', "exec"),
('''a*=b''', "exec"),
('''a/=1''', "exec"),
('''a%=1''', "exec"),
('''a**=1''', "exec"),
('''a<<=1''', "exec"),
('''a>>=1''', "exec"),
('''a|=1''', "exec"),
('''a^=1''', "exec"),
('''a&=1''', "exec"),
('''a//=1''', "exec"),
('''a[1]+=1''', "exec"),
('''f() += 1''', "exec", SyntaxError),
# delete
('''del a''', "exec"),
('''del a, b''', "exec"),
('''del a[1]''', "exec"),
('''del f()''', "exec", SyntaxError),
('''\
def fn(b):
global a
del a
c = 1
def nested(d):
nonlocal b
e = b+c+d+e
f(e)
del b,c,d,e
''', "exec"),
# raise
('''raise''', "exec"),
('''raise a''', "exec"),
('''raise a from b''', "exec"),
# if
('''if a: b = c''', "exec"),
('''if a:\n b = c\nelse:\n c = d\n''', "exec"),
# while
('''while a:\n b = c''', "exec"),
('''while a:\n b = c\nelse:\n b = d\n''', "exec"),
('''while a:\n if b: break\n b = c\n''', "exec"),
('''while a:\n if b: continue\n b = c\n''', "exec"),
('''continue''', "exec", SyntaxError),
('''break''', "exec", SyntaxError),
# for
('''for a in b: pass''', "exec"),
('''for a in b:\n if a:\n break\n c = e\nelse: c = d\n''', "exec"),
('''for a in b:\n if a:\n continue\n c = e\nelse: c = d\n''', "exec"),
# call
('''f()''', "eval"),
('''f(a)''', "eval"),
('''f(a,b,c)''', "eval"),
('''f(A=a)''', "eval"),
('''f(a, b, C=d, D=d)''', "eval"),
('''f(*args)''', "eval"),
('''f(*args, **kwargs)''', "eval"),
('''f(**kwargs)''', "eval"),
('''f(a, b, *args)''', "eval"),
('''f(a, b, *args, d=e, **kwargs)''', "eval"),
('''f(a, d=e, **kwargs)''', "eval"),
('''f(a=1,a=2)''', "eval", SyntaxError),
# return
('''return''', "exec", SyntaxError),
# def
('''def fn(): pass''', "exec"),
('''def fn(a): pass''', "exec"),
('''def fn(a,b,c): pass''', "exec"),
('''def fn(a,b=1,c=2): pass''', "exec"),
('''def fn(a,*arg,b=1,c=2): pass''', "exec"),
('''def fn(a,*arg,b=1,c=2,**kwargs): pass''', "exec"),
('''def fn(a:"a",*arg:"arg",b:"b"=1,c:"c"=2,**kwargs:"kw") -> "ret": pass''', "exec"),
('''def fn(): a+b''', "exec"),
('''def fn(a,b): a+b+c+d''', "exec"),
('''def fn(x,*,a,b): x+a+b''', "exec"),
('''\
def fn(a):
global b
b = a''', "exec"),
('''def fn(): return''', "exec"),
('''def fn(): return a''', "exec"),
('''def fn():\n "docstring"\n return True''', "exec"),
('''\
def outer(o):
def inner(i):
x = 2''', "exec"),
('''\
def outer(o1,o2):
x = 1
def inner(i1,i2):
nonlocal x
x = 2
def inner2(s):
return 2*s
f = inner2(x)
l = o1+o2+i1+i2+f
return l
return inner''', "exec"),
('''\
def outer(o):
x = 17
return lambda a,b=42,*args,**kw: a*b*args*kw*x*o''', "exec"),
('''\
@wrap
def fn(o):
return o''', "exec"),
('''\
@wrap1
@wrap2("potato", 2)
@wrap3("sausage")
@wrap4
def fn(o):
return o''', "exec"),
('''\
def outer(o):
@wrap1
@wrap2("potato", o)
def inner(i):
return o+i''', "exec"),
# module docstrings
('''\
# Module
"""
A module docstring
"""
''', "exec"),
('''\
# Empty docstring
""
''', "exec"),
# class
('''\
class Dummy:
pass
''', "exec"),
('''\
@d1
@d2
class Dummy(a,b,c=d):
"A class"
pass
''', "exec"),
('''\
class Dummy:
def method(self):
return self+1
''', "exec"),
('''\
@class1
@class2(arg2)
class Dummy:
"Dummy"
@fn1
@fn2(arg2)
def method(self):
"method"
return self+1
def method2(self, m2):
"method2"
return self.method()+m2
''', "exec"),
('''\
def closure_class(a):
b = 42
class AClass:
def method(self, c):
return a+b+c
return AClass
''', "exec"),
('''\
@potato
@sausage()
class A(a,b,c=\"1\",d=\"2\",*args,**kwargs):
VAR = x
def method(self):
super().method()
return VAR
''', "exec"),
('''\
def outer(x):
class DeRefTest:
VAR = x
''', "exec"),
# comprehensions
('''[ x for x in xs ]''', "eval"),
('''{ x: y for x in xs }''', "eval"),
('''{ x for x in xs }''', "eval"),
('''( x for x in xs )''', "eval"),
('''[ x for x in xs if a ]''', "eval"),
('''{ x: y for x in xs if a if b }''', "eval"),
('''{ x for x in xs if a}''', "eval"),
('''( x for x in xs if a if b if c)''', "eval"),
('''{ x for x in [ x for x in xs if c if d ] if a if b}''', "eval"),
('''[ (x,y,z) for x in xs for y in ys for z in zs ]''', "eval"),
('''{ (x,y,z) for x in xs for y in ys if a if b for z in zs if c if d }''', "eval"),
('''{ x:(y,z) for x in xs for y in ys for z in zs }''', "eval"),
('''( (x,y,z) for x in xs for y in ys if a if b for z in zs if c if d )''', "eval"),
# with
('''\
with a:
f()
''', "exec"),
('''\
with a() as b:
f(b)
''', "exec"),
('''\
with A() as a, B() as b:
f(a,b)
''', "exec"),
('''\
with A() as a:
with B() as b:
f(a,b)
''', "exec"),
# try/except/finally/else
('''\
ok = False
try:
raise SyntaxError
except SyntaxError:
ok = True
assert ok
''', "exec"),
('''\
ok = False
try:
raise SyntaxError
except SyntaxError as e:
ok = True
assert ok
''', "exec"),
('''\
try:
f()
except Exception:
h()
''', "exec"),
('''\
try:
f()
except Exception as e:
h(e)
except (Exception1, Exception2) as e:
i(e)
except:
j()
else:
potato()
''', "exec"),
('''\
try:
f()
except:
j()
except Exception as e:
h(e)
''', "exec", SyntaxError),
('''\
try:
f()
finally:
j()
''', "exec"),
('''\
try:
f()
except Exception as e:
h(e)
finally:
j()
''', "exec"),
# import / from import
('''import mod''', "exec"),
('''import mod1, mod2, mod3''', "exec"),
('''import mod as pod, mod2 as pod2''', "exec"),
('''import mod1.mod2''', "exec"),
('''import mod1.mod2.mod3''', "exec"),
('''import mod1.mod2.mod3.mod4''', "exec"),
('''import mod1.mod2.mod3.mod4 as potato''', "exec"),
('''from mod import a''', "exec"),
('''from mod1.mod2.mod3 import *''', "exec"),
('''from mod1.mod2.mod3 import a as aa, b as bb, c''', "exec"),
# yield
('''yield''', "exec", SyntaxError),
('''yield potato''', "exec", SyntaxError),
('''\
def f():
yield
''', "exec"),
('''\
def f():
yield potato
''', "exec"),
# yield from
('''yield from range(10)''', "exec", SyntaxError),
('''\
def f():
yield from range(10)
''', "exec"),
# ellipsis
('''...''', "exec"),
# starred...
('''*a = t''', "exec", SyntaxError),
('''a, *b = t''', "exec"),
('''(a, *b) = t''', "exec"),
('''[a, *b] = t''', "exec"),
('''a, *b, c = t''', "exec"),
('''a, *b, *c = t''', "exec", SyntaxError),
('''a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,*a = t''', "exec", SyntaxError),
('''a, b, *c''', "exec", SyntaxError),
('''a, (b, c), d = t''', "exec"),
# subscript - load
("x[a]", "exec"),
("x[a:b]", "exec"),
("x[:b]", "exec"),
("x[b:]", "exec"),
("x[:]", "exec"),
("x[a:b:c]", "exec"),
("x[:b:c]", "exec"),
("x[a::c]", "exec"),
("x[a:b:]", "exec"),
("x[::c]", "exec"),
("x[:b:]", "exec"),
("x[::c]", "exec"),
("x[::]", "exec"),
("x[a,p]", "exec"),
("x[a, b]", "exec"),
("x[a, b, c]", "exec"),
("x[a, b:c, ::d]", "exec"),
("x[0, 1:2, fd00:c2b6:b24b:be67:2827:688d:e6a1:6a3b, ...]", "exec"),
# subscript - store
("x[a] = y", "exec"),
("x[a:b] = y", "exec"),
("x[:b] = y", "exec"),
("x[b:] = y", "exec"),
("x[:] = y", "exec"),
("x[a:b:c] = y", "exec"),
("x[:b:c] = y", "exec"),
("x[a::c] = y", "exec"),
("x[a:b:] = y", "exec"),
("x[::c] = y", "exec"),
("x[:b:] = y", "exec"),
("x[::c] = y", "exec"),
("x[::] = y", "exec"),
("x[a,p] = y", "exec"),
("x[a, b] = y", "exec"),
("x[a, b, c] = y", "exec"),
("x[a, b:c, ::d] = y", "exec"),
("x[0, 1:2, fd00:c2b6:b24b:be67:2827:688d:e6a1:6a3b, ...] = y", "exec"),
# subscript - aug assign (AugLoad and AugStore)
("x[a] += y", "exec"),
("x[a:b] += y", "exec"),
("x[:b] += y", "exec"),
("x[b:] += y", "exec"),
("x[:] += y", "exec"),
("x[a:b:c] += y", "exec"),
("x[:b:c] += y", "exec"),
("x[a::c] += y", "exec"),
("x[a:b:] += y", "exec"),
("x[::c] += y", "exec"),
("x[:b:] += y", "exec"),
("x[::c] += y", "exec"),
("x[::] += y", "exec"),
("x[a,p] += y", "exec"),
("x[a, b] += y", "exec"),
("x[a, b, c] += y", "exec"),
("x[a, b:c, ::d] += y", "exec"),
("x[0, 1:2, fd00:c2b6:b24b:be67:2827:688d:e6a1:6a3b, ...] += y", "exec"),
# subscript - delete
("del x[a]", "exec"),
("del x[a:b]", "exec"),
("del x[:b]", "exec"),
("del x[b:]", "exec"),
("del x[:]", "exec"),
("del x[a:b:c]", "exec"),
("del x[:b:c]", "exec"),
("del x[a::c]", "exec"),
("del x[a:b:]", "exec"),
("del x[::c]", "exec"),
("del x[:b:]", "exec"),
("del x[::c]", "exec"),
("del x[::]", "exec"),
("del x[a,p]", "exec"),
("del x[a, b]", "exec"),
("del x[a, b, c]", "exec"),
("del x[a, b:c, ::d]", "exec"),
("del x[0, 1:2, fd00:c2b6:b24b:be67:2827:688d:e6a1:6a3b, ...]", "exec"),
# continue
('''\
try:
continue
except:
pass
''', "exec", SyntaxError),
('''\
try:
pass
except:
continue
''', "exec", SyntaxError),
('''\
for x in xs:
try:
f()
except:
continue
f()
''', "exec"),
('''\
for x in xs:
try:
f()
continue
finally:
f()
''', "exec"),
('''\
for x in xs:
try:
f()
finally:
continue
''', "exec", SyntaxError),
('''\
for x in xs:
try:
f()
finally:
try:
continue
except:
pass
''', "exec", SyntaxError),
('''\
try:
continue
except:
pass
''', "exec", SyntaxError),
('''\
try:
pass
except:
continue
''', "exec", SyntaxError),
('''\
while truth():
try:
f()
except:
continue
f()
''', "exec"),
('''\
while truth():
try:
f()
continue
finally:
f()
''', "exec"),
('''\
while truth():
try:
f()
finally:
continue
''', "exec", SyntaxError),
('''\
while truth():
try:
f()
finally:
try:
continue
except:
pass
''', "exec", SyntaxError),
('''\
while x:
with c:
continue
''', "exec"),
# interactive
('''print("hello world!")\n''', "single"),
# FIXME ('''if True:\n "hello world!"\n''', "single"),
# FIXME ('''def fn(x):\n "hello world!"\n''', "single"),
]
def string(s):
if isinstance(s, str):
return '"%s"' % s
elif isinstance(s, bytes):
out = '"'
for b in s:
out += "\\x%02x" % b
out += '"'
return out
else:
raise AssertionError("Unknown string %r" % s)
def strings(ss):
"""Dump a list of py strings into go format"""
return "[]string{"+",".join(string(s) for s in ss)+"}"
codeObjectType = type(strings.__code__)
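# Render a Python constant as the equivalent Go literal for gpython's py package
# (py.String, py.Int, py.Float, py.Tuple, nested *py.Code structs, ...).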
def const(x):
if isinstance(x, str):
return 'py.String("%s")' % x.encode("unicode-escape").decode("utf-8")
elif isinstance(x, bool):
if x:
return 'py.True'
return 'py.False'
elif isinstance(x, int):
return 'py.Int(%d)' % x
elif isinstance(x, float):
return 'py.Float(%g)' % x
elif isinstance(x, bytes):
return 'py.Bytes("%s")' % x.decode("latin1")
elif isinstance(x, tuple):
return 'py.Tuple{%s}' % ",".join(const(y) for y in x)
elif isinstance(x, codeObjectType):
return "\n".join([
"&py.Code{",
"Argcount: %s," % x.co_argcount,
"Kwonlyargcount: %s," % x.co_kwonlyargcount,
"Nlocals: %s," % x.co_nlocals,
"Stacksize: %s," % x.co_stacksize,
"Flags: %s," % x.co_flags,
"Code: %s," % string(x.co_code),
"Consts: %s," % consts(x.co_consts),
"Names: %s," % strings(x.co_names),
"Varnames: %s," % strings(x.co_varnames),
"Freevars: %s," % strings(x.co_freevars),
"Cellvars: %s," % strings(x.co_cellvars),
# "Cell2arg []byte // Maps cell vars which are arguments".
"Filename: %s," % string(x.co_filename),
"Name: %s," % string(x.co_name),
"Firstlineno: %d," % x.co_firstlineno,
"Lnotab: %s," % string(x.co_lnotab),
"}",
])
elif x is None:
return 'py.None'
elif x is ...:
return 'py.Ellipsis'
else:
raise AssertionError("Unknown const %r" % x)
def consts(xs):
return "[]py.Object{"+",".join(const(x) for x in xs)+"}"
def _compile(source, mode):
"""compile source with mode"""
a = compile(source=source, filename="<string>", mode=mode, dont_inherit=True, optimize=0)
return a, const(a)
def escape(x):
"""Encode strings with backslashes for python/go"""
return x.replace('\\', "\\\\").replace('"', r'\"').replace("\n", r'\n').replace("\t", r'\t')
def main():
"""Write compile_data_test.go"""
path = "compile_data_test.go"
out = ["""// Test data generated by make_compile_test.py - do not edit
package compile
import (
"github.com/go-python/gpython/py"
)
var compileTestData = []struct {
in string
mode string // exec, eval or single
out *py.Code
exceptionType *py.Type
errString string
}{"""]
for x in inp:
source, mode = x[:2]
if len(x) > 2:
exc = x[2]
try:
_compile(source, mode)
except exc as e:
error = e.msg
else:
raise ValueError("Expecting exception %s" % exc)
gostring = "nil"
exc_name = "py.%s" % exc.__name__
else:
code, gostring = _compile(source, mode)
exc_name = "nil"
error = ""
out.append('{"%s", "%s", %s, %s, "%s"},' % (escape(source), mode, gostring, exc_name, escape(error)))
out.append("}")
print("Writing %s" % path)
with open(path, "w") as f:
f.write("\n".join(out))
f.write("\n")
subprocess.check_call(["gofmt", "-w", path])
if __name__ == "__main__":
main()
|
discomll/tests/tests_regression.py | romanorac/discomll | 103 | 12640124 | <reponame>romanorac/discomll<filename>discomll/tests/tests_regression.py
import unittest
import numpy as np
from disco.core import result_iterator
import datasets
class Tests_Regression(unittest.TestCase):
@classmethod
def setUpClass(self):
import chunk_testdata
from disco import ddfs
ddfs = ddfs.DDFS()
if not ddfs.exists("test:ex3"):
print "Chunking test datasets to DDFS..."
chunk_testdata.chunk_testdata()
def test_lwlr(self):
# python -m unittest tests_regression.Tests_Regression.test_lwlr
import locally_weighted_linear_regression as lwlr1
from discomll.regression import locally_weighted_linear_regression as lwlr2
x_train, y_train, x_test, y_test = datasets.regression_data()
train_data, test_data = datasets.regression_data_discomll()
lwlr1 = lwlr1.Locally_Weighted_Linear_Regression()
taus = [1, 10, 25]
sorted_indices = np.argsort([str(el) for el in x_test[:, 1].tolist()])
for tau in taus:
thetas1, estimation1 = lwlr1.fit(x_train, y_train, x_test, tau=tau)
thetas1, estimation1 = np.array(thetas1)[sorted_indices], np.array(estimation1)[sorted_indices]
results = lwlr2.fit_predict(train_data, test_data, tau=tau)
thetas2, estimation2 = [], []
for x_id, (est, thetas) in result_iterator(results):
estimation2.append(est)
thetas2.append(thetas)
self.assertTrue(np.allclose(thetas1, thetas2, atol=1e-8))
self.assertTrue(np.allclose(estimation1, estimation2, atol=1e-3))
def test_lin_reg(self):
# python -m unittest tests_regression.Tests_Regression.test_lin_reg
from sklearn import linear_model
from discomll.regression import linear_regression
x_train, y_train, x_test, y_test = datasets.ex3()
train_data, test_data = datasets.ex3_discomll()
lin_reg = linear_model.LinearRegression() # Create linear regression object
lin_reg.fit(x_train, y_train) # Train the model using the training sets
thetas1 = [lin_reg.intercept_] + lin_reg.coef_[1:].tolist()
prediction1 = lin_reg.predict(x_test)
thetas_url = linear_regression.fit(train_data)
thetas2 = [v for k, v in result_iterator(thetas_url["linreg_fitmodel"])]
results = linear_regression.predict(test_data, thetas_url)
prediction2 = [v[0] for k, v in result_iterator(results)]
self.assertTrue(np.allclose(thetas1, thetas2))
self.assertTrue(np.allclose(prediction1, prediction2))
if __name__ == '__main__':
unittest.main()
|
cupid/utils.py | wjsi/aliyun-odps-python-sdk | 412 | 12640145 | <reponame>wjsi/aliyun-odps-python-sdk<filename>cupid/utils.py<gh_stars>100-1000
# Copyright 1999-2017 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
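# Look up a Java-style "-Dsome.property=value" flag on the command line and fall
# back to the SOME_PROPERTY environment variable when the flag is absent.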
def get_property(name, args=None):
environ_name = '_'.join(n.upper() for n in name.split('.'))
args = args or sys.argv[1:]
for arg in args:
        if arg.startswith('-D%s=' % name):
_, value_part = arg.split('=', 1)
if value_part.startswith('\"') and value_part.endswith('\"'):
value_part = value_part[1:-1].replace('""', '"')
return value_part
return get_environ(environ_name)
# resolve environment variable values, stripping surrounding double quotes if present
def get_environ(key, default=None):
val = os.environ.get(key)
if val:
if val.startswith('"'):
val = val.strip('"')
return val
return default
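# Compose the container image name "<prefix><app_name>:<version>" from the cupid
# options; returns None when neither an image prefix nor a version is configured.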
def build_image_name(app_name):
from .config import options
prefix = options.cupid.image_prefix
version = options.cupid.image_version
if prefix is None:
if version is None:
return None
else:
return app_name + ':' + version
else:
return prefix + app_name + ':' + version
|
tools/perf/page_sets/rendering/top_real_world_desktop.py | zealoussnow/chromium | 14,668 | 12640158 | # Copyright 2018 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from telemetry.page import shared_page_state
from telemetry.util import wpr_modes
from page_sets.login_helpers import google_login
from page_sets.login_helpers import linkedin_login
from page_sets.rendering import rendering_story
from page_sets.rendering import story_tags
class TopRealWorldDesktopPage(rendering_story.RenderingStory):
ABSTRACT_STORY = True
TAGS = [story_tags.GPU_RASTERIZATION, story_tags.TOP_REAL_WORLD_DESKTOP]
def __init__(self,
page_set,
shared_page_state_class,
name_suffix='',
extra_browser_args=None):
super(TopRealWorldDesktopPage, self).__init__(
page_set=page_set,
shared_page_state_class=shared_page_state_class,
name_suffix=name_suffix,
extra_browser_args=extra_browser_args)
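  # Scroll through the whole page once; when scroll_forever is set, keep
  # bouncing between the top and the bottom of the page.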
def RunPageInteractions(self, action_runner):
action_runner.Wait(1)
with action_runner.CreateGestureInteraction('ScrollAction'):
action_runner.ScrollPage()
if self.story_set.scroll_forever:
while True:
action_runner.ScrollPage(direction='up')
action_runner.ScrollPage(direction='down')
class GoogleWebSearch2018Page(TopRealWorldDesktopPage):
""" Why: top google property; a google tab is often open """
BASE_NAME = 'google_web_search'
YEAR = '2018'
URL = 'https://www.google.com/#hl=en&q=barack+obama'
def __init__(self,
page_set,
shared_page_state_class=shared_page_state.SharedPageState,
name_suffix='',
extra_browser_args=None):
super(GoogleWebSearch2018Page, self).__init__(
page_set=page_set,
shared_page_state_class=shared_page_state_class,
name_suffix=name_suffix,
extra_browser_args=extra_browser_args)
def RunNavigateSteps(self, action_runner):
super(GoogleWebSearch2018Page, self).RunNavigateSteps(action_runner)
action_runner.WaitForElement(text='Next')
class GoogleImageSearch2018Page(TopRealWorldDesktopPage):
""" Why: tough image case; top google properties """
BASE_NAME = 'google_image_search'
YEAR = '2018'
URL = 'https://www.google.com/search?q=cats&tbm=isch'
def __init__(self,
page_set,
shared_page_state_class=shared_page_state.SharedPageState,
name_suffix='',
extra_browser_args=None):
super(GoogleImageSearch2018Page, self).__init__(
page_set=page_set,
shared_page_state_class=shared_page_state_class,
name_suffix=name_suffix,
extra_browser_args=extra_browser_args)
def RunNavigateSteps(self, action_runner):
super(GoogleImageSearch2018Page, self).RunNavigateSteps(action_runner)
class GooglePlus2018Page(TopRealWorldDesktopPage):
""" Why: social; top google property; Public profile; infinite scrolls """
BASE_NAME = 'google_plus'
YEAR = '2018'
URL = 'https://plus.google.com/110031535020051778989/posts'
def __init__(self,
page_set,
shared_page_state_class=shared_page_state.SharedPageState,
name_suffix='',
extra_browser_args=None):
super(GooglePlus2018Page, self).__init__(
page_set=page_set,
shared_page_state_class=shared_page_state_class,
name_suffix=name_suffix,
extra_browser_args=extra_browser_args)
def RunNavigateSteps(self, action_runner):
super(GooglePlus2018Page, self).RunNavigateSteps(action_runner)
action_runner.WaitForElement(text='Posts')
class Youtube2018Page(TopRealWorldDesktopPage):
""" Why: #3 (Alexa global) """
BASE_NAME = 'youtube'
YEAR = '2018'
URL = 'http://www.youtube.com'
def __init__(self,
page_set,
shared_page_state_class=shared_page_state.SharedPageState,
name_suffix='',
extra_browser_args=None):
super(Youtube2018Page, self).__init__(
page_set=page_set,
shared_page_state_class=shared_page_state_class,
name_suffix=name_suffix,
extra_browser_args=extra_browser_args)
def RunNavigateSteps(self, action_runner):
super(Youtube2018Page, self).RunNavigateSteps(action_runner)
action_runner.WaitForElement(selector='#buttons')
class Blogspot2018Page(TopRealWorldDesktopPage):
""" Why: #11 (Alexa global), google property; some blogger layouts have
infinite scroll but more interesting """
BASE_NAME = 'blogspot'
YEAR = '2018'
URL = 'http://googlewebmastercentral.blogspot.com/'
def __init__(self,
page_set,
shared_page_state_class=shared_page_state.SharedPageState,
name_suffix='',
extra_browser_args=None):
super(Blogspot2018Page, self).__init__(
page_set=page_set,
shared_page_state_class=shared_page_state_class,
name_suffix=name_suffix,
extra_browser_args=extra_browser_args)
def RunNavigateSteps(self, action_runner):
super(Blogspot2018Page, self).RunNavigateSteps(action_runner)
action_runner.WaitForElement('div[class="searchBox"]')
class Wordpress2018Page(TopRealWorldDesktopPage):
""" Why: #18 (Alexa global), Picked an interesting post """
BASE_NAME = 'wordpress'
YEAR = '2018'
# pylint: disable=line-too-long
URL = 'http://en.blog.wordpress.com/2012/09/04/freshly-pressed-editors-picks-for-august-2012/'
def __init__(self,
page_set,
shared_page_state_class=shared_page_state.SharedPageState,
name_suffix='',
extra_browser_args=None):
super(Wordpress2018Page, self).__init__(
page_set=page_set,
shared_page_state_class=shared_page_state_class,
name_suffix=name_suffix,
extra_browser_args=extra_browser_args)
def RunNavigateSteps(self, action_runner):
super(Wordpress2018Page, self).RunNavigateSteps(action_runner)
action_runner.WaitForElement(
# pylint: disable=line-too-long
'a[href="https://en.blog.wordpress.com/2012/08/30/new-themes-able-and-sight/"]'
)
class Facebook2018Page(TopRealWorldDesktopPage):
""" Why: top social,Public profile """
BASE_NAME = 'facebook'
YEAR = '2018'
URL = 'https://www.facebook.com/barackobama'
def __init__(self,
page_set,
shared_page_state_class=shared_page_state.SharedPageState,
name_suffix='',
extra_browser_args=None):
super(Facebook2018Page, self).__init__(
page_set=page_set,
shared_page_state_class=shared_page_state_class,
name_suffix=name_suffix,
extra_browser_args=extra_browser_args)
def RunNavigateSteps(self, action_runner):
super(Facebook2018Page, self).RunNavigateSteps(action_runner)
action_runner.WaitForElement(text='Videos')
class Linkedin2018Page(TopRealWorldDesktopPage):
""" Why: #12 (Alexa global), Public profile. """
BASE_NAME = 'linkedin'
YEAR = '2018'
URL = 'http://www.linkedin.com/in/linustorvalds'
def __init__(self,
page_set,
shared_page_state_class=shared_page_state.SharedPageState,
name_suffix='',
extra_browser_args=None):
super(Linkedin2018Page, self).__init__(
page_set=page_set,
shared_page_state_class=shared_page_state_class,
name_suffix=name_suffix,
extra_browser_args=extra_browser_args)
def RunNavigateSteps(self, action_runner):
if self.wpr_mode != wpr_modes.WPR_REPLAY:
linkedin_login.LoginDesktopAccount(action_runner, 'linkedin')
super(Linkedin2018Page, self).RunNavigateSteps(action_runner)
class Wikipedia2018Page(TopRealWorldDesktopPage):
""" Why: #6 (Alexa) most visited worldwide,Picked an interesting page. """
BASE_NAME = 'wikipedia'
YEAR = '2018'
URL = 'http://en.wikipedia.org/wiki/Wikipedia'
def __init__(self,
page_set,
shared_page_state_class=shared_page_state.SharedPageState,
name_suffix='',
extra_browser_args=None):
super(Wikipedia2018Page, self).__init__(
page_set=page_set,
shared_page_state_class=shared_page_state_class,
name_suffix=name_suffix,
extra_browser_args=extra_browser_args)
class Twitter2018Page(TopRealWorldDesktopPage):
""" Why: #8 (Alexa global),Picked an interesting page """
BASE_NAME = 'twitter'
YEAR = '2018'
URL = 'https://twitter.com/katyperry'
def __init__(self,
page_set,
shared_page_state_class=shared_page_state.SharedPageState,
name_suffix='',
extra_browser_args=None):
super(Twitter2018Page, self).__init__(
page_set=page_set,
shared_page_state_class=shared_page_state_class,
name_suffix=name_suffix,
extra_browser_args=extra_browser_args)
def RunNavigateSteps(self, action_runner):
super(Twitter2018Page, self).RunNavigateSteps(action_runner)
action_runner.WaitForElement(selector='.ProfileNav')
class Pinterest2018Page(TopRealWorldDesktopPage):
""" Why: #37 (Alexa global) """
BASE_NAME = 'pinterest'
YEAR = '2018'
URL = 'https://www.pinterest.com/search/pins/?q=flowers&rs=typed'
def __init__(self,
page_set,
shared_page_state_class=shared_page_state.SharedPageState,
name_suffix='',
extra_browser_args=None):
super(Pinterest2018Page, self).__init__(
page_set=page_set,
shared_page_state_class=shared_page_state_class,
name_suffix=name_suffix,
extra_browser_args=extra_browser_args)
class AccuWeather2018Page(TopRealWorldDesktopPage):
""" Why: #2 weather according to Alexa """
BASE_NAME = 'accu_weather'
YEAR = '2018'
URL = 'https://www.accuweather.com/en/us/new-york-ny/10017/weather-forecast/349727'
def __init__(self,
page_set,
shared_page_state_class=shared_page_state.SharedPageState,
name_suffix='',
extra_browser_args=None):
super(AccuWeather2018Page, self).__init__(
page_set=page_set,
shared_page_state_class=shared_page_state_class,
name_suffix=name_suffix,
extra_browser_args=extra_browser_args)
class Twitch2018Page(TopRealWorldDesktopPage):
""" Why: #1 games according to Alexa """
BASE_NAME = 'twitch'
YEAR = '2018'
URL = 'https://www.twitch.tv'
TAGS = TopRealWorldDesktopPage.TAGS + [
story_tags.REPRESENTATIVE_MAC_DESKTOP
]
def __init__(self,
page_set,
shared_page_state_class=shared_page_state.SharedPageState,
name_suffix='',
extra_browser_args=None):
super(Twitch2018Page, self).__init__(
page_set=page_set,
shared_page_state_class=shared_page_state_class,
name_suffix=name_suffix,
extra_browser_args=extra_browser_args)
def RunPageInteractions(self, action_runner):
action_runner.WaitForElement(selector='#mantle_skin')
with action_runner.CreateGestureInteraction('ScrollAction'):
action_runner.ScrollPageToElement(selector='.footer')
if self.story_set.scroll_forever:
while True:
action_runner.ScrollPage(direction='up')
action_runner.ScrollPage(direction='down')
class Gmail2018SmoothPage(TopRealWorldDesktopPage):
""" Why: productivity, top google properties """
BASE_NAME = 'gmail'
YEAR = '2018'
URL = 'https://mail.google.com/mail/'
def RunNavigateSteps(self, action_runner):
if self.wpr_mode != wpr_modes.WPR_REPLAY:
google_login.NewLoginGoogleAccount(action_runner, 'googletest')
super(Gmail2018SmoothPage, self).RunNavigateSteps(action_runner)
action_runner.WaitForJavaScriptCondition(
'window.gmonkey !== undefined &&'
'document.getElementById("gb") !== null &&'
'document.readyState == "complete"')
def RunPageInteractions(self, action_runner):
action_runner.WaitForElement(selector='.Tm.aeJ')
with action_runner.CreateGestureInteraction('ScrollAction'):
action_runner.ScrollElement(selector='.Tm.aeJ')
if self.story_set.scroll_forever:
while True:
action_runner.ScrollElement(
direction='up', selector='.Tm.aeJ')
action_runner.ScrollElement(
direction='down', selector='.Tm.aeJ')
class GoogleCalendar2018SmoothPage(TopRealWorldDesktopPage):
""" Why: productivity, top google properties """
BASE_NAME='google_calendar'
YEAR = '2018'
URL='https://www.google.com/calendar/'
def RunNavigateSteps(self, action_runner):
if self.wpr_mode != wpr_modes.WPR_REPLAY:
google_login.NewLoginGoogleAccount(action_runner, 'googletest')
super(GoogleCalendar2018SmoothPage, self).RunNavigateSteps(action_runner)
action_runner.WaitForElement('span[class~="sm8sCf"]')
action_runner.ExecuteJavaScript("""
(function() {
var elem = document.createElement('meta');
elem.name='viewport';
elem.content='initial-scale=1';
document.body.appendChild(elem);
})();""")
action_runner.Wait(1)
def RunPageInteractions(self, action_runner):
action_runner.WaitForElement('span[class~="sm8sCf"]')
with action_runner.CreateGestureInteraction('ScrollAction'):
action_runner.ScrollElement(selector='#YPCqFe')
if self.story_set.scroll_forever:
while True:
action_runner.ScrollElement(
direction='up', selector='#YPCqFe')
action_runner.ScrollElement(
direction='down', selector='#YPCqFe')
class GoogleDoc2018SmoothPage(TopRealWorldDesktopPage):
""" Why: productivity, top google properties; Sample doc in the link """
# pylint: disable=line-too-long
URL = 'https://docs.google.com/document/d/1X-IKNjtEnx-WW5JIKRLsyhz5sbsat3mfTpAPUSX3_s4/view'
BASE_NAME='google_docs'
YEAR = '2018'
def RunNavigateSteps(self, action_runner):
super(GoogleDoc2018SmoothPage, self).RunNavigateSteps(action_runner)
action_runner.WaitForJavaScriptCondition(
'document.getElementsByClassName("kix-appview-editor").length')
def RunPageInteractions(self, action_runner):
action_runner.WaitForElement(selector='#printButton')
with action_runner.CreateGestureInteraction('ScrollAction'):
action_runner.ScrollElement(selector='.kix-appview-editor')
if self.story_set.scroll_forever:
while True:
action_runner.ScrollElement(
direction='up', selector='.kix-appview-editor')
action_runner.ScrollElement(
direction='down', selector='.kix-appview-editor')
class ESPN2018SmoothPage(TopRealWorldDesktopPage):
""" Why: #1 sports """
BASE_NAME='espn'
YEAR = '2018'
URL = 'http://espn.go.com'
def RunPageInteractions(self, action_runner):
action_runner.WaitForElement(selector='#global-scoreboard')
with action_runner.CreateGestureInteraction('ScrollAction'):
action_runner.ScrollPage(left_start_ratio=0.1)
if self.story_set.scroll_forever:
while True:
action_runner.ScrollPage(direction='up', left_start_ratio=0.1)
action_runner.ScrollPage(direction='down', left_start_ratio=0.1)
class YahooNews2018Page(TopRealWorldDesktopPage):
"""Why: #1 news worldwide (Alexa global)"""
BASE_NAME = 'yahoo_news'
YEAR = '2018'
URL = 'http://news.yahoo.com'
class CNNNews2018Page(TopRealWorldDesktopPage):
"""Why: #2 news worldwide"""
BASE_NAME = 'cnn'
YEAR = '2018'
URL = 'http://www.cnn.com'
class Amazon2018Page(TopRealWorldDesktopPage):
# Why: #1 world commerce website by visits; #3 commerce in the US by
# time spent
BASE_NAME = 'amazon'
YEAR = '2018'
URL = 'http://www.amazon.com'
class Ebay2018Page(TopRealWorldDesktopPage):
# Why: #1 commerce website by time spent by users in US
BASE_NAME = 'ebay'
YEAR = '2018'
URL = 'http://www.ebay.com'
class Booking2018Page(TopRealWorldDesktopPage):
# Why: #1 Alexa recreation
BASE_NAME = 'booking.com'
YEAR = '2018'
URL = 'http://booking.com'
class YahooAnswers2018Page(TopRealWorldDesktopPage):
# Why: #1 Alexa reference
BASE_NAME = 'yahoo_answers'
YEAR = '2018'
URL = 'http://answers.yahoo.com'
class YahooSports2018Page(TopRealWorldDesktopPage):
# Why: #1 Alexa sports
BASE_NAME = 'yahoo_sports'
YEAR = '2018'
URL = 'http://sports.yahoo.com/'
class TechCrunch2018Page(TopRealWorldDesktopPage):
# Why: top tech blog
BASE_NAME = 'techcrunch'
YEAR = '2018'
URL = 'http://techcrunch.com'
|
photologue/tests/test_photosize.py | erdnaxe/django-photologue | 364 | 12640161 | from django.core.exceptions import ValidationError
from .factories import PhotoSizeFactory
from .helpers import PhotologueBaseTest
class PhotoSizeNameTest(PhotologueBaseTest):
def test_valid_name(self):
"""We are restricted in what names we can enter."""
photosize = PhotoSizeFactory()
photosize.name = None
with self.assertRaisesMessage(ValidationError, 'This field cannot be null.'):
photosize.full_clean()
photosize = PhotoSizeFactory(name='')
with self.assertRaisesMessage(ValidationError, 'This field cannot be blank.'):
photosize.full_clean()
for name in ('a space', 'UPPERCASE', 'bad?chars'):
photosize = PhotoSizeFactory(name=name)
with self.assertRaisesMessage(ValidationError,
'Use only plain lowercase letters (ASCII), numbers and underscores.'):
photosize.full_clean()
for name in ('label', '2_words'):
photosize = PhotoSizeFactory(name=name)
photosize.full_clean()
|
outside/commonmark/wrapper.py | PowerOlive/urbit | 318 | 12640168 | #!/usr/bin/env python
# Example for using the shared library from python
from ctypes import CDLL, c_char_p, c_long
import sys
import platform
sysname = platform.system()
if sysname == 'Darwin':
cmark = CDLL("build/src/libcmark.dylib")
else:
cmark = CDLL("build/src/libcmark.so")
markdown = cmark.cmark_markdown_to_html
markdown.restype = c_char_p
markdown.argtypes = [c_char_p, c_long]
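# Convert a Markdown byte string to HTML by calling the cmark C function loaded above.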
def md2html(text):
return markdown(text, len(text))
sys.stdout.write(md2html(sys.stdin.read()))
|
python-leetcode/laozhang/tree/leetcode_1379_.py | sweeneycai/cs-summary-reflection | 227 | 12640178 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# coding=utf-8
"""
1379. Find a Corresponding Node of a Binary Tree in a Clone of That Tree
"""
from laozhang import TreeNode
class Solution:
def getTargetCopy(self, original: TreeNode, cloned: TreeNode, target: TreeNode) -> TreeNode:
if original and cloned:
if original.val == target.val:
return cloned
a = self.getTargetCopy(original.left, cloned.left, target)
if not a:
b = self.getTargetCopy(original.right, cloned.right, target)
return b
else:
return a
|
src/back/tests/test_jsonfile.py | khamidou/kite | 136 | 12640181 | import unittest
import tempfile
import fcntl
import os
import datetime
import kite.jsonfile
import tmpdir
from kite.jsonfile import JsonFile
class TestJsonfile(tmpdir.TestCaseWithTempFile):
def test_locked_loading(self):
fd = os.fdopen(self.tmpfd, "w")
fd.write("{}")
fd.close()
json_file = JsonFile(self.tmpfile)
self.assertEqual(json_file.data, {}, msg="Value is not deserialized correctly")
fd = open(self.tmpfile, "w+")
fd.write("[]")
fd.close()
json_file.refresh()
self.assertEqual(json_file.data, [], msg="Value is not refreshed correctly")
def test_datetime_serialization(self):
d = datetime.datetime.now()
json_file = JsonFile()
json_file.data = {"date": d}
json_file.save(self.tmpfile)
json_file2 = JsonFile(self.tmpfile)
self.assertTrue(isinstance(json_file2.data["date"], datetime.datetime), msg="Datetime value is not deserialized correctly")
|
insights/combiners/tests/test_nginx_conf_parser.py | lhuett/insights-core | 121 | 12640205 | <gh_stars>100-1000
from insights.combiners.nginx_conf import _NginxConf
from insights.tests import context_wrap
NGINXCONF = """
user root;
worker_processes 5;
error_log logs/error.log;
pid logs/nginx.pid;
worker_rlimit_nofile 8192;
events {
worker_connections 4096;
}
mail {
server_name mail.example.com;
auth_http localhost:9000/cgi-bin/auth;
server {
listen 143;
protocol imap;
}
}
http {
include conf/mime.types;
include /etc/nginx/proxy.conf;
include /etc/nginx/fastcgi.conf;
index index.html index.htm index.php;
log_format main '$remote_addr - $remote_user [$time_local] "$request" '
'$status $body_bytes_sent "$http_referer" '
'"$http_user_agent" "$http_x_forwarded_for"';
default_type application/octet-stream;
access_log logs/access.log main;
sendfile on;
tcp_nopush on;
server_names_hash_bucket_size 128;
server { # php/fastcgi
listen 80;
server_name domain1.com www.domain1.com;
access_log logs/domain1.access.log main;
root html;
location ~ \.php$ {
fastcgi_pass 127.0.0.1:1025;
}
}
server { # simple reverse-proxy
listen 80;
server_name domain2.com www.domain2.com;
access_log logs/domain2.access.log main;
location ~ ^/(images|javascript|js|css|flash|media|static)/ {
root /var/www/virtual/big.server.com/htdocs;
expires 30d;
}
location / {
proxy_pass http://127.0.0.1:8080;
}
}
map $http_upgrade $connection_upgrade {
default upgrade;
'' close;
}
upstream websocket {
server 10.66.208.205:8010;
}
upstream big_server_com {
server 127.0.0.3:8000 weight=5;
server 127.0.0.3:8001 weight=5;
server 192.168.0.1:8000;
server 192.168.0.1:8001;
}
server { # simple load balancing
listen 80;
server_name big.server.com;
access_log logs/big.server.access.log main;
location / {
proxy_pass http://big_server_com;
location /inner/ {
proxy_pass http://u2;
limit_except GET {
allow 192.168.2.0/32;
}
}
}
}
}
""".strip()
def test_nginx_conf_parser():
nginxconf = _NginxConf(context_wrap(NGINXCONF))
assert nginxconf['user'][-1].value == 'root'
assert nginxconf['events'][-1]['worker_connections'][-1].value == 4096
assert nginxconf['mail'][-1]['server'][0]['listen'][-1].value == 143
assert nginxconf['http'][-1]['access_log'][-1].value == 'logs/access.log main'
assert nginxconf['http'][-1]['server'][0]['location'][0]['fastcgi_pass'][-1].value == '127.0.0.1:1025'
assert nginxconf['http'][-1]['server'][1]['location'][-1].value == '/'
assert nginxconf['http'][-1]['upstream'][1].value == 'big_server_com'
assert nginxconf["http"][-1]["include"][0].value == 'conf/mime.types'
assert nginxconf['http'][-1]['upstream'][1]['server'][0].value == '127.0.0.3:8000 weight=5'
assert nginxconf['http'][-1]['log_format'][-1].value == 'main $remote_addr - $remote_user [$time_local] "$request" $status $body_bytes_sent "$http_referer" "$http_user_agent" "$http_x_forwarded_for"'
assert nginxconf['http'][-1]['server'][2]['location'][0]['location'][0]['limit_except'][-1]['allow'][-1].value == '192.168.2.0/32'
assert nginxconf['http']['server']['location']['location']['limit_except']['allow'][-1].value == '192.168.2.0/32'
assert nginxconf['http']['server'][0]['location'][-1].value == r'~ \.php$'
assert nginxconf['http']['server'][1]['location'][0].value == '~ ^/(images|javascript|js|css|flash|media|static)/'
assert nginxconf['http']['server'][1]['location'][-1].value == '/'
assert nginxconf['http']['server'][-1] == nginxconf['http']['server'][2]
|
boto3_type_annotations/boto3_type_annotations/route53domains/paginator.py | cowboygneox/boto3_type_annotations | 119 | 12640229 | <gh_stars>100-1000
from typing import Dict
from datetime import datetime
from botocore.paginate import Paginator
class ListDomains(Paginator):
def paginate(self, PaginationConfig: Dict = None) -> Dict:
pass
class ListOperations(Paginator):
def paginate(self, SubmittedSince: datetime = None, PaginationConfig: Dict = None) -> Dict:
pass
class ViewBilling(Paginator):
def paginate(self, Start: datetime = None, End: datetime = None, PaginationConfig: Dict = None) -> Dict:
pass
|
mozi/datasets/imdb.py | hycis/Mozi | 122 | 12640247 | <filename>mozi/datasets/imdb.py
import logging
logger = logging.getLogger(__name__)
import os
import cPickle
import numpy as np
import theano
floatX = theano.config.floatX
from mozi.utils.utils import get_file, make_one_hot, pad_sequences
from mozi.datasets.dataset import SingleBlock
class IMDB(SingleBlock):
def __init__(self, nb_words=None, skip_top=0, maxlen=None, seed=113,
pad_zero=False, start_char=1, oov_char=2, index_from=3, **kwargs):
'''
adapted from keras
'''
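        # nb_words keeps only the most frequent words and skip_top drops the very top
        # ones; words outside that range are replaced by oov_char, or dropped when
        # oov_char is None. Sequences are optionally zero-padded to maxlen.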
im_dir = os.environ['MOZI_DATA_PATH'] + '/imdb/'
path = "https://s3.amazonaws.com/text-datasets/imdb.pkl"
im_dir = get_file(fpath="{}/imdb.pkl".format(im_dir), origin=path, untar=False)
with open('{}/imdb.pkl'.format(im_dir)) as fin:
X, labels = np.load(fin)
np.random.seed(seed)
np.random.shuffle(X)
np.random.seed(seed)
np.random.shuffle(labels)
if start_char is not None:
X = [[start_char] + [w + index_from for w in x] for x in X]
elif index_from:
X = [[w + index_from for w in x] for x in X]
if maxlen:
new_X = []
new_labels = []
for x, y in zip(X, labels):
if len(x) < maxlen:
new_X.append(x)
new_labels.append(y)
X = new_X
labels = new_labels
if not nb_words:
nb_words = max([max(x) for x in X])
# by convention, use 2 as OOV word
# reserve 'index_from' (=3 by default) characters: 0 (padding), 1 (start), 2 (OOV)
if oov_char is not None:
X = [[oov_char if (w >= nb_words or w < skip_top) else w for w in x] for x in X]
else:
nX = []
for x in X:
nx = []
for w in x:
if (w >= nb_words or w < skip_top):
nx.append(w)
nX.append(nx)
X = nX
if pad_zero and maxlen:
X = pad_sequences(X, maxlen=maxlen)
super(IMDB, self).__init__(X=np.asarray(X), y=np.asarray(labels).reshape((len(labels),1)), **kwargs)
|
src/oci/artifacts/models/generic_artifact_summary.py | Manny27nyc/oci-python-sdk | 249 | 12640281 | <reponame>Manny27nyc/oci-python-sdk
# coding: utf-8
# Copyright (c) 2016, 2021, Oracle and/or its affiliates. All rights reserved.
# This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
from oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel # noqa: F401
from oci.decorators import init_model_state_from_kwargs
@init_model_state_from_kwargs
class GenericArtifactSummary(object):
"""
Summary information for an artifact.
"""
def __init__(self, **kwargs):
"""
Initializes a new GenericArtifactSummary object with values from keyword arguments.
The following keyword arguments are supported (corresponding to the getters/setters of this class):
:param id:
The value to assign to the id property of this GenericArtifactSummary.
:type id: str
:param display_name:
The value to assign to the display_name property of this GenericArtifactSummary.
:type display_name: str
:param compartment_id:
The value to assign to the compartment_id property of this GenericArtifactSummary.
:type compartment_id: str
:param repository_id:
The value to assign to the repository_id property of this GenericArtifactSummary.
:type repository_id: str
:param artifact_path:
The value to assign to the artifact_path property of this GenericArtifactSummary.
:type artifact_path: str
:param version:
The value to assign to the version property of this GenericArtifactSummary.
:type version: str
:param sha256:
The value to assign to the sha256 property of this GenericArtifactSummary.
:type sha256: str
:param size_in_bytes:
The value to assign to the size_in_bytes property of this GenericArtifactSummary.
:type size_in_bytes: int
:param lifecycle_state:
The value to assign to the lifecycle_state property of this GenericArtifactSummary.
:type lifecycle_state: str
:param freeform_tags:
The value to assign to the freeform_tags property of this GenericArtifactSummary.
:type freeform_tags: dict(str, str)
:param defined_tags:
The value to assign to the defined_tags property of this GenericArtifactSummary.
:type defined_tags: dict(str, dict(str, object))
:param time_created:
The value to assign to the time_created property of this GenericArtifactSummary.
:type time_created: datetime
"""
self.swagger_types = {
'id': 'str',
'display_name': 'str',
'compartment_id': 'str',
'repository_id': 'str',
'artifact_path': 'str',
'version': 'str',
'sha256': 'str',
'size_in_bytes': 'int',
'lifecycle_state': 'str',
'freeform_tags': 'dict(str, str)',
'defined_tags': 'dict(str, dict(str, object))',
'time_created': 'datetime'
}
self.attribute_map = {
'id': 'id',
'display_name': 'displayName',
'compartment_id': 'compartmentId',
'repository_id': 'repositoryId',
'artifact_path': 'artifactPath',
'version': 'version',
'sha256': 'sha256',
'size_in_bytes': 'sizeInBytes',
'lifecycle_state': 'lifecycleState',
'freeform_tags': 'freeformTags',
'defined_tags': 'definedTags',
'time_created': 'timeCreated'
}
self._id = None
self._display_name = None
self._compartment_id = None
self._repository_id = None
self._artifact_path = None
self._version = None
self._sha256 = None
self._size_in_bytes = None
self._lifecycle_state = None
self._freeform_tags = None
self._defined_tags = None
self._time_created = None
@property
def id(self):
"""
**[Required]** Gets the id of this GenericArtifactSummary.
The `OCID`__ of the artifact.
Example: `ocid1.genericartifact.oc1..exampleuniqueID`
__ https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm
:return: The id of this GenericArtifactSummary.
:rtype: str
"""
return self._id
@id.setter
def id(self, id):
"""
Sets the id of this GenericArtifactSummary.
The `OCID`__ of the artifact.
Example: `ocid1.genericartifact.oc1..exampleuniqueID`
__ https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm
:param id: The id of this GenericArtifactSummary.
:type: str
"""
self._id = id
@property
def display_name(self):
"""
**[Required]** Gets the display_name of this GenericArtifactSummary.
The artifact name with the format of `<artifact-path>:<artifact-version>`. The artifact name is truncated to a maximum length of 255.
Example: `project01/my-web-app/artifact-abc:1.0.0`
:return: The display_name of this GenericArtifactSummary.
:rtype: str
"""
return self._display_name
@display_name.setter
def display_name(self, display_name):
"""
Sets the display_name of this GenericArtifactSummary.
The artifact name with the format of `<artifact-path>:<artifact-version>`. The artifact name is truncated to a maximum length of 255.
Example: `project01/my-web-app/artifact-abc:1.0.0`
:param display_name: The display_name of this GenericArtifactSummary.
:type: str
"""
self._display_name = display_name
@property
def compartment_id(self):
"""
**[Required]** Gets the compartment_id of this GenericArtifactSummary.
The OCID of the artifact's compartment.
:return: The compartment_id of this GenericArtifactSummary.
:rtype: str
"""
return self._compartment_id
@compartment_id.setter
def compartment_id(self, compartment_id):
"""
Sets the compartment_id of this GenericArtifactSummary.
The OCID of the artifact's compartment.
:param compartment_id: The compartment_id of this GenericArtifactSummary.
:type: str
"""
self._compartment_id = compartment_id
@property
def repository_id(self):
"""
**[Required]** Gets the repository_id of this GenericArtifactSummary.
The `OCID`__ of the repository.
__ https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm
:return: The repository_id of this GenericArtifactSummary.
:rtype: str
"""
return self._repository_id
@repository_id.setter
def repository_id(self, repository_id):
"""
Sets the repository_id of this GenericArtifactSummary.
The `OCID`__ of the repository.
__ https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm
:param repository_id: The repository_id of this GenericArtifactSummary.
:type: str
"""
self._repository_id = repository_id
@property
def artifact_path(self):
"""
**[Required]** Gets the artifact_path of this GenericArtifactSummary.
A user-defined path to describe the location of an artifact. Slashes do not create a directory structure, but you can use slashes to organize the repository. An artifact path does not include an artifact version.
Example: `project01/my-web-app/artifact-abc`
:return: The artifact_path of this GenericArtifactSummary.
:rtype: str
"""
return self._artifact_path
@artifact_path.setter
def artifact_path(self, artifact_path):
"""
Sets the artifact_path of this GenericArtifactSummary.
A user-defined path to describe the location of an artifact. Slashes do not create a directory structure, but you can use slashes to organize the repository. An artifact path does not include an artifact version.
Example: `project01/my-web-app/artifact-abc`
:param artifact_path: The artifact_path of this GenericArtifactSummary.
:type: str
"""
self._artifact_path = artifact_path
@property
def version(self):
"""
**[Required]** Gets the version of this GenericArtifactSummary.
A user-defined string to describe the artifact version.
Example: `1.1.0` or `1.2-beta-2`
:return: The version of this GenericArtifactSummary.
:rtype: str
"""
return self._version
@version.setter
def version(self, version):
"""
Sets the version of this GenericArtifactSummary.
A user-defined string to describe the artifact version.
Example: `1.1.0` or `1.2-beta-2`
:param version: The version of this GenericArtifactSummary.
:type: str
"""
self._version = version
@property
def sha256(self):
"""
**[Required]** Gets the sha256 of this GenericArtifactSummary.
The SHA256 digest for the artifact. When you upload an artifact to the repository, a SHA256 digest is calculated and added to the artifact properties.
:return: The sha256 of this GenericArtifactSummary.
:rtype: str
"""
return self._sha256
@sha256.setter
def sha256(self, sha256):
"""
Sets the sha256 of this GenericArtifactSummary.
The SHA256 digest for the artifact. When you upload an artifact to the repository, a SHA256 digest is calculated and added to the artifact properties.
:param sha256: The sha256 of this GenericArtifactSummary.
:type: str
"""
self._sha256 = sha256
@property
def size_in_bytes(self):
"""
**[Required]** Gets the size_in_bytes of this GenericArtifactSummary.
The size of the artifact in bytes.
:return: The size_in_bytes of this GenericArtifactSummary.
:rtype: int
"""
return self._size_in_bytes
@size_in_bytes.setter
def size_in_bytes(self, size_in_bytes):
"""
Sets the size_in_bytes of this GenericArtifactSummary.
The size of the artifact in bytes.
:param size_in_bytes: The size_in_bytes of this GenericArtifactSummary.
:type: int
"""
self._size_in_bytes = size_in_bytes
@property
def lifecycle_state(self):
"""
**[Required]** Gets the lifecycle_state of this GenericArtifactSummary.
The current state of the generic artifact.
:return: The lifecycle_state of this GenericArtifactSummary.
:rtype: str
"""
return self._lifecycle_state
@lifecycle_state.setter
def lifecycle_state(self, lifecycle_state):
"""
Sets the lifecycle_state of this GenericArtifactSummary.
The current state of the generic artifact.
:param lifecycle_state: The lifecycle_state of this GenericArtifactSummary.
:type: str
"""
self._lifecycle_state = lifecycle_state
@property
def freeform_tags(self):
"""
**[Required]** Gets the freeform_tags of this GenericArtifactSummary.
Free-form tags for this resource. Each tag is a simple key-value pair with no
predefined name, type, or namespace. For more information, see `Resource Tags`__.
Example: `{\"Department\": \"Finance\"}`
__ https://docs.cloud.oracle.com/iaas/Content/General/Concepts/resourcetags.htm
:return: The freeform_tags of this GenericArtifactSummary.
:rtype: dict(str, str)
"""
return self._freeform_tags
@freeform_tags.setter
def freeform_tags(self, freeform_tags):
"""
Sets the freeform_tags of this GenericArtifactSummary.
Free-form tags for this resource. Each tag is a simple key-value pair with no
predefined name, type, or namespace. For more information, see `Resource Tags`__.
Example: `{\"Department\": \"Finance\"}`
__ https://docs.cloud.oracle.com/iaas/Content/General/Concepts/resourcetags.htm
:param freeform_tags: The freeform_tags of this GenericArtifactSummary.
:type: dict(str, str)
"""
self._freeform_tags = freeform_tags
@property
def defined_tags(self):
"""
**[Required]** Gets the defined_tags of this GenericArtifactSummary.
Defined tags for this resource. Each key is predefined and scoped to a
namespace. For more information, see `Resource Tags`__.
Example: `{\"Operations\": {\"CostCenter\": \"42\"}}`
__ https://docs.cloud.oracle.com/iaas/Content/General/Concepts/resourcetags.htm
:return: The defined_tags of this GenericArtifactSummary.
:rtype: dict(str, dict(str, object))
"""
return self._defined_tags
@defined_tags.setter
def defined_tags(self, defined_tags):
"""
Sets the defined_tags of this GenericArtifactSummary.
Defined tags for this resource. Each key is predefined and scoped to a
namespace. For more information, see `Resource Tags`__.
Example: `{\"Operations\": {\"CostCenter\": \"42\"}}`
__ https://docs.cloud.oracle.com/iaas/Content/General/Concepts/resourcetags.htm
:param defined_tags: The defined_tags of this GenericArtifactSummary.
:type: dict(str, dict(str, object))
"""
self._defined_tags = defined_tags
@property
def time_created(self):
"""
**[Required]** Gets the time_created of this GenericArtifactSummary.
An RFC 3339 timestamp indicating when the artifact was created.
:return: The time_created of this GenericArtifactSummary.
:rtype: datetime
"""
return self._time_created
@time_created.setter
def time_created(self, time_created):
"""
Sets the time_created of this GenericArtifactSummary.
An RFC 3339 timestamp indicating when the artifact was created.
:param time_created: The time_created of this GenericArtifactSummary.
:type: datetime
"""
self._time_created = time_created
def __repr__(self):
return formatted_flat_dict(self)
def __eq__(self, other):
if other is None:
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not self == other
|
examples/djopenid/server/urls.py | cjwatson/python-openid | 176 | 12640314 | """Server URLs."""
from __future__ import unicode_literals
from django.conf.urls import url
from django.views.generic import TemplateView
from djopenid.server.views import endpoint, idPage, idpXrds, processTrustResult, server
urlpatterns = [
url(r'^$', server, name='index'),
url(r'^xrds/$', idpXrds, name='xrds'),
url(r'^user/$', idPage, name='local_id'),
url(r'^endpoint/$', endpoint, name='endpoint'),
url(r'^trust/$', TemplateView.as_view(template_name='server/trust.html'), name='confirmation'),
url(r'^processTrustResult/$', processTrustResult, name='process-confirmation'),
]
|
skmultilearn/embedding/skembeddings.py | emrecncelik/scikit-multilearn | 763 | 12640324 | from __future__ import absolute_import
from sklearn.base import BaseEstimator
class SKLearnEmbedder(BaseEstimator):
"""Embed the label space using a scikit-compatible matrix-based embedder
Parameters
----------
embedder : sklearn.base.BaseEstimator
a clonable instance of a scikit-compatible embedder, will be automatically
        put under :code:`self.embedder`.
pass_input_space : bool (default is False)
whether to take :code:`X` into consideration upon clustering,
use only if you know that the embedder can handle two
parameters for clustering, will be automatically
put under :code:`self.pass_input_space`.
Example code for using this embedder looks like this:
.. code-block:: python
from skmultilearn.embedding import SKLearnEmbedder, EmbeddingClassifier
from sklearn.manifold import SpectralEmbedding
from sklearn.ensemble import RandomForestRegressor
from skmultilearn.adapt import MLkNN
clf = EmbeddingClassifier(
SKLearnEmbedder(SpectralEmbedding(n_components = 10)),
RandomForestRegressor(n_estimators=10),
MLkNN(k=5)
)
clf.fit(X_train, y_train)
predictions = clf.predict(X_test)
"""
def __init__(self, embedder=None, pass_input_space=False):
super(BaseEstimator, self).__init__()
self.embedder = embedder
self.pass_input_space = pass_input_space
def fit(self, X, y):
"""Fits the embedder to data
Parameters
----------
X : `array_like`, :class:`numpy.matrix` or :mod:`scipy.sparse` matrix, shape=(n_samples, n_features)
input feature matrix
y : `array_like`, :class:`numpy.matrix` or :mod:`scipy.sparse` matrix of `{0, 1}`, shape=(n_samples, n_labels)
binary indicator matrix with label assignments
Returns
-------
self
fitted instance of self
"""
        self.embedder.fit(X, y)
        return self
def fit_transform(self, X, y):
"""Fit the embedder and transform the output space
Parameters
----------
X : `array_like`, :class:`numpy.matrix` or :mod:`scipy.sparse` matrix, shape=(n_samples, n_features)
input feature matrix
y : `array_like`, :class:`numpy.matrix` or :mod:`scipy.sparse` matrix of `{0, 1}`, shape=(n_samples, n_labels)
binary indicator matrix with label assignments
Returns
-------
X, y_embedded
results of the embedding, input and output space
"""
if self.pass_input_space:
result = self.embedder.fit_transform(X, y)
else:
result = self.embedder.fit_transform(y)
return X, result
|
tests/test_01_dxf_entities/test_114_factory.py | jkjt/ezdxf | 515 | 12640335 | # Copyright (c) 2019-2020 <NAME>
# License: MIT License
# created 2019-02-18
import pytest
import ezdxf
from ezdxf.lldxf.extendedtags import ExtendedTags
from ezdxf.entities import factory
from ezdxf.entities.factory import ENTITY_CLASSES
@pytest.fixture(scope="module")
def doc():
return ezdxf.new()
def test_registered_structural_entities():
assert "CLASS" in ENTITY_CLASSES
assert "TABLE" in ENTITY_CLASSES
assert "BLOCK" in ENTITY_CLASSES
assert "ENDBLK" in ENTITY_CLASSES
def test_registered_table_entries():
assert "LAYER" in ENTITY_CLASSES
assert "LTYPE" in ENTITY_CLASSES
assert "STYLE" in ENTITY_CLASSES
assert "DIMSTYLE" in ENTITY_CLASSES
assert "APPID" in ENTITY_CLASSES
assert "UCS" in ENTITY_CLASSES
assert "VIEW" in ENTITY_CLASSES
assert "VPORT" in ENTITY_CLASSES
assert "BLOCK_RECORD" in ENTITY_CLASSES
def test_new():
e = factory.new("POINT")
assert e.doc is None
assert e.dxf.handle is None
assert e.dxf.owner is None
assert e.is_alive is True
assert e.is_virtual is True
POINT = """0
POINT
5
FEFE
8
0
10
0.0
20
0.0
30
0.0
"""
def test_factory_load():
tags = ExtendedTags.from_text(POINT)
e = factory.load(tags)
assert e.dxftype() == "POINT"
assert e.doc is None
assert e.dxf.handle == "FEFE"
assert e.dxf.owner is None
assert e.is_alive is True
assert e.is_virtual is True
def test_bind_entity_to_doc(doc):
e = factory.new("POINT")
factory.bind(e, doc)
assert e.doc is doc
assert e.dxf.handle is not None, "should have a handle"
assert (
e.dxf.handle in doc.entitydb
), "should be stored in the entity database"
assert e.dxf.owner is None, "should not be linked to a layout or owner"
assert e.is_virtual is False, "is not a virtual entity"
def test_bind_entity_with_existing_handle_to_doc(doc):
e = factory.new("POINT")
e.dxf.handle = "ABBA"
factory.bind(e, doc)
assert e.doc is doc
assert e.dxf.handle == "ABBA", "should have the original handle"
assert (
e.dxf.handle in doc.entitydb
), "should be stored in the entity database"
def test_bind_dead_entity_to_doc(doc):
e = factory.new("POINT")
e.destroy()
with pytest.raises(AssertionError):
factory.bind(e, doc)
def test_is_bound_true(doc):
e = factory.new("POINT")
factory.bind(e, doc)
assert factory.is_bound(e, doc) is True
assert e.is_bound is True
def test_is_bound_false(doc):
e = factory.new("POINT")
assert factory.is_bound(e, doc) is False
assert e.is_bound is False
def test_if_destroyed_entity_is_bound(doc):
e = factory.new("POINT")
factory.bind(e, doc)
e.destroy()
assert factory.is_bound(e, doc) is False
assert e.is_bound is False
def test_create_db_entry(doc):
e = factory.create_db_entry("POINT", {}, doc)
assert e.doc is doc
assert e.dxf.handle is not None, "should have a handle"
assert (
e.dxf.handle in doc.entitydb
), "should be stored in the entity database"
assert e.dxf.owner is None, "should not be linked to a layout or owner"
assert e.is_virtual is False, "is not a virtual entity"
def test_unbind_bound_entity(doc):
e = factory.create_db_entry("POINT", {}, doc)
doc.modelspace().add_entity(e)
factory.unbind(e)
assert e.is_alive, "should not be destroyed"
assert e.is_virtual, "should be virtual entity"
assert e.doc is None
assert e.dxf.owner is None
assert e.dxf.handle is None
def test_unbind_unbound_entity(doc):
e = factory.new("POINT")
# should not raise an exception
factory.unbind(e)
assert e.is_alive, "should not be destroyed"
def test_unbind_destroyed_entity(doc):
e = factory.new("POINT")
e.destroy()
# should not raise an exception
factory.unbind(e)
assert e.is_alive is False
|
examples/NCF/data_utils.py | jessezbj/adaptdl | 294 | 12640350 | <reponame>jessezbj/adaptdl
# Code adapted from https://github.com/guoyang9/NCF
import numpy as np
import pandas as pd
import scipy.sparse as sp
import os
import torch.utils.data as data
import hashlib
import adaptdl.env
from urllib.request import urlretrieve
base_url = "https://raw.githubusercontent.com/hexiangnan/neural_collaborative_filtering/312aff0ee6f4df0ba60041329a74ed29bc7ac4b4/Data/"
def download_all(root_path, dataset="ml-1m"):
datafiles = ['{}.{}'.format(dataset, suffix) for suffix in
['train.rating', 'test.rating', 'test.negative']]
for filename in datafiles:
local_filename = os.path.join(root_path, filename)
# Todo: verify file
if not os.path.exists(local_filename):
print(base_url + filename, local_filename)
urlretrieve(base_url + filename, local_filename)
def load_all(root_path, train_rating, test_negative, dataset, test_num=100):
""" We load all the three file here to save time in each epoch. """
download_all(root_path, dataset)
train_data = pd.read_csv(
train_rating,
sep='\t', header=None, names=['user', 'item'],
usecols=[0, 1], dtype={0: np.int32, 1: np.int32})
user_num = train_data['user'].max() + 1
item_num = train_data['item'].max() + 1
train_data = train_data.values.tolist()
# load ratings as a dok matrix
train_mat = sp.dok_matrix((user_num, item_num), dtype=np.float32)
for x in train_data:
train_mat[x[0], x[1]] = 1.0
test_data = []
with open(test_negative, 'r') as fd:
line = fd.readline()
while line != None and line != '':
arr = line.split('\t')
u = eval(arr[0])[0]
test_data.append([u, eval(arr[0])[1]])
for i in arr[1:]:
test_data.append([u, int(i)])
line = fd.readline()
return train_data, test_data, user_num, item_num, train_mat
class NCFData(data.Dataset):
def __init__(self, features,
num_item, train_mat=None, num_ng=0, is_training=None):
super(NCFData, self).__init__()
""" Note that the labels are only useful when training, we thus
add them in the ng_sample() function.
"""
self.features_ps = features
self.num_item = num_item
self.train_mat = train_mat
self.num_ng = num_ng
self.is_training = is_training
self.labels = [0 for _ in range(len(features))]
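    # Pair every positive (user, item) example with num_ng randomly drawn items the
    # user has not interacted with, producing the negative samples for training.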
def ng_sample(self):
assert self.is_training, 'no need to sampling when testing'
self.features_ng = []
for x in self.features_ps:
u = x[0]
for t in range(self.num_ng):
j = np.random.randint(self.num_item)
while (u, j) in self.train_mat:
j = np.random.randint(self.num_item)
self.features_ng.append([u, j])
labels_ps = [1 for _ in range(len(self.features_ps))]
labels_ng = [0 for _ in range(len(self.features_ng))]
self.features_fill = self.features_ps + self.features_ng
self.labels_fill = labels_ps + labels_ng
def __len__(self):
return (self.num_ng + 1) * len(self.labels)
def __getitem__(self, idx):
features = self.features_fill if self.is_training \
else self.features_ps
labels = self.labels_fill if self.is_training \
else self.labels
user = features[idx][0]
item = features[idx][1]
label = labels[idx]
        return user, item, label
|
alipay/aop/api/domain/BizActionConsumedAmountsDTO.py | antopen/alipay-sdk-python-all | 213 | 12640355 | <reponame>antopen/alipay-sdk-python-all<gh_stars>100-1000
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
from alipay.aop.api.domain.BizActionConsumedAmountDTO import BizActionConsumedAmountDTO
from alipay.aop.api.domain.BizActionComsumedAmountDTO import BizActionComsumedAmountDTO
class BizActionConsumedAmountsDTO(object):
def __init__(self):
self._biz_action_consumed_amount_list = None
self._biz_action_consumed_amounts = None
self._biz_uk_id = None
@property
def biz_action_consumed_amount_list(self):
return self._biz_action_consumed_amount_list
@biz_action_consumed_amount_list.setter
def biz_action_consumed_amount_list(self, value):
if isinstance(value, list):
self._biz_action_consumed_amount_list = list()
for i in value:
if isinstance(i, BizActionConsumedAmountDTO):
self._biz_action_consumed_amount_list.append(i)
else:
self._biz_action_consumed_amount_list.append(BizActionConsumedAmountDTO.from_alipay_dict(i))
@property
def biz_action_consumed_amounts(self):
return self._biz_action_consumed_amounts
@biz_action_consumed_amounts.setter
def biz_action_consumed_amounts(self, value):
if isinstance(value, list):
self._biz_action_consumed_amounts = list()
for i in value:
if isinstance(i, BizActionComsumedAmountDTO):
self._biz_action_consumed_amounts.append(i)
else:
self._biz_action_consumed_amounts.append(BizActionComsumedAmountDTO.from_alipay_dict(i))
@property
def biz_uk_id(self):
return self._biz_uk_id
@biz_uk_id.setter
def biz_uk_id(self, value):
self._biz_uk_id = value
def to_alipay_dict(self):
params = dict()
if self.biz_action_consumed_amount_list:
if isinstance(self.biz_action_consumed_amount_list, list):
for i in range(0, len(self.biz_action_consumed_amount_list)):
element = self.biz_action_consumed_amount_list[i]
if hasattr(element, 'to_alipay_dict'):
self.biz_action_consumed_amount_list[i] = element.to_alipay_dict()
if hasattr(self.biz_action_consumed_amount_list, 'to_alipay_dict'):
params['biz_action_consumed_amount_list'] = self.biz_action_consumed_amount_list.to_alipay_dict()
else:
params['biz_action_consumed_amount_list'] = self.biz_action_consumed_amount_list
if self.biz_action_consumed_amounts:
if isinstance(self.biz_action_consumed_amounts, list):
for i in range(0, len(self.biz_action_consumed_amounts)):
element = self.biz_action_consumed_amounts[i]
if hasattr(element, 'to_alipay_dict'):
self.biz_action_consumed_amounts[i] = element.to_alipay_dict()
if hasattr(self.biz_action_consumed_amounts, 'to_alipay_dict'):
params['biz_action_consumed_amounts'] = self.biz_action_consumed_amounts.to_alipay_dict()
else:
params['biz_action_consumed_amounts'] = self.biz_action_consumed_amounts
if self.biz_uk_id:
if hasattr(self.biz_uk_id, 'to_alipay_dict'):
params['biz_uk_id'] = self.biz_uk_id.to_alipay_dict()
else:
params['biz_uk_id'] = self.biz_uk_id
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = BizActionConsumedAmountsDTO()
if 'biz_action_consumed_amount_list' in d:
o.biz_action_consumed_amount_list = d['biz_action_consumed_amount_list']
if 'biz_action_consumed_amounts' in d:
o.biz_action_consumed_amounts = d['biz_action_consumed_amounts']
if 'biz_uk_id' in d:
o.biz_uk_id = d['biz_uk_id']
return o
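
# --- Hedged usage sketch (editor addition; the field values are placeholders) ---
# Round trip: build the DTO from a plain dict (as received off the wire) and
# serialize it back with to_alipay_dict().
if __name__ == "__main__":
    demo = BizActionConsumedAmountsDTO.from_alipay_dict({
        "biz_uk_id": "demo-uk-id",
        "biz_action_consumed_amount_list": [],
        "biz_action_consumed_amounts": [],
    })
    print(demo.to_alipay_dict())   # {'biz_uk_id': 'demo-uk-id'}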
|
Python/Algorithm/SumDigits.py | piovezan/SOpt | 148 | 12640370 | <filename>Python/Algorithm/SumDigits.py
def sum_digits(digit):
return sum(int(x) for x in digit if x.isdigit())
print(sum_digits('texto123numero456x7'))
#https://pt.stackoverflow.com/q/42280/101
|
mongodb_consistent_backup/Replication/Replset.py | akira-kurogane/mongodb_consistent_backup | 282 | 12640379 | <reponame>akira-kurogane/mongodb_consistent_backup<filename>mongodb_consistent_backup/Replication/Replset.py
import logging
import pymongo.errors
from math import ceil
from time import mktime
from mongodb_consistent_backup.Common import DB, MongoUri, parse_read_pref_tags
from mongodb_consistent_backup.Errors import Error, OperationError
class Replset:
def __init__(self, config, db):
self.config = config
self.db = db
self.read_pref_tags = self.config.replication.read_pref_tags
self.max_lag_secs = self.config.replication.max_lag_secs
self.min_priority = self.config.replication.min_priority
self.max_priority = self.config.replication.max_priority
self.hidden_only = self.config.replication.hidden_only
self.preferred_members = []
if self.config.replication.preferred_members:
self.preferred_members = self.config.replication.preferred_members.split(",")
logging.debug("Preferred members: %s" % self.preferred_members)
self.state_primary = 1
self.state_secondary = 2
self.state_arbiter = 7
self.hidden_weight = 0.20
self.pri0_weight = 0.10
self.replset = True
self.rs_config = None
self.rs_status = None
self.primary = None
self.secondary = None
self.mongo_config = None
self.replset_summary = {}
# Get a DB connection
try:
if isinstance(self.db, DB):
self.connection = self.db.connection()
else:
raise Error("'db' field is not an instance of class: 'DB'!")
except Exception, e:
logging.fatal("Could not get DB connection! Error: %s" % e)
raise OperationError(e)
def close(self):
pass
def summary(self):
return self.replset_summary
def get_rs_status(self, force=False, quiet=False):
try:
if force or not self.rs_status:
self.rs_status = self.db.admin_command('replSetGetStatus', quiet)
self.replset_summary['status'] = self.rs_status
return self.rs_status
except Exception, e:
logging.fatal("Error getting replica set status! Error: %s" % e)
raise OperationError(e)
def get_rs_config(self, force=False, quiet=False):
if force or not self.rs_config:
try:
if self.db.server_version() >= tuple("3.0.0".split(".")):
output = self.db.admin_command('replSetGetConfig', quiet)
self.rs_config = output['config']
else:
self.rs_config = self.connection['local'].system.replset.find_one()
self.replset_summary['config'] = self.rs_config
except pymongo.errors.OperationFailure, e:
raise OperationError("Error getting replica set config! Error: %s" % e)
return self.rs_config
def get_rs_config_member(self, member, force=False, quiet=False):
rs_config = self.get_rs_config(force, quiet)
if 'name' in member:
for cnf_member in rs_config['members']:
if member['name'] == cnf_member['host']:
return cnf_member
raise OperationError("Member does not exist in mongo config!")
def get_rs_name(self):
return self.get_rs_status()['set']
def get_mongo_config(self, force=False, quiet=False):
try:
if force or not self.mongo_config:
cmdline_opts = self.db.admin_command('getCmdLineOpts', quiet)
if 'parsed' in cmdline_opts:
self.mongo_config = cmdline_opts['parsed']
self.replset_summary['mongo_config'] = self.mongo_config
return self.mongo_config
except pymongo.errors.OperationFailure, e:
raise OperationError("Error getting mongo config! Error: %s" % e)
def get_mongo_config_member(self, member, force=False, quiet=False):
rs_config = self.get_mongo_config(force, quiet)
if 'members' in rs_config and 'name' in member:
for cnf_member in rs_config:
if member['name'] == cnf_member['name']:
return cnf_member
raise OperationError("Member does not exist in mongo config!")
def get_repl_op_lag(self, rs_status, rs_member):
op_lag = 0
if 'date' in rs_status and 'lastHeartbeat' in rs_member:
op_lag = mktime(rs_status['date'].timetuple()) - mktime(rs_member['lastHeartbeat'].timetuple())
return op_lag
def get_repl_lag(self, rs_member):
rs_status = self.get_rs_status(False, True)
self.find_primary(False, True)
member_optime_ts = rs_member['optime']
primary_optime_ts = self.primary_optime(False, True)
if isinstance(rs_member['optime'], dict) and 'ts' in rs_member['optime']:
member_optime_ts = rs_member['optime']['ts']
op_lag = self.get_repl_op_lag(rs_status, rs_member)
rep_lag = (primary_optime_ts.time - member_optime_ts.time) - op_lag
if rep_lag < 0:
rep_lag = 0
return rep_lag, member_optime_ts
def get_electable_members(self, force=False):
electable = []
rs_config = self.get_rs_config(force, True)
for member in rs_config['members']:
if 'arbiterOnly' in member and member['arbiterOnly'] is True:
continue
elif 'priority' in member and member['priority'] == 0:
continue
electable.append(member)
return electable
def get_rs_quorum(self):
electable_members = len(self.get_electable_members())
return ceil(electable_members / 2.0)
def is_member_electable(self, member):
for electable_member in self.get_electable_members():
if member == electable_member:
return True
return False
def has_read_pref_tags(self, member_config):
if "tags" not in member_config:
raise OperationError("Member config has no 'tags' field!")
tags = parse_read_pref_tags(self.read_pref_tags)
member_tags = member_config["tags"]
for key in tags:
if key not in member_tags:
return False
if member_tags[key] != tags[key]:
return False
return True
def find_primary(self, force=False, quiet=False):
if force or not self.primary:
rs_status = self.get_rs_status(force, quiet)
rs_name = rs_status['set']
for member in rs_status['members']:
if member['state'] == self.state_primary and member['health'] > 0:
member_uri = MongoUri(member['name'], 27017, rs_name)
optime_ts = member['optime']
if isinstance(member['optime'], dict) and 'ts' in member['optime']:
optime_ts = member['optime']['ts']
if quiet is False or not self.primary:
logging.info("Found PRIMARY: %s with optime %s" % (
member_uri,
str(optime_ts)
))
self.primary = {
'uri': member_uri,
'optime': optime_ts
}
self.replset_summary['primary'] = {"member": member, "uri": member_uri.str()}
if self.primary is None:
logging.error("Unable to locate a PRIMARY member for replset %s, giving up" % rs_name)
raise OperationError("Unable to locate a PRIMARY member for replset %s, giving up" % rs_name)
return self.primary
def find_secondary(self, force=False, quiet=False):
rs_status = self.get_rs_status(force, quiet)
self.get_rs_config(force, quiet)
self.get_mongo_config(force, quiet)
quorum = self.get_rs_quorum()
rs_name = rs_status['set']
if self.secondary and not force:
return self.secondary
electable_count = 0
for member in rs_status['members']:
member_uri = MongoUri(member['name'], 27017, rs_name)
member_config = self.get_rs_config_member(member)
if self.is_member_electable(member_config):
electable_count += 1
if member['state'] == self.state_arbiter:
logging.info("Found ARBITER %s, skipping" % member_uri)
elif member['state'] > self.state_secondary:
logging.warning("Found down or unhealthy SECONDARY %s with state: %s" % (member_uri, member['stateStr']))
elif member['state'] == self.state_secondary and member['health'] > 0:
log_data = {}
score = self.max_lag_secs * 10
score_scale = 100.00 / float(score)
priority = 0
if self.read_pref_tags and not self.has_read_pref_tags(member_config):
logging.info("Found SECONDARY %s without read preference tags: %s, skipping" % (
member_uri,
parse_read_pref_tags(self.read_pref_tags)
))
continue
if 'hidden' in member_config and member_config['hidden']:
score += (score * self.hidden_weight)
log_data['hidden'] = True
if 'priority' in member_config:
priority = int(member_config['priority'])
log_data['priority'] = priority
if member_config['priority'] > 1:
score -= priority - 1
elif member_config['priority'] == 0:
score += (score * self.pri0_weight)
if priority < self.min_priority or priority > self.max_priority:
logging.info("Found SECONDARY %s with out-of-bounds priority! Skipping" % member_uri)
continue
elif self.hidden_only and 'hidden' not in log_data:
logging.info("Found SECONDARY %s that is non-hidden and hidden-only mode is enabled! Skipping" % member_uri)
continue
if member_uri.str() in self.preferred_members:
logging.info("Bumping preferred SECONDARY member %s's score", member_uri)
score = 10000
rep_lag, optime_ts = self.get_repl_lag(member)
score = ceil((score - rep_lag) * score_scale)
if rep_lag < self.max_lag_secs:
if self.secondary is None or score > self.secondary['score']:
self.secondary = {
'replSet': rs_name,
'uri': member_uri,
'optime': optime_ts,
'score': score
}
log_msg = "Found SECONDARY %s" % member_uri
else:
log_msg = "Found SECONDARY %s with too high replication lag! Skipping" % member_uri
if self.secondary is not None and self.secondary['score'] == 0:
logging.error("Chosen SECONDARY %s has a score of zero/0! This is unexpected, exiting" % member_uri)
raise OperationError("Chosen SECONDARY %s has a score of zero/0!" % member_uri)
if 'configsvr' in rs_status and rs_status['configsvr']:
log_data['configsvr'] = True
log_data['lag'] = rep_lag
log_data['optime'] = optime_ts
log_data['score'] = int(score)
logging.info("%s: %s" % (log_msg, str(log_data)))
self.replset_summary['secondary'] = {"member": member, "uri": member_uri.str(), "data": log_data}
if self.secondary is None or electable_count < quorum:
logging.error("Not enough valid secondaries in replset %s to take backup! Num replset electable members: %i, required quorum: %i" % (
rs_name,
electable_count,
quorum
))
raise OperationError("Not enough secondaries in replset %s to safely take backup!" % rs_name)
logging.info("Choosing SECONDARY %s for replica set %s (score: %i)" % (self.secondary['uri'], rs_name, self.secondary['score']))
return self.secondary
def primary_optime(self, force=False, quiet=False):
rs_primary = self.find_primary(force, quiet)
if 'optime' in rs_primary:
return rs_primary['optime']
|
machin/parallel/assigner.py | lorenzosteccanella/machin | 287 | 12640380 | <filename>machin/parallel/assigner.py
from typing import Union, List, Dict, Tuple
from machin.utils.logging import default_logger
import psutil
import GPUtil
import numpy as np
import torch as t
import torch.nn as nn
class ModelSizeEstimator:
"""
Size estimator for pytorch modules.
"""
def __init__(self, model: nn.Module, size_multiplier=2):
"""
Estimates the size of PyTorch models in memory.
Note:
This estimator can only estimate the total size of parameters and
buffers. Therefore we need to multiply the raw estimated size with
a correction coefficient to reserve enough space for models.
Args:
model: Model to be estimated.
size_multiplier: Model estimated size will be
multiplied with this value, to ensure enough space
will be reserved to contain your model and inputs.
"""
self.model = model
self.size_multiplier = size_multiplier
self.sizes = {}
self.dtype_sizes = {}
def get_parameter_sizes(self) -> float:
"""Get sizes of all parameters in ``model`` in mega bytes."""
sizes, dtype_sizes = [], []
for param in self.model.parameters():
sizes.append(np.array(param.shape))
dtype_sizes.append(self._get_dtype_in_bytes(param.dtype))
self.sizes["param"] = sizes
self.dtype_sizes["param"] = dtype_sizes
return float(
np.sum(
np.array([np.prod(s) for s in self.sizes["param"]])
* np.array(self.dtype_sizes["param"])
)
) / (1024 ** 2)
def get_buffer_sizes(self) -> float:
"""Get sizes of all buffers in ``model`` in mega bytes."""
sizes, dtype_sizes = [], []
for buffer in self.model.buffers():
sizes.append(np.array(buffer.shape))
dtype_sizes.append(self._get_dtype_in_bytes(buffer.dtype))
self.sizes["buffer"] = sizes
self.dtype_sizes["buffer"] = dtype_sizes
return float(
np.sum(
np.array([np.prod(s) for s in self.sizes["buffer"]])
* np.array(self.dtype_sizes["buffer"])
)
) / (1024 ** 2)
def estimate_size(self):
"""Estimate model size in memory in megabytes."""
total = self.get_parameter_sizes() + self.get_buffer_sizes()
return total * self.size_multiplier
@staticmethod
def _get_dtype_in_bytes(dtype: t.dtype):
if dtype in (t.int8, t.uint8, t.bool):
return 1
elif dtype in (t.int16, t.float16, t.short, t.half):
return 2
elif dtype in (t.int32, t.float32, t.int, t.float):
return 4
elif dtype in (t.int64, t.float64, t.long, t.double, t.complex32):
return 8
else: # pragma: no cover
raise ValueError("Invalid data type.")
class ModelAssigner:
"""
Assigner for pytorch modules.
"""
def __init__(
self,
models: List[nn.Module],
model_connection: Dict[Tuple[int, int], int],
devices: List[Union[t.device, str]] = None,
model_size_multiplier=2,
max_mem_ratio=0.5,
cpu_weight=0,
connection_weight=2,
size_match_weight=1e-2,
complexity_match_weight=1,
entropy_weight=1,
iterations=500,
update_rate=0.01,
gpu_gpu_distance=1,
cpu_gpu_distance=10,
move_models=True,
):
"""
Assign models to different devices. In the scope of a single process.
Assigner assumes all GPUs have the **same processing power**.
Assignment is based on four aspects:
1. Distance and model connections. Connection is usually indicated
by the amount of data transmitted between two models.
2. Compute complexity.
3. Model size.
4. Entropy.
Four aspects are controlled by four weights:
1. ``connection_weight``, assigner will try to reduce the total
``distance * connection`` if this weight is larger.
2. ``size_match_weight``, this weight controls the total memory
space used on a single device, only works if total assigned
memory of models exceeds allowed device memory size
(internally it uses a relu activation), the larger,
the tighter and more restricted the fit.
        3. ``complexity_match_weight``, this weight balances the model
computation cost across devices, assigner will try to even
the ``computation cost / compute power`` ratio for each device
if this weight is larger.
        4. ``entropy_weight``, this weight minimizes the uncertainty of
model placement probability, so ``model i`` will have a close to 1
probability of locating on some ``device j`` if this weight is
larger.
Assignment uses gradient descent to compute the probability matrix
of each ``model i`` locating on each available ``device j``.
See Also:
:class:`.ModelSizeEstimator`
Note:
When the sum of your model size is very close to the capacity of
your device memory, `ModelAssigner` does not respond very well
to the ``size_match_weight``, therefore, please consider about
increasing ``model_size_multiplier`` or decreasing
``max_mem_ratio``.
Args:
models: Models to assign.
model_connection: Connection weight between modules.
**Must be positive**
devices: Available devices.
model_size_multiplier: Size multiplier of models, used to reserve
enough space for models,
max_mem_ratio: Maximum percent of memory allowed.
cpu_weight: Weight of cpu. Relative to the computing power of one
GPU. By default it is 0 so no computation will be performed on
CPU. **Must be positive**
connection_weight: Weight of connection between models.
size_match_weight: Weight of size match.
complexity_match_weight: Weight of complexity match.
entropy_weight: Weight of entropy.
iterations: Number of optimization iterations.
update_rate: Learning rate of the adam optimizer.
gpu_gpu_distance: Estimated distance cost between gpu-gpu.
**Must be positive**
cpu_gpu_distance: Estimated distance cost between cpu-gpu.
**Must be positive**
move_models: Whether to automatically move the models after
assignment.
"""
if devices is None:
devices = [
t.device(type="cuda", index=i)
for i in GPUtil.getAvailable(order="load")
]
else:
devices = [t.device(d) for d in devices]
available_devices = [
t.device(type="cuda", index=i)
for i in GPUtil.getAvailable(order="load")
]
used_devices = []
for dev in devices:
if dev.type == "cuda" and dev not in available_devices:
default_logger.info(
f"Warning: device {dev} not available, removed."
)
else:
used_devices.append(dev)
devices = used_devices
if not devices:
devices = [t.device("cpu")]
default_logger.info(f"Using these devices: {devices}")
sizes = [
ModelSizeEstimator(model, model_size_multiplier).estimate_size()
for model in models
]
device_size_capacity = []
device_complexity_capacity = []
gpus = GPUtil.getGPUs()
for dev in devices:
if dev.type == "cpu":
device_size_capacity.append(
int(psutil.virtual_memory().available / 1024 ** 2) * max_mem_ratio
)
device_complexity_capacity.append(cpu_weight)
elif dev.type == "cuda":
device_size_capacity.append(gpus[dev.index].memoryFree * max_mem_ratio)
device_complexity_capacity.append(1 - gpus[dev.index].load)
if np.sum(np.array(sizes)) > np.sum(device_size_capacity):
raise RuntimeError(
f"Estimated model will use {np.sum(np.array(sizes)):.2f} MB, "
f"but only have {np.sum(device_size_capacity):.2f} MB allowed memory "
"in total."
)
# assign model to devices
        # using heuristics and gradient descent
device_num = len(devices)
model_num = len(models)
# Important, the placement probability matrix! this matrix
# describes the probability of placement of:
# model i on device j
placement = t.randn([model_num, device_num], requires_grad=True)
optimizer = t.optim.Adam([placement], lr=update_rate)
model_size = t.tensor(sizes, dtype=t.float).view([1, model_num])
size_capacity = t.tensor(device_size_capacity, dtype=t.float).view(
[1, device_num]
)
model_complexity = model_size
# complexity_capacity is basically the estimated computing power
# of devices.
complexity_capacity = t.tensor(device_complexity_capacity, dtype=t.float).view(
[1, device_num]
)
# model connection indicates the amount of data transmitted between
# each pair of models, a weighted adjacency matrix.
model_conn = t.zeros([model_num, model_num])
for direction, conn in model_connection.items():
model_conn[direction[0], direction[1]] = conn
# device distance matrix
device_distance = t.zeros([device_num, device_num])
for i in range(device_num):
for j in range(i):
if (
devices[i].type == "cpu"
and devices[j].type == "cuda"
or devices[i].type == "cuda"
and devices[j].type == "cpu"
):
device_distance[i, j] = device_distance[j, i] = cpu_gpu_distance
elif (
devices[i].type == "cuda"
and devices[j].type == "cuda"
and devices[i].index != devices[j].index
):
device_distance[i, j] = device_distance[j, i] = gpu_gpu_distance
# optimize
for _ in range(iterations):
self.optimize_placement(
optimizer,
placement,
model_size,
size_capacity,
model_complexity,
complexity_capacity,
model_conn,
device_distance,
connection_weight,
size_match_weight,
complexity_match_weight,
entropy_weight,
)
self._assignment = [devices[d] for d in t.argmax(placement, dim=1).tolist()]
if move_models:
for model, ass_device in zip(models, self._assignment):
model.to(ass_device)
@property
def assignment(self):
"""
List[t.device]:
Assigned devices for each model in your model list.
"""
return self._assignment
@staticmethod
def optimize_placement(
optimizer,
placement: t.Tensor,
model_size: t.Tensor,
size_capacity: t.Tensor,
model_complexity: t.Tensor,
complexity_capacity: t.Tensor,
model_connection: t.Tensor,
device_distance: t.Tensor,
connection_weight: float,
size_match_weight: float,
complexity_match_weight: float,
entropy_weight: float,
):
"""
Suppose there are n models to place and m devices available.
Args:
optimizer: optimizer of placement
placement: shape ``[n, m]``
model_size: shape ``[1, n]``
size_capacity: shape ``[1, m]``
model_complexity: shape ``[1, n]``
complexity_capacity: shape ``[1, m]``
model_connection: shape ``[n, n]``
device_distance: shape ``[m, m]``
connection_weight: Weight of connection between models.
size_match_weight: Weight of size match.
complexity_match_weight: Weight of complexity match.
entropy_weight: weight of entropy.
"""
placement = t.softmax(placement, dim=-1)
model_num = placement.shape[0]
norm_model_conn = model_connection / t.sum(model_connection)
norm_dev_dist = device_distance / t.sum(device_distance)
model_distance = t.einsum("ij,mn,jn->im", placement, placement, norm_dev_dist)
# model distance to itself is 0
model_distance[np.arange(model_num), np.arange(model_num)] = 0
connection_cost = norm_model_conn * model_distance
# sum(model size) < capacity
size_match_cost = t.relu(
t.einsum("ij,jk->ik", model_size, placement) - size_capacity
)
# match computing power percent
norm_model_cmplx = model_complexity / t.sum(model_complexity)
norm_cmplx_capacity = complexity_capacity / t.sum(complexity_capacity)
cmplx_match_cost = (
t.einsum("ij,jk->ik", norm_model_cmplx, placement) - norm_cmplx_capacity
) ** 2
        # entropy loss: prevents the placement probability from diffusing over devices
entropy_cost = placement * placement.log()
tmp = t.zeros_like(placement)
entropy_cost = -t.where(placement > 0, entropy_cost, tmp).sum(dim=-1)
total_cost = (
t.mean(connection_cost) * connection_weight
+ t.mean(size_match_cost) * size_match_weight
+ t.mean(cmplx_match_cost) * complexity_match_weight
+ t.mean(entropy_cost) * entropy_weight
)
optimizer.zero_grad()
total_cost.backward()
optimizer.step()
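
# --- Hedged usage sketch (editor addition; assumes two visible CUDA devices) ---
# Typical call site: a pair of connected models and an explicit device list.
# With move_models=True (the default) the models are moved for you.
#
#     assigner = ModelAssigner(
#         [encoder, decoder],                 # any nn.Module instances
#         model_connection={(0, 1): 10},      # heavy encoder -> decoder traffic
#         devices=["cuda:0", "cuda:1"],
#     )
#     print(assigner.assignment)              # e.g. [device('cuda:0'), device('cuda:1')]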
|
recipes/Python/579035_UNIXlike_which/recipe-579035.py | tdiprima/code | 2,023 | 12640392 | from __future__ import print_function
# which.py
# A minimal version of the UNIX which utility, in Python.
# Author: <NAME> - www.dancingbison.com
# Copyright 2015 <NAME> - http://www.dancingbison.com
import sys
import os
import os.path
import stat
def usage():
sys.stderr.write("Usage: python which.py name\n")
sys.stderr.write("or: which.py name\n")
def which(name):
found = 0
for path in os.getenv("PATH").split(os.path.pathsep):
full_path = path + os.sep + name
if os.path.exists(full_path):
"""
if os.stat(full_path).st_mode & stat.S_IXUSR:
found = 1
print(full_path)
"""
found = 1
print(full_path)
# Return a UNIX-style exit code so it can be checked by calling scripts.
    # sys.exit(1 - found) maps found=1 to exit status 0 (success) and found=0 to 1 (not found).
sys.exit(1 - found)
def main():
if len(sys.argv) != 2:
usage()
sys.exit(1)
which(sys.argv[1])
if "__main__" == __name__:
main()
|
docs/ssi_server.py | isabella232/dygraphs | 1,843 | 12640415 | #!/usr/bin/python
'''
Use this in the same way as Python's SimpleHTTPServer:
./ssi_server.py [port]
The only difference is that, for files ending in '.html', ssi_server will
inline SSI (Server Side Includes) of the form:
<!-- #include virtual="fragment.html" -->
Run ./ssi_server.py in this directory and visit localhost:8000 for an example.
'''
import os
import ssi
from SimpleHTTPServer import SimpleHTTPRequestHandler
import SimpleHTTPServer
import tempfile
class SSIRequestHandler(SimpleHTTPRequestHandler):
"""Adds minimal support for <!-- #include --> directives.
The key bit is translate_path, which intercepts requests and serves them
using a temporary file which inlines the #includes.
"""
def __init__(self, request, client_address, server):
self.temp_files = []
SimpleHTTPRequestHandler.__init__(self, request, client_address, server)
def do_GET(self):
SimpleHTTPRequestHandler.do_GET(self)
self.delete_temp_files()
def do_HEAD(self):
SimpleHTTPRequestHandler.do_HEAD(self)
self.delete_temp_files()
def translate_path(self, path):
fs_path = SimpleHTTPRequestHandler.translate_path(self, path)
if self.path.endswith('/'):
for index in "index.html", "index.htm":
index = os.path.join(fs_path, index)
if os.path.exists(index):
fs_path = index
break
if fs_path.endswith('.html'):
content = ssi.InlineIncludes(fs_path)
fs_path = self.create_temp_file(fs_path, content)
return fs_path
def delete_temp_files(self):
for temp_file in self.temp_files:
os.remove(temp_file)
def create_temp_file(self, original_path, content):
_, ext = os.path.splitext(original_path)
fd, path = tempfile.mkstemp(suffix=ext)
os.write(fd, content)
os.close(fd)
self.temp_files.append(path)
return path
if __name__ == '__main__':
SimpleHTTPServer.test(HandlerClass=SSIRequestHandler)
|
src/helper_sent.py | kaue/PyBitmessage | 1,583 | 12640416 | <gh_stars>1000+
"""
Insert values into sent table
"""
import time
import uuid
from addresses import decodeAddress
from bmconfigparser import BMConfigParser
from helper_ackPayload import genAckPayload
from helper_sql import sqlExecute
# pylint: disable=too-many-arguments
def insert(msgid=None, toAddress='[Broadcast subscribers]', fromAddress=None, subject=None,
message=None, status='msgqueued', ripe=None, ackdata=None, sentTime=None,
lastActionTime=None, sleeptill=0, retryNumber=0, encoding=2, ttl=None, folder='sent'):
"""Perform an insert into the `sent` table"""
# pylint: disable=unused-variable
# pylint: disable-msg=too-many-locals
valid_addr = True
if not ripe or not ackdata:
addr = fromAddress if toAddress == '[Broadcast subscribers]' else toAddress
new_status, addressVersionNumber, streamNumber, new_ripe = decodeAddress(addr)
        valid_addr = (new_status == 'success')
if not ripe:
ripe = new_ripe
if not ackdata:
stealthLevel = BMConfigParser().safeGetInt(
'bitmessagesettings', 'ackstealthlevel')
new_ackdata = genAckPayload(streamNumber, stealthLevel)
ackdata = new_ackdata
if valid_addr:
msgid = msgid if msgid else uuid.uuid4().bytes
sentTime = sentTime if sentTime else int(time.time()) # sentTime (this doesn't change)
lastActionTime = lastActionTime if lastActionTime else int(time.time())
ttl = ttl if ttl else BMConfigParser().getint('bitmessagesettings', 'ttl')
t = (msgid, toAddress, ripe, fromAddress, subject, message, ackdata,
sentTime, lastActionTime, sleeptill, status, retryNumber, folder,
encoding, ttl)
sqlExecute('''INSERT INTO sent VALUES (?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)''', *t)
return ackdata
else:
return None
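
# --- Hedged usage sketch (editor addition; the addresses below are placeholders) ---
# insert() validates the address, fills in ripe/ackdata when not supplied and
# queues the message; it returns the ackdata on success or None when the
# address fails to decode.
#
#     ackdata = insert(toAddress='BM-placeholderRecipient',
#                      fromAddress='BM-placeholderSender',
#                      subject='hello', message='test message')
#     if ackdata is None:
#         pass  # invalid address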
|
BitTornado/clock.py | crossbrowsertesting/BitTornado | 116 | 12640459 | <filename>BitTornado/clock.py
"""Provide a non-decreasing clock() function.
In Windows, time.clock() provides number of seconds from first call, so use
that.
In Unix, time.clock() is CPU time, and time.time() reports system time, which
may not be non-decreasing."""
import time
import sys
_MAXFORWARD = 100
_FUDGE = 1
class RelativeTime(object): # pylint: disable=R0903
"""Non-decreasing time implementation for Unix"""
def __init__(self):
self.time = time.time()
self.offset = 0
def get_time(self):
"""Calculate a non-decreasing time representation"""
systemtime = time.time()
now = systemtime + self.offset
if self.time < now < self.time + _MAXFORWARD:
self.time = now
else:
# If time jump is outside acceptable bounds, move ahead one second
# and note the offset
self.time += _FUDGE
self.offset = self.time - systemtime
return self.time
if sys.platform != 'win32':
clock = RelativeTime().get_time # pylint: disable=C0103
else:
from time import clock
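
# --- Hedged usage sketch (editor addition) ---
# clock() never decreases within a process, even if the system clock is
# adjusted. (On Windows this module falls back to time.clock, which was
# removed in Python 3.8.)
if __name__ == "__main__":
    _t0 = clock()
    _t1 = clock()
    assert _t1 >= _t0
    print(_t0, _t1)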
|
esphome/components/fastled_clockless/light.py | OttoWinter/esphomeyaml | 249 | 12640468 | <filename>esphome/components/fastled_clockless/light.py
import esphome.codegen as cg
import esphome.config_validation as cv
from esphome import pins
from esphome.components import fastled_base
from esphome.const import CONF_CHIPSET, CONF_NUM_LEDS, CONF_PIN, CONF_RGB_ORDER
AUTO_LOAD = ["fastled_base"]
CHIPSETS = [
"NEOPIXEL",
"TM1829",
"TM1809",
"TM1804",
"TM1803",
"UCS1903",
"UCS1903B",
"UCS1904",
"UCS2903",
"WS2812",
"WS2852",
"WS2812B",
"SK6812",
"SK6822",
"APA106",
"PL9823",
"WS2811",
"WS2813",
"APA104",
"WS2811_400",
"GW6205",
"GW6205_400",
"LPD1886",
"LPD1886_8BIT",
"SM16703",
]
def _validate(value):
if value[CONF_CHIPSET] == "NEOPIXEL" and CONF_RGB_ORDER in value:
raise cv.Invalid("NEOPIXEL doesn't support RGB order")
return value
CONFIG_SCHEMA = cv.All(
fastled_base.BASE_SCHEMA.extend(
{
cv.Required(CONF_CHIPSET): cv.one_of(*CHIPSETS, upper=True),
cv.Required(CONF_PIN): pins.internal_gpio_output_pin_number,
}
),
_validate,
cv.require_framework_version(
esp8266_arduino=cv.Version(2, 7, 4),
esp32_arduino=cv.Version(99, 0, 0),
max_version=True,
extra_message="Please see note on documentation for FastLED",
),
)
async def to_code(config):
var = await fastled_base.new_fastled_light(config)
rgb_order = None
if CONF_RGB_ORDER in config:
rgb_order = cg.RawExpression(config[CONF_RGB_ORDER])
template_args = cg.TemplateArguments(
cg.RawExpression(config[CONF_CHIPSET]), config[CONF_PIN], rgb_order
)
cg.add(var.add_leds(template_args, config[CONF_NUM_LEDS]))
|
src/dnc/azext_dnc/vendored_sdks/dnc/aio/_dnc.py | haroonf/azure-cli-extensions | 207 | 12640475 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, Optional, TYPE_CHECKING
from azure.mgmt.core import AsyncARMPipelineClient
from msrest import Deserializer, Serializer
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from azure.core.credentials_async import AsyncTokenCredential
from ._configuration import DNCConfiguration
from .operations import ControllerOperations
from .operations import DelegatedNetworkOperations
from .operations import OrchestratorInstanceServiceOperations
from .operations import DelegatedSubnetServiceOperations
from .operations import Operations
from .. import models
class DNC(object):
"""DNC web api provides way to create, get and delete dnc controller.
:ivar controller: ControllerOperations operations
:vartype controller: dnc.aio.operations.ControllerOperations
:ivar delegated_network: DelegatedNetworkOperations operations
:vartype delegated_network: dnc.aio.operations.DelegatedNetworkOperations
:ivar orchestrator_instance_service: OrchestratorInstanceServiceOperations operations
:vartype orchestrator_instance_service: dnc.aio.operations.OrchestratorInstanceServiceOperations
:ivar delegated_subnet_service: DelegatedSubnetServiceOperations operations
:vartype delegated_subnet_service: dnc.aio.operations.DelegatedSubnetServiceOperations
:ivar operations: Operations operations
:vartype operations: dnc.aio.operations.Operations
:param credential: Credential needed for the client to connect to Azure.
:type credential: ~azure.core.credentials_async.AsyncTokenCredential
:param subscription_id: The ID of the target subscription.
:type subscription_id: str
:param str base_url: Service URL
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
"""
def __init__(
self,
credential: "AsyncTokenCredential",
subscription_id: str,
base_url: Optional[str] = None,
**kwargs: Any
) -> None:
if not base_url:
base_url = 'https://management.azure.com'
self._config = DNCConfiguration(credential, subscription_id, **kwargs)
self._client = AsyncARMPipelineClient(base_url=base_url, config=self._config, **kwargs)
client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)}
self._serialize = Serializer(client_models)
self._serialize.client_side_validation = False
self._deserialize = Deserializer(client_models)
self.controller = ControllerOperations(
self._client, self._config, self._serialize, self._deserialize)
self.delegated_network = DelegatedNetworkOperations(
self._client, self._config, self._serialize, self._deserialize)
self.orchestrator_instance_service = OrchestratorInstanceServiceOperations(
self._client, self._config, self._serialize, self._deserialize)
self.delegated_subnet_service = DelegatedSubnetServiceOperations(
self._client, self._config, self._serialize, self._deserialize)
self.operations = Operations(
self._client, self._config, self._serialize, self._deserialize)
async def close(self) -> None:
await self._client.close()
async def __aenter__(self) -> "DNC":
await self._client.__aenter__()
return self
async def __aexit__(self, *exc_details) -> None:
await self._client.__aexit__(*exc_details)
|
data/bangladesh-data/crawler_arcgis.py | HiteshMah-Jan/covid19 | 356 | 12640496 | <filename>data/bangladesh-data/crawler_arcgis.py
import requests
import json
import sys
from datetime import date, timedelta
# source: http://iedcr.gov.bd/
url = 'https://services3.arcgis.com/nIl76MjbPamkQiu8/arcgis/rest/services/corona_time_tracker_bd/FeatureServer/0/query?where=1%3D1&returnGeometry=false&outFields=*&f=json&resultRecordCount=2000&resultOffset='
offset = 0
features = []
while True:
curr_url = url + str(offset)
data = requests.get(url=curr_url).json()
features += data['features']
if (len(data['features']) == 0): break
offset += 2000
data_str = json.dumps(
features,
indent=2,
ensure_ascii=False,
)
f = open('data/bangladesh-data/time_series.json', 'w')
f.write(data_str)
f.close() |
unittest/scripts/py_devapi/scripts/mysqlx_table.py | mueller/mysql-shell | 119 | 12640517 | # Assumptions: ensure_schema_does_not_exist is available
# Assumes __uripwd is defined as <user>:<pwd>@<host>:<plugin_port>
from __future__ import print_function
from mysqlsh import mysqlx
mySession = mysqlx.get_session(__uripwd)
ensure_schema_does_not_exist(mySession, 'py_shell_test')
schema = mySession.create_schema('py_shell_test')
mySession.set_current_schema('py_shell_test')
result = mySession.sql('create table table1 (name varchar(50))').execute()
table = mySession.get_schema('py_shell_test').get_table('table1')
#@ Testing table name retrieving
print('get_name(): ' + table.get_name())
print('name: ' + table.name)
#@ Testing session retrieving
print('get_session():', table.get_session())
print('session:', table.session)
#@ Testing table schema retrieving
print('get_schema():', table.get_schema())
print('schema:', table.schema)
#@ Testing existence
print('Valid:', table.exists_in_database())
mySession.sql('drop table table1').execute()
print('Invalid:', table.exists_in_database())
#@ Testing view check
print('Is View:', table.is_view())
#@<> WL12412: Initialize Count Tests
result = mySession.sql('create table table_count (name varchar(50))').execute()
table = schema.get_table('table_count')
#@ WL12412-TS1_1: Count takes no arguments
table.count(1)
#@ WL12412-TS1_3: Count returns correct number of records
count = table.count()
print("Initial Row Count: %d" % count)
table.insert().values("First")
table.insert().values("Second")
table.insert().values("Third")
count = table.count()
print("Final Row Count: %d" % count)
#@ WL12412-TS2_2: Count throws error on unexisting table
mySession.sql('drop table py_shell_test.table_count')
count = table.count()
# Closes the session
mySession.drop_schema('py_shell_test')
mySession.close()
|
maha/parsers/rules/time/rule.py | TRoboto/Maha | 152 | 12640521 | <gh_stars>100-1000
__all__ = [
"RULE_TIME_YEARS",
"RULE_TIME_MONTHS",
"RULE_TIME_WEEKS",
"RULE_TIME_DAYS",
"RULE_TIME_HOURS",
"RULE_TIME_MINUTES",
"RULE_TIME_AM_PM",
"RULE_TIME_NOW",
"RULE_TIME",
"parse_time",
]
from datetime import datetime
from maha.parsers.rules.time.template import TimeInterval
from ..common import FROM, TO, combine_patterns
from .values import *
def get_combined_value(groups, expression: ExpressionGroup):
value = TimeValue()
for group in groups:
exp = expression.get_matched_expression(group)
value += next(iter(exp(group))).value # type: ignore
return value
def process_time_interval(start_time: TimeValue, end_time: TimeValue):
"""Ensures that the end time is greater than the start time."""
def set_start_if_none(value: str):
if getattr(end_time, value) is not None and getattr(start_time, value) is None:
setattr(start_time, value, getattr(end_time, value))
def get_end_if_none(value: str, none_value=None):
if (
getattr(start_time, value) is not none_value
and getattr(end_time, value) is none_value
):
return TimeValue(**{value: getattr(start_time, value)})
return TimeValue()
now = datetime(2021, 9, 1)
# always set am/pm to both if one is set
set_start_if_none("am_pm")
end_time += get_end_if_none("am_pm")
for prop in [
"microsecond",
"second",
"minute",
"hour",
"day",
"weekday",
"month",
"year",
"years",
"months",
"weeks",
"days",
"leapdays",
"hours",
"minutes",
"seconds",
"microseconds",
]:
from_time = start_time + now
to_time = end_time + now
if from_time < to_time:
break
end_time += get_end_if_none(prop, 0 if prop[-1] == "s" else None)
return TimeInterval(start_time, end_time)
def parse_time(match):
groups = match.capturesdict()
groups_keys = list(groups)
text = match.group(0)
def contains_to_time():
return "to_time" in groups_keys
# time interval
if (
groups.get("interval")
or contains_to_time()
and not TO.match(text)
and groups["to_time"]
):
to_time_start = match.starts(groups_keys.index("to_time") + 1)[0]
start_time = TimeValue()
end_time = TimeValue()
if groups.get("interval"):
value = list(interval_expressions.parse(match.group("interval")))[0].value
start_time += value.start
end_time += value.end
for group, exp_group in EXPERSSION_TIME_MAP.items():
g_start = match.starts(groups_keys.index(group) + 1)
if group not in groups_keys or not g_start:
continue
for m_start, m_group in zip(g_start, groups[group]):
if m_start < to_time_start:
start_time += get_combined_value([m_group], exp_group)
else:
end_time += get_combined_value([m_group], exp_group)
return process_time_interval(start_time, end_time)
value = TimeValue()
for group, exp_group in EXPERSSION_TIME_MAP.items():
if group in groups_keys and groups[group]:
value += get_combined_value(groups[group], exp_group)
# to time only
if contains_to_time() and TO.match(text):
return TimeInterval(end=value)
# from time only
elif FROM.match(text) and (
not contains_to_time() or contains_to_time() and not groups["to_time"]
):
return TimeInterval(start=value)
# time only
return value
years_expressions = ExpressionGroup(
AFTER_N_YEARS,
BEFORE_N_YEARS,
LAST_TWO_YEARS,
NEXT_TWO_YEARS,
LAST_YEAR,
NEXT_YEAR,
NUMERAL_YEAR,
ORDINAL_YEAR,
THIS_YEAR,
)
weeks_expressions = ExpressionGroup(
AFTER_N_WEEKS,
BEFORE_N_WEEKS,
LAST_TWO_WEEKS,
NEXT_TWO_WEEKS,
LAST_WEEK,
NEXT_WEEK,
THIS_WEEK,
)
days_expressions = ExpressionGroup(
AFTER_N_DAYS,
BEFORE_N_DAYS,
AFTER_NEXT_WEEKDAY,
PREVIOUS_WEEKDAY,
NEXT_WEEKDAY,
AFTER_TOMORROW,
TOMORROW,
LAST_SPECIFIC_DAY,
BEFORE_PREVIOUS_WEEKDAY,
BEFORE_YESTERDAY,
YESTERDAY,
LAST_DAY,
THIS_DAY,
WEEKDAY,
)
hours_expressions = ExpressionGroup(
AFTER_N_HOURS,
BEFORE_N_HOURS,
LAST_TWO_HOURS,
NEXT_TWO_HOURS,
LAST_HOUR,
NEXT_HOUR,
NUMERAL_HOUR,
ORDINAL_HOUR,
THIS_HOUR,
)
minutes_expressions = ExpressionGroup(
AFTER_N_MINUTES,
BEFORE_N_MINUTES,
LAST_TWO_MINUTES,
NEXT_TWO_MINUTES,
LAST_MINUTE,
NEXT_MINUTE,
NUMERAL_MINUTE,
ORDINAL_MINUTE,
THIS_MINUTE,
)
am_pm_expressions = ExpressionGroup(PM, AM)
now_expressions = ExpressionGroup(AT_THE_MOMENT)
month_day_expressions = ExpressionGroup(
DAY_MONTH_FORM,
ORDINAL_SPECIFIC_DAY,
LAST_SPECIFIC_DAY_OF_SPECIFIC_MONTH,
ORDINAL_AND_MONTH,
NUMERAL_AND_MONTH,
)
year_month_day_expressions = ExpressionGroup(
DAY_MONTH_YEAR_FORM,
)
year_month_expressions = ExpressionGroup(MONTH_YEAR_FORM, YEAR_WITH_MONTH)
hour_minute_expressions = ExpressionGroup(
NUMERAL_FRACTION_HOUR_MINUTE, ORDINAL_FRACTION_HOUR_MINUTE, HOUR_MINUTE_FORM
)
hour_minute_second_expressions = ExpressionGroup(HOUR_MINUTE_SECOND_FORM)
hour_am_pm_expressions = ExpressionGroup(
NUMERAL_FRACTION_HOUR_AM_PM,
ORDINAL_FRACTION_HOUR_AM_PM,
NUMERAL_HOUR_AM_PM,
ORDINAL_HOUR_AM_PM,
)
interval_expressions = ExpressionGroup(
INTERVAL_FRACTION_HOUR_MINUTE_AM_PM,
)
now_group = named_group("now", now_expressions.join())
years_group = named_group("years", years_expressions.join())
months_group = named_group("months", months_expressions.join())
weeks_group = named_group("weeks", weeks_expressions.join())
days_group = named_group("days", days_expressions.join())
hours_group = named_group("hours", hours_expressions.join())
minutes_group = named_group("minutes", minutes_expressions.join())
am_pm_group = named_group("am_pm", am_pm_expressions.join())
month_day_group = named_group("month_day", month_day_expressions.join())
year_month_group = named_group("year_month", year_month_expressions.join())
year_month_day_group = named_group("year_month_day", year_month_day_expressions.join())
hour_minute_group = named_group("hour_minute", hour_minute_expressions.join())
hour_minute_second_group = named_group("h_m_s", hour_minute_second_expressions.join())
hour_am_pm_group = named_group("hour_am_pm", hour_am_pm_expressions.join())
interval_expressions_group = named_group("interval", interval_expressions.join())
RULE_TIME_YEARS = FunctionValue(parse_time, combine_patterns(years_group))
RULE_TIME_MONTHS = FunctionValue(parse_time, combine_patterns(months_group))
RULE_TIME_WEEKS = FunctionValue(parse_time, combine_patterns(weeks_group))
RULE_TIME_DAYS = FunctionValue(parse_time, combine_patterns(days_group))
RULE_TIME_HOURS = FunctionValue(parse_time, combine_patterns(hours_group))
RULE_TIME_MINUTES = FunctionValue(parse_time, combine_patterns(minutes_group))
RULE_TIME_AM_PM = FunctionValue(parse_time, combine_patterns(am_pm_group))
RULE_TIME_NOW = FunctionValue(parse_time, combine_patterns(now_group))
_all_time_expressions_pattern = combine_patterns(
interval_expressions_group,
year_month_day_group,
year_month_group,
month_day_group,
hour_minute_second_group,
hour_minute_group,
hour_am_pm_group,
now_group,
years_group,
months_group,
weeks_group,
days_group,
hours_group,
minutes_group,
am_pm_group,
seperator=TIME_WORD_SEPARATOR,
combine_all=True,
)
to_time_group = named_group(
"to_time", TO + EXPRESSION_SPACE_OR_NONE + _all_time_expressions_pattern
)
RULE_TIME = FunctionValue(
parse_time,
optional_non_capturing_group(FROM + EXPRESSION_SPACE, TO + EXPRESSION_SPACE_OR_NONE)
+ _all_time_expressions_pattern
+ optional_non_capturing_group(EXPRESSION_SPACE + to_time_group),
)
EXPERSSION_TIME_MAP = {
"month_day": month_day_expressions,
"year_month": year_month_expressions,
"year_month_day": year_month_day_expressions,
"hour_minute": hour_minute_expressions,
"hour_am_pm": hour_am_pm_expressions,
"h_m_s": hour_minute_second_expressions,
"years": years_expressions,
"months": months_expressions,
"weeks": weeks_expressions,
"days": days_expressions,
"hours": hours_expressions,
"minutes": minutes_expressions,
"am_pm": am_pm_expressions,
"now": now_expressions,
}
|
airmozilla/cronlogger/cron.py | mozilla/airmozilla | 115 | 12640524 | <gh_stars>100-1000
import cronjobs
from .decorators import capture
from . import cleanup
@cronjobs.register
@capture
def purge_old_cronlogs():
cleanup.purge_old(verbose=True)
|
plugins/quetz_mamba_solve/quetz_mamba_solve/utils.py | maresb/quetz | 108 | 12640528 | from datetime import datetime, timedelta
from functools import lru_cache, wraps
def timed_lru_cache(hours: int, maxsize: int = 128):
def wrapper_cache(func):
func = lru_cache(maxsize=maxsize)(func)
func.lifetime = timedelta(hours=hours)
func.expiration = datetime.utcnow() + func.lifetime
@wraps(func)
def wrapped_func(*args, **kwargs):
if datetime.utcnow() >= func.expiration:
func.cache_clear()
func.expiration = datetime.utcnow() + func.lifetime
return func(*args, **kwargs)
return wrapped_func
return wrapper_cache
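
# --- Hedged usage sketch (editor addition) ---
# The decorated function is memoized like functools.lru_cache, but the whole
# cache is dropped once the configured lifetime has elapsed.
if __name__ == "__main__":

    @timed_lru_cache(hours=1, maxsize=32)
    def slow_square(x: int) -> int:
        print(f"computing {x} * {x}")
        return x * x

    print(slow_square(3))  # computes and caches
    print(slow_square(3))  # served from cache until the 1-hour lifetime expires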
|
examples/highfreq/workflow.py | majiajue/qlib | 8,637 | 12640552 | # Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
import fire
import qlib
import pickle
from qlib.config import REG_CN, HIGH_FREQ_CONFIG
from qlib.utils import init_instance_by_config
from qlib.data.dataset.handler import DataHandlerLP
from qlib.data.ops import Operators
from qlib.data.data import Cal
from qlib.tests.data import GetData
from highfreq_ops import get_calendar_day, DayLast, FFillNan, BFillNan, Date, Select, IsNull, Cut
class HighfreqWorkflow:
SPEC_CONF = {"custom_ops": [DayLast, FFillNan, BFillNan, Date, Select, IsNull, Cut], "expression_cache": None}
MARKET = "all"
start_time = "2020-09-15 00:00:00"
end_time = "2021-01-18 16:00:00"
train_end_time = "2020-11-30 16:00:00"
test_start_time = "2020-12-01 00:00:00"
DATA_HANDLER_CONFIG0 = {
"start_time": start_time,
"end_time": end_time,
"fit_start_time": start_time,
"fit_end_time": train_end_time,
"instruments": MARKET,
"infer_processors": [{"class": "HighFreqNorm", "module_path": "highfreq_processor"}],
}
DATA_HANDLER_CONFIG1 = {
"start_time": start_time,
"end_time": end_time,
"instruments": MARKET,
}
task = {
"dataset": {
"class": "DatasetH",
"module_path": "qlib.data.dataset",
"kwargs": {
"handler": {
"class": "HighFreqHandler",
"module_path": "highfreq_handler",
"kwargs": DATA_HANDLER_CONFIG0,
},
"segments": {
"train": (start_time, train_end_time),
"test": (
test_start_time,
end_time,
),
},
},
},
"dataset_backtest": {
"class": "DatasetH",
"module_path": "qlib.data.dataset",
"kwargs": {
"handler": {
"class": "HighFreqBacktestHandler",
"module_path": "highfreq_handler",
"kwargs": DATA_HANDLER_CONFIG1,
},
"segments": {
"train": (start_time, train_end_time),
"test": (
test_start_time,
end_time,
),
},
},
},
}
def _init_qlib(self):
"""initialize qlib"""
# use cn_data_1min data
QLIB_INIT_CONFIG = {**HIGH_FREQ_CONFIG, **self.SPEC_CONF}
provider_uri = QLIB_INIT_CONFIG.get("provider_uri")
GetData().qlib_data(target_dir=provider_uri, interval="1min", region=REG_CN, exists_skip=True)
qlib.init(**QLIB_INIT_CONFIG)
def _prepare_calender_cache(self):
"""preload the calendar for cache"""
        # This code uses the copy-on-write feature of Linux to avoid calculating the calendar multiple times in subprocesses.
        # This may speed things up, but is probably not useful on Windows and macOS.
Cal.calendar(freq="1min")
get_calendar_day(freq="1min")
def get_data(self):
"""use dataset to get highreq data"""
self._init_qlib()
self._prepare_calender_cache()
dataset = init_instance_by_config(self.task["dataset"])
xtrain, xtest = dataset.prepare(["train", "test"])
print(xtrain, xtest)
dataset_backtest = init_instance_by_config(self.task["dataset_backtest"])
backtest_train, backtest_test = dataset_backtest.prepare(["train", "test"])
print(backtest_train, backtest_test)
return
def dump_and_load_dataset(self):
"""dump and load dataset state on disk"""
self._init_qlib()
self._prepare_calender_cache()
dataset = init_instance_by_config(self.task["dataset"])
dataset_backtest = init_instance_by_config(self.task["dataset_backtest"])
##=============dump dataset=============
dataset.to_pickle(path="dataset.pkl")
dataset_backtest.to_pickle(path="dataset_backtest.pkl")
del dataset, dataset_backtest
##=============reload dataset=============
with open("dataset.pkl", "rb") as file_dataset:
dataset = pickle.load(file_dataset)
with open("dataset_backtest.pkl", "rb") as file_dataset_backtest:
dataset_backtest = pickle.load(file_dataset_backtest)
self._prepare_calender_cache()
##=============reinit dataset=============
dataset.config(
handler_kwargs={
"start_time": "2021-01-19 00:00:00",
"end_time": "2021-01-25 16:00:00",
},
segments={
"test": (
"2021-01-19 00:00:00",
"2021-01-25 16:00:00",
),
},
)
dataset.setup_data(
handler_kwargs={
"init_type": DataHandlerLP.IT_LS,
},
)
dataset_backtest.config(
handler_kwargs={
"start_time": "2021-01-19 00:00:00",
"end_time": "2021-01-25 16:00:00",
},
segments={
"test": (
"2021-01-19 00:00:00",
"2021-01-25 16:00:00",
),
},
)
dataset_backtest.setup_data(handler_kwargs={})
##=============get data=============
xtest = dataset.prepare("test")
backtest_test = dataset_backtest.prepare("test")
print(xtest, backtest_test)
return
if __name__ == "__main__":
fire.Fire(HighfreqWorkflow)
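
# --- Hedged usage sketch (editor addition) ---
# fire.Fire exposes the public methods of HighfreqWorkflow as sub-commands,
# so the expected invocations look like:
#
#     python workflow.py get_data
#     python workflow.py dump_and_load_dataset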
|
gnss_ins_sim/sim/sim_data_plot.py | Chris2L/gnss-ins-sim | 693 | 12640565 | <gh_stars>100-1000
# -*- coding: utf-8 -*-
# Filename = sim_data_plot.py
"""
Simulation data plot.
Created on 2020-07-24
@author: dongxiaoguang
"""
import math
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.mlab as mlab
from mpl_toolkits.mplot3d import Axes3D
from . import sim_data
def plot(x, y, key, plot3d, mpl_opt):
'''
Plot x and y.
Args:
x: x axis data.
y: a sim_data object.
        key: a tuple or list of keys corresponding to y.data.
plot3d: 1--3D plot, 2--3D plot projected on xy, xz and yz, otherwise--2D plot
mpl_opt: strings to specify matplotlib properties.
'''
if isinstance(y.data, dict):
plot_dict(x, y, key, plot3d, mpl_opt)
else:
plot_array(x, y, plot3d, mpl_opt)
def plot_dict(x, y, key, plot3d=0, mpl_opt=''):
'''
self.data is a dict. plot self.data according to key
Args:
x: x axis data Sim_data object.
y: a sim_data object.
key: a list of keys to specify what data in y.data is plotted.
If key is an empty list, plot all keys in y.data
plot3d: 1--3D plot, 2--3D plot projected on xy, xz and yz, otherwise--2D plot
mpl_opt: strings to specify matplotlib properties.
'''
if key == []:
key = y.data.keys()
for i in key:
y_data = y.data[i]
# x axis
if isinstance(x.data, dict):
if not x.data: # x.data could be an empty dict
x_data = None
else:
x_data = x.data[i]
else:
x_data = x.data
# unit conversion
y_data = sim_data.convert_unit(y_data, y.units, y.output_units)
# plot
if plot3d == 1:
plot3d_in_one_figure(y_data,\
title=y.name + '_' + str(i),\
grid=y.grid,\
legend=y.legend,\
mpl_opt=mpl_opt)
elif plot3d == 2:
plot3d_proj_in_one_figure(y_data,\
title=y.name + '_' + str(i),\
grid=y.grid,\
legend=y.legend,\
mpl_opt=mpl_opt)
else:
plot_in_one_figure(x_data, y_data,\
logx=y.logx, logy=y.logy,\
title=y.name + '_' + str(i),\
xlabel=x.name + ' (' + x.output_units[0] + ')',\
ylabel=y.name + ' (' + str(y.output_units) + ')',\
grid=y.grid,\
legend=y.legend,\
mpl_opt=mpl_opt)
def plot_array(x, y, plot3d=0, mpl_opt=''):
'''
self.data is a numpy.array
Args:
x: x axis data Sim_data object.
y: a sim_data object.
plot3d: 1--3D plot, 2--3D plot projected on xy, xz and yz, otherwise--2D plot
'''
# x axis
if isinstance(x.data, dict):
if not x.data: # x.data could be an empty dict
x_data = None
else:
# randomly choose data of any key
for i in x.data:
x_data = x.data[i]
break
else:
x_data = x.data
# y axis
y_data = y.data
# unit conversion
y_data = sim_data.convert_unit(y_data, y.units, y.output_units)
# plot
if plot3d == 1:
plot3d_in_one_figure(y_data,\
title=y.name,\
grid=y.grid,\
legend=y.legend,\
mpl_opt=mpl_opt)
elif plot3d == 2:
plot3d_proj_in_one_figure(y_data,\
title=y.name,\
grid=y.grid,\
legend=y.legend,\
mpl_opt=mpl_opt)
else:
plot_in_one_figure(x_data, y_data,\
logx=y.logx, logy=y.logy,\
xlabel=x.name + ' (' + x.output_units[0] + ')',\
ylabel=y.name + ' (' + str(y.output_units) + ')',\
title=y.name,\
grid=y.grid,\
legend=y.legend,\
mpl_opt=mpl_opt)
def plot_in_one_figure(x, y, logx=False, logy=False,\
title='Figure', xlabel=None, ylabel=None,\
grid='on', legend=None,\
mpl_opt=''):
'''
Create a figure and plot x/y in this figure.
Args:
x: x axis data, np.array of size (n,) or (n,1)
y: y axis data, np.array of size (n,m)
title: figure title
xlabel: x axis label
ylabel: y axis label
        grid: any value other than 'off' turns the grid on.
legend: tuple or list of strings of length m.
'''
# create figure and axis
fig = plt.figure(title)
axis = fig.add_subplot(111)
lines = []
# if not x data, generate default x data
if x is None:
x = np.array(range(y.shape[0]))
try:
dim = y.ndim
if dim == 1:
if logx and logy: # loglog
line, = axis.loglog(x, y, mpl_opt)
elif logx: # semilogx
line, = axis.semilogx(x, y, mpl_opt)
elif logy: # semilogy
line, = axis.semilogy(x, y, mpl_opt)
else: # plot
line, = axis.plot(x, y, mpl_opt)
lines.append(line)
elif dim == 2:
for i in range(0, y.shape[1]):
if logx and logy: # loglog
line, = axis.loglog(x, y[:, i], mpl_opt)
elif logx: # semilogx
line, = axis.semilogx(x, y[:, i], mpl_opt)
elif logy: # semilogy
line, = axis.semilogy(x, y[:, i], mpl_opt)
else: # plot
line, = axis.plot(x, y[:, i], mpl_opt)
lines.append(line)
else:
raise ValueError
except:
print('x-axis data len: ', x.shape)
print('y-axis data shape: ', y.shape)
raise ValueError('Check input data y.')
# label
if xlabel is not None:
plt.xlabel(xlabel)
if ylabel is not None:
plt.ylabel(ylabel)
# legend
if legend is not None:
plt.legend(lines, legend)
# grid
if grid.lower() != 'off':
plt.grid()
def plot3d_in_one_figure(y, title='Figure', grid='on', legend=None, mpl_opt=''):
'''
Create a figure and plot 3d trajectory in this figure.
Args:
y: y axis data, np.array of size (n,3)
title: figure title
        grid: any value other than 'off' turns the grid on.
legend: tuple or list of strings of length 3.
'''
# create figure and axis
fig = plt.figure(title)
axis = fig.add_subplot(111, projection='3d', aspect='equal')
try:
dim = y.ndim
if dim == 2: # y must be an numpy array of size (n,3), dim=2
if y.shape[1] != 3:
raise ValueError
else:
axis.plot(y[:, 0], y[:, 1], y[:, 2], mpl_opt)
else:
raise ValueError
except:
print(y.shape)
raise ValueError('Check input data y.')
# label
if isinstance(legend, (tuple, list)):
n = len(legend)
if n != 3:
legend = ['x', 'y', 'z']
else:
legend = ['x', 'y', 'z']
axis.set_xlabel(legend[0])
axis.set_ylabel(legend[1])
axis.set_zlabel(legend[2])
# grid
if grid.lower() != 'off':
plt.grid()
def plot3d_proj_in_one_figure(y, title='Figure', grid='on', legend=None, mpl_opt=''):
'''
Create a figure and plot 3d trajectory in this figure.
Args:
y: y axis data, np.array of size (n,3)
title: figure title
        grid: any value other than 'off' turns the grid on.
legend: tuple or list of strings of length 3.
'''
# plot data
try:
dim = y.ndim
if dim == 2: # y must be an numpy array of size (n,3), dim=2
if y.shape[1] != 3:
raise ValueError
else:
# check label
if isinstance(legend, (tuple, list)):
n = len(legend)
if n != 3:
legend = ['x', 'y', 'z']
else:
legend = ['x', 'y', 'z']
# check grid
show_grid = False
if grid.lower() != 'off':
show_grid = True
# create figure and axis
# xy
fig = plt.figure(title)
axis = fig.add_subplot(131, aspect='equal')
axis.plot(y[:, 0], y[:, 1], mpl_opt)
axis.set_xlabel(legend[0])
axis.set_ylabel(legend[1])
axis.grid(show_grid)
# xz
axis = fig.add_subplot(132, aspect='equal')
axis.plot(y[:, 0], y[:, 2], mpl_opt)
axis.set_xlabel(legend[0])
axis.set_ylabel(legend[2])
axis.grid(show_grid)
# yz
axis = fig.add_subplot(133, aspect='equal')
axis.plot(y[:, 1], y[:, 2], mpl_opt)
axis.set_xlabel(legend[1])
axis.set_ylabel(legend[2])
axis.grid(show_grid)
else:
raise ValueError
except:
print(y.shape)
raise ValueError('Check input data y.')
def show_plot():
'''
Show all plots
'''
plt.show()
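
# --- Hedged usage sketch (editor addition) ---
# Minimal smoke test of the 2D helper with synthetic data; real callers go
# through plot()/plot_dict()/plot_array() with Sim_data objects instead.
if __name__ == "__main__":
    _t = np.arange(0.0, 10.0, 0.01)
    _y = np.column_stack((np.sin(_t), np.cos(_t)))
    plot_in_one_figure(_t, _y, title='demo', xlabel='time (s)', ylabel='value',
                       legend=['sin', 'cos'], mpl_opt='-')
    show_plot()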
|
example/flask-metric/api.py | c3-e/custom-pod-autoscaler | 209 | 12640583 | # Copyright 2019 The Custom Pod Autoscaler Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from flask import Flask, abort
import json
app = Flask(__name__)
MAX_METRIC = 5
MIN_METRIC = 0
global_metric = 0
@app.route("/metric")
def metric():
return json.dumps({
"value": global_metric,
"available": MAX_METRIC - global_metric,
"min": MIN_METRIC,
"max": MAX_METRIC
})
@app.route("/increment", methods = ["POST"])
def increment():
global global_metric
if global_metric >= MAX_METRIC:
abort(400, f"Metric cannot be incremented beyond {MAX_METRIC}")
global_metric += 1
return json.dumps({
"value": global_metric,
"available": MAX_METRIC - global_metric,
"min": MIN_METRIC,
"max": MAX_METRIC
})
@app.route("/decrement", methods = ["POST"])
def decrement():
global global_metric
if global_metric <= MIN_METRIC:
abort(400, f"Metric cannot be decremented below {MIN_METRIC}")
global_metric -= 1
return json.dumps({
"value": global_metric,
"available": MAX_METRIC - global_metric,
"min": MIN_METRIC,
"max": MAX_METRIC
})
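
# --- Hedged usage sketch (editor addition; for ad-hoc testing only) ---
# The endpoints can be exercised without a running server via Flask's test
# client:
#
#     with app.test_client() as client:
#         print(client.get("/metric").data)    # value 0, available 5
#         client.post("/increment")
#         print(client.get("/metric").data)    # value 1, available 4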
if __name__ == "__main__":
app.run(debug=True, host="0.0.0.0")
|
tests/functional/python_tests/comment_payment_tests/test_utils.py | drov0/hive | 283 | 12640600 | import sys
sys.path.append("../../")
import hive_utils
from uuid import uuid4
from time import sleep
import logging
LOG_LEVEL = logging.INFO
LOG_FORMAT = "%(asctime)-15s - %(name)s - %(levelname)s - %(message)s"
MAIN_LOG_PATH = "hdf_tester_utils.log"
MODULE_NAME = "Comment-Payment-Tester.Utils"
logger = logging.getLogger(MODULE_NAME)
logger.setLevel(LOG_LEVEL)
def create_accounts(node, creator, accounts):
for account in accounts:
logger.info("Creating account: {}".format(account['name']))
node.create_account(account['name'],
owner_key=account['public_key'],
active_key=account['public_key'],
posting_key=account['public_key'],
memo_key=account['public_key'],
store_keys = False,
creator=creator,
asset='TESTS'
)
hive_utils.common.wait_n_blocks(node.rpc.url, 5)
def transfer_to_vesting(node, from_account, accounts, amount, asset):
from beem.account import Account
for acnt in accounts:
logger.info("Transfer to vesting from {} to {} amount {} {}".format(
from_account, acnt['name'], amount, asset)
)
acc = Account(from_account, hive_instance=node)
acc.transfer_to_vesting(amount, to = acnt['name'], asset = asset)
hive_utils.common.wait_n_blocks(node.rpc.url, 5)
def transfer_assets_to_accounts(node, from_account, accounts, amount, asset):
from beem.account import Account
for acnt in accounts:
logger.info("Transfer from {} to {} amount {} {}".format(from_account,
acnt['name'], amount, asset)
)
acc = Account(from_account, hive_instance=node)
acc.transfer(acnt['name'], amount, asset, memo = "initial transfer")
hive_utils.common.wait_n_blocks(node.rpc.url, 5)
def get_post_permlink(account):
return "post-permlink-{}".format(account)
def create_posts(node, accounts):
logger.info("Creating posts...")
for acnt in accounts:
logger.info("New post ==> ({},{},{},{},{})".format(
"Post title [{}]".format(acnt['name']),
"Post body [{}]".format(acnt['name']),
acnt['name'],
get_post_permlink(acnt['name']),
"firstpost"
))
node.post("Post title [{}]".format(acnt['name']),
"Post body [{}]".format(acnt['name']),
author=acnt['name'],
permlink = get_post_permlink(acnt['name']),
reply_identifier=None,
json_metadata=None,
comment_options=None,
community=None,
app=None,
tags="firstpost",
beneficiaries=None,
self_vote=False,
parse_body=False)
hive_utils.common.wait_n_blocks(node.rpc.url, 1)
hive_utils.common.wait_n_blocks(node.rpc.url, 5)
def print_balance(node, accounts):
import prettytable
from beem.account import Account
table = prettytable.PrettyTable()
table.field_names = [
"Account",
"balance",
"savings",
"hbd",
"savings_hbd",
"reward_hbd",
"reward_hive",
"reward_vesting",
"reward_vesting_hive",
"vesting_shares",
"delegated_vesting_shares",
"received_vesting_shares",
"curation_rewards",
"posting_rewards"
]
balances = []
for acnt in accounts:
ret = Account(acnt['name'], hive_instance=node).json()
data = [
acnt['name'],
ret['balance']['amount'],
ret['savings_balance']['amount'],
ret['hbd_balance']['amount'],
ret['savings_hbd_balance']['amount'],
ret['reward_hbd_balance']['amount'],
ret['reward_hive_balance']['amount'],
ret['reward_vesting_balance']['amount'],
ret['reward_vesting_hive']['amount'],
ret['vesting_shares']['amount'],
ret['delegated_vesting_shares']['amount'],
ret['received_vesting_shares']['amount'],
ret['curation_rewards'],
ret['posting_rewards']
]
balances.append(data)
table.add_row(data)
print(table)
return balances
|
humor/datasets/prox_dataset.py | DalhousieAI/humor | 143 | 12640608 |
import sys, os
cur_file_path = os.path.dirname(os.path.realpath(__file__))
sys.path.append(os.path.join(cur_file_path, '..'))
import os.path as osp
import glob, time, copy, pickle, json
from torch.utils.data import Dataset, DataLoader
from utils.transforms import batch_rodrigues, rotation_matrix_to_angle_axis
from fitting.fitting_utils import read_keypoints, resize_points, OP_FLIP_MAP, load_planercnn_res
import numpy as np
import torch
import cv2
TRIM_EDGES = 90 # number of frames to cut off the beginning and end of each qualitative sequence
QUAL_FPS = 30
QUANT_FPS = 5
QUANT_TRAIN = ['vicon']
QUANT_TEST = ['vicon']
QUANT_SPLITS = [QUANT_TRAIN, QUANT_TEST]
QUAL_TRAIN = ['BasementSittingBooth', 'MPH16', 'N0SittingBooth', 'N3Office', 'MPH112', 'MPH1Library', 'N0Sofa', 'N3OpenArea',
'MPH11', 'MPH8', 'N3Library', 'Werkraum']
QUAL_TEST = ['N3Office', 'N0Sofa', 'N3Library', 'MPH1Library']
QUAL_SPLITS = [QUAL_TRAIN, QUAL_TEST]
# these are the only SMPL parameters we care about
# rename with our convention
SMPL_NAME_MAP = {
'transl' : 'trans',
'beta' : 'betas',
'body_pose' : 'pose_body',
'global_orient' : 'root_orient',
'betas' : 'betas' # sometimes it's named differently in qualitative data
}
SMPL_SIZES = {
'trans' : 3,
'betas' : 10,
'pose_body' : 63,
'root_orient' : 3
}
FEMALE_SUBJ_IDS = [162, 3452, 159, 3403]
DEPTH_SCALE = 1e-3
IMG_WIDTH, IMG_HEIGHT = 1920, 1080
def read_fitting_seq(fitting_paths, return_valid=False):
'''
    Reads in a sequence of PROX/PROXD SMPL fits and concatenates them into a single
    data dict of torch tensors.
- return_valid : if true, returns a corresponding bool list of which fits are not dummy (i.e. there was actually a fitting loaded in)
'''
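    # e.g. with N input paths the result maps 'trans' -> (N, 3), 'betas' -> (N, 10),
    # 'pose_body' -> (N, 63) and 'root_orient' -> (N, 3) torch tensors (see SMPL_SIZES)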
val_names = [SMPL_NAME_MAP[k] for k in SMPL_NAME_MAP.keys()]
fit_dict = {val_name : [] for val_name in val_names}
valid_list = []
for fpath in fitting_paths:
if not os.path.exists(fpath):
print('No PROX results exist for %s, filling with dummy zeros!' % (fpath))
for k, v in SMPL_SIZES.items():
fit_dict[k].append(torch.zeros((v), dtype=torch.float).reshape((1, -1)))
valid_list.append(False)
else:
with open(fpath, 'rb') as f:
param = pickle.load(f, encoding='latin1')
cur_valid = True
for key in param.keys():
if key in SMPL_NAME_MAP.keys():
cur_valid = cur_valid and (torch.sum(torch.logical_not(torch.isfinite(torch.Tensor(param[key])))).item() == 0)
if cur_valid:
fit_dict[SMPL_NAME_MAP[key]].append(torch.Tensor(param[key]).reshape((1, -1)))
else:
fit_dict[SMPL_NAME_MAP[key]].append(torch.zeros((SMPL_SIZES[SMPL_NAME_MAP[key]]), dtype=torch.float).reshape((1, -1)))
if not cur_valid:
print('PROX results nan for %s, filling with dummy zeros!' % (fpath))
valid_list.append(cur_valid)
fit_dict = {k : torch.cat(v, dim=0) for k, v in fit_dict.items()}
if return_valid:
return fit_dict, valid_list
else:
return fit_dict
class ProxDataset(Dataset):
'''
NOTE: support for quantitative dataset has not been thoroughly tested.
'''
def __init__(self, root_path,
quant=False, # quant or qual dataset
split='train',
seq_len=10, # split the data into sequences of this length
load_depth=False,
max_pts=4096, # max number of points to return from depth image
load_img=False,
load_scene_mesh=False,
estimate_floor_plane=False, # if true, estimates the ground plane from the scene mesh and returns it
load_floor_plane=False, # if true, loads the PlaneRCNN floor plane from the dataset and uses this
mask_color=True, # whether to load mask from RGB (NN) or from Kinect
mask_joints=False, # whether to apply the mask to 2d joints so that occluded joints are (0,0,0)
return_mask=False, # whether to return the mask or not
recording=None, # if given, loads only this single recording
recording_subseq_idx=-1, # if given, loads only this single subsequence of a specified recording
return_fitting=True, # if true, loads SMPL params fit from MOSH (quant) or PROXD (qual) as "ground truth"
flip=True # reflects images and masks about y axis, MUST be true so that GT fitting matches data and given scene geometry
):
super(ProxDataset, self).__init__()
self.root_path = root_path
self.quant = quant
data_dir = 'quantitative' if self.quant else 'qualitative'
self.data_dir = os.path.join(self.root_path, data_dir)
self.seq_len = seq_len
self.load_depth = load_depth
self.max_pts = max_pts
self.load_img = load_img
self.load_scene_mesh = load_scene_mesh
self.estimate_floor_plane = estimate_floor_plane
self.load_floor_plane = load_floor_plane
self.mask_color = mask_color
self.mask_joints = mask_joints
self.return_mask = return_mask
self.split = split
self.recording = recording
self.recording_subseq_idx = recording_subseq_idx
if self.recording is None and self.recording_subseq_idx > 0:
print('Ignoring subseq_idx since recording not specified...')
self.recording_subseq_idx = -1
self.return_fitting = return_fitting
self.flip = flip
if self.mask_joints and not self.mask_color:
print('Must be using color mask in order to mask joints (since detected in RGB)! Will NOT mask joints...')
self.mask_joints = False
# data roots
self.rec_root = os.path.join(self.data_dir, 'recordings')
self.calib_dir = os.path.join(self.data_dir, 'calibration')
self.cam2world_root = os.path.join(self.data_dir, 'cam2world')
self.fitting_root = os.path.join(self.data_dir, 'fittings/mosh') if self.quant else \
os.path.join(self.data_dir, 'PROXD')
self.keypoints_root = os.path.join(self.data_dir, 'keypoints')
self.planes_root = os.path.join(self.data_dir, 'planes')
self.scenes_root = os.path.join(self.data_dir, 'scenes')
data_splits = QUANT_SPLITS if self.quant else QUAL_SPLITS
self.split_scenes = data_splits[0] if self.split == 'train' else data_splits[1]
# load (img) data paths
self.img_paths, self.subseq_inds = self.load_data()
self.data_len = len(self.img_paths)
print('This split contains %d sub-sequences...' % (self.data_len))
def load_data(self):
# camera intrinsics are the same for all sequences/scenes
self.projection = Projection(self.calib_dir)
# get the sequences we want
recording_list = []
if self.recording is not None:
rec_path = os.path.join(self.rec_root, self.recording)
if os.path.exists(rec_path):
recording_list = [rec_path]
else:
print('Could not find specified recording at %s!' % (rec_path))
else:
all_rec_dirs = [os.path.join(self.rec_root, f) for f in sorted(os.listdir(self.rec_root)) if f[0] != '.']
all_rec_dirs = [f for f in all_rec_dirs if os.path.isdir(f)]
recording_list = [f for f in all_rec_dirs if f.split('/')[-1].split('_')[0] in self.split_scenes]
recording_names = [f.split('/')[-1] for f in recording_list]
print('Found %d recordings in this split...' % (len(recording_names)))
print('Splitting into subsequences of length %d frames...' % (self.seq_len))
# split each recording into sequences and record information for loading data
img_path_list = []
subseq_idx_list = [] # sub index into the recording
for rec_path, rec_name in zip(recording_list, recording_names):
img_folder = osp.join(rec_path, 'Color')
img_paths = [osp.join(img_folder, img_fn)
for img_fn in os.listdir(img_folder)
                         if (img_fn.endswith('.png') or img_fn.endswith('.jpg'))
                         and not img_fn.startswith('.')]
img_paths = sorted(img_paths)
# print(img_paths)
cur_rec_len = len(img_paths)
# cut off edges of qualitative data to avoid static parts
if not self.quant and (cur_rec_len - (2*TRIM_EDGES)) >= self.seq_len:
img_paths = img_paths[TRIM_EDGES:-TRIM_EDGES]
cur_rec_len = len(img_paths)
if len(img_paths) < self.seq_len:
continue
# split into max number of sequences of desired length
num_seqs = cur_rec_len // self.seq_len
if self.recording_subseq_idx > -1:
sidx = self.recording_subseq_idx*self.seq_len
eidx = sidx + self.seq_len
seq_paths = img_paths[sidx:eidx]
img_path_list.append(seq_paths)
subseq_idx_list.append(self.recording_subseq_idx)
else:
for i in range(num_seqs):
sidx = i*self.seq_len
eidx = sidx + self.seq_len
seq_paths = img_paths[sidx:eidx]
img_path_list.append(seq_paths)
subseq_idx_list.append(i)
return img_path_list, subseq_idx_list
def get_data_paths_from_img(self, img_paths):
# return paths for all other data modalities from the img_paths for a sequence
rec_path = '/'.join(img_paths[0].split('/')[:-2])
rec_name = rec_path.split('/')[-1]
frame_names = ['.'.join(f.split('/')[-1].split('.')[:-1]) for f in img_paths]
keyp_folder = osp.join(self.keypoints_root, rec_name)
depth_folder = os.path.join(rec_path, 'Depth')
mask_folder = os.path.join(rec_path, 'BodyIndex')
mask_color_folder = os.path.join(rec_path, 'BodyIndexColor')
fitting_folder = osp.join(self.fitting_root, rec_name, 'results')
keyp_paths = [osp.join(keyp_folder, f + '_keypoints.json') for f in frame_names]
depth_paths = [osp.join(depth_folder, f + '.png') for f in frame_names]
mask_paths = [osp.join(mask_folder, f + '.png') for f in frame_names]
mask_color_paths = [osp.join(mask_color_folder, f + '.png') for f in frame_names]
fitting_paths = [osp.join(fitting_folder, f, '000.pkl') for f in frame_names]
return keyp_paths, depth_paths, mask_paths, mask_color_paths, fitting_paths
def __len__(self):
return self.data_len
def __getitem__(self, idx):
obs_data = dict()
gt_data = dict()
cur_img_paths = self.img_paths[idx]
obs_data['img_paths'] = cur_img_paths
rec_name = cur_img_paths[0].split('/')[-3]
# get other data paths
keyp_paths, depth_paths, mask_paths, mask_color_paths, fitting_paths = self.get_data_paths_from_img(cur_img_paths)
obs_data['mask_paths'] = mask_color_paths if self.mask_color else mask_paths
# load desired data
# load mask or mask color depending on settings
mask_list = []
if self.mask_color:
for mask_file in mask_color_paths:
mask = cv2.imread(mask_file, cv2.IMREAD_GRAYSCALE)
mask_list.append(mask)
else:
for mask_file in mask_paths:
mask = cv2.imread(mask_file, cv2.IMREAD_GRAYSCALE)
mask = cv2.threshold(mask, 254, 255, cv2.THRESH_BINARY)[1]
mask_list.append(mask)
if self.flip:
mask_list = [cv2.flip(mask, 1) for mask in mask_list]
if self.return_mask:
mask_seq_out = torch.Tensor(np.stack(mask_list, axis=0))
obs_data['mask'] = mask_seq_out
# always load keypoints
keyp_frames = [read_keypoints(f) for f in keyp_paths]
joint2d_data = np.stack(keyp_frames, axis=0) # T x J x 3 (x,y,conf)
if (not self.quant and self.flip) or (self.quant and not self.flip):
# quant keypoints are already flipped (so need to unflip if necessary)
joint2d_data = joint2d_data[:, OP_FLIP_MAP, :] # reflect labeling
joint2d_data[:, :, 0] = IMG_WIDTH - joint2d_data[:, :, 0] # visually reflect about y
# mask out invisible joints if desired (give confidence 0)
if self.mask_joints and self.mask_color:
for t, mask in enumerate(mask_list):
uvs = np.round(joint2d_data[t, :, :2]).astype(int)
uvs[:,0][uvs[:,0] >= IMG_WIDTH] = (IMG_WIDTH-1)
uvs[:,1][uvs[:,1] >= IMG_HEIGHT] = (IMG_HEIGHT-1)
occluded_mask_idx = mask[uvs[:, 1], uvs[:, 0]] != 0
joint2d_data[t, :, :][occluded_mask_idx] = 0.0
obs_data['joints2d'] = torch.Tensor(joint2d_data)
# load images
if self.load_img:
img_list = []
for img_path in cur_img_paths:
img = cv2.imread(img_path).astype(np.float32)[:, :, ::-1] / 255.0
if self.flip:
img = cv2.flip(img, 1)
img_list.append(img)
img_out = torch.Tensor(np.stack(img_list, axis=0))
# print(img_out.size())
obs_data['RGB'] = img_out
if self.quant:
vicon2scene = np.eye(4)
with open(os.path.join(self.data_dir, 'vicon2scene.json'), 'r') as f:
vicon2scene = np.array(json.load(f))
gt_data['vicon2scene'] = torch.Tensor(vicon2scene)
# load GT fitting SMPL params
if self.return_fitting:
fitting_data = read_fitting_seq(fitting_paths)
for k, v in fitting_data.items():
gt_data[k] = v
# load depth and backproject to point cloud if desired, mask to be only visible points
if self.load_depth:
# load in each depth image
depth_img_list = []
for depth_path in depth_paths:
depth_im = cv2.imread(depth_path, flags=-1).astype(float)
depth_im = depth_im / 8.
depth_im = depth_im * DEPTH_SCALE
if self.flip:
depth_im = cv2.flip(depth_im, 1)
depth_img_list.append(depth_im)
# mask so only visible points stay
points_list = []
for depth_img, mask in zip(depth_img_list, mask_list):
scan_dict = self.projection.create_scan(mask, depth_img, mask_on_color=self.mask_color, coord='color') # want points in RGB camera system
cur_points = scan_dict['points']
if cur_points.shape[0] == 0:
print('No points in depth map!')
# the person is completely invisible, just copy over previous frame if possible
if len(points_list) > 0:
print('Copying previous frame...')
cur_points = points_list[-1]
else:
print('filling zeros...')
cur_points = np.zeros((self.max_pts, 3))
else:
cur_points = resize_points(cur_points, self.max_pts)
points_list.append(cur_points)
points = np.stack(points_list, axis=0)
obs_data['points3d'] = torch.Tensor(points)
# load camera extrinsics and intrinsics (return only RGB since already projected points)
scene_name = rec_name.split('_')[0]
cam2world_path = os.path.join(self.cam2world_root, scene_name + '.json')
cam2world = np.eye(4)
with open(cam2world_path, 'r') as f:
cam2world = np.array(json.load(f))
gt_data['cam2world'] = torch.Tensor(cam2world)
gt_data['cam_matx'] = torch.Tensor(self.projection.color_cam['camera_mtx'])
if self.load_floor_plane:
# load in parameters and masks
planes_path = os.path.join(self.planes_root, scene_name)
floor_plane = load_planercnn_res(planes_path)
obs_data['floor_plane'] = floor_plane
if self.load_scene_mesh or self.estimate_floor_plane:
import trimesh
scene_mesh = trimesh.load(os.path.join(self.scenes_root, scene_name + '.ply'))
scene_verts = np.array(scene_mesh.vertices)
scene_faces = np.array(scene_mesh.faces)
if self.load_scene_mesh:
gt_data['scene'] = (scene_verts, scene_faces)
if self.estimate_floor_plane:
from sklearn.cluster import DBSCAN
point_heights = scene_verts[:,2]
neg_mask = point_heights < 0
point_heights = point_heights[neg_mask]
neg_points = scene_verts[neg_mask, :]
samp_n_pts = 10000
if neg_points.shape[0] > samp_n_pts:
samp_inds = np.random.choice(np.arange(neg_points.shape[0]), size=samp_n_pts, replace=False)
neg_points = neg_points[samp_inds]
point_heights = point_heights[samp_inds]
# cluster point heights
clustering = DBSCAN(eps=0.005, min_samples=100).fit(point_heights.reshape((-1, 1)))
num_clusters = np.unique(clustering.labels_).shape[0] #clustering.components_.shape[0]
max_cluster_size = -float('inf')
max_clust_idx = -1
cluster_sizes = []
for clust_idx in range(num_clusters):
cur_clust_size = np.sum(clustering.labels_ == clust_idx)
cluster_sizes.append(cur_clust_size)
sort_inds = np.argsort(np.array(cluster_sizes))[::-1]
max_clust_label = sort_inds[0]
max_clust_mean = np.mean(neg_points[clustering.labels_ == max_clust_label], axis=0)
next_clust_label = sort_inds[1]
next_clust_mean = np.mean(neg_points[clustering.labels_ == next_clust_label], axis=0)
max_clust_idx = max_clust_label if max_clust_mean[2] <= next_clust_mean[2] else next_clust_label
floor_points = neg_points[clustering.labels_ == max_clust_idx]
# fit the floor to these points
from sklearn.linear_model import RANSACRegressor
reg = RANSACRegressor(random_state=0).fit(floor_points[:,:2], floor_points[:,2])
est = reg.estimator_
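                # RANSAC fits a linear model z = c0*x + c1*y + intercept; rewriting it as
                # -c0*x - c1*y + z = intercept gives a plane with normal (-c0, -c1, 1) and
                # offset `intercept`, which are normalized to unit length just below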
plane_normal = np.array([-est.coef_[0], -est.coef_[1], 1.0])
norm_mag = np.linalg.norm(plane_normal)
plane_normal = plane_normal / norm_mag
plane_intercept = est.intercept_ / norm_mag
a, b, c = plane_normal
d = plane_intercept
# transform into the camera frame
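                # for a plane n.x = d in world coordinates and x_cam = R*x_world + t (world->cam),
                # the normal maps to R*n and the new offset is that rotated normal dotted with
                # any transformed point on the plane (here the point (0, 0, d/c))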
cam2world_R = cam2world[:3, :3]
cam2world_t = cam2world[:3, 3]
world2cam_R = cam2world_R.T
world2cam_t = -np.matmul(world2cam_R, cam2world_t)
new_normal = np.dot(world2cam_R, plane_normal)
point_on_old = np.array([0.0, 0.0, d / c])
point_on_new = np.dot(world2cam_R, point_on_old) + world2cam_t
new_intercept = np.dot(new_normal, point_on_new)
a, b, c = new_normal
d = new_intercept
floor_plane = np.array([a, b, c, d])
obs_data['floor_plane'] = floor_plane
# other meta-data
cur_name = rec_name + '_' + '%04d' % (self.subseq_inds[idx])
cur_subj_id = cur_name.split('_')[1]
gender = 'female' if int(cur_subj_id) in FEMALE_SUBJ_IDS else 'male'
gt_data['name'] = cur_name
gt_data['gender'] = gender
return obs_data, gt_data
#
# Adapted from https://github.com/mohamedhassanmus/prox/blob/master/prox/projection_utils.py
# Please see their license for usage restrictions.
#
class Projection():
def __init__(self, calib_dir):
with open(osp.join(calib_dir, 'IR.json'), 'r') as f:
self.depth_cam = json.load(f)
with open(osp.join(calib_dir, 'Color.json'), 'r') as f:
self.color_cam = json.load(f)
def row(self, A):
return A.reshape((1, -1))
def col(self, A):
return A.reshape((-1, 1))
def unproject_depth_image(self, depth_image, cam):
us = np.arange(depth_image.size) % depth_image.shape[1]
vs = np.arange(depth_image.size) // depth_image.shape[1]
ds = depth_image.ravel()
uvd = np.array(np.vstack((us.ravel(), vs.ravel(), ds.ravel())).T)
#unproject
xy_undistorted_camspace = cv2.undistortPoints(np.asarray(uvd[:, :2].reshape((1, -1, 2)).copy()),
np.asarray(cam['camera_mtx']), np.asarray(cam['k']))
xyz_camera_space = np.hstack((xy_undistorted_camspace.squeeze(), self.col(uvd[:, 2])))
xyz_camera_space[:, :2] *= self.col(xyz_camera_space[:, 2]) # scale x,y by z
other_answer = xyz_camera_space - self.row(np.asarray(cam['view_mtx'])[:, 3]) # translate
xyz = other_answer.dot(np.asarray(cam['view_mtx'])[:, :3]) # rotate
return xyz.reshape((depth_image.shape[0], depth_image.shape[1], -1))
def projectPoints(self, v, cam):
v = v.reshape((-1,3)).copy()
return cv2.projectPoints(v, np.asarray(cam['R']), np.asarray(cam['T']), np.asarray(cam['camera_mtx']), np.asarray(cam['k']))[0].squeeze()
def create_scan(self, mask, depth_im, color_im=None, mask_on_color=False, coord='color', TH=1e-2, default_color=[1.00, 0.75, 0.80]):
if not mask_on_color:
depth_im[mask != 0] = 0
if depth_im.size == 0:
return {'v': []}
points = self.unproject_depth_image(depth_im, self.depth_cam).reshape(-1, 3)
colors = np.tile(default_color, [points.shape[0], 1])
uvs = self.projectPoints(points, self.color_cam)
uvs = np.round(uvs).astype(int)
valid_x = np.logical_and(uvs[:, 1] >= 0, uvs[:, 1] < 1080)
valid_y = np.logical_and(uvs[:, 0] >= 0, uvs[:, 0] < 1920)
valid_idx = np.logical_and(valid_x, valid_y)
if mask_on_color:
valid_mask_idx = valid_idx.copy()
valid_mask_idx[valid_mask_idx == True] = mask[uvs[valid_idx == True][:, 1], uvs[valid_idx == True][:, 0]] == 0
uvs = uvs[valid_mask_idx == True]
points = points[valid_mask_idx]
colors = np.tile(default_color, [points.shape[0], 1])
# colors = colors[valid_mask_idx]
valid_idx = valid_mask_idx
if color_im is not None:
colors[:, :3] = color_im[uvs[:, 1], uvs[:, 0]] / 255.0
else:
uvs = uvs[valid_idx == True]
if color_im is not None:
colors[valid_idx == True,:3] = color_im[uvs[:, 1], uvs[:, 0]]/255.0
if coord == 'color':
# Transform to color camera coord
T = np.concatenate([np.asarray(self.color_cam['view_mtx']), np.array([0, 0, 0, 1]).reshape(1, -1)])
stacked = np.column_stack((points, np.ones(len(points)) ))
points = np.dot(T, stacked.T).T[:, :3]
points = np.ascontiguousarray(points)
ind = points[:, 2] > TH
return {'points':points[ind], 'colors':colors[ind]}
def align_color2depth(self, depth_im, color_im, interpolate=True):
(w_d, h_d) = (512, 424)
if interpolate:
# fill depth holes to avoid black spots in aligned rgb image
zero_mask = np.array(depth_im == 0.).ravel()
depth_im_flat = depth_im.ravel()
depth_im_flat[zero_mask] = np.interp(np.flatnonzero(zero_mask), np.flatnonzero(~zero_mask),
depth_im_flat[~zero_mask])
depth_im = depth_im_flat.reshape(depth_im.shape)
points = self.unproject_depth_image(depth_im, self.depth_cam).reshape(-1, 3)
uvs = self.projectPoints(points, self.color_cam)
uvs = np.round(uvs).astype(int)
valid_x = np.logical_and(uvs[:, 1] >= 0, uvs[:, 1] < 1080)
valid_y = np.logical_and(uvs[:, 0] >= 0, uvs[:, 0] < 1920)
valid_idx = np.logical_and(valid_x, valid_y)
uvs = uvs[valid_idx == True]
aligned_color = np.zeros((h_d, w_d, 3)).astype(color_im.dtype)
aligned_color[valid_idx.reshape(h_d, w_d)] = color_im[uvs[:, 1], uvs[:, 0]]
return aligned_color
def align_depth2color(self, depth_im, depth_raw):
(w_rgb, h_rgb) = (1920, 1080)
(w_d, h_d) = (512, 424)
points = self.unproject_depth_image(depth_im, self.depth_cam).reshape(-1, 3)
uvs = self.projectPoints(points, self.color_cam)
uvs = np.round(uvs).astype(int)
valid_x = np.logical_and(uvs[:, 1] >= 0, uvs[:, 1] < 1080)
valid_y = np.logical_and(uvs[:, 0] >= 0, uvs[:, 0] < 1920)
valid_idx = np.logical_and(valid_x, valid_y)
uvs = uvs[valid_idx == True]
aligned_depth = np.zeros((h_rgb, w_rgb)).astype('uint16')
aligned_depth[uvs[:, 1], uvs[:, 0]] = depth_raw[valid_idx.reshape(h_d, w_d)]
return aligned_depth |
regtests/c++/returns_subclasses.py | ahakingdom/Rusthon | 622 | 12640648 |
'''
returns subclasses
'''
class A:
def __init__(self, x:int):
self.x = x
def method(self) -> int:
return self.x
class B(A):
def foo(self) ->int:
return self.x * 2
class C(A):
def bar(self) ->int:
return self.x + 200
class D(C):
def hey(self) ->int:
return self.x + 1
def some_subclass( x:int ) ->A:
switch x:
case 0:
a = A(1)
return a
case 1:
b = B(2)
return b
case 2:
c = C(3)
return c
case 3:
d = D(4)
return d
def main():
a = some_subclass(0)
b = some_subclass(1)
c = some_subclass(2)
d = some_subclass(3)
print(a.getclassname())
print(b.getclassname())
print(c.getclassname())
print(d.getclassname())
print(a.method())
print a.x
print(b.method())
print b.x
print(c.method())
print c.x
print(d.method())
print d.x
print('- - - - - - - ')
if isinstance(b, B):
print('b is type B')
print(b.method())
print(b.foo())
else:
print('error: b is not type B')
if isinstance(c, C):
print('c is type C')
print(c.method())
print(c.bar())
else:
print('error: c is not type C')
if isinstance(d, D):
print('d is type D')
#print(d.bar()) ## TODO, subclass from C.
print(d.hey())
else:
print('error: d is not type D')
print('------------------')
for i in range(3):
o = some_subclass(i)
print(o.method())
if isinstance(o, B):
print(o.foo())
if isinstance(o,C): ## TODO-FIX elif isinstance(o,C)
print(o.bar())
print('end of test') |
exercises/practice/hamming/hamming_test.py | samr1ddh1/python-1 | 1,177 | 12640653 |
import unittest
from hamming import (
distance,
)
# Tests adapted from `problem-specifications//canonical-data.json`
class HammingTest(unittest.TestCase):
def test_empty_strands(self):
self.assertEqual(distance("", ""), 0)
def test_single_letter_identical_strands(self):
self.assertEqual(distance("A", "A"), 0)
def test_single_letter_different_strands(self):
self.assertEqual(distance("G", "T"), 1)
def test_long_identical_strands(self):
self.assertEqual(distance("GGACTGAAATCTG", "GGACTGAAATCTG"), 0)
def test_long_different_strands(self):
self.assertEqual(distance("GGACGGATTCTG", "AGGACGGATTCT"), 9)
def test_disallow_first_strand_longer(self):
with self.assertRaises(ValueError) as err:
distance("AATG", "AAA")
self.assertEqual(type(err.exception), ValueError)
self.assertEqual(err.exception.args[0], "Strands must be of equal length.")
def test_disallow_second_strand_longer(self):
with self.assertRaises(ValueError) as err:
distance("ATA", "AGTG")
self.assertEqual(type(err.exception), ValueError)
self.assertEqual(err.exception.args[0], "Strands must be of equal length.")
def test_disallow_empty_first_strand(self):
with self.assertRaises(ValueError) as err:
distance("", "G")
self.assertEqual(type(err.exception), ValueError)
self.assertEqual(err.exception.args[0], "Strands must be of equal length.")
def test_disallow_right_empty_strand(self):
with self.assertRaises(ValueError) as err:
distance("G", "")
self.assertEqual(type(err.exception), ValueError)
self.assertEqual(err.exception.args[0], "Strands must be of equal length.")
def test_disallow_empty_second_strand(self):
with self.assertRaises(ValueError) as err:
distance("G", "")
self.assertEqual(type(err.exception), ValueError)
self.assertEqual(err.exception.args[0], "Strands must be of equal length.")
|
test/examples/example_script3.py | robintw/recipy | 451 | 12640668 | """
Usage: python -m recipy example_script3.py OUTPUT.npy
"""
from __future__ import print_function
import sys
import numpy
if len(sys.argv) < 2:
print(__doc__, file=sys.stderr)
sys.exit(1)
arr = numpy.arange(10)
arr = arr + 500
# We've made a fairly big change here!
numpy.save(sys.argv[1], arr)
|
dd_1/Part 1/Section 10 - Extras/11 -command line arguments/example8.py | Rebell-Leader/bg | 3,266 | 12640669 | # sometimes we want to specify multiple values for a single argument
import argparse
parser = argparse.ArgumentParser(description='Prints the squares of a list of numbers, and the cubes of another list.')
parser.add_argument('--sq', help='list of numbers to square', nargs='*', type=float)
parser.add_argument('--cu', help='list of numbers to cube', nargs='+', type=float, required=True)
# here we are specifying that --sq may contain 0 or more elements (the *)
# but --cu must contain at least one element (the +)
# by default these two arguments are optional, but we could make them mandatory
# by setting required=True, which we do for the --cu argument (i.e. --cu
# specifies an argument that is mandatory (required=True) AND must have at least one value (nargs='+'))
args = parser.parse_args()
# sq is optional, so we check to see if it is truthy (i.e. a non-empty list)
if args.sq:
squares = [n ** 2 for n in args.sq]
print(squares)
# cu we know is both mandatory and requires at least one value:
cubes = [n ** 3 for n in args.cu]
print(cubes)
# try the following:
# python example8.py -h
# python example8.py --sq 1.5 2 3 --cu 2.5 3 4
# python example8.py --sq --cu 2 3 4
# python example8.py --sq --cu
# python example8.py --sq 1 2 3
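# for reference (values computed by hand): the second invocation above should print
# [2.25, 4.0, 9.0] followed by [15.625, 27.0, 64.0]; the third prints only the cubes
# (an empty --sq list is falsy); and the last two fail because --cu is required and
# needs at least one value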
|
package_control/package_installer.py | zjzh/package_control | 3,373 | 12640670 | import re
import threading
import time
import sublime
from .thread_progress import ThreadProgress
from .package_manager import PackageManager
from .package_disabler import PackageDisabler
from .versions import version_comparable
USE_QUICK_PANEL_ITEM = hasattr(sublime, 'QuickPanelItem')
class PackageInstaller(PackageDisabler):
"""
Provides helper functionality related to installing packages
"""
def __init__(self):
self.manager = PackageManager()
# Track what the color scheme was before upgrade so we can restore it
self.old_color_scheme_package = None
self.old_color_scheme = None
# Track what the theme was before upgrade so we can restore it
self.old_theme_package = None
self.old_theme = None
def make_package_list(self, ignore_actions=[], override_action=None, ignore_packages=[]):
"""
Creates a list of packages and what operation would be performed for
each. Allows filtering by the applicable action or package name.
Returns the information in a format suitable for displaying in the
quick panel.
:param ignore_actions:
A list of actions to ignore packages by. Valid actions include:
`install`, `upgrade`, `downgrade`, `reinstall`, `overwrite`,
            `pull` and `none`. `pull` and `none` are for Git and Hg
            repositories. `pull` is present when incoming changes are detected,
            whereas `none` is selected if no commits are available. `overwrite`
is for packages that do not include version information via the
`package-metadata.json` file.
:param override_action:
A string action name to override the displayed action for all listed
packages.
:param ignore_packages:
A list of packages names that should not be returned in the list
:return:
A list of lists, each containing three strings:
0 - package name
1 - package description
2 - action; [extra info;] package url
"""
packages = self.manager.list_available_packages()
installed_packages = self.manager.list_packages()
package_list = []
for package in sorted(iter(packages.keys()), key=lambda s: s.lower()):
if ignore_packages and package in ignore_packages:
continue
info = packages[package]
release = info['releases'][0]
if package in installed_packages:
installed = True
metadata = self.manager.get_metadata(package)
if metadata.get('version'):
installed_version = metadata['version']
else:
installed_version = None
else:
installed = False
installed_version_name = 'v' + installed_version if \
installed and installed_version else 'unknown version'
new_version = 'v' + release['version']
vcs = None
settings = self.manager.settings
if override_action:
action = override_action
extra = ''
else:
if self.manager.is_vcs_package(package):
to_ignore = settings.get('ignore_vcs_packages')
if to_ignore is True:
continue
if isinstance(to_ignore, list) and package in to_ignore:
continue
upgrader = self.manager.instantiate_upgrader(package)
vcs = upgrader.cli_name
incoming = upgrader.incoming()
if installed:
if vcs:
if incoming:
action = 'pull'
extra = ' with ' + vcs
else:
action = 'none'
extra = ''
elif not installed_version:
action = 'overwrite'
extra = ' %s with %s' % (installed_version_name, new_version)
else:
installed_version = version_comparable(installed_version)
new_version_cmp = version_comparable(release['version'])
if new_version_cmp > installed_version:
action = 'upgrade'
extra = ' to %s from %s' % (new_version, installed_version_name)
elif new_version_cmp < installed_version:
action = 'downgrade'
extra = ' to %s from %s' % (new_version, installed_version_name)
else:
action = 'reinstall'
extra = ' %s' % new_version
else:
action = 'install'
extra = ' %s' % new_version
extra += ';'
if action in ignore_actions:
continue
description = info.get('description')
if not description:
description = 'No description provided'
homepage = info['homepage']
homepage_display = re.sub('^https?://', '', homepage)
if USE_QUICK_PANEL_ITEM:
description = '<em>%s</em>' % sublime.html_format_command(description)
final_line = '<em>' + action + extra + '</em>'
if homepage_display:
if action or extra:
final_line += ' '
final_line += '<a href="%s">%s</a>' % (homepage, homepage_display)
package_entry = sublime.QuickPanelItem(package, [description, final_line])
else:
package_entry = [package]
package_entry.append(description)
final_line = action + extra
if final_line and homepage_display:
final_line += ' '
final_line += homepage_display
package_entry.append(final_line)
package_list.append(package_entry)
return package_list
def on_done(self, picked):
"""
Quick panel user selection handler - disables a package, installs or
upgrades it, then re-enables the package
:param picked:
An integer of the 0-based package name index from the presented
list. -1 means the user cancelled.
"""
if picked == -1:
return
if USE_QUICK_PANEL_ITEM:
name = self.package_list[picked].trigger
else:
name = self.package_list[picked][0]
if name in self.disable_packages(name, 'install'):
def on_complete():
self.reenable_package(name, 'install')
else:
on_complete = None
thread = PackageInstallerThread(self.manager, name, on_complete)
thread.start()
ThreadProgress(
thread,
'Installing package %s' % name,
'Package %s successfully %s' % (name, self.completion_type)
)
class PackageInstallerThread(threading.Thread):
"""
A thread to run package install/upgrade operations in so that the main
Sublime Text thread does not get blocked and freeze the UI
"""
def __init__(self, manager, package, on_complete, pause=False):
"""
:param manager:
An instance of :class:`PackageManager`
:param package:
The string package name to install/upgrade
:param on_complete:
A callback to run after installing/upgrading the package
:param pause:
If we should pause before upgrading to allow a package to be
fully disabled.
"""
self.package = package
self.manager = manager
self.on_complete = on_complete
self.pause = pause
threading.Thread.__init__(self)
def run(self):
if self.pause:
time.sleep(0.7)
try:
self.result = self.manager.install_package(self.package)
except (Exception):
self.result = False
raise
finally:
# Do not reenable if deferred until next restart
if self.on_complete and self.result is not None:
sublime.set_timeout(self.on_complete, 700)
|
robogym/envs/dactyl/observation/shadow_hand.py | 0xflotus/robogym | 288 | 12640703 | <reponame>0xflotus/robogym
import abc
import numpy as np
from robogym.observation.mujoco import MujocoObservation
from robogym.robot.shadow_hand.hand_forward_kinematics import FINGERTIP_SITE_NAMES
from robogym.robot.shadow_hand.hand_interface import JOINTS
from robogym.robot.shadow_hand.mujoco.mujoco_shadow_hand import MuJoCoShadowHand
def _update_qpos_and_qvel(sim, qpos=None, qvel=None):
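    """
    Write the given joint positions/velocities (ordered as in JOINTS, either may be
    None to skip) into the MuJoCo sim state for every "robot0:"-prefixed joint that
    exists in the model.
    """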
for i, joint_name in enumerate(JOINTS):
name = "robot0:" + joint_name
if name in sim.model.joint_names:
if qpos is not None:
sim.data.qpos[sim.model.get_joint_qpos_addr(name)] = qpos[i]
if qvel is not None:
sim.data.qvel[sim.model.get_joint_qvel_addr(name)] = qvel[i]
class MujocoShadowHandObservation(MujocoObservation, abc.ABC):
def __init__(self, provider):
super().__init__(provider)
self.hand = MuJoCoShadowHand(self.provider.mujoco_simulation)
class MujocoShadowhandRelativeFingertipsObservation(MujocoShadowHandObservation):
"""
Mujoco based relative fingertip position observation.
"""
def get(self) -> np.ndarray:
"""
Get relative fingertip positions.
"""
return self.hand.observe().fingertip_positions().flatten()
class MujocoShadowhandAbsoluteFingertipsObservation(MujocoShadowHandObservation):
"""
Mujoco based absolute fingertip position observation.
"""
def get(self) -> np.ndarray:
"""
        Get absolute fingertip positions.
"""
fingertip_pos = np.array(
[
self.provider.mujoco_simulation.mj_sim.data.get_site_xpos(
f"robot0:{site}"
)
for site in FINGERTIP_SITE_NAMES
]
)
return fingertip_pos.flatten()
class MujocoShadowHandJointPosObservation(MujocoShadowHandObservation):
"""
Mujoco based observation for shadowhand joint positions.
"""
def get(self) -> np.ndarray:
"""
Get shadowhand joint positions.
"""
return self.hand.observe().joint_positions()
class MujocoShadowHandJointVelocityObservation(MujocoShadowHandObservation):
"""
Mujoco based observation for shadowhand joint velocities.
"""
def get(self) -> np.ndarray:
"""
Get shadowhand joint velocities.
"""
return self.hand.observe().joint_velocities()
class MujocoShadowhandAngleObservation(MujocoShadowHandObservation):
"""
Mujoco based observation for shadowhand hand angle.
"""
def get(self) -> np.ndarray:
"""
        Get the shadowhand hand angle.
"""
return self.provider.mujoco_simulation.get_qpos("hand_angle")
|
alipay/aop/api/domain/AlipayEcoContractProcessSyncModel.py | antopen/alipay-sdk-python-all | 213 | 12640723 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
from alipay.aop.api.domain.ContractManagerProcessSyncRequest import ContractManagerProcessSyncRequest
class AlipayEcoContractProcessSyncModel(object):
def __init__(self):
self._batch_no = None
self._flows = None
self._sign_platform_code = None
@property
def batch_no(self):
return self._batch_no
@batch_no.setter
def batch_no(self, value):
self._batch_no = value
@property
def flows(self):
return self._flows
@flows.setter
def flows(self, value):
if isinstance(value, list):
self._flows = list()
for i in value:
if isinstance(i, ContractManagerProcessSyncRequest):
self._flows.append(i)
else:
self._flows.append(ContractManagerProcessSyncRequest.from_alipay_dict(i))
@property
def sign_platform_code(self):
return self._sign_platform_code
@sign_platform_code.setter
def sign_platform_code(self, value):
self._sign_platform_code = value
def to_alipay_dict(self):
params = dict()
if self.batch_no:
if hasattr(self.batch_no, 'to_alipay_dict'):
params['batch_no'] = self.batch_no.to_alipay_dict()
else:
params['batch_no'] = self.batch_no
if self.flows:
if isinstance(self.flows, list):
for i in range(0, len(self.flows)):
element = self.flows[i]
if hasattr(element, 'to_alipay_dict'):
self.flows[i] = element.to_alipay_dict()
if hasattr(self.flows, 'to_alipay_dict'):
params['flows'] = self.flows.to_alipay_dict()
else:
params['flows'] = self.flows
if self.sign_platform_code:
if hasattr(self.sign_platform_code, 'to_alipay_dict'):
params['sign_platform_code'] = self.sign_platform_code.to_alipay_dict()
else:
params['sign_platform_code'] = self.sign_platform_code
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = AlipayEcoContractProcessSyncModel()
if 'batch_no' in d:
o.batch_no = d['batch_no']
if 'flows' in d:
o.flows = d['flows']
if 'sign_platform_code' in d:
o.sign_platform_code = d['sign_platform_code']
return o
|
lib/django-1.4/django/core/management/commands/validate.py | MiCHiLU/google_appengine_sdk | 790 | 12640726 | from django.core.management.base import NoArgsCommand
class Command(NoArgsCommand):
help = "Validates all installed models."
requires_model_validation = False
def handle_noargs(self, **options):
self.validate(display_num_errors=True)
|
zerver/webhooks/pagerduty/tests.py | Pulkit007/zulip | 17,004 | 12640750 |
from zerver.lib.test_classes import WebhookTestCase
class PagerDutyHookTests(WebhookTestCase):
STREAM_NAME = "pagerduty"
URL_TEMPLATE = "/api/v1/external/pagerduty?api_key={api_key}&stream={stream}"
WEBHOOK_DIR_NAME = "pagerduty"
def test_trigger(self) -> None:
expected_message = "Incident [3](https://zulip-test.pagerduty.com/incidents/P140S4Y) triggered by [Test service](https://zulip-test.pagerduty.com/services/PIL5CUQ) (assigned to [armooo](https://zulip-test.pagerduty.com/users/POBCFRJ)).\n\n``` quote\nfoo\n```"
self.check_webhook("trigger", "Incident 3", expected_message)
def test_trigger_v2(self) -> None:
expected_message = "Incident [33](https://webdemo.pagerduty.com/incidents/PRORDTY) triggered by [Production XDB Cluster](https://webdemo.pagerduty.com/services/PN49J75) (assigned to [<NAME>](https://webdemo.pagerduty.com/users/P553OPV)).\n\n``` quote\nMy new incident\n```"
self.check_webhook("trigger_v2", "Incident 33", expected_message)
def test_triggerer_v3(self) -> None:
expected_message = "Incident [Test Incident 3 (#9)](https://pig208.pagerduty.com/incidents/PFQZPSY) triggered by [pig208](https://pig208.pagerduty.com/services/PA2P440) (assigned to [PIG 208](https://pig208.pagerduty.com/users/PJ0LVEB))."
self.check_webhook("triggered_v3", "Incident Test Incident 3 (#9)", expected_message)
def test_trigger_without_assignee_v2(self) -> None:
expected_message = "Incident [33](https://webdemo.pagerduty.com/incidents/PRORDTY) triggered by [Production XDB Cluster](https://webdemo.pagerduty.com/services/PN49J75) (assigned to nobody).\n\n``` quote\nMy new incident\n```"
self.check_webhook("trigger_without_assignee_v2", "Incident 33", expected_message)
def test_unacknowledge(self) -> None:
expected_message = "Incident [3](https://zulip-test.pagerduty.com/incidents/P140S4Y) unacknowledged by [Test service](https://zulip-test.pagerduty.com/services/PIL5CUQ) (assigned to [armooo](https://zulip-test.pagerduty.com/users/POBCFRJ)).\n\n``` quote\nfoo\n```"
self.check_webhook("unacknowledge", "Incident 3", expected_message)
def test_unacknowledged_v3(self) -> None:
expected_message = "Incident [Test Incident (#10)](https://pig208.pagerduty.com/incidents/PQ1K5C8) unacknowledged by [pig208](https://pig208.pagerduty.com/services/PA2P440) (assigned to [PIG 208](https://pig208.pagerduty.com/users/PJ0LVEB))."
self.check_webhook("unacknowledged_v3", "Incident Test Incident (#10)", expected_message)
def test_resolved(self) -> None:
expected_message = "Incident [1](https://zulip-test.pagerduty.com/incidents/PO1XIJ5) resolved by [armooo](https://zulip-test.pagerduty.com/users/POBCFRJ).\n\n``` quote\nIt is on fire\n```"
self.check_webhook("resolved", "Incident 1", expected_message)
def test_resolved_v2(self) -> None:
expected_message = "Incident [33](https://webdemo.pagerduty.com/incidents/PRORDTY) resolved by [La<NAME>](https://webdemo.pagerduty.com/users/P553OPV).\n\n``` quote\nMy new incident\n```"
self.check_webhook("resolve_v2", "Incident 33", expected_message)
def test_resolved_v3(self) -> None:
expected_message = "Incident [Test Incident (#6)](https://pig208.pagerduty.com/incidents/PCPZE64) resolved by [PIG 208](https://pig208.pagerduty.com/users/PJ0LVEB)."
self.check_webhook("resolved_v3", "Incident Test Incident (#6)", expected_message)
def test_auto_resolved(self) -> None:
expected_message = "Incident [2](https://zulip-test.pagerduty.com/incidents/PX7K9J2) resolved.\n\n``` quote\nnew\n```"
self.check_webhook("auto_resolved", "Incident 2", expected_message)
def test_acknowledge(self) -> None:
expected_message = "Incident [1](https://zulip-test.pagerduty.com/incidents/PO1XIJ5) acknowledged by [armooo](https://zulip-test.pagerduty.com/users/POBCFRJ).\n\n``` quote\nIt is on fire\n```"
self.check_webhook("acknowledge", "Incident 1", expected_message)
def test_acknowledge_without_trigger_summary_data(self) -> None:
expected_message = "Incident [1](https://zulip-test.pagerduty.com/incidents/PO1XIJ5) acknowledged by [armooo](https://zulip-test.pagerduty.com/users/POBCFRJ).\n\n``` quote\n\n```"
self.check_webhook(
"acknowledge_without_trigger_summary_data", "Incident 1", expected_message
)
def test_acknowledged_v3(self) -> None:
expected_message = "Incident [Test Incident (#10)](https://pig208.pagerduty.com/incidents/PQ1K5C8) acknowledged by [PIG 208](https://pig208.pagerduty.com/users/PJ0LVEB)."
self.check_webhook("acknowledged_v3", "Incident Test Incident (#10)", expected_message)
def test_acknowledge_v2(self) -> None:
expected_message = "Incident [33](https://webdemo.pagerduty.com/incidents/PRORDTY) acknowledged by [<NAME>](https://webdemo.pagerduty.com/users/P553OPV).\n\n``` quote\nMy new incident\n```"
self.check_webhook("acknowledge_v2", "Incident 33", expected_message)
def test_incident_assigned_v2(self) -> None:
expected_message = "Incident [33](https://webdemo.pagerduty.com/incidents/PRORDTY) assigned to [<NAME>](https://webdemo.pagerduty.com/users/PFBSJ2Z).\n\n``` quote\nMy new incident\n```"
self.check_webhook("assign_v2", "Incident 33", expected_message)
def test_reassigned_v3(self) -> None:
expected_message = "Incident [Test Incident (#3)](https://pig208.pagerduty.com/incidents/PIQUG8X) reassigned to [Test User](https://pig208.pagerduty.com/users/PI9DT01)."
self.check_webhook("reassigned_v3", "Incident Test Incident (#3)", expected_message)
def test_no_subject(self) -> None:
expected_message = "Incident [48219](https://dropbox.pagerduty.com/incidents/PJKGZF9) resolved.\n\n``` quote\nmp_error_block_down_critical\u2119\u01b4\n```"
self.check_webhook("mp_fail", "Incident 48219", expected_message)
def test_unsupported_webhook_event(self) -> None:
post_params = dict(content_type="application/json")
for version in range(1, 4):
payload = self.get_body(f"unsupported_v{version}")
result = self.client_post(self.url, payload, **post_params)
self.assert_json_error(
result,
"The 'incident.unsupported' event isn't currently supported by the PagerDuty webhook",
)
|
tonic/torch/__init__.py | Eyalcohenx/tonic | 350 | 12640792 |
from . import agents, models, normalizers, updaters
__all__ = [agents, models, normalizers, updaters]
|
deprecated/dataloaders/deprecated_examples/affect/mosi_late_fusion.py | kapikantzari/MultiBench | 148 | 12640797 |
from unimodals.common_models import GRU, MLP
from datasets.affect.get_data import get_dataloader
from fusions.common_fusions import Concat
from training_structures.Simple_Late_Fusion import train, test
import torch
import sys
import os
sys.path.append(os.getcwd())
# Support mosi/mosi_unaligned/mosei/mosei_unaligned/iemocap/iemocap_unaligned
traindata, validdata, testdata = get_dataloader(
'../affect/processed/mosi_data.pkl')
# mosi
encoders = [GRU(20, 50, dropout=True, has_padding=True).cuda(),
GRU(5, 15, dropout=True, has_padding=True).cuda(),
GRU(300, 600, dropout=True, has_padding=True).cuda()]
head = MLP(665, 300, 1).cuda()
# mosei/iemocap
'''
encoders=[GRU(35,70,dropout=True,has_padding=True).cuda(), \
GRU(74,150,dropout=True,has_padding=True).cuda(),\
GRU(300,600,dropout=True,has_padding=True).cuda()]
head=MLP(820,400,1).cuda()
'''
# iemocap
'''
encoders=[GRU(35,70,dropout=True,has_padding=True).cuda(), \
GRU(74,150,dropout=True,has_padding=True).cuda(),\
GRU(300,600,dropout=True,has_padding=True).cuda()]
head=MLP(820,400,4).cuda()
'''
fusion = Concat().cuda()
# Supports simple late fusion as well as late fusion with bias removal;
# for the latter, simply set regularization=True
# mosi/mosei
train(encoders, fusion, head, traindata, validdata, 1000, True, True,
task="regression", optimtype=torch.optim.AdamW, lr=1e-4, save='best.pt',
weight_decay=0.01, criterion=torch.nn.L1Loss(), regularization=False)
# iemocap
'''
train(encoders,fusion,head,traindata,validdata,1000,True,True, \
optimtype=torch.optim.AdamW,lr=1e-4,save='best.pt', \
weight_decay=0.01,regularization=False)
'''
print("Testing:")
model = torch.load('best.pt').cuda()
test(model, testdata, True, torch.nn.L1Loss(), "regression",)
# test(model,testdata,True,)
|
others/state_machines/python/coin_state_machine.py | CarbonDDR/al-go-rithms | 1,253 | 12640800 | '''
Imagine a machine that gives you a quarter once you have put in 25 cents. I suppose if you really like
gumballs that is in some way useful. The machine waits until it has reached a state of 25 cents,
then it spits out a quarter. The machine is totally oblivious to which coins were added to reach
its current state; all it knows is the state it is in, which is how it achieves the "no memory" property.
(at least conceptually)
'''
from __future__ import print_function
def change_state(state):
while state != 25:
print("Balance: {}".format(state))
print("Please insert a coin (penny, nickel, dime, quarter)")
coin = input()
if coin == "penny":
state += 1
elif coin == "nickel":
state += 5
elif coin == "dime":
state += 10
elif coin == "quarter":
state += 25
else:
print("Sorry that's not a coin")
print("Thanks, here's your quarter")
return state
if __name__ == '__main__':
state = 0
change_state(state)
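# example run (interactive): starting from 0, entering "dime", "dime", "nickel" moves
# the state through 10 -> 20 -> 25, at which point the machine pays out the quarter
# and change_state returns 25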
|
mac/pyobjc-core/PyObjCTest/test_ivar.py | albertz/music-player | 132 | 12640804 | from __future__ import unicode_literals
from PyObjCTools.TestSupport import *
import objc
import sys
from PyObjCTest.instanceVariables import ClassWithVariables
NSObject = objc.lookUpClass('NSObject')
NSAutoreleasePool = objc.lookUpClass('NSAutoreleasePool')
class Base (object):
def __init__(self, ondel):
self.ondel = ondel
def __del__ (self):
self.ondel()
class OCBase (NSObject):
def init_(self, ondel):
self.ondel = ondel
def __del__ (self):
self.ondel()
class TestClass (NSObject):
idVar = objc.ivar('idVar')
idVar2 = objc.ivar('idVar2', b'@')
intVar = objc.ivar('intVar', objc._C_INT)
doubleVar = objc.ivar('doubleVar', objc._C_DBL)
class TestInstanceVariables(TestCase):
def setUp(self):
self.object = TestClass.alloc().init()
def testID(self):
# Check that we can set and query attributes of type 'id'
self.assertEqual(self.object.idVar, None)
self.assertEqual(self.object.idVar2, None)
o = NSObject.alloc().init()
self.object.idVar = o
self.object.idVar2 = o
self.assertIs(self.object.idVar, o)
self.assertIs(self.object.idVar2, o)
self.object.idVar = "hello"
self.assertEqual(self.object.idVar, "hello")
def testInt(self):
# Check that we can set and query attributes of type 'int'
self.assertEqual(self.object.intVar, 0)
self.assertRaises(ValueError, lambda x: setattr(self.object, 'intVar', x), "h")
self.object.intVar = 42
self.assertEqual(self.object.intVar, 42)
def testDouble(self):
# Check that we can set and query attributes of type 'double'
# Can't rely on this for doubles...
#self.assertEqual(self.object.doubleVar, 0.0)
self.assertRaises(ValueError, lambda x: setattr(self.object, 'doubleVar', x), "h")
self.object.doubleVar = 42.0
self.assertAlmostEqual(self.object.doubleVar, 42.0)
def testLeak(self):
# Check that plain python objects are correctly released when
# they are no longer the value of an attribute
pool = NSAutoreleasePool.alloc().init()
self.deleted = 0
self.object.idVar = Base(lambda : setattr(self, 'deleted', 1))
self.object.idVar = None
del pool
self.assertEqual(self.deleted, 1)
def testLeak2(self):
self.deleted = 0
pool = NSAutoreleasePool.alloc().init()
self.object.idVar = Base(lambda : setattr(self, 'deleted', 1))
del self.object
del pool
self.assertEqual(self.deleted, 1)
def testOCLeak(self):
# Check that Objective-C objects are correctly released when
# they are no longer the value of an attribute
pool = NSAutoreleasePool.alloc().init()
self.deleted = 0
self.object.idVar = OCBase.alloc().init_(lambda : setattr(self, 'deleted', 1))
self.object.idVar = None
del pool
self.assertEqual(self.deleted, 1)
def testOCLeak2(self):
pool = NSAutoreleasePool.alloc().init()
self.deleted = 0
self.object.idVar = OCBase.alloc().init_(lambda : setattr(self, 'deleted', 1))
del self.object
del pool
self.assertEqual(self.deleted, 1)
def testDelete(self):
self.assertRaises(TypeError, delattr, self.object.idVar)
class TestAllInstanceVariables (TestCase):
# Some tests for accessing any instance variable, even those not
# declared in python.
def testReading(self):
obj = ClassWithVariables.alloc().init()
getter = objc.getInstanceVariable
cls = getter(obj, 'isa')
self.assertIs(cls, type(obj))
self.assertEqual(getter(obj, 'intValue'), 42)
self.assertIsInstance(getter(obj, 'intValue'), int)
self.assertEqual(getter(obj, 'floatValue'), -10.055)
self.assertIsInstance(getter(obj, 'floatValue'), float)
self.assertEqual(getter(obj, 'charValue'), ord('a'))
self.assertIsInstance(getter(obj, 'charValue'), int)
self.assertEqual(getter(obj, 'strValue'), b"hello world")
self.assertIsInstance(getter(obj, 'strValue'), bytes)
self.assertIsInstance(getter(obj, 'objValue'), NSObject)
self.assertIsNone(getter(obj, 'nilValue'))
self.assertEqual(getter(obj, 'pyValue'), slice(1, 10, 4))
self.assertIsInstance(getter(obj, 'pyValue'), slice)
self.assertEqual(getter(obj, 'rectValue'), ((1, 2), (3, 4)))
self.assertRaises(AttributeError, getter, obj, "noSuchMember")
def testWriting(self):
obj = ClassWithVariables.alloc().init()
getter = objc.getInstanceVariable
setter = objc.setInstanceVariable
self.assertEqual(getter(obj, 'intValue'), 42)
setter(obj, 'intValue', 99)
self.assertEqual(getter(obj, 'intValue'), 99)
self.assertEqual(getter(obj, 'floatValue'), -10.055)
setter(obj, 'floatValue', 0.5)
self.assertEqual(getter(obj, 'floatValue'), 0.5)
self.assertEqual(getter(obj, 'charValue'), ord('a'))
setter(obj, 'charValue', b'b')
self.assertEqual(getter(obj, 'charValue'), ord('b'))
setter(obj, 'charValue', 10)
self.assertEqual(getter(obj, 'charValue'), 10)
self.assertEqual(getter(obj, 'strValue'), b"hello world")
setter(obj, 'strValue', b"foo bar")
self.assertEqual(getter(obj, 'strValue'), b"foo bar")
setter(obj, 'strValue', None)
self.assertEqual(getter(obj, 'strValue'), None)
o = NSObject.new()
self.assertIsNot(getter(obj, 'objValue'), o)
self.assertRaises(TypeError, setter, 'objValue', o)
self.assertIsNot(getter(obj, 'objValue'), o)
setter(obj, 'objValue', o, True)
self.assertIs(getter(obj, 'objValue'), o)
o2 = NSObject.new()
o2.retain()
self.assertIsNot(getter(obj, 'objValue'), o2)
setter(obj, 'objValue', o2, False)
self.assertIs(getter(obj, 'objValue'), o2)
self.assertEqual(getter(obj, 'pyValue'), slice(1, 10, 4))
setter(obj, 'pyValue', [1,2,3])
self.assertEqual(getter(obj, 'pyValue'), [1,2,3])
self.assertEqual(getter(obj, 'rectValue'), ((1, 2), (3, 4)))
setter(obj, 'rectValue', ((-4, -8), (2, 7)))
self.assertEqual(getter(obj, 'rectValue'), ((-4, -8), (2, 7)))
self.assertRaises(AttributeError, setter, obj, "noSuchMember", 'foo')
def testClassMod(self):
# It's scary as hell, but updating the class of an object does "work"
# (for some perverted interpretation of the word)
class DummyClass (NSObject):
__slots__ = ()
o = NSObject.alloc().init()
self.assertIsInstance(o, NSObject)
self.assertIsNotInstance(o, DummyClass)
objc.setInstanceVariable(o, "isa", DummyClass)
self.assertIsInstance(o, DummyClass)
def testDir(self):
obj = ClassWithVariables.alloc().init()
# Note: cannot check the exact contents of dir(), who knows
# what NSObject defines...
v = objc.listInstanceVariables(obj)
self.assertIn(('charValue', objc._C_CHR), v)
self.assertIn(('intValue', objc._C_INT), v)
self.assertIn(('isa', objc._C_CLASS), v)
def testAnonymousIvar(self):
class AnonIvarClass (NSObject):
var = objc.ivar()
var2 = objc.ivar(type=objc._C_DBL)
outlet = objc.IBOutlet()
self.assertTrue(outlet.__isOutlet__)
self.assertFalse(outlet.__isSlot__)
o = AnonIvarClass.alloc().init()
o.var = NSObject.alloc().init()
self.assertIsInstance(o.var, NSObject)
o.var2 = 4
self.assertIsInstance(o.var2, float)
def testNamedOutlet(self):
class NamedOutlet (NSObject):
outlet1 = objc.IBOutlet()
outlet2 = objc.IBOutlet("my_outlet")
all_outlets = {}
for name, tp in objc.listInstanceVariables(NamedOutlet):
all_outlets[name] = tp
self.assertEqual(all_outlets['outlet1'], objc._C_ID)
self.assertEqual(all_outlets['my_outlet'], objc._C_ID)
o = NamedOutlet.alloc().init()
self.assertTrue(hasattr(o, 'outlet1'))
self.assertTrue(hasattr(o, 'outlet2'))
class TestStructConvenience (TestCase):
    def test_using_convenience(self):
        for name, typestr in [
            ('bool', objc._C_BOOL),
            ('char', objc._C_CHR),
            ('int', objc._C_INT),
            ('short', objc._C_SHT),
            ('long', objc._C_LNG),
            ('long_long', objc._C_LNG_LNG),
            ('unsigned_char', objc._C_UCHR),
            ('unsigned_int', objc._C_UINT),
            ('unsigned_short', objc._C_USHT),
            ('unsigned_long', objc._C_ULNG),
            ('unsigned_long_long', objc._C_ULNG_LNG),
            ('float', objc._C_FLT),
            ('double', objc._C_DBL),
            ('BOOL', objc._C_NSBOOL),
            ('UniChar', objc._C_UNICHAR),
            ('char_text', objc._C_CHAR_AS_TEXT),
            ('char_int', objc._C_CHAR_AS_INT),
        ]:
            self.assertHasAttr(objc.ivar, name)
            v = getattr(objc.ivar, name)()
            self.assertIsInstance(v, objc.ivar)
            self.assertEqual(v.__typestr__, typestr)
            self.assertEqual(v.__name__, None)
            self.assertFalse(v.__isOutlet__)
            self.assertFalse(v.__isSlot__)
            v = getattr(objc.ivar, name)('my_var')
            self.assertIsInstance(v, objc.ivar)
            self.assertEqual(v.__typestr__, typestr)
            self.assertEqual(v.__name__, 'my_var')
            self.assertFalse(v.__isOutlet__)
            self.assertFalse(v.__isSlot__)
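# --- Added illustrative sketch (not part of the original test suite) ---
# The convenience constructors exercised above could be used in a class definition
# roughly like this; the class and attribute names below are made up for illustration.
#
#     class SketchedClass (NSObject):
#         counter = objc.ivar.int()               # typed ivar, typestr objc._C_INT
#         ratio = objc.ivar.double('my_ratio')    # explicitly named ivar
#         view = objc.IBOutlet()                  # outlet, __isOutlet__ is True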
if __name__ == '__main__':
    main()
|
tests/test_app/library/loans/migrations/0001_initial.py | Pijuli/django-jazzmin | 972 | 12640806 | # Generated by Django 2.2.15 on 2020-10-14 12:37
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import uuid
class Migration(migrations.Migration):
initial = True
dependencies = [
("books", "0001_initial"),
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name="Library",
fields=[
(
"id",
models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name="ID"),
),
("address", models.CharField(max_length=255)),
(
"librarian",
models.OneToOneField(
on_delete=django.db.models.deletion.CASCADE, related_name="library", to=settings.AUTH_USER_MODEL
),
),
],
),
migrations.CreateModel(
name="BookLoan",
fields=[
(
"id",
models.UUIDField(
default=uuid.uuid4,
help_text="Unique ID for this particular book across whole library",
primary_key=True,
serialize=False,
),
),
("imprint", models.CharField(max_length=200)),
("due_back", models.DateField(blank=True, null=True)),
("loan_start", models.DateTimeField()),
("duration", models.DurationField(blank=True)),
(
"status",
models.CharField(
blank=True,
choices=[
("m", "Maintenance"),
("o", "On loan"),
("a", "Available"),
("r", "Reserved"),
],
default="m",
help_text="Book availability",
max_length=1,
),
),
(
"book",
models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to="books.Book"),
),
(
"borrower",
models.ForeignKey(
blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to=settings.AUTH_USER_MODEL
),
),
],
options={
"ordering": ("due_back",),
},
),
]
|
textworld/version.py | microsoft/TextWorld | 307 | 12640815 | <filename>textworld/version.py
__version__ = '1.4.5'
__prerelease__ = '1.4.5rc1'
|
predict.py | marcoleewow/LaTeX_OCR | 290 | 12640832 | from scipy.misc import imread
import PIL
import os
from PIL import Image
import numpy as np
from model.img2seq import Img2SeqModel
from model.utils.general import Config, run
from model.utils.text import Vocab
from model.utils.image import greyscale, crop_image, pad_image, downsample_image, TIMEOUT
def interactive_shell(model):
"""Creates interactive shell to play with model
"""
model.logger.info("""
This is an interactive mode.
To exit, enter 'exit'.
Enter a path to a file
input> data/images_test/0.png""")
while True:
# img_path = raw_input("input> ")# for python 2
img_path = input("input> ") # for python 3
if img_path == "exit" or img_path == "q":
            break  # exit the interactive shell
        if img_path[-3:] == "png":
            img = imread(img_path)
        elif img_path[-3:] == "pdf":
            # call magick to convert the pdf into a png file
            buckets = [
                [240, 100], [320, 80], [400, 80], [400, 100], [480, 80], [480, 100],
                [560, 80], [560, 100], [640, 80], [640, 100], [720, 80], [720, 100],
                [720, 120], [720, 200], [800, 100], [800, 320], [1000, 200],
                [1000, 400], [1200, 200], [1600, 200], [1600, 1600]
            ]
            dir_output = "tmp/"
            name = img_path.split('/')[-1].split('.')[0]
            run("magick convert -density {} -quality {} {} {}".format(200, 100, img_path, dir_output+"{}.png".format(name)), TIMEOUT)
            img_path = dir_output + "{}.png".format(name)
            crop_image(img_path, img_path)
            pad_image(img_path, img_path, buckets=buckets)
            downsample_image(img_path, img_path, 2)
            img = imread(img_path)
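        # Added note (illustrative): for a hypothetical input such as "paper/eq1.pdf",
        # the run() call above would expand to roughly
        #   magick convert -density 200 -quality 100 paper/eq1.pdf tmp/eq1.png
        # after which the PNG is cropped, padded to the nearest bucket and downsampled
        # before being passed to the model.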
        img = greyscale(img)
        hyps = model.predict(img)
        model.logger.info(hyps[0])
if __name__ == "__main__":
    # restore config and model
    dir_output = "./results/full/"
    config_vocab = Config(dir_output + "vocab.json")
    config_model = Config(dir_output + "model.json")
    vocab = Vocab(config_vocab)
    model = Img2SeqModel(config_model, dir_output, vocab)
    model.build_pred()
    # model.restore_session(dir_output + "model_weights/model.cpkt")
    interactive_shell(model)
|
10 Days of Statistics/08 - Day 2 - Compound Event Probability.py | srgeyK87/Hacker-Rank-30-days-challlenge | 275 | 12640856 | <filename>10 Days of Statistics/08 - Day 2 - Compound Event Probability.py
# ========================
# Information
# ========================
# Direct Link: https://www.hackerrank.com/challenges/s10-mcq-3/problem
# Difficulty: Easy
# Max Score: 10
# Language: Python
# Multiple Choice Question - No code required but checked with code
# ========================
# Solution
# ========================
import itertools
from fractions import Fraction
from collections import Counter
# Let red = 1 and black = 0
# Bag X
X = list(Counter({1:4, 0:3}).elements())
# Bag Y
Y = list(Counter({1:5, 0:4}).elements())
# Bag z
Z = list(Counter({1:4, 0:4}).elements())
# Sample space / total number of outcomes
TOTAL_SAMPLES = list(itertools.product(X, Y, Z))
# Total number of outcomes
TOTAL_SAMPLES_SIZE = len(TOTAL_SAMPLES)
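# Added note: with one ball drawn from each bag there are 7 * 9 * 8 = 504 equally likely outcomes.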
# Total number of favourable outcomes
FAVOURABLE_OUTCOMES_SIZE = sum(sum(outcome) == 2 for outcome in TOTAL_SAMPLES)
# Probability as a fraction
print(Fraction(FAVOURABLE_OUTCOMES_SIZE, TOTAL_SAMPLES_SIZE))
# >>> 17/42
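# --- Added verification sketch (not part of the original solution) ---
# The enumeration above can be cross-checked analytically by summing the three
# orderings that give exactly two red balls; the result should again be 17/42.
p_rrb = Fraction(4, 7) * Fraction(5, 9) * Fraction(4, 8)  # red from X, red from Y, black from Z
p_rbr = Fraction(4, 7) * Fraction(4, 9) * Fraction(4, 8)  # red from X, black from Y, red from Z
p_brr = Fraction(3, 7) * Fraction(5, 9) * Fraction(4, 8)  # black from X, red from Y, red from Z
print(p_rrb + p_rbr + p_brr)
# >>> 17/42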
|
alex/components/tts/voicerss.py | oplatek/alex | 184 | 12640882 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import urllib
import urllib2
import alex.utils.cache as cache
import alex.utils.audio as audio
from alex.components.tts import TTSInterface
from alex.components.tts.exceptions import TTSException
from alex.components.tts.preprocessing import TTSPreprocessing
class VoiceRssTTS(TTSInterface):
"""Uses The VoiceRss TTS service to synthesize sentences in a
specific language, e.g. en-us.
The main function synthesize returns a string which contain a RIFF
wave file audio of the synthesized text."""
    def __init__(self, cfg):
        """Initialize: just remember the configuration."""
        self.cfg = cfg
        super(VoiceRssTTS, self).__init__(cfg)
        self.preprocessing = TTSPreprocessing(self.cfg, self.cfg['TTS']['VoiceRss']['preprocessing'])
    @cache.persistent_cache(True, 'VoiceRssTTS.get_tts_mp3.')
    def get_tts_mp3(self, language, text):
        """Access the VoiceRSS TTS service and get synthesized audio
        for a text.
        Returns a string with an MP3 stream."""
        baseurl = "http://api.voicerss.org"
        values = {'src': text.encode('utf8'),
                  'hl': language,
                  'c': 'MP3',
                  'f': '16khz_16bit_mono',
                  'key': self.cfg['TTS']['VoiceRss']['api_key']}
        data = urllib.urlencode(values)
        request = urllib2.Request(baseurl, data)
        request.add_header("User-Agent", "Mozilla/5.0 (X11; U; Linux i686) Gecko/20071127 Firefox/2.0.0.11")
        try:
            mp3response = urllib2.urlopen(request)
            return mp3response.read()
        except (urllib2.HTTPError, urllib2.URLError):
raise TTSException("SpeechTech TTS error.")
    def synthesize(self, text):
        """Synthesize the text and return it in a string
        with audio in default format and sample rate."""
        wav = b""
        try:
            if text:
                text = self.preprocessing.process(text)
                mp3 = self.get_tts_mp3(self.cfg['TTS']['VoiceRss']['language'], text)
                wav = audio.convert_mp3_to_wav(self.cfg, mp3)
                wav = audio.change_tempo(self.cfg, self.cfg['TTS']['VoiceRss']['tempo'], wav)
                return wav
            else:
                return b""
        except TTSException as e:
            m = unicode(e) + " Text: %s" % text
            self.cfg['Logging']['system_logger'].exception(m)
            return b""
        return wav
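# --- Added usage sketch (illustrative only, not part of the original module) ---
# A minimal example of how this class might be driven, assuming `cfg` behaves like a
# nested dict exposing the keys read above; the real Alex configuration object and the
# exact preprocessing rules are not shown here, so treat every value as a placeholder.
#
#     cfg = {
#         'TTS': {'VoiceRss': {'api_key': 'YOUR_KEY',       # placeholder, not a real key
#                              'language': 'en-us',
#                              'tempo': 1.0,
#                              'preprocessing': '...'}},    # path to preprocessing rules
#         'Logging': {'system_logger': some_logger},        # logger used on errors
#     }
#     tts = VoiceRssTTS(cfg)
#     wav_bytes = tts.synthesize(u"Hello world")            # RIFF WAV byte string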
|