Dataset schema (one record per source file; ⌀ marks nullable columns):

| column | type |
|---|---|
| blob_id | string (length 40) |
| directory_id | string (length 40) |
| path | string (length 3 to 616) |
| content_id | string (length 40) |
| detected_licenses | sequence (length 0 to 112) |
| license_type | string (2 classes) |
| repo_name | string (length 5 to 115) |
| snapshot_id | string (length 40) |
| revision_id | string (length 40) |
| branch_name | string (777 classes) |
| visit_date | timestamp[us] (2015-08-06 10:31:46 to 2023-09-06 10:44:38) |
| revision_date | timestamp[us] (1970-01-01 02:38:32 to 2037-05-03 13:00:00) |
| committer_date | timestamp[us] (1970-01-01 02:38:32 to 2023-09-06 01:08:06) |
| github_id | int64 (4.92k to 681M) ⌀ |
| star_events_count | int64 (0 to 209k) |
| fork_events_count | int64 (0 to 110k) |
| gha_license_id | string (22 classes) |
| gha_event_created_at | timestamp[us] (2012-06-04 01:52:49 to 2023-09-14 21:59:50) ⌀ |
| gha_created_at | timestamp[us] (2008-05-22 07:58:19 to 2023-08-21 12:35:19) ⌀ |
| gha_language | string (149 classes) |
| src_encoding | string (26 classes) |
| language | string (1 class) |
| is_vendor | bool (2 classes) |
| is_generated | bool (2 classes) |
| length_bytes | int64 (3 to 10.2M) |
| extension | string (188 classes) |
| content | string (length 3 to 10.2M) |
| authors | sequence (length 1) |
| author_id | string (length 1 to 132) |
e06edc9ef4206d01ba268cd77e82b51af3988588 | 00f3468d8917ac0c1b4df8b4dc50e82c0d9be3fa | /seqsfromfasta.py | f7aa0be287db92eee1959c13a03c700c3416c9e7 | [] | no_license | berkeleyphylogenomics/BPG_utilities | 4e332bb401b8c057502a1a0a1d532396bfff9542 | bbf5df137a0a459598c3f9073d80f0086e5f7550 | refs/heads/master | 2021-01-01T19:21:13.740575 | 2014-11-05T18:40:31 | 2014-11-05T18:40:31 | 24,867,074 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 572 | py |
#!/usr/bin/env python
from Bio import SeqIO

def seqpull(h, *args):  # should use 'any' in py > 2.3
    return ''.join([seq.format('fasta') for seq in SeqIO.parse(h, 'fasta') \
                    if sum([seq.id.count(arg) for arg in args])])

if __name__ == '__main__':
    import sys
    if len(sys.argv) < 3:
        print "%s: get sequences from a fasta file by substring in defline" \
            % sys.argv[0]
        print "USAGE: %s <multiple fasta file> [keywords]" % sys.argv[0]
    else:
        h = open(sys.argv[1])
        print seqpull(h, *sys.argv[2:])
        h.close()
| ["[email protected]"] | |
0de765120183e963c96ef35cf7a5098d79f772b4 | 21e5825959a886787a3915ff0d3efa86d9cd3702 | /combat/finishers/impale.py | 7b32166eb2c800a2409971f38f6bb0c6ea4ef7f5 | ["MIT"] | permissive | ChrisLR/Python-Roguelike-Template | e0df37752907377e606197f2469fda61202129d5 | 9b63742b0111c7e9456fb98a96a3cd28d41a1e10 | refs/heads/master | 2021-06-26T07:48:39.215338 | 2017-09-14T21:46:08 | 2017-09-14T21:46:08 | 69,761,175 | 0 | 0 | null | 2017-09-14T21:46:09 | 2016-10-01T20:09:24 | Python | UTF-8 | Python | false | false | 2,188 | py |
from combat.enums import DamageType
from combat.finishers.base import Finisher
from echo import functions
from util import gridhelpers


class Impale(Finisher):
    name = "Impale"
    description = "Impale your enemy with a slashing or piercing weapon."
    attacker_message = "You impale {defender}'s {defender_bodypart} with your {attacker_weapon}"
    observer_message = "{attacker} impales {defender} {defender_bodypart} with {attacker_his} {attacker_weapon}"

    @classmethod
    def evaluate(cls, attack_result):
        if attack_result.context.distance_to <= 1:
            attacker_weapon = attack_result.context.attacker_weapon
            if attacker_weapon and hasattr(attacker_weapon, 'weapon'):
                weapon_component = attacker_weapon.weapon
                if weapon_component:
                    if weapon_component.melee_damage_type in (DamageType.Pierce, DamageType.Slash):
                        return True
        return False

    @classmethod
    def execute(cls, attack_result):
        return cls.get_message(attack_result)

    @classmethod
    def get_message(cls, attack_result):
        attacker = attack_result.context.attacker
        defender = attack_result.context.defender
        attacker_weapon = attack_result.context.attacker_weapon
        if attacker.is_player:
            message = cls.attacker_message.format(
                defender=defender.name,
                defender_bodypart=attack_result.body_part_hit.name,
                attacker_weapon=attacker_weapon.name,
            )
        else:
            message = cls.observer_message.format(
                attacker=functions.get_name_or_string(attacker),
                defender=functions.names_or_your(defender),
                defender_bodypart=attack_result.body_part_hit.name,
                attacker_his=functions.his_her_it(attacker),
                attacker_weapon=attacker_weapon.name
            )

        if defender.body.blood:
            message += " splashing {blood} behind {defender_him}!!\n".format(
                blood=defender.body.blood.name,
                defender_him=functions.him_her_it(defender)
            )

        return message
| ["[email protected]"] | |
145ae6eef5a9c9e0b2b158275aa8d8b2b504fd31 | 58df224689ab08c99359b1a6077d2fba3728dc61 | /lamda-ocr/merge-files/borb/pdf/canvas/font/simple_font/true_type_font.py | c0b6112c2b259f0d2c80c3ef2231a518f87ed377 | [] | no_license | LIT-Midas/LITHackathon | 2b286728c156d79d3f426f6d19b160a2a04690db | 7b990483dd48b91cf3ec3452b78ab67770da71af | refs/heads/main | 2023-08-13T05:22:59.373965 | 2021-08-16T01:09:49 | 2021-08-16T01:09:49 | 395,024,729 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 13,861 | py |
#!/usr/bin/env python
# -*- coding: utf-8 -*-

"""
The TrueType font format was developed by Apple Computer, Inc., and has been adopted as a standard font
format for the Microsoft Windows operating system. Specifications for the TrueType font file format are
available in Apple’s TrueType Reference Manual and Microsoft’s TrueType 1.0 Font Files Technical
Specification (see Bibliography).
"""
import typing
import zlib
from decimal import Decimal
from pathlib import Path

from borb.io.read.types import Decimal as pDecimal
from borb.io.read.types import Dictionary, List, Name, Stream, String
from borb.pdf.canvas.font.composite_font.cid_font_type_2 import CIDType2Font
from borb.pdf.canvas.font.composite_font.font_type_0 import Type0Font
from borb.pdf.canvas.font.simple_font.font_type_1 import Type1Font
from fontTools.agl import toUnicode  # type: ignore [import]
from fontTools.pens.boundsPen import BoundsPen  # type: ignore [import]
from fontTools.ttLib import TTFont  # type: ignore [import]


class TrueTypeFont(Type1Font):
    """
    A TrueType font dictionary may contain the same entries as a Type 1 font dictionary (see Table 111), with these
    differences:
    • The value of Subtype shall be TrueType.
    • The value of Encoding is subject to limitations that are described in 9.6.6, "Character Encoding".
    • The value of BaseFont is derived differently. The PostScript name for the value of BaseFont may be determined in one of two ways:
    • If the TrueType font program's “name” table contains a PostScript name, it shall be used.
    • In the absence of such an entry in the “name” table, a PostScript name shall be derived from the name by
    which the font is known in the host operating system. On a Windows system, the name shall be based on
    the lfFaceName field in a LOGFONT structure; in the Mac OS, it shall be based on the name of the FOND
    resource. If the name contains any SPACEs, the SPACEs shall be removed.
    """

    @staticmethod
    def true_type_font_from_file(
        path_to_font_file: Path,
    ) -> typing.Union["TrueTypeFont", "Type0Font"]:
        """
        This function returns the PDF TrueTypeFont object for a given TTF file
        """
        assert path_to_font_file.exists()
        assert path_to_font_file.name.endswith(".ttf")

        font_file_bytes: typing.Optional[bytes] = None
        with open(path_to_font_file, "rb") as ffh:
            font_file_bytes = ffh.read()
        assert font_file_bytes

        # read file
        ttf_font_file: TTFont = TTFont(path_to_font_file)

        # read cmap
        cmap: typing.Optional[typing.Dict[int, str]] = ttf_font_file.getBestCmap()
        assert cmap is not None
        cmap_reverse: typing.Dict[str, int] = {}
        for k, v in cmap.items():
            if v in cmap_reverse:
                cmap_reverse[v] = min(cmap_reverse[v], k)
            else:
                cmap_reverse[v] = k
        glyph_order: typing.List[str] = [
            x for x in ttf_font_file.glyphOrder if x in cmap_reverse
        ]

        # if there are more than 256 glyphs, we need to switch to a Type0Font
        if len(glyph_order) >= 256:
            # fmt: off
            type_0_font: Type0Font = TrueTypeFont._type_0_font_from_file(ttf_font_file)
            type_0_font["DescendantFonts"][0]["FontDescriptor"][Name("FontFile2")] = TrueTypeFont._get_font_file_stream(font_file_bytes)
            return type_0_font
            # fmt: on

        # build font
        font: TrueTypeFont = TrueTypeFont()
        font_name: str = TrueTypeFont._get_base_font(ttf_font_file)
        font[Name("Name")] = Name(font_name)
        font[Name("BaseFont")] = Name(font_name)

        # build widths
        units_per_em: pDecimal = pDecimal(ttf_font_file["head"].unitsPerEm)
        if cmap is not None:
            font[Name("FirstChar")] = pDecimal(0)
            font[Name("LastChar")] = pDecimal(len(glyph_order))
            font[Name("Widths")] = List()
            for glyph_name in glyph_order:
                w: typing.Union[pDecimal, Decimal] = (
                    pDecimal(ttf_font_file.getGlyphSet()[glyph_name].width)
                    / units_per_em
                ) * Decimal(1000)
                w = pDecimal(round(w, 2))
                font["Widths"].append(w)
        assert font[Name("FirstChar")] >= 0
        assert (
            font[Name("LastChar")] < 256
        ), "TrueType fonts with more than 256 glyphs are currently not supported."
        font[Name("FontDescriptor")] = TrueTypeFont._get_font_descriptor(ttf_font_file)
        font[Name("Encoding")] = Dictionary()
        font["Encoding"][Name("BaseEncoding")] = Name("WinAnsiEncoding")
        font["Encoding"][Name("Differences")] = List()
        for i in range(0, len(glyph_order)):
            font["Encoding"]["Differences"].append(pDecimal(i))
            font["Encoding"]["Differences"].append(Name(glyph_order[i]))

        # embed font file
        font["FontDescriptor"][Name("FontFile2")] = TrueTypeFont._get_font_file_stream(
            font_file_bytes
        )

        # return
        return font

    @staticmethod
    def _get_font_file_stream(font_file_bytes: bytes) -> Stream:
        font_stream: Stream = Stream()
        font_stream[Name("Type")] = Name("Font")
        font_stream[Name("Subtype")] = Name("TrueType")
        font_stream[Name("Length")] = pDecimal(len(font_file_bytes))
        font_stream[Name("Length1")] = pDecimal(len(font_file_bytes))
        font_stream[Name("Filter")] = Name("FlateDecode")
        font_stream[Name("DecodedBytes")] = font_file_bytes
        font_stream[Name("Bytes")] = zlib.compress(font_file_bytes, 9)
        return font_stream

    @staticmethod
    def _get_font_descriptor(ttf_font_file: TTFont) -> Dictionary:
        # fmt: off
        font_descriptor: Dictionary = Dictionary()
        font_descriptor[Name("Type")] = Name("FontDescriptor")
        font_descriptor[Name("FontName")] = String(TrueTypeFont._get_base_font(ttf_font_file))
        font_descriptor[Name("FontStretch")] = Name("Normal")  # TODO
        font_descriptor[Name("FontWeight")] = pDecimal(400)  # TODO
        font_descriptor[Name("Flags")] = pDecimal(4)  # TODO
        # fmt: on

        # determine FontBBox, CapHeight
        units_per_em: float = ttf_font_file["head"].unitsPerEm
        min_x: float = 1000
        min_y: float = 1000
        max_x: float = 0
        max_y: float = 0
        cap_height: typing.Optional[pDecimal] = None
        glyph_set = ttf_font_file.getGlyphSet()
        for glyph_name in ttf_font_file.glyphOrder:
            pen = BoundsPen(glyph_set)
            glyph_set[glyph_name].draw(pen)
            if pen.bounds is None:
                continue
            # determine CapHeight
            if glyph_name in "EFHIJLMNTZ" and cap_height is None:
                cap_height = pDecimal(pen.bounds[3])
            min_x = min(min_x, pen.bounds[0] / units_per_em * 1000)
            min_y = min(min_y, pen.bounds[1] / units_per_em * 1000)
            max_x = max(max_x, pen.bounds[2] / units_per_em * 1000)
            max_y = max(max_y, pen.bounds[3] / units_per_em * 1000)
        if cap_height is None:
            cap_height = pDecimal(840)

        font_descriptor[Name("FontBBox")] = List().set_can_be_referenced(False)  # type: ignore[attr-defined]
        font_descriptor["FontBBox"].append(pDecimal(min_x))
        font_descriptor["FontBBox"].append(pDecimal(min_y))
        font_descriptor["FontBBox"].append(pDecimal(max_x))
        font_descriptor["FontBBox"].append(pDecimal(max_y))

        # fmt: off
        font_descriptor[Name("ItalicAngle")] = pDecimal(ttf_font_file["post"].italicAngle)
        font_descriptor[Name("Ascent")] = pDecimal(ttf_font_file["hhea"].ascent / units_per_em * 1000)
        font_descriptor[Name("Descent")] = pDecimal(ttf_font_file["hhea"].descent / units_per_em * 1000)
        font_descriptor[Name("CapHeight")] = cap_height
        font_descriptor[Name("StemV")] = pDecimal(297)  # TODO
        # fmt: on

        return font_descriptor

    @staticmethod
    def _get_base_font(ttf_font_file: TTFont) -> str:
        font_name: str = str(
            [
                x
                for x in ttf_font_file["name"].names
                if x.platformID == 3 and x.platEncID == 1 and x.nameID == 6
            ][0].string,
            "latin1",
        )
        font_name = "".join(
            [x for x in font_name if x.lower() in "abcdefghijklmnopqrstuvwxyz-"]
        )
        return font_name

    @staticmethod
    def _build_custom_cmap(ttf_font_file: TTFont) -> Stream:
        cmap_prefix: str = """
/CIDInit /ProcSet findresource begin
12 dict begin
begincmap
/CIDSystemInfo
<< /Registry (Adobe)
/Ordering (UCS)
/Supplement 0
>> def
/CMapName /Adobe-Identity-UCS def
/CMapType 2 def
1 begincodespacerange
<0000> <FFFF>
endcodespacerange
"""
        # 1 beginbfchar
        # <0000> <0000>
        # endbfchar
        pairs: typing.List[typing.Tuple[str, str]] = []
        for i, g in enumerate(ttf_font_file.glyphOrder):
            g_unicode: str = toUnicode(g)
            if len(g_unicode) == 0:
                continue
            g_hex: str = ""
            if len(g_unicode) == 1:
                g_hex = hex(ord(g_unicode))[2:]
            if len(g_unicode) == 2:
                g_hex = hex(ord(g_unicode[0]))[2:] + hex(ord(g_unicode[1]))[2:]
            while len(g_hex) < 4:
                g_hex = "0" + g_hex
            i_hex: str = hex(i)[2:]
            while len(i_hex) < 4:
                i_hex = "0" + i_hex
            pairs.append((i_hex, g_hex))

        cmap_content: str = ""
        for i in range(0, len(pairs), 100):
            start_index: int = i
            end_index: int = min(start_index + 100, len(pairs))
            n: int = end_index - start_index
            cmap_content += "%d beginbfchar\n" % n
            for j in range(start_index, end_index):
                cmap_content += "<%s> <%s>\n" % (pairs[j][0], pairs[j][1])
            cmap_content += "endbfchar\n"

        cmap_suffix: str = """
endcmap
CMapName currentdict /CMap defineresource pop
end
end
"""

        bts: bytes = (cmap_prefix + cmap_content + cmap_suffix).encode("latin1")
        to_unicode_stream = Stream()
        to_unicode_stream[Name("DecodedBytes")] = bts
        to_unicode_stream[Name("Bytes")] = zlib.compress(bts, 9)
        to_unicode_stream[Name("Filter")] = Name("FlateDecode")
        to_unicode_stream[Name("Length")] = pDecimal(len(bts))
        return to_unicode_stream

    @staticmethod
    def _type_0_font_from_file(ttf_font_file: TTFont) -> "Type0Font":
        type_0_font: Type0Font = Type0Font()

        # build BaseFont
        font_name: str = TrueTypeFont._get_base_font(ttf_font_file)
        type_0_font[Name("BaseFont")] = Name(font_name)
        # set Encoding
        type_0_font[Name("Encoding")] = Name("Identity-H")
        # set ToUnicode
        type_0_font[Name("ToUnicode")] = TrueTypeFont._build_custom_cmap(ttf_font_file)

        # build DescendantFont
        descendant_font: CIDType2Font = CIDType2Font()
        descendant_font[Name("Type")] = Name("Font")
        descendant_font[Name("Subtype")] = Name("CIDFontType2")
        descendant_font[Name("BaseFont")] = Name(font_name)
        descendant_font[Name("FontDescriptor")] = TrueTypeFont._get_font_descriptor(
            ttf_font_file
        )
        descendant_font[Name("DW")] = pDecimal(250)

        # build W array
        cmap = ttf_font_file["cmap"].getcmap(3, 1).cmap
        glyph_set = ttf_font_file.getGlyphSet()
        widths_array: List = List()
        for cid, g in enumerate(ttf_font_file.glyphOrder):
            glyph_width: float = 0
            try:
                glyph_width = glyph_set[cmap[ord(toUnicode(g))]].width
            except:
                glyph_width = pDecimal(0)
            # set DW based on the width of a space character
            if toUnicode(g) == " ":
                descendant_font[Name("DW")] = pDecimal(glyph_width)
            widths_array.append(pDecimal(cid))
            widths_array.append(List())
            widths_array[-1].append(pDecimal(glyph_width))
        descendant_font[Name("W")] = widths_array
        descendant_font[Name("CIDToGIDMap")] = Name("Identity")

        # build CIDSystemInfo
        # fmt: off
        descendant_font[Name("CIDSystemInfo")] = Dictionary()
        descendant_font[Name("CIDSystemInfo")][Name("Registry")] = String("Adobe")
        descendant_font[Name("CIDSystemInfo")][Name("Ordering")] = String("Identity")
        descendant_font[Name("CIDSystemInfo")][Name("Supplement")] = pDecimal(0)
        # fmt: on

        # add to DescendantFonts
        type_0_font[Name("DescendantFonts")] = List()
        type_0_font[Name("DescendantFonts")].append(descendant_font)

        # return
        return type_0_font

    def __init__(self):
        super(TrueTypeFont, self).__init__()
        self[Name("Subtype")] = Name("TrueType")

    def _empty_copy(self) -> "Font":  # type: ignore [name-defined]
        return TrueTypeFont()

    def __deepcopy__(self, memodict={}):
        # fmt: off
        f_out: TrueTypeFont = super(TrueTypeFont, self).__deepcopy__(memodict)
        f_out[Name("Subtype")] = Name("TrueType")
        f_out._character_identifier_to_unicode_lookup: typing.Dict[int, str] = {k: v for k, v in self._character_identifier_to_unicode_lookup.items()}
        f_out._unicode_lookup_to_character_identifier: typing.Dict[str, int] = {k: v for k, v in self._unicode_lookup_to_character_identifier.items()}
        return f_out
        # fmt: on
| ["[email protected]"] | |
0fd9070e7532c771b9766a91098a73150dfb5d01 | d308fffe3db53b034132fb1ea6242a509f966630 | /pirates/leveleditor/worldData/shipUndeadInterceptor3.py | acc6d9c173dd1413dd886408aaa47773ffbccdd3 | ["BSD-3-Clause"] | permissive | rasheelprogrammer/pirates | 83caac204965b77a1b9c630426588faa01a13391 | 6ca1e7d571c670b0d976f65e608235707b5737e3 | refs/heads/master | 2020-03-18T20:03:28.687123 | 2018-05-28T18:05:25 | 2018-05-28T18:05:25 | 135,193,362 | 3 | 2 | null | null | null | null | UTF-8 | Python | false | false | 3,664 | py |
# uncompyle6 version 3.2.0
# Python bytecode 2.4 (62061)
# Decompiled from: Python 2.7.14 (v2.7.14:84471935ed, Sep 16 2017, 20:19:30) [MSC v.1500 32 bit (Intel)]
# Embedded file name: pirates.leveleditor.worldData.shipUndeadInterceptor3
from pandac.PandaModules import Point3, VBase3, Vec4, Vec3
objectStruct = {'Objects': {'1189043800.81gjeon': {'Type': 'Ship Part', 'Name': 'shipNavyInterceptor3', 'Category': '38: Phantom', 'File': '', 'Flagship': True, 'LogoOverride': '-1: Default', 'Objects': {'1255998720.0jubutler': {'Type': 'Spawn Node', 'AnimSet': 'default', 'AuraFX': 'None', 'Hpr': Point3(0.0, 0.0, 0.0), 'Min Population': '1', 'Patrol Radius': '12.0000', 'Pause Chance': '100', 'Pause Duration': '30', 'Pos': Point3(1.543, -17.163, 22.11), 'PoseAnim': '', 'PoseFrame': '', 'PropFXLeft': 'None', 'PropFXRight': 'None', 'PropLeft': 'None', 'PropRight': 'None', 'Scale': VBase3(1.0, 1.0, 1.0), 'Spawnables': 'Area', 'Start State': 'Patrol', 'StartFrame': '0', 'Team': 'default', 'TrailFX': 'None', 'TrailLeft': 'None', 'TrailRight': 'None', 'VisSize': '', 'Visual': {'Color': (0, 0, 0.65, 1), 'Model': 'models/misc/smiley'}, 'spawnTimeBegin': 0.0, 'spawnTimeEnd': 0.0}, '1255998848.0jubutler': {'Type': 'Movement Node', 'Hpr': Point3(0.0, 0.0, 0.0), 'Pause Chance': '100', 'Pause Duration': '30', 'Pos': Point3(18.012, 9.63, 23.531), 'Scale': VBase3(1.0, 1.0, 1.0), 'VisSize': '', 'Visual': {'Color': (0.65, 0, 0, 1), 'Model': 'models/misc/smiley'}}, '1255998848.0jubutler0': {'Type': 'Movement Node', 'Hpr': Point3(0.0, 0.0, 0.0), 'Pause Chance': '100', 'Pause Duration': '30', 'Pos': Point3(-14.636, 3.658, 23.536), 'Scale': VBase3(1.0, 1.0, 1.0), 'VisSize': '', 'Visual': {'Color': (0.65, 0, 0, 1), 'Model': 'models/misc/smiley'}}, '1255998848.0jubutler1': {'Type': 'Movement Node', 'Hpr': Point3(0.0, 0.0, 0.0), 'Pause Chance': '100', 'Pause Duration': '30', 'Pos': Point3(-12.938, -34.07, 22.156), 'Scale': VBase3(1.0, 1.0, 1.0), 'VisSize': '', 'Visual': {'Color': (0.65, 0, 0, 1), 'Model': 'models/misc/smiley'}}, '1255998848.0jubutler2': {'Type': 'Movement Node', 'Hpr': Point3(0.0, 0.0, 0.0), 'Pause Chance': '100', 'Pause Duration': '30', 'Pos': Point3(14.147, -35.882, 22.071), 'Scale': VBase3(1.0, 1.0, 1.0), 'VisSize': '', 'Visual': {'Color': (0.65, 0, 0, 1), 'Model': 'models/misc/smiley'}}}, 'Respawns': True, 'StyleOverride': '-1: Default', 'Team': 'EvilNavy', 'Visual': {'Model': ['models/shipparts/interceptorL3-geometry_High', 'models/shipparts/interceptorL3-collisions']}}}, 'Node Links': [['1255998720.0jubutler', '1255998848.0jubutler0', 'Bi-directional'], ['1255998720.0jubutler', '1255998848.0jubutler', 'Bi-directional'], ['1255998848.0jubutler', '1255998848.0jubutler0', 'Bi-directional'], ['1255998848.0jubutler0', '1255998848.0jubutler1', 'Bi-directional'], ['1255998848.0jubutler', '1255998848.0jubutler2', 'Bi-directional'], ['1255998848.0jubutler1', '1255998848.0jubutler2', 'Bi-directional']], 'Layers': {}, 'ObjectIds': {'1189043800.81gjeon': '["Objects"]["1189043800.81gjeon"]', '1255998720.0jubutler': '["Objects"]["1189043800.81gjeon"]["Objects"]["1255998720.0jubutler"]', '1255998848.0jubutler': '["Objects"]["1189043800.81gjeon"]["Objects"]["1255998848.0jubutler"]', '1255998848.0jubutler0': '["Objects"]["1189043800.81gjeon"]["Objects"]["1255998848.0jubutler0"]', '1255998848.0jubutler1': '["Objects"]["1189043800.81gjeon"]["Objects"]["1255998848.0jubutler1"]', '1255998848.0jubutler2': '["Objects"]["1189043800.81gjeon"]["Objects"]["1255998848.0jubutler2"]'}}
extraInfo = {'camPos': Point3(-173.398, -66.2502, 103.662), 'camHpr': VBase3(-74.1058, -20.5578, 0), 'focalLength': 1.39999997616, 'skyState': 2, 'fog': 0}
| ["[email protected]"] | |
106b49f1d09d2c07ec615d4ff6eada48156bac0f | ed3c924c42baa3ab825a482efc15f85a32c06eaa | /boj16649.py | 471eee9a405070048db8b02218200889a3eaef9 | [] | no_license | JoinNova/baekjoon | 95e94a7ccae51103925e515d765ebda7b6fffeed | 33b900696ecf2a42b8e452fdeae6ee482143e37e | refs/heads/master | 2020-04-16T22:25:31.577968 | 2019-04-28T04:25:24 | 2019-04-28T04:25:24 | 165,966,949 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 469 | py |
#boj16649 Building a Stair
def stair(cube):
    cnt=cube
    row=(cube+1)//2
    print(row+1)
    pic='.'*(row+1)+'\n'
    for i in range(row):
        for j in range(row):
            if j==0 or i==row-1:
                pic+='o';cnt-=1
            elif cube%2==0 and i==row-2 and j==1:
                pic+='o';cnt-=1;
            else:
                pic+='.'
        pic+='.\n'
    print(pic.strip())
    #print(cnt)

n=int(input())
if n==2:print(-1)
else:stair(n)
| ["[email protected]"] | |
3f861b0b4904d9b72b34ade2c2fae8f9932ec493 | eb9f655206c43c12b497c667ba56a0d358b6bc3a | /python/testData/inspections/GoogleDocStringRemoveKeywordVararg.py | 2ff1604bc7d53ec8b7c1992b258b97e54edb35a8 | ["Apache-2.0"] | permissive | JetBrains/intellij-community | 2ed226e200ecc17c037dcddd4a006de56cd43941 | 05dbd4575d01a213f3f4d69aa4968473f2536142 | refs/heads/master | 2023-09-03T17:06:37.560889 | 2023-09-03T11:51:00 | 2023-09-03T12:12:27 | 2,489,216 | 16,288 | 6,635 | Apache-2.0 | 2023-09-12T07:41:58 | 2011-09-30T13:33:05 | null | UTF-8 | Python | false | false | 57 | py |
def f():
    """
    Args:
        **kwar<caret>gs:
    """
| ["[email protected]"] | |
65e9e8ebbf9a9682f5fb9acfd790fad23e123824 | 99e44f844d78de330391f2b17bbf2e293bf24b1b | /pytorch/caffe2/quantization/server/group_norm_dnnlowp_op_test.py | b6acc900437ce89c4bd5c4ea17a400c9b8d47839 | ["BSD-3-Clause", "LicenseRef-scancode-generic-cla", "Apache-2.0", "BSD-2-Clause", "MIT"] | permissive | raghavnauhria/whatmt | be10d57bcd6134dd5714d0c4058abd56a1b35a13 | c20483a437c82936cb0fb8080925e37b9c4bba87 | refs/heads/master | 2022-12-04T05:39:24.601698 | 2019-07-22T09:43:30 | 2019-07-22T09:43:30 | 193,026,689 | 0 | 1 | MIT | 2022-11-28T17:50:19 | 2019-06-21T03:48:20 | C++ | UTF-8 | Python | false | false | 4,517 | py |
from __future__ import absolute_import, division, print_function, unicode_literals

import collections

import caffe2.python.hypothesis_test_util as hu
import hypothesis.strategies as st
import numpy as np
from caffe2.python import core, dyndep, utils, workspace
from caffe2.quantization.server import utils as dnnlowp_utils
from dnnlowp_test_utils import check_quantized_results_close
from hypothesis import given


dyndep.InitOpsLibrary("//caffe2/caffe2/quantization/server:dnnlowp_ops")
workspace.GlobalInit(["caffe2", "--caffe2_omp_num_threads=11"])


class DNNLowPOpGroupNormTest(hu.HypothesisTestCase):
    @given(
        N=st.integers(1, 4),
        G=st.integers(2, 4),
        K=st.integers(2, 12),
        H=st.integers(4, 16),
        W=st.integers(4, 16),
        order=st.sampled_from(["NCHW", "NHWC"]),
        in_quantized=st.booleans(),
        out_quantized=st.booleans(),
        weight_quantized=st.booleans(),
        **hu.gcs_cpu_only
    )
    def test_dnnlowp_group_norm(
        self,
        N,
        G,
        K,
        H,
        W,
        order,
        in_quantized,
        out_quantized,
        weight_quantized,
        gc,
        dc,
    ):
        C = G * K

        X = np.random.rand(N, C, H, W).astype(np.float32) * 5.0 - 1.0
        if order == "NHWC":
            X = utils.NCHW2NHWC(X)

        gamma = np.random.rand(C).astype(np.float32) * 2.0 - 1.0
        beta = np.random.randn(C).astype(np.float32) - 0.5

        Output = collections.namedtuple("Output", ["Y", "op_type", "engine"])
        outputs = []

        op_engine_list = [
            ("GroupNorm", ""),
            ("GroupNorm", "DNNLOWP"),
            ("Int8GroupNorm", "DNNLOWP"),
        ]

        for op_type, engine in op_engine_list:
            net = core.Net("test_net")

            do_quantize = "DNNLOWP" in engine and in_quantized
            do_dequantize = "DNNLOWP" in engine and out_quantized
            do_quantize_weight = (
                engine == "DNNLOWP" and weight_quantized and len(outputs) > 0
            )

            if do_quantize:
                quantize = core.CreateOperator(
                    "Quantize", ["X"], ["X_q"], engine=engine, device_option=gc
                )
                net.Proto().op.extend([quantize])

            if do_quantize_weight:
                int8_given_tensor_fill, gamma_q_param = dnnlowp_utils.create_int8_given_tensor_fill(
                    gamma, "gamma_q"
                )
                net.Proto().op.extend([int8_given_tensor_fill])
                X_q_param = dnnlowp_utils.choose_quantization_params(X.min(), X.max())
                int8_bias_tensor_fill = dnnlowp_utils.create_int8_bias_tensor_fill(
                    beta, "beta_q", X_q_param, gamma_q_param
                )
                net.Proto().op.extend([int8_bias_tensor_fill])

            group_norm = core.CreateOperator(
                op_type,
                [
                    "X_q" if do_quantize else "X",
                    "gamma_q" if do_quantize_weight else "gamma",
                    "beta_q" if do_quantize_weight else "beta",
                ],
                ["Y_q" if do_dequantize else "Y"],
                dequantize_output=0 if do_dequantize else 1,
                group=G,
                order=order,
                is_test=True,
                engine=engine,
                device_option=gc,
            )
            if do_quantize_weight:
                # When quantized weight is provided, we can't rescale the
                # output dynamically by looking at the range of output of each
                # batch, so here we provide the range of output observed from
                # fp32 reference implementation
                dnnlowp_utils.add_quantization_param_args(group_norm, outputs[0][0])
            net.Proto().op.extend([group_norm])

            if do_dequantize:
                dequantize = core.CreateOperator(
                    "Dequantize", ["Y_q"], ["Y"], engine=engine, device_option=gc
                )
                net.Proto().op.extend([dequantize])

            self.ws.create_blob("X").feed(X, device_option=gc)
            self.ws.create_blob("gamma").feed(gamma, device_option=gc)
            self.ws.create_blob("beta").feed(beta, device_option=gc)
            self.ws.run(net)
            outputs.append(
                Output(Y=self.ws.blobs["Y"].fetch(), op_type=op_type, engine=engine)
            )

        check_quantized_results_close(outputs, atol_scale=2.0)
| ["[email protected]"] | |
2095829a72d1af19ee231c7ec670bf65766c274d | fd625e2ea155455c96261c8656a51be22fe420c8 | /Python/euler035.py | 4400059a93b485f3924881b3fe16cd51c435bb2d | ["MIT"] | permissive | AnuragAnalog/project_euler | 9b84a6aa0061ad4582c8d0059c3c1eaddd844fd2 | 8babbefbd5b7008ad24509f24a9d5f50ba208f45 | refs/heads/master | 2021-12-12T12:07:29.338791 | 2021-11-01T04:26:44 | 2021-11-01T04:26:44 | 210,749,964 | 6 | 16 | MIT | 2021-11-01T04:26:45 | 2019-09-25T03:44:37 | Python | UTF-8 | Python | false | false | 1,240 | py |
#!/usr/bin/python3

"""
The number, 197, is called a circular prime because all rotations of the digits: 197, 971, and 719, are themselves prime.
There are thirteen such primes below 100: 2, 3, 5, 7, 11, 13, 17, 31, 37, 71, 73, 79, and 97.
How many circular primes are there below one million?
"""

import numpy as np

def isprime(num: int) -> bool:
    for i in range(2, int(np.sqrt(num))+1):
        if num%i == 0:
            return False
    return True

def rotate(num: int) -> set:
    rot = {num}
    length = len(str(num))
    k = 0
    while k < length:
        tmp = list(str(num))
        dig = tmp[0]
        tmp[:] = tmp[1:]
        tmp.append(dig)
        num = ''.join(tmp)
        rot.add(int(num))
        k = k + 1
    return rot

def euler35() -> int:
    tot = 0
    c_primes = [2]
    flag = False
    for i in range(3, 10**6, 2):
        if isprime(i):
            flag = True
            tmp = set()
            cps = rotate(i)
            for x in cps:
                if isprime(x):
                    tmp.add(x)
                else:
                    flag = False
                    break
            if flag:
                c_primes.extend(list(tmp))
    return len(set(c_primes))

tot = euler35()
print(tot)
| ["[email protected]"] | |
1cb72016f9c456c6294bdc18ee3bb15e889e96e0 | fcb7030ae6da44d6f36a9a166a614952a66937db | /11 user's functions/03 - max 2.py | b7b20317783541603811f75cd6f44e4799f9e5f | [] | no_license | rishinkaku/Software-University---Software-Engineering | d9bee36de12affc9aed7fcc0b8b6616768340e51 | b798a0c6927ef461491c8327451dd00561d836e4 | refs/heads/master | 2023-06-10T19:52:51.016630 | 2021-07-08T00:45:06 | 2021-07-08T00:45:06 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 96 | py |
def max_two(a,b):
    return a if a > b else b

print(max_two(3,5))
| ["[email protected]"] | |
a7114ae73b29642ae1b3b76a8eca40595de9439d | df7f13ec34591fe1ce2d9aeebd5fd183e012711a | /hata/discord/embed/embed/tests/test__parse_author.py | fb71e365df68cb03925e515d765ebda7b6fffeed | ["LicenseRef-scancode-warranty-disclaimer"] | permissive | HuyaneMatsu/hata | 63e2f6a2d7a7539fd8f18498852d9d3fe5c41d2e | 53f24fdb38459dc5a4fd04f11bdbfee8295b76a4 | refs/heads/master | 2023-08-20T15:58:09.343044 | 2023-08-20T13:09:03 | 2023-08-20T13:09:03 | 163,677,173 | 3 | 3 | Apache-2.0 | 2019-12-18T03:46:12 | 2018-12-31T14:59:47 | Python | UTF-8 | Python | false | false | 481 | py |
import vampytest

from ...embed_author import EmbedAuthor

from ..fields import parse_author


def test__parse_author():
    """
    Tests whether ``parse_author`` works as intended.
    """
    author = EmbedAuthor(name = 'hell')

    for input_data, expected_output in (
        ({}, None),
        ({'author': None}, None),
        ({'author': author.to_data()}, author),
    ):
        output = parse_author(input_data)
        vampytest.assert_eq(output, expected_output)
| ["[email protected]"] | |
df22c26d03c9eb5404718fa0aee45e5b9bfd5116 | 9b422078f4ae22fe16610f2ebc54b8c7d905ccad | /xlsxwriter/test/comparison/test_image01.py | 81559d51c09a291a0dd6ef1c9b9f4f8d5f70ee88 | ["BSD-2-Clause-Views"] | permissive | projectsmahendra/XlsxWriter | 73d8c73ea648a911deea63cb46b9069fb4116b60 | 9b9d6fb283c89af8b6c89ad20f72b8208c2aeb45 | refs/heads/master | 2023-07-21T19:40:41.103336 | 2023-07-08T16:54:37 | 2023-07-08T16:54:37 | 353,636,960 | 0 | 0 | NOASSERTION | 2021-04-01T08:57:21 | 2021-04-01T08:57:20 | null | UTF-8 | Python | false | false | 1,149 | py |
###############################################################################
#
# Tests for XlsxWriter.
#
# Copyright (c), 2013-2021, John McNamara, [email protected]
#

from ..excel_comparison_test import ExcelComparisonTest
from ...workbook import Workbook


class TestCompareXLSXFiles(ExcelComparisonTest):
    """
    Test file created by XlsxWriter against a file created by Excel.
    """

    def setUp(self):
        self.set_filename('image01.xlsx')

    def test_create_file(self):
        """Test the creation of a simple XlsxWriter file with image(s)."""

        workbook = Workbook(self.got_filename)
        worksheet = workbook.add_worksheet()

        worksheet.insert_image('E9', self.image_dir + 'red.png')

        workbook.close()

        self.assertExcelEqual()

    def test_create_file_in_memory(self):
        """Test the creation of a simple XlsxWriter file with image(s)."""

        workbook = Workbook(self.got_filename, {'in_memory': True})
        worksheet = workbook.add_worksheet()

        worksheet.insert_image('E9', self.image_dir + 'red.png')

        workbook.close()

        self.assertExcelEqual()
| ["[email protected]"] | |
d3553bdebfba88789aa4678fd67bb97396e9767d | 79fa6f3a9c0c07b2768b5c67d48cd2d3ada921c7 | /kikimr/public/api/protos/draft/persqueue_error_codes_pb2.py | 97b818110b3a6a275934558ce54cac8287566409 | ["Apache-2.0"] | permissive | clumpytuna/ydb-python-sdk | 8dd951a532045587fcba1d541b3fb8798c358318 | f09d8db19f62032738ed77dabb3672c3e0f86cc3 | refs/heads/master | 2023-06-09T22:38:29.747969 | 2021-06-30T08:09:14 | 2021-06-30T08:09:14 | 319,103,389 | 0 | 0 | NOASSERTION | 2020-12-06T18:32:35 | 2020-12-06T18:32:34 | null | UTF-8 | Python | false | true | 6,378 | py |
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: kikimr/public/api/protos/draft/persqueue_error_codes.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf.internal import enum_type_wrapper
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='kikimr/public/api/protos/draft/persqueue_error_codes.proto',
package='NPersQueue.NErrorCode',
syntax='proto3',
serialized_pb=_b('\n:kikimr/public/api/protos/draft/persqueue_error_codes.proto\x12\x15NPersQueue.NErrorCode*\xd9\x04\n\nEErrorCode\x12\x06\n\x02OK\x10\x00\x12\x10\n\x0cINITIALIZING\x10\x01\x12\x0c\n\x08OVERLOAD\x10\x02\x12\x0f\n\x0b\x42\x41\x44_REQUEST\x10\x03\x12\x10\n\x0cWRONG_COOKIE\x10\x04\x12!\n\x1dWRITE_ERROR_PARTITION_IS_FULL\x10\x05\x12\x1c\n\x18WRITE_ERROR_DISK_IS_FULL\x10\x0f\x12\x1a\n\x16WRITE_ERROR_BAD_OFFSET\x10\x13\x12!\n\x1d\x43REATE_SESSION_ALREADY_LOCKED\x10\x06\x12\x1d\n\x19\x44\x45LETE_SESSION_NO_SESSION\x10\x07\x12\x1a\n\x16READ_ERROR_IN_PROGRESS\x10\x08\x12\x19\n\x15READ_ERROR_NO_SESSION\x10\t\x12\x10\n\x0cREAD_TIMEOUT\x10\n\x12\x1f\n\x1bREAD_ERROR_TOO_SMALL_OFFSET\x10\x0b\x12\x1d\n\x19READ_ERROR_TOO_BIG_OFFSET\x10\x0c\x12%\n!SET_OFFSET_ERROR_COMMIT_TO_FUTURE\x10\r\x12\x15\n\x11TABLET_IS_DROPPED\x10\x0e\x12\x11\n\rREAD_NOT_DONE\x10\x10\x12\x11\n\rUNKNOWN_TOPIC\x10\x11\x12\x11\n\rACCESS_DENIED\x10\x12\x12\x14\n\x10\x43LUSTER_DISABLED\x10\x14\x12\x1a\n\x16WRONG_PARTITION_NUMBER\x10\x15\x12\x12\n\x0e\x43REATE_TIMEOUT\x10\x16\x12\x10\n\x0cIDLE_TIMEOUT\x10\x17\x12\t\n\x05\x45RROR\x10\x64\x42\x1a\n\x18\x63om.yandex.ydb.persqueueb\x06proto3')
)
_EERRORCODE = _descriptor.EnumDescriptor(
name='EErrorCode',
full_name='NPersQueue.NErrorCode.EErrorCode',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='OK', index=0, number=0,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='INITIALIZING', index=1, number=1,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='OVERLOAD', index=2, number=2,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='BAD_REQUEST', index=3, number=3,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='WRONG_COOKIE', index=4, number=4,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='WRITE_ERROR_PARTITION_IS_FULL', index=5, number=5,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='WRITE_ERROR_DISK_IS_FULL', index=6, number=15,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='WRITE_ERROR_BAD_OFFSET', index=7, number=19,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='CREATE_SESSION_ALREADY_LOCKED', index=8, number=6,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='DELETE_SESSION_NO_SESSION', index=9, number=7,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='READ_ERROR_IN_PROGRESS', index=10, number=8,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='READ_ERROR_NO_SESSION', index=11, number=9,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='READ_TIMEOUT', index=12, number=10,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='READ_ERROR_TOO_SMALL_OFFSET', index=13, number=11,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='READ_ERROR_TOO_BIG_OFFSET', index=14, number=12,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='SET_OFFSET_ERROR_COMMIT_TO_FUTURE', index=15, number=13,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='TABLET_IS_DROPPED', index=16, number=14,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='READ_NOT_DONE', index=17, number=16,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='UNKNOWN_TOPIC', index=18, number=17,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='ACCESS_DENIED', index=19, number=18,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='CLUSTER_DISABLED', index=20, number=20,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='WRONG_PARTITION_NUMBER', index=21, number=21,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='CREATE_TIMEOUT', index=22, number=22,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='IDLE_TIMEOUT', index=23, number=23,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='ERROR', index=24, number=100,
options=None,
type=None),
],
containing_type=None,
options=None,
serialized_start=86,
serialized_end=687,
)
_sym_db.RegisterEnumDescriptor(_EERRORCODE)
EErrorCode = enum_type_wrapper.EnumTypeWrapper(_EERRORCODE)
OK = 0
INITIALIZING = 1
OVERLOAD = 2
BAD_REQUEST = 3
WRONG_COOKIE = 4
WRITE_ERROR_PARTITION_IS_FULL = 5
WRITE_ERROR_DISK_IS_FULL = 15
WRITE_ERROR_BAD_OFFSET = 19
CREATE_SESSION_ALREADY_LOCKED = 6
DELETE_SESSION_NO_SESSION = 7
READ_ERROR_IN_PROGRESS = 8
READ_ERROR_NO_SESSION = 9
READ_TIMEOUT = 10
READ_ERROR_TOO_SMALL_OFFSET = 11
READ_ERROR_TOO_BIG_OFFSET = 12
SET_OFFSET_ERROR_COMMIT_TO_FUTURE = 13
TABLET_IS_DROPPED = 14
READ_NOT_DONE = 16
UNKNOWN_TOPIC = 17
ACCESS_DENIED = 18
CLUSTER_DISABLED = 20
WRONG_PARTITION_NUMBER = 21
CREATE_TIMEOUT = 22
IDLE_TIMEOUT = 23
ERROR = 100
DESCRIPTOR.enum_types_by_name['EErrorCode'] = _EERRORCODE
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
DESCRIPTOR.has_options = True
DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('\n\030com.yandex.ydb.persqueue'))
# @@protoc_insertion_point(module_scope)
| ["[email protected]"] | |
002efcfadbaaadc2cad01fb292a4a4ad65565a90 | ca3a1b6386e44f8222b0f9d93dcb382027d26018 | /choix/ep.py | a457f2f505702505ba1b9766a91098a73150dfb5 | ["MIT"] | permissive | pkrouth/choix | 80ef6fceaffbbc618fb6496217f4e077b3d8e6d4 | 05a57a10bb707338113a9d91601ca528ead7a881 | refs/heads/master | 2020-04-27T15:34:29.404796 | 2018-08-08T13:17:01 | 2018-08-08T13:17:01 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,314 | py |
import functools
import numpy as np
import numpy.random as nprand

from numpy.linalg import norm
from math import exp, log, pi, sqrt  # Faster than numpy equivalents.
from scipy.misc import logsumexp

from .utils import normal_cdf, inv_posdef, SQRT2, SQRT2PI


# EP-related settings.
THRESHOLD = 1e-4
MAT_ONE = np.array([[1.0, -1.0], [-1.0, 1.0]])
MAT_ONE_FLAT = MAT_ONE.ravel()

# Some magic constants for a stable computation of _log_phi(z).
CS = [
    0.00048204, -0.00142906, 0.0013200243174, 0.0009461589032, -0.0045563339802,
    0.00556964649138, 0.00125993961762116, -0.01621575378835404,
    0.02629651521057465, -0.001829764677455021, 2*(1-pi/3), (4-pi)/3, 1, 1,]
RS = [
    1.2753666447299659525, 5.019049726784267463450, 6.1602098531096305441,
    7.409740605964741794425, 2.9788656263939928886,]
QS = [
    2.260528520767326969592, 9.3960340162350541504, 12.048951927855129036034,
    17.081440747466004316, 9.608965327192787870698, 3.3690752069827527677,]


def ep_pairwise(n_items, data, alpha, model="logit", max_iter=100,
        initial_state=None):
    """Compute a distribution of model parameters using the EP algorithm.

    This function computes an approximate Bayesian posterior probability
    distribution over model parameters, given pairwise-comparison data (see
    :ref:`data-pairwise`). It uses the expectation propagation algorithm, as
    presented, e.g., in [CG05]_.

    The prior distribution is assumed to be isotropic Gaussian with variance
    ``1 / alpha``. The posterior is approximated by a general multivariate
    Gaussian distribution, described by a mean vector and a covariance matrix.

    Two different observation models are available. ``logit`` (default) assumes
    that pairwise-comparison outcomes follow from a Bradley-Terry model.
    ``probit`` assumes that the outcomes follow from Thurstone's model.

    Parameters
    ----------
    n_items : int
        Number of distinct items.
    data : list of lists
        Pairwise-comparison data.
    alpha : float
        Inverse variance of the (isotropic) prior.
    model : str, optional
        Observation model. Either "logit" or "probit".
    max_iter : int, optional
        Maximum number of iterations allowed.
    initial_state : tuple of array_like, optional
        Natural parameters used to initialize the EP algorithm.

    Returns
    -------
    mean : numpy.ndarray
        The mean vector of the approximate Gaussian posterior.
    cov : numpy.ndarray
        The covariance matrix of the approximate Gaussian posterior.

    Raises
    ------
    ValueError
        If the observation model is not "logit" or "probit".
    """
    if model == "logit":
        match_moments = _match_moments_logit
    elif model == "probit":
        match_moments = _match_moments_probit
    else:
        raise ValueError("unknown model '{}'".format(model))
    return _ep_pairwise(
            n_items, data, alpha, match_moments, max_iter, initial_state)


def _ep_pairwise(
        n_items, comparisons, alpha, match_moments, max_iter, initial_state):
    """Compute a distribution of model parameters using the EP algorithm.

    Raises
    ------
    RuntimeError
        If the algorithm does not converge after ``max_iter`` iterations.
    """
    # Static variable that allows to check the # of iterations after the call.
    _ep_pairwise.iterations = 0
    m = len(comparisons)
    prior_inv = alpha * np.eye(n_items)
    if initial_state is None:
        # Initially, mean and covariance come from the prior.
        mean = np.zeros(n_items)
        cov = (1 / alpha) * np.eye(n_items)
        # Initialize the natural params in the function space.
        tau = np.zeros(m)
        nu = np.zeros(m)
        # Initialize the natural params in the space of thetas.
        prec = np.zeros((n_items, n_items))
        xs = np.zeros(n_items)
    else:
        tau, nu = initial_state
        mean, cov, xs, prec = _init_ws(
                n_items, comparisons, prior_inv, tau, nu)
    for _ in range(max_iter):
        _ep_pairwise.iterations += 1
        # Keep a copy of the old parameters for convergence testing.
        tau_old = np.array(tau, copy=True)
        nu_old = np.array(nu, copy=True)
        for i in nprand.permutation(m):
            a, b = comparisons[i]
            # Update mean and variance in function space.
            f_var = cov[a,a] + cov[b,b] - 2 * cov[a,b]
            f_mean = mean[a] - mean[b]
            # Cavity distribution.
            tau_tot = 1.0 / f_var
            nu_tot = tau_tot * f_mean
            tau_cav = tau_tot - tau[i]
            nu_cav = nu_tot - nu[i]
            cov_cav = 1.0 / tau_cav
            mean_cav = cov_cav * nu_cav
            # Moment matching.
            logpart, dlogpart, d2logpart = match_moments(mean_cav, cov_cav)
            # Update factor params in the function space.
            tau[i] = -d2logpart / (1 + d2logpart / tau_cav)
            delta_tau = tau[i] - tau_old[i]
            nu[i] = ((dlogpart - (nu_cav / tau_cav) * d2logpart)
                    / (1 + d2logpart / tau_cav))
            delta_nu = nu[i] - nu_old[i]
            # Update factor params in the weight space.
            prec[(a, a, b, b), (a, b, a, b)] += delta_tau * MAT_ONE_FLAT
            xs[a] += delta_nu
            xs[b] -= delta_nu
            # Update mean and covariance.
            if abs(delta_tau) > 0:
                phi = -1.0 / ((1.0 / delta_tau) + f_var) * MAT_ONE
                upd_mat = cov.take([a, b], axis=0)
                cov = cov + upd_mat.T.dot(phi).dot(upd_mat)
                mean = cov.dot(xs)
        # Recompute the global parameters for stability.
        cov = inv_posdef(prior_inv + prec)
        mean = cov.dot(xs)
        if _converged((tau, nu), (tau_old, nu_old)):
            return mean, cov
    raise RuntimeError(
            "EP did not converge after {} iterations".format(max_iter))


def _log_phi(z):
    """Stable computation of the log of the Normal CDF and its derivative."""
    # Adapted from the GPML function `logphi.m`.
    if z * z < 0.0492:
        # First case: z close to zero.
        coef = -z / SQRT2PI
        val = functools.reduce(lambda acc, c: coef * (c + acc), CS, 0)
        res = -2 * val - log(2)
        dres = exp(-(z * z) / 2 - res) / SQRT2PI
    elif z < -11.3137:
        # Second case: z very small.
        num = functools.reduce(
                lambda acc, r: -z * acc / SQRT2 + r, RS, 0.5641895835477550741)
        den = functools.reduce(lambda acc, q: -z * acc / SQRT2 + q, QS, 1.0)
        res = log(num / (2 * den)) - (z * z) / 2
        dres = abs(den / num) * sqrt(2.0 / pi)
    else:
        res = log(normal_cdf(z))
        dres = exp(-(z * z) / 2 - res) / SQRT2PI
    return res, dres


def _match_moments_logit(mean_cav, cov_cav):
    # Adapted from the GPML function `likLogistic.m`.
    # First use a scale mixture.
    lambdas = sqrt(2) * np.array([0.44, 0.41, 0.40, 0.39, 0.36]);
    cs = np.array([
        1.146480988574439e+02,
        -1.508871030070582e+03,
        2.676085036831241e+03,
        -1.356294962039222e+03,
        7.543285642111850e+01
    ])
    arr1, arr2, arr3 = np.zeros(5), np.zeros(5), np.zeros(5)
    for i, x in enumerate(lambdas):
        arr1[i], arr2[i], arr3[i] = _match_moments_probit(
                x * mean_cav, x * x * cov_cav)
    logpart1 = logsumexp(arr1, b=cs)
    dlogpart1 = (np.dot(np.exp(arr1) * arr2, cs * lambdas)
            / np.dot(np.exp(arr1), cs))
    d2logpart1 = (np.dot(np.exp(arr1) * (arr2 * arr2 + arr3),
            cs * lambdas * lambdas)
            / np.dot(np.exp(arr1), cs)) - (dlogpart1 * dlogpart1)
    # Tail decays linearly in the log domain (and not quadratically).
    exponent = -10.0 * (abs(mean_cav) - (196.0 / 200.0) * cov_cav - 4.0)
    if exponent < 500:
        lambd = 1.0 / (1.0 + exp(exponent))
        logpart2 = min(cov_cav / 2.0 - abs(mean_cav), -0.1)
        dlogpart2 = 1.0
        if mean_cav > 0:
            logpart2 = log(1 - exp(logpart2))
            dlogpart2 = 0.0
        d2logpart2 = 0.0
    else:
        lambd, logpart2, dlogpart2, d2logpart2 = 0.0, 0.0, 0.0, 0.0
    logpart = (1 - lambd) * logpart1 + lambd * logpart2
    dlogpart = (1 - lambd) * dlogpart1 + lambd * dlogpart2
    d2logpart = (1 - lambd) * d2logpart1 + lambd * d2logpart2
    return logpart, dlogpart, d2logpart


def _match_moments_probit(mean_cav, cov_cav):
    # Adapted from the GPML function `likErf.m`.
    z = mean_cav / sqrt(1 + cov_cav)
    logpart, val = _log_phi(z)
    dlogpart = val / sqrt(1 + cov_cav)  # 1st derivative w.r.t. mean.
    d2logpart = -val * (z + val) / (1 + cov_cav)
    return logpart, dlogpart, d2logpart


def _init_ws(n_items, comparisons, prior_inv, tau, nu):
    """Initialize parameters in the weight space."""
    prec = np.zeros((n_items, n_items))
    xs = np.zeros(n_items)
    for i, (a, b) in enumerate(comparisons):
        prec[(a, a, b, b), (a, b, a, b)] += tau[i] * MAT_ONE_FLAT
        xs[a] += nu[i]
        xs[b] -= nu[i]
    cov = inv_posdef(prior_inv + prec)
    mean = cov.dot(xs)
    return mean, cov, xs, prec


def _converged(new, old, threshold=THRESHOLD):
    for param_new, param_old in zip(new, old):
        if norm(param_new - param_old, ord=np.inf) > threshold:
            return False
    return True
| ["[email protected]"] | |
82e5f74cb9e4d564e4c9db40175c77111f664934 | 6b2a8dd202fdce77c971c412717e305e1caaac51 | /solutions_5630113748090880_1/Python/Hichamdz38/b.py | ec503006a405070048db8b02218200889a3eaef9 | [] | no_license | alexandraback/datacollection | 0bc67a9ace00abbc843f4912562f3a064992e0e9 | 076a7bc7693f3abf07bfdbdac838cb4ef65ccfcf | refs/heads/master | 2021-01-24T18:27:24.417992 | 2017-05-23T09:23:38 | 2017-05-23T09:23:38 | 84,313,442 | 2 | 4 | null | null | null | null | UTF-8 | Python | false | false | 309 | py |
import numpy as np
for i in xrange(1,input()+1):
    N=input()
    z=np.array(N**2*2-N)
    SS=[]
    for j in xrange(N*2-1):
        S=map(int,raw_input().split())
        SS.extend(S)
    f=[]
    for j in SS:
        if SS.count(j)%2!=0:
            if j not in f:
                f.append(j)
    f.sort()
    print "Case #{}:".format(i),
    for j in f:
        print j,
    print
| ["[email protected]"] | |
4570d38cc342698b9f5e6bcaaca77a8459c0408c | b385fc2f18bbb43ec1bca1606b62ae83f33dcb2d | /Programming-Basics/While Loop/Sequence 2k+1/Sequence 2k+1.py | d1b977f06e673eb316811f75cd6f44e4799f9e5f | [] | no_license | rishinkaku/Software-University---Software-Engineering | d9bee36de12affc9aed7fcc0b8b6616768340e51 | b798a0c6927ef461491c8327451dd00561d836e4 | refs/heads/master | 2023-06-10T19:52:51.016630 | 2021-07-08T00:45:06 | 2021-07-08T00:45:06 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 96 | py |
n = int(input())
i = 1
while True:
    print(i)
    i = 2 * i + 1
    if i > n:
        break
| ["[email protected]"] | |
9f2797f7ec61e1beba2df83d4671a275d44af30c | a0c53168a4bdcfb0aa917d6d2c602f0999443a10 | /projexui/widgets/xcalendarwidget/xcalendarscene.py | 6f9462a72e6858bc49065498b0206a2d26eb0e3a | [] | no_license | kanooshka/DPS_PIPELINE | 8067154c59ca5c8c9c09740969bb6e8537021903 | df2fcdecda5bce98e4235ffddde1e99f334562cc | refs/heads/master | 2021-05-24T04:32:03.457648 | 2018-09-07T13:25:11 | 2018-09-07T13:25:11 | 29,938,064 | 3 | 2 | null | 2020-07-23T23:06:37 | 2015-01-27T22:26:01 | Python | UTF-8 | Python | false | false | 19,573 | py |
#!/usr/bin/python
"""
Defines a calendar widget similar to the ones found in outlook or ical.
"""
# define authorship information
__authors__ = ['Eric Hulser']
__author__ = ','.join(__authors__)
__credits__ = []
__copyright__ = 'Copyright (c) 2011, Projex Software'
__license__ = 'LGPL'
# maintanence information
__maintainer__ = 'Projex Software'
__email__ = '[email protected]'
#------------------------------------------------------------------------------
from PyQt4.QtCore import Qt,\
QDate, \
QLine,\
QRectF,\
QDateTime,\
QTime
from PyQt4.QtGui import QGraphicsScene,\
QPalette,\
QCursor
from projex.enum import enum
from projexui.widgets.xcalendarwidget.xcalendaritem import XCalendarItem
from projexui.widgets.xpopupwidget import XPopupWidget
class XCalendarScene(QGraphicsScene):
Mode = enum('Day', 'Week', 'Month', 'Agenda')
TimelineScale = enum('Day', 'Week', 'Month', 'Year')
def __init__( self, parent = None ):
super(XCalendarScene, self).__init__( parent )
# define custom properties
self._currentDate = QDate.currentDate()
self._currentMode = XCalendarScene.Mode.Month
self._timelineScale = XCalendarScene.TimelineScale.Week
self._minimumDate = QDate()
self._maximumDate = QDate()
self._dateGrid = {}
self._dateTimeGrid = {}
self._buildData = {}
self._rebuildRequired = False
# set default properties
# create connections
def addCalendarItem( self ):
"""
Adds a new calendar item to the scene.
:return <XCalendarItem>
"""
item = XCalendarItem()
self.addItem(item)
return item
def addItem( self, item ):
"""
Adds the item to the scene and redraws the item.
:param item | <QGraphicsItem>
"""
result = super(XCalendarScene, self).addItem(item)
if ( isinstance(item, XCalendarItem) ):
item.rebuild()
return result
def currentDate( self ):
"""
Returns the current date displayed with this calendar widget.
:return <QDate>
"""
return self._currentDate
def currentMode( self ):
"""
Returns what calendar mode this calendar is currently displaying.
:return <XCalendarScene.Mode>
"""
return self._currentMode
def dateAt( self, point ):
"""
Returns the date at the given point.
:param point | <QPoint>
"""
for date, data in self._dateGrid.items():
if ( data[1].contains(point) ):
return QDate.fromJulianDay(date)
return QDate()
def dateTimeAt( self, point ):
"""
Returns the date time at the inputed point.
:param point | <QPoint>
"""
for dtime, data in self._dateTimeGrid.items():
if ( data[1].contains(point) ):
return QDateTime.fromTime_t(dtime)
return QDateTime()
def dateRect( self, date ):
"""
Returns the rect that is defined by the inputed date.
:return <QRectF>
"""
data = self._dateGrid.get(date.toJulianDay())
if ( data ):
return QRectF(data[1])
return QRectF()
def dateTimeRect( self, dateTime ):
"""
Returns the rect that is defined by the inputed date time.
:return <QRectF>
"""
data = self._dateTimeGrid.get(dateTime.toTime_t())
if ( data ):
return QRectF(data[1])
return QRectF()
def drawBackground( self, painter, rect ):
"""
Draws the background of the scene using painter.
:param painter | <QPainter>
rect | <QRectF>
"""
if ( self._rebuildRequired ):
self.rebuild()
super(XCalendarScene, self).drawBackground(painter, rect)
palette = self.palette()
# draw custom options
if ( 'curr_date' in self._buildData ):
clr = palette.color(QPalette.Highlight)
clr.setAlpha(40)
painter.setBrush(clr)
painter.setPen(Qt.NoPen)
painter.drawRect(self._buildData['curr_date'])
painter.setBrush(Qt.NoBrush)
if ( 'today' in self._buildData ):
painter.setPen(Qt.NoPen)
clr = palette.color(QPalette.AlternateBase)
clr.setAlpha(120)
painter.setBrush(clr)
painter.drawRect(self._buildData['today'])
painter.setBrush(Qt.NoBrush)
# draw the grid
painter.setPen(palette.color(QPalette.Mid))
painter.drawLines(self._buildData.get('grid', []))
# draw text fields
painter.setPen(palette.color(QPalette.Text))
for data in self._buildData.get('regular_text', []):
painter.drawText(*data)
# draw mid text fields
painter.setPen(palette.color(QPalette.Mid))
for data in self._buildData.get('mid_text', []):
painter.drawText(*data)
def helpEvent( self, event ):
"""
Displays a tool tip for the given help event.
:param event | <QHelpEvent>
"""
item = self.itemAt(event.scenePos())
if ( item and item and item.toolTip() ):
parent = self.parent()
rect = item.path().boundingRect()
point = event.scenePos()
point.setY(item.pos().y() + rect.bottom())
point = parent.mapFromScene(point)
point = parent.mapToGlobal(point)
XPopupWidget.showToolTip(item.toolTip(),
point = point,
parent = parent)
event.accept()
else:
super(XCalendarScene, self).helpEvent(event)
def markForRebuild( self, state = True ):
"""
Marks this scene as needing to be rebuild.
:param state | <bool>
"""
self._rebuildRequired = state
self.invalidate()
def maximumDate( self ):
"""
Returns the maximum date for this widget. This value will be used \
when in timeline mode to determine the end for the date range to \
search for.
:return <QDate>
"""
return self._maximumDate
def mousePressEvent( self, event ):
"""
Changes the current date to the clicked on date.
:param event | <QMousePressEvent>
"""
XPopupWidget.hideToolTip()
# update the current date
self.setCurrentDate(self.dateAt(event.scenePos()))
super(XCalendarScene, self).mousePressEvent(event)
def minimumDate( self ):
"""
Returns the minimum date for this widget. This value will be used \
when in timeline mode to determine the start for the date range to \
search for.
:return <QDate>
"""
return self._minimumDate
def rebuild( self ):
"""
Rebuilds the information for this scene.
"""
self._buildData.clear()
self._dateGrid.clear()
self._dateTimeGrid.clear()
curr_min = self._minimumDate
curr_max = self._maximumDate
self._maximumDate = QDate()
self._minimumDate = QDate()
self.markForRebuild(False)
# rebuilds the month view
if ( self.currentMode() == XCalendarScene.Mode.Month ):
self.rebuildMonth()
elif ( self.currentMode() in (XCalendarScene.Mode.Week,
XCalendarScene.Mode.Day)):
self.rebuildDays()
# rebuild the items in the scene
items = sorted(self.items())
for item in items:
item.setPos(0, 0)
item.hide()
for item in items:
if ( isinstance(item, XCalendarItem) ):
item.rebuild()
if ( curr_min != self._minimumDate or curr_max != self._maximumDate ):
parent = self.parent()
if ( parent and not parent.signalsBlocked() ):
parent.dateRangeChanged.emit(self._minimumDate,
self._maximumDate)
def rebuildMonth( self ):
"""
Rebuilds the month for this scene.
"""
# make sure we start at 0 for sunday vs. 7 for sunday
day_map = dict([(i+1, i+1) for i in range(7)])
day_map[7] = 0
today = QDate.currentDate()
curr = self.currentDate()
first = QDate(curr.year(), curr.month(), 1)
last = QDate(curr.year(), curr.month(), curr.daysInMonth())
first = first.addDays(-day_map[first.dayOfWeek()])
last = last.addDays(6-day_map[last.dayOfWeek()])
cols = 7
rows = (first.daysTo(last) + 1) / cols
hlines = []
vlines = []
padx = 6
pady = 6
header = 24
w = self.width() - (2 * padx)
h = self.height() - (2 * pady)
dw = (w / cols) - 1
dh = ((h - header) / rows) - 1
x0 = padx
y0 = pady + header
x = x0
y = y0
for row in range(rows + 1):
hlines.append(QLine(x0, y, w, y))
y += dh
for col in range(cols + 1):
vlines.append(QLine(x, y0, x, h))
x += dw
self._buildData['grid'] = hlines + vlines
# draw the date fields
date = first
row = 0
col = 0
# draw the headers
x = x0
y = pady
regular_text = []
mid_text = []
self._buildData['regular_text'] = regular_text
self._buildData['mid_text'] = mid_text
for day in ('Sun', 'Mon','Tue','Wed','Thu','Fri','Sat'):
regular_text.append((x + 5,
y,
dw,
y0,
Qt.AlignLeft | Qt.AlignVCenter,
day))
x += dw
for i in range(first.daysTo(last) + 1):
top = (y0 + (row * dh))
left = (x0 + (col * dw))
rect = QRectF(left - 1, top, dw, dh)
# mark the current date on the calendar
if ( date == curr ):
self._buildData['curr_date'] = rect
# mark today's date on the calendar
elif ( date == today ):
self._buildData['today'] = rect
# determine how to draw the calendar
format = 'd'
if ( date.day() == 1 ):
format = 'MMM d'
# determine the color to draw the text
if ( date.month() == curr.month() ):
text = regular_text
else:
text = mid_text
# draw the text
text.append((left + 2,
top + 2,
dw - 4,
dh - 4,
Qt.AlignTop | Qt.AlignLeft,
date.toString(format)))
# update the limits
if ( not i ):
self._minimumDate = date
self._maximumDate = date
self._dateGrid[date.toJulianDay()] = ((row, col), rect)
if ( col == (cols - 1) ):
row += 1
col = 0
else:
col += 1
date = date.addDays(1)
def rebuildDays( self ):
"""
Rebuilds the interface as a week display.
"""
time = QTime(0, 0, 0)
hour = True
x = 6
y = 6 + 24
w = self.width() - 12 - 25
dh = 48
indent = 58
text_data = []
vlines = []
hlines = [QLine(x, y, w, y)]
time_grids = []
for i in range(48):
if ( hour ):
hlines.append(QLine(x, y, w, y))
text_data.append((x,
y + 6,
indent - 6,
dh,
Qt.AlignRight | Qt.AlignTop,
time.toString('hap')))
else:
hlines.append(QLine(x + indent, y, w, y))
time_grids.append((time, y, dh / 2))
# move onto the next line
hour = not hour
time = time.addSecs(30 * 60)
y += dh / 2
hlines.append(QLine(x, y, w, y))
h = y
y = 6 + 24
# load the grid
vlines.append(QLine(x, y, x, h))
vlines.append(QLine(x + indent, y, x + indent, h))
vlines.append(QLine(w, y, w, h))
today = QDate.currentDate()
curr_date = self.currentDate()
# load the days
if ( self.currentMode() == XCalendarScene.Mode.Week ):
date = self.currentDate()
day_of_week = date.dayOfWeek()
if ( day_of_week == 7 ):
day_of_week = 0
min_date = date.addDays(-day_of_week)
max_date = date.addDays(6-day_of_week)
self._minimumDate = min_date
self._maximumDate = max_date
dw = (w - (x + indent)) / 7.0
vx = x + indent
date = min_date
for i in range(7):
vlines.append(QLine(vx, y, vx, h))
text_data.append((vx + 6,
6,
dw,
24,
Qt.AlignCenter,
date.toString('ddd MM/dd')))
self._dateGrid[date.toJulianDay()] = ((0, i),
QRectF(vx, y, dw, h - y))
# create the date grid for date time options
for r, data in enumerate(time_grids):
time, ty, th = data
dtime = QDateTime(date, time)
key = dtime.toTime_t()
self._dateTimeGrid[key] = ((r, i), QRectF(vx, ty, dw, th))
if ( date == curr_date ):
self._buildData['curr_date'] = QRectF(vx, y, dw, h - 29)
elif ( date == today ):
self._buildData['today'] = QRectF(vx, y, dw, h - 29)
date = date.addDays(1)
vx += dw
# load a single day
else:
date = self.currentDate()
self._maximumDate = date
self._minimumDate = date
text_data.append((x + indent,
6,
w,
24,
Qt.AlignCenter,
date.toString('ddd MM/dd')))
self._dateGrid[date.toJulianDay()] = ((0, 0),
QRectF(x, y, w - x, h - y))
# create the date grid for date time options
for r, data in enumerate(time_grids):
time, ty, th = data
dtime = QDateTime(date, time)
key = dtime.toTime_t()
rect = QRectF(x + indent, ty, w - (x + indent), th)
self._dateTimeGrid[key] = ((r, 0), rect)
self._buildData['grid'] = hlines + vlines
self._buildData['regular_text'] = text_data
rect = self.sceneRect()
rect.setHeight(h + 6)
super(XCalendarScene, self).setSceneRect(rect)
def setCurrentDate( self, date ):
"""
Sets the current date displayed by this calendar widget.
        :param      date | <QDate>
"""
if ( date == self._currentDate or not date.isValid() ):
return
self._currentDate = date
self.markForRebuild()
parent = self.parent()
if ( not parent.signalsBlocked() ):
parent.currentDateChanged.emit(date)
parent.titleChanged.emit(self.title())
def setCurrentMode( self, mode ):
"""
Sets the current mode that this calendar will be displayed in.
:param mode | <XCalendarScene.Mode>
"""
self._currentMode = mode
self.markForRebuild()
def setSceneRect( self, *args ):
"""
Updates the scene rect for this item.
:param *args
"""
h = self.height()
super(XCalendarScene, self).setSceneRect(*args)
if ( self.currentMode() != XCalendarScene.Mode.Month ):
rect = self.sceneRect()
rect.setHeight(h)
super(XCalendarScene, self).setSceneRect(rect)
self.markForRebuild()
def setTimelineScale( self, timelineScale ):
"""
Sets the timeline scale that will be used when rendering a calendar in \
timeline mode.
:param timelineScale | <XCalendarScene.TimelineScale>
"""
self._timelineScale = timelineScale
def title( self ):
"""
Returns the title for this scene based on its information.
:return <str>
"""
if ( self.currentMode() == XCalendarScene.Mode.Day ):
return self.currentDate().toString('dddd, MMMM dd, yyyy')
elif ( self.currentMode() == XCalendarScene.Mode.Week ):
title = str(self.minimumDate().toString('dddd, MMMM dd'))
title += ' - '
title += str(self.maximumDate().toString('dddd, MMMM dd, yyyy'))
return title
elif ( self.currentMode() == XCalendarScene.Mode.Month ):
return self.currentDate().toString('MMMM yyyy')
else:
return ''
def timelineScale( self ):
"""
Returns the timeline scale that will be used when rendering a calendar \
in timeline mode.
:return <XCalendarScene.TimelineScale>
"""
return self._timelineScale | [
"[email protected]"
] | |
b9be26523a79e0ed4ebc0819a2cf4003d2b1ee59 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02838/s053367568.py | d3c49f223dc225bd9cca1700aa01ef3296ab9707 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,631 | py | # coding: utf-8
import sys
#from operator import itemgetter
sysread = sys.stdin.buffer.readline
read = sys.stdin.buffer.read
#from heapq import heappop, heappush
#from collections import defaultdict
sys.setrecursionlimit(10**7)
#import math
#from itertools import product, accumulate, combinations, product
#import bisect
#import numpy as np
#from copy import deepcopy
#from collections import deque
#from decimal import Decimal
#from numba import jit
INF = 1 << 50
EPS = 1e-8
mod = 10 ** 9 + 7
def mapline(t = int):
return map(t, sysread().split())
def mapread(t = int):
return map(t, read().split())
def generate_inv(n,mod):
"""
    Build a table of modular inverses: ret[i] = i^(-1) mod `mod`
    n >= 2
    Note: mod must be a prime number
"""
ret = [0, 1]
for i in range(2,n+1):
next = -ret[mod%i] * (mod // i)
next %= mod
ret.append(next)
return ret
def run():
N, *A = mapread()
maxA = max(A)
L = maxA.bit_length()
subs = [0] * L
for k in range(L):
sum = 0
for a in A:
if (a >> k) & 1:
sum += 1 << k
sum %= mod
subs[k] = sum
sumA = 0
for a in A:
sumA += a
sumA %= mod
ret = 0
ret += (sumA * N) % mod
ret += (sumA * N) % mod
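    # identity used: a ^ b == a + b - 2*(a & b). The two additions above total
    # sum over ordered pairs of (a_i + a_j) = 2*N*sumA; sub_sum below collects
    # 2 * sum of (a_i & a_j); halving at the end leaves the XOR sum over pairs i < j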
sub_sum = 0
for a in A:
sums = 0
for k in range(L):
if (a >> k) & 1:
sums += subs[k] * 2
sums %= mod
sub_sum += sums
sub_sum %= mod
ret -= sub_sum
ret %= mod
inv = generate_inv(2, mod)
ret *= inv[2]
ret %= mod
print(ret)
if __name__ == "__main__":
run()
| [
"[email protected]"
] | |
4fd3ad15ddd33c92cdffecb72052595b15ddd601 | 4beabdb5089e3284251dcaf046366c35d3afe02f | /rectangles.py | 06768e5dd0cb13903384183826b1e5920a411701 | [] | no_license | AndrewFendrich/Mandelbrot | c3fa2b1463d6e01b91ac0a3c53ef88c8e1716641 | 074ebd9028c13a9f840c2436ab2c8c3d2275dbf6 | refs/heads/master | 2021-01-13T00:52:24.060863 | 2017-05-08T14:30:02 | 2017-05-08T14:30:02 | 50,623,517 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 212 | py | # -*- coding: utf-8 -*-
"""
Created on Fri Nov 27 23:25:48 2015
@author: User
"""
import pygame
pygame.init()
rectangle = pygame.Rect(50,50,100,100)
print(rectangle)
rectangle.inflate_ip(2,2)
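# inflate_ip grows the rect in place around its center, so the second print
# shows <rect(49, 49, 102, 102)>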
print(rectangle) | [
"[email protected]"
] | |
80a1c18f8e69671ebde216c7d4f3665ff8b2181b | b281dd9e711d737579745914c6611d8cfaddb07d | /phones_media_files_demo/phones_media_files_demo/phones/migrations/0001_initial.py | c64a49f588e99ab5a1c3d237694ae76464f853d7 | [
"MIT"
] | permissive | Beshkov/Python-web-fundamentals | daf76f3765cb56e02bdaba8ea7df675990dd3885 | 6b0e9cc9725ea80a33c2ebde6e29f2ab585ab8d9 | refs/heads/main | 2023-08-03T07:04:22.238320 | 2021-09-12T18:57:36 | 2021-09-12T18:57:36 | 392,644,888 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 633 | py | # Generated by Django 3.2.6 on 2021-08-06 20:25
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Phone',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('manufacturer', models.CharField(max_length=30)),
('model', models.CharField(max_length=15)),
('image', models.ImageField(blank=True, upload_to='phones')),
],
),
]
| [
"[email protected]"
] | |
54b5f81c202a4a9d48f25271d4ba743e2e4d049f | 4015e9d9cc72889b3494ae8b58e81dc507ae8d31 | /venv/Lib/site-packages/celery/bin/beat.py | faddd256a6bad3001f11a3074518b1a34db1463b | [] | no_license | Unlimit78/Test_For_DevelopsToday | 675676d3a477f590485722019bc1b1e1412b3926 | dc4e4ae887edf243adaca3a03c5fd3209ee60300 | refs/heads/master | 2022-12-17T18:41:33.511674 | 2020-09-15T18:13:53 | 2020-09-15T18:13:53 | 295,706,782 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,625 | py | # -*- coding: utf-8 -*-
"""The :program:`celery beat` command.
.. program:: celery beat
.. seealso::
See :ref:`preload-options` and :ref:`daemon-options`.
.. cmdoption:: --detach
Detach and run in the background as a daemon.
.. cmdoption:: -s, --schedule
Path to the schedule database. Defaults to `celerybeat-schedule`.
The extension '.db' may be appended to the filename.
Default is {default}.
.. cmdoption:: -S, --scheduler
Scheduler class to use.
Default is :class:`{default}`.
.. cmdoption:: --max-interval
Max seconds to sleep between schedule iterations.
.. cmdoption:: -f, --logfile
Path to log file. If no logfile is specified, `stderr` is used.
.. cmdoption:: -l, --loglevel
Logging level, choose between `DEBUG`, `INFO`, `WARNING`,
`ERROR`, `CRITICAL`, or `FATAL`.
.. cmdoption:: --pidfile
File used to store the process pid. Defaults to `celerybeat.pid`.
The program won't start if this file already exists
and the pid is still alive.
.. cmdoption:: --uid
User id, or user name of the user to run as after detaching.
.. cmdoption:: --gid
Group id, or group name of the main group to change to after
detaching.
.. cmdoption:: --umask
Effective umask (in octal) of the process after detaching. Inherits
the umask of the parent process by default.
.. cmdoption:: --workdir
Optional directory to change to after detaching.
.. cmdoption:: --executable
Executable to use for the detached process.
"""
from __future__ import absolute_import, unicode_literals
from functools import partial
from celery.bin.base import Command, daemon_options
from celery.platforms import detached, maybe_drop_privileges
__all__ = ("beat",)
HELP = __doc__
class beat(Command):
"""Start the beat periodic task scheduler.
Examples:
.. code-block:: console
$ celery beat -l info
$ celery beat -s /var/run/celery/beat-schedule --detach
$ celery beat -S django
The last example requires the :pypi:`django-celery-beat` extension
package found on PyPI.
"""
doc = HELP
enable_config_from_cmdline = True
supports_args = False
def run(
self,
detach=False,
logfile=None,
pidfile=None,
uid=None,
gid=None,
umask=None,
workdir=None,
**kwargs
):
if not detach:
maybe_drop_privileges(uid=uid, gid=gid)
kwargs.pop("app", None)
beat = partial(self.app.Beat, logfile=logfile, pidfile=pidfile, **kwargs)
if detach:
with detached(logfile, pidfile, uid, gid, umask, workdir):
return beat().run()
else:
return beat().run()
def add_arguments(self, parser):
c = self.app.conf
bopts = parser.add_argument_group("Beat Options")
bopts.add_argument("--detach", action="store_true", default=False)
bopts.add_argument("-s", "--schedule", default=c.beat_schedule_filename)
bopts.add_argument("--max-interval", type=float)
bopts.add_argument("-S", "--scheduler", default=c.beat_scheduler)
bopts.add_argument("-l", "--loglevel", default="WARN")
daemon_options(parser, default_pidfile="celerybeat.pid")
user_options = self.app.user_options["beat"]
if user_options:
uopts = parser.add_argument_group("User Options")
self.add_compat_options(uopts, user_options)
def main(app=None):
beat(app=app).execute_from_commandline()
if __name__ == "__main__": # pragma: no cover
main()
| [
"[email protected]"
] | |
936b7b10e86cdeeaefe0e6f870ba20839b804f3d | 04a0614b8c2a893dab29bc4ffb0aaf82364fdf3f | /53. Maximum Subarray.py | 2fd8f5e141461dfc091e452ab1ffef6fc179a75e | [] | no_license | sharmaji27/Leetcode-Problems | 716bcb4a36b9e4f45274c4d551967e15c40ddbd2 | 0f878933b17df170c18f0b67b7200cec76c276e0 | refs/heads/master | 2021-10-20T17:35:35.175757 | 2021-10-20T05:33:17 | 2021-10-20T05:33:17 | 218,299,755 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 367 | py | class Solution:
def maxSubArray(self, nums: List[int]) -> int:
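        # Kadane's algorithm: current_sum is the best subarray sum ending at index i;
        # e.g. maxSubArray([-2, 1, -3, 4, -1, 2, 1, -5, 4]) -> 6 (subarray [4, -1, 2, 1])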
if len(nums)==1:
            return nums[0]
global_max = nums[0]
current_sum = nums[0]
for i in range(1,len(nums)):
current_sum = max(current_sum+nums[i],nums[i])
global_max = max(current_sum,global_max)
return global_max | [
"[email protected]"
] | |
21d3e4137877c5c962c0a372bcf51516c5d5cab3 | 32233acff831abdd290f4168a982b694f9f95393 | /src/util.py | 5aa9b1fda0b49ea8cf671313917c63a379a0a966 | [] | no_license | CarsonScott/onlinestats | e2dfa7ceca21e99b828eb1fd0149fc34e2b9c2ce | 48a8024a4c5c0f8ddbc7ec4f1b1eef3485ae95e7 | refs/heads/master | 2020-04-26T22:36:06.471611 | 2019-03-06T22:02:45 | 2019-03-06T22:02:45 | 173,877,356 | 7 | 0 | null | null | null | null | UTF-8 | Python | false | false | 103 | py | import math
from copy import copy
def iterable(X):
return isinstance(X, list) or isinstance(X, tuple) | [
"[email protected]"
] | |
fcfdfb2bf143fbabd9e7882777ff096eaec7745c | eeee18e2769766c550fb5e0948977a016b48e15a | /Creational/abstract-factory.py | 72d108161a2fa85440dac2ece3f9d6bf79735986 | [] | no_license | MoeinGhbh/DesignPattern | 19aff7bd09f4161d11af2662b1be7962fb692989 | b543a5c4eaf9da1341f95e9c777310d4f25ddeaf | refs/heads/master | 2022-11-18T17:49:56.101880 | 2020-07-22T07:54:23 | 2020-07-22T07:54:23 | 266,117,271 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,740 | py | """
Abstract Factory
Car => Benz, Bmw => Suv, Coupe
benz suv => gla, glc
bmw suv => x1, x2
benz coupe => cls, E-class
bmw coupe => m2, m4
"""
from abc import ABC, abstractmethod
class Car(ABC):
    @abstractmethod
def call_suv(self):
pass
    @abstractmethod
def call_coupe(self):
pass
#---------------------------------------------
class Benz(Car):
def call_suv(self):
return Gla()
def call_coupe(self):
return Cls()
#---------------------------------------------
class Bmw(Car):
def call_suv(self):
return X1()
def call_coupe(self):
return M2()
#---------------------------------------------
class SUV(ABC):
    @abstractmethod
def create_suv(self):
pass
class Coupe(ABC):
    @abstractmethod
def create_coupe(self):
pass
#------------------------------------------------
# Benz
class Gla(SUV):
def create_suv(self):
print("this is your Gla SUV Benz...")
class Cls(Coupe):
def create_coupe(self):
print("this is your cls coupe Benz...")
#---------------------------------------------------
# BMW
class X1(SUV):
def create_suv(self):
print("this is your X1 SUV BMW .... ")
class M2(Coupe):
def create_coupe(self):
print("this is your me coupe BMW ....")
#------------------------------------------------------
def client_suv_order(order):
suv = order.call_suv()
suv.create_suv()
def client_coupe_order(order):
    coupe = order.call_coupe()
coupe.create_coupe()
#----------------------------------------------------------
client_coupe_order(Benz())
client_coupe_order(Bmw())
client_suv_order(Benz())
client_suv_order(Bmw())
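# expected output (with the corrected M2 message above):
# this is your cls coupe Benz...
# this is your m2 coupe BMW ....
# this is your Gla SUV Benz...
# this is your X1 SUV BMW ....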
| [
"="
] | = |
99a2478cea3c8d541d34c24dfcb9bc4ca59b0605 | 73b8aba05ee1424f38a8598a9f1305185588075f | /0x04-python-more_data_structures/9-multiply_by_2.py | 6a475a580fe3f50723c6e049968a98f01637a6dd | [] | no_license | nicolasportela/holbertonschool-higher_level_programming | 0d176c0e56f4f703c1e9a98b430fc6120f22f675 | e1537b81f21118456e5cfa0e4ed89520b232adb6 | refs/heads/master | 2023-04-20T21:30:22.693434 | 2021-05-13T01:47:30 | 2021-05-13T01:47:30 | 319,397,633 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 153 | py | #!/usr/bin/python3
def multiply_by_2(a_dictionary):
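    # returns a new dict with every value doubled,
    # e.g. multiply_by_2({'a': 1, 'b': 2}) -> {'a': 2, 'b': 4}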
new_dic = {}
for k, v in a_dictionary.items():
new_dic[k] = v * 2
return new_dic
| [
"[email protected]"
] | |
aa2ad8ba0ff14340d3d7d30cd9b8fb24c00f071c | 6820e74ec72ed67f6b84a071cef9cfbc9830ad74 | /plans/tasks.py | 22acd6cb6cb911b7571adefb4585bd699ce306c6 | [
"MIT"
] | permissive | AppforallHQ/f5 | 96c15eaac3d7acc64e48d6741f26d78c9ef0d8cd | 0a85a5516e15d278ce30d1f7f339398831974154 | refs/heads/master | 2020-06-30T17:00:46.646867 | 2016-11-21T11:41:59 | 2016-11-21T11:41:59 | 74,357,925 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,047 | py | from celery import task
from django.utils import timezone
from datetime import timedelta
import requests
import json
class EndpointNotAvailable(Exception):
pass
def call_external_endpoint_to_update_status(the_task, action, subscription):
payload = {"uuid": subscription.uuid,
"plan": subscription.plan.pk,
"activate": (action == "activate"),
}
response = requests.put(
subscription.plan.interaction_endpoint_url % payload,
data=json.dumps(payload))
if response.status_code != 200:
        e = EndpointNotAvailable()
raise the_task \
.retry(args=[subscription], exc=e)
else:
return True
@task
def send_invoice_notification(invoice, email_type, **kwargs):
return
import requests
payload = {
"invoice_payment_url": invoice.payment_url,
"email_type": email_type,
"uuid": invoice.subscription.uuid,
"plan": invoice.subscription.plan.pk,
}
mail_body_response = requests.post(
invoice.subscription.plan.mail_endpoint_url % payload,
data=json.dumps(payload))
params = json.loads(mail_body_response.text)
from .actions import send_mail
send_mail(invoice, params, email_type)
@task(default_retry_delay=3*60)
def activate_subscription(subscription, **kwargs):
    pass  # return call_external_endpoint_to_update_status(activate_subscription, "activate", subscription)
@task(default_retry_delay=3*60)
def deactivate_subscription(subscription, **kwargs):
return call_external_endpoint_to_update_status(deactivate_subscription, "deactivate", subscription)
@task
def send_preinvoice():
from plans.models import Subscription
# FIXME
for subscription in Subscription.objects.filter():
if subscription.due_date < timezone.now() + timedelta(days=subscription.plan.preinvoice_length) \
and subscription.status == Subscription.ACTIVE:
subscription.status = Subscription.PREINVOICE
subscription.full_clean()
subscription.save()
@task
def mark_subscriptions_as_overdue():
from plans.models import Subscription
# FIXME
for subscription in Subscription.objects.filter():
if subscription.due_date < timezone.now() and subscription.status == Subscription.PREINVOICE:
subscription.status = Subscription.OVERDUE
subscription.full_clean()
subscription.save()
@task
def end_gracetime_for_fucking_users():
from plans.models import Subscription
# FIXME
for subscription in Subscription.objects.filter():
if subscription.due_date + timedelta(days=subscription.plan.overdue_length) < timezone.now():
subscription.status = Subscription.DEACTIVE
subscription.full_clean()
subscription.save()
@task
def invalidate_invoices():
from plans.models import Invoice
# FIXME
for invoice in Invoice.objects.filter():
if invoice.expires_at < timezone.now():
invoice.mark_as_invalid()
| [
"[email protected]"
] | |
753271955f78deae3afbada6c0d93276ade8e340 | 03bca281c8bb3ba69c3a01252cc7c9e35cd675bd | /django/DCC/dccApp/migrations/0001_initial.py | 63c611642310e159928a36aac3c2066355be6090 | [] | no_license | satish15625/pythonwork | 380fef04170064aef8aeb919a4e30f65db9a097f | 12d776152689a84f1560d08f35987f8ca4ea3fb0 | refs/heads/master | 2023-07-07T15:12:48.355226 | 2021-08-13T06:33:13 | 2021-08-13T06:33:13 | 374,058,740 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 562 | py | # Generated by Django 3.0.7 on 2020-12-15 12:25
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='UploadImages',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=50)),
('image_Img', models.ImageField(upload_to='images/')),
],
),
]
| [
"[email protected]"
] | |
4eec63edb5849bedfb3c1094f0944238a960f578 | a81d84fdb57e1b90812fc5b5b523685ba5b663c0 | /python/2021_04/Question0769.py | 648c3a58f644d02e661df59e3decc996ad812c3d | [] | no_license | KujouNozom/LeetCode | 1919081001126924daa7549493a0823702631a37 | 4de1e601274de1336d669e41f732a8cb056880b9 | refs/heads/master | 2023-07-17T12:17:45.156451 | 2021-09-04T11:57:40 | 2021-09-04T11:57:40 | 268,075,373 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 516 | py | # 769. 最多能完成排序的块 [双指针]
from typing import List
class Solution:
def maxChunksToSorted(self, arr: List[int]) -> int:
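        # greedy scan: the prefix [start..index] can be cut off as a chunk exactly
        # when it holds the values start..index, i.e. running min == start and
        # running max == index; e.g. maxChunksToSorted([1, 0, 2, 3, 4]) -> 4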
min_value, max_value, start = 10, -1, 0
ans = 0
for index in range(len(arr)):
min_value = min(min_value, arr[index])
max_value = max(max_value, arr[index])
if min_value == start and max_value == index:
ans += 1
min_value, max_value, start = 10, -1, index + 1
return ans
| [
"[email protected]"
] | |
8e902e4e628a8d138844e6ee81c87d0dc785a0b1 | 4674b8088ffdf55905d44995f08a0792a3e4cd5c | /tests/hwsim/test_monitor_interface.py | bfc9a1562ff2e5c9fb7ee4dc4b08dfa12334195f | [
"BSD-3-Clause",
"BSD-2-Clause"
] | permissive | vanhoefm/krackattacks-scripts | 41daca791638a92aa4cfa68a582e46119037560e | 4b78669686f74efe664c6543b1b5b1616b22f902 | refs/heads/research | 2022-10-29T20:21:11.512335 | 2022-10-16T18:44:41 | 2022-10-16T18:44:41 | 107,408,514 | 2,184 | 577 | NOASSERTION | 2021-07-06T12:43:49 | 2017-10-18T12:58:08 | C | UTF-8 | Python | false | false | 3,271 | py | # AP mode using the older monitor interface design
# Copyright (c) 2013, Jouni Malinen <[email protected]>
#
# This software may be distributed under the terms of the BSD license.
# See README for more details.
from remotehost import remote_compatible
import logging
logger = logging.getLogger()
import time
import hwsim_utils
import hostapd
from wpasupplicant import WpaSupplicant
def test_monitor_iface_open(dev, apdev):
"""Open connection using cfg80211 monitor interface on AP"""
wpas = WpaSupplicant(global_iface='/tmp/wpas-wlan5')
wpas.interface_add("wlan5", drv_params="use_monitor=1")
id = wpas.add_network()
wpas.set_network(id, "mode", "2")
wpas.set_network_quoted(id, "ssid", "monitor-iface")
wpas.set_network(id, "key_mgmt", "NONE")
wpas.set_network(id, "frequency", "2412")
wpas.connect_network(id)
dev[0].connect("monitor-iface", key_mgmt="NONE", scan_freq="2412")
def test_monitor_iface_wpa2_psk(dev, apdev):
"""WPA2-PSK connection using cfg80211 monitor interface on AP"""
wpas = WpaSupplicant(global_iface='/tmp/wpas-wlan5')
wpas.interface_add("wlan5", drv_params="use_monitor=1")
id = wpas.add_network()
wpas.set_network(id, "mode", "2")
wpas.set_network_quoted(id, "ssid", "monitor-iface-wpa2")
wpas.set_network(id, "proto", "WPA2")
wpas.set_network(id, "key_mgmt", "WPA-PSK")
wpas.set_network_quoted(id, "psk", "12345678")
wpas.set_network(id, "pairwise", "CCMP")
wpas.set_network(id, "group", "CCMP")
wpas.set_network(id, "frequency", "2412")
wpas.connect_network(id)
dev[0].connect("monitor-iface-wpa2", psk="12345678", scan_freq="2412")
def test_monitor_iface_multi_bss(dev, apdev):
"""AP mode mmonitor interface with hostapd multi-BSS setup"""
params = { "ssid": "monitor-iface", "driver_params": "use_monitor=1" }
hapd = hostapd.add_ap(apdev[0], params)
hostapd.add_bss(apdev[0], apdev[0]['ifname'] + '-2', 'bss-2.conf')
dev[0].connect("monitor-iface", key_mgmt="NONE", scan_freq="2412")
dev[1].connect("bss-2", key_mgmt="NONE", scan_freq="2412")
@remote_compatible
def test_monitor_iface_unknown_sta(dev, apdev):
"""AP mode monitor interface and Data frame from unknown STA"""
ssid = "monitor-iface-pmf"
passphrase = "12345678"
params = hostapd.wpa2_params(ssid=ssid, passphrase=passphrase)
params["wpa_key_mgmt"] = "WPA-PSK-SHA256"
params["ieee80211w"] = "2"
params['driver_params'] = "use_monitor=1"
hapd = hostapd.add_ap(apdev[0], params)
bssid = apdev[0]['bssid']
addr = dev[0].p2p_interface_addr()
dev[0].connect(ssid, psk=passphrase, ieee80211w="2",
key_mgmt="WPA-PSK-SHA256", proto="WPA2",
scan_freq="2412")
dev[0].request("DROP_SA")
# This protected Deauth will be ignored by the STA
hapd.request("DEAUTHENTICATE " + addr)
# But the unprotected Deauth from TX frame-from-unassoc-STA will now be
# processed
dev[0].request("DATA_TEST_CONFIG 1")
dev[0].request("DATA_TEST_TX " + bssid + " " + addr + " 0")
dev[0].request("DATA_TEST_CONFIG 0")
ev = dev[0].wait_event(["CTRL-EVENT-DISCONNECTED"], timeout=5)
if ev is None:
raise Exception("No disconnection")
dev[0].request("DISCONNECT")
| [
"[email protected]"
] | |
efc0ff16e064e56e714719076065e0481806106e | 951e433b25a25afeea4d9b45994a57e0a6044144 | /LeetCode/Q187_HM_findRepeatedDnaSequences.py | 4be93e56f1f5ce25527e7b244bc6bc2c45797d72 | [] | no_license | EricaEmmm/CodePython | 7c401073e0a9b7cd15f9f4a553f0aa3db1a951a3 | d52aa2a0bf71b5e7934ee7bff70d593a41b7e644 | refs/heads/master | 2020-05-31T14:00:34.266117 | 2019-09-22T09:48:23 | 2019-09-22T09:48:23 | 190,318,878 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,045 | py | # 重复的DNA序列
# All DNA is composed of a series of nucleotides abbreviated A, C, G and T,
# for example: "ACGAATTCCG". When studying DNA, identifying repeated sequences
# within it can sometimes be very helpful.
# Write a function to find all 10-letter-long sequences (substrings) that occur
# more than once in a DNA molecule.
#
# Example:
# Input:  s = "AAAAACCCCCAAAAACCCCCCAAAAAGGGTTT"
# Output: ["AAAAACCCCC", "CCCCCAAAAA"]
class Solution(object):
def findRepeatedDnaSequences(self, s):
"""
:type s: str
:rtype: List[str]
"""
res = dict()
if len(s) < 10:
return res
for i in range(len(s)-9):
tmp = s[i:i+10]
            res[tmp] = res.get(tmp,0) + 1  # get() returns the value for the key, or the default (0) if it is absent
return list([i for i in res.keys() if res[i] > 1])
if __name__ == '__main__':
s = Solution()
tmp = "AAAAACCCCCAAAAACCCCCCAAAAAGGGTTT"
print(s.findRepeatedDnaSequences(tmp))
# st = "abc"
# t = [1,2,3]
# print(st[0:3]) | [
"[email protected]"
] | |
6c172d1d135b205d3134c570b5fea04025c05ba2 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02578/s304732693.py | fd8ac3cded43971a72e3cf659d1486c121afa2f5 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 234 | py | n = int(input())
input_line = input().split()
member = [int(input_line[i]) for i in range(n)]
stands = 0
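# greedy: give each person the minimum stand so heights never decrease,
# e.g. heights [2, 1, 5, 4, 3] need stands [0, 1, 0, 1, 2] -> prints 4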
for i in range(1,n):
stand = member[i-1] - member[i]
if stand > 0:
stands += stand
member[i] += stand
print(stands) | [
"[email protected]"
] | |
b594ea5d9c012feedfb6dd74228118ce0300906b | 8d9318a33afc2c3b5ca8ac99fce0d8544478c94a | /Books/Casandra DB/opscenter-5.1.0/lib/py/html5lib/treebuilders/__init__.py | 50c8deeb08c187d8b51fcfdcb742e414c6ee52ab | [] | no_license | tushar239/git-large-repo | e30aa7b1894454bf00546312a3fb595f6dad0ed6 | 9ee51112596e5fc3a7ab2ea97a86ec6adc677162 | refs/heads/master | 2021-01-12T13:48:43.280111 | 2016-11-01T22:14:51 | 2016-11-01T22:14:51 | 69,609,373 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 129 | py | version https://git-lfs.github.com/spec/v1
oid sha256:b2a0b8030b4449b4e227ef389f48544f1136f27d0fc657128ee58812e529f7d3
size 4478
| [
"[email protected]"
] | |
dda213c37af2f9c9c79342b1e51e552411080ec5 | 49ab501632b0a8336058406e7daa3afce6be6e93 | /python_server/run_keras_server.py | 14a25f5c8f258346bcedf3cf308c98eb4e1fbf53 | [] | no_license | CharlesFauman/meme_server | 3ab73e9788b9fea26f6ea270563381515d4b0d47 | 75b0d6fc041c1e2b04e260e9eecbff160225a0f6 | refs/heads/master | 2020-03-25T08:58:32.780593 | 2018-08-05T19:24:58 | 2018-08-05T19:24:58 | 143,640,946 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,029 | py | # USAGE
# Start the server:
# python run_keras_server.py
# Submit a request via cURL:
# curl -X POST -F "input=@<your input file>" 'http://localhost:5000/predict'
# Submit a request via Python:
# python simple_request.py
# import the necessary packages
import numpy as np
from threading import Thread
import flask
import redis
import uuid
import time
import json
import sys
import io
# initialize constants used for server queuing
PROCESSING_QUEUE = "processing_queue"
BATCH_SIZE = 32
SERVER_SLEEP = 0.25
CLIENT_SLEEP = 0.25
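# queue architecture: the /predict handler pushes each serialized input onto the
# Redis list PROCESSING_QUEUE; classify_process() drains it in batches of up to
# BATCH_SIZE, runs the model once per batch, and stores each result under the
# request's UUID for the waiting handler to poll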
# initialize our Flask application, Redis server, and Keras model
app = flask.Flask(__name__)
db = redis.StrictRedis(host="localhost", port=6379, db=0)
db.flushdb()
print("* Loading model...")
import meme_model as model
print("* Model loaded")
def classify_process():
    # continually poll for new inputs to classify
while True:
# attempt to grab a batch of inputs from the database, then
# initialize the input IDs and batch of inputs themselves
queue = db.lrange(PROCESSING_QUEUE, 0, BATCH_SIZE - 1)
inputIDs = []
batch = None
# loop over the queue
for q in queue:
# deserialize the object and obtain the input
q = json.loads(q)
input_ = model.preprocess_deserialize(q["input"])
# check to see if the batch list is None
if batch is None:
batch = input_
# otherwise, stack the data
else:
batch = np.vstack([batch, input_])
# update the list of input IDs
inputIDs.append(q["id"])
# check to see if we need to process the batch
if len(inputIDs) > 0:
# classify the batch
print("* Batch size: {}".format(batch.shape))
preds = model.process(batch)
preds = model.postprocess_serialize(preds)
# loop over the image IDs and their corresponding set of
# results from our model
for (inputID, result) in zip(inputIDs, preds):
db.set(inputID, json.dumps(result))
# remove the set of images from our queue
db.ltrim(PROCESSING_QUEUE, len(inputIDs), -1)
# sleep for a small amount
time.sleep(SERVER_SLEEP)
@app.route("/predict", methods=["POST"])
def predict():
# initialize the data dictionary that will be returned from the
# view
data = {"success": False}
print("predicting!")
# ensure an input was properly uploaded to our endpoint
if flask.request.method == "POST":
print("was post!")
input_form = None
input_files = None
if(flask.request.form.get("input")):
input_form = flask.request.form.get("input")
if(flask.request.files.get("input")):
input_files = flask.request.files.get("input").read()
if input_form or input_files:
input_ = model.preprocess_serialize(input_form, input_files)
# generate an ID for the classification then add the
# classification ID + input to the queue
k = str(uuid.uuid4())
d = {"id": k, "input": input_}
db.rpush(PROCESSING_QUEUE, json.dumps(d))
# keep looping until our model server returns the output
# predictions
while True:
# attempt to grab the output predictions
output = db.get(k)
# check to see if our model has classified the input
if output is not None:
# add the output predictions to our data
# dictionary so we can return it to the client
data["predictions"] = json.loads(output)
# delete the result from the database and break
# from the polling loop
db.delete(k)
break
# sleep for a small amount to give the model a chance
# to classify the input
time.sleep(CLIENT_SLEEP)
# indicate that the request was a success
data["success"] = True
# return the data dictionary as a JSON response
return flask.jsonify(data)
# if this is the main thread of execution first load the model and
# then start the server
if __name__ == "__main__":
# load the function used to classify input images in a *separate*
# thread than the one used for main classification
print("* Starting model service...")
t = Thread(target=classify_process, args=())
t.daemon = True
t.start()
# start the web server
print("* Starting web service...")
app.run() | [
"[email protected]"
] | |
8f9536c2451f1c553693aed0b4015a05647789bf | 4b95aeb2533f0a582cea2fb26d6177e94aabb21f | /2020/src/lobby_layout.py | 3bba605d05757a8dc9695996a0304392f18ef81b | [] | no_license | MarcoBurgos/advent_of_code | 0d9984e0fa47f68e52ef0f5cdf7681e23767bd16 | 81ac54bfe200cc348efbe860bd95aae4270f03b7 | refs/heads/main | 2023-02-09T14:40:38.204271 | 2020-12-26T00:09:36 | 2020-12-26T00:09:36 | 317,739,393 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,687 | py | import sys
from utils import read_and_load_input
VECTORS = {
'w' : (-4, 0),
'e' : ( 4, 0),
'nw': (-2, -3),
'ne': ( 2, -3),
'sw': (-2, 3),
'se': ( 2, 3),
}
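# hex grid embedded in the integer plane: e/w steps move 4 in x, diagonal steps
# move 2 in x and 3 in y, so every tile gets a unique (x, y) lattice coordinate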
def parse(line):
result = []
while line:
stepLength = 1 if line[0] in ('e', 'w') else 2
result.append(line[:stepLength])
line = line[stepLength:]
return result
def walk(path):
x, y = 0, 0
for step in path:
dx, dy = VECTORS[step]
x += dx
y += dy
return x, y
def lobby_layout_1():
result = set()
for path in tiles:
tile = walk(path)
if tile in result:
result.remove(tile)
else:
result.add(tile)
return result
def neighbors(tile):
yield from ((tile[0] + dx, tile[1] + dy) for dx, dy in VECTORS.values())
def lobby_layout_2(blackTiles):
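    # Conway-style daily update: a black tile stays black with 1 or 2 black
    # neighbours; a white tile turns black with exactly 2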
for day in range(100):
newTiles = set()
affectedTiles = blackTiles.copy()
for tile in blackTiles:
affectedTiles.update(neighbors(tile))
for tile in affectedTiles:
numNeighbors = sum(n in blackTiles for n in neighbors(tile))
if tile in blackTiles:
if numNeighbors in (1, 2):
newTiles.add(tile)
else:
if numNeighbors == 2:
newTiles.add(tile)
blackTiles = newTiles
return len(blackTiles)
if __name__ == '__main__':
input_data = read_and_load_input("Day24")
tiles = [parse(line.rstrip()) for line in input_data]
blackTiles = lobby_layout_1()
print(f"Solution 1: {len(blackTiles)}")
print(f"Solution 2: {lobby_layout_2(blackTiles)}")
| [
"[email protected]"
] | |
7ce9f25d9e4a88e41687b206e6a0bd9b74daa432 | d89a482aaf3001bbc4515f39af9ba474e1ae6062 | /trex/trex_output.py | f0d835f8b948280acec5897964ce1cb142978ed3 | [] | no_license | hongtao510/u_tool | 2925e3694aba81714cf83018c3f8520a7b503228 | 98c962cfb1f53c4971fb2b9ae22c882c0fae6497 | refs/heads/master | 2021-01-10T20:40:24.793531 | 2014-03-14T22:57:37 | 2014-03-14T22:57:37 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 22,924 | py | # -*- coding: utf-8 -*-
# TREX
import os
os.environ['DJANGO_SETTINGS_MODULE']='settings'
#from trex import trex_input
import webapp2 as webapp
from google.appengine.ext.webapp.util import run_wsgi_app
from google.appengine.ext.webapp import template
import numpy as np
import cgi
import cgitb
cgitb.enable()
from trex import trex_model
from uber import uber_lib
class TRexOutputPage(webapp.RequestHandler):
def post(self):
form = cgi.FieldStorage()
chem_name = form.getvalue('chemical_name')
use = form.getvalue('Use')
formu_name = form.getvalue('Formulated_product_name')
a_i = form.getvalue('percent_ai')
a_i = float(a_i)/100
Application_type = form.getvalue('Application_type')
p_i = form.getvalue('percent_incorporated')
p_i = float(p_i)/100
a_r = form.getvalue('application_rate')
a_r = float(a_r)
a_r_l = form.getvalue('application_rate_l')
a_r_l=float(a_r_l)
seed_treatment_formulation_name = form.getvalue('seed_treatment_formulation_name')
den = form.getvalue('density_of_product')
den = float(den)
m_s_r_p = form.getvalue('maximum_seedling_rate_per_use')
m_s_r_p = float(m_s_r_p)
a_r_p = form.getvalue('application_rate_per_use')
a_r_p = float(a_r_p)
r_s = form.getvalue('row_sp')
r_s=float(r_s)
b_w = form.getvalue('bandwidth') #convert to ft
b_w = float(b_w)/12
n_a = form.getvalue('number_of_applications')
a_t = form.getvalue('Application_target')
if a_t=='Short grass':
para=240 #coefficient used to estimate initial conc.
elif a_t=='Tall grass':
para=110
elif a_t=='Broad-leafed plants/small insects':
para=135
elif a_t=='Fruits/pods/seeds/large insects':
para=15
i_a = form.getvalue('interval_between_applications')
h_l = form.getvalue('Foliar_dissipation_half_life')
ld50_bird = form.getvalue('avian_ld50')
lc50_bird = form.getvalue('avian_lc50')
NOAEC_bird = form.getvalue('avian_NOAEC')
NOAEC_bird = float(NOAEC_bird)
NOAEL_bird = form.getvalue('avian_NOAEL')
NOAEL_bird = float(NOAEL_bird)
# bird_type = form.getvalue('Bird_type')
aw_bird = form.getvalue('body_weight_of_the_assessed_bird')
aw_bird = float(aw_bird)
tw_bird = form.getvalue('body_weight_of_the_tested_bird')
tw_bird = float(tw_bird)
x = form.getvalue('mineau_scaling_factor')
ld50_mamm = form.getvalue('mammalian_ld50')
lc50_mamm = form.getvalue('mammalian_lc50')
lc50_mamm=float(lc50_mamm)
NOAEC_mamm = form.getvalue('mammalian_NOAEC')
NOAEC_mamm = float(NOAEC_mamm)
NOAEL_mamm = form.getvalue('mammalian_NOAEL')
# mammal_type = form.getvalue('Mammal_type')
# if mammal_type =='Herbivores and insectivores':
# mf_w_mamm=0.8 #coefficient used to estimate initial conc.
# elif mammal_type=='Granivores':
# mf_w_mamm=0.1
# if bird_type =='Herbivores and insectivores':
# mf_w_bird=0.8 #coefficient used to estimate initial conc.
# elif bird_type=='Granivores':
# mf_w_bird=0.1
aw_mamm = form.getvalue('body_weight_of_the_assessed_mammal')
aw_mamm = float(aw_mamm)
tw_mamm = form.getvalue('body_weight_of_the_tested_mammal')
tw_mamm = float(tw_mamm)
#mf_w_mamm = form.getvalue('mass_fraction_of_water_in_the_mammal_food')
#mf_w_bird = form.getvalue('mass_fraction_of_water_in_the_bird_food')
text_file = open('trex/trex_description.txt','r')
x1 = text_file.read()
templatepath = os.path.dirname(__file__) + '/../templates/'
ChkCookie = self.request.cookies.get("ubercookie")
html = uber_lib.SkinChk(ChkCookie, "TREX Output")
html = html + template.render(templatepath + '02uberintroblock_wmodellinks.html', {'model':'trex','page':'output'})
html = html + template.render (templatepath + '03ubertext_links_left.html', {})
html = html + template.render(templatepath + '04uberoutput_start.html', {
'model':'trex',
'model_attributes':'T-Rex Output'})
html = html + """<table width="600" border="1" class="out_1">
<tr>
<th scope="col">Inputs</div></th>
<th scope="col">Value</div></th>
<th scope="col">Inputs</div></th>
<th scope="col">Value</div></th>
</tr>
<tr>
<td>Chemical name</td>
<td>%s</td>
<td>Use</td>
<td>%s</td>
</tr>
<tr>
        <td>Formulated product name</td>
<td>%s</td>
<td>Percentage active ingredient</td>
<td>%s%%</td>
</tr>
<tr>
<td>Application type</td>
<td>%s</td>
<td>Percentage incorporated</td>
<td>%s%%</td>
</tr>
<tr>
<td>Application rate (lbs a.i./A)</td>
<td>%s</td>
<td>Liquid application rate (fl oz/A)</td>
<td>%s</td>
</tr>
<tr>
<td>Seed treatment formulation name</td>
<td>%s</td>
<td>Density of product (lbs/gal)</td>
<td>%s</td>
</tr>
<tr>
<td>Maximum seeding rate per use (lbs/A)</td>
<td>%s</td>
<td>Application rate per use (fl oz/cwt)</td>
<td>%s</td>
</tr>
<tr>
<td>Row spacing (inch)</td>
<td>%s</td>
<td>Bandwidth (inch)</td>
<td>%s</td>
</tr>
<tr>
<td>Number of applications</td>
<td>%s</td>
<td>Application target</td>
<td>%s</td>
</tr>
<tr>
<td>Interval between applications (days)</td>
<td>%s</td>
<td>Foliar dissipation half-life (days)</td>
<td>%s</td>
</tr>
<tr>
<td>Avian LD50 (mg/kg-bw)</td>
<td>%s</td>
<td>Avian LC50 (mg/kg-diet)</td>
<td>%s</td>
</tr>
<tr>
<td>Avian NOAEC (mg/kg-diet)</td>
<td>%s</td>
<td>Avian NOAEL (mg/kg-bw)</td>
<td>%s</td>
</tr>
<tr>
<td>Body weight of assessed bird (g)</td>
<td>%s</td>
<td>Body weight of tested bird (g)</td>
<td>%s</td>
</tr>
<tr>
<td>Mineau scaling factor</td>
<td>%s</td>
<td>Mammalian LD50 (mg/kg-bw)</td>
<td>%s</td>
</tr>
<tr>
<td>Mammalian LC50 (mg/kg-diet)</td>
<td>%s</td>
<td>Mammalian NOAEC (mg/kg-diet)</td>
<td>%s</td>
</tr>
<tr>
<td>Mammalian NOAEL (mg/kg-bw)</td>
<td>%s</td>
<td>Body weight of assessed mammal (g)</td>
<td>%s</td>
</tr>
<tr>
<td>Body weight of tested mammal (g)</td>
<td>%s</td>
<td> </td>
<td> </td>
</tr>
</table>
<p> </p>
"""%(chem_name, use, formu_name, 100*a_i, Application_type, 100*p_i, a_r, a_r_l, seed_treatment_formulation_name, den, m_s_r_p, a_r_p,
r_s, b_w, n_a, a_t, i_a, h_l, ld50_bird, lc50_bird, NOAEC_bird, NOAEL_bird, aw_bird, tw_bird, x, ld50_mamm,
lc50_mamm, NOAEC_mamm, NOAEL_mamm, aw_mamm, tw_mamm)
html = html + """<table width="600" border="1" class="out_2">
<tr>
<th scope="col">Outputs</div></th>
<th scope="col">Value</div></th>
</tr>
<tr>
<td>Dietary-based EECs for %s</td>
<td>%0.2E</td>
</tr>
<tr>
<td>Avian dose-based acute EECs for %s (Herbivores and insectivores)</td>
<td>%0.2E</td>
</tr>
<tr>
<td>Avian dose-based acute EECs (Granivores)</td>
<td>%0.2E</td>
</tr>
<tr>
<td>Avian dose-based acute RQs for %s (Herbivores and insectivores)</td>
<td>%0.2E</td>
</tr>
<tr>
<td>Avian dose-based acute RQs (Granivores)</td>
<td>%0.2E</td>
</tr>
<tr>
<td>Avian diet-based acute RQs for %s (Herbivores and insectivores)</td>
<td>%0.2E</td>
</tr>
<tr>
<td>Avian diet-based chronic RQs for %s (Herbivores and insectivores)</td>
<td>%0.2E</td>
</tr>
<tr>
<td>Mammalian dose-based acute EECs for %s (Herbivores and insectivores)</td>
<td>%0.2E</td>
</tr>
<tr>
<td>Mammalian dose-based acute EECs (Granivores)</td>
<td>%0.2E</td>
</tr>
<tr>
<td>Mammalian dose-based acute RQs for %s (Herbivores and insectivores)</td>
<td>%0.2E</td>
</tr>
<tr>
<td>Mammalian dose-based acute RQs (Granivores)</td>
<td>%0.2E</td>
</tr>
<tr>
<td>Mammalian dose-based chronic RQs for %s (Herbivores and insectivores)</td>
<td>%0.2E</td>
</tr>
<tr>
<td>Mammalian dose-based chronic RQs (Granivores)</td>
<td>%0.2E</td>
</tr>
<tr>
<td>Mammalian diet-based acute RQs for %s (Herbivores and insectivores)</td>
<td>%0.2E</td>
</tr>
<tr>
<td>Mammalian diet-based chronic RQs for %s (Herbivores and insectivores)</td>
<td>%0.2E</td>
</tr>
<tr>
<td>Avian LD50<sup>-2</sup> for row/band/in-furrow granular application</td>
<td>%0.2E</td>
</tr>
<tr>
<td>Avian LD50<sup>-2</sup> for row/band/in-furrow liquid application</td>
<td>%0.2E</td>
</tr>
<tr>
<td>Avian LD50<sup>-2</sup> for broadcast granular application</td>
<td>%0.2E</td>
</tr>
<tr>
<td>Avian LD50<sup>-2</sup> for broadcast liquid application</td>
<td>%0.2E</td>
</tr>
<tr>
<td>Mammalian LD50<sup>-2</sup> for row/band/in-furrow granular application</td>
<td>%0.2E</td>
</tr>
<tr>
<td>Mammalian LD50<sup>-2</sup> for row/band/in-furrow liquid application</td>
<td>%0.2E</td>
</tr>
<tr>
<td>Mammalian LD50<sup>-2</sup> for broadcast granular application</td>
<td>%0.2E</td>
</tr>
<tr>
<td>Mammalian LD50<sup>-2</sup> for broadcast liquid application</td>
<td>%0.2E</td>
</tr>
<tr>
<td>Seed treatment avian acute RQs (method 1)</td>
<td>%0.2E</td>
</tr>
<tr>
<td>Seed treatment avian acute RQs (method 2)</td>
<td>%0.2E</td>
</tr>
<tr>
<td>Seed treatment avian chronic RQs</td>
<td>%0.2E</td>
</tr>
<tr>
<td>Seed treatment mammalian acute RQs (method 1)</td>
<td>%0.2E</td>
</tr>
<tr>
<td>Seed treatment mammalian acute RQs (method 2)</td>
<td>%0.2E</td>
</tr>
<tr>
<td>Seed treatment mammalian chronic RQs</td>
<td>%0.2E</td>
</tr>
</table>""" %(a_t, trex_model.EEC_diet(trex_model.C_0, n_a, i_a, a_r, a_i, para, h_l), a_t, trex_model.EEC_dose_bird(trex_model.EEC_diet, aw_bird, trex_model.fi_bird, 0.8, trex_model.C_0, n_a, i_a, a_r, a_i, para, h_l),
trex_model.EEC_dose_bird_g(trex_model.EEC_diet, aw_bird, trex_model.fi_bird, 0.1, trex_model.C_0, n_a, i_a, a_r, a_i, para, h_l), a_t,
trex_model.ARQ_dose_bird(trex_model.EEC_dose_bird, trex_model.EEC_diet, aw_bird, trex_model.fi_bird, trex_model.at_bird, ld50_bird, tw_bird, x, 0.8, trex_model.C_0, n_a, i_a, a_r, a_i, para, h_l),
trex_model.ARQ_dose_bird_g(trex_model.EEC_dose_bird, trex_model.EEC_diet, aw_bird, trex_model.fi_bird, trex_model.at_bird, ld50_bird, tw_bird, x, 0.1, trex_model.C_0, n_a, i_a, a_r, a_i, para, h_l),
a_t, trex_model.ARQ_diet_bird(trex_model.EEC_diet, lc50_bird, trex_model.C_0, n_a, i_a, a_r, a_i, para, h_l), a_t, trex_model.CRQ_diet_bird(trex_model.EEC_diet, NOAEC_bird, trex_model.C_0, n_a, i_a, a_r, a_i, para, h_l),
a_t, trex_model.EEC_dose_mamm(trex_model.EEC_diet, aw_mamm, trex_model.fi_mamm, 0.8, trex_model.C_0, n_a, i_a, a_r, a_i, para, h_l), trex_model.EEC_dose_mamm_g(trex_model.EEC_diet, aw_mamm, trex_model.fi_mamm, 0.1, trex_model.C_0, n_a, i_a, a_r, a_i, para, h_l),
a_t, trex_model.ARQ_dose_mamm(trex_model.EEC_dose_mamm, trex_model.at_mamm, aw_mamm, ld50_mamm, tw_mamm, 0.8, trex_model.C_0, n_a, i_a, a_r, a_i, para, h_l),
trex_model.ARQ_dose_mamm_g(trex_model.EEC_dose_mamm, trex_model.at_mamm, aw_mamm, ld50_mamm, tw_mamm, 0.1, trex_model.C_0, n_a, i_a, a_r, a_i, para, h_l),
a_t, trex_model.CRQ_dose_mamm(trex_model.EEC_diet, trex_model.EEC_dose_mamm, trex_model.ANOAEL_mamm, NOAEL_mamm, aw_mamm, tw_mamm, 0.8, n_a, i_a, a_r, a_i, para, h_l),
trex_model.CRQ_dose_mamm_g(trex_model.EEC_diet, trex_model.EEC_dose_mamm, trex_model.ANOAEL_mamm, NOAEL_mamm, aw_mamm, tw_mamm, 0.1, n_a, i_a, a_r, a_i, para, h_l),
a_t, trex_model.ARQ_diet_mamm(trex_model.EEC_diet, lc50_mamm, trex_model.C_0, n_a, i_a, a_r, a_i, para, h_l),
a_t, trex_model.CRQ_diet_mamm(trex_model.EEC_diet, NOAEC_mamm, trex_model.C_0, n_a, i_a, a_r, a_i, para, h_l),
trex_model.LD50_rg_bird(Application_type, a_r, a_i, p_i, r_s, b_w, aw_bird, trex_model.at_bird, ld50_bird, tw_bird, x), trex_model.LD50_rl_bird(Application_type, a_r_l, a_i, p_i, b_w, aw_bird, trex_model.at_bird, ld50_bird, tw_bird, x),
trex_model.LD50_bg_bird(Application_type, a_r, a_i, p_i, b_w, aw_bird, trex_model.at_bird, ld50_bird, tw_bird,x),trex_model.LD50_bl_bird(Application_type, a_r_l, a_i, p_i, b_w, aw_bird, trex_model.at_bird, ld50_bird, tw_bird,x),
trex_model.LD50_rg_mamm(Application_type, a_r, a_i, p_i, r_s, b_w, aw_mamm, trex_model.at_mamm, ld50_mamm, tw_mamm), trex_model.LD50_rl_mamm(Application_type, a_r_l, a_i, p_i, b_w, aw_mamm, trex_model.at_mamm, ld50_mamm, tw_mamm),
trex_model.LD50_bg_mamm(Application_type, a_r, a_i, p_i, b_w, aw_mamm, trex_model.at_mamm, ld50_mamm, tw_mamm),trex_model.LD50_bl_mamm(Application_type, a_r_l, a_i, p_i, b_w, aw_mamm, trex_model.at_mamm, ld50_mamm, tw_mamm),
trex_model.sa_bird_1(a_r_p, a_i, den, trex_model.at_bird,trex_model.fi_bird, ld50_bird, aw_bird, tw_bird, x),trex_model.sa_bird_2(a_r_p, a_i, den, m_s_r_p, trex_model.at_bird, ld50_bird, aw_bird, tw_bird, x),
trex_model.sc_bird(a_r_p, a_i, den, NOAEC_bird),trex_model.sa_mamm_1(a_r_p, a_i, den, trex_model.at_mamm, trex_model.fi_mamm, ld50_mamm, aw_mamm, tw_mamm),
trex_model.sa_mamm_2(a_r_p, a_i, den, m_s_r_p, trex_model.at_mamm, ld50_mamm, aw_mamm, tw_mamm),trex_model.sc_mamm(a_r_p, a_i, den, NOAEC_mamm))
html = html + template.render(templatepath + 'export.html', {})
html = html + template.render(templatepath + '04uberoutput_end.html', {'sub_title': ''})
html = html + template.render(templatepath + '06uberfooter.html', {'links': ''})
self.response.out.write(html)
app = webapp.WSGIApplication([('/.*', TRexOutputPage)], debug=True)
def main():
run_wsgi_app(app)
if __name__ == '__main__':
main()
| [
"[email protected]"
] | |
04c0a9aa06b8567653908c8159d470bb3be89b2d | 15f321878face2af9317363c5f6de1e5ddd9b749 | /solutions_python/Problem_200/5468.py | 063e5c8d196c9bfcca7a5d638432897002ca1793 | [] | no_license | dr-dos-ok/Code_Jam_Webscraper | c06fd59870842664cd79c41eb460a09553e1c80a | 26a35bf114a3aa30fc4c677ef069d95f41665cc0 | refs/heads/master | 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 290 | py | dirt=[]
k=1
t=input()
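# brute-force sketch (Python 2): for each value, decrement w until its digits are
# non-decreasing left to right; the inner while verifies this right to left via q >= d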
for j in range(t):
n=input();w=n
while(w):
c=0;g=n%10
n=w;q=(n)%10;m=-2
while(n):
d=n%10
if c>=1:
if q<d:
break
q=d;n/=10;
c+=1;g=d
if n==0:
dirt.append(w)
break
w=w-1
for i in dirt:
print "Case #{0}: {1}".format(k,i)
k+=1
| [
"[email protected]"
] | |
a5ddd507e15815aaad86ceaaa47e2a295133f13d | 48e124e97cc776feb0ad6d17b9ef1dfa24e2e474 | /sdk/python/pulumi_azure_native/devices/v20160203/list_iot_hub_resource_keys.py | 42ce719ca651ad316e0363197087b52eff4ffe47 | [
"BSD-3-Clause",
"Apache-2.0"
] | permissive | bpkgoud/pulumi-azure-native | 0817502630062efbc35134410c4a784b61a4736d | a3215fe1b87fba69294f248017b1591767c2b96c | refs/heads/master | 2023-08-29T22:39:49.984212 | 2021-11-15T12:43:41 | 2021-11-15T12:43:41 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,383 | py | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
__all__ = [
'ListIotHubResourceKeysResult',
'AwaitableListIotHubResourceKeysResult',
'list_iot_hub_resource_keys',
'list_iot_hub_resource_keys_output',
]
@pulumi.output_type
class ListIotHubResourceKeysResult:
"""
The list of shared access policies with a next link.
"""
def __init__(__self__, next_link=None, value=None):
if next_link and not isinstance(next_link, str):
raise TypeError("Expected argument 'next_link' to be a str")
pulumi.set(__self__, "next_link", next_link)
if value and not isinstance(value, list):
raise TypeError("Expected argument 'value' to be a list")
pulumi.set(__self__, "value", value)
@property
@pulumi.getter(name="nextLink")
def next_link(self) -> str:
"""
The next link.
"""
return pulumi.get(self, "next_link")
@property
@pulumi.getter
def value(self) -> Optional[Sequence['outputs.SharedAccessSignatureAuthorizationRuleResponse']]:
"""
The list of shared access policies.
"""
return pulumi.get(self, "value")
class AwaitableListIotHubResourceKeysResult(ListIotHubResourceKeysResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return ListIotHubResourceKeysResult(
next_link=self.next_link,
value=self.value)
def list_iot_hub_resource_keys(resource_group_name: Optional[str] = None,
resource_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableListIotHubResourceKeysResult:
"""
The list of shared access policies with a next link.
:param str resource_group_name: The name of the resource group that contains the IoT hub.
:param str resource_name: The name of the IoT hub.
"""
__args__ = dict()
__args__['resourceGroupName'] = resource_group_name
__args__['resourceName'] = resource_name
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-native:devices/v20160203:listIotHubResourceKeys', __args__, opts=opts, typ=ListIotHubResourceKeysResult).value
return AwaitableListIotHubResourceKeysResult(
next_link=__ret__.next_link,
value=__ret__.value)
@_utilities.lift_output_func(list_iot_hub_resource_keys)
def list_iot_hub_resource_keys_output(resource_group_name: Optional[pulumi.Input[str]] = None,
resource_name: Optional[pulumi.Input[str]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[ListIotHubResourceKeysResult]:
"""
The list of shared access policies with a next link.
:param str resource_group_name: The name of the resource group that contains the IoT hub.
:param str resource_name: The name of the IoT hub.
"""
...
| [
"[email protected]"
] | |
e7f28841c3cab7b1b034f5d0de68744734459162 | f8c35a47c7199aed0747e91e5c36ec97e7543de1 | /custom/icds_reports/management/commands/generate_migration_tables.py | 868e4fe59a2acf305643ee8eed39d62f19f82f18 | [] | no_license | dr-aryone/commcare-hq | 13a3f2a39382e3f6fe1f19d6c08bb61b808c146d | 3e7e09247fc8d1246ccfc77c1fff8603c9f65228 | refs/heads/master | 2020-05-27T14:29:48.923458 | 2019-05-26T00:01:33 | 2019-05-26T00:01:33 | 188,650,727 | 2 | 1 | null | 2019-05-26T07:03:18 | 2019-05-26T07:03:18 | null | UTF-8 | Python | false | false | 6,056 | py | from __future__ import absolute_import, print_function
from __future__ import unicode_literals
import logging
import re
import sqlite3
from django.core.management import CommandError
from django.core.management.base import BaseCommand
from sqlalchemy import inspect as sqlinspect
from corehq.apps.userreports.models import StaticDataSourceConfiguration
from corehq.apps.userreports.util import get_indicator_adapter, UCR_TABLE_PREFIX
from corehq.sql_db.connections import connection_manager
from custom.icds_reports.const import DASHBOARD_DOMAIN
from custom.icds_reports.management.commands.create_citus_child_tables import keep_child_tables, plain_tables, \
drop_child_tables, get_parent_child_mapping
from custom.icds_reports.models import AggregateSQLProfile
logger = logging.getLogger(__name__)
IGNORE_TABLES = {
'django_migrations',
AggregateSQLProfile._meta.db_table,
'ucr_table_name_mapping',
}
CREATE_TABLE = """
CREATE TABLE IF NOT EXISTS tables (
id integer PRIMARY KEY,
source_table text NOT NULL,
date text,
target_table text,
migrated integer
); """
def get_all_tables(connection):
res = connection.execute("select tablename from pg_tables where schemaname = 'public'")
return {row.tablename for row in res}
class Command(BaseCommand):
def add_arguments(self, parser):
parser.add_argument('output_database')
parser.add_argument(
'--source-engine-id', default='icds-ucr',
help='Django alias for source database'
)
def handle(self, output_database, source_engine_id, **options):
with connection_manager.get_engine(source_engine_id).begin() as conn:
self.parent_child_mapping = get_parent_child_mapping(conn)
self.child_parent_mapping = {
child: parent
for parent, children in self.parent_child_mapping.items()
for child in children
}
self.table_count = 0
self.db = sqlite3.connect(output_database)
try:
self.setup_sqlite_db()
self.generate_dump_script(source_engine_id)
self.stdout.write("\n{} tables processed\n".format(self.table_count))
finally:
self.db.close()
def setup_sqlite_db(self):
with self.db:
self.db.execute(CREATE_TABLE)
res = self.db.execute('select count(*) from tables')
if res.fetchone()[0] > 0:
raise CommandError('Database already has records. Delete it and re-run command.')
def insert_row(self, row):
self.table_count += 1
with self.db:
self.db.execute('INSERT INTO tables(source_table, date, target_table) values (?,?,?)', row)
def generate_dump_script(self, source_engine_id):
self.seen_tables = set()
source_engine = connection_manager.get_engine(source_engine_id)
# direct dump and load from parent + child tables
with source_engine.begin() as source_conn:
insp = sqlinspect(source_conn)
for table in keep_child_tables + plain_tables:
for line in self.get_table_date_target(insp, table):
self.insert_row(line)
# direct dump and load from parent
# dump from all child tables into parent table
for table in drop_child_tables:
for line in self.get_table_date_target(insp, table, all_in_parent=True):
self.insert_row(line)
for datasource in StaticDataSourceConfiguration.by_domain(DASHBOARD_DOMAIN):
if source_engine_id == datasource.engine_id or source_engine_id in datasource.mirrored_engine_ids:
adapter = get_indicator_adapter(datasource)
table_name = adapter.get_table().name
# direct dump and load from parent
# dump from all child tables into parent table
# - if table is distrubuted, citus will distribute the data
# - if table is partitioned the triggers on the parent will distribute the data
for line in self.get_table_date_target(insp, table_name, all_in_parent=True):
self.insert_row(line)
all_tables = get_all_tables(source_conn)
remaining_tables = all_tables - self.seen_tables - IGNORE_TABLES
icds_ucr_prefix = '{}{}_'.format(UCR_TABLE_PREFIX, DASHBOARD_DOMAIN)
def keep_table(table):
root_table = self.child_parent_mapping.get(table, table)
return not root_table.startswith(UCR_TABLE_PREFIX) or root_table.startswith(icds_ucr_prefix)
remaining_tables = list(filter(keep_table, remaining_tables))
if remaining_tables:
self.stderr.write("Some tables not seen:")
for t in remaining_tables:
parent = self.child_parent_mapping.get(t)
if parent:
self.stderr.write("\t{} (parent: {})".format(t, parent))
else:
self.stderr.write("\t{}".format(t))
    def get_table_date_target(self, sql_inspector, table, all_in_parent=False):
yield table, None, None
self.seen_tables.add(table)
for child in self.parent_child_mapping[table]:
self.seen_tables.add(child)
            yield child, get_table_date(sql_inspector, child), table if all_in_parent else None
def get_table_date(sql_inspector, table):
def _get_date(string):
match = re.match(r'.*(\d{4}-\d{2}-\d{2}).*', string)
if match:
return match.groups()[0]
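    # e.g. _get_date('child_table_2019-03-01') -> '2019-03-01'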
date = _get_date(table)
if not date:
constraints = [
            constraint for constraint in sql_inspector.get_check_constraints(table)
if constraint['name'].startswith(table)
]
if constraints:
date = _get_date(constraints[0]['sqltext'])
return date
| [
"[email protected]"
] | |
19aab88df7aec32b7971ae1f9f4d9863c192e9e8 | 965fe92b03b37d2e6fa700281c4ef383fb104ada | /sciencebeam_trainer_delft/sequence_labelling/debug.py | 53a1befadf92215d50210611fddba0ded0508508 | [
"MIT"
] | permissive | elifesciences/sciencebeam-trainer-delft | 1591bebb7f5b9ed178329f4e9e02a9d893ab228d | 2413fe7f0801869208741e4ab6c4096db8d53b5e | refs/heads/develop | 2022-05-20T21:55:13.210432 | 2022-03-28T17:32:31 | 2022-03-28T17:32:31 | 192,557,708 | 5 | 1 | MIT | 2022-03-28T17:33:14 | 2019-06-18T14:34:50 | Python | UTF-8 | Python | false | false | 3,133 | py | import os
import logging
import time
from contextlib import contextmanager
from pathlib import Path
from typing import IO, Iterator, Optional
import numpy as np
from sciencebeam_trainer_delft.sequence_labelling.tag_formatter import (
TagOutputFormats,
format_tag_result
)
LOGGER = logging.getLogger(__name__)
SCIENCEBEAM_DELFT_TAGGING_DEBUG_OUT = "SCIENCEBEAM_DELFT_TAGGING_DEBUG_OUT"
@contextmanager
def exclusive_prefixed_file(prefix: str, suffix: str = '') -> Iterator[IO]:
for index in range(1, 10000):
filename = '%s-%d%s' % (prefix, index, suffix)
try:
with open(filename, mode='x', encoding='utf-8') as fileobj:
yield fileobj
return
except FileExistsError:
continue
raise FileExistsError('could not create any prefixed file: %s, suffix: %s' % (prefix, suffix))
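# usage sketch: with exclusive_prefixed_file('/tmp/report', '.json') as fp: fp.write(...)
# open(mode='x') fails if the file already exists, so concurrent callers are
# guaranteed distinct, freshly created filenames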
class TagDebugReporter:
def __init__(self, output_directory: str):
self.output_directory = output_directory
def get_base_output_name(self, model_name: str) -> str:
return os.path.join(self.output_directory, 'sciencebeam-delft-%s-%s' % (
round(time.time()),
model_name
))
def report_tag_results(
self,
texts: np.array,
features: np.array,
annotations,
model_name: str):
base_filename_prefix = self.get_base_output_name(model_name=model_name)
with exclusive_prefixed_file(base_filename_prefix, '.json') as json_fp:
output_file = json_fp.name
filename_prefix = os.path.splitext(output_file)[0]
LOGGER.info('tagger, output_file: %s', output_file)
format_tag_result_kwargs = dict(
tag_result=annotations,
texts=texts,
features=features,
model_name=model_name
)
formatted_text = format_tag_result(
output_format=TagOutputFormats.TEXT,
**format_tag_result_kwargs
)
Path(filename_prefix + '.txt').write_text(formatted_text, encoding='utf-8')
formatted_json = format_tag_result(
output_format=TagOutputFormats.JSON,
**format_tag_result_kwargs
)
json_fp.write(formatted_json)
formatted_xml = format_tag_result(
output_format=TagOutputFormats.XML,
**format_tag_result_kwargs
)
Path(filename_prefix + '.xml').write_text(formatted_xml, encoding='utf-8')
if features is not None:
formatted_data = format_tag_result(
output_format=TagOutputFormats.DATA,
**format_tag_result_kwargs
)
Path(filename_prefix + '.data').write_text(formatted_data, encoding='utf-8')
def get_tag_debug_reporter_if_enabled() -> Optional[TagDebugReporter]:
output_directory = os.environ.get(SCIENCEBEAM_DELFT_TAGGING_DEBUG_OUT)
if not output_directory:
return None
return TagDebugReporter(output_directory)
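# Minimal usage sketch (my addition): the reporter is switched on purely via
# the environment variable above; the directory used here is a placeholder and
# the surrounding package must be importable.
if __name__ == '__main__':
    os.environ[SCIENCEBEAM_DELFT_TAGGING_DEBUG_OUT] = '/tmp/tag-debug'
    reporter = get_tag_debug_reporter_if_enabled()
    print(reporter.get_base_output_name('dummy-model'))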
| [
"[email protected]"
] | |
d4ee6961649aca8865294008a94b35181bbe50bc | 54f352a242a8ad6ff5516703e91da61e08d9a9e6 | /Source Codes/CodeJamData/14/03/5.py | 44713545f4a1f6d56fb33b4f7f95aaa4764dea56 | [] | no_license | Kawser-nerd/CLCDSA | 5cbd8a4c3f65173e4e8e0d7ed845574c4770c3eb | aee32551795763b54acb26856ab239370cac4e75 | refs/heads/master | 2022-02-09T11:08:56.588303 | 2022-01-26T18:53:40 | 2022-01-26T18:53:40 | 211,783,197 | 23 | 9 | null | null | null | null | UTF-8 | Python | false | false | 1,620 | py | def trans(a):
return map(lambda x: ''.join(list(x)), zip(*a))
def can(r, c, m):
if r > c: r, c = c, r
safe = r * c - m
if r == 1 or safe == 1:
return True
elif r == 2:
return safe % 2 == 0 and safe >= 4
else:
return not safe in [2, 3, 5, 7]
def solve(r, c, m):
if not can(r, c, m):
print 'Impossible'
return
swapped = False
if r > c: r, c, swapped = c, r, True
ans, safe = [['.'] * c for _ in xrange(r)], r * c - m
if r == 1:
for i in xrange(safe, c):
ans[0][i] = '*'
elif r == 2:
for i in xrange(safe // 2, c):
ans[0][i] = ans[1][i] = '*'
elif m <= (r - 2) * (c - 2):
for i in xrange(m):
ans[r - i % (r - 2) - 1][c - i // (r - 2) - 1] = '*'
else:
ans = [['*'] * c for _ in xrange(r)]
if safe <= 6:
for i in xrange(safe // 2):
ans[i][0] = ans[i][1] = '.'
else:
for i in xrange(8):
ans[i % 3][i // 3] = '.'
safe -= 8
if safe % 2 == 1:
ans[2][2] = '.'
safe -= 1
a = min(r - 3, safe // 2)
for i in xrange(a):
ans[3 + i][0] = ans[3 + i][1] = '.'
safe -= 2 * a
for i in xrange(safe // 2):
ans[0][3 + i] = ans[1][3 + i] = '.'
ans[0][0] = 'c'
if swapped: ans = trans(ans)
for row in ans: print ''.join(row)
T = input()
for i in xrange(T):
[r, c, m] = map(int, raw_input().split())
print 'Case #%d:' % (i + 1)
solve(r, c, m)
| [
"[email protected]"
] | |
b6176db9cf906b94b069180306ba7dc935c84b19 | 4061f9f2a7dc2acde4c4c630fbe10ac8f5913f5d | /user/views.py | 6059ac2806bf2a9c4bcdc72bc67893bae2b34d3b | [] | no_license | eibrahimarisoy/tasty_dishes | 8b9db3129c4d670f71a9e64025b25f51646c9e36 | ddfa3286bca06e153fbbd1e1a0d914c9f31d008e | refs/heads/master | 2022-12-04T00:45:55.607207 | 2020-04-03T09:42:31 | 2020-04-03T09:42:31 | 252,424,641 | 0 | 0 | null | 2022-11-22T05:27:25 | 2020-04-02T10:29:54 | JavaScript | UTF-8 | Python | false | false | 4,511 | py | from django.contrib import messages
from django.contrib.auth import authenticate, login, logout, update_session_auth_hash
from django.contrib.auth.decorators import login_required
from django.contrib.auth.forms import PasswordChangeForm
from django.contrib.auth.models import User
from django.shortcuts import get_object_or_404, redirect, render
from user.forms import RegisterForm, LoginForm, UserUpdateForm
from recipe.models import Recipe
STATUS = "published"
def user_register(request):
context = dict()
form = RegisterForm(request.POST or None)
if form.is_valid():
# get new user information from form
username = form.clean_username()
first_name = form.clean_first_name()
last_name = form.clean_last_name()
email = form.clean_email()
password = form.clean_password()
# create new user and set_password and set active
new_user = User(username=username, last_name=last_name,
first_name=first_name, email=email)
new_user.set_password(password)
new_user.is_active = True
new_user.save()
# login new user
login(request, new_user)
messages.success(request, "You have successfully registered.")
return redirect("index")
context["register_form"] = form
return render(request, "user/register.html", context)
def user_login(request):
context = dict()
form = LoginForm(request.POST or None)
context["form"] = form
if form.is_valid():
email = form.cleaned_data.get("email")
password = form.cleaned_data.get("password")
        # if no user with this email exists, show the user an error
try:
username = User.objects.get(email=email).username
except User.DoesNotExist:
messages.info(request, "Username is wrong.")
return render(request, "user/login.html", context)
# check username and password are correct
user = authenticate(request, username=username, password=password)
if user is None:
messages.info(request, "Username or password is wrong")
return render(request, "user/login.html", context)
else:
messages.success(request, "You have successfully logged in.")
# start new session for user
login(request, user)
return redirect("index")
return render(request, "user/login.html", context)
@login_required()
def user_logout(request):
logout(request)
messages.success(request, "You have successfully logged out.")
return redirect("index")
@login_required()
def user_like_recipe_list(request):
# to send user's favorite recipes to template
context = dict()
user = request.user
recipes = Recipe.objects.filter(likes=user)
context['recipes'] = recipes
return render(request, "user/like_recipe_list.html", context)
@login_required()
def user_recipe_list(request):
# to show the user their own recipes
context = dict()
user = request.user
recipes = Recipe.objects.filter(
owner=user,
status=STATUS,
)
context['recipes'] = recipes
return render(request, "user/recipe_list.html", context)
@login_required()
def user_profile(request):
context = dict()
user = get_object_or_404(User, pk=request.user.pk)
context['user'] = user
return render(request, "user/profile.html", context)
@login_required()
def update_user_profile(request):
context = dict()
form = UserUpdateForm(request.POST or None, instance=request.user)
context['form'] = form
if request.method == "POST":
if form.is_valid():
form.save()
messages.success(request, "Your profile updated successfully.")
return redirect("user_profile")
return render(request, "user/update_profile.html", context)
@login_required()
def change_password(request):
context = dict()
if request.method == 'POST':
form = PasswordChangeForm(request.user, request.POST)
if form.is_valid():
user = form.save()
update_session_auth_hash(request, user)
messages.success(request, 'Your password has been successfully changed!')
return redirect('user_profile')
else:
            messages.error(request, 'Please correct the errors below.')
else:
form = PasswordChangeForm(request.user)
context['form'] = form
return render(request, 'user/change_password.html', context)
| [
"[email protected]"
] | |
fa3f5466ad8bcab2dadb823615e08fc9e14db94a | c0795000de54a26956efe1a87afba507bb328b81 | /docs/conf.py | ccd96e7307cb1b7e20bed096c7eb0dfae85de6c9 | [
"MIT"
] | permissive | steinitzu/beets | ff6c24d9e072b3d86f889e2b9af66a6ca2374d09 | 1fbbe6154698ce50f1a7e8d32af9a6376e2c7ede | refs/heads/master | 2021-01-16T20:26:07.732280 | 2013-02-28T18:43:02 | 2013-02-28T18:43:02 | 7,949,551 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 972 | py | AUTHOR = u'Adrian Sampson'
# -- General configuration -----------------------------------------------------
extensions = []
#templates_path = ['_templates']
exclude_patterns = ['_build']
source_suffix = '.rst'
master_doc = 'index'
project = u'beets'
copyright = u'2012, Adrian Sampson'
version = '1.1'
release = '1.1b3'
pygments_style = 'sphinx'
# -- Options for HTML output ---------------------------------------------------
html_theme = 'default'
#html_static_path = ['_static']
htmlhelp_basename = 'beetsdoc'
# -- Options for LaTeX output --------------------------------------------------
latex_documents = [
('index', 'beets.tex', u'beets Documentation',
AUTHOR, 'manual'),
]
# -- Options for manual page output --------------------------------------------
man_pages = [
('reference/cli', 'beet', u'music tagger and library organizer',
[AUTHOR], 1),
('reference/config', 'beetsconfig', u'beets configuration file',
[AUTHOR], 5),
]
| [
"[email protected]"
] | |
2ea747e7a97063f59f0d0d4584ff5c12e534398b | 90deb98bd63bdc0f08d80954d3edb3a277e63cd1 | /arq/jobs.py | 1d4c756caae0842df1a7973d086f698534b73085 | [
"MIT"
] | permissive | filmor/arq | 93a97852eb2aa554ce2c6d548fcfa7dac35b74b4 | f0b4b8b4db2df0c950069f98d5d62c104912e48d | refs/heads/master | 2020-03-13T21:08:36.514480 | 2018-01-10T15:36:23 | 2018-01-10T15:36:23 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,365 | py | """
:mod:`jobs`
===========
Defines the ``Job`` class and descendants which deal with encoding and decoding job data.
"""
import base64
import os
from datetime import datetime
import msgpack
from .utils import DEFAULT_CURTAIL, from_unix_ms, timestamp, to_unix_ms_tz, truncate
__all__ = ['JobSerialisationError', 'Job', 'DatetimeJob']
class ArqError(Exception):
pass
class JobSerialisationError(ArqError):
pass
def gen_random():
"""
    generate a lowercase alpha-numeric random string of length 16.
    Should have more randomness for its size than uuid.
"""
return base64.b32encode(os.urandom(10))[:16].decode().lower()
# "device control one" should be fairly unique as a dict key and only one byte
DEVICE_CONTROL_ONE = '\x11'
class Job:
"""
Main Job class responsible for encoding and decoding jobs as they go
into and come out of redis.
"""
__slots__ = 'id', 'queue', 'queued_at', 'class_name', 'func_name', 'args', 'kwargs', 'raw_queue', 'raw_data'
def __init__(self, raw_data: bytes, *, queue_name: str=None, raw_queue: bytes=None) -> None:
"""
Create a job instance be decoding a job definition eg. from redis.
:param raw_data: data to decode, as created by :meth:`arq.jobs.Job.encode`
:param raw_queue: raw name of the queue the job was taken from
:param queue_name: name of the queue the job was dequeued from
"""
self.raw_data = raw_data
if queue_name is None and raw_queue is None:
raise ArqError('either queue_name or raw_queue are required')
self.queue = queue_name or raw_queue.decode()
self.raw_queue = raw_queue or queue_name.encode()
self.queued_at, self.class_name, self.func_name, self.args, self.kwargs, self.id = self.decode_raw(raw_data)
self.queued_at /= 1000
@classmethod
def encode(cls, *, job_id: str=None, queued_at: int=None, class_name: str, func_name: str,
args: tuple, kwargs: dict) -> bytes:
"""
Create a byte string suitable for pushing into redis which contains all
required information about a job to be performed.
:param job_id: id to use for the job, leave blank to generate a uuid
:param queued_at: time in ms unix time when the job was queue, if None now is used
:param class_name: name (see :attr:`arq.main.Actor.name`) of the actor class where the job is defined
:param func_name: name of the function be called
:param args: arguments to pass to the function
:param kwargs: key word arguments to pass to the function
"""
queued_at = queued_at or int(timestamp() * 1000)
try:
return cls.encode_raw([queued_at, class_name, func_name, args, kwargs, cls.generate_id(job_id)])
except TypeError as e:
raise JobSerialisationError(str(e)) from e
@classmethod
def generate_id(cls, given_id):
return given_id or gen_random()
@classmethod
def msgpack_encoder(cls, obj):
"""
The default msgpack encoder, adds support for encoding sets.
"""
if isinstance(obj, set):
return {DEVICE_CONTROL_ONE: list(obj)}
else:
return obj
@classmethod
def msgpack_object_hook(cls, obj):
if len(obj) == 1 and DEVICE_CONTROL_ONE in obj:
return set(obj[DEVICE_CONTROL_ONE])
return obj
@classmethod
def encode_raw(cls, data) -> bytes:
return msgpack.packb(data, default=cls.msgpack_encoder, use_bin_type=True)
@classmethod
def decode_raw(cls, data: bytes):
return msgpack.unpackb(data, object_hook=cls.msgpack_object_hook, encoding='utf8')
def to_string(self, args_curtail=DEFAULT_CURTAIL):
arguments = ''
if self.args:
arguments = ', '.join(map(str, self.args))
if self.kwargs:
if arguments:
arguments += ', '
arguments += ', '.join(f'{k}={v!r}' for k, v in sorted(self.kwargs.items()))
return '{s.id:.6} {s.class_name}.{s.func_name}({args})'.format(s=self, args=truncate(arguments, args_curtail))
def short_ref(self):
return '{s.id:.6} {s.class_name}.{s.func_name}'.format(s=self)
def __str__(self):
return self.to_string()
def __repr__(self):
return f'<Job {self} on {self.queue}>'
DEVICE_CONTROL_TWO = '\x12'
TIMEZONE = 'O'
class DatetimeJob(Job):
"""
    Alternative Job which copes with datetimes. Timezone-aware datetimes are supported, but
the returned datetimes will use a :mod:`datetime.timezone` class to define the timezone
regardless of the timezone class originally used on the datetime object (eg. ``pytz``).
"""
@classmethod
def msgpack_encoder(cls, obj):
if isinstance(obj, datetime):
ts, tz = to_unix_ms_tz(obj)
result = {DEVICE_CONTROL_TWO: ts}
if tz is not None:
result[TIMEZONE] = tz
return result
else:
return super().msgpack_encoder(obj)
@classmethod
def msgpack_object_hook(cls, obj):
if len(obj) <= 2 and DEVICE_CONTROL_TWO in obj:
return from_unix_ms(obj[DEVICE_CONTROL_TWO], utcoffset=obj.get(TIMEZONE))
else:
return super().msgpack_object_hook(obj)
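# Round-trip sanity check (my addition; the actor, function and queue names
# are invented). Guarded so importing the module is unaffected; because of the
# relative imports above, run it as a module within the package, e.g.
# `python -m arq.jobs`.
if __name__ == '__main__':
    raw = Job.encode(class_name='Downloader', func_name='fetch',
                     args=('http://example.com',), kwargs={'retries': 2})
    job = Job(raw, queue_name='dflt')
    print(repr(job))  # e.g. <Job abc123 Downloader.fetch(http://example.com, retries=2) on dflt>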
| [
"[email protected]"
] | |
f7b3033abbffc59bb77ce0801784a595aa9821d1 | 4be5c172c84e04c35677f5a327ab0ba592849676 | /python/leetcode/unique_paths_ii/unique_paths_ii.py | 3cdf92f1c359c3b7d2a6b32488d8026d34b9638a | [] | no_license | niranjan-nagaraju/Development | 3a16b547b030182867b7a44ac96a878c14058016 | d193ae12863971ac48a5ec9c0b35bfdf53b473b5 | refs/heads/master | 2023-04-06T20:42:57.882882 | 2023-03-31T18:38:40 | 2023-03-31T18:38:40 | 889,620 | 9 | 2 | null | 2019-05-27T17:00:29 | 2010-09-05T15:58:46 | Python | UTF-8 | Python | false | false | 3,955 | py | '''
https://leetcode.com/problems/unique-paths-ii/
63. Unique Paths II
A robot is located at the top-left corner of a m x n grid (marked 'Start' in the diagram below).
The robot can only move either down or right at any point in time. The robot is trying to reach the bottom-right corner of the grid (marked 'Finish' in the diagram below).
Now consider if some obstacles are added to the grids. How many unique paths would there be?
An obstacle and empty space is marked as 1 and 0 respectively in the grid.
Note: m and n will be at most 100.
Example 1:
Input:
[
[0,0,0],
[0,1,0],
[0,0,0]
]
Output: 2
Explanation:
There is one obstacle in the middle of the 3x3 grid above.
There are two ways to reach the bottom-right corner:
1. Right -> Right -> Down -> Down
2. Down -> Down -> Right -> Right
'''
'''
Solution Outline:
0. Allowed directions are R, D
1. Consider moving to cell x,y from 0,0
If there were no obstacles, it would be (num_paths_to(x-1,y) + num_paths_to(x,y-1))
with num_paths_to(x,0) == 1, (only direction allowed is down)
and num_paths_to(0,y) == 1 (only direction allowed is right) {for any 0<=x<m,0<=y<n}
2. With obstacles,
if x,0 is an obstacle,
then the column looks like (x=2 in the example)
[[0
[0
[1
[0
[0 0 . . .
num_paths_to(0,0) = 1
num_paths_to(1,0) = 1
num_paths_to(2,0) = 0 (blockade)
num_paths_to(3,0) = 0 (can' get past blockade moving only D)
num_paths_to(4,0) = 0
Similarly, if (0,y) is an obstacle,
then the first row looks like (y=1 in the example)
[[0 1 0 0 0 0]
num_paths_to(0,0) = 1
num_paths_to(0,1) = 0 (blockade)
num_paths_to(0,y) = 0 (for all y > 1) (can't get past blockade moving only R)
For any random(x,y),
if x,y is an obstacle, then num_paths_to(x,y) = 0
otherwise,
num_paths_to(x,y) = sum(num_paths_to(x-1,y), num_paths_to(x,y-1))
Sample run 1:
A= [
[0,0,0],
[0,1,0],
[0,0,0]
]
DP: [
[0,0,0],
[0,0,0],
[0,0,0]
]
Fill DP row 0,
DP: [
[1,1,1],
[0,0,0],
[0,0,0]
]
Fill DP col 0,
DP: [
[1,1,1],
[1,0,0],
[1,0,0]
]
(x,y): (1,1) is a blockade
DP: [
[1,1,1],
[1,0,0],
[1,0,0]
]
(x,y): (1,2) == sum(left, up) == sum(DP[1,1], DP[0,2]) == 1
DP: [
[1,1,1],
[1,0,1],
[1,0,0]
]
(x,y): (2,1) == sum(left,up) == sum(DP[2,0], DP[1,1]) == 1
DP: [
[1,1,1],
[1,0,1],
[1,1,0]
]
(x,y): (2,2) == sum(left,up) == sum(DP[2,1], DP[1,2]) == 2
DP: [
[1,1,1],
[1,0,1],
[1,1,2]
]
'''
class Solution(object):
def uniquePathsWithObstacles(self, obstacleGrid):
"""
:type obstacleGrid: List[List[int]]
:rtype: int
"""
if not obstacleGrid:
return 0
m = len(obstacleGrid)
n = len(obstacleGrid[0])
# End cell is blocked
if obstacleGrid[-1][-1] == 1:
return 0
DP = [[0 for _ in xrange(n)] for _ in xrange(m)]
# first row
for j in xrange(n):
if obstacleGrid[0][j] == 1:
break
DP[0][j] = 1
# first column
for i in xrange(m):
if obstacleGrid[i][0] == 1:
break
DP[i][0] = 1
for i in xrange(1, m):
for j in xrange(1, n):
if obstacleGrid[i][j] == 0:
DP[i][j] = DP[i-1][j] + DP[i][j-1]
# if A[i][j] is an obstacle, DP[i][j] remains 0
return DP[-1][-1]
if __name__ == '__main__':
s = Solution()
assert s.uniquePathsWithObstacles(\
[
[0,0,0],
[0,1,0],
[0,0,0]
]) == 2
assert s.uniquePathsWithObstacles(\
[
[0,0,0],
[0,1,0],
[0,0,1]
]) == 0
assert s.uniquePathsWithObstacles(\
[
[0,0,1,0],
[0,1,0,0],
[0,0,0,0],
[1,0,0,0]
]) == 3
assert s.uniquePathsWithObstacles(\
[
[0,0,1,0],
[0,1,0,0],
[0,0,0,0],
[0,0,0,0],
[1,0,0,0]
]) == 9
| [
"[email protected]"
] | |
9cf5ec04d45c55aa2d077d17107a6832fd095729 | f8166c72c0514f39ff4fc6bbb3d56ac2d8089fb0 | /whileInput.py | 46d75a3531d93d86855555c132488e21d76b1ddc | [] | no_license | KahlilMonteiro-lpsr/class-samples | 5f2a1dd68971b0c5ad342c709454493275f6138a | 6ed5e8fa37ca2bda53f4bc387a829a97d20b8f2c | refs/heads/master | 2021-01-18T22:08:13.251542 | 2016-06-08T22:07:53 | 2016-06-08T22:07:53 | 48,007,815 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 127 | py | # purposely an infinite loop
myInput = "LPS"
while myInput != "leave":
myInput = raw_input()
print("You said: " + myInput)
| [
"lps@lps-1011PX.(none)"
] | lps@lps-1011PX.(none) |
9c916129fe72fbdc76aaf2997d9bbdfa460fd235 | de54e5ddf4d350176d70c2bb1501b878285a18b8 | /fpn.py | 04a74603728490c73565dff2f7b4854aee3e9411 | [] | no_license | lizhe960118/find-star | e1d73b78b29087ca2e83990354b96b7406eaedf4 | e233dca4fe9a5faf6df9b6a4e0b2f29a7eb096b0 | refs/heads/master | 2020-05-18T10:55:52.008399 | 2019-05-01T03:49:32 | 2019-05-01T03:49:32 | 184,363,943 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,390 | py | '''RetinaFPN in PyTorch.'''
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
class Bottleneck(nn.Module):
expansion = 4
def __init__(self, in_planes, planes, stride=1):
super(Bottleneck, self).__init__()
self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=1, bias=False)
self.bn1 = nn.BatchNorm2d(planes)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(planes)
self.conv3 = nn.Conv2d(planes, self.expansion*planes, kernel_size=1, bias=False)
self.bn3 = nn.BatchNorm2d(self.expansion*planes)
self.downsample = nn.Sequential()
if stride != 1 or in_planes != self.expansion*planes:
self.downsample = nn.Sequential(
nn.Conv2d(in_planes, self.expansion*planes, kernel_size=1, stride=stride, bias=False),
nn.BatchNorm2d(self.expansion*planes)
)
def forward(self, x):
out = F.relu(self.bn1(self.conv1(x)))
out = F.relu(self.bn2(self.conv2(out)))
out = self.bn3(self.conv3(out))
out += self.downsample(x)
out = F.relu(out)
return out
# Basic residual block: two 3x3 convs with an identity shortcut
class ResNetBasicBlock(nn.Module):
expansion = 1
def __init__(self, in_channel, out_channel, stride=1, downsample=None):
super(ResNetBasicBlock, self).__init__()
self.layer1 = nn.Sequential(
nn.Conv2d(in_channel, out_channel, kernel_size=3, stride=stride, padding=1),
nn.BatchNorm2d(out_channel))
self.relu = nn.ReLU(inplace=True)
self.layer2 = nn.Sequential(
nn.Conv2d(out_channel, out_channel, kernel_size=3, stride=1, padding=1),
nn.BatchNorm2d(out_channel))
self.downsample = downsample
self.stride = stride
def forward(self,x):
residual = x
out = self.layer1(x)
out = self.relu(out)
out = self.layer2(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
class FPN(nn.Module):
def __init__(self, block, num_blocks):
super(FPN, self).__init__()
self.in_planes = 64
self.conv1 = nn.Conv2d(3, 64, kernel_size=3, stride=1, padding=1, bias=False) # 3*3 s1
self.bn1 = nn.BatchNorm2d(64)
self.conv2 = nn.Conv2d(64, 64, kernel_size=1, stride=1, bias=False) # 1*1 s1
self.bn2 = nn.BatchNorm2d(64)
self.conv3 = nn.Conv2d(64, 64, kernel_size=3, stride=2, padding=1, bias=False) # 3*3 s2
self.bn3 = nn.BatchNorm2d(64)
# Bottom-up layers
self.layer1 = self._make_layer(block, 64, num_blocks[0], stride=1)
self.layer2 = self._make_layer(block, 128, num_blocks[1], stride=2)
self.layer3 = self._make_layer(block, 256, num_blocks[2], stride=2)
self.conv5 = nn.Conv2d(1024, 256, kernel_size=3, stride=2, padding=1)
# self.layer4 = self._make_layer(block, 512, num_blocks[3], stride=2)
# self.conv6 = nn.Conv2d(2048, 256, kernel_size=3, stride=2, padding=1)
# self.conv7 = nn.Conv2d( 256, 256, kernel_size=3, stride=2, padding=1)
# Lateral layers
# self.latlayer1 = nn.Conv2d(2048, 256, kernel_size=1, stride=1, padding=0)
# self.latlayer2 = nn.Conv2d(1024, 256, kernel_size=1, stride=1, padding=0)
# self.latlayer3 = nn.Conv2d( 512, 256, kernel_size=1, stride=1, padding=0)
self.latlayer1 = nn.Conv2d(1024, 256, kernel_size=1, stride=1, padding=0)
self.latlayer2 = nn.Conv2d(512, 256, kernel_size=1, stride=1, padding=0)
self.latlayer3 = nn.Conv2d(256, 256, kernel_size=1, stride=1, padding=0)
# Top-down layers
self.toplayer1 = nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1)
self.toplayer2 = nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1)
def _make_layer(self, block, planes, num_blocks, stride):
strides = [stride] + [1]*(num_blocks-1)
layers = []
for stride in strides:
layers.append(block(self.in_planes, planes, stride))
self.in_planes = planes * block.expansion
return nn.Sequential(*layers)
def _upsample_add(self, x, y):
'''Upsample and add two feature maps.
Args:
x: (Variable) top feature map to be upsampled.
y: (Variable) lateral feature map.
Returns:
(Variable) added feature map.
Note in PyTorch, when input size is odd, the upsampled feature map
with `F.upsample(..., scale_factor=2, mode='nearest')`
maybe not equal to the lateral feature map size.
e.g.
original input size: [N,_,15,15] ->
conv2d feature map size: [N,_,8,8] ->
upsampled feature map size: [N,_,16,16]
So we choose bilinear upsample which supports arbitrary output sizes.
'''
_,_,H,W = y.size()
return F.upsample(x, size=(H,W), mode='bilinear') + y
def forward(self, x):
# Bottom-up
c1 = F.relu(self.bn1(self.conv1(x)))
c1 = F.relu(self.bn2(self.conv2(c1)))
c1 = F.relu(self.bn3(self.conv3(c1)))
# c1 = F.max_pool2d(c1, kernel_size=3, stride=2, padding=1)
c2 = self.layer1(c1) # 300 * 300
c3 = self.layer2(c2)
c4 = self.layer3(c3)
p5 = self.conv5(c4)
# c5 = self.layer4(c4)
# p6 = self.conv6(c5)
# p7 = self.conv7(F.relu(p6))
# Top-down
p4 = self.latlayer1(c4)
p3 = self._upsample_add(p4, self.latlayer2(c3))
p3 = self.toplayer1(p3)
p2 = self._upsample_add(p3, self.latlayer3(c2))
p2 = self.toplayer2(p2)
# p5 = self.latlayer1(c5)
# p4 = self._upsample_add(p5, self.latlayer2(c4))
# p4 = self.toplayer1(p4)
# p3 = self._upsample_add(p4, self.latlayer3(c3))
# p3 = self.toplayer2(p3)
return p2, p3, p4, p5
def FPN50():
# return FPN(Bottleneck, [3,4,6,3])
return FPN(Bottleneck, [3, 4, 6])
def FPN101():
return FPN(Bottleneck, [2,4,23,3])
def test():
net = FPN50()
# fms = net(Variable(torch.randn(1,3,600,300)))
fms = net(Variable(torch.randn(1, 3, 832, 832)))
for fm in fms:
print(fm.size())
# test() | [
"[email protected]"
] | |
b96bfac9435a26fb0dac083564d3a9020962e566 | e4d4149a717d08979953983fa78fea46df63d13d | /Week8/Day1/projects/animals_project/manage.py | a880c80321ea81b2c1b015e806c82f0971f83302 | [] | no_license | fayblash/DI_Bootcamp | 72fd75497a2484d19c779775c49e4306e602d10f | a4e8f62e338df5d5671fd088afa575ea2e290837 | refs/heads/main | 2023-05-05T20:55:31.513558 | 2021-05-27T06:48:40 | 2021-05-27T06:48:40 | 354,818,813 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 671 | py | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
"""Run administrative tasks."""
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'animals_project.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
| [
"[email protected]"
] | |
95ad4fd20d715d2b592087416dd9db29358e23b9 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02407/s580407099.py | 11041f9012ef0a39f8fbc696d88e6c36fe254b03 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 173 | py | n = int(input())
a = list(map(int,input().split()))
a.reverse()
for i,elem in enumerate(a):
if i != 0:
print (" ", end='')
print (elem, end='')
print ('')
| [
"[email protected]"
] | |
37416ae207a95ca269005d08f020dd3f0e703430 | 14de7abd1267122ad128c130f45ff86a087ed5cd | /nlp/match_blocks.py | 7e4efbe57c5b3b8a5ce86ca674e74a43cecd808f | [
"MIT"
] | permissive | saurabhXcode/tf-attentive-conv | 64124c470acdb26125680d903cc97ae1cc68a4b9 | 8dcc403575392c8e5c6c287432272a781410c49c | refs/heads/master | 2020-04-12T12:21:35.091291 | 2018-08-11T00:26:44 | 2018-08-11T00:26:44 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,308 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Han Xiao <[email protected]> <https://hanxiao.github.io>
import tensorflow as tf
from nlp.encode_blocks import CNN_encode
from nlp.nn import linear_logit, dropout_res_layernorm
def AttentiveCNN_match(context, query, context_mask, query_mask,
scope='AttentiveCNN_Block', reuse=None, **kwargs):
with tf.variable_scope(scope, reuse=reuse):
cnn_wo_att = CNN_encode(context, filter_size=3, direction='none', act_fn=None)
att_context, _ = Attentive_match(context, query, context_mask, query_mask)
cnn_att = CNN_encode(att_context, filter_size=1, direction='none', act_fn=None)
output = tf.nn.tanh(cnn_wo_att + cnn_att)
return dropout_res_layernorm(context, output, **kwargs)
def Attentive_match(context, query, context_mask, query_mask,
score_func='dot', causality=False,
scope='attention_match_block', reuse=None, **kwargs):
with tf.variable_scope(scope, reuse=reuse):
batch_size, context_length, num_units = context.get_shape().as_list()
_, query_length, _ = query.get_shape().as_list()
if score_func == 'dot':
score = tf.matmul(context, query, transpose_b=True)
elif score_func == 'bilinear':
score = tf.matmul(linear_logit(context, num_units, scope='context_x_We'), query, transpose_b=True)
elif score_func == 'scaled':
score = tf.matmul(linear_logit(context, num_units, scope='context_x_We'), query, transpose_b=True) / \
(num_units ** 0.5)
elif score_func == 'additive':
score = tf.squeeze(linear_logit(
tf.tanh(tf.tile(tf.expand_dims(linear_logit(context, num_units, scope='context_x_We'), axis=2),
[1, 1, query_length, 1]) +
tf.tile(tf.expand_dims(linear_logit(query, num_units, scope='query_x_We'), axis=1),
[1, context_length, 1, 1])), 1, scope='x_ve'), axis=3)
else:
raise NotImplementedError
mask = tf.matmul(tf.expand_dims(context_mask, -1), tf.expand_dims(query_mask, -1), transpose_b=True)
paddings = tf.ones_like(mask) * (-2 ** 32 + 1)
masked_score = tf.where(tf.equal(mask, 0), paddings, score) # B, Lc, Lq
# Causality = Future blinding
if causality:
diag_vals = tf.ones_like(masked_score[0, :, :]) # (Lc, Lq)
tril = tf.contrib.linalg.LinearOperatorLowerTriangular(diag_vals).to_dense() # (Lc, Lq)
masks = tf.tile(tf.expand_dims(tril, 0), [tf.shape(masked_score)[0], 1, 1]) # B, Lc, Lq
paddings = tf.ones_like(masks) * (-2 ** 32 + 1)
masked_score = tf.where(tf.equal(masks, 0), paddings, masked_score) # B, Lc, Lq
query2context_score = tf.nn.softmax(masked_score, axis=2) * mask # B, Lc, Lq
query2context_attention = tf.matmul(query2context_score, query) # B, Lc, D
context2query_score = tf.nn.softmax(masked_score, axis=1) * mask # B, Lc, Lq
context2query_attention = tf.matmul(context2query_score, context, transpose_a=True) # B, Lq, D
return (query2context_attention, # B, Lc, D
context2query_attention) # B, Lq, D
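# Shape smoke test (my addition): TF1-style graph mode is assumed, matching
# the variable_scope usage above, and static shapes are required because the
# block reads get_shape().as_list(). Needs the `nlp` package importable.
if __name__ == '__main__':
    ctx = tf.random_normal([2, 5, 8])
    qry = tf.random_normal([2, 4, 8])
    ctx_mask = tf.ones([2, 5])
    qry_mask = tf.ones([2, 4])
    q2c, c2q = Attentive_match(ctx, qry, ctx_mask, qry_mask)
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        a, b = sess.run([q2c, c2q])
        print(a.shape, b.shape)  # (2, 5, 8) (2, 4, 8)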
| [
"[email protected]"
] | |
827370360a0c207ac6273117c06be4bf6b0b163e | 882be627c49870ae6f2f81963a3cfc9b719c0011 | /wscript | 601c17dd013a3c54bc088dbbc86fb37531affd98 | [] | no_license | brettviren/cogs | 794142a04c87ce148e939f8ded852dfa1f6df9bc | 681d1ed7e12cd2e7469a5ba3fd7261dc4f8f4c26 | refs/heads/master | 2022-11-30T12:26:19.624956 | 2020-08-10T12:34:44 | 2020-08-10T12:34:44 | 273,746,410 | 0 | 1 | null | 2020-08-10T12:34:45 | 2020-06-20T16:40:57 | JavaScript | UTF-8 | Python | false | false | 2,954 | #!/usr/bin/env waf
VERSION='0.0.0'
APPNAME='cogs'
import os.path as osp
def options(opt):
opt.load('compiler_cxx')
opt.load('waf_unit_test')
opt.add_option('--quell-tests', action='store_true', default=False,
help='Compile but do not run the tests (default=%default)')
opt.add_option('--with-ers', default=None,
help='Set to ERS install area')
opt.add_option('--with-nljs', default=None,
help='Point nlohmann json install area')
opt.add_option('--with-boost', default=None,
help='Set to BOOST install area (needed by ERS)')
def configure(cfg):
cfg.load('compiler_cxx')
cfg.load('waf_unit_test')
cfg.env.CXXFLAGS += ['-std=c++17', '-ggdb3', '-Wall', '-Werror']
## nlohmann::json
nljs = getattr(cfg.options, 'with_nljs', None)
if nljs:
print("using " + nljs)
setattr(cfg.env, 'INCLUDES_NLJS', [osp.join(nljs, "include")])
cfg.check(features='cxx cxxprogram', define_name='HAVE_NLJS',
header_name='nlohmann/json.hpp',
use='NLJS', uselib_store='NLJS', mandatory=True)
## ERS
ers = getattr(cfg.options, 'with_ers',None)
if ers:
setattr(cfg.env, 'RPATH_ERS', [osp.join(ers, 'lib')]);
setattr(cfg.env, 'LIBPATH_ERS', [osp.join(ers, 'lib')]);
setattr(cfg.env, 'INCLUDES_ERS', [osp.join(ers, 'include')]);
cfg.check(features='cxx cxxprogram', define_name='HAVE_ERS',
header='ers/ers.h', lib=['ers','ErsBaseStreams'],
use='ERS', uselib_store='ERS', mandatory=True)
## Boost is not needed directly by cogs but ERS needs it.
boost = getattr(cfg.options, 'with_boost', None)
if boost:
setattr(cfg.env, 'RPATH_BOOST', [osp.join(boost, 'lib')]);
setattr(cfg.env, 'LIBPATH_BOOST', [osp.join(boost, 'lib')]);
setattr(cfg.env, 'INCLUDES_BOOST', [osp.join(boost, 'include')]);
cfg.check(features='cxx cxxprogram', define_name='HAVE_BOOST',
header=['boost/filesystem/filesystem.hpp',
'boost/preprocessor/preprocessor.hpp'],
lib=['boost_filesystem'],
use='BOOST', uselib_store='BOOST', mandatory=True)
cfg.write_config_header('config.hpp')
def build(bld):
bld.recurse("test")
use=['ERS','BOOST','NLJS']
sources = bld.path.ant_glob('src/*.cpp');
bld.shlib(features='cxx', includes='inc',
source = sources, target='cogs',
uselib_store='COGS', use=use)
bld.install_files('${PREFIX}/include/cogs',
bld.path.ant_glob("inc/cogs/**/*.hpp"),
cwd=bld.path.find_dir('inc/cogs'),
install_path=bld.env.PREFIX + '/lib',
relative_trick=True)
from waflib.Tools import waf_unit_test
bld.add_post_fun(waf_unit_test.summary)
bld.recurse("demo")
| [
"[email protected]"
] | ||
cdc237084299675f5c218544154e89c2be810335 | 980434e03e722eaf3a5ff4ab4f1971c8d1cde4c5 | /宝石与石头.py | a2ae90f7262a28b814a440bfe3b1d2cf7a48bc01 | [] | no_license | arry-lee/arryleetcode | c9c548b0defc9771e4e488b3e760809364456c99 | b4b9b971ec81a921cca606dfa46ea4109d975dfb | refs/heads/master | 2020-07-26T14:11:27.645307 | 2019-09-15T23:31:09 | 2019-09-15T23:31:09 | 208,670,826 | 1 | 0 | null | null | null | null | WINDOWS-1252 | Python | false | false | 172 | py | #±¦Ê¯Óëʯͷ
#2019-08-17 06:20:13
class Solution:
def numJewelsInStones(self, J: str, S: str) -> int:
return len([stone for stone in S if stone in J]) | [
"[email protected]"
] | |
d21ab71fd92fd043000de53f2cc733961ddbb79f | 2e2843ead0186fca5f124743395cf1f65f8e579f | /father/urls.py | 5cec4ad8f2d3b6e146074d7c02489928bf232b00 | [] | no_license | django-spain/django-father-rest-framework | 5ce003569d3bdc3d7873de791c25e4a98b6ae57c | 0dd1cd61dd1a624f0baa27320fc99388ca7ca620 | refs/heads/master | 2022-02-21T23:02:23.972257 | 2019-08-27T00:45:11 | 2019-08-27T00:45:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 802 | py | """father URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
urlpatterns = [
path('admin/', admin.site.urls),
path('api/v1.0/', include('book.urls')),
]
| [
"[email protected]"
] | |
ced152ee74e1836bdeb08bcfe32e146b988939d7 | 556db265723b0cc30ad2917442ed6dad92fd9044 | /tensorflow/python/training/experimental/mixed_precision_global_state.py | 6f0a179db65b1ebb31c2cbc1265eaf71b2a09fd6 | [
"MIT",
"Apache-2.0",
"BSD-2-Clause"
] | permissive | graphcore/tensorflow | c1669b489be0e045b3ec856b311b3139858de196 | 085b20a4b6287eff8c0b792425d52422ab8cbab3 | refs/heads/r2.6/sdk-release-3.2 | 2023-07-06T06:23:53.857743 | 2023-03-14T13:04:04 | 2023-03-14T13:48:43 | 162,717,602 | 84 | 17 | Apache-2.0 | 2023-03-25T01:13:37 | 2018-12-21T13:30:38 | C++ | UTF-8 | Python | false | false | 2,635 | py | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contains global variables related to mixed precision.
This is not part of mixed_precision.py to avoid a circular dependency.
mixed_precision.py depends on Session, and Session depends on this file.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.util.tf_export import tf_export
# Whether the mixed precision graph rewrite has been enabled or not with
# `enable_mixed_precision_graph_rewrite`. Used to turn on auto_mixed_precision
# in ConfigProtos passed to Sessions.
_mixed_precision_graph_rewrite_is_enabled = False
# True if a Session has been created without the mixed precision graph rewrite
# being enabled. Used to give a warning if mixed precision is enabled after a
# Session has already been created.
_non_mixed_precision_session_created = False
# Whether the global tf.keras.mixed_precision.Policy uses mixed precision. Used
# to raise an error message if both a mixed Policy and the graph rewrite are
# used at the same time.
_using_mixed_precision_policy = False
@tf_export('__internal__.train.is_mixed_precision_graph_rewrite_enabled', v1=[])
def is_mixed_precision_graph_rewrite_enabled():
return _mixed_precision_graph_rewrite_is_enabled
def set_mixed_precision_graph_rewrite_enabled(enabled):
global _mixed_precision_graph_rewrite_is_enabled
_mixed_precision_graph_rewrite_is_enabled = enabled
def non_mixed_precision_session_created():
return _non_mixed_precision_session_created
def set_non_mixed_precision_session_created(created):
global _non_mixed_precision_session_created
_non_mixed_precision_session_created = created
def is_using_mixed_precision_policy():
return _using_mixed_precision_policy
@tf_export('__internal__.train.set_using_mixed_precision_policy', v1=[])
def set_using_mixed_precision_policy(is_using):
global _using_mixed_precision_policy
_using_mixed_precision_policy = is_using
| [
"[email protected]"
] | |
09018e0be0d1189db97fad7103f982719fe99170 | e25b917f2f0ce28f6d046afaa9c0faddf0aeae34 | /Tutorials/split_and_merging.py | 0e4d372a63ff7e204d74bc9d502e062c6df0682b | [] | no_license | pmusau17/ComputerVision | d9344c22ed1fe1bf8a8c6166a060c307c08529a5 | 2fcdce0a967567c15232fe3c9f02982ca95e5796 | refs/heads/master | 2020-12-11T00:30:20.292943 | 2020-06-03T21:54:36 | 2020-06-03T21:54:36 | 233,751,710 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 786 | py | import numpy as np
import argparse
import cv2
#create argument parser
ap=argparse.ArgumentParser()
ap.add_argument('-i','--image', required=True)
args=vars(ap.parse_args())
#load the image
image=cv2.imread(args['image'])
(B,G,R)=cv2.split(image)
#this will display each of the channels as grayscale
cv2.imshow("Red",R)
cv2.imshow("Green",G)
cv2.imshow("Blue",B)
cv2.waitKey(0)
#this is what I want because I want zeros in the other channels and I hope it gets the
#correct predition
zeros = np.zeros(image.shape[:2],dtype='uint8')
cv2.imshow("Red",cv2.merge([zeros,zeros,R]))
cv2.imshow("Green",cv2.merge([zeros,G,zeros]))
cv2.imshow("Blue",cv2.merge([B,zeros,zeros]))
cv2.waitKey(0)
merged=cv2.merge([B,G,R])
cv2.imshow("Merged",merged)
cv2.waitKey(0)
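# Example invocation (my addition; the image path is a placeholder):
#   python split_and_merging.py --image images/example.png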
cv2.destroyAllWindows() | [
"[email protected]"
] | |
17237a95039c4b7a5d68f70a91b7049b857dfa02 | 52b5773617a1b972a905de4d692540d26ff74926 | /.history/kmp_20200716201539.py | c0ccf11ea7faea0cd681296187ca576428e15267 | [] | no_license | MaryanneNjeri/pythonModules | 56f54bf098ae58ea069bf33f11ae94fa8eedcabc | f4e56b1e4dda2349267af634a46f6b9df6686020 | refs/heads/master | 2022-12-16T02:59:19.896129 | 2020-09-11T12:05:22 | 2020-09-11T12:05:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 785 | py | '''
Given a text txt[0..n-1] and a pattern pat[0..m-1],
write a function search(char pat[], char txt[]) that prints all occurrences of pat[] in txt[]. You may assume that n > m.
'''
def pattern(txt, pat):
    # Caterpillar (sliding window) scan: keep a window [left, right)
    # of exactly len(pat) characters, compare it against the pattern,
    # then slide both pointers one step right until the text is exhausted.
    left = 0
    right = len(pat)
    while right <= len(txt):
        if txt[left:right] == pat:
            print('index', left)
        left += 1
        right += 1
pattern("AABAACAADAABAABA", "AABA")
"[email protected]"
] | |
d6db78cbc8e88ec12c049d25fffbbe429655373c | c22b9c7c4a854ed985e777bcbecd18870439b334 | /byteofpy/file.py | b2c51954e6226494b8066a0e68daaff28ff6f548 | [
"BSD-3-Clause"
] | permissive | pezy/python_test | ceb35a8a63ca8ebe26ffa5c72ace664718c7b328 | b019a0d9f267b5071c37fc85c9acaf27e9146625 | refs/heads/master | 2021-01-19T01:09:20.820202 | 2016-07-30T08:35:15 | 2016-07-30T08:35:15 | 18,096,404 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 415 | py | poem = '''\
Programming is fun
When the work is done
if you wanna make your work also fun:
use Python!
'''
# Open for writing
f = open('poem.txt', 'w')
# Write text to file
f.write(poem)
f.close()
# If no mode is specified
# Read mode is assumed by default
f = open('poem.txt')
while True:
line = f.readline()
# Zero length indicates EOF
if len(line) == 0:
break
print line,
f.close()
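# A more idiomatic variant (my addition): the `with` block closes the file
# automatically and iterating the handle replaces the readline() loop.
with open('poem.txt') as f:
    for line in f:
        print line,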
| [
"[email protected]"
] | |
6809085f9885e6f57126dab2ff54953c84d4801d | 77aa8c9213cfb5c44c2d62579b7e92f64a479800 | /player9.py | d5a6ef4b0c069ffa2b1b79aeed242fbdf9e6d372 | [] | no_license | aiswarya98/programs | 9761f0ab0cb5c7b93e34f7ed534af012a9bfffdb | 96ffb4c3b0c32ea54bd769bfa4c728ac9710bb5a | refs/heads/master | 2020-06-13T23:58:37.756727 | 2019-08-14T05:03:51 | 2019-08-14T05:03:51 | 194,829,076 | 0 | 3 | null | null | null | null | UTF-8 | Python | false | false | 114 | py | x,y=map(str,input().split())
a=x.lower()
b=y.lower()
z=a[0].upper()+a[1:]
q=b[0].upper()+b[1:]
print(z,q,end=' ')
| [
"[email protected]"
] | |
2d5ee23a8cba0fea02e4f205bafc24f5c98fc027 | 375e5bca82843647941068bd7634cf7adf2015ca | /tests/test_transforms_resize_modulo_pad_crop.py | f72a33b918735569e106f2221c7a10a6e1392d92 | [
"MIT"
] | permissive | civodlu/trw | cd57e7bded7fdb0a9d623ed9cd50645fab96583b | 11c59dea0072d940b036166be22b392bb9e3b066 | refs/heads/master | 2023-02-08T09:56:39.203340 | 2023-02-07T14:22:16 | 2023-02-07T14:22:16 | 195,147,670 | 12 | 2 | MIT | 2020-10-19T15:24:11 | 2019-07-04T01:19:31 | Python | UTF-8 | Python | false | false | 1,864 | py | import unittest
import trw
import torch
import numpy as np
class TestTransformsResizeModuloPadCrop(unittest.TestCase):
def test_crop_mode_torch(self):
batch = {
'images': torch.rand([2, 3, 64, 64], dtype=torch.float32)
}
tfm = trw.transforms.TransformResizeModuloCropPad(60)
transformed = tfm(batch)
assert transformed['images'].shape == (2, 3, 60, 60)
def test_crop_mode_torch_multiples(self):
# test with multiple of `multiples_of` shape
batch = {
'images': torch.rand([2, 3, 64, 64], dtype=torch.float32)
}
tfm = trw.transforms.TransformResizeModuloCropPad(10)
transformed = tfm(batch)
assert transformed['images'].shape == (2, 3, 60, 60)
def test_crop_mode_torch_different_shape(self):
batch = {
'images': torch.rand([2, 3, 64, 64], dtype=torch.float32),
'images2': torch.rand([2, 1, 64, 64], dtype=torch.float32)
}
batch['images'][0, 0, 32, 32] = 42.0
batch['images2'][0, 0, 32, 32] = 42.0
tfm = trw.transforms.TransformResizeModuloCropPad(60)
transformed = tfm(batch)
# make sure we can handle different shapes of the same dimension
assert transformed['images'].shape == (2, 3, 60, 60)
assert transformed['images2'].shape == (2, 1, 60, 60)
# make sure the crop/pad are the same for the different images
indices = np.where(batch['images'].numpy() == 42)
assert (batch['images2'][indices] == 42.0).all()
def test_pad_mode_torch(self):
batch = {
'images': torch.rand([2, 3, 65, 65], dtype=torch.float32)
}
tfm = trw.transforms.TransformResizeModuloCropPad(32, mode='pad')
transformed = tfm(batch)
assert transformed['images'].shape == (2, 3, 96, 96)
| [
"[email protected]"
] | |
a6159c8300fb0b87abae3a18443aa2a0f5289589 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02909/s153186210.py | 30d874e36f07b14826fef70e4615890c76d5b77a | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 96 | py | s=input()
if s=="Sunny":
a="Cloudy"
elif s=="Cloudy":
a="Rainy"
else:
a="Sunny"
print(a) | [
"[email protected]"
] | |
03a3ca057562ab99e7a7f7fa9b400e945b6c94f8 | 2293c76c3d18e2fcd44ded90bd40113d26285663 | /pyeccodes/defs/grib2/tables/14/4_2_0_1_table.py | f018e05360c37820a130d303e43efa4aad77138d | [
"Apache-2.0"
] | permissive | ecmwf/pyeccodes | b1f121dbddf68d176a03805ed5144ba0b37ac211 | dce2c72d3adcc0cb801731366be53327ce13a00b | refs/heads/master | 2022-04-23T10:37:40.524078 | 2020-04-18T06:30:29 | 2020-04-18T06:30:29 | 255,554,540 | 9 | 3 | null | null | null | null | UTF-8 | Python | false | false | 11,096 | py | def load(h):
return ({'abbr': 0, 'code': 0, 'title': 'Specific humidity', 'units': 'kg/kg'},
{'abbr': 1, 'code': 1, 'title': 'Relative humidity', 'units': '%'},
{'abbr': 2, 'code': 2, 'title': 'Humidity mixing ratio', 'units': 'kg/kg'},
{'abbr': 3, 'code': 3, 'title': 'Precipitable water', 'units': 'kg m-2'},
{'abbr': 4, 'code': 4, 'title': 'Vapour pressure', 'units': 'Pa'},
{'abbr': 5, 'code': 5, 'title': 'Saturation deficit', 'units': 'Pa'},
{'abbr': 6, 'code': 6, 'title': 'Evaporation', 'units': 'kg m-2'},
{'abbr': 7, 'code': 7, 'title': 'Precipitation rate', 'units': 'kg m-2 s-1'},
{'abbr': 8, 'code': 8, 'title': 'Total precipitation', 'units': 'kg m-2'},
{'abbr': 9,
'code': 9,
'title': 'Large-scale precipitation (non-convective)',
'units': 'kg m-2'},
{'abbr': 10,
'code': 10,
'title': 'Convective precipitation',
'units': 'kg m-2'},
{'abbr': 11, 'code': 11, 'title': 'Snow depth', 'units': 'm'},
{'abbr': 12,
'code': 12,
'title': 'Snowfall rate water equivalent',
'units': 'kg m-2 s-1'},
{'abbr': 13,
'code': 13,
'title': 'Water equivalent of accumulated snow depth',
'units': 'kg m-2'},
{'abbr': 14, 'code': 14, 'title': 'Convective snow', 'units': 'kg m-2'},
{'abbr': 15, 'code': 15, 'title': 'Large-scale snow', 'units': 'kg m-2'},
{'abbr': 16, 'code': 16, 'title': 'Snow melt', 'units': 'kg m-2'},
{'abbr': 17, 'code': 17, 'title': 'Snow age', 'units': 'd'},
{'abbr': 18, 'code': 18, 'title': 'Absolute humidity', 'units': 'kg m-3'},
{'abbr': 19,
'code': 19,
'title': 'Precipitation type',
'units': 'Code table 4.201'},
{'abbr': 20,
'code': 20,
'title': 'Integrated liquid water',
'units': 'kg m-2'},
{'abbr': 21, 'code': 21, 'title': 'Condensate', 'units': 'kg/kg'},
{'abbr': 22, 'code': 22, 'title': 'Cloud mixing ratio', 'units': 'kg/kg'},
{'abbr': 23, 'code': 23, 'title': 'Ice water mixing ratio', 'units': 'kg/kg'},
{'abbr': 24, 'code': 24, 'title': 'Rain mixing ratio', 'units': 'kg/kg'},
{'abbr': 25, 'code': 25, 'title': 'Snow mixing ratio', 'units': 'kg/kg'},
{'abbr': 26,
'code': 26,
'title': 'Horizontal moisture convergence',
'units': 'kg kg-1 s-1'},
{'abbr': 27, 'code': 27, 'title': 'Maximum relative humidity', 'units': '%'},
{'abbr': 28,
'code': 28,
'title': 'Maximum absolute humidity',
'units': 'kg m-3'},
{'abbr': 29, 'code': 29, 'title': 'Total snowfall', 'units': 'm'},
{'abbr': 30,
'code': 30,
'title': 'Precipitable water category',
'units': 'Code table 4.202'},
{'abbr': 31, 'code': 31, 'title': 'Hail', 'units': 'm'},
{'abbr': 32, 'code': 32, 'title': 'Graupel (snow pellets)', 'units': 'kg/kg'},
{'abbr': 33,
'code': 33,
'title': 'Categorical rain',
'units': 'Code table 4.222'},
{'abbr': 34,
'code': 34,
'title': 'Categorical freezing rain',
'units': 'Code table 4.222'},
{'abbr': 35,
'code': 35,
'title': 'Categorical ice pellets',
'units': 'Code table 4.222'},
{'abbr': 36,
'code': 36,
'title': 'Categorical snow',
'units': 'Code table 4.222'},
{'abbr': 37,
'code': 37,
'title': 'Convective precipitation rate',
'units': 'kg m-2 s-1'},
{'abbr': 38,
'code': 38,
'title': 'Horizontal moisture divergence',
'units': 'kg kg-1 s-1'},
{'abbr': 39,
'code': 39,
'title': 'Per cent frozen precipitation',
'units': '%'},
{'abbr': 40, 'code': 40, 'title': 'Potential evaporation', 'units': 'kg m-2'},
{'abbr': 41,
'code': 41,
'title': 'Potential evaporation rate',
'units': 'W m-2'},
{'abbr': 42, 'code': 42, 'title': 'Snow cover', 'units': '%'},
{'abbr': 43,
'code': 43,
'title': 'Rain fraction of total cloud water',
'units': 'Proportion'},
{'abbr': 44, 'code': 44, 'title': 'Rime factor', 'units': 'Numeric'},
{'abbr': 45,
'code': 45,
'title': 'Total column integrated rain',
'units': 'kg m-2'},
{'abbr': 46,
'code': 46,
'title': 'Total column integrated snow',
'units': 'kg m-2'},
{'abbr': 47,
'code': 47,
'title': 'Large scale water precipitation (non-convective)',
'units': 'kg m-2'},
{'abbr': 48,
'code': 48,
'title': 'Convective water precipitation',
'units': 'kg m-2'},
{'abbr': 49,
'code': 49,
'title': 'Total water precipitation',
'units': 'kg m-2'},
{'abbr': 50,
'code': 50,
'title': 'Total snow precipitation',
'units': 'kg m-2'},
{'abbr': 51,
'code': 51,
'title': 'Total column water (Vertically integrated total water (vapour + '
'cloud water/ice))',
'units': 'kg m-2'},
{'abbr': 52,
'code': 52,
'title': 'Total precipitation rate',
'units': 'kg m-2 s-1'},
{'abbr': 53,
'code': 53,
'title': 'Total snowfall rate water equivalent',
'units': 'kg m-2 s-1'},
{'abbr': 54,
'code': 54,
'title': 'Large scale precipitation rate',
'units': 'kg m-2 s-1'},
{'abbr': 55,
'code': 55,
'title': 'Convective snowfall rate water equivalent',
'units': 'kg m-2 s-1'},
{'abbr': 56,
'code': 56,
'title': 'Large scale snowfall rate water equivalent',
'units': 'kg m-2 s-1'},
{'abbr': 57, 'code': 57, 'title': 'Total snowfall rate', 'units': 'm/s'},
{'abbr': 58, 'code': 58, 'title': 'Convective snowfall rate', 'units': 'm/s'},
{'abbr': 59, 'code': 59, 'title': 'Large scale snowfall rate', 'units': 'm/s'},
{'abbr': 60,
'code': 60,
'title': 'Snow depth water equivalent',
'units': 'kg m-2'},
{'abbr': 61, 'code': 61, 'title': 'Snow density', 'units': 'kg m-3'},
{'abbr': 62, 'code': 62, 'title': 'Snow evaporation', 'units': 'kg m-2'},
{'abbr': 63, 'code': 63, 'title': 'Reserved'},
{'abbr': 64,
'code': 64,
'title': 'Total column integrated water vapour',
'units': 'kg m-2'},
{'abbr': 65,
'code': 65,
'title': 'Rain precipitation rate',
'units': 'kg m-2 s-1'},
{'abbr': 66,
'code': 66,
'title': 'Snow precipitation rate',
'units': 'kg m-2 s-1'},
{'abbr': 67,
'code': 67,
'title': 'Freezing rain precipitation rate',
'units': 'kg m-2 s-1'},
{'abbr': 68,
'code': 68,
'title': 'Ice pellets precipitation rate',
'units': 'kg m-2 s-1'},
{'abbr': 69,
'code': 69,
'title': 'Total column integrated cloud water',
'units': 'kg m-2'},
{'abbr': 70,
'code': 70,
'title': 'Total column integrated cloud ice',
'units': 'kg m-2'},
{'abbr': 71, 'code': 71, 'title': 'Hail mixing ratio', 'units': 'kg/kg'},
{'abbr': 72,
'code': 72,
'title': 'Total column integrated hail',
'units': 'kg m-2'},
{'abbr': 73,
'code': 73,
'title': 'Hail precipitation rate',
'units': 'kg m-2 s-1'},
{'abbr': 74,
'code': 74,
'title': 'Total column integrated graupel',
'units': 'kg m-2'},
{'abbr': 75,
'code': 75,
'title': 'Graupel (snow pellets) precipitation rate',
'units': 'kg m-2 s-1'},
{'abbr': 76,
'code': 76,
'title': 'Convective rain rate',
'units': 'kg m-2 s-1'},
{'abbr': 77,
'code': 77,
'title': 'Large scale rain rate',
'units': 'kg m-2 s-1'},
{'abbr': 78,
'code': 78,
'title': 'Total column integrated water (all components including '
'precipitation)',
'units': 'kg m-2'},
{'abbr': 79, 'code': 79, 'title': 'Evaporation rate', 'units': 'kg m-2 s-1'},
{'abbr': 80, 'code': 80, 'title': 'Total condensate', 'units': 'kg/kg'},
{'abbr': 81,
'code': 81,
'title': 'Total column-integrated condensate',
'units': 'kg m-2'},
{'abbr': 82, 'code': 82, 'title': 'Cloud ice mixing-ratio', 'units': 'kg/kg'},
{'abbr': 83,
'code': 83,
'title': 'Specific cloud liquid water content',
'units': 'kg/kg'},
{'abbr': 84,
'code': 84,
'title': 'Specific cloud ice water content',
'units': 'kg/kg'},
{'abbr': 85,
'code': 85,
'title': 'Specific rainwater content',
'units': 'kg/kg'},
{'abbr': 86,
'code': 86,
'title': 'Specific snow water content',
'units': 'kg/kg'},
{'abbr': 90,
'code': 90,
'title': 'Total kinematic moisture flux',
'units': 'kg kg-1 m s-1'},
{'abbr': 91,
'code': 91,
'title': 'u-component (zonal) kinematic moisture flux',
'units': 'kg kg-1 m s-1'},
{'abbr': 92,
'code': 92,
'title': 'v-component (meridional) kinematic moisture flux',
'units': 'kg kg-1 m s-1'},
{'abbr': 93,
'code': 93,
'title': 'Relative humidity with respect to water',
'units': '%'},
{'abbr': 94,
'code': 94,
'title': 'Relative humidity with respect to ice',
'units': '%'},
{'abbr': None, 'code': 255, 'title': 'Missing'})
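# Illustrative lookup (my addition): load() ignores its argument, so the table
# can be scanned directly for a parameter code.
if __name__ == '__main__':
    entries = load(None)
    print(next(e for e in entries if e['code'] == 8)['title'])  # Total precipitation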
| [
"[email protected]"
] | |
2ebcefcec2c64fbf9f76368c9e52f2c4f6031297 | 1a80c38ea020a8b18bb2c61b55caff8a38f553b9 | /SWEA/sol/5356.py | ad8db3bcc47a8c35ae33aab7759fffd9c7fb8cff | [] | no_license | jiwookseo/problem-solving | 775a47825dc73f8a29616ef7011e8ee7be346f80 | eefbefb21608ae0a2b3c75c010ae14995b7fc646 | refs/heads/master | 2020-04-19T03:11:02.659816 | 2019-08-14T08:59:06 | 2019-08-14T08:59:06 | 167,926,883 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 275 | py | for tc in range(1, int(input())+1):
s = [input() for _ in range(5)]
l = [len(i) for i in s]
ml = max(l)
temp = ""
for c in range(ml):
for r in range(5):
if l[r] > c:
temp += s[r][c]
print("#{} {}".format(tc, temp))
| [
"[email protected]"
] | |
952f0ccca47807b4540c47e2a8a72c32c763961a | a8a5772674e62beaa4f5b1f115d280103fd03749 | /persistence.py | 00a776c4f7564a2e7aae6994a1f3c1b497b94024 | [] | no_license | tahentx/pv_workbook | c6fb3309d9acde5302dd3ea06a34ad2aee0de4b7 | 08912b0ef36a5226d23fa0430216a3f277aca33b | refs/heads/master | 2022-12-12T20:39:35.688510 | 2021-03-30T03:20:54 | 2021-03-30T03:20:54 | 172,827,250 | 0 | 1 | null | 2022-12-08T16:47:39 | 2019-02-27T02:25:24 | Python | UTF-8 | Python | false | false | 188 | py | n = 38941
value = [int(x) for x in str(n)]
persist = value[0] * value[1]
next_value = [int(x) for x in str(persist)]
persist_again = next_value[0] * next_value[1]
print(str(persist_again)
| [
"[email protected]"
] | |
05f6da044977d12f49574500ccb24d84c43ab32d | 88ae8695987ada722184307301e221e1ba3cc2fa | /third_party/webdriver/pylib/selenium/webdriver/common/desired_capabilities.py | 0f97e7273aeda07105d9a8c34258dad8554e9e60 | [
"BSD-3-Clause",
"Apache-2.0",
"LGPL-2.0-or-later",
"MIT",
"GPL-1.0-or-later"
] | permissive | iridium-browser/iridium-browser | 71d9c5ff76e014e6900b825f67389ab0ccd01329 | 5ee297f53dc7f8e70183031cff62f37b0f19d25f | refs/heads/master | 2023-08-03T16:44:16.844552 | 2023-07-20T15:17:00 | 2023-07-23T16:09:30 | 220,016,632 | 341 | 40 | BSD-3-Clause | 2021-08-13T13:54:45 | 2019-11-06T14:32:31 | null | UTF-8 | Python | false | false | 2,994 | py | # Licensed to the Software Freedom Conservancy (SFC) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The SFC licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
The Desired Capabilities implementation.
"""
class DesiredCapabilities(object):
"""
Set of default supported desired capabilities.
Use this as a starting point for creating a desired capabilities object for
requesting remote webdrivers for connecting to selenium server or selenium grid.
Usage Example::
from selenium import webdriver
selenium_grid_url = "http://198.0.0.1:4444/wd/hub"
# Create a desired capabilities object as a starting point.
capabilities = DesiredCapabilities.FIREFOX.copy()
capabilities['platform'] = "WINDOWS"
capabilities['version'] = "10"
# Instantiate an instance of Remote WebDriver with the desired capabilities.
driver = webdriver.Remote(desired_capabilities=capabilities,
command_executor=selenium_grid_url)
Note: Always use '.copy()' on the DesiredCapabilities object to avoid the side
effects of altering the Global class instance.
"""
FIREFOX = {
"browserName": "firefox",
"acceptInsecureCerts": True,
"moz:debuggerAddress": True,
}
INTERNETEXPLORER = {
"browserName": "internet explorer",
"platformName": "windows",
}
EDGE = {
"browserName": "MicrosoftEdge",
}
CHROME = {
"browserName": "chrome",
}
OPERA = {
"browserName": "opera",
}
SAFARI = {
"browserName": "safari",
"platformName": "mac",
}
HTMLUNIT = {
"browserName": "htmlunit",
"version": "",
"platform": "ANY",
}
HTMLUNITWITHJS = {
"browserName": "htmlunit",
"version": "firefox",
"platform": "ANY",
"javascriptEnabled": True,
}
IPHONE = {
"browserName": "iPhone",
"version": "",
"platform": "mac",
}
IPAD = {
"browserName": "iPad",
"version": "",
"platform": "mac",
}
WEBKITGTK = {
"browserName": "MiniBrowser",
"version": "",
"platform": "ANY",
}
WPEWEBKIT = {
"browserName": "MiniBrowser",
"version": "",
"platform": "ANY",
}
| [
"[email protected]"
] | |
7d4aaa5e2ea4a2279deba143f873f693f7394bc4 | c5148bc364dac753c0872bd5676027a30b260486 | /biosteam/_facility.py | be6ea781f47ebbbb09b743864d6630a54816bf2b | [
"MIT",
"NCSA",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | Ecoent/biosteam | 86f47c713a2cae5d6261b6c2c7734ccf7a90fb4e | f1371386d089df3aa8ce041175f210c0318c1fe0 | refs/heads/master | 2021-02-24T14:10:23.158984 | 2020-03-05T03:43:17 | 2020-03-05T03:43:17 | 245,433,768 | 1 | 0 | NOASSERTION | 2020-03-06T13:59:27 | 2020-03-06T13:59:26 | null | UTF-8 | Python | false | false | 270 | py | # -*- coding: utf-8 -*-
"""
Created on Sat Jul 13 02:24:35 2019
@author: yoelr
"""
from ._unit import Unit
__all__ = ('Facility',)
class Facility(Unit, isabstract=True, new_graphics=False):
@property
def system(self):
return self._system
| [
"[email protected]"
] | |
6d031f1f4dcbd9c766182a6e3f257ba19b599a3e | 562d4bf000dbb66cd7109844c972bfc00ea7224c | /addons-clarico/clarico_product/model/__init__.py | ab1cc71c7f627b65c753c486e16b42d63d131315 | [] | no_license | Mohamed33/odoo-efact-11-pos | e9da1d17b38ddfe5b2d0901b3dbadf7a76bd2059 | de38355aea74cdc643a347f7d52e1d287c208ff8 | refs/heads/master | 2023-03-10T15:24:44.052883 | 2021-03-06T13:25:58 | 2021-03-06T13:25:58 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 79 | py | from . import product_template
from . import website
from . import res_config
| [
"[email protected]"
] | |
ef4ace6d77e93557af3874532ced9981d737fdd6 | a5a4cee972e487512275c34f308251e6cc38c2fa | /examples/Ni__eam__born_exp_fs__postprocessing/CCA_param_clusters_in_qoi_space/configuration/configure_qoi_pca_transform.py | 7c3b9ff7b710945bd9e4fc499cce2f5621fb418b | [
"MIT"
] | permissive | eragasa/pypospack | 4f54983b33dcd2dce5b602bc243ea8ef22fee86b | 21cdecaf3b05c87acc532d992be2c04d85bfbc22 | refs/heads/master | 2021-06-16T09:24:11.633693 | 2019-12-06T16:54:02 | 2019-12-06T16:54:02 | 99,282,824 | 4 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,110 | py | from collections import OrderedDict
from pypospack.pyposmat.data.pipeline import PyposmatPipeline
pipeline_configuration = OrderedDict()
# define first segment (normalization)
pipeline_configuration[0] = OrderedDict() # int keys indicate step number
pipeline_configuration[0]['segment_type'] = 'preprocess'
pipeline_configuration[0]['function_calls'] = OrderedDict()
pipeline_configuration[0]['function_calls'][0]= OrderedDict() # int keys allow multiple calls to same function
pipeline_configuration[0]['function_calls'][0]['function'] = 'normalize_standard_scaler'
pipeline_configuration[0]['function_calls'][0]['args'] = OrderedDict()
pipeline_configuration[0]['function_calls'][0]['args']['cols'] = ['qoi']
pipeline_configuration[0]['function_calls'][0]['args']['clusters'] = None
pipeline_configuration[0]['function_calls'][0]['args']['kwargs'] = OrderedDict()
pipeline_configuration[0]['function_calls'][0]['args']['kwargs']['standard_scaler'] = OrderedDict()
pipeline_configuration[0]['function_calls'][0]['args']['kwargs']['standard_scaler']['with_mean'] = True
pipeline_configuration[0]['function_calls'][0]['args']['kwargs']['standard_scaler']['with_std'] = True
# define second segment (CCA transformation)
pipeline_configuration[1] = OrderedDict()
pipeline_configuration[1]['segment_type'] = 'pca'
pipeline_configuration[1]['function_calls'] = OrderedDict()
pipeline_configuration[1]['function_calls'][0]= OrderedDict()
pipeline_configuration[1]['function_calls'][0]['function'] = 'transform_cca'
pipeline_configuration[1]['function_calls'][0]['args'] = OrderedDict()
pipeline_configuration[1]['function_calls'][0]['args']['cols'] = ['n_qoi']
pipeline_configuration[1]['function_calls'][0]['args']['clusters'] = None
pipeline_configuration[1]['function_calls'][0]['args']['kwargs'] = OrderedDict()
pipeline_configuration[1]['function_calls'][0]['args']['kwargs']['cca'] = OrderedDict()
if __name__ == "__main__":
pipeline = PyposmatPipeline()
fn = __file__.replace('.py', '.in')
pipeline.write_configuration(filename=fn,
d=pipeline_configuration)
| [
"[email protected]"
] | |
777b2f147135a023870de3cce3193786a5c9b525 | 55f60b7ec448eb48b75118b01b3878c8345242bb | /tests/scripts/waf-tools/f_guidelines/__init__.py | de2b6d8f5507598e6072beabb216d9c336060fc1 | [
"BSD-3-Clause",
"CC-BY-4.0"
] | permissive | dd-rrc-ps/foxbms-2 | cd8d272afa24187c85c6fa747226bebed4cefc5e | 555704a9c4af3dd1c2213e6f0be9860f34e2b1b3 | refs/heads/master | 2023-08-18T13:49:42.503755 | 2021-10-21T12:15:09 | 2021-10-21T12:15:09 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,136 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Copyright (c) 2010 - 2021, Fraunhofer-Gesellschaft zur Foerderung der angewandten Forschung e.V.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# We kindly request you to use one or more of the following phrases to refer to
# foxBMS in your hardware, software, documentation or advertising materials:
#
# - "This product uses parts of foxBMS®"
# - "This product includes parts of foxBMS®"
# - "This product is derived from foxBMS®"
# f_guidelines is not a proper python module name, but this is OK since we need
# it just for the unit test discovery
# pylint: disable-all
| [
"[email protected]"
] | |
2ae2ad897bb8822cbdc7b5ce0e30b526761062cd | 00cb5907750926f1a9b0fde97301f10d01f49645 | /tf_quant_finance/experimental/local_stochastic_volatility/local_stochastic_volatility_model.py | 0502cc8f587b533bfd4c7c39c011e4add8ee24ea | [
"Apache-2.0",
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference",
"LicenseRef-scancode-generic-cla"
] | permissive | dannyb2018/tf-quant-finance | 63761e4a39b615da6a5258e48030d2b12a142b26 | 668b4fb0f91b1f60c9015cef087b3e879ee2a4f7 | refs/heads/master | 2023-07-07T20:00:59.529305 | 2021-08-18T13:05:11 | 2021-08-18T13:05:51 | 284,707,826 | 0 | 0 | Apache-2.0 | 2020-08-03T13:29:15 | 2020-08-03T13:29:14 | null | UTF-8 | Python | false | false | 33,519 | py | # Lint as: python3
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Local Stochastic Volatility process."""
import functools
import numpy as np
import tensorflow.compat.v2 as tf
from tf_quant_finance import datetime as dates
from tf_quant_finance import math
from tf_quant_finance.experimental import local_volatility as lvm
from tf_quant_finance.experimental.pricing_platform.framework.market_data import utils
from tf_quant_finance.math import pde
from tf_quant_finance.math.interpolation import linear
from tf_quant_finance.models import generic_ito_process
class LocalStochasticVolatilityModel(generic_ito_process.GenericItoProcess):
r"""Local stochastic volatility model.
Local stochastic volatility (LSV) models assume that the spot price of an
asset follows the following stochastic differential equation under the risk
neutral measure [1]:
```None
dS(t) / S(t) = (r - d) dt + sqrt(v(t)) * L(t, S(t)) * dW_s(t)
dv(t) = a(v(t)) dt + b(v(t)) dW_v(t)
E[dW_s(t)dW_v(t)] = rho dt
```
where `r` and `d` denote the risk free interest rate and dividend yield
respectively. `S(t)` is the spot price, `v(t)` denotes the stochastic variance
and the function `L(t, S(t))` is the leverage function which is calibrated
using the volatility smile data. The functions `a(v(t))` and `b(v(t))` denote
  the drift and volatility of the stochastic process for the variance, and `rho`
  denotes the instantaneous correlation between the spot and the variance
process. LSV models thus combine the local volatility dynamics with
stochastic volatility.
  Using the relationship between the local volatility and the expectation of
  future instantaneous variance, the leverage function can be computed as
  follows [2]:
```
sigma(T,K)^2 = L(T,K)^2 * E[v(T)|S(T)=K]
```
  where the local volatility function `sigma(T,K)` can be computed using
  Dupire's formula.
The `LocalStochasticVolatilityModel` class contains a generic implementation
of the LSV model with the flexibility to specify an arbitrary variance
process. The default variance process is a Heston type process with
mean-reverting variance (as in Ref. [1]):
```
dv(t) = k(m - v(t)) dt + alpha*sqrt(v(t)) dW_v(t)
```
#### References:
[1]: Iain J. Clark. Foreign exchange option pricing - A Practitioner's
guide. Chapter 5. 2011.
[2]: I. Gyongy. Mimicking the one-dimensional marginal distributions of
processes having an ito differential. Probability Theory and Related
Fields, 71, 1986.
"""
def __init__(self,
leverage_fn,
variance_process,
risk_free_rate=None,
dividend_yield=None,
rho=None,
dtype=None,
name=None):
"""Initializes the Local stochastic volatility model.
Args:
leverage_fn: A Python callable which returns the Leverage function
`L(t, S(t))` as a function of state and time. The function must accept
a scalar `Tensor` corresponding to time 't' and a real `Tensor` of shape
`[num_samples, 1]` corresponding to the underlying price (S) as
inputs and return a real `Tensor` containing the leverage function
computed at (S,t).
variance_process: An instance of `ItoProcess` specifying the
dynamics of the variance process of the LSV model. The
`variance_process` should implement a one-factor stochastic process.
        For the common version of a Heston-like variance model, use
`LSVVarianceModel`.
risk_free_rate: An optional scalar real `Tensor` specifying the
(continuously compounded) risk free interest rate. If the underlying is
an FX rate, then use this input to specify the domestic interest rate.
Note that the current implementation supports constant interest rates
and dividend yield.
Default value: `None` in which case the input is set to zero.
dividend_yield: An optional real scalar `Tensor` specifying the
        (continuously compounded) dividend yield. If the underlying is an FX
rate, then use this input to specify the foreign interest rate.
        Note that the current implementation supports constant interest rates
and dividend yield.
Default value: `None` in which case the input is set to zero.
rho: A real scalar `Tensor` specifying the correlation between the
underlying spot price and the variance process.
Default value: `None` in which case cross correlations are assumed
to be zero.
dtype: The default dtype to use when converting values to `Tensor`s.
Default value: `None` which means that default dtypes inferred by
TensorFlow are used.
name: Python string. The name to give to the ops created by this class.
Default value: `None` which maps to the default name
`local_stochastic_volatility_model`.
"""
self._name = name or "local_stochastic_volatility_model"
with tf.name_scope(self._name):
if risk_free_rate is None:
risk_free_rate = 0.0
if dividend_yield is None:
dividend_yield = 0.0
self._risk_free_rate = tf.convert_to_tensor(risk_free_rate, dtype=dtype)
      self._dtype = dtype or self._risk_free_rate.dtype
self._dividend_yield = tf.convert_to_tensor(dividend_yield, dtype=dtype)
self._leverage_fn = leverage_fn
self._variance_process = variance_process
dim = 1 + variance_process.dim()
rho = rho or 0.0
self._rho = _create_corr_matrix(rho, self._dtype)
self._sqrt_rho = tf.linalg.cholesky(self._rho)
def _vol_fn(t, state):
"""Volatility function of LSV model."""
num_samples = state.shape.as_list()[0]
broadcasted_t = tf.broadcast_to(t, [1, num_samples])
spot_prices = state[:, 0]
variance = state[:, 1:]
level_fun = self._leverage_fn(
broadcasted_t, tf.expand_dims(spot_prices, axis=0))
spot_diffusion = tf.expand_dims(
level_fun[0, :], axis=-1) * tf.expand_dims(
spot_prices, axis=-1) * tf.math.sqrt(variance)
variance_diffusion = self._variance_process.volatility_fn()(
t, variance)
diffusion = tf.concat([spot_diffusion, variance_diffusion], axis=1)
diffusion = tf.expand_dims(diffusion, axis=-2)
return diffusion * self._sqrt_rho
# Drift function
def _drift_fn(t, state):
"""Drift function of LSV model."""
spot_drift = (
self._risk_free_rate - self._dividend_yield) * state[:, :1]
variance_drift = self._variance_process.drift_fn()(t, state[:, 1:])
return tf.concat([spot_drift, variance_drift], axis=1)
super(LocalStochasticVolatilityModel,
self).__init__(dim, _drift_fn, _vol_fn, self._dtype, self._name)
@classmethod
def from_market_data(cls,
valuation_date,
expiry_dates,
strikes,
implied_volatilities,
variance_process,
initial_spot,
initial_variance,
rho=None,
risk_free_rate=None,
dividend_yield=None,
time_step=None,
num_grid_points=None,
grid_minimums=None,
grid_maximums=None,
dtype=None):
"""Creates a `LocalStochasticVolatilityModel` from market data.
This function computes the leverage function for the LSV model by first
computing the joint probability density function `p(t, X(t), v(t))` where
`X(t)` is the log of the spot price and `v(t)` is the variance at time `t`.
    The joint probability density is computed using the Fokker-Planck equation of
the LSV model (see 6.8.2 in Ref [1]):
```None
dp/dt = 1/2 d^2 [v L(t,X)^2 p]/dX^2 + 1/2 d^2 [b(v)^2 p]/dv^2 +
rho d^2 [sqrt(v)L(t,X)b(v) p]/dXdv -
d[(r - d - 1/2 v L(t,X)^2)p]/dX -
d[a(v) p]/dv
```
where `a(v)` and `b(v)` are the drift and diffusion functions for the
variance process. Defining
```None
I_n(k,t) = int v^n p(t, k, v) dv
```
we can calculate the leverage function as follows:
```None
L(k, t) = sigma(exp(k), t) sqrt(I_0(k, t)/I_1(k, t)).
```
    Note that the computation of `I_0` and `I_1` requires knowledge of the
    leverage function itself, hence the leverage function is defined
    implicitly.
Args:
valuation_date: A scalar `DateTensor` specifying the valuation
(or settlement) date for the market data.
expiry_dates: A `DateTensor` of shape `(num_expiries,)` containing the
expiry dates on which the implied volatilities are specified.
strikes: A `Tensor` of real dtype and shape `(num_expiries,
num_strikes)` specifying the strike prices at which implied volatilities
are specified.
implied_volatilities: A `Tensor` of real dtype and shape `(num_expiries,
num_strikes)` specifying the implied volatilities.
variance_process: An instance of `LSVVarianceModel` or
`ItoProcess` specifying the dynamics of the variance process of
the LSV model.
initial_spot: A real scalar `Tensor` specifying the underlying spot price
on the valuation date.
initial_variance: A real scalar `Tensor` specifying the initial variance
on the valuation date.
rho: A real scalar `Tensor` specifying the correlation between spot price
and the stochastic variance.
      risk_free_rate: A real scalar `Tensor` specifying the (continuously
        compounded) risk free interest rate. If the underlying is an FX rate,
        then use this input to specify the domestic interest rate.
        Default value: `None` in which case the input is set to zero.
      dividend_yield: A real scalar `Tensor` specifying the (continuously
        compounded) dividend yield. If the underlying is an FX rate, then use
this input to specify the foreign interest rate.
Default value: `None` in which case the input is set to zero.
time_step: A real scalar `Tensor` specifying the time step during the
numerical solution of the Fokker-Planck PDE.
Default value: None, in which case `time_step` corresponding to 100 time
steps is used.
num_grid_points: A scalar integer `Tensor` specifying the number of
discretization points for each spatial dimension.
Default value: None, in which case number of grid points is set to 100.
grid_minimums: An optional `Tensor` of size 2 containing the minimum grid
        points for PDE spatial discretization. `grid_minimums[0]` corresponds
        to the minimum spot price in the spatial grid and `grid_minimums[1]`
        corresponds to the minimum variance value.
      grid_maximums: An optional `Tensor` of size 2 containing the maximum grid
        points for PDE spatial discretization. `grid_maximums[0]` corresponds
        to the maximum spot price in the spatial grid and `grid_maximums[1]`
        corresponds to the maximum variance value.
dtype: The default dtype to use when converting values to `Tensor`s.
Default value: `None` which means that default dtypes inferred by
TensorFlow are used.
Returns:
An instance of `LocalStochasticVolatilityModel` constructed using the
input data.
"""
if risk_free_rate is None:
discount_factor_fn = lambda t: tf.ones_like(t, dtype=dtype)
else:
r = tf.convert_to_tensor(risk_free_rate, dtype=dtype)
discount_factor_fn = lambda t: tf.math.exp(-r * t)
lv_model = lvm.LocalVolatilityModel.from_market_data(
dim=1,
valuation_date=valuation_date,
expiry_dates=expiry_dates,
strikes=strikes,
implied_volatilities=implied_volatilities,
spot=initial_spot,
discount_factor_fn=discount_factor_fn,
dividend_yield=dividend_yield,
dtype=dtype)
dtype = dtype or lv_model.dtype()
max_time = tf.math.reduce_max(
dates.daycount_actual_365_fixed(
start_date=valuation_date, end_date=expiry_dates, dtype=dtype))
if time_step is None:
time_step = max_time / 100.0
rho = rho or 0.0
num_grid_points = num_grid_points or 100
leverage_fn = _leverage_function_using_pde(
risk_free_rate=risk_free_rate,
dividend_yield=dividend_yield,
lv_model=lv_model,
variance_model=variance_process,
rho=[rho],
initial_spot=initial_spot,
initial_variance=initial_variance,
time_step=time_step,
max_time=max_time,
num_grid_points=num_grid_points,
grid_minimums=grid_minimums,
grid_maximums=grid_maximums,
dtype=dtype)
return LocalStochasticVolatilityModel(
leverage_fn,
variance_process,
risk_free_rate=risk_free_rate,
dividend_yield=dividend_yield,
rho=rho,
dtype=dtype)
@classmethod
def from_volatility_surface(cls,
implied_volatility_surface,
variance_process,
initial_spot,
initial_variance,
rho=None,
risk_free_rate=None,
dividend_yield=None,
time_step=None,
num_grid_points=None,
grid_minimums=None,
grid_maximums=None,
dtype=None):
"""Creates a `LocalStochasticVolatilityModel` from volatility surface.
This function computes the leverage function for the LSV model by first
computing the joint probablity density function `p(t, X(t), v(t))` where
`X(t)` is the log of the spot price and `v(t)` is the variance at time `t`.
The joint probablity density is computed using the Fokker-Planck equation of
the LSV model (see 6.8.2 in Ref [1]):
```None
dp/dt = 1/2 d^2 [v L(t,X)^2 p]/dX^2 + 1/2 d^2 [b(v)^2 p]/dv^2 +
rho d^2 [sqrt(v)L(t,X)b(v) p]/dXdv -
d[(r - d - 1/2 v L(t,X)^2)p]/dX -
d[a(v) p]/dv
```
where `a(v)` and `b(v)` are the drift and diffusion functions for the
variance process. Defining
```None
I_n(k,t) = int v^n p(t, k, v) dv
```
we can calculate the leverage function as follows:
```None
L(k, t) = sigma(exp(k), t) sqrt(I_0(k, t)/I_1(k, t)).
```
Args:
implied_volatility_surface: Either an instance of
`processed_market_data.VolatilitySurface` or a Python object containing
the implied volatility market data. If the input is a Python object,
then the object must implement a function `volatility(strike,
expiry_times)` which takes real `Tensor`s corresponding to option
strikes and time to expiry and returns a real `Tensor` containing the
        corresponding market implied volatility.
variance_process: An instance of `LSVVarianceModel` or
        `ItoProcess` specifying the dynamics of the variance process of
the LSV model.
initial_spot: A real scalar `Tensor` specifying the underlying spot price
on the valuation date.
initial_variance: A real scalar `Tensor` specifying the initial variance
on the valuation date.
rho: A real scalar `Tensor` specifying the correlation between spot price
and the stochastic variance.
      risk_free_rate: A real scalar `Tensor` specifying the (continuously
        compounded) risk free interest rate. If the underlying is an FX rate,
        then use this input to specify the domestic interest rate.
        Default value: `None` in which case the input is set to zero.
      dividend_yield: A real scalar `Tensor` specifying the (continuously
        compounded) dividend yield. If the underlying is an FX rate, then use
this input to specify the foreign interest rate.
Default value: `None` in which case the input is set to zero.
time_step: An optional real scalar `Tensor` specifying the time step
during the numerical solution of the Fokker-Planck PDE.
Default value: None, in which case `time_step` corresponding to 100 time
steps is used.
num_grid_points: A scalar integer `Tensor` specifying the number of
discretization points for each spatial dimension.
Default value: None, in which case number of grid points is set to 100.
grid_minimums: An optional `Tensor` of size 2 containing the minimum grid
        points for PDE spatial discretization. `grid_minimums[0]` corresponds
        to the minimum spot price in the spatial grid and `grid_minimums[1]`
        corresponds to the minimum variance value.
      grid_maximums: An optional `Tensor` of size 2 containing the maximum grid
        points for PDE spatial discretization. `grid_maximums[0]` corresponds
        to the maximum spot price in the spatial grid and `grid_maximums[1]`
        corresponds to the maximum variance value.
dtype: The default dtype to use when converting values to `Tensor`s.
Default value: `None` which means that default dtypes inferred by
TensorFlow are used.
Returns:
An instance of `LocalStochasticVolatilityModel` constructed using the
input data.
"""
if risk_free_rate is None:
discount_factor_fn = lambda t: tf.ones_like(t, dtype=dtype)
else:
r = tf.convert_to_tensor(risk_free_rate, dtype=dtype)
discount_factor_fn = lambda t: tf.math.exp(-r * t)
lv_model = lvm.LocalVolatilityModel.from_volatility_surface(
dim=1,
spot=initial_spot,
implied_volatility_surface=implied_volatility_surface,
discount_factor_fn=discount_factor_fn,
dividend_yield=dividend_yield,
dtype=dtype)
dtype = dtype or lv_model.dtype()
day_count_fn = utils.get_daycount_fn(
implied_volatility_surface.daycount_convention)
max_time = tf.math.reduce_max(
day_count_fn(
start_date=implied_volatility_surface.settlement_date(),
end_date=implied_volatility_surface.node_expiries()))
if time_step is None:
time_step = max_time / 100.0
rho = rho or 0.0
num_grid_points = num_grid_points or 100
leverage_fn = _leverage_function_using_pde(
risk_free_rate=risk_free_rate,
dividend_yield=dividend_yield,
lv_model=lv_model,
variance_model=variance_process,
rho=[rho],
initial_spot=initial_spot,
initial_variance=initial_variance,
time_step=time_step,
max_time=max_time,
num_grid_points=num_grid_points,
grid_minimums=grid_minimums,
grid_maximums=grid_maximums,
dtype=dtype)
return LocalStochasticVolatilityModel(
leverage_fn,
variance_process,
risk_free_rate=risk_free_rate,
dividend_yield=dividend_yield,
rho=rho,
dtype=dtype)
def _create_corr_matrix(rho, dtype):
"""Create correlation matrix with scalar `rho`."""
one = tf.constant(1.0, dtype=dtype)
m1 = tf.concat([one, rho], axis=0)
m2 = tf.concat([rho, one], axis=0)
return tf.stack([m1, m2])
def _machine_eps(dtype):
"""Returns the machine epsilon for the supplied dtype."""
dtype = tf.as_dtype(dtype).as_numpy_dtype
eps = 1e-6 if dtype == np.float32 else 1e-10
return eps
def _two_d_integration(grid, value_grid):
"""Perform 2-D integration numerically."""
log_spot_grid, variance_grid = tf.meshgrid(*grid)
delta_v = variance_grid[1:, :] - variance_grid[:-1, :]
delta_s = log_spot_grid[:, 1:] - log_spot_grid[:, :-1]
integral = tf.math.reduce_sum(value_grid[0, :-1, :] * delta_v, axis=0)
integral = tf.math.reduce_sum(integral[:-1] * delta_s[0, :])
return integral
# TODO(b/175023506): Move to `grids` module
def _tavella_randell_nonuniform_grid(x_min, x_max, x_star, num_grid_points,
alpha, dtype):
"""Creates non-uniform grid clustered around a specified point.
Args:
x_min: A real `Tensor` of shape `(dim,)` specifying the lower limit of the
grid.
x_max: A real `Tensor` of same shape and dtype as `x_min` specifying the
upper limit of the grid.
x_star: A real `Tensor` of same shape and dtype as `x_min` specifying the
location on the grid around which higher grid density is desired.
num_grid_points: A scalar integer `Tensor` specifying the number of points
on the grid.
alpha: A scalar parameter which controls the degree of non-uniformity of the
      grid. Smaller values of `alpha` correspond to a greater degree of
      clustering around `x_star`.
dtype: The default dtype to use when converting values to `Tensor`s.
Returns:
A real `Tensor` of shape `(dim, num_grid_points+1)` containing the
non-uniform grid.
"""
c1 = tf.math.asinh((x_min - x_star) / alpha)
c2 = tf.math.asinh((x_max - x_star) / alpha)
i = tf.expand_dims(tf.range(0, num_grid_points + 1, 1, dtype=dtype), axis=-1)
grid = x_star + alpha * tf.math.sinh(c2 * i / num_grid_points + c1 *
(1 - i / num_grid_points))
# reshape from (num_grid_points+1, dim) to (dim, num_grid_points+1)
return tf.transpose(grid)
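# A small usage sketch of the grid helper (all values hypothetical): a 1-d
# grid of 101 points on [0.01, 10.0], clustered around x_star = 1.0 with
# alpha = 0.3.
#
#   grid = _tavella_randell_nonuniform_grid(
#       x_min=tf.constant([0.01], dtype=tf.float64),
#       x_max=tf.constant([10.0], dtype=tf.float64),
#       x_star=tf.constant([1.0], dtype=tf.float64),
#       num_grid_points=100, alpha=0.3, dtype=tf.float64)
#   # grid has shape (1, 101), with points densest near x_star.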
def _conditional_expected_variance_from_pde_solution(grid, value_grid, dtype):
"""Computes E[variance|log_spot=k]."""
# value_grid.shape = [1, num_x, num_y]
log_spot_grid, variance_grid = tf.meshgrid(*grid)
delta_s = variance_grid[1:, :] - variance_grid[:-1, :]
# Calculate I(0)
integral_0 = tf.math.reduce_sum(value_grid[0, :-1, :] * delta_s, axis=0)
# Calculate I(1)
integral_1 = tf.math.reduce_sum(
variance_grid[:-1, :] * value_grid[0, :-1, :] * delta_s, axis=0)
variance_given_logspot = tf.math.divide_no_nan(integral_1, integral_0)
return functools.partial(
linear.interpolate,
x_data=log_spot_grid[0, :],
y_data=variance_given_logspot,
dtype=dtype)
def _leverage_function_using_pde(*, risk_free_rate, dividend_yield, lv_model,
variance_model, rho, initial_spot,
initial_variance, max_time, time_step,
num_grid_points, grid_minimums,
grid_maximums, dtype):
"""Computes Leverage function using Fokker-Planck PDE for joint density.
This function computes the leverage function for the LSV model by first
  computing the joint probability density function `p(t, X(t), v(t))` where
  `X(t)` is the log of the spot price and `v(t)` is the variance at time `t`.
  The joint probability density is computed using the Fokker-Planck equation of
the LSV model (see 6.8.2 in Ref [1]):
```None
dp/dt = 1/2 d^2 [v L(t,X)^2 p]/dX^2 + 1/2 d^2 [b(v)^2 p]/dv^2 +
rho d^2 [sqrt(v)L(t,X)b(v) p]/dXdv - d[(r - d - 1/2 v L(t,X)^2)p]/dX -
d[a(v) p]/dv
```
where `a(v)` and `b(v)` are the drift and diffusion functions for the
variance process. Defining
```None
I_n(k,t) = int v^n p(t, k, v) dv
```
we can calculate the leverage function as follows:
```None
L(k, t) = sigma(exp(k), t) sqrt(I_0(k, t)/I_1(k, t)).
```
Args:
    risk_free_rate: A scalar real `Tensor` specifying the (continuously
      compounded) risk free interest rate. If the underlying is an FX rate, then
      use this input to specify the domestic interest rate.
    dividend_yield: A real scalar `Tensor` specifying the (continuously
compounded) dividend yield. If the underlying is an FX rate, then use this
input to specify the foreign interest rate.
lv_model: An instance of `LocalVolatilityModel` specifying the local
volatility for the spot price.
variance_model: An instance of `LSVVarianceModel` specifying the dynamics of
the variance process of the LSV model.
rho: A real scalar `Tensor` specifying the correlation between spot price
and the stochastic variance.
initial_spot: A real scalar `Tensor` specifying the underlying spot price on
the valuation date.
initial_variance: A real scalar `Tensor` specifying the initial variance on
the valuation date.
max_time: A real scalar `Tensor` specifying the maximum time to which the
Fokker-Planck PDE is evolved.
time_step: A real scalar `Tensor` specifying the time step during the
numerical solution of the Fokker-Planck PDE.
num_grid_points: A scalar integer `Tensor` specifying the number of
discretization points for each spatial dimension.
grid_minimums: An optional `Tensor` of size 2 containing the minimum grid
      points for PDE spatial discretization. `grid_minimums[0]` corresponds
      to the minimum spot price in the spatial grid and `grid_minimums[1]`
      corresponds to the minimum variance value.
    grid_maximums: An optional `Tensor` of size 2 containing the maximum grid
      points for PDE spatial discretization. `grid_maximums[0]` corresponds
      to the maximum spot price in the spatial grid and `grid_maximums[1]`
      corresponds to the maximum variance value.
dtype: The default dtype to use when converting values to `Tensor`s.
Returns:
A Python callable which computes the Leverage function `L(t, S(t))`. The
function accepts a scalar `Tensor` corresponding to time 't' and a real
`Tensor` of shape `[num_samples, 1]` corresponding to the spot price (S) as
    inputs and returns a real `Tensor` corresponding to the leverage function
computed at (S,t).
"""
if variance_model.dim() > 1:
raise ValueError("The default model of Leverage function doesn\'t support "
"the variance process with more than 1 factor.")
pde_grid_tol = _machine_eps(dtype)
rho = tf.convert_to_tensor(rho, dtype=dtype)
initial_spot = tf.convert_to_tensor(initial_spot, dtype=dtype)
initial_log_spot = tf.math.log(
tf.convert_to_tensor(initial_spot, dtype=dtype))
initial_variance = tf.convert_to_tensor(initial_variance, dtype=dtype)
risk_free_rate = tf.convert_to_tensor(risk_free_rate, dtype=dtype)
dividend_yield = tf.convert_to_tensor(dividend_yield, dtype=dtype)
rho = tf.convert_to_tensor(rho, dtype=dtype)
x_scale = initial_log_spot
y_scale = initial_variance
# scaled log spot = log(spot/initial_spot)
# scaled variance = variance / initial_variance
scaled_initial_point = tf.convert_to_tensor([0.0, 1.0], dtype=dtype)
# These are minimums and maximums for scaled log spot and scaled variance
if grid_minimums is None:
grid_minimums = [0.01, 0.0001]
else:
grid_minimums = tf.convert_to_tensor(grid_minimums, dtype=dtype)
grid_minimums = [grid_minimums[0] / initial_spot,
grid_minimums[1] / initial_variance]
if grid_maximums is None:
grid_maximums = [10.0, 5.0]
else:
grid_maximums = tf.convert_to_tensor(grid_maximums, dtype=dtype)
grid_maximums = [grid_maximums[0] / initial_spot,
grid_maximums[1] / initial_variance]
log_spot_min = tf.math.log(
tf.convert_to_tensor([grid_minimums[0]], dtype=dtype))
log_spot_max = tf.math.log(
tf.convert_to_tensor([grid_maximums[0]], dtype=dtype))
variance_min = tf.convert_to_tensor([grid_minimums[1]], dtype=dtype)
variance_max = tf.convert_to_tensor([grid_maximums[1]], dtype=dtype)
grid_minimums = tf.concat([log_spot_min, variance_min], axis=0)
grid_maximums = tf.concat([log_spot_max, variance_max], axis=0)
grid = _tavella_randell_nonuniform_grid(grid_minimums, grid_maximums,
scaled_initial_point, num_grid_points,
0.3, dtype)
grid = [tf.expand_dims(grid[0], axis=0), tf.expand_dims(grid[1], axis=0)]
delta_x = tf.math.reduce_min(grid[0][0, 1:] - grid[0][0, :-1])
delta_y = tf.math.reduce_min(grid[1][0, 1:] - grid[1][0, :-1])
# Initialize leverage function L(t=0, S) = 1
leverage_fn = functools.partial(
linear.interpolate, x_data=[[0.0, 1.0]], y_data=[[1.0, 1.0]], dtype=dtype)
def _initial_value():
"""Computes initial value as a delta function delta(log_spot(t), var(0))."""
log_spot, variance = tf.meshgrid(*grid)
init_value = tf.where(
tf.math.logical_and(
tf.math.abs(log_spot - scaled_initial_point[0]) <
delta_x + pde_grid_tol,
tf.math.abs(variance - scaled_initial_point[1]) <
delta_y + pde_grid_tol), 1.0 / (delta_x * delta_y * 4), 0.0)
# initial_value.shape = (1, num_grid_x, num_grid_y)
return tf.expand_dims(init_value, axis=0)
def _second_order_coeff_fn(t, grid):
log_spot = grid[0] + x_scale
variance = grid[1] * y_scale
leverage_fn_t_x = leverage_fn(log_spot)
val_xx = 0.5 * variance * leverage_fn_t_x**2
val_xy = 0.5 * (rho * tf.math.sqrt(variance) * leverage_fn_t_x *
variance_model.volatility_fn()(t, variance)) / y_scale
val_yx = val_xy
val_yy = 0.5 * variance_model.volatility_fn()(t, variance)**2 / y_scale**2
# return list of shape = (2,2). Each element has shape = grid.shape
return [[-val_yy, -val_yx], [-val_xy, -val_xx]]
def _first_order_coeff_fn(t, grid):
log_spot = grid[0] + x_scale
variance = grid[1] * y_scale
leverage_fn_t_x = leverage_fn(log_spot)
val_x = (risk_free_rate - dividend_yield -
0.5 * variance * leverage_fn_t_x**2)
val_y = variance_model.drift_fn()(t, variance)
# return list of shape = (2,). Each element has shape = grid.shape
return [val_y / y_scale, val_x]
def _compute_leverage_fn(t, coord_grid, value_grid):
log_spot = tf.expand_dims(coord_grid[0], axis=-1) + x_scale
local_volatility_values = lv_model.local_volatility_fn()(
t, tf.math.exp(log_spot))
# TODO(b/176826650): Large values represent instability. Eventually this
# should be addressed inside local vol model.
local_volatility_values = tf.where(
tf.math.abs(local_volatility_values) > 1e4, 0.0,
local_volatility_values)
# variance_given_logspot.shape = (num_grid_x, 1)
variance_given_logspot = _conditional_expected_variance_from_pde_solution(
[coord_grid[0] + x_scale, coord_grid[1] * y_scale], value_grid, dtype)(
log_spot)
leverage_fn_values = tf.math.divide_no_nan(
local_volatility_values, tf.math.sqrt(variance_given_logspot))
leverage_fn = functools.partial(
linear.interpolate,
x_data=grid[0] + x_scale,
y_data=tf.transpose(leverage_fn_values),
dtype=dtype)
return leverage_fn
@pde.boundary_conditions.neumann
def _trivial_neumann_boundary(t, location_grid):
del t, location_grid
return 0.0
leverage_fn_values = []
leverage_fn_values.append(leverage_fn(grid[0][0])[0])
# joint_density.shape = (1, num_grid_x, num_grid_y)
joint_density = _initial_value()
for tstart in np.arange(0.0, max_time, time_step):
joint_density, coord_grid, _, _ = pde.fd_solvers.solve_forward(
tstart,
tstart + time_step,
coord_grid=[grid[0][0], grid[1][0]],
values_grid=joint_density,
time_step=time_step / 10.0,
values_transform_fn=None,
inner_second_order_coeff_fn=_second_order_coeff_fn,
inner_first_order_coeff_fn=_first_order_coeff_fn,
zeroth_order_coeff_fn=None,
boundary_conditions=[[
_trivial_neumann_boundary, _trivial_neumann_boundary
], [_trivial_neumann_boundary, _trivial_neumann_boundary]],
dtype=dtype)
joint_density = tf.math.maximum(joint_density, 0.0)
area_under_joint_density = _two_d_integration(
[grid[0][0, :], grid[1][0, :]], joint_density)
joint_density = joint_density / area_under_joint_density
# TODO(b/176826743): Perform fixed point iteration instead of one step
# update
leverage_fn = _compute_leverage_fn(
tf.convert_to_tensor(tstart + time_step), coord_grid, joint_density)
leverage_fn_values.append(leverage_fn(grid[0][0, :] + x_scale)[0, :])
# leverage_fn_values.shape = (num_pde_timesteps, num_grid_x,)
leverage_fn_values = tf.convert_to_tensor(leverage_fn_values, dtype=dtype)
times = tf.range(0.0, max_time + time_step, time_step, dtype=dtype)
def _return_fn(t, spot):
leverage_fn_interpolator = (
math.interpolation.interpolation_2d.Interpolation2D(
x_data=[times],
y_data=tf.expand_dims(
tf.repeat(grid[0] + x_scale, times.shape[0], axis=0), axis=0),
z_data=tf.expand_dims(leverage_fn_values, axis=0),
dtype=dtype))
return leverage_fn_interpolator.interpolate(t, tf.math.log(spot))
return _return_fn
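# A minimal end-to-end sketch (all inputs hypothetical; `my_variance_process`
# stands for any one-factor `ItoProcess` exposing `drift_fn`/`volatility_fn`,
# and `surface` for a volatility-surface object as described above):
#
#   lsv = LocalStochasticVolatilityModel.from_volatility_surface(
#       implied_volatility_surface=surface,
#       variance_process=my_variance_process,
#       initial_spot=100.0, initial_variance=0.04,
#       rho=-0.5, risk_free_rate=0.01, dtype=tf.float64)
#   paths = lsv.sample_paths(times=[0.5, 1.0], num_samples=10000,
#                            initial_state=[100.0, 0.04])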
| [
"[email protected]"
] | |
fe6aaab1a8339dd6dc8d16d83021eb02079bdd3c | cc352b04dc8eb5033399a8925274f23be51ae3bf | /leonardo/__init__.py | 778f36dfa838719fd5e13576cde4c652cb4a8cd6 | [
"BSD-2-Clause"
] | permissive | lukaszle/django-leonardo | 1dcb16f0155495d4ef0e52f667450ee53f2b58be | a54dd0822c3d8fbf4a52547d0ad3ae17c04b88b7 | refs/heads/master | 2021-01-18T09:36:08.203184 | 2016-02-01T20:25:37 | 2016-02-01T20:25:37 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 520 | py |
default_app_config = 'leonardo.apps.LeonardoConfig'
__import__('pkg_resources').declare_namespace(__name__)
try:
from leonardo.base import leonardo # noqa
except ImportError:
import warnings
def simple_warn(message, category, filename, lineno, file=None, line=None):
return '%s: %s' % (category.__name__, message)
msg = ("Could not import Leonardo dependencies. "
"This is normal during installation.\n")
warnings.formatwarning = simple_warn
warnings.warn(msg, Warning)
| [
"[email protected]"
] | |
2d6fced001982c85bda4c3008e1f9051ce24ffda | 2f98aa7e5bfc2fc5ef25e4d5cfa1d7802e3a7fae | /python/python_14387.py | 1a3c8e77f0bda5b6a8b9a9699422269adf3d924c | [] | no_license | AK-1121/code_extraction | cc812b6832b112e3ffcc2bb7eb4237fd85c88c01 | 5297a4a3aab3bb37efa24a89636935da04a1f8b6 | refs/heads/master | 2020-05-23T08:04:11.789141 | 2015-10-22T19:19:40 | 2015-10-22T19:19:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 83 | py | # Python Celery Task.update_state potentially blocks forever
BROKER_POOL_LIMIT=100
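# A hedged sketch of where such a setting would live (project name and broker
# URL are assumptions): either as the uppercase config key above, or directly
# on the Celery app object:
#
#   from celery import Celery
#   app = Celery('proj', broker='redis://localhost:6379/0')
#   app.conf.broker_pool_limit = 100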
| [
"[email protected]"
] | |
c86b19c4c30e2fabbe0d81972a65af9e5be88efe | de6dc75873bd8615d22dd25c51f2fe3bc82cd7f8 | /069.猜数字游戏.py | 07de0ac470912f9fd6bb2e924865ff59a1419c0a | [] | no_license | cuimin07/LeetCode-test | b9e87b4e353b09dfa84f62c24c2950d57656fff2 | 8f02b78dcbdefa154bb52c14a271998361e92a86 | refs/heads/master | 2020-08-14T13:37:27.799071 | 2020-01-13T03:11:40 | 2020-01-13T03:11:40 | 215,178,295 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,670 | py | '''
You are playing the Bulls and Cows guessing game with your friend: you write
down a number and ask your friend to guess it.
Each time your friend guesses, you give a hint telling them how many digits
match the secret number exactly in both digit and position (called "bulls"),
and how many digits match the secret number but sit in the wrong position
(called "cows"). Your friend keeps guessing, using the hints, until the
secret number is found.
Write a function that, given the secret number and your friend's guess,
returns the hint, using A for bulls and B for cows.
Please note that both the secret number and the guess may contain duplicate
digits.
Example 1:
Input: secret = "1807", guess = "7810"
Output: "1A3B"
Explanation: 1 bull and 3 cows. The bull is 8; the cows are 0, 1 and 7.
Example 2:
Input: secret = "1123", guess = "0111"
Output: "1A1B"
Explanation: The first 1 in the guess is a bull; the second or third 1 can be
counted as a cow.
Note: You may assume that the secret number and the guess contain only digits
and are always of equal length.
'''
# Answer:
class Solution:
def getHint(self, secret: str, guess: str) -> str:
A,B=0,0
dic1,dic2={},{}
siz=len(secret)
for i in range(siz):
if secret[i]==guess[i]:
A+=1
else:
if secret[i] not in dic1:
dic1[secret[i]]=1
else:
dic1[secret[i]]+=1
if guess[i] not in dic2:
dic2[guess[i]]=1
else:
dic2[guess[i]]+=1
for x in dic1:
if x in dic2:
B+=min(dic1[x],dic2[x])
return str(A)+'A'+str(B)+'B'
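# Quick self-check reproducing the two examples from the problem statement:
if __name__ == '__main__':
    print(Solution().getHint("1807", "7810"))  # expected "1A3B"
    print(Solution().getHint("1123", "0111"))  # expected "1A1B"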
| [
"[email protected]"
] | |
e686b01403ab17049ad212cde428b766ca9b55f6 | 973b40c806bfcfdfbe4258b3decd9e52f8d4b574 | /vmware_exporter/helpers.py | e6df9262e3710f638050d836a6405a66c56421ae | [
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | gitter-badger/vmware_exporter | 467507e83551134f2e89b7fb3125bccb949113d4 | d805dde7ff768d55e96719fcd727a6f4b5e81dc7 | refs/heads/master | 2020-04-13T17:06:59.370635 | 2018-12-24T05:19:48 | 2018-12-24T05:19:48 | 163,339,090 | 0 | 0 | null | 2018-12-27T21:53:31 | 2018-12-27T21:53:31 | null | UTF-8 | Python | false | false | 1,375 | py | from pyVmomi import vmodl
def batch_fetch_properties(content, obj_type, properties):
view_ref = content.viewManager.CreateContainerView(
container=content.rootFolder,
type=[obj_type],
recursive=True
)
PropertyCollector = vmodl.query.PropertyCollector
# Describe the list of properties we want to fetch for obj_type
property_spec = PropertyCollector.PropertySpec()
property_spec.type = obj_type
property_spec.pathSet = properties
# Describe where we want to look for obj_type
traversal_spec = PropertyCollector.TraversalSpec()
traversal_spec.name = 'traverseEntities'
traversal_spec.path = 'view'
traversal_spec.skip = False
traversal_spec.type = view_ref.__class__
obj_spec = PropertyCollector.ObjectSpec()
obj_spec.obj = view_ref
obj_spec.skip = True
obj_spec.selectSet = [traversal_spec]
filter_spec = PropertyCollector.FilterSpec()
filter_spec.objectSet = [obj_spec]
filter_spec.propSet = [property_spec]
props = content.propertyCollector.RetrieveContents([filter_spec])
results = {}
for obj in props:
properties = {}
properties['obj'] = obj.obj
properties['id'] = obj.obj._moId
for prop in obj.propSet:
properties[prop.name] = prop.val
results[obj.obj._moId] = properties
return results
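# A minimal usage sketch (host and credentials are assumptions, not part of
# this module):
#
#   from pyVim.connect import SmartConnectNoSSL
#   from pyVmomi import vim
#
#   si = SmartConnectNoSSL(host='vcenter.local', user='user', pwd='secret')
#   vms = batch_fetch_properties(si.RetrieveContent(), vim.VirtualMachine,
#                                ['name', 'runtime.powerState'])
#   # vms['vm-42'] -> {'obj': ..., 'id': 'vm-42', 'name': ..., ...}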
| [
"[email protected]"
] | |
7fe4ba0f5ad62a80601a216373746ad51ac9e09f | 2e00398c4b77ab6e1996dbbefa167e13a8ad40a9 | /products/urls.py | fab0250699fb90757ba44b5592f3d12ac5e94b7e | [] | no_license | cleliofavoccia/PurBeurre | d754b83ed28b1240447243f149080058a60ccdfb | e2b5a51fbd91412e68ddb1c3c785713c7988cc41 | refs/heads/main | 2023-03-20T11:06:32.466520 | 2021-03-12T16:02:22 | 2021-03-12T16:02:22 | 331,650,830 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 266 | py | """URLS of products app"""
from django.urls import path
from . import views
app_name = 'products'
urlpatterns = [
path('<int:pk>/', views.ProductDetailView.as_view(), name='product'),
path('results/', views.ResultsListView.as_view(), name='results')
]
| [
"[email protected]"
] | |
d7347ded155d726c1280eaa8b4a1b75779976483 | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_169/ch167_2020_06_19_15_17_32_858494.py | a7645291dc1cbd46804f0758f0845bbdbddb6e65 | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 436 | py | def bairro_mais_custoso(dicionario):
    # Sums the costs (entries from index 6 onwards) for each neighborhood
    # and returns the key with the largest total.
    totais = {}
    for bairro in dicionario:
        total = 0
        for gasto in dicionario[bairro][6:]:
            total += gasto
        totais[bairro] = total
    return max(totais, key=totais.get)
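# Example with made-up data (entries from index 6 onwards are the costs
# summed above):
if __name__ == '__main__':
    dados = {
        'centro': [0, 0, 0, 0, 0, 0, 10, 20, 30],  # total 60
        'jardins': [0, 0, 0, 0, 0, 0, 50, 60],     # total 110
    }
    print(bairro_mais_custoso(dados))  # -> 'jardins'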
| [
"[email protected]"
] | |
5b04d15420f559f7c75a1cf772a31cb8aa898403 | 163bbb4e0920dedd5941e3edfb2d8706ba75627d | /Code/CodeRecords/2566/60627/239561.py | e69aa50e41e94e98b56e97cd11efa5bf495bf257 | [] | no_license | AdamZhouSE/pythonHomework | a25c120b03a158d60aaa9fdc5fb203b1bb377a19 | ffc5606817a666aa6241cfab27364326f5c066ff | refs/heads/master | 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 441 | py | # 20
n = int(input())
s = '['
for i in range(n):
inp = input()
s += ('[' + inp + '],')
s = s[:-1] + ']'
from ast import literal_eval
num = literal_eval(s)
l = []
def f(num, i, j, t):
    # Enumerates every monotone (down/right) path from (0, 0) to the
    # bottom-right corner, accumulating the running path sum in t.
    lis = range(len(num))
    global l
    t += num[i][j]
    if i == len(num) - 1 and j == len(num) - 1:
        l.append(t)
        return
    if i + 1 in lis:
        f(num, i + 1, j, t)
    if j + 1 in lis:
        f(num, i, j + 1, t)
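# For reference, an equivalent bottom-up DP sketch (polynomial time instead of
# enumerating every path as above; assumes the same square grid):
#
#   dp = [row[:] for row in num]
#   for i in range(len(num)):
#       for j in range(len(num)):
#           if i == 0 and j == 0:
#               continue
#           cand = []
#           if i > 0: cand.append(dp[i - 1][j])
#           if j > 0: cand.append(dp[i][j - 1])
#           dp[i][j] += min(cand)
#   print(dp[-1][-1])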
f(num,0,0,0)
print(min(l)) | [
"[email protected]"
] | |
c1da5d4f2e2d43b82c977f498ea155098ae2e99d | c77a40408bc40dc88c466c99ab0f3522e6897b6a | /Programming_basics/Exercise_1/VacationBooksList.py | 3ce9b5265267af70f22eb065be20cff43206264f | [] | no_license | vbukovska/SoftUni | 3fe566d8e9959d390a61a4845381831929f7d6a3 | 9efd0101ae496290313a7d3b9773fd5111c5c9df | refs/heads/main | 2023-03-09T17:47:20.642393 | 2020-12-12T22:14:27 | 2021-02-16T22:14:37 | 328,805,705 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 171 | py | book_pages = int(input())
reading_speed = int(input())
time_limit = int(input())
tot_hours = book_pages / reading_speed
per_day = tot_hours / time_limit
print(per_day)
| [
"[email protected]"
] | |
1189ee43148ae71e4e63174d6f48d775698a66d8 | 235c4b3aa630737b379050a420923efadd432da8 | /1stRound/Easy/599 Minimum Index Sum of Two Lists/Heap.py | 509359ed759a98a80c7b55d98f9e9ee6e90ae456 | [
"MIT"
] | permissive | ericchen12377/Leetcode-Algorithm-Python | 4e5dc20062280ef46194da5480600b2459fd89f8 | eb58cd4f01d9b8006b7d1a725fc48910aad7f192 | refs/heads/master | 2023-02-22T22:43:55.612650 | 2021-01-28T04:00:20 | 2021-01-28T04:00:20 | 258,058,468 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 971 | py | import heapq
class Solution:
def findRestaurant(self, list1, list2):
"""
:type list1: List[str]
:type list2: List[str]
:rtype: List[str]
"""
        # Map each restaurant in list1 to [index in list1, index in list2];
        # 100000 is a sentinel meaning "not (yet) seen in list2", so common
        # restaurants always end up with a strictly smaller index sum.
        interest = dict()
        for i, l in enumerate(list1):
            interest[l] = [i, 100000]
        for j, l in enumerate(list2):
            if l in interest:
                interest[l][1] = j
heap = [(sum(v), l) for l, v in interest.items()]
heapq.heapify(heap)
res = []
smallest = -1
while heap:
cursum, curl = heapq.heappop(heap)
if smallest == -1:
smallest = cursum
if smallest == cursum:
res.append(curl)
else:
break
return res
list1 = ["Shogun", "Tapioca Express", "Burger King", "KFC"]
list2 = ["Piatti", "The Grill at Torrey Pines", "Hungry Hunter Steakhouse", "Shogun"]
p = Solution()
print(p.findRestaurant(list1,list2)) | [
"[email protected]"
] | |
6d36d7e25b88308e58d0b8062d820079f9529fc8 | 8f8ac99fd3ed9ceb36778b404f6fdd0b6899d3f4 | /pyobjc-framework-Metal/PyObjCTest/test_mtlaccelerationstructuretypes.py | c3bd2771327be119bf00faa1fd5e34797066345f | [
"MIT"
] | permissive | strogo/pyobjc | ac4201c7742eb75348328eeecb7eedf4e3458de3 | 2579c5eaf44b0c5af77ee195c417d2c65e72dfda | refs/heads/master | 2023-07-13T00:41:56.448005 | 2021-08-24T06:42:53 | 2021-08-24T06:42:53 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,162 | py | import Metal
from PyObjCTools.TestSupport import TestCase
class TestMTLAccelerationStructureTypes(TestCase):
def test_structs(self):
self.assertNotHasAttr(Metal, "MTLPackedFloat3")
self.assertNotHasAttr(Metal, "MTLPackedFloat4x3")
self.assertNotHasAttr(Metal, "MTLAccelerationStructureInstanceDescriptor")
# v = Metal.MTLPackedFloat3()
# self.assertIsInstance(v.x, float)
# self.assertIsInstance(v.y, float)
# self.assertIsInstance(v.z, float)
# self.asssertNotHasattr(v, "elements")
# v = Metal.MTLPackedFloat4x3()
# self.assertHasattr(v, "columns")
# v = Metal.MTLAccelerationStructureInstanceDescriptor()
# self.assertIsInstance(v.transformationMatrix, Metal.MTLPackedFloat4x3)
# self.assertIsInstance(v.flags, int)
# self.assertIsInstance(v.mask, int)
# self.assertIsInstance(v.intersectionFunctionTableOffset, int)
# self.assertIsInstance(v.accelerationStructureIndex, int)
def test_functions(self):
# MTLPackedFloat3 is not available (See above)
self.assertNotHasAttr(Metal, "MTLPackedFloat3Make")
| [
"[email protected]"
] | |
04e5c29fd3536e5ffc4f03ada2434ad4101b1362 | e9d7689655887232b652ef369c7eaf3a1ef06955 | /old/ePhy/in vivo multi/convert v3.py | 30b7047a231015b6843c8b5de6d3593611be8041 | [] | no_license | Gilles-D/main | 81ac13cdb1614eb0c82afb3d0e847a30b78cad30 | f3714d2cbe4aae22ab36f4f94c94067159270820 | refs/heads/master | 2023-08-31T06:20:48.554237 | 2023-08-30T20:33:27 | 2023-08-30T20:33:27 | 222,518,250 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,954 | py | # -*- coding: utf-8 -*-
"""
Created on Thu Mar 28 11:57:30 2019
@author: lspaeth (modified by flareno)
Created on Mon Nov 12 14:14:18 2018
This class loads HdF5 recordings from MCS acquisition system as matrices of shape ((channel,data))
Allows to load Raw signals
+ associated time vectors
+ associated sampling rates
All in Volts and Seconds
Hope it will work
Then all you have to do is to load HdF5IO from eletroPy package; init class with smthg = HdF5IO(filepath)
After that u can load every instance with associated function, they are all described below.
"""
import matplotlib.pyplot as plt
import numpy as np
class HdF5IO:
def __init__(self,filepath):
import h5py as h5
file_ = h5.File(filepath,'r')
self.file = file_['Data'] #Loads first node
#----------RAW RECORDINGS---------------------------------------------------------------------------------------------
def raw_record(self): #Gets Raw Records as matrix ((channel,data))
raw = self.file['Recording_0']['AnalogStream']['Stream_0']['ChannelData']
import numpy as np
raw_record = np.zeros((raw.shape[0],raw.shape[1]))
raw_conv = float(self.file['Recording_0']['AnalogStream']['Stream_0']['InfoChannel'][0][10]) #Scaling Factor
for i in range(raw.shape[0]): #Stores data in new matrix
raw_record[i,:] = raw[i,:]/raw_conv #From pV to V
return raw_record
def raw_time(self): #Gets time vector for raw records
import numpy as np
raw_tick = int(self.file['Recording_0']['AnalogStream']['Stream_0']['InfoChannel'][0][9])/1000000.0 #exp6 to pass from us to s
raw_length = len(self.file['Recording_0']['AnalogStream']['Stream_0']['ChannelData'][0])
raw_time = np.arange(0,raw_length*raw_tick,raw_tick)
return raw_time
def raw_sampling_rate(self): #Gets sampling rate
raw_tick = float(self.file['Recording_0']['AnalogStream']['Stream_0']['InfoChannel'][0][9])/1000000.0
return 1.0/raw_tick #In Hz
#---------CONVERT H5 to RAW BINARY-----------------------------------------------------------------------------------
def convert_folder(folderpath, newpath, data_type='raw'):
import os, re
import numpy as np
list_dir = os.listdir(folderpath)
# folderpath = folderpath
# newpath = newpath
concatenated_file=[]
for file in list_dir:
if file.endswith('.h5'):
print ('Converting ' + file + '...')
new_path = '%s/%s'%(folderpath,file)
data = HdF5IO(new_path)
traces = data.raw_record()
concatenated_file.append(traces)
print ('Conversion DONE')
else:
print (file + ' is not an h5 file, will not be converted')
return concatenated_file
# new_path = '%s/'%(folderpath)
    # Unreachable after the return above, kept for reference:
    # data = HdF5IO(new_path)
    # traces = data.raw_record()
# sampling_rate = int(data.raw_sampling_rate())
# name = re.sub('\.h5$', '', "concatenated")
# file_save = '%s/%s_%sHz.rbf'%(newpath,name,sampling_rate)
# with open(file_save, mode='wb') as file :
# traces.tofile(file,sep='')
# print ('Whole directory has been converted successfully')
if __name__ == '__main__':
folderpath = r'C:/Users/Gilles.DELBECQ/Desktop/In vivo Février 2022/H5/15-02'
newpath = r'C:\Users\Gilles.DELBECQ\Desktop\In vivo Février 2022\RBF/15-02'
a = convert_folder(folderpath, newpath)
array_final = np.array([])
array_final = np.concatenate(a,axis=0)
file_save = 'C:/Users/Gilles.DELBECQ/Desktop/In vivo Février 2022/H5/15-02/concatenated.rbf'
with open(file_save, mode='wb') as file :
array_final.tofile(file,sep='')
| [
"[email protected]"
] | |
2c8834ff912fd0b52c11a67b58347b14e20a59c2 | 18310e7bb4e7c46d7d3fd51046a5bd92ca5f9c48 | /gaping/parameters.py | 6509d009f0bbb6100dfbd4420f7302283a6bba73 | [] | no_license | shawwn/gaping | c91b6b4b2e2ef2ab6b868403f02e0e237b7b2761 | 41d477c79814b37f8a09715433c0c489a56c92d2 | refs/heads/master | 2023-03-15T05:42:37.086420 | 2021-03-16T21:21:01 | 2021-03-16T21:21:01 | 323,994,300 | 11 | 1 | null | null | null | null | UTF-8 | Python | false | false | 163 | py | import gin
import gin.tf.external_configurables
import tensorflow as tf
from .util import EasyDict
@gin.configurable
def options(**kws):
return EasyDict(kws)
| [
"[email protected]"
] | |
48c663aa2a5710c161b3eb746a960ff8252ec051 | 709b1549033c9a547c67ee507fdc10b7e5d234ad | /test/test_worker_pools_api.py | a9689158dab784197bf9245cf0d64ca7dd1eb230 | [
"Apache-2.0"
] | permissive | cvent/octopus-deploy-api-client | d622417286b348c0be29678a86005a809c77c005 | 0e03e842e1beb29b132776aee077df570b88366a | refs/heads/master | 2020-12-05T14:17:46.229979 | 2020-01-07T05:06:58 | 2020-01-07T05:06:58 | 232,135,963 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 5,971 | py | # coding: utf-8
"""
Octopus Server API
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501
OpenAPI spec version: 2019.6.7+Branch.tags-2019.6.7.Sha.aa18dc6809953218c66f57eff7d26481d9b23d6a
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import octopus_deploy_swagger_client
from octopus_deploy_swagger_client.worker_pools_api import WorkerPoolsApi # noqa: E501
from octopus_deploy_swagger_client.rest import ApiException
class TestWorkerPoolsApi(unittest.TestCase):
"""WorkerPoolsApi unit test stubs"""
def setUp(self):
        self.api = octopus_deploy_swagger_client.worker_pools_api.WorkerPoolsApi() # noqa: E501
def tearDown(self):
pass
def test_create_response_descriptor_worker_pools_worker_pool_worker_pool_resource(self):
"""Test case for create_response_descriptor_worker_pools_worker_pool_worker_pool_resource
Create a WorkerPoolResource # noqa: E501
"""
pass
def test_create_response_descriptor_worker_pools_worker_pool_worker_pool_resource_spaces(self):
"""Test case for create_response_descriptor_worker_pools_worker_pool_worker_pool_resource_spaces
Create a WorkerPoolResource # noqa: E501
"""
pass
def test_custom_action_response_descriptor_octopus_server_web_api_actions_sort_worker_pools_responder(self):
"""Test case for custom_action_response_descriptor_octopus_server_web_api_actions_sort_worker_pools_responder
"""
pass
def test_custom_action_response_descriptor_octopus_server_web_api_actions_sort_worker_pools_responder_spaces(self):
"""Test case for custom_action_response_descriptor_octopus_server_web_api_actions_sort_worker_pools_responder_spaces
"""
pass
def test_custom_query_response_descriptor_octopus_server_web_api_actions_infrastructure_summary_worker_pools_summary_responder(self):
"""Test case for custom_query_response_descriptor_octopus_server_web_api_actions_infrastructure_summary_worker_pools_summary_responder
"""
pass
def test_custom_query_response_descriptor_octopus_server_web_api_actions_infrastructure_summary_worker_pools_summary_responder_spaces(self):
"""Test case for custom_query_response_descriptor_octopus_server_web_api_actions_infrastructure_summary_worker_pools_summary_responder_spaces
"""
pass
def test_custom_query_response_descriptor_octopus_server_web_api_actions_worker_pools_workers_responder(self):
"""Test case for custom_query_response_descriptor_octopus_server_web_api_actions_worker_pools_workers_responder
"""
pass
def test_custom_query_response_descriptor_octopus_server_web_api_actions_worker_pools_workers_responder_spaces(self):
"""Test case for custom_query_response_descriptor_octopus_server_web_api_actions_worker_pools_workers_responder_spaces
"""
pass
def test_delete_on_background_response_descriptor_worker_pools_worker_pool_worker_pool_resource(self):
"""Test case for delete_on_background_response_descriptor_worker_pools_worker_pool_worker_pool_resource
Delete a WorkerPoolResource by ID # noqa: E501
"""
pass
def test_delete_on_background_response_descriptor_worker_pools_worker_pool_worker_pool_resource_spaces(self):
"""Test case for delete_on_background_response_descriptor_worker_pools_worker_pool_worker_pool_resource_spaces
Delete a WorkerPoolResource by ID # noqa: E501
"""
pass
def test_index_response_descriptor_worker_pools_worker_pool_worker_pool_resource(self):
"""Test case for index_response_descriptor_worker_pools_worker_pool_worker_pool_resource
Get a list of WorkerPoolResources # noqa: E501
"""
pass
def test_index_response_descriptor_worker_pools_worker_pool_worker_pool_resource_spaces(self):
"""Test case for index_response_descriptor_worker_pools_worker_pool_worker_pool_resource_spaces
Get a list of WorkerPoolResources # noqa: E501
"""
pass
def test_list_all_response_descriptor_worker_pools_worker_pool_worker_pool_resource(self):
"""Test case for list_all_response_descriptor_worker_pools_worker_pool_worker_pool_resource
Get a list of WorkerPoolResources # noqa: E501
"""
pass
def test_list_all_response_descriptor_worker_pools_worker_pool_worker_pool_resource_spaces(self):
"""Test case for list_all_response_descriptor_worker_pools_worker_pool_worker_pool_resource_spaces
Get a list of WorkerPoolResources # noqa: E501
"""
pass
def test_load_response_descriptor_worker_pools_worker_pool_worker_pool_resource(self):
"""Test case for load_response_descriptor_worker_pools_worker_pool_worker_pool_resource
Get a WorkerPoolResource by ID # noqa: E501
"""
pass
def test_load_response_descriptor_worker_pools_worker_pool_worker_pool_resource_spaces(self):
"""Test case for load_response_descriptor_worker_pools_worker_pool_worker_pool_resource_spaces
Get a WorkerPoolResource by ID # noqa: E501
"""
pass
def test_modify_response_descriptor_worker_pools_worker_pool_worker_pool_resource(self):
"""Test case for modify_response_descriptor_worker_pools_worker_pool_worker_pool_resource
Modify a WorkerPoolResource by ID # noqa: E501
"""
pass
def test_modify_response_descriptor_worker_pools_worker_pool_worker_pool_resource_spaces(self):
"""Test case for modify_response_descriptor_worker_pools_worker_pool_worker_pool_resource_spaces
Modify a WorkerPoolResource by ID # noqa: E501
"""
pass
if __name__ == '__main__':
unittest.main()
| [
"[email protected]"
] | |
dd65e49ac4e5d72ad391c44fe86fbec7470da58a | 754f71f70dfd6a22944d8d872c6d2f1d6983ac14 | /sensirion_shdlc_driver/device.py | 26dc53bb4e13555ba24e995327b14f5e2a84b6c7 | [
"LicenseRef-scancode-unknown-license-reference",
"BSD-3-Clause"
] | permissive | Sensirion/python-shdlc-driver | 052685da8db5629fa5929da65000210db82358e7 | 31e9683c27004ee05edf89996d656bc50f5bdb3a | refs/heads/master | 2021-06-10T10:35:47.299481 | 2021-03-19T08:47:12 | 2021-03-19T08:47:12 | 144,961,065 | 6 | 1 | null | null | null | null | UTF-8 | Python | false | false | 12,213 | py | # -*- coding: utf-8 -*-
# (c) Copyright 2019 Sensirion AG, Switzerland
from __future__ import absolute_import, division, print_function
from .device_base import ShdlcDeviceBase
from .commands.device_info import ShdlcCmdGetProductType, \
ShdlcCmdGetProductName, ShdlcCmdGetArticleCode, ShdlcCmdGetSerialNumber, \
ShdlcCmdGetProductSubType
from .commands.device_version import ShdlcCmdGetVersion
from .commands.error_state import ShdlcCmdGetErrorState
from .commands.device_reset import ShdlcCmdDeviceReset
from .commands.slave_address import ShdlcCmdGetSlaveAddress, \
ShdlcCmdSetSlaveAddress
from .commands.baudrate import ShdlcCmdGetBaudrate, ShdlcCmdSetBaudrate
from .commands.reply_delay import ShdlcCmdGetReplyDelay, ShdlcCmdSetReplyDelay
from .commands.system_up_time import ShdlcCmdGetSystemUpTime
from .commands.factory_reset import ShdlcCmdFactoryReset
import logging
log = logging.getLogger(__name__)
class ShdlcDevice(ShdlcDeviceBase):
"""
Generic SHDLC device, providing only common SHDLC commands. This class is
intended only to communicate with devices which do not provide a
corresponding device driver (yet). With this class you can for example
read the serial number of a device even if no device specific driver
exists. But if there exists a device specific driver, you should always
use it instead of this driver.
This is a low-level driver which just provides all SHDLC commands as Python
methods. Typically, calling a method sends one SHDLC request to the device
and interprets its response. There is no higher level functionality
available, please look for other drivers if you need a higher level
interface.
    There is no (or very little) caching functionality in this driver. For example,
if you call
:py:meth:`~sensirion_shdlc_driver.device.ShdlcDevice.get_serial_number()`
100 times, it will send the command 100 times over the SHDLC interface to
the device. This makes the driver (nearly) stateless.
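
    Example (a minimal sketch; the port name, baudrate and slave address are
    assumptions -- adjust them to your setup)::

        from sensirion_shdlc_driver import ShdlcSerialPort, ShdlcConnection, \
            ShdlcDevice

        with ShdlcSerialPort(port='/dev/ttyUSB0', baudrate=115200) as port:
            device = ShdlcDevice(ShdlcConnection(port), slave_address=0)
            print("Serial number: {}".format(device.get_serial_number()))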
"""
def __init__(self, connection, slave_address):
"""
Create an SHDLC device instance on an SHDLC connection.
.. note:: This constructor does not communicate with the device, so
it's possible to instantiate an object even if the device is
not connected or powered yet.
:param ~sensirion_shdlc_driver.connection.ShdlcConnection connection:
The connection used for the communication.
:param byte slave_address:
The address of the device.
"""
super(ShdlcDevice, self).__init__(connection, slave_address)
def get_product_type(self, as_int=False):
"""
Get the product type. The product type (sometimes also called "device
type") can be used to detect what kind of SHDLC product is connected.
:param bool as_int: If ``True``, the product type is returned as an
integer, otherwise as a string of hexadecimal
digits (default).
:return: The product type as an integer or string of hexadecimal
digits.
:rtype: string/int
"""
product_type = self.execute(ShdlcCmdGetProductType())
if as_int:
product_type = int(product_type, 16)
return product_type
def get_product_subtype(self):
"""
Get the product subtype. Some product types exist in multiple slightly
        different variants; this command allows you to determine the exact variant
of the connected device. Sometimes this is called "device subtype".
.. note:: This command is not supported by every product type.
:return: The product subtype as a byte (the interpretation depends on
the connected product type).
:rtype: byte
"""
return self.execute(ShdlcCmdGetProductSubType())
def get_product_name(self):
"""
Get the product name of the device.
.. note:: This command is not supported by every product type.
:return: The product name as an ASCII string.
:rtype: string
"""
return self.execute(ShdlcCmdGetProductName())
def get_article_code(self):
"""
Get the article code of the device.
.. note:: This command is not supported by every product type.
:return: The article code as an ASCII string.
:rtype: string
"""
return self.execute(ShdlcCmdGetArticleCode())
def get_serial_number(self):
"""
Get the serial number of the device.
:return: The serial number as an ASCII string.
:rtype: string
"""
return self.execute(ShdlcCmdGetSerialNumber())
def get_version(self):
"""
Get the version of the device firmware, hardware and SHDLC protocol.
:return: The device version as a Version object.
:rtype: Version
"""
return self.execute(ShdlcCmdGetVersion())
def get_error_state(self, clear=True, as_exception=False):
"""
Get and optionally clear the device error state and the last error. The
state and error code interpretation depends on the connected device
type.
:param bool clear:
If ``True``, the error state on the device gets cleared.
:param bool as_exception:
If ``True``, the error state is returned as an
:py:class:`~sensirion_shdlc_driver.errors.ShdlcDeviceError`
object instead of a byte.
:return: The device state as a 32-bit unsigned integer containing all
error flags, and the last error which occurred on the device.
If ``as_exception`` is ``True``, it's returned as an
:py:class:`~sensirion_shdlc_driver.errors.ShdlcDeviceError`
object or ``None``, otherwise as a byte.
:rtype: int, byte/ShdlcDeviceError/None
"""
state, error = self.execute(ShdlcCmdGetErrorState(clear=clear))
if as_exception:
error = self._get_device_error(error)
return state, error
def get_slave_address(self):
"""
Get the SHDLC slave address of the device.
.. note:: See also the property
:py:attr:`~sensirion_shdlc_driver.device.ShdlcDevice.slave_address`
which returns the device's slave address without sending a
command. This method really sends a command to the device,
even though the slave address is actually already known by
this object.
:return: The slave address of the device.
:rtype: byte
"""
return self.execute(ShdlcCmdGetSlaveAddress())
def set_slave_address(self, slave_address, update_driver=True):
"""
Set the SHDLC slave address of the device.
.. note:: The slave address is stored in non-volatile memory of the
device and thus persists after a device reset. So the next
time connecting to the device, you have to use the new
address.
.. warning:: When changing the address of a slave, make sure there
isn't already a slave with that address on the same bus!
In that case you would get communication issues which can
only be fixed by disconnecting one of the slaves.
:param byte slave_address:
The new slave address [0..254]. The address 255 is reserved for
broadcasts.
:param bool update_driver:
If ``True``, the property
:py:attr:`~sensirion_shdlc_driver.device.ShdlcDevice.slave_address`
of this object is also updated with the new address. This is
needed to allow further communication with the device, as its
address has changed.
"""
self.execute(ShdlcCmdSetSlaveAddress(slave_address))
if update_driver:
self._slave_address = slave_address
def get_baudrate(self):
"""
Get the SHDLC baudrate of the device.
.. note:: This method really sends a command to the device, even though
the baudrate is already known by the used
:py:class:`~sensirion_shdlc_driver.port.ShdlcPort` object.
:return: The baudrate of the device [bit/s].
:rtype: int
"""
return self.execute(ShdlcCmdGetBaudrate())
def set_baudrate(self, baudrate, update_driver=True):
"""
Set the SHDLC baudrate of the device.
.. note:: The baudrate is stored in non-volatile memory of the
device and thus persists after a device reset. So the next
time connecting to the device, you have to use the new
baudrate.
.. warning:: If you pass ``True`` to the argument ``update_driver``,
                     the baudrate of the underlying
:py:class:`~sensirion_shdlc_driver.port.ShdlcPort` object
is changed. As the baudrate applies to the whole bus (with
all its slaves), you might no longer be able to
communicate with other slaves. Generally you should change
the baudrate of all slaves consecutively, and only set
``update_driver`` to ``True`` the last time.
:param int baudrate:
The new baudrate. See device documentation for a list of supported
baudrates. Many devices support the baudrates 9600, 19200 and
115200.
:param bool update_driver:
            If ``True``, the baudrate of the
            :py:class:`~sensirion_shdlc_driver.port.ShdlcPort` object is also
            updated with the new baudrate. This is needed to allow further
communication with the device, as its baudrate has changed.
"""
self.execute(ShdlcCmdSetBaudrate(baudrate))
if update_driver:
self._connection.port.bitrate = baudrate
def get_reply_delay(self):
"""
Get the SHDLC reply delay of the device.
See
:py:meth:`~sensirion_shdlc_driver.device.ShdlcDevice.set_reply_delay()`
for details.
:return: The reply delay of the device [μs].
:rtype: byte
"""
return self.execute(ShdlcCmdGetReplyDelay())
def set_reply_delay(self, reply_delay):
"""
Set the SHDLC reply delay of the device.
        The reply delay allows you to increase the minimum response time of the
        slave to a given value in microseconds. This is needed for RS485
masters which require some time to switch from sending to receiving.
If the slave starts sending the response while the master is still
driving the bus lines, a conflict on the bus occurs and communication
fails. If you use such a slow RS485 master, you can increase the reply
delay of all slaves to avoid this issue.
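
        A minimal sketch (the value 100 μs is an assumption; pick it to match
        your RS485 master's switching time)::

            device.set_reply_delay(100)
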
:param byte reply_delay: The new reply delay [μs].
"""
self.execute(ShdlcCmdSetReplyDelay(reply_delay))
def get_system_up_time(self):
"""
Get the system up time of the device.
:return: The time since the last power-on or device reset [s].
:rtype: int
"""
return self.execute(ShdlcCmdGetSystemUpTime())
def device_reset(self):
"""
Execute a device reset (reboot firmware, similar to power cycle).
"""
self.execute(ShdlcCmdDeviceReset())
def factory_reset(self):
"""
Perform a factory reset (restore the off-the-shelf factory
configuration).
.. warning:: This resets any configuration done after leaving the
factory! Keep in mind that this command might also change
communication parameters (i.e. baudrate and slave address)
and thus you might have to adjust the driver's parameters
to allow further communication with the device.
"""
self.execute(ShdlcCmdFactoryReset())
| [
"[email protected]"
] | |
cfb080d14c05e5ba70f3611fba5c7802c11373c9 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02882/s256412363.py | 88b565ca18c9c84c582fb7237d25bd5927bd6b85 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 166 | py | from math import atan,pi
# Water in an a x a x b tank holds volume x; print the maximum tilt
# angle (degrees) before any water spills.
a, b, x = map(int, input().split())
if b - x / a**2 <= x / a**2:
    # At least half full: the air pocket is a triangular prism on top.
    print(atan((b - x / a**2) / (a / 2)) * (180 / pi))
else:
    # Less than half full: the water forms a triangular prism on the floor.
    y = x / a * 2 / b  # base of the water triangle along the floor
    print(atan(b / y) * (180 / pi))
"[email protected]"
] | |
e99a386905b70424a8de735bdb86b29c0631b254 | d4e573e8eae32db155fe5931b3e2dcd3aa48969b | /indigo/lib/python2.7/dist-packages/rocon_std_msgs/srv/_GetPlatformInfo.py | 9632cf90da8b4370fa3f61a096c961f655bcb6dd | [] | no_license | javierdiazp/myros | ee52b0a7c972d559a1a377f8de4eb37878b8a99b | 7571febdfa881872cae6378bf7266deca7901529 | refs/heads/master | 2022-11-09T09:24:47.708988 | 2016-11-10T16:56:28 | 2016-11-10T16:56:28 | 73,733,895 | 0 | 1 | null | 2022-10-25T05:16:35 | 2016-11-14T18:19:06 | C++ | UTF-8 | Python | false | false | 13,022 | py | # This Python file uses the following encoding: utf-8
"""autogenerated by genpy from rocon_std_msgs/GetPlatformInfoRequest.msg. Do not edit."""
import sys
python3 = True if sys.hexversion > 0x03000000 else False
import genpy
import struct
class GetPlatformInfoRequest(genpy.Message):
_md5sum = "d41d8cd98f00b204e9800998ecf8427e"
_type = "rocon_std_msgs/GetPlatformInfoRequest"
_has_header = False #flag to mark the presence of a Header object
_full_text = """"""
__slots__ = []
_slot_types = []
def __init__(self, *args, **kwds):
"""
Constructor. Any message fields that are implicitly/explicitly
    set to None will be assigned a default value. The recommended
    use is keyword arguments, as this is more robust to future message
changes. You cannot mix in-order arguments and keyword arguments.
The available fields are:
:param args: complete set of field values, in .msg order
:param kwds: use keyword arguments corresponding to message field names
to set specific fields.
"""
if args or kwds:
super(GetPlatformInfoRequest, self).__init__(*args, **kwds)
def _get_types(self):
"""
internal API method
"""
return self._slot_types
def serialize(self, buff):
"""
serialize message into buffer
:param buff: buffer, ``StringIO``
"""
try:
pass
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
def deserialize(self, str):
"""
unpack serialized message in str into this message instance
:param str: byte array of serialized message, ``str``
"""
try:
end = 0
return self
except struct.error as e:
raise genpy.DeserializationError(e) #most likely buffer underfill
def serialize_numpy(self, buff, numpy):
"""
serialize message with numpy array types into buffer
:param buff: buffer, ``StringIO``
:param numpy: numpy python module
"""
try:
pass
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
def deserialize_numpy(self, str, numpy):
"""
unpack serialized message in str into this message instance using numpy for array types
:param str: byte array of serialized message, ``str``
:param numpy: numpy python module
"""
try:
end = 0
return self
except struct.error as e:
raise genpy.DeserializationError(e) #most likely buffer underfill
_struct_I = genpy.struct_I
# This Python file uses the following encoding: utf-8
"""autogenerated by genpy from rocon_std_msgs/GetPlatformInfoResponse.msg. Do not edit."""
import sys
python3 = True if sys.hexversion > 0x03000000 else False
import genpy
import struct
import rocon_std_msgs.msg
class GetPlatformInfoResponse(genpy.Message):
_md5sum = "b7b34c89d857c757ff89bd8e49fa695f"
_type = "rocon_std_msgs/GetPlatformInfoResponse"
_has_header = False #flag to mark the presence of a Header object
_full_text = """PlatformInfo platform_info
================================================================================
MSG: rocon_std_msgs/PlatformInfo
# Provides platform details for robots, software or human
# interactive devices.
########################### Variables ###########################
# rocon universal resource identifier
string uri
# rocon version compatibility identifier (used when connecting to concerts)
string version
Icon icon
================================================================================
MSG: rocon_std_msgs/Icon
# Used to identify the original package/filename resource this icon was/is to be loaded from
# This typically doesn't have to be set, but can be very useful when loading icons from yaml definitions.
string resource_name
# Image data format. "jpeg" or "png"
string format
# Image data.
uint8[] data"""
__slots__ = ['platform_info']
_slot_types = ['rocon_std_msgs/PlatformInfo']
def __init__(self, *args, **kwds):
"""
Constructor. Any message fields that are implicitly/explicitly
    set to None will be assigned a default value. The recommended
    use is keyword arguments, as this is more robust to future message
changes. You cannot mix in-order arguments and keyword arguments.
The available fields are:
platform_info
:param args: complete set of field values, in .msg order
:param kwds: use keyword arguments corresponding to message field names
to set specific fields.
"""
if args or kwds:
super(GetPlatformInfoResponse, self).__init__(*args, **kwds)
#message fields cannot be None, assign default values for those that are
if self.platform_info is None:
self.platform_info = rocon_std_msgs.msg.PlatformInfo()
else:
self.platform_info = rocon_std_msgs.msg.PlatformInfo()
def _get_types(self):
"""
internal API method
"""
return self._slot_types
def serialize(self, buff):
"""
serialize message into buffer
:param buff: buffer, ``StringIO``
"""
try:
_x = self.platform_info.uri
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
if python3:
buff.write(struct.pack('<I%sB'%length, length, *_x))
else:
buff.write(struct.pack('<I%ss'%length, length, _x))
_x = self.platform_info.version
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
if python3:
buff.write(struct.pack('<I%sB'%length, length, *_x))
else:
buff.write(struct.pack('<I%ss'%length, length, _x))
_x = self.platform_info.icon.resource_name
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
if python3:
buff.write(struct.pack('<I%sB'%length, length, *_x))
else:
buff.write(struct.pack('<I%ss'%length, length, _x))
_x = self.platform_info.icon.format
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
if python3:
buff.write(struct.pack('<I%sB'%length, length, *_x))
else:
buff.write(struct.pack('<I%ss'%length, length, _x))
_x = self.platform_info.icon.data
length = len(_x)
# - if encoded as a list instead, serialize as bytes instead of string
if type(_x) in [list, tuple]:
buff.write(struct.pack('<I%sB'%length, length, *_x))
else:
buff.write(struct.pack('<I%ss'%length, length, _x))
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
def deserialize(self, str):
"""
unpack serialized message in str into this message instance
:param str: byte array of serialized message, ``str``
"""
try:
if self.platform_info is None:
self.platform_info = rocon_std_msgs.msg.PlatformInfo()
end = 0
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.platform_info.uri = str[start:end].decode('utf-8')
else:
self.platform_info.uri = str[start:end]
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.platform_info.version = str[start:end].decode('utf-8')
else:
self.platform_info.version = str[start:end]
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.platform_info.icon.resource_name = str[start:end].decode('utf-8')
else:
self.platform_info.icon.resource_name = str[start:end]
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.platform_info.icon.format = str[start:end].decode('utf-8')
else:
self.platform_info.icon.format = str[start:end]
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
self.platform_info.icon.data = str[start:end]
return self
except struct.error as e:
raise genpy.DeserializationError(e) #most likely buffer underfill
def serialize_numpy(self, buff, numpy):
"""
serialize message with numpy array types into buffer
:param buff: buffer, ``StringIO``
:param numpy: numpy python module
"""
try:
_x = self.platform_info.uri
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
if python3:
buff.write(struct.pack('<I%sB'%length, length, *_x))
else:
buff.write(struct.pack('<I%ss'%length, length, _x))
_x = self.platform_info.version
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
if python3:
buff.write(struct.pack('<I%sB'%length, length, *_x))
else:
buff.write(struct.pack('<I%ss'%length, length, _x))
_x = self.platform_info.icon.resource_name
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
if python3:
buff.write(struct.pack('<I%sB'%length, length, *_x))
else:
buff.write(struct.pack('<I%ss'%length, length, _x))
_x = self.platform_info.icon.format
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
if python3:
buff.write(struct.pack('<I%sB'%length, length, *_x))
else:
buff.write(struct.pack('<I%ss'%length, length, _x))
_x = self.platform_info.icon.data
length = len(_x)
# - if encoded as a list instead, serialize as bytes instead of string
if type(_x) in [list, tuple]:
buff.write(struct.pack('<I%sB'%length, length, *_x))
else:
buff.write(struct.pack('<I%ss'%length, length, _x))
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
def deserialize_numpy(self, str, numpy):
"""
unpack serialized message in str into this message instance using numpy for array types
:param str: byte array of serialized message, ``str``
:param numpy: numpy python module
"""
try:
if self.platform_info is None:
self.platform_info = rocon_std_msgs.msg.PlatformInfo()
end = 0
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.platform_info.uri = str[start:end].decode('utf-8')
else:
self.platform_info.uri = str[start:end]
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.platform_info.version = str[start:end].decode('utf-8')
else:
self.platform_info.version = str[start:end]
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.platform_info.icon.resource_name = str[start:end].decode('utf-8')
else:
self.platform_info.icon.resource_name = str[start:end]
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.platform_info.icon.format = str[start:end].decode('utf-8')
else:
self.platform_info.icon.format = str[start:end]
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
self.platform_info.icon.data = str[start:end]
return self
except struct.error as e:
raise genpy.DeserializationError(e) #most likely buffer underfill
_struct_I = genpy.struct_I
class GetPlatformInfo(object):
_type = 'rocon_std_msgs/GetPlatformInfo'
_md5sum = 'b7b34c89d857c757ff89bd8e49fa695f'
_request_class = GetPlatformInfoRequest
_response_class = GetPlatformInfoResponse
| [
"[email protected]"
] | |
14de914eafa10449b77e6e446ba593c4617271a1 | 12d007b50d20030c4a0d8ecceaeb532b3de4f966 | /setup.py | 1ccbc3c4e37c98f182e6b3eedb9ea81800bdaf3a | [
"MIT"
] | permissive | Tygs/ayo | 8be03cf1854d122b763272ba256e3fa87135e776 | 27b2225770581e19f3abdb8db0721776f0cfb195 | refs/heads/master | 2021-11-08T02:09:37.979755 | 2021-11-01T10:44:35 | 2021-11-01T10:44:35 | 136,607,852 | 32 | 2 | null | null | null | null | UTF-8 | Python | false | false | 203 | py | """
Install: python setup.py install
Dev mode: python setup.py develop
Test: pip install pytest && pytest tests
All the config is in setup.cfg
"""
import setuptools
setuptools.setup()
| [
"[email protected]"
] | |
a9b33f6c6c2f40ad46017f0a75775c17579f1e0a | 0b98732dcd3dd94a97555a8f3e8dd3524bb8ec86 | /configs/ms_rcnn/ms_rcnn_x101_64x4d_fpn_1x.py | a6b720332cf33263295dcfeeae0d85b793e5166d | [
"Apache-2.0"
] | permissive | hasanirtiza/Pedestron | e89fea2ec676f150a7266f6b65963dd6c4ec35c9 | 8ab23ec38982cfaf0ae82c77c30f10b2fff62d12 | refs/heads/master | 2023-08-06T02:53:06.368937 | 2023-04-06T13:46:27 | 2023-04-06T13:46:27 | 247,410,025 | 723 | 161 | Apache-2.0 | 2022-10-02T10:17:44 | 2020-03-15T05:52:52 | Python | UTF-8 | Python | false | false | 5,628 | py | # model settings
model = dict(
type='MaskScoringRCNN',
pretrained='open-mmlab://resnext101_64x4d',
backbone=dict(
type='ResNeXt',
depth=101,
groups=64,
base_width=4,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
style='pytorch'),
neck=dict(
type='FPN',
in_channels=[256, 512, 1024, 2048],
out_channels=256,
num_outs=5),
rpn_head=dict(
type='RPNHead',
in_channels=256,
feat_channels=256,
anchor_scales=[8],
anchor_ratios=[0.5, 1.0, 2.0],
anchor_strides=[4, 8, 16, 32, 64],
target_means=[.0, .0, .0, .0],
target_stds=[1.0, 1.0, 1.0, 1.0],
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=1.0)),
bbox_roi_extractor=dict(
type='SingleRoIExtractor',
roi_layer=dict(type='RoIAlign', out_size=7, sample_num=2),
out_channels=256,
featmap_strides=[4, 8, 16, 32]),
bbox_head=dict(
type='SharedFCBBoxHead',
num_fcs=2,
in_channels=256,
fc_out_channels=1024,
roi_feat_size=7,
num_classes=81,
target_means=[0., 0., 0., 0.],
target_stds=[0.1, 0.1, 0.2, 0.2],
reg_class_agnostic=False,
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0)),
mask_roi_extractor=dict(
type='SingleRoIExtractor',
roi_layer=dict(type='RoIAlign', out_size=14, sample_num=2),
out_channels=256,
featmap_strides=[4, 8, 16, 32]),
mask_head=dict(
type='FCNMaskHead',
num_convs=4,
in_channels=256,
conv_out_channels=256,
num_classes=81,
loss_mask=dict(
type='CrossEntropyLoss', use_mask=True, loss_weight=1.0)),
mask_iou_head=dict(
type='MaskIoUHead',
num_convs=4,
num_fcs=2,
roi_feat_size=14,
in_channels=256,
conv_out_channels=256,
fc_out_channels=1024,
num_classes=81))
# model training and testing settings
train_cfg = dict(
rpn=dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.7,
neg_iou_thr=0.3,
min_pos_iou=0.3,
ignore_iof_thr=-1),
sampler=dict(
type='RandomSampler',
num=256,
pos_fraction=0.5,
neg_pos_ub=-1,
add_gt_as_proposals=False),
allowed_border=0,
pos_weight=-1,
debug=False),
rpn_proposal=dict(
nms_across_levels=False,
nms_pre=2000,
nms_post=2000,
max_num=2000,
nms_thr=0.7,
min_bbox_size=0),
rcnn=dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.5,
neg_iou_thr=0.5,
min_pos_iou=0.5,
ignore_iof_thr=-1),
sampler=dict(
type='RandomSampler',
num=512,
pos_fraction=0.25,
neg_pos_ub=-1,
add_gt_as_proposals=True),
mask_size=28,
pos_weight=-1,
mask_thr_binary=0.5,
debug=False))
test_cfg = dict(
rpn=dict(
nms_across_levels=False,
nms_pre=1000,
nms_post=1000,
max_num=1000,
nms_thr=0.7,
min_bbox_size=0),
rcnn=dict(
score_thr=0.05,
nms=dict(type='nms', iou_thr=0.5),
max_per_img=100,
mask_thr_binary=0.5))
# dataset settings
dataset_type = 'CocoDataset'
data_root = 'data/coco/'
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
data = dict(
imgs_per_gpu=2,
workers_per_gpu=2,
train=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_train2017.json',
img_prefix=data_root + 'train2017/',
img_scale=(1333, 800),
img_norm_cfg=img_norm_cfg,
size_divisor=32,
flip_ratio=0.5,
with_mask=True,
with_crowd=True,
with_label=True),
val=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_val2017.json',
img_prefix=data_root + 'val2017/',
img_scale=(1333, 800),
img_norm_cfg=img_norm_cfg,
size_divisor=32,
flip_ratio=0,
with_mask=True,
with_crowd=True,
with_label=True),
test=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_val2017.json',
img_prefix=data_root + 'val2017/',
img_scale=(1333, 800),
img_norm_cfg=img_norm_cfg,
size_divisor=32,
flip_ratio=0,
with_mask=False,
with_label=False,
test_mode=True))
# optimizer
optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001)
optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2))
# learning policy
lr_config = dict(
policy='step',
warmup='linear',
warmup_iters=500,
warmup_ratio=1.0 / 3,
step=[8, 11])
checkpoint_config = dict(interval=1)
# yapf:disable
log_config = dict(
interval=50,
hooks=[
dict(type='TextLoggerHook'),
# dict(type='TensorboardLoggerHook')
])
# yapf:enable
# runtime settings
total_epochs = 12
dist_params = dict(backend='nccl')
log_level = 'INFO'
work_dir = './work_dirs/ms_rcnn_x101_64x4d_fpn_1x'
load_from = None
resume_from = None
workflow = [('train', 1)]
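# Example launch (a sketch; assumes the standard mmdetection-style tools/
# layout used by this repo):
#   python tools/train.py configs/ms_rcnn/ms_rcnn_x101_64x4d_fpn_1x.py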
| [
"[email protected]"
] | |
52dc6364b0481881b567046e1443faf02235b238 | 75491989e021c515461ae94dd9e069c0e3cebd48 | /Etc/CodeUp_basic/1079.py | 795c6ba6a8bfcf45ba6538395c8b796605650995 | [] | no_license | Girin7716/PythonCoding | c60db97d269aa4a90159ae83f40c332244af6b41 | 7ac4f942aed727b5290f18ce252c5f99ad657c72 | refs/heads/master | 2023-04-20T23:33:36.077633 | 2021-05-07T11:51:16 | 2021-05-07T11:51:16 | 291,244,802 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 102 | py | chars = input().split()
for i in chars:
if i == 'q':
print(i)
        break
print(i) | [
"[email protected]"
] | |
2b09af06835e7474ad61e8d98f0c2a72f6f3ed6b | dc37f36199b107933e33486761125cef2f492ae2 | /export_contacts.py | 9eb70ffd28bd589f83971c6a335fa94871265327 | [] | no_license | spookylukey/christchurch_django | ca3acd67df1695a1cd7cb462b729ad72a37e43b7 | d489e400b201b8ac56ee4065b3d6bc0f861f92f2 | refs/heads/master | 2022-12-20T03:27:26.081809 | 2015-10-15T18:36:20 | 2015-10-15T18:36:20 | 300,521,884 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,339 | py | #!/usr/bin/env python
from __future__ import unicode_literals
import os
os.environ['DJANGO_SETTINGS_MODULE'] = 'christchurch.settings'
import csv
writer = csv.writer(open("contact-list.csv", "w"))
writer.writerow(["First Name", "Last Name", "Gender (M/F)", "Student (Y/N)", "Address", "Email Address", "Phone Number", "Mobile", "Photo File Name", "Home Group", "Username", "Password", "Admin User (Y/N)", "Church member", "Include on email lists"])
from django.contrib.auth.models import User
from contacts.models import Contact
admins = {u.email: u for u in User.objects.all().filter(is_staff=True)}
for contact in Contact.objects.all():
try:
        # maxsplit=1: the first word is the first name, the rest the last name
        first_name, last_name = contact.name.split(' ', 1)
except ValueError:
first_name, last_name = contact.name, ""
writer.writerow([
first_name,
last_name,
"",
"N",
contact.address.strip() + "\n" + contact.post_code,
contact.email,
contact.phone_number,
contact.mobile_number,
"",
contact.home_group.name if contact.home_group else "",
admins[contact.email].username if contact.email in admins else "",
"",
"Y" if contact.email in admins else "N",
"Y" if contact.church_member else "N",
"Y" if contact.include_on_email_lists else "N",
])
| [
"[email protected]"
] | |
77af41358982c08950c144fac88c03820ae1a378 | bb33e6be8316f35decbb2b81badf2b6dcf7df515 | /source/res/scripts/client/gui/battle_control/controllers/feedback_events.py | 745d8091451fb08bc693fbe8f33885b44f3694f5 | [] | no_license | StranikS-Scan/WorldOfTanks-Decompiled | 999c9567de38c32c760ab72c21c00ea7bc20990c | d2fe9c195825ececc728e87a02983908b7ea9199 | refs/heads/1.18 | 2023-08-25T17:39:27.718097 | 2022-09-22T06:49:44 | 2022-09-22T06:49:44 | 148,696,315 | 103 | 39 | null | 2022-09-14T17:50:03 | 2018-09-13T20:49:11 | Python | UTF-8 | Python | false | false | 16,113 | py | # Python bytecode 2.7 (decompiled from Python 2.7)
# Embedded file name: scripts/client/gui/battle_control/controllers/feedback_events.py
import logging
from BattleFeedbackCommon import BATTLE_EVENT_TYPE as _BET, NONE_SHELL_TYPE
from gui.battle_control.battle_constants import FEEDBACK_EVENT_ID as _FET
from constants import ATTACK_REASON, ATTACK_REASONS, BATTLE_LOG_SHELL_TYPES, ROLE_TYPE, ROLE_TYPE_TO_LABEL
_logger = logging.getLogger(__name__)
def _unpackInteger(packedData):
return packedData
def _unpackDamage(packedData):
return _DamageExtra(*_BET.unpackDamage(packedData))
def _unpackCrits(packedData):
return _CritsExtra(*_BET.unpackCrits(packedData))
def _unpackVisibility(packedData):
return _VisibilityExtra(*_BET.unpackVisibility(packedData))
def _unpackMultiStun(packedData):
return _MultiStunExtra(packedData, True)
_BATTLE_EVENT_TO_PLAYER_FEEDBACK_EVENT = {_BET.KILL: _FET.PLAYER_KILLED_ENEMY,
_BET.DAMAGE: _FET.PLAYER_DAMAGED_HP_ENEMY,
_BET.CRIT: _FET.PLAYER_DAMAGED_DEVICE_ENEMY,
_BET.SPOTTED: _FET.PLAYER_SPOTTED_ENEMY,
_BET.RADIO_ASSIST: _FET.PLAYER_ASSIST_TO_KILL_ENEMY,
_BET.TRACK_ASSIST: _FET.PLAYER_ASSIST_TO_KILL_ENEMY,
_BET.STUN_ASSIST: _FET.PLAYER_ASSIST_TO_STUN_ENEMY,
_BET.BASE_CAPTURE_POINTS: _FET.PLAYER_CAPTURED_BASE,
_BET.BASE_CAPTURE_DROPPED: _FET.PLAYER_DROPPED_CAPTURE,
_BET.BASE_CAPTURE_BLOCKED: _FET.PLAYER_BLOCKED_CAPTURE,
_BET.TANKING: _FET.PLAYER_USED_ARMOR,
_BET.RECEIVED_DAMAGE: _FET.ENEMY_DAMAGED_HP_PLAYER,
_BET.RECEIVED_CRIT: _FET.ENEMY_DAMAGED_DEVICE_PLAYER,
_BET.TARGET_VISIBILITY: _FET.VEHICLE_VISIBILITY_CHANGED,
_BET.DETECTED: _FET.VEHICLE_DETECTED,
_BET.ENEMY_SECTOR_CAPTURED: _FET.ENEMY_SECTOR_CAPTURED,
_BET.DESTRUCTIBLE_DAMAGED: _FET.DESTRUCTIBLE_DAMAGED,
_BET.DESTRUCTIBLE_DESTROYED: _FET.DESTRUCTIBLE_DESTROYED,
_BET.DESTRUCTIBLES_DEFENDED: _FET.DESTRUCTIBLES_DEFENDED,
_BET.DEFENDER_BONUS: _FET.DEFENDER_BONUS,
_BET.SMOKE_ASSIST: _FET.SMOKE_ASSIST,
_BET.INSPIRE_ASSIST: _FET.INSPIRE_ASSIST,
_BET.MULTI_STUN: _FET.PLAYER_STUN_ENEMIES,
_BET.EQUIPMENT_TIMER_EXPIRED: _FET.EQUIPMENT_TIMER_EXPIRED}
_PLAYER_FEEDBACK_EXTRA_DATA_CONVERTERS = {_FET.PLAYER_DAMAGED_HP_ENEMY: _unpackDamage,
_FET.PLAYER_ASSIST_TO_KILL_ENEMY: _unpackDamage,
_FET.PLAYER_CAPTURED_BASE: _unpackInteger,
_FET.PLAYER_DROPPED_CAPTURE: _unpackInteger,
_FET.PLAYER_BLOCKED_CAPTURE: _unpackInteger,
_FET.PLAYER_USED_ARMOR: _unpackDamage,
_FET.PLAYER_DAMAGED_DEVICE_ENEMY: _unpackCrits,
_FET.ENEMY_DAMAGED_HP_PLAYER: _unpackDamage,
_FET.ENEMY_DAMAGED_DEVICE_PLAYER: _unpackCrits,
_FET.PLAYER_ASSIST_TO_STUN_ENEMY: _unpackDamage,
_FET.VEHICLE_VISIBILITY_CHANGED: _unpackVisibility,
_FET.VEHICLE_DETECTED: _unpackVisibility,
_FET.DESTRUCTIBLE_DAMAGED: _unpackInteger,
_FET.DESTRUCTIBLES_DEFENDED: _unpackInteger,
_FET.SMOKE_ASSIST: _unpackDamage,
_FET.INSPIRE_ASSIST: _unpackDamage,
_FET.PLAYER_SPOTTED_ENEMY: _unpackVisibility,
_FET.PLAYER_STUN_ENEMIES: _unpackMultiStun}
def _getShellType(shellTypeID):
return None if shellTypeID == NONE_SHELL_TYPE else BATTLE_LOG_SHELL_TYPES(shellTypeID)
class _DamageExtra(object):
__slots__ = ('__damage', '__attackReasonID', '__isBurst', '__shellType', '__isShellGold', '__secondaryAttackReasonID', '__isRoleAction')
def __init__(self, damage=0, attackReasonID=0, isBurst=False, shellTypeID=NONE_SHELL_TYPE, shellIsGold=False, secondaryAttackReasonID=0, isRoleAction=False):
super(_DamageExtra, self).__init__()
self.__damage = damage
self.__attackReasonID = attackReasonID
self.__isBurst = bool(isBurst)
self.__shellType = _getShellType(shellTypeID)
self.__isShellGold = bool(shellIsGold)
self.__secondaryAttackReasonID = secondaryAttackReasonID
self.__isRoleAction = bool(isRoleAction)
_logger.debug('_DamageExtra isRoleAction = %s', isRoleAction)
def getDamage(self):
return self.__damage
def getAttackReasonID(self):
return self.__attackReasonID
def getSecondaryAttackReasonID(self):
return self.__secondaryAttackReasonID
def getShellType(self):
return self.__shellType
def isNone(self):
return self.isAttackReason(ATTACK_REASON.NONE)
def isBurst(self):
return self.__isBurst
def isShellGold(self):
return self.__isShellGold
def isFire(self):
return self.isAttackReason(ATTACK_REASON.FIRE)
def isBerserker(self):
return self.isAttackReason(ATTACK_REASON.BERSERKER)
def isMinefield(self):
return self.isAttackReason(ATTACK_REASON.MINEFIELD_EQ)
def isRam(self):
return self.isAttackReason(ATTACK_REASON.RAM)
def isShot(self):
return self.isAttackReason(ATTACK_REASON.SHOT)
def isWorldCollision(self):
return self.isAttackReason(ATTACK_REASON.WORLD_COLLISION)
def isDeathZone(self):
return self.isAttackReason(ATTACK_REASON.DEATH_ZONE)
def isProtectionZone(self, primary=True):
return self.isAttackReason(ATTACK_REASON.ARTILLERY_PROTECTION) or self.isAttackReason(ATTACK_REASON.ARTILLERY_SECTOR) if primary else self.isSecondaryAttackReason(ATTACK_REASON.ARTILLERY_PROTECTION) or self.isSecondaryAttackReason(ATTACK_REASON.ARTILLERY_SECTOR)
def isArtilleryEq(self, primary=True):
return self.isAttackReason(ATTACK_REASON.ARTILLERY_EQ) if primary else self.isSecondaryAttackReason(ATTACK_REASON.ARTILLERY_EQ)
def isFortArtilleryEq(self, primary=True):
return self.isAttackReason(ATTACK_REASON.FORT_ARTILLERY_EQ) if primary else self.isSecondaryAttackReason(ATTACK_REASON.FORT_ARTILLERY_EQ)
def isBomberEq(self, primary=True):
return self.isAttackReason(ATTACK_REASON.BOMBER_EQ) if primary else self.isSecondaryAttackReason(ATTACK_REASON.BOMBER_EQ)
def isBombers(self, primary=True):
return self.isAttackReason(ATTACK_REASON.BOMBERS) if primary else self.isSecondaryAttackReason(ATTACK_REASON.BOMBERS)
def isMineField(self, primary=True):
return self.isAttackReason(ATTACK_REASON.MINEFIELD_EQ) if primary else self.isSecondaryAttackReason(ATTACK_REASON.MINEFIELD_EQ)
def isDamagingSmoke(self, primary=True):
return self.isAttackReason(ATTACK_REASON.SMOKE) if primary else self.isSecondaryAttackReason(ATTACK_REASON.SMOKE)
def isCorrodingShot(self, primary=True):
return self.isAttackReason(ATTACK_REASON.CORRODING_SHOT) if primary else self.isSecondaryAttackReason(ATTACK_REASON.CORRODING_SHOT)
def isFireCircle(self, primary=True):
return self.isAttackReason(ATTACK_REASON.FIRE_CIRCLE) if primary else self.isSecondaryAttackReason(ATTACK_REASON.FIRE_CIRCLE)
def isThunderStrike(self, primary=True):
return self.isAttackReason(ATTACK_REASON.THUNDER_STRIKE) if primary else self.isSecondaryAttackReason(ATTACK_REASON.THUNDER_STRIKE)
def isAttackReason(self, attackReason):
return ATTACK_REASONS[self.__attackReasonID] == attackReason
def isSecondaryAttackReason(self, attackReason):
return ATTACK_REASONS[self.__secondaryAttackReasonID] == attackReason
def isRoleAction(self):
return self.__isRoleAction
def isSpawnedBotExplosion(self, primary=True):
return self.isAttackReason(ATTACK_REASON.SPAWNED_BOT_EXPLOSION) if primary else self.isSecondaryAttackReason(ATTACK_REASON.SPAWNED_BOT_EXPLOSION)
def isSpawnedBotRam(self, primary=True):
return self.isAttackReason(ATTACK_REASON.BRANDER_RAM) if primary else self.isSecondaryAttackReason(ATTACK_REASON.BRANDER_RAM)
def isClingBrander(self):
isShot = self.isAttackReason(ATTACK_REASON.SHOT)
isClingBrander = self.isSecondaryAttackReason(ATTACK_REASON.CLING_BRANDER)
return isShot and isClingBrander
def isClingBranderRam(self):
return self.isAttackReason(ATTACK_REASON.CLING_BRANDER_RAM)
class _VisibilityExtra(object):
__slots__ = ('__isVisible', '__isDirect', '__isRoleAction')
def __init__(self, isVisible, isDirect, isRoleAction):
super(_VisibilityExtra, self).__init__()
self.__isVisible = isVisible
self.__isDirect = isDirect
self.__isRoleAction = bool(isRoleAction)
_logger.debug('_VisibilityExtra isRoleAction = %s', isRoleAction)
def isVisible(self):
return self.__isVisible
def isDirect(self):
return self.__isDirect
def isRoleAction(self):
return self.__isRoleAction
class _MultiStunExtra(object):
__slots__ = ('__targetsAmount', '__isRoleAction')
def __init__(self, targetsAmount, isRoleAction):
super(_MultiStunExtra, self).__init__()
self.__targetsAmount = targetsAmount
self.__isRoleAction = bool(isRoleAction)
        _logger.debug('_MultiStunExtra isRoleAction = %s', isRoleAction)
def getTargetsAmount(self):
return self.__targetsAmount
def isRoleAction(self):
return self.__isRoleAction
class _CritsExtra(object):
__slots__ = ('__critsCount', '__shellType', '__isShellGold', '__attackReasonID', '__secondaryAttackReasonID')
def __init__(self, critsCount=0, attackReasonID=0, shellTypeID=NONE_SHELL_TYPE, shellIsGold=False, secondaryAttackReasonID=0):
super(_CritsExtra, self).__init__()
self.__critsCount = critsCount
self.__attackReasonID = attackReasonID
self.__shellType = _getShellType(shellTypeID)
self.__isShellGold = bool(shellIsGold)
self.__secondaryAttackReasonID = secondaryAttackReasonID
def getCritsCount(self):
return self.__critsCount
def getShellType(self):
return self.__shellType
def isShellGold(self):
return self.__isShellGold
def isFire(self):
return self.isAttackReason(ATTACK_REASON.FIRE)
def isBerserker(self):
return self.isAttackReason(ATTACK_REASON.BERSERKER)
def isMinefield(self):
return self.isAttackReason(ATTACK_REASON.MINEFIELD_EQ)
def isDamagingSmoke(self):
return self.isAttackReason(ATTACK_REASON.SMOKE)
def isCorrodingShot(self):
return self.isAttackReason(ATTACK_REASON.CORRODING_SHOT)
def isFireCircle(self):
return self.isAttackReason(ATTACK_REASON.FIRE_CIRCLE)
def isThunderStrike(self):
return self.isAttackReason(ATTACK_REASON.THUNDER_STRIKE)
def isRam(self):
return self.isAttackReason(ATTACK_REASON.RAM)
def isShot(self):
return self.isAttackReason(ATTACK_REASON.SHOT)
def isWorldCollision(self):
return self.isAttackReason(ATTACK_REASON.WORLD_COLLISION)
def isDeathZone(self):
return self.isAttackReason(ATTACK_REASON.DEATH_ZONE)
def isProtectionZone(self, primary=True):
return self.isAttackReason(ATTACK_REASON.ARTILLERY_PROTECTION) or self.isAttackReason(ATTACK_REASON.ARTILLERY_SECTOR) if primary else self.isSecondaryAttackReason(ATTACK_REASON.ARTILLERY_PROTECTION) or self.isSecondaryAttackReason(ATTACK_REASON.ARTILLERY_SECTOR)
def isArtilleryEq(self, primary=True):
return self.isAttackReason(ATTACK_REASON.ARTILLERY_EQ) if primary else self.isSecondaryAttackReason(ATTACK_REASON.ARTILLERY_EQ)
def isFortArtilleryEq(self, primary=True):
return self.isAttackReason(ATTACK_REASON.FORT_ARTILLERY_EQ) if primary else self.isSecondaryAttackReason(ATTACK_REASON.FORT_ARTILLERY_EQ)
def isBomberEq(self, primary=True):
return self.isAttackReason(ATTACK_REASON.BOMBER_EQ) if primary else self.isSecondaryAttackReason(ATTACK_REASON.BOMBER_EQ)
def isBombers(self, primary=True):
return self.isAttackReason(ATTACK_REASON.BOMBERS) if primary else self.isSecondaryAttackReason(ATTACK_REASON.BOMBERS)
def isSecondaryAttackReason(self, attackReason):
return ATTACK_REASONS[self.__secondaryAttackReasonID] == attackReason
def isAttackReason(self, attackReason):
return ATTACK_REASONS[self.__attackReasonID] == attackReason
def isClingBrander(self):
isShot = self.isAttackReason(ATTACK_REASON.SHOT)
isClingBrander = self.isSecondaryAttackReason(ATTACK_REASON.CLING_BRANDER)
return isShot and isClingBrander
def isClingBranderRam(self):
return self.isAttackReason(ATTACK_REASON.CLING_BRANDER_RAM)
class _FeedbackEvent(object):
__slots__ = ('__eventType',)
def __init__(self, feedbackEventType):
super(_FeedbackEvent, self).__init__()
self.__eventType = feedbackEventType
def getType(self):
return self.__eventType
@staticmethod
def fromDict(summaryData, additionalData=None):
raise NotImplementedError
class PlayerFeedbackEvent(_FeedbackEvent):
__slots__ = ('__battleEventType', '__targetID', '__count', '__extra', '__attackReasonID', '__isBurst', '__role')
def __init__(self, feedbackEventType, eventType, targetID, count, role, extra):
super(PlayerFeedbackEvent, self).__init__(feedbackEventType)
self.__battleEventType = eventType
self.__targetID = targetID
self.__count = count
self.__role = role
self.__extra = extra
@staticmethod
def fromDict(battleEventData, additionalData=None):
battleEventType = battleEventData['eventType']
if battleEventType in _BATTLE_EVENT_TO_PLAYER_FEEDBACK_EVENT:
feedbackEventType = _BATTLE_EVENT_TO_PLAYER_FEEDBACK_EVENT[battleEventType]
if feedbackEventType in _PLAYER_FEEDBACK_EXTRA_DATA_CONVERTERS:
converter = _PLAYER_FEEDBACK_EXTRA_DATA_CONVERTERS[feedbackEventType]
extra = converter(battleEventData['details'])
else:
extra = None
role = ROLE_TYPE_TO_LABEL[ROLE_TYPE.NOT_DEFINED]
if additionalData is not None:
role = ROLE_TYPE_TO_LABEL[additionalData.get('role') or ROLE_TYPE.NOT_DEFINED]
return PlayerFeedbackEvent(feedbackEventType, battleEventData['eventType'], battleEventData['targetID'], battleEventData['count'], role, extra)
else:
return
def getBattleEventType(self):
return self.__battleEventType
def getTargetID(self):
return self.__targetID
def getExtra(self):
return self.__extra
def getCount(self):
return self.__count
def getRole(self):
return self.__role
class BattleSummaryFeedbackEvent(_FeedbackEvent):
__slots__ = ('__damage', '__trackAssistDamage', '__radioAssistDamage', '__blockedDamage', '__stunAssist')
def __init__(self, damage, trackAssist, radioAssist, tankings, stunAssist):
super(BattleSummaryFeedbackEvent, self).__init__(_FET.DAMAGE_LOG_SUMMARY)
self.__damage = damage
self.__trackAssistDamage = trackAssist
self.__radioAssistDamage = radioAssist
self.__blockedDamage = tankings
self.__stunAssist = stunAssist
@staticmethod
def fromDict(summaryData, additionalData=None):
return BattleSummaryFeedbackEvent(damage=summaryData['damage'], trackAssist=summaryData['trackAssist'], radioAssist=summaryData['radioAssist'], tankings=summaryData['tankings'], stunAssist=summaryData['stunAssist'])
def getTotalDamage(self):
return self.__damage
def getTotalAssistDamage(self):
return self.__trackAssistDamage + self.__radioAssistDamage
def getTotalBlockedDamage(self):
return self.__blockedDamage
def getTotalStunDamage(self):
return self.__stunAssist
class PostmortemSummaryEvent(_FeedbackEvent):
__slots__ = ('__killerID', '__deathReasonID')
def __init__(self, lastKillerID, lastDeathReasonID):
super(PostmortemSummaryEvent, self).__init__(_FET.POSTMORTEM_SUMMARY)
self.__killerID = lastKillerID
self.__deathReasonID = lastDeathReasonID
@staticmethod
def fromDict(summaryData, additionalData=None):
return PostmortemSummaryEvent(lastKillerID=summaryData['lastKillerID'], lastDeathReasonID=summaryData['lastDeathReasonID'])
def getKillerID(self):
return self.__killerID
def getDeathReasonID(self):
return self.__deathReasonID
| [
"[email protected]"
] | |
6a5d15682bbaa458fe83a7acb7339950b92acdcb | 795caca6c497891e2fcd2b0253a209500744c56d | /src/models/continuous_encoder_decoder_models/encoder_decoder_variants/enc_dec_out.py | fe36585660ddbd55eae5ad88b1e6f06abb913378 | [] | no_license | RitaRamo/remote-sensing-images-caption | 29c0e0a6b5352b9b3d62c7315cd4d7ac6b0b7076 | 426d97b5d3688f6c52c51ef6e33872554d55751a | refs/heads/master | 2021-11-24T03:02:00.238003 | 2021-11-04T09:23:20 | 2021-11-04T09:23:20 | 244,619,672 | 3 | 2 | null | null | null | null | UTF-8 | Python | false | false | 6,534 | py | import torchvision
from torch import nn
import torch
from torch.nn.utils.rnn import pack_padded_sequence
from models.basic_encoder_decoder_models.encoder_decoder import Encoder, Decoder
from models.abtract_model import AbstractEncoderDecoderModel
import torch.nn.functional as F
from embeddings.embeddings import get_embedding_layer
from sklearn.metrics.pairwise import cosine_similarity
import numpy as np
from data_preprocessing.preprocess_tokens import OOV_TOKEN
from embeddings.embeddings import EmbeddingsType
from models.continuous_encoder_decoder_models.encoder_decoder import ContinuousEncoderDecoderModel
from embeddings.embeddings import EmbeddingsType
class VocabAttention(nn.Module):
"""
Attention Network.
"""
def __init__(self, vocab_dim, decoder_dim, embedding_vocab):
"""
        :param vocab_dim: dimensionality of the vocabulary embeddings
        :param decoder_dim: size of the decoder's RNN
        :param embedding_vocab: pre-computed embedding matrix for the whole vocabulary
"""
super(VocabAttention, self).__init__()
# linear layer to transform decoder's output
self.decoder_att = nn.Linear(decoder_dim, vocab_dim)
self.full_att = nn.Linear(vocab_dim, 1)
self.relu = nn.ReLU()
self.softmax = nn.Softmax(dim=1) # softmax layer to calculate weights
self.embedding_vocab = embedding_vocab
def forward(self, decoder_hidden):
"""
Forward propagation.
:param decoder_hidden: previous decoder output, a tensor of dimension (batch_size, decoder_dim)
:return: attention weighted encoding, weights
"""
        # Broadcast the vocabulary embedding matrix across the batch:
        # (batch_size, vocab_size, embed_dim)
        vocab = self.embedding_vocab.repeat(decoder_hidden.size()[0], 1, 1)
        query = self.decoder_att(decoder_hidden)  # (batch_size, embed_dim)
        # (batch_size, vocab_size, 1) -> squeeze(2) gives (batch_size, vocab_size)
        att = self.full_att(self.relu(vocab + query.unsqueeze(1))).squeeze(2)
        alpha = self.softmax(att)  # (batch_size, vocab_size)
attention_weighted_encoding = (
vocab * alpha.unsqueeze(2)).sum(dim=1) # (batch_size, encoder_dim)
return attention_weighted_encoding, alpha
class ContinuousDecoderWithOut(Decoder):
def __init__(self, decoder_dim, embed_dim, embedding_type, vocab_size, token_to_id, post_processing, device,
encoder_dim=2048, dropout=0.5):
super(ContinuousDecoderWithOut, self).__init__(decoder_dim, embed_dim,
embedding_type, vocab_size, token_to_id, post_processing, encoder_dim, dropout)
# replace softmax with a embedding layer
self.fc = nn.Linear(decoder_dim, embed_dim)
        list_wordid = list(range(vocab_size))  # all word ids, including the special tokens (start, end, unknown, padding)
vocab = torch.transpose(torch.tensor(list_wordid).unsqueeze(-1), 0, 1)
embedding_vocab = self.embedding(vocab).to(device)
self.attention_out = VocabAttention(embed_dim, decoder_dim, embedding_vocab) # attention network
def forward(self, word, encoder_out, decoder_hidden_state, decoder_cell_state):
embeddings = self.embedding(word)
decoder_hidden_state, decoder_cell_state = self.decode_step(
embeddings, (decoder_hidden_state, decoder_cell_state)
)
scores, alpha_out = self.attention_out(self.dropout(decoder_hidden_state))
return scores, decoder_hidden_state, decoder_cell_state, alpha_out
class ContinuousEncoderDecoderOutModel(ContinuousEncoderDecoderModel):
def __init__(self,
args,
vocab_size,
token_to_id,
id_to_token,
max_len,
device
):
super().__init__(args, vocab_size, token_to_id, id_to_token, max_len, device)
def _initialize_encoder_and_decoder(self):
if (self.args.embedding_type not in [embedding.value for embedding in EmbeddingsType]):
raise ValueError(
"Continuous model should use pretrained embeddings...")
self.encoder = Encoder(self.args.image_model_type,
enable_fine_tuning=self.args.fine_tune_encoder)
self.decoder = ContinuousDecoderWithOut(
encoder_dim=self.encoder.encoder_dim,
decoder_dim=self.args.decoder_dim,
embedding_type=self.args.embedding_type,
embed_dim=self.args.embed_dim,
vocab_size=self.vocab_size,
token_to_id=self.token_to_id,
post_processing=self.args.post_processing,
device=self.device,
dropout=self.args.dropout
)
self.decoder.normalize_embeddings(self.args.no_normalization)
self.encoder = self.encoder.to(self.device)
self.decoder = self.decoder.to(self.device)
def _predict(self, encoder_out, caps, caption_lengths):
batch_size = encoder_out.size(0)
num_pixels = encoder_out.size(1)
# Create tensors to hold word predicion scores and alphas
all_predictions = torch.zeros(batch_size, max(
caption_lengths), self.decoder.embed_dim).to(self.device)
all_alphas_out = torch.zeros(batch_size, max(
caption_lengths), self.vocab_size).to(self.device)
h, c = self.decoder.init_hidden_state(encoder_out)
# Predict
for t in range(max(
caption_lengths)):
# batchsizes of current time_step are the ones with lenght bigger than time-step (i.e have not fineshed yet)
batch_size_t = sum([l > t for l in caption_lengths])
predictions, h, c, alpha_out = self.decoder(
caps[:batch_size_t, t], encoder_out[:batch_size_t], h[:batch_size_t], c[:batch_size_t])
all_predictions[:batch_size_t, t, :] = predictions
all_alphas_out[:batch_size_t, t, :] = alpha_out
return {"predictions": all_predictions, "alpha_out": all_alphas_out}
def generate_output_index(self, input_word, encoder_out, h, c):
predictions, h, c, _ = self.decoder(
input_word, encoder_out, h, c)
current_output_index = self._convert_prediction_to_output(predictions)
return current_output_index, h, c
| [
"[email protected]"
] | |
4a2fb9f16742d3718a5490b53140ab00b8c65f5a | f6ed7bc808f5536bc77166fe5c3571e5c028f308 | /neptune/internal/common/utils/files.py | c694ca7a2144941196bdd9a68e8df828c7b73206 | [
"Apache-2.0"
] | permissive | jiji-online/neptune-cli | d086bb59725b7545f3e0f80bd89e8f99ff3851a0 | 50cf680a80d141497f9331ab7cdaee49fcb90b0c | refs/heads/main | 2023-07-18T17:56:10.671562 | 2021-09-14T07:54:13 | 2021-09-14T07:54:13 | 406,275,162 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 858 | py | #
# Copyright (c) 2016, deepsense.io
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import errno
import os
import io
def create_empty_file(path):
io.open(path, 'w').close()
def create_dir_if_nonexistent(dir_path):
try:
os.makedirs(dir_path)
except OSError as exception:
if exception.errno != errno.EEXIST:
raise
| [
"[email protected]"
] | |
3034b0e2dc2f6fae511f9a14f1f4e669ce99bf43 | b8e6b9ac7d92941e3b8ee2f97952ff8048d9fed6 | /django_app/config/celery.py | 5f2ee85922eaca996254f9a6b2d7da3b932d1cf8 | [] | no_license | LeeHanYeong/Elasticbeanstalk-Celery-Redis-Elasticache | 00e571d90141ecf987ed5d86a90797f3de7ccae1 | 63c0b8f519a2c90f82d796affa884d3b1a440732 | refs/heads/master | 2020-06-26T10:00:30.498364 | 2017-11-16T19:59:16 | 2017-11-16T19:59:16 | 97,014,281 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 319 | py | import os
from celery import Celery
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'config.settings.debug')
app = Celery('config')
app.config_from_object('django.conf:settings', namespace='CELERY')
app.autodiscover_tasks()
@app.task(bind=True)
def debug_task(self):
print('Request: {0!r}'.format(self.request))
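# Example (an assumed invocation; "config" is this module's package name):
#   celery -A config worker --loglevel=info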
| [
"[email protected]"
] | |
21bc6ecaca89a962b6c47a14a1809fc53cb6ae5e | ed90fcbfd1112545fa742e07131159bb3a68246a | /smry/server-auth/ls/google-cloud-sdk/lib/googlecloudsdk/compute/subcommands/firewall_rules/list.py | 8d62e0bafc6eecf56466830a10565be1b2193749 | [
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] | permissive | wemanuel/smry | 2588f2a2a7b7639ebb6f60b9dc2833f1b4dee563 | b7f676ab7bd494d71dbb5bda1d6a9094dfaedc0a | refs/heads/master | 2021-01-10T21:56:55.226753 | 2015-08-01T13:37:06 | 2015-08-01T13:37:06 | 40,047,329 | 0 | 1 | Apache-2.0 | 2020-07-24T18:32:40 | 2015-08-01T13:26:17 | Python | UTF-8 | Python | false | false | 441 | py | # Copyright 2014 Google Inc. All Rights Reserved.
"""Command for listing firewall rules."""
from googlecloudsdk.compute.lib import base_classes
class List(base_classes.GlobalLister):
"""List Google Compute Engine firewall rules."""
@property
def service(self):
return self.compute.firewalls
@property
def resource_type(self):
return 'firewalls'
List.detailed_help = base_classes.GetGlobalListerHelp('firewall rules')
| [
"[email protected]"
] | |
ef8050413a53ba46fbf7838ae42ee7b94417348b | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03304/s635103583.py | df29bcaa3b5769da632eaa3ea1863d89e01068ee | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 851 | py | import sys
import math
import copy
from heapq import heappush, heappop, heapify
from functools import cmp_to_key
from bisect import bisect_left, bisect_right
from collections import defaultdict, deque, Counter
# sys.setrecursionlimit(1000000)
# input aliases
input = sys.stdin.readline
getS = lambda: input().strip()
getN = lambda: int(input())
getList = lambda: list(map(int, input().split()))
getZList = lambda: [int(x) - 1 for x in input().split()]
INF = float("inf")
MOD = 10**9 + 7
divide = lambda x: pow(x, MOD-2, MOD)
def solve():
    n, m, d = getList()
    # Count ordered pairs (x, y) in [1, n]^2 with |x - y| == d.
    if d == 0:
        each = n
    else:
        each = (n - d) * 2
    # Expected number of adjacent positions whose values differ by d:
    # (m - 1) adjacent positions, each a match with probability each / n^2.
    ans = each * (m - 1) / (n * n)
    print(ans)
def main():
n = getN()
for _ in range(n):
solve()
return
if __name__ == "__main__":
# main()
solve() | [
"[email protected]"
] | |
6470e5104a790f16c984bcde668a934317ac2e95 | 1e8142725aa06844713d18fa38c6779aff8f8171 | /tndata_backend/notifications/migrations/0018_gcmmessage_queue_id.py | 64dd06bd40b6ed39edc8bd2ae0a208bec73ed197 | [
"MIT"
] | permissive | tndatacommons/tndata_backend | 8f4db3e5cf5272901c9087a85e21d7560240bb3b | 3d22179c581ab3da18900483930d5ecc0a5fca73 | refs/heads/master | 2020-12-03T07:53:17.339769 | 2017-03-27T06:18:58 | 2017-03-27T06:18:58 | 68,407,220 | 1 | 2 | null | 2017-03-27T06:18:59 | 2016-09-16T18:59:16 | Python | UTF-8 | Python | false | false | 437 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('notifications', '0017_auto_20151217_2000'),
]
operations = [
migrations.AddField(
model_name='gcmmessage',
name='queue_id',
field=models.CharField(max_length=128, default='', blank=True),
),
]
| [
"[email protected]"
] | |
7b122931a2d1665b2d483991ac0a54efe644b77e | 612325535126eaddebc230d8c27af095c8e5cc2f | /src/net/log/stitch_net_log_files.py | aea6d7b0f58ca282bcb4daf53c9837ae3b963544 | [
"BSD-3-Clause"
] | permissive | TrellixVulnTeam/proto-quic_1V94 | 1a3a03ac7a08a494b3d4e9857b24bb8f2c2cd673 | feee14d96ee95313f236e0f0e3ff7719246c84f7 | refs/heads/master | 2023-04-01T14:36:53.888576 | 2019-10-17T02:23:04 | 2019-10-17T02:23:04 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,998 | py | #!/usr/bin/env python
# Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
'''
This script stitches the NetLog files in a specified directory.
The complete NetLog will be written to net-internals-log.json in the directory
passed as argument to --path.
'''
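# Example invocation (hypothetical directory and file count):
#   python stitch_net_log_files.py --path /tmp/netlog --num_files 3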
import argparse, os
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--path', action='store',
help="Specifies the complete filepath of the directory where the log "
"files are located.")
# TODO(dconnol): Automatically pull all event files matching the format
# event_file_<num>.json and remove the num_files argument.
parser.add_argument('--num_files', action='store',
help="Specifies the number of event files (not including the constants "
"file or the end_netlog file) that need need to be stitched together. "
"The number of event files passed to the script must not be greater "
"than the number of event files in the directory.")
args = parser.parse_args()
num_files = int(args.num_files)
filepath = args.path
if filepath[-1:] != "/":
filepath += "/"
os.chdir(filepath)
with open("net-internals-log.json", "w") as stitched_file:
try:
file = open("constants.json")
with file:
for line in file:
stitched_file.write(line)
except IOError:
os.remove("net-internals-log.json")
print "File \"constants.json\" not found."
return
    events_written = False
for i in range(num_files):
try:
file = open("event_file_%d.json" % i)
with file:
if not events_written:
            line = file.readline()
events_written = True
for next_line in file:
if next_line.strip() == "":
line += next_line
else:
stitched_file.write(line)
line = next_line
except IOError:
os.remove("net-internals-log.json")
print "File \"event_file_%d.json\" not found." % i
return
# Remove hanging comma from last event
# TODO(dconnol): Check if the last line is a valid JSON object. If not,
# do not write the line to file. This handles incomplete logs.
line = line.strip()
if line[-1:] == ",":
stitched_file.write(line[:-1])
elif line:
raise ValueError('Last event is not properly formed')
try:
file = open("end_netlog.json")
with file:
for line in file:
stitched_file.write(line)
except IOError:
os.remove("net-internals-log.json")
print "File \"end_netlog\" not found."
return
# Delete old NetLog files
for i in range (num_files):
os.remove("event_file_%d.json" % i)
os.remove("constants.json")
os.remove("end_netlog.json")
if __name__ == "__main__":
main()
| [
"[email protected]"
] | |
c279470529493caf6dca7d09df9d96753ca09dc2 | d4280eca1a9badb0a4ad2aa22598616eedece373 | /Automate The Boring Stuff With Python/03/04-sameName.py | c723f7c075712db216aaaf5d638a7face06363b8 | [] | no_license | Little-Captain/py | 77ec12bb2aaafe9f709a70831266335b03f63663 | 74ba3c3449e7b234a77500a17433e141e68169f7 | refs/heads/master | 2021-06-09T11:33:23.205388 | 2019-11-22T01:17:44 | 2019-11-22T01:17:44 | 131,844,918 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 193 | py | #!/usr/bin/env python
def spam():
eggs = 'spam local'
print(eggs)
def bacon():
eggs = 'bacon local'
print(eggs)
spam()
print(eggs)
eggs = 'global'
bacon()
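# Expected output so far: 'bacon local', 'spam local', 'bacon local';
# the final print below shows 'global', since each print resolves `eggs`
# in its own (local or global) scope.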
print(eggs) | [
"[email protected]"
] | |
1bbc11411983c07e73a6f0ab5f9eff30995621b0 | a6f8aae8f552a06b82fe018246e8dcd65c27e632 | /pr089/__init__.py | 159be3c3aebc785921f28b14145490cf183d1d97 | [] | no_license | P4SSER8Y/ProjectEuler | 2339ee7676f15866ceb38cad35e21ead0dad57e9 | 15d1b681e22133fc562a08b4e8e41e582ca8e625 | refs/heads/master | 2021-06-01T09:22:11.165235 | 2016-05-06T14:02:40 | 2016-05-06T14:02:40 | 46,722,844 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 57 | py | from .pr089 import run as pyRun
run = pyRun
#run = cRun
| [
"[email protected]"
] | |
269e0ffaa05096b410f812324e38587094ee38df | 24a52b2b363417a8bdfeb8f669ee53b7ee19f4d6 | /playa/conf.py | 7579c8aef6242a240ea812a489b5517cb84d0ca7 | [
"Apache-2.0"
] | permissive | isabella232/playa | e203997e2660babe333d4915f294530cde57ccb0 | a93335e592aa596645a60497a7c030a36ae7fec2 | refs/heads/master | 2023-03-18T23:51:35.577746 | 2011-07-15T01:07:53 | 2011-07-15T01:07:53 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 588 | py | """
playa.conf
~~~~~~~~~~
Represents the default values for all settings.
:copyright: (c) 2011 DISQUS.
:license: Apache License 2.0, see LICENSE for more details.
"""
import os
import os.path
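# PlayaConfig below holds the defaults; a deployment would typically override
# them by subclassing, e.g. (a hypothetical sketch, not part of this package):
#   class ProductionConfig(PlayaConfig):
#       DEBUG = False
#       AUDIO_PATHS = ['/srv/music']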
class PlayaConfig(object):
ROOT = os.path.normpath(os.path.dirname(__file__))
DEBUG = True
AUDIO_PATHS = []
WEB_HOST = '0.0.0.0'
WEB_PORT = 9000
WEB_LOG_FILE = os.path.join(ROOT, 'playa.log')
WEB_PID_FILE = os.path.join(ROOT, 'playa.pid')
DATA_PATH = os.path.join(ROOT, 'data')
SECRET_KEY = '_#(wkvb#@%%!x-dd!xt&i-1g5rylz4q&t6%m5u@3&7hyuqd437' | [
"[email protected]"
] | |
2013df2811af303bf28b622389c22251a0e40bff | 99cd943ad5deed305608a516c0596cf3e1b552e5 | /src/py/VendingMachine/vending_machine1.py | c9767d0aef06e0102daeaf59a770b9d458689ecd | [] | no_license | koukyo1994/algorithms | da8beebafe95768890a88babdba5951b01a3f975 | 6cb3350f89ddbc244071c1bc3e1a10ec9e0760ed | refs/heads/master | 2021-06-23T19:04:22.618607 | 2021-04-24T08:33:01 | 2021-04-24T08:33:01 | 200,551,709 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 639 | py | import sys
if __name__ == "__main__":
insert_price = input("insert: ")
if not insert_price.isdecimal():
print("整数を入力してください")
sys.exit()
product_price = input("product: ")
if not product_price.isdecimal():
print("整数を入力してください")
sys.exit()
change = int(insert_price) - int(product_price)
if change < 0:
print("金額が不足しています")
sys.exit()
coins = [5000, 1000, 500, 100, 50, 10, 5, 1]
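    # Greedy change-making, largest denomination first: e.g. change = 262
    # yields 100 x 2, 50 x 1, 10 x 1, 1 x 2. Note the table omits the
    # 10000- and 2000-yen notes, so large change is paid out in 5000s.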
for coin in coins:
n_coin = change // coin
change = change % coin
print(f"{coin}: {n_coin}")
| [
"[email protected]"
] | |
9492454662d9baa6149dbe4c257a23c9a281b4af | 4fc6fdad6c0f52ff0f15186e411b106b7500fd4d | /osipkd/views/tu_ppkd/ap_advist.py | 18a3f920b7295924590b5854cd16890da12ceafd | [] | no_license | aagusti/osipkd-pdpt | 03e01e327d7df26da4f4dcdd82a35ba8cfa1ce40 | 130abc77292f2f3023da6f8b785fb7ccf337a374 | refs/heads/master | 2021-01-10T14:44:35.409216 | 2015-06-01T08:19:34 | 2015-06-01T08:19:34 | 36,646,345 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,868 | py | import os
import uuid
from osipkd.tools import row2dict, xls_reader
from datetime import datetime,date
from sqlalchemy import not_, func
from pyramid.view import (view_config,)
from pyramid.httpexceptions import ( HTTPFound, )
import colander
from deform import (Form, widget, ValidationFailure, )
from osipkd.models import DBSession
from osipkd.models.apbd_anggaran import Kegiatan, KegiatanSub, KegiatanItem
from osipkd.models.pemda_model import Unit
from osipkd.models.apbd_tu import Sp2d, Advist
from datatables import ColumnDT, DataTables
from osipkd.views.base_view import BaseViews
SESS_ADD_FAILED = 'Add ap-advist failed'
SESS_EDIT_FAILED = 'Edit ap-advist failed'
class view_ap_advist_ppkd(BaseViews):
@view_config(route_name="ap-advist", renderer="templates/ap-advist/list.pt")
def view_list(self):
ses = self.request.session
req = self.request
params = req.params
url_dict = req.matchdict
return dict(project='EIS',
)
##########
# Action #
##########
@view_config(route_name='ap-advist-act', renderer='json',
permission='read')
def view_act(self):
ses = self.request.session
req = self.request
params = req.params
url_dict = req.matchdict
        if url_dict['act']=='grid':
            pk_id = 'id' in params and params['id'] and int(params['id']) or 0
columns = []
columns.append(ColumnDT('id'))
columns.append(ColumnDT('kode'))
columns.append(ColumnDT('tanggal', filter=self._DTstrftime))
columns.append(ColumnDT('nama'))
columns.append(ColumnDT('nominal'))
query = DBSession.query(Advist
).filter(Advist.tahun_id==ses['tahun'],
Advist.unit_id==ses['unit_id'] ,
).order_by(Advist.kode.asc())
rowTable = DataTables(req, Advist, query, columns)
return rowTable.output_result()
#######
# Add #
#######
def form_validator(self, form, value):
def err_kegiatan():
raise colander.Invalid(form,
                'Kegiatan with that sequence number already exists')
def get_form(self, class_form):
schema = class_form(validator=self.form_validator)
schema.request = self.request
return Form(schema, buttons=('simpan','batal'))
def save(self, values, row=None):
if not row:
row = Advist()
row.created = datetime.now()
row.create_uid = self.request.user.id
row.from_dict(values)
row.updated = datetime.now()
row.update_uid = self.request.user.id
row.posted=0
row.disabled = 'disabled' in values and 1 or 0
if not row.kode:
tahun = self.session['tahun']
unit_kd = self.session['unit_kd']
unit_id = self.session['unit_id']
no_urut = Advist.get_norut(tahun, unit_id)+1
no = "0000%d" % no_urut
nomor = no[-5:]
row.kode = "%d" % tahun + "-%s" % unit_kd + "-BUD-%s" % nomor
DBSession.add(row)
DBSession.flush()
return row
def save_request(self, values, row=None):
if 'id' in self.request.matchdict:
values['id'] = self.request.matchdict['id']
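        # Strip the thousands separators ('.') from the formatted nominal,
        # e.g. '1.250.000' -> '1250000', before saving.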
values["nominal"]=values["nominal"].replace('.','')
row = self.save(values, row)
        self.request.session.flash('Advist has been saved.')
return row
def route_list(self):
return HTTPFound(location=self.request.route_url('ap-advist'))
    def session_failed(self, session_name):
        request = self.request
        r = dict(form=request.session[session_name])
        del request.session[session_name]
        return r
@view_config(route_name='ap-advist-add', renderer='templates/ap-advist/add.pt',
permission='add')
def view_add(self):
request=self.request
form = self.get_form(AddSchema)
if request.POST:
if 'simpan' in request.POST:
controls = request.POST.items()
controls_dicted = dict(controls)
                # Check whether the kode already exists
if not controls_dicted['kode']=='':
a = form.validate(controls)
b = a['kode']
c = "%s" % b
cek = DBSession.query(Advist).filter(Advist.kode==c).first()
                    if cek:
                        self.request.session.flash('Advist code already exists.', 'error')
return HTTPFound(location=self.request.route_url('ap-advist-add'))
try:
c = form.validate(controls)
except ValidationFailure, e:
return dict(form=form)
row = self.save_request(controls_dicted)
return HTTPFound(location=request.route_url('ap-advist-edit',id=row.id))
return self.route_list()
elif SESS_ADD_FAILED in request.session:
del request.session[SESS_ADD_FAILED]
return dict(form=form)
########
# Edit #
########
def query_id(self):
return DBSession.query(Advist).filter(Advist.id==self.request.matchdict['id'])
    def id_not_found(self):
        request = self.request
        msg = 'Advist ID %s not found.' % request.matchdict['id']
        request.session.flash(msg, 'error')
        return self.route_list()
@view_config(route_name='ap-advist-edit', renderer='templates/ap-advist/add.pt',
permission='edit')
def view_edit(self):
request = self.request
row = self.query_id().first()
uid = row.id
kode = row.kode
if not row:
            return self.id_not_found()
form = self.get_form(EditSchema)
if request.POST:
if 'simpan' in request.POST:
controls = request.POST.items()
                # Check whether the kode already exists
a = form.validate(controls)
b = a['kode']
c = "%s" % b
cek = DBSession.query(Advist).filter(Advist.kode==c).first()
if cek:
kode1 = DBSession.query(Advist).filter(Advist.id==uid).first()
d = kode1.kode
if d!=c:
                        self.request.session.flash('Advist code already exists', 'error')
return HTTPFound(location=request.route_url('ap-advist-edit',id=row.id))
try:
c = form.validate(controls)
except ValidationFailure, e:
return dict(form=form)
self.save_request(dict(controls), row)
return self.route_list()
elif SESS_EDIT_FAILED in request.session:
del request.session[SESS_EDIT_FAILED]
return dict(form=form)
values = row.to_dict()
form.set_appstruct(values)
return dict(form=form)
##########
# Delete #
##########
@view_config(route_name='ap-advist-delete', renderer='templates/ap-advist/delete.pt',
permission='delete')
def view_delete(self):
q = self.query_id()
row = q.first()
request=self.request
if not row:
            return self.id_not_found()
if row.nominal:
            request.session.flash('Data cannot be deleted because it still has items', 'error')
return self.route_list()
form = Form(colander.Schema(), buttons=('hapus','cancel'))
values= {}
if request.POST:
if 'hapus' in request.POST:
                msg = '%s with code %s was successfully deleted.' % (request.title, row.kode)
DBSession.query(Advist).filter(Advist.id==request.matchdict['id']).delete()
DBSession.flush()
request.session.flash(msg)
return self.route_list()
return dict(row=row, form=form.render())
class AddSchema(colander.Schema):
unit_id = colander.SchemaNode(
colander.String(),
oid = "unit_id")
tahun_id = colander.SchemaNode(
colander.Integer(),
title="Tahun",
oid = "tahun_id")
kode = colander.SchemaNode(
colander.String(),
missing=colander.drop,
title="No. Advist")
nama = colander.SchemaNode(
colander.String(),
title = "Bank/Tujuan"
)
tanggal = colander.SchemaNode(
colander.Date(),
title = "Tanggal"
)
nominal = colander.SchemaNode(
colander.String(),
missing=colander.drop,
oid="jml_total",
title="Nominal"
)
class EditSchema(AddSchema):
id = colander.SchemaNode(
colander.Integer(),
oid="id")
| [
"[email protected]"
] |