Dataset columns (the flattened table header; each record below is one row):

blob_id: string (40 chars)
directory_id: string (40 chars)
path: string (3-616 chars)
content_id: string (40 chars)
detected_licenses: sequence (0-112 items)
license_type: string (2 classes)
repo_name: string (5-115 chars)
snapshot_id: string (40 chars)
revision_id: string (40 chars)
branch_name: string (777 classes)
visit_date: timestamp[us] (2015-08-06 10:31:46 to 2023-09-06 10:44:38)
revision_date: timestamp[us] (1970-01-01 02:38:32 to 2037-05-03 13:00:00)
committer_date: timestamp[us] (1970-01-01 02:38:32 to 2023-09-06 01:08:06)
github_id: int64 (4.92k to 681M, nullable)
star_events_count: int64 (0 to 209k)
fork_events_count: int64 (0 to 110k)
gha_license_id: string (22 classes)
gha_event_created_at: timestamp[us] (2012-06-04 01:52:49 to 2023-09-14 21:59:50, nullable)
gha_created_at: timestamp[us] (2008-05-22 07:58:19 to 2023-08-21 12:35:19, nullable)
gha_language: string (149 classes)
src_encoding: string (26 classes)
language: string (1 class)
is_vendor: bool (2 classes)
is_generated: bool (2 classes)
length_bytes: int64 (3 to 10.2M)
extension: string (188 classes)
content: string (3-10.2M chars)
authors: sequence (1 item)
author_id: string (1-132 chars)
0d51133b3bdbcb43ffd745eeb543c357ff5a4faa | 6443a587e16658a58b884a2e5c6dbbab1be50674 | /Design_Patterns/MVC.py | 41527a426a427ef6d6f168cc407aa49bd64086ef | [] | no_license | xiaochenchen-PITT/CC150_Python | a6cbe213946851639a827068961934920b6c3e57 | e96394265d8a41a1b4558d5d2b34aa34af99662f | refs/heads/master | 2020-12-24T17:18:14.606804 | 2014-11-08T21:48:20 | 2014-11-08T21:48:20 | 25,654,100 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,175 | py |

import Tkinter as tk
class Observable:
"""class Observable defines the infrastructure of
model/view register and notification"""
def __init__(self, InitialValue = 0):
self.data = InitialValue
self.observer_list = []
def RegisterObserver(self, observer):
self.observer_list.append(observer)
def ObserverNotify(self):
for ov in self.observer_list:
ov.update(self.data)
def get(self):
return self.data
class Model(Observable):
"""Model extends its super class Observable and purely just functions"""
def __init__(self):
Observable.__init__(self)
def AddMoney(self, value):
self.data = self.get() + value
Observable.ObserverNotify(self)
def SubMoney(self, value):
self.data = self.get() - value
Observable.ObserverNotify(self)
class View(tk.Toplevel):
"""viewis the visual presentation of data"""
def __init__(self, master):
tk.Toplevel.__init__(self, master)
self.up_frame = tk.Frame(self)
self.up_frame.pack()
self.bottom_frame = tk.Frame(self)
self.bottom_frame.pack(side = 'bottom')
self.label = tk.Label(self.up_frame, text = 'My Money')
self.label.pack(side = 'left')
self.moneyDisplay = tk.Entry(self.up_frame, width = 8)
self.moneyDisplay.pack(side = 'left')
self.addButton = tk.Button(self.bottom_frame, text = 'Add', width = 8)
self.addButton.pack(side = 'left')
self.subButton = tk.Button(self.bottom_frame, text = 'Sub', width = 8)
self.subButton.pack(side = 'left')
def update(self, money):
self.moneyDisplay.delete(0, 'end')
self.moneyDisplay.insert('end', str(money))
class Controller:
"""Controller is the interconnection of model and view"""
def __init__(self, root):
self.model = Model()
self.view = View(root)
self.model.RegisterObserver(self.view)
self.view.addButton.config(command = self.AddMoney)
self.view.subButton.config(command = self.SubMoney)
self.MoneyChanged(self.model.get())
def AddMoney(self):
self.model.AddMoney(10)
def SubMoney(self):
self.model.SubMoney(10)
def MoneyChanged(self, money):
self.view.update(money)
if __name__ == '__main__':
root = tk.Tk()
root.withdraw()
whatever = Controller(root)
root.mainloop()
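# Added commentary (not in the original file): a minimal sketch of the same
# observer round trip without Tk, assuming a duck-typed view object with an
# update(money) method. Clicking 'Add' above follows the identical path:
# Button -> Controller.AddMoney -> Model.AddMoney -> ObserverNotify -> View.update.
#
# class PrintView:
#     def update(self, money):
#         print('balance is now', money)
#
# model = Model()
# model.RegisterObserver(PrintView())
# model.AddMoney(10)  # prints: balance is now 10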
587a5111f581fa6a8b63ab0fe2cce3dccbb8d97c | cb1d59b57510d222efcfcd37e7e4e919b6746d6e | /python/fizz_buzz.py | 56c3fdeaade6e0122efa3841053aa624a1f63b6c | [] | no_license | pzmrzy/LeetCode | 416adb7c1066bc7b6870c6616de02bca161ef532 | ef8c9422c481aa3c482933318c785ad28dd7703e | refs/heads/master | 2021-06-05T14:32:33.178558 | 2021-05-17T03:35:49 | 2021-05-17T03:35:49 | 49,551,365 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 254 | py |

class Solution(object):
def fizzBuzz(self, n):
"""
:type n: int
:rtype: List[str]
"""
return ["FizzBuzz" if n % 15 == 0 else "Fizz" if n % 3 == 0 else "Buzz" if n % 5 == 0 else str(n) for n in range(1, n + 1)]
dff2d7ca09d52a201ba58b3ce5d4779e7271cb95 | 0c8214d0d7827a42225b629b7ebcb5d2b57904b0 | /practice/P001_Matrix/main.py | e52128512ba48dafcdf746dde3478eb9df8fa36b | [] | no_license | mertturkmenoglu/python-examples | 831b54314410762c73fe2b9e77aee76fe32e24da | 394072e1ca3e62b882d0d793394c135e9eb7a56e | refs/heads/master | 2020-05-04T15:42:03.816771 | 2020-01-06T19:37:05 | 2020-01-06T19:37:05 | 179,252,826 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 175 | py |

# Practice 001: Read and print matrix
row = int(input('Row number: '))
col = int(input('Col number: '))
matrix = [[0 for _ in range(col)] for _ in range(row)]
print(matrix)
3af938f6309801fbf139369b23994e67acc3176f | 7949f96ee7feeaa163608dbd256b0b76d1b89258 | /toontown/safezone/DistributedDDTreasure.py | 97aff6809e6d0c59e21d67a112ed9c6dee2141f7 | [] | no_license | xxdecryptionxx/ToontownOnline | 414619744b4c40588f9a86c8e01cb951ffe53e2d | e6c20e6ce56f2320217f2ddde8f632a63848bd6b | refs/heads/master | 2021-01-11T03:08:59.934044 | 2018-07-27T01:26:21 | 2018-07-27T01:26:21 | 71,086,644 | 8 | 10 | null | 2018-06-01T00:13:34 | 2016-10-17T00:39:41 | Python | false | false | 368 | py |

# File: t (Python 2.4)
import DistributedSZTreasure
class DistributedDDTreasure(DistributedSZTreasure.DistributedSZTreasure):
def __init__(self, cr):
DistributedSZTreasure.DistributedSZTreasure.__init__(self, cr)
self.modelPath = 'phase_6/models/props/starfish_treasure'
self.grabSoundPath = 'phase_4/audio/sfx/SZ_DD_treasure.mp3'
6675c1f8bde9a8b0c7cdeeb0041b9769069edcca | d308fffe3db53b034132fb1ea6242a509f966630 | /pirates/effects/LureGlow.py | e9c4eaabe605d5c76b88c0746c2a90f554148add | [
"BSD-3-Clause"
] | permissive | rasheelprogrammer/pirates | 83caac204965b77a1b9c630426588faa01a13391 | 6ca1e7d571c670b0d976f65e608235707b5737e3 | refs/heads/master | 2020-03-18T20:03:28.687123 | 2018-05-28T18:05:25 | 2018-05-28T18:05:25 | 135,193,362 | 3 | 2 | null | null | null | null | UTF-8 | Python | false | false | 1,824 | py |

# uncompyle6 version 3.2.0
# Python bytecode 2.4 (62061)
# Decompiled from: Python 2.7.14 (v2.7.14:84471935ed, Sep 16 2017, 20:19:30) [MSC v.1500 32 bit (Intel)]
# Embedded file name: pirates.effects.LureGlow
from pandac.PandaModules import *
from direct.interval.IntervalGlobal import *
from EffectController import EffectController
from PooledEffect import PooledEffect
class LureGlow(PooledEffect, EffectController):
__module__ = __name__
def __init__(self):
PooledEffect.__init__(self)
EffectController.__init__(self)
model = loader.loadModel('models/effects/particleCards')
self.effectModel = model.find('**/particleSparkle')
self.effectModel.reparentTo(self)
self.effectColor = Vec4(1, 1, 1, 1)
self.setAttrib(ColorBlendAttrib.make(ColorBlendAttrib.MAdd, ColorBlendAttrib.OIncomingAlpha, ColorBlendAttrib.OOne))
self.setTransparency(True)
self.setColorScaleOff()
self.setBillboardPointEye()
self.setDepthWrite(0)
self.setLightOff()
self.setFogOff()
self.effectModel.hide()
def createTrack(self):
self.effectModel.hide()
self.effectModel.setColorScale(self.effectColor)
pulseIval = Sequence(LerpScaleInterval(self.effectModel, duration=0.15, scale=3.5), LerpScaleInterval(self.effectModel, duration=0.15, scale=1.0))
self.startEffect = Sequence(Func(self.effectModel.show), Func(pulseIval.loop))
self.endEffect = Sequence(Func(pulseIval.finish), Func(self.cleanUpEffect))
self.track = Sequence(self.startEffect, Wait(1.0), self.endEffect)
def cleanUpEffect(self):
EffectController.cleanUpEffect(self)
self.checkInEffect(self)
def destroy(self):
EffectController.destroy(self)
        PooledEffect.destroy(self)
d53af3ccac95736c531c5c23a3f1ef96f5272802 | 1e8ec582b0e5d880a17a823aa5bffe07371222f3 | /cmake/tools/patch.py | 9ae3cfb622a198c02382662409bcc6e3b60b00c9 | [] | no_license | muhkuh-sys/com.github.vsergeev-lua-periphery | 51f18836acbad992cb203a578aaed7fac2a1f9a8 | 9ffb8e784339b791dcb51fbb6e3089ab4b9f7384 | refs/heads/master | 2023-08-05T03:13:59.340320 | 2023-07-26T07:30:15 | 2023-07-26T07:30:15 | 202,534,978 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 38,134 | py |

#!/usr/bin/env python
"""
Patch utility to apply unified diffs
Brute-force line-by-line non-recursive parsing
Copyright (c) 2008-2016 anatoly techtonik
Available under the terms of MIT license
"""
from __future__ import print_function
__author__ = "anatoly techtonik <[email protected]>"
__version__ = "1.16"
__license__ = "MIT"
__url__ = "https://github.com/techtonik/python-patch"
import copy
import logging
import re
# cStringIO doesn't support unicode in 2.5
try:
from StringIO import StringIO
except ImportError:
from io import BytesIO as StringIO # python 3
try:
import urllib2 as urllib_request
except ImportError:
import urllib.request as urllib_request
from os.path import exists, isfile, abspath
import os
import posixpath
import shutil
import sys
PY3K = sys.version_info >= (3, 0)
# PEP 3114
if not PY3K:
compat_next = lambda gen: gen.next()
else:
compat_next = lambda gen: gen.__next__()
def tostr(b):
""" Python 3 bytes encoder. Used to print filename in
diffstat output. Assumes that filenames are in utf-8.
"""
if not PY3K:
return b
# [ ] figure out how to print non-utf-8 filenames without
# information loss
return b.decode('utf-8')
#------------------------------------------------
# Logging is controlled by logger named after the
# module name (e.g. 'patch' for patch.py module)
logger = logging.getLogger(__name__)
debug = logger.debug
info = logger.info
warning = logger.warning
class NullHandler(logging.Handler):
""" Copied from Python 2.7 to avoid getting
`No handlers could be found for logger "patch"`
http://bugs.python.org/issue16539
"""
def handle(self, record):
pass
def emit(self, record):
pass
def createLock(self):
self.lock = None
streamhandler = logging.StreamHandler()
# initialize logger itself
logger.addHandler(NullHandler())
debugmode = False
def setdebug():
global debugmode, streamhandler
debugmode = True
loglevel = logging.DEBUG
logformat = "%(levelname)8s %(message)s"
logger.setLevel(loglevel)
if streamhandler not in logger.handlers:
# when used as a library, streamhandler is not added
# by default
logger.addHandler(streamhandler)
streamhandler.setFormatter(logging.Formatter(logformat))
#------------------------------------------------
# Constants for Patch/PatchSet types
DIFF = PLAIN = "plain"
GIT = "git"
HG = MERCURIAL = "mercurial"
SVN = SUBVERSION = "svn"
# mixed type is only actual when PatchSet contains
# Patches of different type
MIXED = MIXED = "mixed"
#------------------------------------------------
# Helpers (these could come with Python stdlib)
# x...() function are used to work with paths in
# cross-platform manner - all paths use forward
# slashes even on Windows.
def xisabs(filename):
""" Cross-platform version of `os.path.isabs()`
Returns True if `filename` is absolute on
Linux, OS X or Windows.
"""
if filename.startswith(b'/'): # Linux/Unix
return True
elif filename.startswith(b'\\'): # Windows
return True
elif re.match(b'\\w:[\\\\/]', filename): # Windows
return True
return False
def xnormpath(path):
""" Cross-platform version of os.path.normpath """
# replace escapes and Windows slashes
normalized = posixpath.normpath(path).replace(b'\\', b'/')
# fold the result
return posixpath.normpath(normalized)
def xstrip(filename):
""" Make relative path out of absolute by stripping
prefixes used on Linux, OS X and Windows.
This function is critical for security.
"""
while xisabs(filename):
# strip windows drive with all slashes
if re.match(b'\\w:[\\\\/]', filename):
filename = re.sub(b'^\\w+:[\\\\/]+', b'', filename)
# strip all slashes
elif re.match(b'[\\\\/]', filename):
filename = re.sub(b'^[\\\\/]+', b'', filename)
return filename
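# Added examples (assumed inputs, not from upstream): xstrip() repeatedly
# removes absolute-path prefixes until the name is relative, e.g.
#   xstrip(b'/etc/passwd')  -> b'etc/passwd'
#   xstrip(b'c:\\boot.ini') -> b'boot.ini'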
#-----------------------------------------------
# Main API functions
def fromfile(filename):
""" Parse patch file. If successful, returns
PatchSet() object. Otherwise returns False.
"""
patchset = PatchSet()
debug("reading %s" % filename)
fp = open(filename, "rb")
res = patchset.parse(fp)
fp.close()
if res == True:
return patchset
return False
def fromstring(s):
""" Parse text string and return PatchSet()
object (or False if parsing fails)
"""
ps = PatchSet( StringIO(s) )
if ps.errors == 0:
return ps
return False
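# Added usage sketch (hypothetical diff text, not from upstream):
#   ps = fromstring(b'--- a\n+++ b\n@@ -1 +1 @@\n-old\n+new\n')
#   if ps: print(ps.diffstat())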
def fromurl(url):
""" Parse patch from an URL, return False
      if an error occurred. Note that this also
can throw urlopen() exceptions.
"""
ps = PatchSet( urllib_request.urlopen(url) )
if ps.errors == 0:
return ps
return False
# --- Utility functions ---
# [ ] reuse more universal pathsplit()
def pathstrip(path, n):
""" Strip n leading components from the given path """
pathlist = [path]
while os.path.dirname(pathlist[0]) != b'':
pathlist[0:1] = os.path.split(pathlist[0])
return b'/'.join(pathlist[n:])
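# Added example (assumed input, not from upstream):
#   pathstrip(b'dir/sub/file.c', 1) -> b'sub/file.c'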
# --- /Utility function ---
class Hunk(object):
""" Parsed hunk data container (hunk starts with @@ -R +R @@) """
def __init__(self):
self.startsrc=None #: line count starts with 1
self.linessrc=None
self.starttgt=None
self.linestgt=None
self.invalid=False
self.desc=''
self.text=[]
# def apply(self, estream):
# """ write hunk data into enumerable stream
# return strings one by one until hunk is
# over
#
# enumerable stream are tuples (lineno, line)
# where lineno starts with 0
# """
# pass
class Patch(object):
""" Patch for a single file.
If used as an iterable, returns hunks.
"""
def __init__(self):
self.source = None
self.target = None
self.hunks = []
self.hunkends = []
self.header = []
self.type = None
def __iter__(self):
for h in self.hunks:
yield h
class PatchSet(object):
""" PatchSet is a patch parser and container.
When used as an iterable, returns patches.
"""
def __init__(self, stream=None):
# --- API accessible fields ---
# name of the PatchSet (filename or ...)
self.name = None
# patch set type - one of constants
self.type = None
# list of Patch objects
self.items = []
self.errors = 0 # fatal parsing errors
self.warnings = 0 # non-critical warnings
# --- /API ---
if stream:
self.parse(stream)
def __len__(self):
return len(self.items)
def __iter__(self):
for i in self.items:
yield i
def parse(self, stream):
""" parse unified diff
return True on success
"""
lineends = dict(lf=0, crlf=0, cr=0)
nexthunkno = 0 #: even if index starts with 0 user messages number hunks from 1
p = None
hunk = None
# hunkactual variable is used to calculate hunk lines for comparison
hunkactual = dict(linessrc=None, linestgt=None)
class wrapumerate(enumerate):
"""Enumerate wrapper that uses boolean end of stream status instead of
StopIteration exception, and properties to access line information.
"""
def __init__(self, *args, **kwargs):
# we don't call parent, it is magically created by __new__ method
self._exhausted = False
self._lineno = False # after end of stream equal to the num of lines
self._line = False # will be reset to False after end of stream
def next(self):
"""Try to read the next line and return True if it is available,
False if end of stream is reached."""
if self._exhausted:
return False
try:
self._lineno, self._line = compat_next(super(wrapumerate, self))
except StopIteration:
self._exhausted = True
self._line = False
return False
return True
@property
def is_empty(self):
return self._exhausted
@property
def line(self):
return self._line
@property
def lineno(self):
return self._lineno
# define states (possible file regions) that direct parse flow
headscan = True # start with scanning header
filenames = False # lines starting with --- and +++
hunkhead = False # @@ -R +R @@ sequence
hunkbody = False #
hunkskip = False # skipping invalid hunk mode
hunkparsed = False # state after successfully parsed hunk
# regexp to match start of hunk, used groups - 1,3,4,6
re_hunk_start = re.compile(br"^@@ -(\d+)(,(\d+))? \+(\d+)(,(\d+))? @@")
self.errors = 0
# temp buffers for header and filenames info
header = []
srcname = None
tgtname = None
# start of main cycle
# each parsing block already has line available in fe.line
fe = wrapumerate(stream)
while fe.next():
# -- deciders: these only switch state to decide who should process
# -- line fetched at the start of this cycle
if hunkparsed:
hunkparsed = False
if re_hunk_start.match(fe.line):
hunkhead = True
elif fe.line.startswith(b"--- "):
filenames = True
else:
headscan = True
# -- ------------------------------------
# read out header
if headscan:
while not fe.is_empty and not fe.line.startswith(b"--- "):
header.append(fe.line)
fe.next()
if fe.is_empty:
if p == None:
debug("no patch data found") # error is shown later
self.errors += 1
else:
info("%d unparsed bytes left at the end of stream" % len(b''.join(header)))
self.warnings += 1
# TODO check for \No new line at the end..
# TODO test for unparsed bytes
# otherwise error += 1
# this is actually a loop exit
continue
headscan = False
# switch to filenames state
filenames = True
line = fe.line
lineno = fe.lineno
# hunkskip and hunkbody code skipped until definition of hunkhead is parsed
if hunkbody:
# [x] treat empty lines inside hunks as containing single space
# (this happens when diff is saved by copy/pasting to editor
# that strips trailing whitespace)
if line.strip(b"\r\n") == b"":
debug("expanding empty line in a middle of hunk body")
self.warnings += 1
line = b' ' + line
# process line first
if re.match(b"^[- \\+\\\\]", line):
# gather stats about line endings
if line.endswith(b"\r\n"):
p.hunkends["crlf"] += 1
elif line.endswith(b"\n"):
p.hunkends["lf"] += 1
elif line.endswith(b"\r"):
p.hunkends["cr"] += 1
if line.startswith(b"-"):
hunkactual["linessrc"] += 1
elif line.startswith(b"+"):
hunkactual["linestgt"] += 1
elif not line.startswith(b"\\"):
hunkactual["linessrc"] += 1
hunkactual["linestgt"] += 1
hunk.text.append(line)
# todo: handle \ No newline cases
else:
warning("invalid hunk no.%d at %d for target file %s" % (nexthunkno, lineno+1, p.target))
# add hunk status node
hunk.invalid = True
p.hunks.append(hunk)
self.errors += 1
# switch to hunkskip state
hunkbody = False
hunkskip = True
# check exit conditions
if hunkactual["linessrc"] > hunk.linessrc or hunkactual["linestgt"] > hunk.linestgt:
warning("extra lines for hunk no.%d at %d for target %s" % (nexthunkno, lineno+1, p.target))
# add hunk status node
hunk.invalid = True
p.hunks.append(hunk)
self.errors += 1
# switch to hunkskip state
hunkbody = False
hunkskip = True
elif hunk.linessrc == hunkactual["linessrc"] and hunk.linestgt == hunkactual["linestgt"]:
# hunk parsed successfully
p.hunks.append(hunk)
# switch to hunkparsed state
hunkbody = False
hunkparsed = True
# detect mixed window/unix line ends
ends = p.hunkends
if ((ends["cr"]!=0) + (ends["crlf"]!=0) + (ends["lf"]!=0)) > 1:
warning("inconsistent line ends in patch hunks for %s" % p.source)
self.warnings += 1
if debugmode:
debuglines = dict(ends)
debuglines.update(file=p.target, hunk=nexthunkno)
debug("crlf: %(crlf)d lf: %(lf)d cr: %(cr)d\t - file: %(file)s hunk: %(hunk)d" % debuglines)
# fetch next line
continue
if hunkskip:
if re_hunk_start.match(line):
# switch to hunkhead state
hunkskip = False
hunkhead = True
elif line.startswith(b"--- "):
# switch to filenames state
hunkskip = False
filenames = True
if debugmode and len(self.items) > 0:
debug("- %2d hunks for %s" % (len(p.hunks), p.source))
if filenames:
if line.startswith(b"--- "):
if srcname != None:
# XXX testcase
warning("skipping false patch for %s" % srcname)
srcname = None
# XXX header += srcname
# double source filename line is encountered
# attempt to restart from this second line
re_filename = b"^--- ([^\t]+)"
match = re.match(re_filename, line)
# todo: support spaces in filenames
if match:
srcname = match.group(1).strip()
else:
warning("skipping invalid filename at line %d" % (lineno+1))
self.errors += 1
# XXX p.header += line
# switch back to headscan state
filenames = False
headscan = True
elif not line.startswith(b"+++ "):
if srcname != None:
warning("skipping invalid patch with no target for %s" % srcname)
self.errors += 1
srcname = None
# XXX header += srcname
# XXX header += line
else:
# this should be unreachable
warning("skipping invalid target patch")
filenames = False
headscan = True
else:
if tgtname != None:
# XXX seems to be a dead branch
warning("skipping invalid patch - double target at line %d" % (lineno+1))
self.errors += 1
srcname = None
tgtname = None
# XXX header += srcname
# XXX header += tgtname
# XXX header += line
# double target filename line is encountered
# switch back to headscan state
filenames = False
headscan = True
else:
re_filename = br"^\+\+\+ ([^\t]+)"
match = re.match(re_filename, line)
if not match:
warning("skipping invalid patch - no target filename at line %d" % (lineno+1))
self.errors += 1
srcname = None
# switch back to headscan state
filenames = False
headscan = True
else:
if p: # for the first run p is None
self.items.append(p)
p = Patch()
p.source = srcname
srcname = None
p.target = match.group(1).strip()
p.header = header
header = []
# switch to hunkhead state
filenames = False
hunkhead = True
nexthunkno = 0
p.hunkends = lineends.copy()
continue
if hunkhead:
match = re.match(br"^@@ -(\d+)(,(\d+))? \+(\d+)(,(\d+))? @@(.*)", line)
if not match:
if not p.hunks:
warning("skipping invalid patch with no hunks for file %s" % p.source)
self.errors += 1
# XXX review switch
# switch to headscan state
hunkhead = False
headscan = True
continue
else:
# TODO review condition case
# switch to headscan state
hunkhead = False
headscan = True
else:
hunk = Hunk()
hunk.startsrc = int(match.group(1))
hunk.linessrc = 1
if match.group(3): hunk.linessrc = int(match.group(3))
hunk.starttgt = int(match.group(4))
hunk.linestgt = 1
if match.group(6): hunk.linestgt = int(match.group(6))
hunk.invalid = False
hunk.desc = match.group(7)[1:].rstrip()
hunk.text = []
hunkactual["linessrc"] = hunkactual["linestgt"] = 0
# switch to hunkbody state
hunkhead = False
hunkbody = True
nexthunkno += 1
continue
# /while fe.next()
if p:
self.items.append(p)
if not hunkparsed:
if hunkskip:
warning("warning: finished with errors, some hunks may be invalid")
elif headscan:
if len(self.items) == 0:
warning("error: no patch data found!")
return False
else: # extra data at the end of file
pass
else:
warning("error: patch stream is incomplete!")
self.errors += 1
if len(self.items) == 0:
return False
if debugmode and len(self.items) > 0:
debug("- %2d hunks for %s" % (len(p.hunks), p.source))
# XXX fix total hunks calculation
debug("total files: %d total hunks: %d" % (len(self.items),
sum(len(p.hunks) for p in self.items)))
# ---- detect patch and patchset types ----
for idx, p in enumerate(self.items):
self.items[idx].type = self._detect_type(p)
types = set([p.type for p in self.items])
if len(types) > 1:
self.type = MIXED
else:
self.type = types.pop()
# --------
self._normalize_filenames()
return (self.errors == 0)
def _detect_type(self, p):
""" detect and return type for the specified Patch object
analyzes header and filenames info
NOTE: must be run before filenames are normalized
"""
# check for SVN
# - header starts with Index:
# - next line is ===... delimiter
# - filename is followed by revision number
# TODO add SVN revision
if (len(p.header) > 1 and p.header[-2].startswith(b"Index: ")
and p.header[-1].startswith(b"="*67)):
return SVN
# common checks for both HG and GIT
DVCS = ((p.source.startswith(b'a/') or p.source == b'/dev/null')
and (p.target.startswith(b'b/') or p.target == b'/dev/null'))
# GIT type check
# - header[-2] is like "diff --git a/oldname b/newname"
# - header[-1] is like "index <hash>..<hash> <mode>"
# TODO add git rename diffs and add/remove diffs
# add git diff with spaced filename
# TODO http://www.kernel.org/pub/software/scm/git/docs/git-diff.html
# Git patch header len is 2 min
if len(p.header) > 1:
# detect the start of diff header - there might be some comments before
for idx in reversed(range(len(p.header))):
if p.header[idx].startswith(b"diff --git"):
break
if p.header[idx].startswith(b'diff --git a/'):
if (idx+1 < len(p.header)
and re.match(b'index \\w{7}..\\w{7} \\d{6}', p.header[idx+1])):
if DVCS:
return GIT
# HG check
#
# - for plain HG format header is like "diff -r b2d9961ff1f5 filename"
# - for Git-style HG patches it is "diff --git a/oldname b/newname"
# - filename starts with a/, b/ or is equal to /dev/null
# - exported changesets also contain the header
# # HG changeset patch
# # User [email protected]
# ...
# TODO add MQ
# TODO add revision info
if len(p.header) > 0:
if DVCS and re.match(b'diff -r \\w{12} .*', p.header[-1]):
return HG
if DVCS and p.header[-1].startswith(b'diff --git a/'):
if len(p.header) == 1: # native Git patch header len is 2
return HG
elif p.header[0].startswith(b'# HG changeset patch'):
return HG
return PLAIN
def _normalize_filenames(self):
""" sanitize filenames, normalizing paths, i.e.:
1. strip a/ and b/ prefixes from GIT and HG style patches
2. remove all references to parent directories (with warning)
3. translate any absolute paths to relative (with warning)
        [x] always use forward slashes to be cross-platform
(diff/patch were born as a unix utility after all)
return None
"""
if debugmode:
debug("normalize filenames")
for i,p in enumerate(self.items):
# if debugmode:
# debug(" patch type = " + p.type)
# debug(" source = " + p.source)
# debug(" target = " + p.target)
if p.type in (HG, GIT):
# TODO: figure out how to deal with /dev/null entries
debug("stripping a/ and b/ prefixes")
if p.source != '/dev/null':
if not p.source.startswith(b"a/"):
warning("invalid source filename")
else:
p.source = p.source[2:]
if p.target != '/dev/null':
if not p.target.startswith(b"b/"):
warning("invalid target filename")
else:
p.target = p.target[2:]
p.source = xnormpath(p.source)
p.target = xnormpath(p.target)
sep = b'/' # sep value can be hardcoded, but it looks nice this way
# references to parent are not allowed
if p.source.startswith(b".." + sep):
warning("error: stripping parent path for source file patch no.%d" % (i+1))
self.warnings += 1
while p.source.startswith(b".." + sep):
p.source = p.source.partition(sep)[2]
if p.target.startswith(b".." + sep):
warning("error: stripping parent path for target file patch no.%d" % (i+1))
self.warnings += 1
while p.target.startswith(b".." + sep):
p.target = p.target.partition(sep)[2]
# absolute paths are not allowed
if xisabs(p.source) or xisabs(p.target):
warning("error: absolute paths are not allowed - file no.%d" % (i+1))
self.warnings += 1
if xisabs(p.source):
warning("stripping absolute path from source name '%s'" % p.source)
p.source = xstrip(p.source)
if xisabs(p.target):
warning("stripping absolute path from target name '%s'" % p.target)
p.target = xstrip(p.target)
self.items[i].source = p.source
self.items[i].target = p.target
def diffstat(self):
""" calculate diffstat and return as a string
Notes:
          - original diffstat outputs target filename
- single + or - shouldn't escape histogram
"""
names = []
insert = []
delete = []
delta = 0 # size change in bytes
namelen = 0
maxdiff = 0 # max number of changes for single file
# (for histogram width calculation)
for patch in self.items:
i,d = 0,0
for hunk in patch.hunks:
for line in hunk.text:
if line.startswith(b'+'):
i += 1
delta += len(line)-1
elif line.startswith(b'-'):
d += 1
delta -= len(line)-1
names.append(patch.target)
insert.append(i)
delete.append(d)
namelen = max(namelen, len(patch.target))
maxdiff = max(maxdiff, i+d)
output = ''
statlen = len(str(maxdiff)) # stats column width
for i,n in enumerate(names):
# %-19s | %-4d %s
format = " %-" + str(namelen) + "s | %" + str(statlen) + "s %s\n"
hist = ''
# -- calculating histogram --
width = len(format % ('', '', ''))
histwidth = max(2, 80 - width)
if maxdiff < histwidth:
hist = "+"*insert[i] + "-"*delete[i]
else:
iratio = (float(insert[i]) / maxdiff) * histwidth
dratio = (float(delete[i]) / maxdiff) * histwidth
# make sure every entry gets at least one + or -
iwidth = 1 if 0 < iratio < 1 else int(iratio)
dwidth = 1 if 0 < dratio < 1 else int(dratio)
#print(iratio, dratio, iwidth, dwidth, histwidth)
hist = "+"*int(iwidth) + "-"*int(dwidth)
# -- /calculating +- histogram --
output += (format % (tostr(names[i]), str(insert[i] + delete[i]), hist))
output += (" %d files changed, %d insertions(+), %d deletions(-), %+d bytes"
% (len(names), sum(insert), sum(delete), delta))
return output
def findfile(self, old, new):
""" return name of file to be patched or None """
if exists(old):
return old
elif exists(new):
return new
else:
# [w] Google Code generates broken patches with its online editor
debug("broken patch from Google Code, stripping prefixes..")
if old.startswith(b'a/') and new.startswith(b'b/'):
old, new = old[2:], new[2:]
debug(" %s" % old)
debug(" %s" % new)
if exists(old):
return old
elif exists(new):
return new
return None
def apply(self, strip=0, root=None):
""" Apply parsed patch, optionally stripping leading components
from file paths. `root` parameter specifies working dir.
return True on success
"""
if root:
prevdir = os.getcwd()
os.chdir(root)
total = len(self.items)
errors = 0
if strip:
# [ ] test strip level exceeds nesting level
# [ ] test the same only for selected files
# [ ] test if files end up being on the same level
try:
strip = int(strip)
except ValueError:
errors += 1
warning("error: strip parameter '%s' must be an integer" % strip)
strip = 0
#for fileno, filename in enumerate(self.source):
for i,p in enumerate(self.items):
if strip:
debug("stripping %s leading component(s) from:" % strip)
debug(" %s" % p.source)
debug(" %s" % p.target)
old = pathstrip(p.source, strip)
new = pathstrip(p.target, strip)
else:
old, new = p.source, p.target
filename = self.findfile(old, new)
if not filename:
warning("source/target file does not exist:\n --- %s\n +++ %s" % (old, new))
errors += 1
continue
if not isfile(filename):
warning("not a file - %s" % filename)
errors += 1
continue
# [ ] check absolute paths security here
debug("processing %d/%d:\t %s" % (i+1, total, filename))
# validate before patching
f2fp = open(filename, 'rb')
hunkno = 0
hunk = p.hunks[hunkno]
hunkfind = []
hunkreplace = []
validhunks = 0
canpatch = False
for lineno, line in enumerate(f2fp):
if lineno+1 < hunk.startsrc:
continue
elif lineno+1 == hunk.startsrc:
hunkfind = [x[1:].rstrip(b"\r\n") for x in hunk.text if x[0] in b" -"]
hunkreplace = [x[1:].rstrip(b"\r\n") for x in hunk.text if x[0] in b" +"]
#pprint(hunkreplace)
hunklineno = 0
# todo \ No newline at end of file
# check hunks in source file
if lineno+1 < hunk.startsrc+len(hunkfind)-1:
if line.rstrip(b"\r\n") == hunkfind[hunklineno]:
hunklineno+=1
else:
info("file %d/%d:\t %s" % (i+1, total, filename))
info(" hunk no.%d doesn't match source file at line %d" % (hunkno+1, lineno+1))
info(" expected: %s" % hunkfind[hunklineno])
info(" actual : %s" % line.rstrip(b"\r\n"))
# not counting this as error, because file may already be patched.
# check if file is already patched is done after the number of
# invalid hunks if found
# TODO: check hunks against source/target file in one pass
# API - check(stream, srchunks, tgthunks)
# return tuple (srcerrs, tgterrs)
# continue to check other hunks for completeness
hunkno += 1
if hunkno < len(p.hunks):
hunk = p.hunks[hunkno]
continue
else:
break
# check if processed line is the last line
if lineno+1 == hunk.startsrc+len(hunkfind)-1:
debug(" hunk no.%d for file %s -- is ready to be patched" % (hunkno+1, filename))
hunkno+=1
validhunks+=1
if hunkno < len(p.hunks):
hunk = p.hunks[hunkno]
else:
if validhunks == len(p.hunks):
# patch file
canpatch = True
break
else:
if hunkno < len(p.hunks):
warning("premature end of source file %s at hunk %d" % (filename, hunkno+1))
errors += 1
f2fp.close()
if validhunks < len(p.hunks):
if self._match_file_hunks(filename, p.hunks):
warning("already patched %s" % filename)
else:
warning("source file is different - %s" % filename)
errors += 1
if canpatch:
backupname = filename+b".orig"
if exists(backupname):
warning("can't backup original file to %s - aborting" % backupname)
else:
import shutil
shutil.move(filename, backupname)
if self.write_hunks(backupname, filename, p.hunks):
info("successfully patched %d/%d:\t %s" % (i+1, total, filename))
os.unlink(backupname)
else:
errors += 1
warning("error patching file %s" % filename)
shutil.copy(filename, filename+".invalid")
warning("invalid version is saved to %s" % filename+".invalid")
# todo: proper rejects
shutil.move(backupname, filename)
if root:
os.chdir(prevdir)
# todo: check for premature eof
return (errors == 0)
def _reverse(self):
""" reverse patch direction (this doesn't touch filenames) """
for p in self.items:
for h in p.hunks:
h.startsrc, h.starttgt = h.starttgt, h.startsrc
h.linessrc, h.linestgt = h.linestgt, h.linessrc
for i,line in enumerate(h.text):
# need to use line[0:1] here, because line[0]
# returns int instead of bytes on Python 3
if line[0:1] == b'+':
h.text[i] = b'-' + line[1:]
elif line[0:1] == b'-':
h.text[i] = b'+' +line[1:]
def revert(self, strip=0, root=None):
""" apply patch in reverse order """
reverted = copy.deepcopy(self)
reverted._reverse()
return reverted.apply(strip, root)
def can_patch(self, filename):
""" Check if specified filename can be patched. Returns None if file can
not be found among source filenames. False if patch can not be applied
clearly. True otherwise.
:returns: True, False or None
"""
filename = abspath(filename)
for p in self.items:
if filename == abspath(p.source):
return self._match_file_hunks(filename, p.hunks)
return None
def _match_file_hunks(self, filepath, hunks):
matched = True
fp = open(abspath(filepath), 'rb')
class NoMatch(Exception):
pass
lineno = 1
line = fp.readline()
hno = None
try:
for hno, h in enumerate(hunks):
# skip to first line of the hunk
while lineno < h.starttgt:
if not len(line): # eof
debug("check failed - premature eof before hunk: %d" % (hno+1))
raise NoMatch
line = fp.readline()
lineno += 1
for hline in h.text:
if hline.startswith(b"-"):
continue
if not len(line):
debug("check failed - premature eof on hunk: %d" % (hno+1))
# todo: \ No newline at the end of file
raise NoMatch
if line.rstrip(b"\r\n") != hline[1:].rstrip(b"\r\n"):
debug("file is not patched - failed hunk: %d" % (hno+1))
raise NoMatch
line = fp.readline()
lineno += 1
except NoMatch:
matched = False
# todo: display failed hunk, i.e. expected/found
fp.close()
return matched
def patch_stream(self, instream, hunks):
""" Generator that yields stream patched with hunks iterable
Converts lineends in hunk lines to the best suitable format
autodetected from input
"""
# todo: At the moment substituted lineends may not be the same
# at the start and at the end of patching. Also issue a
# warning/throw about mixed lineends (is it really needed?)
hunks = iter(hunks)
srclineno = 1
lineends = {b'\n':0, b'\r\n':0, b'\r':0}
def get_line():
"""
local utility function - return line from source stream
collecting line end statistics on the way
"""
line = instream.readline()
# 'U' mode works only with text files
if line.endswith(b"\r\n"):
lineends[b"\r\n"] += 1
elif line.endswith(b"\n"):
lineends[b"\n"] += 1
elif line.endswith(b"\r"):
lineends[b"\r"] += 1
return line
for hno, h in enumerate(hunks):
debug("hunk %d" % (hno+1))
# skip to line just before hunk starts
while srclineno < h.startsrc:
yield get_line()
srclineno += 1
for hline in h.text:
# todo: check \ No newline at the end of file
if hline.startswith(b"-") or hline.startswith(b"\\"):
get_line()
srclineno += 1
continue
else:
if not hline.startswith(b"+"):
get_line()
srclineno += 1
line2write = hline[1:]
# detect if line ends are consistent in source file
if sum([bool(lineends[x]) for x in lineends]) == 1:
newline = [x for x in lineends if lineends[x] != 0][0]
yield line2write.rstrip(b"\r\n")+newline
else: # newlines are mixed
yield line2write
for line in instream:
yield line
def write_hunks(self, srcname, tgtname, hunks):
src = open(srcname, "rb")
tgt = open(tgtname, "wb")
debug("processing target file %s" % tgtname)
tgt.writelines(self.patch_stream(src, hunks))
tgt.close()
src.close()
# [ ] TODO: add test for permission copy
shutil.copymode(srcname, tgtname)
return True
def dump(self):
for p in self.items:
for headline in p.header:
print(headline.rstrip('\n'))
print('--- ' + p.source)
print('+++ ' + p.target)
for h in p.hunks:
print('@@ -%s,%s +%s,%s @@' % (h.startsrc, h.linessrc, h.starttgt, h.linestgt))
for line in h.text:
print(line.rstrip('\n'))
def main():
from optparse import OptionParser
from os.path import exists
import sys
opt = OptionParser(usage="1. %prog [options] unified.diff\n"
" 2. %prog [options] http://host/patch\n"
" 3. %prog [options] -- < unified.diff",
version="python-patch %s" % __version__)
opt.add_option("-q", "--quiet", action="store_const", dest="verbosity",
const=0, help="print only warnings and errors", default=1)
opt.add_option("-v", "--verbose", action="store_const", dest="verbosity",
const=2, help="be verbose")
opt.add_option("--debug", action="store_true", dest="debugmode", help="debug mode")
opt.add_option("--diffstat", action="store_true", dest="diffstat",
help="print diffstat and exit")
opt.add_option("-d", "--directory", metavar='DIR',
help="specify root directory for applying patch")
opt.add_option("-p", "--strip", type="int", metavar='N', default=0,
help="strip N path components from filenames")
opt.add_option("--revert", action="store_true",
help="apply patch in reverse order (unpatch)")
(options, args) = opt.parse_args()
if not args and sys.argv[-1:] != ['--']:
opt.print_version()
opt.print_help()
sys.exit()
readstdin = (sys.argv[-1:] == ['--'] and not args)
verbosity_levels = {0:logging.WARNING, 1:logging.INFO, 2:logging.DEBUG}
loglevel = verbosity_levels[options.verbosity]
logformat = "%(message)s"
logger.setLevel(loglevel)
streamhandler.setFormatter(logging.Formatter(logformat))
if options.debugmode:
setdebug() # this sets global debugmode variable
if readstdin:
patch = PatchSet(sys.stdin)
else:
patchfile = args[0]
urltest = patchfile.split(':')[0]
if (':' in patchfile and urltest.isalpha()
and len(urltest) > 1): # one char before : is a windows drive letter
patch = fromurl(patchfile)
else:
if not exists(patchfile) or not isfile(patchfile):
sys.exit("patch file does not exist - %s" % patchfile)
patch = fromfile(patchfile)
if options.diffstat:
print(patch.diffstat())
sys.exit(0)
#pprint(patch)
if options.revert:
patch.revert(options.strip, root=options.directory) or sys.exit(-1)
else:
patch.apply(options.strip, root=options.directory) or sys.exit(-1)
# todo: document and test line ends handling logic - patch.py detects proper line-endings
# for inserted hunks and issues a warning if patched file has inconsistent line ends
if __name__ == "__main__":
main()
# Legend:
# [ ] - some thing to be done
# [w] - official wart, external or internal that is unlikely to be fixed
# [ ] API break (2.x) wishlist
# PatchSet.items --> PatchSet.patches
# [ ] run --revert test for all dataset items
# [ ] run .parse() / .dump() test for dataset
0b393c99acb69e2e47500842eda4d0fff53b69a6 | 4adc1d1b8f9badefcd8c25c6e0e87c6545ccde2c | /OrcApi/Driver/Web/WindowDefMod.py | 81ea8efd597472a2d1aed384e743b1cac1c6b9e | [] | no_license | orange21cn/OrcTestToolsKit | eb7b67e87a608fb52d7bdcb2b859fa588263c136 | 69b6a3c382a7043872db1282df4be9e413d297d6 | refs/heads/master | 2020-04-15T07:30:35.485214 | 2017-09-30T06:16:17 | 2017-09-30T06:16:17 | 68,078,991 | 5 | 3 | null | null | null | null | UTF-8 | Python | false | false | 2,884 | py |

# -*- coding: utf-8 -*-
from datetime import datetime
from OrcLib.LibCommon import is_null
from OrcLib.LibException import OrcDatabaseException
from OrcLib.LibDatabase import WebWindowDef
from OrcLib.LibDatabase import orc_db
from OrcLib.LibLog import OrcLog
class WindowDefMod:
"""
    Web window definition management
"""
__session = orc_db.session
__logger = OrcLog("api.driver.web.window_def")
def __init__(self):
pass
def usr_search(self, p_cond=None):
"""
        Query the window definitions that match the given conditions
:param p_cond:
:return:
"""
        # Treat a missing condition argument as an empty dict
cond = p_cond if p_cond else dict()
        # Build LIKE-style patterns for fuzzy matching
_like = lambda p_flag: "%%%s%%" % cond[p_flag]
# db session
result = self.__session.query(WebWindowDef)
if 'id' in cond:
            # The id condition accepts either a single id or a list of ids
if isinstance(cond["id"], list):
result = result.filter(WebWindowDef.id.in_(cond['id']))
else:
result = result.filter(WebWindowDef.id == cond['id'])
if 'window_mark' in cond:
result = result.filter(WebWindowDef.window_mark.ilike(_like('window_mark')))
if 'window_desc' in cond:
result = result.filter(WebWindowDef.window_desc.ilike(_like('window_desc')))
if 'comment' in cond:
result = result.filter(WebWindowDef.comment.ilike(_like('comment')))
return result.all()
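    # Added usage sketch (hypothetical values, not in the original module):
    #   WindowDefMod().usr_search({'window_mark': 'login'})
    # returns all rows whose window_mark contains 'login', case-insensitively.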
def usr_add(self, p_data):
"""
:param p_data:
:return:
"""
_node = WebWindowDef()
# Create id
_node.id = p_data['id']
# window_mark
_node.window_mark = p_data['window_mark'] if 'window_mark' in p_data else ""
# window_desc
_node.window_desc = p_data['window_desc'] if 'window_desc' in p_data else ""
# batch_desc, comment
_node.comment = p_data['comment'] if 'comment' in p_data else ""
# create_time, modify_time
_node.create_time = datetime.now()
_node.modify_time = datetime.now()
try:
self.__session.add(_node)
self.__session.commit()
        except Exception:
            self.__logger.error("failed to insert web window definition")
raise OrcDatabaseException
return _node
def usr_update(self, p_cond):
for t_id in p_cond:
if "id" == t_id:
continue
_data = None if is_null(p_cond[t_id]) else p_cond[t_id]
_item = self.__session.query(WebWindowDef).filter(WebWindowDef.id == p_cond['id'])
_item.update({t_id: _data})
self.__session.commit()
def usr_delete(self, p_id):
"""
Delete
:param p_id:
:return:
"""
self.__session.query(WebWindowDef).filter(WebWindowDef.id == p_id).delete()
self.__session.commit()
3da8cd2718bcab6e44d110b99aeb4836a002db47 | eae6dddca9285702c4c7ed6ba6bdaceef9631df2 | /CCC-2019/Junior/Junior-2/J2.py | ead798d878460926221d2b7467237cbe4a4a29f4 | [] | no_license | simrit1/CCC-Solutions-2 | 7823ce14801c4219f6f1dd4c42fb013c2dfc45dd | ee2883aa38f933e526ce187d50ca68763876cb58 | refs/heads/master | 2023-07-04T02:19:37.320261 | 2021-08-07T22:12:36 | 2021-08-07T22:12:36 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 377 | py |

# CCC 2019 Junior 2: Time to Decompress
#
# Author: Charles Chen
#
# Strings and loops
num_lines = int(input())
num_symbols = []
symbol_type = []
for i in range(num_lines):
input_data = input().split()
num_symbols.append(int(input_data[0]))
symbol_type.append(input_data[1])
for i in range(num_lines):
print(symbol_type[i] * num_symbols[i])
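# Added example (assumed input, not in the original solution): for the input
# lines ['2', '4 +', '3 -'] the loop above prints '++++' then '---'.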
d155f15f7ad2a469627d668066a70e11fe83b0b2 | 6fa7f99d3d3d9b177ef01ebf9a9da4982813b7d4 | /Y4gwcGfcGb3SKz6Tu_0.py | e71f5bc1628a8c29847b14606c19007da771aeb7 | [] | no_license | daniel-reich/ubiquitous-fiesta | 26e80f0082f8589e51d359ce7953117a3da7d38c | 9af2700dbe59284f5697e612491499841a6c126f | refs/heads/master | 2023-04-05T06:40:37.328213 | 2021-04-06T20:17:44 | 2021-04-06T20:17:44 | 355,318,759 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 203 | py |
def max_separator(s):
substrings = [(s.find(i, idx+1) - idx, i) for idx, i in enumerate(s) if s.find(i, idx+1) != -1]
return sorted([c for size, c in substrings if size == max(substrings)[0]])
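# Added example (assumed input): max_separator('abcab') -> ['a', 'b'], since
# both 'a' and 'b' repeat at the maximal distance of 3 characters.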
37913c6ecbbbda6ad28bc4dac8635652653d5abb | 23611933f0faba84fc82a1bc0a85d97cf45aba99 | /google-cloud-sdk/.install/.backup/lib/surface/compute/instance_groups/managed/get_named_ports.py | adf1818f3b56f360348f6678bf9cbb31f379ccb2 | [
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] | permissive | KaranToor/MA450 | 1f112d1caccebdc04702a77d5a6cee867c15f75c | c98b58aeb0994e011df960163541e9379ae7ea06 | refs/heads/master | 2021-06-21T06:17:42.585908 | 2020-12-24T00:36:28 | 2020-12-24T00:36:28 | 79,285,433 | 1 | 1 | Apache-2.0 | 2020-12-24T00:38:09 | 2017-01-18T00:05:44 | Python | false | false | 2,035 | py |

# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Command for listing named ports in instance groups."""
from googlecloudsdk.api_lib.compute import base_classes
from googlecloudsdk.api_lib.compute import instance_groups_utils
from googlecloudsdk.calliope import base
from googlecloudsdk.command_lib.compute import flags
from googlecloudsdk.command_lib.compute import scope as compute_scope
from googlecloudsdk.command_lib.compute.instance_groups import flags as instance_groups_flags
from googlecloudsdk.core import properties
class GetNamedPorts(base.ListCommand):
"""Implements get-named-ports command, alpha, and beta versions."""
def Format(self, unused_args):
return 'table(name, port)'
@staticmethod
def Args(parser):
instance_groups_flags.MULTISCOPE_INSTANCE_GROUP_ARG.AddArgument(parser)
def Run(self, args):
"""Retrieves response with named ports."""
holder = base_classes.ComputeApiHolder(self.ReleaseTrack())
project = properties.VALUES.core.project.Get(required=True)
group_ref = (
instance_groups_flags.MULTISCOPE_INSTANCE_GROUP_ARG.ResolveAsResource(
args, holder.resources,
default_scope=compute_scope.ScopeEnum.ZONE,
scope_lister=flags.GetDefaultScopeLister(
holder.client, project)))
return instance_groups_utils.OutputNamedPortsForGroup(
group_ref, holder.client)
detailed_help = (
instance_groups_utils.INSTANCE_GROUP_GET_NAMED_PORT_DETAILED_HELP)
511d5d391655996dc02f37c3a43187003bad7158 | 17fe10b0f0f85765767ad0eceaf3d7118694ae80 | /backend/event/api/v1/serializers.py | e4a19836dcba1b8de8bb160cfd2e4b1d0c1f056f | [] | no_license | crowdbotics-apps/cotter-trout-dock-23862 | 5f2de4b3c820f49926d7b43ead203c8f3c8593b2 | 1e956b9b4572d324902af73b6ff4fdde1776f5b2 | refs/heads/master | 2023-02-12T21:13:52.324832 | 2021-01-15T16:17:01 | 2021-01-15T16:17:01 | 329,955,009 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,458 | py |

from rest_framework import serializers
from event.models import (
Vendor,
Location,
Favorites,
VendorDetail,
Category,
Faq,
Presenter,
Schedule,
MySchedule,
Sponsor,
)
class FaqSerializer(serializers.ModelSerializer):
class Meta:
model = Faq
fields = "__all__"
class SponsorSerializer(serializers.ModelSerializer):
class Meta:
model = Sponsor
fields = "__all__"
class FavoritesSerializer(serializers.ModelSerializer):
class Meta:
model = Favorites
fields = "__all__"
class ScheduleSerializer(serializers.ModelSerializer):
class Meta:
model = Schedule
fields = "__all__"
class CategorySerializer(serializers.ModelSerializer):
class Meta:
model = Category
fields = "__all__"
class VendorSerializer(serializers.ModelSerializer):
class Meta:
model = Vendor
fields = "__all__"
class VendorDetailSerializer(serializers.ModelSerializer):
class Meta:
model = VendorDetail
fields = "__all__"
class PresenterSerializer(serializers.ModelSerializer):
class Meta:
model = Presenter
fields = "__all__"
class LocationSerializer(serializers.ModelSerializer):
class Meta:
model = Location
fields = "__all__"
class MyScheduleSerializer(serializers.ModelSerializer):
class Meta:
model = MySchedule
fields = "__all__"
dc573298a746f7d19680c66c688ceaaf5c15b85f | 081b33ead95b323e77bdce3717af0a5790e34a1e | /backend/apps/settings/models.py | 3692f2d192f0341b23781b392ab4a7de1f431656 | [] | no_license | alexmon1989/afliga | 81ea3b32b18040bb8baa4e8af14a73003fb9a89f | 661da30c0a5aa6b9975eb7dea9c9a031529d2dbb | refs/heads/master | 2023-02-23T11:12:45.608118 | 2023-02-11T12:12:41 | 2023-02-11T12:12:41 | 105,630,198 | 0 | 0 | null | 2023-02-15T20:50:12 | 2017-10-03T08:36:15 | Python | false | false | 2,744 | py |

from django.db import models
from apps.league.models import Player
class FooterSettings(models.Model):
"""Настройки футера."""
information_block_title = models.CharField('Заголовок текстового блока', max_length=100, null=True, blank=True)
information_block_text = models.TextField('Текст блока (html)', null=True, blank=True)
contacts_block_html = models.TextField(
'Текст блока "Контактная информация" (html)',
null=True,
blank=True
)
facebook_link = models.CharField('Ссылка Facebook', max_length=255, null=True, blank=True)
vk_link = models.CharField('Ссылка VK', max_length=255, null=True, blank=True)
twitter_link = models.CharField('Ссылка Twitter', max_length=255, null=True, blank=True)
google_link = models.CharField('Ссылка Google', max_length=255, null=True, blank=True)
copyrights_block_html = models.TextField(
'Текст блока копирайта (html)',
null=True,
blank=True
)
class Meta:
verbose_name = 'Footer'
verbose_name_plural = 'Footer'
class Banner(models.Model):
"""Модель баннера."""
title = models.CharField('Заголовок', max_length=255, blank=False)
is_visible = models.BooleanField('Включено', default=True)
link = models.CharField('Ссылка', max_length=255, null=True, blank=True)
image = models.ImageField('Изображение', upload_to='banners', null=True, blank=True)
created_at = models.DateTimeField('Создано', auto_now_add=True)
updated_at = models.DateTimeField('Обновлено', auto_now=True)
class Meta:
verbose_name = 'Баннер'
verbose_name_plural = 'Баннеры'
class PersonWidget(models.Model):
"""Настройки виджета персоны."""
title = models.CharField('Заголовок', max_length=255, default='Персона')
player = models.ForeignKey(Player, verbose_name='Игрок', on_delete=models.CASCADE)
is_visible = models.BooleanField('Включено', default=True)
class Meta:
verbose_name = 'Виджет "Персона"'
verbose_name_plural = 'Виджет "Персона"'
class Analytics(models.Model):
"""Модель HTML-кода аналитики."""
code = models.TextField('HTML-код')
created_at = models.DateTimeField('Создано', auto_now_add=True)
updated_at = models.DateTimeField('Обновлено', auto_now=True)
class Meta:
verbose_name = 'HTML-код аналитики'
verbose_name_plural = 'HTML-код аналитики'
15715eaf8822905a4c32b90071642f1351caf2ba | 2f54c561e13df5f0f4479e73a47103b8413a235a | /python codes/recursion.py | a4aa83e9d1dd41e409479f1902abf580bc1a520f | [] | no_license | suvimanikandan/PYTHON_CODES | 7123e2898d8c363e018477d2b60f26c7287d4e72 | 2a8eaae317a773b7236529021a333e9e2a40e51f | refs/heads/main | 2023-04-03T19:45:06.009539 | 2021-04-20T05:26:44 | 2021-04-20T05:26:44 | 359,693,270 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 160 | py |
import sys
sys.setrecursionlimit(2000)
print(sys.getrecursionlimit())
i=0
def greet():
global i
i+=1
print("hello",i)
greet()
greet()
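# Added note (not in the original file): greet() has no base case, so it
# recurses until the limit raised above (2000 frames) aborts it with a
# RecursionError after printing roughly that many greetings.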
5c0172e12c8046dfc13f3f6538b0c802f7623194 | 781e2692049e87a4256320c76e82a19be257a05d | /all_data/exercism_data/python/word-count/e606d579317c48f6b8b3ea5ff8b93984.py | 42692983767720aa1ef6632e7378b60086e0a348 | [] | no_license | itsolutionscorp/AutoStyle-Clustering | 54bde86fe6dbad35b568b38cfcb14c5ffaab51b0 | be0e2f635a7558f56c61bc0b36c6146b01d1e6e6 | refs/heads/master | 2020-12-11T07:27:19.291038 | 2016-03-16T03:18:00 | 2016-03-16T03:18:42 | 59,454,921 | 4 | 0 | null | 2016-05-23T05:40:56 | 2016-05-23T05:40:56 | null | false | false | 151 | py |

def word_count(phrase):
count = dict()
for word in phrase.split():
count.setdefault(word, 0)
count[word] += 1
return count
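# Added example (assumed input): word_count('one fish two fish')
# -> {'one': 1, 'fish': 2, 'two': 1}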
eb8cfced5d54b2f954d35b0c31564dbadc38423d | 91d1a6968b90d9d461e9a2ece12b465486e3ccc2 | /codestar-notifications_write_1/notification-rule_update.py | a01451f4c062a17c803d10c7d64f8e5e744998b5 | [] | no_license | lxtxl/aws_cli | c31fc994c9a4296d6bac851e680d5adbf7e93481 | aaf35df1b7509abf5601d3f09ff1fece482facda | refs/heads/master | 2023-02-06T09:00:33.088379 | 2020-12-27T13:38:45 | 2020-12-27T13:38:45 | 318,686,394 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,466 | py |

#!/usr/bin/python
# -*- coding: utf-8 -*-
import os
import sys
sys.path.append(os.path.dirname(os.path.abspath(os.path.dirname(__file__))))
from common.execute_command import write_one_parameter
# url : https://awscli.amazonaws.com/v2/documentation/api/latest/reference/codestar-notifications/update-notification-rule.html
if __name__ == '__main__':
"""
create-notification-rule : https://awscli.amazonaws.com/v2/documentation/api/latest/reference/codestar-notifications/create-notification-rule.html
delete-notification-rule : https://awscli.amazonaws.com/v2/documentation/api/latest/reference/codestar-notifications/delete-notification-rule.html
describe-notification-rule : https://awscli.amazonaws.com/v2/documentation/api/latest/reference/codestar-notifications/describe-notification-rule.html
list-notification-rules : https://awscli.amazonaws.com/v2/documentation/api/latest/reference/codestar-notifications/list-notification-rules.html
"""
parameter_display_string = """
# arn : The Amazon Resource Name (ARN) of the notification rule.
"""
add_option_dict = {}
#######################################################################
# parameter display string
add_option_dict["parameter_display_string"] = parameter_display_string
# ex: add_option_dict["no_value_parameter_list"] = "--single-parameter"
write_one_parameter("codestar-notifications", "update-notification-rule", "arn", add_option_dict)
0d1c6e2ddb041ef981f6b1c916b97199a9ef93b8 | bd55b7fefa99156aeb3c28a4abfa407fc03c6bb1 | /vstructui/scripts/vstructui_bin.py | 66119705e9eec7b30622b45971dec5769cecb73d | [
"Apache-2.0"
] | permissive | williballenthin/python-pyqt5-vstructui | 175419738549f9a8ba97ced004c88561356ddcdc | 2e06f5fed8aa362e07ad5f677fb42d5cd15163e1 | refs/heads/master | 2021-01-10T02:18:34.326960 | 2015-12-10T04:55:19 | 2015-12-10T04:55:19 | 36,752,901 | 10 | 2 | null | null | null | null | UTF-8 | Python | false | false | 2,669 | py |

import os
import sys
import imp
import mmap
import contextlib
from PyQt5.QtWidgets import QApplication
from vstruct import VStruct
from vstruct import VArray
from vstruct.primitives import v_prim
from vstruct.primitives import v_number
from vstruct.primitives import v_bytes
from vstruct.primitives import v_uint8
from vstruct.primitives import v_uint16
from vstruct.primitives import v_uint32
from vstructui.vstruct_parser import ComposedParser
from vstructui.vstruct_parser import VstructInstance
import vstructui
# TODO: use pkg_resources
defspath = os.path.join(os.path.dirname(os.path.dirname(os.path.realpath(__file__))), "defs")
def get_parsers(defspath=defspath):
parsers = ComposedParser()
for filename in os.listdir(defspath):
if not filename.endswith(".py"):
continue
deffilepath = os.path.join(defspath, filename)
mod = imp.load_source("vstruct_parser", deffilepath)
if not hasattr(mod, "vsEntryVstructParser"):
continue
parser = mod.vsEntryVstructParser()
parsers.add_parser(parser)
return parsers
_HEX_ALPHA_CHARS = set(list("abcdefABCDEF"))
def is_probably_hex(s):
if s.startswith("0x"):
return True
for c in s:
if c in _HEX_ALPHA_CHARS:
return True
return False
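# Added examples (not in the original script): is_probably_hex('0x1f') -> True,
# is_probably_hex('deadbeef') -> True (contains hex alpha chars),
# is_probably_hex('123') -> False.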
def _main(*args):
parsers = get_parsers()
buf = ""
structs = ()
filename = None
if len(args) == 0:
print("error: at least one argument required (path to binary file)")
return -1
# vstructui.py /path/to/binary/file "0x0:uint32:first dword" "0x4:uint_2:first word"
structs = []
args = list(args) # we want a list that we can modify
filename = args.pop(0)
with open(filename, "rb") as f:
with contextlib.closing(mmap.mmap(f.fileno(), 0, access=mmap.ACCESS_READ)) as buf:
for d in args:
if ":" not in d:
raise RuntimeError("invalid structure declaration: {:s}".format(d))
soffset, _, parser_name = d.partition(":")
parser_name, _, name = parser_name.partition(":")
offset = None
if is_probably_hex(soffset):
offset = int(soffset, 0x10)
else:
offset = int(soffset)
structs.extend(parsers.parse(parser_name, buf, offset, name=name))
app = QApplication(sys.argv)
screen = vstructui.VstructViewWidget(parsers, structs, buf)
screen.show()
sys.exit(app.exec_())
def main():
sys.exit(_main(*sys.argv[1:]))
if __name__ == "__main__":
main()
ae6d37bf310be7ced448efde5f9029f8a50c2e93 | 3da574f57da42ef745c59b121c70f0d89b98242d | /mandrill/mayhem_1.py | 21868aebc9ee247f16ef4607f788a9bc5099a05d | [
"MIT"
] | permissive | yijxiang/mayhem | 5a93e184f4f0081d86e9651b815e01712297218a | 521b1e4540d37395ca47908520183245b167e2b0 | refs/heads/master | 2021-09-20T03:28:13.509396 | 2018-08-02T17:22:02 | 2018-08-02T17:22:02 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,269 | py | #!/usr/bin/env python3.7
# Copyright (c) 2018 Lynn Root
"""
Initial setup - starting point based off of
http://asyncio.readthedocs.io/en/latest/producer_consumer.html
Notice! This requires:
- attrs==18.1.0
"""
import asyncio
import logging
import random
import string
import attr
# NB: Using f-strings with log messages may not be ideal since no matter
# what the log level is set at, f-strings will always be evaluated
# whereas the old form ('foo %s' % 'bar') is lazily-evaluated.
# But I just love f-strings.
logging.basicConfig(
level=logging.INFO,
format='%(asctime)s,%(msecs)d %(levelname)s: %(message)s',
datefmt='%H:%M:%S',
)
@attr.s
class PubSubMessage:
instance_name = attr.ib()
message_id = attr.ib(repr=False)
hostname = attr.ib(repr=False, init=False)
def __attrs_post_init__(self):
self.hostname = f'{self.instance_name}.example.net'
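# Illustration (a sketch, not in the original): constructing
# PubSubMessage(message_id=1, instance_name='cattle-1234') derives
# hostname == 'cattle-1234.example.net' via the attrs post-init hook above.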
async def publish(queue, n):
"""Simulates an external publisher of messages.
Args:
queue (asyncio.Queue): Queue to publish messages to.
n (int): Number of messages to publish.
"""
choices = string.ascii_lowercase + string.digits
for x in range(1, n + 1):
host_id = ''.join(random.choices(choices, k=4))
instance_name = f'cattle-{host_id}'
msg = PubSubMessage(message_id=x, instance_name=instance_name)
# publish an item
await queue.put(msg)
logging.info(f'Published {x} of {n} messages')
# indicate the publisher is done
await queue.put(None)
async def consume(queue):
"""Consumer client to simulate subscribing to a publisher.
Args:
queue (asyncio.Queue): Queue from which to consume messages.
"""
while True:
# wait for an item from the publisher
msg = await queue.get()
# the publisher emits None to indicate that it is done
if msg is None:
break
# process the msg
logging.info(f'Consumed {msg}')
# unhelpful simulation of i/o work
await asyncio.sleep(random.random())
if __name__ == '__main__':
    async def main():
        # Create the queue inside the running event loop and drive publisher
        # and consumer concurrently; the original pair of asyncio.run() calls
        # would spin up two separate loops, leaving the queue bound to
        # neither and serializing the producer/consumer pipeline.
        queue = asyncio.Queue()
        await asyncio.gather(publish(queue, 5), consume(queue))

    asyncio.run(main())
| [
"[email protected]"
] | |
052e64eed1a7060c8189bf73a146399bddecfa95 | 53784d3746eccb6d8fca540be9087a12f3713d1c | /res/packages/scripts/scripts/client/gui/prb_control/entities/base/pre_queue/listener.py | 4911d5bab483ee2b93e16664718ddd92927ec370 | [] | no_license | webiumsk/WOT-0.9.17.1-CT | 736666d53cbd0da6745b970e90a8bac6ea80813d | d7c3cf340ae40318933e7205bf9a17c7e53bac52 | refs/heads/master | 2021-01-09T06:00:33.898009 | 2017-02-03T21:40:17 | 2017-02-03T21:40:17 | 80,870,824 | 0 | 0 | null | null | null | null | WINDOWS-1250 | Python | false | false | 2,051 | py | # 2017.02.03 21:48:33 Střední Evropa (běžný čas)
# Embedded file name: scripts/client/gui/prb_control/entities/base/pre_queue/listener.py
from gui.prb_control.entities.base.listener import IPrbListener
class IPreQueueListener(IPrbListener):
"""
Interface of prequeue listener.
"""
def onEnqueued(self, queueType, *args):
"""
Event that is called when player goes into queue.
Args:
queueType: joined queue type
"""
pass
def onDequeued(self, queueType, *args):
"""
Event that is called when player leaves queue.
Args:
queueType: left queue type
"""
pass
def onEnqueueError(self, queueType, *args):
"""
Event that is called when player receives enqueue error.
Args:
queueType: queue type that we're trying to join
"""
pass
def onKickedFromQueue(self, queueType, *args):
"""
Event that is called when player was kicked from queue.
Args:
queueType: queue type that we're kicked from
"""
pass
def onKickedFromArena(self, queueType, *args):
"""
Event that is called when player was kicked from arena.
Args:
queueType: queue type that we're kicked from
"""
pass
def onArenaJoinFailure(self, queueType, *args):
"""
Event that is called when player was kicked during arena join.
Args:
queueType: queue type that we're kicked from
"""
pass
def onPreQueueSettingsChanged(self, diff):
"""
Event that is called when player receives settings updates.
Args:
diff: settings changes
"""
pass
# okay decompyling c:\Users\PC\wotsources\files\originals\res\packages\scripts\scripts\client\gui\prb_control\entities\base\pre_queue\listener.pyc
# decompiled 1 files: 1 okay, 0 failed, 0 verify failed
# 2017.02.03 21:48:33 Central Europe (standard time)
| [
"[email protected]"
] | |
8fd3d7573ef2a7f903f530d6c758f8bdd40b49f9 | bd498cbbb28e33370298a84b693f93a3058d3138 | /NVIDIA/benchmarks/transformer/implementations/pytorch/fairseq/data/token_block_dataset.py | 8c9239ff4ca7fef2bb06e97ece279c95a3aecae6 | [
"Apache-2.0",
"BSD-3-Clause"
] | permissive | piyushghai/training_results_v0.7 | afb303446e75e3e9789b0f6c40ce330b6b83a70c | e017c9359f66e2d814c6990d1ffa56654a73f5b0 | refs/heads/master | 2022-12-19T16:50:17.372320 | 2020-09-24T01:02:00 | 2020-09-24T18:01:01 | 298,127,245 | 0 | 1 | Apache-2.0 | 2020-09-24T00:27:21 | 2020-09-24T00:27:21 | null | UTF-8 | Python | false | false | 3,681 | py | # Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.
import math
import numpy as np
import torch
class TokenBlockDataset(torch.utils.data.Dataset):
"""Break a 1d tensor of tokens into blocks.
The blocks are fetched from the original tensor so no additional memory is allocated.
Args:
tokens: 1d tensor of tokens to break into blocks
sizes: sentence lengths (required for 'complete' and 'eos')
block_size: maximum block size (ignored in 'eos' break mode)
break_mode: Mode used for breaking tokens. Values can be one of:
- 'none': break tokens into equally sized blocks (up to block_size)
- 'complete': break tokens into blocks (up to block_size) such that
blocks contains complete sentences, although block_size may be
exceeded if some sentences exceed block_size
- 'eos': each block contains one sentence (block_size is ignored)
include_targets: return next tokens as targets
"""
def __init__(self, tokens, sizes, block_size, break_mode=None, include_targets=False):
super().__init__()
self.tokens = tokens
self.total_size = len(tokens)
self.include_targets = include_targets
self.slice_indices = []
if break_mode is None or break_mode == 'none':
length = math.ceil(len(tokens) / block_size)
def block_at(i):
start = i * block_size
end = min(start + block_size, len(tokens))
return (start, end)
self.slice_indices = [block_at(i) for i in range(length)]
elif break_mode == 'complete':
assert sizes is not None and sum(sizes) == len(tokens), '{} != {}'.format(sum(sizes), len(tokens))
tok_idx = 0
sz_idx = 0
curr_size = 0
while sz_idx < len(sizes):
if curr_size + sizes[sz_idx] <= block_size or curr_size == 0:
curr_size += sizes[sz_idx]
sz_idx += 1
else:
self.slice_indices.append((tok_idx, tok_idx + curr_size))
tok_idx += curr_size
curr_size = 0
if curr_size > 0:
self.slice_indices.append((tok_idx, tok_idx + curr_size))
elif break_mode == 'eos':
assert sizes is not None and sum(sizes) == len(tokens), '{} != {}'.format(sum(sizes), len(tokens))
curr = 0
for sz in sizes:
# skip samples with just 1 example (which would be just the eos token)
if sz > 1:
self.slice_indices.append((curr, curr + sz))
curr += sz
else:
raise ValueError('Invalid break_mode: ' + break_mode)
self.sizes = np.array([e - s for s, e in self.slice_indices])
def __getitem__(self, index):
s, e = self.slice_indices[index]
item = torch.LongTensor(self.tokens[s:e])
if self.include_targets:
# target is the sentence, for source, rotate item one token to the left (would start with eos)
if s == 0:
source = np.concatenate([self.tokens[-1:], self.tokens[0:e - 1]])
else:
source = self.tokens[s - 1:e - 1]
return torch.LongTensor(source), item
return item
def __len__(self):
return len(self.slice_indices)
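# Usage sketch (hypothetical token ids and sentence sizes, not part of
# fairseq itself): break three eos-terminated sentences into blocks of
# complete sentences up to 8 tokens, returning (source, target) pairs.
# ds = TokenBlockDataset(tokens, sizes=[4, 3, 5], block_size=8,
#                        break_mode='complete', include_targets=True)
# source, target = ds[0]  # source is target rotated right by one token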
| [
"[email protected]"
] | |
3bd3e5bfac2d80e09dcf0c187b774fd085ee8aa2 | 8b57c6609e4bf3e6f5e730b7a4a996ad6b7023f0 | /input.py | d00f8811a7602e4179209e88696445ab5a71672e | [] | no_license | bullll/splunk | 862d9595ad28adf0e12afa92a18e2c96308b19fe | 7cf8a158bc8e1cecef374dad9165d44ccb00c6e0 | refs/heads/master | 2022-04-20T11:48:50.573979 | 2020-04-23T18:12:58 | 2020-04-23T18:12:58 | 258,293,313 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,227 | py | from __future__ import absolute_import
from __future__ import print_function
from builtins import object
from future.moves.urllib.parse import urlencode
import lxml.etree as et
import datetime
import time
import socket
import httplib2
try:
import httplib
except ImportError:
import http.client as httplib
import splunk
import splunk.rest as rest
__doc__ = """
This script will allow the python sdk to insert data directly into splunk
"""
#global, don't need to create an instance of this on each call, create once and reuse
h = httplib2.Http(disable_ssl_certificate_validation = True, proxy_info=None)
# ---------------------------
# ---------------------------
class StreamHandler(object):
"""
class that handles the connection
"""
# ----------------------------------------------------------
def __init__(self, dest, endpoint, sessionKey, type='http', ssl=True):
"""
init the connection and buffer
lazy evaluation here...don't make a connection until the first write call
"""
self._dest = dest
self._endpoint = endpoint
self._sessionKey = sessionKey
self._type = type
self._ssl = ssl
self._conn = None
self._sslconn = None
# -------------------------
def _make_http_conn(self):
"""
helper function to make an HTTP connection
"""
if self._ssl:
self._conn = httplib.HTTPSConnection(self._dest)
else:
self._conn = httplib.HTTPConnection(self._dest)
self._conn.connect()
self._conn.putrequest('POST', self._endpoint)
self._conn.putheader('Authorization', 'Splunk ' + self._sessionKey)
self._conn.putheader('X-Splunk-Input-Mode', 'Streaming')
self._conn.endheaders()
# ------------------------
def _make_sock_conn(self):
"""
helper function to make a socket connection
"""
self._conn = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
port = int(self._dest[self._dest.rfind(':') + 1:])
host = self._dest[:self._dest.rfind(':')]
if host.startswith('[') and host.endswith(']'):
host = host[1:-1]
self._conn.connect(host, port)
self._sslconn = socket.ssl(self._conn)
header = "POST %s HTTP/1.0\r\n" % self._endpoint
header += "Host: localhost:8089\r\n"
header += "Accept-Encoding: identity\r\n"
header += "Authorization: Splunk %s\r\n" % self._sessionKey
header += "X-Splunk-Input-Mode: Streaming\r\n"
header += "\r\n"
self._sslconn.write(header)
# --------------------
def write(self, data):
"""
pump this data into splunkd
"""
if not self._conn:
if self._type == 'http':
self._make_http_conn()
elif self._type == 'socket':
self._make_sock_conn()
#the stream endpoint does not return anything, so we don't either
if self._type == 'socket':
try:
self._sslconn.write(data)
except socket.error as e:
#maybe owing to large inactivity the connection was cut by server, so try again once more...
self._make_sock_conn()
self._sslconn.write(data)
#send a new line else data will not be recognized as an individual event
if len(data) and data[-1]!='\n':
self._sslconn.write("\n")
else:
try:
self._conn.send(data)
except Exception as e:
#can get a variety of exceptions here like HTTPException, NotConnected etc etc etc. Just try again.
self._make_http_conn()
self._conn.send(data)
#send a new line else data will not be recognized as an individual event
if len(data) and data[-1]!='\n':
self._conn.send("\n")
# --------------------------------
def writelines(self, line_list):
"""
wrapper around write function to write multiple lines
"""
for line in line_list:
self.write(line)
# --------------------
def send(self, data):
"""
wrapper for write function for the socket interface
"""
self.write(data)
# ---------------
def flush(self):
"""
do-nothing function to make this class resemble a file-like object
"""
pass
# ---------------
def close(self):
"""
cleanup
"""
if self._type == 'http':
self._conn.close()
else:
del self._sslconn
self._conn.close()
# ---------------------------------------------------------------------------
def submit(event, hostname=None, source=None, sourcetype=None, index=None):
"""
the interface to the 'simple' receivers endpoint
"""
global h
#construct the uri to POST to
base_uri = splunk.mergeHostPath()
postargs = {'host': hostname, 'source': source, 'sourcetype' : sourcetype, 'index':index}
uri = base_uri + '/services/receivers/simple?%s' % urlencode(postargs)
#get default session key. If none exists, the rest call will raise a splunk.AuthenticationFailed exception
sessionKey = splunk.getDefault('sessionKey')
#make the call, we cannot use the rest interface here as it urlencodes the payload
serverResponse, serverContent = h.request(uri, "POST", headers={'Authorization':'Splunk %s' % sessionKey}, body=event)
#process results
root = et.fromstring(serverContent)
#4xx error messages indicate a client side error e.g. bad request, unauthorized etc so raise a RESTException
if 400 <= serverResponse.status < 500:
extractedMessages = rest.extractMessages(root)
msg_text = []
for msg in extractedMessages:
msg_text.append('message type=%(type)s code=%(code)s text=%(text)s;' % msg)
raise splunk.RESTException(serverResponse.status, msg_text)
#5xx error messages indicate server side error e.g. Internal server error etc so raise a SplunkdException
elif serverResponse.status >= 500:
extractedMessages = rest.extractMessages(root)
msg_text = []
for msg in extractedMessages:
msg_text.append('message type=%(type)s code=%(code)s text=%(text)s;' % msg)
raise splunk.SplunkdException(serverResponse.status, msg_text)
#everything is kosher...
else:
return serverResponse
# -----------------------------------------------------------------------------
def open(hostname=None, source=None, sourcetype=None, index=None, type='http', sessionKey=None, host_regex=None, host_segment=None):
"""
the interface to the 'stream' receivers endpoint
"""
#construct the uri to POST to
base_uri = splunk.mergeHostPath()
postargs = {'source': source, 'sourcetype' : sourcetype, 'index':index}
if host_regex:
postargs['host_regex'] = host_regex
elif host_segment:
postargs['host_segment'] = host_segment
elif hostname:
postargs['host'] = hostname
endpoint = '/services/receivers/stream?%s' % urlencode(postargs)
#get default session key. If none exists, the rest call will raise a splunk.AuthenticationFailed exception
if not sessionKey:
sessionKey = splunk.getSessionKey()
( proto, host_colon_port ) = base_uri.split("://", 1);
return StreamHandler(host_colon_port, endpoint, sessionKey, type, proto != 'http')
# --------------------------------------------------------------------
def connect(hostname=None, source=None, sourcetype=None, index=None):
"""
wrapper for the open to work with sockets
"""
return open(hostname, source, sourcetype, index, type='socket')
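# Usage sketch (assumes a local splunkd with the default admin credentials;
# the self-test under __main__ below is the authoritative example):
# import splunk.auth as au
# splunk.mergeHostPath('localhost:8089', True)
# au.getSessionKey('admin', 'changeme')
# stream = open(sourcetype='syslog', index='main', source='demo', hostname='demo-host')
# stream.write('Apr 29 19:11:54 hello world')
# stream.close()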
# ---------------------
# utility function
# ---------------------
# --------------------------------------------------
def _get_final_count(host, key, fail_msg, ok_msg):
"""
utility function to see if we inserted into the index properly
"""
time.sleep(60)
job = splunk.search.dispatch('search index=default host=%s | stats count' % host, sessionKey=key)
start = datetime.datetime.now()
while not job.isDone:
time.sleep(1)
now = datetime.datetime.now()
if int((now - start).seconds) > 20:
print("REST response took more than 20 seconds, timing out...")
break
count = 0
for ele in job.events:
count += 1
job.cancel()
assert count == 3, fail_msg % count
print(ok_msg)
# -------------------------
# -------------------------
if __name__ == '__main__':
import splunk.auth as au
import splunk.search
splunk.mergeHostPath('localhost:8089', True)
key = au.getSessionKey('admin', 'changeme')
raw_data = """Apr 29 19:11:54 AAA\nApr 29 19:12:54 BBB\nApr 29 19:13:54 CCC\n"""
# ------------------------------- #
# test simple receivers endpoint #
# ------------------------------- #
resp = submit(raw_data, sourcetype='http-receivers', index='default', source='http-test', hostname='simple-receivers-test')
print('insertion for simple receivers complete...querying splunk...waiting 60 seconds...')
try:
_get_final_count('simple-receivers-test', key, 'inserted 3 events via simple receivers end point, but found %d', 'insert via simple receivers endpoint - OK')
except AssertionError as e:
#test failed, continue to next
print(e)
# --------------------------------------- #
# test stream receivers endpoint via http #
# --------------------------------------- #
stream = open(sourcetype='http-receivers', index='default', source='http-test', hostname='stream-http-receivers-test')
stream.write('Apr 29 18:11:54 AAA')
stream.writelines(['Apr 29 18:12:54 BBB', 'Apr 29 18:13:54 CCC'])
stream.close()
print('insertion for stream http receivers complete...querying splunk...waiting 60 seconds...')
try:
_get_final_count('stream-http-receivers-test', key, 'inserted 3 events via stream http receivers end point, but found %d', 'insert via stream http receivers endpoint - OK')
except AssertionError as e:
#test failed, continue to next
print(e)
# ------------------------------------------ #
# test stream receivers endpoint via sockets #
# ------------------------------------------ #
socket_stream = connect(sourcetype='http-receivers', index='default', source='http-test', hostname='stream-socket-receivers-test')
socket_stream.send('Apr 29 17:11:54 AAA')
socket_stream.send('Apr 29 17:12:54 BBB')
socket_stream.send('Apr 29 17:13:54 CCC')
socket_stream.close()
print('insertion for stream socket receivers complete...querying splunk...waiting 60 seconds...')
try:
_get_final_count('stream-socket-receivers-test', key, 'inserted 3 events via stream socket receivers end point, but found %d', 'insert via stream socket receivers endpoint - OK')
except AssertionError as e:
#test failed, continue to next
print(e)
| [
"[email protected]"
] | |
68868e6f1a4c75f460454a8f20542df8dbbe4308 | c2ddadd3cf14dfc56ec1e4b8d52b8c1a23ea1e61 | /quiz/models.py | 9e3fbcc7e92e0ec967ef6edc13c06711e68a42e2 | [] | no_license | ashimmitra/Varsity-Final-Project-by-Django | 09f944a9f1aae7be4212f0c09cfe5d2c596bd848 | 6274d966f09d9ead2344542b56576a77e0758d5a | refs/heads/main | 2023-07-17T15:50:04.414565 | 2021-08-20T12:31:24 | 2021-08-20T12:31:24 | 342,790,345 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,590 | py | from django.db import models
# Create your models here.
class Quiz(models.Model):
question = models.CharField(max_length = 500)
option1 = models.CharField(max_length = 20)
option2 = models.CharField(max_length = 20)
option3 = models.CharField(max_length = 20)
option4 = models.CharField(max_length = 20)
answer = models.CharField(max_length = 20)
class Ban(models.Model):
question = models.CharField(max_length = 500)
option1 = models.CharField(max_length = 20)
option2 = models.CharField(max_length = 20)
option3 = models.CharField(max_length = 20)
option4 = models.CharField(max_length = 20)
answer = models.CharField(max_length = 20)
#class ICT(models.Model):
#question = models.CharField(max_length = 500)
#option1 = models.CharField(max_length = 20)
#option2 = models.CharField(max_length = 20)
#option3 = models.CharField(max_length = 20)
#option4 = models.CharField(max_length = 20)
#answer = models.CharField(max_length = 20)
class Science(models.Model):
question = models.CharField(max_length = 500)
option1 = models.CharField(max_length = 20)
option2 = models.CharField(max_length = 20)
option3 = models.CharField(max_length = 20)
option4 = models.CharField(max_length = 20)
answer = models.CharField(max_length = 20)
class Math(models.Model):
question = models.CharField(max_length = 500)
option1 = models.CharField(max_length = 20)
option2 = models.CharField(max_length = 20)
option3 = models.CharField(max_length = 20)
option4 = models.CharField(max_length = 20)
answer = models.CharField(max_length = 20)
class GK(models.Model):
question = models.CharField(max_length = 500)
option1 = models.CharField(max_length = 20)
option2 = models.CharField(max_length = 20)
option3 = models.CharField(max_length = 20)
option4 = models.CharField(max_length = 20)
answer = models.CharField(max_length = 20)
class MA(models.Model):
question = models.CharField(max_length = 500)
option1 = models.CharField(max_length = 20)
option2 = models.CharField(max_length = 20)
option3 = models.CharField(max_length = 20)
option4 = models.CharField(max_length = 20)
answer = models.CharField(max_length = 20)
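# Query sketch (hypothetical data; the same pattern applies to every subject
# model in this module): Quiz.objects.all() fetches every general question,
# and Math.objects.filter(answer='option2') filters by the stored answer key.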
class IC(models.Model):
question = models.CharField(max_length = 500)
option1 = models.CharField(max_length = 20)
option2 = models.CharField(max_length = 20)
option3 = models.CharField(max_length = 20)
option4 = models.CharField(max_length = 20)
answer = models.CharField(max_length = 20) | [
"[email protected]"
] | |
2db4dae997653d84e0371a2931389dd011acab93 | 30a2f77f5427a3fe89e8d7980a4b67fe7526de2c | /gen/RSGravitonToBBbar_M_650_TuneZ2star_8TeV_pythia6_cfg.py | 58a5a616d4582034f7a015e59624302553db3e82 | [] | no_license | DryRun/QCDAnalysis | 7fb145ce05e1a7862ee2185220112a00cb8feb72 | adf97713956d7a017189901e858e5c2b4b8339b6 | refs/heads/master | 2020-04-06T04:23:44.112686 | 2018-01-08T19:47:01 | 2018-01-08T19:47:01 | 55,909,998 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,080 | py | # Auto generated configuration file
# using:
# Revision: 1.381.2.28
# Source: /local/reps/CMSSW/CMSSW/Configuration/PyReleaseValidation/python/ConfigBuilder.py,v
# with command line options: CMSDIJET/QCDAnalysis/python/RSGravitonToBBbar_M_650_TuneZ2star_8TeV_pythia6_cff.py --python_filename /uscms/home/dryu/Dijets/CMSSW_5_3_32_patch3/src/CMSDIJET/QCDAnalysis/gen/RSGravitonToBBbar_M_650_TuneZ2star_8TeV_pythia6_cfg.py --fileout file:RSGravitonToBBbar_M_650_TuneZ2star_8TeV_pythia6_FastSim_RECOSIM.root --step GEN,FASTSIM,HLT:7E33v2 --mc --eventcontent RECOSIM --datatier GEN-SIM-DIGI-RECO --pileup 2012_Startup_inTimeOnly --geometry DB --conditions auto:mc --beamspot Realistic8TeVCollision --no_exec -n 1000
import FWCore.ParameterSet.Config as cms
process = cms.Process('HLT')
# import of standard configurations
process.load('Configuration.StandardSequences.Services_cff')
process.load('SimGeneral.HepPDTESSource.pythiapdt_cfi')
process.load('FWCore.MessageService.MessageLogger_cfi')
process.load('FastSimulation.Configuration.EventContent_cff')
process.load('FastSimulation.PileUpProducer.PileUpSimulator_2012_Startup_inTimeOnly_cff')
process.load('FastSimulation.Configuration.Geometries_MC_cff')
process.load('Configuration.StandardSequences.MagneticField_38T_cff')
process.load('Configuration.StandardSequences.Generator_cff')
process.load('GeneratorInterface.Core.genFilterSummary_cff')
process.load('FastSimulation.Configuration.FamosSequences_cff')
process.load('IOMC.EventVertexGenerators.VtxSmearedParameters_cfi')
process.load('HLTrigger.Configuration.HLT_7E33v2_Famos_cff')
process.load('Configuration.StandardSequences.FrontierConditions_GlobalTag_cff')
process.maxEvents = cms.untracked.PSet(
input = cms.untracked.int32(1000)
)
# Input source
process.source = cms.Source("EmptySource")
process.options = cms.untracked.PSet(
)
# Production Info
process.configurationMetadata = cms.untracked.PSet(
version = cms.untracked.string('$Revision: 1.381.2.28 $'),
annotation = cms.untracked.string('CMSDIJET/QCDAnalysis/python/RSGravitonToBBbar_M_650_TuneZ2star_8TeV_pythia6_cff.py nevts:1000'),
name = cms.untracked.string('PyReleaseValidation')
)
# Output definition
process.RECOSIMoutput = cms.OutputModule("PoolOutputModule",
splitLevel = cms.untracked.int32(0),
eventAutoFlushCompressedSize = cms.untracked.int32(5242880),
outputCommands = process.RECOSIMEventContent.outputCommands,
fileName = cms.untracked.string('file:RSGravitonToBBbar_M_650_TuneZ2star_8TeV_pythia6_FastSim_RECOSIM.root'),
dataset = cms.untracked.PSet(
filterName = cms.untracked.string(''),
dataTier = cms.untracked.string('GEN-SIM-DIGI-RECO')
),
SelectEvents = cms.untracked.PSet(
SelectEvents = cms.vstring('generation_step')
)
)
# Additional output definition
# Other statements
process.genstepfilter.triggerConditions=cms.vstring("generation_step")
process.famosSimHits.SimulateCalorimetry = True
process.famosSimHits.SimulateTracking = True
process.simulation = cms.Sequence(process.simulationWithFamos)
process.HLTEndSequence = cms.Sequence(process.reconstructionWithFamos)
process.Realistic8TeVCollisionVtxSmearingParameters.type = cms.string("BetaFunc")
process.famosSimHits.VertexGenerator = process.Realistic8TeVCollisionVtxSmearingParameters
process.famosPileUp.VertexGenerator = process.Realistic8TeVCollisionVtxSmearingParameters
from Configuration.AlCa.GlobalTag import GlobalTag
process.GlobalTag = GlobalTag(process.GlobalTag, 'auto:mc', '')
process.generator = cms.EDFilter("Pythia6GeneratorFilter",
pythiaPylistVerbosity = cms.untracked.int32(1),
filterEfficiency = cms.untracked.double(1.0),
pythiaHepMCVerbosity = cms.untracked.bool(False),
comEnergy = cms.double(8000.0),
crossSection = cms.untracked.double(13.12),
maxEventsToPrint = cms.untracked.int32(0),
PythiaParameters = cms.PSet(
pythiaUESettings = cms.vstring('MSTU(21)=1 ! Check on possible errors during program execution',
'MSTJ(22)=2 ! Decay those unstable particles',
'PARJ(71)=10 . ! for which ctau 10 mm',
'MSTP(33)=0 ! no K factors in hard cross sections',
'MSTP(2)=1 ! which order running alphaS',
'MSTP(51)=10042 ! structure function chosen (external PDF CTEQ6L1)',
'MSTP(52)=2 ! work with LHAPDF',
'PARP(82)=1.921 ! pt cutoff for multiparton interactions',
'PARP(89)=1800. ! sqrts for which PARP82 is set',
'PARP(90)=0.227 ! Multiple interactions: rescaling power',
'MSTP(95)=6 ! CR (color reconnection parameters)',
'PARP(77)=1.016 ! CR',
'PARP(78)=0.538 ! CR',
'PARP(80)=0.1 ! Prob. colored parton from BBR',
'PARP(83)=0.356 ! Multiple interactions: matter distribution parameter',
'PARP(84)=0.651 ! Multiple interactions: matter distribution parameter',
'PARP(62)=1.025 ! ISR cutoff',
'MSTP(91)=1 ! Gaussian primordial kT',
'PARP(93)=10.0 ! primordial kT-max',
'MSTP(81)=21 ! multiple parton interactions 1 is Pythia default',
'MSTP(82)=4 ! Defines the multi-parton model'),
processParameters = cms.vstring('PMAS(347,1)= 650 ! mass of RS Graviton',
'PARP(50) = 0.54 ! 0.54 == c=0.1 (k/M_PL=0.1)',
'MSEL=0 ! (D=1) to select between full user control (0, then use MSUB) and some preprogrammed alternative',
'MSUB(391)=1 ! q qbar -> G* ',
'MSUB(392)=1 ! g g -> G*',
'5000039:ALLOFF ! Turn off all decays of G*',
'5000039:ONIFANY 5 ! Turn on the decay b bbar'),
parameterSets = cms.vstring('pythiaUESettings',
'processParameters')
)
)
process.ProductionFilterSequence = cms.Sequence(process.generator)
# Path and EndPath definitions
process.generation_step = cms.Path(process.pgen_genonly)
process.reconstruction = cms.Path(process.reconstructionWithFamos)
process.genfiltersummary_step = cms.EndPath(process.genFilterSummary)
process.RECOSIMoutput_step = cms.EndPath(process.RECOSIMoutput)
# Schedule definition
process.schedule = cms.Schedule(process.generation_step,process.genfiltersummary_step)
process.schedule.extend(process.HLTSchedule)
process.schedule.extend([process.reconstruction,process.RECOSIMoutput_step])
# filter all path with the production filter sequence
for path in process.paths:
getattr(process,path)._seq = process.ProductionFilterSequence * getattr(process,path)._seq
# customisation of the process.
# Automatic addition of the customisation function from HLTrigger.Configuration.customizeHLTforMC
from HLTrigger.Configuration.customizeHLTforMC import customizeHLTforMC
#call to customisation function customizeHLTforMC imported from HLTrigger.Configuration.customizeHLTforMC
process = customizeHLTforMC(process)
# End of customisation functions
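# Run sketch (assumes a sourced CMSSW_5_3_X environment, as implied by the
# header comment): cmsRun RSGravitonToBBbar_M_650_TuneZ2star_8TeV_pythia6_cfg.py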
| [
"[email protected]"
] | |
f5023268067d834f709d2fce7dfcc26bde67633d | 10d98fecb882d4c84595364f715f4e8b8309a66f | /smurf/smurf_net.py | 7db85ec342f48a9a59ab91a8b9e5e39edb3d9bb2 | [
"CC-BY-4.0",
"Apache-2.0"
] | permissive | afcarl/google-research | 51c7b70d176c0d70a5ee31ea1d87590f3d6c6f42 | 320a49f768cea27200044c0d12f394aa6c795feb | refs/heads/master | 2021-12-02T18:36:03.760434 | 2021-09-30T20:59:01 | 2021-09-30T21:07:02 | 156,725,548 | 1 | 0 | Apache-2.0 | 2018-11-08T15:13:53 | 2018-11-08T15:13:52 | null | UTF-8 | Python | false | false | 21,402 | py | # coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Smurf: Unsupervised Optical Flow.
This class provides functions for loading, restoring, computing loss,
and inference.
"""
# pylint:skip-file
import functools
import math
import gin
import tensorflow as tf
from smurf import smurf_utils
from smurf.smurf_models import pwc_model
from smurf.smurf_models import raft_model
@gin.configurable
class SMURFNet(object):
"""Simple interface with infer and train methods."""
def __init__(
self,
checkpoint_dir='',
optimizer='adam',
learning_rate=0.0002,
only_forward=False,
dropout_rate=.25,
selfsup_transform=None,
fb_sigma_teacher=0.003,
fb_sigma_student=0.03,
train_mode='sequence-unsupervised',
smoothness_edge_weighting='gaussian',
smoothness_edge_constant=150,
teacher_image_version='original',
stop_gradient_mask=True,
selfsup_mask='gaussian',
feature_architecture='raft',
flow_architecture='raft',
size=(1, 640, 640),
occlusion_estimation='wang',
smoothness_at_level=2,
use_float16=True,
):
"""Instantiate a SMURF model.
Args:
checkpoint_dir: str, location to checkpoint model
optimizer: str, identifier of which optimizer to use
learning_rate: float, learning rate to use for training
only_forward: bool, if True, only infer flow in one direction
dropout_rate: float, how much dropout to use with pwc net
selfsup_transform: list of functions which transform the flow
predicted from the raw images to be in the frame of images transformed
by geometric_augmentation_fn
fb_sigma_teacher: float, controls how much forward-backward flow
consistency is needed by the teacher model in order to supervise the
student
fb_sigma_student: float, controls how much forward-backward
consistency is needed by the student model in order to not receive
supervision from the teacher model
train_mode: str, controls what kind of training loss should be used. One
of the following: 'unsupervised', 'sequence-unsupervised',
'supervised', or 'sequence-supervised'
smoothness_edge_weighting: str, controls how smoothness penalty is
determined
smoothness_edge_constant: float, a weighting on smoothness
teacher_image_version: str, which image to give to teacher model
stop_gradient_mask: bool, whether to stop the gradient of photometric
loss through the occlusion mask.
selfsup_mask: str, type of selfsupervision mask to use
feature_architecture: str, which feature extractor architecture to use,
either raft or pwc.
flow_architecture: str, which flow model architecture to use, either raft
or pwc.
size: 3-tuple of batch size, height, width
occlusion_estimation: str, the type of occlusion estimation to use
smoothness_at_level: int, the level to apply smoothness
use_float16: bool, whether or not to use float16 inside the models. This
reduces memory usage and computation time and does not impact accuracy.
Returns:
Smurf model instance.
"""
self._only_forward = only_forward
self._selfsup_transform = selfsup_transform
self._fb_sigma_teacher = fb_sigma_teacher
self._fb_sigma_student = fb_sigma_student
self._train_mode = train_mode
self._smoothness_edge_weighting = smoothness_edge_weighting
self._smoothness_edge_constant = smoothness_edge_constant
self._smoothness_at_level = smoothness_at_level
self._teacher_flow_model = None
self._teacher_feature_model = None
self._teacher_image_version = teacher_image_version
self._stop_gradient_mask = stop_gradient_mask
self._selfsup_mask = selfsup_mask
self._size = size
self._use_float16 = use_float16
self._flow_architecture = flow_architecture
# Models
if feature_architecture == 'pwc':
self._feature_model = pwc_model.PWCFeatureSiamese()
elif feature_architecture == 'raft':
self._feature_model = raft_model.RAFTFeatureSiamese()
else:
raise ValueError(
'Unknown feature architecture {}'.format(feature_architecture))
if flow_architecture == 'pwc':
self._flow_model = pwc_model.PWCFlow(
dropout_rate=dropout_rate)
elif flow_architecture == 'raft':
self._flow_model = raft_model.RAFT()
else:
raise ValueError('Unknown flow architecture {}'.format(flow_architecture))
# By default, the teacher flow and featuure models are the same as
# the student flow and feature models.
self._teacher_flow_model = self._flow_model
self._teacher_feature_model = self._feature_model
self._learning_rate = learning_rate
self._optimizer_type = optimizer
self._make_or_reset_optimizer()
# Set up checkpointing.
self._make_or_reset_checkpoint()
self.update_checkpoint_dir(checkpoint_dir)
self._occlusion_estimation = occlusion_estimation
def set_teacher_models(self, teacher_feature_model, teacher_flow_model):
self._teacher_feature_model = teacher_feature_model
self._teacher_flow_model = teacher_flow_model
@property
def feature_model(self):
return self._feature_model
@property
def flow_model(self):
return self._flow_model
def update_checkpoint_dir(self, checkpoint_dir):
"""Changes the checkpoint directory for saving and restoring."""
self._manager = tf.train.CheckpointManager(
self._checkpoint, directory=checkpoint_dir, max_to_keep=1)
def restore(self, reset_optimizer=False, reset_global_step=False):
"""Restores a saved model from a checkpoint."""
status = self._checkpoint.restore(self._manager.latest_checkpoint)
try:
status.assert_existing_objects_matched()
except AssertionError as e:
print('Error while attempting to restore SMURF models:', e)
if reset_optimizer:
self._make_or_reset_optimizer()
self._make_or_reset_checkpoint()
if reset_global_step:
tf.compat.v1.train.get_or_create_global_step().assign(0)
def save(self):
self._manager.save()
def _make_or_reset_optimizer(self):
"""Creates the optimizer attribute if not created, else resets it."""
# Force the models to initialize their variables
fake_image = tf.ones((1, self._size[1], self._size[2], 3))
feature_dict = self._feature_model(fake_image, fake_image)
_ = self._flow_model(feature_dict)
if self._optimizer_type == 'adam':
self._optimizer = tf.keras.optimizers.Adam(self._learning_rate,
name='Optimizer')
elif self._optimizer_type == 'sgd':
self._optimizer = tf.keras.optimizers.SGD(
self._learning_rate, name='Optimizer')
else:
raise ValueError('Optimizer "{}" not yet implemented.'.format(
self._optimizer_type))
if self._use_float16:
self._optimizer = (
tf.compat.v1.mixed_precision.enable_mixed_precision_graph_rewrite(
self._optimizer))
@property
def optimizer(self):
return self._optimizer
def get_checkpoint(self, additional_variables):
return tf.train.Checkpoint(
optimizer=self._optimizer,
feature_model=self._feature_model,
flow_model=self._flow_model,
optimizer_step=tf.compat.v1.train.get_or_create_global_step(),
additional_variables=additional_variables,
)
def _make_or_reset_checkpoint(self):
self._checkpoint = tf.train.Checkpoint(
optimizer=self._optimizer,
feature_model=self._feature_model,
flow_model=self._flow_model,
optimizer_step=tf.compat.v1.train.get_or_create_global_step())
# Use of tf.function breaks exporting the model
def infer_no_tf_function(self,
image1,
image2,
input_height=None,
input_width=None,
resize_flow_to_img_res=True,
infer_occlusion=False,
infer_bw=False):
"""Infer flow for two images.
Args:
image1: tf.tensor of shape [height, width, 3].
image2: tf.tensor of shape [height, width, 3].
input_height: height at which the model should be applied if different
from image height.
input_width: width at which the model should be applied if different from
image width
resize_flow_to_img_res: bool, if True, return the flow resized to the same
resolution as (image1, image2). If False, return flow at the whatever
resolution the model natively predicts it.
infer_occlusion: bool, if True, return both flow and a soft occlusion
mask, else return just flow.
infer_bw: bool, if True, returns the flow in both the forward and reverse
direction.
Returns:
Optical flow for each pixel in image1 pointing to image2.
"""
results = self.batch_infer_no_tf_function(
tf.stack([image1, image2])[None],
input_height=input_height,
input_width=input_width,
resize_flow_to_img_res=resize_flow_to_img_res,
infer_occlusion=infer_occlusion,
infer_bw=infer_bw)
# Remove batch dimension from all results.
if type(results) in [tuple, list]:
return [x[0] for x in results]
else:
return results[0]
# Use of tf.function breaks exporting the model
def batch_infer_no_tf_function(self,
images,
input_height=None,
input_width=None,
resize_flow_to_img_res=True,
infer_occlusion=False,
infer_bw=False):
"""Infer flow for two images.
Args:
images: tf.tensor of shape [batchsize, 2, height, width, 3].
input_height: height at which the model should be applied if different
from image height.
input_width: width at which the model should be applied if different from
image width
resize_flow_to_img_res: bool, if True, return the flow resized to the same
resolution as (image1, image2). If False, return flow at the whatever
resolution the model natively predicts it.
infer_occlusion: bool, if True, return both flow and a soft occlusion
mask, else return just flow.
infer_bw: bool, if True, return flow in the reverse direction
Returns:
Optical flow for each pixel in image1 pointing to image2.
"""
orig_height, orig_width = images.shape[-3:-1]
if input_height is None:
input_height = orig_height
if input_width is None:
input_width = orig_width
# Ensure a feasible computation resolution. If specified size is not
# feasible with the model, change it to a slightly higher resolution.
if self._flow_architecture == 'pwc':
divisible_by_num = pow(2.0, self._num_levels)
elif self._flow_architecture == 'raft':
divisible_by_num = 8.0
else:
divisible_by_num = 1.
if (input_height % divisible_by_num != 0 or
input_width % divisible_by_num != 0):
print('Cannot process images at a resolution of '+str(input_height)+
'x'+str(input_width)+', since the height and/or width is not a '
'multiple of '+str(divisible_by_num)+'.')
# compute a feasible resolution
input_height = int(
math.ceil(float(input_height) / divisible_by_num) * divisible_by_num)
input_width = int(
math.ceil(float(input_width) / divisible_by_num) * divisible_by_num)
print('Inference will be run at a resolution of '+str(input_height)+
'x'+str(input_width)+'.')
# Resize images to desired input height and width.
if input_height != orig_height or input_width != orig_width:
images = smurf_utils.resize(
images, input_height, input_width, is_flow=False)
feature_dict = self._feature_model(
images[:, 0], images[:, 1], bidirectional=infer_occlusion)
# Compute flow in frame of image1.
# noinspection PyCallingNonCallable
flow = self._flow_model(feature_dict, training=False)[0]
if infer_occlusion or infer_bw:
# noinspection PyCallingNonCallable
flow_backward = self._flow_model(
feature_dict, training=False, backward=True)[0]
occlusion_mask = self.infer_occlusion(flow, flow_backward)
occlusion_mask = smurf_utils.resize(
occlusion_mask, orig_height, orig_width, is_flow=False)
# Resize and rescale flow to original resolution. This always needs to be
# done because flow is generated at a lower resolution.
if resize_flow_to_img_res:
flow = smurf_utils.resize(flow, orig_height, orig_width, is_flow=True)
if infer_bw:
flow_backward = smurf_utils.resize(flow_backward, orig_height,
orig_width,
is_flow=True)
# TODO: A dictionary or object output here would be preferable to tuples.
if infer_occlusion and infer_bw:
return flow, occlusion_mask, flow_backward
if infer_bw:
return flow, flow_backward
if infer_occlusion:
return flow, occlusion_mask
return flow
@tf.function
def infer(self,
image1,
image2,
input_height=None,
input_width=None,
resize_flow_to_img_res=True,
infer_occlusion=False,
infer_bw=False):
return self.infer_no_tf_function(image1, image2, input_height, input_width,
resize_flow_to_img_res, infer_occlusion,
infer_bw)
@tf.function
def batch_infer(self,
images,
input_height=None,
input_width=None,
resize_flow_to_img_res=True,
infer_occlusion=False,
infer_bw=False):
return self.batch_infer_no_tf_function(images, input_height, input_width,
resize_flow_to_img_res,
infer_occlusion, infer_bw)
def infer_occlusion(self, flow_forward, flow_backward):
"""Get a 'soft' occlusion mask from the forward and backward flow."""
occlusion_mask = smurf_utils.compute_occlusions(flow_forward,
flow_backward,
self._occlusion_estimation,
occlusions_are_zeros=False)
return occlusion_mask
def loss_and_grad(self,
inputs,
weights,
occ_active=None):
"""Apply the model on the data in batch and compute the loss.
Args:
inputs: a dictionary of input tf.Tensors
weights: dictionary with float entries per loss.
occ_active: a dictionary describing how occlusions should be handled
Returns:
A tuple consisting of a tf.scalar that represents the total loss for the
current batch, a list of gradients, and a list of the respective
variables.
"""
with tf.GradientTape() as tape:
losses = self.compute_loss(
inputs,
weights,
occ_active=occ_active)
if self._use_float16:
scaled_loss = self._optimizer.get_scaled_loss(losses['total-loss'])
variables = (
self._feature_model.trainable_variables +
self._flow_model.trainable_variables)
if self._use_float16:
scaled_gradients = tape.gradient(scaled_loss, variables)
grads = self._optimizer.get_unscaled_gradients(scaled_gradients)
else:
grads = tape.gradient(losses['total-loss'], variables)
return losses, grads, variables
def compute_loss(self,
inputs,
weights,
occ_active=None):
"""Apply models and compute losses for a batch of image sequences."""
# Check if chosen train_mode is valid.
if self._train_mode not in [
'supervised', 'unsupervised', 'sequence-supervised',
'sequence-unsupervised',]:
raise NotImplementedError(
'train_mode must be one of the following options: supervised, '
'unsupervised, sequence-supervised.')
# The 'images' here have been geometrically but not photometrically
# augmented.
images = inputs.get('images')
augmented_images = inputs.get('augmented_images', images)
ground_truth_flow = inputs.get('flow')
ground_truth_valid = inputs.get('flow_valid')
full_size_images = inputs.get('full_size_images')
crop_h = inputs.get('crop_h')
crop_w = inputs.get('crop_w')
pad_h = inputs.get('pad_h')
pad_w = inputs.get('pad_w')
# Compute only a sequence loss.
sequence_supervised_losses = {}
if self._train_mode == 'sequence-supervised':
flows = smurf_utils.compute_flow_for_sequence_loss(
self._feature_model, self._flow_model, batch=augmented_images,
training=True)
sequence_supervised_losses = smurf_utils.supervised_sequence_loss(
ground_truth_flow, ground_truth_valid, flows, weights)
sequence_supervised_losses['total'] = sum(
sequence_supervised_losses.values())
sequence_supervised_losses = {
key + '-loss': sequence_supervised_losses[key]
for key in sequence_supervised_losses
}
return sequence_supervised_losses
# Compute only a supervised loss.
supervised_losses = {}
if self._train_mode == 'supervised':
if ground_truth_flow is None:
raise ValueError('Need ground truth flow to compute supervised loss.')
flows = smurf_utils.compute_flow_for_supervised_loss(
self._feature_model, self._flow_model, batch=augmented_images,
training=True)
supervised_losses = smurf_utils.supervised_loss(
ground_truth_flow, ground_truth_valid, flows, weights)
supervised_losses['total'] = sum(supervised_losses.values())
supervised_losses = {
key + '-loss': supervised_losses[key] for key in supervised_losses
}
return supervised_losses
# Compute all required flow fields.
# TODO: Can't condition computation on this without breaking autograph.
perform_selfsup = 'selfsup' in weights
flows = smurf_utils.compute_flows_for_unsupervised_loss(
feature_model=self._feature_model,
flow_model=self._flow_model,
batch=augmented_images,
batch_without_aug=images,
training=True,
selfsup_transform_fn=self._selfsup_transform,
return_sequence='sequence' in self._train_mode,
perform_selfsup=perform_selfsup)
# Prepare occlusion estimation function.
occlusion_estimation_fn = functools.partial(
smurf_utils.compute_occlusions,
occlusion_estimation=self._occlusion_estimation,
occlusions_are_zeros=True,
occ_active=occ_active,
boundaries_occluded=full_size_images is None)
# Prepare a simplified call for the unsupervised loss function.
unsupervised_loss_fn = functools.partial(
smurf_utils.unsupervised_loss,
weights=weights,
occlusion_estimation_fn=occlusion_estimation_fn,
only_forward=False,
selfsup_transform_fn=self._selfsup_transform,
fb_sigma_teacher=self._fb_sigma_teacher,
fb_sigma_student=self._fb_sigma_student,
smoothness_edge_weighting=self._smoothness_edge_weighting,
smoothness_edge_constant=self._smoothness_edge_constant,
stop_gradient_mask=self._stop_gradient_mask,
selfsup_mask=self._selfsup_mask,
smoothness_at_level=self._smoothness_at_level)
losses = {}
if self._train_mode == 'unsupervised':
unsupervised_losses = unsupervised_loss_fn(
images,
flows,
full_size_images=full_size_images,
crop_h=crop_h,
crop_w=crop_w,
pad_h=pad_h,
pad_w=pad_w)
losses.update(unsupervised_losses)
elif self._train_mode == 'sequence-unsupervised':
sequence_unsupervised_losses = smurf_utils.unsupervised_sequence_loss(
images=images,
flows_sequence=flows,
full_size_images=full_size_images,
crop_h=crop_h,
crop_w=crop_w,
pad_h=pad_h,
pad_w=pad_w,
unsupervised_loss_fn=unsupervised_loss_fn)
losses.update(sequence_unsupervised_losses)
else:
raise ValueError(f'Unknown mode {self._train_mode}')
losses['total'] = sum(losses.values())
losses = {key + '-loss': losses[key] for key in losses}
return losses
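# Usage sketch (hypothetical checkpoint path; height and width should be
# multiples of 8 for the default RAFT backbone, per
# batch_infer_no_tf_function above):
# import tensorflow as tf
# net = SMURFNet(checkpoint_dir='/tmp/smurf_ckpt', size=(1, 640, 640))
# net.restore()
# img1 = tf.zeros((640, 640, 3)); img2 = tf.zeros((640, 640, 3))
# flow = net.infer(img1, img2)  # [640, 640, 2] flow from img1 to img2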
| [
"[email protected]"
] | |
48cd1bdae7a1166b75c4968c0fb1f53f4f29376d | 235bf57e37733cf265913ba2d6e7e95f915dad06 | /pricealert.py | 4e0e80488f9c421adb80ddd7ee79747cf8f801c9 | [] | no_license | dbrgn/avarulo | 47c3e3ea129a6d80afe55b81e3a62e513f526f47 | bf48d2f6a19bb2f0af0ccecb83c70b9bef3affa2 | refs/heads/master | 2021-05-22T18:13:33.359090 | 2020-04-04T15:56:58 | 2020-04-04T15:56:58 | 253,034,903 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,065 | py | """
Price Alert
Monitor products for price reductions.
Usage:
pricealert.py [-c <configfile>]
Options:
-c <configfile> [default: config.yml]
"""
import re
import sys
from typing import Optional, Tuple, Union
from bs4 import BeautifulSoup
from docopt import docopt
import requests
import yaml
USER_AGENT = 'Mozilla/5.0 (X11; Linux x86_64; rv:74.0) Gecko/20100101 Firefox/74.0'
def fetch(url: str, raw: bool = False) -> Union[BeautifulSoup, str]:
response = requests.get(url, headers={'User-Agent': USER_AGENT})
response.raise_for_status()
if raw:
return response.text
return BeautifulSoup(response.text, 'html.parser')
def check_galaxus(url: str) -> Tuple[float, Optional[float]]:
"""
Check the price on galaxus.ch.
Current price:
<meta content="49" property="product:price:amount"/>
Regular price:
<meta content="72.4" property="og:price:standard_amount"/>
"""
soup = fetch(url)
current = float(soup.find('meta', property='product:price:amount').get('content'))
standard_amount_meta = soup.find('meta', property='og:price:standard_amount')
if standard_amount_meta:
regular = float(standard_amount_meta.get('content')) # type: Optional[float]
else:
regular = None
return (current, regular)
def check_baechli(url: str) -> Tuple[float, Optional[float]]:
"""
Check the price on baechli-bergsport.ch.
Current price:
<meta content="49" property="product:price:amount"/>
Regular price: ??
"""
soup = fetch(url)
current = float(soup.find('meta', property='product:price:amount').get('content'))
return (current, None)
def check_intersport(url: str) -> Tuple[float, Optional[float]]:
"""
Check the price on achermannsport.ch.
Raw data:
<div class="summary entry-summary">
<h1 class="product_title entry-title">Ligtning Ascent 22 Women raspberry und Gunmetal</h1>
<p class="price">
<del><span class="woocommerce-Price-amount amount"><span class="woocommerce-Price-currencySymbol">CHF</span> 379.00</span></del>
<ins><span class="woocommerce-Price-amount amount"><span class="woocommerce-Price-currencySymbol">CHF</span> 299.90</span></ins>
</p>
...
Raw data (no rebate):
<div class="summary entry-summary">
<h1 class="product_title entry-title">Ligtning Ascent 22 Women raspberry und Gunmetal</h1>
<p class="price">
<span class="woocommerce-Price-amount amount"><span class="woocommerce-Price-currencySymbol">CHF</span> 29.00</span>
</p>
...
"""
soup = fetch(url)
summary = soup.find('div', class_='entry-summary')
prices = summary.find(class_='price')
regular = prices.find('del', recursive=False)
current = prices.find('ins', recursive=False)
def _get_price(element) -> float:
parts = element.find(class_='amount').text.split('\xa0')
assert parts[0] == 'CHF'
return float(parts[1])
if regular and current:
return (_get_price(current), _get_price(regular))
else:
return (_get_price(prices), None)
def check_primal(url: str) -> Tuple[float, Optional[float]]:
"""
Check the price on primal.ch.
Current price:
<meta itemprop="price" content="284.90">
Regular price:
<span class="price--line-through">CHF 469.00 *</span>
"""
soup = fetch(url)
current = float(soup.find('meta', itemprop='price').get('content'))
regular_element = soup.find('span', class_='price--line-through')
if regular_element:
regular = float(re.findall(r'[\d\.]+', regular_element.text)[0])
return (current, regular)
else:
return (current, None)
def check_transa(url: str) -> Tuple[float, Optional[float]]:
"""
Check the price on transa.ch.
Non-promo:
price: {
base: '',
promo: 'CHF 379.90',
savings: 'CHF 0.00',
reducedPriceInfoText: 'Streichpreis entspricht dem zuletzt angezeigten Preis im Onlineshop.',
basicPrice: ''
},
Promo:
price: {
base: 'CHF 899.90',
promo: 'CHF 629.90',
savings: 'CHF 270.00',
reducedPriceInfoText: 'Streichpreis entspricht dem zuletzt angezeigten Preis im Onlineshop.',
basicPrice: ''
},
"""
text = fetch(url, raw=True)
prices = {}
matches = filter(
None,
[re.match(r"^\s*(base|promo): 'CHF ([^']*)',$", line) for line in text.splitlines()],
)
for match in matches:
prices[match.group(1)] = float(match.group(2))
return (prices['promo'], prices.get('base'))
def _load_check_fn(shop: dict) -> dict:
"""
Load a check function by name.
"""
func = globals().get(shop['check_func'])
if func is None:
raise ValueError('Check func not found: {}'.format(shop['check_func']))
shop['check_func'] = func
return shop
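# Sketch of the config.yml shape that main() below expects (shop names and
# URLs here are hypothetical):
#
# shops:
#   galaxus:
#     name: Galaxus
#     check_func: check_galaxus
# products:
#   - name: Example snowshoes
#     shops:
#       galaxus: https://www.galaxus.ch/en/...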
def main(config: dict):
# Load shops
shops = {
k: _load_check_fn(v)
for k, v
in config['shops'].items()
}
for product in config['products']:
print('Checking {}:'.format(product['name']))
for shop_id, url in product['shops'].items():
shop = shops[shop_id]
prices = shop['check_func'](url)
print(' {}: {:.2f} CHF'.format(shop['name'], prices[0]), end='')
if prices[1] is None:
print()
else:
assert prices[1] > prices[0], prices
print(' (statt {:.2f} CHF)'.format(prices[1]))
print()
if __name__ == '__main__':
args = docopt(__doc__, version='Price Alert 0.1')
configfile = args['-c'] or 'config.yml'
with open(configfile, 'r') as f:
try:
config = yaml.safe_load(f)
except yaml.YAMLError as e:
print('Could not load config file: {}'.format(e))
sys.exit(1)
main(config)
| [
"[email protected]"
] | |
8416beada435e65a21c8e0124f100302ee9f9bf5 | 71501709864eff17c873abbb97ffabbeba4cb5e3 | /llvm14.0.4/lldb/test/API/functionalities/data-formatter/type_summary_list_arg/TestTypeSummaryListArg.py | a47d91434822e44e560cc297352404f11b7429aa | [
"NCSA",
"Apache-2.0",
"LLVM-exception"
] | permissive | LEA0317/LLVM-VideoCore4 | d08ba6e6f26f7893709d3285bdbd67442b3e1651 | 7ae2304339760685e8b5556aacc7e9eee91de05c | refs/heads/master | 2022-06-22T15:15:52.112867 | 2022-06-09T08:45:24 | 2022-06-09T08:45:24 | 189,765,789 | 1 | 0 | NOASSERTION | 2019-06-01T18:31:29 | 2019-06-01T18:31:29 | null | UTF-8 | Python | false | false | 1,235 | py | """
Test lldb data formatter subsystem.
"""
import lldb
from lldbsuite.test.decorators import *
from lldbsuite.test.lldbtest import *
from lldbsuite.test import lldbutil
class TypeSummaryListArgumentTestCase(TestBase):
mydir = TestBase.compute_mydir(__file__)
@no_debug_info_test
def test_type_summary_list_with_arg(self):
"""Test that the 'type summary list' command handles command line arguments properly"""
self.expect(
'type summary list Foo',
substrs=[
'Category: default',
'Category: system'])
self.expect(
'type summary list char',
substrs=[
'char ?(\*|\[\])',
'char ?\[[0-9]+\]'])
self.expect(
'type summary list -w default',
substrs=['system'],
matching=False)
self.expect(
'type summary list -w system unsigned',
substrs=[
'default',
'0-9'],
matching=False)
self.expect(
'type summary list -w system char',
substrs=[
'char ?(\*|\[\])',
'char ?\[[0-9]+\]'],
matching=True)
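# Run sketch (flags assumed; adjust to your build tree): from the lldb test
# directory, python dotest.py -p TestTypeSummaryListArg.py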
| [
"[email protected]"
] | |
16ac290fc3f25f1b9ba3e8307728a0f77ff6b606 | d90daf0b839349d49439037f6bffe37830e165aa | /settings.py | c9ae21702e837243477afde60e6e602c6faf4b9f | [] | no_license | m2o/fitlog | 2d49ecff12069769c88617f07c600512ef2a0c97 | e8736a3bc677d1d160cf7f3b6201ffa1b0de2760 | refs/heads/master | 2020-05-20T08:54:27.169217 | 2013-12-12T21:24:42 | 2013-12-12T21:24:42 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,858 | py | # Django settings for fitlog project.
import os
DEBUG = True
TEMPLATE_DEBUG = DEBUG
ROOT_PATH = os.path.dirname(__file__)
ADMINS = (
# ('Your Name', '[email protected]'),
)
MANAGERS = ADMINS
DATABASES = {
'default': {
#'ENGINE': 'django.db.backends.postgresql_psycopg2',
#'NAME' : 'fitlog_db',
#'HOST' : 'localhost',
#'PORT' : '',
#'USER' : 'fitloguser',
#'PASSWORD' : 'fitlogpass'
}
}
DATABASES['default'] = {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(ROOT_PATH,'fitlogdb'),
'USER': '',
'PASSWORD': '',
'HOST': '',
'PORT': '',
}
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# On Unix systems, a value of None will cause Django to use the same
# timezone as the operating system.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = None
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = False
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale
USE_L10N = True
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/home/media/media.lawrence.com/media/"
MEDIA_ROOT = os.path.join(ROOT_PATH, 'site_media')
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://media.lawrence.com/media/", "http://example.com/media/"
MEDIA_URL = ''
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/home/media/media.lawrence.com/static/"
STATIC_ROOT = ''
# URL prefix for static files.
# Example: "http://media.lawrence.com/static/"
STATIC_URL = '/static/'
# URL prefix for admin static files -- CSS, JavaScript and images.
# Make sure to use a trailing slash.
# Examples: "http://foo.com/static/admin/", "/static/admin/".
ADMIN_MEDIA_PREFIX = '/static/admin/'
# Additional locations of static files
STATICFILES_DIRS = (
# Put strings here, like "/home/html/static" or "C:/www/django/static".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
# Make this unique, and don't share it with anybody.
SECRET_KEY = '#i+-%8t=-79_pzu7q5yy367h_x5662w!_e9(@z4h9ns5s7i!0x'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
# 'django.template.loaders.eggs.Loader',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
)
ROOT_URLCONF = 'fitlog.urls'
TEMPLATE_DIRS = (
os.path.join(ROOT_PATH,'fitlogapp/templates'),
)
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
'fitlogapp',
'fitlogmodel',
'tagging',
'django.contrib.admin',
'django.contrib.admindocs',
)
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'handlers': {
'mail_admins': {
'level': 'ERROR',
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
| [
"[email protected]"
] | |
ff59ca30001dc596066bc271b4203a45b303fe6e | 52e8841ac9603e994fc487ecb52f232e55a50e07 | /Bio/GA/Selection/RouletteWheel.py | 0dc44d9302247f22c95f690600dd792906fdb1dd | [] | no_license | rored/RozszerzenieBio.PDB | aff434fddfe57199a7465f79126eba62b1c789ae | 7c9d696faacabff912b1263fe19291d6a198c3c2 | refs/heads/master | 2021-01-21T04:50:37.903227 | 2016-06-23T19:15:42 | 2016-06-23T19:15:42 | 55,064,794 | 0 | 3 | null | null | null | null | UTF-8 | Python | false | false | 4,863 | py | # This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
#
"""Implement Roulette Wheel selection on a population.
This implements Roulette Wheel selection in which individuals are
selected from a population randomly, with their proportion of selection
based on their relative fitness in the population.
"""
# standard modules
import random
import copy
# local modules
from .Abstract import AbstractSelection
__docformat__ = "restructuredtext en"
class RouletteWheelSelection(AbstractSelection):
"""Roulette wheel selection proportional to individuals fitness.
    This implements a roulette wheel selector that selects individuals
from the population, and performs mutation and crossover on
the selected individuals.
"""
def __init__(self, mutator, crossover, repairer=None):
"""Initialize the selector.
Arguments:
o mutator -- A Mutation object which will perform mutation
on an individual.
o crossover -- A Crossover object which will take two
individuals and produce two new individuals which may
have had crossover occur.
o repairer -- A class which can do repair on rearranged genomes
        to eliminate infeasible individuals. If set to None, no repair
        will be done.
"""
AbstractSelection.__init__(self, mutator, crossover, repairer)
def select(self, population):
"""Perform selection on the population based using a Roulette model.
Arguments:
o population -- A population of organisms on which we will perform
selection. The individuals are assumed to have fitness values which
are due to their current genome.
"""
# set up the current probabilities for selecting organisms
# from the population
prob_wheel = self._set_up_wheel(population)
probs = sorted(prob_wheel)
# now create the new population with the same size as the original
new_population = []
for pair_spin in range(len(population) // 2):
# select two individuals using roulette wheel selection
choice_num_1 = random.random()
choice_num_2 = random.random()
# now grab the two organisms from the probabilities
chosen_org_1 = None
chosen_org_2 = None
prev_prob = 0
for cur_prob in probs:
if choice_num_1 > prev_prob and choice_num_1 <= cur_prob:
chosen_org_1 = prob_wheel[cur_prob]
if choice_num_2 > prev_prob and choice_num_2 <= cur_prob:
chosen_org_2 = prob_wheel[cur_prob]
prev_prob = cur_prob
assert chosen_org_1 is not None, "Didn't select organism one"
assert chosen_org_2 is not None, "Didn't select organism two"
# do mutation and crossover to get the new organisms
new_org_1, new_org_2 = self.mutate_and_crossover(chosen_org_1,
chosen_org_2)
new_population.extend([new_org_1, new_org_2])
return new_population
def _set_up_wheel(self, population):
"""Set up the roulette wheel based on the fitnesses.
This creates a fitness proportional 'wheel' that will be used for
selecting based on random numbers.
Returns:
o A dictionary where the keys are the 'high' value that an
individual will be selected. The low value is determined by
the previous key in a sorted list of keys. For instance, if we
have a sorted list of keys like:
[.1, .3, .7, 1]
Then the individual whose key is .1 will be selected if a number
between 0 and .1 is chosen, the individual whose key is .3 will
be selected if the number is between .1 and .3, and so on.
The values of the dictionary are the organism instances.
"""
# first sum up the total fitness in the population
total_fitness = 0
for org in population:
total_fitness += org.fitness
# now create the wheel dictionary for all of the individuals
wheel_dict = {}
total_percentage = 0
for org in population:
org_percentage = float(org.fitness) / float(total_fitness)
# the organisms chance of being picked goes from the previous
# percentage (total_percentage) to the previous percentage
# plus the organisms specific fitness percentage
wheel_dict[total_percentage + org_percentage] = copy.copy(org)
# keep a running total of where we are at in the percentages
total_percentage += org_percentage
return wheel_dict
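# Illustrative standalone sketch (added for clarity; not part of Biopython):
# how a wheel dictionary like the one built above maps a uniform random draw
# to a pick, mirroring the loop in select(). Plain strings stand in for
# organism instances here.
def _demo_wheel_pick():
    wheel = {0.1: "a", 0.3: "b", 0.7: "c", 1.0: "d"}  # cumulative fitness shares
    spin = random.random()
    prev_prob = 0
    for cur_prob in sorted(wheel):
        if prev_prob < spin <= cur_prob:
            return wheel[cur_prob]
        prev_prob = cur_prob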
| [
"[email protected]"
] | |
4b20434b8674ccfded74054b0a37ae33caa44811 | 4fbd844113ec9d8c526d5f186274b40ad5502aa3 | /algorithms/python3/perfect_rectangle.py | adda3b9866b4f36c19ea7e24a53d4cded0ddd0ba | [] | no_license | capric8416/leetcode | 51f9bdc3fa26b010e8a1e8203a7e1bcd70ace9e1 | 503b2e303b10a455be9596c31975ee7973819a3c | refs/heads/master | 2022-07-16T21:41:07.492706 | 2020-04-22T06:18:16 | 2020-04-22T06:18:16 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,302 | py | # !/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Given N axis-aligned rectangles where N > 0, determine if they all together form an exact cover of a rectangular region.
Each rectangle is represented as a bottom-left point and a top-right point.
For example, a unit square is represented as [1,1,2,2].
(coordinate of bottom-left point is (1, 1) and top-right point is (2, 2)).
Example 1:
rectangles = [
[1,1,3,3],
[3,1,4,2],
[3,2,4,4],
[1,3,2,4],
[2,3,3,4]
]
Return true.
All 5 rectangles together form an exact cover of a rectangular region.
Example 2:
rectangles = [
[1,1,2,3],
[1,3,2,4],
[3,1,4,2],
[3,2,4,4]
]
Return false.
Because there is a gap between the two rectangular regions.
Example 3:
rectangles = [
[1,1,3,3],
[3,1,4,2],
[1,3,2,4],
[3,2,4,4]
]
Return false.
Because there is a gap in the top center.
Example 4:
rectangles = [
[1,1,3,3],
[3,1,4,2],
[1,3,2,4],
[2,2,4,4]
]
Return false.
Because two of the rectangles overlap with each other.
"""
""" ==================== body ==================== """
class Solution:
def isRectangleCover(self, rectangles):
"""
:type rectangles: List[List[int]]
:rtype: bool
"""
""" ==================== body ==================== """
| [
"[email protected]"
] | |
56afaf83d0bb5a2323b007f43c24edf5e93f4b66 | 3e381dc0a265afd955e23c85dce1e79e2b1c5549 | /hi-A5/kacayyasadin.py | 67e22449145123ac051f0a67ea72997107d722c8 | [] | no_license | serkancam/byfp2-2020-2021 | 3addeb92a3ff5616cd6dbd3ae7b2673e1a1a1a5e | c67206bf5506239d967c3b1ba75f9e08fdbad162 | refs/heads/master | 2023-05-05T04:36:21.525621 | 2021-05-29T11:56:27 | 2021-05-29T11:56:27 | 322,643,962 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 149 | py | d_yil=int(input("hangi yıl doğdun:"))
d_ay = int(input("what month were you born: "))
y_ay = (2020 - d_yil) * 12 + (11 - d_ay)
print(y_ay, "months you have lived.")
| [
"[email protected]"
] | |
682de6a4f15064d60f1678610e503b2d3074a33e | ab704c85613bc430dfbb4e5d8ed139ba0a1da584 | /manage.py | d1fd55b64f4c533ec2620e28f4a0de4f7f967bb9 | [] | no_license | arthuroe/shopperholics | eb4b1257a32354bb2b71607719f61a3a098b7fcb | 6f941bf370a11d697c1577ca1e92bc1ba8ec4d3b | refs/heads/develop | 2022-12-09T23:16:17.235490 | 2018-08-14T20:39:21 | 2019-02-20T00:01:18 | 139,479,448 | 0 | 0 | null | 2022-12-08T02:21:51 | 2018-07-02T18:25:22 | Python | UTF-8 | Python | false | false | 283 | py | from flask_script import Manager
from flask_migrate import Migrate, MigrateCommand
from api import app
from api.models.model_mixin import db
manager = Manager(app)
migrate = Migrate(app, db)
manager.add_command('db', MigrateCommand)
if __name__ == '__main__':
manager.run()
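# Typical invocations (illustrative): Flask-Script plus Flask-Migrate expose
# database commands such as `python manage.py db init`, `python manage.py db
# migrate` and `python manage.py db upgrade` through the 'db' command
# registered above.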
| [
"[email protected]"
] | |
bd44d9a490e19447ce8554605dae7d017f2c7f32 | 5730110af5e4f0abe538ed7825ddd62c79bc3704 | /pacu/pacu/profile/log.py | 06bfe8e09aa791c5d2a2f92dc6686ea8b309b43d | [] | no_license | jzeitoun/pacu-v2 | bdbb81def96a2d87171ca20b89c878b2f66975e7 | 0ccb254a658263b4fe8c80ea623f860cb7dc1428 | refs/heads/master | 2021-06-03T18:50:50.890399 | 2020-04-27T16:31:59 | 2020-04-27T16:31:59 | 110,889,657 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 83 | py | from ..util import logging
def default(profile):
return logging.get_default()
| [
"[email protected]"
] | |
bfe54bf4a84bae770d8568d0e5d8d98a85bd6046 | f03e771eb4c1f300ae819179090efc388bcc6d32 | /src/pymine/metadata/MetadataValue.py | c155a8834298482b1fd2fe55b4228d4f4dd64f3c | [] | no_license | lacthan28/PyMine | d8d2365b0aabefcb056754260f67095dbcbe62ff | e7d4778f01181d45551c02fa0cef151327fa240a | refs/heads/master | 2021-01-21T19:50:48.417635 | 2017-06-30T05:38:46 | 2017-06-30T05:38:46 | 92,161,042 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 570 | py | # -*- coding: utf-8 -*-
from abc import ABCMeta, abstractmethod
from ..plugin.Plugin import *
class MetadataValue(metaclass=ABCMeta):
"""
:param Plugin owningPlugin:
"""
owningPlugin = None
def __init__(self, owningPlugin: Plugin):
self.owningPlugin = owningPlugin
def getOwningPlugin(self):
""" :return: Plugin """
return self.owningPlugin
@abstractmethod
def value(self):
"""
Fetches the value of this metadata item.
:return: mixed
"""
@abstractmethod
def invalidate(self):
"""
Invalidates this metadata item, forcing it to recompute when next accessed.
:return:
"""
| [
"[email protected]"
] | |
ca522f6446943697c68630ba2867ef251c86a058 | 34088b8e82bc64a10678a08c03db2732d52f0c1a | /Pinbot/Brick/App/job_hunting/exception.py | 94e3248b2a1d740940ed0965550b67d613038491 | [] | no_license | winghou/myFirstProfile | 757d82f5391f3672e48db4aa5774e26a48a5ecc7 | 8fc5d16de7b6449cba058f4d2459bbb0c8438f77 | refs/heads/master | 2020-05-31T13:42:28.554703 | 2016-03-23T11:30:13 | 2016-03-23T11:30:13 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 120 | py | # coding: utf-8
class CompanyCardInterestExeception(Exception):
'''
企业名片反馈异常
'''
pass
| [
"[email protected]"
] | |
cb609dc44f22d8dceb8c61401be4c62ca445e9d6 | 573d470c9fcb3799e8822e6953e1259b74e0672c | /Course/syntax/example_12.py | ba29a4c138e0d6a7ace344e04386e2ac5c48f214 | [
"Apache-2.0"
] | permissive | zevgenia/Python_shultais | e6f35773e54a72477ea5ee83520dbecfbee7ff48 | e51c31de221c5e7f36ede857a960138009ec8a05 | refs/heads/master | 2020-03-31T21:46:25.061571 | 2018-10-11T13:43:47 | 2018-10-11T13:43:47 | 152,593,211 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 409 | py |
age = 12
male = "f"
location = "Russia"
locations = ["Russia", "Ukraine", "Belarus"]
is_programmer = True
is_admin = False
if (age >= 12
and male == "f"
and location in locations
and (is_programmer or is_admin)):
print("Доступ открыт")
if age >= 12 \
and male == "m" \
and location in locations \
and (is_programmer or is_admin):
print("Доступ открыт")
| [
"[email protected]"
] | |
dbaf7c583e82742b17220b3b45bbabb630c64530 | 697fb11686110f569e7f4284045049d008688221 | /windows/nsist/tests/test_commands.py | 077ef0e8505a5a9f5e8aad17d9ce5190dcfc8f72 | [
"Apache-2.0",
"MIT",
"BSD-3-Clause",
"CC-BY-3.0"
] | permissive | andredoumad/p3env | 433c9174899f0909b149f51c3243b6fe04e076bf | a8850d06755d53eb6fedd9995091dad34f1f9ccd | refs/heads/master | 2023-02-03T20:50:07.357255 | 2020-12-23T09:15:55 | 2020-12-23T09:15:55 | 317,041,015 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,591 | py | import io
from testpath import assert_isfile, assert_not_path_exists
from zipfile import ZipFile
from nsist import commands, _assemble_launchers
def test_prepare_bin_dir(tmpdir):
cmds = {
'acommand': {
'entry_point': 'somemod:somefunc',
'extra_preamble': io.StringIO(u'import extra')
}
}
commands.prepare_bin_directory(tmpdir, cmds)
launcher_file = str(tmpdir / 'launcher_exe.dat')
launcher_noconsole_file = str(tmpdir / 'launcher_noconsole_exe.dat')
zip_file = str(tmpdir / 'acommand-append.zip')
zip_file_invalid = str(tmpdir / 'acommand-append-noconsole.zip')
exe_file = str(tmpdir / 'acommand.exe')
assert_isfile(launcher_file)
assert_isfile(launcher_noconsole_file)
assert_isfile(zip_file)
assert_not_path_exists(zip_file_invalid)
assert_not_path_exists(exe_file)
with open(launcher_file, 'rb') as lf:
b_launcher = lf.read()
assert b_launcher[:2] == b'MZ'
with open(launcher_noconsole_file, 'rb') as lf:
assert lf.read(2) == b'MZ'
with ZipFile(zip_file) as zf:
assert zf.testzip() is None
script_contents = zf.read('__main__.py').decode('utf-8')
assert 'import extra' in script_contents
assert 'somefunc()' in script_contents
_assemble_launchers.main(['_assemble_launchers.py', 'C:\\path\\to\\python', str(tmpdir)])
assert_isfile(exe_file)
with open(exe_file, 'rb') as ef, open(zip_file, 'rb') as zf:
b_exe = ef.read()
b_zip = zf.read()
assert b_exe[:len(b_launcher)] == b_launcher
assert b_exe[len(b_launcher):-len(b_zip)].decode('utf-8') == '#!"C:\\path\\to\\python.exe"\r\n'
assert b_exe[-len(b_zip):] == b_zip
with ZipFile(exe_file) as zf:
assert zf.testzip() is None
assert zf.read('__main__.py').decode('utf-8') == script_contents
def test_prepare_bin_dir_noconsole(tmpdir):
cmds = {
'acommand': {
'entry_point': 'somemod:somefunc',
'console': False
}
}
commands.prepare_bin_directory(tmpdir, cmds)
launcher_file = str(tmpdir / 'launcher_exe.dat')
launcher_noconsole_file = str(tmpdir / 'launcher_noconsole_exe.dat')
zip_file = str(tmpdir / 'acommand-append-noconsole.zip')
zip_file_invalid = str(tmpdir / 'acommand-append.zip')
exe_file = str(tmpdir / 'acommand.exe')
assert_isfile(launcher_file)
assert_isfile(launcher_noconsole_file)
assert_isfile(zip_file)
assert_not_path_exists(zip_file_invalid)
assert_not_path_exists(exe_file)
with open(launcher_file, 'rb') as lf:
assert lf.read(2) == b'MZ'
with open(launcher_noconsole_file, 'rb') as lf:
b_launcher = lf.read()
assert b_launcher[:2] == b'MZ'
with ZipFile(zip_file) as zf:
assert zf.testzip() is None
script_contents = zf.read('__main__.py').decode('utf-8')
assert 'import extra' not in script_contents
assert 'somefunc()' in script_contents
_assemble_launchers.main(['_assemble_launchers.py', 'C:\\custom\\python.exe', str(tmpdir)])
assert_isfile(exe_file)
with open(exe_file, 'rb') as ef, open(zip_file, 'rb') as zf:
b_exe = ef.read()
b_zip = zf.read()
assert b_exe[:len(b_launcher)] == b_launcher
assert b_exe[len(b_launcher):-len(b_zip)].decode('utf-8') == '#!"C:\\custom\\pythonw.exe"\r\n'
assert b_exe[-len(b_zip):] == b_zip
with ZipFile(exe_file) as zf:
assert zf.testzip() is None
assert zf.read('__main__.py').decode('utf-8') == script_contents
| [
"[email protected]"
] | |
c5b4fddb74e49044607c17ffb50be7f653c945ad | 1268030197a27bf2ef5e3f5ab8df38993457fed5 | /rasa_core/rasa_core/channels/slack.py | a5422da7f616f89305bfb620133aa3adcc635d78 | [
"LicenseRef-scancode-generic-cla",
"Apache-2.0"
] | permissive | parimalpate123/rasa_slack_chatbot | 439abd9a541d6314b46c6fb303c0275803fc9357 | 206aacab62f12be9df9f009f65736caed3e8edac | refs/heads/master | 2020-04-17T14:13:49.917604 | 2019-05-07T11:08:07 | 2019-05-07T11:08:07 | 166,649,129 | 0 | 1 | null | 2019-01-29T11:09:07 | 2019-01-20T10:32:59 | Python | UTF-8 | Python | false | false | 9,555 | py | import re
import json
import logging
from typing import Text, Optional, List
from flask import Blueprint, request, jsonify, make_response, Response
from slackclient import SlackClient
from rasa_core.channels import InputChannel
from rasa_core.channels.channel import UserMessage, OutputChannel
logger = logging.getLogger(__name__)
class SlackBot(SlackClient, OutputChannel):
"""A Slack communication channel"""
@classmethod
def name(cls):
return "slack"
def __init__(self,
token: Text,
slack_channel: Optional[Text] = None) -> None:
self.slack_channel = slack_channel
super(SlackBot, self).__init__(token)
def send_text_message(self, recipient_id, message):
recipient = self.slack_channel or recipient_id
for message_part in message.split("\n\n"):
super(SlackBot, self).api_call("chat.postMessage",
channel=recipient,
as_user=True, text=message_part)
def send_image_url(self, recipient_id, image_url, message=""):
image_attachment = [{"image_url": image_url,
"text": message}]
recipient = self.slack_channel or recipient_id
return super(SlackBot, self).api_call("chat.postMessage",
channel=recipient,
as_user=True,
attachments=image_attachment)
def send_attachment(self, recipient_id, attachment, message=""):
recipient = self.slack_channel or recipient_id
return super(SlackBot, self).api_call("chat.postMessage",
channel=recipient,
as_user=True,
text=message,
attachments=attachment)
@staticmethod
def _convert_to_slack_buttons(buttons):
return [{"text": b['title'],
"name": b['payload'],
"type": "button"} for b in buttons]
def send_text_with_buttons(self, recipient_id, message, buttons, **kwargs):
recipient = self.slack_channel or recipient_id
if len(buttons) > 5:
logger.warning("Slack API currently allows only up to 5 buttons. "
"If you add more, all will be ignored.")
return self.send_text_message(recipient, message)
button_attachment = [{"fallback": message,
"callback_id": message.replace(' ', '_')[:20],
"actions": self._convert_to_slack_buttons(
buttons)}]
super(SlackBot, self).api_call("chat.postMessage",
channel=recipient,
as_user=True,
text=message,
attachments=button_attachment)
class SlackInput(InputChannel):
"""Slack input channel implementation. Based on the HTTPInputChannel."""
@classmethod
def name(cls):
return "slack"
@classmethod
def from_credentials(cls, credentials):
if not credentials:
cls.raise_missing_credentials_exception()
return cls(credentials.get("slack_token"),
credentials.get("slack_channel"))
def __init__(self,
slack_token: Text,
slack_channel: Optional[Text] = None,
errors_ignore_retry: Optional[List[Text]] = None) -> None:
"""Create a Slack input channel.
Needs a couple of settings to properly authenticate and validate
messages. Details to setup:
https://github.com/slackapi/python-slackclient
Args:
slack_token: Your Slack Authentication token. You can find or
generate a test token
`here <https://api.slack.com/docs/oauth-test-tokens>`_.
slack_channel: the string identifier for a channel to which
the bot posts, or channel name (e.g. 'C1234ABC', 'bot-test'
or '#bot-test') If unset, messages will be sent back
to the user they came from.
errors_ignore_retry: If error code given by slack
included in this list then it will ignore the event.
The code is listed here:
https://api.slack.com/events-api#errors
"""
self.slack_token = slack_token
self.slack_channel = slack_channel
self.errors_ignore_retry = errors_ignore_retry or ('http_timeout',)
@staticmethod
def _is_user_message(slack_event):
return (slack_event.get('event') and
(slack_event.get('event').get('type') == u'message' or
slack_event.get('event').get('type') == u'app_mention') and
slack_event.get('event').get('text') and not
slack_event.get('event').get('bot_id'))
@staticmethod
def _is_button_reply(slack_event):
return (slack_event.get('payload') and
slack_event['payload'][0] and
'name' in slack_event['payload'][0])
@staticmethod
def _get_button_reply(slack_event):
return json.loads(slack_event['payload'][0])['actions'][0]['name']
@staticmethod
def _sanitize_user_message(text, uids_to_remove):
"""Remove superfluous/wrong/problematic tokens from a message.
Probably a good starting point for pre-formatting of user-provided text,
to make NLU's life easier in case they go funky to the power of extreme.
In the current state will just drop self-mentions of bot itself
Args:
text: raw message as sent from slack
uids_to_remove: a list of user ids to remove from the content
Returns:
str: parsed and cleaned version of the input text
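        Example (illustrative):
            _sanitize_user_message("<@UABC123> hello", ["UABC123"]) -> "hello"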
"""
for uid_to_remove in uids_to_remove:
# heuristic to format majority cases OK
# can be adjusted to taste later if needed,
# but is a good first approximation
for regex, replacement in [(r'<@{}>\s'.format(uid_to_remove), ''),
(r'\s<@{}>'.format(uid_to_remove), ''),
# a bit arbitrary but probably OK
(r'<@{}>'.format(uid_to_remove), ' ')]:
text = re.sub(regex, replacement, text)
return text.rstrip().lstrip() # drop extra spaces at beginning and end
def process_message(self, on_new_message, text, sender_id):
"""Slack retry to post messages up to 3 times based on
failure conditions defined here:
https://api.slack.com/events-api#failure_conditions
"""
retry_reason = request.headers.environ.get('HTTP_X_SLACK_RETRY_REASON')
retry_count = request.headers.environ.get('HTTP_X_SLACK_RETRY_NUM')
if retry_count and retry_reason in self.errors_ignore_retry:
logger.warning("Received retry #{} request from slack"
" due to {}".format(retry_count, retry_reason))
return Response(status=201, headers={'X-Slack-No-Retry': 1})
try:
out_channel = SlackBot(self.slack_token, self.slack_channel)
user_msg = UserMessage(text, out_channel, sender_id,
input_channel=self.name())
on_new_message(user_msg)
except Exception as e:
logger.error("Exception when trying to handle "
"message.{0}".format(e))
logger.error(str(e), exc_info=True)
return make_response()
def blueprint(self, on_new_message):
slack_webhook = Blueprint('slack_webhook', __name__)
@slack_webhook.route("/", methods=['GET'])
def health():
return jsonify({"status": "ok"})
@slack_webhook.route("/webhook", methods=['GET', 'POST'])
def webhook():
request.get_data()
if request.json:
output = request.json
if "challenge" in output:
return make_response(output.get("challenge"), 200,
{"content_type": "application/json"})
elif self._is_user_message(output):
return self.process_message(
on_new_message,
text=self._sanitize_user_message(
output['event']['text'],
output['authed_users']),
sender_id=output.get('event').get('user'))
elif request.form:
output = dict(request.form)
if self._is_button_reply(output):
sender_id = json.loads(output['payload'][0])['user']['id']
return self.process_message(
on_new_message,
text=self._get_button_reply(output),
sender_id=sender_id)
return make_response()
return slack_webhook
| [
"[email protected]"
] | |
95a9e288c4d96488d88ce163283d339b73a623ce | f4434c85e3814b6347f8f8099c081ed4af5678a5 | /sdk/rdbms/azure-mgmt-rdbms/azure/mgmt/rdbms/postgresql/v2020_01_01/aio/operations/_firewall_rules_operations.py | b134643958a2adf39922a022db347bb3ac1b8102 | [
"LicenseRef-scancode-generic-cla",
"MIT",
"LGPL-2.1-or-later"
] | permissive | yunhaoling/azure-sdk-for-python | 5da12a174a37672ac6ed8e3c1f863cb77010a506 | c4eb0ca1aadb76ad892114230473034830116362 | refs/heads/master | 2022-06-11T01:17:39.636461 | 2020-12-08T17:42:08 | 2020-12-08T17:42:08 | 177,675,796 | 1 | 0 | MIT | 2020-03-31T20:35:17 | 2019-03-25T22:43:40 | Python | UTF-8 | Python | false | false | 21,790 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class FirewallRulesOperations:
"""FirewallRulesOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.rdbms.postgresql.v2020_01_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
async def _create_or_update_initial(
self,
resource_group_name: str,
server_name: str,
firewall_rule_name: str,
parameters: "models.FirewallRule",
**kwargs
) -> Optional["models.FirewallRule"]:
cls = kwargs.pop('cls', None) # type: ClsType[Optional["models.FirewallRule"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2017-12-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._create_or_update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'serverName': self._serialize.url("server_name", server_name, 'str'),
'firewallRuleName': self._serialize.url("firewall_rule_name", firewall_rule_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'FirewallRule')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('FirewallRule', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('FirewallRule', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DBForPostgreSQL/servers/{serverName}/firewallRules/{firewallRuleName}'} # type: ignore
async def begin_create_or_update(
self,
resource_group_name: str,
server_name: str,
firewall_rule_name: str,
parameters: "models.FirewallRule",
**kwargs
) -> AsyncLROPoller["models.FirewallRule"]:
"""Creates a new firewall rule or updates an existing firewall rule.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param server_name: The name of the server.
:type server_name: str
:param firewall_rule_name: The name of the server firewall rule.
:type firewall_rule_name: str
:param parameters: The required parameters for creating or updating a firewall rule.
:type parameters: ~azure.mgmt.rdbms.postgresql.v2020_01_01.models.FirewallRule
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either FirewallRule or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.rdbms.postgresql.v2020_01_01.models.FirewallRule]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["models.FirewallRule"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._create_or_update_initial(
resource_group_name=resource_group_name,
server_name=server_name,
firewall_rule_name=firewall_rule_name,
parameters=parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('FirewallRule', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'serverName': self._serialize.url("server_name", server_name, 'str'),
'firewallRuleName': self._serialize.url("firewall_rule_name", firewall_rule_name, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DBForPostgreSQL/servers/{serverName}/firewallRules/{firewallRuleName}'} # type: ignore
async def _delete_initial(
self,
resource_group_name: str,
server_name: str,
firewall_rule_name: str,
**kwargs
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2017-12-01"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'serverName': self._serialize.url("server_name", server_name, 'str'),
'firewallRuleName': self._serialize.url("firewall_rule_name", firewall_rule_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DBForPostgreSQL/servers/{serverName}/firewallRules/{firewallRuleName}'} # type: ignore
async def begin_delete(
self,
resource_group_name: str,
server_name: str,
firewall_rule_name: str,
**kwargs
) -> AsyncLROPoller[None]:
"""Deletes a server firewall rule.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param server_name: The name of the server.
:type server_name: str
:param firewall_rule_name: The name of the server firewall rule.
:type firewall_rule_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._delete_initial(
resource_group_name=resource_group_name,
server_name=server_name,
firewall_rule_name=firewall_rule_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'serverName': self._serialize.url("server_name", server_name, 'str'),
'firewallRuleName': self._serialize.url("firewall_rule_name", firewall_rule_name, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DBForPostgreSQL/servers/{serverName}/firewallRules/{firewallRuleName}'} # type: ignore
async def get(
self,
resource_group_name: str,
server_name: str,
firewall_rule_name: str,
**kwargs
) -> "models.FirewallRule":
"""Gets information about a server firewall rule.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param server_name: The name of the server.
:type server_name: str
:param firewall_rule_name: The name of the server firewall rule.
:type firewall_rule_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: FirewallRule, or the result of cls(response)
:rtype: ~azure.mgmt.rdbms.postgresql.v2020_01_01.models.FirewallRule
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.FirewallRule"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2017-12-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'serverName': self._serialize.url("server_name", server_name, 'str'),
'firewallRuleName': self._serialize.url("firewall_rule_name", firewall_rule_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('FirewallRule', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DBForPostgreSQL/servers/{serverName}/firewallRules/{firewallRuleName}'} # type: ignore
def list_by_server(
self,
resource_group_name: str,
server_name: str,
**kwargs
) -> AsyncIterable["models.FirewallRuleListResult"]:
"""List all the firewall rules in a given server.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param server_name: The name of the server.
:type server_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either FirewallRuleListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.rdbms.postgresql.v2020_01_01.models.FirewallRuleListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.FirewallRuleListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2017-12-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_by_server.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'serverName': self._serialize.url("server_name", server_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('FirewallRuleListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_by_server.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DBForPostgreSQL/servers/{serverName}/firewallRules'} # type: ignore
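# Illustrative usage sketch (added; not part of the generated file). The
# client/credential import paths below are assumptions -- check the SDK docs:
#
#   from azure.identity.aio import DefaultAzureCredential
#   from azure.mgmt.rdbms.postgresql.aio import PostgreSQLManagementClient
#   from azure.mgmt.rdbms.postgresql.models import FirewallRule
#
#   async with PostgreSQLManagementClient(DefaultAzureCredential(), sub_id) as client:
#       poller = await client.firewall_rules.begin_create_or_update(
#           "my-rg", "my-server", "allow-office",
#           FirewallRule(start_ip_address="10.0.0.1", end_ip_address="10.0.0.16"))
#       rule = await poller.result()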
| [
"[email protected]"
] | |
8b759f07090e27cb15f72da8b4c0b6be93f37dfe | dfdf57c374b3ad281fd9f86bda343b221efe0b55 | /calie/aux/matrices.py | d20fb335227728daa71d8a495c8fac0d260bcdc3 | [
"BSD-3-Clause"
] | permissive | SebastianoF/calie | e08d7d3302839e69b0e17b8c7f3233124fef9807 | 187318fa340b6d2fbf8c5dbc643304b66e9d1c44 | refs/heads/master | 2020-03-23T14:51:09.761126 | 2019-03-17T19:31:45 | 2019-03-17T19:31:45 | 141,703,688 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,030 | py | import numpy as np
def bch_right_jacobian(r):
"""
BCH_right_jacobian(r) \n
:param r: element of lie algebra so2_a in restricted form
:return: Jacobian (equation [57] Tom tech memo)
"""
theta = r[0]
dtx = r[1]
dty = r[2]
j = np.array([0.0] * 9).reshape(3, 3)
half_theta = theta * 0.5
tan_half_theta = np.tan(theta * 0.5)
prec = np.abs(np.spacing(theta))
if abs(theta) > prec:
factor1 = (half_theta - tan_half_theta) / (theta * tan_half_theta)
factor2 = half_theta / tan_half_theta
else:
factor1 = theta / 12.0
factor2 = 1 - (theta ** 2) / 12.0
j[0, 0] = 1
j[1, 0] = - factor1 * dtx + 0.5 * dty
j[2, 0] = - 0.5 * dtx - dty * factor1
j[1, 1] = factor2
j[2, 2] = factor2
j[2, 1] = 0.5 * theta
j[1, 2] = -0.5 * theta
return j
def time_splitter(t, x, len_range=None, number_of_intervals=5, epsilon=0):
"""
:param t: list or tuple relative to the time if unordered it will be ordered
:param x: values corresponding to the time (same length of t), if t is unordered it will follow the same reordering.
:param len_range: interval of the data that will be splitted.
:param number_of_intervals: number of interval in which we want x to be splitted
:param epsilon: small margin around the time range
:return: x_splitted in intervals
"""
    if len(t) != len(x):
        raise TypeError('t and x must have the same dimension')
    if list(t) != sorted(t):
        t, x = (list(z) for z in zip(*sorted(zip(t, x))))
if len_range is None:
if epsilon > 0:
starting_range = t[0] - epsilon
ending_range = t[len(t) - 1] + epsilon
else:
starting_range = np.floor(t[0])
ending_range = np.ceil(t[len(t) - 1])
else:
starting_range = len_range[0]
ending_range = len_range[1]
steps = np.linspace(starting_range, ending_range, num=number_of_intervals + 1)
steps[len(steps) - 1] += 0.1
x_splitted = [
[x[i] for i in range(len(x)) if steps[j - 1] <= t[i] < steps[j]]
for j in range(1, number_of_intervals + 1)
]
return x_splitted
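# Example (illustrative): time_splitter([0.1, 0.4, 0.6, 0.9], ['a', 'b', 'c', 'd'],
# len_range=(0, 1), number_of_intervals=2) returns [['a', 'b'], ['c', 'd']].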
def custom_transposer(d, num_col):
"""
    :param d: list of lists whose length is a multiple of num_col
    :param num_col: number of columns; the return value is the same lists
        reordered as if the underlying matrix were transposed
e.g.
custom_transposer([[1,1],[2,2],[3,3],[4,4],[5,5],[6,6]], num_col = 2)
-> [[1,1],[3,3],[5,5],[2,2],[4,4],[6,6]]
custom_transposer([[1,1],[2,2],[3,3],[4,4],[5,5],[6,6]], num_col = 3)
-> [[1,1],[4,4],[2,2],[5,5],[3,3],[6,6]]
"""
    if len(d) % num_col:
        raise TypeError('dimensions are not compatible')
ans = []
for r in range(num_col): # reminder
for q in range(0, len(d), num_col): # quotient
ans = ans + [d[q + r]]
return ans
# ---------- list management utils methods ---------------
def remove_k(l, k):
l_new = l[:]
l_new.pop(k)
return l_new
# ---------- data management utils methods ---------------
def get_in_out_liers(data, coeff=0.6745, return_values=True):
"""
:param data: 1d numpy array
:param coeff:
:param return_values:
:return: position of the outliers in the vector
"""
median = np.median(data)
    diff = np.abs(data - median)
    mad = np.median(diff)
    thresh = coeff * 0.674491 * mad
    out_liers_index = [k for k in range(len(data)) if diff[k] > thresh]
    in_liers_index = [k for k in range(len(data)) if diff[k] <= thresh]
in_liers_values = np.delete(data, out_liers_index)
out_liers_values = [data[j] for j in out_liers_index]
if return_values:
return out_liers_values, in_liers_values
else:
return out_liers_index, in_liers_index
# ---------- vectors and matrices defined on a grid manipulators ---------------
def matrix_vector_field_product(j_input, v_input):
"""
:param j_input: matrix m x n x (4 or 9) as for example a jacobian column major
:param v_input: matrix m x n x (2 or 3) to be multiplied by the matrix point-wise.
:return: m x n x (2 or 3) whose each element is the result of the product of the
matrix (i,j,:) multiplied by the corresponding element in the vector v (i,j,:).
In tensor notation for n = 1: R_{i,j,k} = \sum_{l=0}^{2} M_{i,j,l+3k} v_{i,j,l}
### equivalent code in a more readable version:
# dimensions of the problem:
d = v_input.shape[-1]
vol = list(v_input.shape[:-1])
# repeat v input 3 times, one for each row of the input matrix 3x3 or 2x2 in corresponding position:
v = np.tile(v_input, [1]*d + [d])
# element-wise product:
j_times_v = np.multiply(j_input, v)
# Sum the three blocks in the third dimension:
return np.sum(j_times_v.reshape(vol + [d, d]), axis=d+1).reshape(vol + [d])
"""
assert len(j_input.shape) == len(v_input.shape), [j_input.shape, v_input.shape]
d = v_input.shape[-1]
vol = list(v_input.shape[:d])
extra_ones = len(v_input.shape) - (len(vol) + 1)
temp = j_input.reshape(vol + [1] * extra_ones + [d, d]) # transform in squared block with additional ones
return np.einsum('...kl,...l->...k', temp, v_input)
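# Quick self-check sketch (added): with the identity matrix stored at every
# grid point, the product must return the vector field unchanged.
def _demo_matrix_vector_field_product():
    vol, d = (4, 5), 2
    j_id = np.tile(np.eye(d).reshape(1, 1, d * d), vol + (1,))  # shape (4, 5, 4)
    v = np.random.rand(4, 5, d)                                 # shape (4, 5, 2)
    assert np.allclose(matrix_vector_field_product(j_id, v), v)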
def matrix_fields_product(a_input, b_input):
"""
Multiplies the matrix a_input[i,j,:] times b_input[i,j,:] for each i, j.
works for any dimension
:param a_input:
:param b_input:
:return:
"""
# test
np.testing.assert_array_equal(a_input.shape, b_input.shape)
d = int(np.sqrt(a_input.shape[-1]))
vol = list(a_input.shape[:d])
extra_ones = len(a_input.shape) - (len(vol) + 1)
temp_a = a_input.reshape(vol + [1] * extra_ones + [d, d]) # transform in squared block with additional ones
temp_b = b_input.reshape(vol + [1] * extra_ones + [d, d])
return np.einsum('...kl,...lm', temp_a, temp_b).reshape(vol + [1] * extra_ones + [d * d])
def matrix_fields_product_iterative(a_input, n=1):
"""
Matrix products, for matrices defined at each point of a grid, row major.
Multiplies the matrix a_input[i,j,:] by itself n times for each i,j.
:param a_input: matrix field
:param n: number of iterations
:return: a_input^n point-wise
"""
ans = a_input[...]
for _ in range(1, n):
ans = matrix_fields_product(ans, a_input)
return ans
def id_matrix_field(domain):
"""
From a domain of dimension dim =2,3, it returns the identity field
that at each point of the domain has the (row mayor) vectorized identity
matrix.
:param domain: a squared or cubed domain
:return:
"""
dim = len(domain)
    if dim not in [2, 3]:
        raise IOError('Dimension of the domain must be 2 or 3.')
shape = list(domain) + [1] * (4 - dim) + [dim**2]
flat_id = np.eye(dim).reshape(1, dim**2)
return np.repeat(flat_id, np.prod(domain)).reshape(shape, order='F')
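# Example (illustrative): id_matrix_field((2, 2)) has shape (2, 2, 1, 1, 4) and
# carries the flattened 2x2 identity [1, 0, 0, 1] at every point of the grid.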
def grid_generator(x_size=101,
y_size=101,
x_step=10,
y_step=10,
line_thickness=1):
m = np.zeros([x_size, y_size])
# preliminary slow version:
for x in range(x_size):
for y in range(y_size):
if 0 <= x % x_step < line_thickness or 0 <= y % y_step < line_thickness:
m[x, y] = 1
return m
'''
def trim_2d(array, passe_partout_size, return_copy=False):
"""
:param array: array input to be trimmed
:param passe_partout_size: passepartout value
:param return_copy: False by default, if you want to adjust the existing field. True otherwise.
:return: the same field trimmed by the value of the passepartout in each dimension.
"""
if return_copy:
new_field = copy.deepcopy(array)
new_field.field = array.field[passe_partout_size:-passe_partout_size,
passe_partout_size:-passe_partout_size,
...]
return new_field
else:
self.field = self.field[passe_partout_size:-passe_partout_size,
passe_partout_size:-passe_partout_size,
...]
'''
'''
def generate_svf(kind='', random_output=False, domain_input=(), parameters=()):
"""
NOTE: all the functions parameters are optional but the default return an error
as sanity check.
:param kind: can be 'SE2', 'GAUSS' or 'ADNII'
:param random: if the parameters are the parameter of a random or a fixed
:param parameters: according to the random variables and the kind provides the
se2 parameters, the sigma of the transformation, the index of the adnii image.
:return: one svf, in accordance with the input data
"""
svf_0 = None
disp_0 = None
if kind is 'SE2':
if random_output:
# generate matrices from parameters -> epsilon, interval_theta (x, y), omega (xa, ya, xb, yb)
epsilon = parameters[0]
interval_theta = parameters[1:3]
omega = parameters[3:]
m_0 = se2_g.randomgen_custom_center(interval_theta=interval_theta,
omega=omega,
epsilon_zero_avoidance=epsilon)
dm_0 = se2_g.log(m_0)
else:
# generate matrices -> theta, tx, ty
theta, tx, ty = parameters[0], parameters[1], parameters[2]
m_0 = se2_g.se2_g(theta, tx, ty)
dm_0 = se2_g.log(m_0)
# Generate svf and disp
svf_0 = SVF.generate_from_matrix(domain_input, dm_0.get_matrix, affine=np.eye(4))
disp_0 = SDISP.generate_from_matrix(domain_input, m_0.get_matrix - np.eye(3), affine=np.eye(4))
elif kind is 'GAUSS':
pass
elif kind is 'ADNII':
pass
else:
raise IOError('The option inserted for kind is not available.')
if disp_0 is not None:
return svf_0, disp_0
else:
return svf_0
'''
| [
"[email protected]"
] | |
c87d5afa14f70ed9c35d2b5894f71311df928142 | a904e99110721719d9ca493fdb91679d09577b8d | /month05/spider/day01_course/day01_code/10_novelSpiderMysql.py | f0fca531e47d09485d510041f8e19a4946197908 | [
"Apache-2.0"
] | permissive | chaofan-zheng/tedu-python-demo | 7c7c64a355e5380d1f8b6464affeddfde0d27be7 | abe983ddc52690f4726cf42cc6390cba815026d8 | refs/heads/main | 2023-03-12T05:17:34.596664 | 2021-02-27T08:33:31 | 2021-02-27T08:33:31 | 323,350,480 | 4 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,194 | py | """
Biquge (biqukan.cc) novel spider; scraped fields: href, title, author, comment.
Approach:
1. Confirm the data source (right click -> view page source -> search for keywords)
2. Confirm the page is static: observe the URL pattern
3. Write the regular expression
"""
import requests
import re
import time
import random
import pymysql
class NovelSpider:
def __init__(self):
self.url = 'https://www.biqukan.cc/fenlei1/{}.html'
self.headers = {'User-Agent':'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/88.0.4324.104 Safari/537.36'}
        # connect to the database and create a cursor object
self.db = pymysql.connect(
'localhost','root','123456','noveldb',charset='utf8'
)
self.cur = self.db.cursor()
def get_html(self, url):
"""请求,发请求获取响应内容html"""
html = requests.get(url=url, headers=self.headers).text
        # hand the HTML straight to the parsing function
self.parse_html(html)
def parse_html(self, html):
"""解析提取数据"""
regex = '<div class="caption">.*?href="(.*?)" title="(.*?)">.*?<small class="text-muted fs-12">(.*?)</small>.*?>(.*?)</p>'
# r_list: [(href,title,author,comment), (), ...]
r_list = re.findall(regex, html, re.S)
        # hand the results straight to the data handler
self.save_html(r_list)
def save_html(self, r_list):
"""数据处理函数"""
ins = 'insert into novel_tab values(%s,%s,%s,%s)'
for r in r_list:
            # execute(): the second argument may be a list or a tuple
self.cur.execute(ins, r)
self.db.commit()
print(r)
def crawl(self):
"""爬虫逻辑函数"""
for page in range(1, 3):
page_url = self.url.format(page)
self.get_html(url=page_url)
            # throttle the scraping rate
time.sleep(random.randint(1, 3))
        # once all data is scraped, close the cursor and database connection
self.cur.close()
self.db.close()
if __name__ == '__main__':
spider = NovelSpider()
spider.crawl()
| [
"[email protected]"
] | |
606e40173bb24cbf9d4f95a5b261acd0d044934f | b5fbc01deb2060b2222f885fca0433844a9e7cd1 | /web/lib/python3.6/site-packages/daphne/server.py | fb70c6cdaefb1d92caf40231038ced1b11ffb355 | [] | no_license | Carlosdher/reposicao | 50973b15f8a2bd3a5a6b83b06efe0050f612bb83 | 71ef93e694888e54c79e98e8568c3417ee82ec96 | refs/heads/master | 2020-03-18T04:13:59.493126 | 2018-08-02T13:06:55 | 2018-08-02T13:06:55 | 134,277,105 | 2 | 0 | null | 2018-07-27T19:20:36 | 2018-05-21T14:01:26 | Python | UTF-8 | Python | false | false | 11,655 | py | # This has to be done first as Twisted is import-order-sensitive with reactors
import sys # isort:skip
import warnings # isort:skip
from twisted.internet import asyncioreactor # isort:skip
current_reactor = sys.modules.get("twisted.internet.reactor", None)
if current_reactor is not None:
if not isinstance(current_reactor, asyncioreactor.AsyncioSelectorReactor):
warnings.warn(
"Something has already installed a non-asyncio Twisted reactor. Attempting to uninstall it; " +
"you can fix this warning by importing daphne.server early in your codebase or " +
"finding the package that imports Twisted and importing it later on.",
UserWarning,
)
del sys.modules["twisted.internet.reactor"]
asyncioreactor.install()
else:
asyncioreactor.install()
import asyncio
import logging
import time
import traceback
from concurrent.futures import CancelledError
from twisted.internet import defer, reactor
from twisted.internet.endpoints import serverFromString
from twisted.logger import STDLibLogObserver, globalLogBeginner
from twisted.web import http
from .http_protocol import HTTPFactory
from .ws_protocol import WebSocketFactory
logger = logging.getLogger(__name__)
class Server(object):
def __init__(
self,
application,
endpoints=None,
signal_handlers=True,
action_logger=None,
http_timeout=120,
websocket_timeout=86400,
websocket_connect_timeout=20,
ping_interval=20,
ping_timeout=30,
root_path="",
proxy_forwarded_address_header=None,
proxy_forwarded_port_header=None,
verbosity=1,
websocket_handshake_timeout=5,
application_close_timeout=10,
ready_callable=None,
# Deprecated and does not work, remove in version 2.2
ws_protocols=None,
):
self.application = application
self.endpoints = endpoints or []
self.listeners = []
self.listening_addresses = []
self.signal_handlers = signal_handlers
self.action_logger = action_logger
self.http_timeout = http_timeout
self.ping_interval = ping_interval
self.ping_timeout = ping_timeout
self.proxy_forwarded_address_header = proxy_forwarded_address_header
self.proxy_forwarded_port_header = proxy_forwarded_port_header
self.websocket_timeout = websocket_timeout
self.websocket_connect_timeout = websocket_connect_timeout
self.websocket_handshake_timeout = websocket_handshake_timeout
self.application_close_timeout = application_close_timeout
self.root_path = root_path
self.verbosity = verbosity
self.abort_start = False
self.ready_callable = ready_callable
# Check our construction is actually sensible
if not self.endpoints:
logger.error("No endpoints. This server will not listen on anything.")
sys.exit(1)
def run(self):
# A dict of protocol: {"application_instance":, "connected":, "disconnected":} dicts
self.connections = {}
# Make the factory
self.http_factory = HTTPFactory(self)
self.ws_factory = WebSocketFactory(self, server="Daphne")
self.ws_factory.setProtocolOptions(
autoPingTimeout=self.ping_timeout,
allowNullOrigin=True,
openHandshakeTimeout=self.websocket_handshake_timeout
)
if self.verbosity <= 1:
# Redirect the Twisted log to nowhere
globalLogBeginner.beginLoggingTo([lambda _: None], redirectStandardIO=False, discardBuffer=True)
else:
globalLogBeginner.beginLoggingTo([STDLibLogObserver(__name__)])
# Detect what Twisted features are enabled
if http.H2_ENABLED:
logger.info("HTTP/2 support enabled")
else:
logger.info("HTTP/2 support not enabled (install the http2 and tls Twisted extras)")
# Kick off the timeout loop
reactor.callLater(1, self.application_checker)
reactor.callLater(2, self.timeout_checker)
for socket_description in self.endpoints:
logger.info("Configuring endpoint %s", socket_description)
ep = serverFromString(reactor, str(socket_description))
listener = ep.listen(self.http_factory)
listener.addCallback(self.listen_success)
listener.addErrback(self.listen_error)
self.listeners.append(listener)
# Set the asyncio reactor's event loop as global
# TODO: Should we instead pass the global one into the reactor?
asyncio.set_event_loop(reactor._asyncioEventloop)
# Verbosity 3 turns on asyncio debug to find those blocking yields
if self.verbosity >= 3:
asyncio.get_event_loop().set_debug(True)
reactor.addSystemEventTrigger("before", "shutdown", self.kill_all_applications)
if not self.abort_start:
# Trigger the ready flag if we had one
if self.ready_callable:
self.ready_callable()
# Run the reactor
reactor.run(installSignalHandlers=self.signal_handlers)
def listen_success(self, port):
"""
Called when a listen succeeds so we can store port details (if there are any)
"""
if hasattr(port, "getHost"):
host = port.getHost()
if hasattr(host, "host") and hasattr(host, "port"):
self.listening_addresses.append((host.host, host.port))
logger.info("Listening on TCP address %s:%s", port.getHost().host, port.getHost().port)
def listen_error(self, failure):
logger.critical("Listen failure: %s", failure.getErrorMessage())
self.stop()
def stop(self):
"""
Force-stops the server.
"""
if reactor.running:
reactor.stop()
else:
self.abort_start = True
### Protocol handling
def protocol_connected(self, protocol):
"""
Adds a protocol as a current connection.
"""
if protocol in self.connections:
raise RuntimeError("Protocol %r was added to main list twice!" % protocol)
self.connections[protocol] = {"connected": time.time()}
def protocol_disconnected(self, protocol):
# Set its disconnected time (the loops will come and clean it up)
self.connections[protocol]["disconnected"] = time.time()
### Internal event/message handling
def create_application(self, protocol, scope):
"""
Creates a new application instance that fronts a Protocol instance
for one of our supported protocols. Pass it the protocol,
and it will work out the type, supply appropriate callables, and
return you the application's input queue
"""
# Make sure the protocol has not had another application made for it
assert "application_instance" not in self.connections[protocol]
# Make an instance of the application
input_queue = asyncio.Queue()
application_instance = self.application(scope=scope)
# Run it, and stash the future for later checking
self.connections[protocol]["application_instance"] = asyncio.ensure_future(application_instance(
receive=input_queue.get,
send=lambda message: self.handle_reply(protocol, message),
), loop=asyncio.get_event_loop())
return input_queue
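	# Protocol code then feeds ASGI events into that queue, e.g. (illustrative):
	# input_queue.put_nowait({"type": "http.request", "body": b"", "more_body": False})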
async def handle_reply(self, protocol, message):
"""
Coroutine that jumps the reply message from asyncio to Twisted
"""
protocol.handle_reply(message)
### Utility
def application_checker(self):
"""
Goes through the set of current application Futures and cleans up
any that are done/prints exceptions for any that errored.
"""
for protocol, details in list(self.connections.items()):
disconnected = details.get("disconnected", None)
application_instance = details.get("application_instance", None)
# First, see if the protocol disconnected and the app has taken
# too long to close up
if disconnected and time.time() - disconnected > self.application_close_timeout:
if not application_instance.done():
logger.warning(
"Application instance %r for connection %s took too long to shut down and was killed.",
application_instance,
repr(protocol),
)
application_instance.cancel()
# Then see if the app is done and we should reap it
if application_instance and application_instance.done():
try:
exception = application_instance.exception()
except CancelledError:
# Future cancellation. We can ignore this.
pass
else:
if exception:
if isinstance(exception, KeyboardInterrupt):
# Protocol is asking the server to exit (likely during test)
self.stop()
else:
exception_output = "{}\n{}{}".format(
exception,
"".join(traceback.format_tb(
exception.__traceback__,
)),
" {}".format(exception),
)
logger.error(
"Exception inside application: %s",
exception_output,
)
if not disconnected:
protocol.handle_exception(exception)
del self.connections[protocol]["application_instance"]
application_instance = None
# Check to see if protocol is closed and app is closed so we can remove it
if not application_instance and disconnected:
del self.connections[protocol]
reactor.callLater(1, self.application_checker)
def kill_all_applications(self):
"""
Kills all application coroutines before reactor exit.
"""
# Send cancel to all coroutines
wait_for = []
for details in self.connections.values():
application_instance = details["application_instance"]
if not application_instance.done():
application_instance.cancel()
wait_for.append(application_instance)
logger.info("Killed %i pending application instances", len(wait_for))
# Make Twisted wait until they're all dead
wait_deferred = defer.Deferred.fromFuture(asyncio.gather(*wait_for))
wait_deferred.addErrback(lambda x: None)
return wait_deferred
def timeout_checker(self):
"""
Called periodically to enforce timeout rules on all connections.
Also checks pings at the same time.
"""
for protocol in list(self.connections.keys()):
protocol.check_timeouts()
reactor.callLater(2, self.timeout_checker)
def log_action(self, protocol, action, details):
"""
Dispatches to any registered action logger, if there is one.
"""
if self.action_logger:
self.action_logger(protocol, action, details)
| [
"[email protected]"
] | |
ecb1f88de692273696479a78d63266fb28e7ab08 | 128548b223a941c1570aba99c679a698dedb3e72 | /rename.py | 0b1a95e3d55d759c39a67fb810c8a247760c7484 | [] | no_license | hhk86/Barra | de3e8d715f6ead1b425e5b639538df370a4eb11f | ddcf50766bcbdecb602213cccc65cab01146f88b | refs/heads/master | 2020-07-26T18:04:44.006126 | 2019-09-26T07:36:48 | 2019-09-26T07:36:48 | 208,727,872 | 1 | 2 | null | null | null | null | UTF-8 | Python | false | false | 1,005 | py | import sys
from pymongo import MongoClient
from basicFunction import *
class MongoDB():
'''
    Connect to the configured (remote) MongoDB instance
'''
def __init__(self):
self.host = "18.210.68.192"
self.port = 27017
self.db = "basicdb"
self.username = "user"
self.password = "user"
def __enter__(self):
self.conn = MongoClient(self.host, self.port, username=self.username, password=self.password)
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.conn.close()
def connect(self):
return self
if __name__ == "__main__":
confirm = input("Please input confirm:\n>>>")
if confirm != "confirm":
sys.exit()
with MongoDB() as mongo:
connection = mongo.connect()
db = connection.conn["basicdb"]
collection = db["basic_balance_new"]
db.basic_balance_new.rename("basic_balance")
# a = db.universe_new2.find()
# for b in a:
# print(b) | [
"[email protected]"
] | |
24ae940898e40cd452fccf9a14a65f0d30687132 | c4b7399a10b7f963f625d8d15e0a8215ea35ef7d | /239.滑动窗口最大值.py | 28a32c944148c249efaaa64c9393453ee7122d3f | [] | no_license | kangkang59812/LeetCode-python | a29a9788aa36689d1f3ed0e8b668f79d9ca43d42 | 276d2137a929e41120c2e8a3a8e4d09023a2abd5 | refs/heads/master | 2022-12-05T02:49:14.554893 | 2020-08-30T08:22:16 | 2020-08-30T08:22:16 | 266,042,478 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,027 | py | #
# @lc app=leetcode.cn id=239 lang=python3
#
# [239] Sliding Window Maximum
#
# https://leetcode-cn.com/problems/sliding-window-maximum/description/
#
# algorithms
# Hard (44.47%)
# Likes: 262
# Dislikes: 0
# Total Accepted: 33.6K
# Total Submissions: 75.5K
# Testcase Example: '[1,3,-1,-3,5,3,6,7]\n3'
#
# Given an array nums and a sliding window of size k moving from the very
# left of the array to the very right, you can only see the k numbers
# inside the window. The window moves right by one position at a time.
#
# Return the maximum value in each sliding window.
#
#
#
# Example:
#
# Input: nums = [1,3,-1,-3,5,3,6,7], and k = 3
# Output: [3,3,5,5,6,7]
# Explanation:
#
# Window position                Max
# ---------------               -----
# [1  3  -1] -3  5  3  6  7      3
#  1 [3  -1  -3] 5  3  6  7      3
#  1  3 [-1  -3  5] 3  6  7      5
#  1  3  -1 [-3  5  3] 6  7      5
#  1  3  -1  -3 [5  3  6] 7      6
#  1  3  -1  -3  5 [3  6  7]     7
#
#
#
# Note:
#
# You may assume k is always valid: 1 <= k <= size of the input array for a
# non-empty array.
#
#
#
# Follow up:
#
# Could you solve it in linear time?
#
#
# @lc code=start
from typing import List
class Solution:
def maxSlidingWindow(self, nums: List[int], k: int) -> List[int]:
from collections import deque
n = len(nums)
if n == 0 or k < 1:
return []
if k == 1:
return nums
if n <= k:
return [max(nums)]
window = deque()
res = []
for i in range(k):
while window and nums[window[-1]] <= nums[i]:
window.pop()
window.append(i)
for i in range(k, n):
res.append(nums[window[0]])
if i-window[0] >= k:
window.popleft()
while window and nums[window[-1]] <= nums[i]:
window.pop()
window.append(i)
res.append(nums[window[0]])
return res
# @lc code=end
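# Quick local sanity check (illustrative; relies on the typing import above):
#   print(Solution().maxSlidingWindow([1, 3, -1, -3, 5, 3, 6, 7], 3))
#   # -> [3, 3, 5, 5, 6, 7]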
| [
"[email protected]"
] | |
bda3a281f014f07766ed3d12be20620f343d84f9 | 4e30d990963870478ed248567e432795f519e1cc | /tests/api/v3_1_1/test_radius_server_sequence.py | 06bc47193d83c29626dc73d020e1b4e717332858 | [
"MIT"
] | permissive | CiscoISE/ciscoisesdk | 84074a57bf1042a735e3fc6eb7876555150d2b51 | f468c54998ec1ad85435ea28988922f0573bfee8 | refs/heads/main | 2023-09-04T23:56:32.232035 | 2023-08-25T17:31:49 | 2023-08-25T17:31:49 | 365,359,531 | 48 | 9 | MIT | 2023-08-25T17:31:51 | 2021-05-07T21:43:52 | Python | UTF-8 | Python | false | false | 13,545 | py | # -*- coding: utf-8 -*-
"""IdentityServicesEngineAPI radius_server_sequence API fixtures and tests.
Copyright (c) 2021 Cisco and/or its affiliates.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import pytest
from fastjsonschema.exceptions import JsonSchemaException
from ciscoisesdk.exceptions import MalformedRequest
from ciscoisesdk.exceptions import ciscoisesdkException
from tests.environment import IDENTITY_SERVICES_ENGINE_VERSION
pytestmark = pytest.mark.skipif(IDENTITY_SERVICES_ENGINE_VERSION != '3.1.1', reason='version does not match')
def is_valid_get_radius_server_sequence_by_id(json_schema_validate, obj):
if not obj:
return False
assert hasattr(obj, 'headers')
assert hasattr(obj, 'content')
assert hasattr(obj, 'text')
assert hasattr(obj, 'response')
assert hasattr(obj, 'status_code')
json_schema_validate('jsd_0d1df0e230765104863b8d63d5beb68e_v3_1_1').validate(obj.response)
return True
def get_radius_server_sequence_by_id(api):
endpoint_result = api.radius_server_sequence.get_radius_server_sequence_by_id(
id='string'
)
return endpoint_result
@pytest.mark.radius_server_sequence
def test_get_radius_server_sequence_by_id(api, validator):
try:
assert is_valid_get_radius_server_sequence_by_id(
validator,
get_radius_server_sequence_by_id(api)
)
except Exception as original_e:
with pytest.raises((JsonSchemaException, MalformedRequest)):
print("ERROR: {error}".format(error=original_e))
raise original_e
def get_radius_server_sequence_by_id_default(api):
endpoint_result = api.radius_server_sequence.get_radius_server_sequence_by_id(
id='string'
)
return endpoint_result
@pytest.mark.radius_server_sequence
def test_get_radius_server_sequence_by_id_default(api, validator):
try:
assert is_valid_get_radius_server_sequence_by_id(
validator,
get_radius_server_sequence_by_id_default(api)
)
except Exception as original_e:
with pytest.raises((JsonSchemaException, MalformedRequest, TypeError)):
raise original_e
def is_valid_update_radius_server_sequence_by_id(json_schema_validate, obj):
if not obj:
return False
assert hasattr(obj, 'headers')
assert hasattr(obj, 'content')
assert hasattr(obj, 'text')
assert hasattr(obj, 'response')
assert hasattr(obj, 'status_code')
json_schema_validate('jsd_df9ab8ff636353279d5c787585dcb6af_v3_1_1').validate(obj.response)
return True
def update_radius_server_sequence_by_id(api):
endpoint_result = api.radius_server_sequence.update_radius_server_sequence_by_id(
active_validation=False,
before_accept_attr_manipulators_list=[{'action': 'string', 'dictionaryName': 'string', 'attributeName': 'string', 'value': 'string', 'changedVal': 'string'}],
continue_authorz_policy=True,
description='string',
id='string',
local_accounting=True,
name='string',
on_request_attr_manipulator_list=[{'action': 'string', 'dictionaryName': 'string', 'attributeName': 'string', 'value': 'string', 'changedVal': 'string'}],
payload=None,
prefix_separator='string',
radius_server_list=['string'],
remote_accounting=True,
strip_prefix=True,
strip_suffix=True,
suffix_separator='string',
use_attr_set_before_acc=True,
use_attr_set_on_request=True
)
return endpoint_result
@pytest.mark.radius_server_sequence
def test_update_radius_server_sequence_by_id(api, validator):
try:
assert is_valid_update_radius_server_sequence_by_id(
validator,
update_radius_server_sequence_by_id(api)
)
except Exception as original_e:
with pytest.raises((JsonSchemaException, MalformedRequest)):
print("ERROR: {error}".format(error=original_e))
raise original_e
def update_radius_server_sequence_by_id_default(api):
endpoint_result = api.radius_server_sequence.update_radius_server_sequence_by_id(
active_validation=False,
id='string',
before_accept_attr_manipulators_list=None,
continue_authorz_policy=None,
description=None,
local_accounting=None,
name=None,
on_request_attr_manipulator_list=None,
payload=None,
prefix_separator=None,
radius_server_list=None,
remote_accounting=None,
strip_prefix=None,
strip_suffix=None,
suffix_separator=None,
use_attr_set_before_acc=None,
use_attr_set_on_request=None
)
return endpoint_result
@pytest.mark.radius_server_sequence
def test_update_radius_server_sequence_by_id_default(api, validator):
try:
assert is_valid_update_radius_server_sequence_by_id(
validator,
update_radius_server_sequence_by_id_default(api)
)
except Exception as original_e:
with pytest.raises((JsonSchemaException, MalformedRequest, TypeError)):
raise original_e
def is_valid_delete_radius_server_sequence_by_id(json_schema_validate, obj):
if not obj:
return False
assert hasattr(obj, 'headers')
assert hasattr(obj, 'content')
assert hasattr(obj, 'text')
assert hasattr(obj, 'response')
assert hasattr(obj, 'status_code')
json_schema_validate('jsd_815b13838fa75d6e8d970f6eeb6a4510_v3_1_1').validate(obj.response)
return True
def delete_radius_server_sequence_by_id(api):
endpoint_result = api.radius_server_sequence.delete_radius_server_sequence_by_id(
id='string'
)
return endpoint_result
@pytest.mark.radius_server_sequence
def test_delete_radius_server_sequence_by_id(api, validator):
try:
assert is_valid_delete_radius_server_sequence_by_id(
validator,
delete_radius_server_sequence_by_id(api)
)
except Exception as original_e:
with pytest.raises((JsonSchemaException, MalformedRequest)):
print("ERROR: {error}".format(error=original_e))
raise original_e
def delete_radius_server_sequence_by_id_default(api):
endpoint_result = api.radius_server_sequence.delete_radius_server_sequence_by_id(
id='string'
)
return endpoint_result
@pytest.mark.radius_server_sequence
def test_delete_radius_server_sequence_by_id_default(api, validator):
try:
assert is_valid_delete_radius_server_sequence_by_id(
validator,
delete_radius_server_sequence_by_id_default(api)
)
except Exception as original_e:
with pytest.raises((JsonSchemaException, MalformedRequest, TypeError)):
raise original_e
def is_valid_get_radius_server_sequence(json_schema_validate, obj):
if not obj:
return False
assert hasattr(obj, 'headers')
assert hasattr(obj, 'content')
assert hasattr(obj, 'text')
assert hasattr(obj, 'response')
assert hasattr(obj, 'status_code')
json_schema_validate('jsd_c6c330dace185a548f70f4e5d67776ea_v3_1_1').validate(obj.response)
return True
def get_radius_server_sequence(api):
endpoint_result = api.radius_server_sequence.get_radius_server_sequence(
page=0,
size=0
)
return endpoint_result
@pytest.mark.radius_server_sequence
def test_get_radius_server_sequence(api, validator):
try:
assert is_valid_get_radius_server_sequence(
validator,
get_radius_server_sequence(api)
)
except Exception as original_e:
with pytest.raises((JsonSchemaException, MalformedRequest)):
print("ERROR: {error}".format(error=original_e))
raise original_e
def get_radius_server_sequence_default(api):
endpoint_result = api.radius_server_sequence.get_radius_server_sequence(
page=None,
size=None
)
return endpoint_result
@pytest.mark.radius_server_sequence
def test_get_radius_server_sequence_default(api, validator):
try:
assert is_valid_get_radius_server_sequence(
validator,
get_radius_server_sequence_default(api)
)
except Exception as original_e:
with pytest.raises((JsonSchemaException, MalformedRequest, TypeError)):
raise original_e
def is_valid_create_radius_server_sequence(json_schema_validate, obj):
if not obj:
return False
assert hasattr(obj, 'headers')
assert hasattr(obj, 'content')
assert hasattr(obj, 'text')
assert hasattr(obj, 'response')
assert hasattr(obj, 'status_code')
json_schema_validate('jsd_83ad6ca0642c5750af6ca9905721a9d7_v3_1_1').validate(obj.response)
return True
def create_radius_server_sequence(api):
endpoint_result = api.radius_server_sequence.create_radius_server_sequence(
active_validation=False,
before_accept_attr_manipulators_list=[{'action': 'string', 'dictionaryName': 'string', 'attributeName': 'string', 'value': 'string', 'changedVal': 'string'}],
continue_authorz_policy=True,
description='string',
local_accounting=True,
name='string',
on_request_attr_manipulator_list=[{'action': 'string', 'dictionaryName': 'string', 'attributeName': 'string', 'value': 'string', 'changedVal': 'string'}],
payload=None,
prefix_separator='string',
radius_server_list=['string'],
remote_accounting=True,
strip_prefix=True,
strip_suffix=True,
suffix_separator='string',
use_attr_set_before_acc=True,
use_attr_set_on_request=True
)
return endpoint_result
@pytest.mark.radius_server_sequence
def test_create_radius_server_sequence(api, validator):
try:
assert is_valid_create_radius_server_sequence(
validator,
create_radius_server_sequence(api)
)
except Exception as original_e:
with pytest.raises((JsonSchemaException, MalformedRequest)):
print("ERROR: {error}".format(error=original_e))
raise original_e
def create_radius_server_sequence_default(api):
endpoint_result = api.radius_server_sequence.create_radius_server_sequence(
active_validation=False,
before_accept_attr_manipulators_list=None,
continue_authorz_policy=None,
description=None,
local_accounting=None,
name=None,
on_request_attr_manipulator_list=None,
payload=None,
prefix_separator=None,
radius_server_list=None,
remote_accounting=None,
strip_prefix=None,
strip_suffix=None,
suffix_separator=None,
use_attr_set_before_acc=None,
use_attr_set_on_request=None
)
return endpoint_result
@pytest.mark.radius_server_sequence
def test_create_radius_server_sequence_default(api, validator):
try:
assert is_valid_create_radius_server_sequence(
validator,
create_radius_server_sequence_default(api)
)
except Exception as original_e:
with pytest.raises((JsonSchemaException, MalformedRequest, TypeError)):
raise original_e
def is_valid_get_version(json_schema_validate, obj):
if not obj:
return False
assert hasattr(obj, 'headers')
assert hasattr(obj, 'content')
assert hasattr(obj, 'text')
assert hasattr(obj, 'response')
assert hasattr(obj, 'status_code')
json_schema_validate('jsd_8fb1a72ded19590fa0aa85fc59ea8cfc_v3_1_1').validate(obj.response)
return True
def get_version(api):
endpoint_result = api.radius_server_sequence.get_version(
)
return endpoint_result
@pytest.mark.radius_server_sequence
def test_get_version(api, validator):
try:
assert is_valid_get_version(
validator,
get_version(api)
)
except Exception as original_e:
with pytest.raises((JsonSchemaException, MalformedRequest)):
print("ERROR: {error}".format(error=original_e))
raise original_e
def get_version_default(api):
endpoint_result = api.radius_server_sequence.get_version(
)
return endpoint_result
@pytest.mark.radius_server_sequence
def test_get_version_default(api, validator):
try:
assert is_valid_get_version(
validator,
get_version_default(api)
)
except Exception as original_e:
with pytest.raises((JsonSchemaException, MalformedRequest, TypeError)):
raise original_e
| [
"[email protected]"
] | |
e5aafc8df7b44ff0c83d49b3d4cab9e3ea94de56 | 240e7cbb46bf2a94b3fd337267c71c1db42e7ce1 | /examples/ad_manager/v202002/adjustment_service/get_all_traffic_adjustments.py | 7754fa8a3e02792791e992f180562cb4b0e174bc | [
"Apache-2.0"
] | permissive | andreferraro/googleads-python-lib | 2624fa84ca7064c3b15a7d9d48fc0f023316524d | a9ddeae56c5b9769f50c4e9d37eb32fd1eebe534 | refs/heads/master | 2022-11-13T03:38:38.300845 | 2020-07-03T13:17:59 | 2020-07-03T13:17:59 | 276,904,111 | 0 | 0 | Apache-2.0 | 2020-07-03T13:16:21 | 2020-07-03T13:16:20 | null | UTF-8 | Python | false | false | 1,945 | py | #!/usr/bin/env python
#
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This example gets all traffic adjustments."""
from __future__ import print_function
# Import appropriate modules from the client library.
from googleads import ad_manager
def main(client):
# Initialize the adjustment service.
adjustment_service = client.GetService('AdjustmentService', version='v202002')
# Create a statement to get all forecast traffic adjustments.
statement = ad_manager.StatementBuilder(version='v202002')
# Retrieve a small number of traffic adjustments at a time, paging
# through until all traffic adjustments have been retrieved.
while True:
response = adjustment_service.getTrafficAdjustmentsByStatement(
statement.ToStatement())
if 'results' in response and len(response['results']):
for adjustment in response['results']:
# Print out some information for each traffic adjustment.
print('Traffic forecast adjustment with id %d and %d segments was '
'found.' % (adjustment['id'],
len(adjustment['forecastAdjustmentSegments'])))
statement.offset += statement.limit
else:
break
print('\nNumber of results found: %s' % response['totalResultSetSize'])
if __name__ == '__main__':
# Initialize client object.
ad_manager_client = ad_manager.AdManagerClient.LoadFromStorage()
main(ad_manager_client)
| [
"[email protected]"
] | |
61109f2520040675af6b0001cb0b033d7f46f74f | 68728961294d360d26e8149e7e0a4816adf20842 | /src/build_seq2seq_transformer/train_helper.py | 847c17f886a834c2ab5cd5579e7cba3bd6108705 | [] | no_license | Dawn-Flying/text_summarization | d334fe884aa3a6341dd7bc381b03c1ab3e2c057e | ab68555c6f455c4f14fead5fc1c49420cdef8dc4 | refs/heads/master | 2023-07-17T07:49:21.995004 | 2021-08-26T15:46:19 | 2021-08-26T15:46:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,842 | py | import time
import tensorflow as tf
from src.build_seq2seq_transformer.schedules.lr_schedules import CustomSchedule
from src.build_seq2seq_transformer.layers.transformer import create_masks
loss_object = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=False, reduction='none')
def loss_function(real, pred):
mask = tf.math.logical_not(tf.math.equal(real, 0))
loss_ = loss_object(real, pred)
mask = tf.cast(mask, dtype=loss_.dtype)
loss_ *= mask
loss_ = tf.reduce_sum(loss_, axis=1) / tf.reduce_sum(mask, axis=1)
return tf.reduce_mean(loss_)
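# Shape sketch for loss_function (inferred from its usage below): real is
# (batch, dec_len) integer ids with 0 as the PAD id, pred is
# (batch, dec_len, vocab) probabilities (from_logits=False above); PAD
# positions are masked out, the loss is averaged over real tokens per
# sequence and then over the batch.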
def train_model(model, dataset, params, ckpt_manager):
learning_rate = CustomSchedule(params["d_model"])
optimizer = tf.keras.optimizers.Adam(learning_rate)
def train_step(enc_inp, enc_extended_inp, dec_inp, dec_tar, batch_oov_len, decoder_pad_mask):
enc_padding_mask, combined_mask, dec_padding_mask = create_masks(enc_inp, dec_inp)
with tf.GradientTape() as tape:
outputs = model(enc_inp,
enc_extended_inp,
batch_oov_len,
dec_inp,
params['training'],
enc_padding_mask,
combined_mask,
dec_padding_mask)
pred = outputs["logits"]
batch_loss = loss_function(dec_tar, pred)
log_loss, cov_loss = 0., 0.
variables = model.trainable_variables
gradients = tape.gradient(batch_loss, variables)
optimizer.apply_gradients(zip(gradients, variables))
return batch_loss, log_loss, cov_loss
best_loss = 20
epochs = params['epochs']
for epoch in range(epochs):
t0 = time.time()
step = 0
total_loss = 0
total_log_loss = 0
total_cov_loss = 0
# for step, batch in enumerate(dataset.take(params['steps_per_epoch'])):
for encoder_batch_data, decoder_batch_data in dataset:
batch_loss, log_loss, cov_loss = train_step(encoder_batch_data["enc_input"], # shape=(16, 200)
encoder_batch_data["extended_enc_input"], # shape=(16, 200)
decoder_batch_data["dec_input"], # shape=(16, 50)
decoder_batch_data["dec_target"], # shape=(16, 50)
encoder_batch_data["max_oov_len"],
decoder_batch_data['decoder_pad_mask'])
step += 1
total_loss += batch_loss
total_log_loss += log_loss
total_cov_loss += cov_loss
if step % 10 == 0:
print('Epoch {} Batch {} avg_loss {:.4f} log_loss {:.4f} cov_loss {:.4f}'.format(epoch + 1,
step,
total_loss / step,
total_log_loss / step,
total_cov_loss / step))
if epoch % 1 == 0:
if total_loss / step < best_loss:
best_loss = total_loss / step
ckpt_save_path = ckpt_manager.save()
print('Saving checkpoint for epoch {} at {} ,best loss {}'.format(epoch + 1, ckpt_save_path, best_loss))
print('Epoch {} Loss {:.4f}'.format(epoch + 1, total_loss / step))
print('Time taken for 1 epoch {} sec\n'.format(time.time() - t0))
| [
"[email protected]"
] | |
becf43a0b4dfb0ea436cf8140e0e35e4cfda4e6a | 6527b66fd08d9e7f833973adf421faccd8b765f5 | /yuancloud/plugin/account_extend/__yuancloud__.py | 8db29a38062cd53a33ecab90e168d9cf7f69b8a1 | [] | no_license | cash2one/yuancloud | 9a41933514e57167afb70cb5daba7f352673fb4d | 5a4fd72991c846d5cb7c5082f6bdfef5b2bca572 | refs/heads/master | 2021-06-19T22:11:08.260079 | 2017-06-29T06:26:15 | 2017-06-29T06:26:15 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 873 | py | # -*- coding: utf-8 -*-
{
    'name': "财务模块扩展",  # "Finance module extension"
'summary': """
""",
'description': """
Long description of module's purpose
""",
'author': "[email protected]",
'website': "http://www.sswyuan.net/yuancloud",
# Categories can be used to filter modules in modules listing
# Check https://github.com/yuancloud/yuancloud/blob/master/yuancloud/addons/base/module/module_data.xml
# for the full list
'category' : 'Finance Management',
'version':'0.3',
# any module necessary for this one to work correctly
'depends': ['base','account_accountant'],
# always loaded
'data': [
# 'security/ir.model.access.csv',
'views/account_move_view.xml',
# 'views/templates.xml',
],
# only loaded in demonstration mode
# 'demo': [
# 'demo/demo.xml',
# ],
} | [
"[email protected]"
] | |
07fccf67a1e8f157cd3475218604638fde43eeab | 0931b32140ba932b3ba02f5109a087c6c70a244d | /frappe/utils/background_jobs.py | f6f35f3c8239acf3302486d691b210bc54e537ca | [
"MIT"
] | permissive | cstkyrilos/frappe | b60ed4e95ce929c74c2fc46000080d10b343190e | 27d9306bc5924c11c2749503454cc6d11a8cc654 | refs/heads/main | 2023-03-23T10:35:42.732385 | 2021-03-22T21:55:58 | 2021-03-22T21:55:58 | 350,292,784 | 0 | 0 | MIT | 2021-03-22T10:01:08 | 2021-03-22T10:01:07 | null | UTF-8 | Python | false | false | 5,097 | py | from __future__ import unicode_literals, print_function
import redis
from rq import Connection, Queue, Worker
from frappe.utils import cstr
from collections import defaultdict
import frappe
import MySQLdb
import os, socket, time
default_timeout = 300
queue_timeout = {
'long': 1500,
'default': 300,
'short': 300
}
def enqueue(method, queue='default', timeout=300, event=None,
async=True, job_name=None, now=False, **kwargs):
'''
Enqueue method to be executed using a background worker
:param method: method string or method object
:param queue: should be either long, default or short
:param timeout: should be set according to the functions
:param event: this is passed to enable clearing of jobs from queues
:param async: if async=False, the method is executed immediately, else via a worker
:param job_name: can be used to name an enqueue call, which can be used to prevent duplicate calls
:param now: if now=True, the method is executed via frappe.call
:param kwargs: keyword arguments to be passed to the method
'''
if now or frappe.flags.in_migrate:
return frappe.call(method, **kwargs)
q = get_queue(queue, async=async)
if not timeout:
timeout = queue_timeout.get(queue) or 300
return q.enqueue_call(execute_job, timeout=timeout,
kwargs={
"site": frappe.local.site,
"user": frappe.session.user,
"method": method,
"event": event,
"job_name": job_name or cstr(method),
"async": async,
"kwargs": kwargs
})
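# Example usage (illustrative; 'myapp.tasks.send_welcome_email' is a hypothetical
# method path, and extra kwargs such as user_id are forwarded to that method):
#   enqueue('myapp.tasks.send_welcome_email', queue='short', event='signup', user_id=42)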
def execute_job(site, method, event, job_name, kwargs, user=None, async=True, retry=0):
'''Executes job in a worker, performs commit/rollback and logs if there is any error'''
from frappe.utils.scheduler import log
if async:
frappe.connect(site)
if user:
frappe.set_user(user)
if isinstance(method, basestring):
method_name = method
method = frappe.get_attr(method)
else:
method_name = cstr(method.__name__)
try:
method(**kwargs)
except (MySQLdb.OperationalError, frappe.RetryBackgroundJobError), e:
frappe.db.rollback()
if (retry < 5 and
(isinstance(e, frappe.RetryBackgroundJobError) or e.args[0] in (1213, 1205))):
# retry the job if
# 1213 = deadlock
# 1205 = lock wait timeout
# or RetryBackgroundJobError is explicitly raised
frappe.destroy()
time.sleep(retry+1)
return execute_job(site, method, event, job_name, kwargs,
async=async, retry=retry+1)
else:
log(method_name, message=repr(locals()))
raise
except:
frappe.db.rollback()
log(method_name, message=repr(locals()))
raise
else:
frappe.db.commit()
finally:
if async:
frappe.destroy()
def start_worker(queue=None):
'''Wrapper to start rq worker. Connects to redis and monitors these queues.'''
with frappe.init_site():
# empty init is required to get redis_queue from common_site_config.json
redis_connection = get_redis_conn()
with Connection(redis_connection):
queues = get_queue_list(queue)
Worker(queues, name=get_worker_name(queue)).work()
def get_worker_name(queue):
'''When limiting worker to a specific queue, also append queue name to default worker name'''
name = None
if queue:
# hostname.pid is the default worker name
name = '{hostname}.{pid}.{queue}'.format(
hostname=socket.gethostname(),
pid=os.getpid(),
queue=queue)
return name
def get_jobs(site=None, queue=None, key='method'):
'''Gets jobs per queue or per site or both'''
jobs_per_site = defaultdict(list)
for queue in get_queue_list(queue):
q = get_queue(queue)
for job in q.jobs:
if job.kwargs.get('site'):
if site is None:
# get jobs for all sites
jobs_per_site[job.kwargs['site']].append(job.kwargs[key])
elif job.kwargs['site'] == site:
# get jobs only for given site
jobs_per_site[site].append(job.kwargs[key])
else:
print('No site found in job', job.__dict__)
return jobs_per_site
def get_queue_list(queue_list=None):
'''Defines possible queues. Also wraps a given queue in a list after validating.'''
default_queue_list = queue_timeout.keys()
if queue_list:
if isinstance(queue_list, basestring):
queue_list = [queue_list]
for queue in queue_list:
validate_queue(queue, default_queue_list)
return queue_list
else:
return default_queue_list
def get_queue(queue, async=True):
'''Returns a Queue object tied to a redis connection'''
validate_queue(queue)
return Queue(queue, connection=get_redis_conn(), async=async)
def validate_queue(queue, default_queue_list=None):
if not default_queue_list:
default_queue_list = queue_timeout.keys()
if queue not in default_queue_list:
frappe.throw("Queue should be one of {0}".format(', '.join(default_queue_list)))
def get_redis_conn():
if not hasattr(frappe.local, 'conf'):
raise Exception('You need to call frappe.init')
elif not frappe.local.conf.redis_queue:
raise Exception('redis_queue missing in common_site_config.json')
return redis.from_url(frappe.local.conf.redis_queue)
def enqueue_test_job():
enqueue('frappe.utils.background_jobs.test_job', s=100)
def test_job(s):
import time
print('sleeping...')
time.sleep(s) | [
"[email protected]"
] | |
cffac3e938b8f72f426447724e96e8a625dca2f2 | 54f352a242a8ad6ff5516703e91da61e08d9a9e6 | /Source Codes/AtCoder/abc027/D/4117422.py | 388be0989c70f290b2cd9f0347c18a27adb75478 | [] | no_license | Kawser-nerd/CLCDSA | 5cbd8a4c3f65173e4e8e0d7ed845574c4770c3eb | aee32551795763b54acb26856ab239370cac4e75 | refs/heads/master | 2022-02-09T11:08:56.588303 | 2022-01-26T18:53:40 | 2022-01-26T18:53:40 | 211,783,197 | 23 | 9 | null | null | null | null | UTF-8 | Python | false | false | 412 | py | from itertools import accumulate
S = input()
N = len(S)
M_cnt = S.count('M')
# suffix counts: how many '+' / '-' appear at or to the right of each index
cnt_plus = list(accumulate([(1 if s == '+' else 0) for s in S[::-1]]))[::-1]
cnt_minus = list(accumulate([(1 if s == '-' else 0) for s in S[::-1]]))[::-1]
# each 'M' is assigned a direction; choosing +1 adds (#'+' - #'-' to its
# right) to the answer, choosing -1 subtracts it
p = []
for i, s in enumerate(S):
    if s == 'M':
        p.append(cnt_plus[i] - cnt_minus[i])
# half of the 'M's take each direction, so the largest half count positively
p.sort()
ans = sum(p[M_cnt // 2:]) - sum(p[:M_cnt // 2])
print(ans) | [
"[email protected]"
] | |
b2a3deaae28ce2efbd89fe01d8a2d2145f9946ff | c8ed5baad6ce8527f4b75a73d0f15b1207aa660e | /app/discourse/config.py | 41c134bf5724c4ec50aaeec6afe12e15213f4a81 | [] | no_license | GovLab/noi2 | f12aed284c11c743fff0286df4fe59bb8b894050 | 7fce89f0c4c437718916fc95ca4c6c4372cdf464 | refs/heads/master | 2021-04-15T15:10:20.713982 | 2016-10-06T23:27:05 | 2016-10-06T23:27:05 | 40,134,662 | 5 | 9 | null | 2016-06-30T06:37:33 | 2015-08-03T16:10:41 | Python | UTF-8 | Python | false | false | 593 | py | from werkzeug.local import LocalProxy
from flask import current_app
class DiscourseConfig(object):
def __init__(self, config):
self.api_key = config['api_key']
self.origin = config['origin']
self.sso_secret = config['sso_secret']
self.admin_username = 'system'
def url(self, path):
return self.origin + path
@classmethod
def from_app(cls, app):
return cls(app.config['DISCOURSE'])
@classmethod
def from_current_app(cls):
return cls.from_app(current_app)
config = LocalProxy(DiscourseConfig.from_current_app)
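# Usage sketch (illustrative): inside a Flask app/request context,
#   config.url('/admin/users/list/active.json')
# resolves lazily through the LocalProxy, reading the DISCOURSE dict
# (api_key, origin, sso_secret) from the current app's configuration.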
| [
"[email protected]"
] | |
32245eaa2f0c0743d15adfd2dd63d081db2d8f46 | 06a18ff78abb52a6f6db1053958cab94948802c6 | /manage.py | 92c8452486877debeeaa290576a2af7842f45a09 | [] | no_license | ZFCon/moviesdand | 28b49d4428d4d4b445f940936c0529f55613f25e | 4f3718e8082c7ec6201a4c5399a7ddb5d5b01031 | refs/heads/master | 2023-04-28T23:57:09.835193 | 2019-12-09T21:19:08 | 2019-12-09T21:19:08 | 224,487,270 | 1 | 0 | null | 2023-04-21T20:41:05 | 2019-11-27T17:51:35 | CSS | UTF-8 | Python | false | false | 630 | py | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'moviesdand.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
| [
"[email protected]"
] | |
12ab0540bafb80390bcb2af09737bf5eb8328fae | b2ba670818623f8ab18162382f7394baed97b7cb | /test-data/AndroidSlicer/Bites/DD/4.py | b62029f34f1e3997c9a8c10994ecffaead769d2e | [
"MIT"
] | permissive | hsumyatwin/ESDroid-artifact | 012c26c40537a79b255da033e7b36d78086b743a | bff082c4daeeed62ceda3d715c07643203a0b44b | refs/heads/main | 2023-04-11T19:17:33.711133 | 2022-09-30T13:40:23 | 2022-09-30T13:40:23 | 303,378,286 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,621 | py | #start monkey test seedNo 0
import os
from subprocess import Popen
from subprocess import PIPE
from com.android.monkeyrunner import MonkeyRunner, MonkeyDevice, MonkeyImage
from com.android.monkeyrunner.MonkeyDevice import takeSnapshot
from com.android.monkeyrunner.easy import EasyMonkeyDevice
from com.android.monkeyrunner.easy import By
from com.android.chimpchat.hierarchyviewer import HierarchyViewer
from com.android.monkeyrunner import MonkeyView
import random
import sys
import subprocess
from sys import exit
from random import randint
device = MonkeyRunner.waitForConnection()
package = 'caldwell.ben.bites'
activity ='caldwell.ben.bites.Bites'
runComponent = package+'/'+activity
device.startActivity(component=runComponent)
MonkeyRunner.sleep(0.2)
MonkeyRunner.sleep(0.2)
device.touch(4,1371, 'DOWN_AND_UP')
MonkeyRunner.sleep(0.2)
device.touch(582,784, 'DOWN_AND_UP')
MonkeyRunner.sleep(0.2)
device.touch(431,846, 'DOWN_AND_UP')
MonkeyRunner.sleep(0.2)
device.touch(881,1691, 'DOWN_AND_UP')
MonkeyRunner.sleep(0.2)
device.touch(1052,1239, 'DOWN_AND_UP')
MonkeyRunner.sleep(0.2)
device.touch(946,447, 'DOWN_AND_UP')
MonkeyRunner.sleep(0.2)
device.touch(1000,1859, 'DOWN_AND_UP')
MonkeyRunner.sleep(0.2)
device.touch(792,1652, 'DOWN_AND_UP')
MonkeyRunner.sleep(0.2)
device.touch(998,1849, 'DOWN_AND_UP')
MonkeyRunner.sleep(0.2)
device.touch(247,1894, 'DOWN_AND_UP')
MonkeyRunner.sleep(0.2)
device.touch(279,1642, 'DOWN_AND_UP')
MonkeyRunner.sleep(0.2)
device.touch(1032,1841, 'DOWN_AND_UP')
MonkeyRunner.sleep(0.2)
device.touch(1015,1873, 'DOWN_AND_UP')
MonkeyRunner.sleep(0.2)
device.touch(1008,1850, 'DOWN_AND_UP')
MonkeyRunner.sleep(0.2)
device.touch(969,1919, 'DOWN_AND_UP')
MonkeyRunner.sleep(0.2)
device.touch(1059,1914, 'DOWN_AND_UP')
MonkeyRunner.sleep(0.2)
device.touch(949,1829, 'DOWN_AND_UP')
MonkeyRunner.sleep(0.2)
device.touch(926,1818, 'DOWN_AND_UP')
MonkeyRunner.sleep(0.2)
device.touch(998,1917, 'DOWN_AND_UP')
MonkeyRunner.sleep(0.2)
device.touch(918,1881, 'DOWN_AND_UP')
MonkeyRunner.sleep(0.2)
device.touch(974,1897, 'DOWN_AND_UP')
MonkeyRunner.sleep(0.2)
device.touch(1079,1867, 'DOWN_AND_UP')
MonkeyRunner.sleep(0.2)
device.touch(584,1014, 'DOWN_AND_UP')
MonkeyRunner.sleep(0.2)
device.touch(870,270, 'DOWN_AND_UP')
MonkeyRunner.sleep(0.2)
device.touch(1070,1850, 'DOWN_AND_UP')
MonkeyRunner.sleep(0.2)
device.touch(1000,1862, 'DOWN_AND_UP')
MonkeyRunner.sleep(0.2)
device.touch(678,1645, 'DOWN_AND_UP')
MonkeyRunner.sleep(0.2)
device.touch(437,988, 'DOWN_AND_UP')
MonkeyRunner.sleep(0.2)
device.touch(338,1337, 'DOWN_AND_UP')
MonkeyRunner.sleep(0.2)
device.touch(443,798, 'DOWN_AND_UP')
| [
"[email protected]"
] | |
3de583f2f00f4914d1aa86d9d991dd8cc02811f0 | 909afe0216a37bdc19683d81e533fa6c094329c1 | /python/leetcode/53-maximum-subarray.py | a2af229326ee18c0e058516c510b22433c0acfdd | [] | no_license | wxnacy/study | af7fdcd9915d668be73c6db81bdc961247e24c73 | 7bca9dc8ec211be15c12f89bffbb680d639f87bf | refs/heads/master | 2023-04-08T17:57:40.801687 | 2023-03-29T08:02:20 | 2023-03-29T08:02:20 | 118,090,886 | 18 | 22 | null | 2022-12-16T03:11:43 | 2018-01-19T07:14:02 | HTML | UTF-8 | Python | false | false | 4,928 | py | #!/usr/bin/env python
# -*- coding:utf-8 -*-
# Author: wxnacy([email protected])
# Description: Maximum Subarray (practicing the dynamic programming solution)
'''
Given an integer array nums, find the contiguous subarray (containing at
least one number) which has the largest sum, and return that sum.
Example:
Input: [-2,1,-3,4,-1,2,1,-5,4],
Output: 6
Explanation: the contiguous subarray [4,-1,2,1] has the largest sum, 6.
Follow up:
If you have figured out the O(n) solution, try the subtler
divide-and-conquer approach.
'''
from constants import NUMS_FOR53
class Solution:
def maxSubArray1(self, nums) -> int:
'''
        Brute-force solution; exceeds the time limit, see
https://leetcode-cn.com/submissions/detail/18022319/
'''
if not nums:
return 0
if len(nums) == 1:
return nums[0]
max_sum = -2 ** 23
sums = []
for i in range(len(nums)):
sums.append(nums[i])
if nums[i] > max_sum:
max_sum = nums[i]
for j in range(len(nums)):
k = i + j + 1
if k >= len(nums):
break
s = sums[-1] + nums[k]
sums.append(s)
if s > max_sum:
max_sum = s
return max_sum
def maxSubArray2(self, nums) -> int:
'''
        Time complexity: O(n)
        Runtime: 64 ms, faster than 72.70% of Python3 submissions for Maximum Subarray
        Memory: 13.4 MB, less than 96.67% of Python3 submissions for Maximum Subarray
'''
if not nums:
return 0
if len(nums) == 1:
return nums[0]
max_index = 0
max_val = -2 ** 23
s = max_val
max_s = s
b = -1
i = 0
l = len(nums)
for i in range(l):
if nums[i] > max_val:
max_val = nums[i]
max_index = i
if b == -1 and nums[i] > 0 and i < l - 1:
bv = nums[i] + nums[i + 1]
if bv > 0:
b = i
s = nums[i]
if s > max_s:
max_s = s
else:
s+=nums[i]
if s <=0:
b = -1
if s > max_s:
max_s = s
return max_s if max_s > max_val else max_val
def maxSubArray3(self, nums) -> int:
'''
        Time complexity: O(n)
        Runtime: 64 ms, faster than 72.70% of Python3 submissions for Maximum Subarray
        Memory: 13.4 MB, less than 96.67% of Python3 submissions for Maximum Subarray
'''
if not nums:
return 0
s = 0
max_sum = nums[0]
for n in nums:
s += n
if s > max_sum:
max_sum = s
if s < 0:
s = 0
return max_sum
def maxSubArray4(self, nums) -> int:
'''
        Time complexity: O(n)
        Runtime: 64 ms, faster than 72.70% of Python3 submissions for Maximum Subarray
        Memory: 13.4 MB, less than 96.67% of Python3 submissions for Maximum Subarray
'''
if not nums:
return 0
s = nums[0]
for i in range(1, len(nums)):
if nums[i-1] > 0:
nums[i] += nums[i-1]
if nums[i] > s:
s = nums[i]
return s
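    # maxSubArray4 is the classic Kadane scan: nums[i] accumulates the best
    # sum ending at i, e.g. [-2,1,-3,4,-1,2,1,-5,4] -> 6 via the window [4,-1,2,1]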
import unittest
import utils
s = Solution()
class TestMain(unittest.TestCase):
def setUp(self):
'''before each test function'''
pass
def tearDown(self):
'''after each test function'''
pass
def do(self, func):
nums = [-2,1,-3,4,-1,2,1,-5,4]
self.assertEqual(func(nums), 6)
nums = [-2, 1]
self.assertEqual(func(nums), 1)
nums = [-2, -1]
self.assertEqual(func(nums), -1)
nums = [1, 2]
self.assertEqual(func(nums), 3)
nums = [1, 1, -2]
self.assertEqual(func(nums), 2)
nums = [3,1,-3,-3,2,-1]
self.assertEqual(func(nums), 4)
nums = [8,-19,5,-4,20]
self.assertEqual(func(nums), 21)
nums = [2,0,-3,2,1,0,1,-2]
self.assertEqual(func(nums), 4)
# for nums in TEST_LISTS:
# self.assertEqual(s.maxSubArray2(nums), func(nums))
def test_func(self):
s = Solution()
self.do(s.maxSubArray4)
self.do(s.maxSubArray3)
self.do(s.maxSubArray2)
self.do(s.maxSubArray1)
if __name__ == "__main__":
count = 100
tm = TestMain()
utils.print_func_run_time(count, s.maxSubArray2, nums = NUMS_FOR53)
utils.print_func_run_time(count, s.maxSubArray3, nums = NUMS_FOR53)
utils.print_func_run_time(count, s.maxSubArray4, nums = NUMS_FOR53)
# utils.print_func_run_time1(count, tm.do, s.maxSubArray3)
unittest.main()
| [
"[email protected]"
] | |
3787e3fd933ddd148fab18af2ec10a45fdbe09e4 | caf5807c331ff22b1e7d48f59b626a8869bd418d | /quotes/models.py | d5d4b80e67599700f8a2ceddb5a9ece5e1f0e9a7 | [] | no_license | tahirawan4/cserver | d7cd2953f75485b8f061e0301d2ce4d77605e8fa | fcb7e0e22e3e2bac6e0278aa41709f84f93c9da2 | refs/heads/master | 2021-01-10T02:50:12.879964 | 2015-06-03T19:58:59 | 2015-06-03T19:58:59 | 36,827,447 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,171 | py | from django.db import models
# Create your models here.
class Author(models.Model):
name = models.CharField(max_length=30)
author_discription = models.CharField(max_length=150)
author_url = models.CharField(max_length=150)
author_image = models.ImageField(upload_to="media/", null=True, blank=True)
def __str__(self): # __unicode__ on Python 2
return self.name
list_display = ('name', 'author_discription', 'author_url')
class Categories(models.Model):
name = models.CharField(max_length=30)
cat_id = models.IntegerField(default=0)
def __str__(self): # __unicode__ on Python 2
return self.name
list_display = ('cat_id', 'name')
class Quote(models.Model):
author = models.ForeignKey(Author)
category = models.ForeignKey(Categories)
quote_id = models.IntegerField(default=0)
quote_discription = models.CharField(max_length=1000)
def __str__(self): # __unicode__ on Python 2
return self.quote_discription
list_display = ('author', 'category','quote_id','quote_discription')
| [
"[email protected]"
] | |
497a9fbcff26fdec2bfd886a40ed6cae1bc0b5fc | 72765c1736a10b86be8583dbd694906aff467068 | /tkinter/tkinter_pack/tkinter_font.py | 86bf8bb16121622c1a8e3b449bc37793646e9847 | [] | no_license | taison2000/Python | 05e3f3834501a4f5ef7a6260d8bf3d4ce41930f3 | 44079700c3db289f92792ea3ec5add6a523f8eae | refs/heads/master | 2021-10-16T07:43:55.202012 | 2019-02-09T02:22:44 | 2019-02-09T02:22:44 | 103,322,062 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,169 | py | #!/usr/bin/python
# Comments start with #
import sys
import tkinter as tk
#from tkinter import messagebox as msgBox
## tkMessageBox (v2.x) ==> messagebox (v3.x)
top = tk.Tk()
lbl1 = tk.Label(None, text='This is a label #1')
lbl2 = tk.Label(None, text=' label #2', bg="light green")
lbl3 = tk.Label(None, text='ABCDEFHJIJKLMNOPQRSTUVWXYZ', bg="gray", height=3, width=30, \
font=("Harlow", 35), fg="green", cursor='cross', underline=(15))
lbl1.pack()
lbl2.pack()
lbl3.pack()
top.mainloop()
# -----------------------------------------------------------------------------
# Resources
# http://www.tutorialspoint.com/python/python_gui_programming.htm
#
# https://docs.python.org/3.4/tutorial/modules.html
# http://www.tutorialspoint.com/python/tk_label.htm <-- Label
# http://www.tutorialspoint.com/python/tk_cursors.htm <-- cursor names
# http://effbot.org/tkinterbook/label.htm <-- Label
#
# Windows - Font
# - "Control Panel" -> "Appearance and Personalization" -> "Fonts"
# - "Control Panel" -> "Fonts"
#
# * Arial
# * Forte
# * Forte
# * Gungsuh
# * Harrington
#
| [
"[email protected]"
] | |
eb35046e2b6c4f11866a15ec83a1b6d45ec5dcb7 | 6580ba5d135c4f33f1a0996953ba2a65f7458a14 | /applications/ji178/models/fd404.py | 1e76f1088602024a7d98867bbe6b8c5253fb2792 | [
"LicenseRef-scancode-public-domain",
"MIT"
] | permissive | ali96343/facew2p | 02b038d3853691264a49de3409de21c8a33544b8 | a3881b149045e9caac344402c8fc4e62edadb42f | refs/heads/master | 2021-06-10T17:52:22.200508 | 2021-05-10T23:11:30 | 2021-05-10T23:11:30 | 185,795,614 | 7 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,643 | py | #
# table for controller: X404
#
from gluon.contrib.populate import populate
db.define_table('d404',
Field('f0', label='key', writable = True , length= 1000),
Field('f1', 'text', label='data string', length= 1000),
Field('f2', 'text', label='save data string', length= 1000, default='' ),
)
#
if not db(db.d404.id ).count():
db.d404.insert( f0= 'sp983', f1= '(983)Dashboard')
db.d404.insert( f0= 'sp984', f1= '(984)Components')
db.d404.insert( f0= 'hf985', f1= '(985)Custom Components:')
db.d404.insert( f0= 'aa987', f1= '(987)Buttons')
db.d404.insert( f0= 'aa989', f1= '(989)Cards')
db.d404.insert( f0= 'sp990', f1= '(990)Utilities')
db.d404.insert( f0= 'hf991', f1= '(991)Custom Utilities:')
db.d404.insert( f0= 'aa993', f1= '(993)Colors')
db.d404.insert( f0= 'aa995', f1= '(995)Borders')
db.d404.insert( f0= 'aa997', f1= '(997)Animations')
db.d404.insert( f0= 'aa999', f1= '(999)Other')
db.d404.insert( f0= 'sp1000', f1= '(1000)Pages')
db.d404.insert( f0= 'hf1001', f1= '(1001)Login Screens:')
db.d404.insert( f0= 'aa1003', f1= '(1003)Login')
db.d404.insert( f0= 'aa1005', f1= '(1005)Register')
db.d404.insert( f0= 'aa1007', f1= '(1007)Forgot Password')
db.d404.insert( f0= 'hf1008', f1= '(1008)Other Pages:')
db.d404.insert( f0= 'aa1010', f1= '(1010)404 Page')
db.d404.insert( f0= 'aa1012', f1= '(1012)Blank Page')
db.d404.insert( f0= 'sp1014', f1= '(1014)Charts')
db.d404.insert( f0= 'sp1016', f1= '(1016)Tables')
db.d404.insert( f0= 'pb1017', f1= '(1017)Search for...')
db.d404.insert( f0= 'pb1018', f1= '(1018)Search for...')
db.d404.insert( f0= 'sx1019', f1= '(1019)3+')
db.d404.insert( f0= 'di1020', f1= '(1020)December 12, 2019')
db.d404.insert( f0= 'sx1021', f1= '(1021)A new monthly report is ready to download!')
db.d404.insert( f0= 'di1022', f1= '(1022)December 7, 2019')
db.d404.insert( f0= 'di1023', f1= '(1023)December 2, 2019')
db.d404.insert( f0= 'aa1024', f1= '(1024)Show All Alerts')
    db.d404.insert( f0= 'di1025', f1= '(1025)Hi there! I am wondering if you can help me with a problem I\'ve been having.')
db.d404.insert( f0= 'di1026', f1= '(1026)Emily Fowler 58m')
db.d404.insert( f0= 'di1027', f1= '(1027)I have the photos that you ordered last month, how would you like them sent to you?')
db.d404.insert( f0= 'di1028', f1= '(1028)Jae Chun 1d')
    db.d404.insert( f0= 'di1029', f1= '(1029)Last month\'s report looks great, I am very happy with the progress so far, keep up the good work!')
db.d404.insert( f0= 'di1030', f1= '(1030)Morgan Alvarez 2d')
    db.d404.insert( f0= 'di1031', f1= '(1031)Am I a good boy? The reason I ask is because someone told me that people say this to all dogs, even if they aren\'t good...')
db.d404.insert( f0= 'di1032', f1= '(1032)Chicken the Dog 2w')
db.d404.insert( f0= 'aa1033', f1= '(1033)Read More Messages')
db.d404.insert( f0= 'sx1034', f1= '(1034)Valerie Luna')
db.d404.insert( f0= 'di1035', f1= '(1035)404')
db.d404.insert( f0= 'pc1036', f1= '(1036)Page Not Found')
db.d404.insert( f0= 'pc1037', f1= '(1037)It looks like you found a glitch in the matrix...')
db.d404.insert( f0= 'sp1039', f1= '(1039)Copyright © Your Website 2019')
db.d404.insert( f0= 'he1040', f1= '(1040)Ready to Leave?')
db.d404.insert( f0= 'sx1041', f1= '(1041)')
db.d404.insert( f0= 'di1042', f1= '(1042)Select Logout below if you are ready to end your current session.')
db.d404.insert( f0= 'bu1043', f1= '(1043)Cancel')
db.d404.insert( f0= 'aa1045', f1= '(1045)Logout')
db.commit()
#
| [
"[email protected]"
] | |
287ea9e6f95498d1cc39def3a007c223b9d638fb | 99a472a443ed55652de88dc82451fdcc22d601f9 | /label_maker.py | 1cc3d5b5bff9ef693b587a327bb524bf5bd29a8e | [] | no_license | JeremyGibson/gvision | c954144c8c8648619fb0e34cec855a6d72d23cb3 | b218e0255d076c7bdb5f5c0c694264399fa4419d | refs/heads/main | 2023-03-07T02:44:53.521075 | 2021-02-15T21:18:57 | 2021-02-15T21:39:51 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,510 | py | #!/usr/bin/python
import os
from pathlib import Path
from datetime import datetime
from google.cloud import vision
LOG_PATH = Path("logs")
PATH_TO_IMAGES = Path(os.environ["PHOTOS_DIR"])
client = vision.ImageAnnotatorClient()
POTENTIAL_DOCUMENT = ['font', 'material property', 'parallel', 'stationery', 'recipe', 'paper', 'paper product',
                      'letter', 'document', 'post-it note', 'screenshot']
def is_paper(labels: list):
test = [x for x in labels if x in POTENTIAL_DOCUMENT]
match_per = len(test) / len(POTENTIAL_DOCUMENT)
if len(test) > 0:
return True, match_per
return False, match_per
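# Example (illustrative): is_paper(['paper', 'font', 'handwriting']) returns
# (True, ratio) because two of the labels appear in POTENTIAL_DOCUMENT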
def get_file():
images = [".jpg", ".png"]
for p in PATH_TO_IMAGES.glob("**/*.*"):
if p.suffix.lower() in images:
yield p
def label_maker():
log = LOG_PATH / f"potential_documents_{datetime.now()}.log"
with log.open('w') as logfh:
for f in get_file():
print(f"Examining: {f}")
with f.open('rb') as fh:
content = fh.read()
image = vision.Image(content=content)
response = client.label_detection(image=image)
labels = response.label_annotations
labels = [x.description.lower() for x in labels]
potential, percentage = is_paper(labels)
if potential:
print(f"{f} is probably a document.")
logfh.write(f"{f}: {percentage} {labels}\n")
if __name__ == "__main__":
label_maker()
| [
"[email protected]"
] | |
1f4fc6c7eee5b82ea686875e7a379a7e1f509552 | e311664619d469addd2c77566ec97d24affcbfd9 | /src/apps/alumno_profesor/migrations/0007_alumno_last_login.py | bf2c5409384950bcfb799164bb23bacfe9e2d549 | [] | no_license | danielhuamani/Proyecto-taller-base-datos | 361dc8c915dff36a9ce96a7147c11f0af9d51227 | 5d791383f77f8042a2890db4cfd31079c6d1dc7b | refs/heads/master | 2016-08-11T13:47:03.169317 | 2015-12-22T04:28:52 | 2015-12-22T04:28:52 | 46,673,349 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 461 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('alumno_profesor', '0006_auto_20151220_1948'),
]
operations = [
migrations.AddField(
model_name='alumno',
name='last_login',
field=models.DateTimeField(null=True, verbose_name=b'\xc3\x9altimo Login', blank=True),
),
]
| [
"[email protected]"
] | |
e81481b04fb1f65a6e1d9d47e39919236d78028e | 81a62053841c03d9621fd31f8e7984c712c7aed2 | /zoo/BEVFormer/attacks/attacker/pgd.py | f0b9a265b5f49216b58f49230fbcad46f430c69e | [
"Apache-2.0"
] | permissive | Daniel-xsy/BEV-Attack | d0eb3a476875f9578c53df9bcb21564dea18ce0c | 7970b27396c1af450c80b12eb312e76a8ab52a0a | refs/heads/master | 2023-05-23T01:13:44.121533 | 2023-02-22T05:48:14 | 2023-02-22T05:48:14 | 540,328,937 | 7 | 1 | null | null | null | null | UTF-8 | Python | false | false | 4,428 | py | import torch
import torch.nn as nn
import numpy as np
import random
import mmcv
from attacks.attacker.base import BaseAttacker
from attacks.attacker.builder import ATTACKER
@ATTACKER.register_module()
class PGD(BaseAttacker):
def __init__(self,
epsilon,
step_size,
num_steps,
loss_fn,
assigner,
category="Madry",
rand_init=False,
single_camera=False,
*args,
**kwargs):
""" PGD pixel attack
Args:
epsilon (float): L_infty norm bound for visual percep
step_size (float): step size of one attack iteration
num_steps (int): attack iteration number
loss_fn (class): adversarial objective function
category (str): `trades` or `Madry`, which type of initialization of attack
rand_init (bool): random initialize adversarial noise or zero initialize
assigner (class): assign prediction bbox to ground truth bbox
single_camera (bool): only attack random choose single camera
"""
super().__init__(*args, **kwargs)
self.epsilon = epsilon
self.step_size = step_size
self.num_steps = num_steps
self.loss_fn = loss_fn
self.category = category
self.single_camera = single_camera
self.rand_init = rand_init
self.assigner = assigner
def run(self, model, img, img_metas, gt_bboxes_3d, gt_labels_3d):
"""Run PGD attack optimization
Args:
model (nn.Module): model to be attacked
img (DataContainer): [B, M, C, H, W]
img_metas (DataContainer): img_meta information
gt_bboxes_3d: ground truth of bboxes
gt_labels_3d: ground truth of labels
Return:
inputs: (dict) {'img': img, 'img_metas': img_metas}
"""
model.eval()
camera = random.randint(0, 5)
img_ = img[0].data[0].clone()
B, M, C, H, W = img_.size()
assert B == 1, f"Batchsize should set to 1 in attack, but now is {B}"
# only calculate grad of single camera image
if self.single_camera:
camera_mask = torch.zeros((B, M, C, H, W))
camera_mask[:, camera] = 1
if self.category == "trades":
if self.single_camera:
x_adv = img_.detach() + camera_mask * 0.001 * torch.randn(img_.shape).to(img_.device).detach() if self.rand_init else img_.detach()
else:
x_adv = img_.detach() + 0.001 * torch.randn(img_.shape).to(img_.device).detach() if self.rand_init else img_.detach()
if self.category == "Madry":
if self.single_camera:
x_adv = img_.detach() + camera_mask * torch.from_numpy(np.random.uniform(-self.epsilon, self.epsilon, img_.shape)).float().to(img_.device) if self.rand_init else img_.detach()
else:
x_adv = img_.detach() + torch.from_numpy(np.random.uniform(-self.epsilon, self.epsilon, img_.shape)).float().to(img_.device) if self.rand_init else img_.detach()
x_adv = torch.clamp(x_adv, self.lower.view(1, 1, C, 1, 1), self.upper.view(1, 1, C, 1, 1))
for k in range(self.num_steps):
x_adv.requires_grad_()
img[0].data[0] = x_adv
inputs = {'img': img, 'img_metas': img_metas}
# with torch.no_grad():
outputs = model(return_loss=False, rescale=True, adv_mode=True, **inputs)
# assign pred bbox to ground truth
assign_results = self.assigner.assign(outputs, gt_bboxes_3d, gt_labels_3d)
# no prediction are assign to ground truth, stop attack
if assign_results is None:
break
loss_adv = self.loss_fn(**assign_results)
loss_adv.backward()
eta = self.step_size * x_adv.grad.sign()
if self.single_camera:
eta = eta * camera_mask
x_adv = x_adv.detach() + eta
x_adv = torch.min(torch.max(x_adv, img_ - self.epsilon), img_ + self.epsilon)
x_adv = torch.clamp(x_adv, self.lower.view(1, 1, C, 1, 1), self.upper.view(1, 1, C, 1, 1))
img[0].data[0] = x_adv.detach()
torch.cuda.empty_cache()
return {'img': img, 'img_metas':img_metas}
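# Example registry-style config (illustrative; the numeric values and the
# loss_fn/assigner sub-configs are assumptions supplied in practice by this
# repo's attack config files):
#   attack_cfg = dict(type='PGD', epsilon=5.0, step_size=0.1, num_steps=10,
#                     loss_fn=dict(...), assigner=dict(...),
#                     category='Madry', rand_init=True, single_camera=False)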
| [
"[email protected]"
] | |
9012f3d446c9811c846cbdf005bfb6e188fa54c8 | 52b5773617a1b972a905de4d692540d26ff74926 | /.history/rottenOranges_20200810193226.py | 9ad8d01ffd15107d6776d9a79b8fb4622742316f | [] | no_license | MaryanneNjeri/pythonModules | 56f54bf098ae58ea069bf33f11ae94fa8eedcabc | f4e56b1e4dda2349267af634a46f6b9df6686020 | refs/heads/master | 2022-12-16T02:59:19.896129 | 2020-09-11T12:05:22 | 2020-09-11T12:05:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,038 | py | def markrotten(i,j,row,column,grid):
if (i < 0 or i >= row or j < 0 or j >= column) or grid[i][j] !=1:
return
else:
grid[i][j] == 2
print('grid',grid)
# checking its neighbours
markrotten(i+1,j,row,column,grid)
markrotten(i,j+1,row,column,grid)
markrotten(i,j-1,row,column,grid)
markrotten(i-1,j,row,column,grid)
def oranges(grid):
if len(grid) == 0:
return 0
# loop through the grid
# if there is no fresh orange just return 0
# if there is a two check all its four neighbours
# recursive call
# count when a one becomes a two
row = len(grid)
column = len(grid[0])
minutes = 0
for i in range(len(grid)):
for j in range(len(grid[i])):
if grid[i][j] == 2:
                # a rotten source is already 2, so start the spread from its neighbours
                markrotten(i+1,j,row,column,grid)
                markrotten(i,j+1,row,column,grid)
                markrotten(i,j-1,row,column,grid)
                markrotten(i-1,j,row,column,grid)
minutes +=1
print(minutes)
print(grid)
oranges( [[2,1,1],[0,1,1],[1,0,1]])
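# Note: the depth-first marking above rots every reachable orange in one sweep,
# so `minutes` counts rotten sources rather than true elapsed time. A
# level-by-level BFS (sketch below, same grid encoding: 0 empty, 1 fresh,
# 2 rotten) yields the actual minute count:
from collections import deque

def oranges_bfs(grid):
    rows, cols = len(grid), len(grid[0])
    # seed the queue with every rotten orange and count the fresh ones
    queue = deque((i, j) for i in range(rows) for j in range(cols) if grid[i][j] == 2)
    fresh = sum(row.count(1) for row in grid)
    minutes = 0
    while queue and fresh:
        for _ in range(len(queue)):  # one BFS level == one minute
            i, j = queue.popleft()
            for x, y in ((i+1, j), (i-1, j), (i, j+1), (i, j-1)):
                if 0 <= x < rows and 0 <= y < cols and grid[x][y] == 1:
                    grid[x][y] = 2
                    fresh -= 1
                    queue.append((x, y))
        minutes += 1
    return minutes if fresh == 0 else -1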
| [
"[email protected]"
] | |
1edb9ae7fd8237f94d649a9af47757a6ad086796 | 95ad637ede0acdd88bbfffd10e344aff89ec298f | /impc_etl/jobs/loaders/quml_loader.py | faeb2f1ac5cd8b9827afbcbfbb00603878b02695 | [
"Apache-2.0"
] | permissive | luwei1234567/impc-etl | c13386b0f6871c17353ae4884cc444222e66f91b | 8bad084c00ec63b2b5d109f84db77dd0172d142d | refs/heads/master | 2020-04-03T20:43:59.859205 | 2018-10-31T12:43:11 | 2018-10-31T12:43:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 96 | py | """
Queen Mary University of London data loader
load_diseases:
load_disease_models:
"""
| [
"[email protected]"
] | |
9f7ab36b72ea976d64ec31fe193a1ffd67c51b33 | 3ced55b04ec82df5257f0e3b500fba89ddf73a8a | /src/stk/molecular/functional_groups/factories/bromo_factory.py | efd349b3d0830c3e38e99827798093df7e4a8813 | [
"MIT"
] | permissive | rdguerrerom/stk | 317282d22f5c4c99a1a8452023c490fd2f711357 | 1ac2ecbb5c9940fe49ce04cbf5603fd7538c475a | refs/heads/master | 2023-08-23T21:04:46.854062 | 2021-10-16T14:01:38 | 2021-10-16T14:01:38 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,018 | py | """
Bromo Factory
=============
"""
from __future__ import annotations
import typing
from collections import abc
from .functional_group_factory import FunctionalGroupFactory
from .utilities import get_atom_ids
from ..functional_groups import Bromo
from ...molecule import Molecule
from ...elements import Br
__all__ = (
'BromoFactory',
)
_ValidIndex = typing.Literal[0, 1]
class BromoFactory(FunctionalGroupFactory):
"""
Creates :class:`.Bromo` instances.
Creates functional groups from substructures, which match the
``[*][Br]`` functional group string.
Examples:
*Creating Functional Groups with the Factory*
You want to create a building block which has :class:`.Bromo`
functional groups. You want the atom bonded to the bromine to
be the *bonder* atom, and the bromine atom to be the *deleter*
atom.
.. testcode:: creating-functional-groups-with-the-factory
import stk
building_block = stk.BuildingBlock(
smiles='BrCCCBr',
functional_groups=(stk.BromoFactory(), ),
)
.. testcode:: creating-functional-groups-with-the-factory
:hide:
assert all(
isinstance(functional_group, stk.Bromo)
for functional_group
in building_block.get_functional_groups()
)
assert building_block.get_num_functional_groups() == 2
See Also:
:class:`.GenericFunctionalGroup`
Defines *bonders* and *deleters*.
"""
def __init__(
self,
bonders: tuple[_ValidIndex, ...] = (0, ),
deleters: tuple[_ValidIndex, ...] = (1, ),
placers: typing.Optional[tuple[_ValidIndex, ...]] = None,
) -> None:
"""
Initialize a :class:`.BromoFactory` instance.
Parameters:
bonders:
The indices of atoms in the functional group string,
which are *bonder* atoms.
deleters:
The indices of atoms in the functional group string,
which are *deleter* atoms.
placers:
The indices of atoms in the functional group string,
which are *placer* atoms. If ``None``, `bonders` will
be used.
"""
self._bonders = bonders
self._deleters = deleters
self._placers = bonders if placers is None else placers
def get_functional_groups(
self,
molecule: Molecule,
) -> abc.Iterable[Bromo]:
for atom_ids in get_atom_ids('[*][Br]', molecule):
atoms = tuple(molecule.get_atoms(atom_ids))
yield Bromo(
bromine=typing.cast(Br, atoms[1]),
atom=atoms[0],
bonders=tuple(atoms[i] for i in self._bonders),
deleters=tuple(atoms[i] for i in self._deleters),
placers=tuple(atoms[i] for i in self._placers),
)
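# A short usage sketch with non-default indices (hypothetical molecule; in the
# ``[*][Br]`` match, index 0 is the atom bonded to bromine and index 1 is the
# bromine itself):
#
#   factory = BromoFactory(bonders=(0,), deleters=(1,), placers=(0,))
#   bb = stk.BuildingBlock('BrCCBr', functional_groups=(factory,))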
| [
"[email protected]"
] | |
a9b66bfafdae81a479bdb341cbce153b8d8dec62 | 0fe394b10b39864915fcc4073a5fa050aa02502e | /SoloLearn_Project/skip.py | 15ca46a183e4985396b92c323c1ecce9196dcecc | [] | no_license | JohnAssebe/Python | 9997d47bba4a056fdcd74c6e5207fc52b002cbfd | b88a7c2472f245dc6a0e8900bbea490cb0e0beda | refs/heads/master | 2022-05-14T10:08:37.311345 | 2022-05-09T19:48:53 | 2022-05-09T19:48:53 | 212,562,910 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 613 | py | def skip_elements(elements):
# Initialize variables
new_list = []
i = 0
if len(elements)==0:
return []
# Iterate through the list
for i in range(0,len(elements),2):
# Does this element belong in the resulting list?
if elements[i] not in new_list:
# Add this element to the resulting list
new_list.append(elements[i])
return new_list
print(skip_elements(["a", "b", "c", "d", "e", "f", "g"])) # Should be ['a', 'c', 'e', 'g']
print(skip_elements(['Orange', 'Pineapple', 'Strawberry', 'Kiwi', 'Peach'])) # Should be ['Orange', 'Strawberry', 'Peach']
print(skip_elements([])) # Should be []
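# Equivalent one-liner using extended slicing (every second element):
def skip_elements_slice(elements):
    return elements[::2]

print(skip_elements_slice(["a", "b", "c", "d", "e", "f", "g"]))  # ['a', 'c', 'e', 'g']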
| [
"[email protected]"
] | |
736d83f69f045da85f0dcc4f44f46644a627a50f | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03472/s538005754.py | db417a8a26fe90ea010a6da9d14aba392ac1ffdb | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 442 | py | n,h = map(int,input().split())
katana_a = []
katana_b = []
for i in range(n):
a,b = map(int,input().split())
katana_a.append(a)
katana_b.append(b)
# best damage per swing; throwing a katana is only worth it if it beats this
amax = max(katana_a)
katana_b.sort(reverse=True)
ans = 0
k = 0
# greedily throw the strongest katanas while each throw outdamages a swing
while h > 0:
    if k == n:
        break
    if katana_b[k] > amax:
        h -= katana_b[k]
        ans += 1
        k += 1
    else:
        break
if h <= 0:
    print(ans)
else:
    # finish the remaining health with swings, rounding up
    print(ans + (h+amax-1)//amax)
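# Worked example (hypothetical input): n=1, h=10, one katana with a=3, b=5.
# amax=3; throwing (5 > 3) leaves h=5 after 1 action, and the remainder takes
# ceil(5/3)=2 swings, so the answer is 3 total actions.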
| [
"[email protected]"
] | |
db431d9e93dd7b65b8dad33e2dccb31adb4e6276 | 6c137e70bb6b1b618fbbceddaeb74416d387520f | /pyqtgraph/examples/contextMenu.py | c2c5918dbf2642bbf316f24c4f9143c973f8d93c | [
"BSD-2-Clause",
"MIT"
] | permissive | zhong-lab/code | fe497c75662f8c3b7ab3c01e7e351bff6d5e8d15 | b810362e06b44387f0768353c602ec5d29b551a2 | refs/heads/master | 2023-01-28T09:46:01.448833 | 2022-06-12T22:53:47 | 2022-06-12T22:53:47 | 184,670,765 | 2 | 7 | BSD-2-Clause | 2022-12-08T21:46:15 | 2019-05-02T23:37:39 | Python | UTF-8 | Python | false | false | 4,497 | py | # -*- coding: utf-8 -*-
"""
Demonstrates adding a custom context menu to a GraphicsItem
and extending the context menu of a ViewBox.
PyQtGraph implements a system that allows each item in a scene to implement its
own context menu, and for the menus of its parent items to be automatically
displayed as well.
"""
import initExample ## Add path to library (just for examples; you do not need this)
import pyqtgraph as pg
from pyqtgraph.Qt import QtCore, QtGui
import numpy as np
win = pg.GraphicsWindow()
win.setWindowTitle('pyqtgraph example: context menu')
view = win.addViewBox()
# add two new actions to the ViewBox context menu:
zoom1 = view.menu.addAction('Zoom to box 1')
zoom2 = view.menu.addAction('Zoom to box 2')
# define callbacks for these actions
def zoomTo1():
# note that box1 is defined below
view.autoRange(items=[box1])
zoom1.triggered.connect(zoomTo1)
def zoomTo2():
    # note that box2 is defined below
view.autoRange(items=[box2])
zoom2.triggered.connect(zoomTo2)
class MenuBox(pg.GraphicsObject):
"""
This class draws a rectangular area. Right-clicking inside the area will
raise a custom context menu which also includes the context menus of
its parents.
"""
def __init__(self, name):
self.name = name
self.pen = pg.mkPen('r')
# menu creation is deferred because it is expensive and often
# the user will never see the menu anyway.
self.menu = None
# note that the use of super() is often avoided because Qt does not
        # allow inheriting from multiple QObject subclasses.
pg.GraphicsObject.__init__(self)
# All graphics items must have paint() and boundingRect() defined.
def boundingRect(self):
return QtCore.QRectF(0, 0, 10, 10)
def paint(self, p, *args):
p.setPen(self.pen)
p.drawRect(self.boundingRect())
# On right-click, raise the context menu
def mouseClickEvent(self, ev):
if ev.button() == QtCore.Qt.RightButton:
if self.raiseContextMenu(ev):
ev.accept()
def raiseContextMenu(self, ev):
menu = self.getContextMenus()
# Let the scene add on to the end of our context menu
# (this is optional)
menu = self.scene().addParentContextMenus(self, menu, ev)
pos = ev.screenPos()
menu.popup(QtCore.QPoint(pos.x(), pos.y()))
return True
# This method will be called when this item's _children_ want to raise
# a context menu that includes their parents' menus.
def getContextMenus(self, event=None):
if self.menu is None:
self.menu = QtGui.QMenu()
self.menu.setTitle(self.name+ " options..")
green = QtGui.QAction("Turn green", self.menu)
green.triggered.connect(self.setGreen)
self.menu.addAction(green)
self.menu.green = green
blue = QtGui.QAction("Turn blue", self.menu)
blue.triggered.connect(self.setBlue)
self.menu.addAction(blue)
            self.menu.blue = blue
alpha = QtGui.QWidgetAction(self.menu)
alphaSlider = QtGui.QSlider()
alphaSlider.setOrientation(QtCore.Qt.Horizontal)
alphaSlider.setMaximum(255)
alphaSlider.setValue(255)
alphaSlider.valueChanged.connect(self.setAlpha)
alpha.setDefaultWidget(alphaSlider)
self.menu.addAction(alpha)
self.menu.alpha = alpha
self.menu.alphaSlider = alphaSlider
return self.menu
# Define context menu callbacks
def setGreen(self):
self.pen = pg.mkPen('g')
# inform Qt that this item must be redrawn.
self.update()
def setBlue(self):
self.pen = pg.mkPen('b')
self.update()
def setAlpha(self, a):
self.setOpacity(a/255.)
# This box's context menu will include the ViewBox's menu
box1 = MenuBox("Menu Box #1")
view.addItem(box1)
# This box's context menu will include both the ViewBox's menu and box1's menu
box2 = MenuBox("Menu Box #2")
box2.setParentItem(box1)
box2.setPos(5, 5)
box2.scale(0.2, 0.2)
## Start Qt event loop unless running in interactive mode or using pyside.
if __name__ == '__main__':
import sys
if (sys.flags.interactive != 1) or not hasattr(QtCore, 'PYQT_VERSION'):
QtGui.QApplication.instance().exec_()
| [
"none"
] | none |
83c644506d964c736a4d5cae65f08195d41dc016 | 6fa7f99d3d3d9b177ef01ebf9a9da4982813b7d4 | /7yo5FJX4xFbNxim5q_22.py | 9783436a95859a2a95ce2cbc4b159661968c0f8b | [] | no_license | daniel-reich/ubiquitous-fiesta | 26e80f0082f8589e51d359ce7953117a3da7d38c | 9af2700dbe59284f5697e612491499841a6c126f | refs/heads/master | 2023-04-05T06:40:37.328213 | 2021-04-06T20:17:44 | 2021-04-06T20:17:44 | 355,318,759 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 217 | py |
def harry(po):
if len(po[0])==0:
return -1
downright = sum(po[-1])+sum(po[i][0] for i in range(len(po)-1))
rightdown = sum(po[0])+sum(po[i][-1] for i in range(len(po)-1))
return max(downright,rightdown)
| [
"[email protected]"
] | |
1974a71145eabc83d069ae9eb89f4b22ccb733e3 | 6044266e775c87afed99397c8bb88366fbbca0e7 | /scrapy_projt/xpath_tutorial_1/xpath_normalize-space_span.py | a61ab6ea4dc655eeab231d5be07451831d6f7fc6 | [] | no_license | ranafge/all-documnent-projects | e4434b821354076f486639419598fd54039fb5bd | c9d65ddea291c53b8e101357547ac63a36406ed9 | refs/heads/main | 2023-05-08T20:01:20.343856 | 2021-05-30T10:44:28 | 2021-05-30T10:44:28 | 372,186,355 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,181 | py | import scrapy
html="""<form class="variants" action="/cart">
<a class="thumb fancybox image_outer" href="products/apple-iphone-5s-16gb-black--space-gray-chernyj" data-fancybox-group="gallery5">
<img src="http://first-store.ru/files/products/iphone%205S%20black_1.100x112.jpg?16ef5c4132fc88594851f92ccc2f3437" alt="Apple iPhone 5s 16GB Black & Space Gray (Чёрный)" title="Apple iPhone 5s 16GB Black & Space Gray (Чёрный)">
</a>
<h1>
<a class="name_mark" data-product="1075" href="products/apple-iphone-5s-16gb-black--space-gray-chernyj">Apple iPhone 5s 16GB Black & Space Gray (Чёрный)</a>
</h1>
<span class="price price_mark price_value">26 990 <span class="currency">руб</span>
<input id="variants_2927" name="variant" value="2927" type="radio" class="variant_radiobutton" checked="" style="display:none;">
<input class="button buy buy_button buy_button_catalog" type="submit" value="Купить" data-result-text="Добавлено">
</span>
</form>"""
data = scrapy.Selector(text=html)
print(data.xpath("span[contains(concat(' ', normalize-space(@class), ' '), ' price ')]"))
| [
"[email protected]"
] | |
c55d7e21b155df85decbb4db71b4bff34ba005ab | c80b3cc6a8a144e9858f993c10a0e11e633cb348 | /components/ally-core-http/__setup__/ally_core_http/definition_time_zone.py | c7622168d8027cd0b24ee99b10fc4017a9d64a68 | [] | no_license | cristidomsa/Ally-Py | e08d80b67ea5b39b5504f4ac048108f23445f850 | e0b3466b34d31548996d57be4a9dac134d904380 | refs/heads/master | 2021-01-18T08:41:13.140590 | 2013-11-06T09:51:56 | 2013-11-06T09:51:56 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,179 | py | '''
Created on Jul 17, 2013
@package: ally core http
@copyright: 2012 Sourcefabric o.p.s.
@license: http://www.gnu.org/licenses/gpl-3.0.txt
@author: Gabriel Nistor
Provides the time zone header definitions.
'''
from ..ally_core.definition import definitions, defin, errors, error, desc
from .definition_header import CATEGORY_HEADER, VERIFY_CATEGORY, \
updateDescriptionsForHeaders
from ally.container import ioc
from ally.core.http.spec.codes import TIME_ZONE_ERROR
from ally.core.impl.definition import Name
# --------------------------------------------------------------------
try: import pytz # @UnusedImport
except ImportError: pass
else:
from pytz import all_timezones
from ally.core.http.impl.processor.time_zone import TIME_ZONE, CONTENT_TIME_ZONE
from .processor_time_zone import default_time_zone
# --------------------------------------------------------------------
VERIFY_TIME_ZONE = Name(TIME_ZONE.name) & VERIFY_CATEGORY
VERIFY_CONTENT_TIME_ZONE = Name(CONTENT_TIME_ZONE.name) & VERIFY_CATEGORY
# --------------------------------------------------------------------
@ioc.before(definitions)
def updateDefinitionsForTimeZone():
defin(category=CATEGORY_HEADER, name=TIME_ZONE.name)
defin(category=CATEGORY_HEADER, name=CONTENT_TIME_ZONE.name)
@ioc.before(errors)
def updateDefinitionErrorForTimeZone():
error(TIME_ZONE_ERROR.code, VERIFY_TIME_ZONE | VERIFY_CONTENT_TIME_ZONE, 'The time zone headers')
@ioc.before(updateDescriptionsForHeaders)
def updateDescriptionsForTimeZone():
    # keep the sample short: take the first time zone for each leading character
    sample, curr = [], None
for tz in all_timezones:
if curr != tz[:1]:
sample.append(tz)
curr = tz[:1]
# This is based on @see: updateDefinitionsForTimeZone().
desc(Name(TIME_ZONE.name),
'the time zone to render the time stamps in, as an example:\n%(sample)s',
'the default time zone is %(default)s', sample=sample, default=default_time_zone())
desc(Name(CONTENT_TIME_ZONE.name),
'same as \'%(name)s\' but for parsed content', name=TIME_ZONE.name)
| [
"[email protected]"
] | |
d0c4d0d97f9a1c42d2bc97257e59f28c94e033e4 | 6f56dbc188abcc8156eb7dae625243192516675b | /python/jittor/test/test_lazy_execution.py | ce276b4798848f4d6ea6f3e482b3ff93859baa1f | [
"Apache-2.0"
] | permissive | linker666/jittor | 80e03d2e8dec91bb69d4d6f7b0d222bfbf6c750f | 96545765eca7364ec4938e1fa756bce4cb84dfb8 | refs/heads/master | 2023-02-09T21:28:48.706061 | 2021-01-05T15:08:33 | 2021-01-05T15:08:33 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,315 | py | # ***************************************************************
# Copyright (c) 2020 Jittor. All Rights Reserved.
# Maintainers:
# Meng-Hao Guo <[email protected]>
# Dun Liang <[email protected]>.
#
#
# This file is subject to the terms and conditions defined in
# file 'LICENSE.txt', which is part of this source code package.
# ***************************************************************
import jittor as jt
import unittest
import sys, os
from subprocess import getoutput
class TestLazyExecution(unittest.TestCase):
@unittest.skipIf(not jt.has_cuda, "No cuda found")
def test_lazy_execution(self):
code = """
import jittor as jt
jt.flags.use_cuda = 1
a = jt.zeros(1)
b = jt.code([1], a.dtype, [a],
cuda_header='''
#include <assert.h>
''',
cuda_src='''
__global__ void kernel(float32* a, float32* b) {
b[0] = a[0];
assert(a[0] == 1);
}
kernel<<<1,1>>>(in0_p, out0_p);
''')
c = a+b
print(c)
"""
fpath = os.path.join(jt.flags.cache_path, "lazy_error.py")
with open(fpath, 'w') as f:
f.write(code)
res = getoutput(f"{sys.executable} {fpath}")
assert 'print(c)' in res
res = getoutput(f"lazy_execution=0 {sys.executable} {fpath}")
assert "''')" in res
if __name__ == "__main__":
unittest.main()
| [
"[email protected]"
] | |
f663b36df0521678d0c0adddcc3dfdefd10e7c2d | 9fb0162a88190f20d84b2ffd0b505c0a9fff4e19 | /draughtcraft/tests/controllers/test_signup.py | 981bb2b6cc8b60638bd5fc27f842cdf91e271e06 | [] | no_license | johnmontero/draughtcraft | 3414924e7d0f659f6c3c6aaab47507f3fc0e7818 | eb48473f880555119fe9497ed3db23e8a9f5c5e9 | refs/heads/master | 2021-01-15T18:36:34.137605 | 2011-11-27T22:01:24 | 2011-11-27T22:01:24 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,998 | py | from pecan import conf, abort
from draughtcraft.tests import TestApp
from draughtcraft import model
import fudge
class TestSignup(TestApp):
def test_signup_form(self):
assert model.User.query.count() == 0
self.get('/signup')
assert model.User.query.count() == 0
def test_schema_validation(self):
params = {
'username' : 'testing',
'password' : 'secret',
'password_confirm' : 'secret',
'email' : '[email protected]'
}
for k in params:
copy = params.copy()
del copy[k]
response = self.post('/signup/', params=copy)
assert response.status_int == 200
assert 'validation_errors' in response.request.pecan
assert model.User.query.count() == 0
@fudge.patch('draughtcraft.lib.email.send')
def test_successful_signup(self, fake_send):
(fake_send.expects_call().with_args(
'[email protected]',
'signup',
'Welcome to DraughtCraft',
{'username':'test'},
bcc = [conf.signups.bcc]
))
params = {
'username' : 'test',
'password' : 'secret',
'password_confirm' : 'secret',
'email' : '[email protected]'
}
assert model.User.query.count() == 0
self.post('/signup/', params=params)
assert model.User.query.count() == 1
user = model.User.get(1)
assert user.username == 'test'
assert user.password
assert user.email == '[email protected]'
@fudge.patch('draughtcraft.lib.email.send', 'pecan.redirect')
def test_signup_failure(self, fake_send, fake_redirect):
"""
If signup fails for some reason (a traceback is thrown, for instance)
a welcome email should *not* be queued through Postmark.
"""
#
# Cause the redirect at the end of the signup method to fail and throw
# an HTTP 500 for some reason.
#
(fake_redirect.expects_call().with_args(
'/login?welcome'
).calls(lambda code: abort(500)))
# Make sure that emaillib.send() is not called.
(fake_send.is_callable().times_called(0))
params = {
'username' : 'test',
'password' : 'secret',
'password_confirm' : 'secret',
'email' : '[email protected]'
}
assert model.User.query.count() == 0
self.post('/signup/', params=params, status=500)
assert model.User.query.count() == 0
def test_username_length(self):
"""
Usernames should be >= 4 chars
"""
params = {
'username' : 'test',
'password' : 'secret',
'password_confirm' : 'secret',
'email' : '[email protected]'
}
assert model.User.query.count() == 0
self.post('/signup/', params=params)
assert model.User.query.count() == 1
params = {
'username' : 'tes',
'password' : 'secret',
'password_confirm' : 'secret',
'email' : '[email protected]'
}
self.post('/signup/', params=params)
assert model.User.query.count() == 1
def test_username_uniqueness(self):
"""
Usernames should be globally unique
"""
params = {
'username' : 'test',
'password' : 'secret',
'password_confirm' : 'secret',
'email' : '[email protected]'
}
assert model.User.query.count() == 0
self.post('/signup/', params=params)
assert model.User.query.count() == 1
params = {
'username' : 'test',
'password' : 'secret',
'password_confirm' : 'secret',
'email' : '[email protected]'
}
self.post('/signup/', params=params)
assert model.User.query.count() == 1
def test_username_regex_format(self):
"""
Usernames should only contain numbers, letters, and underscores
"""
params = {
'username' : 'testing_023456789_TESTING_',
'password' : 'secret',
'password_confirm' : 'secret',
'email' : '[email protected]'
}
assert model.User.query.count() == 0
self.post('/signup/', params=params)
assert model.User.query.count() == 1
for username in [
'testing_023456789_TESTING_ ',
'testing_023456789_TESTING_-',
'testing_023456789_TESTING_?',
'testing_023456789_TESTING_!',
'testing_023456789_TESTING_$',
]:
params = {
'username' : username,
'password' : 'secret',
'password_confirm' : 'secret',
'email' : '[email protected]'
}
self.post('/signup/', params=params)
assert model.User.query.count() == 1
def test_password_match(self):
"""
Passwords should match exactly
"""
params = {
'username' : 'test',
'password' : 'secret',
'password_confirm' : 'secret2',
'email' : '[email protected]'
}
assert model.User.query.count() == 0
self.post('/signup/', params=params)
assert model.User.query.count() == 0
def test_password_length(self):
"""
Passwords should be at least 4 characters in length.
"""
params = {
'username' : 'test',
'password' : 'foo',
'password_confirm' : 'foo',
'email' : '[email protected]'
}
assert model.User.query.count() == 0
self.post('/signup/', params=params)
assert model.User.query.count() == 0
def test_invalid_email(self):
"""
Emails should be valid email addresses
"""
for email in [
'ryan',
'ryan@',
'ryan@example',
'ryan@example.',
'[email protected]',
]:
params = {
'username' : 'test',
'password' : 'secret',
'password_confirm' : 'secret',
'email' : email
}
assert model.User.query.count() == 0
self.post('/signup/', params=params)
assert model.User.query.count() == 0
def test_email_uniqueness(self):
"""
Emails should be globally unique
"""
params = {
'username' : 'test',
'password' : 'secret',
'password_confirm' : 'secret',
'email' : '[email protected]'
}
assert model.User.query.count() == 0
self.post('/signup/', params=params)
assert model.User.query.count() == 1
params = {
'username' : 'testing',
'password' : 'secret',
'password_confirm' : 'secret',
'email' : '[email protected]'
}
self.post('/signup/', params=params)
assert model.User.query.count() == 1
class TestRecipeConversion(TestApp):
def test_trial_recipe_conversion(self):
"""
Create a recipe as a guest.
After signup, the recipe should belong to the newly created user.
"""
params = {
'name' : 'Rocky Mountain River IPA',
'type' : 'MASH',
'volume' : 25,
'unit' : 'GALLON'
}
self.post('/recipes/create', params=params)
assert model.Recipe.query.count() == 1
        assert model.Recipe.get(1).author is None
params = {
'username' : 'test',
'password' : 'secret',
'password_confirm' : 'secret',
'email' : '[email protected]'
}
assert model.User.query.count() == 0
response = self.post('/signup/', params=params)
assert model.User.query.count() == 1
user = model.User.get(1)
assert user.username == 'test'
assert user.password
assert user.email == '[email protected]'
#
# The recipe should have been attached to the new user, and the
# `trial_recipe_id` record should have been removed from the session.
#
assert len(user.recipes) == 1
assert 'trial_recipe_id' not in response.environ['beaker.session']
| [
"[email protected]"
] | |
a42e0f404a06eceeff048e1750ca7e2890973dfc | 7da3fe4ea12be962b574c8be63c35014df0d2faf | /facade.py | 7f78edabd4455cfb95a3de2bfdde477d571562e5 | [] | no_license | hanmiton/patronesDise-o | 2a4b581fc90a512bf7db26f728a17ce0f48eef83 | 523d993dfc60318e4af4a4dbc7fa236b9ae0bc94 | refs/heads/master | 2020-04-18T16:54:08.084782 | 2016-09-01T06:16:29 | 2016-09-01T06:16:29 | 66,985,551 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 471 | py | class Scanner:
def __init__(self):
self.name = "Scanner"
class Parser:
def __init__(self):
self.name = "Parser"
# Facade: exposes a single compile() entry point over the subsystems
class Compiler:
def __init__(self):
self.name = "Compiler"
self.scanner = Scanner()
self.parser = Parser()
def compile(self):
print("Compiling ...")
print("Scanning %s" % self.scanner.name)
print("Parsing %s" % self.parser.name)
if __name__ == "__main__":
compiler = Compiler()
compiler.compile()
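# The point of the facade: adding a subsystem only touches Compiler, not its
# callers. A hypothetical extension sketch:
#
#   class Linker:
#       def __init__(self):
#           self.name = "Linker"
#
#   # inside Compiler.__init__:  self.linker = Linker()
#   # inside Compiler.compile(): print("Linking %s" % self.linker.name)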
| [
"[email protected]"
] | |
c0474becc0c7f964e582da615406555736dbaf11 | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/17/usersdata/132/6773/submittedfiles/lecker.py | 4b0c09bdee76966f8925264740fabe32156d6d34 | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 403 | py | # -*- coding: utf-8 -*-
from __future__ import division
import math
a= input(' enter a value:')
b= input(' enter a value:')
c= input(' enter a value:')
d= input(' enter a value:')
if a>b>c>d:
    print('S')
elif a>b>c<d:
    print('N')
elif a>b<c>d:
    print('N')
elif a>b<c<d:
    print('N')
elif a<b>c>d:
    print('S')
elif a<b>c<d:
    print('N')
elif a<b<c>d:
    print('S')
elif a<b<c<d:
print('S') | [
"[email protected]"
] | |
570b3e7425911bb1015aa72da24b6775e9a9cf9f | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03030/s750969702.py | 3af20d6d293086c39ad8ce1585af7340da080628 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 159 | py | n = int(input())
lst = []
for i in range(n):
s, p = input().split()
lst.append([s, -(int(p)), i+1])
lst.sort()
for i in range(n):
print(lst[i][2])
| [
"[email protected]"
] | |
050b92002ed36ae06083ba938f6d02ed2827a17f | 5ecaded45e28c1041c1986c13db446806a28b3ee | /function-arguments/learn-python-function-arguments/positional-argument-unpacking.py | 15f83e9c8a7c4b9fe6c11866c9e470762c69f06a | [] | no_license | 109658067/Python3_Codecademy | 12206ec74e8dc95cc1200491b4ed75b856bfb25e | 8480912c6dd15649b3c51f4c205afdd253ea462b | refs/heads/master | 2022-09-15T18:21:26.742741 | 2020-06-04T05:48:58 | 2020-06-04T05:48:58 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 423 | py | from os.path import join
path_segment_1 = "/Home/User"
path_segment_2 = "Codecademy/videos"
path_segment_3 = "cat_videos/surprised_cat.mp4"
# join all three of the paths here!
print(join(path_segment_1, path_segment_2, path_segment_3))
def myjoin(*args):
joined_string = args[0]
for arg in args[1:]:
joined_string += '/' + arg
return joined_string
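# Unlike myjoin, os.path.join restarts at any absolute segment (POSIX), as
# shown by the comparison below:
print(join("a", "/b"))    # '/b'   -- the absolute segment discards what came before
print(myjoin("a", "/b"))  # 'a//b' -- naive concatenation keeps both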
print(myjoin(path_segment_1, path_segment_2, path_segment_3)) | [
"[email protected]"
] | |
a506c46467e4562eaec2431f7bc7348fc991d68b | a4e41b84931ba69d7d8548a7df0ca4fe68ed02b5 | /view/customer_test.py | 6ab1159cf42343e130a8f828604dc27e840080ab | [] | no_license | LittleDeveloper-CSharp/typography_rmp | b05869b735df1c72c6d0c11addb6b1c68fda62f3 | 6a0e50f7ffbdc51b609761be3bf23920721682b3 | refs/heads/master | 2023-04-13T21:36:22.589576 | 2021-04-25T20:05:52 | 2021-04-25T20:05:52 | 361,526,716 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,927 | py | import models.customer
from tkinter import Entry, END, Button, Label, Frame, VERTICAL
from tkinter.ttk import Treeview, Scrollbar
class CustomerWindow:
def fill_tree_view(self):
self.list_customer = models.customer.select_customer()
for item in self.list_customer:
self.tree_view_customer.insert("", "end", values=item)
def filter_by_name(self, event, name):
self.final_filter()
def filter_by_patronymic(self, event, patronymic):
self.final_filter()
def filter_by_last_name(self, event, last_name):
self.final_filter()
@staticmethod
def final_filter():
        # TODO: combine the name/patronymic/last-name filters and refresh the tree view
        pass
def accept_action(self):
action = self.bt_accept['text']
if action == "Добавить":
models.customer.insert_customer(list())
else:
models.customer.update_customer(self.index_customer, list())
self.fill_tree_view()
def delete_action(self):
models.customer.delete_customer(self.index_customer)
self.fill_tree_view()
def __init__(self, master):
self.master = master
self.frame = Frame(self.master)
self.index_customer = 0
self.list_customer = ()
self.frame_output_info = Frame()
self.frame_first_name_search = Frame(self.frame_output_info)
self.first_name_search_entry = Entry(self.frame_output_info)
self.first_name_search_entry.grid(row=0, column=0)
self.columns = ("1", "2", "3", "4", "5", "6")
self.tree_view_customer = Treeview(self.frame_output_info, show="headings", columns=self.columns,
displaycolumns=("2", "3", "4", "5", "6"))
self.tree_view_customer.heading("2", text="Фамилия")
self.tree_view_customer.heading("3", text="Имя")
self.tree_view_customer.heading("4", text="Отчество")
self.tree_view_customer.heading("5", text="Адрес")
self.tree_view_customer.heading("6", text="Телефон")
self.ysb = Scrollbar(orient=VERTICAL, command=self.tree_view_customer.yview)
self.tree_view_customer.config(yscroll=self.ysb.set)
self.tree_view_customer.grid(column=0)
self.frame_output_info.grid(row=0, column=0)
self.frame_for_add = Frame()
self.frame_last_name = Frame(self.frame_for_add)
        Label(self.frame_last_name, text="Last name").grid(row=0)
self.last_name_entry = Entry(self.frame_last_name)
self.last_name_entry.grid(row=1)
self.frame_last_name.grid(row=0)
self.frame_first_name = Frame(self.frame_for_add)
        Label(self.frame_first_name, text="First name").grid(row=0)
self.first_name_entry = Entry(self.frame_first_name)
self.first_name_entry.grid(row=1)
self.frame_first_name.grid(row=1)
self.frame_patronymic = Frame(self.frame_for_add)
        Label(self.frame_patronymic, text="Patronymic").grid(row=0)
self.patronymic_entry = Entry(self.frame_patronymic)
self.patronymic_entry.grid(row=1)
self.frame_patronymic.grid(row=2)
self.frame_address = Frame(self.frame_for_add)
        Label(self.frame_address, text="Address").grid(row=0)
self.address_entry = Entry(self.frame_address)
self.address_entry.grid(row=1)
self.frame_address.grid(row=3)
self.frame_phone = Frame(self.frame_for_add)
        Label(self.frame_phone, text="Phone").grid(row=0)
self.phone_entry = Entry(self.frame_phone)
self.phone_entry.grid(row=1)
self.frame_phone.grid(row=4)
        self.bt_accept = Button(self.frame_for_add, text="Add")
self.bt_accept.grid(row=5)
        Button(self.frame_for_add, text="Delete").grid(row=6)
self.frame_for_add.grid(row=0, column=1)
self.fill_tree_view()
| [
"test"
] | test |
c0f9dbecd67de39faf7a200cd391cce07c3eb470 | c80e4dea4548de89d32f2abd6ca58812670ecc7b | /scripts/regal/RegalDispatch.py | 49aa3451931469377a6fd4c1f560c3bfcb014de4 | [
"Unlicense",
"MIT",
"LicenseRef-scancode-glut",
"BSD-3-Clause",
"SGI-B-2.0",
"LicenseRef-scancode-public-domain",
"BSD-2-Clause"
] | permissive | chinmaygarde/regal | 971699cc8d991633b7257ce9ced2b4d65dd6d9b2 | db0832075bd78241afe003b9c1b8f6ac0051370b | refs/heads/master | 2021-01-17T04:42:10.354459 | 2012-10-10T09:08:13 | 2012-10-10T09:08:13 | 6,150,566 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,311 | py | #!/usr/bin/python -B
from string import Template, upper, replace
from ApiUtil import outputCode
from ApiUtil import typeIsVoid
from ApiCodeGen import *
from RegalDispatchLog import apiDispatchFuncInitCode
from RegalDispatchEmu import dispatchSourceTemplate
from RegalContextInfo import cond
##############################################################################################
def apiGlobalDispatchTableDefineCode(apis, args):
categoryPrev = None
code = ''
code += 'struct DispatchTableGlobal {\n'
code += '\n'
code += ' DispatchTableGlobal();\n'
code += ' ~DispatchTableGlobal();\n'
for api in apis:
code += '\n'
if api.name in cond:
code += '#if %s\n' % cond[api.name]
for function in api.functions:
if function.needsContext:
continue
if getattr(function,'regalOnly',False)==True:
continue
name = function.name
params = paramsDefaultCode(function.parameters, True)
rType = typeCode(function.ret.type)
category = getattr(function, 'category', None)
version = getattr(function, 'version', None)
if category:
category = category.replace('_DEPRECATED', '')
elif version:
category = version.replace('.', '_')
category = 'GL_VERSION_' + category
# Close prev if block.
if categoryPrev and not (category == categoryPrev):
code += '\n'
# Begin new if block.
if category and not (category == categoryPrev):
code += ' // %s\n\n' % category
code += ' %s(REGAL_CALL *%s)(%s);\n' % (rType, name, params)
categoryPrev = category
if api.name in cond:
code += '#endif // %s\n' % cond[api.name]
code += '\n'
# Close pending if block.
if categoryPrev:
code += '\n'
code += '};\n'
return code
def apiDispatchTableDefineCode(apis, args):
categoryPrev = None
code = ''
code += 'struct DispatchTable {\n'
code += '\n'
code += ' bool _enabled;\n'
code += ' DispatchTable *_prev;\n'
code += ' DispatchTable *_next;\n'
code += '''
// Lookup a function pointer from the table,
// or deeper in the stack as necessary.
template<typename T>
T call(T *func)
{
RegalAssert(func);
if (_enabled && *func)
return *func;
DispatchTable *i = this;
RegalAssert(i);
RegalAssert(reinterpret_cast<void *>(func)>=reinterpret_cast<void *>(i));
RegalAssert(reinterpret_cast<void *>(func)< reinterpret_cast<void *>(i+1));
std::size_t offset = reinterpret_cast<char *>(func) - reinterpret_cast<char *>(i);
T f = *func;
// Step down the stack for the first available function in an enabled table
while (!f || !i->_enabled)
{
// Find the next enabled dispatch table
for (i = i->_next; !i->_enabled; i = i->_next) { RegalAssert(i); }
// Get the function pointer
RegalAssert(i);
RegalAssert(i->_enabled);
f = *reinterpret_cast<T *>(reinterpret_cast<char *>(i)+offset);
}
return f;
}
'''
for api in apis:
code += '\n'
if api.name in cond:
code += '#if %s\n' % cond[api.name]
for function in api.functions:
if not function.needsContext:
continue
if getattr(function,'regalOnly',False)==True:
continue
name = function.name
params = paramsDefaultCode(function.parameters, True)
rType = typeCode(function.ret.type)
category = getattr(function, 'category', None)
version = getattr(function, 'version', None)
if category:
category = category.replace('_DEPRECATED', '')
elif version:
category = version.replace('.', '_')
category = 'GL_VERSION_' + category
# Close prev if block.
if categoryPrev and not (category == categoryPrev):
code += '\n'
# Begin new if block.
if category and not (category == categoryPrev):
code += ' // %s\n\n' % category
code += ' %s(REGAL_CALL *%s)(%s);\n' % (rType, name, params)
categoryPrev = category
if api.name in cond:
code += '#endif // %s\n' % cond[api.name]
code += '\n'
# Close pending if block.
if categoryPrev:
code += '\n'
code += '};\n'
return code
dispatchHeaderTemplate = Template( '''${AUTOGENERATED}
${LICENSE}
#ifndef __${HEADER_NAME}_H__
#define __${HEADER_NAME}_H__
#include "RegalUtil.h"
REGAL_GLOBAL_BEGIN
#include <GL/Regal.h>
REGAL_GLOBAL_END
REGAL_NAMESPACE_BEGIN
${API_GLOBAL_DISPATCH_TABLE_DEFINE}
extern DispatchTableGlobal dispatchTableGlobal;
${API_DISPATCH_TABLE_DEFINE}
REGAL_NAMESPACE_END
#endif // __${HEADER_NAME}_H__
''')
def generateDispatchHeader(apis, args):
globalDispatchTableDefine = apiGlobalDispatchTableDefineCode( apis, args )
dispatchTableDefine = apiDispatchTableDefineCode(apis, args)
# Output
substitute = {}
substitute['LICENSE'] = args.license
substitute['AUTOGENERATED'] = args.generated
substitute['COPYRIGHT'] = args.copyright
substitute['HEADER_NAME'] = 'REGAL_DISPATCH'
substitute['API_GLOBAL_DISPATCH_TABLE_DEFINE'] = globalDispatchTableDefine
substitute['API_DISPATCH_TABLE_DEFINE'] = dispatchTableDefine
outputCode( '%s/RegalDispatch.h' % args.outdir, dispatchHeaderTemplate.substitute(substitute))
| [
"[email protected]"
] | |
dc55651adbffad521879c30bf2544d1f19ac7c98 | 472370808bd279442f25b1bb96c20e8a164d3b06 | /train_k7.py | 772ef38767cbcb6b81ba9125cae686aaaed5a87c | [] | no_license | Qidian213/Emotion_challenge | d2983a078aa6c0ff76d052d0120acc9f387ecb6d | b214c532a4b079d6654507d2865ec65336ead65e | refs/heads/master | 2021-08-28T20:58:45.547325 | 2021-08-18T11:10:46 | 2021-08-18T11:10:46 | 246,117,379 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,430 | py | import os
import logging
import torch
import torch.optim
import numpy as np
from torch.optim import lr_scheduler
from datasets import make_dataloader,RandomSampler,train_collate
from torch.utils.data import DataLoader
import torch.nn.functional as F
from torch.nn.parallel.data_parallel import data_parallel
from model import Baseline
import pandas as pd
from collections import defaultdict
os.environ['CUDA_VISIBLE_DEVICES'] = '7'
kd_id = 7
kd_num = 10
batch_size = 64
instance_num = 2
tr_w = 0.5  # weight of the triplet-loss term
#device = torch.device("cuda:1,2" if torch.cuda.is_available() else "cpu")
ind2label = [ 'N_N', '1_1', '1_2', '1_3', '1_4', '1_5', '1_6', '1_7',
'2_1', '2_2', '2_3', '2_4', '2_5', '2_6', '2_7',
'3_1', '3_2', '3_3', '3_4', '3_5', '3_6', '3_7',
'4_1', '4_2', '4_3', '4_4', '4_5', '4_6', '4_7',
'5_1', '5_2', '5_3', '5_4', '5_5', '5_6', '5_7',
'6_1', '6_2', '6_3', '6_4', '6_5', '6_6', '6_7',
'7_1', '7_2', '7_3', '7_4', '7_5', '7_6', '7_7' ]
def adjust_lr(optimizer, epoch):
for param_group in optimizer.param_groups:
if epoch < 2:
param_group['lr'] = 0.00001
elif epoch < 20:
param_group['lr'] = 0.0001
else:
param_group['lr'] = param_group['lr'] * 0.95
print('Adjust learning rate: {}'.format(param_group['lr']))
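# The same warmup-then-decay schedule could be expressed with a LambdaLR
# (sketch; a base_lr of 0.0001 and the multipliers below are assumptions drawn
# from the branches above):
#
#   def lr_lambda(epoch):
#       if epoch < 2:
#           return 0.1           # 0.00001 / 0.0001
#       if epoch < 20:
#           return 1.0
#       return 0.95 ** (epoch - 19)
#
#   scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda)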
def train_fuc(model, epoch):
model = model.train()
step_loss = 0
correct = 0
num_all = 0
    for step, (images, lms, labels) in enumerate(train_loader):
images = images.cuda()
labels = labels.cuda()
lms = lms.cuda()
prds,feat = model(images, lms)
# loss = model.criterion(prds, labels) + tr_w*model.triplet(feat, labels)
loss = model.xent(prds, labels) + tr_w*model.triplet(feat, labels)
optimizer.zero_grad()
loss.backward()
optimizer.step()
prediction = torch.argmax(prds.detach(), 1)
correct += (prediction == labels).sum().float()
num_all += len(labels)
step_loss += loss
if(step%20 == 0):
print('[{}/{}/{}], Loss:{}, Acc: {}'.format(step, train_length, epoch,
'%.5f' % (step_loss/20), '%.5f' % (correct/num_all)))
step_loss = 0
correct = 0
num_all = 0
print('--------------------------------------------------------------------')
def val_fuc(model, epoch):
model = model.eval()
correct = 0
num_all = 0
with torch.no_grad():
result = defaultdict(list)
        for step, (images, lms, labels) in enumerate(val_loader):
images = images.cuda()
labels = labels.cuda()
lms = lms.cuda()
prds,_ = model(images, lms)
prediction = torch.argmax(prds.detach(), 1)
correct += (prediction == labels).sum().float()
num_all += len(labels)
            prds = F.softmax(prds, dim=1)
prds = prds.cpu().numpy()
for pred, label in zip(prds, labels):
pred = list(pred)
for ind, prd in enumerate(pred):
result[ind2label[ind]].append(prd)
result['gt'].append(ind2label[label])
dataframe = pd.DataFrame(result)
dataframe.to_csv("models/val_" + str(kd_id) + '_' + model_name + '_' + '%.5f' % (correct/num_all) +".csv",index=False,sep=',')
print('Epoch: {}, Val_Acc: {}'.format(epoch, '%.5f' % (correct/num_all)))
print('--------------------------------------------------------------------')
return correct/num_all
# model_name = 'mobilfacenet'
# model_path='model_mobilefacenet.pth'
# model_name = 'model_ir_se50'
# model_path='model_ir_se50.pth'
# model_name = 'resnet50_ibn_a'
# model_path = 'resnet50_ibn_a.pth.tar'
# model_name = 'se_resnet50'
# model_path = 'se_resnet50-ce0d4300.pth'
model_name = 'AlexNet'
model_path='model_mobilefacenet.pth'
# model_name = 'MiniXception'
# model_path = ' '
# model_name = 'ConvNet'
# model_path = ' '
# model_name = 'MixNet'
# model_path = ' '
model = Baseline(model='train',model_name = model_name, model_path=model_path)
#model.load_param('models/model_1_180000.pth')
model = model.cuda()
optimizer = torch.optim.Adam(model.parameters(), lr=1e-4)
#exp_lr_scheduler = lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.1)
# kd_id = 0
# kd_num = 7
# batch_size = 48
# instance_num = 1
train_data, val_data, trains, vals = make_dataloader(kd_id,kd_num)
train_loader = DataLoader(dataset=train_data, batch_size=batch_size, sampler=RandomSampler(trains, batch_size, instance_num), shuffle=False, num_workers=2, collate_fn=train_collate)
#train_loader = DataLoader(dataset=train_data, batch_size=48, shuffle=False, num_workers=2, collate_fn=train_collate)
val_loader = DataLoader(dataset=val_data, batch_size=64, shuffle=False, num_workers=2, collate_fn=train_collate )
train_length = len(train_loader)
val_length = len(val_loader)
if __name__ == '__main__':
max_epoch = 50
max_val_acc = 0
for epoch in range(0,max_epoch):
adjust_lr(optimizer, epoch)
train_fuc(model, epoch)
val_acc = val_fuc(model, epoch)
torch.save(model.state_dict(), 'models/'+ str(kd_id)+'_'+ model_name + '_'+ '%.5f'%(val_acc) +'_'+ str(epoch) +'.pth') | [
"[email protected]"
] | |
0003c2297c1495ccc44a04b288a33319f48c4b73 | 52a3beeb07ad326115084a47a9e698efbaec054b | /horizon-manila-lease.py | 1581d1595db3ca80cdc426413cd5941385cba107 | [] | no_license | bopopescu/sample_scripts | 3dade0710ecdc8f9251dc60164747830f8de6877 | f9edce63c0a4d636f672702153662bd77bfd400d | refs/heads/master | 2022-11-17T19:19:34.210886 | 2018-06-11T04:14:27 | 2018-06-11T04:14:27 | 282,088,840 | 0 | 0 | null | 2020-07-24T00:57:31 | 2020-07-24T00:57:31 | null | UTF-8 | Python | false | false | 19,983 | py | # Copyright 2012 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2012 OpenStack Foundation
# Copyright 2012 Nebula, Inc.
# Copyright (c) 2012 X.commerce, a business unit of eBay Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import
import logging
from django.conf import settings
from django.utils.functional import cached_property # noqa
from django.utils.translation import ugettext_lazy as _
import six
import pymysql
import json
from datetime import datetime
from pytz import timezone
import pytz
import smtplib
import socket
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from email.mime.image import MIMEImage
import os
def notify(receiver, vmname, flavor, whattodo,ipaddressa,submitter):
me = "[email protected]"
msg = MIMEMultipart('related')
msgAlternative = MIMEMultipart('alternative')
receiversplit = receiver.split("@")
    # fall back to the admin mailbox when the receiver is not a valid address
    if len(receiversplit) != 2:
        receiver = "[email protected]"
msg['From'] = me
you = receiver;
msg['To'] = receiver;
msg.preamble = 'This is a multi-part message in MIME format.'
text = "Hi!"
font = "<font face=verdana size=2>"
if whattodo == "createrequeststart":
msg['Subject'] = "Your request for virtual Machine " + vmname + "" + " is submitted"
html1 = "<html><head></head><body>" + font + "<b>VM Name : </b>" + vmname + "<br><b>Flavor : </b>" + flavor + "<br><br> <b>Thanks and Regards</b><br>Expostack Administrator"
if whattodo == "deleterequeststart":
msg['Subject'] = "Your request for virtual Machine " + vmname + "" + " destroy has been submitted"
html1 = "<html><head></head><body>" + font + "<b>VM Name : </b>" + vmname + "<br><b>Submitted By</b> :" + submitter + "<br><b> Thanks and Regards</b><br>Expostack Administrator"
if whattodo == "createvolumerequeststart":
msg['Subject'] = "Your request for volume " + vmname + "" + " is submitted"
html1 = "<html><head></head><body>" + font + "<b>Volume Name : </b>" + vmname + "<br><b>Size : </b>" + flavor + " GB<br><br> <b>Thanks and Regards</b><br>Expostack Administrator"
if whattodo == "deletevolumerequeststart":
msg['Subject'] = "Your request for Volume " + vmname + "" + " destroy has been submitted"
html1 = "<html><head></head><body>" + font + "<b>Volume Name : </b>" + vmname + "<br><b>Submitted By</b> :" + submitter + "<br><b> Thanks and Regards</b><br>Expostack Administrator"
if whattodo == "createsharerequeststart":
msg['Subject'] = "Your request for share " + vmname + "" + " is submitted"
html1 = "<html><head></head><body>" + font + "<b>Share Name : </b>" + vmname + "<br><b>Size : </b>" + flavor + " GB<br><br> <b>Thanks and Regards</b><br>Expostack Administrator"
if whattodo == "deletesharerequeststart":
msg['Subject'] = "Your request for share " + vmname + "" + " destroy has been submitted"
html1 = "<html><head></head><body>" + font + "<b>Share Name : </b>" + vmname + "<br><b>Submitted By</b> :" + submitter + "<br><b> Thanks and Regards</b><br>Expostack Administrator"
htmlfinal = []
htmlb = ''.join(htmlfinal)
html = ''.join(str(x) for x in (html1,htmlb))
part1 = MIMEText(text, 'plain')
part2 = MIMEText(html, 'html')
msg.attach(msgAlternative)
msgAlternative.attach(part1)
msgAlternative.attach(part2)
s = smtplib.SMTP('mail.mydomain.com')
s.sendmail(me, you, msg.as_string())
s.quit()
def id_to_ownername(controllername, ownerid):
db = pymysql.connect(host=controllername, port=3306, user='expostack', passwd='XXXXXX', db='expo_keystone')
cursor = db.cursor()
sql = "select local_id from id_mapping where public_id='"+ownerid+"'"
cursor.execute(sql)
result = cursor.fetchall()
for rows in result:
ownername = rows[0]
return ownername
def lease_project_verify(tenant_id, controllername):
db = pymysql.connect(host=controllername, port=3306, user='expostack', passwd='XXXXXX', db='expo_keystone')
cursor = db.cursor()
cursor.execute("select d.local_id, e.name, f.name from assignment c, id_mapping d, project e, role f where c.actor_id=d.public_id and d.local_id='[email protected]' and c.target_id=e.id and e.id='" + tenant_id + "' and c.role_id=f.id and f.name='admin'")
if cursor.rowcount == 0:
return False
else:
return True
def check_instance_owner(controllername, instancename):
db = pymysql.connect(host=controllername, port=3306, user='expostack', passwd='XXXXXX', db='expo_nova')
cursor = db.cursor()
sql = "select owner from lease_active_vms where hostname='" + instancename + "'"
cursor.execute(sql)
resultnew = cursor.fetchall()
for rows in resultnew:
owner = rows[0]
return owner
def check_instance_lease(controllername, instancename , instancetenantid):
db = pymysql.connect(host=controllername, port=3306, user='expostack', passwd='XXXXXX', db='expo_nova')
cursor = db.cursor()
date_formatv='%Y-%m-%d %H:%M:%S'
datev = datetime.now(tz=pytz.utc)
currentdatev = datetime.strftime(datev, date_formatv);
sql = "select case requesttype when 'days' then DATE_ADD(created_on,INTERVAL leasedays DAY) when 'hours' then DATE_ADD(created_on,INTERVAL leasedays HOUR) end as leasehowmuch ,case requesttype when 'days' then timestampdiff(MINUTE,'"+ currentdatev +"',DATE_ADD(created_on,INTERVAL leasedays DAY)) " + "when 'hours' then timestampdiff(MINUTE,'"+ currentdatev +"',DATE_ADD(created_on,INTERVAL leasedays HOUR)) end as jordan from lease_active_vms where hostname='" + instancename + "'"
cursor.execute(sql)
resultnew = cursor.fetchall()
for rows in resultnew:
if rows[1] < 0:
leaseexpire = "Expired";
else:
leaseexpire = rows[0]
if lease_project_verify(instancetenantid, controllername) == False:
leaseexpire = "Never"
return leaseexpire
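# The SQL above pushes the lease arithmetic into MySQL; the equivalent
# Python-side check (sketch, assuming `created_on` is a naive UTC datetime and
# `requesttype` is 'days' or 'hours') would be:
#
#   from datetime import timedelta
#   span = timedelta(days=leasedays) if requesttype == 'days' else timedelta(hours=leasedays)
#   expires_at = created_on + span
#   lease = "Expired" if expires_at < datetime.utcnow() else expires_at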
def check_volume_owner(controllername, instancename):
db = pymysql.connect(host=controllername, port=3306, user='expostack', passwd='XXXXXX', db='expo_cinder')
cursor = db.cursor()
sql = "select owner from lease_active_volumes where voluuid='" + instancename + "'"
cursor.execute(sql)
resultnew = cursor.fetchall()
owner = ""
for rows in resultnew:
owner = rows[0]
#if not some_string:
return owner
#return id_to_ownername(controllername,owner)
def check_volume_lease(controllername, instancename , instancetenantid):
db = pymysql.connect(host=controllername, port=3306, user='expostack', passwd='XXXXXX', db='expo_cinder')
cursor = db.cursor()
date_formatv='%Y-%m-%d %H:%M:%S'
datev = datetime.now(tz=pytz.utc)
currentdatev = datetime.strftime(datev, date_formatv);
sql = "select case requesttype when 'days' then DATE_ADD(created_on,INTERVAL leasedays DAY) when 'hours' then DATE_ADD(created_on,INTERVAL leasedays HOUR) end as leasehowmuch ,case requesttype when 'days' then timestampdiff(MINUTE,'"+ currentdatev +"',DATE_ADD(created_on,INTERVAL leasedays DAY)) " + "when 'hours' then timestampdiff(MINUTE,'"+ currentdatev +"',DATE_ADD(created_on,INTERVAL leasedays HOUR)) end as jordan from lease_active_volumes where voluuid='" + instancename + "'"
cursor.execute(sql)
resultnew = cursor.fetchall()
for rows in resultnew:
if rows[1] < 0:
leaseexpire = "Expired";
else:
leaseexpire = rows[0]
if lease_project_verify(instancetenantid, controllername) == False:
leaseexpire = "Never"
return leaseexpire
def update_volume_lease(controllername, volume_id, lease_days):
db = pymysql.connect(host=controllername, port=3306, user='expostack', passwd='XXXXXX', db='expo_cinder')
cursor = db.cursor()
lease_days_pass = str(lease_days)
sql = "update lease_active_volumes set leasedays='" + lease_days_pass + "' where voluuid='"+volume_id+"'"
cursor.execute(sql)
db.commit()
db.close()
#api.lease.create_lease_record(controllername, name, lease_days_pass, request.user.username, flavor_name.name)
def create_lease_record(controllername ,instancename, lease_days, owner, flavor):
db = pymysql.connect(host=controllername, port=3306, user='expostack', passwd='XXXXXX', db='expo_nova')
cursor = db.cursor()
lease_days_pass = str(lease_days)
generatetime1 = datetime.strftime(datetime.now(), '%Y-%m-%d %H:%M:%S')
generatetime = str(generatetime1)
checkexist = "select * from lease_active_vms where hostname='" + instancename + "'"
cursor.execute(checkexist)
if cursor.rowcount == 0:
checkleaseinfo = "select * from leaseinfo where vmname='" + instancename + "'"
cursor.execute(checkleaseinfo)
if cursor.rowcount == 0:
sql = "insert into leaseinfo values ('','" + instancename + "','" + lease_days_pass + "')";
else:
sql = "update leaseinfo set leasedays='" + lease_days_pass + "' where vmname='" + instancename + "'"
cursor.execute(sql)
db.commit()
db.close()
notify(owner,instancename,flavor,"createrequeststart","nil","nil")
def create_sahara_lease_record(controllername ,clustername, lease_days, owner):
db = pymysql.connect(host=controllername, port=3306, user='expostack', passwd='XXXXXX', db='expo_sahara')
cursor = db.cursor()
lease_days_pass = str(lease_days)
generatetime1 = datetime.strftime(datetime.now(), '%Y-%m-%d %H:%M:%S')
generatetime = str(generatetime1)
checkexist = "select * from lease_active_clusters where cluster_name='" + clustername + "'"
cursor.execute(checkexist)
if cursor.rowcount == 0:
checkleaseinfo = "select * from leaseinfo where cluster_name='" + clustername + "'"
cursor.execute(checkleaseinfo)
if cursor.rowcount == 0:
sql = "insert into leaseinfo values ('','" + clustername + "','" + lease_days_pass + "')";
else:
sql = "update leaseinfo set leasedays='" + lease_days_pass + "' where cluster_name='" + clustername + "'"
cursor.execute(sql)
db.commit()
db.close()
#notify(owner,instancename,flavor,"createrequeststart","nil","nil")
#api.lease.delete_lease_record(controllername, instance, trueowner)
def delete_lease_record(controllername, instance, trueowner):
db = pymysql.connect(host=controllername, port=3306, user='expostack', passwd='XXXXXX', db='expo_nova')
cursor = db.cursor()
sql = "select d.network_info,c.hostname from instances c, instance_info_caches d where c.uuid=d.instance_uuid and c.uuid='" + instance + "'"
cursor.execute(sql)
resultnew = cursor.fetchall()
generatetime1 = datetime.strftime(datetime.now(), '%Y-%m-%d %H:%M:%S')
generatetime = str(generatetime1)
for rows in resultnew:
vmname = rows[1]
trueownersql = "select owner from lease_active_vms where hostname='" + vmname +"'"
cursor.execute(trueownersql)
resulttrueowner = cursor.fetchall()
for trueownerrows in resulttrueowner:
trueowner = trueownerrows[0]
db.close()
notify(trueowner,vmname,"nil","deleterequeststart","nil",trueowner)
def trove_instance_delete(controllername, instance_id, owner):
db = pymysql.connect(host=controllername, port=3306, user='expostack', passwd='XXXXXX', db='expo_nova')
cursor = db.cursor()
dba = pymysql.connect(host=controllername, port=3306, user='expostack', passwd='XXXXXX', db='expo_trove')
cursora = dba.cursor()
trovesql = "select compute_instance_id from instances where id='" + instance_id + "'"
cursora.execute(trovesql)
instancenovaid = "nil"
    if cursora.rowcount != 0:
        resulttrove = cursora.fetchall()
        for rowstrove in resulttrove:
            instancenovaid = str(rowstrove[0])
sql = "select d.network_info,c.hostname from instances c, instance_info_caches d where c.uuid=d.instance_uuid and c.uuid='" + instancenovaid + "'"
cursor.execute(sql)
resultnew = cursor.fetchall()
generatetime1 = datetime.strftime(datetime.now(), '%Y-%m-%d %H:%M:%S')
generatetime = str(generatetime1)
trueowner = owner
vmname = "nil"
for rows in resultnew:
vmname = rows[1]
trueownersql = "select owner from lease_active_vms where hostname='" + vmname +"'"
cursor.execute(trueownersql)
resulttrueowner = cursor.fetchall()
for trueownerrows in resulttrueowner:
trueowner = trueownerrows[0]
dba.close()
db.close()
if vmname == "nil":
asdas=""
else:
notify(trueowner,vmname,"nil","deleterequeststart","nil",owner)
def create_volume_lease_record(controllername ,instancename, lease_days, owner, size):
db = pymysql.connect(host=controllername, port=3306, user='expostack', passwd='XXXXXX', db='expo_cinder')
cursor = db.cursor()
lease_days_pass = str(lease_days)
generatetime1 = datetime.strftime(datetime.now(), '%Y-%m-%d %H:%M:%S')
generatetime = str(generatetime1)
checkexist = "select * from lease_active_volumes where volumename='" + instancename + "'"
sizea=str(size)
cursor.execute(checkexist)
if cursor.rowcount == 0:
checkleaseinfo = "select * from leaseinfo where volumename='" + instancename + "'"
cursor.execute(checkleaseinfo)
if cursor.rowcount == 0:
sql = "insert into leaseinfo values ('','" + instancename + "','" + lease_days_pass + "')";
else:
sql = "update leaseinfo set leasedays='" + lease_days_pass + "' where volumename='" + instancename + "'"
cursor.execute(sql)
db.commit()
db.close()
notify(owner,instancename,sizea,"createvolumerequeststart","nil","nil")
def delete_volume_lease_record(controllername, instance, trueowner):
db = pymysql.connect(host=controllername, port=3306, user='expostack', passwd='XXXXXX', db='expo_cinder')
cursor = db.cursor()
sql = "select id,display_name from volumes where id='" + instance + "'"
cursor.execute(sql)
resultnew = cursor.fetchall()
generatetime1 = datetime.strftime(datetime.now(), '%Y-%m-%d %H:%M:%S')
generatetime = str(generatetime1)
trueowner = ""
for rows in resultnew:
vmname = rows[1]
trueownersql = "select owner from lease_active_volumes where volumename='" + vmname +"' and voluuid='" + rows[0] + "'"
cursor.execute(trueownersql)
resulttrueowner = cursor.fetchall()
for trueownerrows in resulttrueowner:
trueowner = trueownerrows[0]
db.close()
notify(trueowner,vmname,"nil","deletevolumerequeststart","nil",trueowner)
def create_share_lease_record(controllername ,instancename, lease_days, owner, size):
db = pymysql.connect(host=controllername, port=3306, user='expostack', passwd='XXXXXX', db='expo_manila')
cursor = db.cursor()
lease_days_pass = str(lease_days)
generatetime1 = datetime.strftime(datetime.now(), '%Y-%m-%d %H:%M:%S')
generatetime = str(generatetime1)
checkexist = "select * from lease_active_shares where sharename='" + instancename + "'"
sizea=str(size)
cursor.execute(checkexist)
if cursor.rowcount == 0:
checkleaseinfo = "select * from leaseinfo where sharename='" + instancename + "'"
cursor.execute(checkleaseinfo)
if cursor.rowcount == 0:
            sql = "insert into leaseinfo values ('','" + instancename + "','" + lease_days_pass + "')"
else:
sql = "update leaseinfo set leasedays='" + lease_days_pass + "' where sharename='" + instancename + "'"
cursor.execute(sql)
db.commit()
db.close()
notify(owner,instancename,sizea,"createsharerequeststart","nil","nil")
def delete_share_lease_record(controllername, instance, trueowner):
db = pymysql.connect(host=controllername, port=3306, user='expostack', passwd='XXXXXX', db='expo_manila')
cursor = db.cursor()
sql = "select id,display_name from shares where id='" + instance + "'"
cursor.execute(sql)
resultnew = cursor.fetchall()
generatetime1 = datetime.strftime(datetime.now(), '%Y-%m-%d %H:%M:%S')
generatetime = str(generatetime1)
    trueowner = "[email protected]"
    vmname = "nil"
for rows in resultnew:
vmname = rows[1]
trueownersql = "select owner from lease_active_shares where sharename='" + vmname +"' and shareuuid='" + rows[0] + "'"
cursor.execute(trueownersql)
resulttrueowner = cursor.fetchall()
for trueownerrows in resulttrueowner:
trueowner = trueownerrows[0]
db.close()
    if vmname != "nil":
        notify(trueowner,vmname,"nil","deletesharerequeststart","nil",trueowner)
def online_share(storage_name,manilaid,controller):
filepath = '/usr/local/src/lease_expire/netapp-manageability-sdk-5.3/src/sample/Data_ONTAP/Python'
netapp_user = 'cinderapi'
netapp_passwd = 'netapp123'
if '10.29.16.67' in controller or '10.29.16.253' in controller or '10.29.16.60' in controller or 'expostack.tf-net.mydomain.com' in controller:
netapp_url = 'fcl02-mgmt.scl1.us.mydomain.com'
else:
netapp_url = 'fcl01-mgmt.prod.la1.us.mydomain.com'
mount_cmd = "/usr/bin/python " + filepath + "/apitest.py -v svm-dev-saas-01" + " " + netapp_url + " " + netapp_user + " " + netapp_passwd + " volume-mount volume-name " + storage_name + " junction-path /"+ storage_name + " >>/dev/null 2>&1"
online_cmd = "/usr/bin/python " + filepath + "/apitest.py -v svm-dev-saas-01" + " " + netapp_url + " " + netapp_user + " " + netapp_passwd + " volume-online name " + storage_name + " >>/dev/null 2>&1"
    # print_cmd = 'printf "\n `date` : Online Netapp Share: ' + storage_name + ' ( ' + manilaid + ' ) ' + ' " |tee -a /var/log/lease_expire/renew_share_list.txt '
    # os.system returns the command's exit status (0 on success); check it,
    # because a failing shell command does not raise a Python exception.
    if os.system(online_cmd) == 0 and os.system(mount_cmd) == 0:
        # os.system(print_cmd)
        return "success"
    else:
        print "\tError trying to online " + storage_name + " " + manilaid
        return "failure"
def update_share_lease(controllername, share_id, lease_days):
db = pymysql.connect(host=controllername, port=3306, user='expostack', passwd='XXXXXX', db='expo_manila')
cursor = db.cursor()
lease_days_pass = str(lease_days)
sql = "update lease_active_shares set leasedays='" + lease_days_pass + "' where shareuuid='"+share_id+"'"
cursor.execute(sql)
db.commit()
status_cmd = "select share_status from lease_active_shares where shareuuid = '"+share_id+ "'"
cursor.execute(status_cmd)
sharestatus = cursor.fetchall()
share_status = sharestatus[0][0]
if 'online' in share_status:
db.close()
else:
name_sql = "select storage_name from lease_active_shares where shareuuid ='"+share_id+"'"
cursor.execute(name_sql)
storagename = cursor.fetchall()
        # fetchall() returns a tuple of rows; take the first column directly
        storage_name = str(storagename[0][0])
online_res = online_share(storage_name,share_id,controllername)
if "success" in online_res:
update_share = "update lease_active_shares set share_status='online' where shareuuid='"+share_id+"'"
cursor.execute(update_share)
db.commit()
db.close()

# ===== /leetcode/python/remove-all-adjacent-duplicates-in-string.py (repo: lsom11/coding-challenges) =====
class Solution:
    def removeDuplicates(self, S):
        # Stack-based scan: push each character; when it equals the top of
        # the stack, the pair is an adjacent duplicate and both cancel.
        stack = []
        for s in S:
            if not stack or s != stack[-1]:
                stack.append(s)
            else:
                stack.pop()
        return ''.join(stack)
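# Illustrative check (not part of the original file): adjacent equal pairs
# cancel in the stack, so "abbaca" collapses to "ca".
if __name__ == '__main__':
    assert Solution().removeDuplicates("abbaca") == "ca"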

# ===== /ILI9341/examples/01_basic/05a_println.py (repo: mchobby/pyboard_drive) =====
# The driver allows you to draw text (string) on the screen.
# Text drawing has the following features:
# * Support for various fonts
# * Support for text color (and background color)
# * Cursor blinking
# * Drawing from a given position (x,y)
#
from lcd import *
from fonts.arial_14 import Arial_14
import pyb
l = LCD( rate=21000000 ) # stepping the SPI bus speed down to 21 MHz may be advisable when using 150+ mm wires
l.fillMonocolor( CYAN )
# Create an object that can print strings on the screen
# * initCh() creates a BaseChars object which retains graphical properties for the printed string
# * bgcolor, color: define the background color and the text color
# * font : Arial_14 by default; selects the font to use
# * scale: scales the font (1, 2, 3)
# * bctimes: number of times to blink the cursor (when requested)
#
c = l.initCh(font=Arial_14, color=RED, bgcolor=CYAN)
# Print the string at position x=10, y=10
# bc: False by default; set True to show the blinking cursor while the string is printed
# scale: scales the font (1, 2, 3)
#
c.printLn( "Hello PyBoard", 10, 10 )
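# A scaled print with the blinking cursor, using the bc/scale parameters from
# the notes above (assuming the driver accepts them as keyword arguments):
c.printLn( "Hello again", 10, 40, bc=True, scale=2 )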

# ===== /glycan_profiling/serialize/base.py (repo: mstim/glycresoft, Apache-2.0) =====
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import (
Column, Numeric, Integer, String, ForeignKey, PickleType,
Boolean)
from sqlalchemy.orm import validates
from sqlalchemy.orm.session import object_session
Base = declarative_base()


def Mass(index=True):
    # Fixed-point (14, 6) mass column, returned as a float (asdecimal=False)
    # and indexed by default.
    return Column(Numeric(14, 6, asdecimal=False), index=index)


def find_by_name(session, model_class, name):
    return session.query(model_class).filter(model_class.name == name).first()


def make_unique_name(session, model_class, name):
    # Append " (1)", " (2)", ... until the name no longer collides.
    marked_name = name
    i = 1
    while find_by_name(session, model_class, marked_name) is not None:
        marked_name = "%s (%d)" % (name, i)
        i += 1
    return marked_name

class HasUniqueName(object):
name = Column(String(128), default=u"", unique=True)
uuid = Column(String(64), index=True, unique=True)
@classmethod
def make_unique_name(cls, session, name):
return make_unique_name(session, cls, name)
@classmethod
def find_by_name(cls, session, name):
return find_by_name(session, cls, name)
@validates("name")
def ensure_unique_name(self, key, name):
session = object_session(self)
if session is not None:
model_class = self.__class__
name = make_unique_name(session, model_class, name)
return name
else:
return name
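# Illustrative usage (not part of the original module): how make_unique_name
# deduplicates names, using an in-memory SQLite database and a hypothetical
# mapped class that mixes in HasUniqueName.
if __name__ == '__main__':
    from sqlalchemy import create_engine
    from sqlalchemy.orm import sessionmaker

    class Sample(Base, HasUniqueName):
        __tablename__ = 'sample'
        id = Column(Integer, primary_key=True)

    engine = create_engine('sqlite://')
    Base.metadata.create_all(engine)
    session = sessionmaker(bind=engine)()
    session.add(Sample(name='analysis', uuid='0001'))
    session.commit()
    print(make_unique_name(session, Sample, 'analysis'))  # -> 'analysis (1)'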

# ===== /manage.py (repo: adiram17/pyanysite) =====
#!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'pyanysite.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()

# ===== /lib64/python2.7/site-packages/acimodel-1.3_2j-py2.7.egg/cobra/modelimpl/comp/rthyper.py (repo: cqbomb/qytang_aci) =====
# coding=UTF-8
# **********************************************************************
# Copyright (c) 2013-2016 Cisco Systems, Inc. All rights reserved
# written by zen warriors, do not modify!
# **********************************************************************
from cobra.mit.meta import ClassMeta
from cobra.mit.meta import StatsClassMeta
from cobra.mit.meta import CounterMeta
from cobra.mit.meta import PropMeta
from cobra.mit.meta import Category
from cobra.mit.meta import SourceRelationMeta
from cobra.mit.meta import NamedSourceRelationMeta
from cobra.mit.meta import TargetRelationMeta
from cobra.mit.meta import DeploymentPathMeta, DeploymentCategory
from cobra.model.category import MoCategory, PropCategory, CounterCategory
from cobra.mit.mo import Mo
# ##################################################
class RtHyper(Mo):
"""
A target relation to the compute hypervisor. Note that this relation is an internal object.
"""
meta = TargetRelationMeta("cobra.model.comp.RtHyper", "cobra.model.fv.Ep")
meta.moClassName = "compRtHyper"
meta.rnFormat = "rtfvHyper-[%(tDn)s]"
meta.category = MoCategory.RELATIONSHIP_FROM_LOCAL
meta.label = "None"
meta.writeAccessMask = 0x5
meta.readAccessMask = 0x625
meta.isDomainable = False
meta.isReadOnly = True
meta.isConfigurable = False
meta.isDeletable = False
meta.isContextRoot = False
meta.parentClasses.add("cobra.model.comp.Hv")
meta.superClasses.add("cobra.model.reln.From")
meta.superClasses.add("cobra.model.reln.Inst")
meta.rnPrefixes = [
('rtfvHyper-', True),
]
prop = PropMeta("str", "childAction", "childAction", 4, PropCategory.CHILD_ACTION)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop._addConstant("deleteAll", "deleteall", 16384)
prop._addConstant("deleteNonPresent", "deletenonpresent", 8192)
prop._addConstant("ignore", "ignore", 4096)
meta.props.add("childAction", prop)
prop = PropMeta("str", "dn", "dn", 1, PropCategory.DN)
prop.label = "None"
prop.isDn = True
prop.isImplicit = True
prop.isAdmin = True
prop.isCreateOnly = True
meta.props.add("dn", prop)
prop = PropMeta("str", "lcOwn", "lcOwn", 9, PropCategory.REGULAR)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop.defaultValue = 0
prop.defaultValueStr = "local"
prop._addConstant("implicit", "implicit", 4)
prop._addConstant("local", "local", 0)
prop._addConstant("policy", "policy", 1)
prop._addConstant("replica", "replica", 2)
prop._addConstant("resolveOnBehalf", "resolvedonbehalf", 3)
meta.props.add("lcOwn", prop)
prop = PropMeta("str", "modTs", "modTs", 7, PropCategory.REGULAR)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop.defaultValue = 0
prop.defaultValueStr = "never"
prop._addConstant("never", "never", 0)
meta.props.add("modTs", prop)
prop = PropMeta("str", "rn", "rn", 2, PropCategory.RN)
prop.label = "None"
prop.isRn = True
prop.isImplicit = True
prop.isAdmin = True
prop.isCreateOnly = True
meta.props.add("rn", prop)
prop = PropMeta("str", "status", "status", 3, PropCategory.STATUS)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop._addConstant("created", "created", 2)
prop._addConstant("deleted", "deleted", 8)
prop._addConstant("modified", "modified", 4)
meta.props.add("status", prop)
prop = PropMeta("str", "tCl", "tCl", 12384, PropCategory.REGULAR)
prop.label = "Target-class"
prop.isImplicit = True
prop.isAdmin = True
prop.defaultValue = 2030
prop.defaultValueStr = "fvEp"
prop._addConstant("dhcpCEp", None, 1469)
prop._addConstant("dhcpPEp", None, 1468)
prop._addConstant("fvCEp", None, 2033)
prop._addConstant("fvDEp", None, 2066)
prop._addConstant("fvEp", None, 2030)
prop._addConstant("fvEpDef", None, 2051)
prop._addConstant("fvEpDefRef", None, 2056)
prop._addConstant("fvTunDef", None, 2062)
prop._addConstant("fvTunDefRef", None, 2063)
prop._addConstant("fvVDEp", None, 2067)
prop._addConstant("fvtopoEp", None, 2105)
prop._addConstant("opflexIDEp", None, 1104)
prop._addConstant("spanCEpDef", None, 4197)
prop._addConstant("spanCEpDefRef", None, 5618)
prop._addConstant("unspecified", "unspecified", 0)
meta.props.add("tCl", prop)
prop = PropMeta("str", "tDn", "tDn", 12383, PropCategory.REGULAR)
prop.label = "Target-dn"
prop.isConfig = True
prop.isAdmin = True
prop.isCreateOnly = True
prop.isNaming = True
meta.props.add("tDn", prop)
meta.namingProps.append(getattr(meta.props, "tDn"))
getattr(meta.props, "tDn").needDelimiter = True
def __init__(self, parentMoOrDn, tDn, markDirty=True, **creationProps):
namingVals = [tDn]
Mo.__init__(self, parentMoOrDn, markDirty, *namingVals, **creationProps)
# End of package file
# ##################################################

# ===== /udemy-dl.py (repo: Simerpreet-K/udemy-dl, MIT) =====
#!/usr/bin/python
# -*- coding: utf-8 -*-
import os
import sys
import time
import udemy
import argparse
from pprint import pprint
from udemy import __version__
from udemy._colorized import *
from udemy._compat import pyver
from udemy._getpass import GetPass
from udemy._vtt2srt import WebVtt2Srt
from udemy._progress import ProgressBar
from udemy._colorized.banner import banner
from udemy._utils import cache_credentials
from udemy._utils import use_cached_credentials
getpass = GetPass()
class Udemy(WebVtt2Srt, ProgressBar):
def __init__(self, url, username, password):
self.url = url
self.username = username
self.password = password
super(Udemy, self).__init__()
def _write_to_file(self, filepath='', lecture=''):
retVal = {}
filename = filepath
if pyver == 3:
with open('{}.txt'.format(filename), 'a', encoding='utf-8') as f:
try:
f.write('{}\n'.format(lecture.url))
except Exception as e:
retVal = {'status' : 'False', 'msg' : 'Python3 Exception : {}'.format(e)}
else:
retVal = {'status' : 'True', 'msg' : 'download'}
f.close()
else:
with open('{}.txt'.format(filename), 'a') as f:
try:
f.write('{}\n'.format(lecture.url))
except Exception as e:
retVal = {'status' : 'False', 'msg' : 'Python2 Exception : {}'.format(e)}
else:
retVal = {'status' : 'True', 'msg' : 'download'}
f.close()
return retVal
def course_save(self, path='', quality='', caption_only=False, skip_captions=False):
sys.stdout.write(fc + sd + "[" + fm + sb + "*" + fc + sd + "] : " + fg + sb + "Trying to login as " + fm + sb +"(%s)" % (self.username) + fg + sb +"...\n")
course = udemy.course(self.url, self.username, self.password)
course_id = course.id
course_name = course.title
total_lectures = course.lectures
total_chapters = course.chapters
course_name = (course_name.lower()).replace(' ', '-')
chapters = course.get_chapters()
sys.stdout.write (fc + sd + "[" + fm + sb + "*" + fc + sd + "] : " + fg + sb + "Course " + fb + sb + "'%s'.\n" % (course_name))
sys.stdout.write (fc + sd + "[" + fm + sb + "+" + fc + sd + "] : " + fg + sd + "Chapter(s) (%s).\n" % (total_chapters))
sys.stdout.write (fc + sd + "[" + fm + sb + "*" + fc + sd + "] : " + fg + sd + "Lecture(s) (%s).\n" % (total_lectures))
if path:
if '~' in path:
path = os.path.expanduser(path)
course_path = "%s\\%s" % (path, course_name) if os.name == 'nt' else "%s/%s" % (path, course_name)
else:
path = os.getcwd()
course_path = "%s\\%s" % (path, course_name) if os.name == 'nt' else "%s/%s" % (path, course_name)
filepath = '%s.txt' % (course_path)
if os.path.isfile(filepath):
            # truncate any previous dump of this course
            with open(filepath, 'w'):
                pass
sys.stdout.write (fc + sd + "[" + fm + sb + "*" + fc + sd + "] : " + fg + sd + "Writing course content(s) to '%s.txt'\n" % (course_name))
for chapter in chapters:
chapter_id = chapter.id
chapter_title = chapter.title
lectures = chapter.get_lectures()
lectures_count = chapter.lectures
for lecture in lectures:
lecture_id = lecture.id
lecture_streams = lecture.streams
lecture_best = lecture.getbest()
lecture_assets = lecture.assets
lecture_subtitles = lecture.subtitles
if quality:
index = 0
while index < len(lecture_streams):
dimension = int(lecture_streams[index].dimention[1])
if dimension == quality:
lecture_best = lecture_streams[index]
break
index += 1
                    # lecture_best already holds getbest() as the fallback
if caption_only and not skip_captions:
if lecture_subtitles:
for subtitle in lecture_subtitles:
self._write_to_file(filepath=course_path, lecture=subtitle)
if lecture_assets:
for asset in lecture_assets:
self._write_to_file(filepath=course_path, lecture=asset)
elif skip_captions and not caption_only:
if lecture_best:
self._write_to_file(filepath=course_path, lecture=lecture_best)
if lecture_assets:
for asset in lecture_assets:
self._write_to_file(filepath=course_path, lecture=asset)
else:
if lecture_best:
self._write_to_file(filepath=course_path, lecture=lecture_best)
if lecture_assets:
for asset in lecture_assets:
self._write_to_file(filepath=course_path, lecture=asset)
if lecture_subtitles:
for subtitle in lecture_subtitles:
self._write_to_file(filepath=course_path, lecture=subtitle)
sys.stdout.write (fc + sd + "[" + fm + sb + "*" + fc + sd + "] : " + fg + sd + "Written successfully under '{name}.txt'.\n".format(name=course_path))
def course_list_down(self, chapter_number='', lecture_number=''):
sys.stdout.write(fc + sd + "[" + fm + sb + "*" + fc + sd + "] : " + fg + sb + "Trying to login as " + fm + sb +"(%s)" % (self.username) + fg + sb +"...\n")
course = udemy.course(self.url, self.username, self.password)
course_id = course.id
course_name = course.title
total_lectures = course.lectures
total_chapters = course.chapters
chapters = course.get_chapters()
sys.stdout.write (fc + sd + "[" + fm + sb + "*" + fc + sd + "] : " + fg + sb + "Course " + fb + sb + "'%s'.\n" % (course_name))
sys.stdout.write (fc + sd + "[" + fm + sb + "+" + fc + sd + "] : " + fg + sd + "Chapter(s) (%s).\n" % (total_chapters))
sys.stdout.write (fc + sd + "[" + fm + sb + "*" + fc + sd + "] : " + fg + sd + "Lecture(s) (%s).\n" % (total_lectures))
if chapter_number and chapter_number > 0 and chapter_number <= total_chapters:
chapter = chapters[chapter_number-1]
chapter_id = chapter.id
chapter_title = chapter.title
lectures = chapter.get_lectures()
lectures_count = chapter.lectures
sys.stdout.write ('\n' + fc + sd + "[" + fw + sb + "+" + fc + sd + "] : " + fw + sd + "Chapter (%s-%s)\n" % (chapter_title, chapter_id))
sys.stdout.write (fc + sd + "[" + fm + sb + "*" + fc + sd + "] : " + fg + sd + "Lecture(s) (%s).\n" % (lectures_count))
if lecture_number and lecture_number > 0 and lecture_number <= lectures_count:
lecture = lectures[lecture_number-1]
lecture_id = lecture.id
lecture_streams = lecture.streams
lecture_best = lecture.getbest()
lecture_assets = lecture.assets
lecture_subtitles = lecture.subtitles
if lecture_streams:
sys.stdout.write(fc + sd + " - " + fy + sb + "duration : " + fm + sb + str(lecture.duration)+ fy + sb + ".\n")
sys.stdout.write(fc + sd + " - " + fy + sb + "Lecture id : " + fm + sb + str(lecture_id)+ fy + sb + ".\n")
for stream in lecture_streams:
content_length = stream.get_filesize()
if content_length != 0:
if content_length <= 1048576.00:
size = round(float(content_length) / 1024.00, 2)
sz = format(size if size < 1024.00 else size/1024.00, '.2f')
in_MB = 'KB' if size < 1024.00 else 'MB'
else:
size = round(float(content_length) / 1048576, 2)
sz = format(size if size < 1024.00 else size/1024.00, '.2f')
in_MB = "MB " if size < 1024.00 else 'GB '
if lecture_best.dimention[1] == stream.dimention[1]:
in_MB = in_MB + fc + sb + "(Best)" + fg + sd
sys.stdout.write('\t- ' + fg + sd + "{:<22} {:<8}{}{}{}{}\n".format(str(stream), stream.dimention[1] + 'p', sz, in_MB, fy, sb))
if lecture_assets:
for asset in lecture_assets:
if asset.mediatype != 'external_link':
content_length = asset.get_filesize()
if content_length != 0:
if content_length <= 1048576.00:
size = round(float(content_length) / 1024.00, 2)
sz = format(size if size < 1024.00 else size/1024.00, '.2f')
in_MB = 'KB' if size < 1024.00 else 'MB'
else:
size = round(float(content_length) / 1048576, 2)
sz = format(size if size < 1024.00 else size/1024.00, '.2f')
in_MB = "MB " if size < 1024.00 else 'GB '
sys.stdout.write('\t- ' + fg + sd + "{:<22} {:<8}{}{}{}{}\n".format(str(asset), asset.extension, sz, in_MB, fy, sb))
if lecture_subtitles:
for subtitle in lecture_subtitles:
content_length = subtitle.get_filesize()
if content_length != 0:
if content_length <= 1048576.00:
size = round(float(content_length) / 1024.00, 2)
sz = format(size if size < 1024.00 else size/1024.00, '.2f')
in_MB = 'KB' if size < 1024.00 else 'MB'
else:
size = round(float(content_length) / 1048576, 2)
sz = format(size if size < 1024.00 else size/1024.00, '.2f')
in_MB = "MB " if size < 1024.00 else 'GB '
sys.stdout.write('\t- ' + fg + sd + "{:<22} {:<8}{}{}{}{}\n".format(str(subtitle), subtitle.extension, sz, in_MB, fy, sb))
else:
for lecture in lectures:
lecture_id = lecture.id
lecture_streams = lecture.streams
lecture_best = lecture.getbest()
lecture_assets = lecture.assets
lecture_subtitles = lecture.subtitles
if lecture_streams:
sys.stdout.write(fc + sd + " - " + fy + sb + "duration : " + fm + sb + str(lecture.duration)+ fy + sb + ".\n")
sys.stdout.write(fc + sd + " - " + fy + sb + "Lecture id : " + fm + sb + str(lecture_id)+ fy + sb + ".\n")
for stream in lecture_streams:
content_length = stream.get_filesize()
if content_length != 0:
if content_length <= 1048576.00:
size = round(float(content_length) / 1024.00, 2)
sz = format(size if size < 1024.00 else size/1024.00, '.2f')
in_MB = 'KB' if size < 1024.00 else 'MB'
else:
size = round(float(content_length) / 1048576, 2)
sz = format(size if size < 1024.00 else size/1024.00, '.2f')
in_MB = "MB " if size < 1024.00 else 'GB '
if lecture_best.dimention[1] == stream.dimention[1]:
in_MB = in_MB + fc + sb + "(Best)" + fg + sd
sys.stdout.write('\t- ' + fg + sd + "{:<22} {:<8}{}{}{}{}\n".format(str(stream), stream.dimention[1] + 'p', sz, in_MB, fy, sb))
if lecture_assets:
for asset in lecture_assets:
if asset.mediatype != 'external_link':
content_length = asset.get_filesize()
if content_length != 0:
if content_length <= 1048576.00:
size = round(float(content_length) / 1024.00, 2)
sz = format(size if size < 1024.00 else size/1024.00, '.2f')
in_MB = 'KB' if size < 1024.00 else 'MB'
else:
size = round(float(content_length) / 1048576, 2)
sz = format(size if size < 1024.00 else size/1024.00, '.2f')
in_MB = "MB " if size < 1024.00 else 'GB '
sys.stdout.write('\t- ' + fg + sd + "{:<22} {:<8}{}{}{}{}\n".format(str(asset), asset.extension, sz, in_MB, fy, sb))
if lecture_subtitles:
for subtitle in lecture_subtitles:
content_length = subtitle.get_filesize()
if content_length != 0:
if content_length <= 1048576.00:
size = round(float(content_length) / 1024.00, 2)
sz = format(size if size < 1024.00 else size/1024.00, '.2f')
in_MB = 'KB' if size < 1024.00 else 'MB'
else:
size = round(float(content_length) / 1048576, 2)
sz = format(size if size < 1024.00 else size/1024.00, '.2f')
in_MB = "MB " if size < 1024.00 else 'GB '
sys.stdout.write('\t- ' + fg + sd + "{:<22} {:<8}{}{}{}{}\n".format(str(subtitle), subtitle.extension, sz, in_MB, fy, sb))
else:
for chapter in chapters:
chapter_id = chapter.id
chapter_title = chapter.title
lectures = chapter.get_lectures()
lectures_count = chapter.lectures
sys.stdout.write ('\n' + fc + sd + "[" + fw + sb + "+" + fc + sd + "] : " + fw + sd + "Chapter (%s-%s)\n" % (chapter_title, chapter_id))
sys.stdout.write (fc + sd + "[" + fm + sb + "*" + fc + sd + "] : " + fg + sd + "Lecture(s) (%s).\n" % (lectures_count))
for lecture in lectures:
lecture_id = lecture.id
lecture_streams = lecture.streams
lecture_best = lecture.getbest()
lecture_assets = lecture.assets
lecture_subtitles = lecture.subtitles
if lecture_streams:
sys.stdout.write(fc + sd + " - " + fy + sb + "duration : " + fm + sb + str(lecture.duration)+ fy + sb + ".\n")
sys.stdout.write(fc + sd + " - " + fy + sb + "Lecture id : " + fm + sb + str(lecture_id)+ fy + sb + ".\n")
for stream in lecture_streams:
content_length = stream.get_filesize()
if content_length != 0:
if content_length <= 1048576.00:
size = round(float(content_length) / 1024.00, 2)
sz = format(size if size < 1024.00 else size/1024.00, '.2f')
in_MB = 'KB' if size < 1024.00 else 'MB'
else:
size = round(float(content_length) / 1048576, 2)
sz = format(size if size < 1024.00 else size/1024.00, '.2f')
in_MB = "MB " if size < 1024.00 else 'GB '
if lecture_best.dimention[1] == stream.dimention[1]:
in_MB = in_MB + fc + sb + "(Best)" + fg + sd
sys.stdout.write('\t- ' + fg + sd + "{:<22} {:<8}{}{}{}{}\n".format(str(stream), stream.dimention[1] + 'p', sz, in_MB, fy, sb))
if lecture_assets:
for asset in lecture_assets:
if asset.mediatype != 'external_link':
content_length = asset.get_filesize()
if content_length != 0:
if content_length <= 1048576.00:
size = round(float(content_length) / 1024.00, 2)
sz = format(size if size < 1024.00 else size/1024.00, '.2f')
in_MB = 'KB' if size < 1024.00 else 'MB'
else:
size = round(float(content_length) / 1048576, 2)
sz = format(size if size < 1024.00 else size/1024.00, '.2f')
in_MB = "MB " if size < 1024.00 else 'GB '
sys.stdout.write('\t- ' + fg + sd + "{:<22} {:<8}{}{}{}{}\n".format(str(asset), asset.extension, sz, in_MB, fy, sb))
if lecture_subtitles:
for subtitle in lecture_subtitles:
content_length = subtitle.get_filesize()
if content_length != 0:
if content_length <= 1048576.00:
size = round(float(content_length) / 1024.00, 2)
sz = format(size if size < 1024.00 else size/1024.00, '.2f')
in_MB = 'KB' if size < 1024.00 else 'MB'
else:
size = round(float(content_length) / 1048576, 2)
sz = format(size if size < 1024.00 else size/1024.00, '.2f')
in_MB = "MB " if size < 1024.00 else 'GB '
sys.stdout.write('\t- ' + fg + sd + "{:<22} {:<8}{}{}{}{}\n".format(str(subtitle), subtitle.extension, sz, in_MB, fy, sb))
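    # The KB/MB/GB formatting above is repeated for every stream, asset and
    # subtitle. A possible shared helper (sketch only, not wired into the
    # methods in this file):
    @staticmethod
    def _human_size(content_length):
        # Walk up the units until the byte count fits below 1024.
        size = float(content_length)
        for unit in ('B', 'KB', 'MB', 'GB'):
            if size < 1024.00:
                return '{:.2f} {}'.format(size, unit)
            size /= 1024.00
        return '{:.2f} TB'.format(size)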
def download_assets(self, lecture_assets='', filepath=''):
if lecture_assets:
for assets in lecture_assets:
title = assets.filename
mediatype = assets.mediatype
if mediatype == "external_link":
assets.download(filepath=filepath, quiet=True, callback=self.show_progress)
else:
sys.stdout.write(fc + sd + "\n[" + fm + sb + "*" + fc + sd + "] : " + fg + sd + "Downloading asset(s)\n")
sys.stdout.write(fc + sd + "[" + fm + sb + "*" + fc + sd + "] : " + fg + sd + "Downloading (%s)\n" % (title))
try:
retval = assets.download(filepath=filepath, quiet=True, callback=self.show_progress)
except KeyboardInterrupt:
sys.stdout.write (fc + sd + "\n[" + fr + sb + "-" + fc + sd + "] : " + fr + sd + "User Interrupted..\n")
sys.exit(0)
else:
msg = retval.get('msg')
if msg == 'already downloaded':
sys.stdout.write (fc + sd + "[" + fm + sb + "*" + fc + sd + "] : " + fg + sd + "Asset : '%s' " % (assets.filename) + fy + sb + "(already downloaded).\n")
elif msg == 'download':
sys.stdout.write (fc + sd + "[" + fm + sb + "+" + fc + sd + "] : " + fg + sd + "Downloaded (%s)\n" % (assets.filename))
else:
sys.stdout.write (fc + sd + "[" + fm + sb + "*" + fc + sd + "] : " + fg + sd + "Asset : '%s' " % (assets.filename) + fc + sb + "(download skipped).\n")
sys.stdout.write (fc + sd + "[" + fr + sb + "-" + fc + sd + "] : " + fr + sd + "{}\n".format(msg))
def download_subtitles(self, lecture_subtitles='', filepath=''):
if lecture_subtitles:
for subtitles in lecture_subtitles:
title = subtitles.title + '-' + subtitles.language
filename = "%s\\%s" % (filepath, subtitles.filename) if os.name == 'nt' else "%s/%s" % (filepath, subtitles.filename)
sys.stdout.write(fc + sd + "\n[" + fm + sb + "*" + fc + sd + "] : " + fg + sd + "Downloading subtitle(s)\n")
sys.stdout.write(fc + sd + "[" + fm + sb + "*" + fc + sd + "] : " + fg + sd + "Downloading (%s)\n" % (title))
try:
retval = subtitles.download(filepath=filepath, quiet=True, callback=self.show_progress)
except KeyboardInterrupt:
sys.stdout.write (fc + sd + "\n[" + fr + sb + "-" + fc + sd + "] : " + fr + sd + "User Interrupted..\n")
sys.exit(0)
else:
msg = retval.get('msg')
if msg == 'already downloaded':
sys.stdout.write (fc + sd + "[" + fm + sb + "*" + fc + sd + "] : " + fg + sd + "Subtitle : '%s' " % (title) + fy + sb + "(already downloaded).\n")
self.convert(filename=filename)
elif msg == 'download':
sys.stdout.write (fc + sd + "[" + fm + sb + "+" + fc + sd + "] : " + fg + sd + "Downloaded (%s)\n" % (title))
self.convert(filename=filename)
else:
sys.stdout.write (fc + sd + "[" + fm + sb + "*" + fc + sd + "] : " + fg + sd + "Subtitle : '%s' " % (title) + fc + sb + "(download skipped).\n")
sys.stdout.write (fc + sd + "[" + fr + sb + "-" + fc + sd + "] : " + fr + sd + "{}\n".format(msg))
def download_lectures(self, lecture_best='', lecture_title='', inner_index='', lectures_count='', filepath=''):
if lecture_best:
sys.stdout.write(fc + sd + "\n[" + fm + sb + "*" + fc + sd + "] : " + fg + sd + "Lecture(s) : ({index} of {total})\n".format(index=inner_index, total=lectures_count))
sys.stdout.write(fc + sd + "[" + fm + sb + "*" + fc + sd + "] : " + fg + sd + "Downloading (%s)\n" % (lecture_title))
try:
retval = lecture_best.download(filepath=filepath, quiet=True, callback=self.show_progress)
except KeyboardInterrupt:
sys.stdout.write (fc + sd + "\n[" + fr + sb + "-" + fc + sd + "] : " + fr + sd + "User Interrupted..\n")
sys.exit(0)
else:
msg = retval.get('msg')
if msg == 'already downloaded':
sys.stdout.write (fc + sd + "[" + fm + sb + "*" + fc + sd + "] : " + fg + sd + "Lecture : '%s' " % (lecture_title) + fy + sb + "(already downloaded).\n")
elif msg == 'download':
sys.stdout.write (fc + sd + "[" + fm + sb + "+" + fc + sd + "] : " + fg + sd + "Downloaded (%s)\n" % (lecture_title))
else:
sys.stdout.write (fc + sd + "[" + fm + sb + "*" + fc + sd + "] : " + fg + sd + "Lecture : '%s' " % (lecture_title) + fc + sb + "(download skipped).\n")
sys.stdout.write (fc + sd + "[" + fr + sb + "-" + fc + sd + "] : " + fr + sd + "{}\n".format(msg))
def download_captions_only(self, lecture_subtitles='', lecture_assets='', filepath=''):
if lecture_subtitles:
self.download_subtitles(lecture_subtitles=lecture_subtitles, filepath=filepath)
if lecture_assets:
self.download_assets(lecture_assets=lecture_assets, filepath=filepath)
def download_lectures_only(self, lecture_best='', lecture_title='', inner_index='', lectures_count='', lecture_assets='', filepath=''):
if lecture_best:
self.download_lectures(lecture_best=lecture_best, lecture_title=lecture_title, inner_index=inner_index, lectures_count=lectures_count, filepath=filepath)
if lecture_assets:
self.download_assets(lecture_assets=lecture_assets, filepath=filepath)
def download_lectures_and_captions(self, lecture_best='', lecture_title='', inner_index='', lectures_count='', lecture_subtitles='', lecture_assets='', filepath=''):
if lecture_best:
self.download_lectures(lecture_best=lecture_best, lecture_title=lecture_title, inner_index=inner_index, lectures_count=lectures_count, filepath=filepath)
if lecture_subtitles:
self.download_subtitles(lecture_subtitles=lecture_subtitles, filepath=filepath)
if lecture_assets:
self.download_assets(lecture_assets=lecture_assets, filepath=filepath)
def course_download(self, path='', quality='', caption_only=False, skip_captions=False):
sys.stdout.write(fc + sd + "[" + fm + sb + "*" + fc + sd + "] : " + fg + sb + "Trying to login as " + fm + sb +"(%s)" % (self.username) + fg + sb +"...\n")
course = udemy.course(self.url, self.username, self.password)
course_id = course.id
course_name = course.title
chapters = course.get_chapters()
total_lectures = course.lectures
total_chapters = course.chapters
sys.stdout.write (fc + sd + "[" + fm + sb + "*" + fc + sd + "] : " + fg + sb + "Course " + fb + sb + "'%s'.\n" % (course_name))
sys.stdout.write (fc + sd + "[" + fm + sb + "+" + fc + sd + "] : " + fg + sd + "Chapter(s) (%s).\n" % (total_chapters))
sys.stdout.write (fc + sd + "[" + fm + sb + "*" + fc + sd + "] : " + fg + sd + "Lecture(s) (%s).\n" % (total_lectures))
if path:
if '~' in path:
path = os.path.expanduser(path)
course_path = "%s\\%s" % (path, course_name) if os.name == 'nt' else "%s/%s" % (path, course_name)
else:
path = os.getcwd()
course_path = "%s\\%s" % (path, course_name) if os.name == 'nt' else "%s/%s" % (path, course_name)
for chapter in chapters:
chapter_id = chapter.id
chapter_index = chapter.index
chapter_title = chapter.title
lectures = chapter.get_lectures()
lectures_count = chapter.lectures
filepath = "%s\\%s" % (course_path, chapter_title) if os.name == 'nt' else "%s/%s" % (course_path, chapter_title)
try:
os.makedirs(filepath)
            except Exception:
                pass  # directory already exists
sys.stdout.write (fc + sd + "\n[" + fm + sb + "*" + fc + sd + "] : " + fm + sb + "Downloading chapter : ({index} of {total})\n".format(index=chapter_index, total=total_chapters))
sys.stdout.write (fc + sd + "[" + fw + sb + "+" + fc + sd + "] : " + fw + sd + "Chapter (%s)\n" % (chapter_title))
sys.stdout.write (fc + sd + "[" + fm + sb + "*" + fc + sd + "] : " + fg + sd + "Found (%s) lectures ...\n" % (lectures_count))
inner_index = 1
for lecture in lectures:
lecture_id = lecture.id
lecture_index = lecture.index
lecture_title = lecture.title
lecture_assets = lecture.assets
lecture_subtitles = lecture.subtitles
lecture_best = lecture.getbest()
lecture_streams = lecture.streams
if caption_only and not skip_captions:
self.download_captions_only(lecture_subtitles=lecture_subtitles, lecture_assets=lecture_assets, filepath=filepath)
elif skip_captions and not caption_only:
if quality:
index = 0
while index < len(lecture_streams):
dimension = int(lecture_streams[index].dimention[1])
if dimension == quality:
lecture_best = lecture_streams[index]
break
index += 1
                        # lecture_best already holds getbest() as the fallback
if lecture.html:
lecture.dump(filepath=filepath)
self.download_lectures_only(lecture_best=lecture_best, lecture_title=lecture_title, inner_index=inner_index, lectures_count=lectures_count, lecture_assets=lecture_assets, filepath=filepath)
else:
if quality:
index = 0
while index < len(lecture_streams):
dimension = int(lecture_streams[index].dimention[1])
if dimension == quality:
lecture_best = lecture_streams[index]
break
index += 1
                        # lecture_best already holds getbest() as the fallback
if lecture.html:
lecture.dump(filepath=filepath)
self.download_lectures_and_captions(lecture_best=lecture_best, lecture_title=lecture_title, inner_index=inner_index, lectures_count=lectures_count, lecture_subtitles=lecture_subtitles, lecture_assets=lecture_assets, filepath=filepath)
inner_index += 1
def chapter_download(self, chapter_number='', chapter_start='', chapter_end='', lecture_number='', lecture_start='', lecture_end='', path='', quality='', caption_only=False, skip_captions=False):
sys.stdout.write(fc + sd + "[" + fm + sb + "*" + fc + sd + "] : " + fg + sb + "Trying to login as " + fm + sb +"(%s)" % (self.username) + fg + sb +"...\n")
course = udemy.course(self.url, self.username, self.password)
course_id = course.id
course_name = course.title
chapters = course.get_chapters()
total_lectures = course.lectures
total_chapters = course.chapters
sys.stdout.write (fc + sd + "[" + fm + sb + "*" + fc + sd + "] : " + fg + sb + "Course " + fb + sb + "'%s'.\n" % (course_name))
sys.stdout.write (fc + sd + "[" + fm + sb + "+" + fc + sd + "] : " + fg + sd + "Chapter(s) (%s).\n" % (total_chapters))
sys.stdout.write (fc + sd + "[" + fm + sb + "*" + fc + sd + "] : " + fg + sd + "Lecture(s) (%s).\n" % (total_lectures))
if path:
if '~' in path:
path = os.path.expanduser(path)
course_path = "%s\\%s" % (path, course_name) if os.name == 'nt' else "%s/%s" % (path, course_name)
else:
path = os.getcwd()
course_path = "%s\\%s" % (path, course_name) if os.name == 'nt' else "%s/%s" % (path, course_name)
_lectures_start, _lectures_end = lecture_start, lecture_end
if chapter_start and not chapter_end:
chapter_end = total_chapters
if chapter_number and chapter_number > 0 and chapter_number <= total_chapters:
chapter = chapters[chapter_number-1]
if chapter:
chapter_id = chapter.id
chapter_index = chapter.index
chapter_title = chapter.title
lectures = chapter.get_lectures()
lectures_count = chapter.lectures
if lecture_end and lecture_end > lectures_count:
lecture_end = lectures_count
filepath = "%s\\%s" % (course_path, chapter_title) if os.name == 'nt' else "%s/%s" % (course_path, chapter_title)
try:
os.makedirs(filepath)
                except Exception:
                    pass  # directory already exists
sys.stdout.write (fc + sd + "\n[" + fm + sb + "*" + fc + sd + "] : " + fm + sb + "Downloading chapter : ({index})\n".format(index=chapter_index))
sys.stdout.write (fc + sd + "[" + fw + sb + "+" + fc + sd + "] : " + fw + sd + "Chapter (%s)\n" % (chapter_title))
sys.stdout.write (fc + sd + "[" + fm + sb + "*" + fc + sd + "] : " + fg + sd + "Found (%s) lectures ...\n" % (lectures_count))
lecture_start = _lectures_start
lecture_end = lectures_count if lecture_start and not lecture_end else _lectures_end
if lecture_number and lecture_number > 0 and lecture_number <= lectures_count:
lecture = lectures[lecture_number-1]
lecture_id = lecture.id
lecture_index = lecture.index
lecture_title = lecture.title
lecture_assets = lecture.assets
lecture_subtitles = lecture.subtitles
lecture_best = lecture.getbest()
lecture_streams = lecture.streams
if caption_only and not skip_captions:
self.download_captions_only(lecture_subtitles=lecture_subtitles, lecture_assets=lecture_assets, filepath=filepath)
elif skip_captions and not caption_only:
if quality:
index = 0
while index < len(lecture_streams):
dimension = int(lecture_streams[index].dimention[1])
if dimension == quality:
lecture_best = lecture_streams[index]
break
index += 1
                            # lecture_best already holds getbest() as the fallback
if lecture.html:
lecture.dump(filepath=filepath)
self.download_lectures_only(lecture_best=lecture_best, lecture_title=lecture_title, inner_index=lecture_number, lectures_count=lectures_count, lecture_assets=lecture_assets, filepath=filepath)
else:
if quality:
index = 0
while index < len(lecture_streams):
dimension = int(lecture_streams[index].dimention[1])
if dimension == quality:
lecture_best = lecture_streams[index]
break
index += 1
                            # lecture_best already holds getbest() as the fallback
if lecture.html:
lecture.dump(filepath=filepath)
self.download_lectures_and_captions(lecture_best=lecture_best, lecture_title=lecture_title, inner_index=lecture_number, lectures_count=lectures_count, lecture_subtitles=lecture_subtitles, lecture_assets=lecture_assets, filepath=filepath)
elif lecture_start and lecture_start > 0 and lecture_start <= lecture_end and lecture_end <= lectures_count:
while lecture_start <= lecture_end:
lecture = lectures[lecture_start-1]
lecture_id = lecture.id
lecture_index = lecture.index
lecture_title = lecture.title
lecture_assets = lecture.assets
lecture_subtitles = lecture.subtitles
lecture_best = lecture.getbest()
lecture_streams = lecture.streams
if caption_only and not skip_captions:
self.download_captions_only(lecture_subtitles=lecture_subtitles, lecture_assets=lecture_assets, filepath=filepath)
elif skip_captions and not caption_only:
if quality:
index = 0
while index < len(lecture_streams):
dimension = int(lecture_streams[index].dimention[1])
if dimension == quality:
lecture_best = lecture_streams[index]
break
index += 1
                                # lecture_best already holds getbest() as the fallback
if lecture.html:
lecture.dump(filepath=filepath)
self.download_lectures_only(lecture_best=lecture_best, lecture_title=lecture_title, inner_index=lecture_start, lectures_count=lecture_end, lecture_assets=lecture_assets, filepath=filepath)
else:
if quality:
index = 0
while index < len(lecture_streams):
dimension = int(lecture_streams[index].dimention[1])
if dimension == quality:
lecture_best = lecture_streams[index]
break
index += 1
                                # lecture_best already holds getbest() as the fallback
if lecture.html:
lecture.dump(filepath=filepath)
self.download_lectures_and_captions(lecture_best=lecture_best, lecture_title=lecture_title, inner_index=lecture_start, lectures_count=lecture_end, lecture_subtitles=lecture_subtitles, lecture_assets=lecture_assets, filepath=filepath)
lecture_start += 1
else:
inner_index = 1
for lecture in lectures:
lecture_id = lecture.id
lecture_index = lecture.index
lecture_title = lecture.title
lecture_assets = lecture.assets
lecture_subtitles = lecture.subtitles
lecture_best = lecture.getbest()
lecture_streams = lecture.streams
if caption_only and not skip_captions:
self.download_captions_only(lecture_subtitles=lecture_subtitles, lecture_assets=lecture_assets, filepath=filepath)
elif skip_captions and not caption_only:
if quality:
index = 0
while index < len(lecture_streams):
dimension = int(lecture_streams[index].dimention[1])
if dimension == quality:
lecture_best = lecture_streams[index]
break
index += 1
                                # lecture_best already holds getbest() as the fallback
if lecture.html:
lecture.dump(filepath=filepath)
self.download_lectures_only(lecture_best=lecture_best, lecture_title=lecture_title, inner_index=inner_index, lectures_count=lectures_count, lecture_assets=lecture_assets, filepath=filepath)
else:
if quality:
index = 0
while index < len(lecture_streams):
dimension = int(lecture_streams[index].dimention[1])
if dimension == quality:
lecture_best = lecture_streams[index]
break
index += 1
                                # lecture_best already holds getbest() as the fallback
if lecture.html:
lecture.dump(filepath=filepath)
self.download_lectures_and_captions(lecture_best=lecture_best, lecture_title=lecture_title, inner_index=inner_index, lectures_count=lectures_count, lecture_subtitles=lecture_subtitles, lecture_assets=lecture_assets, filepath=filepath)
inner_index += 1
elif chapter_start and chapter_start > 0 and chapter_start <= chapter_end and chapter_end <= total_chapters:
while chapter_start <= chapter_end:
chapter = chapters[chapter_start-1]
chapter_id = chapter.id
chapter_index = chapter.index
chapter_title = chapter.title
lectures = chapter.get_lectures()
lectures_count = chapter.lectures
filepath = "%s\\%s" % (course_path, chapter_title) if os.name == 'nt' else "%s/%s" % (course_path, chapter_title)
try:
os.makedirs(filepath)
            except Exception:
                pass  # directory already exists
sys.stdout.write (fc + sd + "\n[" + fm + sb + "*" + fc + sd + "] : " + fm + sb + "Downloading chapter : ({index} of {total})\n".format(index=chapter_start, total=chapter_end))
sys.stdout.write (fc + sd + "[" + fw + sb + "+" + fc + sd + "] : " + fw + sd + "Chapter (%s)\n" % (chapter_title))
sys.stdout.write (fc + sd + "[" + fm + sb + "*" + fc + sd + "] : " + fg + sd + "Found (%s) lectures ...\n" % (lectures_count))
lecture_start = _lectures_start
lecture_end = lectures_count if lecture_start and not lecture_end else _lectures_end
if lecture_number and lecture_number > 0 and lecture_number <= lectures_count:
lecture = lectures[lecture_number-1]
lecture_id = lecture.id
lecture_index = lecture.index
lecture_title = lecture.title
lecture_assets = lecture.assets
lecture_subtitles = lecture.subtitles
lecture_best = lecture.getbest()
lecture_streams = lecture.streams
if caption_only and not skip_captions:
self.download_captions_only(lecture_subtitles=lecture_subtitles, lecture_assets=lecture_assets, filepath=filepath)
elif skip_captions and not caption_only:
if quality:
index = 0
while index < len(lecture_streams):
dimension = int(lecture_streams[index].dimention[1])
if dimension == quality:
lecture_best = lecture_streams[index]
break
index += 1
                            # lecture_best already holds getbest() as the fallback
self.download_lectures_only(lecture_best=lecture_best, lecture_title=lecture_title, inner_index=lecture_number, lectures_count=lectures_count, lecture_assets=lecture_assets, filepath=filepath)
else:
if quality:
index = 0
while index < len(lecture_streams):
dimension = int(lecture_streams[index].dimention[1])
if dimension == quality:
lecture_best = lecture_streams[index]
break
index += 1
                            # lecture_best already holds getbest() as the fallback
self.download_lectures_and_captions(lecture_best=lecture_best, lecture_title=lecture_title, inner_index=lecture_number, lectures_count=lectures_count, lecture_subtitles=lecture_subtitles, lecture_assets=lecture_assets, filepath=filepath)
elif lecture_start and lecture_start > 0 and lecture_start <= lecture_end and lecture_end <= lectures_count:
while lecture_start <= lecture_end:
lecture = lectures[lecture_start-1]
lecture_id = lecture.id
lecture_index = lecture.index
lecture_title = lecture.title
lecture_assets = lecture.assets
lecture_subtitles = lecture.subtitles
lecture_best = lecture.getbest()
lecture_streams = lecture.streams
if caption_only and not skip_captions:
self.download_captions_only(lecture_subtitles=lecture_subtitles, lecture_assets=lecture_assets, filepath=filepath)
elif skip_captions and not caption_only:
if quality:
index = 0
while index < len(lecture_streams):
dimension = int(lecture_streams[index].dimention[1])
if dimension == quality:
lecture_best = lecture_streams[index]
break
index += 1
                                # lecture_best already holds getbest() as the fallback
self.download_lectures_only(lecture_best=lecture_best, lecture_title=lecture_title, inner_index=lecture_start, lectures_count=lecture_end, lecture_assets=lecture_assets, filepath=filepath)
else:
if quality:
index = 0
while index < len(lecture_streams):
dimension = int(lecture_streams[index].dimention[1])
if dimension == quality:
lecture_best = lecture_streams[index]
break
index += 1
                                # lecture_best already holds getbest() as the fallback
self.download_lectures_and_captions(lecture_best=lecture_best, lecture_title=lecture_title, inner_index=lecture_start, lectures_count=lecture_end, lecture_subtitles=lecture_subtitles, lecture_assets=lecture_assets, filepath=filepath)
lecture_start += 1
else:
inner_index = 1
for lecture in lectures:
lecture_id = lecture.id
lecture_index = lecture.index
lecture_title = lecture.title
lecture_assets = lecture.assets
lecture_subtitles = lecture.subtitles
lecture_best = lecture.getbest()
lecture_streams = lecture.streams
if caption_only and not skip_captions:
self.download_captions_only(lecture_subtitles=lecture_subtitles, lecture_assets=lecture_assets, filepath=filepath)
elif skip_captions and not caption_only:
if quality:
index = 0
while index < len(lecture_streams):
dimension = int(lecture_streams[index].dimention[1])
if dimension == quality:
lecture_best = lecture_streams[index]
break
index += 1
                                # lecture_best already holds getbest() as the fallback
self.download_lectures_only(lecture_best=lecture_best, lecture_title=lecture_title, inner_index=inner_index, lectures_count=lectures_count, lecture_assets=lecture_assets, filepath=filepath)
else:
if quality:
index = 0
while index < len(lecture_streams):
dimension = int(lecture_streams[index].dimention[1])
if dimension == quality:
lecture_best = lecture_streams[index]
break
index += 1
                                # lecture_best already holds getbest() as the fallback
self.download_lectures_and_captions(lecture_best=lecture_best, lecture_title=lecture_title, inner_index=inner_index, lectures_count=lectures_count, lecture_subtitles=lecture_subtitles, lecture_assets=lecture_assets, filepath=filepath)
inner_index += 1
chapter_start += 1
else:
if not chapter_end and not chapter_number and not chapter_start:
sys.stdout.write (fc + sd + "[" + fr + sb + "-" + fc + sd + "] : " + fr + sb + "Argument(s) are missing : Chapter(s) range or chapter(s) number is required.\n")
elif chapter_end and chapter_end > total_chapters or chapter_number and chapter_number > total_chapters:
sys.stdout.write (fc + sd + "[" + fr + sb + "-" + fc + sd + "] : " + fr + sb + "Chapter(s) Range exceeded : Chapter(s) ending or chapter(s) number is out of range\n")
elif chapter_start and chapter_start > chapter_end:
sys.stdout.write (fc + sd + "[" + fr + sb + "-" + fc + sd + "] : " + fr + sb + "Chapter(s) Range exception : Chapter(s) starting point cannot be greater than chapter(s) ending point\n")
elif chapter_end and not chapter_start:
sys.stdout.write (fc + sd + "[" + fr + sb + "-" + fc + sd + "] : " + fr + sb + "Argument(s) are missing : Chapter(s) range starting point is missing ..\n")
sys.stdout.write (fc + sd + "[" + fy + sb + "i" + fc + sd + "] : " + fw + sb + "Chapter(s) number or range should be in between ({start} to {end}).\n".format(start=1, end=total_chapters))
sys.exit(0)
def main():
sys.stdout.write(banner())
version = "%(prog)s {version}".format(version=__version__)
    description = 'A cross-platform, Python-based utility to download courses from udemy for personal offline use.'
parser = argparse.ArgumentParser(description=description, conflict_handler="resolve")
parser.add_argument('course', help="Udemy course.", type=str)
general = parser.add_argument_group("General")
general.add_argument(
'-h', '--help',\
action='help',\
help="Shows the help.")
general.add_argument(
'-v', '--version',\
action='version',\
version=version,\
help="Shows the version.")
authentication = parser.add_argument_group("Authentication")
authentication.add_argument(
'-u', '--username',\
dest='username',\
type=str,\
help="Username in udemy.",metavar='')
authentication.add_argument(
'-p', '--password',\
dest='password',\
type=str,\
help="Password of your account.",metavar='')
advance = parser.add_argument_group("Advance")
advance.add_argument(
'-o', '--output',\
dest='output',\
type=str,\
help="Download to specific directory.",metavar='')
advance.add_argument(
'-q', '--quality',\
dest='quality',\
type=int,\
help="Download specific video quality.",metavar='')
advance.add_argument(
'-c', '--chapter',\
dest='chapter',\
type=int,\
help="Download specific chapter from course.",metavar='')
advance.add_argument(
'-l', '--lecture',\
dest='lecture',\
type=int,\
help="Download specific lecture from chapter(s).",metavar='')
advance.add_argument(
'--chapter-start',\
dest='chapter_start',\
type=int,\
help="Download from specific position within course.",metavar='')
advance.add_argument(
'--chapter-end',\
dest='chapter_end',\
type=int,\
help="Download till specific position within course.",metavar='')
advance.add_argument(
'--lecture-start',\
dest='lecture_start',\
type=int,\
help="Download from specific position within chapter(s).",metavar='')
advance.add_argument(
'--lecture-end',\
dest='lecture_end',\
type=int,\
help="Download till specific position within chapter(s).",metavar='')
other = parser.add_argument_group("Others")
other.add_argument(
'--save',\
dest='save',\
action='store_true',\
help="Do not download but save links to a file.")
other.add_argument(
'--info',\
dest='list',\
action='store_true',\
help="List all lectures with available resolution.")
other.add_argument(
'--cache',\
dest='cache',\
action='store_true',\
help="Cache your credentials to use it later.")
other.add_argument(
'--sub-only',\
dest='caption_only',\
action='store_true',\
help="Download captions/subtitle only.")
other.add_argument(
'--skip-sub',\
dest='skip_captions',\
action='store_true',\
help="Download course but skip captions/subtitle.")
options = parser.parse_args()
if not options.username and not options.password:
username = fc + sd + "[" + fm + sb + "*" + fc + sd + "] : " + fg + sd + "Username : " + fg + sb
password = fc + sd + "[" + fm + sb + "*" + fc + sd + "] : " + fg + sd + "Password : " + fc + sb
config = use_cached_credentials()
if config and isinstance(config, dict):
sys.stdout.write (fc + sd + "[" + fm + sb + "*" + fc + sd + "] : " + fg + sd + "Loading configs..")
email = config.get('username') or None
passwd = config.get('password') or None
quality = config.get('quality') or None
output = config.get('output') or None
time.sleep(1)
if email and passwd:
sys.stdout.write ("\r" + fc + sd + "[" + fm + sb + "*" + fc + sd + "] : " + fg + sd + "Loading configs.. (" + fc + sb + "done" + fg + sd + ")\n")
else:
sys.stdout.write ("\r" + fc + sd + "[" + fm + sb + "*" + fc + sd + "] : " + fg + sd + "Loading configs.. (" + fr + sb + "failed" + fg + sd + ")\n")
email = getpass.getuser(prompt=username)
passwd = getpass.getpass(prompt=password)
print("")
else:
email = getpass.getuser(prompt=username)
passwd = getpass.getpass(prompt=password)
print("")
if email and passwd:
udemy = Udemy(url=options.course, username=email, password=passwd)
else:
        sys.stdout.write('\n' + fc + sd + "[" + fr + sb + "-" + fc + sd + "] : " + fr + sb + "Username and password are required.\n")
sys.exit(0)
if options.cache:
cache_credentials()
if options.list and not options.save:
try:
udemy.course_list_down(chapter_number=options.chapter, lecture_number=options.lecture)
except KeyboardInterrupt as e:
sys.stdout.write (fc + sd + "[" + fr + sb + "-" + fc + sd + "] : " + fr + sd + "User Interrupted..\n")
sys.exit(0)
elif not options.list and options.save:
try:
udemy.course_save(path=options.output, quality=options.quality, caption_only=options.caption_only, skip_captions=options.skip_captions)
except KeyboardInterrupt as e:
sys.stdout.write (fc + sd + "[" + fr + sb + "-" + fc + sd + "] : " + fr + sd + "User Interrupted..\n")
sys.exit(0)
elif not options.list and not options.save:
if options.chapter and not options.chapter_end and not options.chapter_start:
if options.lecture and not options.lecture_end and not options.lecture_start:
if options.caption_only and not options.skip_captions:
udemy.chapter_download(chapter_number=options.chapter,lecture_number=options.lecture, path=options.output, caption_only=options.caption_only)
elif not options.caption_only and options.skip_captions:
udemy.chapter_download(chapter_number=options.chapter,lecture_number=options.lecture, path=options.output, quality=options.quality, skip_captions=options.skip_captions)
else:
udemy.chapter_download(chapter_number=options.chapter,lecture_number=options.lecture, path=options.output, quality=options.quality)
elif options.lecture_start and options.lecture_end and not options.lecture:
if options.caption_only and not options.skip_captions:
udemy.chapter_download(chapter_number=options.chapter, lecture_start=options.lecture_start, lecture_end=options.lecture_end, path=options.output, caption_only=options.caption_only)
elif not options.caption_only and options.skip_captions:
udemy.chapter_download(chapter_number=options.chapter, lecture_start=options.lecture_start, lecture_end=options.lecture_end, path=options.output, quality=options.quality, skip_captions=options.skip_captions)
else:
udemy.chapter_download(chapter_number=options.chapter, lecture_start=options.lecture_start, lecture_end=options.lecture_end, path=options.output, quality=options.quality)
elif options.lecture_start and not options.lecture_end and not options.lecture:
if options.caption_only and not options.skip_captions:
udemy.chapter_download(chapter_number=options.chapter, lecture_start=options.lecture_start, path=options.output, caption_only=options.caption_only)
elif not options.caption_only and options.skip_captions:
udemy.chapter_download(chapter_number=options.chapter, lecture_start=options.lecture_start, path=options.output, quality=options.quality, skip_captions=options.skip_captions)
else:
udemy.chapter_download(chapter_number=options.chapter, lecture_start=options.lecture_start, path=options.output, quality=options.quality)
else:
if options.caption_only and not options.skip_captions:
udemy.chapter_download(chapter_number=options.chapter, path=options.output, caption_only=options.caption_only)
elif not options.caption_only and options.skip_captions:
udemy.chapter_download(chapter_number=options.chapter, path=options.output, quality=options.quality, skip_captions=options.skip_captions)
else:
udemy.chapter_download(chapter_number=options.chapter, path=options.output, quality=options.quality)
elif options.chapter_start and options.chapter_end and not options.chapter:
if options.lecture and not options.lecture_end and not options.lecture_start:
if options.caption_only and not options.skip_captions:
udemy.chapter_download(chapter_start=options.chapter_start, chapter_end=options.chapter_end, lecture_number=options.lecture, path=options.output, caption_only=options.caption_only)
elif not options.caption_only and options.skip_captions:
udemy.chapter_download(chapter_start=options.chapter_start, chapter_end=options.chapter_end, lecture_number=options.lecture, path=options.output, quality=options.quality, skip_captions=options.skip_captions)
else:
udemy.chapter_download(chapter_start=options.chapter_start, chapter_end=options.chapter_end, lecture_number=options.lecture, path=options.output, quality=options.quality)
elif options.lecture_start and options.lecture_end and not options.lecture:
if options.caption_only and not options.skip_captions:
udemy.chapter_download(chapter_start=options.chapter_start, chapter_end=options.chapter_end, lecture_start=options.lecture_start, lecture_end=options.lecture_end, path=options.output, caption_only=options.caption_only)
elif not options.caption_only and options.skip_captions:
udemy.chapter_download(chapter_start=options.chapter_start, chapter_end=options.chapter_end, lecture_start=options.lecture_start, lecture_end=options.lecture_end, path=options.output, quality=options.quality, skip_captions=options.skip_captions)
else:
udemy.chapter_download(chapter_start=options.chapter_start, chapter_end=options.chapter_end, lecture_start=options.lecture_start, lecture_end=options.lecture_end, path=options.output, quality=options.quality)
elif options.lecture_start and not options.lecture_end and not options.lecture:
if options.caption_only and not options.skip_captions:
udemy.chapter_download(chapter_start=options.chapter_start, chapter_end=options.chapter_end, lecture_start=options.lecture_start, path=options.output, caption_only=options.caption_only)
elif not options.caption_only and options.skip_captions:
udemy.chapter_download(chapter_start=options.chapter_start, chapter_end=options.chapter_end, lecture_start=options.lecture_start, path=options.output, quality=options.quality, skip_captions=options.skip_captions)
else:
udemy.chapter_download(chapter_start=options.chapter_start, chapter_end=options.chapter_end, lecture_start=options.lecture_start, path=options.output, quality=options.quality)
else:
if options.caption_only and not options.skip_captions:
udemy.chapter_download(chapter_start=options.chapter_start, chapter_end=options.chapter_end, path=options.output, caption_only=options.caption_only)
elif not options.caption_only and options.skip_captions:
udemy.chapter_download(chapter_start=options.chapter_start, chapter_end=options.chapter_end, path=options.output, quality=options.quality, skip_captions=options.skip_captions)
else:
udemy.chapter_download(chapter_start=options.chapter_start, chapter_end=options.chapter_end, path=options.output, quality=options.quality)
elif options.chapter_start and not options.chapter_end and not options.chapter:
if options.lecture and not options.lecture_end and not options.lecture_start:
if options.caption_only and not options.skip_captions:
udemy.chapter_download(chapter_start=options.chapter_start, lecture_number=options.lecture, path=options.output, caption_only=options.caption_only)
elif not options.caption_only and options.skip_captions:
udemy.chapter_download(chapter_start=options.chapter_start, lecture_number=options.lecture, path=options.output, quality=options.quality, skip_captions=options.skip_captions)
else:
udemy.chapter_download(chapter_start=options.chapter_start, lecture_number=options.lecture, path=options.output, quality=options.quality)
elif options.lecture_start and options.lecture_end and not options.lecture:
if options.caption_only and not options.skip_captions:
udemy.chapter_download(chapter_start=options.chapter_start, lecture_start=options.lecture_start, lecture_end=options.lecture_end, path=options.output, caption_only=options.caption_only)
elif not options.caption_only and options.skip_captions:
udemy.chapter_download(chapter_start=options.chapter_start, lecture_start=options.lecture_start, lecture_end=options.lecture_end, path=options.output, quality=options.quality, skip_captions=options.skip_captions)
else:
udemy.chapter_download(chapter_start=options.chapter_start, lecture_start=options.lecture_start, lecture_end=options.lecture_end, path=options.output, quality=options.quality)
elif options.lecture_start and not options.lecture_end and not options.lecture:
if options.caption_only and not options.skip_captions:
udemy.chapter_download(chapter_start=options.chapter_start, lecture_start=options.lecture_start, path=options.output, caption_only=options.caption_only)
elif not options.caption_only and options.skip_captions:
udemy.chapter_download(chapter_start=options.chapter_start, lecture_start=options.lecture_start, path=options.output, quality=options.quality, skip_captions=options.skip_captions)
else:
udemy.chapter_download(chapter_start=options.chapter_start, lecture_start=options.lecture_start, path=options.output, quality=options.quality)
else:
if options.caption_only and not options.skip_captions:
udemy.chapter_download(chapter_start=options.chapter_start, path=options.output, caption_only=options.caption_only)
elif not options.caption_only and options.skip_captions:
udemy.chapter_download(chapter_start=options.chapter_start, path=options.output, quality=options.quality, skip_captions=options.skip_captions)
else:
udemy.chapter_download(chapter_start=options.chapter_start, path=options.output, quality=options.quality)
else:
if options.caption_only and not options.skip_captions:
udemy.course_download(caption_only=options.caption_only, path=options.output)
elif not options.caption_only and options.skip_captions:
udemy.course_download(skip_captions=options.skip_captions, path=options.output, quality=options.quality)
else:
udemy.course_download(path=options.output, quality=options.quality)
elif options.username and options.password:
udemy = Udemy(url=options.course, username=options.username, password=options.password)
if options.cache:
cache_credentials(username=options.username, password=options.password)
if options.list and not options.save:
try:
udemy.course_list_down(chapter_number=options.chapter, lecture_number=options.lecture)
except KeyboardInterrupt as e:
sys.stdout.write (fc + sd + "[" + fr + sb + "-" + fc + sd + "] : " + fr + sd + "User Interrupted..\n")
sys.exit(0)
elif not options.list and options.save:
try:
udemy.course_save(path=options.output, quality=options.quality, caption_only=options.caption_only, skip_captions=options.skip_captions)
except KeyboardInterrupt as e:
sys.stdout.write (fc + sd + "[" + fr + sb + "-" + fc + sd + "] : " + fr + sd + "User Interrupted..\n")
sys.exit(0)
elif not options.list and not options.save:
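            # same dispatch tree as in the cached-credentials branch above,
            # repeated for the explicit username/password login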
if options.chapter and not options.chapter_end and not options.chapter_start:
if options.lecture and not options.lecture_end and not options.lecture_start:
if options.caption_only and not options.skip_captions:
udemy.chapter_download(chapter_number=options.chapter,lecture_number=options.lecture, path=options.output, caption_only=options.caption_only)
elif not options.caption_only and options.skip_captions:
udemy.chapter_download(chapter_number=options.chapter,lecture_number=options.lecture, path=options.output, quality=options.quality, skip_captions=options.skip_captions)
else:
udemy.chapter_download(chapter_number=options.chapter,lecture_number=options.lecture, path=options.output, quality=options.quality)
elif options.lecture_start and options.lecture_end and not options.lecture:
if options.caption_only and not options.skip_captions:
udemy.chapter_download(chapter_number=options.chapter, lecture_start=options.lecture_start, lecture_end=options.lecture_end, path=options.output, caption_only=options.caption_only)
elif not options.caption_only and options.skip_captions:
udemy.chapter_download(chapter_number=options.chapter, lecture_start=options.lecture_start, lecture_end=options.lecture_end, path=options.output, quality=options.quality, skip_captions=options.skip_captions)
else:
udemy.chapter_download(chapter_number=options.chapter, lecture_start=options.lecture_start, lecture_end=options.lecture_end, path=options.output, quality=options.quality)
elif options.lecture_start and not options.lecture_end and not options.lecture:
if options.caption_only and not options.skip_captions:
udemy.chapter_download(chapter_number=options.chapter, lecture_start=options.lecture_start, path=options.output, caption_only=options.caption_only)
elif not options.caption_only and options.skip_captions:
udemy.chapter_download(chapter_number=options.chapter, lecture_start=options.lecture_start, path=options.output, quality=options.quality, skip_captions=options.skip_captions)
else:
udemy.chapter_download(chapter_number=options.chapter, lecture_start=options.lecture_start, path=options.output, quality=options.quality)
else:
if options.caption_only and not options.skip_captions:
udemy.chapter_download(chapter_number=options.chapter, path=options.output, caption_only=options.caption_only)
elif not options.caption_only and options.skip_captions:
udemy.chapter_download(chapter_number=options.chapter, path=options.output, quality=options.quality, skip_captions=options.skip_captions)
else:
udemy.chapter_download(chapter_number=options.chapter, path=options.output, quality=options.quality)
elif options.chapter_start and options.chapter_end and not options.chapter:
if options.lecture and not options.lecture_end and not options.lecture_start:
if options.caption_only and not options.skip_captions:
udemy.chapter_download(chapter_start=options.chapter_start, chapter_end=options.chapter_end, lecture_number=options.lecture, path=options.output, caption_only=options.caption_only)
elif not options.caption_only and options.skip_captions:
udemy.chapter_download(chapter_start=options.chapter_start, chapter_end=options.chapter_end, lecture_number=options.lecture, path=options.output, quality=options.quality, skip_captions=options.skip_captions)
else:
udemy.chapter_download(chapter_start=options.chapter_start, chapter_end=options.chapter_end, lecture_number=options.lecture, path=options.output, quality=options.quality)
elif options.lecture_start and options.lecture_end and not options.lecture:
if options.caption_only and not options.skip_captions:
udemy.chapter_download(chapter_start=options.chapter_start, chapter_end=options.chapter_end, lecture_start=options.lecture_start, lecture_end=options.lecture_end, path=options.output, caption_only=options.caption_only)
elif not options.caption_only and options.skip_captions:
udemy.chapter_download(chapter_start=options.chapter_start, chapter_end=options.chapter_end, lecture_start=options.lecture_start, lecture_end=options.lecture_end, path=options.output, quality=options.quality, skip_captions=options.skip_captions)
else:
udemy.chapter_download(chapter_start=options.chapter_start, chapter_end=options.chapter_end, lecture_start=options.lecture_start, lecture_end=options.lecture_end, path=options.output, quality=options.quality)
elif options.lecture_start and not options.lecture_end and not options.lecture:
if options.caption_only and not options.skip_captions:
udemy.chapter_download(chapter_start=options.chapter_start, chapter_end=options.chapter_end, lecture_start=options.lecture_start, path=options.output, caption_only=options.caption_only)
elif not options.caption_only and options.skip_captions:
udemy.chapter_download(chapter_start=options.chapter_start, chapter_end=options.chapter_end, lecture_start=options.lecture_start, path=options.output, quality=options.quality, skip_captions=options.skip_captions)
else:
udemy.chapter_download(chapter_start=options.chapter_start, chapter_end=options.chapter_end, lecture_start=options.lecture_start, path=options.output, quality=options.quality)
else:
if options.caption_only and not options.skip_captions:
udemy.chapter_download(chapter_start=options.chapter_start, chapter_end=options.chapter_end, path=options.output, caption_only=options.caption_only)
elif not options.caption_only and options.skip_captions:
udemy.chapter_download(chapter_start=options.chapter_start, chapter_end=options.chapter_end, path=options.output, quality=options.quality, skip_captions=options.skip_captions)
else:
udemy.chapter_download(chapter_start=options.chapter_start, chapter_end=options.chapter_end, path=options.output, quality=options.quality)
elif options.chapter_start and not options.chapter_end and not options.chapter:
if options.lecture and not options.lecture_end and not options.lecture_start:
if options.caption_only and not options.skip_captions:
udemy.chapter_download(chapter_start=options.chapter_start, lecture_number=options.lecture, path=options.output, caption_only=options.caption_only)
elif not options.caption_only and options.skip_captions:
udemy.chapter_download(chapter_start=options.chapter_start, lecture_number=options.lecture, path=options.output, quality=options.quality, skip_captions=options.skip_captions)
else:
udemy.chapter_download(chapter_start=options.chapter_start, lecture_number=options.lecture, path=options.output, quality=options.quality)
elif options.lecture_start and options.lecture_end and not options.lecture:
if options.caption_only and not options.skip_captions:
udemy.chapter_download(chapter_start=options.chapter_start, lecture_start=options.lecture_start, lecture_end=options.lecture_end, path=options.output, caption_only=options.caption_only)
elif not options.caption_only and options.skip_captions:
udemy.chapter_download(chapter_start=options.chapter_start, lecture_start=options.lecture_start, lecture_end=options.lecture_end, path=options.output, quality=options.quality, skip_captions=options.skip_captions)
else:
udemy.chapter_download(chapter_start=options.chapter_start, lecture_start=options.lecture_start, lecture_end=options.lecture_end, path=options.output, quality=options.quality)
elif options.lecture_start and not options.lecture_end and not options.lecture:
if options.caption_only and not options.skip_captions:
udemy.chapter_download(chapter_start=options.chapter_start, lecture_start=options.lecture_start, path=options.output, caption_only=options.caption_only)
elif not options.caption_only and options.skip_captions:
udemy.chapter_download(chapter_start=options.chapter_start, lecture_start=options.lecture_start, path=options.output, quality=options.quality, skip_captions=options.skip_captions)
else:
udemy.chapter_download(chapter_start=options.chapter_start, lecture_start=options.lecture_start, path=options.output, quality=options.quality)
else:
if options.caption_only and not options.skip_captions:
udemy.chapter_download(chapter_start=options.chapter_start, path=options.output, caption_only=options.caption_only)
elif not options.caption_only and options.skip_captions:
udemy.chapter_download(chapter_start=options.chapter_start, path=options.output, quality=options.quality, skip_captions=options.skip_captions)
else:
udemy.chapter_download(chapter_start=options.chapter_start, path=options.output, quality=options.quality)
else:
if options.caption_only and not options.skip_captions:
udemy.course_download(caption_only=options.caption_only, path=options.output)
elif not options.caption_only and options.skip_captions:
udemy.course_download(skip_captions=options.skip_captions, path=options.output, quality=options.quality)
else:
udemy.course_download(path=options.output, quality=options.quality)
if __name__ == '__main__':
try:
main()
except KeyboardInterrupt:
sys.stdout.write (fc + sd + "[" + fr + sb + "-" + fc + sd + "] : " + fr + sd + "User Interrupted..\n")
sys.exit(0)
| [
"[email protected]"
] | |
d5b88f8629f9f6924631d18a30e861b74631cacc | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_354/ch118_2020_10_05_20_26_56_452982.py | e6c9398b5afb5dea7dc4d9be024f67529850913e | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 169 | py | import math
def reflexao_total_interna(n1,n2,o2):
y=(n2*math.sin(math.radians(o2)))/n1
if y > 1:
return True
else:
return False
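# A worked example as a sanity check (values assumed for illustration): light
# in water (n2 = 1.33) hitting the boundary to air (n1 = 1.00) at o2 = 60
# degrees gives y = 1.33*sin(60)/1.00 ~ 1.15 > 1, so
# reflexao_total_interna(1.00, 1.33, 60) returns True; the critical angle is
# asin(n1/n2) ~ 48.8 degrees.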
| [
"[email protected]"
] | |
31cc256c5c48f8f108aebbc668b02473408ab0fc | 0f9f8e8478017da7c8d408058f78853d69ac0171 | /python3/l0203_remove_linked_list_elements.py | 11775486cd12a337eda8f3139c2959e183cdbebc | [] | no_license | sprax/1337 | dc38f1776959ec7965c33f060f4d43d939f19302 | 33b6b68a8136109d2aaa26bb8bf9e873f995d5ab | refs/heads/master | 2022-09-06T18:43:54.850467 | 2020-06-04T17:19:51 | 2020-06-04T17:19:51 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 344 | py | from common import ListNode
class Solution:
def removeElements(self, head: ListNode, val: int) -> ListNode:
p = dummy = ListNode(0)
dummy.next = head
while p.next:
if p.next.val == val:
p.next = p.next.next
else:
p = p.next
return dummy.next
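# The dummy (sentinel) node guards the head: every deletion, including of the
# original head, is the same uniform `p.next = p.next.next` rewrite.
# Worked example (hypothetical list): 1 -> 2 -> 6 -> 3 -> 6 with val=6
# yields 1 -> 2 -> 3.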
| [
"[email protected]"
] | |
a1730b0036d7737aea484625171644187585d455 | 6b2a8dd202fdce77c971c412717e305e1caaac51 | /solutions_5630113748090880_0/Python/Dog/mainB.py | 8ad199a6e9d658b1e59232341d93ba4eca935dc0 | [] | no_license | alexandraback/datacollection | 0bc67a9ace00abbc843f4912562f3a064992e0e9 | 076a7bc7693f3abf07bfdbdac838cb4ef65ccfcf | refs/heads/master | 2021-01-24T18:27:24.417992 | 2017-05-23T09:23:38 | 2017-05-23T09:23:38 | 84,313,442 | 2 | 4 | null | null | null | null | UTF-8 | Python | false | false | 793 | py | #! /usr/bin/env python3
import sys
def solve(numbers):
counts = dict((n, 0) for n in set(numbers))
for n in numbers:
counts[n] += 1
row = list()
for (n, c) in counts.items():
if c % 2 == 1:
row.append(n)
return ' '.join(map(str, sorted(row)))
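# Parity argument (hedged reading of the problem): the (2N-1)*N numbers per
# case look like all-but-one of the 2N row/column lists of an N x N grid, so
# every value occurs an even number of times except the values of the one
# missing list (assuming that list has no repeats, as with strictly increasing
# rows); solve() returns exactly the odd-count values, sorted.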
#################################################################
if __name__ == '__main__':
filename = sys.argv[1]
with open(filename) as f:
content = f.read().strip()
numbers = list(map(int, content.split()))
T = numbers[0]
numbers = numbers[1:]
for c in range(T):
N = numbers[0]
numbers = numbers[1:]
        ns = (2*N - 1)*N  # each case supplies 2N-1 lists of N numbers
lists = numbers[:ns]
numbers = numbers[ns:]
print('Case #', c+1, ': ', solve(lists), sep='')
| [
"[email protected]"
] | |
a85936ec234127243476ed150e317b33f2431a3c | 8f0aa0b8b8a9c9a8884fa6cb769ee34639e2f355 | /lending_library/lender_profile/models.py | ea35d7bdcd283a51b6d3d6541fb9033377044c57 | [] | no_license | datatalking/django-lender-401d7 | 3f9e2b46e73a0efd17c082b87edf4705ad7ddded | 64eae040c4c778cb96e2dedbdb2de5dc2bc1223b | refs/heads/master | 2020-03-11T17:16:31.631481 | 2017-11-30T23:11:14 | 2017-11-30T23:11:14 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 271 | py | from django.db import models
from django.contrib.auth.models import User
# Create your models here.
class LenderProfile(models.Model):
location = models.CharField(max_length=200, blank=True, null=True)
user = models.OneToOneField(User, related_name='profile')
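# related_name='profile' exposes the reverse accessor user.profile, e.g.
# some_user.profile.location; note on_delete is omitted, which only works on
# Django < 2.0, where it defaults to CASCADE.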
| [
"[email protected]"
] | |
0480de31fa8374c05cecf776dc5f07449d15f22e | 15f321878face2af9317363c5f6de1e5ddd9b749 | /solutions_python/Problem_95/2141.py | e328217501accf9709c862aaf76b4cc5c0f26338 | [] | no_license | dr-dos-ok/Code_Jam_Webscraper | c06fd59870842664cd79c41eb460a09553e1c80a | 26a35bf114a3aa30fc4c677ef069d95f41665cc0 | refs/heads/master | 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 455 | py | from string import maketrans
import sys
def trans(input):
alpha = "abcdefghijklmnopqrstuvwxyz"
trans = "yhesocvxduiglbkrztnwjpfmaq"
transtab = maketrans(alpha,trans)
return input.translate(transtab)
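# Fixed substitution cipher (the Code Jam "Speaking in Tongues"-style mapping);
# e.g. trans("ejp mysljylc") -> "our language".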
if __name__ == "__main__":
f = sys.stdin
if len(sys.argv) >= 2:
fn = sys.argv[1]
if fn != '-':
f = open(fn)
t = int(f.readline())
for s in xrange(t):
inp = f.readline()
print "Case #%d: %s" %(s+1,trans(inp).strip()) | [
"[email protected]"
] | |
dd9ac21ba1fb5bced046fac8d050cf79d2b20897 | fb28a622b21f5127c83c7fe6193b6312294b2dbe | /apps/order/migrations/0010_auto_20190729_0947.py | 911a373cc6830572f340789dd65c5e59e26d2a0d | [] | no_license | laoyouqing/video | 0cd608b1f9d3a94da4a537867fafce6f7dcd1297 | 9aa7ecf17f0145437408a8c979f819bb61617294 | refs/heads/master | 2022-12-19T11:02:01.343892 | 2019-08-21T04:00:13 | 2019-08-21T04:00:13 | 203,500,521 | 0 | 0 | null | 2022-12-08T06:03:17 | 2019-08-21T03:40:13 | Python | UTF-8 | Python | false | false | 536 | py | # Generated by Django 2.1.1 on 2019-07-29 01:47
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('order', '0009_auto_20190729_0941'),
]
operations = [
migrations.AlterField(
model_name='ordergoods',
name='video',
field=models.ForeignKey(help_text='订单商品', null=True, on_delete=django.db.models.deletion.CASCADE, to='videos.Video', verbose_name='订单商品'),
),
]
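# Net effect: OrderGoods.video is a nullable FK to videos.Video with
# cascade-on-delete; the Chinese help_text/verbose_name '订单商品' means
# "order goods".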
| [
"[email protected]"
] | |
1f083451b4ca8c802f6a551d8b929f910b51cb85 | 914a83057719d6b9276b1a0ec4f9c66fea064276 | /test/performance-regression/full-apps/qmcpack/nexus/library/periodic_table.py | 352f55194bb23de4e080377de887896104cf75e5 | [
"LicenseRef-scancode-unknown-license-reference",
"NCSA",
"LicenseRef-scancode-free-unknown",
"GPL-1.0-or-later",
"BSD-2-Clause"
] | permissive | jjwilke/hclib | e8970675bf49f89c1e5e2120b06387d0b14b6645 | 5c57408ac009386702e9b96ec2401da0e8369dbe | refs/heads/master | 2020-03-31T19:38:28.239603 | 2018-12-21T20:29:44 | 2018-12-21T20:29:44 | 152,505,070 | 0 | 0 | Apache-2.0 | 2018-10-11T00:02:52 | 2018-10-11T00:02:51 | null | UTF-8 | Python | false | false | 57,267 | py | from generic import obj
from developer import DevBase
from unit_converter import UnitConverter
def phys_value_dict(value=None,units=None):
vdict = UnitConverter.convert_scalar_to_all(units,value)
return obj(**vdict)
#end def phys_value_dict
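# Sketch of the intent (assuming UnitConverter.convert_scalar_to_all returns a
# dict mapping unit names to converted values): phys_value_dict(1.0,'eV') gives
# an obj holding the same quantity in every known unit, e.g. energy.eV or
# energy.kJ_mol.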
class SimpleElement(DevBase):
def __init__(self):
self.atomic_number = None
self.name = None
self.symbol = None
self.group = None
self.atomic_weight = None
self.atomic_radius = None
self.nuclear_charge = None
self.abundance = None
self.electron_affinity = None
self.electronegativity = None
self.ionization_energy = None
self.ionic_radius = None
self.melting_point = None
self.boiling_point = None
        self.thermal_cond = None  # filled in from the tabulated data; read by Element.__init__
self.string_rep = None
self.var_dict = None
#end def __init__
def create_var_dict(self):
self.var_dict = dict()
self.var_dict['atomic_number' ] = self.atomic_number
self.var_dict['name' ] = self.name
self.var_dict['symbol' ] = self.symbol
self.var_dict['group' ] = self.group
self.var_dict['atomic_weight' ] = self.atomic_weight
self.var_dict['atomic_radius' ] = self.atomic_radius
self.var_dict['nuclear_charge' ] = self.nuclear_charge
self.var_dict['abundance' ] = self.abundance
self.var_dict['electron_affinity'] = self.electron_affinity
self.var_dict['electronegativity'] = self.electronegativity
self.var_dict['ionization_energy'] = self.ionization_energy
self.var_dict['ionic_radius' ] = self.ionic_radius
self.var_dict['melting_point' ] = self.melting_point
self.var_dict['boiling_point' ] = self.boiling_point
self.replace_None()
#end def create_var_dict
def replace_None(self):
none_rep = -1.0
for k,v in self.var_dict.iteritems():
if(v==None):
self.var_dict[k] = none_rep
#end if
#end for
self.atomic_number = self.var_dict['atomic_number' ]
self.name = self.var_dict['name' ]
self.symbol = self.var_dict['symbol' ]
self.group = self.var_dict['group' ]
self.atomic_weight = self.var_dict['atomic_weight' ]
self.atomic_radius = self.var_dict['atomic_radius' ]
self.nuclear_charge = self.var_dict['nuclear_charge' ]
self.abundance = self.var_dict['abundance' ]
self.electron_affinity = self.var_dict['electron_affinity']
self.electronegativity = self.var_dict['electronegativity']
self.ionization_energy = self.var_dict['ionization_energy']
self.ionic_radius = self.var_dict['ionic_radius' ]
self.melting_point = self.var_dict['melting_point' ]
self.boiling_point = self.var_dict['boiling_point' ]
#end def replace_None
def create_string_representation(self):
ind = 4*' '
iformat = '%6i'
rformat = '%7.5f'
s = ''
s += self.symbol+'{\n'
s += ind + 'atomic_number = ' + str(self.atomic_number)+'\n'
s += ind + 'name = ' + str(self.name)+'\n'
s += ind + 'symbol = ' + str(self.symbol)+'\n'
s += ind + 'group = ' + str(self.group)+'\n'
s += ind + 'atomic_weight = ' + str(self.atomic_weight)+'\n'
s += ind + 'atomic_radius = ' + str(self.atomic_radius)+'\n'
s += ind + 'nuclear_charge = ' + str(self.nuclear_charge)+'\n'
s += ind + 'abundance = ' + str(self.abundance)+'\n'
s += ind + 'electron_affinity = ' + str(self.electron_affinity)+'\n'
s += ind + 'electronegativity = ' + str(self.electronegativity)+'\n'
s += ind + 'ionization_energy = ' + str(self.ionization_energy)+'\n'
s += ind + 'ionic_radius = ' + str(self.ionic_radius)+'\n'
s += ind + 'melting_point = ' + str(self.melting_point)+'\n'
s += ind + 'boiling_point = ' + str(self.boiling_point)+'\n'
s += '}\n'
self.string_rep = s
#end def create_string_representation
#end class SimpleElement
class Element(SimpleElement):
def __init__(self,se):
SimpleElement.__init__(self)
awu = PeriodicTable.atomic_weight_unit
aru = PeriodicTable.atomic_radius_unit
ncu = PeriodicTable.nuclear_charge_unit
eau = PeriodicTable.electron_affinity_unit
ieu = PeriodicTable.ionization_energy_units
iru = PeriodicTable.ionic_radius_units
tcu = PeriodicTable.thermal_cond_units
mpu = PeriodicTable.melting_point_units
bpu = PeriodicTable.boiling_point_units
self.atomic_number = se.atomic_number
self.name = se.name
self.symbol = se.symbol
self.group = PeriodicTable.group_dict[se.group]
self.abundance = se.abundance
self.atomic_weight = phys_value_dict(se.atomic_weight , awu)
self.atomic_radius = phys_value_dict(se.atomic_radius , aru)
self.nuclear_charge = phys_value_dict(se.nuclear_charge , ncu)
        self.electron_affinity = phys_value_dict(se.electron_affinity, eau)
        # electronegativity (Pauling scale) is dimensionless, so it is copied
        # through as-is rather than unit-converted
        self.electronegativity = se.electronegativity
self.ionization_energy = phys_value_dict(se.ionization_energy, ieu)
self.ionic_radius = phys_value_dict(se.ionic_radius , iru)
self.thermal_cond = phys_value_dict(se.thermal_cond , tcu)
self.melting_point = phys_value_dict(se.melting_point , mpu)
self.boiling_point = phys_value_dict(se.boiling_point , bpu)
#end def __init__
#end class Element
class PeriodicTable(DevBase):
element_set=set([\
'Ac','Al','Am','Sb','Ar','As','At','Ba','Bk','Be','Bi','B' ,'Br',\
'Cd','Ca','Cf','C' ,'Ce','Cs','Cl','Cr','Co','Cu','Cm','Dy','Es',\
'Er','Eu','Fm','F' ,'Fr','Gd','Ga','Ge','Au','Hf','Ha','Hs','He',\
'Ho','H' ,'In','I' ,'Ir','Fe','Kr','La','Lr','Pb','Li','Lu','Mg',\
'Mn','Mt','Md','Hg','Mo','Ns','Nd','Ne','Np','Ni','Nb','N' ,'No',\
'Os','O' ,'Pd','P' ,'Pt','Pu','Po','K' ,'Pr','Pm','Pa','Ra','Rn',\
'Re','Rh','Rb','Ru','Rf','Sm','Sc','Sg','Se','Si','Ag','Na','Sr',\
'S' ,'Ta','Tc','Te','Tb','Tl','Th','Tm','Sn','Ti','W' ,'U' ,'V' ,\
'Xe','Yb','Y' ,'Zn','Zr'])
element_dict=dict({\
'Ac':'Actinium',\
'Al':'Aluminum',\
'Am':'Americium',\
'Sb':'Antimony',\
'Ar':'Argon',\
'As':'Arsenic',\
'At':'Astatine',\
'Ba':'Barium',\
'Bk':'Berkelium',\
'Be':'Beryllium',\
'Bi':'Bismuth',\
'B':'Boron',\
'Br':'Bromine',\
'Cd':'Cadmium',\
'Ca':'Calcium',\
'Cf':'Californium',\
'C' :'Carbon',\
'Ce':'Cerium',\
'Cs':'Cesium',\
'Cl':'Chlorine',\
'Cr':'Chromium',\
'Co':'Cobalt',\
'Cu':'Copper',\
'Cm':'Curium',\
'Dy':'Dysprosium',\
'Es':'Einsteinium',\
'Er':'Erbium',\
'Eu':'Europium',\
'Fm':'Fermium',\
'F' :'Flourine',\
'Fr':'Francium',\
'Gd':'Gadolinium',\
'Ga':'Gallium',\
'Ge':'Germanium',\
'Au':'Gold',\
'Hf':'Hafnium',\
'Ha':'Hahnium',\
'Hs':'Hassium',\
'He':'Helium',\
'Ho':'Holmium',\
'H' :'Hydrogen',\
'In':'Indium',\
'I' :'Iodine',\
'Ir':'Iridium',\
'Fe':'Iron',\
'Kr':'Krypton',\
'La':'Lanthanum',\
'Lr':'Lawrencium',\
'Pb':'Lead',\
'Li':'Lithium',\
'Lu':'Lutetium',\
'Mg':'Magnesium',\
'Mn':'Manganese',\
'Mt':'Meitnerium',\
'Md':'Mendelevium',\
'Hg':'Mercury',\
'Mo':'Molybdenum',\
'Ns':'Neilsborium',\
'Nd':'Neodymium',\
'Ne':'Neon',\
'Np':'Neptunium',\
'Ni':'Nickel',\
'Nb':'Niobium',\
'N' :'Nitrogen',\
'No':'Nobelium',\
'Os':'Osmium',\
'O' :'Oxygen',\
'Pd':'Palladium',\
'P' :'Phosphorus',\
'Pt':'Platinum',\
'Pu':'Plutonium',\
'Po':'Polonium',\
'K' :'Potassium',\
'Pr':'Praseodymium',\
'Pm':'Promethium',\
'Pa':'Protactinium',\
'Ra':'Radium',\
'Rn':'Radon',\
'Re':'Rhenium',\
'Rh':'Rhodium',\
'Rb':'Rubidium',\
'Ru':'Ruthenium',\
'Rf':'Rutherfordium',\
'Sm':'Samarium',\
'Sc':'Scandium',\
'Sg':'Seaborgium',\
'Se':'Selenium',\
'Si':'Silicon',\
'Ag':'Silver',\
'Na':'Sodium',\
'Sr':'Strontium',\
'S' :'Sulfur',\
'Ta':'Tantalum',\
'Tc':'Technetium',\
'Te':'Tellurium',\
'Tb':'Terbium',\
'Tl':'Thalium',\
'Th':'Thorium',\
'Tm':'Thulium',\
'Sn':'Tin',\
'Ti':'Titanium',\
'W' :'Tungsten',\
'U' :'Uranium',\
'V' :'Vanadium',\
'Xe':'Xenon',\
'Yb':'Ytterbium',\
'Y' :'Yttrium',\
'Zn':'Zinc',\
'Zr':'Zirconium',\
})
group_dict = dict([\
(0 ,'LanAct'),\
(1 ,'IA'),\
(2 ,'IIA'),\
(3 ,'IIIB'),\
(4 ,'IVB'),\
(5 ,'VB'),\
(6 ,'VIB'),\
(7 ,'VIIB'),\
(8 ,'VII'),\
(9 ,'VII'),\
(10,'VII'),\
(11,'IB'),\
(12,'IIB'),\
(13,'IIIA'),\
(14,'IVA'),\
(15,'VA'),\
(16,'VIA'),\
(17,'VIIA'),\
(18,'0')\
])
atomic_weight_unit = 'amu'
atomic_radius_unit = 'pm'
nuclear_charge_unit = 'e'
electron_affinity_unit = 'kJ_mol'
ionization_energy_units = 'eV'
ionic_radius_units = 'pm'
thermal_cond_units = 'W_mK'
melting_point_units = 'degC'
boiling_point_units = 'degC'
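    # the tabulated values below are stored in the units above; Element wraps
    # each one with phys_value_dict to expose it in all supported units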
def __init__(self):
self.nelements = None
self.elements = None
nelements = 103
e = obj()
for i in range(1,nelements+1):
e[i] = SimpleElement()
#end for
for i in range(1,nelements+1):
e[i].atomic_number = i
#end for
e[1].symbol='H'
e[2].symbol='He'
e[3].symbol='Li'
e[4].symbol='Be'
e[5].symbol='B'
e[6].symbol='C'
e[7].symbol='N'
e[8].symbol='O'
e[9].symbol='F'
e[10].symbol='Ne'
e[11].symbol='Na'
e[12].symbol='Mg'
e[13].symbol='Al'
e[14].symbol='Si'
e[15].symbol='P'
e[16].symbol='S'
e[17].symbol='Cl'
e[18].symbol='Ar'
e[19].symbol='K'
e[20].symbol='Ca'
e[21].symbol='Sc'
e[22].symbol='Ti'
e[23].symbol='V'
e[24].symbol='Cr'
e[25].symbol='Mn'
e[26].symbol='Fe'
e[27].symbol='Co'
e[28].symbol='Ni'
e[29].symbol='Cu'
e[30].symbol='Zn'
e[31].symbol='Ga'
e[32].symbol='Ge'
e[33].symbol='As'
e[34].symbol='Se'
e[35].symbol='Br'
e[36].symbol='Kr'
e[37].symbol='Rb'
e[38].symbol='Sr'
e[39].symbol='Y'
e[40].symbol='Zr'
e[41].symbol='Nb'
e[42].symbol='Mo'
e[43].symbol='Tc'
e[44].symbol='Ru'
e[45].symbol='Rh'
e[46].symbol='Pd'
e[47].symbol='Ag'
e[48].symbol='Cd'
e[49].symbol='In'
e[50].symbol='Sn'
e[51].symbol='Sb'
e[52].symbol='Te'
e[53].symbol='I'
e[54].symbol='Xe'
e[55].symbol='Cs'
e[56].symbol='Ba'
e[57].symbol='La'
e[58].symbol='Ce'
e[59].symbol='Pr'
e[60].symbol='Nd'
e[61].symbol='Pm'
e[62].symbol='Sm'
e[63].symbol='Eu'
e[64].symbol='Gd'
e[65].symbol='Tb'
e[66].symbol='Dy'
e[67].symbol='Ho'
e[68].symbol='Er'
e[69].symbol='Tm'
e[70].symbol='Yb'
e[71].symbol='Lu'
e[72].symbol='Hf'
e[73].symbol='Ta'
e[74].symbol='W'
e[75].symbol='Re'
e[76].symbol='Os'
e[77].symbol='Ir'
e[78].symbol='Pt'
e[79].symbol='Au'
e[80].symbol='Hg'
e[81].symbol='Tl'
e[82].symbol='Pb'
e[83].symbol='Bi'
e[84].symbol='Po'
e[85].symbol='At'
e[86].symbol='Rn'
e[87].symbol='Fr'
e[88].symbol='Ra'
e[89].symbol='Ac'
e[90].symbol='Th'
e[91].symbol='Pa'
e[92].symbol='U'
e[93].symbol='Np'
e[94].symbol='Pu'
e[95].symbol='Am'
e[96].symbol='Cm'
e[97].symbol='Bk'
e[98].symbol='Cf'
e[99].symbol='Es'
e[100].symbol='Fm'
e[101].symbol='Md'
e[102].symbol='No'
e[103].symbol='Lr'
for i in range(1,len(e)):
e[i].name = PeriodicTable.element_dict[e[i].symbol]
#end for
e[1].group = 1
e[2].group = 18
e[3].group = 1
e[4].group = 2
e[5].group = 13
e[6].group = 14
e[7].group = 15
e[8].group = 16
e[9].group = 17
e[10].group = 18
e[11].group = 1
e[12].group = 2
e[13].group = 13
e[14].group = 14
e[15].group = 15
e[16].group = 16
e[17].group = 17
e[18].group = 18
e[19].group = 1
e[20].group = 2
e[21].group = 3
e[22].group = 4
e[23].group = 5
e[24].group = 6
e[25].group = 7
e[26].group = 8
e[27].group = 9
e[28].group = 10
e[29].group = 11
e[30].group = 12
e[31].group = 13
e[32].group = 14
e[33].group = 15
e[34].group = 16
e[35].group = 17
e[36].group = 18
e[37].group = 1
e[38].group = 2
e[39].group = 3
e[40].group = 4
e[41].group = 5
e[42].group = 6
e[43].group = 7
e[44].group = 8
e[45].group = 9
e[46].group = 10
e[47].group = 11
e[48].group = 12
e[49].group = 13
e[50].group = 14
e[51].group = 15
e[52].group = 16
e[53].group = 17
e[54].group = 18
e[55].group = 1
e[56].group = 2
e[57].group = 3
e[58].group = 0
e[59].group = 0
e[60].group = 0
e[61].group = 0
e[62].group = 0
e[63].group = 0
e[64].group = 0
e[65].group = 0
e[66].group = 0
e[67].group = 0
e[68].group = 0
e[69].group = 0
e[70].group = 0
e[71].group = 0
e[72].group = 4
e[73].group = 5
e[74].group = 6
e[75].group = 7
e[76].group = 8
e[77].group = 9
e[78].group = 10
e[79].group = 11
e[80].group = 12
e[81].group = 13
e[82].group = 14
e[83].group = 15
e[84].group = 16
e[85].group = 17
e[86].group = 18
e[87].group =1
e[88].group = 2
e[89].group = 3
e[90].group = 0
e[91].group = 0
e[92].group = 0
e[93].group = 0
e[94].group = 0
e[95].group = 0
e[96].group = 0
e[97].group = 0
e[98].group = 0
e[99].group = 0
e[100].group = 0
e[101].group = 0
e[102].group = 0
e[103].group = 0
e[1].atomic_weight = 1.00794
e[2].atomic_weight = 4.002602
e[3].atomic_weight = 6.941
e[4].atomic_weight = 9.0122
e[5].atomic_weight = 10.811
e[6].atomic_weight = 12.011000
e[7].atomic_weight = 14.007
e[8].atomic_weight = 15.999
e[9].atomic_weight = 18.998
e[10].atomic_weight = 20.180
e[11].atomic_weight = 22.990
e[12].atomic_weight = 24.305
e[13].atomic_weight = 26.982
e[14].atomic_weight = 28.086
e[15].atomic_weight = 30.974
e[16].atomic_weight = 32.064
e[17].atomic_weight = 35.453
e[18].atomic_weight = 39.948
e[19].atomic_weight = 39.098
e[20].atomic_weight = 40.08
e[21].atomic_weight = 44.956
e[22].atomic_weight = 47.90
e[23].atomic_weight = 50.942
e[24].atomic_weight = 51.996
e[25].atomic_weight = 54.938
e[26].atomic_weight = 55.845
e[27].atomic_weight = 58.933
e[28].atomic_weight = 58.69
e[29].atomic_weight = 63.546
e[30].atomic_weight = 65.38
        e[31].atomic_weight = 69.723
e[32].atomic_weight = 72.61
e[33].atomic_weight = 74.992
e[34].atomic_weight = 78.96
e[35].atomic_weight = 79.904
e[36].atomic_weight = 83.80
e[37].atomic_weight = 85.47
e[38].atomic_weight = 87.956
e[39].atomic_weight = 88.905
e[40].atomic_weight = 91.22
e[41].atomic_weight = 92.906
e[42].atomic_weight = 95.94
e[43].atomic_weight = 98.00
e[44].atomic_weight = 101.07
e[45].atomic_weight = 102.91
e[46].atomic_weight = 106.42
e[47].atomic_weight = 107.87
e[48].atomic_weight = 112.41
e[49].atomic_weight = 114.82
e[50].atomic_weight = 118.69
e[51].atomic_weight = 121.175
e[52].atomic_weight = 127.60
e[53].atomic_weight = 126.90
e[54].atomic_weight = 131.29
e[55].atomic_weight = 132.91
e[56].atomic_weight = 137.33
e[57].atomic_weight = 138.92
e[58].atomic_weight = 140.12
e[59].atomic_weight = 140.91
e[60].atomic_weight = 144.24
e[61].atomic_weight = 145.00
e[62].atomic_weight = 150.36
e[63].atomic_weight = 151.97
e[64].atomic_weight = 157.25
e[65].atomic_weight = 158.924
e[66].atomic_weight = 162.5
e[67].atomic_weight = 164.930
e[68].atomic_weight = 167.26
e[69].atomic_weight = 169.934
e[70].atomic_weight = 173.04
e[71].atomic_weight = 174.97
e[72].atomic_weight = 178.49
e[73].atomic_weight = 180.948
e[74].atomic_weight = 183.85
e[75].atomic_weight = 186.2
e[76].atomic_weight = 190.2
e[77].atomic_weight = 192.2
e[78].atomic_weight = 195.09
        e[79].atomic_weight = 196.967
e[80].atomic_weight = 200.59
e[81].atomic_weight = 204.37
e[82].atomic_weight = 207.19
e[83].atomic_weight = 208.980
e[84].atomic_weight = 209.0
e[85].atomic_weight = 210.0
e[86].atomic_weight = 222.0
e[87].atomic_weight = 223.0
e[88].atomic_weight = 226.0
e[89].atomic_weight = 227.028
        e[90].atomic_weight = 232.038
e[91].atomic_weight = 231.0
e[92].atomic_weight = 238.03
e[93].atomic_weight = 237.05
e[94].atomic_weight = 244.0
e[95].atomic_weight = 243.0
e[96].atomic_weight = 245.0
e[97].atomic_weight = 247.0
e[98].atomic_weight = 249.0
e[99].atomic_weight = 254.0
e[100].atomic_weight = 252.0
e[101].atomic_weight = 256.0
e[102].atomic_weight = 254.0
e[103].atomic_weight = 257
#atomic radius (in picometers)
e[1].atomic_radius = 78.000000
e[2].atomic_radius = 128.000000
e[3].atomic_radius = 152.000000
e[4].atomic_radius = 111.300000
e[5].atomic_radius = 79.500000
e[6].atomic_radius = 77.200000
e[7].atomic_radius = 54.900000
e[8].atomic_radius = 60.400000
e[9].atomic_radius = 70.900000
e[10].atomic_radius = 0.000000
e[11].atomic_radius = 185.800000
e[12].atomic_radius = 159.900000
e[13].atomic_radius = 143.200000
e[14].atomic_radius = 117.600000
e[15].atomic_radius = 110.500000
e[16].atomic_radius = 103.500000
e[17].atomic_radius = 99.400000
e[18].atomic_radius = 180.000000
e[19].atomic_radius = 227.200000
e[20].atomic_radius = 197.400000
e[21].atomic_radius = 160.600000
e[22].atomic_radius = 144.800000
e[23].atomic_radius = 131.100000
e[24].atomic_radius = 124.900000
e[25].atomic_radius = 136.700000
e[26].atomic_radius = 124.100000
e[27].atomic_radius = 125.300000
e[28].atomic_radius = 124.600000
e[29].atomic_radius = 127.800000
e[30].atomic_radius = 133.500000
e[31].atomic_radius = 122.100000
e[32].atomic_radius = 122.500000
e[33].atomic_radius = 124.500000
e[34].atomic_radius = 116.000000
e[35].atomic_radius = 114.500000
e[36].atomic_radius = 0.000000
e[37].atomic_radius = 247.500000
e[38].atomic_radius = 215.100000
e[39].atomic_radius = 177.600000
e[40].atomic_radius = 159.000000
e[41].atomic_radius = 142.900000
e[42].atomic_radius = 136.300000
e[43].atomic_radius = 135.200000
e[44].atomic_radius = 132.500000
e[45].atomic_radius = 134.500000
e[46].atomic_radius = 137.600000
e[47].atomic_radius = 144.500000
e[48].atomic_radius = 148.900000
e[49].atomic_radius = 162.600000
e[50].atomic_radius = 140.500000
e[51].atomic_radius = 145.000000
e[52].atomic_radius = 143.200000
e[53].atomic_radius = 133.100000
e[54].atomic_radius = 210.000000
e[55].atomic_radius = 265.500000
e[56].atomic_radius = 217.400000
e[57].atomic_radius = 187.000000
e[58].atomic_radius = 182.500000
e[59].atomic_radius = 182.000000
e[60].atomic_radius = 181.400000
e[61].atomic_radius = 181.000000
e[62].atomic_radius = 180.200000
e[63].atomic_radius = 199.500000
e[64].atomic_radius = 178.700000
e[65].atomic_radius = 176.300000
e[66].atomic_radius = 175.200000
e[67].atomic_radius = 174.300000
e[68].atomic_radius = 173.400000
e[69].atomic_radius = 172.400000
e[70].atomic_radius = 194.000000
e[71].atomic_radius = 171.800000
e[72].atomic_radius = 156.400000
e[73].atomic_radius = 143.000000
e[74].atomic_radius = 137.000000
e[75].atomic_radius = 137.100000
e[76].atomic_radius = 133.800000
e[77].atomic_radius = 135.700000
e[78].atomic_radius = 137.300000
e[79].atomic_radius = 144.200000
e[80].atomic_radius = 150.300000
e[81].atomic_radius = 170.000000
e[82].atomic_radius = 175.000000
e[83].atomic_radius = 154.500000
e[84].atomic_radius = 167.300000
e[85].atomic_radius = 0.000000
e[86].atomic_radius = 0.000000
e[87].atomic_radius = 270.000000
e[88].atomic_radius = 223.000000
e[89].atomic_radius = 187.800000
e[90].atomic_radius = 179.800000
e[91].atomic_radius = 156.100000
e[92].atomic_radius = 138.500000
e[93].atomic_radius = 130.000000
e[94].atomic_radius = 151.300000
e[95].atomic_radius = 0.000000
e[96].atomic_radius = 0.000000
e[97].atomic_radius = 0.000000
e[98].atomic_radius = 0.000000
e[99].atomic_radius = 0.000000
e[100].atomic_radius = 0.000000
e[101].atomic_radius = 0.000000
e[102].atomic_radius = 0.000000
e[103].atomic_radius = 0.000000
# Nuclear charge (Slater)
# 0 for those not available
e[1].nuclear_charge = 1.00
e[2].nuclear_charge = 1.70
e[3].nuclear_charge = 1.30
e[4].nuclear_charge = 1.95
e[5].nuclear_charge = 2.60
e[6].nuclear_charge = 3.25
e[7].nuclear_charge = 3.90
e[8].nuclear_charge = 4.55
e[9].nuclear_charge = 5.20
e[10].nuclear_charge = 5.85
e[11].nuclear_charge = 2.20
e[12].nuclear_charge = 2.85
e[13].nuclear_charge = 3.50
e[14].nuclear_charge = 4.15
e[15].nuclear_charge = 4.80
e[16].nuclear_charge = 5.45
e[17].nuclear_charge = 6.10
e[18].nuclear_charge = 6.75
e[19].nuclear_charge = 2.20
e[20].nuclear_charge = 2.85
e[21].nuclear_charge = 3.00
e[22].nuclear_charge = 3.15
e[23].nuclear_charge = 3.30
e[24].nuclear_charge = 3.45
e[25].nuclear_charge = 3.60
e[26].nuclear_charge = 3.75
e[27].nuclear_charge = 3.90
e[28].nuclear_charge = 4.05
e[29].nuclear_charge = 4.20
e[30].nuclear_charge = 4.35
e[31].nuclear_charge = 5.00
e[32].nuclear_charge = 5.65
e[33].nuclear_charge = 6.30
e[34].nuclear_charge = 6.95
e[35].nuclear_charge = 7.60
e[36].nuclear_charge = 8.25
e[37].nuclear_charge = 2.20
e[38].nuclear_charge = 2.85
e[39].nuclear_charge = 3.00
e[40].nuclear_charge = 3.15
e[41].nuclear_charge = 3.30
e[42].nuclear_charge = 3.45
e[43].nuclear_charge = 3.60
e[44].nuclear_charge = 3.75
e[45].nuclear_charge = 3.90
e[46].nuclear_charge = 4.05
e[47].nuclear_charge = 4.20
e[48].nuclear_charge = 4.35
e[49].nuclear_charge = 5.00
e[50].nuclear_charge = 5.65
e[51].nuclear_charge = 6.30
e[52].nuclear_charge = 6.95
e[53].nuclear_charge = 7.60
e[54].nuclear_charge = 8.25
e[55].nuclear_charge = 2.20
e[56].nuclear_charge = 2.85
e[57].nuclear_charge = 2.85
e[58].nuclear_charge = 2.85
e[59].nuclear_charge = 2.85
e[60].nuclear_charge = 2.85
e[61].nuclear_charge = 2.85
e[62].nuclear_charge = 2.85
e[63].nuclear_charge = 2.85
e[64].nuclear_charge = 2.85
e[65].nuclear_charge = 2.85
e[66].nuclear_charge = 2.85
e[67].nuclear_charge = 2.85
e[68].nuclear_charge = 2.85
e[69].nuclear_charge = 2.85
e[70].nuclear_charge = 2.854
e[71].nuclear_charge = 3.00
e[72].nuclear_charge = 3.15
e[73].nuclear_charge = 3.30
e[74].nuclear_charge = 4.35
e[75].nuclear_charge = 3.60
e[76].nuclear_charge = 3.75
e[77].nuclear_charge = 3.90
e[78].nuclear_charge = 4.05
e[79].nuclear_charge = 4.20
e[80].nuclear_charge = 4.35
e[81].nuclear_charge = 5.00
e[82].nuclear_charge = 5.65
e[83].nuclear_charge = 6.30
e[84].nuclear_charge = 6.95
e[85].nuclear_charge = 7.60
e[86].nuclear_charge = 8.25
e[87].nuclear_charge = 2.20
e[88].nuclear_charge = 1.65
e[89].nuclear_charge = 1.8
e[90].nuclear_charge = 1.95
e[91].nuclear_charge = 1.80
e[92].nuclear_charge = 1.80
e[93].nuclear_charge = 1.80
e[94].nuclear_charge = 1.65
e[95].nuclear_charge = 4.65
e[96].nuclear_charge = 1.80
e[97].nuclear_charge = 1.65
e[98].nuclear_charge = 1.65
e[99].nuclear_charge = 1.65
e[100].nuclear_charge = 1.65
e[101].nuclear_charge = 1.65
e[102].nuclear_charge = 1.65
e[103].nuclear_charge = 1.8
e[1].abundance = 0.880000
e[2].abundance = 0.000000
e[3].abundance = 0.006000
e[4].abundance = 0.000500
e[5].abundance = 0.001000
e[6].abundance = 0.090000
e[7].abundance = 0.030000
e[8].abundance = 49.400000
e[9].abundance = 0.030000
e[10].abundance = 0.000000
e[11].abundance = 2.640000
e[12].abundance = 1.940000
e[13].abundance = 7.570000
e[14].abundance = 25.800000
e[15].abundance = 0.090000
e[16].abundance = 0.050000
e[17].abundance = 0.190000
e[18].abundance = 0.000400
e[19].abundance = 2.400000
e[20].abundance = 3.390000
e[21].abundance = 0.000500
e[22].abundance = 0.410000
e[23].abundance = 0.010000
e[24].abundance = 0.020000
e[25].abundance = 0.090000
e[26].abundance = 4.700000
e[27].abundance = 0.004000
e[28].abundance = 0.010000
e[29].abundance = 0.010000
e[30].abundance = 0.010000
e[31].abundance = 0.001000
e[32].abundance = 0.000600
e[33].abundance = 0.000600
e[34].abundance = 0.000100
e[35].abundance = 0.000600
e[36].abundance = 0.000000
e[37].abundance = 0.030000
e[38].abundance = 0.010000
e[39].abundance = 0.003000
e[40].abundance = 0.020000
e[41].abundance = 0.002000
e[42].abundance = 0.001000
e[43].abundance = 0.000000
e[44].abundance = 0.000002
e[45].abundance = 0.000000
e[46].abundance = 0.000001
e[47].abundance = 0.000010
e[48].abundance = 0.000030
e[49].abundance = 0.000010
e[50].abundance = 0.001000
e[51].abundance = 0.000100
e[52].abundance = 0.000001
e[53].abundance = 0.000006
e[54].abundance = 0.000000
e[55].abundance = 0.000600
e[56].abundance = 0.030000
e[57].abundance = 0.002000
e[58].abundance = 0.004000
e[59].abundance = 0.000500
e[60].abundance = 0.002000
e[61].abundance = 0.000000
e[62].abundance = 0.000600
e[63].abundance = 0.000010
e[64].abundance = 0.000600
e[65].abundance = 0.000090
e[66].abundance = 0.000400
e[67].abundance = 0.000100
e[68].abundance = 0.000200
e[69].abundance = 0.000020
e[70].abundance = 0.000020
e[71].abundance = 0.000070
e[72].abundance = 0.000400
e[73].abundance = 0.000800
e[74].abundance = 0.006000
e[75].abundance = 0.000000
e[76].abundance = 0.000001
e[77].abundance = 0.000000
e[78].abundance = 0.000000
e[79].abundance = 0.000000
e[80].abundance = 0.000040
e[81].abundance = 0.000030
e[82].abundance = 0.002000
e[83].abundance = 0.000020
e[84].abundance = 0.000000
e[85].abundance = 0.000000
e[86].abundance = 0.000000
e[87].abundance = 0.000000
e[88].abundance = 0.000000
e[89].abundance = 0.000000
e[90].abundance = 0.001000
e[91].abundance = 9.0
e[92].abundance = 0.000300
e[93].abundance = 0.000000
e[94].abundance = 0.000000
e[95].abundance = 0.000000
e[96].abundance = 0.000000
e[97].abundance = 0.000000
e[98].abundance = 0.000000
e[99].abundance = 0.000000
e[100].abundance = 0.000000
e[101].abundance = 0.000000
e[102].abundance = 0.000000
e[103].abundance = 0.000000
# Electron Aff.
# 0 for those not available
# Defined as 0 for Elements 2, 25,66 and 72
e[1].electron_affinity = 72.8
e[2].electron_affinity = 0.0
e[3].electron_affinity = 59.6
e[4].electron_affinity = -18
e[5].electron_affinity = 26.7
e[6].electron_affinity = 121.9
e[7].electron_affinity = -7
e[8].electron_affinity = 141
e[9].electron_affinity = 328
e[10].electron_affinity = -29
e[11].electron_affinity = 52.9
e[12].electron_affinity = -21
e[13].electron_affinity = 44
e[14].electron_affinity = 133.6
e[15].electron_affinity = 72
e[16].electron_affinity = 200.4
e[17].electron_affinity = 349.0
e[18].electron_affinity = -35
e[19].electron_affinity = 48.4
e[20].electron_affinity = -186
e[21].electron_affinity = 18.1
e[22].electron_affinity = 7.6
e[23].electron_affinity = 50.7
e[24].electron_affinity = 64.3
e[25].electron_affinity = 0
e[26].electron_affinity = 15.7
e[27].electron_affinity = 63.8
e[28].electron_affinity = 156
e[29].electron_affinity = 188.5
e[30].electron_affinity = 9
e[31].electron_affinity = 30
e[32].electron_affinity = 116
e[33].electron_affinity = 78
e[34].electron_affinity = 195
e[35].electron_affinity = 324.7
e[36].electron_affinity = -39
e[37].electron_affinity = 46.9
e[38].electron_affinity = -164
e[39].electron_affinity = 29.6
e[40].electron_affinity = 41.1
e[41].electron_affinity = 86.2
e[42].electron_affinity = 72.0
e[43].electron_affinity = 96
e[44].electron_affinity = 101
e[45].electron_affinity = 109.7
e[46].electron_affinity = 53.7
e[47].electron_affinity = 125.7
e[48].electron_affinity = -26
e[49].electron_affinity = 30
e[50].electron_affinity = 116
e[51].electron_affinity = 101
e[52].electron_affinity = 190.2
e[53].electron_affinity = 295.2
e[54].electron_affinity = -41
e[55].electron_affinity = 45.5
e[56].electron_affinity = -46
e[57].electron_affinity = 50
e[58].electron_affinity = 50
e[59].electron_affinity = 50
e[60].electron_affinity = 50
e[61].electron_affinity = 50
e[62].electron_affinity = 50
e[63].electron_affinity = 50
e[64].electron_affinity = 50
e[65].electron_affinity = 50
e[66].electron_affinity = 0
e[67].electron_affinity = 50
e[68].electron_affinity = 50
e[69].electron_affinity = 50
e[70].electron_affinity = 50
e[71].electron_affinity = 50
e[72].electron_affinity = 0
e[73].electron_affinity = 14
e[74].electron_affinity = 78.6
e[75].electron_affinity = 14
e[76].electron_affinity = 106
e[77].electron_affinity = 151
e[78].electron_affinity = 205.3
e[79].electron_affinity = 222.8
e[80].electron_affinity = -18
e[81].electron_affinity = 20
e[82].electron_affinity = 35.1
e[83].electron_affinity = 91.3
e[84].electron_affinity = 183
e[85].electron_affinity = 270
e[86].electron_affinity = -41
e[87].electron_affinity = 44
e[88].electron_affinity = 159
e[89].electron_affinity = 406
e[90].electron_affinity = 598.3
e[91].electron_affinity = 607
e[92].electron_affinity = 535.6
e[93].electron_affinity = 0
e[94].electron_affinity = 0
e[95].electron_affinity = 0
e[96].electron_affinity = 0
e[97].electron_affinity = 0
e[98].electron_affinity = 0
e[99].electron_affinity = 50
e[100].electron_affinity = 0
e[101].electron_affinity = 0
e[102].electron_affinity = 0
e[103].electron_affinity = 0
# Electronegativity (Pauling)
# 0 for those not available
# Some noble gases defined as zero
e[1].electronegativity = 2.20
e[2].electronegativity = 0
e[3].electronegativity = 0.98
e[4].electronegativity = 1.57
e[5].electronegativity = 2.04
e[6].electronegativity = 2.55
e[7].electronegativity = 3.04
e[8].electronegativity = 3.44
e[9].electronegativity = 3.98
e[10].electronegativity = 0
e[11].electronegativity = 0.93
e[12].electronegativity = 1.31
e[13].electronegativity = 1.61
e[14].electronegativity = 1.90
e[15].electronegativity = 2.19
e[16].electronegativity = 2.58
e[17].electronegativity = 3.16
e[18].electronegativity = 0
e[19].electronegativity = 0.82
e[20].electronegativity = 1.00
e[21].electronegativity = 1.36
e[22].electronegativity = 1.54
e[23].electronegativity = 1.63
e[24].electronegativity = 1.66
e[25].electronegativity = 1.55
e[26].electronegativity = 1.83
e[27].electronegativity = 1.88
e[28].electronegativity = 1.91
e[29].electronegativity = 1.90
e[30].electronegativity = 1.65
e[31].electronegativity = 1.81
e[32].electronegativity = 2.01
e[33].electronegativity = 2.18
e[34].electronegativity = 2.55
e[35].electronegativity = 2.96
e[36].electronegativity = 0
e[37].electronegativity = 0.82
e[38].electronegativity = 0.95
e[39].electronegativity = 1.22
e[40].electronegativity = 1.33
e[41].electronegativity = 1.6
e[42].electronegativity = 2.16
e[43].electronegativity = 1.9
e[44].electronegativity = 2.2
e[45].electronegativity = 2.28
e[46].electronegativity = 2.20
e[47].electronegativity = 1.93
e[48].electronegativity = 1.96
e[49].electronegativity = 1.78
e[50].electronegativity = 1.96
e[51].electronegativity = 2.05
e[52].electronegativity = 2.1
e[53].electronegativity = 2.66
e[54].electronegativity = 2.6
e[55].electronegativity = 0.79
e[56].electronegativity = 0.89
e[57].electronegativity = 1.10
e[58].electronegativity = 1.12
e[59].electronegativity = 1.13
e[60].electronegativity = 1.14
e[61].electronegativity = 0
e[62].electronegativity = 1.17
e[63].electronegativity = 0
e[64].electronegativity = 1.20
e[65].electronegativity = 0
e[66].electronegativity = 1.22
e[67].electronegativity = 1.23
e[68].electronegativity = 1.24
e[69].electronegativity = 1.25
e[70].electronegativity = 0
e[71].electronegativity = 1.27
e[72].electronegativity = 1.3
e[73].electronegativity = 1.5
e[74].electronegativity = 2.36
e[75].electronegativity = 1.9
e[76].electronegativity = 2.2
e[77].electronegativity = 2.20
e[78].electronegativity = 2.28
e[79].electronegativity = 2.54
e[80].electronegativity = 2.00
e[81].electronegativity = 2.04
e[82].electronegativity = 2.33
e[83].electronegativity = 2.02
e[84].electronegativity = 2.0
e[85].electronegativity = 2.2
e[86].electronegativity = 0
e[87].electronegativity = 0.7
e[88].electronegativity = 0.89
e[89].electronegativity = 1.1
e[90].electronegativity = 1.3
e[91].electronegativity = 1.5
e[92].electronegativity = 1.38
e[93].electronegativity = 1.36
e[94].electronegativity = 1.28
e[95].electronegativity = 1.3
e[96].electronegativity = 1.3
e[97].electronegativity = 1.3
e[98].electronegativity = 1.3
e[99].electronegativity = 1.3
e[100].electronegativity = 1.3
e[101].electronegativity = 1.3
e[102].electronegativity = 1.3
e[103].electronegativity = 1.3
        # Ionization energy (in electronvolts)
e[1].ionization_energy = 13.598
e[2].ionization_energy = 24.587000
e[3].ionization_energy = 5.392000
e[4].ionization_energy = 9.322000
e[5].ionization_energy = 8.298000
e[6].ionization_energy = 11.260000
e[7].ionization_energy = 14.534000
e[8].ionization_energy = 13.618000
e[9].ionization_energy = 17.422000
e[10].ionization_energy = 21.564000
e[11].ionization_energy = 5.139000
e[12].ionization_energy = 7.646000
e[13].ionization_energy = 5.986000
e[14].ionization_energy = 8.151000
e[15].ionization_energy = 10.486000
e[16].ionization_energy = 10.360000
e[17].ionization_energy = 12.967000
e[18].ionization_energy = 15.759000
e[19].ionization_energy = 4.341000
e[20].ionization_energy = 6.113000
e[21].ionization_energy = 6.540000
e[22].ionization_energy = 6.820000
e[23].ionization_energy = 6.740000
e[24].ionization_energy = 6.766000
e[25].ionization_energy = 7.435000
e[26].ionization_energy = 7.870000
e[27].ionization_energy = 7.860000
e[28].ionization_energy = 7.635000
e[29].ionization_energy = 7.726000
e[30].ionization_energy = 9.394000
e[31].ionization_energy = 5.999000
e[32].ionization_energy = 7.899000
e[33].ionization_energy = 9.810000
e[34].ionization_energy = 9.752000
e[35].ionization_energy = 11.814000
e[36].ionization_energy = 13.999000
e[37].ionization_energy = 4.177000
e[38].ionization_energy = 5.695000
e[39].ionization_energy = 6.380000
e[40].ionization_energy = 6.840000
e[41].ionization_energy = 6.880000
e[42].ionization_energy = 7.099000
e[43].ionization_energy = 7.280000
e[44].ionization_energy = 7.370000
e[45].ionization_energy = 7.460000
e[46].ionization_energy = 8.340000
e[47].ionization_energy = 7.576000
e[48].ionization_energy = 8.993000
e[49].ionization_energy = 5.786000
e[50].ionization_energy = 7.344000
e[51].ionization_energy = 8.641000
e[52].ionization_energy = 9.009000
e[53].ionization_energy = 10.451000
e[54].ionization_energy = 12.130000
e[55].ionization_energy = 3.894000
e[56].ionization_energy = 5.212000
e[57].ionization_energy = 5.577000
e[58].ionization_energy = 5.470000
e[59].ionization_energy = 5.420000
e[60].ionization_energy = 5.490000
e[61].ionization_energy = 5.550000
e[62].ionization_energy = 5.630000
e[63].ionization_energy = 5.670000
e[64].ionization_energy = 6.140000
e[65].ionization_energy = 5.850000
e[66].ionization_energy = 5.930000
e[67].ionization_energy = 6.020000
e[68].ionization_energy = 6.100000
e[69].ionization_energy = 6.180000
e[70].ionization_energy = 6.254000
e[71].ionization_energy = 5.426000
e[72].ionization_energy = 7.000000
e[73].ionization_energy = 7.890000
e[74].ionization_energy = 7.980000
e[75].ionization_energy = 7.880000
e[76].ionization_energy = 8.700000
e[77].ionization_energy = 9.100000
e[78].ionization_energy = 9.000000
e[79].ionization_energy = 9.255000
e[80].ionization_energy = 10.437000
e[81].ionization_energy = 6.108000
e[82].ionization_energy = 6.108000
e[83].ionization_energy = 7.289000
e[84].ionization_energy = 8.420000
e[85].ionization_energy = 9.500000
e[86].ionization_energy = 10.748000
e[87].ionization_energy = 4.000000
e[88].ionization_energy = 5.279000
e[89].ionization_energy = 6.900000
e[90].ionization_energy = 6.950000
e[91].ionization_energy = 0.000000
e[92].ionization_energy = 6.080000
e[93].ionization_energy = 0.000000
e[94].ionization_energy = 5.800000
e[95].ionization_energy = 6.000000
e[96].ionization_energy = 0.000000
e[97].ionization_energy = 0.000000
e[98].ionization_energy = 0.000000
e[99].ionization_energy = 0.000000
e[100].ionization_energy = 0.000000
e[101].ionization_energy = 0.000000
e[102].ionization_energy = 0.000000
e[103].ionization_energy = 0.000000
# Ionic Radius (picometers)
# Radius for smallest charge where more than one possible
# Radius for H is for hydride
# 0 for those not available or those that don't form ions
e[1].ionic_radius = 154
e[2].ionic_radius = 0
e[3].ionic_radius = 78
e[4].ionic_radius = 34
e[5].ionic_radius = 23
e[6].ionic_radius = 260
e[7].ionic_radius = 171
e[8].ionic_radius = 132
e[9].ionic_radius = 133
e[10].ionic_radius = 112
e[11].ionic_radius = 98
e[12].ionic_radius = 78
e[13].ionic_radius = 57
e[14].ionic_radius = 271
e[15].ionic_radius = 212
e[16].ionic_radius = 184
e[17].ionic_radius = 181
e[18].ionic_radius = 154
e[19].ionic_radius = 133
e[20].ionic_radius = 106
e[21].ionic_radius = 83
e[22].ionic_radius = 80
e[23].ionic_radius = 72
e[24].ionic_radius = 84
e[25].ionic_radius = 91
e[26].ionic_radius = 82
e[27].ionic_radius = 82
e[28].ionic_radius = 78
e[29].ionic_radius = 96
e[30].ionic_radius = 83
e[31].ionic_radius = 113
e[32].ionic_radius = 90
e[33].ionic_radius = 69
e[34].ionic_radius = 69
e[35].ionic_radius = 196
e[36].ionic_radius = 169
e[37].ionic_radius = 149
e[38].ionic_radius = 127
e[39].ionic_radius = 106
e[40].ionic_radius = 109
e[41].ionic_radius = 74
e[42].ionic_radius = 92
e[43].ionic_radius = 95
e[44].ionic_radius = 77
e[45].ionic_radius = 86
e[46].ionic_radius = 86
e[47].ionic_radius = 113
e[48].ionic_radius = 114
e[49].ionic_radius = 132
e[50].ionic_radius = 93
e[51].ionic_radius = 89
e[52].ionic_radius = 211
e[53].ionic_radius = 220
e[54].ionic_radius = 190
e[55].ionic_radius = 165
e[56].ionic_radius = 143
e[57].ionic_radius = 122
e[58].ionic_radius = 107
e[59].ionic_radius = 106
e[60].ionic_radius = 104
e[61].ionic_radius = 106
e[62].ionic_radius = 111
e[63].ionic_radius = 112
e[64].ionic_radius = 97
e[65].ionic_radius = 93
e[66].ionic_radius = 91
e[67].ionic_radius = 89
e[68].ionic_radius = 89
e[69].ionic_radius = 87
e[70].ionic_radius = 113
e[71].ionic_radius = 85
e[72].ionic_radius = 84
e[73].ionic_radius = 72
e[74].ionic_radius = 68
e[75].ionic_radius = 72
e[76].ionic_radius = 89
e[77].ionic_radius = 89
e[78].ionic_radius = 85
e[79].ionic_radius = 137
e[80].ionic_radius = 127
e[81].ionic_radius = 149
e[82].ionic_radius = 132
e[83].ionic_radius = 96
e[84].ionic_radius = 65
e[85].ionic_radius = 227
e[86].ionic_radius = 0
e[87].ionic_radius = 180
e[88].ionic_radius = 152
e[89].ionic_radius = 118
e[90].ionic_radius = 101
e[91].ionic_radius = 113
e[92].ionic_radius = 103
e[93].ionic_radius = 110
e[94].ionic_radius = 108
e[95].ionic_radius = 107
e[96].ionic_radius = 119
e[97].ionic_radius = 118
e[98].ionic_radius = 117
e[99].ionic_radius = 116
e[100].ionic_radius = 115
e[101].ionic_radius = 114
e[102].ionic_radius = 113
e[103].ionic_radius = 112
# Thermal Conductivity (W/mK at 300 K)
# 0 for those not available
e[1].thermal_cond = 0.1815
e[2].thermal_cond = 0.152
e[3].thermal_cond = 84.7
e[4].thermal_cond = 200
e[5].thermal_cond = 27
e[6].thermal_cond = 1960
e[7].thermal_cond = 0.02598
e[8].thermal_cond = 0.2674
e[9].thermal_cond = 0.0279
e[10].thermal_cond = 0.0493
e[11].thermal_cond = 141
e[12].thermal_cond = 156
e[13].thermal_cond = 273
e[14].thermal_cond = 148
e[15].thermal_cond = 0.235
e[16].thermal_cond = 0.269
e[17].thermal_cond = 0.0089
e[18].thermal_cond = 0.0177
e[19].thermal_cond = 102.4
e[20].thermal_cond = 200
e[21].thermal_cond = 15.8
e[22].thermal_cond = 21.9
e[23].thermal_cond = 30.7
e[24].thermal_cond = 93.7
e[25].thermal_cond = 7.82
e[26].thermal_cond = 80.2
e[27].thermal_cond = 100
e[28].thermal_cond = 90.7
e[29].thermal_cond = 401
e[30].thermal_cond = 116
e[31].thermal_cond = 40.6
e[32].thermal_cond = 59.9
e[33].thermal_cond = 50.0
e[34].thermal_cond = 2.04
e[35].thermal_cond = 0.122
e[36].thermal_cond = 0.00949
e[37].thermal_cond = 58.2
e[38].thermal_cond = 35.3
e[39].thermal_cond = 17.2
e[40].thermal_cond = 22.7
e[41].thermal_cond = 53.7
e[42].thermal_cond = 138
e[43].thermal_cond = 50.6
e[44].thermal_cond = 117
e[45].thermal_cond = 150
e[46].thermal_cond = 71.8
e[47].thermal_cond = 429
e[48].thermal_cond = 96.8
e[49].thermal_cond = 81.6
e[50].thermal_cond = 66.6
e[51].thermal_cond = 24.3
e[52].thermal_cond = 2.35
e[53].thermal_cond = 0.449
e[54].thermal_cond = 0.00569
e[55].thermal_cond = 35.9
e[56].thermal_cond = 18.4
e[57].thermal_cond = 13.5
e[58].thermal_cond = 11.4
e[59].thermal_cond = 12.5
e[60].thermal_cond = 16.5
e[61].thermal_cond = 17.9
e[62].thermal_cond = 13.3
e[63].thermal_cond = 13.9
e[64].thermal_cond = 10.6
e[65].thermal_cond = 11.1
e[66].thermal_cond = 10.7
e[67].thermal_cond = 16.2
e[68].thermal_cond = 14.3
e[69].thermal_cond = 16.8
e[70].thermal_cond = 34.9
e[71].thermal_cond = 16.4
e[72].thermal_cond = 23
e[73].thermal_cond = 57.5
e[74].thermal_cond = 174
e[75].thermal_cond = 47.9
e[76].thermal_cond = 87.6
e[77].thermal_cond = 147
e[78].thermal_cond = 71.6
e[79].thermal_cond = 317
e[80].thermal_cond = 8.34
e[81].thermal_cond = 46.1
e[82].thermal_cond = 35.3
e[83].thermal_cond = 7.87
e[84].thermal_cond = 20
e[85].thermal_cond = 1.7
e[86].thermal_cond = 0.00364
e[87].thermal_cond = 15
e[88].thermal_cond = 18.6
e[89].thermal_cond = 12
e[90].thermal_cond = 54.0
e[91].thermal_cond = 47
e[92].thermal_cond = 27.6
e[93].thermal_cond = 6.3
e[94].thermal_cond = 6.74
e[95].thermal_cond = 10
e[96].thermal_cond = 10
e[97].thermal_cond = 10
e[98].thermal_cond = 10
e[99].thermal_cond = 10
e[100].thermal_cond = 10
e[101].thermal_cond = 10
e[102].thermal_cond = 10
e[103].thermal_cond = 10
# Melting Point (deg C)
e[1].melting_point=-259.14
e[2].melting_point=-272.2
e[3].melting_point=180.54
e[4].melting_point=1278.000000
e[5].melting_point=2300.
e[6].melting_point=3550.000000
e[7].melting_point=-209.86
e[8].melting_point=-218.4
e[9].melting_point=-219.62
e[10].melting_point=-248.67
e[11].melting_point=97.81
e[12].melting_point=648.8
e[13].melting_point=660.37
e[14].melting_point=1410.
e[15].melting_point=44.100000
e[16].melting_point=112.8
e[17].melting_point=-100.98
e[18].melting_point=-189.2
e[19].melting_point=63.65
e[20].melting_point=839.000
e[21].melting_point=1541.
e[22].melting_point=1660.
e[23].melting_point=1890.
e[24].melting_point=1857.
e[25].melting_point=1244.
e[26].melting_point=1553.
e[27].melting_point=1495.
e[28].melting_point=1453.
e[29].melting_point=1083.4
e[30].melting_point=419.58
e[31].melting_point=29.78
e[32].melting_point=937.4
e[33].melting_point=817.00
e[34].melting_point=217.
e[35].melting_point=-7.2
e[36].melting_point=-156.6
e[37].melting_point=38.89
e[38].melting_point=769.
e[39].melting_point=1522
e[40].melting_point=1852.00
e[41].melting_point=2468.
e[42].melting_point=2617.
e[43].melting_point=2172.
e[44].melting_point=2310.
e[45].melting_point=1966
e[46].melting_point=1552.
e[47].melting_point=961.93
e[48].melting_point=320.9
e[49].melting_point=156.61
e[50].melting_point=231.9681
e[51].melting_point=630.74
e[52].melting_point=449.5
e[53].melting_point=113.5
e[54].melting_point=-111.9
e[55].melting_point=28.40
e[56].melting_point=725.
e[57].melting_point=921
e[58].melting_point=799
e[59].melting_point=931
e[60].melting_point=1021
e[61].melting_point=1168
e[62].melting_point=1077
e[63].melting_point=822
e[64].melting_point=1313
e[65].melting_point=1356
e[66].melting_point=1356
e[67].melting_point=1474
e[68].melting_point=1529
e[69].melting_point=1545
e[70].melting_point=819
e[71].melting_point=1663
e[72].melting_point=2227.0
e[73].melting_point=2996
e[74].melting_point=3410.
e[75].melting_point=3180.
e[76].melting_point=3045.
e[77].melting_point=2410.
e[78].melting_point=1772.
e[79].melting_point=1064.43
e[80].melting_point=-38.87
e[81].melting_point=303.5
e[82].melting_point=327.502
e[83].melting_point=271.3
e[84].melting_point=254.
e[85].melting_point=302.
e[86].melting_point=-71.
e[87].melting_point=27.
e[88].melting_point=700.
e[89].melting_point=1050.
e[90].melting_point=1750.
e[91].melting_point=1554.000000
e[92].melting_point=1132.3
e[93].melting_point=640.
e[94].melting_point=641.
e[95].melting_point=994.
e[96].melting_point=1340.
e[97].melting_point=986.
e[98].melting_point=900.0000
# Boiling Point (deg C)
e[1].boiling_point=-252.87
e[2].boiling_point=-268.934
e[3].boiling_point=1347
e[4].boiling_point=2870.0
e[5].boiling_point=2550
e[6].boiling_point=4827.0
e[7].boiling_point=-195.8
e[8].boiling_point=-183.962
e[9].boiling_point=-188.14
e[10].boiling_point=-246.048
e[11].boiling_point=882.9
e[12].boiling_point=1090
e[13].boiling_point=2467
e[14].boiling_point=2355
e[15].boiling_point=280
e[16].boiling_point=444.674
e[17].boiling_point=-34.6
e[18].boiling_point=-185.7
e[19].boiling_point=774
e[20].boiling_point=1484
e[21].boiling_point=2831
e[22].boiling_point=3287
e[23].boiling_point=3380
e[24].boiling_point=2672
e[25].boiling_point=1962
e[26].boiling_point=2750
e[27].boiling_point=2870
e[28].boiling_point=2732
e[29].boiling_point=2567
e[30].boiling_point=907
e[31].boiling_point=2403
e[32].boiling_point=2830
e[33].boiling_point=613.0
e[34].boiling_point=684.9
e[35].boiling_point=58.78
e[36].boiling_point=-152.30
e[37].boiling_point=688
e[38].boiling_point=1384
e[39].boiling_point=3338
e[40].boiling_point=4377
e[41].boiling_point=4742
e[42].boiling_point=4612
e[43].boiling_point=4877
e[44].boiling_point=3900
e[45].boiling_point=3727
e[46].boiling_point=3140
e[47].boiling_point=2212
e[48].boiling_point=765
e[49].boiling_point=2080
e[50].boiling_point=2270
e[51].boiling_point=1750
e[52].boiling_point=989.8
e[53].boiling_point=184.35
e[54].boiling_point=-107.100000
e[55].boiling_point=678.4
e[56].boiling_point=1640
e[57].boiling_point=3457
e[58].boiling_point=3426
e[59].boiling_point=3512
e[60].boiling_point=3068
e[61].boiling_point=2700
e[62].boiling_point=1791
e[63].boiling_point=1597
e[64].boiling_point=3266
e[65].boiling_point=3123
e[66].boiling_point=2562
e[67].boiling_point=2695
e[68].boiling_point=2863
e[69].boiling_point=1947
e[70].boiling_point=1194
e[71].boiling_point=3395
e[72].boiling_point=4602
e[73].boiling_point=5425
e[74].boiling_point=5660
e[75].boiling_point=5627
e[76].boiling_point=5027
e[77].boiling_point=4130
e[78].boiling_point=3827
e[79].boiling_point=2807
e[80].boiling_point=356.58
e[81].boiling_point=1457
e[82].boiling_point=1740
e[83].boiling_point=560
e[84].boiling_point=962
e[85].boiling_point=337
e[86].boiling_point=-61.8
e[87].boiling_point=677
e[88].boiling_point=1140
e[89].boiling_point=3200
e[90].boiling_point=4790
e[92].boiling_point=3818
e[93].boiling_point=3902
e[94].boiling_point=3232
e[95].boiling_point=2607
for i in range(1,nelements+1):
e[i].create_var_dict()
#end for
#for i in range(len(e)):
# e[i].create_string_representation()
##end for
isotope_masses = obj(
H = {1:1.00782503207, 2:2.0141017778, 3:3.0160492777},
He = {3:3.0160293191, 4:4.00260325415},
Li = {6:6.015122795, 7:7.01600455},
Be = {9:9.0121822},
B = {10:10.0129370, 11:11.0093054},
C = {12:12.0000000, 13:13.0033548378, 14:14.003241989},
N = {14:14.0030740048, 15:15.0001088982},
O = {16:15.99491461956, 17:16.99913170, 18:17.9991610},
F = {19:18.99840322},
Ne = {20:19.9924401754, 21:20.99384668, 22:21.991385114},
Na = {23:22.9897692809},
Mg = {24:23.985041700, 25:24.98583692, 26:25.982592929},
Al = {27:26.98153863},
Si = {28:27.9769265325, 29:28.976494700, 30:29.97377017},
P = {31:30.97376163},
S = {32:31.97207100, 33:32.97145876, 34:33.96786690, 36:35.96708076},
Cl = {35:34.96885268, 37:36.96590259},
Ar = {36:35.967545106, 38:37.9627324, 40:39.9623831225},
K = {39:38.96370668, 40:39.96399848, 41:40.96182576},
Ca = {40:39.96259098, 42:41.95861801, 43:42.9587666, 44:43.9554818, 46:45.9536926, 48:47.952534},
Sc = {45:44.9559119},
Ti = {46:45.9526316, 47:46.9517631, 48:47.9479463, 49:48.9478700, 50:49.9447912},
V = {50:49.9471585, 51:50.9439595},
Cr = {50:49.9460442, 52:51.9405075, 53:52.9406494, 54:53.9388804},
Mn = {55:54.9380451},
Fe = {54:53.9396105, 56:55.9349375, 57:56.9353940, 58:57.9332756},
Co = {59:58.9331950},
Ni = {58:57.9353429, 60:59.9307864, 61:60.9310560, 62:61.9283451, 64:63.9279660},
Cu = {63:62.9295975, 65:64.9277895},
Zn = {64:63.9291422, 66:65.9260334, 67:66.9271273, 68:67.9248442, 70:69.9253193},
Ga = {69:68.9255736, 71:70.9247013},
Ge = {70:69.9242474, 72:71.9220758, 73:72.9234589, 74:73.9211778, 76:75.9214026},
As = {75:74.9215965},
Se = {74:73.9224764, 76:75.9192136, 77:76.9199140, 78:77.9173091, 80:79.9165213, 82:81.9166994},
Br = {79:78.9183371, 81:80.9162906},
Kr = {78:77.9203648, 80:79.9163790, 82:81.9134836, 83:82.914136, 84:83.911507, 86:85.91061073},
Rb = {85:84.911789738, 87:86.909180527},
Sr = {84:83.913425, 86:85.9092602, 87:86.9088771, 88:87.9056121},
Y = {89:88.9058483},
Zr = {90:89.9047044, 91:90.9056458, 92:91.9050408, 94:93.9063152, 96:95.9082734},
Nb = {93:92.9063781},
Mo = {92:91.906811, 94:93.9050883, 95:94.9058421, 96:95.9046795, 97:96.9060215, 98:97.9054082, 100:99.907477},
Tc = {97:96.906365, 98:97.907216, 99:98.9062547},
Ru = {96:95.907598, 98:97.905287, 99:98.9059393, 100:99.9042195, 101:100.9055821, 102:101.9043493, 104:103.905433},
Rh = {103:102.905504},
Pd = {102:101.905609, 104:103.904036, 105:104.905085, 106:105.903486, 108:107.903892, 110:109.905153},
Ag = {107:106.905097, 109:108.904752},
Cd = {106:105.906459, 108:107.904184, 110:109.9030021, 111:110.9041781, 112:111.9027578, 113:112.9044017, 114:113.9033585, 116:115.904756},
In = {113:112.904058, 115:114.903878},
Sn = {112:111.904818, 114:113.902779, 115:114.903342, 116:115.901741, 117:116.902952, 118:117.901603, 119:118.903308, 120:119.9021947, 122:121.9034390, 124:123.9052739},
Sb = {121:120.9038157, 123:122.9042140},
Te = {120:119.904020, 122:121.9030439, 123:122.9042700, 124:123.9028179, 125:124.9044307, 126:125.9033117, 128:127.9044631, 130:129.9062244},
I = {127:126.904473},
Xe = {124:123.9058930, 126:125.904274, 128:127.9035313, 129:128.9047794, 130:129.9035080, 131:130.9050824, 132:131.9041535, 134:133.9053945, 136:135.907219},
Cs = {133:132.905451933},
Ba = {130:129.9063208, 132:131.9050613, 134:133.9045084, 135:134.9056886, 136:135.9045759, 137:136.9058274, 138:137.9052472},
La = {138:137.907112, 139:138.9063533},
Ce = {136:135.907172, 138:137.905991, 140:139.9054387, 142:141.909244},
Pr = {141:140.9076528},
Nd = {142:141.9077233, 143:142.9098143, 144:143.9100873, 145:144.9125736, 146:145.9131169, 148:147.916893, 150:149.920891},
Pm = {145:144.912749, 147:146.9151385},
Sm = {144:143.911999, 147:146.9148979, 148:147.9148227, 149:148.9171847, 150:149.9172755, 152:151.9197324, 154:153.9222093},
Eu = {151:150.9198502, 153:152.9212303},
Gd = {152:151.9197910, 154:153.9208656, 155:154.9226220, 156:155.9221227, 157:156.9239601, 158:157.9241039, 160:159.9270541},
Tb = {159:158.9253468},
Dy = {156:155.924283, 158:157.924409, 160:159.9251975, 161:160.9269334, 162:161.9267984, 163:162.9287312, 164:163.9291748},
Ho = {165:164.9303221},
Er = {162:161.928778, 164:163.929200, 166:165.9302931, 167:166.9320482, 168:167.9323702, 170:169.9354643},
Tm = {169:168.9342133},
Yb = {168:167.933897, 170:169.9347618, 171:170.9363258, 172:171.9363815, 173:172.9382108, 174:173.9388621, 176:175.9425717},
Lu = {175:174.9407718, 176:175.9426863},
Hf = {174:173.940046, 176:175.9414086, 177:176.9432207, 178:177.9436988, 179:178.9458161, 180:179.9465500},
Ta = {180:179.9474648, 181:180.9479958},
W = {180:179.946704, 182:181.9482042, 183:182.9502230, 184:183.9509312, 186:185.9543641},
Re = {185:184.9529550, 187:186.9557531},
Os = {184:183.9524891, 186:185.9538382, 187:186.9557505, 188:187.9558382, 189:188.9581475, 190:189.9584470, 192:191.9614807},
Ir = {191:190.9605940, 193:192.9629264},
Pt = {190:189.959932, 192:191.9610380, 194:193.9626803, 195:194.9647911, 196:195.9649515, 198:197.967893},
Au = {197:196.9665687},
Hg = {196:195.965833, 198:197.9667690, 199:198.9682799, 200:199.9683260, 201:200.9703023, 202:201.9706430, 204:203.9734939},
Tl = {203:202.9723442, 205:204.9744275},
Pb = {204:203.9730436, 206:205.9744653, 207:206.9758969, 208:207.9766521},
Bi = {209:208.9803987},
Po = {209:208.9824304, 210:209.9828737},
At = {210:209.987148, 211:210.9874963},
Rn = {211:210.990601, 220:220.0113940, 222:222.0175777},
Fr = {223:223.0197359},
Ra = {223:223.0185022, 224:224.0202118, 226:226.0254098, 228:228.0310703},
Ac = {227:227.0277521},
Th = {230:230.0331338, 232:232.0380553},
Pa = {231:231.0358840},
U = {233:233.0396352, 234:234.0409521, 235:235.0439299, 236:236.0455680, 238:238.0507882},
Np = {236:236.046570, 237:237.0481734},
Pu = {238:238.0495599, 239:239.0521634, 240:240.0538135, 241:241.0568515, 242:242.0587426, 244:244.064204},
Am = {241:241.0568291, 243:243.0613811},
Cm = {243:243.0613891, 244:244.0627526, 245:245.0654912, 246:246.0672237, 247:247.070354, 248:248.072349},
Bk = {247:247.070307, 249:249.0749867},
Cf = {249:249.0748535, 250:250.0764061, 251:251.079587, 252:252.081626},
Es = {252:252.082980},
Fm = {257:257.095105},
Md = {258:258.098431, 260:260.10365},
No = {259:259.10103},
Lr = {262:262.10963},
Rf = {265:265.11670},
Db = {268:268.12545},
Sg = {271:271.13347},
Bh = {272:272.13803},
Hs = {270:270.13465},
Mt = {276:276.15116},
Ds = {281:281.16206},
Rg = {280:280.16447},
Cn = {285:285.17411}
)
self.nelements = nelements
self.simple_elements = e
self.elements = obj()
for i in range(1,self.nelements+1):
elem = self.simple_elements[i]
element = Element(elem)
self.elements[elem.symbol] = element
self[elem.symbol] = element
#end for
isotopes = obj()
for symbol,element in self.elements.iteritems():
elem_isotopes = obj()
for mass_number,mass in isotope_masses[symbol].iteritems():
isotope = element.copy()
isotope.atomic_weight = phys_value_dict(mass,'amu')
elem_isotopes[mass_number] = isotope
#end for
isotopes[symbol] = elem_isotopes
#end for
self.isotopes = isotopes
#end def __init__
    def show(self):
        # self.elements is keyed by symbol, so walk the integer-keyed
        # simple_elements (1..nelements) instead of indexing elements by int
        for i in range(1,self.nelements+1):
            print
            print self.simple_elements[i].string_rep
        #end for
    #end def show
#end class PeriodicTable
pt = PeriodicTable()
periodic_table = pt
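# A small usage sketch (attribute names taken from the constructor above;
# treat the exact access patterns as assumptions, not a documented API):
#   pt.elements['C']                     -> the Element object for carbon
#   pt.C                                 -> the same object, set via self[elem.symbol]
#   pt.isotopes['C'][13].atomic_weight   -> carbon-13 mass as a phys_value_dict in amu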
| [
"[email protected]"
] | |
5326a03c579aa60eca70ccc490e2ce6b66223d77 | 15f0514701a78e12750f68ba09d68095172493ee | /Python3/488.py | 2fe4101ca082c932b0960dd56c0a27563aed247f | [
"MIT"
] | permissive | strengthen/LeetCode | 5e38c8c9d3e8f27109b9124ae17ef8a4139a1518 | 3ffa6dcbeb787a6128641402081a4ff70093bb61 | refs/heads/master | 2022-12-04T21:35:17.872212 | 2022-11-30T06:23:24 | 2022-11-30T06:23:24 | 155,958,163 | 936 | 365 | MIT | 2021-11-15T04:02:45 | 2018-11-03T06:47:38 | null | UTF-8 | Python | false | false | 3,473 | py | __________________________________________________________________________________________________
sample 28 ms submission
class Solution:
def findMinStep(self, board: str, hand: str) -> int:
from collections import Counter
balls_count = Counter(hand)
return self.dfs(board, balls_count)
def dfs(self, board, balls_count):
if not board:
return 0
answer = float('inf')
i = 0
while i < len(board):
j = i + 1
while j < len(board) and board[j] == board[i]:
j += 1
gap = 3 - (j - i)
if balls_count[board[i]] >= gap:
if (j - i) > 3:
gap = 0
balls_count[board[i]] -= gap
a = self.dfs(board[:i] + board[j:], balls_count)
if a >= 0:
answer = min(answer, a + gap)
balls_count[board[i]] += gap
i = j
return answer if answer != float('inf') else -1
__________________________________________________________________________________________________
sample 32 ms submission
class Solution:
def findMinStep(self, board: str, hand: str) -> int:
if not board or len(board) == 0:
return -1
hand_map = {}
for b in hand:
hand_map[b] = hand_map.get(b, 0) + 1
min_res = [len(hand) + 1]
self.dfs(board, hand_map, 0, min_res)
return min_res[0] if min_res[0] != len(hand) + 1 else -1
def dfs(self, board, hand_map, used, min_res):
l = len(board)
if l == 0:
if min_res[0] > used:
min_res[0] = used
return
if len(hand_map) == 0:
return
for i in range(l):
ch = board[i]
if ch not in hand_map:
continue
count = hand_map[ch]
if i < l-1 and board[i+1] == ch:
new_count = count - 1
if new_count == 0:
del hand_map[ch]
else:
hand_map[ch] = new_count
new_board = self.create_board(board, i-1, i+2)
self.dfs(new_board, hand_map, used+1, min_res)
hand_map[ch] = count
elif count >= 2:
new_count = count - 2
if new_count == 0:
del hand_map[ch]
else:
hand_map[ch] = new_count
new_board = self.create_board(board, i-1, i+1)
self.dfs(new_board, hand_map, used+2, min_res)
hand_map[ch] = count
def create_board(self, board, left, right):
l = len(board)
while left >= 0 and right < l:
ch = board[left]
count = 0
i, j = left, right
while i >= 0 and board[i] == ch:
i -= 1
count += 1
while j < l and board[j] == ch:
j += 1
count += 1
if count < 3:
break
else:
left, right = i, j
return board[:left+1] + board[right:]
__________________________________________________________________________________________________
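# A hypothetical spot check for either Solution above, using the published
# LeetCode 488 examples (expected values from the problem statement):
#   Solution().findMinStep("WRRBBW", "RB")        -> -1
#   Solution().findMinStep("WWRRBBWW", "WRBRW")   -> 2
#   Solution().findMinStep("G", "GGGGG")          -> 2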
| [
"[email protected]"
] | |
c4a6e8d1854c60e586f254827e4f58e654aec6df | 910d1c6f0531982ac85cfbfcfd96f694d77d53d9 | /tornado-restfulapi/celery_app/celeryconfig.py | b54ff2f18fb2036a8084450817b73fe78a3fcf5c | [
"MIT"
] | permissive | zhangyong7887/tornado | d00ed173a542c187a2272fd9d250679894cc3263 | 2a89ce36380c7f322acbcd7cf5b035b3e8d99619 | refs/heads/main | 2023-03-27T07:26:11.600519 | 2021-03-11T22:08:18 | 2021-03-11T22:08:18 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,812 | py | from celery.schedules import crontab
from datetime import timedelta
from kombu import Queue
from kombu import Exchange
# Content types the workers will accept; the default is {'json'}
accept_content = ['json']
# Content types accepted when reading results back from the result store
result_accept_content = ['json']
# Format times with China's standard time zone
timezone = "Asia/Shanghai"
# Serialize results as JSON; json has been the default since 4.0 (pickle before that).
result_serializer = 'json'
# Use redis as the broker; for rabbitmq it would be broker_url = 'amqp://guest:guest@localhost:5672//'
broker_url = "redis://127.0.0.1/0"
# Where to store results; rpc, databases, redis, etc. are supported, see the docs  # result_backend = 'db+mysql://scott:tiger@localhost/foo'  # MySQL as the backend database
result_backend = "redis://127.0.0.1/1"
# Result expiry time; the default is one day, and None or 0 means results never expire
result_expires = 60 * 60 * 24
# Worker concurrency; the default is the number of CPU cores
worker_concurrency = 12
# Maximum number of tasks each worker child process runs before being replaced
worker_max_tasks_per_child = 100
# Dedicated task queues so different tasks run on different queues. If configured, start celery with -Q add (several queues: -Q add,mul)
# Example: celery -A celery_app worker -Q add -l info -P eventlet (start a worker on Windows; it only consumes the add queue, i.e. only runs the add task)
# Example: celery -A celery_app worker -Q add,mul -l info (start a worker on Linux; it only consumes the add and mul queues, i.e. only runs the add and mul tasks)
# task_routes = {
# 'celery_app.tasks.add': {'queue': 'add'},
# 'celery_app.tasks.mul': {'queue': 'mul'},
# 'celery_app.tasks.xsum': {'queue': 'xsum'},
# }
# Modules to import so the task definitions are registered
imports = (
    'celery_app.tasks',
    'apps.users.tasks',
)
# Example of running a worker in the background:
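# A minimal sketch of how this config module might be wired into a Celery app
# (an assumption about the project layout, e.g. in celery_app/__init__.py;
# kept as comments here so the config module itself stays side-effect free):
# from celery import Celery
# app = Celery('celery_app')
# app.config_from_object('celery_app.celeryconfig')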
"[email protected]"
] | |
7f9cf2c3ee18d83cacfa58b96162d62ed0d51a03 | 52cb25dca22292fce4d3907cc370098d7a57fcc2 | /BAEKJOON/다이나믹 프로그래밍/9461_파도반 수열.py | 9696a17c3a5037916b2f467d9cac06307a38b7ef | [] | no_license | shjang1013/Algorithm | c4fc4c52cbbd3b7ecf063c716f600d1dbfc40d1a | 33f2caa6339afc6fc53ea872691145effbce0309 | refs/heads/master | 2022-09-16T12:02:53.146884 | 2022-08-31T16:29:04 | 2022-08-31T16:29:04 | 227,843,135 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 295 | py | # 동적 계획법(다이나믹 프로그래밍)을 이용
# 4번째부터 규칙 존재
def triangle(N):
f = [0, 1, 1, 1]
for i in range(4, N+1):
f.append(f[i-3]+f[i-2])
return f[N]
T = int(input())
for _ in range(T):
N = int(input())
print(triangle(N))
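# Hypothetical sanity check (not part of the submission): the first ten terms
# of the Padovan sequence are 1, 1, 1, 2, 2, 3, 4, 5, 7, 9, so
# [triangle(n) for n in range(1, 11)] == [1, 1, 1, 2, 2, 3, 4, 5, 7, 9]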
| [
"[email protected]"
] | |
2189c521d643b8c9feae447145f8e67196cd1384 | ea727658bb22df6dd0a4d8aaff5d13beec8ec8b5 | /examples/大數據資料分析/範例程式/第11章/program11-2.py | ee59c8722f865802cb46b9adc72cedd22431edec | [] | no_license | kelvinchoiwc/DataScience_1082 | f8e31c230776f45d70f6b96ef81d16881118e453 | 199f915540afe6c9a9ec7055aac5911420d783be | refs/heads/master | 2023-07-24T04:29:01.763893 | 2021-09-01T12:33:21 | 2021-09-01T12:33:21 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 235 | py | import random
# Draw 99 lotto numbers in the range 1-49
s2 = tuple([random.randint(1, 49) for i in range(1,100)])
print(s2)
# Tally how often each number appears (index 0 is unused)
lottoNums = 50*[0]
for i in range(len(s2)):
    k = s2[i]
    lottoNums[k] += 1
for j in range(1, len(lottoNums)):
    print('%d: %d'%(j, lottoNums[j]))
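# An equivalent tally with collections.Counter (a sketch; note it would print
# only the numbers actually drawn, unlike the zero-filled list above):
# from collections import Counter
# for j, c in sorted(Counter(s2).items()):
#     print('%d: %d' % (j, c))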
"[email protected]"
] | |
e03e4f0d8e5feaf4628a344d6adcdffa5bdb7312 | 0adb19e463ec27dda57b7f551b2d49e229053b8c | /film_editing/film_edit_skill.py | 16cc540b54225aae15ee09161b532391f16a7bf9 | [] | no_license | alam-mahtab/FastapiCD | c8d36852a674583bba088eee8b1cb98a24acbea1 | 7206179408a9ae67ba485a4620d14b470009153d | refs/heads/main | 2023-02-25T05:56:35.685081 | 2021-02-03T16:16:26 | 2021-02-03T16:16:26 | 335,682,061 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,741 | py | from typing import List
from fastapi import Depends,File, UploadFile, APIRouter
from sqlalchemy.orm import Session
from film_editing import crud, models
from writer.database import SessionLocal, engine
from film_editing.schemas import FilmeditBase, FilmeditList
from film_editing.models import Filmedit
# Pagination
from fastapi_pagination import Page, pagination_params
from fastapi_pagination.paginator import paginate
router = APIRouter()
def get_db():
db = SessionLocal()
try:
yield db
finally:
db.close()
models.Base.metadata.create_all(bind=engine)
import uuid
from pathlib import Path
import time
#from fastapi.staticfiles import StaticFiles
from starlette.staticfiles import StaticFiles
import os
from os.path import dirname, abspath, join
import shutil
router.mount("/static", StaticFiles(directory="static"), name="static")
dirname = dirname(dirname(abspath(__file__)))
# os.path.join discards everything before an absolute component, so the second
# argument must not start with '/' or dirname would be dropped entirely
images_path = join(dirname, 'static')
@router.post("/film_editing/")
def create_film_edit(
desc:str,name:str,file_pro: UploadFile= File(...), file_cover: UploadFile= File(...), db: Session = Depends(get_db)
):
extension_pro = file_pro.filename.split(".")[-1] in ("jpg", "jpeg", "png")
if not extension_pro:
return "Image must be jpg or png format!"
suffix_pro = Path(file_pro.filename).suffix
filename_pro = time.strftime( str(uuid.uuid4().hex) + "%Y%m%d-%H%M%S" + suffix_pro )
with open("static/"+filename_pro, "wb") as image:
shutil.copyfileobj(file_pro.file, image)
url_profile = os.path.join(images_path, filename_pro)
extension_cover = file_cover.filename.split(".")[-1] in ("jpg", "jpeg", "png")
if not extension_cover:
return "Image must be jpg or png format!"
suffix_cover =Path(file_cover.filename).suffix
filename_cover = time.strftime( str(uuid.uuid4().hex) + "%Y%m%d-%H%M%S" + suffix_cover )
with open("static/"+filename_cover, "wb") as image:
shutil.copyfileobj(file_cover.file, image)
url_cover = os.path.join(images_path, filename_cover)
return crud.create_film_edit(db=db,name=name,desc=desc,url_profile=url_profile,url_cover=url_cover)
@router.get("/film_editings/" ,dependencies=[Depends(pagination_params)])
def film_edit_list(db: Session = Depends(get_db)):
film_edit_all =crud.film_edit_list(db=db)
return paginate(film_edit_all)
@router.get("/film_editings/{film_editing_id}")
def film_edit_detail(film_edit_id:int,db: Session = Depends(get_db)):
return crud.get_film_edit(db=db, id=film_edit_id)
@router.delete("film_editings/{film_editings_id}")
async def delete(film_edit_id: int, db: Session = Depends(get_db)):
deleted = await crud.delete(db, film_edit_id)
return {"deleted": deleted} | [
"[email protected]"
] | |
f0806efc13466846b13387d88cdb1bf970b40434 | 5254c3a7e94666264120f26c87734ad053c54541 | /4. Aleatoridad/4.2 Random/ejercicio_4.9.py | dfd5b986fc932ed4cef71f7090ca15df0345c9d0 | [] | no_license | ccollado7/UNSAM---Python | 425eb29a2df8777e9f892b08cc250bce9b2b0b8c | f2d0e7b3f64efa8d03f9aa4707c90e992683672d | refs/heads/master | 2023-03-21T17:42:27.210599 | 2021-03-09T13:06:45 | 2021-03-09T13:06:45 | 286,613,172 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 220 | py | # -*- coding: utf-8 -*-
"""
Created on Thu Aug 27 09:13:45 2020
@author: User
"""
#Ejercicio 4.9
import random
def genear_punto():
x = random.random()
y = random.random()
return(x,y)
print(genear_punto()) | [
"[email protected]"
] | |
aff205bbbf740f5431d56b6de1825f1a154142f0 | 04b1803adb6653ecb7cb827c4f4aa616afacf629 | /components/timers/DEPS | 413f57b96cae54eaae8224d794ee782a7eca92c5 | [
"BSD-3-Clause"
] | permissive | Samsung/Castanets | 240d9338e097b75b3f669604315b06f7cf129d64 | 4896f732fc747dfdcfcbac3d442f2d2d42df264a | refs/heads/castanets_76_dev | 2023-08-31T09:01:04.744346 | 2021-07-30T04:56:25 | 2021-08-11T05:45:21 | 125,484,161 | 58 | 49 | BSD-3-Clause | 2022-10-16T19:31:26 | 2018-03-16T08:07:37 | null | UTF-8 | Python | false | false | 237 | include_rules = [
# This directory is shared with Chrome OS, which only links against
# base/. We don't want any other dependencies to creep in.
"-build",
"-content",
"-library_loaders",
"-net",
"-third_party",
"-url",
] | [
"[email protected]"
] | ||
b057623939aa28e36698dfcfc578da6ed76d54a2 | 9c4e02ba5201794a4c5cbff548db1be7c87409c1 | /venv/lib/python3.9/site-packages/pre_commit/main.py | f1e8d03db183cc99d62e7a9d1091c71ac824bca5 | [
"Apache-2.0",
"MIT"
] | permissive | ClassWizard/PodLockParser | 4faf4679d404158b3cf2b1ceb4faabca461b0008 | 84f6d3fced521849657d21ae4cb9681f5897b957 | refs/heads/master | 2022-12-23T20:39:48.096729 | 2022-02-08T09:49:01 | 2022-02-08T09:49:01 | 167,668,617 | 2 | 1 | MIT | 2022-12-14T10:01:41 | 2019-01-26T08:50:35 | Python | UTF-8 | Python | false | false | 14,606 | py | import argparse
import logging
import os
import sys
from typing import Any
from typing import Optional
from typing import Sequence
from typing import Union
import pre_commit.constants as C
from pre_commit import git
from pre_commit.color import add_color_option
from pre_commit.commands.autoupdate import autoupdate
from pre_commit.commands.clean import clean
from pre_commit.commands.gc import gc
from pre_commit.commands.hook_impl import hook_impl
from pre_commit.commands.init_templatedir import init_templatedir
from pre_commit.commands.install_uninstall import install
from pre_commit.commands.install_uninstall import install_hooks
from pre_commit.commands.install_uninstall import uninstall
from pre_commit.commands.migrate_config import migrate_config
from pre_commit.commands.run import run
from pre_commit.commands.sample_config import sample_config
from pre_commit.commands.try_repo import try_repo
from pre_commit.error_handler import error_handler
from pre_commit.logging_handler import logging_handler
from pre_commit.store import Store
logger = logging.getLogger('pre_commit')
# https://github.com/pre-commit/pre-commit/issues/217
# On OSX, making a virtualenv using pyvenv at . causes `virtualenv` and `pip`
# to install packages to the wrong place. We don't want anything to deal with
# pyvenv
os.environ.pop('__PYVENV_LAUNCHER__', None)
COMMANDS_NO_GIT = {'clean', 'gc', 'init-templatedir', 'sample-config'}
def _add_config_option(parser: argparse.ArgumentParser) -> None:
parser.add_argument(
'-c', '--config', default=C.CONFIG_FILE,
help='Path to alternate config file',
)
class AppendReplaceDefault(argparse.Action):
def __init__(self, *args: Any, **kwargs: Any) -> None:
super().__init__(*args, **kwargs)
self.appended = False
def __call__(
self,
parser: argparse.ArgumentParser,
namespace: argparse.Namespace,
values: Union[str, Sequence[str], None],
option_string: Optional[str] = None,
) -> None:
if not self.appended:
setattr(namespace, self.dest, [])
self.appended = True
getattr(namespace, self.dest).append(values)
def _add_hook_type_option(parser: argparse.ArgumentParser) -> None:
parser.add_argument(
'-t', '--hook-type', choices=(
'pre-commit', 'pre-merge-commit', 'pre-push', 'prepare-commit-msg',
'commit-msg', 'post-commit', 'post-checkout', 'post-merge',
'post-rewrite',
),
action=AppendReplaceDefault,
default=['pre-commit'],
dest='hook_types',
)
def _add_run_options(parser: argparse.ArgumentParser) -> None:
parser.add_argument('hook', nargs='?', help='A single hook-id to run')
parser.add_argument('--verbose', '-v', action='store_true', default=False)
mutex_group = parser.add_mutually_exclusive_group(required=False)
mutex_group.add_argument(
'--all-files', '-a', action='store_true', default=False,
help='Run on all the files in the repo.',
)
mutex_group.add_argument(
'--files', nargs='*', default=[],
help='Specific filenames to run hooks on.',
)
parser.add_argument(
'--show-diff-on-failure', action='store_true',
help='When hooks fail, run `git diff` directly afterward.',
)
parser.add_argument(
'--hook-stage', choices=C.STAGES, default='commit',
help='The stage during which the hook is fired. One of %(choices)s',
)
parser.add_argument(
'--remote-branch', help='Remote branch ref used by `git push`.',
)
parser.add_argument(
'--local-branch', help='Local branch ref used by `git push`.',
)
parser.add_argument(
'--from-ref', '--source', '-s',
help=(
'(for usage with `--from-ref`) -- this option represents the '
'original ref in a `from_ref...to_ref` diff expression. '
'For `pre-push` hooks, this represents the branch you are pushing '
'to. '
'For `post-checkout` hooks, this represents the branch that was '
'previously checked out.'
),
)
parser.add_argument(
'--to-ref', '--origin', '-o',
help=(
'(for usage with `--to-ref`) -- this option represents the '
'destination ref in a `from_ref...to_ref` diff expression. '
'For `pre-push` hooks, this represents the branch being pushed. '
'For `post-checkout` hooks, this represents the branch that is '
'now checked out.'
),
)
parser.add_argument(
'--commit-msg-filename',
help='Filename to check when running during `commit-msg`',
)
parser.add_argument(
'--remote-name', help='Remote name used by `git push`.',
)
parser.add_argument('--remote-url', help='Remote url used by `git push`.')
parser.add_argument(
'--checkout-type',
help=(
'Indicates whether the checkout was a branch checkout '
'(changing branches, flag=1) or a file checkout (retrieving a '
'file from the index, flag=0).'
),
)
parser.add_argument(
'--is-squash-merge',
help=(
'During a post-merge hook, indicates whether the merge was a '
'squash merge'
),
)
parser.add_argument(
'--rewrite-command',
help=(
'During a post-rewrite hook, specifies the command that invoked '
'the rewrite'
),
)
def _adjust_args_and_chdir(args: argparse.Namespace) -> None:
# `--config` was specified relative to the non-root working directory
if os.path.exists(args.config):
args.config = os.path.abspath(args.config)
if args.command in {'run', 'try-repo'}:
args.files = [os.path.abspath(filename) for filename in args.files]
if args.command == 'try-repo' and os.path.exists(args.repo):
args.repo = os.path.abspath(args.repo)
toplevel = git.get_root()
os.chdir(toplevel)
args.config = os.path.relpath(args.config)
if args.command in {'run', 'try-repo'}:
args.files = [os.path.relpath(filename) for filename in args.files]
if args.command == 'try-repo' and os.path.exists(args.repo):
args.repo = os.path.relpath(args.repo)
def main(argv: Optional[Sequence[str]] = None) -> int:
argv = argv if argv is not None else sys.argv[1:]
parser = argparse.ArgumentParser(prog='pre-commit')
# https://stackoverflow.com/a/8521644/812183
parser.add_argument(
'-V', '--version',
action='version',
version=f'%(prog)s {C.VERSION}',
)
subparsers = parser.add_subparsers(dest='command')
autoupdate_parser = subparsers.add_parser(
'autoupdate',
help="Auto-update pre-commit config to the latest repos' versions.",
)
add_color_option(autoupdate_parser)
_add_config_option(autoupdate_parser)
autoupdate_parser.add_argument(
'--bleeding-edge', action='store_true',
help=(
'Update to the bleeding edge of `master` instead of the latest '
'tagged version (the default behavior).'
),
)
autoupdate_parser.add_argument(
'--freeze', action='store_true',
help='Store "frozen" hashes in `rev` instead of tag names',
)
autoupdate_parser.add_argument(
'--repo', dest='repos', action='append', metavar='REPO',
help='Only update this repository -- may be specified multiple times.',
)
clean_parser = subparsers.add_parser(
'clean', help='Clean out pre-commit files.',
)
add_color_option(clean_parser)
_add_config_option(clean_parser)
hook_impl_parser = subparsers.add_parser('hook-impl')
add_color_option(hook_impl_parser)
_add_config_option(hook_impl_parser)
hook_impl_parser.add_argument('--hook-type')
hook_impl_parser.add_argument('--hook-dir')
hook_impl_parser.add_argument(
'--skip-on-missing-config', action='store_true',
)
hook_impl_parser.add_argument(dest='rest', nargs=argparse.REMAINDER)
gc_parser = subparsers.add_parser('gc', help='Clean unused cached repos.')
add_color_option(gc_parser)
_add_config_option(gc_parser)
init_templatedir_parser = subparsers.add_parser(
'init-templatedir',
help=(
'Install hook script in a directory intended for use with '
'`git config init.templateDir`.'
),
)
add_color_option(init_templatedir_parser)
_add_config_option(init_templatedir_parser)
init_templatedir_parser.add_argument(
'directory', help='The directory in which to write the hook script.',
)
init_templatedir_parser.add_argument(
'--no-allow-missing-config',
action='store_false',
dest='allow_missing_config',
help='Assume cloned repos should have a `pre-commit` config.',
)
_add_hook_type_option(init_templatedir_parser)
install_parser = subparsers.add_parser(
'install', help='Install the pre-commit script.',
)
add_color_option(install_parser)
_add_config_option(install_parser)
install_parser.add_argument(
'-f', '--overwrite', action='store_true',
help='Overwrite existing hooks / remove migration mode.',
)
install_parser.add_argument(
'--install-hooks', action='store_true',
help=(
'Whether to install hook environments for all environments '
'in the config file.'
),
)
_add_hook_type_option(install_parser)
install_parser.add_argument(
'--allow-missing-config', action='store_true', default=False,
help=(
'Whether to allow a missing `pre-commit` configuration file '
'or exit with a failure code.'
),
)
install_hooks_parser = subparsers.add_parser(
'install-hooks',
help=(
'Install hook environments for all environments in the config '
'file. You may find `pre-commit install --install-hooks` more '
'useful.'
),
)
add_color_option(install_hooks_parser)
_add_config_option(install_hooks_parser)
migrate_config_parser = subparsers.add_parser(
'migrate-config',
help='Migrate list configuration to new map configuration.',
)
add_color_option(migrate_config_parser)
_add_config_option(migrate_config_parser)
run_parser = subparsers.add_parser('run', help='Run hooks.')
add_color_option(run_parser)
_add_config_option(run_parser)
_add_run_options(run_parser)
sample_config_parser = subparsers.add_parser(
'sample-config', help=f'Produce a sample {C.CONFIG_FILE} file',
)
add_color_option(sample_config_parser)
_add_config_option(sample_config_parser)
try_repo_parser = subparsers.add_parser(
'try-repo',
help='Try the hooks in a repository, useful for developing new hooks.',
)
add_color_option(try_repo_parser)
_add_config_option(try_repo_parser)
try_repo_parser.add_argument(
'repo', help='Repository to source hooks from.',
)
try_repo_parser.add_argument(
'--ref', '--rev',
help=(
'Manually select a rev to run against, otherwise the `HEAD` '
'revision will be used.'
),
)
_add_run_options(try_repo_parser)
uninstall_parser = subparsers.add_parser(
'uninstall', help='Uninstall the pre-commit script.',
)
add_color_option(uninstall_parser)
_add_config_option(uninstall_parser)
_add_hook_type_option(uninstall_parser)
help = subparsers.add_parser(
'help', help='Show help for a specific command.',
)
help.add_argument('help_cmd', nargs='?', help='Command to show help for.')
# argparse doesn't really provide a way to use a `default` subparser
if len(argv) == 0:
argv = ['run']
args = parser.parse_args(argv)
if args.command == 'help' and args.help_cmd:
parser.parse_args([args.help_cmd, '--help'])
elif args.command == 'help':
parser.parse_args(['--help'])
with error_handler(), logging_handler(args.color):
git.check_for_cygwin_mismatch()
if args.command not in COMMANDS_NO_GIT:
_adjust_args_and_chdir(args)
store = Store()
store.mark_config_used(args.config)
if args.command == 'autoupdate':
return autoupdate(
args.config, store,
tags_only=not args.bleeding_edge,
freeze=args.freeze,
repos=args.repos,
)
elif args.command == 'clean':
return clean(store)
elif args.command == 'gc':
return gc(store)
elif args.command == 'hook-impl':
return hook_impl(
store,
config=args.config,
color=args.color,
hook_type=args.hook_type,
hook_dir=args.hook_dir,
skip_on_missing_config=args.skip_on_missing_config,
args=args.rest[1:],
)
elif args.command == 'install':
return install(
args.config, store,
hook_types=args.hook_types,
overwrite=args.overwrite,
hooks=args.install_hooks,
skip_on_missing_config=args.allow_missing_config,
)
elif args.command == 'init-templatedir':
return init_templatedir(
args.config, store, args.directory,
hook_types=args.hook_types,
skip_on_missing_config=args.allow_missing_config,
)
elif args.command == 'install-hooks':
return install_hooks(args.config, store)
elif args.command == 'migrate-config':
return migrate_config(args.config)
elif args.command == 'run':
return run(args.config, store, args)
elif args.command == 'sample-config':
return sample_config()
elif args.command == 'try-repo':
return try_repo(args)
elif args.command == 'uninstall':
return uninstall(hook_types=args.hook_types)
else:
raise NotImplementedError(
f'Command {args.command} not implemented.',
)
raise AssertionError(
f'Command {args.command} failed to exit with a returncode',
)
if __name__ == '__main__':
raise SystemExit(main())
| [
"[email protected]"
] | |
bdd400d86f704fe57d59d284b35088dda056e839 | a7058080e41af37eb77c146fc09a5e4db57f7ec6 | /Solved/11723/11723_set.py | 7a52dc728f80e451173148d39006f3a366c611eb | [] | no_license | Jinmin-Goh/BOJ_PS | bec0922c01fbf6e440589cc684d0cd736e775066 | 09a285bd1369bd0d73f86386b343d271dc08a67d | refs/heads/master | 2022-09-24T02:24:50.823834 | 2022-09-21T02:16:22 | 2022-09-21T02:16:22 | 223,768,547 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 994 | py | # Problem No.: 11723
# Solver: Jinmin Goh
# Date: 20200410
# URL: https://www.acmicpc.net/problem/11723
import sys
def main():
n = int(input())
numSet = set()
for _ in range(n):
        order = list(sys.stdin.readline().split())
        if len(order) == 2:
            num = int(order[1])
        # take the command name for one-word inputs ("all", "empty") as well;
        # the original only unpacked it in the two-word case, so those
        # comparisons below could never match
        order = order[0]
if order == "add":
if num not in numSet:
numSet.add(num)
elif order == "check":
if num in numSet:
print(1)
else:
print(0)
elif order == "remove":
if num in numSet:
numSet.remove(num)
elif order == "toggle":
if num in numSet:
numSet.remove(num)
else:
numSet.add(num)
elif order == "all":
numSet = set([_ for _ in range(1, 21)])
elif order == "empty":
numSet = set()
return
if __name__ == "__main__":
main() | [
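# A common alternative for this problem is a 20-bit bitmask instead of a set
# (a sketch only; S is an int whose bit x represents the number x):
#   add x:    S |= 1 << x
#   remove x: S &= ~(1 << x)
#   check x:  print((S >> x) & 1)
#   toggle x: S ^= 1 << x
#   all:      S = (1 << 21) - 2    # bits 1..20 set
#   empty:    S = 0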
"[email protected]"
] | |
531144ed358910083d951eca813d686840b83120 | 5ffc395c36e3469ec13da4a51ff23cd388d6bef2 | /tic-tac-toe.py | 3fb7b5a26866f8de6efc4f371d99309b6ceb4917 | [] | no_license | jyash28/Advance-Tic-Tac-Toe-Game | 30bd01407f12f442201ae8a4157e56faf7221a9d | e83dc798aca3c1841c7b111f6a53d99b39d5c47e | refs/heads/main | 2023-01-21T13:39:19.225062 | 2020-11-21T19:40:04 | 2020-11-21T19:40:04 | 314,888,388 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,840 | py | import tkinter.messagebox
from tkinter import *
root=Tk()
root.geometry("1350x750+0+0")
root.title("Tic Tac Toe")
root.configure(background="Cadet Blue")
Tops = Frame(root,bg ="Cadet Blue",pady = 2,width =1350,height=100,relief=RIDGE)
Tops.grid(row=0,column=0)
lblTitle = Label(Tops,font=("arial",50,"bold"),text="Advance Tic Tac Toe Game",bd=21,bg="Cadet Blue",fg="Cornsilk",justify=CENTER)
lblTitle.grid(row=0,column=0)
MainFrame = Frame(root,bg="Powder Blue",bd=10,width=1350,height=600,relief=RIDGE)
MainFrame.grid(row=1,column=0)
LeftFrame = Frame(MainFrame,bd=10,width=750,height=500,pady=2,padx=10,bg="Cadet Blue",relief=RIDGE)
LeftFrame.pack(side=LEFT)
RightFrame = Frame(MainFrame,bd=10,width=560,height=500,pady=2,padx=10,bg="Cadet Blue",relief=RIDGE)
RightFrame.pack(side=RIGHT)
RightFrame1 = Frame(RightFrame,bd=10,width=560,height=200,pady=2,padx=10,bg="Cadet Blue",relief=RIDGE)
RightFrame1.grid(row=0,column=0)
RightFrame2 = Frame(RightFrame,bd=10,width=560,height=200,pady=2,padx=10,bg="Cadet Blue",relief=RIDGE)
RightFrame2.grid(row=1,column=0)
PlayerX=IntVar()
Player0=IntVar()
PlayerX.set(0)
Player0.set(0)
buttons= StringVar()
click=True
def checker(buttons):
global click
if buttons["text"] == " " and click == True:
buttons["text"] = "X"
click = False
scorekeeper()
elif buttons["text"] == " " and click == False:
buttons["text"] = "0"
click = True
scorekeeper()
def scorekeeper():
if(button1["text"]=="X" and button2["text"]=="X" and button3["text"]=="X" ):
button1.configure(background="powder blue")
button2.configure(background="powder blue")
button3.configure(background="powder blue")
n = float(PlayerX.get())
score = (n+1)
PlayerX.set(score)
tkinter.messagebox.showinfo("Winner : X","You have just won a game")
if(button4["text"]=="X" and button5["text"]=="X" and button6["text"]=="X" ):
button4.configure(background="Red")
button5.configure(background="Red")
button6.configure(background="Red")
n = float(PlayerX.get())
score = (n+1)
PlayerX.set(score)
tkinter.messagebox.showinfo("Winner : X","You have just won a game")
if(button7["text"]=="X" and button8["text"]=="X" and button9["text"]=="X" ):
button7.configure(background="cadet blue")
button8.configure(background="cadet blue")
button9.configure(background="cadet blue")
n = float(PlayerX.get())
score = (n+1)
PlayerX.set(score)
tkinter.messagebox.showinfo("Winner : X","You have just won a game")
if(button3["text"]=="X" and button5["text"]=="X" and button7["text"]=="X" ):
button3.configure(background="cadet blue")
button5.configure(background="cadet blue")
button7.configure(background="cadet blue")
n = float(PlayerX.get())
score = (n+1)
PlayerX.set(score)
tkinter.messagebox.showinfo("Winner : X","You have just won a game")
if(button1["text"]=="X" and button5["text"]=="X" and button9["text"]=="X" ):
button1.configure(background="Red")
button5.configure(background="Red")
button9.configure(background="Red")
n = float(PlayerX.get())
score = (n+1)
PlayerX.set(score)
tkinter.messagebox.showinfo("Winner : X","You have just won a game")
if(button1["text"]=="X" and button4["text"]=="X" and button7["text"]=="X" ):
button1.configure(background="Yellow")
button4.configure(background="Yellow")
button7.configure(background="Yellow")
n = float(PlayerX.get())
score = (n+1)
PlayerX.set(score)
tkinter.messagebox.showinfo("Winner : X","You have just won a game")
if(button2["text"]=="X" and button5["text"]=="X" and button8["text"]=="X" ):
button2.configure(background="Pink")
button5.configure(background="Pink")
button8.configure(background="Pink")
n = float(PlayerX.get())
score = (n+1)
PlayerX.set(score)
tkinter.messagebox.showinfo("Winner : X","You have just won a game")
if(button3["text"]=="X" and button6["text"]=="X" and button9["text"]=="X" ):
button3.configure(background="cadet blue")
button6.configure(background="cadet blue")
button9.configure(background="cadet blue")
n = float(PlayerX.get())
score = (n+1)
PlayerX.set(score)
tkinter.messagebox.showinfo("Winner : X","You have just won a game")
if(button1["text"]=="0" and button2["text"]=="0" and button3["text"]=="0" ):
button1.configure(background="Orange")
button2.configure(background="Orange")
button3.configure(background="Orange")
n = float(Player0.get())
score = (n+1)
Player0.set(score)
tkinter.messagebox.showinfo("Winner : 0","You have just won a game")
if(button4["text"]=="0" and button5["text"]=="0" and button6["text"]=="0" ):
button4.configure(background="Blue")
button5.configure(background="Blue")
button6.configure(background="Blue")
n = float(Player0.get())
score = (n+1)
Player0.set(score)
tkinter.messagebox.showinfo("Winner : 0","You have just won a game")
if(button7["text"]=="0" and button8["text"]=="0" and button9["text"]=="0" ):
button7.configure(background="Green")
button8.configure(background="Green")
button9.configure(background="Green")
n = float(Player0.get())
score = (n+1)
Player0.set(score)
tkinter.messagebox.showinfo("Winner : 0","You have just won a game")
if(button3["text"]=="0" and button5["text"]=="0" and button7["text"]=="0" ):
button3.configure(background="cadet blue")
button5.configure(background="cadet blue")
button7.configure(background="cadet blue")
n = float(Player0.get())
score = (n+1)
Player0.set(score)
tkinter.messagebox.showinfo("Winner : 0","You have just won a game")
if(button1["text"]=="0" and button5["text"]=="0" and button9["text"]=="0" ):
button1.configure(background="Orange")
button5.configure(background="Orange")
button9.configure(background="Orange")
n = float(Player0.get())
score = (n+1)
Player0.set(score)
tkinter.messagebox.showinfo("Winner : 0","You have just won a game")
if(button1["text"]=="0" and button4["text"]=="0" and button7["text"]=="0" ):
button1.configure(background="powder blue")
button4.configure(background="powder blue")
button7.configure(background="powder blue")
n = float(Player0.get())
score = (n+1)
Player0.set(score)
tkinter.messagebox.showinfo("Winner : 0","You have just won a game")
if(button2["text"]=="0" and button5["text"]=="0" and button8["text"]=="0" ):
button2.configure(background="cadet blue")
button5.configure(background="cadet blue")
button8.configure(background="cadet blue")
n = float(Player0.get())
score = (n+1)
Player0.set(score)
tkinter.messagebox.showinfo("Winner : 0","You have just won a game")
if(button3["text"]=="0" and button6["text"]=="0" and button9["text"]=="0" ):
button3.configure(background="Red")
button6.configure(background="Red")
button9.configure(background="Red")
n = float(Player0.get())
score = (n+1)
Player0.set(score)
tkinter.messagebox.showinfo("Winner : 0","You have just won a game")
def reset():
button1["text"]= " "
button2["text"]= " "
button3["text"]= " "
button4["text"]= " "
button5["text"]= " "
button6["text"]= " "
button7["text"]= " "
button8["text"]= " "
button9["text"]= " "
button1.configure(background="gainsboro")
button2.configure(background="gainsboro")
button3.configure(background="gainsboro")
button4.configure(background="gainsboro")
button5.configure(background="gainsboro")
button6.configure(background="gainsboro")
button7.configure(background="gainsboro")
button8.configure(background="gainsboro")
button9.configure(background="gainsboro")
def NewGame():
reset()
PlayerX.set(0)
Player0.set(0)
lblPlayerX = Label(RightFrame1,font=("arial",40,"bold"),text="Player X :",padx=2,pady=2,bg="Cadet Blue")
lblPlayerX.grid(row=0,column=0,sticky=W)
txtPlayerX=Entry(RightFrame1,font=("arial", 40,"bold"),bd=2,fg="black",textvariable=PlayerX,width=14,justify=LEFT)
txtPlayerX.grid(row=0,column=1)
lblPlayer0 = Label(RightFrame1,font=("arial",40,"bold"),text="Player 0 :",padx=2,pady=2,bg="Cadet Blue")
lblPlayer0.grid(row=1,column=0,sticky=W)
txtPlayer0=Entry(RightFrame1,font=("arial", 40,"bold"),bd=2,fg="black",textvariable=Player0,width=14,justify=LEFT)
txtPlayer0.grid(row=1,column=1)
btnReset = Button(RightFrame2,text="Reset ",font=("arial", 40, "bold"),height =1,width=20,command=reset)
btnReset.grid(row=2,column=0,padx=6,pady=11)
btnNewGame = Button(RightFrame2,text="NewGame ",font=("arial", 40, "bold"),height =1,width=20,command=NewGame)
btnNewGame.grid(row=3,column=0,padx=6,pady=10)
button1 = Button(LeftFrame,text=" ",font=("Times 26 bold"),height =3,width=8,bg="gainsboro",command=lambda: checker(button1))
button1.grid(row=1,column=0,sticky = S+N+E+W)
button2 = Button(LeftFrame,text=" ",font=("Times 26 bold"),height =3,width=8,bg="gainsboro",command=lambda: checker(button2))
button2.grid(row=1,column=1,sticky = S+N+E+W)
button3 = Button(LeftFrame,text=" ",font=("Times 26 bold"),height =3,width=8,bg="gainsboro",command=lambda: checker(button3))
button3.grid(row=1,column=2,sticky = S+N+E+W)
button4 = Button(LeftFrame,text=" ",font=("Times 26 bold"),height =3,width=8,bg="gainsboro",command=lambda: checker(button4))
button4.grid(row=2,column=0,sticky = S+N+E+W)
button5 = Button(LeftFrame,text=" ",font=("Times 26 bold"),height =3,width=8,bg="gainsboro",command=lambda: checker(button5))
button5.grid(row=2,column=1,sticky = S+N+E+W)
button6 = Button(LeftFrame,text=" ",font=("Times 26 bold"),height =3,width=8,bg="gainsboro",command=lambda: checker(button6))
button6.grid(row=2,column=2,sticky = S+N+E+W)
button7 = Button(LeftFrame,text=" ",font=("Times 26 bold"),height =3,width=8,bg="gainsboro",command=lambda: checker(button7))
button7.grid(row=3,column=0,sticky = S+N+E+W)
button8 = Button(LeftFrame,text=" ",font=("Times 26 bold"),height =3,width=8,bg="gainsboro",command=lambda: checker(button8))
button8.grid(row=3,column=1,sticky = S+N+E+W)
button9 = Button(LeftFrame,text=" ",font=("Times 26 bold"),height =3,width=8,bg="gainsboro",command=lambda: checker(button9))
button9.grid(row=3,column=2,sticky = S+N+E+W)
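# A compact alternative to the repetitive scorekeeper() above -- a sketch only,
# not wired into the GUI; assumes the nine buttons collected in row-major order.
WIN_LINES = [(0,1,2),(3,4,5),(6,7,8),(0,3,6),(1,4,7),(2,5,8),(0,4,8),(2,4,6)]
def find_winner(cells):
    # cells: the nine button texts, e.g. [b["text"] for b in all_buttons]
    for a, b, c in WIN_LINES:
        if cells[a] == cells[b] == cells[c] != " ":
            return cells[a]  # "X" or "0"
    return None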
root.mainloop() | [
"[email protected]"
] |