blob_id (stringlengths 40..40) | directory_id (stringlengths 40..40) | path (stringlengths 5..283) | content_id (stringlengths 40..40) | detected_licenses (sequencelengths 0..41) | license_type (stringclasses, 2 values) | repo_name (stringlengths 7..96) | snapshot_id (stringlengths 40..40) | revision_id (stringlengths 40..40) | branch_name (stringclasses, 58 values) | visit_date (timestamp[us]) | revision_date (timestamp[us]) | committer_date (timestamp[us]) | github_id (int64, 12.7k..662M ⌀) | star_events_count (int64, 0..35.5k) | fork_events_count (int64, 0..20.6k) | gha_license_id (stringclasses, 11 values) | gha_event_created_at (timestamp[us]) | gha_created_at (timestamp[us]) | gha_language (stringclasses, 43 values) | src_encoding (stringclasses, 9 values) | language (stringclasses, 1 value) | is_vendor (bool, 2 classes) | is_generated (bool, 2 classes) | length_bytes (int64, 7..5.88M) | extension (stringclasses, 30 values) | content (stringlengths 7..5.88M) | authors (sequencelengths 1..1) | author (stringlengths 0..73)
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
6c92357915dcd8ff23b997d1555ab25e5573ab1f | 115417ecd6c0e4c871d548d8df5c38875cc91a08 | /blog/blog_api/viewsets.py | 072e7ce987bd077d0490a236fdc39d149c360251 | [] | no_license | idrissabanli/ecommerce_api_react_group | 262f81ea34f0be819a5f32d2fdf88d4949b2368b | 6a9c0c4ae394c3417d0a05bac9252cba53d56eb8 | refs/heads/master | 2022-12-25T13:16:38.326633 | 2022-01-25T08:55:57 | 2022-01-25T08:55:57 | 243,542,086 | 2 | 0 | null | 2022-12-08T03:41:52 | 2020-02-27T14:55:25 | JavaScript | UTF-8 | Python | false | false | 509 | py | from blog.models import Blog, Blogger
from rest_framework import permissions
from blog.blog_api.serializers import *
from rest_framework.viewsets import ModelViewSet, ReadOnlyModelViewSet
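# Open (AllowAny) CRUD endpoints for Blogger and Blog records, built on DRF ModelViewSet.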
class BloggerViewSet(ModelViewSet):
permission_classes = [permissions.AllowAny,]
queryset = Blogger.objects.all()
serializer_class = BloggerSerializer
class BlogViewSet(ModelViewSet):
permission_classes = [permissions.AllowAny,]
queryset = Blog.objects.all()
serializer_class = BlogSerializer
| [
"[email protected]"
] | |
48b854181197dd32e477a33487d331b43ddbf2a4 | e3cfab409afb5ff9a0b3812bf848be6ca9239cee | /pygeodesy/streprs.py | 082d4e9a31b23b74f6c42cc529a3e56e69f00a14 | [
"MIT"
] | permissive | mrJean1/PyGeodesy | 565266a4f7f6cda5abe98e915bbd868f6cbe1760 | eba35704b248a7a0388b30f3cea19793921e99b7 | refs/heads/master | 2023-08-23T13:58:20.069917 | 2023-08-20T18:50:45 | 2023-08-20T18:50:45 | 68,028,481 | 283 | 66 | null | 2022-04-09T00:40:52 | 2016-09-12T16:49:10 | Python | UTF-8 | Python | false | false | 22,878 | py |
# -*- coding: utf-8 -*-
u'''Floating point and other formatting utilities.
'''
from pygeodesy.basics import _0_0, isint, islistuple, isscalar, isstr, _zip
# from pygeodesy.constants import _0_0
from pygeodesy.errors import _AttributeError, _IsnotError, itemsorted, _or, \
_TypeError, _ValueError, _xkwds_get, _xkwds_pop
from pygeodesy.interns import NN, _0_, _0to9_, MISSING, _BAR_, _COMMASPACE_, \
_DOT_, _dunder_nameof, _E_, _ELLIPSIS_, _EQUAL_, \
_H_, _LR_PAIRS, _N_, _name_, _not_, _not_scalar_, \
_PERCENT_, _SPACE_, _STAR_, _UNDER_
from pygeodesy.interns import _convergence_, _distant_, _e_, _eps_, _exceeds_, \
_EQUALSPACED_, _f_, _F_, _g_, _limit_, _no_, \
_tolerance_ # PYCHOK used!
from pygeodesy.lazily import _ALL_LAZY, _ALL_MODS as _MODS
from math import fabs, log10 as _log10
__all__ = _ALL_LAZY.streprs
__version__ = '23.06.12'
_EN_PREC = 6 # max MGRS/OSGR precision, 1 micrometer
_EN_WIDE = 5 # number of MGRS/OSGR units, log10(_100km)
_OKd_ = '._-' # acceptable name characters
_PAREN_g = '(%g)' # PYCHOK used!
_threshold_ = 'threshold' # PYCHOK used!
class _Fmt(str): # in .streprs
'''(INTERNAL) Callable formatting.
'''
name = NN
def __call__(self, *name_value_, **name_value):
'''Format a C{name=value} pair or C{name, value} pair
or just a single C{value}.
'''
for n, v in name_value.items():
break
else:
if len(name_value_) > 1:
n, v = name_value_[:2]
elif name_value_:
n, v = NN, name_value_[0]
else:
n, v = NN, MISSING
t = str.__mod__(self, v)
return NN(n, t) if n else t
# def __mod__(self, arg, **unused):
# '''Regular C{%} operator.
# '''
# return str.__mod__(self, arg)
class Fstr(str):
'''(INTERNAL) C{float} format.
'''
name = NN
def __call__(self, flt, prec=None, ints=False):
'''Format the B{C{flt}} like function L{fstr}.
'''
# see also function C{fstr} if isscalar case below
t = str.__mod__(_pct(self), flt) if prec is None else next(
_streprs(prec, (flt,), self, ints, True, None))
return t
def __mod__(self, arg, **unused):
'''Regular C{%} operator.
@arg arg: A C{scalar} value to be formatted (either
the C{scalar}, or a 1-tuple C{(scalar,)},
or 2-tuple C{(prec, scalar)}.
@raise TypeError: Non-scalar B{C{arg}} value.
@raise ValueError: Invalid B{C{arg}}.
'''
def _error(arg):
n = _DOT_(Fstr.__name__, self.name or self)
return _SPACE_(n, _PERCENT_, repr(arg))
prec = 6 # default std %f and %F
if islistuple(arg):
n = len(arg)
if n == 1:
arg = arg[0]
elif n == 2:
prec, arg = arg
else:
raise _ValueError(_error(arg))
if not isscalar(arg):
raise _TypeError(_error(arg))
return self(arg, prec=prec)
class _Sub(str):
'''(INTERNAL) Class list formatter.
'''
# see .ellipsoidalNvector.LatLon.deltaTo
def __call__(self, *Classes):
t = _or(*(C.__name__ for C in Classes))
return str.__mod__(self, t or MISSING)
class Fmt(object):
'''Formatting options.
'''
ANGLE = _Fmt('<%s>')
COLON = _Fmt(':%s')
# COLONSPACE = _Fmt(': %s') # == _COLONSPACE_(n, v)
# COMMASPACE = _Fmt(', %s') # == _COMMASPACE_(n, v)
convergence = _Fmt(_convergence_(_PAREN_g))
CURLY = _Fmt('{%s}') # BRACES
distant = _Fmt(_distant_('(%.3g)'))
DOT = _Fmt('.%s') # == NN(_DOT_, n)
e = Fstr(_e_)
E = Fstr(_E_)
EQUAL = _Fmt(_EQUAL_(NN, '%s'))
EQUALg = _Fmt(_EQUAL_(NN, '%g'))
EQUALSPACED = _Fmt(_EQUALSPACED_(NN, '%s'))
exceeds_eps = _Fmt(_exceeds_(_eps_, _PAREN_g))
exceeds_limit = _Fmt(_exceeds_(_limit_, _PAREN_g))
f = Fstr(_f_)
F = Fstr(_F_)
g = Fstr(_g_)
G = Fstr('G')
h = Fstr('%+.*f') # height, .streprs.hstr
limit = _Fmt(' %s limit') # .units
LOPEN = _Fmt('(%s]') # left-open range (L, R]
PAREN = _Fmt('(%s)')
PAREN_g = _Fmt(_PAREN_g)
PARENSPACED = _Fmt(' (%s)')
QUOTE2 = _Fmt('"%s"')
ROPEN = _Fmt('[%s)') # right-open range [L, R)
# SPACE = _Fmt(' %s') # == _SPACE_(n, v)
SQUARE = _Fmt('[%s]') # BRACKETS
sub_class = _Sub('%s (sub-)class')
TAG = ANGLE
TAGEND = _Fmt('</%s>')
tolerance = _Fmt(_tolerance_(_PAREN_g))
zone = _Fmt('%02d') # .epsg, .mgrs, .utmupsBase
def __init__(self):
for n, a in self.__class__.__dict__.items():
if isinstance(a, (Fstr, _Fmt)):
setattr(a, _name_, n)
def __call__(self, obj, prec=9):
'''Return C{str(B{obj})} or C{repr(B{obj})}.
'''
return str(obj) if isint(obj) else next(
_streprs(prec, (obj,), Fmt.g, False, False, repr))
def no_convergence(self, _d, *tol, **thresh):
t = Fmt.convergence(fabs(_d))
if tol:
t = _COMMASPACE_(t, Fmt.tolerance(tol[0]))
if thresh and _xkwds_get(thresh, thresh=False):
t = t.replace(_tolerance_, _threshold_)
return _no_(t)
Fmt = Fmt() # PYCHOK singleton
Fmt.__name__ = Fmt.__class__.__name__
_DOTSTAR_ = Fmt.DOT(_STAR_)
# formats %G and %g drop all trailing zeros and the
# decimal point, making the float appear as an int
_Gg = (Fmt.G, Fmt.g)
_FfEeGg = (Fmt.F, Fmt.f, Fmt.E, Fmt.e) + _Gg # float formats
_Fspec_ = NN('[%[<flags>][<width>]', _DOTSTAR_, ']', _BAR_.join(_FfEeGg)) # in testStreprs
def anstr(name, OKd=_OKd_, sub=_UNDER_):
'''Make a valid name of alphanumeric and OKd characters.
@arg name: The original name (C{str}).
@kwarg OKd: Other acceptable characters (C{str}).
@kwarg sub: Substitute for invalid characters (C{str}).
@return: The modified name (C{str}).
@note: Leading and trailing whitespace characters are removed,
intermediate whitespace characters are coalesced and
substituted.
'''
s = n = str(name).strip()
for c in n:
if not (c.isalnum() or c in OKd or c in sub):
s = s.replace(c, _SPACE_)
return sub.join(s.strip().split())
def attrs(inst, *names, **Nones_True__pairs_kwds): # prec=6, fmt=Fmt.F, ints=False, Nones=True, sep=_EQUAL_
'''Get instance attributes as I{name=value} strings, with C{float}s
formatted by function L{fstr}.
@arg inst: The instance (any C{type}).
@arg names: The attribute names, all other positional (C{str}).
@kwarg Nones_True__pairs_kwds: Keyword argument for function L{pairs}, except
C{B{Nones}=True} to in-/exclude missing or C{None}-valued attributes.
@return: A C{tuple(B{sep}.join(t) for t in zip(B{names}, reprs(values, ...)))}
of C{str}s.
'''
def _items(inst, names, Nones):
for n in names:
v = getattr(inst, n, None)
if Nones or v is not None:
yield n, v
def _Nones_kwds(Nones=True, **kwds):
return Nones, kwds
Nones, kwds = _Nones_kwds(**Nones_True__pairs_kwds)
return pairs(_items(inst, names, Nones), **kwds)
def enstr2(easting, northing, prec, *extras, **wide_dot):
'''Return MGRS/OSGR easting and northing string representations.
@arg easting: Easting from false easting (C{meter}).
@arg northing: Northing from from false northing (C{meter}).
@arg prec: Precision, the number of I{decimal} digits (C{int}) or if
negative, the number of I{units to drop}, like MGRS U{PRECISION
<https://GeographicLib.SourceForge.io/C++/doc/GeoConvert.1.html#PRECISION>}.
@arg extras: Optional leading items (C{str}s).
@kwarg wide_dot: Optional keyword argument C{B{wide}=%d} for the number of I{unit digits}
(C{int}) and C{B{dot}=False} (C{bool}) to insert a decimal point.
@return: B{C{extras}} + 2-tuple C{(str(B{easting}), str(B{northing}))} or
+ 2-tuple C{("", "")} for C{B{prec} <= -B{wide}}.
@raise ValueError: Invalid B{C{easting}}, B{C{northing}} or B{C{prec}}.
@note: The B{C{easting}} and B{C{northing}} values are I{truncated, not rounded}.
'''
t = extras
try: # like .dms.compassPoint
p = min(int(prec), _EN_PREC)
w = p + _xkwds_get(wide_dot, wide=_EN_WIDE)
if w > 0:
f = 10**p # truncate
d = (-p) if p > 0 and _xkwds_get(wide_dot, dot=False) else 0
t += (_0wdot(w, int(easting * f), d),
_0wdot(w, int(northing * f), d))
else: # prec <= -_EN_WIDE
t += (NN, NN)
except (TypeError, ValueError) as x:
raise _ValueError(easting=easting, northing=northing, prec=prec, cause=x)
return t
if enstr2.__doc__: # PYCHOK expected
enstr2.__doc__ %= (_EN_WIDE,)
def _enstr2m3(estr, nstr, wide=_EN_WIDE): # in .mgrs, .osgr
'''(INTERNAL) Convert east- and northing C{str}s to meter and resolution.
'''
def _s2m2(s, m): # e or n str to float meter
if _DOT_ in s:
m = 1 # meter
else:
s += _0_ * wide
s = _DOT_(s[:wide], s[wide:wide+_EN_PREC])
return float(s), m
e, m = _s2m2(estr, 0)
n, m = _s2m2(nstr, m)
if not m:
p = max(len(estr), len(nstr)) # 2 = Km, 5 = m, 7 = cm
m = 10**max(-_EN_PREC, wide - p) # resolution, meter
return e, n, m
def fstr(floats, prec=6, fmt=Fmt.F, ints=False, sep=_COMMASPACE_, strepr=None):
'''Convert one or more floats to string, optionally stripped of trailing zero decimals.
@arg floats: Single or a list, sequence, tuple, etc. (C{scalar}s).
@kwarg prec: The C{float} precision, number of decimal digits (0..9).
Trailing zero decimals are stripped if B{C{prec}} is
positive, but kept for negative B{C{prec}} values. In
addition, trailing decimal zeros are stripped for U{alternate,
form '#'<https://docs.Python.org/3/library/stdtypes.html
#printf-style-string-formatting>}.
@kwarg fmt: Optional, C{float} format (C{str}).
@kwarg ints: Optionally, remove the decimal dot for C{int} values (C{bool}).
@kwarg sep: Separator joining the B{C{floats}} (C{str}).
@kwarg strepr: Optional callable to format non-C{floats} (typically
C{repr}, C{str}) or C{None} to raise a TypeError.
@return: The C{sep.join(strs(floats, ...)} joined (C{str}) or single
C{strs((floats,), ...)} (C{str}) if B{C{floats}} is C{scalar}.
'''
if isscalar(floats): # see Fstr.__call__ above
return next(_streprs(prec, (floats,), fmt, ints, True, strepr))
else:
return sep.join(_streprs(prec, floats, fmt, ints, True, strepr))
def _fstrENH2(inst, prec, m): # in .css, .lcc, .utmupsBase
# (INTERNAL) For C{Css.} and C{Lcc.} C{toRepr} and C{toStr} and C{UtmUpsBase._toStr}.
t = inst.easting, inst.northing
t = tuple(_streprs(prec, t, Fmt.F, False, True, None))
T = _E_, _N_
if m is not None and fabs(inst.height): # fabs(self.height) > EPS
t += hstr(inst.height, prec=-2, m=m),
T += _H_,
return t, T
def _fstrLL0(inst, prec, toRepr): # in .azimuthal, .css
# (INTERNAL) For C{_AlbersBase.}, C{_AzimuthalBase.} and C{CassiniSoldner.}
t = tuple(_streprs(prec, inst.latlon0, Fmt.F, False, True, None))
if toRepr:
n = inst.name
if n:
t += Fmt.EQUAL(_name_, repr(n)),
t = Fmt.PAREN(inst.classname, _COMMASPACE_.join(t))
return t
def fstrzs(efstr, ap1z=False):
'''Strip trailing zero decimals from a C{float} string.
@arg efstr: Float with or without exponent (C{str}).
@kwarg ap1z: Append the decimal point and one zero decimal
if the B{C{efstr}} is all digits (C{bool}).
@return: Float (C{str}).
'''
s = efstr.find(_DOT_)
if s >= 0:
e = efstr.rfind(Fmt.e)
if e < 0:
e = efstr.rfind(Fmt.E)
if e < 0:
e = len(efstr)
s += 2 # keep 1st _DOT_ + _0_
if s < e and efstr[e-1] == _0_:
efstr = NN(efstr[:s], efstr[s:e].rstrip(_0_), efstr[e:])
elif ap1z:
# %.G and %.g formats may drop the decimal
# point and all trailing zeros, ...
if efstr.isdigit():
efstr += _DOT_ + _0_ # ... append or ...
else: # ... insert one dot and zero
e = efstr.rfind(Fmt.e)
if e < 0:
e = efstr.rfind(Fmt.E)
if e > 0:
efstr = NN(efstr[:e], _DOT_, _0_, efstr[e:])
return efstr
def hstr(height, prec=2, fmt=Fmt.h, ints=False, m=NN):
'''Return a string for the height value.
@arg height: Height value (C{float}).
@kwarg prec: The C{float} precision, number of decimal digits (0..9).
Trailing zero decimals are stripped if B{C{prec}} is
positive, but kept for negative B{C{prec}} values.
@kwarg fmt: Optional, C{float} format (C{str}).
@kwarg ints: Optionally, remove the decimal dot for C{int} values (C{bool}).
@kwarg m: Optional unit of the height (C{str}).
'''
h = next(_streprs(prec, (height,), fmt, ints, True, None))
return NN(h, str(m)) if m else h
def instr(inst, *args, **kwds):
'''Return the string representation of an instantiation.
@arg inst: The instance (any C{type}).
@arg args: Optional positional arguments.
@kwarg kwds: Optional keyword arguments.
@return: Representation (C{str}).
'''
return unstr(_MODS.named.classname(inst), *args, **kwds)
def lrstrip(txt, lrpairs=_LR_PAIRS):
'''Left- I{and} right-strip parentheses, brackets, etc. from a string.
@arg txt: String to be stripped (C{str}).
@kwarg lrpairs: Parentheses, etc. to remove (C{dict} of one or several
C{(Left, Right)} pairs).
@return: Stripped B{C{txt}} (C{str}).
'''
_e, _s, _n = str.endswith, str.startswith, len
while _n(txt) > 2:
for L, R in lrpairs.items():
if _e(txt, R) and _s(txt, L):
txt = txt[_n(L):-_n(R)]
break # restart
else:
return txt
def pairs(items, prec=6, fmt=Fmt.F, ints=False, sep=_EQUAL_):
'''Convert items to I{name=value} strings, with C{float}s handled like L{fstr}.
@arg items: Name-value pairs (C{dict} or 2-{tuple}s of any C{type}s).
@kwarg prec: The C{float} precision, number of decimal digits (0..9).
Trailing zero decimals are stripped if B{C{prec}} is
positive, but kept for negative B{C{prec}} values.
@kwarg fmt: Optional, C{float} format (C{str}).
@kwarg ints: Optionally, remove the decimal dot for C{int} values (C{bool}).
@kwarg sep: Separator joining I{names} and I{values} (C{str}).
@return: A C{tuple(B{sep}.join(t) for t in B{items}))} of C{str}s.
'''
try:
if isinstance(items, dict):
items = itemsorted(items)
elif not islistuple(items):
items = tuple(items)
# can't unzip empty items tuple, list, etc.
n, v = _zip(*items) if items else ((), ()) # strict=True
except (TypeError, ValueError):
raise _IsnotError(dict.__name__, '2-tuples', items=items)
v = _streprs(prec, v, fmt, ints, False, repr)
return tuple(sep.join(t) for t in _zip(map(str, n), v)) # strict=True
def _pct(fmt):
'''(INTERNAL) Prefix C{%} if needed.
'''
return fmt if _PERCENT_ in fmt else NN(_PERCENT_, fmt)
def reprs(objs, prec=6, fmt=Fmt.F, ints=False):
'''Convert objects to C{repr} strings, with C{float}s handled like L{fstr}.
@arg objs: List, sequence, tuple, etc. (any C{type}s).
@kwarg prec: The C{float} precision, number of decimal digits (0..9).
Trailing zero decimals are stripped if B{C{prec}} is
positive, but kept for negative B{C{prec}} values.
@kwarg fmt: Optional, C{float} format (C{str}).
@kwarg ints: Optionally, remove the decimal dot for C{int} values (C{bool}).
@return: A C{tuple(map(fstr|repr, objs))} of C{str}s.
'''
return tuple(_streprs(prec, objs, fmt, ints, False, repr)) if objs else ()
def _resolution10(resolution, Error=ValueError): # in .mgrs, .osgr
'''(INTERNAL) Validate C{resolution} in C{meter}.
'''
try:
r = int(_log10(resolution))
if _EN_WIDE < r or r < -_EN_PREC:
raise ValueError
except (ValueError, TypeError):
raise Error(resolution=resolution)
return _MODS.units.Meter(resolution=10**r)
def _streprs(prec, objs, fmt, ints, force, strepr):
'''(INTERNAL) Helper for C{fstr}, C{pairs}, C{reprs} and C{strs}
'''
# <https://docs.Python.org/3/library/stdtypes.html#printf-style-string-formatting>
if fmt in _FfEeGg:
fGg = fmt in _Gg
fmt = NN(_PERCENT_, _DOT_, abs(prec), fmt)
elif fmt.startswith(_PERCENT_):
fGg = False
try: # to make sure fmt is valid
f = fmt.replace(_DOTSTAR_, Fmt.DOT(abs(prec)))
_ = f % (_0_0,)
except (TypeError, ValueError):
raise _ValueError(fmt=fmt, txt=_not_(repr(_DOTSTAR_)))
fmt = f
else:
raise _ValueError(fmt=fmt, txt=_not_(repr(_Fspec_)))
for i, o in enumerate(objs):
if force or isinstance(o, float):
t = fmt % (float(o),)
if ints and t.rstrip(_0to9_ if isint(o, both=True) else
_0_).endswith(_DOT_):
t = t.split(_DOT_)[0]
elif prec > 1:
t = fstrzs(t, ap1z=fGg)
elif strepr:
t = strepr(o)
else:
t = Fmt.PARENSPACED(Fmt.SQUARE(objs=i), o)
raise TypeError(_SPACE_(t, _not_scalar_))
yield t
def strs(objs, prec=6, fmt=Fmt.F, ints=False):
'''Convert objects to C{str} strings, with C{float}s handled like L{fstr}.
@arg objs: List, sequence, tuple, etc. (any C{type}s).
@kwarg prec: The C{float} precision, number of decimal digits (0..9).
Trailing zero decimals are stripped if B{C{prec}} is
positive, but kept for negative B{C{prec}} values.
@kwarg fmt: Optional, C{float} format (C{str}).
@kwarg ints: Optionally, remove the decimal dot for C{int} values (C{bool}).
@return: A C{tuple(map(fstr|str, objs))} of C{str}s.
'''
return tuple(_streprs(prec, objs, fmt, ints, False, str)) if objs else ()
def unstr(where, *args, **kwds):
'''Return the string representation of an invocation.
@arg where: Class, function, method (C{type}) or name (C{str}).
@arg args: Optional positional arguments.
@kwarg kwds: Optional keyword arguments, except
C{B{_ELLIPSIS}=False}.
@return: Representation (C{str}).
'''
t = reprs(args, fmt=Fmt.g) if args else ()
if kwds and _xkwds_pop(kwds, _ELLIPSIS=False):
t += _ELLIPSIS_,
if kwds:
t += pairs(itemsorted(kwds), fmt=Fmt.g)
n = where if isstr(where) else _dunder_nameof(where)
return Fmt.PAREN(n, _COMMASPACE_.join(t))
def _0wd(*w_i): # in .osgr, .wgrs
'''(INTERNAL) Int formatter.
'''
return '%0*d' % w_i
def _0wdot(w, f, dot=0):
'''(INTERNAL) Int and Float formatter.
'''
s = _0wd(w, int(f))
if dot:
s = _DOT_(s[:dot], s[dot:])
return s
def _0wpF(*w_p_f): # in .dms, .osgr
'''(INTERNAL) Float deg, min, sec formatter.
'''
return '%0*.*f' % w_p_f # XXX was F
def _xattrs(insto, other, *attrs): # see .errors._xattr
'''(INTERNAL) Copy attribute values from B{C{other}} to B{C{insto}}.
@arg insto: Object to copy attribute values to (any C{type}).
@arg other: Object to copy attribute values from (any C{type}).
@arg attrs: One or more attribute names (C{str}s).
@return: Object B{C{insto}}, updated.
@raise AttributeError: An B{C{attrs}} doesn't exist
or is not settable.
'''
def _getattr(o, a):
if hasattr(o, a):
return getattr(o, a)
try:
n = o._DOT_(a)
except AttributeError:
n = Fmt.DOT(a)
raise _AttributeError(o, name=n)
for a in attrs:
s = _getattr(other, a)
g = _getattr(insto, a)
if (g is None and s is not None) or g != s:
setattr(insto, a, s) # not settable?
return insto
def _xzipairs(names, values, sep=_COMMASPACE_, fmt=NN, pair_fmt=Fmt.COLON):
'''(INTERNAL) Zip C{names} and C{values} into a C{str}, joined and bracketed.
'''
try:
t = sep.join(pair_fmt(*t) for t in _zip(names, values)) # strict=True
except Exception as x:
raise _ValueError(names=names, values=values, cause=x)
return (fmt % (t,)) if fmt else t
# **) MIT License
#
# Copyright (C) 2016-2023 -- mrJean1 at Gmail -- All Rights Reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
| [
"[email protected]"
] | |
601313093805fa5b52a669f4aabdb2abb23443da | c4702d1a06640555829b367852138cc93ba4a161 | /dym_report_control_bank_book/report/__init__.py | 649ebd86e2fc758b147fe0472d1187289f512c42 | [] | no_license | Rizalimami/dym | 0ecadf9c049b22ebfebf92e4eab6eaad17dd3e26 | af1bcf7b77a3212bc8a8a0e41e6042a134587ed4 | refs/heads/master | 2020-04-08T10:56:43.605698 | 2018-11-27T06:44:08 | 2018-11-27T06:44:08 | 159,287,876 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 75 | py | import dym_report_control_bank_book
import dym_report_control_bank_book_xls | [
"[email protected]"
] | |
fbf38f59c9fbe582c839e6e470b99747c98c8744 | de739e91015f33e3ba4b72c1585288c27a995698 | /src/contacts/views.py | dbddb0926b146c0d87b98c156c839e6ff136a2b5 | [] | no_license | bartoszper/Django-FullBackend-Home-s-App | 771b2bab280938a1b56865fe5af8e73510504c45 | e1059a4dc3b5d62c22de376104656234183c3789 | refs/heads/main | 2023-02-15T03:42:11.110947 | 2021-01-07T14:03:33 | 2021-01-07T14:03:33 | 324,780,597 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,622 | py | from django.shortcuts import render, redirect
from django.contrib import messages
from .models import Contact
from django.core.mail import send_mail
# Create your views here.
def contact(request):
if request.method =='POST':
listing_id = request.POST['listing_id']
listing = request.POST['listing']
name = request.POST['name']
email = request.POST['email']
phone = request.POST['phone']
message = request.POST['message']
user_id = request.POST['user_id']
realtor_email = request.POST['realtor_email']
#Check if user has made inquiry already
if request.user.is_authenticated:
user_id = request.user.id
has_contacted = Contact.objects.all().filter(listing_id=listing_id, user_id=user_id)
if has_contacted:
messages.error(request,'You have already made an inquiry for this listing')
return redirect('/listings/'+listing_id)
contact = Contact(listing=listing,listing_id=listing_id,name=name, email=email, phone=phone, message=message, user_id=user_id)
contact.save()
#Send email
send_mail(
'Property Listing Inquiry',
'There has been an Inquiry for '+ listing_id + '. Sign into the admin panel for more info',
'[email protected]',
[realtor_email, '[email protected]'],
fail_silently=False
)
messages.success(request,'Your request has been submitted, a realtor will get back to you soon')
return redirect('/listings/'+listing_id) | [
"[email protected]"
] | |
4578ef9e5f77420c4060536efef1bb30511a1791 | fcd4d1bd5d600167b1c7c426ede24eb24b93533c | /test/test_core.py | 24f851bb81460fb4371fed69f30ae380ce262d51 | [
"MIT"
] | permissive | flying-tiger/tec_util | ed3a0c754c82491263a6eebc0c8a1c086977dfb7 | 293c39d11f5f080d7b6c2e3ba5aeea7419e56023 | refs/heads/master | 2023-04-14T15:10:34.944183 | 2023-03-24T05:33:02 | 2023-03-24T05:33:02 | 97,327,361 | 3 | 3 | null | null | null | null | UTF-8 | Python | false | false | 10,789 | py |
import math
import tecplot as tp
import tecplot.constant as tpc
import tec_util
import tempfile
import test
import unittest
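# Helper: read a Tecplot data file, replacing whatever dataset is currently loaded (ReadDataOption.Replace).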
def load_and_replace(dataset_name):
return tp.data.load_tecplot(dataset_name, read_data_option=tpc.ReadDataOption.Replace)
class TestDifferenceDatasets(unittest.TestCase):
''' Unit tests for the difference_datasets function '''
def test_nskip(self):
''' Check behavior of the nskip option '''
# default nskip: no variables should be diff'd
with test.temp_workspace():
tec_util.difference_datasets(
test.data_item_path("cube.dat"),
test.data_item_path("cube.dat"),
"diff.dat",
)
ds = load_and_replace("diff.dat")
self.assertEqual(
[ v.name for v in ds.variables() ],
[ "x", "y", "z" ],
)
for v in ds.variables():
zone_maxima = [ v.values(i).max() for i in range(v.num_zones) ]
self.assertAlmostEqual(max(zone_maxima), 0.5, delta=1e-6)
# nskip = 1: two variables should be diff'd
with test.temp_workspace():
tec_util.difference_datasets(
test.data_item_path("sphere.dat"),
test.data_item_path("sphere.dat"),
"diff.dat",
nskip=1,
)
ds = load_and_replace("diff.dat")
max_vals = {
"x" : 1.00,
"delta_y": 0.00,
"delta_z": 0.00,
}
for v in ds.variables():
zone_maxima = [ v.values(i).max() for i in range(v.num_zones) ]
self.assertAlmostEqual(max(zone_maxima), max_vals[v.name], delta=1e-6)
def test_variable_filter(self):
''' Test that we can select variables for differencing '''
# Compute delta on just z; keep x as grid variable
with test.temp_workspace():
tec_util.difference_datasets(
test.data_item_path("cube.dat"),
test.data_item_path("cube.dat"),
"diff.dat",
nskip=1,
select_vars="z",
)
ds = load_and_replace("diff.dat")
self.assertEqual(ds.num_variables, 2)
self.assertEqual(ds.variable(0).name, "x")
self.assertEqual(ds.variable(1).name, "delta_z")
def test_zone_filter(self):
''' Test that we can select zones for differencing '''
# Compute delta on just the even zones
with test.temp_workspace():
tec_util.difference_datasets(
test.data_item_path("cube.dat"),
test.data_item_path("cube.dat"),
"diff.dat",
nskip=1,
select_zones="*:[246]",
)
ds = load_and_replace("diff.dat")
self.assertEqual(ds.num_zones, 3)
self.assertTrue(ds.zone(0).name.endswith(":2"))
self.assertTrue(ds.zone(1).name.endswith(":4"))
self.assertTrue(ds.zone(2).name.endswith(":6"))
class TestExtract(unittest.TestCase):
''' Unit tests for extract function '''
def test_extract(self):
with test.temp_workspace():
ds = load_and_replace(test.data_item_path("sphere.dat"))
self.assertEqual(ds.num_variables,3)
self.assertEqual(ds.num_zones,6)
tec_util.extract(
test.data_item_path("sphere.dat"),
"extract.dat",
select_vars=['x','y'],
select_zones=['*:[246]'],
)
ds = load_and_replace("extract.dat")
self.assertEqual(ds.num_variables,2)
self.assertEqual(ds.num_zones,3)
class TestMergeDatasets(unittest.TestCase):
''' Unit tests for the merge_datasets function '''
def test_merge(self):
with test.temp_workspace():
tec_util.merge_datasets(
test.data_item_path("merge1.dat"),
test.data_item_path("merge2.dat"),
"merge.dat",
warn_duplicates=False
)
ds = load_and_replace("merge.dat")
self.assertEqual(ds.num_variables,5)
self.assertEqual(ds.num_zones,2)
# When variable in both dataset, values from dataset2 is used.
self.assertAlmostEqual(-6.4280895E-05, ds.zone('ZoneA').values('x')[15])
class TestRenameVariables(unittest.TestCase):
''' Unit test for the rename_variables function '''
def test_basic_rename(self):
''' Test that we can rename specific variable in the dataset '''
with test.temp_workspace():
tec_util.rename_variables(
test.data_item_path("cube.dat"),
"cube.dat",
{ "x":"xx", "z":"zz" }
)
ds = load_and_replace("cube.dat")
self.assertEqual(ds.variable(0).name, "xx")
self.assertEqual(ds.variable(1).name, "y")
self.assertEqual(ds.variable(2).name, "zz")
class TestRenameZones(unittest.TestCase):
''' Unit test for the rename_zones function '''
def test_basic_rename(self):
''' Test that we can rename specific zones in the dataset '''
with test.temp_workspace():
tec_util.rename_zones(
test.data_item_path("cube.dat"),
"cube.dat",
{
'cube.x:1' : 'front',
'cube.x:6' : 'bottom',
}
)
ds = load_and_replace("cube.dat")
self.assertEqual(ds.zone(0).name, "front")
self.assertEqual(ds.zone(5).name, "bottom")
class TestRevolveDataset(unittest.TestCase):
''' Unit test for the revolve_dataset function '''
def test_basic_useage(self):
''' Test that we can revolve a dataset and get the correct file out '''
with test.temp_workspace():
tec_util.revolve_dataset(
test.data_item_path("axi_sphere.plt"),
"sphere.plt",
planes = 13,
angle = 90.0,
)
ds = load_and_replace("sphere.plt")
vars = [v.name for v in ds.variables()]
self.assertEqual(vars,['x','y','z','q1','q2','v1','v2'])
self.assertEqual(ds.zone(0).dimensions,(11,9,13))
self.assertEqual(
ds.zone(0).values('y').minmax(),
ds.zone(0).values('z').minmax()
)
def test_radial_coord(self):
''' Verify ability to select the radial coordinate '''
with test.temp_workspace():
tec_util.revolve_dataset(
test.data_item_path("axi_sphere.plt"),
"sphere.plt",
radial_coord = 'v2',
planes = 13,
angle = 90.0,
)
ds = load_and_replace("sphere.plt")
vars = [v.name for v in ds.variables()]
self.assertEqual(vars,['x','y','q1','q2','v1','v2','z'])
self.assertEqual(
ds.zone(0).values('v2').minmax(),
ds.zone(0).values('z').minmax(),
)
tec_util.revolve_dataset(
test.data_item_path("axi_sphere.plt"),
"sphere.plt",
radial_coord = {'v2':('ry','rz')},
planes = 13,
angle = 90.0,
)
ds = load_and_replace("sphere.plt")
vars = [v.name for v in ds.variables()]
self.assertEqual(vars,['x','y','q1','q2','v1','v2','ry','rz'])
self.assertEqual(
ds.zone(0).values('v2').minmax(),
ds.zone(0).values('ry').minmax(),
)
self.assertEqual(
ds.zone(0).values('v2').minmax(),
ds.zone(0).values('rz').minmax(),
)
def test_vector_vars(self):
''' Verify we can specify variable to treat as vector quantities '''
with test.temp_workspace():
tec_util.revolve_dataset(
test.data_item_path("axi_sphere.plt"),
"sphere.plt",
planes = 13,
angle = 90.0,
vector_vars = ['v1','v2'],
)
ds = load_and_replace("sphere.plt")
vars = [v.name for v in ds.variables()]
self.assertEqual(vars,['x','y','z','q1','q2','v1','v1_cos','v1_sin','v2','v2_cos','v2_sin'])
z0 = ds.zone(0)
self.assertEqual(z0.values('v1').minmax(), z0.values('v1_cos').minmax())
self.assertEqual(z0.values('v1').minmax(), z0.values('v1_sin').minmax())
tec_util.revolve_dataset(
test.data_item_path("axi_sphere.plt"),
"sphere.plt",
planes = 13,
angle = 90.0,
vector_vars = {
'v1' : ('v1y','v1z'),
'v2' : ('v2y','v2z'),
},
)
ds = load_and_replace("sphere.plt")
vars = [v.name for v in ds.variables()]
self.assertEqual(vars,['x','y','z','q1','q2','v1','v1y','v1z','v2','v2y','v2z'])
z0 = ds.zone(0)
self.assertEqual(z0.values('v1').minmax(), z0.values('v1y').minmax())
self.assertEqual(z0.values('v1').minmax(), z0.values('v1z').minmax())
def test_surface_grid(self):
''' Verify we can create a surface by revoling a 1D generatrix '''
with test.temp_workspace():
tec_util.revolve_dataset(
test.data_item_path("axi_sphere_surf.plt"),
"sphere.plt",
planes = 13,
angle = 90.0,
)
ds = load_and_replace("sphere.plt")
vars = [v.name for v in ds.variables()]
self.assertEqual(vars,['x','y','z','q1','q2','v1','v2'])
self.assertEqual(ds.zone(0).dimensions,(11,13,1))
self.assertEqual(
ds.zone(0).values('y').minmax(),
ds.zone(0).values('z').minmax()
)
class TestInterpolate(unittest.TestCase):
''' Unit test for the interpolate_datasets function '''
def test_basic_function(self):
with test.temp_workspace():
tec_util.interpolate_dataset(
test.data_item_path("interp_src.dat"),
test.data_item_path("interp_tgt.dat"),
"interp_out.plt",
)
ds = load_and_replace("interp_out.plt")
vrange = ds.variable("r").values(0).minmax()
self.assertAlmostEqual(max(vrange), 6.39408e-01, delta=1e-6)
self.assertAlmostEqual(min(vrange), 5.10930e-01, delta=1e-6)
| [
"[email protected]"
] | |
0b07d60a906b142fe9a3afb10c3e60954ff00437 | c16ea32a4cddb6b63ad3bacce3c6db0259d2bacd | /google/cloud/resourcemanager/v3/resourcemanager-v3-py/google/cloud/resourcemanager_v3/services/projects/transports/grpc.py | b8b9c4703520260c1770ea86f373b20bcb2f9fdf | [
"Apache-2.0"
] | permissive | dizcology/googleapis-gen | 74a72b655fba2565233e5a289cfaea6dc7b91e1a | 478f36572d7bcf1dc66038d0e76b9b3fa2abae63 | refs/heads/master | 2023-06-04T15:51:18.380826 | 2021-06-16T20:42:38 | 2021-06-16T20:42:38 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 29,856 | py |
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import warnings
from typing import Callable, Dict, Optional, Sequence, Tuple, Union
from google.api_core import grpc_helpers # type: ignore
from google.api_core import operations_v1 # type: ignore
from google.api_core import gapic_v1 # type: ignore
import google.auth # type: ignore
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
import grpc # type: ignore
from google.cloud.resourcemanager_v3.types import projects
from google.iam.v1 import iam_policy_pb2 # type: ignore
from google.iam.v1 import policy_pb2 # type: ignore
from google.longrunning import operations_pb2 # type: ignore
from .base import ProjectsTransport, DEFAULT_CLIENT_INFO
class ProjectsGrpcTransport(ProjectsTransport):
"""gRPC backend transport for Projects.
Manages Google Cloud Projects.
This class defines the same methods as the primary client, so the
primary client can load the underlying transport implementation
and call it.
It sends protocol buffers over the wire using gRPC (which is built on
top of HTTP/2); the ``grpcio`` package must be installed.
"""
_stubs: Dict[str, Callable]
def __init__(self, *,
host: str = 'cloudresourcemanager.googleapis.com',
credentials: ga_credentials.Credentials = None,
credentials_file: str = None,
scopes: Sequence[str] = None,
channel: grpc.Channel = None,
api_mtls_endpoint: str = None,
client_cert_source: Callable[[], Tuple[bytes, bytes]] = None,
ssl_channel_credentials: grpc.ChannelCredentials = None,
client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None,
quota_project_id: Optional[str] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
) -> None:
"""Instantiate the transport.
Args:
host (Optional[str]):
The hostname to connect to.
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
This argument is ignored if ``channel`` is provided.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is ignored if ``channel`` is provided.
scopes (Optional(Sequence[str])): A list of scopes. This argument is
ignored if ``channel`` is provided.
channel (Optional[grpc.Channel]): A ``Channel`` instance through
which to make calls.
api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
If provided, it overrides the ``host`` argument and tries to create
a mutual TLS channel with client SSL credentials from
``client_cert_source`` or application default SSL credentials.
client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
Deprecated. A callback to provide client SSL certificate bytes and
private key bytes, both in PEM format. It is ignored if
``api_mtls_endpoint`` is None.
ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
for grpc channel. It is ignored if ``channel`` is provided.
client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]):
A callback to provide client certificate bytes and private key bytes,
both in PEM format. It is used to configure mutual TLS channel. It is
ignored if ``channel`` or ``ssl_channel_credentials`` is provided.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
Raises:
google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
creation failed for any reason.
google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
and ``credentials_file`` are passed.
"""
self._grpc_channel = None
self._ssl_channel_credentials = ssl_channel_credentials
self._stubs: Dict[str, Callable] = {}
self._operations_client = None
if api_mtls_endpoint:
warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning)
if client_cert_source:
warnings.warn("client_cert_source is deprecated", DeprecationWarning)
if channel:
# Ignore credentials if a channel was passed.
credentials = False
# If a channel was explicitly provided, set it.
self._grpc_channel = channel
self._ssl_channel_credentials = None
else:
if api_mtls_endpoint:
host = api_mtls_endpoint
# Create SSL credentials with client_cert_source or application
# default SSL credentials.
if client_cert_source:
cert, key = client_cert_source()
self._ssl_channel_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
else:
self._ssl_channel_credentials = SslCredentials().ssl_credentials
else:
if client_cert_source_for_mtls and not ssl_channel_credentials:
cert, key = client_cert_source_for_mtls()
self._ssl_channel_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
# The base transport sets the host, credentials and scopes
super().__init__(
host=host,
credentials=credentials,
credentials_file=credentials_file,
scopes=scopes,
quota_project_id=quota_project_id,
client_info=client_info,
)
if not self._grpc_channel:
self._grpc_channel = type(self).create_channel(
self._host,
credentials=self._credentials,
credentials_file=credentials_file,
scopes=self._scopes,
ssl_credentials=self._ssl_channel_credentials,
quota_project_id=quota_project_id,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
# Wrap messages. This must be done after self._grpc_channel exists
self._prep_wrapped_messages(client_info)
@classmethod
def create_channel(cls,
host: str = 'cloudresourcemanager.googleapis.com',
credentials: ga_credentials.Credentials = None,
credentials_file: str = None,
scopes: Optional[Sequence[str]] = None,
quota_project_id: Optional[str] = None,
**kwargs) -> grpc.Channel:
"""Create and return a gRPC channel object.
Args:
host (Optional[str]): The host for the channel to use.
credentials (Optional[~.Credentials]): The
authorization credentials to attach to requests. These
credentials identify this application to the service. If
none are specified, the client will attempt to ascertain
the credentials from the environment.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is mutually exclusive with credentials.
scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
service. These are only used when credentials are not specified and
are passed to :func:`google.auth.default`.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
kwargs (Optional[dict]): Keyword arguments, which are passed to the
channel creation.
Returns:
grpc.Channel: A gRPC channel object.
Raises:
google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
and ``credentials_file`` are passed.
"""
self_signed_jwt_kwargs = cls._get_self_signed_jwt_kwargs(host, scopes)
return grpc_helpers.create_channel(
host,
credentials=credentials,
credentials_file=credentials_file,
quota_project_id=quota_project_id,
**self_signed_jwt_kwargs,
**kwargs
)
@property
def grpc_channel(self) -> grpc.Channel:
"""Return the channel designed to connect to this service.
"""
return self._grpc_channel
@property
def operations_client(self) -> operations_v1.OperationsClient:
"""Create the client designed to process long-running operations.
This property caches on the instance; repeated calls return the same
client.
"""
# Sanity check: Only create a new client if we do not already have one.
if self._operations_client is None:
self._operations_client = operations_v1.OperationsClient(
self.grpc_channel
)
# Return the client from cache.
return self._operations_client
@property
def get_project(self) -> Callable[
[projects.GetProjectRequest],
projects.Project]:
r"""Return a callable for the get project method over gRPC.
Retrieves the project identified by the specified ``name`` (for
example, ``projects/415104041262``).
The caller must have ``resourcemanager.projects.get`` permission
for this project.
Returns:
Callable[[~.GetProjectRequest],
~.Project]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if 'get_project' not in self._stubs:
self._stubs['get_project'] = self.grpc_channel.unary_unary(
'/google.cloud.resourcemanager.v3.Projects/GetProject',
request_serializer=projects.GetProjectRequest.serialize,
response_deserializer=projects.Project.deserialize,
)
return self._stubs['get_project']
@property
def list_projects(self) -> Callable[
[projects.ListProjectsRequest],
projects.ListProjectsResponse]:
r"""Return a callable for the list projects method over gRPC.
Lists projects that are direct children of the specified folder
or organization resource. ``list()`` provides a strongly
consistent view of the projects underneath the specified parent
resource. ``list()`` returns projects sorted based upon the
(ascending) lexical ordering of their ``display_name``. The
caller must have ``resourcemanager.projects.list`` permission on
the identified parent.
Returns:
Callable[[~.ListProjectsRequest],
~.ListProjectsResponse]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if 'list_projects' not in self._stubs:
self._stubs['list_projects'] = self.grpc_channel.unary_unary(
'/google.cloud.resourcemanager.v3.Projects/ListProjects',
request_serializer=projects.ListProjectsRequest.serialize,
response_deserializer=projects.ListProjectsResponse.deserialize,
)
return self._stubs['list_projects']
@property
def search_projects(self) -> Callable[
[projects.SearchProjectsRequest],
projects.SearchProjectsResponse]:
r"""Return a callable for the search projects method over gRPC.
Search for projects that the caller has both
``resourcemanager.projects.get`` permission on, and also satisfy
the specified query.
This method returns projects in an unspecified order.
This method is eventually consistent with project mutations;
this means that a newly created project may not appear in the
results or recent updates to an existing project may not be
reflected in the results. To retrieve the latest state of a
project, use the
[GetProject][google.cloud.resourcemanager.v3.Projects.GetProject]
method.
Returns:
Callable[[~.SearchProjectsRequest],
~.SearchProjectsResponse]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if 'search_projects' not in self._stubs:
self._stubs['search_projects'] = self.grpc_channel.unary_unary(
'/google.cloud.resourcemanager.v3.Projects/SearchProjects',
request_serializer=projects.SearchProjectsRequest.serialize,
response_deserializer=projects.SearchProjectsResponse.deserialize,
)
return self._stubs['search_projects']
@property
def create_project(self) -> Callable[
[projects.CreateProjectRequest],
operations_pb2.Operation]:
r"""Return a callable for the create project method over gRPC.
Request that a new project be created. The result is an
``Operation`` which can be used to track the creation process.
This process usually takes a few seconds, but can sometimes take
much longer. The tracking ``Operation`` is automatically deleted
after a few hours, so there is no need to call
``DeleteOperation``.
Returns:
Callable[[~.CreateProjectRequest],
~.Operation]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if 'create_project' not in self._stubs:
self._stubs['create_project'] = self.grpc_channel.unary_unary(
'/google.cloud.resourcemanager.v3.Projects/CreateProject',
request_serializer=projects.CreateProjectRequest.serialize,
response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs['create_project']
@property
def update_project(self) -> Callable[
[projects.UpdateProjectRequest],
operations_pb2.Operation]:
r"""Return a callable for the update project method over gRPC.
Updates the ``display_name`` and labels of the project
identified by the specified ``name`` (for example,
``projects/415104041262``). Deleting all labels requires an
update mask for labels field.
The caller must have ``resourcemanager.projects.update``
permission for this project.
Returns:
Callable[[~.UpdateProjectRequest],
~.Operation]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if 'update_project' not in self._stubs:
self._stubs['update_project'] = self.grpc_channel.unary_unary(
'/google.cloud.resourcemanager.v3.Projects/UpdateProject',
request_serializer=projects.UpdateProjectRequest.serialize,
response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs['update_project']
@property
def move_project(self) -> Callable[
[projects.MoveProjectRequest],
operations_pb2.Operation]:
r"""Return a callable for the move project method over gRPC.
Move a project to another place in your resource hierarchy,
under a new resource parent.
Returns an operation which can be used to track the process of
the project move workflow. Upon success, the
``Operation.response`` field will be populated with the moved
project.
The caller must have ``resourcemanager.projects.update``
permission on the project and have
``resourcemanager.projects.move`` permission on the project's
current and proposed new parent.
Returns:
Callable[[~.MoveProjectRequest],
~.Operation]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if 'move_project' not in self._stubs:
self._stubs['move_project'] = self.grpc_channel.unary_unary(
'/google.cloud.resourcemanager.v3.Projects/MoveProject',
request_serializer=projects.MoveProjectRequest.serialize,
response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs['move_project']
@property
def delete_project(self) -> Callable[
[projects.DeleteProjectRequest],
operations_pb2.Operation]:
r"""Return a callable for the delete project method over gRPC.
Marks the project identified by the specified ``name`` (for
example, ``projects/415104041262``) for deletion.
This method will only affect the project if it has a lifecycle
state of
[ACTIVE][google.cloud.resourcemanager.v3.Project.State.ACTIVE].
This method changes the Project's lifecycle state from
[ACTIVE][google.cloud.resourcemanager.v3.Project.State.ACTIVE]
to
[DELETE_REQUESTED][google.cloud.resourcemanager.v3.Project.State.DELETE_REQUESTED].
The deletion starts at an unspecified time, at which point the
Project is no longer accessible.
Until the deletion completes, you can check the lifecycle state
checked by retrieving the project with [GetProject]
[google.cloud.resourcemanager.v3.Projects.GetProject], and the
project remains visible to [ListProjects]
[google.cloud.resourcemanager.v3.Projects.ListProjects].
However, you cannot update the project.
After the deletion completes, the project is not retrievable by
the [GetProject]
[google.cloud.resourcemanager.v3.Projects.GetProject],
[ListProjects]
[google.cloud.resourcemanager.v3.Projects.ListProjects], and
[SearchProjects][google.cloud.resourcemanager.v3.Projects.SearchProjects]
methods.
This method behaves idempotently, such that deleting a
``DELETE_REQUESTED`` project will not cause an error, but also
won't do anything.
The caller must have ``resourcemanager.projects.delete``
permissions for this project.
Returns:
Callable[[~.DeleteProjectRequest],
~.Operation]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if 'delete_project' not in self._stubs:
self._stubs['delete_project'] = self.grpc_channel.unary_unary(
'/google.cloud.resourcemanager.v3.Projects/DeleteProject',
request_serializer=projects.DeleteProjectRequest.serialize,
response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs['delete_project']
@property
def undelete_project(self) -> Callable[
[projects.UndeleteProjectRequest],
operations_pb2.Operation]:
r"""Return a callable for the undelete project method over gRPC.
Restores the project identified by the specified ``name`` (for
example, ``projects/415104041262``). You can only use this
method for a project that has a lifecycle state of
[DELETE_REQUESTED] [Projects.State.DELETE_REQUESTED]. After
deletion starts, the project cannot be restored.
The caller must have ``resourcemanager.projects.undelete``
permission for this project.
Returns:
Callable[[~.UndeleteProjectRequest],
~.Operation]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if 'undelete_project' not in self._stubs:
self._stubs['undelete_project'] = self.grpc_channel.unary_unary(
'/google.cloud.resourcemanager.v3.Projects/UndeleteProject',
request_serializer=projects.UndeleteProjectRequest.serialize,
response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs['undelete_project']
@property
def get_iam_policy(self) -> Callable[
[iam_policy_pb2.GetIamPolicyRequest],
policy_pb2.Policy]:
r"""Return a callable for the get iam policy method over gRPC.
Returns the IAM access control policy for the
specified project. Permission is denied if the policy or
the resource do not exist.
Returns:
Callable[[~.GetIamPolicyRequest],
~.Policy]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if 'get_iam_policy' not in self._stubs:
self._stubs['get_iam_policy'] = self.grpc_channel.unary_unary(
'/google.cloud.resourcemanager.v3.Projects/GetIamPolicy',
request_serializer=iam_policy_pb2.GetIamPolicyRequest.SerializeToString,
response_deserializer=policy_pb2.Policy.FromString,
)
return self._stubs['get_iam_policy']
@property
def set_iam_policy(self) -> Callable[
[iam_policy_pb2.SetIamPolicyRequest],
policy_pb2.Policy]:
r"""Return a callable for the set iam policy method over gRPC.
Sets the IAM access control policy for the specified project.
CAUTION: This method will replace the existing policy, and
cannot be used to append additional IAM settings.
Note: Removing service accounts from policies or changing their
roles can render services completely inoperable. It is important
to understand how the service account is being used before
removing or updating its roles.
The following constraints apply when using ``setIamPolicy()``:
- Project does not support ``allUsers`` and
``allAuthenticatedUsers`` as ``members`` in a ``Binding`` of
a ``Policy``.
- The owner role can be granted to a ``user``,
``serviceAccount``, or a group that is part of an
organization. For example, [email protected]
could be added as an owner to a project in the
myownpersonaldomain.com organization, but not the
examplepetstore.com organization.
- Service accounts can be made owners of a project directly
without any restrictions. However, to be added as an owner, a
user must be invited using the Cloud Platform console and
must accept the invitation.
- A user cannot be granted the owner role using
``setIamPolicy()``. The user must be granted the owner role
using the Cloud Platform Console and must explicitly accept
the invitation.
- Invitations to grant the owner role cannot be sent using
``setIamPolicy()``; they must be sent only using the Cloud
Platform Console.
- Membership changes that leave the project without any owners
that have accepted the Terms of Service (ToS) will be
rejected.
- If the project is not part of an organization, there must be
at least one owner who has accepted the Terms of Service
(ToS) agreement in the policy. Calling ``setIamPolicy()`` to
remove the last ToS-accepted owner from the policy will fail.
This restriction also applies to legacy projects that no
longer have owners who have accepted the ToS. Edits to IAM
policies will be rejected until the lack of a ToS-accepting
owner is rectified.
- Calling this method requires enabling the App Engine Admin
API.
Returns:
Callable[[~.SetIamPolicyRequest],
~.Policy]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if 'set_iam_policy' not in self._stubs:
self._stubs['set_iam_policy'] = self.grpc_channel.unary_unary(
'/google.cloud.resourcemanager.v3.Projects/SetIamPolicy',
request_serializer=iam_policy_pb2.SetIamPolicyRequest.SerializeToString,
response_deserializer=policy_pb2.Policy.FromString,
)
return self._stubs['set_iam_policy']
@property
def test_iam_permissions(self) -> Callable[
[iam_policy_pb2.TestIamPermissionsRequest],
iam_policy_pb2.TestIamPermissionsResponse]:
r"""Return a callable for the test iam permissions method over gRPC.
Returns permissions that a caller has on the
specified project.
Returns:
Callable[[~.TestIamPermissionsRequest],
~.TestIamPermissionsResponse]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if 'test_iam_permissions' not in self._stubs:
self._stubs['test_iam_permissions'] = self.grpc_channel.unary_unary(
'/google.cloud.resourcemanager.v3.Projects/TestIamPermissions',
request_serializer=iam_policy_pb2.TestIamPermissionsRequest.SerializeToString,
response_deserializer=iam_policy_pb2.TestIamPermissionsResponse.FromString,
)
return self._stubs['test_iam_permissions']
__all__ = (
'ProjectsGrpcTransport',
)
| [
"bazel-bot-development[bot]@users.noreply.github.com"
] | bazel-bot-development[bot]@users.noreply.github.com |
5940e046c703f060ca753fc376250d9a4521d510 | 17044b6e4bc0c1e21721f0936784d674426905f6 | /venv/bin/trial | c6574dda2adaafd7beac587085bcc2f0f9f4c506 | [] | no_license | Moni1995/covid_info_scraper | 2ed0f68f7bcd2a7dc81cb6af69cd5b292d443fee | 9c4e246283c18846c59c2fcc77e2d681741a89d7 | refs/heads/master | 2021-04-09T18:20:04.895628 | 2020-04-07T21:11:55 | 2020-04-07T21:11:55 | 248,864,857 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 247 | #!/Users/moni/Desktop/spider/venv/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from twisted.scripts.trial import run
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(run())
| [
"[email protected]"
] | ||
ba4e6bcc6fefc8c0f12f444e54eccfff3a42cd05 | 6c67574a224ac943fd37f12ff7511c64d788506a | /uiprog/streamlitdemo/tests/test_redshifts.py | 29bc7ca95687630f2510aec49926b56ea999cd8a | [
"MIT"
] | permissive | colinleach/tinkerings | b0354d9c13b2ae0d71b2647f9508099cedde8ae0 | 38f1595dd11f5cb1ebe435f834cfde2d3c5b1899 | refs/heads/master | 2023-04-12T23:01:07.675844 | 2023-04-12T13:33:00 | 2023-04-12T13:33:00 | 229,802,539 | 0 | 0 | MIT | 2023-01-31T04:21:21 | 2019-12-23T18:19:27 | Jupyter Notebook | UTF-8 | Python | false | false | 824 | py | import numpy as np
import astropy.units as u
from astropy.units import Unit
from pytest import approx
from streamlitdemo import redshifts
rs = redshifts.Redshifts()
def test_init():
# currently 6 cosmologies, astropy may change this in future
cosm_count = len(rs.cosmologies)
assert cosm_count >= 6
assert rs.df.shape[0] == 200
assert rs.df.shape[1] == cosm_count + 1
assert rs.df_long.shape[0] == 200 * cosm_count
assert rs.df_long.shape[1] == 3
def test_calc_angular_diameter():
dist, ages, ageticks = rs.calc_angular_diameter()
assert len(dist) == 200
expected_ages = np.array([13., 10., 8., 6., 5., 4., 3., 2., 1.5, 1.2, 1.])
assert len(ages) == len(expected_ages)
assert not (ages.value - expected_ages).any()
assert len(ageticks) == len(expected_ages)
| [
"[email protected]"
] | |
6e10852633ec27578c9b29311731fbc6a9728556 | 80f9b05dcbbc6ff543120e0755e38adf4db3cd73 | /run_ner.py | eb3601d387e74aea2f27c7a89969648b9de98449 | [
"Unlicense"
] | permissive | cacco-kozaka/ren-treb | 9f7166a0e7d87d1a1d4d3b4fba44f685cd243900 | 5af167aca311f98a87065455969e3a3330d67542 | refs/heads/main | 2023-02-18T22:33:11.914179 | 2021-01-18T04:37:19 | 2021-01-18T04:37:19 | 330,552,255 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 27,814 | py | from __future__ import absolute_import, division, print_function
import argparse
import csv
import json
import logging
import os
import random
import sys
import numpy as np
import torch
import torch.nn.functional as F
from pytorch_transformers import (WEIGHTS_NAME, AdamW, BertConfig,
BertForTokenClassification, BertTokenizer,
WarmupLinearSchedule)
from torch import nn
from torch.utils.data import (DataLoader, RandomSampler, SequentialSampler,
TensorDataset)
from torch.utils.data.distributed import DistributedSampler
from tqdm import tqdm, trange
from seqeval.metrics import classification_report
logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
datefmt='%m/%d/%Y %H:%M:%S',
level=logging.INFO)
logger = logging.getLogger(__name__)
class Ner(BertForTokenClassification):
def forward(self, input_ids, token_type_ids=None, attention_mask=None, labels=None, valid_ids=None, attention_mask_label=None):
sequence_output = self.bert(
input_ids, token_type_ids, attention_mask, head_mask=None)[0]
batch_size, max_len, feat_dim = sequence_output.shape
valid_output = torch.zeros(
batch_size, max_len, feat_dim, dtype=torch.float32, device='cuda')
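# valid_ids marks the first WordPiece sub-token of every original word; the
# loop below compacts sequence_output so that classification is performed on
# one vector per word rather than one per sub-token.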
for i in range(batch_size):
jj = -1
for j in range(max_len):
if valid_ids[i][j].item() == 1:
jj += 1
valid_output[i][jj] = sequence_output[i][j]
sequence_output = self.dropout(valid_output)
logits = self.classifier(sequence_output)
if labels is not None:
loss_fct = nn.CrossEntropyLoss(ignore_index=0)
# Only keep active parts of the loss
#attention_mask_label = None
if attention_mask_label is not None:
active_loss = attention_mask_label.view(-1) == 1
active_logits = logits.view(-1, self.num_labels)[active_loss]
active_labels = labels.view(-1)[active_loss]
loss = loss_fct(active_logits, active_labels)
else:
loss = loss_fct(
logits.view(-1, self.num_labels), labels.view(-1))
return loss
else:
return logits
class InputExample(object):
"""A single training/test example for simple sequence classification."""
def __init__(self, guid, text_a, text_b=None, label=None):
"""Constructs a InputExample.
Args:
guid: Unique id for the example.
text_a: string. The untokenized text of the first sequence. For single
sequence tasks, only this sequence must be specified.
text_b: (Optional) string. The untokenized text of the second sequence.
Only must be specified for sequence pair tasks.
label: (Optional) string. The label of the example. This should be
specified for train and dev examples, but not for test examples.
"""
self.guid = guid
self.text_a = text_a
self.text_b = text_b
self.label = label
class InputFeatures(object):
"""A single set of features of data."""
def __init__(self, input_ids, input_mask, segment_ids, label_id, valid_ids=None, label_mask=None):
self.input_ids = input_ids
self.input_mask = input_mask
self.segment_ids = segment_ids
self.label_id = label_id
self.valid_ids = valid_ids
self.label_mask = label_mask
def readfile(filename):
'''
Read a CoNLL-style file into a list of (sentence, labels) pairs.
'''
f = open(filename)
data = []
sentence = []
label = []
for line in f:
if len(line) == 0 or line.startswith('-DOCSTART') or line[0] == "\n":
if len(sentence) > 0:
data.append((sentence, label))
sentence = []
label = []
continue
splits = line.split(' ')
sentence.append(splits[0])
label.append(splits[-1][:-1])
if len(sentence) > 0:
data.append((sentence, label))
sentence = []
label = []
f.close()
return data
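# The expected input is CoNLL-2003 style: one token per line with the word in
# the first field and its tag in the last field, and blank lines separating
# sentences, e.g. (illustrative snippet; any extra middle columns are ignored):
#   EU B-ORG
#   rejects O
#   German B-MISC
#
#   Peter B-PER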
class DataProcessor(object):
"""Base class for data converters for sequence classification data sets."""
def get_train_examples(self, data_dir):
"""Gets a collection of `InputExample`s for the train set."""
raise NotImplementedError()
def get_dev_examples(self, data_dir):
"""Gets a collection of `InputExample`s for the dev set."""
raise NotImplementedError()
def get_labels(self):
"""Gets the list of labels for this data set."""
raise NotImplementedError()
@classmethod
def _read_tsv(cls, input_file, quotechar=None):
"""Reads a tab separated value file."""
return readfile(input_file)
class NerProcessor(DataProcessor):
"""Processor for the CoNLL-2003 data set."""
def get_train_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "train.txt")), "train")
def get_dev_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "valid.txt")), "dev")
def get_test_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "test.txt")), "test")
def get_labels(self):
return ["O", "B-MISC", "I-MISC", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC", "[CLS]", "[SEP]"]
def _create_examples(self, lines, set_type):
examples = []
for i, (sentence, label) in enumerate(lines):
guid = "%s-%s" % (set_type, i)
text_a = ' '.join(sentence)
text_b = None
label = label
examples.append(InputExample(
guid=guid, text_a=text_a, text_b=text_b, label=label))
return examples
def convert_examples_to_features(examples, label_list, max_seq_length, tokenizer):
"""Loads a data file into a list of `InputBatch`s."""
label_map = {label: i for i, label in enumerate(label_list, 1)}
features = []
for (ex_index, example) in enumerate(examples):
textlist = example.text_a.split(' ')
labellist = example.label
tokens = []
labels = []
valid = []
label_mask = []
for i, word in enumerate(textlist):
token = tokenizer.tokenize(word)
tokens.extend(token)
label_1 = labellist[i]
for m in range(len(token)):
if m == 0:
labels.append(label_1)
valid.append(1)
label_mask.append(1)
else:
valid.append(0)
if len(tokens) >= max_seq_length - 1:
tokens = tokens[0:(max_seq_length - 2)]
labels = labels[0:(max_seq_length - 2)]
valid = valid[0:(max_seq_length - 2)]
label_mask = label_mask[0:(max_seq_length - 2)]
ntokens = []
segment_ids = []
label_ids = []
ntokens.append("[CLS]")
segment_ids.append(0)
valid.insert(0, 1)
label_mask.insert(0, 1)
label_ids.append(label_map["[CLS]"])
for i, token in enumerate(tokens):
ntokens.append(token)
segment_ids.append(0)
if len(labels) > i:
label_ids.append(label_map[labels[i]])
ntokens.append("[SEP]")
segment_ids.append(0)
valid.append(1)
label_mask.append(1)
label_ids.append(label_map["[SEP]"])
input_ids = tokenizer.convert_tokens_to_ids(ntokens)
input_mask = [1] * len(input_ids)
label_mask = [1] * len(label_ids)
while len(input_ids) < max_seq_length:
input_ids.append(0)
input_mask.append(0)
segment_ids.append(0)
label_ids.append(0)
valid.append(1)
label_mask.append(0)
while len(label_ids) < max_seq_length:
label_ids.append(0)
label_mask.append(0)
assert len(input_ids) == max_seq_length
assert len(input_mask) == max_seq_length
assert len(segment_ids) == max_seq_length
assert len(label_ids) == max_seq_length
assert len(valid) == max_seq_length
assert len(label_mask) == max_seq_length
if ex_index < 5:
logger.info("*** Example ***")
logger.info("guid: %s" % (example.guid))
logger.info("tokens: %s" % " ".join(
[str(x) for x in tokens]))
logger.info("input_ids: %s" %
" ".join([str(x) for x in input_ids]))
logger.info("input_mask: %s" %
" ".join([str(x) for x in input_mask]))
logger.info(
"segment_ids: %s" % " ".join([str(x) for x in segment_ids]))
# logger.info("label: %s (id = %d)" % (example.label, label_ids))
features.append(
InputFeatures(input_ids=input_ids,
input_mask=input_mask,
segment_ids=segment_ids,
label_id=label_ids,
valid_ids=valid,
label_mask=label_mask))
return features
def main():
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument("--data_dir",
default=None,
type=str,
required=True,
help="The input data dir. Should contain the .tsv files (or other data files) for the task.")
parser.add_argument("--bert_model", default=None, type=str, required=True,
help="Bert pre-trained model selected in the list: bert-base-uncased, "
"bert-large-uncased, bert-base-cased, bert-large-cased, bert-base-multilingual-uncased, "
"bert-base-multilingual-cased, bert-base-chinese.")
parser.add_argument("--task_name",
default=None,
type=str,
required=True,
help="The name of the task to train.")
parser.add_argument("--output_dir",
default=None,
type=str,
required=True,
help="The output directory where the model predictions and checkpoints will be written.")
# Other parameters
parser.add_argument("--cache_dir",
default="",
type=str,
help="Where do you want to store the pre-trained models downloaded from s3")
parser.add_argument("--max_seq_length",
default=128,
type=int,
help="The maximum total input sequence length after WordPiece tokenization. \n"
"Sequences longer than this will be truncated, and sequences shorter \n"
"than this will be padded.")
parser.add_argument("--do_train",
action='store_true',
help="Whether to run training.")
parser.add_argument("--do_eval",
action='store_true',
help="Whether to run eval or not.")
parser.add_argument("--eval_on",
default="dev",
help="Whether to run eval on the dev set or test set.")
parser.add_argument("--do_lower_case",
action='store_true',
help="Set this flag if you are using an uncased model.")
parser.add_argument("--train_batch_size",
default=32,
type=int,
help="Total batch size for training.")
parser.add_argument("--eval_batch_size",
default=8,
type=int,
help="Total batch size for eval.")
parser.add_argument("--learning_rate",
default=5e-5,
type=float,
help="The initial learning rate for Adam.")
parser.add_argument("--num_train_epochs",
default=3.0,
type=float,
help="Total number of training epochs to perform.")
parser.add_argument("--warmup_proportion",
default=0.1,
type=float,
help="Proportion of training to perform linear learning rate warmup for. "
"E.g., 0.1 = 10%% of training.")
parser.add_argument("--weight_decay", default=0.01, type=float,
help="Weight deay if we apply some.")
parser.add_argument("--adam_epsilon", default=1e-8, type=float,
help="Epsilon for Adam optimizer.")
parser.add_argument("--max_grad_norm", default=1.0, type=float,
help="Max gradient norm.")
parser.add_argument("--no_cuda",
action='store_true',
help="Whether not to use CUDA when available")
parser.add_argument("--local_rank",
type=int,
default=-1,
help="local_rank for distributed training on gpus")
parser.add_argument('--seed',
type=int,
default=42,
help="random seed for initialization")
parser.add_argument('--gradient_accumulation_steps',
type=int,
default=1,
help="Number of updates steps to accumulate before performing a backward/update pass.")
parser.add_argument('--fp16',
action='store_true',
help="Whether to use 16-bit float precision instead of 32-bit")
parser.add_argument('--fp16_opt_level', type=str, default='O1',
help="For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
"See details at https://nvidia.github.io/apex/amp.html")
parser.add_argument('--loss_scale',
type=float, default=0,
help="Loss scaling to improve fp16 numeric stability. Only used when fp16 set to True.\n"
"0 (default value): dynamic loss scaling.\n"
"Positive power of 2: static loss scaling value.\n")
parser.add_argument('--server_ip', type=str, default='',
help="Can be used for distant debugging.")
parser.add_argument('--server_port', type=str, default='',
help="Can be used for distant debugging.")
args = parser.parse_args()
if args.server_ip and args.server_port:
# Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
import ptvsd
print("Waiting for debugger attach")
ptvsd.enable_attach(
address=(args.server_ip, args.server_port), redirect_output=True)
ptvsd.wait_for_attach()
processors = {"ner": NerProcessor}
if args.local_rank == -1 or args.no_cuda:
device = torch.device(
"cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
n_gpu = torch.cuda.device_count()
else:
torch.cuda.set_device(args.local_rank)
device = torch.device("cuda", args.local_rank)
n_gpu = 1
# Initializes the distributed backend which will take care of synchronizing nodes/GPUs
torch.distributed.init_process_group(backend='nccl')
logger.info("device: {} n_gpu: {}, distributed training: {}, 16-bits training: {}".format(
device, n_gpu, bool(args.local_rank != -1), args.fp16))
if args.gradient_accumulation_steps < 1:
raise ValueError("Invalid gradient_accumulation_steps parameter: {}, should be >= 1".format(
args.gradient_accumulation_steps))
args.train_batch_size = args.train_batch_size // args.gradient_accumulation_steps
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
if not args.do_train and not args.do_eval:
raise ValueError(
"At least one of `do_train` or `do_eval` must be True.")
if os.path.exists(args.output_dir) and os.listdir(args.output_dir) and args.do_train:
raise ValueError(
"Output directory ({}) already exists and is not empty.".format(args.output_dir))
if not os.path.exists(args.output_dir):
os.makedirs(args.output_dir)
task_name = args.task_name.lower()
if task_name not in processors:
raise ValueError("Task not found: %s" % (task_name))
processor = processors[task_name]()
label_list = processor.get_labels()
num_labels = len(label_list) + 1
tokenizer = BertTokenizer.from_pretrained(
args.bert_model, do_lower_case=args.do_lower_case)
train_examples = None
num_train_optimization_steps = 0
if args.do_train:
train_examples = processor.get_train_examples(args.data_dir)
num_train_optimization_steps = int(
len(train_examples) / args.train_batch_size / args.gradient_accumulation_steps) * args.num_train_epochs
if args.local_rank != -1:
num_train_optimization_steps = num_train_optimization_steps // torch.distributed.get_world_size()
if args.local_rank not in [-1, 0]:
# Make sure only the first process in distributed training will download model & vocab
torch.distributed.barrier()
# Prepare model
config = BertConfig.from_pretrained(
args.bert_model, num_labels=num_labels, finetuning_task=args.task_name)
model = Ner.from_pretrained(args.bert_model,
from_tf=False,
config=config)
if args.local_rank == 0:
# Make sure only the first process in distributed training will download model & vocab
torch.distributed.barrier()
model.to(device)
param_optimizer = list(model.named_parameters())
no_decay = ['bias', 'LayerNorm.weight']
optimizer_grouped_parameters = [
{'params': [p for n, p in param_optimizer if not any(
nd in n for nd in no_decay)], 'weight_decay': args.weight_decay},
{'params': [p for n, p in param_optimizer if any(
nd in n for nd in no_decay)], 'weight_decay': 0.0}
]
warmup_steps = int(args.warmup_proportion * num_train_optimization_steps)
optimizer = AdamW(optimizer_grouped_parameters,
lr=args.learning_rate, eps=args.adam_epsilon)
scheduler = WarmupLinearSchedule(
optimizer, warmup_steps=warmup_steps, t_total=num_train_optimization_steps)
if args.fp16:
try:
from apex import amp
except ImportError:
raise ImportError(
"Please install apex from https://www.github.com/nvidia/apex to use fp16 training.")
model, optimizer = amp.initialize(
model, optimizer, opt_level=args.fp16_opt_level)
# multi-gpu training (should be after apex fp16 initialization)
if n_gpu > 1:
model = torch.nn.DataParallel(model)
if args.local_rank != -1:
model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.local_rank],
output_device=args.local_rank,
find_unused_parameters=True)
global_step = 0
nb_tr_steps = 0
tr_loss = 0
label_map = {i: label for i, label in enumerate(label_list, 1)}
if args.do_train:
train_features = convert_examples_to_features(
train_examples, label_list, args.max_seq_length, tokenizer)
logger.info("***** Running training *****")
logger.info(" Num examples = %d", len(train_examples))
logger.info(" Batch size = %d", args.train_batch_size)
logger.info(" Num steps = %d", num_train_optimization_steps)
all_input_ids = torch.tensor(
[f.input_ids for f in train_features], dtype=torch.long)
all_input_mask = torch.tensor(
[f.input_mask for f in train_features], dtype=torch.long)
all_segment_ids = torch.tensor(
[f.segment_ids for f in train_features], dtype=torch.long)
all_label_ids = torch.tensor(
[f.label_id for f in train_features], dtype=torch.long)
all_valid_ids = torch.tensor(
[f.valid_ids for f in train_features], dtype=torch.long)
all_lmask_ids = torch.tensor(
[f.label_mask for f in train_features], dtype=torch.long)
train_data = TensorDataset(all_input_ids, all_input_mask,
all_segment_ids, all_label_ids, all_valid_ids, all_lmask_ids)
if args.local_rank == -1:
train_sampler = RandomSampler(train_data)
else:
train_sampler = DistributedSampler(train_data)
train_dataloader = DataLoader(
train_data, sampler=train_sampler, batch_size=args.train_batch_size)
model.train()
for _ in trange(int(args.num_train_epochs), desc="Epoch"):
tr_loss = 0
nb_tr_examples, nb_tr_steps = 0, 0
for step, batch in enumerate(tqdm(train_dataloader, desc="Iteration")):
batch = tuple(t.to(device) for t in batch)
input_ids, input_mask, segment_ids, label_ids, valid_ids, l_mask = batch
loss = model(input_ids, segment_ids, input_mask,
label_ids, valid_ids, l_mask)
if n_gpu > 1:
loss = loss.mean() # mean() to average on multi-gpu.
if args.gradient_accumulation_steps > 1:
loss = loss / args.gradient_accumulation_steps
if args.fp16:
with amp.scale_loss(loss, optimizer) as scaled_loss:
scaled_loss.backward()
torch.nn.utils.clip_grad_norm_(
amp.master_params(optimizer), args.max_grad_norm)
else:
loss.backward()
torch.nn.utils.clip_grad_norm_(
model.parameters(), args.max_grad_norm)
tr_loss += loss.item()
nb_tr_examples += input_ids.size(0)
nb_tr_steps += 1
if (step + 1) % args.gradient_accumulation_steps == 0:
optimizer.step()
scheduler.step() # Update learning rate schedule
model.zero_grad()
global_step += 1
# Save a trained model and the associated configuration
model_to_save = model.module if hasattr(
model, 'module') else model # Only save the model it-self
model_to_save.save_pretrained(args.output_dir)
tokenizer.save_pretrained(args.output_dir)
label_map = {i: label for i, label in enumerate(label_list, 1)}
model_config = {"bert_model": args.bert_model, "do_lower": args.do_lower_case,
"max_seq_length": args.max_seq_length, "num_labels": len(label_list)+1, "label_map": label_map}
json.dump(model_config, open(os.path.join(
args.output_dir, "model_config.json"), "w"))
# Load a trained model and config that you have fine-tuned
else:
# Load a trained model and vocabulary that you have fine-tuned
model = Ner.from_pretrained(args.output_dir)
tokenizer = BertTokenizer.from_pretrained(
args.output_dir, do_lower_case=args.do_lower_case)
model.to(device)
if args.do_eval and (args.local_rank == -1 or torch.distributed.get_rank() == 0):
if args.eval_on == "dev":
eval_examples = processor.get_dev_examples(args.data_dir)
elif args.eval_on == "test":
eval_examples = processor.get_test_examples(args.data_dir)
else:
raise ValueError("eval on dev or test set only")
eval_features = convert_examples_to_features(
eval_examples, label_list, args.max_seq_length, tokenizer)
logger.info("***** Running evaluation *****")
logger.info(" Num examples = %d", len(eval_examples))
logger.info(" Batch size = %d", args.eval_batch_size)
all_input_ids = torch.tensor(
[f.input_ids for f in eval_features], dtype=torch.long)
all_input_mask = torch.tensor(
[f.input_mask for f in eval_features], dtype=torch.long)
all_segment_ids = torch.tensor(
[f.segment_ids for f in eval_features], dtype=torch.long)
all_label_ids = torch.tensor(
[f.label_id for f in eval_features], dtype=torch.long)
all_valid_ids = torch.tensor(
[f.valid_ids for f in eval_features], dtype=torch.long)
all_lmask_ids = torch.tensor(
[f.label_mask for f in eval_features], dtype=torch.long)
eval_data = TensorDataset(all_input_ids, all_input_mask,
all_segment_ids, all_label_ids, all_valid_ids, all_lmask_ids)
# Run prediction for full data
eval_sampler = SequentialSampler(eval_data)
eval_dataloader = DataLoader(
eval_data, sampler=eval_sampler, batch_size=args.eval_batch_size)
model.eval()
eval_loss, eval_accuracy = 0, 0
nb_eval_steps, nb_eval_examples = 0, 0
y_true = []
y_pred = []
label_map = {i: label for i, label in enumerate(label_list, 1)}
for input_ids, input_mask, segment_ids, label_ids, valid_ids, l_mask in tqdm(eval_dataloader, desc="Evaluating"):
input_ids = input_ids.to(device)
input_mask = input_mask.to(device)
segment_ids = segment_ids.to(device)
valid_ids = valid_ids.to(device)
label_ids = label_ids.to(device)
l_mask = l_mask.to(device)
with torch.no_grad():
logits = model(input_ids, segment_ids, input_mask,
valid_ids=valid_ids, attention_mask_label=l_mask)
logits = torch.argmax(F.log_softmax(logits, dim=2), dim=2)
logits = logits.detach().cpu().numpy()
label_ids = label_ids.to('cpu').numpy()
input_mask = input_mask.to('cpu').numpy()
for i, label in enumerate(label_ids):
temp_1 = []
temp_2 = []
for j, m in enumerate(label):
if j == 0:
continue
elif label_ids[i][j] == len(label_map):
y_true.append(temp_1)
y_pred.append(temp_2)
break
else:
temp_1.append(label_map[label_ids[i][j]])
temp_2.append(label_map[logits[i][j]])
report = classification_report(y_true, y_pred, digits=4)
logger.info("\n%s", report)
output_eval_file = os.path.join(args.output_dir, "eval_results.txt")
with open(output_eval_file, "w") as writer:
logger.info("***** Eval results *****")
logger.info("\n%s", report)
writer.write(report)
if __name__ == "__main__":
main()
| [
"[email protected]"
] | |
05a365a39f280182d1f4070a9632757d8047cb8a | 93e62e12c9e70f8adcb514692314fc7e3665b85b | /src/example/transmogrifier/importurban.py | 1b959fdf9b48f4699634685459c1174a255de912 | [] | no_license | jjaumotte/example.transmogrifier | 4087a0a9f4c9966874b31017ddc1bd8fbec65ac7 | c9ad6259d3cc199025fb907b647e52af58f7c18b | refs/heads/master | 2021-01-14T13:58:16.445241 | 2016-07-18T08:38:24 | 2016-07-18T08:38:24 | 63,138,221 | 0 | 0 | null | 2016-07-12T07:53:33 | 2016-07-12T07:53:32 | null | UTF-8 | Python | false | false | 1,208 | py | from collective.transmogrifier.interfaces import ISectionBlueprint
from collective.transmogrifier.interfaces import ISection
from zope.interface import classProvides
from zope.interface import implements
from collective.transmogrifier.utils import Expression, Condition
class ImportUrbanSection(object):
classProvides(ISectionBlueprint)
implements(ISection)
def __init__(self, transmogrifier, name, options, previous):
self.key = Expression(options['key'], transmogrifier, name, options)
self.value = Expression(options['value'], transmogrifier, name,
options)
self.condition = Condition(options.get('condition', 'python:True'),
transmogrifier, name, options)
self.previous = previous
def __iter__(self):
for item in self.previous:
key = self.key(item)
# import ipdb;
# ipdb.set_trace()
if item['GENRE'] is not None:
item['_path'] = item['GENRE']
if self.condition(item, key=key):
# import ipdb; ipdb.set_trace()
item[key] = self.value(item, key=key)
yield item
| [
"[email protected]"
] | |
215444e18369626b84e7db4e1d0ed595cabc24ae | ddd69cbb6acab8c321cd667d48899c4a245003d6 | /telemetry/telemetry/internal/platform/cast_platform_backend.py | 211dbd4b2fc7027f72baf14e6f05564e45ce9b2e | [
"BSD-3-Clause"
] | permissive | hyl946/catapult | 71895532b76dc7e6ad017f90cf3573f7aa0e5af2 | 22aa2610ecac1299d73eb94e1d82bb07edad62f1 | refs/heads/main | 2022-04-20T13:23:50.444266 | 2022-04-12T22:39:04 | 2022-04-13T00:42:27 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,102 | py | # Copyright 2019 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from __future__ import absolute_import
import logging
import platform
try:
from pexpect import pxssh # pylint: disable=import-error
except ImportError as e:
if platform.system() == 'Windows':
logging.info('pxssh not supported on Windows')
pxssh = None
from telemetry.core import platform as telemetry_platform
from telemetry.internal.platform import cast_device
from telemetry.internal.platform import platform_backend
class CastPlatformBackend(platform_backend.PlatformBackend):
def __init__(self, device):
super(CastPlatformBackend, self).__init__(device)
self._ip_addr = None
self._output_dir = device.output_dir
self._runtime_exe = device.runtime_exe
if device.ip_addr:
self._ip_addr = device.ip_addr
@classmethod
def SupportsDevice(cls, device):
return isinstance(device, cast_device.CastDevice)
@classmethod
def CreatePlatformForDevice(cls, device, finder_options):
assert cls.SupportsDevice(device)
return telemetry_platform.Platform(CastPlatformBackend(device))
@property
def output_dir(self):
return self._output_dir
@property
def runtime_exe(self):
return self._runtime_exe
@property
def ip_addr(self):
return self._ip_addr
def GetSSHSession(self):
ssh = pxssh.pxssh()
ssh.login(self._ip_addr, username='root', password='root')
return ssh
def IsRemoteDevice(self):
return False
def GetArchName(self):
return 'Arch type of device not yet supported in Cast'
def GetOSName(self):
return 'castos'
def GetDeviceTypeName(self):
return 'Cast Device'
def GetOSVersionName(self):
return ''
def GetOSVersionDetailString(self):
return 'CastOS'
def GetSystemTotalPhysicalMemory(self):
raise NotImplementedError()
def HasBeenThermallyThrottled(self):
return False
def IsThermallyThrottled(self):
return False
def InstallApplication(self, application):
raise NotImplementedError()
def LaunchApplication(self, application, parameters=None,
elevate_privilege=False):
raise NotImplementedError()
def PathExists(self, path, timeout=None, retries=None):
raise NotImplementedError()
def CanFlushIndividualFilesFromSystemCache(self):
return False
def FlushEntireSystemCache(self):
return None
def FlushSystemCacheForDirectory(self, directory):
return None
def StartActivity(self, intent, blocking):
raise NotImplementedError()
def CooperativelyShutdown(self, proc, app_name):
return False
def SupportFlushEntireSystemCache(self):
return False
def StartDisplayTracing(self):
raise NotImplementedError()
def StopDisplayTracing(self):
raise NotImplementedError()
def TakeScreenshot(self, file_path):
return None
def GetTypExpectationsTags(self):
tags = super(CastPlatformBackend, self).GetTypExpectationsTags()
tags.append(self.GetDeviceTypeName())
return tags
| [
"[email protected]"
] | |
6fdb000f918c40b42e6cb2b970ba10d123c1b355 | 267f9752460bc161af1f81eab822663b943618c0 | /Lista2/Tester.py | 5d340c9001af1bedb4ee70c7fe5873625094a23a | [] | no_license | damianKokot/TS | e9d610519f78ae7c8ce74209a16ae867924ee81b | 0dd5e56df7d98db2c316a96a8de93aac6913cc80 | refs/heads/master | 2022-07-18T20:31:17.418208 | 2020-05-18T09:23:00 | 2020-05-18T10:30:39 | 256,575,803 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,894 | py | import networkx as nx
from networkx.readwrite import json_graph
import numpy as np
import random
from copy import deepcopy
import json
edgeSpeed = 0
def loadGraphModel():
with open('networkData', 'r') as networkFile:
networkData = json.load(networkFile)
graph = json_graph.node_link_graph(networkData['graph'])
intensityMatrix = networkData['intensityMatrix']
return graph, intensityMatrix
def main():
graph, intensityMatrix = loadGraphModel()
graph.graph['sumOfIntensities'] = sum(map(sum, intensityMatrix))
attempts = 10000
averageDataSize = 5000
Tmax = 0.001
edgeSpeed = 100000000
probability = .95
stats = testModel(graph, intensityMatrix, edgeSpeed,
averageDataSize, Tmax, probability, attempts)
print('Reliability: {:.2f}% and average time passed: {:.6f}'.format(
stats['reliability'], stats['delay']))
print('where in {:.2f}% of cases there was a connection timeout'.format(
stats['timeouts']))
'''
Model testing functions
'''
def averageWaitTime(graph, m):
totalTime = 0
sumOfIntensity = graph.graph['sumOfIntensities']
for edge in graph.edges:
edgeData = graph.get_edge_data(*edge)
totalTime += edgeData['a']/(edgeData['c']/m - edgeData['a'])
return totalTime / sumOfIntensity
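# This is the standard average packet delay for a network of M/M/1 queues:
# T = (1/gamma) * sum over edges of a_e / (c_e/m - a_e), where gamma is the
# total packet intensity, a_e the intensity routed over edge e, c_e the edge
# capacity in bits/s and m the average packet size in bits.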
def testModel(graph, intensityMatrix, edgeSpeed, averageDataSize, Tmax, p, attempts):
passedAttempts = 0
delaysTotal = 0
timeoutsCount = 0
tooBigDataCount = 0
for _ in range(attempts):
updatedGraph = modifyMainGraphModel(graph, p)
if not nx.is_connected(updatedGraph):
continue
nx.set_edge_attributes(updatedGraph, edgeSpeed, 'c')
if not updateAOnPaths(updatedGraph, averageDataSize, intensityMatrix):
tooBigDataCount += 1
continue
waitTime = averageWaitTime(updatedGraph, averageDataSize)
if waitTime < Tmax:
passedAttempts += 1
delaysTotal += waitTime
else:
timeoutsCount += 1
return {
'reliability': getReliability(passedAttempts, attempts),
'timeouts': getTimeoutPercentage(timeoutsCount, attempts - passedAttempts),
'delay': getAverageDelay(delaysTotal, passedAttempts)
}
'''
Statistics functions
'''
def getReliability(passedAttempts, attempts):
return passedAttempts / attempts * 100
def getTimeoutPercentage(timeoutsCount, totalFailures):
if totalFailures == 0:
return 0
return timeoutsCount / totalFailures * 100
def getAverageDelay(timeTotal, passedAttempts):
if passedAttempts == 0:
return 0
return timeTotal / passedAttempts
'''
Graph modification functions
'''
def getRandom():
return random.randrange(1000) / 1000
def filterRandomEdges(edgesList, p):
return list(filter(lambda edge: getRandom() <= p, edgesList))
def modifyMainGraphModel(graph, p):
newGraph = nx.Graph()
newGraph.add_nodes_from(graph)
newGraph.add_edges_from(filterRandomEdges(graph.edges, p))
newGraph.graph = graph.graph
return newGraph
def updateAOnPaths(graph, averageDataSize, intensityMatrix):
'''
Accumulate the traffic intensity attribute 'a' on every edge along each shortest path; return False if any edge's load would exceed its capacity
'''
nx.set_edge_attributes(graph, 0.0, 'a')
for source, row in enumerate(intensityMatrix):
for target, weight in enumerate(row):
if source == target:
continue
path = nx.shortest_path(graph, source, target)
# Adding weight on path
for nodeIndex in range(len(path) - 1):
graph[path[nodeIndex]][path[nodeIndex + 1]]['a'] += weight
edge = graph[path[nodeIndex]][path[nodeIndex + 1]]
if edge['a'] * averageDataSize >= edge['c']:
return False
return True
main()
| [
"[email protected]"
] | |
529d1fdd77f81a07145c991e2bdee4399f9c54e6 | 67731d1c93094cf42cec62aefd277c8db4367e34 | /nlp_submission_fadhelharizdzulfikar_v2.py | 148e408bafb19173118e77a0a6bc276ecb9127c7 | [] | no_license | Ndhel97/submissions_BPLM | 2fad018ebed6f1cb37cf7a2452b8281dee2d1362 | 2a27b218777f96e551374628c7e2e20a912e17f1 | refs/heads/master | 2023-02-27T22:50:00.486240 | 2020-09-29T02:23:45 | 2020-09-29T02:23:45 | 337,601,367 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,940 | py | # -*- coding: utf-8 -*-
"""nlp-submission-fadhelharizdzulfikar_v2.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1wGOFvKiAkdLhV1KG9eMaqBujDrbT4c23
"""
from google.colab import drive
drive.mount('/content/drive')
import pandas as pd
df = pd.read_json('/content/drive/My Drive/Dicoding/Belajar Pengembangan Machine Learning/News_Category_Dataset_v2.json', lines=True)
df.head()
df.shape
len(df.category.unique())
"""There are 41 categories in this data. And i think it is too many for me as a beginner. So, I only take 5 categories for this submission. (POLITICS, FOOD & DRINK, BUSINESS, HOME & LIVING, TRAVEL)"""
df_first = df.loc[(df['category'] == 'POLITICS') | (df['category'] == 'FOOD & DRINK') | (df['category'] == 'BUSINESS') | (df['category'] == 'HOME & LIVING') | (df['category'] == 'TRAVEL')]
"""Delete some columns"""
df_second = df_first.drop(columns=['authors', 'link', 'date'])
"""Dataframe dimension"""
df_second.shape
"""Now, the data only contains 58984 rows.
Import libraries
"""
from keras.layers import Input, LSTM, Bidirectional, SpatialDropout1D, Dropout, Flatten, Dense, Embedding, BatchNormalization
from keras.models import Model
from keras.callbacks import EarlyStopping
from keras.preprocessing.text import Tokenizer, text_to_word_sequence
from keras.preprocessing.sequence import pad_sequences
from keras.utils import to_categorical
import nltk, os, re, string
from nltk.corpus import stopwords
# nltk.download() #wordnet stopwords
"""Clean the data"""
# LOWER CASE ALL CHARACTERS
df_second.headline = df_second.headline.apply(lambda x: x.lower())
df_second.short_description = df_second.short_description.apply(lambda x: x.lower())
## LEMMATIZATION
from nltk.stem import WordNetLemmatizer
from nltk.corpus import wordnet as wn
lemmatizer = WordNetLemmatizer()
def lem(text):
pos_dict = {'N': wn.NOUN, 'V': wn.VERB, 'J': wn.ADJ, 'R': wn.ADV}
return(' '.join([lemmatizer.lemmatize(w,pos_dict.get(t, wn.NOUN)) for w,t in nltk.pos_tag(text.split())]))
df_second.headline = df_second.headline.apply(lambda x: lem(x))
df_second.short_description = df_second.short_description.apply(lambda x: lem(x))
# REMOVING PUNCTUATION
def cleaner(text):
return(text.translate(str.maketrans('','', string.punctuation)))
df_second.headline = df_second.headline.apply(lambda x: cleaner(x))
df_second.short_description = df_second.short_description.apply(lambda x: cleaner(x))
# REMOVING NUMBERS
def rem_numbers(text):
return re.sub('[0-9]+','',text)
df_second['headline'] = df_second['headline'].apply(rem_numbers)
df_second['short_description'] = df_second['short_description'].apply(rem_numbers)
# REMOVING STOPWORDS
st_words = stopwords.words()
def stopword(text):
return(' '.join([w for w in text.split() if w not in st_words ]))
df_second.headline = df_second.headline.apply(lambda x: stopword(x))
df_second.short_description = df_second.short_description.apply(lambda x: stopword(x))
df_second.head()
"""Apply one hot encoding to the data."""
category = pd.get_dummies(df_second.category)
df_third = pd.concat([df_second, category], axis=1)
df_third = df_third.drop(columns='category')
df_third
"""concantenate headlines column with each short_description into headline dataframe. Put the labels in label dataframe."""
headline = df_third['short_description'].values + ' ' + df_third['headline'].values
label = df_third[['FOOD & DRINK', 'HOME & LIVING', 'POLITICS', 'BUSINESS', 'TRAVEL']].values
headline
"""Split data into training and validation"""
from sklearn.model_selection import train_test_split
headline_train, headline_test, label_train, label_test = train_test_split(headline, label, test_size=0.2, shuffle=True)
"""Process each words into token using tokenize function from Keras."""
max_len = 256
from tensorflow.keras.preprocessing.text import Tokenizer
from tensorflow.keras.preprocessing.sequence import pad_sequences
tokenizer = Tokenizer(lower=True, char_level=False)
tokenizer.fit_on_texts(headline_train)
tokenizer.fit_on_texts(headline_test)
sekuens_train = tokenizer.texts_to_sequences(headline_train)
sekuens_test = tokenizer.texts_to_sequences(headline_test)
padded_train = pad_sequences(sekuens_train, padding='post', maxlen=max_len)
padded_test = pad_sequences(sekuens_test, padding='post', maxlen=max_len)
word_to_index = tokenizer.word_index
"""Use GloVe learning algorithm for obtaining vector representations for words devolped by Stanford"""
vocab_size = len(word_to_index)
oov_tok = "<OOV>"
embedding_dim = 200
import numpy as np
embeddings_index = {};
# !unzip '/content/drive/My Drive/Dicoding/Belajar Pengembangan Machine Learning/1835_3176_compressed_glove.6B.200d.txt.zip'
with open('/content/glove.6B.200d.txt') as f:
for line in f:
values = line.split();
word = values[0];
coefs = np.asarray(values[1:], dtype='float32');
embeddings_index[word] = coefs;
embeddings_matrix = np.zeros((vocab_size+1, embedding_dim));
for word, i in word_to_index.items():
embedding_vector = embeddings_index.get(word);
if embedding_vector is not None:
embeddings_matrix[i] = embedding_vector;
"""Design the model."""
import tensorflow as tf
model = tf.keras.Sequential([
tf.keras.layers.Embedding(vocab_size+1, embedding_dim, weights=[embeddings_matrix], trainable=False, input_length = max_len),
tf.keras.layers.Bidirectional(LSTM(256, return_sequences=True)),
tf.keras.layers.Dropout(0.5),
tf.keras.layers.BatchNormalization(),
tf.keras.layers.SpatialDropout1D(0.5),
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(128,activation = 'relu'),
tf.keras.layers.Dense(64,activation = 'relu'),
tf.keras.layers.Dense(32,activation = 'relu'),
tf.keras.layers.BatchNormalization(),
tf.keras.layers.Dropout(0.5),
tf.keras.layers.Dense(5, activation='softmax')
])
# model = modeling()
model.compile(optimizer = 'adam', metrics = ['accuracy'], loss= 'categorical_crossentropy')
model.summary()
"""Train the model."""
from keras.callbacks import EarlyStopping
es = EarlyStopping(monitor='val_loss', mode='min', patience=5)
num_epochs = 20
history = model.fit(padded_train, label_train, epochs=num_epochs,
validation_data=(padded_test, label_test), verbose=2, callbacks=[es])
"""As you can see, we got validation accuracy about 90%. Next, plot the accuracy and loss."""
print(history.history.keys())
import matplotlib.pyplot as plt
# summarize history for accuracy
plt.plot(history.history['accuracy'])
plt.plot(history.history['val_accuracy'])
plt.title('model accuracy')
plt.ylabel('accuracy')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='upper left')
plt.show()
# summarize history for accuracy
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='upper left')
plt.show()
| [
"[email protected]"
] | |
f417d703c2a3646665254a487f6ff83029952ce2 | 13132aa50944d60c1d72f53cdc19e78172be2487 | /src/tests/core/search/search_test.py | b66ad5001d3c85b9313a5e3ad80d131928009f7a | [] | no_license | mikemeko/6.01_Tools | c8cff53d05a52e25d4ccd370f778569246e6f5a1 | 95461a6f748b51b3d98d16ef9db37e051f3e6dd3 | refs/heads/master | 2021-03-24T09:46:50.169250 | 2014-02-10T02:56:58 | 2014-02-10T02:56:58 | 7,247,933 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,125 | py | """
Unittests for search.py.
"""
__author__ = '[email protected] (Michael Mekonnen)'
from core.search.search import a_star
from core.search.search import Search_Node
from unittest import main
from unittest import TestCase
class Test_Search_Node(Search_Node):
"""
S
/ \
/ \
A B
| / \
| / \
C D E
"""
def __init__(self, state='S', parent=None, cost=0):
Search_Node.__init__(self, state, parent, cost)
def get_children(self):
return [Test_Search_Node(new_state, self, self.cost + 1) for new_state in
{'S': ['A', 'B'], 'A': ['C'], 'B': ['D', 'E']}.get(self.state, [])]
class Search_Test(TestCase):
"""
Tests for core/search/search.
"""
def test_search(self):
assert a_star(Test_Search_Node(), lambda state: state == 'S')[0].state == (
'S')
assert a_star(Test_Search_Node(), lambda state: state == 'E')[0].state == (
'E')
assert a_star(Test_Search_Node(), lambda state: state == 'F')[0] is None
assert a_star(Test_Search_Node(), lambda state: state in ['A',
'D'])[0].state == 'A'
if __name__ == '__main__':
main()
| [
"[email protected]"
] | |
5cc0cfd2cde036f6f550f6a6a8b808d5e20882fc | 5fc21a604a8644edc8ad7414b0272fc2a3e67760 | /master/inplace-config/templates/master.cfg.j2 | 4faa40c669d096a1a5b4c6173cf3ff2460965f11 | [
"MIT"
] | permissive | jpic/ansible-buildbot | e66e18540942690ddd1054885a5b1752cbb8c449 | b691306d3a5ca86a8b4e42643f1b20b11646bb0a | refs/heads/develop | 2020-03-28T15:35:48.163276 | 2015-06-28T06:58:09 | 2015-06-28T06:58:09 | 65,032,559 | 0 | 0 | null | 2016-08-05T16:18:02 | 2016-08-05T16:18:01 | null | UTF-8 | Python | false | false | 725 | j2 | # -*- python -*-
# ex: set syntax=python:
# {{ ansible_managed }}
import os
from buildbot.plugins import *
from {{ buildbot_master_inplace_name }}.common import config, loaders
from {{ buildbot_master_inplace_name }}.factories import reconfig
reload(config)
reload(loaders)
reload(reconfig)
cfg = config.BuildbotConfigurationWrapper()
cfg.setTitle("{{ buildbot_master_title }}")
cfg.setTitleUrl("{{ buildbot_master_title_url }}")
cfg.setConnection("{{ buildbot_master_url }}", {{ buildbot_master_slave_port }})
slaveNames = loaders.SlaveLoader.load(cfg, os.path.join(os.getcwd(), 'slaves/'))
loaders.ProjectLoader.load(cfg, os.path.join(os.getcwd(), 'projects/'), slaveNames)
c = BuildmasterConfig = cfg.getConfigDict()
| [
"[email protected]"
] | |
fb078c5e35ee4a3abfc8bb8dce0c5cfb6e846091 | 8e127742a638a3c35d6ad002e34c736d3c84db97 | /build/build_game_permutations/string_permutations.py | 58d50fa21732a8a2cae9a62981b8c0ab4d356f67 | [] | no_license | Adarsh2910/tictactoe | e4331ea0f1a9a71b19d51837211deb3fa807ec0f | 597b465541e7658ae510bc04319d3f7ba2286c96 | refs/heads/master | 2020-03-26T06:10:18.006172 | 2018-08-13T14:43:48 | 2018-08-13T14:43:48 | 144,592,834 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 766 | py | #SOURCE:
## http://www.geeksforgeeks.org/write-a-c-program-to-print-all-permutations-of-a-given-string/
# Python program to print all permutations with
# duplicates allowed
def toString(List):
return ''.join(List)
# Function to print permutations of string
# This function takes three parameters:
# 1. String
# 2. Starting index of the string
# 3. Ending index of the string.
def permute(a, l, r):
if l==r:
print toString(a)
else:
for i in xrange(l,r+1):
a[l], a[i] = a[i], a[l]
permute(a, l+1, r)
a[l], a[i] = a[i], a[l] # backtrack
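# Note: permute prints every arrangement of the list, n! lines for n distinct
# characters (362880 lines for the 9-character driver string below); strings
# with repeated characters print duplicate permutations, as the header states.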
# Driver program to test the above function
string = "123456789"
n = len(string)
a = list(string)
permute(a, 0, n-1)
# This code is contributed by Bhavya Jain | [
"[email protected]"
] | |
352f49df71d8216ca3c17a3626f578b5ce858adf | cd1095198b723745882787215ffbe8b2475bf544 | /US_Immigration_Trends/Model_Source.py | caf958328e8c8b10becf6eed31878be410864fc7 | [] | no_license | krishns18/Data_Projects | e1a5d8b3a30b3e1e7a1e44af49d5f7685d76b8db | a2bdeb7fa107d2d3ccd111fe007f136d408fb8b7 | refs/heads/master | 2020-12-23T11:28:59.985931 | 2020-09-17T03:22:47 | 2020-09-17T03:22:47 | 237,136,452 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 922 | py | class ModelSource:
def __init__(self,immi_trans,demog_trans,airport_clean):
self.immi_trans = immi_trans
self.demog_trans = demog_trans
self.airport_clean = airport_clean
def model_fact(self):
# Function to model fact table
immi_fact = self.immi_trans.join(self.demog_trans,self.immi_trans.i94addr == self.demog_trans.state_code,
how = 'left_semi')
immi_fact = immi_fact.join(self.airport_clean,immi_fact.i94port == self.airport_clean.local_code,
how = 'left_semi')
return immi_fact
def star_model(self):
# Function to model and generate fact and dimension tables
immi_fact = self.model_fact()
dim_demog = self.demog_trans
dim_airport = self.airport_clean
return immi_fact, dim_demog, dim_airport
| [
"[email protected]"
] | |
b948d559685a567428fab7d26c078d1cf6e55f8a | 1002160fec10d11ded23ffe3b555cb382a3568ca | /PY4E/exercisesANS/ex7_1.py | 731df58f14268b339349f92e46cfbf5d1cc4172c | [] | no_license | AREKKUSU-hyper/Python-Libraries | 8358a1093f9083286dd02e263f6c9d11d5221bd3 | 07bada98d250d8370706294d78bce04c6b39b0e4 | refs/heads/master | 2022-11-17T02:13:59.933905 | 2020-06-28T09:26:43 | 2020-06-28T09:26:43 | 275,550,573 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 442 | py | f=open("mbox.txt","r")
# inp=f.read() # Reads the entire file into the variable inp as a string
# print(len(inp)) # how many characters
# print(inp[:10]) # a newline character counts as one character
for line in f:
line=line.rstrip() # strip the trailing newline/whitespace from each line
if line.startswith("From"):
# if not line.startswith("From"): # opposite
# continue
if not "Q" in line:
continue
print(line)
| [
"[email protected]"
] | |
26d4b5bda0b588df51d043b24e83719cc9ec940b | 9953a5220d9811ede9760f4916c08b3bf8981eda | /src/model.py | 0a48153f2b1233f08ef4ff5f15101c1264f3dfd8 | [
"MIT"
] | permissive | sonalif/gpt-2 | 97450338dac1dddfa129c85aac62c9ecf6bf1ab3 | e8969b13bcca554795bf06cc1b12bdd9a21bd63b | refs/heads/master | 2023-05-30T15:22:01.278426 | 2020-02-12T21:39:46 | 2020-02-12T21:39:46 | 240,081,801 | 0 | 0 | MIT | 2023-05-22T22:17:08 | 2020-02-12T18:11:37 | Python | UTF-8 | Python | false | false | 6,634 | py | import numpy as np
import tensorflow as tf
from tensorflow.contrib.training import HParams
#from hparams import HParams
def default_hparams():
return HParams(
n_vocab=0,
n_ctx=1024,
n_embd=768,
n_head=12,
n_layer=12,
)
def shape_list(x):
"""Deal with dynamic shape in tensorflow cleanly."""
static = x.shape.as_list()
dynamic = tf.shape(x)
return [dynamic[i] if s is None else s for i, s in enumerate(static)]
def softmax(x, axis=-1):
x = x - tf.reduce_max(x, axis=axis, keepdims=True)
ex = tf.exp(x)
return ex / tf.reduce_sum(ex, axis=axis, keepdims=True)
def gelu(x):
return 0.5*x*(1+tf.tanh(np.sqrt(2/np.pi)*(x+0.044715*tf.pow(x, 3))))
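# gelu above is the tanh approximation of the Gaussian Error Linear Unit
# (Hendrycks & Gimpel, 2016): 0.5*x*(1 + tanh(sqrt(2/pi)*(x + 0.044715*x^3))).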
def norm(x, scope, *, axis=-1, epsilon=1e-5):
"""Normalize to mean = 0, std = 1, then do a diagonal affine transform."""
with tf.variable_scope(scope):
n_state = x.shape[-1].value
g = tf.get_variable('g', [n_state], initializer=tf.constant_initializer(1))
b = tf.get_variable('b', [n_state], initializer=tf.constant_initializer(0))
u = tf.reduce_mean(x, axis=axis, keepdims=True)
s = tf.reduce_mean(tf.square(x-u), axis=axis, keepdims=True)
x = (x - u) * tf.rsqrt(s + epsilon)
x = x*g + b
return x
def split_states(x, n):
"""Reshape the last dimension of x into [n, x.shape[-1]/n]."""
*start, m = shape_list(x)
return tf.reshape(x, start + [n, m//n])
def merge_states(x):
"""Smash the last two dimensions of x into a single dimension."""
*start, a, b = shape_list(x)
return tf.reshape(x, start + [a*b])
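# Example: with hparams.n_head == 12 and n_embd == 768, split_states reshapes
# [batch, sequence, 768] into [batch, sequence, 12, 64]; merge_states reverses
# that, collapsing the last two axes back into one.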
def conv1d(x, scope, nf, *, w_init_stdev=0.02):
with tf.variable_scope(scope):
*start, nx = shape_list(x)
w = tf.get_variable('w', [1, nx, nf], initializer=tf.random_normal_initializer(stddev=w_init_stdev))
b = tf.get_variable('b', [nf], initializer=tf.constant_initializer(0))
c = tf.reshape(tf.matmul(tf.reshape(x, [-1, nx]), tf.reshape(w, [-1, nf]))+b, start+[nf])
return c
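# conv1d above is effectively a position-wise dense layer: the kernel width is
# 1, so every position is projected independently from nx to nf features.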
def attention_mask(nd, ns, *, dtype):
"""1's in the lower triangle, counting from the lower right corner.
Same as tf.matrix_band_part(tf.ones([nd, ns]), -1, ns-nd), but doesn't produce garbage on TPUs.
"""
i = tf.range(nd)[:,None]
j = tf.range(ns)
m = i >= j - ns + nd
return tf.cast(m, dtype)
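# Worked example: attention_mask(2, 3, dtype=tf.float32) evaluates to
#   [[1., 1., 0.],
#    [1., 1., 1.]]
# so each of the 2 current positions can attend to the 1 cached past position,
# to itself and to earlier current positions, but never to future positions.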
def attn(x, scope, n_state, *, past, hparams):
assert x.shape.ndims == 3 # Should be [batch, sequence, features]
assert n_state % hparams.n_head == 0
if past is not None:
assert past.shape.ndims == 5 # Should be [batch, 2, heads, sequence, features], where 2 is [k, v]
def split_heads(x):
# From [batch, sequence, features] to [batch, heads, sequence, features]
return tf.transpose(split_states(x, hparams.n_head), [0, 2, 1, 3])
def merge_heads(x):
# Reverse of split_heads
return merge_states(tf.transpose(x, [0, 2, 1, 3]))
def mask_attn_weights(w):
# w has shape [batch, heads, dst_sequence, src_sequence], where information flows from src to dst.
_, _, nd, ns = shape_list(w)
b = attention_mask(nd, ns, dtype=w.dtype)
b = tf.reshape(b, [1, 1, nd, ns])
w = w*b - tf.cast(1e10, w.dtype)*(1-b)
return w
def multihead_attn(q, k, v):
# q, k, v have shape [batch, heads, sequence, features]
w = tf.matmul(q, k, transpose_b=True)
w = w * tf.rsqrt(tf.cast(v.shape[-1].value, w.dtype))
w = mask_attn_weights(w)
w = softmax(w)
a = tf.matmul(w, v)
return a
with tf.variable_scope(scope):
c = conv1d(x, 'c_attn', n_state*3)
q, k, v = map(split_heads, tf.split(c, 3, axis=2))
present = tf.stack([k, v], axis=1)
if past is not None:
pk, pv = tf.unstack(past, axis=1)
k = tf.concat([pk, k], axis=-2)
v = tf.concat([pv, v], axis=-2)
a = multihead_attn(q, k, v)
a = merge_heads(a)
a = conv1d(a, 'c_proj', n_state)
return a, present
def mlp(x, scope, n_state, *, hparams):
with tf.variable_scope(scope):
nx = x.shape[-1].value
h = gelu(conv1d(x, 'c_fc', n_state))
h2 = conv1d(h, 'c_proj', nx)
return h2
def block(x, scope, *, past, hparams):
with tf.variable_scope(scope):
nx = x.shape[-1].value
a, present = attn(norm(x, 'ln_1'), 'attn', nx, past=past, hparams=hparams)
x = x + a
m = mlp(norm(x, 'ln_2'), 'mlp', nx*4, hparams=hparams)
x = x + m
return x, present
def past_shape(*, hparams, batch_size=None, sequence=None):
return [batch_size, hparams.n_layer, 2, hparams.n_head, sequence, hparams.n_embd // hparams.n_head]
def expand_tile(value, size):
"""Add a new axis of given size."""
value = tf.convert_to_tensor(value, name='value')
ndims = value.shape.ndims
return tf.tile(tf.expand_dims(value, axis=0), [size] + [1]*ndims)
def positions_for(tokens, past_length):
batch_size = tf.shape(tokens)[0]
nsteps = tf.shape(tokens)[1]
return expand_tile(past_length + tf.range(nsteps), batch_size)
def model(hparams, X, past=None, scope='model', reuse=tf.compat.v1.AUTO_REUSE):
with tf.variable_scope(scope, reuse=reuse):
results = {}
batch, sequence = shape_list(X)
wpe = tf.get_variable('wpe', [hparams.n_ctx, hparams.n_embd],
initializer=tf.random_normal_initializer(stddev=0.01))
wte = tf.get_variable('wte', [hparams.n_vocab, hparams.n_embd],
initializer=tf.random_normal_initializer(stddev=0.02))
past_length = 0 if past is None else tf.shape(past)[-2]
h = tf.gather(wte, X) + tf.gather(wpe, positions_for(X, past_length))
# Transformer
presents = []
pasts = tf.unstack(past, axis=1) if past is not None else [None] * hparams.n_layer
assert len(pasts) == hparams.n_layer
for layer, past in enumerate(pasts):
h, present = block(h, 'h%d' % layer, past=past, hparams=hparams)
if layer == 10:
tf.add_to_collection('checkpoints', h)
presents.append(present)
results['present'] = tf.stack(presents, axis=1)
h = norm(h, 'ln_f')
# Language model loss. Do tokens <n predict token n?
h_flat = tf.reshape(h, [batch*sequence, hparams.n_embd])
logits = tf.matmul(h_flat, wte, transpose_b=True)
logits = tf.reshape(logits, [batch, sequence, hparams.n_vocab])
results['logits'] = logits
return results
| [
"[email protected]"
] | |
543c3b56bb2486234ee20e5aec6adb3ef20f7edf | 939eec126d7bd9a46847c3a98be07b2fba2dc08f | /EigVal.py | d9d76a80a6b8e26e9cecd7d40f2c2426f3613086 | [
"MIT"
] | permissive | OceanNuclear/Statistics | 0fbfeed6bc25028fdb15f785239f9a8ca36b9354 | 6ae5d16c78b8c68f7504b2d6cbe7efd4914d8cb5 | refs/heads/master | 2021-08-03T06:58:16.328017 | 2021-07-26T15:02:31 | 2021-07-26T15:02:31 | 163,417,809 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,242 | py | import numpy as np
import matplotlib.pyplot as plt
import numpy.random as rn
from numpy import array as ary
from numpy import sqrt
from numpy.linalg import svd, eig, eigvals, inv, pinv
def set_offdiag(mat, triu, inplace=True):
'''sets the off-diagonal elements of a symmetric matrix when the top triangle's values are given.'''
triu = ary(triu).flatten()
indices = ary(np.triu_indices_from(mat, k=1)).T
if inplace:
for ij, ord in zip(indices, triu):
i,j = ij
mat[i,j] = ord
mat[j,i] = ord
return mat
else:
matcopy = mat.copy()
for ij, ord in zip(indices, triu):
i,j = ij
matcopy[i,j] = ord
matcopy[j,i] = ord
return matcopy
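# Example: set_offdiag(np.zeros((3, 3)), [1, 2, 3]) fills the upper-triangle
# entries (0,1)=1, (0,2)=2, (1,2)=3 and mirrors them below the diagonal,
# yielding a symmetric matrix.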
if __name__=="__main__":
main_diag = [sqrt(1),.01]
covar_mat = np.diag(ary(main_diag, dtype=float))
set_offdiag(covar_mat, [0], inplace=True)
eigval, eigvec = eig(covar_mat)
print("eigval=", eigval)
print("eigvec=\n", eigvec)
xy = rn.multivariate_normal([0,0], covar_mat, size=1000)
x, y = xy.T
ax = plt.subplot()
ax.scatter(x,y)
ax.set_aspect(1) # equal aspect ratio
plt.show()
plt.clf() | [
"[email protected]"
] | |
8de0766a8ba323676ac585c2f0a37b9c61c626fb | 60de64366da69a2032612ef51bbdd3f7b880a538 | /tests/test_wsdl_soap.py | 9902c5a4bdb3fab5080d8f3d98a793337b20ec5c | [
"MIT",
"BSD-3-Clause"
] | permissive | VertigoRay/python-zeep | 52264b459ab19db89c5d0049dd679a2bafa77bca | 1970e10d5c68a1944be721c95d917b23316e0891 | refs/heads/master | 2021-01-17T10:57:52.583089 | 2016-05-15T05:38:29 | 2016-05-15T05:38:29 | 58,754,770 | 0 | 0 | null | 2016-05-13T16:12:05 | 2016-05-13T16:12:04 | null | UTF-8 | Python | false | false | 5,494 | py | from collections import OrderedDict
from lxml import etree
from tests.utils import assert_nodes_equal, load_xml
from zeep import xsd
from zeep.wsdl import definitions, soap
def test_document_message_serializer():
msg = soap.DocumentMessage(
wsdl=None,
name=None,
operation=None,
nsmap=soap.Soap11Binding.nsmap)
namespace = 'http://docs.python-zeep.org/tests/document'
# Fake resolve()
msg.body = xsd.Element(
etree.QName(namespace, 'response'),
xsd.ComplexType([
xsd.Element(etree.QName(namespace, 'arg1'), xsd.String()),
xsd.Element(etree.QName(namespace, 'arg2'), xsd.String()),
])
)
msg.namespace = {
'body': 'http://docs.python-zeep.org/tests/document',
'header': None,
'headerfault': None
}
body, header, headerfault = msg.serialize(arg1='ah1', arg2='ah2')
expected = """
<?xml version="1.0"?>
<soap-env:Body
xmlns:soap="http://schemas.xmlsoap.org/wsdl/soap/"
xmlns:soap-env="http://schemas.xmlsoap.org/soap/envelope/"
xmlns:wsdl="http://schemas.xmlsoap.org/wsdl/"
xmlns:xsd="http://www.w3.org/2001/XMLSchema">
<ns0:response xmlns:ns0="http://docs.python-zeep.org/tests/document">
<ns0:arg1>ah1</ns0:arg1>
<ns0:arg2>ah2</ns0:arg2>
</ns0:response>
</soap-env:Body>
"""
assert_nodes_equal(expected, body)
def test_document_message_deserializer():
response_body = load_xml("""
<SOAP-ENV:Body
xmlns:SOAP-ENV="http://schemas.xmlsoap.org/soap/envelope/"
xmlns:xsd="http://www.w3.org/2001/XMLSchema"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">
<mns:response xmlns:mns="http://docs.python-zeep.org/tests/document"
SOAP-ENV:encodingStyle="http://schemas.xmlsoap.org/soap/encoding/">
<mns:return type="xsd:string">foobar</mns:return>
</mns:response>
</SOAP-ENV:Body>
""") # noqa
msg = soap.DocumentMessage(
wsdl=None,
name=None,
operation=None,
nsmap=soap.Soap11Binding.nsmap)
# Fake resolve()
namespace = 'http://docs.python-zeep.org/tests/document'
msg.abstract = definitions.AbstractMessage(
etree.QName(namespace, 'Method1Response'))
msg.abstract.parts = OrderedDict([
('body', definitions.MessagePart(
element=xsd.Element(
etree.QName(namespace, 'response'),
xsd.ComplexType([
xsd.Element(etree.QName(namespace, 'return'), xsd.String()),
])
),
type=None))
])
msg.namespace = {
'body': 'http://docs.python-zeep.org/tests/document',
'header': None,
'headerfault': None
}
result = msg.deserialize(response_body)
assert result == 'foobar'
def test_rpc_message_serializer():
msg = soap.RpcMessage(
wsdl=None,
name=None,
operation=None,
nsmap=soap.Soap11Binding.nsmap)
# Fake resolve()
msg.abstract = definitions.AbstractMessage(
etree.QName('{http://docs.python-zeep.org/tests/rpc}Method1Response'))
msg.abstract.parts = OrderedDict([
('arg1', definitions.MessagePart(
element=None, type=xsd.String())),
('arg2', definitions.MessagePart(
element=None, type=xsd.String())),
])
msg.namespace = {
'body': 'http://docs.python-zeep.org/tests/rpc',
'header': None,
'headerfault': None
}
body, header, headerfault = msg.serialize(arg1='ah1', arg2='ah2')
expected = """
<?xml version="1.0"?>
<soap-env:Body
xmlns:soap="http://schemas.xmlsoap.org/wsdl/soap/"
xmlns:soap-env="http://schemas.xmlsoap.org/soap/envelope/"
xmlns:wsdl="http://schemas.xmlsoap.org/wsdl/"
xmlns:xsd="http://www.w3.org/2001/XMLSchema">
<ns0:Method1Response xmlns:ns0="http://docs.python-zeep.org/tests/rpc">
<arg1>ah1</arg1>
<arg2>ah2</arg2>
</ns0:Method1Response>
</soap-env:Body>
"""
assert_nodes_equal(expected, body)
def test_rpc_message_deserializer():
response_body = load_xml("""
<SOAP-ENV:Body
xmlns:SOAP-ENV="http://schemas.xmlsoap.org/soap/envelope/"
xmlns:xsd="http://www.w3.org/2001/XMLSchema"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">
<mns:Method1Response xmlns:mns="http://docs.python-zeep.org/tests/rpc"
SOAP-ENV:encodingStyle="http://schemas.xmlsoap.org/soap/encoding/">
<bstrReturn xsi:type="xsd:string">foobar</bstrReturn>
</mns:Method1Response>
</SOAP-ENV:Body>
""") # noqa
msg = soap.RpcMessage(
wsdl=None,
name=None,
operation=None,
nsmap=soap.Soap11Binding.nsmap)
# Fake resolve()
msg.abstract = definitions.AbstractMessage(
etree.QName('{http://docs.python-zeep.org/tests/rpc}Method1Response'))
msg.abstract.parts = OrderedDict([
('bstrReturn', definitions.MessagePart(
element=None, type=xsd.String()))
])
msg.namespace = {
'body': 'http://docs.python-zeep.org/tests/rpc',
'header': None,
'headerfault': None
}
result = msg.deserialize(response_body)
assert result == 'foobar'
| [
"[email protected]"
] | |
3b7b299082fdbb74aaa5ba25f15669ec25330839 | c1f33bfa0d9fdf04cb31fc5353a9a75766e09724 | /batch_predict.py | b9c3796259d2c6bbb18e22a1b733945c296358c8 | [
"Apache-2.0"
] | permissive | pantelisantonoudiou/deep-seizure-detect | 151fe6a594f7e1eac676ed891163592fd1e4329c | 040d746833f5d35172fe944da0798f4909770f9c | refs/heads/main | 2023-03-29T08:57:43.482101 | 2021-03-27T16:18:09 | 2021-03-27T16:18:09 | 314,353,139 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,579 | py | # -*- coding: utf-8 -*-
"""
Created on Thu May 7 10:18:28 2020
@author: Pante
"""
### ------------------------ IMPORTS -------------------------------------- ###
import os, sys, tables, json
# set tf verbosity to 2
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
import numpy as np
from keras.models import load_model
from tqdm import tqdm
### ------------------------------------------------------------------------###
class batchPredict:
"""
Class for batch seizure prediction
"""
def __init__(self):
"""
Data retrieval from config.json
Returns
-------
None.
"""
# load properties from configuration file
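        # config.json is expected to provide at least these keys (values shown
        # are illustrative): {"main_path": "...", "org_rawpath": "...",
        # "rawpred_path": "...", "model_path": "...", "ch_list": [0, 1]}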
jsonpath = 'config.json'
openpath = open(jsonpath, 'r').read();
prop_dict = json.loads(openpath)
        # Get parent and re-organized data path
self.gen_path = prop_dict['main_path']
self.org_rawpath = os.path.join(self.gen_path , prop_dict['org_rawpath'])
# Get predictions path
self.rawpred_path = os.path.join(self.gen_path , prop_dict['rawpred_path'])
# Get model path
self.model_path = prop_dict['model_path']
# get selected channel
self.ch_list = prop_dict['ch_list']
def file_check(self):
"""
Check if files check before prediction
Returns
-------
Bool, True if operation successful
"""
print ('\n----------------------------------------------------------------------')
print ('-> File Check from:', self.gen_path)
print ('----------------------------------------------------------------------\n')
# get file list
filelist = list(filter(lambda k: '.h5' in k, os.listdir(self.org_rawpath)))
if len(filelist) == 0:
print('-> No Files Present.')
return False
# loop files (multilple channels per file)
for i in tqdm(range(len(filelist)), desc = 'Progress', file=sys.stdout):
try:
# get organized data
filepath = os.path.join(self.org_rawpath, filelist[i])
f = tables.open_file(filepath, mode='r')
data_shape = f.root.data.shape
f.close()
if len(data_shape) != 3: # check if data shape is correct
print(f'\nData shape of file {filelist[i]} is not correct {data_shape}. \nPlease re-arrange to (:, 500, channels).')
return False
except Exception as err:
raise FileNotFoundError(f'Unable to read file {filelist[i]}.\n{err}\n')
return False
print ('\n----------------------------------------------------------------------')
print ('------------------------ File Check Completed ------------------------')
print ('----------------------------------------------------------------------\n')
return True
def mainfunc(self):
"""
Iterate over files and generate predictions
Returns
-------
None.
"""
print ('\n----------------------------------------------------------------------')
print ('-> Generating predictions from:', self.gen_path)
print ('----------------------------------------------------------------------\n')
# make path
if os.path.exists(self.rawpred_path) is False:
os.mkdir( self.rawpred_path)
# get file list
filelist = list(filter(lambda k: '.h5' in k, os.listdir(self.org_rawpath)))
# load model object to memory to get path
model = load_model(self.model_path)
# iterate over files
for i in tqdm(range(len(filelist)), desc = 'Progress', file=sys.stdout):
# get organized data
filepath = os.path.join(self.org_rawpath, filelist[i])
f = tables.open_file(filepath, mode='r')
data = f.root.data[:]
f.close()
# get predictions (2D-array)
ypred = model.predict(data[:, :, self.ch_list])
# save predictions as .csv
file_id = filelist[i].replace('.h5', '.csv')
np.savetxt(os.path.join(self.rawpred_path,file_id), ypred, delimiter=',',fmt='%f')
print ('\n----------------------------------------------------------------------')
print ('----------------------- Predictions Completed ------------------------')
print ('----------------------------------------------------------------------\n')
# Execute if module runs as main program
if __name__ == '__main__':
# init object
obj = batchPredict()
# if file check successful get predictions
if obj.file_check() == True:
# get predictions and store in csv
obj.mainfunc()
| [
"[email protected]"
] | |
dbe9ffb9b7edcdaa408e5678e10e269eb60b1c36 | 9923e30eb99716bfc179ba2bb789dcddc28f45e6 | /openapi-generator/python/test/test_tagged_sensor.py | 6d315d9a6115515245f7b517c20e6d325020b960 | [] | no_license | silverspace/samsara-sdks | cefcd61458ed3c3753ac5e6bf767229dd8df9485 | c054b91e488ab4266f3b3874e9b8e1c9e2d4d5fa | refs/heads/master | 2020-04-25T13:16:59.137551 | 2019-03-01T05:49:05 | 2019-03-01T05:49:05 | 172,804,041 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,422 | py | # coding: utf-8
"""
Samsara API
# Introduction Samsara provides API endpoints for interacting with Samsara Cloud, so that you can build powerful applications and custom solutions with sensor data. Samsara has endpoints available to track and analyze sensors, vehicles, and entire fleets. The Samsara Cloud API is a [RESTful API](https://en.wikipedia.org/wiki/Representational_state_transfer) accessed by an [HTTP](https://en.wikipedia.org/wiki/Hypertext_Transfer_Protocol) client such as wget or curl, or HTTP libraries of most modern programming languages including python, ruby, java. We use built-in HTTP features, like HTTP authentication and HTTP verbs, which are understood by off-the-shelf HTTP clients. We allow you to interact securely with our API from a client-side web application (though you should never expose your secret API key). [JSON](http://www.json.org/) is returned by all API responses, including errors. If you’re familiar with what you can build with a REST API, the following API reference guide will be your go-to resource. API access to the Samsara cloud is available to all Samsara administrators. To start developing with Samsara APIs you will need to [obtain your API keys](#section/Authentication) to authenticate your API requests. If you have any questions you can reach out to us on [[email protected]](mailto:[email protected]) # Endpoints All our APIs can be accessed through HTTP requests to URLs like: ```curl https://api.samsara.com/<version>/<endpoint> ``` All our APIs are [versioned](#section/Versioning). If we intend to make breaking changes to an API which either changes the response format or request parameter, we will increment the version. # Authentication To authenticate your API request you will need to include your secret token. You can manage your API tokens in the [Dashboard](https://cloud.samsara.com). They are visible under `Settings->Organization->API Tokens`. Your API tokens carry many privileges, so be sure to keep them secure. Do not share your secret API tokens in publicly accessible areas such as GitHub, client-side code, and so on. Authentication to the API is performed via [HTTP Basic Auth](https://en.wikipedia.org/wiki/Basic_access_authentication). Provide your API token as the basic access_token value in the URL. You do not need to provide a password. ```curl https://api.samsara.com/<version>/<endpoint>?access_token={access_token} ``` All API requests must be made over [HTTPS](https://en.wikipedia.org/wiki/HTTPS). Calls made over plain HTTP or without authentication will fail. # Request Methods Our API endpoints use [HTTP request methods](https://en.wikipedia.org/wiki/Hypertext_Transfer_Protocol#Request_methods) to specify the desired operation to be performed. The documentation below specified request method supported by each endpoint and the resulting action. ## GET GET requests are typically used for fetching data (like data for a particular driver). ## POST POST requests are typically used for creating or updating a record (like adding new tags to the system). With that being said, a few of our POST requests can be used for fetching data (like current location data of your fleet). ## PUT PUT requests are typically used for updating an existing record (like updating all devices associated with a particular tag). ## PATCH PATCH requests are typically used for modifying an existing record (like modifying a few devices associated with a particular tag). ## DELETE DELETE requests are used for deleting a record (like deleting a tag from the system). 
# Response Codes All API requests will respond with appropriate [HTTP status code](https://en.wikipedia.org/wiki/List_of_HTTP_status_codes). Your API client should handle each response class differently. ## 2XX These are successful responses and indicate that the API request returned the expected response. ## 4XX These indicate that there was a problem with the request like a missing parameter or invalid values. Check the response for specific [error details](#section/Error-Responses). Requests that respond with a 4XX status code, should be modified before retrying. ## 5XX These indicate server errors when the server is unreachable or is misconfigured. In this case, you should retry the API request after some delay. # Error Responses In case of a 4XX status code, the body of the response will contain information to briefly explain the error reported. To help debugging the error, you can refer to the following table for understanding the error message. | Status Code | Message | Description | |-------------|----------------|-------------------------------------------------------------------| | 401 | Invalid token | The API token is invalid and could not be authenticated. Please refer to the [authentication section](#section/Authentication). | | 404 | Page not found | The API endpoint being accessed is invalid. | | 400 | Bad request | Default response for an invalid request. Please check the request to make sure it follows the format specified in the documentation. | # Versioning All our APIs are versioned. Our current API version is `v1` and we are continuously working on improving it further and provide additional endpoints. If we intend to make breaking changes to an API which either changes the response format or request parameter, we will increment the version. Thus, you can use our current API version worry free. # FAQs Check out our [responses to FAQs here](https://kb.samsara.com/hc/en-us/sections/360000538054-APIs). Don’t see an answer to your question? Reach out to us on [[email protected]](mailto:[email protected]). # noqa: E501
OpenAPI spec version: 1.0.0
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import unittest
import openapi_client
from openapi_client.models.tagged_sensor import TaggedSensor # noqa: E501
from openapi_client.rest import ApiException
class TestTaggedSensor(unittest.TestCase):
"""TaggedSensor unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testTaggedSensor(self):
"""Test TaggedSensor"""
# FIXME: construct object with mandatory attributes with example values
# model = openapi_client.models.tagged_sensor.TaggedSensor() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
| [
"[email protected]"
] | |
0adc5b2c1ea7a864c1215b5fab74dd2b88412ed9 | cf6fd897717ba042bd0e0c3bbee62159eeafc819 | /gyp_main.py | bd8b4413bc25e1e639ea179659cab2daec8c348d | [
"BSD-3-Clause"
] | permissive | graysonlang/gyp_slim | ab0fc03ab1ac8835915f6f40f3a35c4590cfa22d | 2d278cdf0dbd3e4a46a7606fde54414d4177cf9d | refs/heads/master | 2020-03-22T01:24:03.485459 | 2018-07-01T05:32:27 | 2018-07-01T05:32:27 | 139,299,293 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 479 | py | #!/usr/bin/env python
# Copyright (c) 2009 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import sys
sys.dont_write_bytecode = True
# Make sure we're using the version of pylib in this repo, not one installed
# elsewhere on the system.
sys.path.insert(0, os.path.join(os.path.dirname(sys.argv[0]), 'pylib'))
import gyp
if __name__ == '__main__':
sys.exit(gyp.script_main())
| [
"[email protected]"
] | |
ec8719208c3860fbec7f8a287ab12049779c26ef | 79b127f5fa03f57ddd1899bd06262414ffd07c92 | /users/migrations/0006_auto_20200511_0053.py | 12c577f12ca6ba9910ad2e3468e6ec43ad60b50d | [] | no_license | bruteforcerxx/ponzi | f423d6632bfd6d2f70b9b9972dd1c5d4b373f019 | d326eb8324b07f81d54cbad17c3caabd76082656 | refs/heads/master | 2023-04-15T13:41:25.216493 | 2020-06-13T09:51:44 | 2020-06-13T09:51:44 | 263,668,586 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 419 | py | # Generated by Django 3.0.6 on 2020-05-10 23:53
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('users', '0005_user_phone'),
]
operations = [
migrations.AlterField(
model_name='user',
name='wallet_address',
field=models.CharField(blank=True, max_length=200, null=True, unique=True),
),
]
| [
"[email protected]"
] | |
b1eed5827c2b0b8e0557413e01f4c513746e7cf0 | eaf468948a97ce53cd50a79e525df00546b3db1c | /blog/migrations/0001_initial.py | 3ff4d30453c20225ea9f1bfa777338747186e541 | [] | no_license | hamohambardzumyan99/6frameGIT | 8ce16efdfbf2bc450fb9a3461529b29588cca4d3 | 8c1a87229b7687c27e1e70c7bb922ba53ad0e530 | refs/heads/master | 2020-12-30T14:45:04.077438 | 2017-05-25T07:04:41 | 2017-05-25T07:04:41 | 91,083,367 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,208 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
]
operations = [
migrations.CreateModel(
name='Free',
fields=[
('id', models.AutoField(verbose_name='ID', primary_key=True, serialize=False, auto_created=True)),
('title', models.CharField(max_length=100)),
('created_at', models.DateTimeField(verbose_name='date published')),
('image', models.ImageField(upload_to='video/%Y/%m/%d')),
('download_url', models.CharField(max_length=250)),
],
),
migrations.CreateModel(
name='Paid',
fields=[
('id', models.AutoField(verbose_name='ID', primary_key=True, serialize=False, auto_created=True)),
('title', models.CharField(max_length=100)),
('created_at', models.DateTimeField(verbose_name='date published')),
('image', models.ImageField(upload_to='video/%Y/%m/%d')),
('price', models.CharField(max_length=250)),
],
),
]
| [
"[email protected]"
] | |
88030c1952cb064637b6fb1a271709fe38f46c1c | 7af38f9b08cce1172e3416b9ebdd8284023d3c61 | /src/viewprobe.py | 0e48b051dc07f330ed1669fb4476c8a5c53fb638 | [
"MIT"
] | permissive | HubBucket-Team/NetDissect | e03510934e01c962c6c86731b72c423edfcb6e20 | 23f345f0e4b0f45cd51c455948409d1a75f5b13a | refs/heads/release1 | 2020-12-30T11:16:03.600807 | 2017-05-08T11:11:53 | 2017-05-08T11:11:53 | 91,546,782 | 1 | 1 | null | 2017-05-17T07:31:21 | 2017-05-17T07:31:21 | null | UTF-8 | Python | false | false | 18,776 | py | '''
viewprobe creates visualizations for a certain eval.
'''
import glob
import os
import sys
import numpy
import re
import upsample
import time
import loadseg
from scipy.misc import imread, imresize, imsave
from loadseg import normalize_label
import expdir
class NetworkProbe:
def __init__(self, directory, blobs=None):
self.ed = expdir.ExperimentDirectory(directory)
# Load probe metadata
info = self.ed.load_info()
self.ih, self.iw = info.input_dim
self.layers = info.blobs if blobs is None else blobs
self.ds = loadseg.SegmentationData(info.dataset)
self.layer = {}
for blob in self.layers:
self.layer[blob] = LayerProbe(self.ed, blob, self.ds)
def score_tally_stats(self, layer, verbose=False):
# First, score every unit
if verbose:
            print 'Adding tallies of unit/label alignments.'
sys.stdout.flush()
ta, tg, ti = self.summarize_tally(layer)
labelcat = onehot(primary_categories_per_index(self.ds))
        tc = self.count_act_with_labelcat(layer)
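        # tg[l]: ground-truth pixels for label l; ti[u, l]: pixels where unit u
        # fires and label l is present; tc[u, l]: unit u's firing pixels counted
        # over images containing label l's category, so the ratio below is an
        # intersection-over-union score per unit/label pair.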
# If we were doing per-category activations p, then:
# c = numpy.dot(p, labelcat.transpose())
epsilon = 1e-20 # avoid division-by-zero
# If we were counting activations on non-category examples then:
# iou = i / (a[:,numpy.newaxis] + g[numpy.newaxis,:] - i + epsilon)
iou = ti / (tc + tg[numpy.newaxis,:] - ti + epsilon)
# Let's tally by primary-category.
pc = primary_categories_per_index(self.ds)
categories = self.ds.category_names()
ar = numpy.arange(iou.shape[1])
# actually - let's get the top iou for every category
pciou = numpy.array([iou * (pc[ar] == ci)[numpy.newaxis,:]
for ci in range(len(categories))])
# label_iou = iou.argmax(axis=1)
label_pciou = pciou.argmax(axis=2)
# name_iou = [self.ds.name(None, i) for i in label_iou]
name_pciou = [
[self.ds.name(None, j) for j in label_pciou[ci]]
for ci in range(len(label_pciou))]
# score_iou = iou[numpy.arange(iou.shape[0]), label_iou]
score_pciou = pciou[
numpy.arange(pciou.shape[0])[:,numpy.newaxis],
numpy.arange(pciou.shape[1])[numpy.newaxis,:],
label_pciou]
bestcat_pciou = score_pciou.argsort(axis=0)[::-1]
# Assign category for each label
# cat_per_label = primary_categories_per_index(self.ds)
# cat_iou = [categories[cat_per_label[i]] for i in label_iou]
# Now sort units by score and visulize each one
return bestcat_pciou, name_pciou, score_pciou, label_pciou, tc, tg, ti
def generate_html_summary(self, layer,
imsize=64, imcount=16, imscale=None, tally_stats=None,
gridwidth=None, verbose=False):
print 'Generating html summary', (
self.ed.filename(['html', '%s.html' % expdir.fn_safe(layer)]))
# Grab tally stats
bestcat_pciou, name_pciou, score_pciou, _, _, _, _ = (tally_stats)
if verbose:
print 'Sorting units by score.'
sys.stdout.flush()
if imscale is None:
imscale = imsize
categories = self.ds.category_names()
ordering = score_pciou.max(axis=0).argsort()[::-1]
top = self.max_act_indexes(layer, count=imcount)
self.ed.ensure_dir('html', 'image')
css = ('https://maxcdn.bootstrapcdn.com/bootstrap/latest' +
'/css/bootstrap.min.css')
html = ['<!doctype html>', '<html>', '<head>',
'<link rel="stylesheet" href="%s">' % css,
'</head>', '<body>', '<div class="container-fluid">']
if gridwidth is None:
gridname = ''
gridwidth = imcount
gridheight = 1
else:
gridname = '-%d' % gridwidth
gridheight = (imcount + gridwidth - 1) // gridwidth
for unit in ordering:
if verbose:
print 'Visualizing %s unit %d' % (layer, unit)
sys.stdout.flush()
tiled = numpy.full(
((imsize + 1) * gridheight - 1,
(imsize + 1) * gridwidth - 1, 3), 255, dtype='uint8')
for x, index in enumerate(top[unit]):
row = x // gridwidth
col = x % gridwidth
vis = self.activation_visualization(layer, unit, index)
if vis.shape[:2] != (imsize, imsize):
vis = imresize(vis, (imsize, imsize))
tiled[row*(imsize+1):row*(imsize+1)+imsize,
col*(imsize+1):col*(imsize+1)+imsize,:] = vis
imfn = 'image/%s%s-%04d.jpg' % (
expdir.fn_safe(layer), gridname, unit)
imsave(self.ed.filename(['html', imfn]), tiled)
labels = '; '.join(['%s (%s, %f)' %
(name_pciou[c][unit], categories[c], score_pciou[c, unit])
for c in bestcat_pciou[:,unit]])
html.extend([
'<h6>%s unit %d: %s</h6>' % (layer, unit + 1, labels),
'<img src="%s" height="%d">' % (imfn, imscale)
])
html.extend([
'</div>', '</body>', '</html>', ''])
with open(self.ed.filename([
'html', '%s.html' % expdir.fn_safe(layer)]), 'w') as f:
f.write('\n'.join(html))
def generate_csv_summary(
self, layer, csvfile, tally_stats, order=None, verbose=False):
if verbose:
print 'Generating csv summary', csvfile
sys.stdout.flush()
bestcat_pciou, name_pciou, score_pciou, label_pciou, tc, tg, ti = (
tally_stats)
# For each unit in a layer, outputs the following information:
# - label: best interpretation
# - object-label: top ranked interpretation for scene/object/color/etc
# - object-truth: ground truth pixels
# - object-activation: activating pixels
# - object-intersect: intersecting pixels
# - object-iou: iou score
# - etc, for each category.
categories = self.ds.category_names()
csv_fields = sum([[
'%s-label' % cat,
'%s-truth' % cat,
'%s-activation' % cat,
'%s-intersect' % cat,
'%s-iou' % cat] for cat in categories],
['unit', 'category', 'label', 'score'])
if order is not None:
csv_fields = order
if verbose:
print 'Sorting units by score.'
sys.stdout.flush()
ordering = score_pciou.max(axis=0).argsort()[::-1]
# top = self.max_act_indexes(layer, count=imcount)
import csv
with open(csvfile, 'w') as f:
writer = csv.DictWriter(open(csvfile, 'w'), csv_fields)
writer.writeheader()
for unit in ordering:
# Top images are top[unit]
bestcat = bestcat_pciou[0, unit]
data = {
'unit': (unit + 1),
'category': categories[bestcat],
'label': name_pciou[bestcat][unit],
'score': score_pciou[bestcat][unit]
}
for ci, cat in enumerate(categories):
label = label_pciou[ci][unit]
data.update({
'%s-label' % cat: name_pciou[ci][unit],
'%s-truth' % cat: tg[label],
'%s-activation' % cat: tc[unit, label],
'%s-intersect' % cat: ti[unit, label],
'%s-iou' % cat: score_pciou[ci][unit]
})
writer.writerow(data)
def generate_quantmat(self, layer, verbose=False):
if verbose:
print 'Generating quantmat'
sys.stdout.flush()
from scipy.io import savemat
lp = self.layer[layer]
filename = self.ed.filename(
'quant-%d.mat' % lp.quantdata.shape[1], blob=layer)
savemat(filename, { 'quantile': lp.quantdata })
def generate_imgmax(self, layer, verbose=False):
from scipy.io import savemat
imgmax = self.ed.open_mmap(blob=layer, part='imgmax', mode='w+',
shape = self.layer[layer].blobdata.shape[:2])
imgmax[...] = self.layer[layer].blobdata.max(axis=(2, 3))
self.ed.finish_mmap(imgmax)
# Also copy out to mat file
filename = self.ed.filename('imgmax.mat', blob=layer)
savemat(filename, { 'imgmax': imgmax })
# And cache
self.layer[layer].imgmax = imgmax
def instance_data(self, i, normalize=True):
record, shape = self.ds.resolve_segmentation(
self.ds.metadata(i), categories=None)
if normalize:
default_shape = (1, ) + shape
record = dict((cat, normalize_label(dat, default_shape))
for cat, dat in record.items())
return record, shape
def top_image_indexes(self, layer, unit, count=10):
t = self.layer[layer].count_a[:,unit].argsort()[::-1]
return t[:count]
# Generates a mask at the "lp.level" quantile.
def activation_mask(self, layer, unit, index, shape=None):
if shape is None:
record, shape = self.instance_data(index)
sw, sh = shape
# reduction = int(round(self.iw / float(sw)))
lp = self.layer[layer]
blobdata = lp.blobdata
fieldmap = lp.fieldmap
quantdata = lp.quantdata
threshold = quantdata[unit, int(round(quantdata.shape[1] * lp.level))]
up = upsample.upsampleL(
fieldmap, blobdata[index:index+1, unit],
shape=(self.ih, self.iw), scaleshape=(sh, sw))[0]
mask = up > threshold
return mask
    # Makes an image using the mask
def activation_visualization(self, layer, unit, index, alpha=0.2):
image = imread(self.ds.filename(index))
mask = self.activation_mask(layer, unit, index, shape=image.shape[:2])
return (mask[:, :, numpy.newaxis] * (1 - alpha) + alpha) * image
def summarize_tally(self, layer):
cat_count = len(self.ds.category_names())
tally = self.layer[layer].tally
unit_size = self.layer[layer].shape[1]
label_size = self.ds.label_size()
count = numpy.zeros(
(unit_size + 1, label_size + 1 + cat_count), dtype='int64')
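        # Each tally row is a (unit, label, pixelcount) triple; the +1 offsets
        # below reserve row 0 for unit == -1 entries (ground-truth pixels per
        # label) and the leading columns for label == -1 entries (activation
        # pixels per unit).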
for i in range(len(tally)):
t = tally[i]
count[t[:,0]+1, t[:,1]+1+cat_count] += t[:,2]
# count_a.shape = (unit size,)
count_a = count[1:,cat_count]
# this would summarize category intersections if we tallied them
# count_c.shape = (unit_size, cat_size)
# count_c = count[1:,0:cat_count]
# count_g.shape = (label_size,)
count_g = count[0,1+cat_count:]
# count_i.shape = (unit_size, label_size)
count_i = count[1:,1+cat_count:]
# return count_a, count_c, count_g, count_i
return count_a, count_g, count_i
def count_act_with_label(self, layer):
        # Because our dataset is sparse, instead of using count_a to count
# all activations, we can compute count_act_with_label to count
# activations only within those images which contain an instance
# of a given label.
tally = self.layer[layer].tally
unit_size = self.layer[layer].shape[1]
label_size = self.ds.label_size()
count = numpy.zeros((unit_size, label_size), dtype='int64')
for i in range(len(tally)):
c1 = numpy.zeros((unit_size + 1, label_size + 1), dtype='int64')
t = tally[i]
c1[t[:,0]+1, t[:,1]+1] = t[:,2]
count += c1[1:,0][:,numpy.newaxis] * (c1[0,1:][numpy.newaxis] > 0)
return count
def count_act_with_labelcat(self, layer):
        # Because our dataset is sparse, instead of using count_a to count
# all activations, we can compute count_act_with_labelcat to count
# activations only within those images which contain an instance
# of a given label category.
labelcat = onehot(primary_categories_per_index(self.ds))
# Be sure to zero out the background label - it belongs to no category.
labelcat[0,:] = 0
tally = self.layer[layer].tally
unit_size = self.layer[layer].shape[1]
label_size = self.ds.label_size()
count = numpy.zeros((unit_size, labelcat.shape[1]), dtype='int64')
for i in range(len(tally)):
c1 = numpy.zeros((unit_size + 1, label_size + 1), dtype='int64')
t = tally[i]
c1[t[:,0]+1, t[:,1]+1] = t[:,2]
count += c1[1:,0][:,numpy.newaxis] * (
numpy.dot(c1[0,1:], labelcat) > 0)
# retval: (unit_size, label_size)
return numpy.dot(count, numpy.transpose(labelcat))
def max_act_indexes(self, layer, count=10):
max_per_image = self.layer[layer].imgmax
return max_per_image.argsort(axis=0)[:-1-count:-1,:].transpose()
def top_act_indexes(self, layer, count=10):
tally = self.layer[layer].tally
unit_size = self.layer[layer].shape[1]
label_size = self.ds.label_size()
all_acts = numpy.zeros((len(tally), unit_size), dtype='int64')
for i in range(len(tally)):
acts = numpy.zeros((unit_size + 1, 2), dtype='int32')
t = tally[i]
acts[t[:,0] + 1, (t[:,1] != -1).astype('int')] = t[:,2]
all_acts[i] = acts[1:,0]
return all_acts.argsort(axis=0)[:-1-count:-1,:].transpose()
class LayerProbe:
def __init__(self, ed, blob, ds):
info = ed.load_info(blob=blob)
self.shape = info.shape
self.fieldmap = info.fieldmap
# Load the raw activation data
if ed.has_mmap(blob=blob):
self.blobdata = ed.open_mmap(blob=blob, shape=self.shape, mode='r')
# Load the blob quantile data and grab thresholds
if ed.has_mmap(blob=blob, part='quant-*'):
self.quantdata = ed.open_mmap(blob=blob, part='quant-*',
shape=(self.shape[1], -1), mode='r')
# Load tally too; tally_depth is inferred from file size.
self.tally = ed.open_mmap(blob=blob, part='tally-*', decimal=True,
shape=(ds.size(), -1, 3), dtype='int32', mode='r')
# And load imgmax
if ed.has_mmap(blob=blob, part='imgmax'):
self.imgmax = ed.open_mmap(blob=blob, part='imgmax',
shape=(ds.size(), self.shape[1]), mode='r')
# Figure out tally level that was used.
self.level = ed.glob_number(
'tally-*.mmap', blob=blob, decimal=True)
def primary_categories_per_index(ds, categories=None):
'''
Returns an array of primary category numbers for each label, where the
first category listed in ds.category_names is given category number 0.
'''
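    # A label can be annotated under more than one category; its primary
    # category is the one where it has the greatest coverage() in the dataset,
    # which is what the max() below selects.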
if categories is None:
categories = ds.category_names()
catmap = {}
for cat in categories:
imap = ds.category_index_map(cat)
if len(imap) < ds.label_size(None):
imap = numpy.concatenate((imap, numpy.zeros(
ds.label_size(None) - len(imap), dtype=imap.dtype)))
catmap[cat] = imap
result = []
for i in range(ds.label_size(None)):
maxcov, maxcat = max(
(ds.coverage(cat, catmap[cat][i]) if catmap[cat][i] else 0, ic)
for ic, cat in enumerate(categories))
result.append(maxcat)
return numpy.array(result)
def onehot(arr, minlength=None):
'''
Expands an array of integers in one-hot encoding by adding a new last
dimension, leaving zeros everywhere except for the nth dimension, where
the original array contained the integer n. The minlength parameter is
    used to indicate the minimum size of the new dimension.
'''
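    # For example, onehot(numpy.array([0, 2]), minlength=4) returns
    # [[1, 0, 0, 0], [0, 0, 1, 0]].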
length = numpy.amax(arr) + 1
if minlength is not None:
length = max(minlength, length)
result = numpy.zeros(arr.shape + (length,))
result[list(numpy.indices(arr.shape)) + [arr]] = 1
return result
if __name__ == '__main__':
import argparse
import sys
import traceback
try:
parser = argparse.ArgumentParser(
description='Generate visualization for probed activation data.')
parser.add_argument(
'--directory',
default='.',
help='output directory for the net probe')
parser.add_argument(
'--format',
default='html',
help='html or csv or both')
parser.add_argument(
'--csvorder',
help='csv header order')
parser.add_argument(
'--blobs',
nargs='*',
help='network blob names to visualize')
parser.add_argument(
'--gridwidth',
type=int, default=None,
help='width of visualization grid')
parser.add_argument(
'--imsize',
type=int, default=72,
help='thumbnail dimensions')
parser.add_argument(
'--imscale',
type=int, default=None,
help='thumbnail dimensions')
parser.add_argument(
'--imcount',
type=int, default=16,
help='number of thumbnails to include')
args = parser.parse_args()
np = NetworkProbe(args.directory, blobs=args.blobs)
for blob in args.blobs:
formats = args.format.split(',')
if 'imgmax' in formats:
np.generate_imgmax(blob)
if 'html' in formats or 'csv' in formats:
tally_stats = np.score_tally_stats(blob, verbose=True)
if 'html' in formats:
np.generate_html_summary(blob,
imsize=args.imsize, imscale=args.imscale,
imcount=args.imcount, tally_stats=tally_stats,
gridwidth=args.gridwidth,
verbose=True)
if 'csv' in formats:
filename = os.path.join(args.directory,
'%s-result.csv' % expdir.fn_safe(blob))
np.generate_csv_summary(blob, filename, tally_stats,
order=(args.csvorder.split(',')
if args.csvorder else None), verbose=True)
if 'quantmat' in formats:
np.generate_quantmat(blob, verbose=True)
except:
traceback.print_exc(file=sys.stdout)
sys.exit(1)
| [
"[email protected]"
] | |
11550ccfbab76dc93f527a2a7add9e008dc5a3fe | 34ed69140f0e975c6d1bb2e2b10402aaf93aa8bc | /whole_img_dataset_builder.py | 41200a768fbdaa9aae928b4e32d5457136c931cf | [] | no_license | evankozliner/transfer-learning-baseline | 95cc86295bede84ec0590b5c8137964fd60f155b | 90b50a2aa29662a0c6bb0f2e16669e23779d2276 | refs/heads/master | 2021-01-20T06:59:55.996851 | 2017-06-07T22:08:23 | 2017-06-07T22:08:23 | 89,946,337 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 521 | py | """ Builds a simple whole image directory for the transfer learning script to sanity-check the problem """
import pandas as pd
import shutil
DATA_DIR = 'final/'
OUTPUT_DIR = 'whole-img-dataset/'
if __name__ == "__main__":
gt = pd.read_csv('final.csv')
for i, row in gt.iterrows():
if row['melanoma']:
shutil.copy(DATA_DIR + row['image_id'] + '.jpg', OUTPUT_DIR + 'Malignant/')
        else:
shutil.copy(DATA_DIR + row['image_id'] + '.jpg', OUTPUT_DIR + 'Benign/')
| [
"[email protected]"
] | |
ec409cd30676c7bfbf9b75859e532d178fcbe632 | 8c61c9d614d98e00f8ad243ff8f0fe2e9370dc62 | /apps/order/models.py | 7bb2591b8fd17959aa6cd92fa651bc6d3ea701c4 | [] | no_license | Alovelyfish/ttsx | 4e550ee2393c98f3de46934232b00e4aa7e9cdb7 | 1338aa984f8bd278e356415f89cd26cfac3f26c6 | refs/heads/master | 2022-11-20T20:45:53.461839 | 2020-07-24T02:41:39 | 2020-07-24T02:41:39 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,076 | py | from django.db import models
from db.base_model import BaseModel
# Create your models here.
class OrderInfo(BaseModel):
    '''Order model class.'''
PAY_METHOD_CHOICES = (
(1, '货到付款'),
(2, '微信支付'),
(3, '支付宝'),
(4, '银联支付')
)
ORDER_STATUS_CHOICES = (
(1, '待支付'),
(2, '待发货'),
(3, '待收货'),
(4, '待评价'),
(5, '已完成')
)
order_id = models.CharField(max_length=128, primary_key=True, verbose_name='订单id')
user = models.ForeignKey('user.User', verbose_name='用户', on_delete=models.DO_NOTHING)
addr = models.ForeignKey('user.Address', verbose_name='地址', on_delete=models.DO_NOTHING)
pay_method = models.SmallIntegerField(choices=PAY_METHOD_CHOICES, default=3, verbose_name='支付方式')
total_count = models.IntegerField(default=1, verbose_name='商品数量')
total_price = models.DecimalField(max_digits=10, decimal_places=2, verbose_name='商品总价')
transit_price = models.DecimalField(max_digits=10, decimal_places=2,verbose_name='订单运费')
order_status = models.SmallIntegerField(choices=ORDER_STATUS_CHOICES, default=1, verbose_name='订单状态')
trade_no = models.CharField(max_length=128, verbose_name='支付编号')
class Meta:
db_table = 'df_order_info'
verbose_name = '订单'
verbose_name_plural = verbose_name
class OrderGoods(BaseModel):
    '''Order goods (order line item) model class.'''
order = models.ForeignKey('OrderInfo', verbose_name='订单', on_delete=models.DO_NOTHING)
sku = models.ForeignKey('goods.GoodsSKU', verbose_name='商品SKU', on_delete=models.DO_NOTHING)
count = models.IntegerField(default=1, verbose_name='商品数目')
price = models.DecimalField(max_digits=10, decimal_places=2, verbose_name='商品价格')
comment = models.CharField(max_length=256, verbose_name='评论')
class Meta:
db_table = 'df_order_goods'
verbose_name = '订单商品'
verbose_name_plural = verbose_name | [
"wuhan"
] | wuhan |
0aa969c3e63c975bff86df4d81520f3f00465199 | 90a7e536e79d37204395d9d36dd7c5aaa786227c | /爬虫项目/LGZP.py | ea7dabcc4a5e0ce402d57de20226c45fc101c326 | [] | no_license | jiashanliang/- | 1ca9588771deb76defbfc73d0cbb4fc07f8d319f | 17b9b7c182d01b94c3baf43e02c9f5a10ba55dc1 | refs/heads/master | 2020-04-10T17:04:23.320425 | 2018-12-11T02:50:37 | 2018-12-11T02:50:37 | 159,295,523 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,299 | py | import pymysql
import requests
import json
# API endpoint: https://www.lagou.com/jobs/positionAjax.json?needAddtionalResult=false
from sqlalchemy.dialects.mssql import pymssql
def Initialization(page):
post_url = "https://www.lagou.com/jobs/positionAjax.json?needAddtionalResult=false"
headers = {
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.77 Safari/537.36',
'Cookie': 'user_trace_token=20181119134553-1c044f99-c44b-4fb1-863e-577c95179dfe; _ga=GA1.2.1518018158.1542606354; LGUID=20181119134554-615f1f4f-ebbe-11e8-a6d6-525400f775ce; JSESSIONID=ABAAABAAAIAACBIDF692B19D2CE9DE2302DFF38BC87F777; LGSID=20181122163512-87146e49-ee31-11e8-8acd-5254005c3644; PRE_UTM=; PRE_HOST=www.google.co.jp; PRE_SITE=https%3A%2F%2Fwww.google.co.jp%2F; PRE_LAND=https%3A%2F%2Fwww.lagou.com%2F; _gid=GA1.2.1375902786.1542875712; Hm_lvt_4233e74dff0ae5bd0a3d81c6ccf756e6=1542606354,1542875712; index_location_city=%E5%85%A8%E5%9B%BD; TG-TRACK-CODE=search_code; X_HTTP_TOKEN=98574f905ef789092abfe5b08a62982b; sajssdk_2015_cross_new_user=1; sensorsdata2015jssdkcross=%7B%22distinct_id%22%3A%221673a940551be-013d55fa0d77e3-b79183d-1327104-1673a940554134%22%2C%22%24device_id%22%3A%221673a940551be-013d55fa0d77e3-b79183d-1327104-1673a940554134%22%2C%22props%22%3A%7B%22%24latest_traffic_source_type%22%3A%22%E7%9B%B4%E6%8E%A5%E6%B5%81%E9%87%8F%22%2C%22%24latest_referrer%22%3A%22%22%2C%22%24latest_referrer_host%22%3A%22%22%2C%22%24latest_search_keyword%22%3A%22%E6%9C%AA%E5%8F%96%E5%88%B0%E5%80%BC_%E7%9B%B4%E6%8E%A5%E6%89%93%E5%BC%80%22%7D%7D; _gat=1; Hm_lpvt_4233e74dff0ae5bd0a3d81c6ccf756e6=1542876806; LGRID=20181122165444-41cc214d-ee34-11e8-b44a-525400f775ce; SEARCH_ID=e3e669861bc2409f9b3bc9f068c806ca',
'Referer': 'https://www.lagou.com/jobs/list_python',
}
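    # Form fields for Lagou's AJAX search endpoint: 'first' marks the first
    # request of a session, 'pn' is the page number and 'kd' the search keyword.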
data = {
'first': 'true',
'pn': page,
'kd': 'python',
}
    r = requests.post(post_url, headers=headers, data=data)
return r
def sava_content(response):
    # Connect to the database
connect = pymysql.Connect(
host='localhost',
port=3306,
user='root',
passwd='123456',
db='xzf',
charset='utf8'
)
cursor = connect.cursor()
response = json.loads(response.text)
print(type(response))
works = response["content"]["positionResult"]["result"]
for i in range(len(works)):
work = works[i]
company = work["companyFullName"]
position = work["positionName"]
wages = work["salary"]
city = work["city"]
financeStage = work["financeStage"]
print(company, position, wages, city, financeStage)
sql = "INSERT INTO lgdata(company,position,wages,city,financeStage) VALUES('%s','%s','%s','%s','%s')" % \
(company, position, wages, city, financeStage)
try:
            # Execute the SQL statement
            cursor.execute(sql)
            # Commit the transaction
            connect.commit()
except:
            # Roll back on error
connect.rollback()
if __name__ == '__main__':
for page in range(1,6):
response = Initialization(page)
sava_content(response)
# down_load(content) | [
"[email protected]"
] | |
b2bfe839dbda6707027eafc403910e3c91d305b5 | 048d697c2d7bcf0b32d7aaec87acbd930d7af800 | /.ipynb_checkpoints/transforming-checkpoint.py | 1c17a1ccfb53984affebac52ee20357c774564ea | [] | no_license | Radzsingh/DataflowPipeline | d081ce1ee3749e25929769c815c4774d25130404 | 07f045f9b973de8685d36714c96cfade3d6d0384 | refs/heads/master | 2023-03-27T16:08:33.730879 | 2021-03-27T23:48:18 | 2021-03-27T23:48:18 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,123 | py | {
"cells": [
{
"cell_type": "code",
"execution_count": 1,
"id": "historical-speaking",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Started Running\n"
]
},
{
"data": {
"application/javascript": [
"\n",
" if (typeof window.interactive_beam_jquery == 'undefined') {\n",
" var jqueryScript = document.createElement('script');\n",
" jqueryScript.src = 'https://code.jquery.com/jquery-3.4.1.slim.min.js';\n",
" jqueryScript.type = 'text/javascript';\n",
" jqueryScript.onload = function() {\n",
" var datatableScript = document.createElement('script');\n",
" datatableScript.src = 'https://cdn.datatables.net/1.10.20/js/jquery.dataTables.min.js';\n",
" datatableScript.type = 'text/javascript';\n",
" datatableScript.onload = function() {\n",
" window.interactive_beam_jquery = jQuery.noConflict(true);\n",
" window.interactive_beam_jquery(document).ready(function($){\n",
" \n",
" });\n",
" }\n",
" document.head.appendChild(datatableScript);\n",
" };\n",
" document.head.appendChild(jqueryScript);\n",
" } else {\n",
" window.interactive_beam_jquery(document).ready(function($){\n",
" \n",
" });\n",
" }"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"/root/apache-beam-2.28.0/lib/python3.7/site-packages/apache_beam/io/gcp/bigquery.py:1653: BeamDeprecationWarning: options is deprecated since First stable release. References to <pipeline>.options will not be supported\n",
" experiments = p.options.view_as(DebugOptions).experiments or []\n",
"WARNING:apache_beam.options.pipeline_options:Discarding unparseable args: ['-f', '/root/.local/share/jupyter/runtime/kernel-15af5235-0529-4a4e-a9c2-dbc14ceb11c3.json']\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"Completed Running\n"
]
}
],
"source": [
"import apache_beam as beam\n",
"from apache_beam.io import ReadFromText\n",
"from apache_beam.io import WriteToText\n",
"from google.cloud import bigquery\n",
"from apache_beam.options.pipeline_options import PipelineOptions\n",
"from apache_beam.options.pipeline_options import SetupOptions\n",
"import re\n",
"\n",
"PROJECT = 'tokyo-botany-302620'\n",
"schema1 = 'neighbourhood:STRING,total_count:NUMERIC'\n",
" \n",
"def collectNeighbourhood(data):\n",
" yield '{},{}'.format(data['neighbourhood'],data['id'])\n",
"\n",
"def discard_incomplete(data):\n",
" \"\"\"Filters out records that don't have an information.\"\"\"\n",
" return len(data[0]) > 0 and len(data[1]) > 0 and len(data[2]) > 0 and len(data[3]) > 0 and len(data[4]) > 0 and len(data[5]) > 0 and len(data[6]) > 0 and len(data[7]) > 0 and len(data[8]) > 0 and len(data[9]) > 0 and len(data[10]) > 0 and len(data[11]) > 0 and len(data[12]) > 0 and len(data[13]) > 0 and len(data[14]) > 0 and len(data[15]) > 0\n",
" \n",
"def transform(argv=None):\n",
" inputfile = 'gs://airbnbnyc2019/AB_NYC_2019 (1).csv'\n",
" outputfile = 'gs://airbnbnyc2019/output/output.csv'\n",
"\n",
" pipeline_options = PipelineOptions()\n",
" pipeline_options.view_as(SetupOptions).save_main_session = True\n",
" p = beam.Pipeline(options=pipeline_options)\n",
" \n",
" lines = p | 'ReadMyFile' >> beam.io.ReadFromText(inputfile)\n",
" \n",
" (lines\n",
" | 'Parse CSV1' >> beam.Regex.replace_all(r'\\\"([^\\\"]*)\\\"',lambda x:x.group(1).replace(',',''))\n",
" | 'Split11' >> beam.Map(lambda x: x.split(','))\n",
" | 'DeleteIncompleteData1' >> beam.Filter(discard_incomplete)\n",
" | 'format to dict' >> beam.Map(lambda x: {\"id\": x[0], \"name\": x[1], \"host_id\": x[2], \"host_name\": x[3], \"neighbourhood_group\": x[4], \"neighbourhood\": x[5], \"latitude\": x[6], \"longitude\": x[7], \"room_type\": x[8],\"price\": x[9], \"minimum_nights\": x[10], \"number_of_reviews\":x[11],\"last_review\":x[12],\"reviews_per_month\":x[13],\"calculated_host_listings_count\":x[14], \"availability_365\":x[15]})\n",
" | 'Transform' >> beam.FlatMap(collectNeighbourhood)\n",
" | 'Split2' >> beam.Map(lambda x: x.split(','))\n",
" \n",
" | 'CombineNeighbourhood' >> beam.combiners.Count.PerKey()\n",
" | 'format to dict2' >> beam.Map(lambda y: {\"neighbourhood\": y[0], \"total_count\": y[1]})\n",
" #| 'Print' >> beam.Map(print)\n",
" | 'WriteToBigQueryCount' >> beam.io.WriteToBigQuery('{0}:nycairbnb.airbnb_neighbourhood_count'.format(PROJECT), schema=schema1,\n",
" write_disposition=beam.io.BigQueryDisposition.WRITE_APPEND,method=\"STREAMING_INSERTS\")\n",
" )\n",
" p.run()\n",
"\n",
"if __name__ == '__main__':\n",
" print('Started Running')\n",
" transform()\n",
" print('Completed Running')\n",
" "
]
},
{
"cell_type": "code",
"execution_count": 4,
"id": "numeric-conspiracy",
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "Apache Beam 2.28.0 for Python 3",
"language": "python",
"name": "apache-beam-2.28.0"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.7.9"
}
},
"nbformat": 4,
"nbformat_minor": 5
}
| [
"[email protected]"
] | |
99ce8996cc92d1351b1811d6b1e7745d244e1d03 | c3668ccd84d108c0d22ca13309912c9afa257830 | /testing/test.py | f760436ad510b18a5198415d5f3b6a262fc6e00c | [] | no_license | pratyush1687/LibraryManagementSystem | 3c0f33a0f9b954dfffcf79603649e4e14e11e0ca | 831d085b6f675d22adf0d9cd7001e3a11021f203 | refs/heads/master | 2021-01-20T01:51:55.990063 | 2017-10-25T13:16:52 | 2017-10-25T13:16:52 | 101,304,120 | 1 | 1 | null | 2017-10-25T13:14:08 | 2017-08-24T14:29:11 | Python | UTF-8 | Python | false | false | 501 | py | # import pickle
# arr=[1,2,3,4]
# p=[3,4,5,6]
# f=open("try.dat", mode="w+")
# pickle.dump(arr,f)
# pickle.dump(p,f)
# f.seek(0,0)
# while True:
# try:
# obj=pickle.load(f)
# obj[0]="ab"
# pickle.dump(obj, f)
# except EOFError:
# # f.close()
# break
# f.seek(0,0)
# while True:
# try:
# obj=pickle.load(f)
# # obj[0]="ab"
# # pickle.dump(obj, f)
# print obj
# except EOFError:
# f.close()
# break
| [
"[email protected]"
] | |
1632e27cb7ff7a99f1399e810c48a01889bdea52 | 9d5305f5fc99f3e79fc3daf19049825ae7cab20e | /post/templates/migrations/0001_initial.py | 5ae80a9bfed1c61183071e350f022d69351314ae | [] | no_license | Salwasalwa/mini_projet_djagno | f2dd385bb3eefde40d983e94b52a5355a7ddcdb4 | 90075975bf624d1d51c7c406588b4951e04589ca | refs/heads/master | 2021-01-20T09:31:43.364445 | 2017-05-18T13:52:08 | 2017-05-18T13:52:08 | 90,259,469 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 670 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2017-05-04 14:04
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Post',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=100)),
('body', models.TextField()),
('creation_date', models.DateTimeField(auto_now_add=True)),
],
),
]
| [
"[email protected]"
] | |
c9d2ebc9232e0ecf7e9f0a67501dfb735983a292 | 719367fa61685cab1e36aa29a411e7a022171548 | /src/try_load.py | c7d464a1aa21a7b49e39c1eb36418059556afb77 | [
"BSD-3-Clause"
] | permissive | VinACE/san_mrc | 3cdfea101e86fdba30cab7e808a489b89ca46cc6 | e99cf73d3bc362d2566a93fe07de102bdd7a23d5 | refs/heads/master | 2021-10-10T04:10:53.682847 | 2021-09-30T13:34:22 | 2021-09-30T13:34:22 | 183,762,662 | 1 | 0 | BSD-3-Clause | 2019-04-27T11:09:54 | 2019-04-27T11:09:53 | null | UTF-8 | Python | false | false | 635 | py | # import sys
# sys.path.append('/home/madhevan/Documents/py_packages')
from src.batcher import BatchGen
path = '../../dev-v1.1.json'
batch_size = 10
gp = False
tr = False
#f = BatchGen(path,batch_size,gpu = gp,is_train = tr)
import json
with open(path, 'r', encoding='utf-8') as reader:
# filter
data = []
cnt = 0
print(type(reader))
c=0
for line in reader:
if c < 10:
sample = json.loads(line)
print(c)
c+=1
print(sample)
print('\n')
else:
break
print(type(sample))
print(len(sample))
print(sample.keys())
| [
"[email protected]"
] | |
7aa098561b45184b7e74065811dfaa8010e27a1a | 14dc72b1762e6aae8550582fbdfd4c1f77581afa | /myfirstdjangoapp/urls.py | 7fcdd91f37358b52d4176d7b857875fc22754162 | [] | no_license | pysammie/myfirstdjangoapp | f346b940163eb02409cf15383ca1b06f93f1f75f | 5c587a1cf301c58d8dd5f7c93458bfa5d7dbe5ca | refs/heads/main | 2023-04-10T03:50:02.853184 | 2021-04-21T22:10:46 | 2021-04-21T22:10:46 | 360,318,438 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 785 | py | """mynewproject URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
from . import views
urlpatterns = [
path('index/', views.index, name = "index")
] | [
"[email protected]"
] | |
5f4b3fc502b3ae070b126bf2b1f104b62fb3c6eb | 9e63ab3790065f8f2227c961d4ccc376bd20f02b | /src/data/dataset.py | 58edfb21f48741315d87e6dfea8a38a7ffd779d7 | [] | no_license | DanKing1903/cwi | 758e602e1b44b28eff2156d98fa882faae867540 | a0ca817a98da0f968f6f5cfdee4650198a3c837c | refs/heads/master | 2020-03-21T09:59:43.899524 | 2018-11-22T08:05:32 | 2018-11-22T08:05:32 | 138,428,540 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,256 | py | """Dataset Reader
This module contains the class(es) and functions to read the datasets.
"""
import csv
import pandas as pd
import spacy
import pickle
from spacy.tokens import Doc
class Dataset(object):
"""
Utility class to easily load the datasets for training, development and testing.
"""
def __init__(self, language, dataset_name):
"""Defines the basic properties of the dataset reader.
Args:
language: The language of the dataset.
dataset_name: The name of the dataset (all files should have it).
"""
self._language = language
self._dataset_name = dataset_name
self.translate = 'F'
# TODO: Maybe the paths should be passed as parameters or read from a configuration file.
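        # The dataset_name must match the raw file names, so (illustratively)
        # Dataset("english", "News") would read data/raw/english/News_Train.tsv
        # via train_set().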
self._trainset_path = "data/raw/{}/{}_Train.tsv".format(language.lower(), dataset_name)
self._devset_path = "data/raw/{}/{}_Dev.tsv".format(language.lower(), dataset_name)
self._testset_path = "data/raw/{}/{}_Test.tsv".format(language.lower(), dataset_name)
self._trainset = None
self._devset = None
self._testset = None
"""spaCy object handling"""
if self._language == "english":
self.nlp = spacy.load('en_core_web_lg')
elif self._language == "spanish":
self.nlp = spacy.load("es_core_news_md")
elif self._language == "german":
self.nlp = spacy.load('de_core_news_sm')
elif self._language == "french":
self.nlp = spacy.load('fr_core_news_md')
self._trainset_spacy_path = "data/interim/{}/{}_Train-spacy-objs.pkl".format(
language.lower(), dataset_name)
self._devset_spacy_path = "data/interim/{}/{}_Dev-spacy-objs.pkl".format(
language.lower(), dataset_name)
self._testset_spacy_path = "data/interim/{}/{}_Test-spacy-objs.pkl".format(
language.lower(), dataset_name)
    def train_set(self):
        """pandas.DataFrame. Getter method for the training set."""
if self._trainset is None: # loads the data to memory once and when requested.
trainset_raw = self.read_dataset(self._trainset_path)
trainset_spacy = self.read_spacy_pickle(self._trainset_spacy_path)
if trainset_raw is None and trainset_spacy is None:
# This is for languages we never see (French)
self._trainset = None
else:
self._trainset = pd.concat([trainset_raw, trainset_spacy], axis=1)
self._trainset['language'] = self._language
self._trainset['dataset_name'] = self._dataset_name
return self._trainset
    def dev_set(self):
        """pandas.DataFrame. Getter method for the development set."""
if self._devset is None: # loads the data to memory once and when requested.
devset_raw = self.read_dataset(self._devset_path)
devset_spacy = self.read_spacy_pickle(self._devset_spacy_path)
if devset_raw is None and devset_spacy is None:
# This is for languages we never see (French)
self._devset = None
else:
self._devset = pd.concat([devset_raw, devset_spacy], axis=1)
self._devset['language'] = self._language
self._devset['dataset_name'] = self._dataset_name
return self._devset
    def test_set(self):
        """pandas.DataFrame. Getter method for the test set."""
if self.translate == 'F':
if self._testset is None: # loads the data to memory once and when requested.
testset_raw = self.read_dataset(self._testset_path)
testset_spacy = self.read_spacy_pickle(self._testset_spacy_path)
self._testset = pd.concat([testset_raw, testset_spacy], axis=1)
self._testset['language'] = self._language
self._testset['dataset_name'] = self._dataset_name
else:
if self._testset is None:
testset_raw = pickle.load(open( "data/processed/translated_frenchdf.p", "rb" ))
testset_spacy = self.read_spacy_pickle(self._testset_spacy_path)
self._testset = pd.concat([testset_raw, testset_spacy], axis=1)
self._testset['language'] = self._language
self._testset['dataset_name'] = self._dataset_name
return self._testset
def read_dataset(self, file_path):
"""Read the dataset file.
Args:
file_path (str): The path of the dataset file. The file should follow the structure specified in the
2018 CWI Shared Task.
        Returns:
            pandas.DataFrame. A dataframe with one row per sentence/target-word instance in the dataset.
"""
try:
with open(file_path, encoding="utf-8") as file:
fieldnames = ['hit_id', 'sentence', 'start_offset', 'end_offset', 'target_word', 'native_annots',
'nonnative_annots', 'native_complex', 'nonnative_complex', 'gold_label', 'gold_prob']
dataset = pd.read_csv(file, names=fieldnames, sep="\t")
except FileNotFoundError:
print("File {} not found.".format(file_path))
dataset = None
return dataset
def read_spacy_pickle(self, file_path):
"""Read the pickled spacy objects
Args:
file_path (str): Path of the pickled spacy objects file
Returns:
pandas DataFrame. A single column of the spacy docs.
"""
vocab = self.nlp.vocab
try:
file = open(file_path, "rb")
# putting the spacy doc in a single-item list to avoid pandas splitting it up
spacy_objects = [[Doc(vocab).from_bytes(x)] for x in pickle.load(file)]
file.close()
spacy_objects_dataset = pd.DataFrame(spacy_objects, columns=["spacy"])
return spacy_objects_dataset
except FileNotFoundError:
print('spaCy pickle file for {} does not exist. No spaCy objects will be included.'.format(
self._dataset_name))
return None
| [
"[email protected]"
] | |
24ca41257924ee85e52bd59f9f29cc900200f81c | 065e64fd345678771a7449eb093c85195286c071 | /_test/stress_test.py | 67d7e6cf87a09719d0084e88eea19757b779aa2e | [] | no_license | bluemellophone/gzc-client | de80e678fcbaaa7ab32c360265b7645b9ba5aa0e | 46a2a8a4505f874c098bc73efedb50f4214e1451 | refs/heads/master | 2016-09-05T22:19:46.975038 | 2015-02-28T09:04:51 | 2015-02-28T09:04:51 | 29,980,718 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,355 | py | #!/usr/bin/env python
import json
import string
import random
import requests
from os import listdir
from os.path import join
alphabet = string.ascii_lowercase
def get_data(color, number, person):
data = {
'car_color': color,
'car_number': number,
'person_letter': person,
'image_first_time_hour': random.randint(0, 23),
'image_first_time_minute': random.randint(0, 59),
}
return data
test_dir = 'stress_test'
test_archives = [f for f in listdir(test_dir) if f.endswith('.zip')]
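# Each car is (color, car number, generator over letters 'a'..'f'); a car's
# generator is shared across picks, so repeated choices of the same car hand
# out successive person letters (and would exhaust after six picks).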
cars = [('BLUE', 1, (alphabet[x] for x in range(0, 6))), ('RED', 2, (alphabet[x] for x in range(0, 6))), ('GREEN', 3, (alphabet[x] for x in range(0, 6)))]
pairs = [random.choice(cars) for _ in test_archives]
colors = [x[0] for x in pairs]
numbers = [x[1] for x in pairs]
persons = [x[2].next() for x in pairs]
DOMAIN = 'http://localhost:5000'
IMGURL = DOMAIN + '/images/submit'
GPSURL = DOMAIN + '/gps/submit'
for test_archive, color, number, person in zip(test_archives, colors, numbers, persons):
content = open(join(test_dir, test_archive), 'rb')
files = {'image_archive': content}
data = get_data(color, number, person)
print data
r = requests.post(IMGURL, data=data, files=files)
print("HTTP STATUS:", r.status_code)
response = json.loads(r.text)
print("RESPONSE:", response)
| [
"[email protected]"
] | |
987416f131ff62bd851ca65541449b4d3f48b845 | e3c507d659e07225afff3dbd629f550463cf8d34 | /blog/migrations/0001_initial.py | aad83807df822d2036e4e4b07946d759e711c627 | [] | no_license | claudialgds/my-first-blog | a0a68cfe2156ef528ab02ab99df5ea281101ef82 | 22a815b71f2e5794c8c410c30daec391b0f2d618 | refs/heads/master | 2020-09-15T00:13:41.463963 | 2016-08-29T21:33:10 | 2016-08-29T21:33:10 | 66,882,528 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,049 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2016-08-27 18:58
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Post',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=200)),
('text', models.TextField()),
('create_date', models.DateTimeField(default=django.utils.timezone.now)),
('published_date', models.DateTimeField(blank=True, null=True)),
('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
| [
"[email protected]"
] | |
c22b66d7b9df25fc27b77fb4dcad7e9d471f51a8 | 191c95c78d348562f21fb114a12055bc2cb28da6 | /sumsqrdig.py | b3d3948c8c449c239a18c78b88582f9a13a76378 | [] | no_license | AbinayaKP/Set1 | 0936744e4d67da3751758ae6a5896aa98cfc9cac | 887d3b2daeb2aa92772230359c29500a963121db | refs/heads/master | 2020-04-20T07:44:44.530532 | 2019-06-06T11:03:36 | 2019-06-06T11:03:36 | 168,718,762 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 56 | py | x=raw_input()
z=sum(int(i)**2 for i in str(x))
print(z)
| [
"[email protected]"
] | |
6b60e2e9fbdaad89e59509bed877e18ac72d33c4 | 63bdf28fdc1cd225fad5aabb9c2b849a5740bb92 | /pl186.py | 941d987f4103e370a75d64667813ce4489128f66 | [] | no_license | jyothi1998/python1 | c7f9cee8ed85767c5f087b19402f1938592720a9 | 0abf487598e350b19100eacb56d4e7eab99cbad8 | refs/heads/master | 2020-03-27T02:57:49.722063 | 2018-12-17T05:18:04 | 2018-12-17T05:18:04 | 145,829,247 | 0 | 4 | null | null | null | null | UTF-8 | Python | false | false | 83 | py | str=raw_input()
if str in ('Vishal', 'Sundar'):
print "yes"
else:
print "no"
| [
"[email protected]"
] | |
412de84ef91a9b55ca6ed9b2e9535f117a1ec4e8 | 583e5a6c55d400b44f8ce8f936c8c232d90c6cb4 | /setup.py | a9e0909b313e15b0014f04a51b1489e7eef10fd0 | [
"MIT"
] | permissive | NickVoron/sandbox | aadbbc89576a7062eeda570a9390ecb6e144482c | 86bbcd64c97de6d33fa35a6308291178aa964095 | refs/heads/master | 2021-01-22T14:54:58.759205 | 2017-09-24T15:54:14 | 2017-09-24T15:54:14 | 102,370,463 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,906 | py | #!/usr/bin/env python3
import subprocess, contextlib, logging, os, shutil, msbuild
from pathlib import Path
logging.basicConfig(level=logging.INFO)
@contextlib.contextmanager
def cwd(new_cwd):
"""Context manager for current working directory."""
old_cwd = Path.cwd()
logging.info('Change cwd: %s', str(new_cwd))
os.chdir(str(new_cwd))
yield
logging.info('Restore cwd: %s', str(old_cwd))
os.chdir(str(old_cwd))
def setup():
with cwd('.'):
subprocess.check_call(['git', 'submodule', 'update', '--init', '--recursive'])
intermediate_path = '_obj-libs-etc'
root_path = Path(__file__).parent.resolve()
build_path = Path(intermediate_path) / 'windows' / 'sharedtec'
build_path.mkdir(parents=True, exist_ok=True)
with cwd(build_path):
cmd = [
'cmake',
'-G', 'Visual Studio 15 2017 Win64',
str(root_path) # source dir
]
logging.info('Run cmake command: %s', str(cmd))
subprocess.check_call(cmd)
with cwd('boost-cmake/boost/boost_1_64_0'):
if not os.path.exists("bjam.exe"):
p = subprocess.Popen("bootstrap.bat", cwd=r".")
stdout, stderr = p.communicate()
subprocess.call([r'bjam', r'--stagedir=stage/x64', r'-j10', r'toolset=msvc', r'address-model=64', r'variant=release', r'threading=multi', r'link=static', r'runtime-link=static,shared', r'define=_SECURE_SCL=0', r'define=_HAS_ITERATOR_DEBUGGING=0', r'define=BOOST_TEST_NO_MAIN'])
subprocess.call([r'bjam', r'--stagedir=stage/x64', r'-j10', r'toolset=msvc', r'address-model=64', r'variant=debug' , r'threading=multi', r'link=static', r'runtime-link=static,shared', r'define=BOOST_TEST_NO_MAIN'])
with cwd("third_party"):
msbuild.build('third_party.sln', 'Release', 'x64')
with cwd("Stable"):
msbuild.build('libs.sln', 'Release', 'x64')
setup() | [
"[email protected]"
] | |
9e15bdb8ad4adcd44dc951620563b54217ec1841 | 45d00c4b8c9464d3ecdcc370e7a33f04c41ec433 | /_example/sdk/python/CarInformationAPILib/APIException.py | dce719b555543e696ee331afc6d984b82473f74a | [
"Apache-2.0",
"MIT"
] | permissive | commonledger/accounting-api-sdks | 6cac5373f17aae85032fb19947ea3f29fb07a57f | db078c2523ae60ba9a5b5d201d180b6dac69b996 | refs/heads/master | 2020-04-21T14:32:37.371655 | 2014-11-02T22:02:32 | 2014-11-02T22:02:32 | 26,002,106 | 0 | 1 | null | 2014-11-02T22:02:32 | 2014-10-31T04:32:44 | Java | UTF-8 | Python | false | false | 869 | py | '''
CarInformationAPILib
This file was automatically generated by APIMATIC BETA v2.0 on 11/01/2014
'''
class APIException(Exception):
def __init__(self, reason , responseCode):
'''
:param reason: String containing the reason for raising an exception
        :param responseCode: The HTTP response code from the API request
'''
Exception.__init__(self, reason)
        self.responseCode = responseCode
def getResponseCode(self):
'''
The HTTP response code from the API request
:returns: http response code
:rType: int
'''
return self.responseCode
def getReason(self):
'''
The reason for raising an exception
:returns: the reason for raising an exception
:rtype: str
'''
return self.message | [
"[email protected]"
] | |
33e50feaba3f5e4aa2db0651cf4fb18fcdcdda02 | ebd5c4632bb5f85c9e3311fd70f6f1bf92fae53f | /PORMain/pirates/pvp/PVPInviter.py | 0f67bb934799713dcb4996ee7f7dd4bea8e35126 | [] | no_license | BrandonAlex/Pirates-Online-Retribution | 7f881a64ec74e595aaf62e78a39375d2d51f4d2e | 980b7448f798e255eecfb6bd2ebb67b299b27dd7 | refs/heads/master | 2020-04-02T14:22:28.626453 | 2018-10-24T15:33:17 | 2018-10-24T15:33:17 | 154,521,816 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 15,768 | py | from panda3d.core import TextNode
from direct.gui.DirectGui import *
from direct.task.Task import Task
from direct.fsm import ClassicFSM
from direct.fsm import State
from direct.directnotify import DirectNotifyGlobal
from otp.otpbase import OTPLocalizer
from otp.otpbase import OTPGlobals
from otp.uberdog.RejectCode import RejectCode
from pirates.piratesbase import PiratesGlobals
from pirates.piratesbase import PLocalizer
from pirates.piratesgui import SocialPage
from pirates.piratesgui import PiratesGuiGlobals
from pirates.uberdog import UberDogGlobals
from pirates.battle.DistributedBattleNPC import DistributedBattleNPC
from pirates.piratesgui.RequestButton import RequestButton
class PVPInviterButton(RequestButton):
def __init__(self, text, command):
RequestButton.__init__(self, text, command)
self.initialiseoptions(PVPInviterButton)
class PVPInviter(SocialPage.SocialPage):
notify = DirectNotifyGlobal.directNotify.newCategory('PVPInviter')
def __init__(self, avId, avName):
SocialPage.SocialPage.__init__(self, 'PVPInviter')
self.initialiseoptions(PVPInviter)
self.setPos(-0.6, 0, 0.47)
self.avId = avId
self.avName = avName
self.avDisableName = 'disable-%s' % avId
self.fsm = ClassicFSM.ClassicFSM('PVPInviter',
[State.State('off',
self.enterOff,
self.exitOff),
State.State('begin',
self.enterBegin,
self.exitBegin),
State.State('inBattle',
self.enterInBattle,
self.exitInBattle),
State.State('notYet',
self.enterNotYet,
self.exitNotYet),
State.State('checkAvailability',
self.enterCheckAvailability,
self.exitCheckAvailability),
State.State('notAvailable',
self.enterNotAvailable,
self.exitNotAvailable),
State.State('notAcceptingChallenges',
self.enterNotAcceptingChallenges,
self.exitNotAcceptingChallenges),
State.State('wentAway',
self.enterWentAway,
self.exitWentAway),
State.State('alreadyChallenging',
self.enterAlreadyChallenging,
self.exitAlreadyChallenging),
State.State('alreadyInvited',
self.enterAlreadyInvited,
self.exitAlreadyInvited),
State.State('askingNPC',
self.enterAskingNPC,
self.exitAskingNPC),
State.State('endChallenge',
self.enterEndChallenge,
self.exitEndChallenge),
State.State('challengeNoMore',
self.enterChallengeNoMore,
self.exitChallengeNoMore),
State.State('self',
self.enterSelf,
self.exitSelf),
State.State('ignored',
self.enterIgnored,
self.exitIgnored),
State.State('asking',
self.enterAsking,
self.exitAsking),
State.State('yes',
self.enterYes,
self.exitYes),
State.State('no',
self.enterNo,
self.exitNo),
State.State('otherInBattle',
self.enterOtherInBattle,
self.exitOtherInBattle),
State.State('maybe',
self.enterMaybe,
self.exitMaybe),
State.State('down',
self.enterDown,
self.exitDown),
State.State('cancel',
self.enterCancel,
self.exitCancel)],
'off',
'off')
guiMain = loader.loadModel('models/gui/gui_main')
self.box = OnscreenImage(parent = self, pos = (0.25, 0, 0.275), image = guiMain.find('**/general_frame_e'), scale = 0.25)
self.title = DirectLabel(parent = self, relief = None, text = PLocalizer.PVPInviterTitle, text_scale = PiratesGuiGlobals.TextScaleExtraLarge, text_align = TextNode.ACenter, text_fg = PiratesGuiGlobals.TextFG2, text_shadow = PiratesGuiGlobals.TextShadow, text_font = PiratesGlobals.getPirateOutlineFont(), pos = (0.25, 0, 0.42), image = None, image_scale = 0.25)
self.message = DirectLabel(parent = self, relief = None, text = '', text_scale = PiratesGuiGlobals.TextScaleLarge, text_align = TextNode.ACenter, text_fg = PiratesGuiGlobals.TextFG2, text_shadow = PiratesGuiGlobals.TextShadow, text_wordwrap = 11, pos = (0.25, 0, 0.325), textMayChange = 1)
self.context = None
self.bOk = PVPInviterButton(text = OTPLocalizer.DialogOK, command = self.__handleOk)
self.bOk.reparentTo(self)
self.bOk.setPos(0.2, 0, 0.05)
self.bOk.hide()
self.bCancel = PVPInviterButton(text = OTPLocalizer.DialogCancel, command = self.__handleCancel)
self.bCancel.reparentTo(self)
self.bCancel.setPos(0.2, 0, 0.05)
self.bCancel.hide()
self.bStop = PVPInviterButton(text = 'Stop', command = self.__handleStop)
self.bStop.reparentTo(self)
self.bStop.setPos(0.2, 0, 0.15)
self.bStop.hide()
self.bYes = PVPInviterButton(text = OTPLocalizer.DialogYes, command = self.__handleYes)
self.bYes.reparentTo(self)
self.bYes.setPos(0.1, 0, 0.05)
self.bYes.hide()
self.bNo = PVPInviterButton(text = OTPLocalizer.DialogNo, command = self.__handleNo)
self.bNo.reparentTo(self)
self.bNo.setPos(0.3, 0, 0.05)
self.bNo.hide()
self.fsm.enterInitialState()
self.fsm.request('begin')
def destroy(self):
if hasattr(self, 'destroyed'):
return
self.destroyed = 1
self.fsm.request('cancel')
del self.fsm
SocialPage.SocialPage.destroy(self)
def enterOff(self):
pass
def exitOff(self):
pass
def enterBegin(self):
myId = base.localAvatar.doId
self.accept(self.avDisableName, self.__handleDisableAvatar)
if self.avId == myId:
self.fsm.request('self')
else:
self.fsm.request('notYet')
def exitBegin(self):
self.ignore(self.avDisableName)
def enterSendChallenge(self):
pass
def exitSendChallenge(self):
pass
def enterInBattle(self):
self.message['text'] = PLocalizer.PVPInviterBusy % self.avName
self['text_pos'] = (0.0, 0.2)
self.bCancel.show()
self.bCancel.setPos(0.0, 0.0, -0.16)
def exitInBattle(self):
self.bCancel.hide()
def enterNotYet(self):
self.accept(self.avDisableName, self.__handleDisableAvatar)
self.message['text'] = PLocalizer.PVPInviterNotYet % self.avName
self.bYes.show()
self.bNo.show()
def exitNotYet(self):
self.ignore(self.avDisableName)
self.bYes.hide()
self.bNo.hide()
def enterCheckAvailability(self):
self.accept(self.avDisableName, self.__handleDisableAvatar)
avatar = base.cr.doId2do.get(self.avId)
if not avatar:
self.fsm.request('wentAway')
return
if isinstance(avatar, DistributedBattleNPC):
self.fsm.request('askingNPC')
return
base.cr.pvpManager.sendRequestChallenge(self.avId)
self.message['text'] = PLocalizer.PVPInviterCheckAvailability % self.avName
self.accept(PiratesGlobals.PVPAcceptEvent, self.__challengeAccepted)
self.accept(PiratesGlobals.PVPRejectEvent, self.__challengeRejected)
self.bCancel.show()
def __challengeAccepted(self, avIds):
if self.avId in avIds:
self.fsm.request('yes')
def exitCheckAvailability(self):
self.ignore(self.avDisableName)
self.ignore('challengeConsidering')
self.ignore('challengeResponse')
self.ignore(PiratesGlobals.PVPAcceptEvent)
self.ignore(PiratesGlobals.PVPRejectEvent)
self.bCancel.hide()
def enterNotAvailable(self):
self.message['text'] = PLocalizer.PVPInviterBusy % self.avName
self.context = None
self.bOk.show()
def exitNotAvailable(self):
self.bOk.hide()
def enterNotAcceptingChallenges(self):
self.message['text'] = PLocalizer.PVPInviterBusy % self.avName
self.context = None
self.bOk.show()
def exitNotAcceptingChallenges(self):
self.bOk.hide()
def enterWentAway(self):
self.message['text'] = PLocalizer.PVPInviterBusy % self.avName
if self.context != None:
self.context = None
self.bOk.show()
def exitWentAway(self):
self.bOk.hide()
def enterAlreadyChallenging(self):
self.message['text'] = PLocalizer.PVPInviterBusy % self.avName
self['text_pos'] = (0.0, 0.2)
self.context = None
self.bStop.show()
self.bCancel.show()
def exitAlreadyChallenging(self):
self.message['text'] = ''
self['text_pos'] = (0.0, 0.13)
self.bStop.hide()
self.bCancel.hide()
def enterAlreadyInvited(self):
self.message['text'] = PLocalizer.PVPInviterBusy % self.avName
self['text_pos'] = (0.0, 0.2)
self.context = None
self.bStop.show()
self.bCancel.show()
def exitAlreadyInvited(self):
self.message['text'] = ''
self['text_pos'] = (0.0, 0.13)
self.bStop.hide()
self.bCancel.hide()
def enterAskingNPC(self):
self.message['text'] = PLocalizer.PVPInviterAskingNPC % self.avName
taskMgr.doMethodLater(2.0, self.npcReplies, 'npcChallenge')
self.bCancel.show()
def exitAskingNPC(self):
taskMgr.remove('npcChallenge')
self.bCancel.hide()
def npcReplies(self, task):
self.fsm.request('no')
return Task.done
def enterEndChallenge(self):
self.message['text'] = PLocalizer.PVPInviterEndChallenge % self.avName
self.context = None
self.bYes.show()
self.bNo.show()
def exitEndChallenge(self):
self.bYes.hide()
self.bNo.hide()
def enterChallengeNoMore(self):
self.message['text'] = PLocalizer.PVPInviterChallengeNoMore % self.avName
self.bOk.show()
if not self.avId in base.cr.doId2do:
messenger.send(self.avDisableName)
def exitChallengeNoMore(self):
self.bOk.hide()
def enterSelf(self):
self.message['text'] = PLocalizer.PVPInviterSelf
self.context = None
self.bOk.show()
def exitSelf(self):
self.bOk.hide()
def enterIgnored(self):
self.message['text'] = PLocalizer.PVPInviterBusy % self.avName
self.context = None
self.bOk.show()
def exitIgnored(self):
self.bOk.hide()
def enterAsking(self):
self.accept(self.avDisableName, self.__handleDisableAvatar)
self.message['text'] = PLocalizer.PVPInviterAsking % self.avName
self.accept('challengeResponse', self.__challengeResponse)
self.accept(PiratesGlobals.PVPAcceptEvent, self.__challengeAccepted)
self.bCancel.show()
def exitAsking(self):
self.ignore(self.avDisableName)
self.ignore('challengeResponse')
self.ignore(PiratesGlobals.PVPAcceptEvent)
self.bCancel.hide()
def enterYes(self):
self.message['text'] = PLocalizer.PVPInviterSaidYes % self.avName
self.context = None
self.bOk.show()
def exitYes(self):
self.bOk.hide()
def enterNo(self):
self.message['text'] = PLocalizer.PVPInviterSaidNo % self.avName
self.context = None
self.bOk.show()
def exitNo(self):
self.bOk.hide()
def enterOtherInBattle(self):
self.message['text'] = PLocalizer.PVPInviterBusy % self.avName
self.context = None
self.bOk.show()
def exitOtherInBattle(self):
self.bOk.hide()
def enterMaybe(self):
self.message['text'] = PLocalizer.PVPInviterMaybe % self.avName
self.context = None
self.bOk.show()
def exitMaybe(self):
self.bOk.hide()
def enterDown(self):
self.message['text'] = PLocalizer.PVPInviterDown
self.context = None
self.bOk.show()
def exitDown(self):
self.bOk.hide()
def enterCancel(self):
if self.context != None:
self.context = None
self.fsm.request('off')
def exitCancel(self):
pass
def __handleOk(self):
self.destroy()
def __handleCancel(self):
self.destroy()
def __handleStop(self):
self.fsm.request('endChallenge')
def __handleYes(self):
if self.fsm.getCurrentState().getName() == 'notYet':
localAvatar.guiMgr.showLookoutPanel()
localAvatar.guiMgr.lookoutPage.displayLookout(gameType = PiratesGlobals.GAME_TYPE_PVP, gameStyle = PiratesGlobals.GAME_STYLE_TEAM_BATTLE, inviteOptions = [
PiratesGlobals.LOOKOUT_INVITE_CREW], additionalAvs = [
self.avId])
self.__handleOk()
elif self.fsm.getCurrentState().getName() == 'endChallenge':
self.fsm.request('challengeNoMore')
else:
self.destroy()
def __handleNo(self):
self.destroy()
def __challengeConsidering(self, yesNoAlready, context):
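        # yesNoAlready is a numeric response code; each branch below maps one
        # code onto an FSM state (1 asking, 0 not available, 2 already
        # challenging, 3 self, 4 ignored, 6 not accepting challenges,
        # 10 declined, 13 other in battle).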
if yesNoAlready == 1:
self.context = context
self.fsm.request('asking')
elif yesNoAlready == 0:
self.fsm.request('notAvailable')
elif yesNoAlready == 2:
self.fsm.request('alreadyChallenging')
elif yesNoAlready == 3:
self.fsm.request('self')
elif yesNoAlready == 4:
self.fsm.request('ignored')
elif yesNoAlready == 6:
self.fsm.request('notAcceptingChallenges')
elif yesNoAlready == 10:
self.fsm.request('no')
elif yesNoAlready == 13:
self.fsm.request('otherInBattle')
else:
self.notify.warning('Got unexpected response to challengeConsidering: %s' % yesNoAlready)
self.fsm.request('maybe')
def __challengeRejected(self, avId, reason):
if reason == RejectCode.INVITEE_NOT_ONLINE:
self.fsm.request('notAvailable')
elif reason == RejectCode.ALREADY_INVITED:
self.fsm.request('alreadyInvited')
elif reason == RejectCode.ALREADY_CHALLENGED:
self.fsm.request('alreadyChallenging')
elif reason == RejectCode.PVP_IN_BATTLE:
self.fsm.request('inBattle')
elif reason == RejectCode.PVP_OTHER_IN_BATTLE:
self.fsm.request('otherInBattle')
elif reason == RejectCode.INVITATION_DECLINED:
self.fsm.request('no')
else:
self.notify.warning('challengeRejectInvite: %s unknown reason: %s.' % (avId, reason))
def __challengeResponse(self, yesNoMaybe, context):
if self.context != context:
self.notify.warning('Unexpected change of context from %s to %s.' % (self.context, context))
self.context = context
if yesNoMaybe == 1:
self.fsm.request('yes')
elif yesNoMaybe == 0:
self.fsm.request('no')
elif yesNoMaybe == 3:
self.fsm.request('otherInBattle')
else:
self.notify.warning('Got unexpected response to challengeResponse: %s' % yesNoMaybe)
self.fsm.request('maybe')
def __handleDisableAvatar(self):
self.fsm.request('wentAway')
| [
"[email protected]"
] | |
3366215c57d1585842ec4e3e63f72078d46d1d4a | b25704097a87b2819600c92f8943596bfdf6794d | /villageapp/urls.py | 79b9fef5c5018b4f8dc42cd65470129b0a8b81fd | [] | no_license | jarrishmakingsly/village-projects | ffefe849aad9194c73b44165246f015c87612747 | 85930b43bc230b00f1cb8a98e3ca855175847c1d | refs/heads/main | 2023-04-20T08:54:21.017012 | 2021-05-04T13:25:56 | 2021-05-04T13:25:56 | 364,262,122 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 221 | py | from . import views
from django.urls import path
app_name = 'villageapp'
urlpatterns = [
path('',views.index,name='index'),
path('register_panchayath/',views.register_panchayath,name='register_panchayath')
] | [
"[email protected]"
] | |
3c7fd070d6df865841d2e2c6ee47e4586822f6aa | a1fbde85a164bd4c5025a46e37dbe48243875bf8 | /07-one-dimensional-DP/rod_cutting.py | 8e15cbe974d80569cbe3462b2a82fdd5cb755e4d | [] | no_license | Ventrosky/dynamic-programming | e6dfd3e2e5311eb1fd7427609772484116b55889 | 49b58fe3836e000366795042dcd9b04bb124daf2 | refs/heads/master | 2020-08-19T03:09:34.027872 | 2019-11-01T05:49:06 | 2019-11-01T05:49:06 | 215,870,046 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,724 | py | import sys
from one_dimensional.util.stop_watch import start_watch, stop_watch_print
def max_profit(l, p):
if l == 0:
return 0
max_p = -sys.maxsize
for i in range(0, l):
max_p = max(max_p, p[i] + max_profit(l - i - 1, p))
return max_p
def max_profit_memo(l, p, cache):
if l == 0:
return 0
if cache[l] != -1:
return cache[l]
max_p = -sys.maxsize
for i in range(0, l):
        max_p = max(max_p, p[i] + max_profit_memo(l - i - 1, p, cache))  # recurse via the memoized version so the cache is actually used
cache[l] = max_p
return max_p
def max_profit_dp(L, p):
dp = [0 for _ in range(0, L + 1)]
for l in range(1, L + 1):
dp[l] = -sys.maxsize
for i in range(0, l):
dp[l] = max(dp[l], p[i] + dp[l - i - 1])
return dp[L]
def max_profit_dp_reconstruct(L, p):
dp = [0 for _ in range(0, L + 1)]
cuts = [i for i in range(0, L+1)]
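    # cuts[l] records the length of the first piece in an optimal cutting of a rod of length l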
for l in range(1, L + 1):
dp[l] = -sys.maxsize
for i in range(0, l):
if p[i] + dp[l - i - 1] > dp[l]:
dp[l] = p[i] + dp[l - i - 1]
cuts[l] = i+1
l = L
cut = cuts[L]
while cut != 0:
print(str(cut)+',',end='')
l = l-cut
cut = cuts[l]
return dp[L]
profits_table = [1, 5, 8, 9, 10, 13, 17, 20, 24, 30]
l = 8
start_watch()
res = max_profit(l, profits_table)
print(res)
stop_watch_print("Recursion {} milli seconds")
cache = [-1 for _ in range(0, l + 1)]
start_watch()
res = max_profit_memo(l, profits_table, cache)
print(res)
stop_watch_print("Memoization {} milli seconds")
start_watch()
res = max_profit_dp(l, profits_table)
print(res)
stop_watch_print("Bottom up {} milli seconds")
res = max_profit_dp_reconstruct(l,profits_table)
print(res)
| [
"[email protected]"
] | |
22cf9c8ffb50b58d453a62b9eaa8609878b54f08 | 82b946da326148a3c1c1f687f96c0da165bb2c15 | /sdk/python/pulumi_azure_native/web/v20201201/web_app_instance_function_slot.py | 1a7fe3db3da90cc3dad7d6bcb87d27e394f09544 | [
"BSD-3-Clause",
"Apache-2.0"
] | permissive | morrell/pulumi-azure-native | 3916e978382366607f3df0a669f24cb16293ff5e | cd3ba4b9cb08c5e1df7674c1c71695b80e443f08 | refs/heads/master | 2023-06-20T19:37:05.414924 | 2021-07-19T20:57:53 | 2021-07-19T20:57:53 | 387,815,163 | 0 | 0 | Apache-2.0 | 2021-07-20T14:18:29 | 2021-07-20T14:18:28 | null | UTF-8 | Python | false | false | 24,849 | py | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
__all__ = ['WebAppInstanceFunctionSlotArgs', 'WebAppInstanceFunctionSlot']
@pulumi.input_type
class WebAppInstanceFunctionSlotArgs:
def __init__(__self__, *,
name: pulumi.Input[str],
resource_group_name: pulumi.Input[str],
slot: pulumi.Input[str],
config: Optional[Any] = None,
config_href: Optional[pulumi.Input[str]] = None,
files: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
function_app_id: Optional[pulumi.Input[str]] = None,
function_name: Optional[pulumi.Input[str]] = None,
href: Optional[pulumi.Input[str]] = None,
invoke_url_template: Optional[pulumi.Input[str]] = None,
is_disabled: Optional[pulumi.Input[bool]] = None,
kind: Optional[pulumi.Input[str]] = None,
language: Optional[pulumi.Input[str]] = None,
script_href: Optional[pulumi.Input[str]] = None,
script_root_path_href: Optional[pulumi.Input[str]] = None,
secrets_file_href: Optional[pulumi.Input[str]] = None,
test_data: Optional[pulumi.Input[str]] = None,
test_data_href: Optional[pulumi.Input[str]] = None):
"""
The set of arguments for constructing a WebAppInstanceFunctionSlot resource.
:param pulumi.Input[str] name: Site name.
:param pulumi.Input[str] resource_group_name: Name of the resource group to which the resource belongs.
:param pulumi.Input[str] slot: Name of the deployment slot.
:param Any config: Config information.
:param pulumi.Input[str] config_href: Config URI.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] files: File list.
:param pulumi.Input[str] function_app_id: Function App ID.
:param pulumi.Input[str] function_name: Function name.
:param pulumi.Input[str] href: Function URI.
:param pulumi.Input[str] invoke_url_template: The invocation URL
:param pulumi.Input[bool] is_disabled: Gets or sets a value indicating whether the function is disabled
:param pulumi.Input[str] kind: Kind of resource.
:param pulumi.Input[str] language: The function language
:param pulumi.Input[str] script_href: Script URI.
:param pulumi.Input[str] script_root_path_href: Script root path URI.
:param pulumi.Input[str] secrets_file_href: Secrets file URI.
:param pulumi.Input[str] test_data: Test data used when testing via the Azure Portal.
:param pulumi.Input[str] test_data_href: Test data URI.
"""
pulumi.set(__self__, "name", name)
pulumi.set(__self__, "resource_group_name", resource_group_name)
pulumi.set(__self__, "slot", slot)
if config is not None:
pulumi.set(__self__, "config", config)
if config_href is not None:
pulumi.set(__self__, "config_href", config_href)
if files is not None:
pulumi.set(__self__, "files", files)
if function_app_id is not None:
pulumi.set(__self__, "function_app_id", function_app_id)
if function_name is not None:
pulumi.set(__self__, "function_name", function_name)
if href is not None:
pulumi.set(__self__, "href", href)
if invoke_url_template is not None:
pulumi.set(__self__, "invoke_url_template", invoke_url_template)
if is_disabled is not None:
pulumi.set(__self__, "is_disabled", is_disabled)
if kind is not None:
pulumi.set(__self__, "kind", kind)
if language is not None:
pulumi.set(__self__, "language", language)
if script_href is not None:
pulumi.set(__self__, "script_href", script_href)
if script_root_path_href is not None:
pulumi.set(__self__, "script_root_path_href", script_root_path_href)
if secrets_file_href is not None:
pulumi.set(__self__, "secrets_file_href", secrets_file_href)
if test_data is not None:
pulumi.set(__self__, "test_data", test_data)
if test_data_href is not None:
pulumi.set(__self__, "test_data_href", test_data_href)
@property
@pulumi.getter
def name(self) -> pulumi.Input[str]:
"""
Site name.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: pulumi.Input[str]):
pulumi.set(self, "name", value)
@property
@pulumi.getter(name="resourceGroupName")
def resource_group_name(self) -> pulumi.Input[str]:
"""
Name of the resource group to which the resource belongs.
"""
return pulumi.get(self, "resource_group_name")
@resource_group_name.setter
def resource_group_name(self, value: pulumi.Input[str]):
pulumi.set(self, "resource_group_name", value)
@property
@pulumi.getter
def slot(self) -> pulumi.Input[str]:
"""
Name of the deployment slot.
"""
return pulumi.get(self, "slot")
@slot.setter
def slot(self, value: pulumi.Input[str]):
pulumi.set(self, "slot", value)
@property
@pulumi.getter
def config(self) -> Optional[Any]:
"""
Config information.
"""
return pulumi.get(self, "config")
@config.setter
def config(self, value: Optional[Any]):
pulumi.set(self, "config", value)
@property
@pulumi.getter(name="configHref")
def config_href(self) -> Optional[pulumi.Input[str]]:
"""
Config URI.
"""
return pulumi.get(self, "config_href")
@config_href.setter
def config_href(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "config_href", value)
@property
@pulumi.getter
def files(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
"""
File list.
"""
return pulumi.get(self, "files")
@files.setter
def files(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
pulumi.set(self, "files", value)
@property
@pulumi.getter(name="functionAppId")
def function_app_id(self) -> Optional[pulumi.Input[str]]:
"""
Function App ID.
"""
return pulumi.get(self, "function_app_id")
@function_app_id.setter
def function_app_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "function_app_id", value)
@property
@pulumi.getter(name="functionName")
def function_name(self) -> Optional[pulumi.Input[str]]:
"""
Function name.
"""
return pulumi.get(self, "function_name")
@function_name.setter
def function_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "function_name", value)
@property
@pulumi.getter
def href(self) -> Optional[pulumi.Input[str]]:
"""
Function URI.
"""
return pulumi.get(self, "href")
@href.setter
def href(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "href", value)
@property
@pulumi.getter(name="invokeUrlTemplate")
def invoke_url_template(self) -> Optional[pulumi.Input[str]]:
"""
The invocation URL
"""
return pulumi.get(self, "invoke_url_template")
@invoke_url_template.setter
def invoke_url_template(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "invoke_url_template", value)
@property
@pulumi.getter(name="isDisabled")
def is_disabled(self) -> Optional[pulumi.Input[bool]]:
"""
Gets or sets a value indicating whether the function is disabled
"""
return pulumi.get(self, "is_disabled")
@is_disabled.setter
def is_disabled(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "is_disabled", value)
@property
@pulumi.getter
def kind(self) -> Optional[pulumi.Input[str]]:
"""
Kind of resource.
"""
return pulumi.get(self, "kind")
@kind.setter
def kind(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "kind", value)
@property
@pulumi.getter
def language(self) -> Optional[pulumi.Input[str]]:
"""
The function language
"""
return pulumi.get(self, "language")
@language.setter
def language(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "language", value)
@property
@pulumi.getter(name="scriptHref")
def script_href(self) -> Optional[pulumi.Input[str]]:
"""
Script URI.
"""
return pulumi.get(self, "script_href")
@script_href.setter
def script_href(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "script_href", value)
@property
@pulumi.getter(name="scriptRootPathHref")
def script_root_path_href(self) -> Optional[pulumi.Input[str]]:
"""
Script root path URI.
"""
return pulumi.get(self, "script_root_path_href")
@script_root_path_href.setter
def script_root_path_href(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "script_root_path_href", value)
@property
@pulumi.getter(name="secretsFileHref")
def secrets_file_href(self) -> Optional[pulumi.Input[str]]:
"""
Secrets file URI.
"""
return pulumi.get(self, "secrets_file_href")
@secrets_file_href.setter
def secrets_file_href(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "secrets_file_href", value)
@property
@pulumi.getter(name="testData")
def test_data(self) -> Optional[pulumi.Input[str]]:
"""
Test data used when testing via the Azure Portal.
"""
return pulumi.get(self, "test_data")
@test_data.setter
def test_data(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "test_data", value)
@property
@pulumi.getter(name="testDataHref")
def test_data_href(self) -> Optional[pulumi.Input[str]]:
"""
Test data URI.
"""
return pulumi.get(self, "test_data_href")
@test_data_href.setter
def test_data_href(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "test_data_href", value)
class WebAppInstanceFunctionSlot(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
config: Optional[Any] = None,
config_href: Optional[pulumi.Input[str]] = None,
files: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
function_app_id: Optional[pulumi.Input[str]] = None,
function_name: Optional[pulumi.Input[str]] = None,
href: Optional[pulumi.Input[str]] = None,
invoke_url_template: Optional[pulumi.Input[str]] = None,
is_disabled: Optional[pulumi.Input[bool]] = None,
kind: Optional[pulumi.Input[str]] = None,
language: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
script_href: Optional[pulumi.Input[str]] = None,
script_root_path_href: Optional[pulumi.Input[str]] = None,
secrets_file_href: Optional[pulumi.Input[str]] = None,
slot: Optional[pulumi.Input[str]] = None,
test_data: Optional[pulumi.Input[str]] = None,
test_data_href: Optional[pulumi.Input[str]] = None,
__props__=None):
"""
Function information.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param Any config: Config information.
:param pulumi.Input[str] config_href: Config URI.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] files: File list.
:param pulumi.Input[str] function_app_id: Function App ID.
:param pulumi.Input[str] function_name: Function name.
:param pulumi.Input[str] href: Function URI.
:param pulumi.Input[str] invoke_url_template: The invocation URL
:param pulumi.Input[bool] is_disabled: Gets or sets a value indicating whether the function is disabled
:param pulumi.Input[str] kind: Kind of resource.
:param pulumi.Input[str] language: The function language
:param pulumi.Input[str] name: Site name.
:param pulumi.Input[str] resource_group_name: Name of the resource group to which the resource belongs.
:param pulumi.Input[str] script_href: Script URI.
:param pulumi.Input[str] script_root_path_href: Script root path URI.
:param pulumi.Input[str] secrets_file_href: Secrets file URI.
:param pulumi.Input[str] slot: Name of the deployment slot.
:param pulumi.Input[str] test_data: Test data used when testing via the Azure Portal.
:param pulumi.Input[str] test_data_href: Test data URI.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: WebAppInstanceFunctionSlotArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
Function information.
:param str resource_name: The name of the resource.
:param WebAppInstanceFunctionSlotArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(WebAppInstanceFunctionSlotArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
config: Optional[Any] = None,
config_href: Optional[pulumi.Input[str]] = None,
files: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
function_app_id: Optional[pulumi.Input[str]] = None,
function_name: Optional[pulumi.Input[str]] = None,
href: Optional[pulumi.Input[str]] = None,
invoke_url_template: Optional[pulumi.Input[str]] = None,
is_disabled: Optional[pulumi.Input[bool]] = None,
kind: Optional[pulumi.Input[str]] = None,
language: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
script_href: Optional[pulumi.Input[str]] = None,
script_root_path_href: Optional[pulumi.Input[str]] = None,
secrets_file_href: Optional[pulumi.Input[str]] = None,
slot: Optional[pulumi.Input[str]] = None,
test_data: Optional[pulumi.Input[str]] = None,
test_data_href: Optional[pulumi.Input[str]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = WebAppInstanceFunctionSlotArgs.__new__(WebAppInstanceFunctionSlotArgs)
__props__.__dict__["config"] = config
__props__.__dict__["config_href"] = config_href
__props__.__dict__["files"] = files
__props__.__dict__["function_app_id"] = function_app_id
__props__.__dict__["function_name"] = function_name
__props__.__dict__["href"] = href
__props__.__dict__["invoke_url_template"] = invoke_url_template
__props__.__dict__["is_disabled"] = is_disabled
__props__.__dict__["kind"] = kind
__props__.__dict__["language"] = language
if name is None and not opts.urn:
raise TypeError("Missing required property 'name'")
__props__.__dict__["name"] = name
if resource_group_name is None and not opts.urn:
raise TypeError("Missing required property 'resource_group_name'")
__props__.__dict__["resource_group_name"] = resource_group_name
__props__.__dict__["script_href"] = script_href
__props__.__dict__["script_root_path_href"] = script_root_path_href
__props__.__dict__["secrets_file_href"] = secrets_file_href
if slot is None and not opts.urn:
raise TypeError("Missing required property 'slot'")
__props__.__dict__["slot"] = slot
__props__.__dict__["test_data"] = test_data
__props__.__dict__["test_data_href"] = test_data_href
__props__.__dict__["type"] = None
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:web/v20201201:WebAppInstanceFunctionSlot"), pulumi.Alias(type_="azure-native:web:WebAppInstanceFunctionSlot"), pulumi.Alias(type_="azure-nextgen:web:WebAppInstanceFunctionSlot"), pulumi.Alias(type_="azure-native:web/v20160801:WebAppInstanceFunctionSlot"), pulumi.Alias(type_="azure-nextgen:web/v20160801:WebAppInstanceFunctionSlot"), pulumi.Alias(type_="azure-native:web/v20180201:WebAppInstanceFunctionSlot"), pulumi.Alias(type_="azure-nextgen:web/v20180201:WebAppInstanceFunctionSlot"), pulumi.Alias(type_="azure-native:web/v20181101:WebAppInstanceFunctionSlot"), pulumi.Alias(type_="azure-nextgen:web/v20181101:WebAppInstanceFunctionSlot"), pulumi.Alias(type_="azure-native:web/v20190801:WebAppInstanceFunctionSlot"), pulumi.Alias(type_="azure-nextgen:web/v20190801:WebAppInstanceFunctionSlot"), pulumi.Alias(type_="azure-native:web/v20200601:WebAppInstanceFunctionSlot"), pulumi.Alias(type_="azure-nextgen:web/v20200601:WebAppInstanceFunctionSlot"), pulumi.Alias(type_="azure-native:web/v20200901:WebAppInstanceFunctionSlot"), pulumi.Alias(type_="azure-nextgen:web/v20200901:WebAppInstanceFunctionSlot"), pulumi.Alias(type_="azure-native:web/v20201001:WebAppInstanceFunctionSlot"), pulumi.Alias(type_="azure-nextgen:web/v20201001:WebAppInstanceFunctionSlot"), pulumi.Alias(type_="azure-native:web/v20210101:WebAppInstanceFunctionSlot"), pulumi.Alias(type_="azure-nextgen:web/v20210101:WebAppInstanceFunctionSlot"), pulumi.Alias(type_="azure-native:web/v20210115:WebAppInstanceFunctionSlot"), pulumi.Alias(type_="azure-nextgen:web/v20210115:WebAppInstanceFunctionSlot")])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(WebAppInstanceFunctionSlot, __self__).__init__(
'azure-native:web/v20201201:WebAppInstanceFunctionSlot',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'WebAppInstanceFunctionSlot':
"""
Get an existing WebAppInstanceFunctionSlot resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = WebAppInstanceFunctionSlotArgs.__new__(WebAppInstanceFunctionSlotArgs)
__props__.__dict__["config"] = None
__props__.__dict__["config_href"] = None
__props__.__dict__["files"] = None
__props__.__dict__["function_app_id"] = None
__props__.__dict__["href"] = None
__props__.__dict__["invoke_url_template"] = None
__props__.__dict__["is_disabled"] = None
__props__.__dict__["kind"] = None
__props__.__dict__["language"] = None
__props__.__dict__["name"] = None
__props__.__dict__["script_href"] = None
__props__.__dict__["script_root_path_href"] = None
__props__.__dict__["secrets_file_href"] = None
__props__.__dict__["test_data"] = None
__props__.__dict__["test_data_href"] = None
__props__.__dict__["type"] = None
return WebAppInstanceFunctionSlot(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter
def config(self) -> pulumi.Output[Optional[Any]]:
"""
Config information.
"""
return pulumi.get(self, "config")
@property
@pulumi.getter(name="configHref")
def config_href(self) -> pulumi.Output[Optional[str]]:
"""
Config URI.
"""
return pulumi.get(self, "config_href")
@property
@pulumi.getter
def files(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
"""
File list.
"""
return pulumi.get(self, "files")
@property
@pulumi.getter(name="functionAppId")
def function_app_id(self) -> pulumi.Output[Optional[str]]:
"""
Function App ID.
"""
return pulumi.get(self, "function_app_id")
@property
@pulumi.getter
def href(self) -> pulumi.Output[Optional[str]]:
"""
Function URI.
"""
return pulumi.get(self, "href")
@property
@pulumi.getter(name="invokeUrlTemplate")
def invoke_url_template(self) -> pulumi.Output[Optional[str]]:
"""
The invocation URL
"""
return pulumi.get(self, "invoke_url_template")
@property
@pulumi.getter(name="isDisabled")
def is_disabled(self) -> pulumi.Output[Optional[bool]]:
"""
Gets or sets a value indicating whether the function is disabled
"""
return pulumi.get(self, "is_disabled")
@property
@pulumi.getter
def kind(self) -> pulumi.Output[Optional[str]]:
"""
Kind of resource.
"""
return pulumi.get(self, "kind")
@property
@pulumi.getter
def language(self) -> pulumi.Output[Optional[str]]:
"""
The function language
"""
return pulumi.get(self, "language")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
Resource Name.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="scriptHref")
def script_href(self) -> pulumi.Output[Optional[str]]:
"""
Script URI.
"""
return pulumi.get(self, "script_href")
@property
@pulumi.getter(name="scriptRootPathHref")
def script_root_path_href(self) -> pulumi.Output[Optional[str]]:
"""
Script root path URI.
"""
return pulumi.get(self, "script_root_path_href")
@property
@pulumi.getter(name="secretsFileHref")
def secrets_file_href(self) -> pulumi.Output[Optional[str]]:
"""
Secrets file URI.
"""
return pulumi.get(self, "secrets_file_href")
@property
@pulumi.getter(name="testData")
def test_data(self) -> pulumi.Output[Optional[str]]:
"""
Test data used when testing via the Azure Portal.
"""
return pulumi.get(self, "test_data")
@property
@pulumi.getter(name="testDataHref")
def test_data_href(self) -> pulumi.Output[Optional[str]]:
"""
Test data URI.
"""
return pulumi.get(self, "test_data_href")
@property
@pulumi.getter
def type(self) -> pulumi.Output[str]:
"""
Resource type.
"""
return pulumi.get(self, "type")
| [
"[email protected]"
] | |
acea0b64fbbc0712fea0c97e0aaa0b47a570d546 | 87371f38a9d78ed24d7bb199e3ee31a5eb6b5276 | /page/makepagemeta.py | 216b50e3faabafdca25278c398d0a49791498f67 | [
"MIT"
] | permissive | afcarl/20cgenres | d42969ff50670f8fa347d467fcdc712c0db4fbe6 | 98edee2745acd5f322afa7c49a2dea318901abe7 | refs/heads/master | 2020-03-21T06:33:26.186781 | 2016-08-05T18:13:39 | 2016-08-05T18:13:39 | 138,227,806 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,093 | py | #!/usr/bin/env python3
# Based on inferfromreaders.
# We read volume metadata and produce page metadata,
# confirming along the way that the number of pages
# in our metadata matches the number in the json
import csv, os, sys, shutil
from collections import Counter
import numpy as np
import pandas as pd
# import utils
currentdir = os.path.dirname(__file__)
libpath = os.path.join(currentdir, '../../lib')
sys.path.append(libpath)
import SonicScrewdriver as utils
import parsefeaturejsons as parser
readers = ['donofrio', 'erickson', 'alvarez', 'flynn', 'rubio', 'barajas', 'koh', 'trondson', 'lin', 'buck', 'fleming']
sourcedir = '/Volumes/TARDIS/work/readers/'
subobjects = os.listdir(sourcedir)
subdirs = [x for x in subobjects if os.path.isdir(os.path.join(sourcedir, x))]
tagset = set()
taglist = []
paths = dict()
readerowners = dict()
for subdir in subdirs:
thisreader = 'none'
for reader in readers:
if reader in subdir.lower():
thisreader = reader
break
if thisreader == 'none':
print(subdir + ' lacks a recognized reader.')
sys.exit(0)
wholepath = os.path.join(sourcedir, subdir, 'tags')
if os.path.isdir(wholepath):
tagfiles = [x for x in os.listdir(wholepath) if x.endswith('.csv')]
for f in tagfiles:
thispath = os.path.join(wholepath, f)
okaytoadd = True
with open(thispath, encoding = 'utf-8') as file:
reader = csv.DictReader(file)
for row in reader:
if 'tag' not in row or len(row['tag']) < 3:
okaytoadd = False
break
if okaytoadd:
tagset.add(f)
if f not in readerowners:
readerowners[f] = []
paths[f] = []
if thisreader not in readerowners[f]:
readerowners[f].append(thisreader)
paths[f].append(thispath)
print(len(tagset))
allfiles = tagset
# This is a list of all the filenames (note, filenames not docids)
# that we found in the /readers sourcedir.
train1 = pd.read_csv('../bzipmeta.csv', dtype = 'object', index_col = 'docid')
tidx = set(train1.index.values)
for filename in allfiles:
docid = filename.replace('.csv', '')
if utils.dirty_pairtree(docid) not in tidx:
print(docid)
genrestocheck = ['fic', 'poe']
equivalences = {'non', 'bio', 'other'}
volumesingenre = dict()
for g in genrestocheck:
volumesingenre[g] = []
alldocids = set()
for filename, owners in readerowners.items():
path = paths[filename][0]
if 'metadat' in filename:
print(filename)
continue
docid = utils.dirty_pairtree(filename.replace('.csv', ''))
alldocids.add(docid)
expectedgenre = train1.loc[docid, 'sampledas']
with open(path, encoding = 'utf-8') as f:
reader = csv.DictReader(f)
genrectr = Counter()
allct = 0
for row in reader:
allct += 1
genrectr[row['tag'].lower()] += 1
genrectr['all'] = allct
for g in genrestocheck:
if g in genrectr and genrectr[g] > (genrectr['all'] / 2):
volumesingenre[g].append(filename)
jsondirs = ['/Volumes/TARDIS/work/hathifiles/counts/', '/Volumes/TARDIS/work/hathifiles/secondcounts/']
numlost = 0
numpages = dict()
destination = '/Users/tunder/work/pagedata'
for g in genrestocheck:
for filename in volumesingenre[g]:
docid = utils.dirty_pairtree(filename.replace('.csv', ''))
jsonname = filename.replace('.csv', '.basic.json.bz2')
found = False
for directory in jsondirs:
thispath = os.path.join(directory, jsonname)
if os.path.isfile(thispath):
vol = parser.PagelistFromJson(thispath, docid)
print(vol.numpages)
destinationpath = os.path.join(destination, jsonname)
shutil.copyfile(thispath, destinationpath)
found = True
numpages[docid] = vol.numpages
if not found:
numlost += 1
outcols = list(train1.columns.values)
outcols.append('groupid')
outcols.append('pageid')
outcols.append('class')
toignore = {'/Volumes/TARDIS/work/readers/april20trondson/tags/inu.39000004302985.csv', '/Volumes/TARDIS/work/readers/march27trondson/tags/mdp.39015030775509.csv'}
for g in genrestocheck:
print(g)
thisdf = []
for filename in volumesingenre[g]:
if 'metadat' in filename:
print(filename)
continue
docid = utils.dirty_pairtree(filename.replace('.csv', ''))
binarized = []
for path in paths[filename]:
if path in toignore:
continue
recordedgenres = []
with open(path, encoding = 'utf-8') as f:
reader = csv.DictReader(f)
for row in reader:
recordedgenres.append(row['tag'])
if len(recordedgenres) != numpages[docid]:
print('Error in page count in doc ' + docid)
print(paths[filename])
binarygenres = [1] * len(recordedgenres)
# the following loop zeroes out the front
# until it hits two successive pages of
# the expected genre
for idx, g1 in enumerate(recordedgenres):
if g1 != g:
binarygenres[idx] = 0
elif idx + 1 >= len(recordedgenres):
break
elif g1 == g and recordedgenres[idx + 1] != g:
binarygenres[idx] = 1
elif g1 == g and recordedgenres[idx + 1] == g:
break
for idx, g1 in reversed(list(enumerate(recordedgenres))):
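                # likewise, zero out pages from the back of the volume until two
                # successive pages of the expected genre are reached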
if g1 != g:
binarygenres[idx] = 0
elif idx - 1 < 0:
break
elif g1 == g and recordedgenres[idx - 1] != g:
binarygenres[idx] = 1
elif g1 == g and recordedgenres[idx - 1] == g:
break
binarized.append(binarygenres)
pagediscrepancy = 0
if len(binarized) > 1:
for idx, bingenre in enumerate(binarized[0]):
if bingenre != binarized[1][idx]:
pagediscrepancy += 1
print('DISCREP: ' + str(pagediscrepancy))
print(docid)
for idx, bgenre in enumerate(binarygenres):
# we are arbitrarily using the last one
# but there were in practice no discrepancies, so ...
corerow = train1.loc[docid, : ]
newrow = corerow.append(pd.Series(docid, index = ['groupid']))
pageid = docid + '||' + str(idx)
newrow = newrow.append(pd.Series(pageid, index = ['pageid']))
newrow = newrow.append(pd.Series(bgenre, index = ['class']))
thisdf.append(newrow)
outpath = g + '.csv'
thisdf = pd.DataFrame(thisdf)
thisdf.to_csv(outpath)
| [
"[email protected]"
] | |
62e405f2d313ab45545521d60dc94e45b278395c | 908eac70186d7d5e3066731ae73c4030e6d5c109 | /Pertemuan6/DemoProgressBar/DemoQProgressBar.py | 7d32916d34cf28efbc5843f80b0f892e154ffc5f | [] | no_license | rezafp01/GUI | 1f5faf5b32299feff6e5efb8a915ddc1467e695f | e779a104bc3e19e6100790901c6682e69f042e28 | refs/heads/master | 2022-09-10T02:46:30.699978 | 2020-06-02T01:47:59 | 2020-06-02T01:47:59 | 268,673,894 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,094 | py | import sys
from PyQt5.QtCore import *
from PyQt5.QtWidgets import *
class MainForm(QWidget):
def __init__(self):
super().__init__()
self.initUI()
def initUI(self):
self.setGeometry(300, 300, 280, 150)
self.setWindowTitle('Demo QProgressBar')
self.show()
self.pbar = QProgressBar(self)
self.pbar.setGeometry(30, 40, 200, 25)
self.btn = QPushButton('Start', self)
self.btn.move(40, 80)
self.btn.clicked.connect(self.doAction)
layout = QVBoxLayout()
layout.addWidget(self.pbar)
layout.addWidget(self.btn)
self.setLayout(layout)
self.timer = QBasicTimer()
self.step = 0
def timerEvent(self, e):
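        # called every 100 ms while the QBasicTimer started in doAction() is
        # active; advances the bar one step and resets once it reaches 100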
if self.step >= 100:
self.timer.stop()
            self.btn.setText('Selesai')  # 'Selesai' = finished
            self.btn.setText('Ulangi')   # 'Ulangi' = repeat; this second label is what stays visible
self.step=0
return
self.step = self.step + 1
self.pbar.setValue(self.step)
def doAction(self, e):
if self.timer.isActive():
self.timer.stop()
self.btn.setText('Start')
else:
self.timer.start(100, self)
self.btn.setText('Stop')
if __name__ == '__main__':
app = QApplication(sys.argv)
ex = MainForm()
sys.exit(app.exec_())
| [
"[email protected]"
] | |
0fabc77cc1c6a7e77f260c487a3f99fda50b6326 | c4ea92cd16ed772bf623f521e6c117d286d3169c | /simple1/simpledu/handlers/front.py | 69f8e9047168930eda917236976aec8f618025f6 | [] | no_license | temprani/temprani | 875adf871e5851a26f496ca4ad3ee0e753379bdc | e9e7e39b14b38a33fff330dbd4dde668b5fa0066 | refs/heads/master | 2020-04-10T22:57:32.878312 | 2019-02-28T07:40:04 | 2019-02-28T07:40:04 | 161,337,449 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 238 | py | from flask import Blueprint, render_template
from simpledu.models import Course
front = Blueprint('front',__name__)
@front.route('/')
def index():
    courses = Course.query.all()
return render_template('index.html',courses=courses)
| [
"[email protected]"
] | |
e2905bfdb23d2f11287df6794e7e7358e16c9393 | 0cb2db16abfafc325a865c0529f7d231165655b6 | /hackerrank/algorithms/greedy/grid_challenge.py | 0589e2b93ae0c48b981bef0ffa31a49b0e2419b6 | [] | no_license | ExpLog/Challenger | a73e83ac920ea648d4962c12942d6d1089e28757 | a53fe99f4bf9da0780cec21ae952b51374f8697f | refs/heads/master | 2021-01-09T20:36:12.098535 | 2016-10-27T21:57:51 | 2016-10-27T21:57:51 | 60,499,064 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 480 | py | def read_matrix():
n = int(input().strip())
matrix = [list(input().strip()) for i in range(n)]
return matrix
def col_sorted(matrix):
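    # a grid passes only if, after every row has been sorted, each column is
    # also in non-decreasing order from top to bottom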
n = len(matrix)
for j in range(n):
for i in range(1, n):
if matrix[i-1][j] > matrix[i][j]:
return False
return True
tests = int(input().strip())
for t in range(tests):
matrix = read_matrix()
for row in matrix:
row.sort()
print("YES" if col_sorted(matrix) else "NO") | [
"[email protected]"
] | |
19611ed31b1eaf47bb29a4de4ea1ae6637f0cd45 | f1bef7256b9ab78c1f2e86e7fb89d6f18acf8a82 | /HW8/Driver.py | f64d515f38db951701e858edac0a1bfaa099d920 | [] | no_license | waiwaikim/Algorithms | 7695a6b0f41fbee5e9584623efa1f67d15db68ca | 4d0b3c8064a4ac6354a4dd1f09ab785e404f3528 | refs/heads/master | 2020-11-28T10:28:01.901717 | 2018-11-24T17:18:02 | 2018-11-24T17:18:02 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 739 | py | import sys
from Solution import Solution
def main():
if (len(sys.argv) < 2):
print("Please provide the input filepath as the argument")
sys.exit()
input_file = sys.argv[1]
graph = []
node = 0
# Reading and parsing the file
with open(input_file, 'r') as file:
for line in file:
adjList = [int(neighbor) for neighbor in line.split()]
graph.append(adjList)
node += 1
# Get the path
path = Solution(graph).output_edges();
print('Your Solution:')
print('=========================================================')
print(path)
print('=========================================================')
if __name__ == '__main__':
main()
| [
"[email protected]"
] | |
33244fa1af1fee5573f2c32603ec290fa5f45de1 | a61906e0296c8e7a71474390026f279a99eaec7d | /AnalysisResults/manage.py | 015fd37b6cb09e820246fc33cea271e8fc87515e | [] | no_license | Kamesh1993/ecanalysis | dfdb820e8c1c6d35747956a94f86fa4e7a230be4 | 80a15af4bcd099012fff90de7706476bf0eb7a45 | refs/heads/master | 2020-05-16T16:03:19.378078 | 2019-04-24T07:28:22 | 2019-04-24T07:28:22 | 183,148,596 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 336 | py | #!/usr/bin/env python
"""
Command-line utility for administrative tasks.
"""
import os
import sys
if __name__ == "__main__":
os.environ.setdefault(
"DJANGO_SETTINGS_MODULE",
"AnalysisResults.settings"
)
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
| [
"[email protected]"
] | |
cc6fc1662b4e1aea8588076e192231c0f0f06bfd | 605847f7005661400a3753278e05be104f679b85 | /8. List Comprenhension/simple list compreshension.py | 24ad90cbd4e447cdeb9457078569d25bbcd92348 | [] | no_license | kamalsingh-engg/udemy_courses_python | 6923b48e3d1ee78cb0c9aa5dedc57d001dde28b1 | 96b29bb5475f459ac2f4d8180aef7f85efe8283d | refs/heads/master | 2023-02-13T01:29:13.848719 | 2021-01-03T16:36:29 | 2021-01-03T16:36:29 | 296,244,994 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 310 | py | # this is the program for the simple list compreshansion
# It divides each element of temps by 10, first with an ordinary for loop and
# then with the equivalent one-line list comprehension.
temps = [12,34,56,67,78]
new_temps = []
for temp in temps:
new_temps.append(temp/10)
print(new_temps)
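# Equivalent one-liner using a list comprehension (illustrative sketch of the
# technique the file name refers to):
new_temps = [temp / 10 for temp in temps]
print(new_temps)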
| [
"444kamalsingh@gmail,com"
] | 444kamalsingh@gmail,com |
9052d8eeee5292b0bcad8a3247ae22e23d75cb73 | c8cf583664bcb5d3dec45f120e530108465d8fda | /modelexp/models/sas/_superballCSCoupled.py | 071e94a0fcd9ace9c9b566168619dfa939d1fb3e | [
"MIT"
] | permissive | DomiDre/modelexp | c02324da765edd7afdb19137f792a0d08885805b | 1ec25f71e739dac27716f9a8637fa6ab067499b9 | refs/heads/master | 2022-05-30T01:28:01.073861 | 2022-05-27T09:20:44 | 2022-05-27T09:20:44 | 130,115,633 | 0 | 0 | MIT | 2019-06-18T13:40:56 | 2018-04-18T20:02:52 | Python | UTF-8 | Python | false | false | 2,968 | py | from modelexp.models.sas import SAXSModel
from fortSAS import superball_cs_coupled
from numpy.polynomial.hermite import hermgauss
from numpy.polynomial.legendre import leggauss
class SuperballCSCoupled(SAXSModel):
def initParameters(self):
self.params.add('particleSize', 100)
self.params.add('d', 20)
self.params.add('pVal', 2.3)
self.params.add('sldCore', 40e-6)
self.params.add('sldShell', 8e-6)
self.params.add('sldSolvent', 10e-6)
self.params.add('sigParticleSize', 0.)
self.params.add('i0', 1)
self.params.add('bg', 1e-6)
self.params.add('orderHermite', 20)
self.params.add('orderLegendre', 20)
self.addConstantParam('orderHermite')
self.addConstantParam('orderLegendre')
def initMagneticParameters(self):
self.params.add('magSldCore', 5e-6, min=0)
self.params.add('magSldShell', 0, min=0, vary=False)
self.params.add('magSldSolvent', 0, vary=False)
self.addConstantParam('magSldShell')
self.addConstantParam('magSldSolvent')
def calcModel(self):
self.x_herm, self.w_herm = hermgauss(int(self.params['orderHermite']))
self.x_leg, self.w_leg = leggauss(int(self.params['orderLegendre']))
self.I = self.params['i0'] * superball_cs_coupled.formfactor(
self.q,
self.params['particleSize'],
self.params['d'],
self.params['pVal'],
self.params['sldCore'],
self.params['sldShell'],
self.params['sldSolvent'],
self.params['sigParticleSize'],
self.x_herm, self.w_herm, self.x_leg, self.w_leg
) + self.params['bg']
self.r, self.sld = superball_cs_coupled.sld(
self.params['particleSize'],
self.params['d'],
self.params['sldCore'],
self.params['sldShell'],
self.params['sldSolvent']
)
def calcMagneticModel(self):
self.x_herm, self.w_herm = hermgauss(int(self.params['orderHermite']))
self.x_leg, self.w_leg = leggauss(int(self.params['orderLegendre']))
self.I = self.params['i0'] * superball_cs_coupled.magnetic_formfactor(
self.q,
self.params['particleSize'],
self.params['d'],
self.params['pVal'],
self.params['sldCore'],
self.params['sldShell'],
self.params['sldSolvent'],
self.params['sigParticleSize'],
self.params['magSldCore'],
self.params['magSldShell'],
self.params['magSldSolvent'],
self.params['xi'],
self.params['sin2alpha'],
self.params['polarization'],
self.x_herm, self.w_herm, self.x_leg, self.w_leg
) + self.params['bg']
self.r, self.sld = superball_cs_coupled.sld(
self.params['particleSize'],
self.params['d'],
self.params['sldCore'],
self.params['sldShell'],
self.params['sldSolvent']
)
self.rMag, self.sldMag = superball_cs_coupled.sld(
self.params['particleSize'],
self.params['d'],
self.params['magSldCore'],
self.params['magSldShell'],
self.params['magSldSolvent'],
)
| [
"[email protected]"
] | |
ee25728090ed11d3d00c80b6eeb6145349a3b9ef | 3e6589ecd807f61ff27afdca74d79f53d2f3af68 | /tests/client/test_proxy.py | d9894a0f8dc75f64b3918d24083dddee5aa74f69 | [
"BSD-2-Clause"
] | permissive | elacuesta/python-scrapinghub | 27e0cbc90f188b586f8e3e79e537e7183b4d8f67 | 06fe95cb9cb71a41571bc05f58e5e5e8ea78b040 | refs/heads/master | 2023-06-23T05:25:34.121179 | 2022-03-10T13:54:51 | 2022-03-10T13:54:51 | 125,394,178 | 0 | 0 | NOASSERTION | 2022-03-10T11:13:25 | 2018-03-15T16:15:02 | Python | UTF-8 | Python | false | false | 1,854 | py | import mock
import pytest
from scrapinghub.client.proxy import _format_iter_filters
from scrapinghub.client.proxy import _ItemsResourceProxy
def test_format_iter_filters():
# work with empty params
assert _format_iter_filters({}) == {}
# doesn't affect other params
params = {'a': 123, 'b': 456}
assert _format_iter_filters(params) == params
# pass filter as-is if not list
params = {'filter': 'some-string'}
assert _format_iter_filters(params) == params
# work fine with empty filter
params = {'filter': []}
assert _format_iter_filters(params) == params
# pass string filters as-is
params = {'filter': ['str1', 'str2']}
assert _format_iter_filters(params) == params
# converts list-formatted filters
params = {'filter': [['field', '>=', ['val']], 'filter2']}
assert (_format_iter_filters(params) ==
{'filter': ['["field", ">=", ["val"]]', 'filter2']})
# works the same with tuple entries
params = {'filter': [('field', '==', ['val'])]}
assert (_format_iter_filters(params) ==
{'filter': ['["field", "==", ["val"]]']})
# exception if entry is not list/tuple or string
with pytest.raises(ValueError):
_format_iter_filters({'filter': ['test', 123]})
def test_item_resource_iter_no_params():
items_proxy = _ItemsResourceProxy(mock.Mock, mock.Mock(), 'mocked_key')
items_proxy._origin = mock.Mock()
items_proxy.iter(count=123)
assert items_proxy._origin.list.call_args == mock.call(None, count=123)
def test_item_resource_iter_with_params():
items_proxy = _ItemsResourceProxy(mock.Mock, mock.Mock(), 'mocked_key')
items_proxy._origin = mock.Mock()
items_proxy.iter(count=123, startts=12345)
assert (items_proxy._origin.list.call_args ==
mock.call(None, count=123, startts=12345))
| [
"[email protected]"
] | |
d9d55a72e98e29e4039f89f49bac5a69f4cc2204 | 98c6ea9c884152e8340605a706efefbea6170be5 | /examples/data/Assignment_5/ddnami001/question2.py | ceb2fdb7f0f9cf771a033796bc3dadfd254515cb | [] | no_license | MrHamdulay/csc3-capstone | 479d659e1dcd28040e83ebd9e3374d0ccc0c6817 | 6f0fa0fa1555ceb1b0fb33f25e9694e68b6a53d2 | refs/heads/master | 2021-03-12T21:55:57.781339 | 2014-09-22T02:22:22 | 2014-09-22T02:22:22 | 22,372,174 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,078 | py | #Amitha Doodnath
#DDNAMI001
#14/04/2014
#Programme to simulate a vending machine
cost=eval(input("Enter the cost (in cents):\n"))
moneyIn=0
while moneyIn < cost:
deposit=eval(input("Deposit a coin or note (in cents):\n"))
moneyIn+=deposit
change=moneyIn-cost
#print(change)
dollar=0
c25=0
c10=0
c5=0
c1=0
#determines amount of change and in which denominations
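#worked example (added note): a cost of 65c with a 100c deposit gives 35c change, returned as 1 x 25c + 1 x 10c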
while change>0:
print("Your change is:")
if change/100 >=1:
dollar=change//100
change=change-(dollar*100)
if change/25 >=1:
c25=change//25
change=change-(c25*25)
if change/10 >=1:
c10=change//10
change=change-(c10*10)
if change/5 >=1:
        c5=change//5
change=change-(c5*5)
if change/1 >=1:
c1=change//1
change=change-(c1*1)
#prints denomination of change
if dollar:
print(dollar,"x $1")
if c25:
print(c25,"x 25c")
if c10:
print(c10,"x 10c")
if c5:
print(c5,"x 5c")
if c1:
print(c1,"x 1c") | [
"[email protected]"
] | |
951aee3d2c72976bf9b533837c9957d5641c85bb | 2ef5b78a1a750ee33d86f36bba176796163e3933 | /demo26/scripts/generate_test_datas.py | f7b0284f2b57846a453c8b0b09421163ac2ab3c4 | [] | no_license | LIZEJU/flask-demo | 08f8419757dc4902239b89b3df9ea71ce918ad26 | 6ae201e3cc078b7a3fd18fb6d114b0b83f1b4b41 | refs/heads/master | 2020-09-27T05:58:09.742198 | 2020-01-30T11:04:41 | 2020-01-30T11:04:41 | 226,445,858 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,938 | py | # -*- coding:utf-8 -*-
# Author: 李泽军
# Date: 2020/1/26 10:07 PM
# Project: flask-demo
import os
import json
from random import randint
from faker import Faker
from simpledu.modes import db , User , Course, Chapter
# Create the Faker factory object
fake = Faker()
# Generate one teacher user
def iter_users():
yield User(
username='jack lee',
email='[email protected]',
password='111111',
job='研发工程师',
)
# Read course data from the datas directory, create Course records, and set jack lee as their teacher
def iter_courses():
author = User.query.filter_by(username='jack lee').first()
with open(os.path.join(os.path.dirname(__file__),'..','datas','courses.json')) as f :
courses = json.load(f)
for course in courses:
yield Course(
name=course['name'],
description=course['description'],
image_url=course['image_url'],
author=author
)
def iter_chapters():
for course in Course.query:
        # Generate 2-10 chapters for each course
for i in range(randint(2,10)):
yield Chapter(
                # Use faker to generate a sentence as the chapter name
name=fake.sentence(),
course=course,
                # Every chapter points to the same sample video taken from a Shiyanlou course
video_url='https://labfile.oss.aliyuncs.com/courses/923/week2_mp4/2-1-1-mac.mp4',
                # Video duration
video_duration='{}:{}'.format(randint(10,30),randint(1,59))
)
def run():
for user in iter_users():
print(user)
db.session.add(user)
for course in iter_courses():
db.session.add(course)
for chapter in iter_chapters():
db.session.add(chapter)
try:
db.session.commit()
except Exception as e:
print(e)
db.session.rollback()
if __name__ == '__main__':
run() | [
"[email protected]"
] | |
95b7c972313c50df40993634568ef6963b13af42 | 89148db614d4b982b9bc2cbcc0773bfa8b7ff786 | /tests/addProductTest.py | 521f04391cdec153f399832602fc0c3e63a14750 | [] | no_license | vaish222/mfscrmTest | e9a8cd1cffd0cef71ed16c191b54044d553c7520 | b019c1e8b4167aeef1d2855b5ab976bba906ea2b | refs/heads/master | 2020-04-06T19:50:17.415576 | 2018-11-24T18:00:24 | 2018-11-24T18:00:24 | 157,751,408 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,187 | py | #Vaishali Goel
import unittest
import time
from selenium import webdriver
class addProductTest(unittest.TestCase):
def setUp(self):
self.driver = webdriver.Chrome()
def test_add_product(self):
user = "instructor"
pwd = "instructor1a"
driver = self.driver
driver.maximize_window()
driver.get("https://foodserviceapp.herokuapp.com/accounts/login/")
time.sleep(1)
# elem = driver.find_element_by_xpath("/html/body/section[1]/div/div/div/div/div/p[2]/a[1]").click()
# time.sleep(3)
elem = driver.find_element_by_id("id_username")
elem.send_keys(user)
elem = driver.find_element_by_id("id_password")
elem.send_keys(pwd)
# Click Login button
elem = driver.find_element_by_xpath("/html/body/div/div/div/form/input[2]").click()
assert "Logged in"
time.sleep(1)
# Click View details under Product
elem = driver.find_element_by_xpath("/html/body/div/div/div/div[2]/div/div/div/div/div[3]/div/div/p[2]/a").click()
time.sleep(1)
# Click Add Product button
elem = driver.find_element_by_xpath("/html/body/div/div/div/div[3]/div/a/span").click()
time.sleep(1)
elem = driver.find_element_by_id("id_cust_name")
elem.send_keys("Barbara York")
elem = driver.find_element_by_id("id_product")
elem.send_keys("Box Lunch - Turkey on Wheat")
elem = driver.find_element_by_id("id_p_description")
elem.send_keys("Box Lunch - Turkey on Wheat bread. Apple or Orange and Chips Cookie")
elem = driver.find_element_by_id("id_quantity")
elem.send_keys("9")
# elem = driver.find_element_by_id("id_pickup_time")
# elem.send_keys("2018-11-09 17:31:26")
# time.sleep(1)
elem = driver.find_element_by_id("id_charge")
elem.send_keys("60")
# Click Save
elem = driver.find_element_by_xpath("/html/body/div/div/div/form/button").click()
time.sleep(1)
assert "Added Product successfully"
def tearDown(self):
self.driver.close()
if __name__ == "__main__":
unittest.main()
| [
"[email protected]"
] | |
4314ae635541aafeba9d42e4454dcc044959abd0 | 8d8df0b09ea14903fec5cf0de0def074ab16c9b8 | /myapi/migrations/0001_initial.py | 2aef52ee7cb3a26bb1aa948309932ee7013dd0ff | [] | no_license | bhupender009sharma/milk-distributor | f90d47f2914d0559b958df0819fc13ced69788db | b5883ac6c528de0795881515492b20f0586c6cdd | refs/heads/main | 2023-07-09T20:49:22.529473 | 2021-08-12T10:47:00 | 2021-08-12T10:47:00 | 395,286,449 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,796 | py | # Generated by Django 3.2.6 on 2021-08-03 05:50
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Customers',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('user_id', models.CharField(blank=True, max_length=32)),
('name', models.CharField(blank=True, max_length=100)),
('mobile', models.PositiveBigIntegerField(blank=True)),
('address', models.CharField(blank=True, max_length=300)),
('pincode', models.PositiveIntegerField(blank=True)),
('type_of_customer', models.CharField(choices=[('individual', 'individual'), ('professional', 'professional')], default='individual', max_length=100)),
('is_active', models.BooleanField(default=True)),
],
options={
'verbose_name_plural': 'Customers',
},
),
migrations.CreateModel(
name='DistributionRequired',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('type_of_milk', models.CharField(choices=[('cow', 'cow'), ('buffalo', 'buffalo')], default='cow', max_length=100)),
('price', models.FloatField(default=0)),
('unit', models.CharField(choices=[('litre', 'litre'), ('kilogram', 'kilogram')], default='litre', max_length=100)),
('time_of_delivery', models.CharField(choices=[('morning', 'morning'), ('evening', 'evening')], default='morning', max_length=100)),
('customers', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='myapi.customers')),
],
options={
'verbose_name_plural': 'Distribution_Required',
},
),
migrations.CreateModel(
name='DailyDistribution',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('time_period', models.CharField(choices=[('morning', 'morning'), ('evening', 'evening')], default='morning', max_length=100)),
('quantity', models.FloatField(default=0)),
('delivered_at', models.DateField(default=0)),
('customers', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='myapi.customers')),
],
options={
'verbose_name_plural': 'Daily_Distribution',
},
),
]
| [
"[email protected]"
] | |
31a3234f46c1c3f2fe7b6251c7cf41483b7f6a65 | 8d3af1282c04bfdd4e8684474be6cd4a539ceb81 | /Homeworks/HW-1.py | fc18ad8f9b50e2ecffa86c5ad657af34adef6bd6 | [] | no_license | sriniavireddy/UCSD_BigData_2016 | 5c7fbbaabf6f3ffd9880eefeb3b2099dc0731d93 | fcd28181f49ceb8c08fee76ee95a1053c5bcbddf | refs/heads/master | 2021-01-18T01:10:55.146986 | 2016-05-15T05:17:11 | 2016-05-15T05:17:11 | 56,786,119 | 0 | 0 | null | 2016-05-15T05:17:12 | 2016-04-21T15:40:53 | Jupyter Notebook | UTF-8 | Python | false | false | 1,603 | py | #Name: Srinivas Avireddy
# Email: [email protected]
# PID: A53101356
from pyspark import SparkContext
sc = SparkContext()
import re
import string
textRDD = sc.newAPIHadoopFile('/data/Moby-Dick.txt',
'org.apache.hadoop.mapreduce.lib.input.TextInputFormat',
'org.apache.hadoop.io.LongWritable',
'org.apache.hadoop.io.Text',
conf={'textinputformat.record.delimiter': "\r\n\r\n"}) \
.map(lambda x: x[1])
sentences=textRDD.flatMap(lambda x: x.split(". "))
def printOutput(n,freq_ngramRDD):
top=freq_ngramRDD.take(5)
print '\n============ %d most frequent %d-grams'%(5,n)
print '\nindex\tcount\tngram'
for i in range(5):
print '%d.\t%d: \t"%s"'%(i+1,top[i][0]," ".join(top[i][1]))
def removePunctuation(line):
regex = re.compile('[%s]' % re.escape(string.punctuation))
out = regex.sub(' ', line)
return out
def ngram(line,n):
line = removePunctuation(line.lower())
array = line.split()
ngrams = []
for i in xrange(0,len(array)-n+1):
ngrams.append(tuple(array[i:i+n]))
return ngrams
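# Quick sanity check (added illustration, not part of the original submission):
# ngram("The cat sat on the mat.", 2)[:2] -> [('the', 'cat'), ('cat', 'sat')]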
for n in range(1,6):
# Put your logic for generating the sorted n-gram RDD here and store it in freq_ngramRDD variable
RDD = sentences.flatMap(lambda sentence: ngram(sentence,n))\
.map(lambda ngram: (ngram,1))\
.reduceByKey(lambda x,y:x+y)\
.map(lambda (c,v):(v,c))\
.sortByKey(False)
#print RDD.take(5)
printOutput(n,RDD)
| [
"[email protected]"
] | |
9e39ab3cc176f1ba7c324cf05e3fffd74413b407 | 4e7617ea978782741c85ef4beb20e21eb39fbec1 | /Test4.py | 0418729f17d3c14348c1b084e7c4b0912b3d9bdd | [] | no_license | Pattaraponggg/Programming | 2c4f259b1d4aed25346c939ecdc954e7040ee789 | 0ed56faac07f65bb25f4c0b59ab3e659722895e6 | refs/heads/master | 2020-06-12T19:39:02.905181 | 2019-10-18T16:19:24 | 2019-10-18T16:19:24 | 194,404,521 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 569 | py | #!/usr/bin/env python
# coding: utf-8
# In[4]:
#4. Write a looping program that uses the for statement to print the multiplication table of 2
for k in range(1, 13):
    print(2, "*", k, "=", 2 * k)
# In[ ]:
| [
"[email protected]"
] | |
05d47706d13e2620da6a5a875214046f5759297f | 866dee1b3d01b863c31332ec81330d1b5ef5c6fa | /openquake.hazardlib/openquake/hazardlib/gsim/drouet_2015_brazil.py | 0167472402875787bd260b3108f6df2766043010 | [
"MIT",
"AGPL-3.0-only"
] | permissive | rainzhop/ConvNetQuake | 3e2e1a040952bd5d6346905b83f39889c6a2e51a | a3e6de3f7992eac72f1b9883fec36b8c7fdefd48 | refs/heads/master | 2020-08-07T16:41:03.778293 | 2019-11-01T01:49:00 | 2019-11-01T01:49:00 | 213,527,701 | 0 | 0 | MIT | 2019-10-08T02:08:00 | 2019-10-08T02:08:00 | null | UTF-8 | Python | false | false | 10,453 | py | # -*- coding: utf-8 -*-
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright (C) 2015-2016 GEM Foundation
#
# OpenQuake is free software: you can redistribute it and/or modify it
# under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# OpenQuake is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with OpenQuake. If not, see <http://www.gnu.org/licenses/>.
"""
Module exports :class:`DrouetBrazil2015`
:class:`DrouetBrazil2015_with_depth`
"""
from __future__ import division
import numpy as np
from openquake.hazardlib.gsim.base import CoeffsTable, GMPE
from openquake.hazardlib import const
from openquake.hazardlib.imt import PGA, PGV, SA
from scipy.constants import g
class DrouetBrazil2015(GMPE):
"""
Implements GMPE developed by S. Drouet unpublished for Brazil based on the
method described in Douet & Cotton (2015) BSSA doi: 10.1785/0120140240.
"""
#: Supported tectonic region type is stable continental crust given that
#: the equations have been derived for Eastern North America.
DEFINED_FOR_TECTONIC_REGION_TYPE = const.TRT.STABLE_CONTINENTAL
#: Supported intensity measure types are spectral acceleration,
#: and peak ground acceleration, see table 6, page 1022 (PGA is assumed
#: to be equal to SA at 0.01 s)
DEFINED_FOR_INTENSITY_MEASURE_TYPES = set([
PGA,
PGV,
SA
])
#: Supported intensity measure component is the geometric mean of
#two : horizontal components
#:attr:`~openquake.hazardlib.const.IMC.AVERAGE_HORIZONTAL`,
DEFINED_FOR_INTENSITY_MEASURE_COMPONENT = const.IMC.AVERAGE_HORIZONTAL
#: Supported standard deviation type is only total, see equation 35, page
#: 1021
DEFINED_FOR_STANDARD_DEVIATION_TYPES = set([
const.StdDev.TOTAL,
const.StdDev.INTER_EVENT,
const.StdDev.INTRA_EVENT
])
#: No site parameters are needed
REQUIRES_SITES_PARAMETERS = set()
#: Required rupture parameter is only magnitude, see equation 30 page
#: 1021.
REQUIRES_RUPTURE_PARAMETERS = set(('mag', ))
#: Required distance measure is closest distance to rupture, see equation
#: 30 page 1021.
REQUIRES_DISTANCES = set(('rjb', ))
def get_mean_and_stddevs(self, sites, rup, dists, imt, stddev_types):
"""
See :meth:`superclass method
<.base.GroundShakingIntensityModel.get_mean_and_stddevs>`
for spec of input and result values.
"""
#assert all(stddev_type in self.DEFINED_FOR_STANDARD_DEVIATION_TYPES
# for stddev_type in stddev_types)
C = self.COEFFS[imt]
mean = self._compute_mean(C, rup, dists.rjb)
if isinstance(imt, (SA, PGA)): # Convert from m/s**2 to g
mean -= np.log(g)
elif isinstance(imt, PGV): # Convert from m/s to cm/s
mean += np.log(100.0)
stddevs = self._get_stddevs(C, stddev_types, rup.mag,
dists.rjb.shape)
return mean, stddevs
def _compute_mean(self, C, rup, rjb):
"""
Compute mean value according to equation 30, page 1021.
"""
mean = (C['c1'] +
self._compute_magnitude_term(C, rup) +
self._compute_distance_term(C, rup, rjb))
return mean
def _get_stddevs(self, C, stddev_types, mag, num_sites):
"""
Return total standard deviation as for equation 35, page 1021.
"""
stddevs = []
for stddev_type in stddev_types:
assert stddev_type in self.DEFINED_FOR_STANDARD_DEVIATION_TYPES
if stddev_type == const.StdDev.TOTAL:
sigma_t = np.sqrt(C['sigma'] ** 2. + C['tau'] ** 2.)
stddevs.append(sigma_t + np.zeros(num_sites))
elif stddev_type == const.StdDev.INTRA_EVENT:
stddevs.append(C['sigma'] + np.zeros(num_sites))
elif stddev_type == const.StdDev.INTER_EVENT:
stddevs.append(C['tau'] + np.zeros(num_sites))
return stddevs
def _compute_magnitude_term(self, C, rup):
"""
This computes the term f1 equation 8 Drouet & Cotton (2015)
"""
return C['c2'] * (rup.mag - 8.0) + C['c3'] * (rup.mag - 8.0) ** 2
def _compute_distance_term(self, C, rup, rjb):
"""
This computes the term f2 equation 8 Drouet & Cotton (2015)
"""
return (C['c4'] + C['c5'] * rup.mag) * np.log(
np.sqrt(rjb ** 2. + C['c6'] ** 2.)) + C['c7'] * rjb
#: Coefficient tables are constructed from the electronic suplements of
#: the original paper.
COEFFS = CoeffsTable(sa_damping=5, table="""\
IMT c1 c2 c3 c4 c5 c6 c7 sigma tau
pgv 0.457169 0.230048 -0.140795 -1.785544 0.162408 7.201555 -0.002832 0.526166 0.393025
pga 2.575109 -0.243140 -0.155164 -1.807995 0.156084 7.629410 -0.003996 0.625075 0.481226
0.010 2.586201 -0.242271 -0.154703 -1.808695 0.156141 7.623170 -0.004020 0.630187 0.481707
0.020 2.717172 -0.261739 -0.152580 -1.859646 0.160136 7.773640 -0.004048 0.688591 0.486003
0.030 2.930319 -0.236637 -0.147729 -1.831982 0.155727 7.918015 -0.004558 0.751663 0.490319
0.040 3.156209 -0.266436 -0.143032 -1.915769 0.162181 8.224381 -0.004350 0.779379 0.498289
0.050 3.268731 -0.189794 -0.140262 -1.753521 0.144779 7.767181 -0.004910 0.780218 0.497182
0.075 3.352582 -0.164010 -0.140254 -1.691894 0.140095 7.428414 -0.004756 0.706350 0.493511
0.100 3.455122 -0.203575 -0.148680 -1.708867 0.139700 7.707583 -0.004261 0.652456 0.490010
0.150 3.456514 -0.169395 -0.160434 -1.607720 0.131021 7.274064 -0.004025 0.587130 0.480912
0.200 3.480893 -0.155262 -0.168476 -1.646459 0.132556 7.424609 -0.002871 0.556933 0.462619
0.250 3.358985 -0.255601 -0.194574 -1.669187 0.134462 7.753731 -0.002732 0.533650 0.458696
0.300 3.115954 -0.237559 -0.215762 -1.451276 0.114182 7.212529 -0.003761 0.526336 0.452876
0.400 2.806835 -0.340296 -0.250121 -1.418762 0.114054 6.837724 -0.004081 0.519411 0.440790
0.500 2.837393 -0.355473 -0.271003 -1.453916 0.111753 7.298391 -0.003037 0.512892 0.427910
0.750 2.383076 -0.374649 -0.298428 -1.472297 0.117984 7.051676 -0.002899 0.507442 0.405868
1.000 2.070536 -0.263869 -0.303220 -1.410898 0.117144 6.815268 -0.003307 0.511352 0.384417
1.250 1.944386 -0.196142 -0.309115 -1.408815 0.116519 6.904435 -0.003017 0.511909 0.376152
1.500 1.973072 -0.160616 -0.313180 -1.493457 0.122469 7.427893 -0.002316 0.511871 0.370833
1.750 1.747518 -0.129961 -0.320672 -1.400692 0.116855 7.143261 -0.003402 0.508641 0.361738
2.000 1.667278 -0.083863 -0.319818 -1.405853 0.114769 7.128404 -0.003174 0.505025 0.353357
3.000 1.292331 0.312316 -0.263539 -1.464213 0.130085 6.416692 -0.002621 0.512370 0.344082
""")
class DrouetBrazil2015withDepth(DrouetBrazil2015):
"""
Implements GMPE developed by S. Drouet unpublished for Brazil based on the
method described in Douet & Cotton (2015) BSSA doi: 10.1785/0120140240.
Model with magnitude-dependent depth distribution and depth-dependent
stress distribution
"""
#: Required rupture parameter is only magnitude, see equation 30 page
#: 1021.
REQUIRES_RUPTURE_PARAMETERS = set(('mag', 'hypo_depth'))
def _compute_distance_term(self, C, rup, rjb):
"""
This computes the term f2 equation 8 Drouet & Cotton (2015)
"""
return (C['c4'] + C['c5'] * rup.mag) * np.log(
np.sqrt(rjb ** 2. + rup.hypo_depth ** 2.)) + C['c6'] * rjb
#: Coefficient tables are constructed from the electronic supplements of
#: the original paper.
COEFFS = CoeffsTable(sa_damping=5, table="""\
IMT c1 c2 c3 c4 c5 c6 sigma tau
pgv 1.296890 0.307642 -0.155927 -1.727495 0.128331 -0.002079 0.507600 0.474106
pga 3.393123 -0.139286 -0.169796 -1.695729 0.116686 -0.003491 0.609317 0.573384
0.010000 3.405471 -0.138584 -0.169352 -1.696947 0.116766 -0.003513 0.614578 0.574138
0.020000 3.512058 -0.152120 -0.167461 -1.725659 0.119114 -0.003688 0.674715 0.580870
0.030000 3.695045 -0.123957 -0.162204 -1.688566 0.114624 -0.004259 0.738161 0.585416
0.040000 3.856703 -0.140850 -0.157819 -1.721169 0.117496 -0.004400 0.768733 0.597967
0.050000 4.085005 -0.080044 -0.155124 -1.628430 0.103907 -0.004434 0.766573 0.593676
0.075000 4.242841 -0.056117 -0.154044 -1.601802 0.100823 -0.004025 0.691819 0.587975
0.100000 4.278594 -0.087360 -0.162257 -1.582776 0.098667 -0.003803 0.638952 0.582946
0.150000 4.358265 -0.063108 -0.173452 -1.530628 0.092988 -0.003244 0.572059 0.565676
0.200000 4.385790 -0.041305 -0.181717 -1.553364 0.092537 -0.002134 0.539692 0.543338
0.250000 4.162408 -0.135372 -0.207677 -1.531599 0.092853 -0.002369 0.520273 0.534858
0.300000 3.991746 -0.134064 -0.227479 -1.386035 0.078380 -0.002944 0.511192 0.519484
0.400000 3.746791 -0.248433 -0.261965 -1.396553 0.080903 -0.002965 0.502079 0.499394
0.500000 3.728968 -0.246365 -0.282618 -1.384885 0.074963 -0.002185 0.495203 0.483281
0.750000 3.298203 -0.278081 -0.310518 -1.431361 0.083594 -0.001906 0.488881 0.452970
1.000000 2.966504 -0.185734 -0.315139 -1.405592 0.087333 -0.002169 0.492014 0.426399
1.250000 2.810007 -0.117505 -0.320881 -1.394705 0.086834 -0.001981 0.493565 0.416078
1.500000 2.760153 -0.068911 -0.325231 -1.426076 0.089344 -0.001624 0.494063 0.410019
1.750000 2.538491 -0.052014 -0.332247 -1.367936 0.087458 -0.002541 0.491032 0.398572
2.000000 2.485648 -0.001655 -0.331560 -1.370772 0.084286 -0.002297 0.487175 0.392599
3.000000 2.205128 0.365128 -0.276508 -1.504904 0.104957 -0.001304 0.492143 0.384311
""")
| [
"[email protected]"
] | |
ff0707ebc435e47ab9158e5c3e2aa41285ec26d8 | ead629ba15f3e3c314edf239cb6aeed45ddb2f61 | /lianjia/lianjia/spiders/lianjiaSz.py | df3dc74cab372e77720f2fb27f4ce48debf451eb | [] | no_license | cqu1012/WebSpider | 2b0bfef52f92886fa2e6b018d7e0eb03178af015 | 5e83542241ead79864ca6402849e2d3dc25d7c0a | refs/heads/master | 2021-10-22T08:11:15.101032 | 2019-03-09T07:05:43 | 2019-03-09T07:05:43 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,601 | py | # -*- coding: utf-8 -*-
import scrapy
import re
#使用分布式写法
from lianjia.items import LianjiaItem
from scrapy_redis.spiders import RedisSpider
#lpush lianjia:start_urls https://sz.lianjia.com/zufang/pg1rt200600000002/
#set lianjia:start_urls https://sz.lianjia.com/zufang/pg1rt200600000002/
class LianjiaszSpider(RedisSpider):
name = 'lianjiaSz'
allowed_domains = ['sz.lianjia.com']
redis_key = 'lianjia:start_urls'
index = 2
# start_urls = ['https://sz.lianjia.com/zufang/pg{}rt200600000002/'.format(i) for i in range(1)]
def parse(self, response):
base_url = 'https://sz.lianjia.com/zufang/pg{}rt200600000002/'
url_list = response.xpath("//div[@class='content__list--item--main']/p[1]/a/@href").extract()
for url in url_list:
#过滤公寓类型房屋
if re.match(r'/zufang/',url):
yield scrapy.Request(response.urljoin(url),callback=self.getInfo)
yield scrapy.Request(base_url.format(self.index),callback=self.parse)
self.index += 2
def getInfo(self,response):
#租房标题
title = response.xpath("//p[@class='content__title']/text()").extract_first()
#租房价格
price = response.xpath("string(//p[@class='content__aside--title'])").extract_first()
#地址
location = response.xpath("string(//div[@class= 'content__article__info4']/ul/li[1])").extract_first().strip().replace('\n ','')
#租房信息
base_Info = response.xpath("//p[@class='content__article__table']")
rent_way = base_Info.xpath("./span[1]/text()").extract_first()
rent_type = base_Info.xpath("./span[2]/text()").extract_first()
rent_area = base_Info.xpath("./span[3]/text()").extract_first()
rent_dire = base_Info.xpath("./span[4]/text()").extract_first()
#楼层与电梯
floor = response.xpath("//div[@class='content__article__info']/ul/li[8]/text()").extract_first()
elevator = response.xpath("//div[@class='content__article__info']/ul/li[last()-8]/text()").extract_first()
if elevator == None:
elevator='暂无数据'
if floor == None:
floor ='暂无数据'
items = LianjiaItem()
items['title'] = title
items['price'] = price
items['location'] = location
items['rent_way'] = rent_way
items['rent_type'] = rent_type
items['rent_area'] = rent_area
items['rent_dire'] = rent_dire
items['floor'] = floor
items['elevator'] = elevator
yield items
| [
"[email protected]"
] | |
3168b0f779902de00ad73ac085d18405f5b646c6 | dbc755cca2f10aa669c2b770e5e38611575031c1 | /example/updated/behaviors.py | 50669596a636ac037d840ce6efddb50671f0078a | [] | no_license | zenweasel/django-model-behaviors-example | 88345ec806ace9e625e84d8ae3e1c6e359d9702c | d0038a9f7947c2591fecfa98d0494547eaec937d | refs/heads/master | 2020-12-31T03:41:03.699193 | 2013-06-01T06:42:42 | 2013-06-01T06:42:42 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,728 | py | from django.db import models
from django.contrib.auth.models import User
from model_utils.managers import PassThroughManager
from .querysets import PublishableQuerySet, AuthorableQuerySet
class Permalinkable(models.Model):
slug = models.SlugField()
class Meta:
abstract = True
def get_url_kwargs(self, **kwargs):
kwargs.update(getattr(self, 'url_kwargs', {}))
return kwargs
@models.permalink
def get_absolute_url(self):
url_kwargs = self.get_url_kwargs(slug=self.slug)
return (self.url_name, (), url_kwargs)
def pre_save(self, instance, add):
from django.utils.text import slugify
if not instance.slug:
instance.slug = slugify(self.slug_source)
class Publishable(models.Model):
publish_date = models.DateTimeField(null=True)
class Meta:
abstract = True
objects = PassThroughManager.for_queryset_class(PublishableQuerySet)()
def publish_on(self, date=None):
from django.utils import timezone
if not date:
date = timezone.now()
self.publish_date = date
self.save()
@property
def is_published(self):
from django.utils import timezone
return self.publish_date and self.publish_date < timezone.now()
class Authorable(models.Model):
author = models.ForeignKey(User, null=True)
class Meta:
abstract = True
objects = PassThroughManager.for_queryset_class(AuthorableQuerySet)()
class Timestampable(models.Model):
create_date = models.DateTimeField(auto_now_add=True)
modified_date = models.DateTimeField(auto_now=True)
class Meta:
abstract = True
| [
"[email protected]"
] | |
d7e2a9c33bc7b4f1705fbbd5194992afd68f6127 | 236f1e8fb21ede4d72b5e397786945582eab915a | /timelogger/urls.py | 6df4c27b16efa06ec0a72c27d20fedb541169cbf | [] | no_license | shotijohan/time-logging | 24ab044ed780c5e34b61a090db902f9ed323a132 | d7261064a0a26cfb81e8ff5e044980fac6f70c13 | refs/heads/master | 2020-07-14T00:41:27.830399 | 2017-06-27T05:14:19 | 2017-06-27T05:14:19 | 94,295,322 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 817 | py | from django.conf.urls import url
from .views import Login, TimeInTimeOutHandler
from .views import Register
from .views import Logout
from .views import API
urlpatterns = [
url(r'^$', Login.as_view(), name='index'),
url(r'^register', Register.as_view(), name='register'),
url(r'^register-member', Register.as_view(), name='register-member'),
url(r'^login', Login.as_view(), name='login'),
url(r'^logout', Logout.as_view(), name='login'),
url(r'^timer-start-end', TimeInTimeOutHandler.as_view(), name="timer-start-end"),
url(r'^api/v1/timeintimeout', API.as_view(), name="API")
# url(r'^$',
# ListView.as_view(queryset=User_accounts.objects.all().order_by("-created_date")[:25],
# template_name="timelogger/home.html"
# )),
]
| [
"[email protected]"
] | |
edd3d69bf4477b28424f4f09443d1a6f4b00fafe | 05e6c528cb252245b4e41bb39d010b427d002746 | /tests/test_topology.py | 88d9e9116b40cef765b9edee55abdedc04d54f3a | [] | no_license | gdaneels/6tisch-new-ReSF | b529cb5f2b5efbf8e9c851c449fc01770a290a36 | e669bddf2c932326b82b372b6eb7a072a84fb938 | refs/heads/main | 2022-12-29T21:05:50.053671 | 2020-10-18T16:49:47 | 2020-10-18T16:49:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 583 | py | """
\brief Tests for TopologyCreator Factory
\author Yasuyuki Tanaka <[email protected]>
"""
import SimEngine.Topology as Topology
def test_create_random_topology_1(settings):
settings()
assert isinstance(Topology.Topology([]), Topology.RandomTopology)
def test_create_random_topology_2(settings):
settings(**{'topology': 'random'})
assert isinstance(Topology.Topology([]), Topology.RandomTopology)
def test_create_linear_topology(settings):
settings(**{'topology': 'linear'})
assert isinstance(Topology.Topology([]), Topology.LinearTopology)
| [
"[email protected]"
] | |
7b69ac95fe4eb4d1b71290b2a73c3f15e71e885a | d571d407cfda435fcab8b7ccadb1be812c7047c7 | /examples/tensorboard/nested.py | b7c6df93916a72fa3dc3b5903a942a8fbc2d13cd | [
"Apache-2.0",
"LicenseRef-scancode-free-unknown"
] | permissive | guildai/guildai | 2d8661a2a6bf0d1ced6334095c8bf5a8e391d8af | 149055da49f57eaf4aec418f2e339c8905c1f02f | refs/heads/main | 2023-08-25T10:09:58.560059 | 2023-08-12T20:19:05 | 2023-08-12T20:19:05 | 105,057,392 | 833 | 86 | Apache-2.0 | 2023-08-07T19:34:27 | 2017-09-27T18:57:50 | Python | UTF-8 | Python | false | false | 350 | py | import tensorboardX
with tensorboardX.SummaryWriter("foo") as w:
w.add_scalar("a", 1.0, 1)
w.add_scalar("a", 2.0, 2)
with tensorboardX.SummaryWriter("foo/bar") as w:
w.add_scalar("a", 3.0, 3)
w.add_scalar("a", 4.0, 4)
with tensorboardX.SummaryWriter("foo/bar/baz") as w:
w.add_scalar("a", 5.0, 5)
w.add_scalar("a", 6.0, 6)
| [
"[email protected]"
] | |
30f9220b765956c3736dcde82c9cc8d27e983f04 | b50f0d3b4a7cac4ad33c23654c330655ed740930 | /pythonx/python_function_expander/ultisnipsaide/px/cursor/__init__.py | 512d278a8b0605a8002271f9b927dd9893883015 | [
"MIT"
] | permissive | ColinKennedy/vim-python-function-expander | 869c8025a948ee48ca8aac758001c4c2b8765218 | 7fbaac0bdb6316177c85284052694175a2819638 | refs/heads/master | 2020-04-04T13:04:28.664537 | 2019-10-07T00:15:57 | 2019-10-07T00:15:57 | 155,947,699 | 22 | 0 | MIT | 2018-11-07T18:36:40 | 2018-11-03T03:50:14 | Python | UTF-8 | Python | false | false | 715 | py | # coding=utf8
import re
import vim
def get():
return from_vim(vim.current.window.cursor)
def set(cursor):
vim.current.window.cursor = to_vim(cursor)
def get_adjusted():
(line, column) = get()
if vim.eval('mode()') != 'i':
return (line, column + 1)
else:
return (line, column)
def to_vim(cursor):
return (cursor[0] + 1, cursor[1])
def to_vim_lang(cursor):
return (cursor[0] + 1, cursor[1] + 1)
def from_vim(cursor):
return (cursor[0] - 1, cursor[1])
def is_between(line, cursor, before, after):
if not re.search(before, line[:cursor[1]+1]):
return False
if not re.search(after, line[cursor[1]:]):
return False
return True
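# Illustrative check (added; cursor is a 0-based (row, column) tuple here):
#   is_between("(x)", (0, 1), r'\(', r'\)')  # True: '(' matches before the cursor, ')' after it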
| [
"[email protected]"
] | |
083a5b0c421eb59429cbdf13f0339347fa57c6c7 | 36d83f9cdc9cea663e9a6bf5478812c49d6d4971 | /new/forms.py | 912faae3195c3da2b41dec0fd3f6655ccd188820 | [] | no_license | Anas-Darwish-SB/django-the-day-news | 9eec63c10a341f8f6dc182aa91a8658a14954b28 | bdb14b704bf0b69f8ff85c643793e6abd9cf88bd | refs/heads/main | 2023-03-24T03:22:24.005753 | 2021-03-16T05:01:16 | 2021-03-16T05:01:16 | 348,074,008 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 264 | py | from django import forms
from .models import News, Category
class NewsForm(forms.ModelForm):
class Meta:
model = News
fields = '__all__'
class CategoryForm(forms.ModelForm):
class Meta:
model = Category
fields = '__all__' | [
"[email protected]"
] | |
47d3e3490fe59455b27844024bf9970b5fa53982 | ae23f3cbe903d7e39c1747c7d2a28cbe5ed8d2bb | /1/Exercise1.py | 1e6dfd5b50447974c592d2802faac57bfc06138e | [] | no_license | steinarc/Sanntidsprogrammering-Exercises | e3b4c2289761d01e971082a1d040ca4484e76c5d | 27f8b2e36aae4bd39e38ed1c914bd3d5426b0dca | refs/heads/master | 2021-01-11T11:18:11.767625 | 2017-03-17T10:15:45 | 2017-03-17T10:15:45 | 78,627,426 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 500 | py | import threading
import time
i = 0
def someThreadFunction1():
global i
for x in range (0,1000000):
i = i + 1
def someThreadFunction2():
global i
for x in range (0, 1000000):
i = i - 1
def main():
global i
someThread1 = threading.Thread(target = someThreadFunction1)
someThread2 = threading.Thread(target = someThreadFunction2)
someThread1.start()
someThread2.start()
someThread1.join()
someThread2.join()
print(i)
main()
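# Note (added for clarity, not part of the original exercise): the unsynchronized
# `i = i + 1` / `i = i - 1` updates race with each other, so the printed value is
# usually not 0; guarding both loops with a shared threading.Lock() makes it 0.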
| [
"[email protected]"
] | |
0f01a2a77659430427724586a974a95a50818b14 | cd336e0b7fbd83bda52c5452cab25a187b4e24a2 | /preview.py | c2b5fbba763eb2a1e43f88273e9cff154661d0ce | [] | no_license | atorman/piCamera | 828a2608f75884e4c84cb4713917044d768f7de4 | 96d57518e78d86722f0063d3da122ef265bcca52 | refs/heads/master | 2023-03-16T06:38:56.988694 | 2021-03-05T20:34:54 | 2021-03-05T20:34:54 | 344,908,035 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 858 | py | # Test script for using a rasberry pi camera
# more here: https://projects.raspberrypi.org/en/projects/getting-started-with-picamera
from picamera import PiCamera, Color
from os import system
import datetime
from time import sleep
import numpy as np
effect = np.array(['solarize', 'emboss', 'colorswap', 'cartoon', 'oilpaint'])
brightness = np.array([50, 60, 70, 80])
contrast = np.array([30, 40, 50])
iso = np.array([100, 200, 400, 800])
camera = PiCamera()
camera.start_preview()
camera.rotation = 180
for x in iso:
camera.iso = x
camera.annotate_text = "ISO: %s" %x
sleep(10)
#for i in effect:
#camera.image_effect = i
#camera.annotate_text = "Effect: %s" % i
#sleep(5)
#for j in brightness:
#camera.brightness = j
#camera.annotate_text = "Brightness: %s" % j
#sleep(2)
camera.stop_preview() | [
"[email protected]"
] | |
27cdb7a3d462e40e7ee9c8f59c8423c6d90c0a1f | a19a5306c29267994f42fc5f3ec5b10c35a8dedf | /lnumber.py | 7b9f61a53cb0de73df37f6c007a8c6fcabe34a09 | [] | no_license | csaranbalaji/Guvi | ec604b093106c007268cad70b26d59deb9d7376e | 559e389d770f33a53e61c4e0ff1aa4f0d61d852b | refs/heads/master | 2021-06-23T07:10:18.253846 | 2017-08-29T13:31:12 | 2017-08-29T13:31:12 | 95,778,759 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 67 | py | s=raw_input().strip().split()
s.sort()
s.reverse()
print ''.join(s) | [
"[email protected]"
] | |
6cb2fd0bc1f295cdc7cc1b00f7b2bfbe9afb356c | d8deb202df3cd4222c642c5c4412652461c4ebb5 | /backend/back_office_15184/settings.py | 5575a47e87ff63b30bc68d351602f6664a202efc | [] | no_license | crowdbotics-apps/back-office-15184 | 1770eb660bd198768a2690bbb58145a70f5f899a | 7dca6056d171e402a7c9f91488a2b815f5077d54 | refs/heads/master | 2023-01-29T13:29:11.195837 | 2020-03-29T16:00:52 | 2020-03-29T16:00:52 | 251,072,394 | 0 | 0 | null | 2023-01-24T01:48:34 | 2020-03-29T16:00:01 | JavaScript | UTF-8 | Python | false | false | 5,803 | py | """
Django settings for back_office_15184 project.
Generated by 'django-admin startproject' using Django 2.2.2.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
import environ
env = environ.Env()
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = env.bool("DEBUG", default=False)
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = env.str("SECRET_KEY")
ALLOWED_HOSTS = env.list("HOST", default=["*"])
SITE_ID = 1
SECURE_PROXY_SSL_HEADER = ("HTTP_X_FORWARDED_PROTO", "https")
SECURE_SSL_REDIRECT = env.bool("SECURE_REDIRECT", default=False)
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.sites'
]
LOCAL_APPS = [
'home',
'users.apps.UsersConfig',
]
THIRD_PARTY_APPS = [
'rest_framework',
'rest_framework.authtoken',
'rest_auth',
'rest_auth.registration',
'bootstrap4',
'allauth',
'allauth.account',
'allauth.socialaccount',
'allauth.socialaccount.providers.google',
'django_extensions',
'drf_yasg',
# start fcm_django push notifications
'fcm_django',
# end fcm_django push notifications
]
INSTALLED_APPS += LOCAL_APPS + THIRD_PARTY_APPS
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'back_office_15184.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'back_office_15184.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
if env.str("DATABASE_URL", default=None):
DATABASES = {
'default': env.db()
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
MIDDLEWARE += ['whitenoise.middleware.WhiteNoiseMiddleware']
AUTHENTICATION_BACKENDS = (
'django.contrib.auth.backends.ModelBackend',
'allauth.account.auth_backends.AuthenticationBackend'
)
STATIC_ROOT = os.path.join(BASE_DIR, "staticfiles")
STATICFILES_DIRS = [
os.path.join(BASE_DIR, 'static')
]
STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'
# allauth / users
ACCOUNT_EMAIL_REQUIRED = True
ACCOUNT_AUTHENTICATION_METHOD = 'email'
ACCOUNT_USERNAME_REQUIRED = False
ACCOUNT_EMAIL_VERIFICATION = "mandatory"
ACCOUNT_CONFIRM_EMAIL_ON_GET = True
ACCOUNT_LOGIN_ON_EMAIL_CONFIRMATION = True
ACCOUNT_UNIQUE_EMAIL = True
LOGIN_REDIRECT_URL = "users:redirect"
ACCOUNT_ADAPTER = "users.adapters.AccountAdapter"
SOCIALACCOUNT_ADAPTER = "users.adapters.SocialAccountAdapter"
ACCOUNT_ALLOW_REGISTRATION = env.bool("ACCOUNT_ALLOW_REGISTRATION", True)
SOCIALACCOUNT_ALLOW_REGISTRATION = env.bool("SOCIALACCOUNT_ALLOW_REGISTRATION", True)
REST_AUTH_SERIALIZERS = {
# Replace password reset serializer to fix 500 error
"PASSWORD_RESET_SERIALIZER": "home.api.v1.serializers.PasswordSerializer",
}
REST_AUTH_REGISTER_SERIALIZERS = {
# Use custom serializer that has no username and matches web signup
"REGISTER_SERIALIZER": "home.api.v1.serializers.SignupSerializer",
}
# Custom user model
AUTH_USER_MODEL = "users.User"
EMAIL_HOST = env.str("EMAIL_HOST", "smtp.sendgrid.net")
EMAIL_HOST_USER = env.str("SENDGRID_USERNAME", "")
EMAIL_HOST_PASSWORD = env.str("SENDGRID_PASSWORD", "")
EMAIL_PORT = 587
EMAIL_USE_TLS = True
# start fcm_django push notifications
FCM_DJANGO_SETTINGS = {
"FCM_SERVER_KEY": env.str("FCM_SERVER_KEY", "")
}
# end fcm_django push notifications
if DEBUG:
# output email to console instead of sending
EMAIL_BACKEND = "django.core.mail.backends.console.EmailBackend"
| [
"[email protected]"
] | |
ccbe2ea2e2f57bac89f8d8c74ec0a7372656992c | d2a70f4228fe42f89d54bb2091741c934d4d5f5a | /entregableDjango/gestionNoticias/admin.py | 5ba6fba33fb59e734717b3c760218c95a4127698 | [] | no_license | morolok/AII | 9aa12e27b1eb9b62aaec8e9f4f56df8a489aa380 | 2bbdc282cf6f6fbc00bbe16470dceb891c132d2a | refs/heads/master | 2023-08-15T08:35:23.407354 | 2021-10-05T19:17:34 | 2021-10-05T19:17:34 | 242,849,953 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 205 | py | from django.contrib import admin
from gestionNoticias.models import Autor, Fuente, Noticia
# Register your models here.
admin.site.register(Autor)
admin.site.register(Fuente)
admin.site.register(Noticia) | [
"[email protected]"
] | |
e8c64dbd9200cd63a46de802d536e53edc756c06 | 22c5fc7dd52149ebd4338a487ae9ab0db0e43f01 | /monai/transforms/utility/dictionary.py | fb3d88a0ba180a732df1b25d2ac37103a9703a2d | [
"Apache-2.0"
] | permissive | precision-medicine-um/MONAI-Deep_Learning | 3d3f547dd9815152561a6853f8d4727b0e5ca4c4 | d94c4d3a2c465717ba3fae01b7acea7fada9885b | refs/heads/master | 2022-12-28T07:04:07.768415 | 2020-10-17T13:11:56 | 2020-10-17T13:11:56 | 305,346,962 | 3 | 0 | Apache-2.0 | 2022-12-27T15:44:13 | 2020-10-19T10:30:07 | Python | UTF-8 | Python | false | false | 21,673 | py | # Copyright 2020 MONAI Consortium
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
A collection of dictionary-based wrappers around the "vanilla" transforms for utility functions
defined in :py:class:`monai.transforms.utility.array`.
Class names end with 'd' to denote dictionary-based transforms.
"""
import copy
import logging
from typing import Callable, Dict, Hashable, Mapping, Optional, Sequence, Union
import numpy as np
import torch
from monai.config import KeysCollection
from monai.transforms.compose import MapTransform
from monai.transforms.utility.array import (
AddChannel,
AsChannelFirst,
AsChannelLast,
CastToType,
DataStats,
FgBgToIndices,
Identity,
LabelToMask,
Lambda,
RepeatChannel,
SimulateDelay,
SqueezeDim,
ToNumpy,
ToTensor,
)
from monai.utils import ensure_tuple, ensure_tuple_rep
class Identityd(MapTransform):
"""
Dictionary-based wrapper of :py:class:`monai.transforms.Identity`.
"""
def __init__(self, keys: KeysCollection) -> None:
"""
Args:
keys: keys of the corresponding items to be transformed.
See also: :py:class:`monai.transforms.compose.MapTransform`
"""
super().__init__(keys)
self.identity = Identity()
def __call__(self, data: Mapping[Hashable, Union[np.ndarray, torch.Tensor]]) -> Dict[Hashable, np.ndarray]:
d = dict(data)
for key in self.keys:
d[key] = self.identity(d[key])
return d
class AsChannelFirstd(MapTransform):
"""
Dictionary-based wrapper of :py:class:`monai.transforms.AsChannelFirst`.
"""
def __init__(self, keys: KeysCollection, channel_dim: int = -1) -> None:
"""
Args:
keys: keys of the corresponding items to be transformed.
See also: :py:class:`monai.transforms.compose.MapTransform`
channel_dim: which dimension of input image is the channel, default is the last dimension.
"""
super().__init__(keys)
self.converter = AsChannelFirst(channel_dim=channel_dim)
def __call__(self, data: Mapping[Hashable, np.ndarray]) -> Dict[Hashable, np.ndarray]:
d = dict(data)
for key in self.keys:
d[key] = self.converter(d[key])
return d
class AsChannelLastd(MapTransform):
"""
Dictionary-based wrapper of :py:class:`monai.transforms.AsChannelLast`.
"""
def __init__(self, keys: KeysCollection, channel_dim: int = 0) -> None:
"""
Args:
keys: keys of the corresponding items to be transformed.
See also: :py:class:`monai.transforms.compose.MapTransform`
channel_dim: which dimension of input image is the channel, default is the first dimension.
"""
super().__init__(keys)
self.converter = AsChannelLast(channel_dim=channel_dim)
def __call__(self, data: Mapping[Hashable, np.ndarray]) -> Dict[Hashable, np.ndarray]:
d = dict(data)
for key in self.keys:
d[key] = self.converter(d[key])
return d
class AddChanneld(MapTransform):
"""
Dictionary-based wrapper of :py:class:`monai.transforms.AddChannel`.
"""
def __init__(self, keys: KeysCollection) -> None:
"""
Args:
keys: keys of the corresponding items to be transformed.
See also: :py:class:`monai.transforms.compose.MapTransform`
"""
super().__init__(keys)
self.adder = AddChannel()
def __call__(
self, data: Mapping[Hashable, Union[np.ndarray, torch.Tensor]]
) -> Dict[Hashable, Union[np.ndarray, torch.Tensor]]:
d = dict(data)
for key in self.keys:
d[key] = self.adder(d[key])
return d
class RepeatChanneld(MapTransform):
"""
Dictionary-based wrapper of :py:class:`monai.transforms.RepeatChannel`.
"""
def __init__(self, keys: KeysCollection, repeats: int) -> None:
"""
Args:
keys: keys of the corresponding items to be transformed.
See also: :py:class:`monai.transforms.compose.MapTransform`
repeats: the number of repetitions for each element.
"""
super().__init__(keys)
self.repeater = RepeatChannel(repeats)
def __call__(self, data: Mapping[Hashable, np.ndarray]) -> Dict[Hashable, np.ndarray]:
d = dict(data)
for key in self.keys:
d[key] = self.repeater(d[key])
return d
class CastToTyped(MapTransform):
"""
Dictionary-based wrapper of :py:class:`monai.transforms.CastToType`.
"""
def __init__(
self,
keys: KeysCollection,
dtype: Union[Sequence[Union[np.dtype, torch.dtype]], np.dtype, torch.dtype] = np.float32,
) -> None:
"""
Args:
keys: keys of the corresponding items to be transformed.
See also: :py:class:`monai.transforms.compose.MapTransform`
dtype: convert image to this data type, default is `np.float32`.
it also can be a sequence of np.dtype or torch.dtype,
each element corresponds to a key in ``keys``.
"""
MapTransform.__init__(self, keys)
self.dtype = ensure_tuple_rep(dtype, len(self.keys))
self.converter = CastToType()
def __call__(
self, data: Mapping[Hashable, Union[np.ndarray, torch.Tensor]]
) -> Dict[Hashable, Union[np.ndarray, torch.Tensor]]:
d = dict(data)
for idx, key in enumerate(self.keys):
d[key] = self.converter(d[key], dtype=self.dtype[idx])
return d
class ToTensord(MapTransform):
"""
Dictionary-based wrapper of :py:class:`monai.transforms.ToTensor`.
"""
def __init__(self, keys: KeysCollection) -> None:
"""
Args:
keys: keys of the corresponding items to be transformed.
See also: :py:class:`monai.transforms.compose.MapTransform`
"""
super().__init__(keys)
self.converter = ToTensor()
def __call__(self, data: Mapping[Hashable, Union[np.ndarray, torch.Tensor]]) -> Dict[Hashable, torch.Tensor]:
d = dict(data)
for key in self.keys:
d[key] = self.converter(d[key])
return d
class ToNumpyd(MapTransform):
"""
Dictionary-based wrapper of :py:class:`monai.transforms.ToNumpy`.
"""
def __init__(self, keys: KeysCollection) -> None:
"""
Args:
keys: keys of the corresponding items to be transformed.
See also: :py:class:`monai.transforms.compose.MapTransform`
"""
super().__init__(keys)
self.converter = ToNumpy()
def __call__(self, data: Mapping[Hashable, Union[np.ndarray, torch.Tensor]]) -> Dict[Hashable, np.ndarray]:
d = dict(data)
for key in self.keys:
d[key] = self.converter(d[key])
return d
class DeleteItemsd(MapTransform):
"""
Delete specified items from data dictionary to release memory.
It will remove the key-values and copy the others to construct a new dictionary.
"""
def __init__(self, keys: KeysCollection) -> None:
"""
Args:
keys: keys of the corresponding items to be transformed.
See also: :py:class:`monai.transforms.compose.MapTransform`
"""
super().__init__(keys)
def __call__(self, data):
return {key: val for key, val in data.items() if key not in self.keys}
class SqueezeDimd(MapTransform):
"""
Dictionary-based wrapper of :py:class:`monai.transforms.SqueezeDim`.
"""
def __init__(self, keys: KeysCollection, dim: int = 0) -> None:
"""
Args:
keys: keys of the corresponding items to be transformed.
See also: :py:class:`monai.transforms.compose.MapTransform`
dim: dimension to be squeezed. Default: 0 (the first dimension)
"""
super().__init__(keys)
self.converter = SqueezeDim(dim=dim)
def __call__(
self, data: Mapping[Hashable, Union[np.ndarray, torch.Tensor]]
) -> Dict[Hashable, Union[np.ndarray, torch.Tensor]]:
d = dict(data)
for key in self.keys:
d[key] = self.converter(d[key])
return d
class DataStatsd(MapTransform):
"""
Dictionary-based wrapper of :py:class:`monai.transforms.DataStats`.
"""
def __init__(
self,
keys: KeysCollection,
prefix: Union[Sequence[str], str] = "Data",
data_shape: Union[Sequence[bool], bool] = True,
value_range: Union[Sequence[bool], bool] = True,
data_value: Union[Sequence[bool], bool] = False,
additional_info: Optional[Union[Sequence[Callable], Callable]] = None,
logger_handler: Optional[logging.Handler] = None,
) -> None:
"""
Args:
keys: keys of the corresponding items to be transformed.
See also: :py:class:`monai.transforms.compose.MapTransform`
prefix: will be printed in format: "{prefix} statistics".
it also can be a sequence of string, each element corresponds to a key in ``keys``.
data_shape: whether to show the shape of input data.
it also can be a sequence of bool, each element corresponds to a key in ``keys``.
value_range: whether to show the value range of input data.
it also can be a sequence of bool, each element corresponds to a key in ``keys``.
data_value: whether to show the raw value of input data.
it also can be a sequence of bool, each element corresponds to a key in ``keys``.
a typical example is to print some properties of Nifti image: affine, pixdim, etc.
additional_info: user can define callable function to extract
additional info from input data. it also can be a sequence of string, each element
corresponds to a key in ``keys``.
logger_handler: add additional handler to output data: save to file, etc.
add existing python logging handlers: https://docs.python.org/3/library/logging.handlers.html
"""
super().__init__(keys)
self.prefix = ensure_tuple_rep(prefix, len(self.keys))
self.data_shape = ensure_tuple_rep(data_shape, len(self.keys))
self.value_range = ensure_tuple_rep(value_range, len(self.keys))
self.data_value = ensure_tuple_rep(data_value, len(self.keys))
self.additional_info = ensure_tuple_rep(additional_info, len(self.keys))
self.logger_handler = logger_handler
self.printer = DataStats(logger_handler=logger_handler)
def __call__(
self, data: Mapping[Hashable, Union[np.ndarray, torch.Tensor]]
) -> Dict[Hashable, Union[np.ndarray, torch.Tensor]]:
d = dict(data)
for idx, key in enumerate(self.keys):
d[key] = self.printer(
d[key],
self.prefix[idx],
self.data_shape[idx],
self.value_range[idx],
self.data_value[idx],
self.additional_info[idx],
)
return d
class SimulateDelayd(MapTransform):
"""
Dictionary-based wrapper of :py:class:`monai.transforms.SimulateDelay`.
"""
def __init__(self, keys: KeysCollection, delay_time: Union[Sequence[float], float] = 0.0) -> None:
"""
Args:
keys: keys of the corresponding items to be transformed.
See also: :py:class:`monai.transforms.compose.MapTransform`
delay_time: The minimum amount of time, in fractions of seconds, to accomplish this identity task.
It also can be a sequence of string, each element corresponds to a key in ``keys``.
"""
super().__init__(keys)
self.delay_time = ensure_tuple_rep(delay_time, len(self.keys))
self.delayer = SimulateDelay()
def __call__(
self, data: Mapping[Hashable, Union[np.ndarray, torch.Tensor]]
) -> Dict[Hashable, Union[np.ndarray, torch.Tensor]]:
d = dict(data)
for idx, key in enumerate(self.keys):
d[key] = self.delayer(d[key], delay_time=self.delay_time[idx])
return d
class CopyItemsd(MapTransform):
"""
Copy specified items from data dictionary and save with different key names.
It can copy several items together and copy several times.
"""
def __init__(self, keys: KeysCollection, times: int, names: KeysCollection) -> None:
"""
Args:
keys: keys of the corresponding items to be transformed.
See also: :py:class:`monai.transforms.compose.MapTransform`
times: expected copy times, for example, if keys is "img", times is 3,
it will add 3 copies of "img" data to the dictionary.
names: the names corresponding to the newly copied data,
the length should match `len(keys) x times`. for example, if keys is ["img", "seg"]
and times is 2, names can be: ["img_1", "seg_1", "img_2", "seg_2"].
Raises:
ValueError: When ``times`` is nonpositive.
ValueError: When ``len(names)`` is not ``len(keys) * times``. Incompatible values.
"""
super().__init__(keys)
if times < 1:
raise ValueError(f"times must be positive, got {times}.")
self.times = times
names = ensure_tuple(names)
if len(names) != (len(self.keys) * times):
raise ValueError(
"len(names) must match len(keys) * times, "
f"got len(names)={len(names)} len(keys) * times={len(self.keys) * times}."
)
self.names = names
def __call__(self, data):
"""
Raises:
KeyError: When a key in ``self.names`` already exists in ``data``.
"""
d = dict(data)
for key, new_key in zip(self.keys * self.times, self.names):
if new_key in d:
raise KeyError(f"Key {new_key} already exists in data.")
d[new_key] = copy.deepcopy(d[key])
return d
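# Usage sketch for CopyItemsd above (added illustration; the key names are invented):
#   d = CopyItemsd(keys="img", times=1, names="img_copy")({"img": np.zeros((1, 2, 2))})
#   # d now holds "img" plus an independent deep copy under "img_copy"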
class ConcatItemsd(MapTransform):
"""
Concatenate specified items from data dictionary together on the first dim to construct a big array.
Expect all the items are numpy array or PyTorch Tensor.
"""
def __init__(self, keys: KeysCollection, name: str, dim: int = 0) -> None:
"""
Args:
keys: keys of the corresponding items to be concatenated together.
See also: :py:class:`monai.transforms.compose.MapTransform`
name: the name corresponding to the key to store the concatenated data.
dim: on which dimension to concatenate the items, default is 0.
Raises:
ValueError: When insufficient keys are given (``len(self.keys) < 2``).
"""
super().__init__(keys)
if len(self.keys) < 2:
raise ValueError("Concatenation requires at least 2 keys.")
self.name = name
self.dim = dim
def __call__(self, data):
"""
Raises:
TypeError: When items in ``data`` differ in type.
TypeError: When the item type is not in ``Union[numpy.ndarray, torch.Tensor]``.
"""
d = dict(data)
output = list()
data_type = None
for key in self.keys:
if data_type is None:
data_type = type(d[key])
elif not isinstance(d[key], data_type):
raise TypeError("All items in data must have the same type.")
output.append(d[key])
if data_type == np.ndarray:
d[self.name] = np.concatenate(output, axis=self.dim)
elif data_type == torch.Tensor:
d[self.name] = torch.cat(output, dim=self.dim)
else:
raise TypeError(f"Unsupported data type: {data_type}, available options are (numpy.ndarray, torch.Tensor).")
return d
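# Usage sketch for ConcatItemsd above (added illustration; the key names are invented):
#   data = {"img1": np.zeros((1, 2, 2)), "img2": np.ones((1, 2, 2))}
#   out = ConcatItemsd(keys=["img1", "img2"], name="img", dim=0)(data)
#   # out["img"].shape == (2, 2, 2); the source keys "img1"/"img2" are kept as well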
class Lambdad(MapTransform):
"""
Dictionary-based wrapper of :py:class:`monai.transforms.Lambda`.
For example:
.. code-block:: python
:emphasize-lines: 2
input_data={'image': np.zeros((10, 2, 2)), 'label': np.ones((10, 2, 2))}
lambd = Lambdad(keys='label', func=lambda x: x[:4, :, :])
print(lambd(input_data)['label'].shape)
(4, 2, 2)
Args:
keys: keys of the corresponding items to be transformed.
See also: :py:class:`monai.transforms.compose.MapTransform`
func: Lambda/function to be applied. It also can be a sequence of Callable,
each element corresponds to a key in ``keys``.
"""
def __init__(self, keys: KeysCollection, func: Union[Sequence[Callable], Callable]) -> None:
super().__init__(keys)
self.func = ensure_tuple_rep(func, len(self.keys))
self.lambd = Lambda()
def __call__(self, data):
d = dict(data)
for idx, key in enumerate(self.keys):
d[key] = self.lambd(d[key], func=self.func[idx])
return d
class LabelToMaskd(MapTransform):
"""
Dictionary-based wrapper of :py:class:`monai.transforms.LabelToMask`.
Args:
keys: keys of the corresponding items to be transformed.
See also: :py:class:`monai.transforms.compose.MapTransform`
select_labels: labels to generate mask from. for 1 channel label, the `select_labels`
is the expected label values, like: [1, 2, 3]. for One-Hot format label, the
`select_labels` is the expected channel indices.
merge_channels: whether to use `np.any()` to merge the result on channel dim.
if yes, will return a single channel mask with binary data.
"""
def __init__( # pytype: disable=annotation-type-mismatch
self,
keys: KeysCollection,
select_labels: Union[Sequence[int], int],
merge_channels: bool = False,
) -> None: # pytype: disable=annotation-type-mismatch
super().__init__(keys)
self.converter = LabelToMask(select_labels=select_labels, merge_channels=merge_channels)
def __call__(self, data: Mapping[Hashable, np.ndarray]) -> Dict[Hashable, np.ndarray]:
d = dict(data)
for key in self.keys:
d[key] = self.converter(d[key])
return d
class FgBgToIndicesd(MapTransform):
"""
Dictionary-based wrapper of :py:class:`monai.transforms.FgBgToIndices`.
Args:
keys: keys of the corresponding items to be transformed.
See also: :py:class:`monai.transforms.compose.MapTransform`
fg_postfix: postfix to save the computed foreground indices in dict.
for example, if computed on `label` and `postfix = "_fg_indices"`, the key will be `label_fg_indices`.
bg_postfix: postfix to save the computed background indices in dict.
for example, if computed on `label` and `postfix = "_bg_indices"`, the key will be `label_bg_indices`.
image_key: if image_key is not None, use ``label == 0 & image > image_threshold`` to determine
the negative samples (background), so the output items will not map to all the voxels in the label.
image_threshold: if image_key is enabled, use ``image > image_threshold`` to determine
the valid image content area and select background indices only within this area.
output_shape: expected shape of output indices. if not None, unravel indices to specified shape.
"""
def __init__(
self,
keys: KeysCollection,
fg_postfix: str = "_fg_indices",
bg_postfix: str = "_bg_indices",
image_key: Optional[str] = None,
image_threshold: float = 0.0,
output_shape: Optional[Sequence[int]] = None,
) -> None:
super().__init__(keys)
self.fg_postfix = fg_postfix
self.bg_postfix = bg_postfix
self.image_key = image_key
self.converter = FgBgToIndices(image_threshold, output_shape)
def __call__(self, data: Mapping[Hashable, np.ndarray]) -> Dict[Hashable, np.ndarray]:
d = dict(data)
image = d[self.image_key] if self.image_key else None
for key in self.keys:
d[str(key) + self.fg_postfix], d[str(key) + self.bg_postfix] = self.converter(d[key], image)
return d
IdentityD = IdentityDict = Identityd
AsChannelFirstD = AsChannelFirstDict = AsChannelFirstd
AsChannelLastD = AsChannelLastDict = AsChannelLastd
AddChannelD = AddChannelDict = AddChanneld
RepeatChannelD = RepeatChannelDict = RepeatChanneld
CastToTypeD = CastToTypeDict = CastToTyped
ToTensorD = ToTensorDict = ToTensord
DeleteItemsD = DeleteItemsDict = DeleteItemsd
SqueezeDimD = SqueezeDimDict = SqueezeDimd
DataStatsD = DataStatsDict = DataStatsd
SimulateDelayD = SimulateDelayDict = SimulateDelayd
CopyItemsD = CopyItemsDict = CopyItemsd
ConcatItemsD = ConcatItemsDict = ConcatItemsd
LambdaD = LambdaDict = Lambdad
LabelToMaskD = LabelToMaskDict = LabelToMaskd
FgBgToIndicesD = FgBgToIndicesDict = FgBgToIndicesd
| [
"[email protected]"
] | |
7f0ed5bfb86883201842207f74bfd612e2da804f | c2f4d3892dec8859d7795a1bce4bfe0e5c85e9b9 | /StudentskaSluzbaConsole/FakultetPrikazObican/rs/uns/ftn/fakultet/prikaz_obican.py | b3a84bd0c57319b65d562092bdacf0427f58ddbb | [] | no_license | unsftn/SOK-primeri | 517423d6e60ab7ce7576fe1cbcefdf10f642b2f2 | a90ab5e6867e5bb14c0aa6690c57424282f88008 | refs/heads/master | 2021-05-04T07:13:49.239755 | 2016-12-13T06:43:45 | 2016-12-13T06:43:45 | 70,586,070 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 448 | py | from rs.uns.ftn.studentska.sluzba.services.fakultet import FakultetPrikazBase
class FakultetPrikazObican(FakultetPrikazBase):
def identifier(self):
return "FakultetPrikazObican"
def name(self):
return "Prikaz samo naziva fakulteta"
def prikazati_fakultete(self, lista_fakulteta):
prikaz="{}\n".format("Naziv")
for f in lista_fakulteta:
prikaz+="{}\n".format(f.naziv)
return prikaz | [
"[email protected]"
] | |
98be9ef6212d83414b73189c6c2d2fa7f6c99add | db1f67ee43986019df2edf622ee5ad617e3a08c6 | /ML/SVM-Support_Vector_Machine/what_whey.py | 62df6ac272e33d3028195b554a77094e54b53c47 | [] | no_license | RafaelGoulartB/learning-machine-learning | 61dcd0428429d68c52f70330ceb241cdbf6ab724 | e3d77be195186f1b5c771513f75d7675bd6cf2dc | refs/heads/master | 2021-09-27T18:05:17.352268 | 2018-11-10T13:00:36 | 2018-11-10T13:00:36 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 830 | py | # Packages for analysis
import pandas as pd
import numpy as np
from sklearn import svm
recipes = pd.read_csv('Iso-vs-Con.csv')
#Specific input models
ingredients = recipes[['Carb','Protein']].values  # Feature matrix of the Carb and Protein columns (as_matrix() was removed from newer pandas)
type_label = np.where(recipes['Type']=='Concentrado', 0, 1) #If Type == Concentrado add 0 else add 1
#Fit the model
model = svm.SVC(kernel='linear')
model.fit(ingredients, type_label)
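# The linear SVC above learns a straight-line decision boundary in (Carb, Protein) space;
# model.predict([[carb, protein]]) then returns 0 (Concentrado) or 1 (Isolado), matching the np.where encoding above.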
# Create a function to guess whether a whey is Isolado or Concentrado
def concentrado_or_isolado(carb, protein):
if(model.predict([[carb, protein]]))==0:
print("\nIt's a whey Concentrado")
else:
print("\nIt's a whey Isolado")
carb = float(input("How many Carb has in your whey? "))
prot = float(input("How many Protein has in your whey? "))
concentrado_or_isolado(carb, prot) | [
"[email protected]"
] | |
891a67ea8b42a8d19fc90a9867adb2f20065b0a6 | 9fe6c602622625dc598c90b33208660aec0d9305 | /scripts/test/run_unittests.py | 965877e40c4edaa524f420a6b5cdf3a7f47b01d7 | [
"BSD-3-Clause"
] | permissive | mmocny/devtools-frontend | efc55015ffd8900bbc76e7f2d392be04fdfb8032 | 4b644f76915b1dd9f7a039b0c6c7811198776cc6 | refs/heads/master | 2022-12-13T13:07:55.137491 | 2020-08-19T00:52:02 | 2020-08-19T17:02:58 | 288,804,874 | 0 | 0 | BSD-3-Clause | 2020-08-19T18:17:13 | 2020-08-19T18:17:13 | null | UTF-8 | Python | false | false | 3,692 | py | #!/usr/bin/env python
#
# Copyright 2019 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Run unit tests on a pinned version of chrome.
"""
import os
import platform
import re
from subprocess import Popen
import sys
import signal
import argparse
scripts_path = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.append(scripts_path)
import test_helpers
import devtools_paths
def run_tests(chrome_binary, target, no_text_coverage, coverage):
cwd = devtools_paths.devtools_root_path()
karmaconfig_path = os.path.join(cwd, 'out', target, 'gen', 'test', 'unittests', 'front_end', 'karma.conf.js')
if not os.path.exists(karmaconfig_path):
print('Unable to find Karma config at ' + karmaconfig_path)
print('Make sure to set the --target argument to the folder name of "out/target"')
sys.exit(1)
print('Using karma config ' + karmaconfig_path)
exec_command = [devtools_paths.node_path(), devtools_paths.karma_path(), 'start', test_helpers.to_platform_path_exact(karmaconfig_path)]
env = os.environ.copy()
env['NODE_PATH'] = devtools_paths.node_path()
if (no_text_coverage is not False):
env['NO_TEXT_COVERAGE'] = '1'
if (coverage is True):
env['COVERAGE'] = '1'
if (chrome_binary is not None):
env['CHROME_BIN'] = chrome_binary
exit_code = test_helpers.popen(exec_command, cwd=cwd, env=env)
if exit_code == 1:
return True
return False
def run_unit_tests_on_ninja_build_target(target,
no_text_coverage=True,
coverage=False,
chrome_binary=None):
if chrome_binary and not test_helpers.check_chrome_binary(chrome_binary):
print(
'Chrome binary argument path does not exist or is not executable, reverting to downloaded binary'
)
chrome_binary = None
if not chrome_binary:
# Default to the downloaded / pinned Chromium binary
downloaded_chrome_binary = devtools_paths.downloaded_chrome_binary_path(
)
if test_helpers.check_chrome_binary(downloaded_chrome_binary):
chrome_binary = downloaded_chrome_binary
if (chrome_binary is None):
print('Unable to run, no Chrome binary provided')
sys.exit(1)
print('Using Chromium binary (%s)\n' % chrome_binary)
errors_found = run_tests(chrome_binary, target, no_text_coverage, coverage)
if errors_found:
print('ERRORS DETECTED')
sys.exit(1)
def main():
parser = argparse.ArgumentParser(description='Run unittests on Ninja targets.')
parser.add_argument(
'--target', '-t', default='Default', dest='target', help='The name of the Ninja output directory. Defaults to "Default"')
parser.add_argument(
'--no-text-coverage', action='store_true', default=False, dest='no_text_coverage', help='Whether to output text coverage')
parser.add_argument('--coverage',
action='store_true',
default=False,
dest='coverage',
help='Whether to output coverage')
parser.add_argument('--chrome-binary',
dest='chrome_binary',
help='Path to Chromium binary')
args = parser.parse_args(sys.argv[1:])
run_unit_tests_on_ninja_build_target(args.target, args.no_text_coverage,
args.coverage, args.chrome_binary)
if __name__ == '__main__':
main()
| [
"[email protected]"
] | |
5e23fa117afbbafd120a6171ec70a82428e5c1e7 | a25223f5be818b549323e8232c31a606fcca275a | /work/lib_learn/threading_demo.py | a43f663e14785bff3331cff64b38037fa4b71d61 | [] | no_license | Jsonming/workspace | 4ef1119606b3c138ff9594b3e0cf16de8077e28d | 2ac1b07d85deeb611600f5e64083c4eb0688fdb4 | refs/heads/master | 2020-06-13T04:23:15.492080 | 2020-03-31T10:34:30 | 2020-03-31T10:34:30 | 194,531,923 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 718 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2019/12/11 11:15
# @Author : yangmingming
# @Site :
# @File : threading_demo.py
# @Software: PyCharm
from threading import Thread
import time
"""
There are two ways to use threads in Python: wrap the work in a plain function, or wrap the thread object in a class.
Function style (legacy Python 2 ``thread`` module): call start_new_thread() to spawn a new thread:
thread.start_new_thread(function, args[, kwargs])
This script uses the Python 3 equivalent, threading.Thread(target=...), below.
"""
global a
a = []
def aaa():
print(1111)
time.sleep(1)
print(2222)
a.append("1")
def bbb():
print(333)
time.sleep(1)
print(444)
a.append("2")
t_1 = Thread(target=aaa)
t_2 = Thread(target=bbb)
t_1.start()
t_1.join()
t_2.start()
t_2.join()
print(a)
| [
"[email protected]"
] | |
de29935be8c92874d76f532ba472260e179f3885 | d4beed3fb9c04ab10857a0ea582a10611d27a913 | /ABC_from001to100/ABC066/c.py | 10c5c0f60347f41bf83314233e2fb4b506ae7357 | [] | no_license | jjitch/AtCoder | f797923556441d1fb9c8563614edff5a9c3f4c6f | 54ea96fbd1dfd8741f53d35dbcb5e59fcc2d794e | refs/heads/master | 2023-08-22T02:32:55.137339 | 2021-10-11T17:18:08 | 2021-10-11T17:18:08 | 292,009,390 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 260 | py | from collections import deque
n = int(input())
a_s = list(map(int, input().split()))
d = deque()
for i in range(n):
if i % 2 == 0:
d.append(a_s[i])
else:
d.appendleft(a_s[i])
if n%2==1:
d.reverse()
print(' '.join(map(str, d)))
| [
"[email protected]"
] | |
ef644fd83609cab01e950f379bfe1760ba929b57 | dfd48ac5cdcf7b964b6f1eb67230d3427366b3dd | /2. Image processing/opencv_image_processing.py | 7e9dc95210474f18a2e072cc5dda221c11743b31 | [] | no_license | ujhoang/computer-vision | f5344164f7c772e154b57892ad2fc8eb337a8c71 | 90676379b0397ee232bc1f78dbf8d2b9f194d6be | refs/heads/master | 2020-12-21T20:30:09.209795 | 2020-03-22T18:01:35 | 2020-03-22T18:01:35 | 236,548,943 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,990 | py | # importing packages
import cv2, imutils
import time
import matplotlib.pyplot as plt
name="./images/uj.jpg"
#loading the image, images are stored as (height, width, dimension)
image = cv2.imread(name)
(h, w, d) = image.shape
print("width={}, height={}, dimension={}".format(w,h,d))
cv2.imshow("Image", image)
cv2.waitKey(0)
# access the (B, G, R) pixel in x=50 and y=100
(B, G, R) = image[100, 50]
print("B={}, G={}, R={}".format(B, G, R))
#Array slicing and cropping
#extract region of interest: rows 80:270, columns 130:280
'''
roi = image[80:270, 130:280]
cv2.imshow("Region of Interest Dr", roi)
cv2.waitKey(0)
cv2.imwrite("./images/cropped.jpg", roi)
'''
#Resizing images
#In deep learning we resize images, ignoring aspect ratios,
#so that the volume fits into a network which requires
#the image to be square and to be of certain dimesion
'''
resize = imutils.resize(image, width=300)
cv2.imshow("resized uj", resize)
cv2.waitKey(0)
cv2.imwrite("./images/resized.jpg", resize)
ratio = 200/w
dim = (200, int(h*ratio))
resize = cv2.resize(image, dim)
cv2.imshow("resized photo", resize)
cv2.waitKey(0)
'''
# Rotating Images
'''
rotated = imutils.rotate(image, 45)
cv2.imshow("rotated uj", rotated)
cv2.waitKey(0)
cv2.imwrite("./images/rotated.jpg", rotated)
'''
#Rotating image within boundary using imutils
'''
rotate_bound = imutils.rotate_bound(image, -45)
cv2.imshow("rotated uj", rotate_bound)
cv2.waitKey(0)
cv2.imwrite("./images/rotated in boundary.jpg", rotate_bound)
'''
# Skeletonize the picture (edge detection)
'''
grey = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
skeleton = imutils.skeletonize(grey, size=(3,3))
cv2.imshow("matplot",skeleton)
cv2.waitKey(0)
cv2.imwrite("./images/skeleton.jpg", skeleton)
'''
# Smoothing images
# In many image processing, we need to reduce the high frequency noise,
# making it easier for our algorithm to understand the contents rather
# than the noise which will confuse the algorithm. Blurring an image is
# one of the easiest way of doing so!
'''
blurred = cv2.GaussianBlur(image, (11,11), 10,0,10)
cv2.imshow("blurred", blurred)
cv2.waitKey(0)
cv2.imwrite("./images/blurred uj.jpg", blurred)
'''
# Drawing on an image
# Note when drawing on an image, we are drawing in place
# Hence, it is a good practice to create a copy (image.copy())
# when drawing so we do not destroy the original image
'''
output = image.copy()
cv2.rectangle(output, (140,90), (280,280),(0,0,255), 5)
cv2.imshow("output rectangle", output)
cv2.waitKey(0)
cv2.imwrite("./images/uj_rectangle.jpg", output)
'''
'''
output = image.copy()
cv2.circle(output, (140,280), 40, (50,255,50), -1) # -1 makes the circle solid fill
cv2.imshow("circle", output)
cv2.waitKey(0)
cv2.imwrite("./images/uj_circle.jpg", output)
'''
'''
output = image.copy()
cv2.putText(output, "ujhoang, Data Scientist", (10,30),cv2.FONT_HERSHEY_SIMPLEX, 0.55, (255,255,255),1)
cv2.imshow("uj with text", output)
cv2.waitKey(0)
cv2.imwrite("./images/uj_text.jpg", output)
''' | [
"[email protected]"
] | |
b435a28b080aada90a007675590ff6531adf2c14 | 5c05866275988edb7cf31d096569e83c3e1138e1 | /career/models.py | da570f73439fa3e7b2838c364f1885ed4ce06f1f | [] | no_license | kljopu/fixo-back-fork | 3203952a8a72c67f545c7d30cd42b0b8df2d790c | bd7b116f2d7c01655e02b30a6b07bf913d0bf0cc | refs/heads/master | 2022-12-15T03:17:55.829477 | 2020-09-14T05:36:24 | 2020-09-14T05:36:24 | 295,312,045 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 574 | py | from django.db import models
from utils.models import TimeStampModel
class Service(TimeStampModel):
corrector = models.ForeignKey("accounts.Corrector", on_delete=models.CASCADE)
name = models.CharField(max_length=2000)
service_description = models.CharField(max_length=2000)
image = models.URLField(max_length=2000)
price = models.IntegerField()
price_description = models.CharField(max_length=2000, null=True)
career_detail = models.CharField(max_length=1000, null = True)
class Meta:
db_table = "services"
managed = True
| [
"[email protected]"
] | |
b9e53559090239af7ea0299aa3616f5a7fea2643 | 02cc235dcbfc9f3d645b9905d1bf19a8d746420b | /amazon/amazon_bu/mapper_rdf copy.py | a1b0e03ab3060e1ab0008246bc0ba999426460fa | [] | no_license | manzikki/hadooprdf | 9300b7186c5b86fabeac367278824f9cdf9b1c0e | d7bec4323e3ee0f9a199239f4dfaed70bb00d020 | refs/heads/master | 2020-04-12T19:00:14.654709 | 2019-03-01T09:13:50 | 2019-03-01T09:13:50 | 162,696,817 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 532 | py | #!/usr/bin/env python
"""A more advanced Mapper, using Python iterators and generators."""
import sys
import rdflib
def main(separator='\t'):
# input comes from STDIN (standard input)
g = rdflib.Graph()
g.load(sys.stdin)
q = g.query('SELECT ?a WHERE { ?a hip:hasContinent hip:Asia }',
initNs = { 'hip' : 'http://wiki.hip.fi/xml/ontology/olapdim_st.owl#'})
qs=q.serialize(destination=None,format='xml')
print(qs)
#for row in q:
# print (row)
if __name__ == "__main__":
main()
| [
"[email protected]"
] | |
1227ba925d1a893df632f37cec27a2fdf940ba58 | bd2057543881ae2b64c37f295df036567ef53f3d | /bonds/migrations/0005_auto_20170307_1711.py | 072e4944b7d3d052363c2562b66dd8de8f5b6b2a | [
"MIT"
] | permissive | reddress/bondtracker | fccad222df9733345988500489a9ef6a7997c2f4 | 432f2ed1962cf0019fb5ebdabad8354a123088c8 | refs/heads/master | 2021-04-28T15:39:50.829990 | 2019-03-08T20:05:11 | 2019-03-08T20:05:11 | 121,995,258 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 422 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-03-07 20:11
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('bonds', '0004_auto_20170307_0022'),
]
operations = [
migrations.AlterModelOptions(
name='bond',
options={'ordering': ['bondtype', 'maturity']},
),
]
| [
"[email protected]"
] | |
8f18834b8d94f9186ba897a70ec8fcb25f13f461 | bf2a9f9284bee1d75da497dba8cfd69cf1cf001b | /subliminal/plugins/TheSubDB.py | 6c7588d4ed5b8dff2b9ef06ada49e982b410d277 | [] | no_license | mrcrabby/subliminal | eb668f29a34e22ef98fe3647def0a7514a6b5142 | f3292c2916e53cdbe393ce4d31e56c5bf2089d20 | refs/heads/master | 2021-01-18T10:16:03.246677 | 2011-10-08T17:44:14 | 2011-10-08T17:44:14 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,735 | py | # -*- coding: utf-8 -*-
#
# Subliminal - Subtitles, faster than your thoughts
# Copyright (c) 2008-2011 Patrick Dessalle <[email protected]>
# Copyright (c) 2011 Antoine Bertin <[email protected]>
#
# This file is part of Subliminal.
#
# Subliminal is free software; you can redistribute it and/or modify it under
# the terms of the Lesser GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# Subliminal is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# Lesser GNU General Public License for more details.
#
# You should have received a copy of the Lesser GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import PluginBase
import hashlib
import os
import urllib2
from subliminal.classes import Subtitle
class TheSubDB(PluginBase.PluginBase):
site_url = 'http://thesubdb.com'
site_name = 'SubDB'
server_url = 'http://api.thesubdb.com' # for testing purpose, use http://sandbox.thesubdb.com instead
api_based = True
user_agent = 'SubDB/1.0 (Subliminal/1.0; https://github.com/Diaoul/subliminal)' # defined by the API
_plugin_languages = {'af': 'af', 'cs': 'cs', 'da': 'da', 'de': 'de', 'en': 'en', 'es': 'es', 'fi': 'fi', 'fr': 'fr', 'hu': 'hu', 'id': 'id',
'it': 'it', 'la': 'la', 'nl': 'nl', 'no': 'no', 'oc': 'oc', 'pl': 'pl', 'pt': 'pt', 'ro': 'ro', 'ru': 'ru', 'sl': 'sl', 'sr': 'sr',
'sv': 'sv', 'tr': 'tr'} # list available with the API at http://sandbox.thesubdb.com/?action=languages
def __init__(self, config_dict=None):
super(TheSubDB, self).__init__(self._plugin_languages, config_dict)
def list(self, filepath, languages):
possible_languages = self.possible_languages(languages)
if not possible_languages:
return []
if not os.path.isfile(filepath):
return []
return self.query(filepath, self.hashFile(filepath), possible_languages)
def query(self, filepath, moviehash, languages):
searchurl = '%s/?action=%s&hash=%s' % (self.server_url, 'search', moviehash)
self.logger.debug(u'Query URL: %s' % searchurl)
try:
req = urllib2.Request(searchurl, headers={'User-Agent': self.user_agent})
page = urllib2.urlopen(req, timeout=self.timeout)
except urllib2.HTTPError as inst:
if inst.code == 404: # no result found
return []
self.logger.error(u'Error: %s - %s' % (searchurl, inst))
return []
except urllib2.URLError as inst:
self.logger.error(u'TimeOut: %s' % inst)
return []
available_languages = page.readlines()[0].split(',')
self.logger.debug(u'Available languages: %s' % available_languages)
subs = []
for l in available_languages:
if l in languages:
result = Subtitle(filepath, self.getSubtitlePath(filepath, l), self.__class__.__name__, l, '%s/?action=download&hash=%s&language=%s' % (self.server_url, moviehash, l))
subs.append(result)
return subs
def hashFile(self, filepath):
"""TheSubDB specific hash function"""
readsize = 64 * 1024
with open(filepath, 'rb') as f:
data = f.read(readsize)
f.seek(-readsize, os.SEEK_END)
data += f.read(readsize)
return hashlib.md5(data).hexdigest()
def download(self, subtitle):
self.downloadFile(subtitle.link, subtitle.path)
return subtitle
| [
"[email protected]"
] | |
51aa44dcaa7e5c3afc095422120d33f9cd7d4158 | 2fad894e9e041fc98c0ec2641480cbbaf3034764 | /ood_special_method.py | dcf0a0c7d8e9ee2f2693a27ec47cc73a0ea79ee2 | [] | no_license | mohammadasim/python-course | 04698e19a90d5d5dec715a9b640c78da3cc0e4a7 | 688617ded71ff1c025bdcd25caefba5b6d4665fb | refs/heads/master | 2021-04-30T04:39:16.023071 | 2018-08-02T16:16:53 | 2018-08-02T16:16:53 | 121,540,215 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 871 | py | # When we create a list object and pring the list, we will get the list object containing the list. However if I create a book object
# using this Book class, what we will get is the object in memory. Behind the scenes, print() gets the string representation of the
# book object and prints that. So in order to enable print() to show the real book data, i.e. the title, author and pages of
# the book, we have to override the __str__(self) method.
class Book():
def __init__(self, title, author, pages):
self.title = title
self.author = author
self.pages = pages
# String representation of this book class
def __str__(self):
return "{} by {}".format(self.title, self.author)
def __len__(self):
return self.pages
myBook = Book('My story', 'Asim', 202)  # pages as an int so __len__ returns a number
print(myBook)
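# print(len(myBook)) would go through __len__ and report the number of pages.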
print(str(myBook)) | [
"[email protected]"
] | |
d4d778b9a2b0dad95e29227cf48aad56ae424e50 | ff4244bace9897d2ff9f32748e3e51d5d7df2f90 | /setup.py | ceec156f2cad4aa8a067f405d37a231b9c3a0c47 | [
"MIT"
] | permissive | kouzapo/altrobot | d3c8e3efe4002d415ebe0b09f1aa4e39883c7fcd | 4d31701bbd47c29acbc9ba2b65c2cb84b2b88d94 | refs/heads/master | 2021-08-02T19:32:13.600302 | 2021-07-24T09:20:26 | 2021-07-24T09:20:26 | 213,660,071 | 3 | 4 | MIT | 2021-01-22T18:11:22 | 2019-10-08T14:08:10 | Python | UTF-8 | Python | false | false | 767 | py | #!/usr/bin/env python3
import setuptools
with open('README.md', 'r', encoding = 'utf-8') as f:
long_description = f.read()
setuptools.setup(
name = 'altrobot',
version = '0.0.1',
author = 'Apostolos Anastasios Kouzoukos',
author_email = '[email protected]',
description = 'A backtesting library for algorithmic trading, based on neural networks',
long_description = long_description,
long_description_content_type = 'text/markdown',
url = 'https://github.com/kouzapo/altrobot',
packages = setuptools.find_packages(),
classifiers = [
'Programming Language :: Python :: 3',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
],
python_requires = '>=3.6',
)
| [
"[email protected]"
] | |
a136480a9e699fc70d57d1c9205dde7c20f919bb | b5ac08012751057a410951890eab0b13f1e30e42 | /users/migrations/0005_auto_20190509_0446.py | 7780f17ee8f4127c013895fc7746d7e1189e9312 | [] | no_license | TheSuperiorStanislav/practice-django-utilities_stats | 1934062df07333efdb0753f4a3d9f0f398453afc | 93b8b32eedc63dc0c425f49e7f8f666808a4b885 | refs/heads/master | 2023-04-29T19:35:50.928377 | 2022-04-28T04:42:54 | 2022-04-28T04:42:54 | 185,186,204 | 0 | 1 | null | 2023-04-21T22:10:06 | 2019-05-06T11:48:10 | Python | UTF-8 | Python | false | false | 447 | py | # Generated by Django 2.2.1 on 2019-05-09 04:46
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('users', '0004_auto_20190430_1305'),
]
operations = [
migrations.AlterField(
model_name='utilitiesuser',
name='email',
field=models.EmailField(blank=True, max_length=254, unique=True, verbose_name='email address'),
),
]
| [
"[email protected]"
] | |
f66c3193943c46902e71914540ea3d2d9ef152cc | bb8c36271b45ba80b0b2074d7704bb2c0e9839be | /home/forms.py | 0d3dc760d6d5598369cce1073a05879924dbd71c | [] | no_license | alexeysorok/learn_django_specialist_2020 | f0df4fe1ad8c3ee8e038e5f4df39168185654f80 | cb08f0a61b32f2c59d995cdb4d956207b8980b74 | refs/heads/master | 2020-12-08T20:51:07.239868 | 2020-01-10T17:09:09 | 2020-01-10T17:09:09 | 233,091,636 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 149 | py | from django import forms
class LoginForm(forms.Form):
login = forms.CharField(label='login')
password = forms.CharField(label='password')
| [
"alexe@W10SSD"
] | alexe@W10SSD |
4db691364a9d81a959c8bcf57a843628ce0d0c64 | b73d4b5f169a2a43b0e7e29da369e701028663d9 | /Event_Listeners/main.py | 02c58865cee50c9e30f789f94421bca7e1ae7137 | [
"BSD-3-Clause"
] | permissive | yash-agrawal20/Python-Project | 3824b93e2fb02bb93919b439a9d472486c4d821e | 29780c4741daf2ef299cc3cb93b88b851f270f51 | refs/heads/main | 2023-09-03T07:00:47.232547 | 2021-09-23T06:31:54 | 2021-09-23T06:31:54 | 406,480,236 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 616 | py | #Using Event Listeners
#Date: 14-09-2021
import turtle as t
my_turtle = t.Turtle()
def move_forward():
my_turtle.forward(50)
def move_backward():
my_turtle.backward(50)
def move_clockwise():
my_turtle.right(30)
def move_counter_clockwise():
my_turtle.left(30)
def clear_screen():
my_turtle.reset()
screen = t.Screen()
screen.listen()
screen.onkey(fun = move_forward, key = "w")
screen.onkey(fun = move_backward, key = "s")
screen.onkey(fun = move_clockwise, key = "d")
screen.onkey(fun = move_counter_clockwise, key = "a")
screen.onkey(fun = clear_screen, key = "c")
screen.exitonclick() | [
"[email protected]"
] | |
91c956e0bced4cf8c7ac59bc5f8f643ceb4d0a72 | cd52f86c04e10bdf6cda9539ace5c518209c72b1 | /ocr_main.py | c14ae83eccbbaae5a7ec23e9bbde9f237bde5d7b | [] | no_license | TonyChouZJU/SimuOCR | 033a5d3b717fbab49b509332ba96581aaad048bf | 794a875cef6be61fe6662e8fa0b6de9cbcb4517e | refs/heads/master | 2021-01-11T17:27:32.831039 | 2017-01-23T05:23:48 | 2017-01-23T05:23:48 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,593 | py | #coding=utf-8
import _init_paths
import sys
import os
import subprocess
import pickle
import numpy as np
import subprocess
import matplotlib.pylab as plt
import cv2
import json
from font_effects import FontsEffects
'''
backcolor is used together with a label such as '福', e.g.:
xc:transparent -annotate 0x0 '福'
'''
def parse_fonts_list(fonts_list_file='./sys_fonts_list.txt',
fonts_pkl_file='./fonts.pkl'):
fonts_list = []
if os.path.exists(fonts_pkl_file):
fonts_list = pickle.load(open(fonts_pkl_file, 'r'))
return fonts_list
with open(fonts_list_file, 'r') as f:
for line in f:
line = line.strip('\n').strip(' ')
if line[:5] != 'Font:':
continue
else:
fonts_split = line.split(':')
print fonts_split
fonts_list.append(fonts_split[1].strip(' '))
with open(fonts_pkl_file, 'w+') as f:
pickle.dump(fonts_list, f)
return fonts_list
def parse_color_list(color_list_file='./im_color_list.txt',
color_pkl_file='./color.pkl'):
color_list = []
if os.path.exists(color_pkl_file):
color_list = pickle.load(open(color_pkl_file, 'r'))
return color_list
with open(color_list_file, 'r') as f:
for line in f:
line_split = line.split(' ')
color_list.append(line_split[0])
with open(color_pkl_file, 'w+') as f:
pickle.dump(color_list, f)
return color_list
def parse_gradients_json(color_json_file='./gradients.json'):
with open(color_json_file, 'r') as f:
color_list = json.load(f)
return color_list
def get_cn_fonts_list(fonts_pkl_file='./fonts.pkl',
cn_dir='/mnt/exhdd/tomorning_dataset/wonderland/raw_data/word/fu_cn',
cn_fonts_pkl_file='./cn_fonts.pkl'):
if os.path.exists(cn_fonts_pkl_file):
cn_fonts_list = pickle.load(open(cn_fonts_pkl_file, 'r'))
return cn_fonts_list
fonts_list = parse_fonts_list()
cn_idx_list = [int(img_file.split('_')[0]) for img_file in os.listdir(cn_dir)]
cn_fonts_list = [fonts_list[ii] for ii in cn_idx_list]
with open(cn_fonts_pkl_file, 'w+') as f:
pickle.dump(cn_fonts_list, f)
return cn_fonts_list
def get_tile_list(tile_dir):
tile_list = [os.path.join(tile_dir, img_file) for img_file in os.listdir(tile_dir)]
return tile_list
if __name__ == "__main__":
sys_fonts_list = parse_fonts_list()
im_color_list = parse_color_list()
local_gradient_list = parse_gradients_json()
sys_cn_fonts_list = get_cn_fonts_list()
local_tile_list = get_tile_list(tile_dir='/home/zyb/cv/simultate_detection_examples/words_bg/words_color')
word = u'福'
FontMaker = FontsEffects(fonts_list=sys_cn_fonts_list, color_list=im_color_list,
gradient_list=local_gradient_list, tile_list=local_tile_list, wd=word)
FontEffectsList = dir(FontsEffects)
FontEffectsList.remove('__doc__')
FontEffectsList.remove('__module__')
FontEffectsList.remove('__init__')
FontFuncsList = ['FontMaker.' + func_name for func_name in FontEffectsList]
# BE CAREFULL TO USE "EVAL"
for idx in range(len(FontFuncsList)):
FontFuncName = FontFuncsList[idx]
save_file = '/home/zyb/cv/simultate_detection_examples/word_imgs/fu/' + str(idx) +'.png'
callFontFuncName = FontFuncName + '()'
params = eval(callFontFuncName) + [save_file]
print idx, callFontFuncName, params
subprocess.check_call(params)
| [
"[email protected]"
] | |
6b13a3be85c021acb5f723df9c5da292421f2649 | 363670e1c658045f7145c12d64a0b70aaa24662f | /BusyLoop/__init__.py | 2723459c0bb7277739c30bf077c1d1479db0ca88 | [] | no_license | fortune/azure-functions-sample-in-python | 609ce8c4a564ce1dfb3707686eb20a03b31e6119 | cf301fdb990e3efb4f84326c5788212e20d2207d | refs/heads/master | 2022-12-10T11:51:07.350793 | 2019-08-22T06:16:22 | 2019-08-22T06:16:22 | 173,325,622 | 1 | 0 | null | 2022-12-08T01:39:20 | 2019-03-01T15:33:57 | Python | UTF-8 | Python | false | false | 385 | py | import logging
import os, time
import azure.functions as func
def main(req: func.HttpRequest) -> func.HttpResponse:
logging.info('Python HTTP trigger function processed a request.')
pid = os.getpid()
start_time = time.time()
while True:
if time.time() - start_time > 3:
break
return func.HttpResponse('pid = ' + str(pid))
| [
"[email protected]"
] | |
f62a38f0d528e475c40d662e31ce90855a495fae | 58f6e51e0da894dd04d2a6cec7736719611f6df6 | /aorta_detection/addSRM/merge_dl_srm.py | 2e47a3b81b8ee97c1233e137a6191f84616f4d69 | [] | no_license | RitaOlenchuk/bachelor_thesis | 9e45f36cc42a741caac7b0eda54e64dc9fb1f8e4 | 1dfb7972fcf6f7c04780e2a65668fbe2773575f1 | refs/heads/master | 2020-06-27T17:44:34.170761 | 2020-04-27T21:15:07 | 2020-04-27T21:15:07 | 200,010,889 | 0 | 1 | null | 2019-08-05T08:18:29 | 2019-08-01T08:33:46 | Jupyter Notebook | UTF-8 | Python | false | false | 2,436 | py | import numpy as np
from PIL import Image
import imageio
from matplotlib import pyplot as plt
img_path = '/Users/rita/Uni/bachelor_thesis/DL/2_outfile.png'
img = Image.open(img_path)
img = np.array(img, dtype=np.float32)
img = img / np.max(np.max(img))
print(set(img.flatten()))
img [img < 0.3] = 0
selParts = (img > 0.3) & (img < 0.5)
img [img >= 0.5] = 1
img [selParts] = 2
img [img >= 3] = 0
print(set(img.flatten()))
background = np.argwhere(img==0)
membrane = np.argwhere(img==1)
plaque = np.argwhere(img==2)
img_path2 = '/Users/rita/Uni/bachelor_thesis/DL/segmented2.png'
img2 = Image.open(img_path2)
img2 = np.array(img2)
n_segments = (len(set(img2.flatten())))
print('Number of segments: ',n_segments)
fig = plt.figure()
plt.subplot(221)
plt.imshow(img, cmap='gray')
plt.axis('off')
plt.title('DL', fontsize=16)
plt.subplot(222)
plt.imshow(img2, cmap='gray')
plt.axis('off')
plt.title('Segmented', fontsize=16)
plt.show()
#fig = plt.figure()
#plt.subplot(221)
#plt.imshow(img2, cmap='gray')
#plt.axis('off')
#plt.title('DL', fontsize=16)
#m = 38
#tmp = np.argwhere(img2==m)
#plt.subplot(222)
#plt.imshow(img2, cmap='gray')
#plt.scatter([t1[1] for t1 in tmp], [t2[0] for t2 in tmp], c='red', s=1, label='Segment '+str(m))
#plt.axis('off')
#plt.legend()
#plt.title('Segmented', fontsize=16)
#plt.show()
output_image = np.zeros(img.shape)
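# Majority vote: for every segment in the SRM image, count how many of its pixels the DL map
# labels background (0), membrane (1) or plaque (2), then paint the whole segment with the winning class.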
for segm in range(n_segments):
coords = np.argwhere(img2 == segm) #all coordinates that belong to the segment
frequencies = [0, 0, 0] #how often the coordinate in selected segment belong to (background, membrane, plaque)
for coord in coords:
if img[coord[0], coord[1]] == 0:
frequencies[0] += 1
elif img[coord[0], coord[1]] == 1:
frequencies[1] += 1
elif img[coord[0], coord[1]] == 2:
frequencies[2] += 1
struct = np.argmax(frequencies)
for c in coords:
output_image[c[0], c[1]] = struct
#output_image[coords] = struct
fig = plt.figure()
fig.suptitle('Q = 0.002', fontsize=16)
plt.subplot(131)
plt.imshow(img)
plt.axis('off')
plt.title('DL', fontsize=16)
plt.subplot(132)
plt.imshow(img2)
#plt.scatter([t1[1] for t1 in tmp], [t2[0] for t2 in tmp], c='red', s=1, label='Segment '+str(m))
plt.axis('off')
plt.title('Segmented', fontsize=16)
plt.subplot(133)
plt.imshow(output_image)
plt.axis('off')
plt.title('Mix', fontsize=16)
plt.savefig('dl_segm_mixed2.png')
plt.show()
| [
"[email protected]"
] | |
5d1ae22b5fa8aff66aa85311dfd7724f3d15d47c | a26c2e7ed5adc0ac0971182d1715ccba258ba88d | /Flask_package/main_route.py | cb39358184e7d5bb13396342e876365a7d7a0b87 | [] | no_license | LalithaPrasad/ERMS_Falcon_Angular | 6af39833beb9aa24df8f60a57894361829c6d981 | 2d4c4b6e46edaa039eacde119602fdb28f010755 | refs/heads/master | 2022-05-30T03:21:23.705641 | 2020-05-04T03:47:01 | 2020-05-04T03:47:01 | 261,074,448 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 123 | py | from Flask_package import app
@app.route("/", methods=["GET"])
def index():
return app.send_static_file("index.html")
| [
"[email protected]"
] | |
e656fa42de5a5ea7295f63e5901d7b38a56bc1b3 | 5db6e9e2d3d2892e51cd8d90fb1a319ca6ee92e7 | /Streaming/capture_send.py | 9eb773a61267da274c9216e7fdd0370a464af0cc | [
"MIT"
] | permissive | rprabhuh/SDNDDoS | e27c6bce7a3bb483c2bda5ea3f53c28638c3ebe7 | 91a23c0b817999bce5b07359681c5b2dddf89b9d | refs/heads/master | 2021-01-20T20:56:36.874380 | 2015-01-14T07:41:53 | 2015-01-14T07:41:53 | 24,749,134 | 3 | 5 | null | 2014-10-15T00:50:26 | 2014-10-03T06:41:46 | Lua | UTF-8 | Python | false | false | 1,125 | py | #!/usr/bin/env python
import socket
from scapy.all import *
# prepare the listening connection
TCP_IP = '127.0.0.1'
#TCP_PORT = int(sys.argv[-1])
TCP_PORT = 3000
BUFFER_SIZE = 1024
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.bind((TCP_IP, TCP_PORT))
s.listen(20)
conn, addr = s.accept()
print 'Connection address:', addr
def pkt_callback(pkt):
#pkt.show()
if not IP in pkt:
return "not IP"
global conn
# data fields:
# class label, length of ethernet frame, src IP, dst IP,
# network layer protocol(UDP=1, else=0), src port, dst port,
# monlist req(exist=1, otherwise=0)
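# e.g. (hypothetical packet) a 90-byte UDP monlist request from ephemeral port 48721 to port 123
# would be emitted as the line: "1 90 1 48721 123 1"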
label = 1 if "\x17\x00\x03\x2a\x00\x00\x00\x00" in str(pkt[IP]) and len(pkt) <= 100 else 0
output = [label, len(pkt),
1 if (UDP in pkt) else 0, pkt[IP].sport, pkt[IP].dport]
if "\x17\x00\x03\x2a" in str(pkt[IP]):
output.append(1)
else:
output.append(0)
# do remember to append the newline char otherwise Spark will wait
outputStr = ' '.join(map(str, output)) + '\n'
conn.send(outputStr)
print outputStr
sniff(iface="wlan0", prn=pkt_callback, filter="ip", store=0)
conn.close()
| [
"[email protected]"
] | |
e3bb201ac79fa6cc8dd092ae3b68e430b7375067 | 772915fc1d85ba50892d6aadcff9d5d55e1a62b5 | /11_Standart_library/task_11_02_01_Logging_app/code/main.py | ec061b93687234d23dc72cf568661d9275562cd4 | [] | no_license | dolphin-in-a-coma/python-course | eb90b9797e4ef397acd1577b30c31e372ffb6ed7 | c5dd67c3e699a62eb5f9594bc13dabbed830fecc | refs/heads/master | 2020-04-15T19:24:53.277011 | 2019-01-09T22:48:04 | 2019-01-09T22:48:04 | 164,948,718 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,337 | py | # ПРОБЛЕМА ВКЛЮЧЕНИЯ!!!!!!!!!
import datetime
import platform
from settings import Settings
def get_last_run_info(settings, launch_dt):
"""Вернуть кортеж вида: (
Количество запусков программы,
Количество секунд с последнего запуска,
Информация о платформе
).
Если программа не была запущена ранее:
- количество запусков: 0,
- количество секунд: -1,
- информации о платформе: пустой кортеж.
Параметры:
- settings: настройки - экземпляр класса Settings;
- launch_dt (datetime.datetime): дата/время запуска программы.
"""
last_time = settings.get_value('last_run_datetime', False)
if last_time:
sek = (launch_dt - last_time).total_seconds()
else:
sek = -1
return (settings.get_value('run_count', 0),
sek,
tuple(settings.get_value('last_run_platform_info', ())))
def update_last_run_info(settings, launch_dt):
"""Установить новые значения для настроек:
- количество запусков программы должно быть увеличено на 1;
- установлена дата/время последнего запуска (текущая);
- установлена информация о текущей платформе (platform.uname()).
Параметры:
- settings: настройки - экземпляр класса Settings;
- launch_dt (datetime.datetime): дата/время запуска программы.
"""
settings.set_value('run_count', settings.get_value('run_count', 0) + 1)
settings.set_value('last_run_datetime', launch_dt)
settings.set_value('last_run_platform_info', tuple(platform.uname()))
if __name__ == "__main__":
launch_dt = datetime.datetime.now() # Application launch time
settings = Settings()
try:
settings.load()
run_count, last_run_seconds, last_run_platform_info = \
get_last_run_info(settings, launch_dt)
print("Сейчас программа запущена: {}-й раз.".format(run_count + 1))
if run_count > 0:
print("С предыдущего запуска прошло {} с.".
format(last_run_seconds))
print("Информация о платформе: {}".format(last_run_platform_info))
update_last_run_info(settings, launch_dt)
settings.save()
except Exception as err:
print("Во время работы приложения произошла ошибка:", err)
# -------------
# Пример вывода:
#
# Сейчас программа запущена: 1-й раз.
# -------------
# Пример вывода:
#
# Сейчас программа запущена: 3-й раз.
# С предыдущего запуска прошло 12 с.
# Информация о платформе: ('Windows', 'user-pc', '10', '10.0.14393', 'AMD64',
# 'Intel64 Family 6 Model 58 Stepping 9, GenuineIntel')
| [
"[email protected]"
] | |
ee8ffd4568bff2eeceefd329d0b1ea1cae90f7de | 2a3c0d083fa73d89e3ca9a86928a98f738c07055 | /pydb/pydb/__init__.py | 0361e2125f60781f20cf377afbd47dcdce65233e | [] | no_license | hacchuu0119/pip_packages | 8ebef92f9196c57aedb4e9cb60ec3623f534c10f | e79ee0ca6f76d7ebce33d0de02280cd58ea809f7 | refs/heads/master | 2020-08-29T08:25:25.911142 | 2019-12-17T07:44:50 | 2019-12-17T07:44:50 | 217,981,027 | 3 | 0 | null | 2019-11-07T03:00:38 | 2019-10-28T06:34:33 | Python | UTF-8 | Python | false | false | 1,781 | py | from .connection import Connection
CONNECTION_MYSQL = None
def setup_connection_instance_mysql():
global CONNECTION_MYSQL
CONNECTION_MYSQL = Connection()
def _get_connection_instance_mysql():
if not CONNECTION_MYSQL:
setup_connection_instance_mysql()
return CONNECTION_MYSQL
def mysql(user=None, password=None,
host=None, port=None, database=None, *args, **kwargs):
return _get_connection_instance_mysql().create_connection_mysql(user=user,
password=password,
host=host,
port=port,
database=database,
*args, **kwargs)
CONNECTION_ORACLE = None
def setup_connection_instance_oracle():
global CONNECTION_ORACLE
CONNECTION_ORACLE = Connection()
def _get_connection_instance_oracle():
if not CONNECTION_ORACLE:
setup_connection_instance_oracle()
return CONNECTION_ORACLE
def oracle(user=None, password=None,
host=None, port=None, sid=None, *args, **kwargs):
return _get_connection_instance_oracle().create_connection_oracle(user=user,
password=password,
host=host,
port=port,
sid=sid,
*args, **kwargs)
| [
"[email protected]"
] | |
ae00f2a571e93364b014c7430ef774d27668efb1 | eaf00759ebec00f930c936b0beacc4b65cee6452 | /7.0/ursa_product_customizations/product.py | 17bd1ad0934adc534465c6ade5a61bc7bb1cb01e | [] | no_license | alephobjects/ao-openerp | b73bbeedc2149b08a946660aeb6d78f4eafa3698 | 3a0d7ddb85d497b4f576678370a1fbbfd71379f4 | refs/heads/master | 2020-04-12T03:53:47.210221 | 2016-07-27T09:44:24 | 2016-07-27T09:44:24 | 15,320,631 | 3 | 5 | null | null | null | null | UTF-8 | Python | false | false | 1,769 | py | # -*- coding: utf-8 -*-
##############################################################################
#
# Ursa Information Systems
# Author: Balaji Kannan
# Copyright (C) 2013 (<http://www.ursainfosystems.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from osv import fields, osv
from tools.translate import _
class product_product(osv.osv):
_inherit = "product.product"
def _product_code(self, cr, uid, ids, name, arg, context=None):
res = {}
if context is None:
context = {}
for p in self.browse(cr, uid, ids, context=context):
res[p.id] = self._get_partner_code_name(cr, uid, [], p, context.get('partner_id', None), context=context)['code']
return res
_columns = {
'code': fields.function(_product_code, type='char', string='Internal Code'),
'manf_country': fields.many2one('res.country', 'Country of Origin'),
'scheduleb' : fields.char('Schedule B #', size=128, help='Schedule B number for item'),
}
product_product()
| [
"[email protected]"
] | |
8823a2139da983b787778dd99adf5495845f9d38 | 3c397042e7fa0d7d4fa25cd75f0d10babd9f933f | /lab_8/mysite/mysite/settings.py | 7e02b37905d7097f51689bd92ff721a407afe78c | [] | no_license | StepanIonov/RIP_lab | f34f2a95fb8ddcfeeb703efd7088320f40ac1fc5 | 0fefaf77d664ed404d791422658a062fc3e9201c | refs/heads/master | 2023-02-20T12:38:33.389360 | 2021-01-18T10:13:24 | 2021-01-18T10:13:24 | 295,768,234 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,094 | py | """
Django settings for mysite project.
Generated by 'django-admin startproject' using Django 3.1.4.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.1/ref/settings/
"""
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'n-)*r0pos6$$3p=*+vx+fwz%j80f$s6pn@#x*57(^#1o4b54wn'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'dishes.apps.DishesConfig',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'mysite.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'mysite.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/
STATIC_URL = '/static/'
| [
"[email protected]"
] | |
06c458fbca9c45c83e062349ed1b4c3d1ba8b0bd | af34f6e3cb1d0bc783dc354b295f81d9a15120be | /pytorchocr/postprocess/fce_postprocess.py | ae4633fbea17091db90456cbe9eaffc81c2b655d | [
"Apache-2.0"
] | permissive | znsoftm/PaddleOCR2Pytorch | 1785a820a1361b556963ef3d986161aebefd387a | 947674812acaeaa62d2ad56a17a135c9a0582f31 | refs/heads/main | 2023-07-05T22:49:30.358494 | 2023-04-16T06:04:04 | 2023-04-16T06:04:04 | 356,120,953 | 0 | 0 | Apache-2.0 | 2021-04-09T03:20:42 | 2021-04-09T03:20:42 | null | UTF-8 | Python | false | false | 8,040 | py | """
This code is refer from:
https://github.com/open-mmlab/mmocr/blob/v0.3.0/mmocr/models/textdet/postprocess/wrapper.py
"""
import cv2
import torch
import numpy as np
from numpy.fft import ifft
from pytorchocr.utils.poly_nms import poly_nms, valid_boundary
def fill_hole(input_mask):
h, w = input_mask.shape
canvas = np.zeros((h + 2, w + 2), np.uint8)
canvas[1:h + 1, 1:w + 1] = input_mask.copy()
mask = np.zeros((h + 4, w + 4), np.uint8)
cv2.floodFill(canvas, mask, (0, 0), 1)
canvas = canvas[1:h + 1, 1:w + 1].astype(bool)  # np.bool was removed in newer NumPy
return ~canvas | input_mask
def fourier2poly(fourier_coeff, num_reconstr_points=50):
""" Inverse Fourier transform
Args:
fourier_coeff (ndarray): Fourier coefficients shaped (n, 2k+1),
with n and k being candidates number and Fourier degree
respectively.
num_reconstr_points (int): Number of reconstructed polygon points.
Returns:
Polygons (ndarray): The reconstructed polygons shaped (n, n')
"""
a = np.zeros((len(fourier_coeff), num_reconstr_points), dtype='complex')
k = (len(fourier_coeff[0]) - 1) // 2
a[:, 0:k + 1] = fourier_coeff[:, k:]
a[:, -k:] = fourier_coeff[:, :k]
poly_complex = ifft(a) * num_reconstr_points
polygon = np.zeros((len(fourier_coeff), num_reconstr_points, 2))
polygon[:, :, 0] = poly_complex.real
polygon[:, :, 1] = poly_complex.imag
return polygon.astype('int32').reshape((len(fourier_coeff), -1))
class FCEPostProcess(object):
"""
The post process for FCENet.
"""
def __init__(self,
scales,
fourier_degree=5,
num_reconstr_points=50,
decoding_type='fcenet',
score_thr=0.3,
nms_thr=0.1,
alpha=1.0,
beta=1.0,
box_type='poly',
**kwargs):
self.scales = scales
self.fourier_degree = fourier_degree
self.num_reconstr_points = num_reconstr_points
self.decoding_type = decoding_type
self.score_thr = score_thr
self.nms_thr = nms_thr
self.alpha = alpha
self.beta = beta
self.box_type = box_type
def __call__(self, preds, shape_list):
score_maps = []
for key, value in preds.items():
if isinstance(value, torch.Tensor):
value = value.numpy()
cls_res = value[:, :4, :, :]
reg_res = value[:, 4:, :, :]
score_maps.append([cls_res, reg_res])
return self.get_boundary(score_maps, shape_list)
def resize_boundary(self, boundaries, scale_factor):
"""Rescale boundaries via scale_factor.
Args:
boundaries (list[list[float]]): The boundary list. Each boundary
with size 2k+1 with k>=4.
scale_factor(ndarray): The scale factor of size (4,).
Returns:
boundaries (list[list[float]]): The scaled boundaries.
"""
boxes = []
scores = []
for b in boundaries:
sz = len(b)
valid_boundary(b, True)
scores.append(b[-1])
b = (np.array(b[:sz - 1]) *
(np.tile(scale_factor[:2], int(
(sz - 1) / 2)).reshape(1, sz - 1))).flatten().tolist()
boxes.append(np.array(b).reshape([-1, 2]))
return np.array(boxes, dtype=np.float32), scores
def get_boundary(self, score_maps, shape_list):
assert len(score_maps) == len(self.scales)
boundaries = []
for idx, score_map in enumerate(score_maps):
scale = self.scales[idx]
boundaries = boundaries + self._get_boundary_single(score_map,
scale)
# nms
boundaries = poly_nms(boundaries, self.nms_thr)
boundaries, scores = self.resize_boundary(
boundaries, (1 / shape_list[0, 2:]).tolist()[::-1])
boxes_batch = [dict(points=boundaries, scores=scores)]
return boxes_batch
def _get_boundary_single(self, score_map, scale):
assert len(score_map) == 2
assert score_map[1].shape[1] == 4 * self.fourier_degree + 2
return self.fcenet_decode(
preds=score_map,
fourier_degree=self.fourier_degree,
num_reconstr_points=self.num_reconstr_points,
scale=scale,
alpha=self.alpha,
beta=self.beta,
box_type=self.box_type,
score_thr=self.score_thr,
nms_thr=self.nms_thr)
def fcenet_decode(self,
preds,
fourier_degree,
num_reconstr_points,
scale,
alpha=1.0,
beta=2.0,
box_type='poly',
score_thr=0.3,
nms_thr=0.1):
"""Decoding predictions of FCENet to instances.
Args:
preds (list(Tensor)): The head output tensors.
fourier_degree (int): The maximum Fourier transform degree k.
num_reconstr_points (int): The points number of the polygon
reconstructed from predicted Fourier coefficients.
scale (int): The down-sample scale of the prediction.
alpha (float) : The parameter to calculate final scores. Score_{final}
= (Score_{text region} ^ alpha)
* (Score_{text center region}^ beta)
beta (float) : The parameter to calculate final score.
box_type (str): Boundary encoding type 'poly' or 'quad'.
score_thr (float) : The threshold used to filter out the final
candidates.
nms_thr (float) : The threshold of nms.
Returns:
boundaries (list[list[float]]): The instance boundary and confidence
list.
"""
assert isinstance(preds, list)
assert len(preds) == 2
assert box_type in ['poly', 'quad']
cls_pred = preds[0][0]
tr_pred = cls_pred[0:2]
tcl_pred = cls_pred[2:]
reg_pred = preds[1][0].transpose([1, 2, 0])
x_pred = reg_pred[:, :, :2 * fourier_degree + 1]
y_pred = reg_pred[:, :, 2 * fourier_degree + 1:]
score_pred = (tr_pred[1]**alpha) * (tcl_pred[1]**beta)
tr_pred_mask = (score_pred) > score_thr
tr_mask = fill_hole(tr_pred_mask)
tr_contours, _ = cv2.findContours(
tr_mask.astype(np.uint8), cv2.RETR_TREE,
cv2.CHAIN_APPROX_SIMPLE) # opencv4
mask = np.zeros_like(tr_mask)
boundaries = []
for cont in tr_contours:
deal_map = mask.copy().astype(np.int8)
cv2.drawContours(deal_map, [cont], -1, 1, -1)
score_map = score_pred * deal_map
score_mask = score_map > 0
xy_text = np.argwhere(score_mask)
dxy = xy_text[:, 1] + xy_text[:, 0] * 1j
x, y = x_pred[score_mask], y_pred[score_mask]
c = x + y * 1j
c[:, fourier_degree] = c[:, fourier_degree] + dxy
c *= scale
polygons = fourier2poly(c, num_reconstr_points)
score = score_map[score_mask].reshape(-1, 1)
polygons = poly_nms(np.hstack((polygons, score)).tolist(), nms_thr)
boundaries = boundaries + polygons
boundaries = poly_nms(boundaries, nms_thr)
if box_type == 'quad':
new_boundaries = []
for boundary in boundaries:
poly = np.array(boundary[:-1]).reshape(-1, 2).astype(np.float32)
score = boundary[-1]
points = cv2.boxPoints(cv2.minAreaRect(poly))
points = np.int0(points)
new_boundaries.append(points.reshape(-1).tolist() + [score])
boundaries = new_boundaries
return boundaries
| [
"[email protected]"
] |