Dataset schema (one record per extracted Python function; records follow below):

column              dtype    values
nwo                 string   length 5-106 (repo owner/name)
sha                 string   length 40 (commit hash)
path                string   length 4-174
language            string   1 distinct value ("python")
identifier          string   length 1-140
parameters          string   length 0-87.7k
argument_list       string   1 distinct value
return_statement    string   length 0-426k
docstring           string   length 0-64.3k
docstring_summary   string   length 0-26.3k
docstring_tokens    list
function            string   length 18-4.83M
function_tokens     list
url                 string   length 83-304
dephell/dephell
de96f01fcfd8dd620b049369a8ec30dde566c5de
dephell/commands/inspect_gadget.py
python
InspectGadgetCommand.build_parser
(parser)
return parser
[]
def build_parser(parser) -> ArgumentParser:
    builders.build_config(parser)
    return parser
[ "def", "build_parser", "(", "parser", ")", "->", "ArgumentParser", ":", "builders", ".", "build_config", "(", "parser", ")", "return", "parser" ]
https://github.com/dephell/dephell/blob/de96f01fcfd8dd620b049369a8ec30dde566c5de/dephell/commands/inspect_gadget.py#L54-L56
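Usage sketch (editor's note): how a build_parser hook like the one above is typically driven; `build_config` here is a hypothetical stand-in for dephell's internal `builders.build_config`.

from argparse import ArgumentParser

def build_config(parser):
    # hypothetical stand-in for dephell's builders.build_config
    parser.add_argument('--config', help='path to a config file')

def build_parser(parser) -> ArgumentParser:
    build_config(parser)
    return parser

parser = build_parser(ArgumentParser(prog='inspect-gadget'))
args = parser.parse_args(['--config', 'pyproject.toml'])
print(args.config)  # -> pyproject.toml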
jython/frozen-mirror
b8d7aa4cee50c0c0fe2f4b235dd62922dd0f3f99
lib-python/2.7/distutils/command/build_ext.py
python
build_ext.get_ext_fullname
(self, ext_name)
Returns the fullname of a given extension name. Adds the `package.` prefix
Returns the fullname of a given extension name.
[ "Returns", "the", "fullname", "of", "a", "given", "extension", "name", "." ]
def get_ext_fullname(self, ext_name):
    """Returns the fullname of a given extension name.

    Adds the `package.` prefix"""
    if self.package is None:
        return ext_name
    else:
        return self.package + '.' + ext_name
[ "def", "get_ext_fullname", "(", "self", ",", "ext_name", ")", ":", "if", "self", ".", "package", "is", "None", ":", "return", "ext_name", "else", ":", "return", "self", ".", "package", "+", "'.'", "+", "ext_name" ]
https://github.com/jython/frozen-mirror/blob/b8d7aa4cee50c0c0fe2f4b235dd62922dd0f3f99/lib-python/2.7/distutils/command/build_ext.py#L654-L661
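A quick, self-contained sketch of the prefixing rule above (the method only needs `self.package` to be None or a dotted package name):

def get_ext_fullname(package, ext_name):
    # standalone version of the method above, with `package`
    # passed explicitly instead of read from self
    if package is None:
        return ext_name
    return package + '.' + ext_name

print(get_ext_fullname(None, 'fast_math'))         # -> fast_math
print(get_ext_fullname('mypkg.sub', 'fast_math'))  # -> mypkg.sub.fast_math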
trezor/python-trezor
2813522b05cef4e0e545a101f8b3559a3183b45b
trezorlib/cosi.py
python
get_nonce
( sk: Ed25519PrivateKey, data: bytes, ctr: int = 0 )
return r, Ed25519PublicPoint(_ed25519.encodepoint(R))
Calculate CoSi nonces for given data. These differ from Ed25519 deterministic nonces in that there is a counter appended at end. Returns both the private point `r` and the partial signature `R`. `r` is returned for performance reasons: :func:`sign_with_privkey` takes it as its `nonce` argument so that it doesn't repeat the `get_nonce` call. `R` should be combined with other partial signatures through :func:`combine_keys` to obtain a "global commitment".
Calculate CoSi nonces for given data. These differ from Ed25519 deterministic nonces in that there is a counter appended at end.
[ "Calculate", "CoSi", "nonces", "for", "given", "data", ".", "These", "differ", "from", "Ed25519", "deterministic", "nonces", "in", "that", "there", "is", "a", "counter", "appended", "at", "end", "." ]
def get_nonce(
    sk: Ed25519PrivateKey, data: bytes, ctr: int = 0
) -> Tuple[int, Ed25519PublicPoint]:
    """Calculate CoSi nonces for given data.
    These differ from Ed25519 deterministic nonces in that there is a counter
    appended at end.

    Returns both the private point `r` and the partial signature `R`.
    `r` is returned for performance reasons: :func:`sign_with_privkey`
    takes it as its `nonce` argument so that it doesn't repeat the
    `get_nonce` call.

    `R` should be combined with other partial signatures through
    :func:`combine_keys` to obtain a "global commitment".
    """
    # r = hash(hash(sk)[b .. 2b] + M + ctr)
    # R = rB
    h = _ed25519.H(sk)
    bytesize = _ed25519.b // 8
    assert len(h) == bytesize * 2
    r = _ed25519.Hint(h[bytesize:] + data + ctr.to_bytes(4, "big"))
    R = _ed25519.scalarmult(_ed25519.B, r)
    return r, Ed25519PublicPoint(_ed25519.encodepoint(R))
[ "def", "get_nonce", "(", "sk", ":", "Ed25519PrivateKey", ",", "data", ":", "bytes", ",", "ctr", ":", "int", "=", "0", ")", "->", "Tuple", "[", "int", ",", "Ed25519PublicPoint", "]", ":", "# r = hash(hash(sk)[b .. 2b] + M + ctr)", "# R = rB", "h", "=", "_ed25519", ".", "H", "(", "sk", ")", "bytesize", "=", "_ed25519", ".", "b", "//", "8", "assert", "len", "(", "h", ")", "==", "bytesize", "*", "2", "r", "=", "_ed25519", ".", "Hint", "(", "h", "[", "bytesize", ":", "]", "+", "data", "+", "ctr", ".", "to_bytes", "(", "4", ",", "\"big\"", ")", ")", "R", "=", "_ed25519", ".", "scalarmult", "(", "_ed25519", ".", "B", ",", "r", ")", "return", "r", ",", "Ed25519PublicPoint", "(", "_ed25519", ".", "encodepoint", "(", "R", ")", ")" ]
https://github.com/trezor/python-trezor/blob/2813522b05cef4e0e545a101f8b3559a3183b45b/trezorlib/cosi.py#L47-L67
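A sketch of the commented formula r = hash(hash(sk)[b .. 2b] + M + ctr), assuming the reference Ed25519 parameters (H is SHA-512, b = 256, and Hint reads the digest as a little-endian integer); this mirrors the scalar half of get_nonce without trezorlib:

import hashlib

def cosi_nonce_scalar(sk: bytes, data: bytes, ctr: int = 0) -> int:
    # assumes reference Ed25519 parameters: H = SHA-512, b = 256
    h = hashlib.sha512(sk).digest()           # hash(sk), 64 bytes
    prefix = h[32:]                           # bytes b/8 .. 2b/8
    digest = hashlib.sha512(prefix + data + ctr.to_bytes(4, 'big')).digest()
    return int.from_bytes(digest, 'little')   # Hint(...)

r = cosi_nonce_scalar(b'\x00' * 32, b'message', ctr=1)
print(r.bit_length() <= 512)  # -> True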
tendenci/tendenci
0f2c348cc0e7d41bc56f50b00ce05544b083bf1d
tendenci/apps/discounts/templatetags/discount_tags.py
python
discount_current_app
(context, user, discount=None)
return context
[]
def discount_current_app(context, user, discount=None):
    context.update({
        "app_object": discount,
        "user": user
    })
    return context
[ "def", "discount_current_app", "(", "context", ",", "user", ",", "discount", "=", "None", ")", ":", "context", ".", "update", "(", "{", "\"app_object\"", ":", "discount", ",", "\"user\"", ":", "user", "}", ")", "return", "context" ]
https://github.com/tendenci/tendenci/blob/0f2c348cc0e7d41bc56f50b00ce05544b083bf1d/tendenci/apps/discounts/templatetags/discount_tags.py#L34-L39
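The tag only merges the discount and user into the template context; a plain-dict sketch of that behaviour outside Django:

def discount_current_app(context, user, discount=None):
    # merge the discount object and user into the template context
    context.update({
        "app_object": discount,
        "user": user
    })
    return context

ctx = discount_current_app({}, user='alice', discount='SPRING10')
print(ctx)  # -> {'app_object': 'SPRING10', 'user': 'alice'}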
openhatch/oh-mainline
ce29352a034e1223141dcc2f317030bbc3359a51
vendor/packages/Django/django/contrib/admin/filters.py
python
ListFilter.expected_parameters
(self)
Returns the list of parameter names that are expected from the request's query string and that will be used by this filter.
Returns the list of parameter names that are expected from the request's query string and that will be used by this filter.
[ "Returns", "the", "list", "of", "parameter", "names", "that", "are", "expected", "from", "the", "request", "s", "query", "string", "and", "that", "will", "be", "used", "by", "this", "filter", "." ]
def expected_parameters(self):
    """
    Returns the list of parameter names that are expected from the
    request's query string and that will be used by this filter.
    """
    raise NotImplementedError
[ "def", "expected_parameters", "(", "self", ")", ":", "raise", "NotImplementedError" ]
https://github.com/openhatch/oh-mainline/blob/ce29352a034e1223141dcc2f317030bbc3359a51/vendor/packages/Django/django/contrib/admin/filters.py#L50-L55
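A sketch of the contract this abstract method defines: each concrete filter reports the query-string keys it owns (the subclass and its `parameter_name` are hypothetical):

class ListFilter:
    def expected_parameters(self):
        raise NotImplementedError

class StatusFilter(ListFilter):
    parameter_name = 'status'

    def expected_parameters(self):
        # this filter reads exactly one query-string key
        return [self.parameter_name]

print(StatusFilter().expected_parameters())  # -> ['status']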
nvbn/everpad
5db96c0f9b7c30ce4f900274f3826fdfa55cbaac
everpad/pad/editor/resources.py
python
ResourceEdit._put
(self, res)
Put resource on widget
Put resource on widget
[ "Put", "resource", "on", "widget" ]
def _put(self, res):
    """Put resource on widget"""
    item = ResourceItem(res)
    item.mouseReleaseEvent = partial(self.click, res)
    self.widget.layout().addWidget(item)
    self._resource_labels[res] = item
    self._res_hash[res.hash] = res
    res.in_content = False
    self.update_label()
[ "def", "_put", "(", "self", ",", "res", ")", ":", "item", "=", "ResourceItem", "(", "res", ")", "item", ".", "mouseReleaseEvent", "=", "partial", "(", "self", ".", "click", ",", "res", ")", "self", ".", "widget", ".", "layout", "(", ")", ".", "addWidget", "(", "item", ")", "self", ".", "_resource_labels", "[", "res", "]", "=", "item", "self", ".", "_res_hash", "[", "res", ".", "hash", "]", "=", "res", "res", ".", "in_content", "=", "False", "self", ".", "update_label", "(", ")" ]
https://github.com/nvbn/everpad/blob/5db96c0f9b7c30ce4f900274f3826fdfa55cbaac/everpad/pad/editor/resources.py#L108-L116
JaniceWuo/MovieRecommend
4c86db64ca45598917d304f535413df3bc9fea65
movierecommend/venv1/Lib/site-packages/django/contrib/gis/db/models/functions.py
python
Length.as_postgresql
(self, compiler, connection)
return super(Length, self).as_sql(compiler, connection)
[]
def as_postgresql(self, compiler, connection):
    geo_field = GeometryField(srid=self.srid)  # Fake field to get SRID info
    if self.source_is_geography():
        self.source_expressions.append(Value(self.spheroid))
    elif geo_field.geodetic(connection):
        # Geometry fields with geodetic (lon/lat) coordinates need length_spheroid
        self.function = connection.ops.spatial_function_name('LengthSpheroid')
        self.source_expressions.append(Value(geo_field._spheroid))
    else:
        dim = min(f.dim for f in self.get_source_fields() if f)
        if dim > 2:
            self.function = connection.ops.length3d
    return super(Length, self).as_sql(compiler, connection)
[ "def", "as_postgresql", "(", "self", ",", "compiler", ",", "connection", ")", ":", "geo_field", "=", "GeometryField", "(", "srid", "=", "self", ".", "srid", ")", "# Fake field to get SRID info", "if", "self", ".", "source_is_geography", "(", ")", ":", "self", ".", "source_expressions", ".", "append", "(", "Value", "(", "self", ".", "spheroid", ")", ")", "elif", "geo_field", ".", "geodetic", "(", "connection", ")", ":", "# Geometry fields with geodetic (lon/lat) coordinates need length_spheroid", "self", ".", "function", "=", "connection", ".", "ops", ".", "spatial_function_name", "(", "'LengthSpheroid'", ")", "self", ".", "source_expressions", ".", "append", "(", "Value", "(", "geo_field", ".", "_spheroid", ")", ")", "else", ":", "dim", "=", "min", "(", "f", ".", "dim", "for", "f", "in", "self", ".", "get_source_fields", "(", ")", "if", "f", ")", "if", "dim", ">", "2", ":", "self", ".", "function", "=", "connection", ".", "ops", ".", "length3d", "return", "super", "(", "Length", ",", "self", ")", ".", "as_sql", "(", "compiler", ",", "connection", ")" ]
https://github.com/JaniceWuo/MovieRecommend/blob/4c86db64ca45598917d304f535413df3bc9fea65/movierecommend/venv1/Lib/site-packages/django/contrib/gis/db/models/functions.py#L344-L356
h5py/h5py
aa31f03bef99e5807d1d6381e36233325d944279
h5py/_hl/group.py
python
Group.__init__
(self, bind)
Create a new Group object by binding to a low-level GroupID.
Create a new Group object by binding to a low-level GroupID.
[ "Create", "a", "new", "Group", "object", "by", "binding", "to", "a", "low", "-", "level", "GroupID", "." ]
def __init__(self, bind):
    """ Create a new Group object by binding to a low-level GroupID.
    """
    with phil:
        if not isinstance(bind, h5g.GroupID):
            raise ValueError("%s is not a GroupID" % bind)
        super().__init__(bind)
[ "def", "__init__", "(", "self", ",", "bind", ")", ":", "with", "phil", ":", "if", "not", "isinstance", "(", "bind", ",", "h5g", ".", "GroupID", ")", ":", "raise", "ValueError", "(", "\"%s is not a GroupID\"", "%", "bind", ")", "super", "(", ")", ".", "__init__", "(", "bind", ")" ]
https://github.com/h5py/h5py/blob/aa31f03bef99e5807d1d6381e36233325d944279/h5py/_hl/group.py#L34-L40
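Usage sketch (assumes h5py is installed): Group objects normally come from the File/Group APIs, but binding one directly to a low-level GroupID mirrors what __init__ validates:

import h5py

with h5py.File('example.h5', 'w') as f:
    gid = f.create_group('data').id   # a low-level h5py.h5g.GroupID
    grp = h5py.Group(gid)             # rebind at the high level
    print(grp.name)                   # -> /data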
rgerum/pylustrator
b01825bc3de75ac127291647729fa7b0e6f8b821
pylustrator/QComplexWidgets.py
python
TextPropertiesWidget.__init__
(self, layout: QtWidgets.QLayout)
A widget to edit the properties of a Matplotlib text Args: layout: the layout to which to add the widget
A widget to edit the properties of a Matplotlib text
[ "A", "widget", "to", "edit", "the", "properties", "of", "a", "Matplotlib", "text" ]
def __init__(self, layout: QtWidgets.QLayout):
    """ A widget to edit the properties of a Matplotlib text

    Args:
        layout: the layout to which to add the widget
    """
    QtWidgets.QWidget.__init__(self)
    layout.addWidget(self)
    self.layout = QtWidgets.QHBoxLayout(self)
    self.layout.setContentsMargins(0, 0, 0, 0)

    self.buttons_align = []
    self.align_names = ["left", "center", "right"]
    for align in self.align_names:
        button = QtWidgets.QPushButton(qta.icon("fa.align-" + align), "")
        button.setCheckable(True)
        button.clicked.connect(lambda x, name=align: self.changeAlign(name))
        self.layout.addWidget(button)
        self.buttons_align.append(button)

    self.button_bold = QtWidgets.QPushButton(qta.icon("fa.bold"), "")
    self.button_bold.setCheckable(True)
    self.button_bold.clicked.connect(self.changeWeight)
    self.layout.addWidget(self.button_bold)

    self.button_italic = QtWidgets.QPushButton(qta.icon("fa.italic"), "")
    self.button_italic.setCheckable(True)
    self.button_italic.clicked.connect(self.changeStyle)
    self.layout.addWidget(self.button_italic)

    self.button_color = QColorWidget(self.layout)
    self.button_color.valueChanged.connect(self.changeColor)

    self.layout.addStretch()

    self.font_size = QtWidgets.QSpinBox()
    self.layout.addWidget(self.font_size)
    self.font_size.valueChanged.connect(self.changeFontSize)

    self.label = QtWidgets.QPushButton(qta.icon("fa.font"), "")  # .pixmap(16))
    self.layout.addWidget(self.label)
    self.label.clicked.connect(self.selectFont)

    self.button_delete = QtWidgets.QPushButton(qta.icon("fa.trash"), "")
    self.button_delete.clicked.connect(self.delete)
    self.layout.addWidget(self.button_delete)
[ "def", "__init__", "(", "self", ",", "layout", ":", "QtWidgets", ".", "QLayout", ")", ":", "QtWidgets", ".", "QWidget", ".", "__init__", "(", "self", ")", "layout", ".", "addWidget", "(", "self", ")", "self", ".", "layout", "=", "QtWidgets", ".", "QHBoxLayout", "(", "self", ")", "self", ".", "layout", ".", "setContentsMargins", "(", "0", ",", "0", ",", "0", ",", "0", ")", "self", ".", "buttons_align", "=", "[", "]", "self", ".", "align_names", "=", "[", "\"left\"", ",", "\"center\"", ",", "\"right\"", "]", "for", "align", "in", "self", ".", "align_names", ":", "button", "=", "QtWidgets", ".", "QPushButton", "(", "qta", ".", "icon", "(", "\"fa.align-\"", "+", "align", ")", ",", "\"\"", ")", "button", ".", "setCheckable", "(", "True", ")", "button", ".", "clicked", ".", "connect", "(", "lambda", "x", ",", "name", "=", "align", ":", "self", ".", "changeAlign", "(", "name", ")", ")", "self", ".", "layout", ".", "addWidget", "(", "button", ")", "self", ".", "buttons_align", ".", "append", "(", "button", ")", "self", ".", "button_bold", "=", "QtWidgets", ".", "QPushButton", "(", "qta", ".", "icon", "(", "\"fa.bold\"", ")", ",", "\"\"", ")", "self", ".", "button_bold", ".", "setCheckable", "(", "True", ")", "self", ".", "button_bold", ".", "clicked", ".", "connect", "(", "self", ".", "changeWeight", ")", "self", ".", "layout", ".", "addWidget", "(", "self", ".", "button_bold", ")", "self", ".", "button_italic", "=", "QtWidgets", ".", "QPushButton", "(", "qta", ".", "icon", "(", "\"fa.italic\"", ")", ",", "\"\"", ")", "self", ".", "button_italic", ".", "setCheckable", "(", "True", ")", "self", ".", "button_italic", ".", "clicked", ".", "connect", "(", "self", ".", "changeStyle", ")", "self", ".", "layout", ".", "addWidget", "(", "self", ".", "button_italic", ")", "self", ".", "button_color", "=", "QColorWidget", "(", "self", ".", "layout", ")", "self", ".", "button_color", ".", "valueChanged", ".", "connect", "(", "self", ".", "changeColor", ")", "self", ".", "layout", ".", "addStretch", "(", ")", "self", ".", "font_size", "=", "QtWidgets", ".", "QSpinBox", "(", ")", "self", ".", "layout", ".", "addWidget", "(", "self", ".", "font_size", ")", "self", ".", "font_size", ".", "valueChanged", ".", "connect", "(", "self", ".", "changeFontSize", ")", "self", ".", "label", "=", "QtWidgets", ".", "QPushButton", "(", "qta", ".", "icon", "(", "\"fa.font\"", ")", ",", "\"\"", ")", "# .pixmap(16))", "self", ".", "layout", ".", "addWidget", "(", "self", ".", "label", ")", "self", ".", "label", ".", "clicked", ".", "connect", "(", "self", ".", "selectFont", ")", "self", ".", "button_delete", "=", "QtWidgets", ".", "QPushButton", "(", "qta", ".", "icon", "(", "\"fa.trash\"", ")", ",", "\"\"", ")", "self", ".", "button_delete", ".", "clicked", ".", "connect", "(", "self", ".", "delete", ")", "self", ".", "layout", ".", "addWidget", "(", "self", ".", "button_delete", ")" ]
https://github.com/rgerum/pylustrator/blob/b01825bc3de75ac127291647729fa7b0e6f8b821/pylustrator/QComplexWidgets.py#L69-L114
pexpect/pexpect
2be6c4d1aa2b9b522636342c2fd54b73c058060d
pexpect/screen.py
python
screen.dump
(self)
return u''.join ([ u''.join(c) for c in self.w ])
This returns a copy of the screen as a unicode string. This is similar to __str__/__unicode__ except that lines are not terminated with line feeds.
This returns a copy of the screen as a unicode string. This is similar to __str__/__unicode__ except that lines are not terminated with line feeds.
[ "This", "returns", "a", "copy", "of", "the", "screen", "as", "a", "unicode", "string", ".", "This", "is", "similar", "to", "__str__", "/", "__unicode__", "except", "that", "lines", "are", "not", "terminated", "with", "line", "feeds", "." ]
def dump(self):
    '''This returns a copy of the screen as a unicode string. This is similar to
    __str__/__unicode__ except that lines are not terminated with line feeds.'''

    return u''.join([u''.join(c) for c in self.w])
[ "def", "dump", "(", "self", ")", ":", "return", "u''", ".", "join", "(", "[", "u''", ".", "join", "(", "c", ")", "for", "c", "in", "self", ".", "w", "]", ")" ]
https://github.com/pexpect/pexpect/blob/2be6c4d1aa2b9b522636342c2fd54b73c058060d/pexpect/screen.py#L131-L136
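A short sketch (assuming pexpect is installed and that `screen.screen(r, c)` takes rows and columns, as in pexpect's screen module): the dump is rows joined with no line feeds, so its length is r * c:

from pexpect import screen

s = screen.screen(2, 3)   # 2 rows x 3 columns, initially blank
print(len(s.dump()))      # -> 6, not 8: no newlines between rows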
naparuba/shinken
8163d645e801fa43ee1704f099a4684f120e667b
shinken/webui/bottlecore.py
python
Bottle._cast
(self, out, request, response, peek=None)
return self._cast(HTTPError(500, 'Unsupported response type: %s' % type(first)), request, response)
Try to convert the parameter into something WSGI compatible and set correct HTTP headers when possible. Support: False, str, unicode, dict, HTTPResponse, HTTPError, file-like, iterable of strings and iterable of unicodes
Try to convert the parameter into something WSGI compatible and set correct HTTP headers when possible. Support: False, str, unicode, dict, HTTPResponse, HTTPError, file-like, iterable of strings and iterable of unicodes
[ "Try", "to", "convert", "the", "parameter", "into", "something", "WSGI", "compatible", "and", "set", "correct", "HTTP", "headers", "when", "possible", ".", "Support", ":", "False", "str", "unicode", "dict", "HTTPResponse", "HTTPError", "file", "-", "like", "iterable", "of", "strings", "and", "iterable", "of", "unicodes" ]
def _cast(self, out, request, response, peek=None):
    """ Try to convert the parameter into something WSGI compatible and set
    correct HTTP headers when possible.
    Support: False, str, unicode, dict, HTTPResponse, HTTPError, file-like,
    iterable of strings and iterable of unicodes
    """
    # Empty output is done here
    if not out:
        response['Content-Length'] = 0
        return []
    # Join lists of byte or unicode strings. Mixed lists are NOT supported
    if isinstance(out, (tuple, list))\
    and isinstance(out[0], (bytes, unicode)):
        out = out[0][0:0].join(out)  # b'abc'[0:0] -> b''
    # Encode unicode strings
    if isinstance(out, unicode):
        out = out.encode(response.charset)
    # Byte Strings are just returned
    if isinstance(out, bytes):
        response['Content-Length'] = len(out)
        return [out]
    # HTTPError or HTTPException (recursive, because they may wrap anything)
    # TODO: Handle these explicitly in handle() or make them iterable.
    if isinstance(out, HTTPError):
        out.apply(response)
        out = self.error_handler.get(out.status, repr)(out)
        if isinstance(out, HTTPResponse):
            depr('Error handlers must not return :exc:`HTTPResponse`.')  # 0.9
        return self._cast(out, request, response)
    if isinstance(out, HTTPResponse):
        out.apply(response)
        return self._cast(out.output, request, response)
    # File-like objects.
    if hasattr(out, 'read'):
        if 'wsgi.file_wrapper' in request.environ:
            return request.environ['wsgi.file_wrapper'](out)
        elif hasattr(out, 'close') or not hasattr(out, '__iter__'):
            return WSGIFileWrapper(out)
    # Handle Iterables. We peek into them to detect their inner type.
    try:
        out = iter(out)
        first = out.next()
        while not first:
            first = out.next()
    except StopIteration:
        return self._cast('', request, response)
    except HTTPResponse, e:
        first = e
    except Exception, e:
        first = HTTPError(500, 'Unhandled exception', e, format_exc(10))
        if isinstance(e, (KeyboardInterrupt, SystemExit, MemoryError))\
        or not self.catchall:
            raise
    # These are the inner types allowed in iterator or generator objects.
    if isinstance(first, HTTPResponse):
        return self._cast(first, request, response)
    if isinstance(first, bytes):
        return itertools.chain([first], out)
    if isinstance(first, unicode):
        return itertools.imap(lambda x: x.encode(response.charset),
                              itertools.chain([first], out))
    return self._cast(HTTPError(500, 'Unsupported response type: %s'
                                     % type(first)), request, response)
[ "def", "_cast", "(", "self", ",", "out", ",", "request", ",", "response", ",", "peek", "=", "None", ")", ":", "# Empty output is done here", "if", "not", "out", ":", "response", "[", "'Content-Length'", "]", "=", "0", "return", "[", "]", "# Join lists of byte or unicode strings. Mixed lists are NOT supported", "if", "isinstance", "(", "out", ",", "(", "tuple", ",", "list", ")", ")", "and", "isinstance", "(", "out", "[", "0", "]", ",", "(", "bytes", ",", "unicode", ")", ")", ":", "out", "=", "out", "[", "0", "]", "[", "0", ":", "0", "]", ".", "join", "(", "out", ")", "# b'abc'[0:0] -> b''", "# Encode unicode strings", "if", "isinstance", "(", "out", ",", "unicode", ")", ":", "out", "=", "out", ".", "encode", "(", "response", ".", "charset", ")", "# Byte Strings are just returned", "if", "isinstance", "(", "out", ",", "bytes", ")", ":", "response", "[", "'Content-Length'", "]", "=", "len", "(", "out", ")", "return", "[", "out", "]", "# HTTPError or HTTPException (recursive, because they may wrap anything)", "# TODO: Handle these explicitly in handle() or make them iterable.", "if", "isinstance", "(", "out", ",", "HTTPError", ")", ":", "out", ".", "apply", "(", "response", ")", "out", "=", "self", ".", "error_handler", ".", "get", "(", "out", ".", "status", ",", "repr", ")", "(", "out", ")", "if", "isinstance", "(", "out", ",", "HTTPResponse", ")", ":", "depr", "(", "'Error handlers must not return :exc:`HTTPResponse`.'", ")", "# 0.9", "return", "self", ".", "_cast", "(", "out", ",", "request", ",", "response", ")", "if", "isinstance", "(", "out", ",", "HTTPResponse", ")", ":", "out", ".", "apply", "(", "response", ")", "return", "self", ".", "_cast", "(", "out", ".", "output", ",", "request", ",", "response", ")", "# File-like objects.", "if", "hasattr", "(", "out", ",", "'read'", ")", ":", "if", "'wsgi.file_wrapper'", "in", "request", ".", "environ", ":", "return", "request", ".", "environ", "[", "'wsgi.file_wrapper'", "]", "(", "out", ")", "elif", "hasattr", "(", "out", ",", "'close'", ")", "or", "not", "hasattr", "(", "out", ",", "'__iter__'", ")", ":", "return", "WSGIFileWrapper", "(", "out", ")", "# Handle Iterables. 
We peek into them to detect their inner type.", "try", ":", "out", "=", "iter", "(", "out", ")", "first", "=", "out", ".", "next", "(", ")", "while", "not", "first", ":", "first", "=", "out", ".", "next", "(", ")", "except", "StopIteration", ":", "return", "self", ".", "_cast", "(", "''", ",", "request", ",", "response", ")", "except", "HTTPResponse", ",", "e", ":", "first", "=", "e", "except", "Exception", ",", "e", ":", "first", "=", "HTTPError", "(", "500", ",", "'Unhandled exception'", ",", "e", ",", "format_exc", "(", "10", ")", ")", "if", "isinstance", "(", "e", ",", "(", "KeyboardInterrupt", ",", "SystemExit", ",", "MemoryError", ")", ")", "or", "not", "self", ".", "catchall", ":", "raise", "# These are the inner types allowed in iterator or generator objects.", "if", "isinstance", "(", "first", ",", "HTTPResponse", ")", ":", "return", "self", ".", "_cast", "(", "first", ",", "request", ",", "response", ")", "if", "isinstance", "(", "first", ",", "bytes", ")", ":", "return", "itertools", ".", "chain", "(", "[", "first", "]", ",", "out", ")", "if", "isinstance", "(", "first", ",", "unicode", ")", ":", "return", "itertools", ".", "imap", "(", "lambda", "x", ":", "x", ".", "encode", "(", "response", ".", "charset", ")", ",", "itertools", ".", "chain", "(", "[", "first", "]", ",", "out", ")", ")", "return", "self", ".", "_cast", "(", "HTTPError", "(", "500", ",", "'Unsupported response type: %s'", "%", "type", "(", "first", ")", ")", ",", "request", ",", "response", ")" ]
https://github.com/naparuba/shinken/blob/8163d645e801fa43ee1704f099a4684f120e667b/shinken/webui/bottlecore.py#L703-L768
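A self-contained sketch of the first few casting rules above, in Python 3 spelling (str instead of unicode): join homogeneous lists, encode text, pass bytes through:

def cast(out, charset='utf-8'):
    if not out:
        return []                     # empty body
    if isinstance(out, (tuple, list)) and isinstance(out[0], (bytes, str)):
        out = out[0][0:0].join(out)   # b''.join(...) or ''.join(...)
    if isinstance(out, str):
        out = out.encode(charset)
    if isinstance(out, bytes):
        return [out]
    raise TypeError('unsupported response type: %r' % type(out))

print(cast(['he', 'llo']))  # -> [b'hello']
print(cast(b'raw'))         # -> [b'raw']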
rembo10/headphones
b3199605be1ebc83a7a8feab6b1e99b64014187c
lib/html5lib/inputstream.py
python
HTMLBinaryInputStream.openStream
(self, source)
return stream
Produces a file object from source. source can be either a file object, local filename or a string.
Produces a file object from source.
[ "Produces", "a", "file", "object", "from", "source", "." ]
def openStream(self, source):
    """Produces a file object from source.

    source can be either a file object, local filename or a string.

    """
    # Already a file object
    if hasattr(source, 'read'):
        stream = source
    else:
        stream = BytesIO(source)

    try:
        stream.seek(stream.tell())
    except:
        stream = BufferedStream(stream)

    return stream
[ "def", "openStream", "(", "self", ",", "source", ")", ":", "# Already a file object", "if", "hasattr", "(", "source", ",", "'read'", ")", ":", "stream", "=", "source", "else", ":", "stream", "=", "BytesIO", "(", "source", ")", "try", ":", "stream", ".", "seek", "(", "stream", ".", "tell", "(", ")", ")", "except", ":", "stream", "=", "BufferedStream", "(", "stream", ")", "return", "stream" ]
https://github.com/rembo10/headphones/blob/b3199605be1ebc83a7a8feab6b1e99b64014187c/lib/html5lib/inputstream.py#L443-L460
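A standalone sketch of the same dispatch using only the standard library (BufferedStream is html5lib-internal, so a BytesIO copy stands in for it here):

from io import BytesIO

def open_stream(source):
    # file-like inputs pass through; byte strings get wrapped
    stream = source if hasattr(source, 'read') else BytesIO(source)
    try:
        stream.seek(stream.tell())    # probe for seekability
    except Exception:
        # html5lib wraps non-seekable streams in BufferedStream;
        # eagerly copying into BytesIO is a simple stand-in
        stream = BytesIO(stream.read())
    return stream

print(open_stream(b'<html></html>').read())  # -> b'<html></html>'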
jisaacks/GitGutter
1badb94bd1827f4b3f96fd90c26bf1b348783ff9
modules/annotation.py
python
SimpleLineAnnotationTemplate.render
(cls, kwargs)
return cls.TEMPLATE.format(**kwargs)
Render line annotation using a static template. Arguments: kwargs (dict): The dictionary with the information about the blame, which are provided as variables for the message template. Returns: string: The formatted annotation message.
Render line annotation using a static template.
[ "Render", "line", "annotation", "using", "a", "static", "template", "." ]
def render(cls, kwargs):
    """Render line annotation using a static template.

    Arguments:
        kwargs (dict):
            The dictionary with the information about the blame,
            which are provided as variables for the message template.

    Returns:
        string: The formatted annotation message.
    """
    return cls.TEMPLATE.format(**kwargs)
[ "def", "render", "(", "cls", ",", "kwargs", ")", ":", "return", "cls", ".", "TEMPLATE", ".", "format", "(", "*", "*", "kwargs", ")" ]
https://github.com/jisaacks/GitGutter/blob/1badb94bd1827f4b3f96fd90c26bf1b348783ff9/modules/annotation.py#L20-L31
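A sketch of the classmethod with a hypothetical TEMPLATE (GitGutter's real template differs); format(**kwargs) means a missing key raises KeyError, so callers supply a complete dict:

class SimpleLineAnnotationTemplate:
    TEMPLATE = '{author}, {age} ago: {summary}'   # hypothetical template

    @classmethod
    def render(cls, kwargs):
        return cls.TEMPLATE.format(**kwargs)

print(SimpleLineAnnotationTemplate.render(
    {'author': 'jane', 'age': '2 days', 'summary': 'fix typo'}))
# -> jane, 2 days ago: fix typo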
jython/frozen-mirror
b8d7aa4cee50c0c0fe2f4b235dd62922dd0f3f99
lib-python/2.7/CGIHTTPServer.py
python
CGIHTTPRequestHandler.is_python
(self, path)
return tail.lower() in (".py", ".pyw")
Test whether argument path is a Python script.
Test whether argument path is a Python script.
[ "Test", "whether", "argument", "path", "is", "a", "Python", "script", "." ]
def is_python(self, path):
    """Test whether argument path is a Python script."""
    head, tail = os.path.splitext(path)
    return tail.lower() in (".py", ".pyw")
[ "def", "is_python", "(", "self", ",", "path", ")", ":", "head", ",", "tail", "=", "os", ".", "path", ".", "splitext", "(", "path", ")", "return", "tail", ".", "lower", "(", ")", "in", "(", "\".py\"", ",", "\".pyw\"", ")" ]
https://github.com/jython/frozen-mirror/blob/b8d7aa4cee50c0c0fe2f4b235dd62922dd0f3f99/lib-python/2.7/CGIHTTPServer.py#L101-L104
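The test is purely extension-based and case-insensitive; a quick demonstration with the method body lifted out:

import os

def is_python(path):
    head, tail = os.path.splitext(path)
    return tail.lower() in ('.py', '.pyw')

print(is_python('/cgi-bin/hello.PY'))   # -> True
print(is_python('/cgi-bin/hello.pyc'))  # -> False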
joe42/CloudFusion
c4b94124e74a81e0634578c7754d62160081f7a1
cloudfusion/third_party/parsedatetime/parsedatetime/__init__.py
python
Calendar.parse
(self, datetimeString, sourceTime=None)
return (totalTime, self.dateFlag + self.timeFlag)
Splits the given C{datetimeString} into tokens, finds the regex patterns that match and then calculates a C{struct_time} value from the chunks. If C{sourceTime} is given then the C{struct_time} value will be calculated from that value, otherwise from the current date/time. If the C{datetimeString} is parsed and date/time value found then the second item of the returned tuple will be a flag to let you know what kind of C{struct_time} value is being returned:: 0 = not parsed at all 1 = parsed as a C{date} 2 = parsed as a C{time} 3 = parsed as a C{datetime} @type datetimeString: string @param datetimeString: date/time text to evaluate @type sourceTime: struct_time @param sourceTime: C{struct_time} value to use as the base @rtype: tuple @return: tuple of: modified C{sourceTime} and the result flag
Splits the given C{datetimeString} into tokens, finds the regex patterns that match and then calculates a C{struct_time} value from the chunks.
[ "Splits", "the", "given", "C", "{", "datetimeString", "}", "into", "tokens", "finds", "the", "regex", "patterns", "that", "match", "and", "then", "calculates", "a", "C", "{", "struct_time", "}", "value", "from", "the", "chunks", "." ]
def parse(self, datetimeString, sourceTime=None):
    """
    Splits the given C{datetimeString} into tokens, finds the regex
    patterns that match and then calculates a C{struct_time} value
    from the chunks.

    If C{sourceTime} is given then the C{struct_time} value will be
    calculated from that value, otherwise from the current date/time.

    If the C{datetimeString} is parsed and date/time value found then
    the second item of the returned tuple will be a flag to let you
    know what kind of C{struct_time} value is being returned::

        0 = not parsed at all
        1 = parsed as a C{date}
        2 = parsed as a C{time}
        3 = parsed as a C{datetime}

    @type  datetimeString: string
    @param datetimeString: date/time text to evaluate
    @type  sourceTime:     struct_time
    @param sourceTime:     C{struct_time} value to use as the base

    @rtype:  tuple
    @return: tuple of: modified C{sourceTime} and the result flag
    """
    datetimeString = re.sub(r'(\w)(\.)(\s)', r'\1\3', datetimeString)
    datetimeString = re.sub(r'(\w)(\'|")(\s|$)', r'\1 \3', datetimeString)
    datetimeString = re.sub(r'(\s|^)(\'|")(\w)', r'\1 \3', datetimeString)

    if sourceTime:
        if isinstance(sourceTime, datetime.datetime):
            log.debug('coercing datetime to timetuple')
            sourceTime = sourceTime.timetuple()
        else:
            if not isinstance(sourceTime, time.struct_time) and \
               not isinstance(sourceTime, tuple):
                raise Exception('sourceTime is not a struct_time')

    s = datetimeString.strip().lower()
    parseStr = ''
    totalTime = sourceTime

    if s == '':
        if sourceTime is not None:
            return (sourceTime, self.dateFlag + self.timeFlag)
        else:
            return (time.localtime(), 0)

    self.timeFlag = 0
    self.dateFlag = 0

    while len(s) > 0:
        flag = False
        chunk1 = ''
        chunk2 = ''

        log.debug('parse (top of loop): [%s][%s]' % (s, parseStr))

        if parseStr == '':
            # Modifier like next\prev..
            m = self.ptc.CRE_MODIFIER.search(s)
            if m is not None:
                self.modifierFlag = True
                if (m.group('modifier') != s):
                    # capture remaining string
                    parseStr = m.group('modifier')
                    chunk1 = s[:m.start('modifier')].strip()
                    chunk2 = s[m.end('modifier'):].strip()
                    flag = True
                else:
                    parseStr = s

        if parseStr == '':
            # Modifier like from\after\prior..
            m = self.ptc.CRE_MODIFIER2.search(s)
            if m is not None:
                self.modifier2Flag = True
                if (m.group('modifier') != s):
                    # capture remaining string
                    parseStr = m.group('modifier')
                    chunk1 = s[:m.start('modifier')].strip()
                    chunk2 = s[m.end('modifier'):].strip()
                    flag = True
                else:
                    parseStr = s

        if parseStr == '':
            # Quantity + Units
            m = self.ptc.CRE_UNITS.search(s)
            if m is not None:
                log.debug('CRE_UNITS matched')
                if self._UnitsTrapped(s, m, 'units'):
                    log.debug('day suffix trapped by unit match')
                else:
                    self.unitsFlag = True
                    if (m.group('qty') != s):
                        # capture remaining string
                        parseStr = m.group('qty')
                        chunk1 = s[:m.start('qty')].strip()
                        chunk2 = s[m.end('qty'):].strip()
                        if chunk1[-1:] == '-':
                            parseStr = '-%s' % parseStr
                            chunk1 = chunk1[:-1]
                        s = '%s %s' % (chunk1, chunk2)
                        flag = True
                    else:
                        parseStr = s

        if parseStr == '':
            # Quantity + Units
            m = self.ptc.CRE_QUNITS.search(s)
            if m is not None:
                log.debug('CRE_QUNITS matched')
                if self._UnitsTrapped(s, m, 'qunits'):
                    log.debug('day suffix trapped by qunit match')
                else:
                    self.qunitsFlag = True
                    if (m.group('qty') != s):
                        # capture remaining string
                        parseStr = m.group('qty')
                        chunk1 = s[:m.start('qty')].strip()
                        chunk2 = s[m.end('qty'):].strip()
                        if chunk1[-1:] == '-':
                            parseStr = '-%s' % parseStr
                            chunk1 = chunk1[:-1]
                        s = '%s %s' % (chunk1, chunk2)
                        flag = True
                    else:
                        parseStr = s

        if parseStr == '':
            valid_date = False
            for match in self.ptc.CRE_DATE3.finditer(s):
                # to prevent "HH:MM(:SS) time strings" expressions from triggering
                # this regex, we checks if the month field exists in the searched
                # expression, if it doesn't exist, the date field is not valid
                if match.group('mthname'):
                    m = self.ptc.CRE_DATE3.search(s, match.start())
                    valid_date = True
                    break

            # String date format
            if valid_date:
                self.dateStrFlag = True
                self.dateFlag = 1
                if (m.group('date') != s):
                    # capture remaining string
                    parseStr = m.group('date')
                    chunk1 = s[:m.start('date')]
                    chunk2 = s[m.end('date'):]
                    s = '%s %s' % (chunk1, chunk2)
                    flag = True
                else:
                    parseStr = s

        if parseStr == '':
            # Standard date format
            m = self.ptc.CRE_DATE.search(s)
            if m is not None:
                self.dateStdFlag = True
                self.dateFlag = 1
                if (m.group('date') != s):
                    # capture remaining string
                    parseStr = m.group('date')
                    chunk1 = s[:m.start('date')]
                    chunk2 = s[m.end('date'):]
                    s = '%s %s' % (chunk1, chunk2)
                    flag = True
                else:
                    parseStr = s

        if parseStr == '':
            # Natural language day strings
            m = self.ptc.CRE_DAY.search(s)
            if m is not None:
                self.dayStrFlag = True
                self.dateFlag = 1
                if (m.group('day') != s):
                    # capture remaining string
                    parseStr = m.group('day')
                    chunk1 = s[:m.start('day')]
                    chunk2 = s[m.end('day'):]
                    s = '%s %s' % (chunk1, chunk2)
                    flag = True
                else:
                    parseStr = s

        if parseStr == '':
            # Weekday
            m = self.ptc.CRE_WEEKDAY.search(s)
            if m is not None:
                gv = m.group('weekday')
                if s not in self.ptc.dayOffsets:
                    self.weekdyFlag = True
                    self.dateFlag = 1
                    if (gv != s):
                        # capture remaining string
                        parseStr = gv
                        chunk1 = s[:m.start('weekday')]
                        chunk2 = s[m.end('weekday'):]
                        s = '%s %s' % (chunk1, chunk2)
                        flag = True
                    else:
                        parseStr = s

        if parseStr == '':
            # Natural language time strings
            m = self.ptc.CRE_TIME.search(s)
            if m is not None or s in self.ptc.re_values['now']:
                self.timeStrFlag = True
                self.timeFlag = 2
                if (m and m.group('time') != s):
                    # capture remaining string
                    parseStr = m.group('time')
                    chunk1 = s[:m.start('time')]
                    chunk2 = s[m.end('time'):]
                    s = '%s %s' % (chunk1, chunk2)
                    flag = True
                else:
                    parseStr = s

        if parseStr == '':
            # HH:MM(:SS) am/pm time strings
            m = self.ptc.CRE_TIMEHMS2.search(s)
            if m is not None:
                self.meridianFlag = True
                self.timeFlag = 2
                if m.group('minutes') is not None:
                    if m.group('seconds') is not None:
                        parseStr = '%s:%s:%s %s' % (m.group('hours'),
                                                    m.group('minutes'),
                                                    m.group('seconds'),
                                                    m.group('meridian'))
                    else:
                        parseStr = '%s:%s %s' % (m.group('hours'),
                                                 m.group('minutes'),
                                                 m.group('meridian'))
                else:
                    parseStr = '%s %s' % (m.group('hours'),
                                          m.group('meridian'))
                chunk1 = s[:m.start('hours')]
                chunk2 = s[m.end('meridian'):]
                s = '%s %s' % (chunk1, chunk2)
                flag = True

        if parseStr == '':
            # HH:MM(:SS) time strings
            m = self.ptc.CRE_TIMEHMS.search(s)
            if m is not None:
                self.timeStdFlag = True
                self.timeFlag = 2
                if m.group('seconds') is not None:
                    parseStr = '%s:%s:%s' % (m.group('hours'),
                                             m.group('minutes'),
                                             m.group('seconds'))
                    chunk1 = s[:m.start('hours')]
                    chunk2 = s[m.end('seconds'):]
                else:
                    parseStr = '%s:%s' % (m.group('hours'),
                                          m.group('minutes'))
                    chunk1 = s[:m.start('hours')]
                    chunk2 = s[m.end('minutes'):]
                s = '%s %s' % (chunk1, chunk2)
                flag = True

        # if string does not match any regex, empty string to
        # come out of the while loop
        if not flag:
            s = ''

        log.debug('parse (bottom) [%s][%s][%s][%s]' % (s, parseStr, chunk1, chunk2))
        log.debug('weekday %s, dateStd %s, dateStr %s, time %s, timeStr %s, meridian %s' %
                  (self.weekdyFlag, self.dateStdFlag, self.dateStrFlag,
                   self.timeStdFlag, self.timeStrFlag, self.meridianFlag))
        log.debug('dayStr %s, modifier %s, modifier2 %s, units %s, qunits %s' %
                  (self.dayStrFlag, self.modifierFlag, self.modifier2Flag,
                   self.unitsFlag, self.qunitsFlag))

        # evaluate the matched string
        if parseStr != '':
            if self.modifierFlag == True:
                t, totalTime = self._evalModifier(parseStr, chunk1, chunk2, totalTime)
                # t is the unparsed part of the chunks.
                # If it is not date/time, return current
                # totalTime as it is; else return the output
                # after parsing t.
                if (t != '') and (t != None):
                    tempDateFlag = self.dateFlag
                    tempTimeFlag = self.timeFlag
                    (totalTime2, flag) = self.parse(t, totalTime)
                    if flag == 0 and totalTime is not None:
                        self.timeFlag = tempTimeFlag
                        self.dateFlag = tempDateFlag
                        log.debug('return 1')
                        return (totalTime, self.dateFlag + self.timeFlag)
                    else:
                        log.debug('return 2')
                        return (totalTime2, self.dateFlag + self.timeFlag)
            elif self.modifier2Flag == True:
                totalTime, invalidFlag = self._evalModifier2(parseStr, chunk1, chunk2, totalTime)
                if invalidFlag == True:
                    self.dateFlag = 0
                    self.timeFlag = 0
            else:
                totalTime = self._evalString(parseStr, totalTime)
                parseStr = ''

    # String is not parsed at all
    if totalTime is None or totalTime == sourceTime:
        totalTime = time.localtime()
        self.dateFlag = 0
        self.timeFlag = 0

    log.debug('return')
    return (totalTime, self.dateFlag + self.timeFlag)
[ "def", "parse", "(", "self", ",", "datetimeString", ",", "sourceTime", "=", "None", ")", ":", "datetimeString", "=", "re", ".", "sub", "(", "r'(\\w)(\\.)(\\s)'", ",", "r'\\1\\3'", ",", "datetimeString", ")", "datetimeString", "=", "re", ".", "sub", "(", "r'(\\w)(\\'|\")(\\s|$)'", ",", "r'\\1 \\3'", ",", "datetimeString", ")", "datetimeString", "=", "re", ".", "sub", "(", "r'(\\s|^)(\\'|\")(\\w)'", ",", "r'\\1 \\3'", ",", "datetimeString", ")", "if", "sourceTime", ":", "if", "isinstance", "(", "sourceTime", ",", "datetime", ".", "datetime", ")", ":", "log", ".", "debug", "(", "'coercing datetime to timetuple'", ")", "sourceTime", "=", "sourceTime", ".", "timetuple", "(", ")", "else", ":", "if", "not", "isinstance", "(", "sourceTime", ",", "time", ".", "struct_time", ")", "and", "not", "isinstance", "(", "sourceTime", ",", "tuple", ")", ":", "raise", "Exception", "(", "'sourceTime is not a struct_time'", ")", "s", "=", "datetimeString", ".", "strip", "(", ")", ".", "lower", "(", ")", "parseStr", "=", "''", "totalTime", "=", "sourceTime", "if", "s", "==", "''", ":", "if", "sourceTime", "is", "not", "None", ":", "return", "(", "sourceTime", ",", "self", ".", "dateFlag", "+", "self", ".", "timeFlag", ")", "else", ":", "return", "(", "time", ".", "localtime", "(", ")", ",", "0", ")", "self", ".", "timeFlag", "=", "0", "self", ".", "dateFlag", "=", "0", "while", "len", "(", "s", ")", ">", "0", ":", "flag", "=", "False", "chunk1", "=", "''", "chunk2", "=", "''", "log", ".", "debug", "(", "'parse (top of loop): [%s][%s]'", "%", "(", "s", ",", "parseStr", ")", ")", "if", "parseStr", "==", "''", ":", "# Modifier like next\\prev..", "m", "=", "self", ".", "ptc", ".", "CRE_MODIFIER", ".", "search", "(", "s", ")", "if", "m", "is", "not", "None", ":", "self", ".", "modifierFlag", "=", "True", "if", "(", "m", ".", "group", "(", "'modifier'", ")", "!=", "s", ")", ":", "# capture remaining string", "parseStr", "=", "m", ".", "group", "(", "'modifier'", ")", "chunk1", "=", "s", "[", ":", "m", ".", "start", "(", "'modifier'", ")", "]", ".", "strip", "(", ")", "chunk2", "=", "s", "[", "m", ".", "end", "(", "'modifier'", ")", ":", "]", ".", "strip", "(", ")", "flag", "=", "True", "else", ":", "parseStr", "=", "s", "if", "parseStr", "==", "''", ":", "# Modifier like from\\after\\prior..", "m", "=", "self", ".", "ptc", ".", "CRE_MODIFIER2", ".", "search", "(", "s", ")", "if", "m", "is", "not", "None", ":", "self", ".", "modifier2Flag", "=", "True", "if", "(", "m", ".", "group", "(", "'modifier'", ")", "!=", "s", ")", ":", "# capture remaining string", "parseStr", "=", "m", ".", "group", "(", "'modifier'", ")", "chunk1", "=", "s", "[", ":", "m", ".", "start", "(", "'modifier'", ")", "]", ".", "strip", "(", ")", "chunk2", "=", "s", "[", "m", ".", "end", "(", "'modifier'", ")", ":", "]", ".", "strip", "(", ")", "flag", "=", "True", "else", ":", "parseStr", "=", "s", "if", "parseStr", "==", "''", ":", "# Quantity + Units", "m", "=", "self", ".", "ptc", ".", "CRE_UNITS", ".", "search", "(", "s", ")", "if", "m", "is", "not", "None", ":", "log", ".", "debug", "(", "'CRE_UNITS matched'", ")", "if", "self", ".", "_UnitsTrapped", "(", "s", ",", "m", ",", "'units'", ")", ":", "log", ".", "debug", "(", "'day suffix trapped by unit match'", ")", "else", ":", "self", ".", "unitsFlag", "=", "True", "if", "(", "m", ".", "group", "(", "'qty'", ")", "!=", "s", ")", ":", "# capture remaining string", "parseStr", "=", "m", ".", "group", "(", "'qty'", ")", "chunk1", "=", "s", "[", ":", "m", ".", "start", "(", "'qty'", ")", "]", 
".", "strip", "(", ")", "chunk2", "=", "s", "[", "m", ".", "end", "(", "'qty'", ")", ":", "]", ".", "strip", "(", ")", "if", "chunk1", "[", "-", "1", ":", "]", "==", "'-'", ":", "parseStr", "=", "'-%s'", "%", "parseStr", "chunk1", "=", "chunk1", "[", ":", "-", "1", "]", "s", "=", "'%s %s'", "%", "(", "chunk1", ",", "chunk2", ")", "flag", "=", "True", "else", ":", "parseStr", "=", "s", "if", "parseStr", "==", "''", ":", "# Quantity + Units", "m", "=", "self", ".", "ptc", ".", "CRE_QUNITS", ".", "search", "(", "s", ")", "if", "m", "is", "not", "None", ":", "log", ".", "debug", "(", "'CRE_QUNITS matched'", ")", "if", "self", ".", "_UnitsTrapped", "(", "s", ",", "m", ",", "'qunits'", ")", ":", "log", ".", "debug", "(", "'day suffix trapped by qunit match'", ")", "else", ":", "self", ".", "qunitsFlag", "=", "True", "if", "(", "m", ".", "group", "(", "'qty'", ")", "!=", "s", ")", ":", "# capture remaining string", "parseStr", "=", "m", ".", "group", "(", "'qty'", ")", "chunk1", "=", "s", "[", ":", "m", ".", "start", "(", "'qty'", ")", "]", ".", "strip", "(", ")", "chunk2", "=", "s", "[", "m", ".", "end", "(", "'qty'", ")", ":", "]", ".", "strip", "(", ")", "if", "chunk1", "[", "-", "1", ":", "]", "==", "'-'", ":", "parseStr", "=", "'-%s'", "%", "parseStr", "chunk1", "=", "chunk1", "[", ":", "-", "1", "]", "s", "=", "'%s %s'", "%", "(", "chunk1", ",", "chunk2", ")", "flag", "=", "True", "else", ":", "parseStr", "=", "s", "if", "parseStr", "==", "''", ":", "valid_date", "=", "False", "for", "match", "in", "self", ".", "ptc", ".", "CRE_DATE3", ".", "finditer", "(", "s", ")", ":", "# to prevent \"HH:MM(:SS) time strings\" expressions from triggering", "# this regex, we checks if the month field exists in the searched", "# expression, if it doesn't exist, the date field is not valid", "if", "match", ".", "group", "(", "'mthname'", ")", ":", "m", "=", "self", ".", "ptc", ".", "CRE_DATE3", ".", "search", "(", "s", ",", "match", ".", "start", "(", ")", ")", "valid_date", "=", "True", "break", "# String date format", "if", "valid_date", ":", "self", ".", "dateStrFlag", "=", "True", "self", ".", "dateFlag", "=", "1", "if", "(", "m", ".", "group", "(", "'date'", ")", "!=", "s", ")", ":", "# capture remaining string", "parseStr", "=", "m", ".", "group", "(", "'date'", ")", "chunk1", "=", "s", "[", ":", "m", ".", "start", "(", "'date'", ")", "]", "chunk2", "=", "s", "[", "m", ".", "end", "(", "'date'", ")", ":", "]", "s", "=", "'%s %s'", "%", "(", "chunk1", ",", "chunk2", ")", "flag", "=", "True", "else", ":", "parseStr", "=", "s", "if", "parseStr", "==", "''", ":", "# Standard date format", "m", "=", "self", ".", "ptc", ".", "CRE_DATE", ".", "search", "(", "s", ")", "if", "m", "is", "not", "None", ":", "self", ".", "dateStdFlag", "=", "True", "self", ".", "dateFlag", "=", "1", "if", "(", "m", ".", "group", "(", "'date'", ")", "!=", "s", ")", ":", "# capture remaining string", "parseStr", "=", "m", ".", "group", "(", "'date'", ")", "chunk1", "=", "s", "[", ":", "m", ".", "start", "(", "'date'", ")", "]", "chunk2", "=", "s", "[", "m", ".", "end", "(", "'date'", ")", ":", "]", "s", "=", "'%s %s'", "%", "(", "chunk1", ",", "chunk2", ")", "flag", "=", "True", "else", ":", "parseStr", "=", "s", "if", "parseStr", "==", "''", ":", "# Natural language day strings", "m", "=", "self", ".", "ptc", ".", "CRE_DAY", ".", "search", "(", "s", ")", "if", "m", "is", "not", "None", ":", "self", ".", "dayStrFlag", "=", "True", "self", ".", "dateFlag", "=", "1", "if", "(", "m", ".", "group", "(", "'day'", ")", "!=", "s", ")", 
":", "# capture remaining string", "parseStr", "=", "m", ".", "group", "(", "'day'", ")", "chunk1", "=", "s", "[", ":", "m", ".", "start", "(", "'day'", ")", "]", "chunk2", "=", "s", "[", "m", ".", "end", "(", "'day'", ")", ":", "]", "s", "=", "'%s %s'", "%", "(", "chunk1", ",", "chunk2", ")", "flag", "=", "True", "else", ":", "parseStr", "=", "s", "if", "parseStr", "==", "''", ":", "# Weekday", "m", "=", "self", ".", "ptc", ".", "CRE_WEEKDAY", ".", "search", "(", "s", ")", "if", "m", "is", "not", "None", ":", "gv", "=", "m", ".", "group", "(", "'weekday'", ")", "if", "s", "not", "in", "self", ".", "ptc", ".", "dayOffsets", ":", "self", ".", "weekdyFlag", "=", "True", "self", ".", "dateFlag", "=", "1", "if", "(", "gv", "!=", "s", ")", ":", "# capture remaining string", "parseStr", "=", "gv", "chunk1", "=", "s", "[", ":", "m", ".", "start", "(", "'weekday'", ")", "]", "chunk2", "=", "s", "[", "m", ".", "end", "(", "'weekday'", ")", ":", "]", "s", "=", "'%s %s'", "%", "(", "chunk1", ",", "chunk2", ")", "flag", "=", "True", "else", ":", "parseStr", "=", "s", "if", "parseStr", "==", "''", ":", "# Natural language time strings", "m", "=", "self", ".", "ptc", ".", "CRE_TIME", ".", "search", "(", "s", ")", "if", "m", "is", "not", "None", "or", "s", "in", "self", ".", "ptc", ".", "re_values", "[", "'now'", "]", ":", "self", ".", "timeStrFlag", "=", "True", "self", ".", "timeFlag", "=", "2", "if", "(", "m", "and", "m", ".", "group", "(", "'time'", ")", "!=", "s", ")", ":", "# capture remaining string", "parseStr", "=", "m", ".", "group", "(", "'time'", ")", "chunk1", "=", "s", "[", ":", "m", ".", "start", "(", "'time'", ")", "]", "chunk2", "=", "s", "[", "m", ".", "end", "(", "'time'", ")", ":", "]", "s", "=", "'%s %s'", "%", "(", "chunk1", ",", "chunk2", ")", "flag", "=", "True", "else", ":", "parseStr", "=", "s", "if", "parseStr", "==", "''", ":", "# HH:MM(:SS) am/pm time strings", "m", "=", "self", ".", "ptc", ".", "CRE_TIMEHMS2", ".", "search", "(", "s", ")", "if", "m", "is", "not", "None", ":", "self", ".", "meridianFlag", "=", "True", "self", ".", "timeFlag", "=", "2", "if", "m", ".", "group", "(", "'minutes'", ")", "is", "not", "None", ":", "if", "m", ".", "group", "(", "'seconds'", ")", "is", "not", "None", ":", "parseStr", "=", "'%s:%s:%s %s'", "%", "(", "m", ".", "group", "(", "'hours'", ")", ",", "m", ".", "group", "(", "'minutes'", ")", ",", "m", ".", "group", "(", "'seconds'", ")", ",", "m", ".", "group", "(", "'meridian'", ")", ")", "else", ":", "parseStr", "=", "'%s:%s %s'", "%", "(", "m", ".", "group", "(", "'hours'", ")", ",", "m", ".", "group", "(", "'minutes'", ")", ",", "m", ".", "group", "(", "'meridian'", ")", ")", "else", ":", "parseStr", "=", "'%s %s'", "%", "(", "m", ".", "group", "(", "'hours'", ")", ",", "m", ".", "group", "(", "'meridian'", ")", ")", "chunk1", "=", "s", "[", ":", "m", ".", "start", "(", "'hours'", ")", "]", "chunk2", "=", "s", "[", "m", ".", "end", "(", "'meridian'", ")", ":", "]", "s", "=", "'%s %s'", "%", "(", "chunk1", ",", "chunk2", ")", "flag", "=", "True", "if", "parseStr", "==", "''", ":", "# HH:MM(:SS) time strings", "m", "=", "self", ".", "ptc", ".", "CRE_TIMEHMS", ".", "search", "(", "s", ")", "if", "m", "is", "not", "None", ":", "self", ".", "timeStdFlag", "=", "True", "self", ".", "timeFlag", "=", "2", "if", "m", ".", "group", "(", "'seconds'", ")", "is", "not", "None", ":", "parseStr", "=", "'%s:%s:%s'", "%", "(", "m", ".", "group", "(", "'hours'", ")", ",", "m", ".", "group", "(", "'minutes'", ")", ",", "m", ".", "group", "(", "'seconds'", 
")", ")", "chunk1", "=", "s", "[", ":", "m", ".", "start", "(", "'hours'", ")", "]", "chunk2", "=", "s", "[", "m", ".", "end", "(", "'seconds'", ")", ":", "]", "else", ":", "parseStr", "=", "'%s:%s'", "%", "(", "m", ".", "group", "(", "'hours'", ")", ",", "m", ".", "group", "(", "'minutes'", ")", ")", "chunk1", "=", "s", "[", ":", "m", ".", "start", "(", "'hours'", ")", "]", "chunk2", "=", "s", "[", "m", ".", "end", "(", "'minutes'", ")", ":", "]", "s", "=", "'%s %s'", "%", "(", "chunk1", ",", "chunk2", ")", "flag", "=", "True", "# if string does not match any regex, empty string to", "# come out of the while loop", "if", "not", "flag", ":", "s", "=", "''", "log", ".", "debug", "(", "'parse (bottom) [%s][%s][%s][%s]'", "%", "(", "s", ",", "parseStr", ",", "chunk1", ",", "chunk2", ")", ")", "log", ".", "debug", "(", "'weekday %s, dateStd %s, dateStr %s, time %s, timeStr %s, meridian %s'", "%", "(", "self", ".", "weekdyFlag", ",", "self", ".", "dateStdFlag", ",", "self", ".", "dateStrFlag", ",", "self", ".", "timeStdFlag", ",", "self", ".", "timeStrFlag", ",", "self", ".", "meridianFlag", ")", ")", "log", ".", "debug", "(", "'dayStr %s, modifier %s, modifier2 %s, units %s, qunits %s'", "%", "(", "self", ".", "dayStrFlag", ",", "self", ".", "modifierFlag", ",", "self", ".", "modifier2Flag", ",", "self", ".", "unitsFlag", ",", "self", ".", "qunitsFlag", ")", ")", "# evaluate the matched string", "if", "parseStr", "!=", "''", ":", "if", "self", ".", "modifierFlag", "==", "True", ":", "t", ",", "totalTime", "=", "self", ".", "_evalModifier", "(", "parseStr", ",", "chunk1", ",", "chunk2", ",", "totalTime", ")", "# t is the unparsed part of the chunks.", "# If it is not date/time, return current", "# totalTime as it is; else return the output", "# after parsing t.", "if", "(", "t", "!=", "''", ")", "and", "(", "t", "!=", "None", ")", ":", "tempDateFlag", "=", "self", ".", "dateFlag", "tempTimeFlag", "=", "self", ".", "timeFlag", "(", "totalTime2", ",", "flag", ")", "=", "self", ".", "parse", "(", "t", ",", "totalTime", ")", "if", "flag", "==", "0", "and", "totalTime", "is", "not", "None", ":", "self", ".", "timeFlag", "=", "tempTimeFlag", "self", ".", "dateFlag", "=", "tempDateFlag", "log", ".", "debug", "(", "'return 1'", ")", "return", "(", "totalTime", ",", "self", ".", "dateFlag", "+", "self", ".", "timeFlag", ")", "else", ":", "log", ".", "debug", "(", "'return 2'", ")", "return", "(", "totalTime2", ",", "self", ".", "dateFlag", "+", "self", ".", "timeFlag", ")", "elif", "self", ".", "modifier2Flag", "==", "True", ":", "totalTime", ",", "invalidFlag", "=", "self", ".", "_evalModifier2", "(", "parseStr", ",", "chunk1", ",", "chunk2", ",", "totalTime", ")", "if", "invalidFlag", "==", "True", ":", "self", ".", "dateFlag", "=", "0", "self", ".", "timeFlag", "=", "0", "else", ":", "totalTime", "=", "self", ".", "_evalString", "(", "parseStr", ",", "totalTime", ")", "parseStr", "=", "''", "# String is not parsed at all", "if", "totalTime", "is", "None", "or", "totalTime", "==", "sourceTime", ":", "totalTime", "=", "time", ".", "localtime", "(", ")", "self", ".", "dateFlag", "=", "0", "self", ".", "timeFlag", "=", "0", "log", ".", "debug", "(", "'return'", ")", "return", "(", "totalTime", ",", "self", ".", "dateFlag", "+", "self", ".", "timeFlag", ")" ]
https://github.com/joe42/CloudFusion/blob/c4b94124e74a81e0634578c7754d62160081f7a1/cloudfusion/third_party/parsedatetime/parsedatetime/__init__.py#L1280-L1606
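Typical use of this API through the published parsedatetime package (assuming it is installed; this repository vendors the same module): the second item of the tuple is the 0-3 flag described in the docstring:

import time
import parsedatetime

cal = parsedatetime.Calendar()
struct, flag = cal.parse('tomorrow at 6pm')
print(flag)  # 3 when both a date and a time were recognized
print(time.strftime('%Y-%m-%d %H:%M', struct))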
omz/PythonistaAppTemplate
f560f93f8876d82a21d108977f90583df08d55af
PythonistaAppTemplate/PythonistaKit.framework/pylib/collections.py
python
OrderedDict.viewitems
(self)
return ItemsView(self)
od.viewitems() -> a set-like object providing a view on od's items
od.viewitems() -> a set-like object providing a view on od's items
[ "od", ".", "viewitems", "()", "-", ">", "a", "set", "-", "like", "object", "providing", "a", "view", "on", "od", "s", "items" ]
def viewitems(self):
    "od.viewitems() -> a set-like object providing a view on od's items"
    return ItemsView(self)
[ "def", "viewitems", "(", "self", ")", ":", "return", "ItemsView", "(", "self", ")" ]
https://github.com/omz/PythonistaAppTemplate/blob/f560f93f8876d82a21d108977f90583df08d55af/PythonistaAppTemplate/PythonistaKit.framework/pylib/collections.py#L225-L227
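viewitems() is the Python 2 spelling; on Python 3 the equivalent set-like, order-preserving view is OrderedDict.items():

from collections import OrderedDict

od = OrderedDict([('a', 1), ('b', 2)])
view = od.items()        # like od.viewitems() on Python 2
print(('a', 1) in view)  # -> True (set-like membership)
od['c'] = 3
print(list(view))        # -> [('a', 1), ('b', 2), ('c', 3)]; views track changes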
leo-editor/leo-editor
383d6776d135ef17d73d935a2f0ecb3ac0e99494
leo/core/leoCommands.py
python
Commands.general_script_helper
(self, command, ext, language, root, directory=None, regex=None)
The official helper for the execute-general-script command. c: The Commander of the outline. command: The os command to execute the script. directory: Optional: Change to this directory before executing command. ext: The file extension for the temporary file. language: The language name. regex: Optional regular expression describing error messages. If present, group(1) should evaluate to a line number. May be a compiled regex expression or a string. root: The root of the tree containing the script. The script may contain section references and @others. Other features: - Create a temporary external file if `not root.isAnyAtFileNode()`. - Compute the final command as follows. 1. If command contains <FILE>, replace <FILE> with the full path. 2. If command contains <NO-FILE>, just remove <NO-FILE>. This allows, for example, `go run .` to work as expected. 3. Append the full path to the command.
The official helper for the execute-general-script command.
[ "The", "official", "helper", "for", "the", "execute", "-", "general", "-", "script", "command", "." ]
def general_script_helper(self, command, ext, language, root, directory=None, regex=None):
    """
    The official helper for the execute-general-script command.

    c:          The Commander of the outline.
    command:    The os command to execute the script.
    directory:  Optional: Change to this directory before executing command.
    ext:        The file extension for the temporary file.
    language:   The language name.
    regex:      Optional regular expression describing error messages.
                If present, group(1) should evaluate to a line number.
                May be a compiled regex expression or a string.
    root:       The root of the tree containing the script.
                The script may contain section references and @others.

    Other features:

    - Create a temporary external file if `not root.isAnyAtFileNode()`.
    - Compute the final command as follows.
      1. If command contains <FILE>, replace <FILE> with the full path.
      2. If command contains <NO-FILE>, just remove <NO-FILE>. This
         allows, for example, `go run .` to work as expected.
      3. Append the full path to the command.
    """
    c, log = self, self.frame.log
    #@+others # Define helper functions
    #@+node:ekr.20210529142153.1: *5* function: put_line
    def put_line(s):
        """
        Put the line, creating a clickable link if the regex matches.
        """
        if not regex:
            g.es_print(s)
            return
        # Get the line number.
        m = regex.match(s)
        if not m:
            g.es_print(s)
            return
        # If present, the regex should define two groups.
        try:
            s1 = m.group(1)
            s2 = m.group(2)
        except IndexError:
            g.es_print(f"Regex {regex.pattern()} must define two groups")
            return
        if s1.isdigit():
            n = int(s1)
            fn = s2
        elif s2.isdigit():
            n = int(s2)
            fn = s1
        else:
            # No line number.
            g.es_print(s)
            return
        s = s.replace(root_path, root.h)
        # Print to the console.
        print(s)
        # Find the node and offset corresponding to line n.
        p, n2 = find_line(fn, n)
        # Create the link.
        unl = p.get_UNL(with_proto=True, with_count=True)
        if unl:
            log.put(s + '\n', nodeLink=f"{unl},{n2}")
        else:
            log.put(s + '\n')
    #@+node:ekr.20210529164957.1: *5* function: find_line
    def find_line(path, n):
        """
        Return the node corresponding to line n of external file given by path.
        """
        if path == root_path:
            p, offset, found = c.gotoCommands.find_file_line(n, root)
        else:
            # Find an @<file> node with the given path.
            found = False
            for p in c.all_positions():
                if p.isAnyAtFileNode():
                    norm_path = os.path.normpath(g.fullPath(c, p))
                    if path == norm_path:
                        p, offset, found = c.gotoCommands.find_file_line(n, p)
                        break
        if found:
            return p, offset
        return root, n
    #@-others
    # Compile and check the regex.
    if regex:
        if isinstance(regex, str):
            try:
                regex = re.compile(regex)
            except Exception:
                g.trace(f"Bad regex: {regex!s}")
                return None
    # Get the script.
    script = g.getScript(c, root,
        useSelectedText=False,
        forcePythonSentinels=False,  # language=='python',
        useSentinels=True,
    )
    # Create a temp file if root is not an @<file> node.
    use_temp = not root.isAnyAtFileNode()
    if use_temp:
        fd, root_path = tempfile.mkstemp(suffix=ext, prefix="")
        with os.fdopen(fd, 'w') as f:
            f.write(script)
    else:
        root_path = g.fullPath(c, root)
    # Compute the final command.
    if '<FILE>' in command:
        final_command = command.replace('<FILE>', root_path)
    elif '<NO-FILE>' in command:
        final_command = command.replace('<NO-FILE>', '').replace(root_path, '')
    else:
        final_command = f"{command} {root_path}"
    # Change directory.
    old_dir = os.path.abspath(os.path.curdir)
    if not directory:
        directory = os.path.dirname(root_path)
    os.chdir(directory)
    # Execute the final command.
    try:
        proc = subprocess.Popen(final_command,
            shell=True,
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE)
        out, err = proc.communicate()
        for s in g.splitLines(g.toUnicode(out)):
            print(s.rstrip())
        print('')
        for s in g.splitLines(g.toUnicode(err)):
            put_line(s.rstrip())
    finally:
        if use_temp:
            os.remove(root_path)
        os.chdir(old_dir)
[ "def", "general_script_helper", "(", "self", ",", "command", ",", "ext", ",", "language", ",", "root", ",", "directory", "=", "None", ",", "regex", "=", "None", ")", ":", "c", ",", "log", "=", "self", ",", "self", ".", "frame", ".", "log", "#@+others # Define helper functions", "#@+node:ekr.20210529142153.1: *5* function: put_line", "def", "put_line", "(", "s", ")", ":", "\"\"\"\n Put the line, creating a clickable link if the regex matches.\n \"\"\"", "if", "not", "regex", ":", "g", ".", "es_print", "(", "s", ")", "return", "# Get the line number.", "m", "=", "regex", ".", "match", "(", "s", ")", "if", "not", "m", ":", "g", ".", "es_print", "(", "s", ")", "return", "# If present, the regex should define two groups.", "try", ":", "s1", "=", "m", ".", "group", "(", "1", ")", "s2", "=", "m", ".", "group", "(", "2", ")", "except", "IndexError", ":", "g", ".", "es_print", "(", "f\"Regex {regex.pattern()} must define two groups\"", ")", "return", "if", "s1", ".", "isdigit", "(", ")", ":", "n", "=", "int", "(", "s1", ")", "fn", "=", "s2", "elif", "s2", ".", "isdigit", "(", ")", ":", "n", "=", "int", "(", "s2", ")", "fn", "=", "s1", "else", ":", "# No line number.", "g", ".", "es_print", "(", "s", ")", "return", "s", "=", "s", ".", "replace", "(", "root_path", ",", "root", ".", "h", ")", "# Print to the console.", "print", "(", "s", ")", "# Find the node and offset corresponding to line n.", "p", ",", "n2", "=", "find_line", "(", "fn", ",", "n", ")", "# Create the link.", "unl", "=", "p", ".", "get_UNL", "(", "with_proto", "=", "True", ",", "with_count", "=", "True", ")", "if", "unl", ":", "log", ".", "put", "(", "s", "+", "'\\n'", ",", "nodeLink", "=", "f\"{unl},{n2}\"", ")", "else", ":", "log", ".", "put", "(", "s", "+", "'\\n'", ")", "#@+node:ekr.20210529164957.1: *5* function: find_line", "def", "find_line", "(", "path", ",", "n", ")", ":", "\"\"\"\n Return the node corresponding to line n of external file given by path.\n \"\"\"", "if", "path", "==", "root_path", ":", "p", ",", "offset", ",", "found", "=", "c", ".", "gotoCommands", ".", "find_file_line", "(", "n", ",", "root", ")", "else", ":", "# Find an @<file> node with the given path.", "found", "=", "False", "for", "p", "in", "c", ".", "all_positions", "(", ")", ":", "if", "p", ".", "isAnyAtFileNode", "(", ")", ":", "norm_path", "=", "os", ".", "path", ".", "normpath", "(", "g", ".", "fullPath", "(", "c", ",", "p", ")", ")", "if", "path", "==", "norm_path", ":", "p", ",", "offset", ",", "found", "=", "c", ".", "gotoCommands", ".", "find_file_line", "(", "n", ",", "p", ")", "break", "if", "found", ":", "return", "p", ",", "offset", "return", "root", ",", "n", "#@-others", "# Compile and check the regex.", "if", "regex", ":", "if", "isinstance", "(", "regex", ",", "str", ")", ":", "try", ":", "regex", "=", "re", ".", "compile", "(", "regex", ")", "except", "Exception", ":", "g", ".", "trace", "(", "f\"Bad regex: {regex!s}\"", ")", "return", "None", "# Get the script.", "script", "=", "g", ".", "getScript", "(", "c", ",", "root", ",", "useSelectedText", "=", "False", ",", "forcePythonSentinels", "=", "False", ",", "# language=='python',", "useSentinels", "=", "True", ",", ")", "# Create a temp file if root is not an @<file> node.", "use_temp", "=", "not", "root", ".", "isAnyAtFileNode", "(", ")", "if", "use_temp", ":", "fd", ",", "root_path", "=", "tempfile", ".", "mkstemp", "(", "suffix", "=", "ext", ",", "prefix", "=", "\"\"", ")", "with", "os", ".", "fdopen", "(", "fd", ",", "'w'", ")", "as", "f", ":", "f", ".", "write", "(", 
"script", ")", "else", ":", "root_path", "=", "g", ".", "fullPath", "(", "c", ",", "root", ")", "# Compute the final command.", "if", "'<FILE>'", "in", "command", ":", "final_command", "=", "command", ".", "replace", "(", "'<FILE>'", ",", "root_path", ")", "elif", "'<NO-FILE>'", "in", "command", ":", "final_command", "=", "command", ".", "replace", "(", "'<NO-FILE>'", ",", "''", ")", ".", "replace", "(", "root_path", ",", "''", ")", "else", ":", "final_command", "=", "f\"{command} {root_path}\"", "# Change directory.", "old_dir", "=", "os", ".", "path", ".", "abspath", "(", "os", ".", "path", ".", "curdir", ")", "if", "not", "directory", ":", "directory", "=", "os", ".", "path", ".", "dirname", "(", "root_path", ")", "os", ".", "chdir", "(", "directory", ")", "# Execute the final command.", "try", ":", "proc", "=", "subprocess", ".", "Popen", "(", "final_command", ",", "shell", "=", "True", ",", "stdout", "=", "subprocess", ".", "PIPE", ",", "stderr", "=", "subprocess", ".", "PIPE", ")", "out", ",", "err", "=", "proc", ".", "communicate", "(", ")", "for", "s", "in", "g", ".", "splitLines", "(", "g", ".", "toUnicode", "(", "out", ")", ")", ":", "print", "(", "s", ".", "rstrip", "(", ")", ")", "print", "(", "''", ")", "for", "s", "in", "g", ".", "splitLines", "(", "g", ".", "toUnicode", "(", "err", ")", ")", ":", "put_line", "(", "s", ".", "rstrip", "(", ")", ")", "finally", ":", "if", "use_temp", ":", "os", ".", "remove", "(", "root_path", ")", "os", ".", "chdir", "(", "old_dir", ")" ]
https://github.com/leo-editor/leo-editor/blob/383d6776d135ef17d73d935a2f0ecb3ac0e99494/leo/core/leoCommands.py#L2214-L2350
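A note on the record above: put_line expects the regex argument to define exactly two groups, one holding a file path and the other a line number, in either order (it decides via isdigit). A minimal compatible pattern, illustrative rather than taken from Leo's sources:

import re

# mypy/pylint-style diagnostics look like "file.py:42: error: ...".
error_regex = re.compile(r'^(.+?):(\d+):')

m = error_regex.match('spam.py:42: error: undefined name')
assert m is not None
print(m.group(1), m.group(2))  # spam.py 42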
CouchPotato/CouchPotatoServer
7260c12f72447ddb6f062367c6dfbda03ecd4e9c
libs/httplib2/__init__.py
python
WsseAuthentication.request
(self, method, request_uri, headers, content)
Modify the request headers to add the appropriate Authorization header.
Modify the request headers to add the appropriate Authorization header.
[ "Modify", "the", "request", "headers", "to", "add", "the", "appropriate", "Authorization", "header", "." ]
def request(self, method, request_uri, headers, content): """Modify the request headers to add the appropriate Authorization header.""" headers['authorization'] = 'WSSE profile="UsernameToken"' iso_now = time.strftime("%Y-%m-%dT%H:%M:%SZ", time.gmtime()) cnonce = _cnonce() password_digest = _wsse_username_token(cnonce, iso_now, self.credentials[1]) headers['X-WSSE'] = 'UsernameToken Username="%s", PasswordDigest="%s", Nonce="%s", Created="%s"' % ( self.credentials[0], password_digest, cnonce, iso_now)
[ "def", "request", "(", "self", ",", "method", ",", "request_uri", ",", "headers", ",", "content", ")", ":", "headers", "[", "'authorization'", "]", "=", "'WSSE profile=\"UsernameToken\"'", "iso_now", "=", "time", ".", "strftime", "(", "\"%Y-%m-%dT%H:%M:%SZ\"", ",", "time", ".", "gmtime", "(", ")", ")", "cnonce", "=", "_cnonce", "(", ")", "password_digest", "=", "_wsse_username_token", "(", "cnonce", ",", "iso_now", ",", "self", ".", "credentials", "[", "1", "]", ")", "headers", "[", "'X-WSSE'", "]", "=", "'UsernameToken Username=\"%s\", PasswordDigest=\"%s\", Nonce=\"%s\", Created=\"%s\"'", "%", "(", "self", ".", "credentials", "[", "0", "]", ",", "password_digest", ",", "cnonce", ",", "iso_now", ")" ]
https://github.com/CouchPotato/CouchPotatoServer/blob/7260c12f72447ddb6f062367c6dfbda03ecd4e9c/libs/httplib2/__init__.py#L639-L650
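The _cnonce and _wsse_username_token helpers used above are defined elsewhere in httplib2 and are not part of this record. A rough sketch of the digest they are expected to produce, assuming the standard WSSE UsernameToken profile (Base64 of SHA-1 over nonce + created + password); the real helper may differ in encoding details:

import base64
import hashlib
import time

def wsse_password_digest(cnonce, iso_now, password):
    # PasswordDigest = Base64(SHA-1(Nonce + Created + Password))
    raw = (cnonce + iso_now + password).encode('utf-8')
    return base64.b64encode(hashlib.sha1(raw).digest()).decode('ascii')

iso_now = time.strftime("%Y-%m-%dT%H:%M:%SZ", time.gmtime())
print(wsse_password_digest('some-random-nonce', iso_now, 'secret'))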
pybrain/pybrain
dcdf32ba1805490cefbc0bdeb227260d304fdb42
pybrain/tools/gridsearch.py
python
GridSearchCostGamma._getTrainerForParams
(self, params)
return trainer
Returns a trainer, loaded with the supplied metaparameters.
Returns a trainer, loaded with the supplied metaparameters.
[ "Returns", "a", "trainer", "loaded", "with", "the", "supplied", "metaparameters", "." ]
def _getTrainerForParams(self, params): """ Returns a trainer, loaded with the supplied metaparameters. """ trainer = copy.deepcopy(self._trainer) trainer.setArgs(cost=2 ** params[0], gamma=2 ** params[1], ver=0) return trainer
[ "def", "_getTrainerForParams", "(", "self", ",", "params", ")", ":", "trainer", "=", "copy", ".", "deepcopy", "(", "self", ".", "_trainer", ")", "trainer", ".", "setArgs", "(", "cost", "=", "2", "**", "params", "[", "0", "]", ",", "gamma", "=", "2", "**", "params", "[", "1", "]", ",", "ver", "=", "0", ")", "return", "trainer" ]
https://github.com/pybrain/pybrain/blob/dcdf32ba1805490cefbc0bdeb227260d304fdb42/pybrain/tools/gridsearch.py#L300-L305
coto/gae-boilerplate
470f2b61fcb0238c1ad02cc1f97e6017acbe9628
bp_includes/external/requests/sessions.py
python
Session.options
(self, url, **kwargs)
return self.request('OPTIONS', url, **kwargs)
Sends an OPTIONS request. Returns :class:`Response` object. :param url: URL for the new :class:`Request` object. :param \*\*kwargs: Optional arguments that ``request`` takes.
Sends an OPTIONS request. Returns :class:`Response` object.
[ "Sends", "an", "OPTIONS", "request", ".", "Returns", ":", "class", ":", "Response", "object", "." ]
def options(self, url, **kwargs): """Sends an OPTIONS request. Returns :class:`Response` object. :param url: URL for the new :class:`Request` object. :param \*\*kwargs: Optional arguments that ``request`` takes. """ kwargs.setdefault('allow_redirects', True) return self.request('OPTIONS', url, **kwargs)
[ "def", "options", "(", "self", ",", "url", ",", "*", "*", "kwargs", ")", ":", "kwargs", ".", "setdefault", "(", "'allow_redirects'", ",", "True", ")", "return", "self", ".", "request", "(", "'OPTIONS'", ",", "url", ",", "*", "*", "kwargs", ")" ]
https://github.com/coto/gae-boilerplate/blob/470f2b61fcb0238c1ad02cc1f97e6017acbe9628/bp_includes/external/requests/sessions.py#L397-L405
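Typical use of the method above (the URL is illustrative); note that allow_redirects defaults to True unless overridden:

import requests

s = requests.Session()
resp = s.options('https://httpbin.org/get', timeout=5)
print(resp.status_code)            # e.g. 200
print(resp.headers.get('Allow'))   # e.g. 'HEAD, OPTIONS, GET'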
sakuranew/BERT-AttributeExtraction
f4d796046ced6ff508442a802962549f4c4a51de
birthplace/FeatureExtraction/extract_features.py
python
input_fn_builder
(features, seq_length)
return input_fn
Creates an `input_fn` closure to be passed to TPUEstimator.
Creates an `input_fn` closure to be passed to TPUEstimator.
[ "Creates", "an", "input_fn", "closure", "to", "be", "passed", "to", "TPUEstimator", "." ]
def input_fn_builder(features, seq_length): """Creates an `input_fn` closure to be passed to TPUEstimator.""" all_unique_ids = [] all_input_ids = [] all_input_mask = [] all_input_type_ids = [] for feature in features: all_unique_ids.append(feature.unique_id) all_input_ids.append(feature.input_ids) all_input_mask.append(feature.input_mask) all_input_type_ids.append(feature.input_type_ids) def input_fn(params): """The actual input function.""" batch_size = params["batch_size"] num_examples = len(features) # This is for demo purposes and does NOT scale to large data sets. We do # not use Dataset.from_generator() because that uses tf.py_func which is # not TPU compatible. The right way to load data is with TFRecordReader. d = tf.data.Dataset.from_tensor_slices({ "unique_ids": tf.constant(all_unique_ids, shape=[num_examples], dtype=tf.int32), "input_ids": tf.constant( all_input_ids, shape=[num_examples, seq_length], dtype=tf.int32), "input_mask": tf.constant( all_input_mask, shape=[num_examples, seq_length], dtype=tf.int32), "input_type_ids": tf.constant( all_input_type_ids, shape=[num_examples, seq_length], dtype=tf.int32), }) d = d.batch(batch_size=batch_size) return d return input_fn
[ "def", "input_fn_builder", "(", "features", ",", "seq_length", ")", ":", "all_unique_ids", "=", "[", "]", "all_input_ids", "=", "[", "]", "all_input_mask", "=", "[", "]", "all_input_type_ids", "=", "[", "]", "for", "feature", "in", "features", ":", "all_unique_ids", ".", "append", "(", "feature", ".", "unique_id", ")", "all_input_ids", ".", "append", "(", "feature", ".", "input_ids", ")", "all_input_mask", ".", "append", "(", "feature", ".", "input_mask", ")", "all_input_type_ids", ".", "append", "(", "feature", ".", "input_type_ids", ")", "def", "input_fn", "(", "params", ")", ":", "\"\"\"The actual input function.\"\"\"", "batch_size", "=", "params", "[", "\"batch_size\"", "]", "num_examples", "=", "len", "(", "features", ")", "# This is for demo purposes and does NOT scale to large data sets. We do", "# not use Dataset.from_generator() because that uses tf.py_func which is", "# not TPU compatible. The right way to load data is with TFRecordReader.", "d", "=", "tf", ".", "data", ".", "Dataset", ".", "from_tensor_slices", "(", "{", "\"unique_ids\"", ":", "tf", ".", "constant", "(", "all_unique_ids", ",", "shape", "=", "[", "num_examples", "]", ",", "dtype", "=", "tf", ".", "int32", ")", ",", "\"input_ids\"", ":", "tf", ".", "constant", "(", "all_input_ids", ",", "shape", "=", "[", "num_examples", ",", "seq_length", "]", ",", "dtype", "=", "tf", ".", "int32", ")", ",", "\"input_mask\"", ":", "tf", ".", "constant", "(", "all_input_mask", ",", "shape", "=", "[", "num_examples", ",", "seq_length", "]", ",", "dtype", "=", "tf", ".", "int32", ")", ",", "\"input_type_ids\"", ":", "tf", ".", "constant", "(", "all_input_type_ids", ",", "shape", "=", "[", "num_examples", ",", "seq_length", "]", ",", "dtype", "=", "tf", ".", "int32", ")", ",", "}", ")", "d", "=", "d", ".", "batch", "(", "batch_size", "=", "batch_size", ")", "return", "d", "return", "input_fn" ]
https://github.com/sakuranew/BERT-AttributeExtraction/blob/f4d796046ced6ff508442a802962549f4c4a51de/birthplace/FeatureExtraction/extract_features.py#L103-L148
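A sketch of driving the closure above. The feature objects only need the four attributes the builder reads, so a namedtuple stands in here as an assumption; the real pipeline builds richer feature objects elsewhere in this file:

import collections

# Stand-in with just the attributes input_fn_builder reads (hypothetical).
Feature = collections.namedtuple(
    "Feature", ["unique_id", "input_ids", "input_mask", "input_type_ids"])

seq_length = 8
features = [Feature(0, [101] + [0] * 7, [1] * 8, [0] * 8)]

input_fn = input_fn_builder(features, seq_length)
dataset = input_fn(params={"batch_size": 1})  # a tf.data.Dataset of feature dicts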
1012598167/flask_mongodb_game
60c7e0351586656ec38f851592886338e50b4110
python_flask/venv/Lib/site-packages/pip-19.0.3-py3.6.egg/pip/_internal/utils/ui.py
python
InterruptibleMixin.finish
(self)
Restore the original SIGINT handler after finishing. This should happen regardless of whether the progress display finishes normally, or gets interrupted.
Restore the original SIGINT handler after finishing.
[ "Restore", "the", "original", "SIGINT", "handler", "after", "finishing", "." ]
def finish(self): """ Restore the original SIGINT handler after finishing. This should happen regardless of whether the progress display finishes normally, or gets interrupted. """ super(InterruptibleMixin, self).finish() signal(SIGINT, self.original_handler)
[ "def", "finish", "(", "self", ")", ":", "super", "(", "InterruptibleMixin", ",", "self", ")", ".", "finish", "(", ")", "signal", "(", "SIGINT", ",", "self", ".", "original_handler", ")" ]
https://github.com/1012598167/flask_mongodb_game/blob/60c7e0351586656ec38f851592886338e50b4110/python_flask/venv/Lib/site-packages/pip-19.0.3-py3.6.egg/pip/_internal/utils/ui.py#L100-L108
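The mixin restores whatever handler its __init__ saved; the underlying save/restore pattern, sketched standalone:

from signal import SIGINT, getsignal, signal

original_handler = getsignal(SIGINT)         # what __init__ presumably stashes
signal(SIGINT, lambda signum, frame: None)   # temporary handler for the display
try:
    pass  # ...render the progress bar...
finally:
    signal(SIGINT, original_handler)         # what finish() puts back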
nschloe/quadpy
c4c076d8ddfa968486a2443a95e2fb3780dcde0f
src/quadpy/w3/_felippa.py
python
felippa_5
()
return W3Scheme("Felippa 5", weights, points, 5, source)
[]
def felippa_5(): a1, a2 = ((6 - i * sqrt(15)) / 21 for i in [+1, -1]) data = [ (0.3498310570689643e-01, _s21_z(a1, np.sqrt(3.0 / 5.0))), (0.3677615355236283e-01, _s21_z(a2, np.sqrt(3.0 / 5.0))), (1.0 / 16.0, _s3_z(np.sqrt(3.0 / 5.0), symbolic=False)), (0.5597296913103428e-01, _s21(a1)), (0.5884184568378053e-01, _s21(a2)), (0.1, _s3(symbolic=False)), ] points, weights = untangle(data) return W3Scheme("Felippa 5", weights, points, 5, source)
[ "def", "felippa_5", "(", ")", ":", "a1", ",", "a2", "=", "(", "(", "6", "-", "i", "*", "sqrt", "(", "15", ")", ")", "/", "21", "for", "i", "in", "[", "+", "1", ",", "-", "1", "]", ")", "data", "=", "[", "(", "0.3498310570689643e-01", ",", "_s21_z", "(", "a1", ",", "np", ".", "sqrt", "(", "3.0", "/", "5.0", ")", ")", ")", ",", "(", "0.3677615355236283e-01", ",", "_s21_z", "(", "a2", ",", "np", ".", "sqrt", "(", "3.0", "/", "5.0", ")", ")", ")", ",", "(", "1.0", "/", "16.0", ",", "_s3_z", "(", "np", ".", "sqrt", "(", "3.0", "/", "5.0", ")", ",", "symbolic", "=", "False", ")", ")", ",", "(", "0.5597296913103428e-01", ",", "_s21", "(", "a1", ")", ")", ",", "(", "0.5884184568378053e-01", ",", "_s21", "(", "a2", ")", ")", ",", "(", "0.1", ",", "_s3", "(", "symbolic", "=", "False", ")", ")", ",", "]", "points", ",", "weights", "=", "untangle", "(", "data", ")", "return", "W3Scheme", "(", "\"Felippa 5\"", ",", "weights", ",", "points", ",", "5", ",", "source", ")" ]
https://github.com/nschloe/quadpy/blob/c4c076d8ddfa968486a2443a95e2fb3780dcde0f/src/quadpy/w3/_felippa.py#L56-L67
ctxis/CAPE
dae9fa6a254ecdbabeb7eb0d2389fa63722c1e82
analyzer/windows/modules/auxiliary/disguise.py
python
Disguise.change_productid
(self)
Randomizes Windows ProductId. The Windows ProductId is occasionally used by malware to detect public setups of Cuckoo, e.g., Malwr.com.
Randomizes Windows ProductId. The Windows ProductId is occasionally used by malware to detect public setups of Cuckoo, e.g., Malwr.com.
[ "Randomizes", "Windows", "ProductId", ".", "The", "Windows", "ProductId", "is", "occasionally", "used", "by", "malware", "to", "detect", "public", "setups", "of", "Cuckoo", "e", ".", "g", ".", "Malwr", ".", "com", "." ]
def change_productid(self): """Randomizes Windows ProductId. The Windows ProductId is occasionally used by malware to detect public setups of Cuckoo, e.g., Malwr.com. """ key = OpenKey(HKEY_LOCAL_MACHINE, "SOFTWARE\\Microsoft\\Windows NT\\CurrentVersion", 0, KEY_SET_VALUE) value = "{0}-{1}-{2}-{3}".format(random_integer(5), random_integer(3), random_integer(7), random_integer(5)) SetValueEx(key, "ProductId", 0, REG_SZ, value) CloseKey(key)
[ "def", "change_productid", "(", "self", ")", ":", "key", "=", "OpenKey", "(", "HKEY_LOCAL_MACHINE", ",", "\"SOFTWARE\\\\Microsoft\\\\Windows NT\\\\CurrentVersion\"", ",", "0", ",", "KEY_SET_VALUE", ")", "value", "=", "\"{0}-{1}-{2}-{3}\"", ".", "format", "(", "random_integer", "(", "5", ")", ",", "random_integer", "(", "3", ")", ",", "random_integer", "(", "7", ")", ",", "random_integer", "(", "5", ")", ")", "SetValueEx", "(", "key", ",", "\"ProductId\"", ",", "0", ",", "REG_SZ", ",", "value", ")", "CloseKey", "(", "key", ")" ]
https://github.com/ctxis/CAPE/blob/dae9fa6a254ecdbabeb7eb0d2389fa63722c1e82/analyzer/windows/modules/auxiliary/disguise.py#L19-L32
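random_integer is not shown in this record; a plausible reading (hypothetical stand-in, matching the n-digit segments a ProductId needs) plus the resulting value shape:

from random import randint

def random_integer(length):
    # Hypothetical helper: a string of `length` random decimal digits.
    return "".join(str(randint(0, 9)) for _ in range(length))

value = "{0}-{1}-{2}-{3}".format(
    random_integer(5), random_integer(3), random_integer(7), random_integer(5))
print(value)  # e.g. '12345-678-9012345-67890'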
antspy/quantized_distillation
bb500b7ae48a3f6751d6434126de9845b58d2d65
onmt/IO.py
python
ONMTDataset._read_corpus_file
(self, path, truncate)
path: location of a src or tgt file truncate: maximum sequence length (0 for unlimited) returns: (word, features, nfeat) triples for each line
path: location of a src or tgt file truncate: maximum sequence length (0 for unlimited)
[ "path", ":", "location", "of", "a", "src", "or", "tgt", "file", "truncate", ":", "maximum", "sequence", "length", "(", "0", "for", "unlimited", ")" ]
def _read_corpus_file(self, path, truncate): """ path: location of a src or tgt file truncate: maximum sequence length (0 for unlimited) returns: (word, features, nfeat) triples for each line """ with codecs.open(path, "r", "utf-8") as corpus_file: lines = (line.split() for line in corpus_file) if truncate: lines = (line[:truncate] for line in lines) for line in lines: yield extract_features(line)
[ "def", "_read_corpus_file", "(", "self", ",", "path", ",", "truncate", ")", ":", "with", "codecs", ".", "open", "(", "path", ",", "\"r\"", ",", "\"utf-8\"", ")", "as", "corpus_file", ":", "lines", "=", "(", "line", ".", "split", "(", ")", "for", "line", "in", "corpus_file", ")", "if", "truncate", ":", "lines", "=", "(", "line", "[", ":", "truncate", "]", "for", "line", "in", "lines", ")", "for", "line", "in", "lines", ":", "yield", "extract_features", "(", "line", ")" ]
https://github.com/antspy/quantized_distillation/blob/bb500b7ae48a3f6751d6434126de9845b58d2d65/onmt/IO.py#L197-L209
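The method chains generator expressions, so nothing is materialized until iteration begins. The same lazy pattern standalone, yielding raw token lists where the original yields extract_features(line):

import codecs

def read_corpus_file(path, truncate):
    # Lazy pipeline: the file is consumed line by line during iteration.
    with codecs.open(path, "r", "utf-8") as corpus_file:
        lines = (line.split() for line in corpus_file)
        if truncate:
            lines = (line[:truncate] for line in lines)
        for line in lines:
            yield line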
nerdvegas/rez
d392c65bf63b4bca8106f938cec49144ba54e770
src/rez/pip.py
python
_check_found
(py_exe, version_text, log_invalid=True)
return is_valid
Check the Python and pip version text found. Args: py_exe (str or None): Python executable path found, if any. version_text (str or None): Pip version found, if any. log_invalid (bool): Whether to log messages if found invalid. Returns: bool: Python is OK and pip version fits against ``PIP_SPECIFIER``.
Check the Python and pip version text found.
[ "Check", "the", "Python", "and", "pip", "version", "text", "found", "." ]
def _check_found(py_exe, version_text, log_invalid=True): """Check the Python and pip version text found. Args: py_exe (str or None): Python executable path found, if any. version_text (str or None): Pip version found, if any. log_invalid (bool): Whether to log messages if found invalid. Returns: bool: Python is OK and pip version fits against ``PIP_SPECIFIER``. """ is_valid = True message = "Needs pip%s, but found '%s' for Python '%s'" if version_text is None or not py_exe: is_valid = False if log_invalid: print_debug(message, PIP_SPECIFIER, version_text, py_exe) elif PackagingVersion(version_text) not in PIP_SPECIFIER: is_valid = False if log_invalid: print_warning(message, PIP_SPECIFIER, version_text, py_exe) return is_valid
[ "def", "_check_found", "(", "py_exe", ",", "version_text", ",", "log_invalid", "=", "True", ")", ":", "is_valid", "=", "True", "message", "=", "\"Needs pip%s, but found '%s' for Python '%s'\"", "if", "version_text", "is", "None", "or", "not", "py_exe", ":", "is_valid", "=", "False", "if", "log_invalid", ":", "print_debug", "(", "message", ",", "PIP_SPECIFIER", ",", "version_text", ",", "py_exe", ")", "elif", "PackagingVersion", "(", "version_text", ")", "not", "in", "PIP_SPECIFIER", ":", "is_valid", "=", "False", "if", "log_invalid", ":", "print_warning", "(", "message", ",", "PIP_SPECIFIER", ",", "version_text", ",", "py_exe", ")", "return", "is_valid" ]
https://github.com/nerdvegas/rez/blob/d392c65bf63b4bca8106f938cec49144ba54e770/src/rez/pip.py#L612-L636
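The membership test above relies on packaging's SpecifierSet semantics. The exact PIP_SPECIFIER rez pins is not shown in this record, so the range below is illustrative:

from packaging.specifiers import SpecifierSet
from packaging.version import Version

PIP_SPECIFIER = SpecifierSet(">=19.0")  # illustrative range, not rez's actual pin

print(Version("20.3.4") in PIP_SPECIFIER)  # True
print(Version("9.0.1") in PIP_SPECIFIER)   # False -> _check_found would warn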
postlund/hass-atv-beta
0ce01623eabc6a11b84a79deaf25cec0359056ea
custom_components/apple_tv/media_player.py
python
async_setup_entry
(hass, config_entry, async_add_entities)
Load Apple TV media player based on a config entry.
Load Apple TV media player based on a config entry.
[ "Load", "Apple", "TV", "media", "player", "based", "on", "a", "config", "entry", "." ]
async def async_setup_entry(hass, config_entry, async_add_entities): """Load Apple TV media player based on a config entry.""" name = config_entry.data[CONF_NAME] manager = hass.data[DOMAIN][config_entry.unique_id] async_add_entities([AppleTvMediaPlayer(name, config_entry.unique_id, manager)])
[ "async", "def", "async_setup_entry", "(", "hass", ",", "config_entry", ",", "async_add_entities", ")", ":", "name", "=", "config_entry", ".", "data", "[", "CONF_NAME", "]", "manager", "=", "hass", ".", "data", "[", "DOMAIN", "]", "[", "config_entry", ".", "unique_id", "]", "async_add_entities", "(", "[", "AppleTvMediaPlayer", "(", "name", ",", "config_entry", ".", "unique_id", ",", "manager", ")", "]", ")" ]
https://github.com/postlund/hass-atv-beta/blob/0ce01623eabc6a11b84a79deaf25cec0359056ea/custom_components/apple_tv/media_player.py#L103-L107
ales-tsurko/cells
4cf7e395cd433762bea70cdc863a346f3a6fe1d0
packaging/macos/python/lib/python3.7/site-packages/pip/_vendor/distlib/locators.py
python
SimpleScrapingLocator.__init__
(self, url, timeout=None, num_workers=10, **kwargs)
Initialise an instance. :param url: The root URL to use for scraping. :param timeout: The timeout, in seconds, to be applied to requests. This defaults to ``None`` (no timeout specified). :param num_workers: The number of worker threads you want to do I/O. This defaults to 10. :param kwargs: Passed to the superclass.
Initialise an instance. :param url: The root URL to use for scraping. :param timeout: The timeout, in seconds, to be applied to requests. This defaults to ``None`` (no timeout specified). :param num_workers: The number of worker threads you want to do I/O. This defaults to 10. :param kwargs: Passed to the superclass.
[ "Initialise", "an", "instance", ".", ":", "param", "url", ":", "The", "root", "URL", "to", "use", "for", "scraping", ".", ":", "param", "timeout", ":", "The", "timeout", "in", "seconds", "to", "be", "applied", "to", "requests", ".", "This", "defaults", "to", "None", "(", "no", "timeout", "specified", ")", ".", ":", "param", "num_workers", ":", "The", "number", "of", "worker", "threads", "you", "want", "to", "do", "I", "/", "O", ".", "This", "defaults", "to", "10", ".", ":", "param", "kwargs", ":", "Passed", "to", "the", "superclass", "." ]
def __init__(self, url, timeout=None, num_workers=10, **kwargs): """ Initialise an instance. :param url: The root URL to use for scraping. :param timeout: The timeout, in seconds, to be applied to requests. This defaults to ``None`` (no timeout specified). :param num_workers: The number of worker threads you want to do I/O. This defaults to 10. :param kwargs: Passed to the superclass. """ super(SimpleScrapingLocator, self).__init__(**kwargs) self.base_url = ensure_slash(url) self.timeout = timeout self._page_cache = {} self._seen = set() self._to_fetch = queue.Queue() self._bad_hosts = set() self.skip_externals = False self.num_workers = num_workers self._lock = threading.RLock() # See issue #45: we need to be resilient when the locator is used # in a thread, e.g. with concurrent.futures. We can't use self._lock # as it is for coordinating our internal threads - the ones created # in _prepare_threads. self._gplock = threading.RLock() self.platform_check = False
[ "def", "__init__", "(", "self", ",", "url", ",", "timeout", "=", "None", ",", "num_workers", "=", "10", ",", "*", "*", "kwargs", ")", ":", "super", "(", "SimpleScrapingLocator", ",", "self", ")", ".", "__init__", "(", "*", "*", "kwargs", ")", "self", ".", "base_url", "=", "ensure_slash", "(", "url", ")", "self", ".", "timeout", "=", "timeout", "self", ".", "_page_cache", "=", "{", "}", "self", ".", "_seen", "=", "set", "(", ")", "self", ".", "_to_fetch", "=", "queue", ".", "Queue", "(", ")", "self", ".", "_bad_hosts", "=", "set", "(", ")", "self", ".", "skip_externals", "=", "False", "self", ".", "num_workers", "=", "num_workers", "self", ".", "_lock", "=", "threading", ".", "RLock", "(", ")", "# See issue #45: we need to be resilient when the locator is used", "# in a thread, e.g. with concurrent.futures. We can't use self._lock", "# as it is for coordinating our internal threads - the ones created", "# in _prepare_threads.", "self", ".", "_gplock", "=", "threading", ".", "RLock", "(", ")", "self", ".", "platform_check", "=", "False" ]
https://github.com/ales-tsurko/cells/blob/4cf7e395cd433762bea70cdc863a346f3a6fe1d0/packaging/macos/python/lib/python3.7/site-packages/pip/_vendor/distlib/locators.py#L593-L618
OpenNMT/OpenNMT-tf
59a4dfdb911d0570ba1096b7a0a7b9fc5c7844bf
opennmt/utils/misc.py
python
get_devices
(count=1, fallback_to_cpu=True)
return devices[0:count]
Gets devices. Args: count: The number of devices to get. fallback_to_cpu: If ``True``, return CPU devices if no GPU is available. Returns: A list of device names. Raises: ValueError: if :obj:`count` is greater than the number of visible devices.
Gets devices.
[ "Gets", "devices", "." ]
def get_devices(count=1, fallback_to_cpu=True): """Gets devices. Args: count: The number of devices to get. fallback_to_cpu: If ``True``, return CPU devices if no GPU is available. Returns: A list of device names. Raises: ValueError: if :obj:`count` is greater than the number of visible devices. """ device_type = "GPU" devices = tf.config.list_logical_devices(device_type=device_type) if not devices and fallback_to_cpu: tf.get_logger().warning("No GPU is detected, falling back to CPU") device_type = "CPU" devices = tf.config.list_logical_devices(device_type=device_type) if len(devices) < count: raise ValueError( "Requested %d %s devices but %d %s %s visible" % ( count, device_type, len(devices), device_type, "is" if len(devices) == 1 else "are", ) ) return devices[0:count]
[ "def", "get_devices", "(", "count", "=", "1", ",", "fallback_to_cpu", "=", "True", ")", ":", "device_type", "=", "\"GPU\"", "devices", "=", "tf", ".", "config", ".", "list_logical_devices", "(", "device_type", "=", "device_type", ")", "if", "not", "devices", "and", "fallback_to_cpu", ":", "tf", ".", "get_logger", "(", ")", ".", "warning", "(", "\"No GPU is detected, falling back to CPU\"", ")", "device_type", "=", "\"CPU\"", "devices", "=", "tf", ".", "config", ".", "list_logical_devices", "(", "device_type", "=", "device_type", ")", "if", "len", "(", "devices", ")", "<", "count", ":", "raise", "ValueError", "(", "\"Requested %d %s devices but %d %s %s visible\"", "%", "(", "count", ",", "device_type", ",", "len", "(", "devices", ")", ",", "device_type", ",", "\"is\"", "if", "len", "(", "devices", ")", "==", "1", "else", "\"are\"", ",", ")", ")", "return", "devices", "[", "0", ":", "count", "]" ]
https://github.com/OpenNMT/OpenNMT-tf/blob/59a4dfdb911d0570ba1096b7a0a7b9fc5c7844bf/opennmt/utils/misc.py#L17-L47
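Typical call and failure mode for the function above:

devices = get_devices(count=1, fallback_to_cpu=True)
print(devices[0].name)  # '/device:GPU:0', or '/device:CPU:0' after the fallback warning

# Asking for more devices than are visible raises, e.g. on a CPU-only host:
# get_devices(count=2)  ->  ValueError: Requested 2 CPU devices but 1 CPU is visible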
ricequant/rqalpha-mod-ctp
bfd40801f9a182226a911cac74660f62993eb6db
rqalpha_mod_ctp/ctp/pyctp/linux64_27/__init__.py
python
TraderApi.ReqUserLogout
(self, pUserLogout, nRequestID)
return 0
Logout request
Logout request
[ "Logout", "request" ]
def ReqUserLogout(self, pUserLogout, nRequestID): """Logout request""" return 0
[ "def", "ReqUserLogout", "(", "self", ",", "pUserLogout", ",", "nRequestID", ")", ":", "return", "0" ]
https://github.com/ricequant/rqalpha-mod-ctp/blob/bfd40801f9a182226a911cac74660f62993eb6db/rqalpha_mod_ctp/ctp/pyctp/linux64_27/__init__.py#L241-L243
ParmEd/ParmEd
cd763f2e83c98ba9e51676f6dbebf0eebfd5157e
parmed/openmm/reporters.py
python
StateDataReporter.finalize
(self)
Closes any open file
Closes any open file
[ "Closes", "any", "open", "file" ]
def finalize(self): """ Closes any open file """ try: if self._out is not None and self._openedFile: self._out.close() except AttributeError: # pragma: no cover pass
[ "def", "finalize", "(", "self", ")", ":", "try", ":", "if", "self", ".", "_out", "is", "not", "None", "and", "self", ".", "_openedFile", ":", "self", ".", "_out", ".", "close", "(", ")", "except", "AttributeError", ":", "# pragma: no cover", "pass" ]
https://github.com/ParmEd/ParmEd/blob/cd763f2e83c98ba9e51676f6dbebf0eebfd5157e/parmed/openmm/reporters.py#L274-L280
pydata/pandas-datareader
3f1d590e6e67cf30aa516d3b1f1921b5c45ccc4b
pandas_datareader/yahoo/daily.py
python
_calc_return_index
(price_df)
return df
Return a returns index from an input price df or series. Initial value (typically NaN) is set to 1.
Return a returns index from an input price df or series. Initial value (typically NaN) is set to 1.
[ "Return", "a", "returns", "index", "from", "an", "input", "price", "df", "or", "series", ".", "Initial", "value", "(", "typically", "NaN", ")", "is", "set", "to", "1", "." ]
def _calc_return_index(price_df): """ Return a returns index from an input price df or series. Initial value (typically NaN) is set to 1. """ df = price_df.pct_change().add(1).cumprod() mask = notnull(df.iloc[1]) & isnull(df.iloc[0]) if mask: df.loc[df.index[0]] = 1 # Check for first stock listings after starting date of index in ret_index # If True, find first_valid_index and set previous entry to 1. if not mask: tstamp = df.first_valid_index() t_idx = df.index.get_loc(tstamp) - 1 df.iloc[t_idx] = 1 return df
[ "def", "_calc_return_index", "(", "price_df", ")", ":", "df", "=", "price_df", ".", "pct_change", "(", ")", ".", "add", "(", "1", ")", ".", "cumprod", "(", ")", "mask", "=", "notnull", "(", "df", ".", "iloc", "[", "1", "]", ")", "&", "isnull", "(", "df", ".", "iloc", "[", "0", "]", ")", "if", "mask", ":", "df", ".", "loc", "[", "df", ".", "index", "[", "0", "]", "]", "=", "1", "# Check for first stock listings after starting date of index in ret_index", "# If True, find first_valid_index and set previous entry to 1.", "if", "not", "mask", ":", "tstamp", "=", "df", ".", "first_valid_index", "(", ")", "t_idx", "=", "df", ".", "index", ".", "get_loc", "(", "tstamp", ")", "-", "1", "df", ".", "iloc", "[", "t_idx", "]", "=", "1", "return", "df" ]
https://github.com/pydata/pandas-datareader/blob/3f1d590e6e67cf30aa516d3b1f1921b5c45ccc4b/pandas_datareader/yahoo/daily.py#L239-L256
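Calling the private helper above directly on a small price series (numbers illustrative):

import pandas as pd

prices = pd.Series([100.0, 102.0, 99.96],
                   index=pd.date_range("2021-01-04", periods=3))
ret_index = _calc_return_index(prices)
print(ret_index.tolist())  # approx [1.0, 1.02, 0.9996]: pct_change's leading NaN reset to 1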
tp4a/teleport
1fafd34f1f775d2cf80ea4af6e44468d8e0b24ad
server/www/packages/packages-darwin/x64/tornado/template.py
python
Loader.__init__
(self, root_directory, **kwargs)
[]
def __init__(self, root_directory, **kwargs): super(Loader, self).__init__(**kwargs) self.root = os.path.abspath(root_directory)
[ "def", "__init__", "(", "self", ",", "root_directory", ",", "*", "*", "kwargs", ")", ":", "super", "(", "Loader", ",", "self", ")", ".", "__init__", "(", "*", "*", "kwargs", ")", "self", ".", "root", "=", "os", ".", "path", ".", "abspath", "(", "root_directory", ")" ]
https://github.com/tp4a/teleport/blob/1fafd34f1f775d2cf80ea4af6e44468d8e0b24ad/server/www/packages/packages-darwin/x64/tornado/template.py#L435-L437
google-research/language
61fa7260ac7d690d11ef72ca863e45a37c0bdc80
language/search_agents/muzero/server.py
python
NQServer.get_query
( self, index: Optional[int] = None, dataset_type: str = 'TRAIN')
return query
Get query by index. Args: index: int, Index of the query. dataset_type: str, Dataset to choose from in ['TRAIN', 'DEV', 'TEST']. Returns: The corresponding query as environment_pb2.Query.
Get query by index.
[ "Get", "query", "by", "index", "." ]
def get_query( self, index: Optional[int] = None, dataset_type: str = 'TRAIN') -> environment_pb2.GetQueryResponse: """Get query by index. Args: index: int, Index of the query. dataset_type: str, Dataset to choose from in ['TRAIN', 'DEV', 'TEST']. Returns: The corresponding query as environment_pb2.Query. """ dataset = environment_pb2.DataSet.Value(common_flags.DATASET.value) if index: req = environment_pb2.GetQueryRequest( index=index, dataset=dataset, dataset_type=dataset_type) else: req = environment_pb2.GetQueryRequest( dataset=dataset, dataset_type=dataset_type) query = self._call_rpc(self._stub.GetQuery, req) return query
[ "def", "get_query", "(", "self", ",", "index", ":", "Optional", "[", "int", "]", "=", "None", ",", "dataset_type", ":", "str", "=", "'TRAIN'", ")", "->", "environment_pb2", ".", "GetQueryResponse", ":", "dataset", "=", "environment_pb2", ".", "DataSet", ".", "Value", "(", "common_flags", ".", "DATASET", ".", "value", ")", "if", "index", ":", "req", "=", "environment_pb2", ".", "GetQueryRequest", "(", "index", "=", "index", ",", "dataset", "=", "dataset", ",", "dataset_type", "=", "dataset_type", ")", "else", ":", "req", "=", "environment_pb2", ".", "GetQueryRequest", "(", "dataset", "=", "dataset", ",", "dataset_type", "=", "dataset_type", ")", "query", "=", "self", ".", "_call_rpc", "(", "self", ".", "_stub", ".", "GetQuery", ",", "req", ")", "return", "query" ]
https://github.com/google-research/language/blob/61fa7260ac7d690d11ef72ca863e45a37c0bdc80/language/search_agents/muzero/server.py#L109-L130
Pymol-Scripts/Pymol-script-repo
bcd7bb7812dc6db1595953dfa4471fa15fb68c77
modules/pdb2pqr/contrib/numpy-1.1.0/numpy/ma/core.py
python
MaskedArray.trace
(self, offset=0, axis1=0, axis2=1, dtype=None, out=None)
a.trace(offset=0, axis1=0, axis2=1, dtype=None, out=None) Return the sum along the offset diagonal of the array's indicated `axis1` and `axis2`.
a.trace(offset=0, axis1=0, axis2=1, dtype=None, out=None)
[ "a", ".", "trace", "(", "offset", "=", "0", "axis1", "=", "0", "axis2", "=", "1", "dtype", "=", "None", "out", "=", "None", ")" ]
def trace(self, offset=0, axis1=0, axis2=1, dtype=None, out=None): """a.trace(offset=0, axis1=0, axis2=1, dtype=None, out=None) Return the sum along the offset diagonal of the array's indicated `axis1` and `axis2`. """ # TODO: What are we doing with `out`? m = self._mask if m is nomask: result = super(MaskedArray, self).trace(offset=offset, axis1=axis1, axis2=axis2, out=out) return result.astype(dtype) else: D = self.diagonal(offset=offset, axis1=axis1, axis2=axis2) return D.astype(dtype).filled(0).sum(axis=None)
[ "def", "trace", "(", "self", ",", "offset", "=", "0", ",", "axis1", "=", "0", ",", "axis2", "=", "1", ",", "dtype", "=", "None", ",", "out", "=", "None", ")", ":", "# TODO: What are we doing with `out`?", "m", "=", "self", ".", "_mask", "if", "m", "is", "nomask", ":", "result", "=", "super", "(", "MaskedArray", ",", "self", ")", ".", "trace", "(", "offset", "=", "offset", ",", "axis1", "=", "axis1", ",", "axis2", "=", "axis2", ",", "out", "=", "out", ")", "return", "result", ".", "astype", "(", "dtype", ")", "else", ":", "D", "=", "self", ".", "diagonal", "(", "offset", "=", "offset", ",", "axis1", "=", "axis1", ",", "axis2", "=", "axis2", ")", "return", "D", ".", "astype", "(", "dtype", ")", ".", "filled", "(", "0", ")", ".", "sum", "(", "axis", "=", "None", ")" ]
https://github.com/Pymol-Scripts/Pymol-script-repo/blob/bcd7bb7812dc6db1595953dfa4471fa15fb68c77/modules/pdb2pqr/contrib/numpy-1.1.0/numpy/ma/core.py#L2027-L2042
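The masked branch above fills masked diagonal entries with 0 before summing; a small example of that behavior:

import numpy
import numpy.ma as ma

a = ma.array(numpy.arange(9).reshape(3, 3),
             mask=[[0, 0, 0], [0, 1, 0], [0, 0, 0]])
print(a.trace())  # 8.0 -- the diagonal is (0, --, 8); the masked 4 is filled with 0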
TencentCloud/tencentcloud-sdk-python
3677fd1cdc8c5fd626ce001c13fd3b59d1f279d2
tencentcloud/vpc/v20170312/vpc_client.py
python
VpcClient.InquiryPriceResetVpnGatewayInternetMaxBandwidth
(self, request)
This API (InquiryPriceResetVpnGatewayInternetMaxBandwidth) queries the price of adjusting the VPN gateway bandwidth cap. :param request: Request instance for InquiryPriceResetVpnGatewayInternetMaxBandwidth. :type request: :class:`tencentcloud.vpc.v20170312.models.InquiryPriceResetVpnGatewayInternetMaxBandwidthRequest` :rtype: :class:`tencentcloud.vpc.v20170312.models.InquiryPriceResetVpnGatewayInternetMaxBandwidthResponse`
This API (InquiryPriceResetVpnGatewayInternetMaxBandwidth) queries the price of adjusting the VPN gateway bandwidth cap.
[ "This", "API", "(InquiryPriceResetVpnGatewayInternetMaxBandwidth)", "queries", "the", "price", "of", "adjusting", "the", "VPN", "gateway", "bandwidth", "cap", "." ]
def InquiryPriceResetVpnGatewayInternetMaxBandwidth(self, request): """This API (InquiryPriceResetVpnGatewayInternetMaxBandwidth) queries the price of adjusting the VPN gateway bandwidth cap. :param request: Request instance for InquiryPriceResetVpnGatewayInternetMaxBandwidth. :type request: :class:`tencentcloud.vpc.v20170312.models.InquiryPriceResetVpnGatewayInternetMaxBandwidthRequest` :rtype: :class:`tencentcloud.vpc.v20170312.models.InquiryPriceResetVpnGatewayInternetMaxBandwidthResponse` """ try: params = request._serialize() body = self.call("InquiryPriceResetVpnGatewayInternetMaxBandwidth", params) response = json.loads(body) if "Error" not in response["Response"]: model = models.InquiryPriceResetVpnGatewayInternetMaxBandwidthResponse() model._deserialize(response["Response"]) return model else: code = response["Response"]["Error"]["Code"] message = response["Response"]["Error"]["Message"] reqid = response["Response"]["RequestId"] raise TencentCloudSDKException(code, message, reqid) except Exception as e: if isinstance(e, TencentCloudSDKException): raise else: raise TencentCloudSDKException(e.message, e.message)
[ "def", "InquiryPriceResetVpnGatewayInternetMaxBandwidth", "(", "self", ",", "request", ")", ":", "try", ":", "params", "=", "request", ".", "_serialize", "(", ")", "body", "=", "self", ".", "call", "(", "\"InquiryPriceResetVpnGatewayInternetMaxBandwidth\"", ",", "params", ")", "response", "=", "json", ".", "loads", "(", "body", ")", "if", "\"Error\"", "not", "in", "response", "[", "\"Response\"", "]", ":", "model", "=", "models", ".", "InquiryPriceResetVpnGatewayInternetMaxBandwidthResponse", "(", ")", "model", ".", "_deserialize", "(", "response", "[", "\"Response\"", "]", ")", "return", "model", "else", ":", "code", "=", "response", "[", "\"Response\"", "]", "[", "\"Error\"", "]", "[", "\"Code\"", "]", "message", "=", "response", "[", "\"Response\"", "]", "[", "\"Error\"", "]", "[", "\"Message\"", "]", "reqid", "=", "response", "[", "\"Response\"", "]", "[", "\"RequestId\"", "]", "raise", "TencentCloudSDKException", "(", "code", ",", "message", ",", "reqid", ")", "except", "Exception", "as", "e", ":", "if", "isinstance", "(", "e", ",", "TencentCloudSDKException", ")", ":", "raise", "else", ":", "raise", "TencentCloudSDKException", "(", "e", ".", "message", ",", "e", ".", "message", ")" ]
https://github.com/TencentCloud/tencentcloud-sdk-python/blob/3677fd1cdc8c5fd626ce001c13fd3b59d1f279d2/tencentcloud/vpc/v20170312/vpc_client.py#L5489-L5514
edmunds/shadowreader
24f1bc5e751b5b2605114a3c329c28f1f37f46d1
shadowreader/libs/cloudwatch.py
python
_put_metric
( namespace: str, metric_name: str, dimensions: list, timestamp: int, val: float, storage_resolution: int = 60, )
return resp
Put a custom CloudWatch metric
Put a custom CloudWatch metric
[ "Put", "a", "custom", "CloudWatch", "metric" ]
def _put_metric( namespace: str, metric_name: str, dimensions: list, timestamp: int, val: float, storage_resolution: int = 60, ): """ Put a custom CloudWatch metric """ resp = cw.put_metric_data( Namespace=namespace, MetricData=[{ "MetricName": metric_name, "Dimensions": dimensions, "Timestamp": int(timestamp), "Value": val, "Unit": "Count", "StorageResolution": storage_resolution, }], ) return resp
[ "def", "_put_metric", "(", "namespace", ":", "str", ",", "metric_name", ":", "str", ",", "dimensions", ":", "list", ",", "timestamp", ":", "int", ",", "val", ":", "float", ",", "storage_resolution", ":", "int", "=", "60", ",", ")", ":", "resp", "=", "cw", ".", "put_metric_data", "(", "Namespace", "=", "namespace", ",", "MetricData", "=", "[", "{", "\"MetricName\"", ":", "metric_name", ",", "\"Dimensions\"", ":", "dimensions", ",", "\"Timestamp\"", ":", "int", "(", "timestamp", ")", ",", "\"Value\"", ":", "val", ",", "\"Unit\"", ":", "\"Count\"", ",", "\"StorageResolution\"", ":", "storage_resolution", ",", "}", "]", ",", ")", "return", "resp" ]
https://github.com/edmunds/shadowreader/blob/24f1bc5e751b5b2605114a3c329c28f1f37f46d1/shadowreader/libs/cloudwatch.py#L67-L87
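The `cw` object above is a module-level boto3 CloudWatch client (assumed from the call shape). The equivalent direct call, with illustrative namespace and dimensions:

import time
import boto3

cw = boto3.client("cloudwatch")  # assumed to match the module-level `cw` above
cw.put_metric_data(
    Namespace="shadowreader",  # illustrative namespace
    MetricData=[{
        "MetricName": "requests",
        "Dimensions": [{"Name": "stage", "Value": "demo"}],
        "Timestamp": int(time.time()),
        "Value": 1.0,
        "Unit": "Count",
        "StorageResolution": 60,  # 60s = standard resolution; 1 = high resolution
    }],
)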
omz/PythonistaAppTemplate
f560f93f8876d82a21d108977f90583df08d55af
PythonistaAppTemplate/PythonistaKit.framework/pylib/logging/__init__.py
python
Logger.debug
(self, msg, *args, **kwargs)
Log 'msg % args' with severity 'DEBUG'. To pass exception information, use the keyword argument exc_info with a true value, e.g. logger.debug("Houston, we have a %s", "thorny problem", exc_info=1)
Log 'msg % args' with severity 'DEBUG'.
[ "Log", "msg", "%", "args", "with", "severity", "DEBUG", "." ]
def debug(self, msg, *args, **kwargs): """ Log 'msg % args' with severity 'DEBUG'. To pass exception information, use the keyword argument exc_info with a true value, e.g. logger.debug("Houston, we have a %s", "thorny problem", exc_info=1) """ if self.isEnabledFor(DEBUG): self._log(DEBUG, msg, args, **kwargs)
[ "def", "debug", "(", "self", ",", "msg", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "if", "self", ".", "isEnabledFor", "(", "DEBUG", ")", ":", "self", ".", "_log", "(", "DEBUG", ",", "msg", ",", "args", ",", "*", "*", "kwargs", ")" ]
https://github.com/omz/PythonistaAppTemplate/blob/f560f93f8876d82a21d108977f90583df08d55af/PythonistaAppTemplate/PythonistaKit.framework/pylib/logging/__init__.py#L1127-L1137
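Standard usage, mirroring the docstring's own example:

import logging

logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger(__name__)
try:
    1 / 0
except ZeroDivisionError:
    logger.debug("Houston, we have a %s", "thorny problem", exc_info=1)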
afruehstueck/tileGAN
0460e228b1109528a0fefc6569b970c2934a649d
tileGAN_client.py
python
ImageViewer.setMergeLevel
(self)
adjust latent merge level
adjust latent merge level
[ "adjust", "latent", "merge", "level" ]
def setMergeLevel(self): """ adjust latent merge level """ level, latentSize, ok = GetLevelDialog.getValues(m=self.mergeLevel, l=self.latentSize) #check parameter validity if latentSize > 2 ** level: latentSize = 2 ** level msg = QMessageBox() msg.setWindowTitle("Latent settings warning") msg.setIcon(QMessageBox.Warning) msg.setText("Latent size cannot be larger than 2^level.\n Latent size for level {} reset to {}".format(level, latentSize)) msg.setStandardButtons(QMessageBox.Ok) msg.exec_() if not ok or (level == self.mergeLevel and latentSize == self.latentSize): #Dialog was terminated or settings stayed the same return self.mergeLevel = level self.latentSize = latentSize print('<ImageViewer> updating new merge level to {}, latentSize {}'.format(self.mergeLevel, self.latentSize)) tf_manager.setMergeLevel(self.mergeLevel, self.latentSize) output, gridShape = np.asarray(tf_manager.getOutput()._getvalue()) self.updateGridShape(np.asarray(gridShape)) self.updateImage(output, fitToView=True)
[ "def", "setMergeLevel", "(", "self", ")", ":", "level", ",", "latentSize", ",", "ok", "=", "GetLevelDialog", ".", "getValues", "(", "m", "=", "self", ".", "mergeLevel", ",", "l", "=", "self", ".", "latentSize", ")", "#check parameter validity", "if", "latentSize", ">", "2", "**", "level", ":", "latentSize", "=", "2", "**", "level", "msg", "=", "QMessageBox", "(", ")", "msg", ".", "setWindowTitle", "(", "\"Latent settings warning\"", ")", "msg", ".", "setIcon", "(", "QMessageBox", ".", "Warning", ")", "msg", ".", "setText", "(", "\"Latent size cannot be larger than 2^level.\\n Latent size for level {} reset to {}\"", ".", "format", "(", "level", ",", "latentSize", ")", ")", "msg", ".", "setStandardButtons", "(", "QMessageBox", ".", "Ok", ")", "msg", ".", "exec_", "(", ")", "if", "not", "ok", "or", "(", "level", "==", "self", ".", "mergeLevel", "and", "latentSize", "==", "self", ".", "latentSize", ")", ":", "#Dialog was terminated or settings stayed the same", "return", "self", ".", "mergeLevel", "=", "level", "self", ".", "latentSize", "=", "latentSize", "print", "(", "'<ImageViewer> updating new merge level to {}, latentSize {}'", ".", "format", "(", "self", ".", "mergeLevel", ",", "self", ".", "latentSize", ")", ")", "tf_manager", ".", "setMergeLevel", "(", "self", ".", "mergeLevel", ",", "self", ".", "latentSize", ")", "output", ",", "gridShape", "=", "np", ".", "asarray", "(", "tf_manager", ".", "getOutput", "(", ")", ".", "_getvalue", "(", ")", ")", "self", ".", "updateGridShape", "(", "np", ".", "asarray", "(", "gridShape", ")", ")", "self", ".", "updateImage", "(", "output", ",", "fitToView", "=", "True", ")" ]
https://github.com/afruehstueck/tileGAN/blob/0460e228b1109528a0fefc6569b970c2934a649d/tileGAN_client.py#L864-L889
matthew-brett/transforms3d
f185e866ecccb66c545559bc9f2e19cb5025e0ab
transforms3d/_gohlketransforms.py
python
superimposition_matrix
(v0, v1, scale=False, usesvd=True)
return affine_matrix_from_points(v0, v1, shear=False, scale=scale, usesvd=usesvd)
r"""Return matrix to transform given 3D point set into second point set. v0 and v1 are shape (3, \*) or (4, \*) arrays of at least 3 points. The parameters scale and usesvd are explained in the more general affine_matrix_from_points function. The returned matrix is a similarity or Euclidean transformation matrix. This function has a fast C implementation in transformations.c. >>> v0 = numpy.random.rand(3, 10) >>> M = superimposition_matrix(v0, v0) >>> numpy.allclose(M, numpy.identity(4)) True >>> R = random_rotation_matrix(numpy.random.random(3)) >>> v0 = [[1,0,0], [0,1,0], [0,0,1], [1,1,1]] >>> v1 = numpy.dot(R, v0) >>> M = superimposition_matrix(v0, v1) >>> numpy.allclose(v1, numpy.dot(M, v0)) True >>> v0 = (numpy.random.rand(4, 100) - 0.5) * 20 >>> v0[3] = 1 >>> v1 = numpy.dot(R, v0) >>> M = superimposition_matrix(v0, v1) >>> numpy.allclose(v1, numpy.dot(M, v0)) True >>> S = scale_matrix(random.random()) >>> T = translation_matrix(numpy.random.random(3)-0.5) >>> M = concatenate_matrices(T, R, S) >>> v1 = numpy.dot(M, v0) >>> v0[:3] += numpy.random.normal(0, 1e-9, 300).reshape(3, -1) >>> M = superimposition_matrix(v0, v1, scale=True) >>> numpy.allclose(v1, numpy.dot(M, v0)) True >>> M = superimposition_matrix(v0, v1, scale=True, usesvd=False) >>> numpy.allclose(v1, numpy.dot(M, v0)) True >>> v = numpy.empty((4, 100, 3)) >>> v[:, :, 0] = v0 >>> M = superimposition_matrix(v0, v1, scale=True, usesvd=False) >>> numpy.allclose(v1, numpy.dot(M, v[:, :, 0])) True
r"""Return matrix to transform given 3D point set into second point set.
[ "r", "Return", "matrix", "to", "transform", "given", "3D", "point", "set", "into", "second", "point", "set", "." ]
def superimposition_matrix(v0, v1, scale=False, usesvd=True): r"""Return matrix to transform given 3D point set into second point set. v0 and v1 are shape (3, \*) or (4, \*) arrays of at least 3 points. The parameters scale and usesvd are explained in the more general affine_matrix_from_points function. The returned matrix is a similarity or Euclidean transformation matrix. This function has a fast C implementation in transformations.c. >>> v0 = numpy.random.rand(3, 10) >>> M = superimposition_matrix(v0, v0) >>> numpy.allclose(M, numpy.identity(4)) True >>> R = random_rotation_matrix(numpy.random.random(3)) >>> v0 = [[1,0,0], [0,1,0], [0,0,1], [1,1,1]] >>> v1 = numpy.dot(R, v0) >>> M = superimposition_matrix(v0, v1) >>> numpy.allclose(v1, numpy.dot(M, v0)) True >>> v0 = (numpy.random.rand(4, 100) - 0.5) * 20 >>> v0[3] = 1 >>> v1 = numpy.dot(R, v0) >>> M = superimposition_matrix(v0, v1) >>> numpy.allclose(v1, numpy.dot(M, v0)) True >>> S = scale_matrix(random.random()) >>> T = translation_matrix(numpy.random.random(3)-0.5) >>> M = concatenate_matrices(T, R, S) >>> v1 = numpy.dot(M, v0) >>> v0[:3] += numpy.random.normal(0, 1e-9, 300).reshape(3, -1) >>> M = superimposition_matrix(v0, v1, scale=True) >>> numpy.allclose(v1, numpy.dot(M, v0)) True >>> M = superimposition_matrix(v0, v1, scale=True, usesvd=False) >>> numpy.allclose(v1, numpy.dot(M, v0)) True >>> v = numpy.empty((4, 100, 3)) >>> v[:, :, 0] = v0 >>> M = superimposition_matrix(v0, v1, scale=True, usesvd=False) >>> numpy.allclose(v1, numpy.dot(M, v[:, :, 0])) True """ v0 = numpy.array(v0, dtype=numpy.float64, copy=False)[:3] v1 = numpy.array(v1, dtype=numpy.float64, copy=False)[:3] return affine_matrix_from_points(v0, v1, shear=False, scale=scale, usesvd=usesvd)
[ "def", "superimposition_matrix", "(", "v0", ",", "v1", ",", "scale", "=", "False", ",", "usesvd", "=", "True", ")", ":", "v0", "=", "numpy", ".", "array", "(", "v0", ",", "dtype", "=", "numpy", ".", "float64", ",", "copy", "=", "False", ")", "[", ":", "3", "]", "v1", "=", "numpy", ".", "array", "(", "v1", ",", "dtype", "=", "numpy", ".", "float64", ",", "copy", "=", "False", ")", "[", ":", "3", "]", "return", "affine_matrix_from_points", "(", "v0", ",", "v1", ",", "shear", "=", "False", ",", "scale", "=", "scale", ",", "usesvd", "=", "usesvd", ")" ]
https://github.com/matthew-brett/transforms3d/blob/f185e866ecccb66c545559bc9f2e19cb5025e0ab/transforms3d/_gohlketransforms.py#L1003-L1051
numenta/numenta-apps
02903b0062c89c2c259b533eea2df6e8bb44eaf3
htmengine/htmengine/utils/__init__.py
python
_jsonDecodeListUTF8
(data)
return rv
object_hook for json decoder used to decode unicode strings as UTF8 strings
object_hook for json decoder used to decode unicode strings as UTF8 strings
[ "object_hook", "for", "json", "decoder", "used", "to", "decode", "unicode", "strings", "as", "UTF8", "strings" ]
def _jsonDecodeListUTF8(data): """ object_hook for json decoder used to decode unicode strings as UTF8 strings """ rv = [] for item in data: if isinstance(item, unicode): item = item.encode('utf-8') elif isinstance(item, list): item = _jsonDecodeListUTF8(item) elif isinstance(item, dict): item = _jsonDecodeDictUTF8(item) rv.append(item) return rv
[ "def", "_jsonDecodeListUTF8", "(", "data", ")", ":", "rv", "=", "[", "]", "for", "item", "in", "data", ":", "if", "isinstance", "(", "item", ",", "unicode", ")", ":", "item", "=", "item", ".", "encode", "(", "'utf-8'", ")", "elif", "isinstance", "(", "item", ",", "list", ")", ":", "item", "=", "_jsonDecodeListUTF8", "(", "item", ")", "elif", "isinstance", "(", "item", ",", "dict", ")", ":", "item", "=", "_jsonDecodeDictUTF8", "(", "item", ")", "rv", ".", "append", "(", "item", ")", "return", "rv" ]
https://github.com/numenta/numenta-apps/blob/02903b0062c89c2c259b533eea2df6e8bb44eaf3/htmengine/htmengine/utils/__init__.py#L48-L61
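The companion _jsonDecodeDictUTF8 referenced above is defined elsewhere in this module and is not part of this record. A plausible sketch of it (Python 2 idiom, hence unicode/iteritems), mirroring how the list helper recurses:

def _jsonDecodeDictUTF8(data):
    # Hypothetical mirror of the list helper for dicts.
    rv = {}
    for key, value in data.iteritems():
        if isinstance(key, unicode):
            key = key.encode('utf-8')
        if isinstance(value, unicode):
            value = value.encode('utf-8')
        elif isinstance(value, list):
            value = _jsonDecodeListUTF8(value)
        elif isinstance(value, dict):
            value = _jsonDecodeDictUTF8(value)
        rv[key] = value
    return rv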
tdamdouni/Pythonista
3e082d53b6b9b501a3c8cf3251a8ad4c8be9c2ad
markdown/markdown2pdf/reportlab/graphics/renderbase.py
python
StateTracker.getState
(self)
return self._combined[-1]
returns the complete graphics state at this point
returns the complete graphics state at this point
[ "returns", "the", "complete", "graphics", "state", "at", "this", "point" ]
def getState(self): "returns the complete graphics state at this point" return self._combined[-1]
[ "def", "getState", "(", "self", ")", ":", "return", "self", ".", "_combined", "[", "-", "1", "]" ]
https://github.com/tdamdouni/Pythonista/blob/3e082d53b6b9b501a3c8cf3251a8ad4c8be9c2ad/markdown/markdown2pdf/reportlab/graphics/renderbase.py#L111-L113
j4mie/sqlsite
f2dadb8db5ed7880f8872b6591d8cb1487f777ea
sqlsite/responses.py
python
HTMLResponse.__init__
(self, *args, **kwargs)
[]
def __init__(self, *args, **kwargs): super().__init__(content_type="text/html", charset="utf-8", *args, **kwargs)
[ "def", "__init__", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "super", "(", ")", ".", "__init__", "(", "content_type", "=", "\"text/html\"", ",", "charset", "=", "\"utf-8\"", ",", "*", "args", ",", "*", "*", "kwargs", ")" ]
https://github.com/j4mie/sqlsite/blob/f2dadb8db5ed7880f8872b6591d8cb1487f777ea/sqlsite/responses.py#L85-L86
tkrajina/gpxpy
208fcd625760a73b7d1b7167c4053547084268c9
gpxpy/gpx.py
python
GPXRoutePoint.adjust_time
(self, delta: mod_datetime.timedelta)
Adjusts the time of the point by the specified delta Parameters ---------- delta : datetime.timedelta Positive time delta will adjust time into the future Negative time delta will adjust time into the past
Adjusts the time of the point by the specified delta
[ "Adjusts", "the", "time", "of", "the", "point", "by", "the", "specified", "delta" ]
def adjust_time(self, delta: mod_datetime.timedelta) -> None: """ Adjusts the time of the point by the specified delta Parameters ---------- delta : datetime.timedelta Positive time delta will adjust time into the future Negative time delta will adjust time into the past """ if self.time: self.time += delta
[ "def", "adjust_time", "(", "self", ",", "delta", ":", "mod_datetime", ".", "timedelta", ")", "->", "None", ":", "if", "self", ".", "time", ":", "self", ".", "time", "+=", "delta" ]
https://github.com/tkrajina/gpxpy/blob/208fcd625760a73b7d1b7167c4053547084268c9/gpxpy/gpx.py#L324-L335
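Shifting a point one hour forward with the method above:

import datetime
from gpxpy.gpx import GPXRoutePoint

point = GPXRoutePoint(latitude=46.0, longitude=14.0)
point.time = datetime.datetime(2021, 1, 1, 12, 0, 0)
point.adjust_time(datetime.timedelta(hours=1))
print(point.time)  # 2021-01-01 13:00:00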
HDI-Project/ATM
dde454a95e963a460843a61bbb44d18982984b17
atm/core.py
python
ATM.work
(self, datarun_ids=None, save_files=True, choose_randomly=True, cloud_mode=False, total_time=None, wait=True, verbose=False)
Get unfinished Dataruns from the database and work on them. Check the ModelHub Database for unfinished Dataruns, and work on them as they are added. This process will continue to run until it exceeds total_time or there are no more Dataruns to process or it is killed. Args: datarun_ids (list): list of IDs of Dataruns to work on. If ``None``, this will work on any unfinished Dataruns found in the database. Optional. Defaults to ``None``. save_files (bool): Whether to save the fitted classifiers and their metrics or not. Optional. Defaults to True. choose_randomly (bool): If ``True``, work on all the highest-priority dataruns in random order. Otherwise, work on them in sequential order (by ID). Optional. Defaults to ``True``. cloud_mode (bool): Save the models and metrics in AWS S3 instead of locally. This option works only if S3 configuration has been provided on initialization. Optional. Defaults to ``False``. total_time (int): Total time to run the work process, in seconds. If ``None``, continue to run until interrupted or there are no more Dataruns to process. Optional. Defaults to ``None``. wait (bool): If ``True``, wait for more Dataruns to be inserted into the Database once all have been processed. Otherwise, exit the worker loop when they run out. Optional. Defaults to ``False``. verbose (bool): Whether to be verbose about the process. Optional. Defaults to ``True``.
Get unfinished Dataruns from the database and work on them.
[ "Get", "unfinished", "Dataruns", "from", "the", "database", "and", "work", "on", "them", "." ]
def work(self, datarun_ids=None, save_files=True, choose_randomly=True, cloud_mode=False, total_time=None, wait=True, verbose=False): """Get unfinished Dataruns from the database and work on them. Check the ModelHub Database for unfinished Dataruns, and work on them as they are added. This process will continue to run until it exceeds total_time or there are no more Dataruns to process or it is killed. Args: datarun_ids (list): list of IDs of Dataruns to work on. If ``None``, this will work on any unfinished Dataruns found in the database. Optional. Defaults to ``None``. save_files (bool): Whether to save the fitted classifiers and their metrics or not. Optional. Defaults to True. choose_randomly (bool): If ``True``, work on all the highest-priority dataruns in random order. Otherwise, work on them in sequential order (by ID). Optional. Defaults to ``True``. cloud_mode (bool): Save the models and metrics in AWS S3 instead of locally. This option works only if S3 configuration has been provided on initialization. Optional. Defaults to ``False``. total_time (int): Total time to run the work process, in seconds. If ``None``, continue to run until interrupted or there are no more Dataruns to process. Optional. Defaults to ``None``. wait (bool): If ``True``, wait for more Dataruns to be inserted into the Database once all have been processed. Otherwise, exit the worker loop when they run out. Optional. Defaults to ``False``. verbose (bool): Whether to be verbose about the process. Optional. Defaults to ``True``. """ start_time = datetime.now() # main loop while True: # get all pending and running dataruns, or all pending/running dataruns # from the list we were given dataruns = self.db.get_dataruns(include_ids=datarun_ids, ignore_complete=True) if not dataruns: if wait: LOGGER.debug('No dataruns found. Sleeping %d seconds and trying again.', self._LOOP_WAIT) time.sleep(self._LOOP_WAIT) continue else: LOGGER.info('No dataruns found. Exiting.') break # either choose a run randomly between priority, or take the run with the lowest ID if choose_randomly: run = random.choice(dataruns) else: run = sorted(dataruns, key=attrgetter('id'))[0] # say we've started working on this datarun, if we haven't already self.db.mark_datarun_running(run.id) LOGGER.info('Computing on datarun %d' % run.id) # actual work happens here worker = Worker(self.db, run, save_files=save_files, cloud_mode=cloud_mode, aws_access_key=self.aws_access_key, aws_secret_key=self.aws_secret_key, s3_bucket=self.s3_bucket, s3_folder=self.s3_folder, models_dir=self.models_dir, metrics_dir=self.metrics_dir, verbose_metrics=self.verbose_metrics) try: if run.budget_type == 'classifier': pbar = tqdm( total=run.budget, ascii=True, initial=run.completed_classifiers, disable=not verbose ) while run.status != RunStatus.COMPLETE: worker.run_classifier() run = self.db.get_datarun(run.id) if verbose and run.completed_classifiers > pbar.last_print_n: pbar.update(run.completed_classifiers - pbar.last_print_n) pbar.close() elif run.budget_type == 'walltime': pbar = tqdm( disable=not verbose, ascii=True, initial=run.completed_classifiers, unit=' Classifiers' ) while run.status != RunStatus.COMPLETE: worker.run_classifier() run = self.db.get_datarun(run.id) # Refresh the datarun object. if verbose and run.completed_classifiers > pbar.last_print_n: pbar.update(run.completed_classifiers - pbar.last_print_n) pbar.close() except ClassifierError: # the exception has already been handled; just wait a sec so we # don't go out of control reporting errors LOGGER.error('Something went wrong. Sleeping %d seconds.', self._LOOP_WAIT) time.sleep(self._LOOP_WAIT) elapsed_time = (datetime.now() - start_time).total_seconds() if total_time is not None and elapsed_time >= total_time: LOGGER.info('Total run time for worker exceeded; exiting.') break
[ "def", "work", "(", "self", ",", "datarun_ids", "=", "None", ",", "save_files", "=", "True", ",", "choose_randomly", "=", "True", ",", "cloud_mode", "=", "False", ",", "total_time", "=", "None", ",", "wait", "=", "True", ",", "verbose", "=", "False", ")", ":", "start_time", "=", "datetime", ".", "now", "(", ")", "# main loop", "while", "True", ":", "# get all pending and running dataruns, or all pending/running dataruns", "# from the list we were given", "dataruns", "=", "self", ".", "db", ".", "get_dataruns", "(", "include_ids", "=", "datarun_ids", ",", "ignore_complete", "=", "True", ")", "if", "not", "dataruns", ":", "if", "wait", ":", "LOGGER", ".", "debug", "(", "'No dataruns found. Sleeping %d seconds and trying again.'", ",", "self", ".", "_LOOP_WAIT", ")", "time", ".", "sleep", "(", "self", ".", "_LOOP_WAIT", ")", "continue", "else", ":", "LOGGER", ".", "info", "(", "'No dataruns found. Exiting.'", ")", "break", "# either choose a run randomly between priority, or take the run with the lowest ID", "if", "choose_randomly", ":", "run", "=", "random", ".", "choice", "(", "dataruns", ")", "else", ":", "run", "=", "sorted", "(", "dataruns", ",", "key", "=", "attrgetter", "(", "'id'", ")", ")", "[", "0", "]", "# say we've started working on this datarun, if we haven't already", "self", ".", "db", ".", "mark_datarun_running", "(", "run", ".", "id", ")", "LOGGER", ".", "info", "(", "'Computing on datarun %d'", "%", "run", ".", "id", ")", "# actual work happens here", "worker", "=", "Worker", "(", "self", ".", "db", ",", "run", ",", "save_files", "=", "save_files", ",", "cloud_mode", "=", "cloud_mode", ",", "aws_access_key", "=", "self", ".", "aws_access_key", ",", "aws_secret_key", "=", "self", ".", "aws_secret_key", ",", "s3_bucket", "=", "self", ".", "s3_bucket", ",", "s3_folder", "=", "self", ".", "s3_folder", ",", "models_dir", "=", "self", ".", "models_dir", ",", "metrics_dir", "=", "self", ".", "metrics_dir", ",", "verbose_metrics", "=", "self", ".", "verbose_metrics", ")", "try", ":", "if", "run", ".", "budget_type", "==", "'classifier'", ":", "pbar", "=", "tqdm", "(", "total", "=", "run", ".", "budget", ",", "ascii", "=", "True", ",", "initial", "=", "run", ".", "completed_classifiers", ",", "disable", "=", "not", "verbose", ")", "while", "run", ".", "status", "!=", "RunStatus", ".", "COMPLETE", ":", "worker", ".", "run_classifier", "(", ")", "run", "=", "self", ".", "db", ".", "get_datarun", "(", "run", ".", "id", ")", "if", "verbose", "and", "run", ".", "completed_classifiers", ">", "pbar", ".", "last_print_n", ":", "pbar", ".", "update", "(", "run", ".", "completed_classifiers", "-", "pbar", ".", "last_print_n", ")", "pbar", ".", "close", "(", ")", "elif", "run", ".", "budget_type", "==", "'walltime'", ":", "pbar", "=", "tqdm", "(", "disable", "=", "not", "verbose", ",", "ascii", "=", "True", ",", "initial", "=", "run", ".", "completed_classifiers", ",", "unit", "=", "' Classifiers'", ")", "while", "run", ".", "status", "!=", "RunStatus", ".", "COMPLETE", ":", "worker", ".", "run_classifier", "(", ")", "run", "=", "self", ".", "db", ".", "get_datarun", "(", "run", ".", "id", ")", "# Refresh the datarun object.", "if", "verbose", "and", "run", ".", "completed_classifiers", ">", "pbar", ".", "last_print_n", ":", "pbar", ".", "update", "(", "run", ".", "completed_classifiers", "-", "pbar", ".", "last_print_n", ")", "pbar", ".", "close", "(", ")", "except", "ClassifierError", ":", "# the exception has already been handled; just wait a sec so we", "# don't go out of control reporting errors", "LOGGER", ".", "error", "(", "'Something went wrong. Sleeping %d seconds.'", ",", "self", ".", "_LOOP_WAIT", ")", "time", ".", "sleep", "(", "self", ".", "_LOOP_WAIT", ")", "elapsed_time", "=", "(", "datetime", ".", "now", "(", ")", "-", "start_time", ")", ".", "total_seconds", "(", ")", "if", "total_time", "is", "not", "None", "and", "elapsed_time", ">=", "total_time", ":", "LOGGER", ".", "info", "(", "'Total run time for worker exceeded; exiting.'", ")", "break" ]
https://github.com/HDI-Project/ATM/blob/dde454a95e963a460843a61bbb44d18982984b17/atm/core.py#L242-L354
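The distinctive idiom in the work() loop above is how the progress bar is resynchronized with an externally advancing counter: instead of pbar.update(1) per step, the bar is fast-forwarded by the difference between the datarun's completed_classifiers and tqdm's last_print_n. A minimal sketch of that idiom, assuming only that tqdm is installed; the plain `completed` counter stands in for ATM's Database/Worker machinery:

import time
from tqdm import tqdm

completed, budget = 0, 10
pbar = tqdm(total=budget, ascii=True, initial=completed)
while completed < budget:
    time.sleep(0.1)
    completed = min(completed + 3, budget)  # several classifiers finished at once
    if completed > pbar.last_print_n:
        # fast-forward the bar by the externally observed progress
        pbar.update(completed - pbar.last_print_n)
pbar.close()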
openshift/openshift-tools
1188778e728a6e4781acf728123e5b356380fe6f
openshift/installer/vendored/openshift-ansible-3.9.40/roles/lib_openshift/library/oc_adm_router.py
python
RoleBindingConfig.create_dict
(self)
create a default rolebinding as a dict
create a default rolebinding as a dict
[ "create", "a", "default", "rolebinding", "as", "a", "dict" ]
def create_dict(self): ''' create a default rolebinding as a dict ''' self.data['apiVersion'] = 'v1' self.data['kind'] = 'RoleBinding' self.data['groupNames'] = self.group_names self.data['metadata']['name'] = self.name self.data['metadata']['namespace'] = self.namespace self.data['roleRef'] = self.role_ref self.data['subjects'] = self.subjects self.data['userNames'] = self.usernames
[ "def", "create_dict", "(", "self", ")", ":", "self", ".", "data", "[", "'apiVersion'", "]", "=", "'v1'", "self", ".", "data", "[", "'kind'", "]", "=", "'RoleBinding'", "self", ".", "data", "[", "'groupNames'", "]", "=", "self", ".", "group_names", "self", ".", "data", "[", "'metadata'", "]", "[", "'name'", "]", "=", "self", ".", "name", "self", ".", "data", "[", "'metadata'", "]", "[", "'namespace'", "]", "=", "self", ".", "namespace", "self", ".", "data", "[", "'roleRef'", "]", "=", "self", ".", "role_ref", "self", ".", "data", "[", "'subjects'", "]", "=", "self", ".", "subjects", "self", ".", "data", "[", "'userNames'", "]", "=", "self", ".", "usernames" ]
https://github.com/openshift/openshift-tools/blob/1188778e728a6e4781acf728123e5b356380fe6f/openshift/installer/vendored/openshift-ansible-3.9.40/roles/lib_openshift/library/oc_adm_router.py#L2419-L2429
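create_dict() above simply pours instance attributes into a fixed v1 RoleBinding skeleton. A standalone sketch of the same shape, with a hypothetical helper in place of the module's RoleBindingConfig class:

def make_rolebinding(name, namespace, role_ref, subjects, group_names, user_names):
    # mirrors the keys set by RoleBindingConfig.create_dict
    return {
        'apiVersion': 'v1',
        'kind': 'RoleBinding',
        'groupNames': group_names,
        'metadata': {'name': name, 'namespace': namespace},
        'roleRef': role_ref,
        'subjects': subjects,
        'userNames': user_names,
    }

rb = make_rolebinding('router-router-role', 'default',
                      {'name': 'system:router'}, [], [], ['router'])
assert rb['kind'] == 'RoleBinding'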
khanhnamle1994/natural-language-processing
01d450d5ac002b0156ef4cf93a07cb508c1bcdc5
assignment1/.env/lib/python2.7/site-packages/tornado/iostream.py
python
BaseIOStream.closed
(self)
return self._closed
Returns true if the stream has been closed.
Returns true if the stream has been closed.
[ "Returns", "true", "if", "the", "stream", "has", "been", "closed", "." ]
def closed(self): """Returns true if the stream has been closed.""" return self._closed
[ "def", "closed", "(", "self", ")", ":", "return", "self", ".", "_closed" ]
https://github.com/khanhnamle1994/natural-language-processing/blob/01d450d5ac002b0156ef4cf93a07cb508c1bcdc5/assignment1/.env/lib/python2.7/site-packages/tornado/iostream.py#L432-L434
joerick/pyinstrument
d3c45164a385021f366c1081baec18a1a226a573
pyinstrument/frame.py
python
Frame.time
(self)
return self._time
[]
def time(self): if self._time is None: # can't use a sum(<generator>) expression here sadly, because this method # recurses down the call tree, and the generator uses an extra stack frame, # meaning we hit the stack limit when the profiled code is 500 frames deep. self._time = self.self_time for child in self.children: self._time += child.time() return self._time
[ "def", "time", "(", "self", ")", ":", "if", "self", ".", "_time", "is", "None", ":", "# can't use a sum(<generator>) expression here sadly, because this method", "# recurses down the call tree, and the generator uses an extra stack frame,", "# meaning we hit the stack limit when the profiled code is 500 frames deep.", "self", ".", "_time", "=", "self", ".", "self_time", "for", "child", "in", "self", ".", "children", ":", "self", ".", "_time", "+=", "child", ".", "time", "(", ")", "return", "self", ".", "_time" ]
https://github.com/joerick/pyinstrument/blob/d3c45164a385021f366c1081baec18a1a226a573/pyinstrument/frame.py#L259-L269
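The comment in Frame.time() is worth unpacking: sum(child.time() for child in ...) would push an extra generator frame per recursion level, roughly halving the usable stack depth on deep call trees, so the cached total is accumulated with a plain loop instead. A toy tree showing the same memoized pattern:

class Node:
    def __init__(self, self_time, children=()):
        self.self_time = self_time
        self.children = list(children)
        self._time = None  # cache, filled on first call

    def time(self):
        if self._time is None:
            total = self.self_time
            for child in self.children:  # plain loop: no generator frame
                total += child.time()
            self._time = total
        return self._time

root = Node(1.0, [Node(0.5), Node(0.25, [Node(0.25)])])
assert root.time() == 2.0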
fossasia/x-mario-center
fe67afe28d995dcf4e2498e305825a4859566172
build/lib.linux-i686-2.7/softwarecenter/db/pkginfo_impl/packagekit.py
python
PackagekitInfo.get_total_size_on_install
(self, pkgname, addons_install=None, addons_remove=None, archive_suite=None)
Returns a tuple (download_size, installed_size) with disk size in KB calculated for pkgname installation plus addons change.
Returns a tuple (download_size, installed_size) with disk size in KB calculated for pkgname installation plus addons change.
[ "Returns", "a", "tuple", "(", "download_size", "installed_size", ")", "with", "disk", "size", "in", "KB", "calculated", "for", "pkgname", "installation", "plus", "addons", "change", "." ]
def get_total_size_on_install(self, pkgname, addons_install=None, addons_remove=None, archive_suite=None): """ Returns a tuple (download_size, installed_size) with disk size in KB calculated for pkgname installation plus addons change. """ # FIXME: support archive_suite here too # FIXME: PackageKit reports only one size at a time if self.is_installed(pkgname): return (0, self.get_size(pkgname)) else: return (self.get_size(pkgname), 0)
[ "def", "get_total_size_on_install", "(", "self", ",", "pkgname", ",", "addons_install", "=", "None", ",", "addons_remove", "=", "None", ",", "archive_suite", "=", "None", ")", ":", "# FIXME: support archive_suite here too", "# FIXME: PackageKit reports only one size at a time", "if", "self", ".", "is_installed", "(", "pkgname", ")", ":", "return", "(", "0", ",", "self", ".", "get_size", "(", "pkgname", ")", ")", "else", ":", "return", "(", "self", ".", "get_size", "(", "pkgname", ")", ",", "0", ")" ]
https://github.com/fossasia/x-mario-center/blob/fe67afe28d995dcf4e2498e305825a4859566172/build/lib.linux-i686-2.7/softwarecenter/db/pkginfo_impl/packagekit.py#L267-L280
securityclippy/elasticintel
aa08d3e9f5ab1c000128e95161139ce97ff0e334
ingest_feed_lambda/pandas/core/groupby.py
python
generate_bins_generic
(values, binner, closed)
return bins
Generate bin edge offsets and bin labels for one array using another array which has bin edge values. Both arrays must be sorted. Parameters ---------- values : array of values binner : a comparable array of values representing bins into which to bin the first array. Note, 'values' end-points must fall within 'binner' end-points. closed : which end of bin is closed; left (default), right Returns ------- bins : array of offsets (into 'values' argument) of bins. Zero and last edge are excluded in result, so for instance the first bin is values[0:bin[0]] and the last is values[bin[-1]:]
Generate bin edge offsets and bin labels for one array using another array which has bin edge values. Both arrays must be sorted.
[ "Generate", "bin", "edge", "offsets", "and", "bin", "labels", "for", "one", "array", "using", "another", "array", "which", "has", "bin", "edge", "values", ".", "Both", "arrays", "must", "be", "sorted", "." ]
def generate_bins_generic(values, binner, closed): """ Generate bin edge offsets and bin labels for one array using another array which has bin edge values. Both arrays must be sorted. Parameters ---------- values : array of values binner : a comparable array of values representing bins into which to bin the first array. Note, 'values' end-points must fall within 'binner' end-points. closed : which end of bin is closed; left (default), right Returns ------- bins : array of offsets (into 'values' argument) of bins. Zero and last edge are excluded in result, so for instance the first bin is values[0:bin[0]] and the last is values[bin[-1]:] """ lenidx = len(values) lenbin = len(binner) if lenidx <= 0 or lenbin <= 0: raise ValueError("Invalid length for values or for binner") # check binner fits data if values[0] < binner[0]: raise ValueError("Values falls before first bin") if values[lenidx - 1] > binner[lenbin - 1]: raise ValueError("Values falls after last bin") bins = np.empty(lenbin - 1, dtype=np.int64) j = 0 # index into values bc = 0 # bin count # linear scan, presume nothing about values/binner except that it fits ok for i in range(0, lenbin - 1): r_bin = binner[i + 1] # count values in current bin, advance to next bin while j < lenidx and (values[j] < r_bin or (closed == 'right' and values[j] == r_bin)): j += 1 bins[bc] = j bc += 1 return bins
[ "def", "generate_bins_generic", "(", "values", ",", "binner", ",", "closed", ")", ":", "lenidx", "=", "len", "(", "values", ")", "lenbin", "=", "len", "(", "binner", ")", "if", "lenidx", "<=", "0", "or", "lenbin", "<=", "0", ":", "raise", "ValueError", "(", "\"Invalid length for values or for binner\"", ")", "# check binner fits data", "if", "values", "[", "0", "]", "<", "binner", "[", "0", "]", ":", "raise", "ValueError", "(", "\"Values falls before first bin\"", ")", "if", "values", "[", "lenidx", "-", "1", "]", ">", "binner", "[", "lenbin", "-", "1", "]", ":", "raise", "ValueError", "(", "\"Values falls after last bin\"", ")", "bins", "=", "np", ".", "empty", "(", "lenbin", "-", "1", ",", "dtype", "=", "np", ".", "int64", ")", "j", "=", "0", "# index into values", "bc", "=", "0", "# bin count", "# linear scan, presume nothing about values/binner except that it fits ok", "for", "i", "in", "range", "(", "0", ",", "lenbin", "-", "1", ")", ":", "r_bin", "=", "binner", "[", "i", "+", "1", "]", "# count values in current bin, advance to next bin", "while", "j", "<", "lenidx", "and", "(", "values", "[", "j", "]", "<", "r_bin", "or", "(", "closed", "==", "'right'", "and", "values", "[", "j", "]", "==", "r_bin", ")", ")", ":", "j", "+=", "1", "bins", "[", "bc", "]", "=", "j", "bc", "+=", "1", "return", "bins" ]
https://github.com/securityclippy/elasticintel/blob/aa08d3e9f5ab1c000128e95161139ce97ff0e334/ingest_feed_lambda/pandas/core/groupby.py#L2312-L2361
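Since generate_bins_generic is a pandas internal, here is a self-contained rerun of the same linear scan that makes the returned offsets concrete (bins_for is a local re-implementation for illustration, not pandas API):

import numpy as np

def bins_for(values, binner, closed='left'):
    bins, j = [], 0
    for i in range(len(binner) - 1):
        r_bin = binner[i + 1]
        # count values in the current bin, then move to the next edge
        while j < len(values) and (values[j] < r_bin or
                                   (closed == 'right' and values[j] == r_bin)):
            j += 1
        bins.append(j)
    return np.asarray(bins, dtype=np.int64)

vals = np.array([1, 2, 3, 4, 5, 6])
edges = np.array([0, 3, 6, 9])
# offsets [2, 5, 6] mean bins vals[0:2], vals[2:5], vals[5:]
assert bins_for(vals, edges).tolist() == [2, 5, 6]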
andialbrecht/sqlparse
3bc7d93254cbef71bdef91905f1814201a1b1f02
sqlparse/sql.py
python
Token.has_ancestor
(self, other)
return False
Returns ``True`` if *other* is in this tokens ancestry.
Returns ``True`` if *other* is in this tokens ancestry.
[ "Returns", "True", "if", "*", "other", "*", "is", "in", "this", "tokens", "ancestry", "." ]
def has_ancestor(self, other): """Returns ``True`` if *other* is in this tokens ancestry.""" parent = self.parent while parent: if parent == other: return True parent = parent.parent return False
[ "def", "has_ancestor", "(", "self", ",", "other", ")", ":", "parent", "=", "self", ".", "parent", "while", "parent", ":", "if", "parent", "==", "other", ":", "return", "True", "parent", "=", "parent", ".", "parent", "return", "False" ]
https://github.com/andialbrecht/sqlparse/blob/3bc7d93254cbef71bdef91905f1814201a1b1f02/sqlparse/sql.py#L139-L146
wistbean/learn_python3_spider
73c873f4845f4385f097e5057407d03dd37a117b
stackoverflow/venv/lib/python3.6/site-packages/twisted/words/protocols/irc.py
python
IRCClient.irc_PING
(self, prefix, params)
Called when someone has pinged us.
Called when someone has pinged us.
[ "Called", "when", "someone", "has", "pinged", "us", "." ]
def irc_PING(self, prefix, params): """ Called when someone has pinged us. """ self.sendLine("PONG %s" % params[-1])
[ "def", "irc_PING", "(", "self", ",", "prefix", ",", "params", ")", ":", "self", ".", "sendLine", "(", "\"PONG %s\"", "%", "params", "[", "-", "1", "]", ")" ]
https://github.com/wistbean/learn_python3_spider/blob/73c873f4845f4385f097e5057407d03dd37a117b/stackoverflow/venv/lib/python3.6/site-packages/twisted/words/protocols/irc.py#L2028-L2032
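The handler above is the IRC keep-alive: answer each server PING by echoing its token back in a PONG. A crude line-level approximation (it skips Twisted's real prefix/param parsing, and the server name is made up):

def handle_line(line, send):
    parts = line.split()
    if parts and parts[0] == 'PING':
        send('PONG %s' % parts[-1])

sent = []
handle_line('PING :irc.example.net', sent.append)
assert sent == ['PONG :irc.example.net']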
cw1204772/AIC2018_iamai
9c3720ba5eeb94e02deed303f32acaaa80aa893d
Detection/lib/datasets/cityscapes/tools/convert_cityscapes_to_coco.py
python
convert_cityscapes_instance_only
( data_dir, out_dir)
Convert from cityscapes format to COCO instance seg format - polygons
Convert from cityscapes format to COCO instance seg format - polygons
[ "Convert", "from", "cityscapes", "format", "to", "COCO", "instance", "seg", "format", "-", "polygons" ]
def convert_cityscapes_instance_only( data_dir, out_dir): """Convert from cityscapes format to COCO instance seg format - polygons""" sets = [ 'gtFine_val', # 'gtFine_train', # 'gtFine_test', # 'gtCoarse_train', # 'gtCoarse_val', # 'gtCoarse_train_extra' ] ann_dirs = [ 'gtFine_trainvaltest/gtFine/val', # 'gtFine_trainvaltest/gtFine/train', # 'gtFine_trainvaltest/gtFine/test', # 'gtCoarse/train', # 'gtCoarse/train_extra', # 'gtCoarse/val' ] json_name = 'instancesonly_filtered_%s.json' ends_in = '%s_polygons.json' img_id = 0 ann_id = 0 cat_id = 1 category_dict = {} category_instancesonly = [ 'person', 'rider', 'car', 'truck', 'bus', 'train', 'motorcycle', 'bicycle', ] for data_set, ann_dir in zip(sets, ann_dirs): print('Starting %s' % data_set) ann_dict = {} images = [] annotations = [] ann_dir = os.path.join(data_dir, ann_dir) for root, _, files in os.walk(ann_dir): for filename in files: if filename.endswith(ends_in % data_set.split('_')[0]): if len(images) % 50 == 0: print("Processed %s images, %s annotations" % ( len(images), len(annotations))) json_ann = json.load(open(os.path.join(root, filename))) image = {} image['id'] = img_id img_id += 1 image['width'] = json_ann['imgWidth'] image['height'] = json_ann['imgHeight'] image['file_name'] = filename[:-len( ends_in % data_set.split('_')[0])] + 'leftImg8bit.png' image['seg_file_name'] = filename[:-len( ends_in % data_set.split('_')[0])] + \ '%s_instanceIds.png' % data_set.split('_')[0] images.append(image) fullname = os.path.join(root, image['seg_file_name']) objects = cs.instances2dict_with_polygons( [fullname], verbose=False)[fullname] for object_cls in objects: if object_cls not in category_instancesonly: continue # skip non-instance categories for obj in objects[object_cls]: if obj['contours'] == []: print('Warning: empty contours.') continue # skip non-instance categories len_p = [len(p) for p in obj['contours']] if min(len_p) <= 4: print('Warning: invalid contours.') continue # skip non-instance categories ann = {} ann['id'] = ann_id ann_id += 1 ann['image_id'] = image['id'] ann['segmentation'] = obj['contours'] if object_cls not in category_dict: category_dict[object_cls] = cat_id cat_id += 1 ann['category_id'] = category_dict[object_cls] ann['iscrowd'] = 0 ann['area'] = obj['pixelCount'] ann['bbox'] = bboxs_util.xyxy_to_xywh( segms_util.polys_to_boxes( [ann['segmentation']])).tolist()[0] annotations.append(ann) ann_dict['images'] = images categories = [{"id": category_dict[name], "name": name} for name in category_dict] ann_dict['categories'] = categories ann_dict['annotations'] = annotations print("Num categories: %s" % len(categories)) print("Num images: %s" % len(images)) print("Num annotations: %s" % len(annotations)) with open(os.path.join(out_dir, json_name % data_set), 'wb') as outfile: outfile.write(json.dumps(ann_dict))
[ "def", "convert_cityscapes_instance_only", "(", "data_dir", ",", "out_dir", ")", ":", "sets", "=", "[", "'gtFine_val'", ",", "# 'gtFine_train',", "# 'gtFine_test',", "# 'gtCoarse_train',", "# 'gtCoarse_val',", "# 'gtCoarse_train_extra'", "]", "ann_dirs", "=", "[", "'gtFine_trainvaltest/gtFine/val'", ",", "# 'gtFine_trainvaltest/gtFine/train',", "# 'gtFine_trainvaltest/gtFine/test',", "# 'gtCoarse/train',", "# 'gtCoarse/train_extra',", "# 'gtCoarse/val'", "]", "json_name", "=", "'instancesonly_filtered_%s.json'", "ends_in", "=", "'%s_polygons.json'", "img_id", "=", "0", "ann_id", "=", "0", "cat_id", "=", "1", "category_dict", "=", "{", "}", "category_instancesonly", "=", "[", "'person'", ",", "'rider'", ",", "'car'", ",", "'truck'", ",", "'bus'", ",", "'train'", ",", "'motorcycle'", ",", "'bicycle'", ",", "]", "for", "data_set", ",", "ann_dir", "in", "zip", "(", "sets", ",", "ann_dirs", ")", ":", "print", "(", "'Starting %s'", "%", "data_set", ")", "ann_dict", "=", "{", "}", "images", "=", "[", "]", "annotations", "=", "[", "]", "ann_dir", "=", "os", ".", "path", ".", "join", "(", "data_dir", ",", "ann_dir", ")", "for", "root", ",", "_", ",", "files", "in", "os", ".", "walk", "(", "ann_dir", ")", ":", "for", "filename", "in", "files", ":", "if", "filename", ".", "endswith", "(", "ends_in", "%", "data_set", ".", "split", "(", "'_'", ")", "[", "0", "]", ")", ":", "if", "len", "(", "images", ")", "%", "50", "==", "0", ":", "print", "(", "\"Processed %s images, %s annotations\"", "%", "(", "len", "(", "images", ")", ",", "len", "(", "annotations", ")", ")", ")", "json_ann", "=", "json", ".", "load", "(", "open", "(", "os", ".", "path", ".", "join", "(", "root", ",", "filename", ")", ")", ")", "image", "=", "{", "}", "image", "[", "'id'", "]", "=", "img_id", "img_id", "+=", "1", "image", "[", "'width'", "]", "=", "json_ann", "[", "'imgWidth'", "]", "image", "[", "'height'", "]", "=", "json_ann", "[", "'imgHeight'", "]", "image", "[", "'file_name'", "]", "=", "filename", "[", ":", "-", "len", "(", "ends_in", "%", "data_set", ".", "split", "(", "'_'", ")", "[", "0", "]", ")", "]", "+", "'leftImg8bit.png'", "image", "[", "'seg_file_name'", "]", "=", "filename", "[", ":", "-", "len", "(", "ends_in", "%", "data_set", ".", "split", "(", "'_'", ")", "[", "0", "]", ")", "]", "+", "'%s_instanceIds.png'", "%", "data_set", ".", "split", "(", "'_'", ")", "[", "0", "]", "images", ".", "append", "(", "image", ")", "fullname", "=", "os", ".", "path", ".", "join", "(", "root", ",", "image", "[", "'seg_file_name'", "]", ")", "objects", "=", "cs", ".", "instances2dict_with_polygons", "(", "[", "fullname", "]", ",", "verbose", "=", "False", ")", "[", "fullname", "]", "for", "object_cls", "in", "objects", ":", "if", "object_cls", "not", "in", "category_instancesonly", ":", "continue", "# skip non-instance categories", "for", "obj", "in", "objects", "[", "object_cls", "]", ":", "if", "obj", "[", "'contours'", "]", "==", "[", "]", ":", "print", "(", "'Warning: empty contours.'", ")", "continue", "# skip non-instance categories", "len_p", "=", "[", "len", "(", "p", ")", "for", "p", "in", "obj", "[", "'contours'", "]", "]", "if", "min", "(", "len_p", ")", "<=", "4", ":", "print", "(", "'Warning: invalid contours.'", ")", "continue", "# skip non-instance categories", "ann", "=", "{", "}", "ann", "[", "'id'", "]", "=", "ann_id", "ann_id", "+=", "1", "ann", "[", "'image_id'", "]", "=", "image", "[", "'id'", "]", "ann", "[", "'segmentation'", "]", "=", "obj", "[", "'contours'", "]", "if", "object_cls", "not", "in", 
"category_dict", ":", "category_dict", "[", "object_cls", "]", "=", "cat_id", "cat_id", "+=", "1", "ann", "[", "'category_id'", "]", "=", "category_dict", "[", "object_cls", "]", "ann", "[", "'iscrowd'", "]", "=", "0", "ann", "[", "'area'", "]", "=", "obj", "[", "'pixelCount'", "]", "ann", "[", "'bbox'", "]", "=", "bboxs_util", ".", "xyxy_to_xywh", "(", "segms_util", ".", "polys_to_boxes", "(", "[", "ann", "[", "'segmentation'", "]", "]", ")", ")", ".", "tolist", "(", ")", "[", "0", "]", "annotations", ".", "append", "(", "ann", ")", "ann_dict", "[", "'images'", "]", "=", "images", "categories", "=", "[", "{", "\"id\"", ":", "category_dict", "[", "name", "]", ",", "\"name\"", ":", "name", "}", "for", "name", "in", "category_dict", "]", "ann_dict", "[", "'categories'", "]", "=", "categories", "ann_dict", "[", "'annotations'", "]", "=", "annotations", "print", "(", "\"Num categories: %s\"", "%", "len", "(", "categories", ")", ")", "print", "(", "\"Num images: %s\"", "%", "len", "(", "images", ")", ")", "print", "(", "\"Num annotations: %s\"", "%", "len", "(", "annotations", ")", ")", "with", "open", "(", "os", ".", "path", ".", "join", "(", "out_dir", ",", "json_name", "%", "data_set", ")", ",", "'wb'", ")", "as", "outfile", ":", "outfile", ".", "write", "(", "json", ".", "dumps", "(", "ann_dict", ")", ")" ]
https://github.com/cw1204772/AIC2018_iamai/blob/9c3720ba5eeb94e02deed303f32acaaa80aa893d/Detection/lib/datasets/cityscapes/tools/convert_cityscapes_to_coco.py#L82-L192
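Stripped to its skeleton, the converter above assembles a COCO dict from three growing lists plus a lazily built category table, with running image and annotation ids. The sketch below reproduces just that bookkeeping on made-up data (no Cityscapes files involved):

import json

img_id, ann_id, cat_id = 0, 0, 1
category_dict, images, annotations = {}, [], []

for fname, objs in [('a.png', [('car', [[0, 0, 10, 0, 10, 10, 0, 10]])])]:
    images.append({'id': img_id, 'file_name': fname, 'width': 100, 'height': 100})
    for cls, polys in objs:
        if cls not in category_dict:   # first sighting assigns the next id
            category_dict[cls] = cat_id
            cat_id += 1
        annotations.append({'id': ann_id, 'image_id': img_id,
                            'category_id': category_dict[cls],
                            'segmentation': polys, 'iscrowd': 0})
        ann_id += 1
    img_id += 1

coco = {'images': images,
        'categories': [{'id': v, 'name': k} for k, v in category_dict.items()],
        'annotations': annotations}
json.dumps(coco)  # serializable, as the converter finally writes it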
anoopkunchukuttan/indic_nlp_library
1e0f224dcf9d00664e0435399b087a4c6f07787d
indicnlp/langinfo.py
python
is_nukta
(c,lang)
return (o==NUKTA_OFFSET)
Is the character the nukta character
Is the character the nukta character
[ "Is", "the", "character", "the", "nukta", "character" ]
def is_nukta(c,lang): """ Is the character the nukta character """ o=get_offset(c,lang) return (o==NUKTA_OFFSET)
[ "def", "is_nukta", "(", "c", ",", "lang", ")", ":", "o", "=", "get_offset", "(", "c", ",", "lang", ")", "return", "(", "o", "==", "NUKTA_OFFSET", ")" ]
https://github.com/anoopkunchukuttan/indic_nlp_library/blob/1e0f224dcf9d00664e0435399b087a4c6f07787d/indicnlp/langinfo.py#L261-L266
heynemann/pyccuracy
0bbe3bcff4d13a6501bf77d5af9457f6a1491ab6
pyccuracy/airspeed.py
python
ParameterList.parse
(self)
[]
def parse(self): self.identity_match(self.START) try: self.values = self.next_element(ValueList) except NoMatch: pass self.require_match(self.END, ')')
[ "def", "parse", "(", "self", ")", ":", "self", ".", "identity_match", "(", "self", ".", "START", ")", "try", ":", "self", ".", "values", "=", "self", ".", "next_element", "(", "ValueList", ")", "except", "NoMatch", ":", "pass", "self", ".", "require_match", "(", "self", ".", "END", ",", "')'", ")" ]
https://github.com/heynemann/pyccuracy/blob/0bbe3bcff4d13a6501bf77d5af9457f6a1491ab6/pyccuracy/airspeed.py#L499-L503
mmalone/django-caching
c22a6653a87dc3eca265fd82e2f36663df46e004
app/managers.py
python
CachingManager._invalidate_cache
(self, instance)
Explicitly set a None value instead of just deleting so we don't have any race conditions where: Thread 1 -> Cache miss, get object from DB Thread 2 -> Object saved, deleted from cache Thread 1 -> Store (stale) object fetched from DB in cache Five seconds should be more than enough time to prevent this from happening for a web app.
Explicitly set a None value instead of just deleting so we don't have any race conditions where: Thread 1 -> Cache miss, get object from DB Thread 2 -> Object saved, deleted from cache Thread 1 -> Store (stale) object fetched from DB in cache Five seconds should be more than enough time to prevent this from happening for a web app.
[ "Explicitly", "set", "a", "None", "value", "instead", "of", "just", "deleting", "so", "we", "don", "t", "have", "any", "race", "conditions", "where", ":", "Thread", "1", "-", ">", "Cache", "miss", "get", "object", "from", "DB", "Thread", "2", "-", ">", "Object", "saved", "deleted", "from", "cache", "Thread", "1", "-", ">", "Store", "(", "stale", ")", "object", "fetched", "from", "DB", "in", "cache", "Five", "seconds", "should", "be", "more", "than", "enough", "time", "to", "prevent", "this", "from", "happening", "for", "a", "web", "app", "." ]
def _invalidate_cache(self, instance): """ Explicitly set a None value instead of just deleting so we don't have any race conditions where: Thread 1 -> Cache miss, get object from DB Thread 2 -> Object saved, deleted from cache Thread 1 -> Store (stale) object fetched from DB in cache Five seconds should be more than enough time to prevent this from happening for a web app. """ cache.set(instance.cache_key, None, 5)
[ "def", "_invalidate_cache", "(", "self", ",", "instance", ")", ":", "cache", ".", "set", "(", "instance", ".", "cache_key", ",", "None", ",", "5", ")" ]
https://github.com/mmalone/django-caching/blob/c22a6653a87dc3eca265fd82e2f36663df46e004/app/managers.py#L34-L44
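The docstring's race and its tombstone fix can be shown with a dict-backed stand-in for Django's cache (TTLCache here is hypothetical; the point is that set(key, None, 5) parks a None so a stale DB read cannot be re-cached for those five seconds):

import time

class TTLCache:
    def __init__(self):
        self._data = {}
    def set(self, key, value, ttl):
        self._data[key] = (value, time.time() + ttl)
    def get(self, key):
        value, expires = self._data.get(key, (None, 0.0))
        return value if time.time() < expires else None

cache = TTLCache()
cache.set('obj:1', None, 5)       # invalidate: readers miss for ~5 seconds
assert cache.get('obj:1') is None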
wistbean/learn_python3_spider
73c873f4845f4385f097e5057407d03dd37a117b
stackoverflow/venv/lib/python3.6/site-packages/pip-19.0.3-py3.6.egg/pip/_vendor/distlib/_backport/shutil.py
python
copyfile
(src, dst)
Copy data from src to dst
Copy data from src to dst
[ "Copy", "data", "from", "src", "to", "dst" ]
def copyfile(src, dst): """Copy data from src to dst""" if _samefile(src, dst): raise Error("`%s` and `%s` are the same file" % (src, dst)) for fn in [src, dst]: try: st = os.stat(fn) except OSError: # File most likely does not exist pass else: # XXX What about other special files? (sockets, devices...) if stat.S_ISFIFO(st.st_mode): raise SpecialFileError("`%s` is a named pipe" % fn) with open(src, 'rb') as fsrc: with open(dst, 'wb') as fdst: copyfileobj(fsrc, fdst)
[ "def", "copyfile", "(", "src", ",", "dst", ")", ":", "if", "_samefile", "(", "src", ",", "dst", ")", ":", "raise", "Error", "(", "\"`%s` and `%s` are the same file\"", "%", "(", "src", ",", "dst", ")", ")", "for", "fn", "in", "[", "src", ",", "dst", "]", ":", "try", ":", "st", "=", "os", ".", "stat", "(", "fn", ")", "except", "OSError", ":", "# File most likely does not exist", "pass", "else", ":", "# XXX What about other special files? (sockets, devices...)", "if", "stat", ".", "S_ISFIFO", "(", "st", ".", "st_mode", ")", ":", "raise", "SpecialFileError", "(", "\"`%s` is a named pipe\"", "%", "fn", ")", "with", "open", "(", "src", ",", "'rb'", ")", "as", "fsrc", ":", "with", "open", "(", "dst", ",", "'wb'", ")", "as", "fdst", ":", "copyfileobj", "(", "fsrc", ",", "fdst", ")" ]
https://github.com/wistbean/learn_python3_spider/blob/73c873f4845f4385f097e5057407d03dd37a117b/stackoverflow/venv/lib/python3.6/site-packages/pip-19.0.3-py3.6.egg/pip/_vendor/distlib/_backport/shutil.py#L87-L105
ydkhatri/mac_apt
729630c8bbe7a73cce3ca330305d3301a919cb07
plugins/helpers/common.py
python
CommonFunctions.ColumnExists
(db_conn, table_name, col_name)
return False
Checks if a specific column exists in given table in an sqlite db
Checks if a specific column exists in given table in an sqlite db
[ "Checks", "if", "a", "specific", "column", "exists", "in", "given", "table", "in", "an", "sqlite", "db" ]
def ColumnExists(db_conn, table_name, col_name): '''Checks if a specific column exists in given table in an sqlite db''' try: cursor = db_conn.execute(f'SELECT name from PRAGMA_table_info("{table_name}") where name like "{col_name}"') for row in cursor: return True except sqlite3Error as ex: log.error ("In ColumnExists({}, {}). Failed to list tables of db. Error Details:{}".format(table_name, col_name, str(ex)) ) return False
[ "def", "ColumnExists", "(", "db_conn", ",", "table_name", ",", "col_name", ")", ":", "try", ":", "cursor", "=", "db_conn", ".", "execute", "(", "f'SELECT name from PRAGMA_table_info(\"{table_name}\") where name like \"{col_name}\"'", ")", "for", "row", "in", "cursor", ":", "return", "True", "except", "sqlite3Error", "as", "ex", ":", "log", ".", "error", "(", "\"In ColumnExists({}, {}). Failed to list tables of db. Error Details:{}\"", ".", "format", "(", "table_name", ",", "col_name", ",", "str", "(", "ex", ")", ")", ")", "return", "False" ]
https://github.com/ydkhatri/mac_apt/blob/729630c8bbe7a73cce3ca330305d3301a919cb07/plugins/helpers/common.py#L195-L203
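The pragma_table_info query above is easy to try against an in-memory database. One caveat worth flagging: in SQLite, a double-quoted "col_name" can resolve as an identifier rather than a string, so the sketch below binds the column name as a parameter instead of interpolating it:

import sqlite3

conn = sqlite3.connect(':memory:')
conn.execute('CREATE TABLE t (id INTEGER, name TEXT)')
cur = conn.execute(
    "SELECT name FROM pragma_table_info('t') WHERE name LIKE ?", ('name',))
assert cur.fetchone() is not None       # column exists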
houtianze/bypy
10fd0f18378174a775a05a366cc20ba6609f96c6
bypy/bypy.py
python
makemppr
(pr)
return mppr
[]
def makemppr(pr): def mppr(msg, *args, **kwargs): return pr(mp.current_process().name + ': ' + msg, *args, **kwargs) return mppr
[ "def", "makemppr", "(", "pr", ")", ":", "def", "mppr", "(", "msg", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "return", "pr", "(", "mp", ".", "current_process", "(", ")", ".", "name", "+", "': '", "+", "msg", ",", "*", "args", ",", "*", "*", "kwargs", ")", "return", "mppr" ]
https://github.com/houtianze/bypy/blob/10fd0f18378174a775a05a366cc20ba6609f96c6/bypy/bypy.py#L150-L153
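makemppr is a tiny closure that prefixes every message with the calling process's name, so interleaved output from a multiprocessing pool stays attributable. It runs fine in the parent process alone:

import multiprocessing as mp

def makemppr(pr):
    def mppr(msg, *args, **kwargs):
        return pr(mp.current_process().name + ': ' + msg, *args, **kwargs)
    return mppr

log = makemppr(print)
log('starting upload')   # e.g. "MainProcess: starting upload"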
InvestmentSystems/static-frame
0b19d6969bf6c17fb0599871aca79eb3b52cf2ed
static_frame/core/frame.py
python
Frame.iter_tuple_items
(self)
return IterNodeConstructorAxis( container=self, function_values=self._axis_tuple, function_items=self._axis_tuple_items, yield_type=IterNodeType.ITEMS, apply_type=IterNodeApplyType.SERIES_VALUES )
Iterator of pairs of label, :obj:`NamedTuple`, where tuples are drawn from columns (axis=0) or rows (axis=1)
Iterator of pairs of label, :obj:`NamedTuple`, where tuples are drawn from columns (axis=0) or rows (axis=1)
[ "Iterator", "of", "pairs", "of", "label", ":", "obj", ":", "NamedTuple", "where", "tuples", "are", "drawn", "from", "columns", "(", "axis", "=", "0", ")", "or", "rows", "(", "axis", "=", "1", ")" ]
def iter_tuple_items(self) -> IterNodeConstructorAxis['Frame']: ''' Iterator of pairs of label, :obj:`NamedTuple`, where tuples are drawn from columns (axis=0) or rows (axis=1) ''' return IterNodeConstructorAxis( container=self, function_values=self._axis_tuple, function_items=self._axis_tuple_items, yield_type=IterNodeType.ITEMS, apply_type=IterNodeApplyType.SERIES_VALUES )
[ "def", "iter_tuple_items", "(", "self", ")", "->", "IterNodeConstructorAxis", "[", "'Frame'", "]", ":", "return", "IterNodeConstructorAxis", "(", "container", "=", "self", ",", "function_values", "=", "self", ".", "_axis_tuple", ",", "function_items", "=", "self", ".", "_axis_tuple_items", ",", "yield_type", "=", "IterNodeType", ".", "ITEMS", ",", "apply_type", "=", "IterNodeApplyType", ".", "SERIES_VALUES", ")" ]
https://github.com/InvestmentSystems/static-frame/blob/0b19d6969bf6c17fb0599871aca79eb3b52cf2ed/static_frame/core/frame.py#L3124-L3134
pyvista/pyvista
012dbb95a9aae406c3cd4cd94fc8c477f871e426
pyvista/utilities/regression.py
python
wrap_image_array
(arr)
return wrap_img
Wrap a numpy array as a pyvista.UniformGrid. Parameters ---------- arr : np.ndarray A ``np.uint8`` ``(X, Y, (3 or 4))`` array. For example ``(768, 1024, 3)``.
Wrap a numpy array as a pyvista.UniformGrid.
[ "Wrap", "a", "numpy", "array", "as", "a", "pyvista", ".", "UniformGrid", "." ]
def wrap_image_array(arr): """Wrap a numpy array as a pyvista.UniformGrid. Parameters ---------- arr : np.ndarray A ``np.uint8`` ``(X, Y, (3 or 4))`` array. For example ``(768, 1024, 3)``. """ if arr.ndim != 3: raise ValueError('Expecting a X by Y by (3 or 4) array') if arr.shape[2] not in [3, 4]: raise ValueError('Expecting a X by Y by (3 or 4) array') if arr.dtype != np.uint8: raise ValueError('Expecting a np.uint8 array') img = _vtk.vtkImageData() img.SetDimensions(arr.shape[1], arr.shape[0], 1) wrap_img = pyvista.wrap(img) wrap_img.point_data['PNGImage'] = arr[::-1].reshape(-1, arr.shape[2]) return wrap_img
[ "def", "wrap_image_array", "(", "arr", ")", ":", "if", "arr", ".", "ndim", "!=", "3", ":", "raise", "ValueError", "(", "'Expecting a X by Y by (3 or 4) array'", ")", "if", "arr", ".", "shape", "[", "2", "]", "not", "in", "[", "3", ",", "4", "]", ":", "raise", "ValueError", "(", "'Expecting a X by Y by (3 or 4) array'", ")", "if", "arr", ".", "dtype", "!=", "np", ".", "uint8", ":", "raise", "ValueError", "(", "'Expecting a np.uint8 array'", ")", "img", "=", "_vtk", ".", "vtkImageData", "(", ")", "img", ".", "SetDimensions", "(", "arr", ".", "shape", "[", "1", "]", ",", "arr", ".", "shape", "[", "0", "]", ",", "1", ")", "wrap_img", "=", "pyvista", ".", "wrap", "(", "img", ")", "wrap_img", ".", "point_data", "[", "'PNGImage'", "]", "=", "arr", "[", ":", ":", "-", "1", "]", ".", "reshape", "(", "-", "1", ",", "arr", ".", "shape", "[", "2", "]", ")", "return", "wrap_img" ]
https://github.com/pyvista/pyvista/blob/012dbb95a9aae406c3cd4cd94fc8c477f871e426/pyvista/utilities/regression.py#L17-L38
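The load-bearing line above is arr[::-1].reshape(-1, arr.shape[2]): the rows are flipped (presumably to match VTK's bottom-up image row order) and then flattened into an (X*Y, channels) point array. A numpy-only view of that step:

import numpy as np

arr = np.arange(2 * 3 * 3, dtype=np.uint8).reshape(2, 3, 3)  # (rows, cols, rgb)
flat = arr[::-1].reshape(-1, arr.shape[2])
assert flat.shape == (6, 3)
assert (flat[0] == arr[1, 0]).all()   # the last image row comes out first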
pydicom/pydicom
935de3b4ac94a5f520f3c91b42220ff0f13bce54
pydicom/filebase.py
python
DicomIO.read_le_tag
(self)
return cast(Tuple[int, int], unpack(b"<HH", bytes_read))
Read and return two unsigned shorts (little endian) from the file.
Read and return two unsigned shorts (little endian) from the file.
[ "Read", "and", "return", "two", "unsigned", "shorts", "(", "little", "endian", ")", "from", "the", "file", "." ]
def read_le_tag(self) -> Tuple[int, int]: """Read and return two unsigned shorts (little endian) from the file. """ bytes_read = self.read(4, need_exact_length=True) return cast(Tuple[int, int], unpack(b"<HH", bytes_read))
[ "def", "read_le_tag", "(", "self", ")", "->", "Tuple", "[", "int", ",", "int", "]", ":", "bytes_read", "=", "self", ".", "read", "(", "4", ",", "need_exact_length", "=", "True", ")", "return", "cast", "(", "Tuple", "[", "int", ",", "int", "]", ",", "unpack", "(", "b\"<HH\"", ",", "bytes_read", ")", ")" ]
https://github.com/pydicom/pydicom/blob/935de3b4ac94a5f520f3c91b42220ff0f13bce54/pydicom/filebase.py#L47-L51
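Decoding a DICOM tag is exactly four bytes read as two little-endian unsigned shorts, the group and element numbers. The unpack call in isolation:

from struct import unpack

buf = b'\x08\x00\x05\x00'            # group 0x0008, element 0x0005
group, elem = unpack(b'<HH', buf)
assert (group, elem) == (0x0008, 0x0005)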
seopbo/nlp_classification
21ea6e3f5737e7074bdd8dd190e5f5172f86f6bf
A_Structured_Self-attentive_Sentence_Embedding_ptc/model/utils.py
python
Tokenizer.split_and_transform
(self, string: str)
return self.transform(self.split(string))
[]
def split_and_transform(self, string: str) -> List[int]: return self.transform(self.split(string))
[ "def", "split_and_transform", "(", "self", ",", "string", ":", "str", ")", "->", "List", "[", "int", "]", ":", "return", "self", ".", "transform", "(", "self", ".", "split", "(", "string", ")", ")" ]
https://github.com/seopbo/nlp_classification/blob/21ea6e3f5737e7074bdd8dd190e5f5172f86f6bf/A_Structured_Self-attentive_Sentence_Embedding_ptc/model/utils.py#L196-L197
twilio/twilio-python
6e1e811ea57a1edfadd5161ace87397c563f6915
twilio/rest/api/v2010/account/call/notification.py
python
NotificationInstance.__init__
(self, version, payload, account_sid, call_sid, sid=None)
Initialize the NotificationInstance :returns: twilio.rest.api.v2010.account.call.notification.NotificationInstance :rtype: twilio.rest.api.v2010.account.call.notification.NotificationInstance
Initialize the NotificationInstance
[ "Initialize", "the", "NotificationInstance" ]
def __init__(self, version, payload, account_sid, call_sid, sid=None): """ Initialize the NotificationInstance :returns: twilio.rest.api.v2010.account.call.notification.NotificationInstance :rtype: twilio.rest.api.v2010.account.call.notification.NotificationInstance """ super(NotificationInstance, self).__init__(version) # Marshaled Properties self._properties = { 'account_sid': payload.get('account_sid'), 'api_version': payload.get('api_version'), 'call_sid': payload.get('call_sid'), 'date_created': deserialize.rfc2822_datetime(payload.get('date_created')), 'date_updated': deserialize.rfc2822_datetime(payload.get('date_updated')), 'error_code': payload.get('error_code'), 'log': payload.get('log'), 'message_date': deserialize.rfc2822_datetime(payload.get('message_date')), 'message_text': payload.get('message_text'), 'more_info': payload.get('more_info'), 'request_method': payload.get('request_method'), 'request_url': payload.get('request_url'), 'request_variables': payload.get('request_variables'), 'response_body': payload.get('response_body'), 'response_headers': payload.get('response_headers'), 'sid': payload.get('sid'), 'uri': payload.get('uri'), } # Context self._context = None self._solution = { 'account_sid': account_sid, 'call_sid': call_sid, 'sid': sid or self._properties['sid'], }
[ "def", "__init__", "(", "self", ",", "version", ",", "payload", ",", "account_sid", ",", "call_sid", ",", "sid", "=", "None", ")", ":", "super", "(", "NotificationInstance", ",", "self", ")", ".", "__init__", "(", "version", ")", "# Marshaled Properties", "self", ".", "_properties", "=", "{", "'account_sid'", ":", "payload", ".", "get", "(", "'account_sid'", ")", ",", "'api_version'", ":", "payload", ".", "get", "(", "'api_version'", ")", ",", "'call_sid'", ":", "payload", ".", "get", "(", "'call_sid'", ")", ",", "'date_created'", ":", "deserialize", ".", "rfc2822_datetime", "(", "payload", ".", "get", "(", "'date_created'", ")", ")", ",", "'date_updated'", ":", "deserialize", ".", "rfc2822_datetime", "(", "payload", ".", "get", "(", "'date_updated'", ")", ")", ",", "'error_code'", ":", "payload", ".", "get", "(", "'error_code'", ")", ",", "'log'", ":", "payload", ".", "get", "(", "'log'", ")", ",", "'message_date'", ":", "deserialize", ".", "rfc2822_datetime", "(", "payload", ".", "get", "(", "'message_date'", ")", ")", ",", "'message_text'", ":", "payload", ".", "get", "(", "'message_text'", ")", ",", "'more_info'", ":", "payload", ".", "get", "(", "'more_info'", ")", ",", "'request_method'", ":", "payload", ".", "get", "(", "'request_method'", ")", ",", "'request_url'", ":", "payload", ".", "get", "(", "'request_url'", ")", ",", "'request_variables'", ":", "payload", ".", "get", "(", "'request_variables'", ")", ",", "'response_body'", ":", "payload", ".", "get", "(", "'response_body'", ")", ",", "'response_headers'", ":", "payload", ".", "get", "(", "'response_headers'", ")", ",", "'sid'", ":", "payload", ".", "get", "(", "'sid'", ")", ",", "'uri'", ":", "payload", ".", "get", "(", "'uri'", ")", ",", "}", "# Context", "self", ".", "_context", "=", "None", "self", ".", "_solution", "=", "{", "'account_sid'", ":", "account_sid", ",", "'call_sid'", ":", "call_sid", ",", "'sid'", ":", "sid", "or", "self", ".", "_properties", "[", "'sid'", "]", ",", "}" ]
https://github.com/twilio/twilio-python/blob/6e1e811ea57a1edfadd5161ace87397c563f6915/twilio/rest/api/v2010/account/call/notification.py#L290-L326
mongodb/pymodm
be1c7b079df4954ef7e79e46f1b4a9ac9510766c
pymodm/fields.py
python
GeometryCollectionField.__init__
(self, verbose_name=None, mongo_name=None, **kwargs)
:parameters: - `verbose_name`: A human-readable name for the Field. - `mongo_name`: The name of this field when stored in MongoDB. .. seealso:: constructor for :class:`~pymodm.base.fields.MongoBaseField`
:parameters: - `verbose_name`: A human-readable name for the Field. - `mongo_name`: The name of this field when stored in MongoDB.
[ ":", "parameters", ":", "-", "verbose_name", ":", "A", "human", "-", "readable", "name", "for", "the", "Field", ".", "-", "mongo_name", ":", "The", "name", "of", "this", "field", "when", "stored", "in", "MongoDB", "." ]
def __init__(self, verbose_name=None, mongo_name=None, **kwargs): """ :parameters: - `verbose_name`: A human-readable name for the Field. - `mongo_name`: The name of this field when stored in MongoDB. .. seealso:: constructor for :class:`~pymodm.base.fields.MongoBaseField` """ super(GeometryCollectionField, self).__init__(verbose_name=verbose_name, mongo_name=mongo_name, **kwargs) self.validators.append( validators.together( validators.validator_for_type(dict), validators.validator_for_geojson_type('GeometryCollection'), lambda value: validators.validator_for_type( (list, tuple), 'Geometries')(value.get('geometries')))) self.validators.append( lambda value: self.validate_geometries(value.get('geometries')))
[ "def", "__init__", "(", "self", ",", "verbose_name", "=", "None", ",", "mongo_name", "=", "None", ",", "*", "*", "kwargs", ")", ":", "super", "(", "GeometryCollectionField", ",", "self", ")", ".", "__init__", "(", "verbose_name", "=", "verbose_name", ",", "mongo_name", "=", "mongo_name", ",", "*", "*", "kwargs", ")", "self", ".", "validators", ".", "append", "(", "validators", ".", "together", "(", "validators", ".", "validator_for_type", "(", "dict", ")", ",", "validators", ".", "validator_for_geojson_type", "(", "'GeometryCollection'", ")", ",", "lambda", "value", ":", "validators", ".", "validator_for_type", "(", "(", "list", ",", "tuple", ")", ",", "'Geometries'", ")", "(", "value", ".", "get", "(", "'geometries'", ")", ")", ")", ")", "self", ".", "validators", ".", "append", "(", "lambda", "value", ":", "self", ".", "validate_geometries", "(", "value", ".", "get", "(", "'geometries'", ")", ")", ")" ]
https://github.com/mongodb/pymodm/blob/be1c7b079df4954ef7e79e46f1b4a9ac9510766c/pymodm/fields.py#L1011-L1030
PyThaiNLP/pythainlp
de38b8507bf0934540aa5094e5f7f57d7f67e2dc
pythainlp/__main__.py
python
main
(argv=None)
ThaiNLP command line.
ThaiNLP command line.
[ "ThaiNLP", "command", "line", "." ]
def main(argv=None): """ThaiNLP command line.""" if not argv: argv = sys.argv parser = argparse.ArgumentParser( prog="thainlp", description="Thai natural language processing.", usage=( "thainlp <command> [options]\n\n" "Example:\n\n" "thainlp data catalog\n\n" "--" ), ) parser.add_argument( "command", type=str, choices=cli.COMMANDS, help="text processing action", ) args = parser.parse_args(argv[1:2]) cli.exit_if_empty(args.command, parser) if hasattr(cli, args.command): command = getattr(cli, args.command) command.App(argv)
[ "def", "main", "(", "argv", "=", "None", ")", ":", "if", "not", "argv", ":", "argv", "=", "sys", ".", "argv", "parser", "=", "argparse", ".", "ArgumentParser", "(", "prog", "=", "\"thainlp\"", ",", "description", "=", "\"Thai natural language processing.\"", ",", "usage", "=", "(", "\"thainlp <command> [options]\\n\\n\"", "\"Example:\\n\\n\"", "\"thainlp data catalog\\n\\n\"", "\"--\"", ")", ",", ")", "parser", ".", "add_argument", "(", "\"command\"", ",", "type", "=", "str", ",", "choices", "=", "cli", ".", "COMMANDS", ",", "help", "=", "\"text processing action\"", ",", ")", "args", "=", "parser", ".", "parse_args", "(", "argv", "[", "1", ":", "2", "]", ")", "cli", ".", "exit_if_empty", "(", "args", ".", "command", ",", "parser", ")", "if", "hasattr", "(", "cli", ",", "args", ".", "command", ")", ":", "command", "=", "getattr", "(", "cli", ",", "args", ".", "command", ")", "command", ".", "App", "(", "argv", ")" ]
https://github.com/PyThaiNLP/pythainlp/blob/de38b8507bf0934540aa5094e5f7f57d7f67e2dc/pythainlp/__main__.py#L8-L35
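The dispatch trick in main() above is to parse only argv[1:2], so argparse validates just the command name and everything after it is handed untouched to the subcommand's own App. The same pattern in a standalone form (commands and arguments here are made up):

import argparse
import sys

COMMANDS = ['data', 'tokenize']

def main(argv=None):
    argv = argv or sys.argv
    parser = argparse.ArgumentParser(prog='demo')
    parser.add_argument('command', type=str, choices=COMMANDS)
    args = parser.parse_args(argv[1:2])   # leave the rest for the subcommand
    print('dispatching to', args.command, 'with', argv[2:])

main(['demo', 'tokenize', '--engine', 'newmm'])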
pulp/pulp
a0a28d804f997b6f81c391378aff2e4c90183df9
client_lib/pulp/client/commands/unit.py
python
UnitRemoveCommand.run
(self, **kwargs)
Hook used to run the command.
Hook used to run the command.
[ "Hook", "used", "to", "run", "the", "command", "." ]
def run(self, **kwargs): """ Hook used to run the command. """ self.ensure_criteria(kwargs) repo_id = kwargs.pop(OPTION_REPO_ID.keyword) self.modify_user_input(kwargs) response = self.context.server.repo_unit.remove(repo_id, **kwargs) task = response.response_body self.poll([task], kwargs)
[ "def", "run", "(", "self", ",", "*", "*", "kwargs", ")", ":", "self", ".", "ensure_criteria", "(", "kwargs", ")", "repo_id", "=", "kwargs", ".", "pop", "(", "OPTION_REPO_ID", ".", "keyword", ")", "self", ".", "modify_user_input", "(", "kwargs", ")", "response", "=", "self", ".", "context", ".", "server", ".", "repo_unit", ".", "remove", "(", "repo_id", ",", "*", "*", "kwargs", ")", "task", "=", "response", ".", "response_body", "self", ".", "poll", "(", "[", "task", "]", ",", "kwargs", ")" ]
https://github.com/pulp/pulp/blob/a0a28d804f997b6f81c391378aff2e4c90183df9/client_lib/pulp/client/commands/unit.py#L80-L91
benoitc/couchdbkit
6be148640c00b54ee87a2f2d502e9d67fa5b45a8
couchdbkit/client.py
python
Database.delete_doc
(self, doc, **params)
return result
delete a document or a list of documents @param doc: str or dict, document id or full doc. @return: dict like: .. code-block:: python {"ok":true,"rev":"2839830636"}
delete a document or a list of documents @param doc: str or dict, document id or full doc. @return: dict like:
[ "delete", "a", "document", "or", "a", "list", "of", "documents", "@param", "doc", ":", "str", "or", "dict", "document", "id", "or", "full", "doc", ".", "@return", ":", "dict", "like", ":" ]
def delete_doc(self, doc, **params): """ delete a document or a list of documents @param doc: str or dict, document id or full doc. @return: dict like: .. code-block:: python {"ok":true,"rev":"2839830636"} """ result = { 'ok': False } doc1, schema = _maybe_serialize(doc) if isinstance(doc1, dict): if not '_id' or not '_rev' in doc1: raise KeyError('_id and _rev are required to delete a doc') docid = resource.escape_docid(doc1['_id']) result = self.res.delete(docid, rev=doc1['_rev'], **params).json_body elif isinstance(doc1, basestring): # we get a docid rev = self.get_rev(doc1) docid = resource.escape_docid(doc1) result = self.res.delete(docid, rev=rev, **params).json_body if schema: doc._doc.update({ "_rev": result['rev'], "_deleted": True }) elif isinstance(doc, dict): doc.update({ "_rev": result['rev'], "_deleted": True }) return result
[ "def", "delete_doc", "(", "self", ",", "doc", ",", "*", "*", "params", ")", ":", "result", "=", "{", "'ok'", ":", "False", "}", "doc1", ",", "schema", "=", "_maybe_serialize", "(", "doc", ")", "if", "isinstance", "(", "doc1", ",", "dict", ")", ":", "if", "not", "'_id'", "or", "not", "'_rev'", "in", "doc1", ":", "raise", "KeyError", "(", "'_id and _rev are required to delete a doc'", ")", "docid", "=", "resource", ".", "escape_docid", "(", "doc1", "[", "'_id'", "]", ")", "result", "=", "self", ".", "res", ".", "delete", "(", "docid", ",", "rev", "=", "doc1", "[", "'_rev'", "]", ",", "*", "*", "params", ")", ".", "json_body", "elif", "isinstance", "(", "doc1", ",", "basestring", ")", ":", "# we get a docid", "rev", "=", "self", ".", "get_rev", "(", "doc1", ")", "docid", "=", "resource", ".", "escape_docid", "(", "doc1", ")", "result", "=", "self", ".", "res", ".", "delete", "(", "docid", ",", "rev", "=", "rev", ",", "*", "*", "params", ")", ".", "json_body", "if", "schema", ":", "doc", ".", "_doc", ".", "update", "(", "{", "\"_rev\"", ":", "result", "[", "'rev'", "]", ",", "\"_deleted\"", ":", "True", "}", ")", "elif", "isinstance", "(", "doc", ",", "dict", ")", ":", "doc", ".", "update", "(", "{", "\"_rev\"", ":", "result", "[", "'rev'", "]", ",", "\"_deleted\"", ":", "True", "}", ")", "return", "result" ]
https://github.com/benoitc/couchdbkit/blob/6be148640c00b54ee87a2f2d502e9d67fa5b45a8/couchdbkit/client.py#L645-L678
cloudera/hue
23f02102d4547c17c32bd5ea0eb24e9eadd657a4
desktop/core/ext-py/python-openid-2.2.5/openid/yadis/manager.py
python
YadisServiceManager.__len__
(self)
return len(self.services)
How many untried services remain?
How many untried services remain?
[ "How", "many", "untried", "services", "remain?" ]
def __len__(self): """How many untried services remain?""" return len(self.services)
[ "def", "__len__", "(", "self", ")", ":", "return", "len", "(", "self", ".", "services", ")" ]
https://github.com/cloudera/hue/blob/23f02102d4547c17c32bd5ea0eb24e9eadd657a4/desktop/core/ext-py/python-openid-2.2.5/openid/yadis/manager.py#L20-L22
osmr/imgclsmob
f2993d3ce73a2f7ddba05da3891defb08547d504
gluon/gluoncv2/models/pyramidnet_cifar.py
python
pyramidnet200_a240_bn_svhn
(classes=10, **kwargs)
return get_pyramidnet_cifar( classes=classes, blocks=200, alpha=240, bottleneck=True, model_name="pyramidnet200_a240_bn_svhn", **kwargs)
PyramidNet-200 (a=240, bn) model for SVHN from 'Deep Pyramidal Residual Networks,' https://arxiv.org/abs/1610.02915. Parameters: ---------- classes : int, default 10 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. ctx : Context, default CPU The context in which to load the pretrained weights. root : str, default '~/.mxnet/models' Location for keeping the model parameters.
PyramidNet-200 (a=240, bn) model for SVHN from 'Deep Pyramidal Residual Networks,' https://arxiv.org/abs/1610.02915.
[ "PyramidNet", "-", "200", "(", "a", "=", "240", "bn", ")", "model", "for", "SVHN", "from", "Deep", "Pyramidal", "Residual", "Networks", "https", ":", "//", "arxiv", ".", "org", "/", "abs", "/", "1610", ".", "02915", "." ]
def pyramidnet200_a240_bn_svhn(classes=10, **kwargs): """ PyramidNet-200 (a=240, bn) model for SVHN from 'Deep Pyramidal Residual Networks,' https://arxiv.org/abs/1610.02915. Parameters: ---------- classes : int, default 10 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. ctx : Context, default CPU The context in which to load the pretrained weights. root : str, default '~/.mxnet/models' Location for keeping the model parameters. """ return get_pyramidnet_cifar( classes=classes, blocks=200, alpha=240, bottleneck=True, model_name="pyramidnet200_a240_bn_svhn", **kwargs)
[ "def", "pyramidnet200_a240_bn_svhn", "(", "classes", "=", "10", ",", "*", "*", "kwargs", ")", ":", "return", "get_pyramidnet_cifar", "(", "classes", "=", "classes", ",", "blocks", "=", "200", ",", "alpha", "=", "240", ",", "bottleneck", "=", "True", ",", "model_name", "=", "\"pyramidnet200_a240_bn_svhn\"", ",", "*", "*", "kwargs", ")" ]
https://github.com/osmr/imgclsmob/blob/f2993d3ce73a2f7ddba05da3891defb08547d504/gluon/gluoncv2/models/pyramidnet_cifar.py#L511-L533
IJDykeman/wangTiles
7c1ee2095ebdf7f72bce07d94c6484915d5cae8b
experimental_code/tiles_3d/venv_mac/lib/python2.7/site-packages/pip/_vendor/distlib/util.py
python
Cache.prefix_to_dir
(self, prefix)
return path_to_cache_dir(prefix)
Converts a resource prefix to a directory name in the cache.
Converts a resource prefix to a directory name in the cache.
[ "Converts", "a", "resource", "prefix", "to", "a", "directory", "name", "in", "the", "cache", "." ]
def prefix_to_dir(self, prefix): """ Converts a resource prefix to a directory name in the cache. """ return path_to_cache_dir(prefix)
[ "def", "prefix_to_dir", "(", "self", ",", "prefix", ")", ":", "return", "path_to_cache_dir", "(", "prefix", ")" ]
https://github.com/IJDykeman/wangTiles/blob/7c1ee2095ebdf7f72bce07d94c6484915d5cae8b/experimental_code/tiles_3d/venv_mac/lib/python2.7/site-packages/pip/_vendor/distlib/util.py#L816-L820
monim67/django-bootstrap-datepicker-plus
7fcafa7159eeab3cd1e2a6ebdf650052d21aa2c1
bootstrap_datepicker_plus/widgets.py
python
YearPickerInput._link_to
(self, linked_picker)
Customize the options when linked with other date-time input
Customize the options when linked with other date-time input
[ "Customize", "the", "options", "when", "linked", "with", "other", "date", "-", "time", "input" ]
def _link_to(self, linked_picker): """Customize the options when linked with other date-time input""" yformat = self.config["options"]["format"].replace("-01-01", "-12-31") self.config["options"]["format"] = yformat
[ "def", "_link_to", "(", "self", ",", "linked_picker", ")", ":", "yformat", "=", "self", ".", "config", "[", "\"options\"", "]", "[", "\"format\"", "]", ".", "replace", "(", "\"-01-01\"", ",", "\"-12-31\"", ")", "self", ".", "config", "[", "\"options\"", "]", "[", "\"format\"", "]", "=", "yformat" ]
https://github.com/monim67/django-bootstrap-datepicker-plus/blob/7fcafa7159eeab3cd1e2a6ebdf650052d21aa2c1/bootstrap_datepicker_plus/widgets.py#L89-L92
jorgebastida/gordon
4c1cd0c4dea2499d98115672095714592f80f7aa
gordon/resources/apigateway.py
python
ApiGateway.register_resources_template
(self, template)
[]
def register_resources_template(self, template): deployment_resources = [] api = RestApi( self.in_project_cf_name, Name=troposphere.Join("-", [self.name, troposphere.Ref('Stage')]), Description=self.settings.get('description', '') ) template.add_resource(api) deployment_resources.append(api) invoke_lambda_role = troposphere.iam.Role( utils.valid_cloudformation_name(self.name, 'Role'), AssumeRolePolicyDocument={ "Version": "2012-10-17", "Statement": [{ "Effect": "Allow", "Principal": { "Service": ["apigateway.amazonaws.com"] }, "Action": ["sts:AssumeRole"] }] }, Policies=[ troposphere.iam.Policy( PolicyName=utils.valid_cloudformation_name(self.name, 'Role', 'Policy'), PolicyDocument={ "Version": "2012-10-17", "Statement": [ { "Effect": "Allow", "Action": [ "lambda:InvokeFunction" ], "Resource": [ "*" ] } ] } ) ] ) template.add_resource(invoke_lambda_role) deployment_resources.append(invoke_lambda_role) deployment_dependencies = [] for path, resource in six.iteritems(self.settings.get('resources', {})): resource_reference = self.get_or_create_resource(path, api, template) methods = resource['methods'] if isinstance(methods, six.string_types): methods = [methods] if not isinstance(methods, dict): method_properties = copy.deepcopy(resource) method_properties.pop('methods', None) methods = dict([[method, method_properties] for method in methods]) for method, configuration in six.iteritems(methods): method_name = [self.name] method_name.extend(path.split('/')) method_name.append(method) extra = {} if 'parameters' in configuration: extra['RequestParameters'] = configuration['parameters'] m = Method( utils.valid_cloudformation_name(*method_name), HttpMethod=method, AuthorizationType=self.get_authorization_type(configuration), ApiKeyRequired=self.get_api_key_required(configuration), Integration=self.get_integration(configuration, invoke_lambda_role), MethodResponses=self.get_method_responses(configuration), ResourceId=resource_reference, RestApiId=troposphere.Ref(api), **extra ) template.add_resource(m) deployment_dependencies.append(m.name) deployment_resources.append(m) deploy_hash = hashlib.sha1(six.text_type(uuid.uuid4()).encode('utf-8')).hexdigest() deploy = Deployment( utils.valid_cloudformation_name(self.name, "Deployment", deploy_hash[:8]), DependsOn=sorted(deployment_dependencies), StageName=troposphere.Ref('Stage'), RestApiId=troposphere.Ref(api) ) template.add_resource(deploy) if self._get_true_false('cli-output', 't'): template.add_output([ troposphere.Output( utils.valid_cloudformation_name("Clioutput", self.in_project_name), Value=troposphere.Join( "", [ "https://", troposphere.Ref(api), ".execute-api.", troposphere.Ref(troposphere.AWS_REGION), ".amazonaws.com/", troposphere.Ref('Stage') ] ), ) ])
[ "def", "register_resources_template", "(", "self", ",", "template", ")", ":", "deployment_resources", "=", "[", "]", "api", "=", "RestApi", "(", "self", ".", "in_project_cf_name", ",", "Name", "=", "troposphere", ".", "Join", "(", "\"-\"", ",", "[", "self", ".", "name", ",", "troposphere", ".", "Ref", "(", "'Stage'", ")", "]", ")", ",", "Description", "=", "self", ".", "settings", ".", "get", "(", "'description'", ",", "''", ")", ")", "template", ".", "add_resource", "(", "api", ")", "deployment_resources", ".", "append", "(", "api", ")", "invoke_lambda_role", "=", "troposphere", ".", "iam", ".", "Role", "(", "utils", ".", "valid_cloudformation_name", "(", "self", ".", "name", ",", "'Role'", ")", ",", "AssumeRolePolicyDocument", "=", "{", "\"Version\"", ":", "\"2012-10-17\"", ",", "\"Statement\"", ":", "[", "{", "\"Effect\"", ":", "\"Allow\"", ",", "\"Principal\"", ":", "{", "\"Service\"", ":", "[", "\"apigateway.amazonaws.com\"", "]", "}", ",", "\"Action\"", ":", "[", "\"sts:AssumeRole\"", "]", "}", "]", "}", ",", "Policies", "=", "[", "troposphere", ".", "iam", ".", "Policy", "(", "PolicyName", "=", "utils", ".", "valid_cloudformation_name", "(", "self", ".", "name", ",", "'Role'", ",", "'Policy'", ")", ",", "PolicyDocument", "=", "{", "\"Version\"", ":", "\"2012-10-17\"", ",", "\"Statement\"", ":", "[", "{", "\"Effect\"", ":", "\"Allow\"", ",", "\"Action\"", ":", "[", "\"lambda:InvokeFunction\"", "]", ",", "\"Resource\"", ":", "[", "\"*\"", "]", "}", "]", "}", ")", "]", ")", "template", ".", "add_resource", "(", "invoke_lambda_role", ")", "deployment_resources", ".", "append", "(", "invoke_lambda_role", ")", "deployment_dependencies", "=", "[", "]", "for", "path", ",", "resource", "in", "six", ".", "iteritems", "(", "self", ".", "settings", ".", "get", "(", "'resources'", ",", "{", "}", ")", ")", ":", "resource_reference", "=", "self", ".", "get_or_create_resource", "(", "path", ",", "api", ",", "template", ")", "methods", "=", "resource", "[", "'methods'", "]", "if", "isinstance", "(", "methods", ",", "six", ".", "string_types", ")", ":", "methods", "=", "[", "methods", "]", "if", "not", "isinstance", "(", "methods", ",", "dict", ")", ":", "method_properties", "=", "copy", ".", "deepcopy", "(", "resource", ")", "method_properties", ".", "pop", "(", "'methods'", ",", "None", ")", "methods", "=", "dict", "(", "[", "[", "method", ",", "method_properties", "]", "for", "method", "in", "methods", "]", ")", "for", "method", ",", "configuration", "in", "six", ".", "iteritems", "(", "methods", ")", ":", "method_name", "=", "[", "self", ".", "name", "]", "method_name", ".", "extend", "(", "path", ".", "split", "(", "'/'", ")", ")", "method_name", ".", "append", "(", "method", ")", "extra", "=", "{", "}", "if", "'parameters'", "in", "configuration", ":", "extra", "[", "'RequestParameters'", "]", "=", "configuration", "[", "'parameters'", "]", "m", "=", "Method", "(", "utils", ".", "valid_cloudformation_name", "(", "*", "method_name", ")", ",", "HttpMethod", "=", "method", ",", "AuthorizationType", "=", "self", ".", "get_authorization_type", "(", "configuration", ")", ",", "ApiKeyRequired", "=", "self", ".", "get_api_key_required", "(", "configuration", ")", ",", "Integration", "=", "self", ".", "get_integration", "(", "configuration", ",", "invoke_lambda_role", ")", ",", "MethodResponses", "=", "self", ".", "get_method_responses", "(", "configuration", ")", ",", "ResourceId", "=", "resource_reference", ",", "RestApiId", "=", "troposphere", ".", "Ref", "(", "api", ")", ",", "*", "*", "extra", 
")", "template", ".", "add_resource", "(", "m", ")", "deployment_dependencies", ".", "append", "(", "m", ".", "name", ")", "deployment_resources", ".", "append", "(", "m", ")", "deploy_hash", "=", "hashlib", ".", "sha1", "(", "six", ".", "text_type", "(", "uuid", ".", "uuid4", "(", ")", ")", ".", "encode", "(", "'utf-8'", ")", ")", ".", "hexdigest", "(", ")", "deploy", "=", "Deployment", "(", "utils", ".", "valid_cloudformation_name", "(", "self", ".", "name", ",", "\"Deployment\"", ",", "deploy_hash", "[", ":", "8", "]", ")", ",", "DependsOn", "=", "sorted", "(", "deployment_dependencies", ")", ",", "StageName", "=", "troposphere", ".", "Ref", "(", "'Stage'", ")", ",", "RestApiId", "=", "troposphere", ".", "Ref", "(", "api", ")", ")", "template", ".", "add_resource", "(", "deploy", ")", "if", "self", ".", "_get_true_false", "(", "'cli-output'", ",", "'t'", ")", ":", "template", ".", "add_output", "(", "[", "troposphere", ".", "Output", "(", "utils", ".", "valid_cloudformation_name", "(", "\"Clioutput\"", ",", "self", ".", "in_project_name", ")", ",", "Value", "=", "troposphere", ".", "Join", "(", "\"\"", ",", "[", "\"https://\"", ",", "troposphere", ".", "Ref", "(", "api", ")", ",", "\".execute-api.\"", ",", "troposphere", ".", "Ref", "(", "troposphere", ".", "AWS_REGION", ")", ",", "\".amazonaws.com/\"", ",", "troposphere", ".", "Ref", "(", "'Stage'", ")", "]", ")", ",", ")", "]", ")" ]
https://github.com/jorgebastida/gordon/blob/4c1cd0c4dea2499d98115672095714592f80f7aa/gordon/resources/apigateway.py#L203-L312
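A detail worth noting in the function_tokens above: the Deployment's logical name is suffixed with the first eight hex characters of sha1(uuid4()), so every run produces a fresh logical ID and CloudFormation is forced to create a new API Gateway deployment instead of reusing the old one. A standalone sketch of that trick (the helper name is mine, not the project's):

```python
import hashlib
import uuid

def fresh_logical_id(prefix):
    # A new logical ID on every run makes CloudFormation create a new
    # ApiGateway Deployment resource rather than keep the previous one.
    digest = hashlib.sha1(str(uuid.uuid4()).encode('utf-8')).hexdigest()
    return '{}{}'.format(prefix, digest[:8])

print(fresh_logical_id('ApiDeployment'))  # e.g. 'ApiDeployment3f9a1c2e'
```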
jeanharb/option_critic
5d6c81a650a8f452bc8ad3250f1f211d317fde8c
learning.py
python
SGD.__init__
(self, lr=0.01)
[]
def __init__(self, lr=0.01): self.lr = lr
[ "def", "__init__", "(", "self", ",", "lr", "=", "0.01", ")", ":", "self", ".", "lr", "=", "lr" ]
https://github.com/jeanharb/option_critic/blob/5d6c81a650a8f452bc8ad3250f1f211d317fde8c/learning.py#L17-L18
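The record above captures only the constructor; the update step is not shown. A minimal sketch of what an SGD class with a single `lr` field typically implements (the `update` method and its signature are assumptions, not taken from the source):

```python
import numpy as np

class SGD:
    """Plain stochastic gradient descent (sketch)."""

    def __init__(self, lr=0.01):
        self.lr = lr

    def update(self, params, grads):
        # theta <- theta - lr * dL/dtheta, applied in place per parameter
        for key in params:
            params[key] -= self.lr * grads[key]

params = {'w': np.array([1.0, 2.0])}
grads = {'w': np.array([0.5, 0.5])}
SGD(lr=0.1).update(params, grads)
print(params['w'])  # [0.95 1.95]
```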
mathandy/svgpathtools
abd99f0846ea636b9c33ce28453348bd662b98c7
svgpathtools/path.py
python
Path.curvature
(self, T)
return abs(dx*ddy - dy*ddx)/(dx*dx + dy*dy)**1.5
returns the curvature of this Path object at T and outputs float('inf') if not differentiable at T.
returns the curvature of this Path object at T and outputs float('inf') if not differentiable at T.
[ "returns", "the", "curvature", "of", "this", "Path", "object", "at", "T", "and", "outputs", "float", "(", "inf", ")", "if", "not", "differentiable", "at", "T", "." ]
def curvature(self, T): """returns the curvature of this Path object at T and outputs float('inf') if not differentiable at T.""" seg_idx, t = self.T2t(T) seg = self[seg_idx] if np.isclose(t, 0) and (seg_idx != 0 or self.end==self.start): previous_seg_in_path = self._segments[ (seg_idx - 1) % len(self._segments)] if not seg.joins_smoothly_with(previous_seg_in_path): return float('inf') elif np.isclose(t, 1) and (seg_idx != len(self) - 1 or self.end == self.start): next_seg_in_path = self._segments[ (seg_idx + 1) % len(self._segments)] if not next_seg_in_path.joins_smoothly_with(seg): return float('inf') dz = self.derivative(T) ddz = self.derivative(T, n=2) dx, dy = dz.real, dz.imag ddx, ddy = ddz.real, ddz.imag return abs(dx*ddy - dy*ddx)/(dx*dx + dy*dy)**1.5
[ "def", "curvature", "(", "self", ",", "T", ")", ":", "seg_idx", ",", "t", "=", "self", ".", "T2t", "(", "T", ")", "seg", "=", "self", "[", "seg_idx", "]", "if", "np", ".", "isclose", "(", "t", ",", "0", ")", "and", "(", "seg_idx", "!=", "0", "or", "self", ".", "end", "==", "self", ".", "start", ")", ":", "previous_seg_in_path", "=", "self", ".", "_segments", "[", "(", "seg_idx", "-", "1", ")", "%", "len", "(", "self", ".", "_segments", ")", "]", "if", "not", "seg", ".", "joins_smoothly_with", "(", "previous_seg_in_path", ")", ":", "return", "float", "(", "'inf'", ")", "elif", "np", ".", "isclose", "(", "t", ",", "1", ")", "and", "(", "seg_idx", "!=", "len", "(", "self", ")", "-", "1", "or", "self", ".", "end", "==", "self", ".", "start", ")", ":", "next_seg_in_path", "=", "self", ".", "_segments", "[", "(", "seg_idx", "+", "1", ")", "%", "len", "(", "self", ".", "_segments", ")", "]", "if", "not", "next_seg_in_path", ".", "joins_smoothly_with", "(", "seg", ")", ":", "return", "float", "(", "'inf'", ")", "dz", "=", "self", ".", "derivative", "(", "T", ")", "ddz", "=", "self", ".", "derivative", "(", "T", ",", "n", "=", "2", ")", "dx", ",", "dy", "=", "dz", ".", "real", ",", "dz", ".", "imag", "ddx", ",", "ddy", "=", "ddz", ".", "real", ",", "ddz", ".", "imag", "return", "abs", "(", "dx", "*", "ddy", "-", "dy", "*", "ddx", ")", "/", "(", "dx", "*", "dx", "+", "dy", "*", "dy", ")", "**", "1.5" ]
https://github.com/mathandy/svgpathtools/blob/abd99f0846ea636b9c33ce28453348bd662b98c7/svgpathtools/path.py#L2839-L2859
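The return statement above is the standard plane-curve curvature formula, kappa = |x'y'' - y'x''| / (x'^2 + y'^2)^(3/2). A self-contained numerical check of that formula on a circle of radius 2, using central differences rather than svgpathtools:

```python
import numpy as np

def curvature(x, y, t):
    # kappa = |x'y'' - y'x''| / (x'^2 + y'^2) ** 1.5, via central differences
    dt = t[1] - t[0]
    dx, dy = np.gradient(x, dt), np.gradient(y, dt)
    ddx, ddy = np.gradient(dx, dt), np.gradient(dy, dt)
    return np.abs(dx * ddy - dy * ddx) / (dx * dx + dy * dy) ** 1.5

t = np.linspace(0.0, 2.0 * np.pi, 10000)
r = 2.0
kappa = curvature(r * np.cos(t), r * np.sin(t), t)
print(kappa[len(t) // 2])  # ~0.5, i.e. 1/r for a circle of radius 2
```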
Calysto/calysto_scheme
15bf81987870bcae1264e5a0a06feb9a8ee12b8b
calysto_scheme/scheme.py
python
b_proc_175_d
()
[]
def b_proc_175_d(): if (False if ((not(length_two_q(args_reg))) is False) else True): GLOBALS['msg_reg'] = "incorrect number of arguments to sort" GLOBALS['pc'] = runtime_error else: return sort_native(args_reg, env2_reg, info_reg, handler_reg, fail_reg, k2_reg)
[ "def", "b_proc_175_d", "(", ")", ":", "if", "(", "False", "if", "(", "(", "not", "(", "length_two_q", "(", "args_reg", ")", ")", ")", "is", "False", ")", "else", "True", ")", ":", "GLOBALS", "[", "'msg_reg'", "]", "=", "\"incorrect number of arguments to sort\"", "GLOBALS", "[", "'pc'", "]", "=", "runtime_error", "else", ":", "return", "sort_native", "(", "args_reg", ",", "env2_reg", ",", "info_reg", ",", "handler_reg", ",", "fail_reg", ",", "k2_reg", ")" ]
https://github.com/Calysto/calysto_scheme/blob/15bf81987870bcae1264e5a0a06feb9a8ee12b8b/calysto_scheme/scheme.py#L5402-L5407
laike9m/ezcf
09b236c0670709f7ab01b17c78c12cec2cdfc779
ezcf/_base.py
python
BaseFinder.get_outerframe_skip_importlib_frame
(level)
There's a bug in Python3.4+, see http://bugs.python.org/issue23773, remove this and use sys._getframe(3) when bug is fixed
There's a bug in Python3.4+, see http://bugs.python.org/issue23773, remove this and use sys._getframe(3) when bug is fixed
[ "There", "s", "a", "bug", "in", "Python3", ".", "4", "+", "see", "http", ":", "//", "bugs", ".", "python", ".", "org", "/", "issue23773", "remove", "this", "and", "use", "sys", ".", "_getframe", "(", "3", ")", "when", "bug", "is", "fixed" ]
def get_outerframe_skip_importlib_frame(level): """ There's a bug in Python3.4+, see http://bugs.python.org/issue23773, remove this and use sys._getframe(3) when bug is fixed """ if sys.version_info < (3, 4): return sys._getframe(level) else: currentframe = inspect.currentframe() levelup = 0 while levelup < level: currentframe = currentframe.f_back if currentframe.f_globals['__name__'] == 'importlib._bootstrap': continue else: levelup += 1 return currentframe
[ "def", "get_outerframe_skip_importlib_frame", "(", "level", ")", ":", "if", "sys", ".", "version_info", "<", "(", "3", ",", "4", ")", ":", "return", "sys", ".", "_getframe", "(", "level", ")", "else", ":", "currentframe", "=", "inspect", ".", "currentframe", "(", ")", "levelup", "=", "0", "while", "levelup", "<", "level", ":", "currentframe", "=", "currentframe", ".", "f_back", "if", "currentframe", ".", "f_globals", "[", "'__name__'", "]", "==", "'importlib._bootstrap'", ":", "continue", "else", ":", "levelup", "+=", "1", "return", "currentframe" ]
https://github.com/laike9m/ezcf/blob/09b236c0670709f7ab01b17c78c12cec2cdfc779/ezcf/_base.py#L46-L62
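The workaround above walks frames manually with `f_back` and filters out `importlib._bootstrap` frames. A stripped-down sketch of the same walk without the filtering, just to show the mechanism:

```python
import inspect

def caller_module(level=0):
    # level=0 returns the module name of this helper's direct caller;
    # unlike the workaround above there is no importlib._bootstrap skipping.
    frame = inspect.currentframe()
    for _ in range(level + 1):  # the +1 skips this helper's own frame
        frame = frame.f_back
    return frame.f_globals['__name__']

print(caller_module())  # '__main__' when run as a script
```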
deeptools/deepTools
ac42d29c298c026aa0c53c9db2553087ebc86b97
deeptools/utilities.py
python
getTempFileName
(suffix='')
return memFileName
Return a temporary file name. The calling function is responsible for deleting this upon completion.
Return a temporary file name. The calling function is responsible for deleting this upon completion.
[ "Return", "a", "temporary", "file", "name", ".", "The", "calling", "function", "is", "responsible", "for", "deleting", "this", "upon", "completion", "." ]
def getTempFileName(suffix=''): """ Return a temporary file name. The calling function is responsible for deleting this upon completion. """ import tempfile _tempFile = tempfile.NamedTemporaryFile(prefix="_deeptools_", suffix=suffix, delete=False) memFileName = _tempFile.name _tempFile.close() return memFileName
[ "def", "getTempFileName", "(", "suffix", "=", "''", ")", ":", "import", "tempfile", "_tempFile", "=", "tempfile", ".", "NamedTemporaryFile", "(", "prefix", "=", "\"_deeptools_\"", ",", "suffix", "=", "suffix", ",", "delete", "=", "False", ")", "memFileName", "=", "_tempFile", ".", "name", "_tempFile", ".", "close", "(", ")", "return", "memFileName" ]
https://github.com/deeptools/deepTools/blob/ac42d29c298c026aa0c53c9db2553087ebc86b97/deeptools/utilities.py#L250-L262
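Because `delete=False` leaves the file on disk, the docstring puts cleanup on the caller. A plausible usage pattern for the function above (the suffix and contents are made up for illustration):

```python
import os

tmp = getTempFileName(suffix='.bed')
try:
    with open(tmp, 'w') as fh:
        fh.write('chr1\t0\t100\n')
    # ... hand `tmp` to an external tool here ...
finally:
    os.remove(tmp)  # the caller owns deletion, per the docstring
```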
aws-samples/aws-kube-codesuite
ab4e5ce45416b83bffb947ab8d234df5437f4fca
src/kubernetes/client/models/v1_handler.py
python
V1Handler.http_get
(self)
return self._http_get
Gets the http_get of this V1Handler. HTTPGet specifies the http request to perform. :return: The http_get of this V1Handler. :rtype: V1HTTPGetAction
Gets the http_get of this V1Handler. HTTPGet specifies the http request to perform.
[ "Gets", "the", "http_get", "of", "this", "V1Handler", ".", "HTTPGet", "specifies", "the", "http", "request", "to", "perform", "." ]
def http_get(self): """ Gets the http_get of this V1Handler. HTTPGet specifies the http request to perform. :return: The http_get of this V1Handler. :rtype: V1HTTPGetAction """ return self._http_get
[ "def", "http_get", "(", "self", ")", ":", "return", "self", ".", "_http_get" ]
https://github.com/aws-samples/aws-kube-codesuite/blob/ab4e5ce45416b83bffb947ab8d234df5437f4fca/src/kubernetes/client/models/v1_handler.py#L73-L81
avrae/avrae
6ebe46a1ec3d4dfaa2f9b18fac948325f39f87de
gamedata/mixins.py
python
LimitedUseGrantorMixin.initialize_limited_use
(self, data)
return self
Given an instance that is in the process of being constructed, set up the LimitedUses and return the instance (for initialization chaining).
Given an instance that is in the process of being constructed, set up the LimitedUses and return the instance (for initialization chaining).
[ "Given", "an", "instance", "that", "is", "in", "the", "process", "of", "being", "constructed", "set", "up", "the", "LimitedUses", "and", "return", "the", "instance", "(", "for", "initialization", "chaining", ")", "." ]
def initialize_limited_use(self, data): """ Given an instance that is in the process of being constructed, set up the LimitedUses and return the instance (for initialization chaining). """ self.limited_use = [LimitedUse.from_dict(lu, self) for lu in data.get('grantedLimitedUse', [])] return self
[ "def", "initialize_limited_use", "(", "self", ",", "data", ")", ":", "self", ".", "limited_use", "=", "[", "LimitedUse", ".", "from_dict", "(", "lu", ",", "self", ")", "for", "lu", "in", "data", ".", "get", "(", "'grantedLimitedUse'", ",", "[", "]", ")", "]", "return", "self" ]
https://github.com/avrae/avrae/blob/6ebe46a1ec3d4dfaa2f9b18fac948325f39f87de/gamedata/mixins.py#L18-L24
virt-manager/virt-manager
c51ebdd76a9fc198c40cefcd78838860199467d3
virtManager/connection.py
python
vmmConnection._wait_for_condition
(self, compare_cb, timeout=3)
Wait until compare_cb returns True or the timeout expires. Will not block the mainloop.
Wait until compare_cb returns True or the timeout expires. Will not block the mainloop.
[ "Wait", "until", "compare_cb", "returns", "True", "or", "the", "timeout", "expires", ".", "Will", "not", "block", "the", "mainloop", "." ]
def _wait_for_condition(self, compare_cb, timeout=3): """ Wait until compare_cb returns True or the timeout expires. Will not block the mainloop. """ from gi.repository import Gtk is_main_thread = (threading.current_thread().name == "MainThread") start_time = time.time() while True: cur_time = time.time() if compare_cb(): return if (cur_time - start_time) >= timeout: return # pragma: no cover if is_main_thread: if Gtk.events_pending(): Gtk.main_iteration_do(False) continue time.sleep(.1)
[ "def", "_wait_for_condition", "(", "self", ",", "compare_cb", ",", "timeout", "=", "3", ")", ":", "from", "gi", ".", "repository", "import", "Gtk", "is_main_thread", "=", "(", "threading", ".", "current_thread", "(", ")", ".", "name", "==", "\"MainThread\"", ")", "start_time", "=", "time", ".", "time", "(", ")", "while", "True", ":", "cur_time", "=", "time", ".", "time", "(", ")", "if", "compare_cb", "(", ")", ":", "return", "if", "(", "cur_time", "-", "start_time", ")", ">=", "timeout", ":", "return", "# pragma: no cover", "if", "is_main_thread", ":", "if", "Gtk", ".", "events_pending", "(", ")", ":", "Gtk", ".", "main_iteration_do", "(", "False", ")", "continue", "time", ".", "sleep", "(", ".1", ")" ]
https://github.com/virt-manager/virt-manager/blob/c51ebdd76a9fc198c40cefcd78838860199467d3/virtManager/connection.py#L236-L257
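Stripped of the Gtk main-loop pumping (which keeps the UI responsive while waiting), the function above reduces to a plain poll-until-true-or-timeout loop. A minimal threading-friendly sketch of that shape:

```python
import time

def wait_for(condition, timeout=3.0, interval=0.1):
    # Poll `condition` until it returns True or `timeout` elapses.
    deadline = time.monotonic() + timeout
    while time.monotonic() < deadline:
        if condition():
            return True
        time.sleep(interval)
    return condition()  # one last check at the deadline

print(wait_for(lambda: True))  # True immediately
```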
Net-ng/kansha
85b5816da126b1c7098707c98f217d8b2e524ff2
kansha/user/models.py
python
DataUser.check_password
(self, clear_password)
return encrypted_password == self._password
Check the user password. Return True if the password is valid for this user
Check the user password. Return True if the password is valid for this user
[ "Check", "the", "user", "password", ".", "Return", "True", "if", "the", "password", "is", "valid", "for", "this", "user" ]
def check_password(self, clear_password): """Check the user password. Return True if the password is valid for this user""" encrypted_password = self._encrypt_password(self._salt, clear_password) return encrypted_password == self._password
[ "def", "check_password", "(", "self", ",", "clear_password", ")", ":", "encrypted_password", "=", "self", ".", "_encrypt_password", "(", "self", ".", "_salt", ",", "clear_password", ")", "return", "encrypted_password", "==", "self", ".", "_password" ]
https://github.com/Net-ng/kansha/blob/85b5816da126b1c7098707c98f217d8b2e524ff2/kansha/user/models.py#L102-L105
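The comparison above uses `==` on the two digests. A common hardening, not present in the source, is a constant-time comparison so an attacker cannot infer the mismatch position from response timing:

```python
import hmac

def digests_match(stored_digest, candidate_digest):
    # compare_digest runs in time independent of where the inputs differ
    return hmac.compare_digest(stored_digest, candidate_digest)

print(digests_match(b'abc', b'abc'))  # True
print(digests_match(b'abc', b'abd'))  # False
```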
Qirky/FoxDot
76318f9630bede48ff3994146ed644affa27bfa4
FoxDot/lib/Patterns/Sequences.py
python
PStretch
(seq, size)
return Pattern(seq).stretch(size)
Returns 'seq' as a Pattern and looped until its length is 'size' e.g. `PStretch([0,1,2], 5)` returns `P[0, 1, 2, 0, 1]`
Returns 'seq' as a Pattern and looped until its length is 'size' e.g. `PStretch([0,1,2], 5)` returns `P[0, 1, 2, 0, 1]`
[ "Returns", "seq", "as", "a", "Pattern", "and", "looped", "until", "its", "length", "is", "size", "e", ".", "g", ".", "PStretch", "(", "[", "0", "1", "2", "]", "5", ")", "returns", "P", "[", "0", "1", "2", "0", "1", "]" ]
def PStretch(seq, size): ''' Returns 'seq' as a Pattern and looped until its length is 'size' e.g. `PStretch([0,1,2], 5)` returns `P[0, 1, 2, 0, 1]` ''' return Pattern(seq).stretch(size)
[ "def", "PStretch", "(", "seq", ",", "size", ")", ":", "return", "Pattern", "(", "seq", ")", ".", "stretch", "(", "size", ")" ]
https://github.com/Qirky/FoxDot/blob/76318f9630bede48ff3994146ed644affa27bfa4/FoxDot/lib/Patterns/Sequences.py#L130-L133
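`Pattern.stretch` is FoxDot-specific, but the looping behaviour the docstring describes can be sketched with the standard library alone:

```python
from itertools import cycle, islice

def stretch(seq, size):
    # Loop `seq` until exactly `size` items have been taken
    return list(islice(cycle(seq), size))

print(stretch([0, 1, 2], 5))  # [0, 1, 2, 0, 1]
```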
mit-han-lab/data-efficient-gans
6858275f08f43a33026844c8c2ac4e703e8a07ba
DiffAugment-biggan-cifar/layers.py
python
bn.__init__
(self, output_size, eps=1e-5, momentum=0.1, cross_replica=False, mybn=False)
[]
def __init__(self, output_size, eps=1e-5, momentum=0.1, cross_replica=False, mybn=False): super(bn, self).__init__() self.output_size = output_size # Prepare gain and bias layers self.gain = P(torch.ones(output_size), requires_grad=True) self.bias = P(torch.zeros(output_size), requires_grad=True) # epsilon to avoid dividing by 0 self.eps = eps # Momentum self.momentum = momentum # Use cross-replica batchnorm? self.cross_replica = cross_replica # Use my batchnorm? self.mybn = mybn if self.cross_replica: assert False elif mybn: self.bn = myBN(output_size, self.eps, self.momentum) # Register buffers if neither of the above else: self.register_buffer('stored_mean', torch.zeros(output_size)) self.register_buffer('stored_var', torch.ones(output_size))
[ "def", "__init__", "(", "self", ",", "output_size", ",", "eps", "=", "1e-5", ",", "momentum", "=", "0.1", ",", "cross_replica", "=", "False", ",", "mybn", "=", "False", ")", ":", "super", "(", "bn", ",", "self", ")", ".", "__init__", "(", ")", "self", ".", "output_size", "=", "output_size", "# Prepare gain and bias layers", "self", ".", "gain", "=", "P", "(", "torch", ".", "ones", "(", "output_size", ")", ",", "requires_grad", "=", "True", ")", "self", ".", "bias", "=", "P", "(", "torch", ".", "zeros", "(", "output_size", ")", ",", "requires_grad", "=", "True", ")", "# epsilon to avoid dividing by 0", "self", ".", "eps", "=", "eps", "# Momentum", "self", ".", "momentum", "=", "momentum", "# Use cross-replica batchnorm?", "self", ".", "cross_replica", "=", "cross_replica", "# Use my batchnorm?", "self", ".", "mybn", "=", "mybn", "if", "self", ".", "cross_replica", ":", "assert", "False", "elif", "mybn", ":", "self", ".", "bn", "=", "myBN", "(", "output_size", ",", "self", ".", "eps", ",", "self", ".", "momentum", ")", "# Register buffers if neither of the above", "else", ":", "self", ".", "register_buffer", "(", "'stored_mean'", ",", "torch", ".", "zeros", "(", "output_size", ")", ")", "self", ".", "register_buffer", "(", "'stored_var'", ",", "torch", ".", "ones", "(", "output_size", ")", ")" ]
https://github.com/mit-han-lab/data-efficient-gans/blob/6858275f08f43a33026844c8c2ac4e703e8a07ba/DiffAugment-biggan-cifar/layers.py#L345-L368
tonybaloney/wily
e72b7d95228bbe5538a072dc5d1186daa318bb03
src/wily/archivers/__init__.py
python
BaseArchiver.checkout
(self, revision, **options)
Checkout a specific revision. :param revision: The revision identifier. :type revision: :class:`Revision` :param options: Any additional options. :type options: ``dict``
Checkout a specific revision.
[ "Checkout", "a", "specific", "revision", "." ]
def checkout(self, revision, **options): """ Checkout a specific revision. :param revision: The revision identifier. :type revision: :class:`Revision` :param options: Any additional options. :type options: ``dict`` """ raise NotImplementedError
[ "def", "checkout", "(", "self", ",", "revision", ",", "*", "*", "options", ")", ":", "raise", "NotImplementedError" ]
https://github.com/tonybaloney/wily/blob/e72b7d95228bbe5538a072dc5d1186daa318bb03/src/wily/archivers/__init__.py#L30-L40
jpmens/mqttwarn
ab407e7f4ee2974266769e57987e07f8b79cba76
mqttwarn/services/xmpp.py
python
plugin
(srv, item)
return True
Send a message to XMPP recipient(s).
Send a message to XMPP recipient(s).
[ "Send", "a", "message", "to", "XMPP", "recipient", "(", "s", ")", "." ]
def plugin(srv, item): """Send a message to XMPP recipient(s).""" srv.logging.debug("*** MODULE=%s: service=%s, target=%s", __file__, item.service, item.target) xmpp_addresses = item.addrs sender = item.config['sender'] password = item.config['password'] text = item.message if not xmpp_addresses: srv.logging.warn("Skipped sending XMPP notification to %s, " "no addresses configured" % (item.target)) return False try: srv.logging.debug("Sending XMPP notification to %s, addresses: %s" % (item.target, xmpp_addresses)) for target in xmpp_addresses: jid = xmpp.protocol.JID(sender) connection = xmpp.Client(jid.getDomain(),debug=[]) connection.connect() connection.auth(jid.getNode(), password, resource=jid.getResource()) connection.send(xmpp.protocol.Message(target, text)) srv.logging.debug("Successfully sent message") except Exception as e: srv.logging.error("Error sending message to %s: %s" % (item.target, e)) return False return True
[ "def", "plugin", "(", "srv", ",", "item", ")", ":", "srv", ".", "logging", ".", "debug", "(", "\"*** MODULE=%s: service=%s, target=%s\"", ",", "__file__", ",", "item", ".", "service", ",", "item", ".", "target", ")", "xmpp_addresses", "=", "item", ".", "addrs", "sender", "=", "item", ".", "config", "[", "'sender'", "]", "password", "=", "item", ".", "config", "[", "'password'", "]", "text", "=", "item", ".", "message", "if", "not", "xmpp_addresses", ":", "srv", ".", "logging", ".", "warn", "(", "\"Skipped sending XMPP notification to %s, \"", "\"no addresses configured\"", "%", "(", "item", ".", "target", ")", ")", "return", "False", "try", ":", "srv", ".", "logging", ".", "debug", "(", "\"Sending XMPP notification to %s, addresses: %s\"", "%", "(", "item", ".", "target", ",", "xmpp_addresses", ")", ")", "for", "target", "in", "xmpp_addresses", ":", "jid", "=", "xmpp", ".", "protocol", ".", "JID", "(", "sender", ")", "connection", "=", "xmpp", ".", "Client", "(", "jid", ".", "getDomain", "(", ")", ",", "debug", "=", "[", "]", ")", "connection", ".", "connect", "(", ")", "connection", ".", "auth", "(", "jid", ".", "getNode", "(", ")", ",", "password", ",", "resource", "=", "jid", ".", "getResource", "(", ")", ")", "connection", ".", "send", "(", "xmpp", ".", "protocol", ".", "Message", "(", "target", ",", "text", ")", ")", "srv", ".", "logging", ".", "debug", "(", "\"Successfully sent message\"", ")", "except", "Exception", "as", "e", ":", "srv", ".", "logging", ".", "error", "(", "\"Error sending message to %s: %s\"", "%", "(", "item", ".", "target", ",", "e", ")", ")", "return", "False", "return", "True" ]
https://github.com/jpmens/mqttwarn/blob/ab407e7f4ee2974266769e57987e07f8b79cba76/mqttwarn/services/xmpp.py#L11-L39
etetoolkit/ete
2b207357dc2a40ccad7bfd8f54964472c72e4726
ete3/nexml/_nexml.py
python
DNAMatrixSeqRow.hasContent_
(self)
[]
def hasContent_(self): if ( self.meta or self.seq is not None or super(DNAMatrixSeqRow, self).hasContent_() ): return True else: return False
[ "def", "hasContent_", "(", "self", ")", ":", "if", "(", "self", ".", "meta", "or", "self", ".", "seq", "is", "not", "None", "or", "super", "(", "DNAMatrixSeqRow", ",", "self", ")", ".", "hasContent_", "(", ")", ")", ":", "return", "True", "else", ":", "return", "False" ]
https://github.com/etetoolkit/ete/blob/2b207357dc2a40ccad7bfd8f54964472c72e4726/ete3/nexml/_nexml.py#L13483-L13491
andresriancho/w3af
cd22e5252243a87aaa6d0ddea47cf58dacfe00a9
w3af/plugins/attack/db/sqlmap/lib/takeover/metasploit.py
python
Metasploit.smb
(self)
[]
def smb(self): Metasploit._initVars(self) self._randFile = "tmpu%s.txt" % randomStr(lowercase=True) self._runMsfCliSmbrelay() if Backend.getIdentifiedDbms() in (DBMS.MYSQL, DBMS.PGSQL): self.uncPath = "\\\\\\\\%s\\\\%s" % (self.lhostStr, self._randFile) else: self.uncPath = "\\\\%s\\%s" % (self.lhostStr, self._randFile) debugMsg = "Metasploit Framework console exited with return " debugMsg += "code %s" % self._controlMsfCmd(self._msfCliProc, self.uncPathRequest) logger.debug(debugMsg)
[ "def", "smb", "(", "self", ")", ":", "Metasploit", ".", "_initVars", "(", "self", ")", "self", ".", "_randFile", "=", "\"tmpu%s.txt\"", "%", "randomStr", "(", "lowercase", "=", "True", ")", "self", ".", "_runMsfCliSmbrelay", "(", ")", "if", "Backend", ".", "getIdentifiedDbms", "(", ")", "in", "(", "DBMS", ".", "MYSQL", ",", "DBMS", ".", "PGSQL", ")", ":", "self", ".", "uncPath", "=", "\"\\\\\\\\\\\\\\\\%s\\\\\\\\%s\"", "%", "(", "self", ".", "lhostStr", ",", "self", ".", "_randFile", ")", "else", ":", "self", ".", "uncPath", "=", "\"\\\\\\\\%s\\\\%s\"", "%", "(", "self", ".", "lhostStr", ",", "self", ".", "_randFile", ")", "debugMsg", "=", "\"Metasploit Framework console exited with return \"", "debugMsg", "+=", "\"code %s\"", "%", "self", ".", "_controlMsfCmd", "(", "self", ".", "_msfCliProc", ",", "self", ".", "uncPathRequest", ")", "logger", ".", "debug", "(", "debugMsg", ")" ]
https://github.com/andresriancho/w3af/blob/cd22e5252243a87aaa6d0ddea47cf58dacfe00a9/w3af/plugins/attack/db/sqlmap/lib/takeover/metasploit.py#L709-L722
vrenkens/tfkaldi
30e8f7a32582a82a58cea66c2c52bcb66c06c326
processing/base.py
python
get_filterbanks
(nfilt=20, nfft=512, samplerate=16000, lowfreq=0, highfreq=None)
return fbanks
Compute a Mel-filterbank. The filters are stored in the rows, the columns correspond to fft bins. The filters are returned as an array of size nfilt * (nfft/2 + 1) Args: nfilt: the number of filters in the filterbank, default 20. nfft: the FFT size. Default is 512. samplerate: the samplerate of the signal we are working with. Affects mel spacing. lowfreq: lowest band edge of mel filters, default 0 Hz highfreq: highest band edge of mel filters, default samplerate/2 Returns: A numpy array of size nfilt * (nfft/2 + 1) containing filterbank. Each row holds 1 filter.
Compute a Mel-filterbank.
[ "Compute", "a", "Mel", "-", "filterbank", "." ]
def get_filterbanks(nfilt=20, nfft=512, samplerate=16000, lowfreq=0, highfreq=None): ''' Compute a Mel-filterbank. The filters are stored in the rows, the columns correspond to fft bins. The filters are returned as an array of size nfilt * (nfft/2 + 1) Args: nfilt: the number of filters in the filterbank, default 20. nfft: the FFT size. Default is 512. samplerate: the samplerate of the signal we are working with. Affects mel spacing. lowfreq: lowest band edge of mel filters, default 0 Hz highfreq: highest band edge of mel filters, default samplerate/2 Returns: A numpy array of size nfilt * (nfft/2 + 1) containing filterbank. Each row holds 1 filter. ''' highfreq = highfreq or samplerate/2 assert highfreq <= samplerate/2, "highfreq is greater than samplerate/2" # compute points evenly spaced in mels lowmel = hz2mel(lowfreq) highmel = hz2mel(highfreq) melpoints = numpy.linspace(lowmel, highmel, nfilt+2) # our points are in Hz, but we use fft bins, so we have to convert # from Hz to fft bin number bins = numpy.floor((nfft+1)*mel2hz(melpoints)/samplerate) fbanks = numpy.zeros([nfilt, nfft/2+1]) for j in xrange(0, nfilt): for i in xrange(int(bins[j]), int(bins[j+1])): fbanks[j, i] = (i - bins[j])/(bins[j+1]-bins[j]) for i in xrange(int(bins[j+1]), int(bins[j+2])): fbanks[j, i] = (bins[j+2]-i)/(bins[j+2]-bins[j+1]) return fbanks
[ "def", "get_filterbanks", "(", "nfilt", "=", "20", ",", "nfft", "=", "512", ",", "samplerate", "=", "16000", ",", "lowfreq", "=", "0", ",", "highfreq", "=", "None", ")", ":", "highfreq", "=", "highfreq", "or", "samplerate", "/", "2", "assert", "highfreq", "<=", "samplerate", "/", "2", ",", "\"highfreq is greater than samplerate/2\"", "# compute points evenly spaced in mels", "lowmel", "=", "hz2mel", "(", "lowfreq", ")", "highmel", "=", "hz2mel", "(", "highfreq", ")", "melpoints", "=", "numpy", ".", "linspace", "(", "lowmel", ",", "highmel", ",", "nfilt", "+", "2", ")", "# our points are in Hz, but we use fft bins, so we have to convert", "# from Hz to fft bin number", "bins", "=", "numpy", ".", "floor", "(", "(", "nfft", "+", "1", ")", "*", "mel2hz", "(", "melpoints", ")", "/", "samplerate", ")", "fbanks", "=", "numpy", ".", "zeros", "(", "[", "nfilt", ",", "nfft", "/", "2", "+", "1", "]", ")", "for", "j", "in", "xrange", "(", "0", ",", "nfilt", ")", ":", "for", "i", "in", "xrange", "(", "int", "(", "bins", "[", "j", "]", ")", ",", "int", "(", "bins", "[", "j", "+", "1", "]", ")", ")", ":", "fbanks", "[", "j", ",", "i", "]", "=", "(", "i", "-", "bins", "[", "j", "]", ")", "/", "(", "bins", "[", "j", "+", "1", "]", "-", "bins", "[", "j", "]", ")", "for", "i", "in", "xrange", "(", "int", "(", "bins", "[", "j", "+", "1", "]", ")", ",", "int", "(", "bins", "[", "j", "+", "2", "]", ")", ")", ":", "fbanks", "[", "j", ",", "i", "]", "=", "(", "bins", "[", "j", "+", "2", "]", "-", "i", ")", "/", "(", "bins", "[", "j", "+", "2", "]", "-", "bins", "[", "j", "+", "1", "]", ")", "return", "fbanks" ]
https://github.com/vrenkens/tfkaldi/blob/30e8f7a32582a82a58cea66c2c52bcb66c06c326/processing/base.py#L184-L223
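`hz2mel` and `mel2hz` are defined elsewhere in the module; a commonly used definition is the HTK formula, sketched below under that assumption, together with the evenly-in-mel band-edge spacing the function above performs. (Note the record's code is Python 2: it relies on `xrange` and integer `/`.)

```python
import numpy as np

def hz2mel(hz):
    # HTK-style mel scale, an assumed definition; the module above ships
    # its own hz2mel/mel2hz
    return 2595.0 * np.log10(1.0 + hz / 700.0)

def mel2hz(mel):
    return 700.0 * (10.0 ** (mel / 2595.0) - 1.0)

# Band edges spaced evenly in mel, exactly as get_filterbanks does
melpoints = np.linspace(hz2mel(0.0), hz2mel(8000.0), 20 + 2)
print(np.round(mel2hz(melpoints)[:4]))  # edges bunch together at low frequencies
```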
Jenyay/outwiker
50530cf7b3f71480bb075b2829bc0669773b835b
plugins/snippets/snippets/libs/jinja2/parser.py
python
Parser.parse
(self)
return result
Parse the whole template into a `Template` node.
Parse the whole template into a `Template` node.
[ "Parse", "the", "whole", "template", "into", "a", "Template", "node", "." ]
def parse(self): """Parse the whole template into a `Template` node.""" result = nodes.Template(self.subparse(), lineno=1) result.set_environment(self.environment) return result
[ "def", "parse", "(", "self", ")", ":", "result", "=", "nodes", ".", "Template", "(", "self", ".", "subparse", "(", ")", ",", "lineno", "=", "1", ")", "result", ".", "set_environment", "(", "self", ".", "environment", ")", "return", "result" ]
https://github.com/Jenyay/outwiker/blob/50530cf7b3f71480bb075b2829bc0669773b835b/plugins/snippets/snippets/libs/jinja2/parser.py#L899-L903
SheffieldML/GPy
bb1bc5088671f9316bc92a46d356734e34c2d5c0
GPy/likelihoods/loglogistic.py
python
LogLogistic.dlogpdf_link_dtheta
(self, f, y, Y_metadata=None)
return dlogpdf_dtheta
[]
def dlogpdf_link_dtheta(self, f, y, Y_metadata=None): dlogpdf_dtheta = np.zeros((self.size, f.shape[0], f.shape[1])) dlogpdf_dtheta[0, :, :] = self.dlogpdf_link_dr(f, y, Y_metadata=Y_metadata) return dlogpdf_dtheta
[ "def", "dlogpdf_link_dtheta", "(", "self", ",", "f", ",", "y", ",", "Y_metadata", "=", "None", ")", ":", "dlogpdf_dtheta", "=", "np", ".", "zeros", "(", "(", "self", ".", "size", ",", "f", ".", "shape", "[", "0", "]", ",", "f", ".", "shape", "[", "1", "]", ")", ")", "dlogpdf_dtheta", "[", "0", ",", ":", ",", ":", "]", "=", "self", ".", "dlogpdf_link_dr", "(", "f", ",", "y", ",", "Y_metadata", "=", "Y_metadata", ")", "return", "dlogpdf_dtheta" ]
https://github.com/SheffieldML/GPy/blob/bb1bc5088671f9316bc92a46d356734e34c2d5c0/GPy/likelihoods/loglogistic.py#L303-L306
boto/botocore
f36f59394263539ed31f5a8ceb552a85354a552c
botocore/session.py
python
Session.unregister
(self, event_name, handler=None, unique_id=None, unique_id_uses_count=False)
Unregister a handler with an event. :type event_name: str :param event_name: The name of the event. :type handler: callable :param handler: The callback to unregister. :type unique_id: str :param unique_id: A unique identifier identifying the callback to unregister. You can provide either the handler or the unique_id, you do not have to provide both. :type unique_id_uses_count: boolean :param unique_id_uses_count: Specifies if the event should maintain a count when a ``unique_id`` is registered and unregistered. The event can only be completely unregistered once every ``register`` call using the ``unique_id`` has been matched by an ``unregister`` call. If the ``unique_id`` is specified, subsequent ``unregister`` calls must use the same value for ``unique_id_uses_count`` as the ``register`` call that first registered the event. :raises ValueError: If the call to ``unregister`` uses ``unique_id`` but the value for ``unique_id_uses_count`` differs from the ``unique_id_uses_count`` value declared by the very first ``register`` call for that ``unique_id``.
Unregister a handler with an event.
[ "Unregister", "a", "handler", "with", "an", "event", "." ]
def unregister(self, event_name, handler=None, unique_id=None, unique_id_uses_count=False): """Unregister a handler with an event. :type event_name: str :param event_name: The name of the event. :type handler: callable :param handler: The callback to unregister. :type unique_id: str :param unique_id: A unique identifier identifying the callback to unregister. You can provide either the handler or the unique_id, you do not have to provide both. :type unique_id_uses_count: boolean :param unique_id_uses_count: Specifies if the event should maintain a count when a ``unique_id`` is registered and unregistered. The event can only be completely unregistered once every ``register`` call using the ``unique_id`` has been matched by an ``unregister`` call. If the ``unique_id`` is specified, subsequent ``unregister`` calls must use the same value for ``unique_id_uses_count`` as the ``register`` call that first registered the event. :raises ValueError: If the call to ``unregister`` uses ``unique_id`` but the value for ``unique_id_uses_count`` differs from the ``unique_id_uses_count`` value declared by the very first ``register`` call for that ``unique_id``. """ self._events.unregister(event_name, handler=handler, unique_id=unique_id, unique_id_uses_count=unique_id_uses_count)
[ "def", "unregister", "(", "self", ",", "event_name", ",", "handler", "=", "None", ",", "unique_id", "=", "None", ",", "unique_id_uses_count", "=", "False", ")", ":", "self", ".", "_events", ".", "unregister", "(", "event_name", ",", "handler", "=", "handler", ",", "unique_id", "=", "unique_id", ",", "unique_id_uses_count", "=", "unique_id_uses_count", ")" ]
https://github.com/boto/botocore/blob/f36f59394263539ed31f5a8ceb552a85354a552c/botocore/session.py#L671-L703
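A hedged usage sketch of the register/unregister pairing the docstring describes: both calls must agree on `unique_id`, and when `unique_id_uses_count=True` they must agree on that flag as well (the event name and handler here are illustrative):

```python
import botocore.session

def log_call(**kwargs):
    print('about to call S3')

session = botocore.session.get_session()
session.register('before-call.s3', log_call,
                 unique_id='my-logger', unique_id_uses_count=True)
session.unregister('before-call.s3',
                   unique_id='my-logger', unique_id_uses_count=True)
```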
apache/libcloud
90971e17bfd7b6bb97b2489986472c531cc8e140
libcloud/compute/drivers/outscale.py
python
OutscaleNodeDriver.ex_send_reset_password_email
( self, email: str, dry_run: bool = False, )
return response.json()
Sends an email containing a token you can use to reset the password of your account. :param email: The email address provided for the account. :type email: ``str`` :param dry_run: If true, checks whether you have the required permissions to perform the action. :type dry_run: ``bool`` :return: True if the action is successful :rtype: ``bool``
Sends an email containing a token you can use to reset the password of your account.
[ "Sends", "an", "email", "containing", "a", "token", "you", "can", "use", "to", "reset", "the", "password", "of", "your", "account", "." ]
def ex_send_reset_password_email( self, email: str, dry_run: bool = False, ): """ Sends an email containing a token you can use to reset the password of your account. :param email: The email address provided for the account. :type email: ``str`` :param dry_run: If true, checks whether you have the required permissions to perform the action. :type dry_run: ``bool`` :return: True if the action is successful :rtype: ``bool`` """ action = "SendResetPasswordEmail" data = json.dumps({"DryRun": dry_run, "Email": email}) response = self._call_api(action, data) if response.status_code == 200: return True return response.json()
[ "def", "ex_send_reset_password_email", "(", "self", ",", "email", ":", "str", ",", "dry_run", ":", "bool", "=", "False", ",", ")", ":", "action", "=", "\"SendResetPasswordEmail\"", "data", "=", "json", ".", "dumps", "(", "{", "\"DryRun\"", ":", "dry_run", ",", "\"Email\"", ":", "email", "}", ")", "response", "=", "self", ".", "_call_api", "(", "action", ",", "data", ")", "if", "response", ".", "status_code", "==", "200", ":", "return", "True", "return", "response", ".", "json", "(", ")" ]
https://github.com/apache/libcloud/blob/90971e17bfd7b6bb97b2489986472c531cc8e140/libcloud/compute/drivers/outscale.py#L2157-L2182
mozillazg/pypy
2ff5cd960c075c991389f842c6d59e71cf0cb7d0
pypy/objspace/std/bytesobject.py
python
W_AbstractBytesObject.descr_rfind
(self, space, w_sub, w_start=None, w_end=None)
S.rfind(sub[, start[, end]]) -> int Return the highest index in S where substring sub is found, such that sub is contained within S[start:end]. Optional arguments start and end are interpreted as in slice notation. Return -1 on failure.
S.rfind(sub[, start[, end]]) -> int
[ "S", ".", "rfind", "(", "sub", "[", "start", "[", "end", "]]", ")", "-", ">", "int" ]
def descr_rfind(self, space, w_sub, w_start=None, w_end=None): """S.rfind(sub[, start[, end]]) -> int Return the highest index in S where substring sub is found, such that sub is contained within S[start:end]. Optional arguments start and end are interpreted as in slice notation. Return -1 on failure. """
[ "def", "descr_rfind", "(", "self", ",", "space", ",", "w_sub", ",", "w_start", "=", "None", ",", "w_end", "=", "None", ")", ":" ]
https://github.com/mozillazg/pypy/blob/2ff5cd960c075c991389f842c6d59e71cf0cb7d0/pypy/objspace/std/bytesobject.py#L310-L318
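The PyPy record above only carries the docstring (the body is implemented at interpreter level), but the semantics it documents are exactly those of CPython's `bytes.rfind`:

```python
s = b'abcabc'
print(s.rfind(b'bc'))        # 4, the highest index where b'bc' occurs
print(s.rfind(b'bc', 0, 4))  # 1, search restricted to s[0:4]
print(s.rfind(b'zz'))        # -1 on failure
```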
aws-samples/aws-kube-codesuite
ab4e5ce45416b83bffb947ab8d234df5437f4fca
src/kubernetes/client/models/v1_job_status.py
python
V1JobStatus.failed
(self)
return self._failed
Gets the failed of this V1JobStatus. The number of pods which reached phase Failed. :return: The failed of this V1JobStatus. :rtype: int
Gets the failed of this V1JobStatus. The number of pods which reached phase Failed.
[ "Gets", "the", "failed", "of", "this", "V1JobStatus", ".", "The", "number", "of", "pods", "which", "reached", "phase", "Failed", "." ]
def failed(self): """ Gets the failed of this V1JobStatus. The number of pods which reached phase Failed. :return: The failed of this V1JobStatus. :rtype: int """ return self._failed
[ "def", "failed", "(", "self", ")", ":", "return", "self", ".", "_failed" ]
https://github.com/aws-samples/aws-kube-codesuite/blob/ab4e5ce45416b83bffb947ab8d234df5437f4fca/src/kubernetes/client/models/v1_job_status.py#L128-L136
larryhastings/gilectomy
4315ec3f1d6d4f813cc82ce27a24e7f784dbfc1a
Lib/html/parser.py
python
HTMLParser.feed
(self, data)
r"""Feed data to the parser. Call this as often as you want, with as little or as much text as you want (may include '\n').
r"""Feed data to the parser.
[ "r", "Feed", "data", "to", "the", "parser", "." ]
def feed(self, data): r"""Feed data to the parser. Call this as often as you want, with as little or as much text as you want (may include '\n'). """ self.rawdata = self.rawdata + data self.goahead(0)
[ "def", "feed", "(", "self", ",", "data", ")", ":", "self", ".", "rawdata", "=", "self", ".", "rawdata", "+", "data", "self", ".", "goahead", "(", "0", ")" ]
https://github.com/larryhastings/gilectomy/blob/4315ec3f1d6d4f813cc82ce27a24e7f784dbfc1a/Lib/html/parser.py#L104-L111
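Because `feed()` appends to `rawdata` and `goahead()` buffers incomplete constructs, chunk boundaries may fall anywhere, even inside a tag. A small demonstration:

```python
from html.parser import HTMLParser

class LinkCollector(HTMLParser):
    def __init__(self):
        super().__init__()
        self.links = []

    def handle_starttag(self, tag, attrs):
        if tag == 'a':
            self.links.extend(v for k, v in attrs if k == 'href')

p = LinkCollector()
p.feed('<a href="https://exa')   # tag split mid-attribute on purpose
p.feed('mple.com">x</a>')
print(p.links)  # ['https://example.com']
```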
openyou/emokit
7f25321a1c3a240f5b64a1572e5f106807b1beea
python/example_render_encrypted_data.py
python
Grapher.update
(self, packet)
Appends value and quality values to drawing buffer.
Appends value and quality values to drawing buffer.
[ "Appends", "value", "and", "quality", "values", "to", "drawing", "buffer", "." ]
def update(self, packet): """ Appends value and quality values to drawing buffer. """ if len(self.buffer) == 800 - self.x_offset: self.buffer = self.buffer[1:] self.buffer.append([packet.sensors[self.name]['value'], packet.sensors[self.name]['quality']])
[ "def", "update", "(", "self", ",", "packet", ")", ":", "if", "len", "(", "self", ".", "buffer", ")", "==", "800", "-", "self", ".", "x_offset", ":", "self", ".", "buffer", "=", "self", ".", "buffer", "[", "1", ":", "]", "self", ".", "buffer", ".", "append", "(", "[", "packet", ".", "sensors", "[", "self", ".", "name", "]", "[", "'value'", "]", ",", "packet", ".", "sensors", "[", "self", ".", "name", "]", "[", "'quality'", "]", "]", ")" ]
https://github.com/openyou/emokit/blob/7f25321a1c3a240f5b64a1572e5f106807b1beea/python/example_render_encrypted_data.py#L47-L53
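The method above implements a bounded buffer by hand (drop the oldest sample once 800 - x_offset entries accumulate). The same design is often expressed with `collections.deque`, which evicts automatically:

```python
from collections import deque

buffer = deque(maxlen=800)  # old samples fall off the left automatically
for value, quality in [(1.0, 4), (2.0, 3)]:
    buffer.append([value, quality])
print(len(buffer))  # 2
```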
ranaroussi/pywallet
468622dcf993a27a5b585289b2724986c02a1fbc
pywallet/utils/bip32.py
python
Wallet.__init__
(self, chain_code, depth=0, parent_fingerprint=0, child_number=0, private_exponent=None, private_key=None, public_pair=None, public_key=None, network="bitcoin_testnet")
Construct a new BIP32 compliant wallet. You probably don't want to use this init method. Instead use one of the 'from_master_secret' or 'deserialize' constructors.
Construct a new BIP32 compliant wallet.
[ "Construct", "a", "new", "BIP32", "compliant", "wallet", "." ]
def __init__(self, chain_code, depth=0, parent_fingerprint=0, child_number=0, private_exponent=None, private_key=None, public_pair=None, public_key=None, network="bitcoin_testnet"): """Construct a new BIP32 compliant wallet. You probably don't want to use this init method. Instead use one of the 'from_master_secret' or 'deserialize' constructors. """ if (not (private_exponent or private_key) and not (public_pair or public_key)): raise InsufficientKeyDataError( "You must supply one of private_exponent or public_pair") network = Wallet.get_network(network) self.private_key = None self.public_key = None if private_key: if not isinstance(private_key, PrivateKey): raise InvalidPrivateKeyError( "private_key must be of type " "bitmerchant.wallet.keys.PrivateKey") self.private_key = private_key elif private_exponent: self.private_key = PrivateKey( private_exponent, network=network) if public_key: if not isinstance(public_key, PublicKey): raise InvalidPublicKeyError( "public_key must be of type " "bitmerchant.wallet.keys.PublicKey") self.public_key = public_key elif public_pair: self.public_key = PublicKey.from_public_pair( public_pair, network=network) else: self.public_key = self.private_key.get_public_key() if (self.private_key and self.private_key.get_public_key() != self.public_key): raise KeyMismatchError( "Provided private and public values do not match") def h(val, hex_len): if isinstance(val, six.integer_types): return long_to_hex(val, hex_len) elif (isinstance(val, six.string_types) or isinstance(val, six.binary_type)) and is_hex_string(val): val = ensure_bytes(val) if len(val) != hex_len: raise ValueError("Invalid parameter length") return val else: raise ValueError("Invalid parameter type") def l(val): if isinstance(val, six.integer_types): return long_or_int(val) elif (isinstance(val, six.string_types) or isinstance(val, six.binary_type)): val = ensure_bytes(val) if not is_hex_string(val): val = hexlify(val) return long_or_int(val, 16) else: raise ValueError("parameter must be an int or long") self.network = Wallet.get_network(network) self.depth = l(depth) if (isinstance(parent_fingerprint, six.string_types) or isinstance(parent_fingerprint, six.binary_type)): val = ensure_bytes(parent_fingerprint) if val.startswith(b"0x"): parent_fingerprint = val[2:] self.parent_fingerprint = b"0x" + h(parent_fingerprint, 8) self.child_number = l(child_number) self.chain_code = h(chain_code, 64)
[ "def", "__init__", "(", "self", ",", "chain_code", ",", "depth", "=", "0", ",", "parent_fingerprint", "=", "0", ",", "child_number", "=", "0", ",", "private_exponent", "=", "None", ",", "private_key", "=", "None", ",", "public_pair", "=", "None", ",", "public_key", "=", "None", ",", "network", "=", "\"bitcoin_testnet\"", ")", ":", "if", "(", "not", "(", "private_exponent", "or", "private_key", ")", "and", "not", "(", "public_pair", "or", "public_key", ")", ")", ":", "raise", "InsufficientKeyDataError", "(", "\"You must supply one of private_exponent or public_pair\"", ")", "network", "=", "Wallet", ".", "get_network", "(", "network", ")", "self", ".", "private_key", "=", "None", "self", ".", "public_key", "=", "None", "if", "private_key", ":", "if", "not", "isinstance", "(", "private_key", ",", "PrivateKey", ")", ":", "raise", "InvalidPrivateKeyError", "(", "\"private_key must be of type \"", "\"bitmerchant.wallet.keys.PrivateKey\"", ")", "self", ".", "private_key", "=", "private_key", "elif", "private_exponent", ":", "self", ".", "private_key", "=", "PrivateKey", "(", "private_exponent", ",", "network", "=", "network", ")", "if", "public_key", ":", "if", "not", "isinstance", "(", "public_key", ",", "PublicKey", ")", ":", "raise", "InvalidPublicKeyError", "(", "\"public_key must be of type \"", "\"bitmerchant.wallet.keys.PublicKey\"", ")", "self", ".", "public_key", "=", "public_key", "elif", "public_pair", ":", "self", ".", "public_key", "=", "PublicKey", ".", "from_public_pair", "(", "public_pair", ",", "network", "=", "network", ")", "else", ":", "self", ".", "public_key", "=", "self", ".", "private_key", ".", "get_public_key", "(", ")", "if", "(", "self", ".", "private_key", "and", "self", ".", "private_key", ".", "get_public_key", "(", ")", "!=", "self", ".", "public_key", ")", ":", "raise", "KeyMismatchError", "(", "\"Provided private and public values do not match\"", ")", "def", "h", "(", "val", ",", "hex_len", ")", ":", "if", "isinstance", "(", "val", ",", "six", ".", "integer_types", ")", ":", "return", "long_to_hex", "(", "val", ",", "hex_len", ")", "elif", "(", "isinstance", "(", "val", ",", "six", ".", "string_types", ")", "or", "isinstance", "(", "val", ",", "six", ".", "binary_type", ")", ")", "and", "is_hex_string", "(", "val", ")", ":", "val", "=", "ensure_bytes", "(", "val", ")", "if", "len", "(", "val", ")", "!=", "hex_len", ":", "raise", "ValueError", "(", "\"Invalid parameter length\"", ")", "return", "val", "else", ":", "raise", "ValueError", "(", "\"Invalid parameter type\"", ")", "def", "l", "(", "val", ")", ":", "if", "isinstance", "(", "val", ",", "six", ".", "integer_types", ")", ":", "return", "long_or_int", "(", "val", ")", "elif", "(", "isinstance", "(", "val", ",", "six", ".", "string_types", ")", "or", "isinstance", "(", "val", ",", "six", ".", "binary_type", ")", ")", ":", "val", "=", "ensure_bytes", "(", "val", ")", "if", "not", "is_hex_string", "(", "val", ")", ":", "val", "=", "hexlify", "(", "val", ")", "return", "long_or_int", "(", "val", ",", "16", ")", "else", ":", "raise", "ValueError", "(", "\"parameter must be an int or long\"", ")", "self", ".", "network", "=", "Wallet", ".", "get_network", "(", "network", ")", "self", ".", "depth", "=", "l", "(", "depth", ")", "if", "(", "isinstance", "(", "parent_fingerprint", ",", "six", ".", "string_types", ")", "or", "isinstance", "(", "parent_fingerprint", ",", "six", ".", "binary_type", ")", ")", ":", "val", "=", "ensure_bytes", "(", "parent_fingerprint", ")", "if", "val", ".", "startswith", "(", 
"b\"0x\"", ")", ":", "parent_fingerprint", "=", "val", "[", "2", ":", "]", "self", ".", "parent_fingerprint", "=", "b\"0x\"", "+", "h", "(", "parent_fingerprint", ",", "8", ")", "self", ".", "child_number", "=", "l", "(", "child_number", ")", "self", ".", "chain_code", "=", "h", "(", "chain_code", ",", "64", ")" ]
https://github.com/ranaroussi/pywallet/blob/468622dcf993a27a5b585289b2724986c02a1fbc/pywallet/utils/bip32.py#L59-L143
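The `h` helper above normalises values to fixed-width hex bytes, falling back to `long_to_hex` for integers. That function lives elsewhere in pywallet; a plausible sketch of its behaviour, assumed rather than taken from the source:

```python
def long_to_hex(value, hex_len):
    # Fixed-width lowercase hex bytes, matching what `h` above expects
    return format(value, '0{}x'.format(hex_len)).encode()

print(long_to_hex(0, 8))    # b'00000000', a master key's parent fingerprint
print(long_to_hex(255, 8))  # b'000000ff'
```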
googlearchive/appengine-flask-skeleton
8c25461d003a0bd99a9ff3b339c2791ee6919242
lib/jinja2/filters.py
python
do_groupby
(environment, value, attribute)
return sorted(map(_GroupTuple, groupby(sorted(value, key=expr), expr)))
Group a sequence of objects by a common attribute. If you for example have a list of dicts or objects that represent persons with `gender`, `first_name` and `last_name` attributes and you want to group all users by genders you can do something like the following snippet: .. sourcecode:: html+jinja <ul> {% for group in persons|groupby('gender') %} <li>{{ group.grouper }}<ul> {% for person in group.list %} <li>{{ person.first_name }} {{ person.last_name }}</li> {% endfor %}</ul></li> {% endfor %} </ul> Additionally it's possible to use tuple unpacking for the grouper and list: .. sourcecode:: html+jinja <ul> {% for grouper, list in persons|groupby('gender') %} ... {% endfor %} </ul> As you can see the item we're grouping by is stored in the `grouper` attribute and the `list` contains all the objects that have this grouper in common. .. versionchanged:: 2.6 It's now possible to use dotted notation to group by the child attribute of another attribute.
Group a sequence of objects by a common attribute.
[ "Group", "a", "sequence", "of", "objects", "by", "a", "common", "attribute", "." ]
def do_groupby(environment, value, attribute): """Group a sequence of objects by a common attribute. If you for example have a list of dicts or objects that represent persons with `gender`, `first_name` and `last_name` attributes and you want to group all users by genders you can do something like the following snippet: .. sourcecode:: html+jinja <ul> {% for group in persons|groupby('gender') %} <li>{{ group.grouper }}<ul> {% for person in group.list %} <li>{{ person.first_name }} {{ person.last_name }}</li> {% endfor %}</ul></li> {% endfor %} </ul> Additionally it's possible to use tuple unpacking for the grouper and list: .. sourcecode:: html+jinja <ul> {% for grouper, list in persons|groupby('gender') %} ... {% endfor %} </ul> As you can see the item we're grouping by is stored in the `grouper` attribute and the `list` contains all the objects that have this grouper in common. .. versionchanged:: 2.6 It's now possible to use dotted notation to group by the child attribute of another attribute. """ expr = make_attrgetter(environment, attribute) return sorted(map(_GroupTuple, groupby(sorted(value, key=expr), expr)))
[ "def", "do_groupby", "(", "environment", ",", "value", ",", "attribute", ")", ":", "expr", "=", "make_attrgetter", "(", "environment", ",", "attribute", ")", "return", "sorted", "(", "map", "(", "_GroupTuple", ",", "groupby", "(", "sorted", "(", "value", ",", "key", "=", "expr", ")", ",", "expr", ")", ")", ")" ]
https://github.com/googlearchive/appengine-flask-skeleton/blob/8c25461d003a0bd99a9ff3b339c2791ee6919242/lib/jinja2/filters.py#L673-L712
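Under the hood the filter is a one-liner over `itertools.groupby`, which only merges adjacent items; that is why the value is sorted on the same key first. The same pattern in plain Python:

```python
from itertools import groupby
from operator import itemgetter

persons = [
    {'gender': 'f', 'first_name': 'Ada'},
    {'gender': 'm', 'first_name': 'Alan'},
    {'gender': 'f', 'first_name': 'Grace'},
]
key = itemgetter('gender')  # stands in for make_attrgetter(environment, attribute)
for grouper, items in groupby(sorted(persons, key=key), key=key):
    print(grouper, [p['first_name'] for p in items])
# f ['Ada', 'Grace']
# m ['Alan']
```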
cloudera/hue
23f02102d4547c17c32bd5ea0eb24e9eadd657a4
desktop/core/ext-py/boto-2.46.1/boto/sdb/domain.py
python
Domain.batch_delete_attributes
(self, items)
return self.connection.batch_delete_attributes(self, items)
Delete multiple items in this domain. :type items: dict or dict-like object :param items: A dictionary-like object. The keys of the dictionary are the item names and the values are either: * dictionaries of attribute names/values, exactly the same as the attribute_names parameter of the scalar put_attributes call. The attribute name/value pairs will only be deleted if they match the name/value pairs passed in. * None which means that all attributes associated with the item should be deleted. :rtype: bool :return: True if successful
Delete multiple items in this domain.
[ "Delete", "multiple", "items", "in", "this", "domain", "." ]
def batch_delete_attributes(self, items): """ Delete multiple items in this domain. :type items: dict or dict-like object :param items: A dictionary-like object. The keys of the dictionary are the item names and the values are either: * dictionaries of attribute names/values, exactly the same as the attribute_names parameter of the scalar put_attributes call. The attribute name/value pairs will only be deleted if they match the name/value pairs passed in. * None which means that all attributes associated with the item should be deleted. :rtype: bool :return: True if successful """ return self.connection.batch_delete_attributes(self, items)
[ "def", "batch_delete_attributes", "(", "self", ",", "items", ")", ":", "return", "self", ".", "connection", ".", "batch_delete_attributes", "(", "self", ",", "items", ")" ]
https://github.com/cloudera/hue/blob/23f02102d4547c17c32bd5ea0eb24e9eadd657a4/desktop/core/ext-py/boto-2.46.1/boto/sdb/domain.py#L174-L193
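A hedged usage sketch of the call above under boto 2, assuming credentials in the environment and an existing domain; the mixed value types follow the docstring:

```python
import boto

sdb = boto.connect_sdb()             # reads AWS credentials from the environment
domain = sdb.get_domain('mydomain')  # assumed pre-existing SimpleDB domain
domain.batch_delete_attributes({
    'item1': None,                   # None deletes the whole item
    'item2': {'color': 'blue'},      # dict deletes only matching name/value pairs
})
```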