function
stringlengths 11
56k
| repo_name
stringlengths 5
60
| features
sequence |
---|---|---|
def validate_default(self, value):
    """Validate default value assigned to field.

    Args:
      value: Value to validate.
    Returns:
      the value in casted in the correct type.
    Raises:
      ValidationError if value is not expected type.
    """
    # Delegate to the private validator, using the per-element default
    # check (validate_default_element) rather than the normal one.
    return self.__validate(value, self.validate_default_element)
def default(self):
"""Get default value for field."""
return self.__default | catapult-project/catapult | [
1835,
570,
1835,
1039,
1429033745
] |
def lookup_field_type_by_variant(cls, variant):
return cls.__variant_to_type[variant] | catapult-project/catapult | [
1835,
570,
1835,
1039,
1429033745
] |
def validate_element(self, value):
    """Validate StringField allowing for str and unicode.

    Raises:
      ValidationError if a str value is not UTF-8.
    """
    # If value is str is it considered valid. Satisfies "required=True".
    if isinstance(value, bytes):
        # Bytes are accepted only if they decode cleanly as UTF-8.
        try:
            six.text_type(value, 'UTF-8')
        except UnicodeDecodeError as err:
            # self.name is only set once the field is attached to a message
            # class, so build the error message with or without it.
            try:
                _ = self.name
            except AttributeError:
                validation_error = ValidationError(
                    'Field encountered non-UTF-8 string %r: %s' % (value,
                                                                   err))
            else:
                validation_error = ValidationError(
                    'Field %s encountered non-UTF-8 string %r: %s' % (
                        self.name, value, err))
                validation_error.field_name = self.name
            raise validation_error
    else:
        # Non-bytes values go through the generic string validation.
        return super(StringField, self).validate_element(value)
    return value
def __init__(self,
             message_type,
             number,
             required=False,
             repeated=False,
             variant=None):
    """Constructor.

    Args:
      message_type: Message type for field.  Must be subclass of Message,
        or a string naming one to be resolved lazily.
      number: Number of field.  Must be unique per message class.
      required: Whether or not field is required.  Mutually exclusive to
        'repeated'.
      repeated: Whether or not field is repeated.  Mutually exclusive to
        'required'.
      variant: Wire-format variant hint.
    Raises:
      FieldDefinitionError when invalid message_type is provided.
    """
    # Accept either a type name (string, resolved lazily) or a strict
    # subclass of Message; Message itself is rejected.
    valid_type = (isinstance(message_type, six.string_types) or
                  (message_type is not Message and
                   isinstance(message_type, type) and
                   issubclass(message_type, Message)))
    if not valid_type:
        raise FieldDefinitionError(
            'Invalid message class: %s' % message_type)
    if isinstance(message_type, six.string_types):
        # Defer resolution of the named type until .type is first read.
        self.__type_name = message_type
        self.__type = None
    else:
        self.__type = message_type
    super(MessageField, self).__init__(number,
                                       required=required,
                                       repeated=repeated,
                                       variant=variant)
def type(self):
"""Message type used for field."""
if self.__type is None:
message_type = find_definition(
self.__type_name, self.message_definition())
if not (message_type is not Message and
isinstance(message_type, type) and
issubclass(message_type, Message)):
raise FieldDefinitionError(
'Invalid message class: %s' % message_type)
self.__type = message_type
return self.__type | catapult-project/catapult | [
1835,
570,
1835,
1039,
1429033745
] |
def message_type(self):
"""Underlying message type used for serialization.
Will always be a sub-class of Message. This is different from type
which represents the python value that message_type is mapped to for
use by the user.
"""
return self.type | catapult-project/catapult | [
1835,
570,
1835,
1039,
1429033745
] |
def value_to_message(self, value):
    """Convert a value instance to a message.

    Used by serializers to convert Python user types to underlying
    messages for transmission.

    Args:
      value: A value of type self.type.
    Returns:
      An instance of type self.message_type.
    Raises:
      EncodeError when value is not an instance of self.type.
    """
    expected = self.type
    if isinstance(value, expected):
        return value
    raise EncodeError('Expected type %s, got %s: %r' %
                      (expected.__name__,
                       type(value).__name__,
                       value))
def __init__(self, enum_type, number, **kwargs):
"""Constructor.
Args:
enum_type: Enum type for field. Must be subclass of Enum.
number: Number of field. Must be unique per message class.
required: Whether or not field is required. Mutually exclusive to
'repeated'.
repeated: Whether or not field is repeated. Mutually exclusive to
'required'.
variant: Wire-format variant hint.
default: Default value for field if not found in stream.
Raises:
FieldDefinitionError when invalid enum_type is provided.
"""
valid_type = (isinstance(enum_type, six.string_types) or
(enum_type is not Enum and
isinstance(enum_type, type) and
issubclass(enum_type, Enum)))
if not valid_type:
raise FieldDefinitionError('Invalid enum type: %s' % enum_type)
if isinstance(enum_type, six.string_types):
self.__type_name = enum_type
self.__type = None
else:
self.__type = enum_type
super(EnumField, self).__init__(number, **kwargs) | catapult-project/catapult | [
1835,
570,
1835,
1039,
1429033745
] |
def type(self):
"""Enum type used for field."""
if self.__type is None:
found_type = find_definition(
self.__type_name, self.message_definition())
if not (found_type is not Enum and
isinstance(found_type, type) and
issubclass(found_type, Enum)):
raise FieldDefinitionError(
'Invalid enum type: %s' % found_type)
self.__type = found_type
return self.__type | catapult-project/catapult | [
1835,
570,
1835,
1039,
1429033745
] |
def default(self):
    """Default for enum field.

    Will cause resolution of Enum type and unresolved default value.
    """
    # The resolved default is cached on first access; the AttributeError
    # probe doubles as the "not yet computed" check.
    try:
        return self.__resolved_default
    except AttributeError:
        resolved_default = super(EnumField, self).default
        if isinstance(resolved_default, (six.string_types,
                                         six.integer_types)):
            # A string/int default names an enum member: resolve it
            # through the (possibly lazily loaded) enum type.
            # pylint:disable=not-callable
            resolved_default = self.type(resolved_default)
        self.__resolved_default = resolved_default
        return self.__resolved_default
def plural(word, pos=NOUN, classical=True, custom={}):
""" Returns the plural of a given word. | RensaProject/nodebox_linguistics_extended | [
2,
11,
2,
1,
1479284521
] |
def noun_plural(word, classical=True, custom={}):
return plural(word, NOUN, classical, custom) | RensaProject/nodebox_linguistics_extended | [
2,
11,
2,
1,
1479284521
] |
def create(self, cr, uid, vals, context=None):
vals_reformated = self._generic_reformat_phonenumbers(
cr, uid, None, vals, context=context)
return super(CrmLead, self).create(
cr, uid, vals_reformated, context=context) | cgstudiomap/cgstudiomap | [
3,
2,
3,
45,
1427859918
] |
def name_get(self, cr, uid, ids, context=None):
if context is None:
context = {}
if context.get('callerid'):
res = []
if isinstance(ids, (int, long)):
ids = [ids]
for lead in self.browse(cr, uid, ids, context=context):
if lead.partner_name and lead.contact_name:
name = u'%s (%s)' % (lead.contact_name, lead.partner_name)
elif lead.partner_name:
name = lead.partner_name
elif lead.contact_name:
name = lead.contact_name
else:
name = lead.name
res.append((lead.id, name))
return res
else:
return super(CrmLead, self).name_get(
cr, uid, ids, context=context) | cgstudiomap/cgstudiomap | [
3,
2,
3,
45,
1427859918
] |
def create(self, cr, uid, vals, context=None):
vals_reformated = self._generic_reformat_phonenumbers(
cr, uid, None, vals, context=context)
return super(CrmPhonecall, self).create(
cr, uid, vals_reformated, context=context) | cgstudiomap/cgstudiomap | [
3,
2,
3,
45,
1427859918
] |
def testNumber(self):
"""Verifies basic behavior of ee.Number."""
num = ee.Number(1)
self.assertEquals(1, num.encode())
computed = ee.Number(1).add(2)
self.assertTrue(isinstance(computed, ee.Number))
self.assertEquals(ee.ApiFunction.lookup('Number.add'), computed.func)
self.assertEquals({'left': ee.Number(1), 'right': ee.Number(2)},
computed.args) | mortcanty/earthengine | [
127,
52,
127,
4,
1487864153
] |
def _call_if_exists(parent, attr):
    """Invoke parent.<attr>() when present; a missing attribute is a no-op."""
    getattr(parent, attr, lambda: None)()
def __init__(self, tests=()):
self._tests = []
self._removed_tests = 0
self.addTests(tests) | Microvellum/Fluid-Designer | [
69,
30,
69,
37,
1461884765
] |
def __eq__(self, other):
    """Suites compare equal when both the class and the test sequence match."""
    if isinstance(other, self.__class__):
        return list(self) == list(other)
    return NotImplemented
def countTestCases(self):
    """Total number of test cases, including ones already released."""
    # Tests removed for garbage collection were counted beforehand and
    # accumulated in self._removed_tests.
    return self._removed_tests + sum(
        child.countTestCases() for child in self if child)
def addTests(self, tests):
    """Add every test in *tests*; a bare string is rejected up front."""
    if isinstance(tests, str):
        raise TypeError("tests must be an iterable of tests, not a string")
    for case in tests:
        self.addTest(case)
def _removeTestAtIndex(self, index):
    """Stop holding a reference to the TestCase at index."""
    try:
        test = self._tests[index]
    except TypeError:
        # support for suite implementations that have overriden self._tests
        # with a non-indexable container
        pass
    else:
        # Some unittest tests add non TestCase/TestSuite objects to
        # the suite.  Only count objects that expose countTestCases().
        if hasattr(test, 'countTestCases'):
            self._removed_tests += test.countTestCases()
        # Drop the reference so the finished test can be garbage-collected.
        self._tests[index] = None
def debug(self):
"""Run the tests without collecting errors in a TestResult"""
for test in self:
test.debug() | Microvellum/Fluid-Designer | [
69,
30,
69,
37,
1461884765
] |
def run(self, result, debug=False):
    """Run every test in the suite, managing class/module fixtures.

    Args:
      result: TestResult that collects outcomes and stop requests.
      debug: If True, run tests via test.debug() so errors propagate.
    Returns:
      The same result object.
    """
    topLevel = False
    # Only the outermost suite in a nested run performs final teardown.
    if getattr(result, '_testRunEntered', False) is False:
        result._testRunEntered = topLevel = True
    for index, test in enumerate(self):
        if result.shouldStop:
            break
        if _isnotsuite(test):
            # Crossing into a (possibly new) test class: tear down the
            # previous class and set up module/class fixtures.
            self._tearDownPreviousClass(test, result)
            self._handleModuleFixture(test, result)
            self._handleClassSetUp(test, result)
            result._previousTestClass = test.__class__
            # Skip tests whose class or module level setup failed.
            if (getattr(test.__class__, '_classSetupFailed', False) or
                    getattr(result, '_moduleSetUpFailed', False)):
                continue
        if not debug:
            test(result)
        else:
            test.debug()
        if self._cleanup:
            # Release the reference so completed tests can be collected.
            self._removeTestAtIndex(index)
    if topLevel:
        self._tearDownPreviousClass(None, result)
        self._handleModuleTearDown(result)
        result._testRunEntered = False
    return result
def _handleClassSetUp(self, test, result):
previousClass = getattr(result, '_previousTestClass', None)
currentClass = test.__class__
if currentClass == previousClass:
return
if result._moduleSetUpFailed:
return
if getattr(currentClass, "__unittest_skip__", False):
return
try:
currentClass._classSetupFailed = False
except TypeError:
# test may actually be a function
# so its class will be a builtin-type
pass
setUpClass = getattr(currentClass, 'setUpClass', None)
if setUpClass is not None:
_call_if_exists(result, '_setupStdout')
try:
setUpClass()
except Exception as e:
if isinstance(result, _DebugResult):
raise
currentClass._classSetupFailed = True
className = util.strclass(currentClass)
errorName = 'setUpClass (%s)' % className
self._addClassOrModuleLevelException(result, e, errorName)
finally:
_call_if_exists(result, '_restoreStdout') | Microvellum/Fluid-Designer | [
69,
30,
69,
37,
1461884765
] |
def _handleModuleFixture(self, test, result):
previousModule = self._get_previous_module(result)
currentModule = test.__class__.__module__
if currentModule == previousModule:
return
self._handleModuleTearDown(result)
result._moduleSetUpFailed = False
try:
module = sys.modules[currentModule]
except KeyError:
return
setUpModule = getattr(module, 'setUpModule', None)
if setUpModule is not None:
_call_if_exists(result, '_setupStdout')
try:
setUpModule()
except Exception as e:
if isinstance(result, _DebugResult):
raise
result._moduleSetUpFailed = True
errorName = 'setUpModule (%s)' % currentModule
self._addClassOrModuleLevelException(result, e, errorName)
finally:
_call_if_exists(result, '_restoreStdout') | Microvellum/Fluid-Designer | [
69,
30,
69,
37,
1461884765
] |
def _handleModuleTearDown(self, result):
previousModule = self._get_previous_module(result)
if previousModule is None:
return
if result._moduleSetUpFailed:
return
try:
module = sys.modules[previousModule]
except KeyError:
return
tearDownModule = getattr(module, 'tearDownModule', None)
if tearDownModule is not None:
_call_if_exists(result, '_setupStdout')
try:
tearDownModule()
except Exception as e:
if isinstance(result, _DebugResult):
raise
errorName = 'tearDownModule (%s)' % previousModule
self._addClassOrModuleLevelException(result, e, errorName)
finally:
_call_if_exists(result, '_restoreStdout') | Microvellum/Fluid-Designer | [
69,
30,
69,
37,
1461884765
] |
def __init__(self, description):
self.description = description | Microvellum/Fluid-Designer | [
69,
30,
69,
37,
1461884765
] |
def shortDescription(self):
return None | Microvellum/Fluid-Designer | [
69,
30,
69,
37,
1461884765
] |
def __str__(self):
return self.id() | Microvellum/Fluid-Designer | [
69,
30,
69,
37,
1461884765
] |
def __call__(self, result):
return self.run(result) | Microvellum/Fluid-Designer | [
69,
30,
69,
37,
1461884765
] |
def _isnotsuite(test):
    "A crude way to tell apart testcases and suites with duck-typing"
    # A suite is anything iterable; iter() raising TypeError marks a case.
    try:
        iter(test)
        return False
    except TypeError:
        return True
def get_lastrowid(self):
return self.cursor.lastrowid | Microvellum/Fluid-Designer | [
69,
30,
69,
37,
1461884765
] |
def visit_mod_binary(self, binary, operator, **kw):
if self.dialect._mysqlconnector_double_percents:
return self.process(binary.left, **kw) + " %% " + \
self.process(binary.right, **kw)
else:
return self.process(binary.left, **kw) + " % " + \
self.process(binary.right, **kw) | Microvellum/Fluid-Designer | [
69,
30,
69,
37,
1461884765
] |
def escape_literal_column(self, text):
    """Double literal % signs when the driver requires it (py2, pre-2.0)."""
    if not self.dialect._mysqlconnector_double_percents:
        return text
    return text.replace('%', '%%')
def _escape_identifier(self, value):
value = value.replace(self.escape_quote, self.escape_to_quote)
if self.dialect._mysqlconnector_double_percents:
return value.replace("%", "%%")
else:
return value | Microvellum/Fluid-Designer | [
69,
30,
69,
37,
1461884765
] |
def result_processor(self, dialect, coltype):
"""MySQL-connector already converts mysql bits, so."""
return None | Microvellum/Fluid-Designer | [
69,
30,
69,
37,
1461884765
] |
def supports_unicode_statements(self):
return util.py3k or self._mysqlconnector_version_info > (2, 0) | Microvellum/Fluid-Designer | [
69,
30,
69,
37,
1461884765
] |
def dbapi(cls):
from mysql import connector
return connector | Microvellum/Fluid-Designer | [
69,
30,
69,
37,
1461884765
] |
def _mysqlconnector_version_info(self):
if self.dbapi and hasattr(self.dbapi, '__version__'):
m = re.match(r'(\d+)\.(\d+)(?:\.(\d+))?',
self.dbapi.__version__)
if m:
return tuple(
int(x)
for x in m.group(1, 2, 3)
if x is not None) | Microvellum/Fluid-Designer | [
69,
30,
69,
37,
1461884765
] |
def _mysqlconnector_double_percents(self):
return not util.py3k and self._mysqlconnector_version_info < (2, 0) | Microvellum/Fluid-Designer | [
69,
30,
69,
37,
1461884765
] |
def _detect_charset(self, connection):
return connection.connection.charset | Microvellum/Fluid-Designer | [
69,
30,
69,
37,
1461884765
] |
def is_disconnect(self, e, connection, cursor):
errnos = (2006, 2013, 2014, 2045, 2055, 2048)
exceptions = (self.dbapi.OperationalError, self.dbapi.InterfaceError)
if isinstance(e, exceptions):
return e.errno in errnos or \
"MySQL Connection not available." in str(e)
else:
return False | Microvellum/Fluid-Designer | [
69,
30,
69,
37,
1461884765
] |
def _compat_fetchone(self, rp, charset=None):
return rp.fetchone() | Microvellum/Fluid-Designer | [
69,
30,
69,
37,
1461884765
] |
def get_sandbox_env(env):
"""Returns the environment flags needed for the SUID sandbox to work."""
extra_env = {}
chrome_sandbox_path = env.get(CHROME_SANDBOX_ENV, CHROME_SANDBOX_PATH)
# The above would silently disable the SUID sandbox if the env value were
# an empty string. We don't want to allow that. http://crbug.com/245376
# TODO(jln): Remove this check once it's no longer possible to disable the
# sandbox that way.
if not chrome_sandbox_path:
chrome_sandbox_path = CHROME_SANDBOX_PATH
extra_env[CHROME_SANDBOX_ENV] = chrome_sandbox_path
return extra_env | Teamxrtc/webrtc-streaming-node | [
6,
5,
6,
2,
1449773735
] |
def fix_python_path(cmd):
    """Returns the fixed command line to call the right python executable."""
    fixed = list(cmd)
    if fixed[0] == 'python':
        # Replace the generic 'python' token with the current interpreter.
        fixed[0] = sys.executable
    elif fixed[0].endswith('.py'):
        # A bare script path: prepend the interpreter.
        fixed = [sys.executable] + fixed
    return fixed
def get_sanitizer_symbolize_command(json_path=None, executable_path=None):
    """Construct the command to invoke offline symbolization script."""
    command = [sys.executable, '../tools/valgrind/asan/asan_symbolize.py']
    # Append each optional flag only when its value was supplied.
    optional = [
        ('--test-summary-json-file=%s', json_path),
        ('--executable-path=%s', executable_path),
    ]
    command.extend(fmt % arg for fmt, arg in optional if arg is not None)
    return command
def symbolize_snippets_in_json(cmd, env):
"""Symbolize output snippets inside the JSON test summary."""
json_path = get_json_path(cmd)
if json_path is None:
return
try:
symbolize_command = get_sanitizer_symbolize_command(
json_path=json_path, executable_path=cmd[0])
p = subprocess.Popen(symbolize_command, stderr=subprocess.PIPE, env=env)
(_, stderr) = p.communicate()
except OSError as e:
print 'Exception while symbolizing snippets: %s' % e
if p.returncode != 0:
print "Error: failed to symbolize snippets in JSON:\n"
print stderr | Teamxrtc/webrtc-streaming-node | [
6,
5,
6,
2,
1449773735
] |
def main():
return run_executable(sys.argv[1:], os.environ.copy()) | Teamxrtc/webrtc-streaming-node | [
6,
5,
6,
2,
1449773735
] |
def package_installed(module, name, category):
cmd = [module.get_bin_path('pkginfo', True)]
cmd.append('-q')
if category:
cmd.append('-c')
cmd.append(name)
rc, out, err = module.run_command(' '.join(cmd))
if rc == 0:
return True
else:
return False | mattbernst/polyhartree | [
4,
1,
4,
7,
1379927298
] |
def run_command(module, cmd):
progname = cmd[0]
cmd[0] = module.get_bin_path(progname, True)
return module.run_command(cmd) | mattbernst/polyhartree | [
4,
1,
4,
7,
1379927298
] |
def package_uninstall(module, name, src, category):
    """Remove an SVR4 package via pkgrm.

    Args:
      module: AnsibleModule used to run commands.
      name: Package name, or category name when 'category' is true.
      src: Unused; kept for signature parity with the install path.
      category: If true, remove by category (pkgrm -Y).
    Returns:
      (rc, out, err) tuple from the pkgrm invocation.
    """
    adminfile = create_admin_file()
    try:
        if category:
            cmd = ['pkgrm', '-na', adminfile, '-Y', name]
        else:
            cmd = ['pkgrm', '-na', adminfile, name]
        (rc, out, err) = run_command(module, cmd)
    finally:
        # BUG FIX: always remove the temporary admin file; the original
        # leaked it whenever run_command raised an exception.
        os.unlink(adminfile)
    return (rc, out, err)
def AddBuildTypeOption(option_parser):
    """Decorates OptionParser with build type option."""
    # The BUILDTYPE environment variable, when set, supplies the default.
    default_build_type = os.environ.get('BUILDTYPE', 'Debug')
    option_parser.add_option('--debug', action='store_const', const='Debug',
                             dest='build_type', default=default_build_type,
                             help='If set, run test suites under out/Debug. '
                                  'Default is env var BUILDTYPE or Debug')
    option_parser.add_option('--release', action='store_const', const='Release',
                             dest='build_type',
                             help='If set, run test suites under out/Release. '
                                  'Default is env var BUILDTYPE or Debug.')
def _npSoftplus(self, np_features):
np_features = np.asarray(np_features)
zero = np.asarray(0).astype(np_features.dtype)
return np.logaddexp(zero, np_features) | npuichigo/ttsflow | [
16,
6,
16,
1,
1500635633
] |
def testNumbers(self):
    """Check softplus on representative values for each float dtype."""
    for t in [np.float16, np.float32, np.float64]:
        self._testSoftplus(
            np.array([[-9, 7, -5, 3, -1], [1, -3, 5, -7, 9]]).astype(t),
            use_gpu=False)
        self._testSoftplus(
            np.array([[-9, 7, -5, 3, -1], [1, -3, 5, -7, 9]]).astype(t),
            use_gpu=True)
        log_eps = np.log(np.finfo(t).eps)
        one = t(1)
        ten = t(10)
        # Values straddling +/-log(eps), where softplus transitions
        # between its linear and exponential asymptotic regimes.
        boundary_values = [
            log_eps, log_eps - one, log_eps + one, log_eps - ten,
            log_eps + ten, -log_eps, -log_eps - one, -log_eps + one,
            -log_eps - ten, -log_eps + ten
        ]
        self._testSoftplus(boundary_values, use_gpu=False)
        # BUG FIX: the GPU call previously dropped a comma, fusing
        # 'log_eps + ten' and '-log_eps' into a single element, so the
        # GPU path tested one fewer boundary value than the CPU path.
        self._testSoftplus(boundary_values, use_gpu=True)
def testGradGrad(self):
with self.test_session():
x = constant_op.constant(
[-0.9, -0.7, -0.5, -0.3, -0.1, 0.1, 0.3, 0.5, 0.7, 0.9],
shape=[2, 5],
name="x")
y = nn_ops.softplus(x, name="softplus")
(grad,) = gradients_impl.gradients(y, x)
x_init = np.asarray(
[[-0.9, -0.7, -0.5, -0.3, -0.1], [0.1, 0.3, 0.5, 0.7, 0.9]],
dtype=np.float32,
order="F")
err = gradient_checker.compute_gradient_error(
x, [2, 5], grad, [2, 5], x_init_value=x_init)
print("softplus (float) gradient of gradient err = ", err)
self.assertLess(err, 5e-5) | npuichigo/ttsflow | [
16,
6,
16,
1,
1500635633
] |
def testWarnInts(self):
# Running the op triggers address sanitizer errors, so we just make it
nn_ops.softplus(constant_op.constant(7)) | npuichigo/ttsflow | [
16,
6,
16,
1,
1500635633
] |
def testStringToOneHashBucketFast(self):
with self.test_session():
input_string = array_ops.placeholder(dtypes.string)
output = string_ops.string_to_hash_bucket_fast(input_string, 1)
result = output.eval(feed_dict={input_string: ['a', 'b', 'c']})
self.assertAllEqual([0, 0, 0], result) | npuichigo/ttsflow | [
16,
6,
16,
1,
1500635633
] |
def testStringToOneHashBucketLegacyHash(self):
with self.test_session():
input_string = array_ops.placeholder(dtypes.string)
output = string_ops.string_to_hash_bucket(input_string, 1)
result = output.eval(feed_dict={input_string: ['a', 'b', 'c']})
self.assertAllEqual([0, 0, 0], result) | npuichigo/ttsflow | [
16,
6,
16,
1,
1500635633
] |
def testStringToOneHashBucketStrongOneHashBucket(self):
with self.test_session():
input_string = constant_op.constant(['a', 'b', 'c'])
output = string_ops.string_to_hash_bucket_strong(
input_string, 1, key=[123, 345])
self.assertAllEqual([0, 0, 0], output.eval()) | npuichigo/ttsflow | [
16,
6,
16,
1,
1500635633
] |
def testStringToHashBucketsStrongInvalidKey(self):
with self.test_session():
input_string = constant_op.constant(['a', 'b', 'c'])
with self.assertRaisesOpError('Key must have 2 elements'):
string_ops.string_to_hash_bucket_strong(
input_string, 10, key=[98765]).eval() | npuichigo/ttsflow | [
16,
6,
16,
1,
1500635633
] |
def __init__(self, t, name, unprinted_runtime=False):
self.t = t
self.name = name
self.start = None
self.unprinted_runtime = unprinted_runtime | SymbiFlow/fpga-tool-perf | [
78,
23,
78,
85,
1529693672
] |
def __exit__(self, type, value, traceback):
end = time.time()
self.t.add_runtime(
self.name,
end - self.start,
unprinted_runtime=self.unprinted_runtime
) | SymbiFlow/fpga-tool-perf | [
78,
23,
78,
85,
1529693672
] |
def get_yosys_resources(yosys_log):
    """Parse a Yosys log file and return its cell-usage statistics.

    Args:
      yosys_log: Path to the Yosys log file.
    Returns:
      dict mapping cell type name -> count.  NOTE(review): counts are the
      raw strings split from the log, not ints — confirm callers expect
      strings before changing.
    """
    with open(yosys_log, "r") as f:
        data = f.readlines()
    resources = dict()
    print_stats = False
    proc_cells = False
    for line in data:
        # Latch once the "Printing statistics" section begins; stay latched.
        print_stats = "Printing statistics" in line or print_stats
        if not print_stats:
            continue
        if proc_cells and line.strip():
            cell, count = line.split()
            resources[cell] = count
        # Begin collecting after the "Number of cells" header; a blank
        # line ends the table (the and-with-strip() resets the flag).
        proc_cells = ("Number of cells" in line or proc_cells) and line.strip()
    return resources
def which(program, get_dir=False):
    """Locate *program*: verify an explicit path, or search PATH.

    Returns the executable path (or its directory when get_dir is True),
    or None when nothing runnable is found.
    """
    def runnable(candidate):
        # Executable means: a regular file with the execute bit usable by us.
        return os.path.isfile(candidate) and os.access(candidate, os.X_OK)

    head, _ = os.path.split(program)
    if head:
        # An explicit path was given: accept it only if it is runnable.
        return program if runnable(program) else None
    for entry in os.environ["PATH"].split(os.pathsep):
        candidate = os.path.join(entry, program)
        if runnable(candidate):
            return entry if get_dir else candidate
    return None
def __init__(self, plotly_name="textfont", parent_name="funnelarea", **kwargs):
super(TextfontValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
data_class_str=kwargs.pop("data_class_str", "Textfont"),
data_docs=kwargs.pop(
"data_docs",
"""
color
colorsrc
Sets the source reference on Chart Studio Cloud
for color .
family
HTML font family - the typeface that will be
applied by the web browser. The web browser
will only be able to apply a font if it is
available on the system which it operates.
Provide multiple font families, separated by
commas, to indicate the preference in which to
apply fonts if they aren't available on the
system. The Chart Studio Cloud (at
https://chart-studio.plotly.com or on-premise)
generates images on a server, where only a
select number of fonts are installed and
supported. These include "Arial", "Balto",
"Courier New", "Droid Sans",, "Droid Serif",
"Droid Sans Mono", "Gravitas One", "Old
Standard TT", "Open Sans", "Overpass", "PT Sans
Narrow", "Raleway", "Times New Roman".
familysrc
Sets the source reference on Chart Studio Cloud
for family .
size
sizesrc
Sets the source reference on Chart Studio Cloud
for size . | plotly/python-api | [
13052,
2308,
13052,
1319,
1385013188
] |
def __init__(self, html,isForeign = True, url=None):
"""Generate the document
:param html: string of the html content.
:param url: url of the html
"""
self.url = url
self.html = html
self.link_num = 0
self.link_text_len = 0
self.total_text_tag_num = 0
self.total_text_len = 0
self.text_tag_num = 0
self.text_tag_text_len = 0
self.is_foreign = isForeign | desion/tidy_page | [
5,
4,
5,
1,
1487582485
] |
def _parse(self, html):
soup = BeautifulSoup(html, "lxml")
return soup | desion/tidy_page | [
5,
4,
5,
1,
1487582485
] |
def prettify(self):
"""Returns prettify document"""
return self.doc.prettify("utf-8") | desion/tidy_page | [
5,
4,
5,
1,
1487582485
] |
def content(self):
"""get the content of html page"""
clean_spam(self.doc)
candidates = self.get_candidates()
best_node = self.best_candidates(candidates)
if best_node:
return self.purify(best_node["elem"])
else:
return None | desion/tidy_page | [
5,
4,
5,
1,
1487582485
] |
def walk(self):
"""walk the dom tree and get the info of the page"""
g = self.doc.recursiveChildGenerator()
while True:
try:
tag = g.next()
if not isinstance(tag,unicode):
if tag.name == "a" and ((self.is_foreign and len(tag.text) > 10) or (not self.is_foreign and len(tag.text) > 4)):
self.link_num += 1
self.link_text_len += len(tag.getText())
elif TEXT_TAG_COLLECTION.has_key(tag.name):
tag_text = tag.contents[0] if len(tag.contents) > 0 and isinstance (tag.contents[0], unicode) else ""
if len(tag_text) > 0:
self.text_tag_num += 1
self.text_tag_text_len += len(tag_text)
else:
self.total_text_len += len(tag)
except StopIteration:
break | desion/tidy_page | [
5,
4,
5,
1,
1487582485
] |
def text_weight(self, elem):
content_score = 1
long_text_line = 0
block_size = 3
inner_text = ""
for string in elem.stripped_strings:
if (self.is_foreign and len(string) > 100) or (not self.is_foreign and len(string) > 50):
long_text_line += 1
inner_text += string
else:
inner_text += string
"""for punch"""
if len(inner_text) > 0:
if self.is_foreign:
splits = re.split(u",|\.|\?", inner_text)
content_score += len(splits)
else:
splits = re.split(u"|,|。|?", inner_text)
content_score += len(splits)
"""for text len"""
if self.is_foreign:
content_score += min((len(inner_text) / 100), 5)
else:
content_score += min((len(inner_text) / 20), 5)
"""for text block"""
block_array = map(len, elem.strings)
block_set = []
for i in range(0, len(block_array) - block_size):
block_text_len = 0
for j in range(i, i + block_size):
block_text_len += (block_array[j] - 1)
block_set.append(block_text_len)
short_block = 0
blk_text_len = 0
for block in block_set:
blk_text_len += block
if (self.is_foreign and block < 50) or (not self.is_foreign and block < 10):
short_block += 1
short_block_ratio = float(short_block) / max(len(block_set), 1)
if short_block_ratio > 0.3:
content_score -= 10
return content_score | desion/tidy_page | [
5,
4,
5,
1,
1487582485
] |
def class_weight(self, elem):
    """Score an element by its class/id attributes.

    Adds 25 for names matching positiveRe and subtracts 25 for names
    matching negativeRe, for each of the 'class' and 'id' attributes.
    """
    weight = 0
    for feature in [elem.get('class', None), elem.get('id', None)]:
        try:
            if feature:
                if REGEXES['negativeRe'].search(feature):
                    weight -= 25
                if REGEXES['positiveRe'].search(feature):
                    weight += 25
        except Exception:
            # BUG FIX: the original bare 'except:' also swallowed
            # KeyboardInterrupt/SystemExit; only real errors (e.g. a
            # non-string attribute breaking the regex search) are skipped.
            continue
    return weight
def score_node(self, elem):
class_score = self.class_weight(elem)
node_score = self.node_weight(elem)
content_score = class_score + node_score
return {
'score': content_score,
'elem': elem
} | desion/tidy_page | [
5,
4,
5,
1,
1487582485
] |
def get_link_tag_density(self, elem):
    """Ratio of link text length to total text length (guarding div-by-zero)."""
    denominator = max(self.total_text_len, 1)
    return float(self.link_text_len) / denominator
def purify(self, best_elem):
del best_elem['class']
del best_elem['id']
g = best_elem.recursiveChildGenerator()
while True:
try:
tag = g.next()
if tag is None:
break
#text node
if not isinstance(tag,unicode) and tag is not None:
if tag.name == 'a':
tag.unwrap()
elif tag.name == 'img':
img_src = tag.get('src')
data_src = tag.get('data-src')
if img_src is None and data_src is None:
tag.extract()
else:
if data_src is not None:
if data_src.startswith("http"):
img_src = data_src
else:
img_src = self.domain + data_src
else:
if not img_src.startswith("http"):
img_src = self.domain + img_src
attr_names = [attr for attr in tag.attrs]
for attr in attr_names:
del tag[attr]
tag['src'] = img_src
del tag['class']
del tag['id']
continue
except StopIteration:
break
return best_elem.prettify("utf-8") | desion/tidy_page | [
5,
4,
5,
1,
1487582485
] |
def __init__(
self, plotly_name="colorsrc", parent_name="waterfall.insidetextfont", **kwargs | plotly/python-api | [
13052,
2308,
13052,
1319,
1385013188
] |
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config | Azure/azure-sdk-for-python | [
3526,
2256,
3526,
986,
1335285972
] |
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', pattern=r'^[0-9A-Fa-f]{8}-([0-9A-Fa-f]{4}-){3}[0-9A-Fa-f]{12}$'),
'regulatoryComplianceStandardName': self._serialize.url("regulatory_compliance_standard_name", regulatory_compliance_standard_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
if filter is not None:
query_parameters['$filter'] = self._serialize.query("filter", filter, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request | Azure/azure-sdk-for-python | [
3526,
2256,
3526,
986,
1335285972
] |
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response | Azure/azure-sdk-for-python | [
3526,
2256,
3526,
986,
1335285972
] |
def get(
self,
regulatory_compliance_standard_name, # type: str
regulatory_compliance_control_name, # type: str
**kwargs # type: Any | Azure/azure-sdk-for-python | [
3526,
2256,
3526,
986,
1335285972
] |
def gen_jump_list(ft, name, data):
    """Build a quickfix-style jump list from LSP location results.

    Returns a list of dicts with filename/lnum/col/name keys; empty when
    *data* is falsy or its first element is None.
    """
    if not data or data[0] is None:
        return []
    jumps = []
    for item in data[0]:
        uri = parse_uri(item['uri'])
        if ft == 'go':
            # Go URIs encode '!' as %21; undo that for display.
            uri = uri.replace('%21', '!')
        pos = item['range']['start']
        jumps.append({
            'filename': uri,
            'lnum': pos['line'] + 1,
            'col': pos['character'] + 1,
            'name': name,
        })
    return jumps
def format_text(data):
if not data:
return
for item in data[0]:
pass | maralla/completor.vim | [
1205,
59,
1205,
84,
1473832352
] |
def _shouldHidden(line):
    """Return True when *line* contains any configured hidden-line marker."""
    return any(marker in line for marker in hiddenLines)
def setThreadPool(threadPool):
"""Set the default thread pool to use for executing new tasks.
@param threadPool: The new default thread pool.
@return: The previous default thread pool. This is intially None.
"""
global _threadPool, _threadPoolLock
_threadPoolLock.acquire()
try:
oldThreadPool = _threadPool
_threadPool = threadPool
finally:
_threadPoolLock.release()
return oldThreadPool | lewissbaker/cake | [
16,
7,
16,
11,
1377774432
] |
def _makeTasks(value):
    """Normalise a task argument into a list.

    None -> [], a single Task -> [task], any other iterable -> list(iterable).
    """
    if value is None:
        return []
    if isinstance(value, Task):
        return [value]
    return list(value)
def __init__(self, func=None):
"""Construct a task given a function. | lewissbaker/cake | [
16,
7,
16,
11,
1377774432
] |
def getCurrent():
"""Get the currently executing task. | lewissbaker/cake | [
16,
7,
16,
11,
1377774432
] |
def state(self):
"""Get the state of this task.
"""
return self._state | lewissbaker/cake | [
16,
7,
16,
11,
1377774432
] |
def parent(self):
"""Get the parent of this task.
The parent task is the task that created this task.
"""
return self._parent | lewissbaker/cake | [
16,
7,
16,
11,
1377774432
] |
def required(self):
"""True if this task is required to execute, False if it
has not yet been required to execute.
"""
return self._required | lewissbaker/cake | [
16,
7,
16,
11,
1377774432
] |
def started(self):
"""True if this task has been started. | lewissbaker/cake | [
16,
7,
16,
11,
1377774432
] |
def completed(self):
"""True if this task has finished execution or has been cancelled.
"""
s = self._state
return s is Task.State.SUCCEEDED or s is Task.State.FAILED | lewissbaker/cake | [
16,
7,
16,
11,
1377774432
] |
def succeeded(self):
"""True if this task successfully finished execution.
"""
return self._state is Task.State.SUCCEEDED | lewissbaker/cake | [
16,
7,
16,
11,
1377774432
] |
def failed(self):
"""True if this task failed or was cancelled.
"""
return self._state is Task.State.FAILED | lewissbaker/cake | [
16,
7,
16,
11,
1377774432
] |
def result(self):
    """If the task has completed successfully then holds the
    return value of the task, otherwise raises AttributeError.
    """
    if self.succeeded:
        # A task's result may itself be another Task; follow the chain
        # until a concrete (non-Task) result is reached.
        task = self
        while isinstance(task._result, Task):
            task = task._result
        return task._result
    else:
        raise AttributeError("result only available on successful tasks")
def lazyStartAfter(self, other, threadPool=None):
"""Start this task only if required as a dependency of another 'required' task.
But do not start this task until the 'other' tasks have completed.
If any of the other tasks complete with failure then this task will complete
with failure without being executed.
"""
self._start(other=other, immediate=False, required=False, threadPool=threadPool) | lewissbaker/cake | [
16,
7,
16,
11,
1377774432
] |
def startAfter(self, other, immediate=False, threadPool=None):
"""Start this task after other tasks have completed. | lewissbaker/cake | [
16,
7,
16,
11,
1377774432
] |
def _start(self, other, immediate, required, threadPool):
immediate = bool(immediate)
required = bool(required)
otherTasks = _makeTasks(other)
if threadPool is None:
threadPool = getDefaultThreadPool()
self._lock.acquire()
try:
if self._state is not Task.State.NEW:
raise TaskError("task already started")
self._state = Task.State.WAITING_FOR_START
self._startAfterCount = len(otherTasks) + 1
self._immediate = immediate
self._threadPool = threadPool
if required:
self._required = True
else:
required = self._required | lewissbaker/cake | [
16,
7,
16,
11,
1377774432
] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.