body_hash (string, 64 chars) | body (string, 23-109k chars) | docstring (string, 1-57k chars) | path (string, 4-198 chars) | name (string, 1-115 chars) | repository_name (string, 7-111 chars) | repository_stars (float64, 0-191k) | lang (string, 1 class: python) | body_without_docstring (string, 14-108k chars) | unified (string, 45-133k chars)
---|---|---|---|---|---|---|---|---|---
60a6bda1287651dad978a8e7b11bfa1b9e3e59d7bce54e6e974c699d95c16d3c | def test_invalid_file():
'Test building an UnstructuredData with an invalid file.'
validator = RecordValidator(True)
ud = UnstructuredData('file:///var', FileType.TEXT)
validator.validate_unstructured_data(ud)
assert (ud.accessible == 'not a file') | Test building an UnstructuredData with an invalid file. | tests/zeffTestSuite/record/test_unstructureddata.py | test_invalid_file | ziff/ZeffClient | 1 | python | def test_invalid_file():
validator = RecordValidator(True)
ud = UnstructuredData('file:///var', FileType.TEXT)
validator.validate_unstructured_data(ud)
assert (ud.accessible == 'not a file') | def test_invalid_file():
validator = RecordValidator(True)
ud = UnstructuredData('file:///var', FileType.TEXT)
validator.validate_unstructured_data(ud)
assert (ud.accessible == 'not a file')<|docstring|>Test building an UnstructuredData with an invalid file.<|endoftext|> |
23f9d98d7c50ff360acbe95a4904676691120287f72fabff777b8b5e4b5783a4 | def test_permissions_file():
'Test building an UnstructuredData with no read permissions.'
validator = RecordValidator(True)
ud = UnstructuredData('file:///etc/sudoers', FileType.TEXT)
validator.validate_unstructured_data(ud)
assert (ud.accessible != 'OK') | Test building an UnstructuredData with no read permissions. | tests/zeffTestSuite/record/test_unstructureddata.py | test_permissions_file | ziff/ZeffClient | 1 | python | def test_permissions_file():
validator = RecordValidator(True)
ud = UnstructuredData('file:///etc/sudoers', FileType.TEXT)
validator.validate_unstructured_data(ud)
assert (ud.accessible != 'OK') | def test_permissions_file():
validator = RecordValidator(True)
ud = UnstructuredData('file:///etc/sudoers', FileType.TEXT)
validator.validate_unstructured_data(ud)
assert (ud.accessible != 'OK')<|docstring|>Test building an UnstructuredData with no read permissions.<|endoftext|> |
b11c98eef1171840424975a5ec4ad5653ca66f852bdc8c45fc5db751b3148c95 | def test_invalid_url_scheme():
'Test building an UnstructuredData with an invalid URL scheme.'
validator = RecordValidator(True)
ud = UnstructuredData('spam://example.com/', FileType.TEXT)
validator.validate_unstructured_data(ud)
assert ud.accessible.lower().startswith('unknown url scheme') | Test building an UnstructuredData with an invalid URL scheme. | tests/zeffTestSuite/record/test_unstructureddata.py | test_invalid_url_scheme | ziff/ZeffClient | 1 | python | def test_invalid_url_scheme():
validator = RecordValidator(True)
ud = UnstructuredData('spam://example.com/', FileType.TEXT)
validator.validate_unstructured_data(ud)
assert ud.accessible.lower().startswith('unknown url scheme') | def test_invalid_url_scheme():
validator = RecordValidator(True)
ud = UnstructuredData('spam://example.com/', FileType.TEXT)
validator.validate_unstructured_data(ud)
assert ud.accessible.lower().startswith('unknown url scheme')<|docstring|>Test building an UnstructuredData with an invalid URL scheme.<|endoftext|> |
6d4ff614967140632b56448b6d8b93d765b6f71103face333f091adebf25b453 | def test_invalid_mediatype():
'Attempt to set an invalid media type.'
validator = RecordValidator(True)
with pytest.raises(TypeError):
ud = UnstructuredData('http://example.com', 'InvalidMedia')
validator.validate_unstructured_data(ud) | Attempt to set an invalid media type. | tests/zeffTestSuite/record/test_unstructureddata.py | test_invalid_mediatype | ziff/ZeffClient | 1 | python | def test_invalid_mediatype():
validator = RecordValidator(True)
with pytest.raises(TypeError):
ud = UnstructuredData('http://example.com', 'InvalidMedia')
validator.validate_unstructured_data(ud) | def test_invalid_mediatype():
validator = RecordValidator(True)
with pytest.raises(TypeError):
ud = UnstructuredData('http://example.com', 'InvalidMedia')
validator.validate_unstructured_data(ud)<|docstring|>Attempt to set an invalid media type.<|endoftext|> |
97e0fab4a541b86e8d0690ad548ec8e9bf45b16095a8e91c0f0767ec15b7e4b2 | def time_in_range(start, end, x):
'Return true if x is in the range [start, end]'
if (start <= end):
return (start <= x <= end)
else:
return ((start <= x) or (x <= end)) | Return true if x is in the range [start, end] | mac-time-of-day-access.py | time_in_range | mattcarter11/hostapd-mac-tod-acl | 1 | python | def time_in_range(start, end, x):
if (start <= end):
return (start <= x <= end)
else:
return ((start <= x) or (x <= end)) | def time_in_range(start, end, x):
if (start <= end):
return (start <= x <= end)
else:
return ((start <= x) or (x <= end))<|docstring|>Return true if x is in the range [start, end]<|endoftext|> |
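The `else` branch above handles ranges that wrap past midnight (start > end). A minimal stand-alone sketch with `datetime.time` values (the sample times are illustrative, not from the original script):

```python
from datetime import time

def time_in_range(start, end, x):
    """Return True if x is in [start, end], including wrap-around ranges."""
    if start <= end:
        return start <= x <= end
    else:
        return (start <= x) or (x <= end)

assert time_in_range(time(9), time(17), time(12))      # plain 09:00-17:00 range
assert time_in_range(time(22), time(6), time(1))       # 22:00-06:00 wraps midnight
assert not time_in_range(time(22), time(6), time(12))  # noon falls outside it
```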
eed4b2f81324cb4c94f91fc79c55cc549e5fcaa6542e19eb71b68ebd42d2634e | def _connect(self):
'Connect to the postgres server'
try:
self._connection = self._pool.get_conn()
self._cursor = self._connection.cursor(cursor_factory=self._cursor_factory)
except Exception as e:
self._log_error(('postgresql connection failed: ' + e.message))
raise | Connect to the postgres server | pg_simple/pg_simple.py | _connect | glennib/pg_simple | 25 | python | def _connect(self):
try:
self._connection = self._pool.get_conn()
self._cursor = self._connection.cursor(cursor_factory=self._cursor_factory)
except Exception as e:
self._log_error(('postgresql connection failed: ' + e.message))
raise | def _connect(self):
try:
self._connection = self._pool.get_conn()
self._cursor = self._connection.cursor(cursor_factory=self._cursor_factory)
except Exception as e:
self._log_error(('postgresql connection failed: ' + e.message))
raise<|docstring|>Connect to the postgres server<|endoftext|> |
dec087994c9f2c6af3ffe1386cd6788403ac26fbba33d4910c8e1d24f58665b8 | def fetchone(self, table, fields='*', where=None, order=None, offset=None):
'Get a single result\n\n table = (str) table_name\n fields = (field1, field2 ...) list of fields to select\n where = ("parameterized_statement", [parameters])\n eg: ("id=%s and name=%s", [1, "test"])\n order = [field, ASC|DESC]\n '
cur = self._select(table, fields, where, order, 1, offset)
return cur.fetchone() | Get a single result
table = (str) table_name
fields = (field1, field2 ...) list of fields to select
where = ("parameterized_statement", [parameters])
eg: ("id=%s and name=%s", [1, "test"])
order = [field, ASC|DESC] | pg_simple/pg_simple.py | fetchone | glennib/pg_simple | 25 | python | def fetchone(self, table, fields='*', where=None, order=None, offset=None):
'Get a single result\n\n table = (str) table_name\n fields = (field1, field2 ...) list of fields to select\n where = ("parameterized_statement", [parameters])\n eg: ("id=%s and name=%s", [1, "test"])\n order = [field, ASC|DESC]\n '
cur = self._select(table, fields, where, order, 1, offset)
return cur.fetchone() | def fetchone(self, table, fields='*', where=None, order=None, offset=None):
'Get a single result\n\n table = (str) table_name\n fields = (field1, field2 ...) list of fields to select\n where = ("parameterized_statement", [parameters])\n eg: ("id=%s and name=%s", [1, "test"])\n order = [field, ASC|DESC]\n '
cur = self._select(table, fields, where, order, 1, offset)
return cur.fetchone()<|docstring|>Get a single result
table = (str) table_name
fields = (field1, field2 ...) list of fields to select
where = ("parameterized_statement", [parameters])
eg: ("id=%s and name=%s", [1, "test"])
order = [field, ASC|DESC]<|endoftext|> |
240ae77c5e04449cad3f8bb976171f756f66a7336daa0a65aa94dbf0f6f3261e | def fetchall(self, table, fields='*', where=None, order=None, limit=None, offset=None):
'Get all results\n\n table = (str) table_name\n fields = (field1, field2 ...) list of fields to select\n where = ("parameterized_statement", [parameters])\n eg: ("id=%s and name=%s", [1, "test"])\n order = [field, ASC|DESC]\n limit = [limit, offset]\n '
cur = self._select(table, fields, where, order, limit, offset)
return cur.fetchall() | Get all results
table = (str) table_name
fields = (field1, field2 ...) list of fields to select
where = ("parameterized_statement", [parameters])
eg: ("id=%s and name=%s", [1, "test"])
order = [field, ASC|DESC]
limit = [limit, offset] | pg_simple/pg_simple.py | fetchall | glennib/pg_simple | 25 | python | def fetchall(self, table, fields='*', where=None, order=None, limit=None, offset=None):
'Get all results\n\n table = (str) table_name\n fields = (field1, field2 ...) list of fields to select\n where = ("parameterized_statement", [parameters])\n eg: ("id=%s and name=%s", [1, "test"])\n order = [field, ASC|DESC]\n limit = [limit, offset]\n '
cur = self._select(table, fields, where, order, limit, offset)
return cur.fetchall() | def fetchall(self, table, fields='*', where=None, order=None, limit=None, offset=None):
'Get all results\n\n table = (str) table_name\n fields = (field1, field2 ...) list of fields to select\n where = ("parameterized_statement", [parameters])\n eg: ("id=%s and name=%s", [1, "test"])\n order = [field, ASC|DESC]\n limit = [limit, offset]\n '
cur = self._select(table, fields, where, order, limit, offset)
return cur.fetchall()<|docstring|>Get all results
table = (str) table_name
fields = (field1, field2 ...) list of fields to select
where = ("parameterized_statement", [parameters])
eg: ("id=%s and name=%s", [1, "test"])
order = [field, ASC|DESC]
limit = [limit, offset]<|endoftext|> |
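A hedged usage sketch of the two fetch helpers above; the `books` table and its columns are hypothetical, and construction of the `db` instance (connection pool setup) is elided:

```python
# Hypothetical table and columns; `db` is an instance of the class above,
# with pool/connection setup elided (see the pg_simple project for that).
row = db.fetchone('books',
                  fields=('id', 'name', 'price'),
                  where=('price > %s AND in_print = %s', [5, True]),
                  order=['price', 'DESC'])

rows = db.fetchall('books',
                   fields=('id', 'name'),
                   where=('genre = %s', ['fiction']),
                   order=['name', 'ASC'],
                   limit=10,
                   offset=0)
```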
53c4360a21c1da505c837f4421c0a8a45ec5fdc5a444709a6fe34d7094567ca1 | def join(self, tables=(), fields=(), join_fields=(), where=None, order=None, limit=None, offset=None):
'Run an inner left join query\n\n tables = (table1, table2)\n fields = ([fields from table1], [fields from table 2]) # fields to select\n join_fields = (field1, field2) # fields to join. field1 belongs to table1 and field2 belongs to table 2\n where = ("parameterized_statement", [parameters])\n eg: ("id=%s and name=%s", [1, "test"])\n order = [field, ASC|DESC]\n limit = [limit1, limit2]\n '
cur = self._join(tables, fields, join_fields, where, order, limit, offset)
result = cur.fetchall()
rows = None
if result:
Row = namedtuple('Row', [f[0] for f in cur.description])
rows = [Row(*r) for r in result]
return rows | Run an inner left join query
tables = (table1, table2)
fields = ([fields from table1], [fields from table 2]) # fields to select
join_fields = (field1, field2) # fields to join. field1 belongs to table1 and field2 belongs to table 2
where = ("parameterized_statement", [parameters])
eg: ("id=%s and name=%s", [1, "test"])
order = [field, ASC|DESC]
limit = [limit1, limit2] | pg_simple/pg_simple.py | join | glennib/pg_simple | 25 | python | def join(self, tables=(), fields=(), join_fields=(), where=None, order=None, limit=None, offset=None):
'Run an inner left join query\n\n tables = (table1, table2)\n fields = ([fields from table1], [fields from table 2]) # fields to select\n join_fields = (field1, field2) # fields to join. field1 belongs to table1 and field2 belongs to table 2\n where = ("parameterized_statement", [parameters])\n eg: ("id=%s and name=%s", [1, "test"])\n order = [field, ASC|DESC]\n limit = [limit1, limit2]\n '
cur = self._join(tables, fields, join_fields, where, order, limit, offset)
result = cur.fetchall()
rows = None
if result:
Row = namedtuple('Row', [f[0] for f in cur.description])
rows = [Row(*r) for r in result]
return rows | def join(self, tables=(), fields=(), join_fields=(), where=None, order=None, limit=None, offset=None):
'Run an inner left join query\n\n tables = (table1, table2)\n fields = ([fields from table1], [fields from table 2]) # fields to select\n join_fields = (field1, field2) # fields to join. field1 belongs to table1 and field2 belongs to table 2\n where = ("parameterized_statement", [parameters])\n eg: ("id=%s and name=%s", [1, "test"])\n order = [field, ASC|DESC]\n limit = [limit1, limit2]\n '
cur = self._join(tables, fields, join_fields, where, order, limit, offset)
result = cur.fetchall()
rows = None
if result:
Row = namedtuple('Row', [f[0] for f in cur.description])
rows = [Row(*r) for r in result]
return rows<|docstring|>Run an inner left join query
tables = (table1, table2)
fields = ([fields from table1], [fields from table 2]) # fields to select
join_fields = (field1, field2) # fields to join. field1 belongs to table1 and field2 belongs to table 2
where = ("parameterized_statement", [parameters])
eg: ("id=%s and name=%s", [1, "test"])
order = [field, ASC|DESC]
limit = [limit1, limit2]<|endoftext|> |
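And a sketch of `join`, using the same hypothetical schema; note the shape the docstring asks for: one field list per table plus the pair of join columns. Each returned row is a namedtuple built from the cursor description:

```python
# Hypothetical schema: authors(id, name) and books(id, author_id, title).
rows = db.join(tables=('authors', 'books'),
               fields=(['id', 'name'], ['title']),
               join_fields=('id', 'author_id'),   # authors.id = books.author_id
               where=('books.title ILIKE %s', ['%python%']),
               order=['authors.name', 'ASC'],
               limit=5)
if rows:
    print(rows[0].name, '-', rows[0].title)       # namedtuple field access
```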
f86cb95f1dc26f61595f0196d889dc28097898e8f12874dd1f2116a5b0894683 | def insert(self, table, data, returning=None):
'Insert a record'
(cols, vals) = self._format_insert(data)
sql = ('INSERT INTO %s (%s) VALUES(%s)' % (table, cols, vals))
sql += self._returning(returning)
cur = self.execute(sql, list(data.values()))
return (cur.fetchone() if returning else cur.rowcount) | Insert a record | pg_simple/pg_simple.py | insert | glennib/pg_simple | 25 | python | def insert(self, table, data, returning=None):
(cols, vals) = self._format_insert(data)
sql = ('INSERT INTO %s (%s) VALUES(%s)' % (table, cols, vals))
sql += self._returning(returning)
cur = self.execute(sql, list(data.values()))
return (cur.fetchone() if returning else cur.rowcount) | def insert(self, table, data, returning=None):
(cols, vals) = self._format_insert(data)
sql = ('INSERT INTO %s (%s) VALUES(%s)' % (table, cols, vals))
sql += self._returning(returning)
cur = self.execute(sql, list(data.values()))
return (cur.fetchone() if returning else cur.rowcount)<|docstring|>Insert a record<|endoftext|> |
3d134409d6e3278b8deb3fc1de9cbde9aaab674a05f138ead649d925f9c7f90b | def update(self, table, data, where=None, returning=None):
'Update a record'
query = self._format_update(data)
sql = ('UPDATE %s SET %s' % (table, query))
sql += (self._where(where) + self._returning(returning))
cur = self.execute(sql, ((list(data.values()) + where[1]) if (where and (len(where) > 1)) else list(data.values())))
return (cur.fetchall() if returning else cur.rowcount) | Update a record | pg_simple/pg_simple.py | update | glennib/pg_simple | 25 | python | def update(self, table, data, where=None, returning=None):
query = self._format_update(data)
sql = ('UPDATE %s SET %s' % (table, query))
sql += (self._where(where) + self._returning(returning))
cur = self.execute(sql, ((list(data.values()) + where[1]) if (where and (len(where) > 1)) else list(data.values())))
return (cur.fetchall() if returning else cur.rowcount) | def update(self, table, data, where=None, returning=None):
query = self._format_update(data)
sql = ('UPDATE %s SET %s' % (table, query))
sql += (self._where(where) + self._returning(returning))
cur = self.execute(sql, ((list(data.values()) + where[1]) if (where and (len(where) > 1)) else list(data.values())))
return (cur.fetchall() if returning else cur.rowcount)<|docstring|>Update a record<|endoftext|>
04d40470d31adbaa51daeb1595cb4e352a4da337cc84fdd00f81b80ba6c80b14 | def delete(self, table, where=None, returning=None):
'Delete rows based on a where condition'
sql = ('DELETE FROM %s' % table)
sql += (self._where(where) + self._returning(returning))
cur = self.execute(sql, (where[1] if (where and (len(where) > 1)) else None))
return (cur.fetchall() if returning else cur.rowcount) | Delete rows based on a where condition | pg_simple/pg_simple.py | delete | glennib/pg_simple | 25 | python | def delete(self, table, where=None, returning=None):
sql = ('DELETE FROM %s' % table)
sql += (self._where(where) + self._returning(returning))
cur = self.execute(sql, (where[1] if (where and (len(where) > 1)) else None))
return (cur.fetchall() if returning else cur.rowcount) | def delete(self, table, where=None, returning=None):
sql = ('DELETE FROM %s' % table)
sql += (self._where(where) + self._returning(returning))
cur = self.execute(sql, (where[1] if (where and (len(where) > 1)) else None))
return (cur.fetchall() if returning else cur.rowcount)<|docstring|>Delete rows based on a where condition<|endoftext|> |
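The write helpers follow the same `where=("stmt", [params])` convention; a sketch against the same hypothetical `books` table. Passing `returning` switches the result from a row count to the fetched rows:

```python
# insert() returns the row count, or the fetched row when `returning` is set.
(book_id,) = db.insert('books', {'name': 'Example', 'price': 7}, returning='id')

# update() appends the WHERE parameters after the SET parameters.
updated = db.update('books', data={'price': 8}, where=('id = %s', [book_id]))

# delete() with a parameterized condition, then persist the transaction.
deleted = db.delete('books', where=('price < %s', [1]))
db.commit()
```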
93d241d897701658702b92df57c555d27265003d6ddfa4e468e5f7da7b7e5b31 | def execute(self, sql, params=None):
'Executes a raw query'
try:
if (self._log and self._log_fmt):
self._cursor.timestamp = time.time()
self._cursor.execute(sql, params)
if (self._log and self._log_fmt):
self._log_cursor(self._cursor)
except Exception as e:
if (self._log and self._log_fmt):
self._log_error(('execute() failed: ' + e.message))
raise
return self._cursor | Executes a raw query | pg_simple/pg_simple.py | execute | glennib/pg_simple | 25 | python | def execute(self, sql, params=None):
try:
if (self._log and self._log_fmt):
self._cursor.timestamp = time.time()
self._cursor.execute(sql, params)
if (self._log and self._log_fmt):
self._log_cursor(self._cursor)
except Exception as e:
if (self._log and self._log_fmt):
self._log_error(('execute() failed: ' + e.message))
raise
return self._cursor | def execute(self, sql, params=None):
try:
if (self._log and self._log_fmt):
self._cursor.timestamp = time.time()
self._cursor.execute(sql, params)
if (self._log and self._log_fmt):
self._log_cursor(self._cursor)
except Exception as e:
if (self._log and self._log_fmt):
self._log_error(('execute() failed: ' + e.message))
raise
return self._cursor<|docstring|>Executes a raw query<|endoftext|> |
4c675ee87e62cf76641c8196d6ac03bc216f17475d42cd961fb9b5dc3452e2fb | def truncate(self, table, restart_identity=False, cascade=False):
"Truncate a table or set of tables\n\n db.truncate('tbl1')\n db.truncate('tbl1, tbl2')\n "
sql = 'TRUNCATE %s'
if restart_identity:
sql += ' RESTART IDENTITY'
if cascade:
sql += ' CASCADE'
self.execute((sql % table)) | Truncate a table or set of tables
db.truncate('tbl1')
db.truncate('tbl1, tbl2') | pg_simple/pg_simple.py | truncate | glennib/pg_simple | 25 | python | def truncate(self, table, restart_identity=False, cascade=False):
"Truncate a table or set of tables\n\n db.truncate('tbl1')\n db.truncate('tbl1, tbl2')\n "
sql = 'TRUNCATE %s'
if restart_identity:
sql += ' RESTART IDENTITY'
if cascade:
sql += ' CASCADE'
self.execute((sql % table)) | def truncate(self, table, restart_identity=False, cascade=False):
"Truncate a table or set of tables\n\n db.truncate('tbl1')\n db.truncate('tbl1, tbl2')\n "
sql = 'TRUNCATE %s'
if restart_identity:
sql += ' RESTART IDENTITY'
if cascade:
sql += ' CASCADE'
self.execute((sql % table))<|docstring|>Truncate a table or set of tables
db.truncate('tbl1')
db.truncate('tbl1, tbl2')<|endoftext|> |
8a558c75d005e7684a146d459249024daa1fde04eb063fd85dc236619a213546 | def drop(self, table, cascade=False):
'Drop a table'
sql = 'DROP TABLE IF EXISTS %s'
if cascade:
sql += ' CASCADE'
self.execute((sql % table)) | Drop a table | pg_simple/pg_simple.py | drop | glennib/pg_simple | 25 | python | def drop(self, table, cascade=False):
sql = 'DROP TABLE IF EXISTS %s'
if cascade:
sql += ' CASCADE'
self.execute((sql % table)) | def drop(self, table, cascade=False):
sql = 'DROP TABLE IF EXISTS %s'
if cascade:
sql += ' CASCADE'
self.execute((sql % table))<|docstring|>Drop a table<|endoftext|> |
9b07cb83d2c1ba72b585ae3d50fdd9d0b71e7d137000fcf0d98efb2c69fe6e1e | def create(self, table, schema):
"Create a table with the schema provided\n\n pg_db.create('my_table','id SERIAL PRIMARY KEY, name TEXT')"
self.execute(('CREATE TABLE %s (%s)' % (table, schema))) | Create a table with the schema provided
pg_db.create('my_table','id SERIAL PRIMARY KEY, name TEXT') | pg_simple/pg_simple.py | create | glennib/pg_simple | 25 | python | def create(self, table, schema):
"Create a table with the schema provided\n\n pg_db.create('my_table','id SERIAL PRIMARY KEY, name TEXT')"
self.execute(('CREATE TABLE %s (%s)' % (table, schema))) | def create(self, table, schema):
"Create a table with the schema provided\n\n pg_db.create('my_table','id SERIAL PRIMARY KEY, name TEXT')"
self.execute(('CREATE TABLE %s (%s)' % (table, schema)))<|docstring|>Create a table with the schema provided
pg_db.create('my_table','id SERIAL PRIMARY KEY, name TEXT')<|endoftext|> |
e9ef800675f2d26274013869d428438a8b0510bbcb5e8b821cb945989e2579fb | def commit(self):
'Commit a transaction'
return self._connection.commit() | Commit a transaction | pg_simple/pg_simple.py | commit | glennib/pg_simple | 25 | python | def commit(self):
return self._connection.commit() | def commit(self):
return self._connection.commit()<|docstring|>Commit a transaction<|endoftext|> |
b91396c9597254557f35649991c6af3daae68840080482f2a0ce27a3aeedd568 | def rollback(self):
'Roll-back a transaction'
return self._connection.rollback() | Roll-back a transaction | pg_simple/pg_simple.py | rollback | glennib/pg_simple | 25 | python | def rollback(self):
return self._connection.rollback() | def rollback(self):
return self._connection.rollback()<|docstring|>Roll-back a transaction<|endoftext|> |
81807dfb92a6385600f1ef5cd6853fbaf38307e668ab827d20304690e471532f | @property
def is_open(self):
'Check if the connection is open'
return self._connection.open | Check if the connection is open | pg_simple/pg_simple.py | is_open | glennib/pg_simple | 25 | python | @property
def is_open(self):
return self._connection.open | @property
def is_open(self):
return self._connection.open<|docstring|>Check if the connection is open<|endoftext|> |
c65a2c4fee4cdbb001606255083c3b477cb671e96a1aca3d40dd8bae0cfa8173 | def _format_insert(self, data):
'Format insert dict values into strings'
cols = ','.join(data.keys())
vals = ','.join(['%s' for k in data])
return (cols, vals) | Format insert dict values into strings | pg_simple/pg_simple.py | _format_insert | glennib/pg_simple | 25 | python | def _format_insert(self, data):
cols = ','.join(data.keys())
vals = ','.join(['%s' for k in data])
return (cols, vals) | def _format_insert(self, data):
cols = ','.join(data.keys())
vals = ','.join(['%s' for k in data])
return (cols, vals)<|docstring|>Format insert dict values into strings<|endoftext|> |
eff0bde8906e13eab329a4658a2089449b899f7ad5585f512615b8b1c7ed8617 | def _format_update(self, data):
'Format update dict values into string'
return ('=%s,'.join(data.keys()) + '=%s') | Format update dict values into string | pg_simple/pg_simple.py | _format_update | glennib/pg_simple | 25 | python | def _format_update(self, data):
return ('=%s,'.join(data.keys()) + '=%s') | def _format_update(self, data):
return ('=%s,'.join(data.keys()) + '=%s')<|docstring|>Format update dict values into string<|endoftext|> |
80637b9082c818e17749b17a884df240914437356d023c0551dc8e0e11a94a3c | def _select(self, table=None, fields=(), where=None, order=None, limit=None, offset=None):
'Run a select query'
sql = ((((('SELECT %s FROM %s' % (','.join(fields), table)) + self._where(where)) + self._order(order)) + self._limit(limit)) + self._offset(offset))
return self.execute(sql, (where[1] if (where and (len(where) == 2)) else None)) | Run a select query | pg_simple/pg_simple.py | _select | glennib/pg_simple | 25 | python | def _select(self, table=None, fields=(), where=None, order=None, limit=None, offset=None):
sql = ((((('SELECT %s FROM %s' % (','.join(fields), table)) + self._where(where)) + self._order(order)) + self._limit(limit)) + self._offset(offset))
return self.execute(sql, (where[1] if (where and (len(where) == 2)) else None)) | def _select(self, table=None, fields=(), where=None, order=None, limit=None, offset=None):
sql = ((((('SELECT %s FROM %s' % (','.join(fields), table)) + self._where(where)) + self._order(order)) + self._limit(limit)) + self._offset(offset))
return self.execute(sql, (where[1] if (where and (len(where) == 2)) else None))<|docstring|>Run a select query<|endoftext|> |
5d6fe13bef349043eb63608bcffa476335b3d7bb77a4bf49912b029026f25cc6 | def _join(self, tables=(), fields=(), join_fields=(), where=None, order=None, limit=None, offset=None):
'Run an inner left join query'
fields = ([((tables[0] + '.') + f) for f in fields[0]] + [((tables[1] + '.') + f) for f in fields[1]])
sql = 'SELECT {0:s} FROM {1:s} LEFT JOIN {2:s} ON ({3:s} = {4:s})'.format(','.join(fields), tables[0], tables[1], '{0}.{1}'.format(tables[0], join_fields[0]), '{0}.{1}'.format(tables[1], join_fields[1]))
sql += (((self._where(where) + self._order(order)) + self._limit(limit)) + self._offset(offset))
return self.execute(sql, (where[1] if (where and (len(where) > 1)) else None)) | Run an inner left join query | pg_simple/pg_simple.py | _join | glennib/pg_simple | 25 | python | def _join(self, tables=(), fields=(), join_fields=(), where=None, order=None, limit=None, offset=None):
fields = ([((tables[0] + '.') + f) for f in fields[0]] + [((tables[1] + '.') + f) for f in fields[1]])
sql = 'SELECT {0:s} FROM {1:s} LEFT JOIN {2:s} ON ({3:s} = {4:s})'.format(','.join(fields), tables[0], tables[1], '{0}.{1}'.format(tables[0], join_fields[0]), '{0}.{1}'.format(tables[1], join_fields[1]))
sql += (((self._where(where) + self._order(order)) + self._limit(limit)) + self._offset(offset))
return self.execute(sql, (where[1] if (where and (len(where) > 1)) else None)) | def _join(self, tables=(), fields=(), join_fields=(), where=None, order=None, limit=None, offset=None):
fields = ([((tables[0] + '.') + f) for f in fields[0]] + [((tables[1] + '.') + f) for f in fields[1]])
sql = 'SELECT {0:s} FROM {1:s} LEFT JOIN {2:s} ON ({3:s} = {4:s})'.format(','.join(fields), tables[0], tables[1], '{0}.{1}'.format(tables[0], join_fields[0]), '{0}.{1}'.format(tables[1], join_fields[1]))
sql += (((self._where(where) + self._order(order)) + self._limit(limit)) + self._offset(offset))
return self.execute(sql, (where[1] if (where and (len(where) > 1)) else None))<|docstring|>Run an inner left join query<|endoftext|> |
af7745029b217ed4effc8be14455f2203d963747668afc210c5dc729eb41916d | def display_demo():
'\n Image display example.\n Load an image from a file; on success an Image object is returned, with no need to know the file format.\n '
im = Image.open('image_test.jpg')
print(im.format, im.size, im.mode)
im.show() | Image display example.
Load an image from a file; on success an Image object is returned, with no need to know the file format. | language/python/modules/pillow/pillow_module.py | display_demo | bigfoolliu/liu_aistuff | 1 | python | def display_demo():
'\n Image display example.\n Load an image from a file; on success an Image object is returned, with no need to know the file format.\n '
im = Image.open('image_test.jpg')
print(im.format, im.size, im.mode)
im.show() | def display_demo():
'\n Image display example.\n Load an image from a file; on success an Image object is returned, with no need to know the file format.\n '
im = Image.open('image_test.jpg')
print(im.format, im.size, im.mode)
im.show()<|docstring|>Image display example.
Load an image from a file; on success an Image object is returned, with no need to know the file format.<|endoftext|> |
83a75cc5b9d78b55c8ccd0e5bc04c98bb615bd92aa1ca5bb3c7c33246a55a725 | def save_demo():
'\n Save the file using the save() method; the file name becomes important when saving. Unless you specify a format,\n the library will save using the file name extension as the format.\n '
im = Image.open('./image_test.jpg')
if (not os.path.exists('./image_test.png')):
im.save('image_test.png') | Save the file using the save() method; the file name becomes important when saving. Unless you specify a format,
the library will save using the file name extension as the format. | language/python/modules/pillow/pillow_module.py | save_demo | bigfoolliu/liu_aistuff | 1 | python | def save_demo():
'\n Save the file using the save() method; the file name becomes important when saving. Unless you specify a format,\n the library will save using the file name extension as the format.\n '
im = Image.open('./image_test.jpg')
if (not os.path.exists('./image_test.png')):
im.save('image_test.png') | def save_demo():
'\n Save the file using the save() method; the file name becomes important when saving. Unless you specify a format,\n the library will save using the file name extension as the format.\n '
im = Image.open('./image_test.jpg')
if (not os.path.exists('./image_test.png')):
im.save('image_test.png')<|docstring|>Save the file using the save() method; the file name becomes important when saving. Unless you specify a format,
the library will save using the file name extension as the format.<|endoftext|> |
c34517ff81ad332d33580488b8e641d47d068202dab390276adc730753959bdb | def create_demo():
'Create an image.'
if (not os.path.exists('./create_demo.jpg')):
im = Image.new('RGB', (200, 200), 'white')
im.save('create_demo.jpg') | Create an image. | language/python/modules/pillow/pillow_module.py | create_demo | bigfoolliu/liu_aistuff | 1 | python | def create_demo():
if (not os.path.exists('./create_demo.jpg')):
im = Image.new('RGB', (200, 200), 'white')
im.save('create_demo.jpg') | def create_demo():
if (not os.path.exists('./create_demo.jpg')):
im = Image.new('RGB', (200, 200), 'white')
im.save('create_demo.jpg')<|docstring|>Create an image.<|endoftext|> |
be55402bb5bd8771ef0031a060e17da4c5defe69f230755b8a2c06a54a1893c6 | def font_demo():
'Use a specific font.'
im = Image.open('./create_demo.jpg')
font = ImageFont.truetype('./SimHei.ttf', size=20)
draw = ImageDraw.Draw(im)
draw.text((50, 30), 'Test Text', font=font, fill='red')
im.save('./font_demo.jpg')
im.show() | Use a specific font. | language/python/modules/pillow/pillow_module.py | font_demo | bigfoolliu/liu_aistuff | 1 | python | def font_demo():
im = Image.open('./create_demo.jpg')
font = ImageFont.truetype('./SimHei.ttf', size=20)
draw = ImageDraw.Draw(im)
draw.text((50, 30), 'Test Text', font=font, fill='red')
im.save('./font_demo.jpg')
im.show() | def font_demo():
im = Image.open('./create_demo.jpg')
font = ImageFont.truetype('./SimHei.ttf', size=20)
draw = ImageDraw.Draw(im)
draw.text((50, 30), 'Test Text', font=font, fill='red')
im.save('./font_demo.jpg')
im.show()<|docstring|>Use a specific font.<|endoftext|> |
58befa344704b3b950bf86a1e7e925fe389790f63ebbe90cecc399d1cbb85e83 | def __on_exchange_declareok(self, unused_frame):
'Invoked by pika when RabbitMQ has finished the Exchange.Declare RPC\n command.\n :param pika.Frame.Method unused_frame: Exchange.DeclareOk response frame\n '
self.logger.info('Exchange declared')
self.__start_publishing() | Invoked by pika when RabbitMQ has finished the Exchange.Declare RPC
command.
:param pika.Frame.Method unused_frame: Exchange.DeclareOk response frame | user_service/connectors/UserQueuePublisher.py | __on_exchange_declareok | mathurtx/user-service | 0 | python | def __on_exchange_declareok(self, unused_frame):
'Invoked by pika when RabbitMQ has finished the Exchange.Declare RPC\n command.\n :param pika.Frame.Method unused_frame: Exchange.DeclareOk response frame\n '
self.logger.info('Exchange declared')
self.__start_publishing() | def __on_exchange_declareok(self, unused_frame):
'Invoked by pika when RabbitMQ has finished the Exchange.Declare RPC\n command.\n :param pika.Frame.Method unused_frame: Exchange.DeclareOk response frame\n '
self.logger.info('Exchange declared')
self.__start_publishing()<|docstring|>Invoked by pika when RabbitMQ has finished the Exchange.Declare RPC
command.
:param pika.Frame.Method unused_frame: Exchange.DeclareOk response frame<|endoftext|> |
d8ba82d647f599e24f895d00c275f730b07b7ac9ba93f2a41c8a67882113625e | @abstractmethod
def compile_results(self) -> pd.DataFrame:
'\n Method for returning the final results dataframe.\n\n Each row of the dataframe consists of one utterance of one conversation.\n ' | Method for returning the final results dataframe.
Each row of the dataframe consists of one utterance of one conversation. | parlai/crowdsourcing/utils/analysis.py | compile_results | KaihuiLiang/ParlAI | 0 | python | @abstractmethod
def compile_results(self) -> pd.DataFrame:
'\n Method for returning the final results dataframe.\n\n Each row of the dataframe consists of one utterance of one conversation.\n ' | @abstractmethod
def compile_results(self) -> pd.DataFrame:
'\n Method for returning the final results dataframe.\n\n Each row of the dataframe consists of one utterance of one conversation.\n '<|docstring|>Method for returning the final results dataframe.
Each row of the dataframe consists of one utterance of one conversation.<|endoftext|> |
07df98ef8ee60a4329c39eb0e66adc9b69a3402f61f1f054ce9fef82d29bd73d | def get_worker_name(self, worker_id: str) -> str:
'\n Gets the global (AWS) id of a worker from their Mephisto worker_id.\n '
db = self.get_mephisto_db()
return db.get_worker(worker_id)['worker_name'] | Gets the global (AWS) id of a worker from their Mephisto worker_id. | parlai/crowdsourcing/utils/analysis.py | get_worker_name | KaihuiLiang/ParlAI | 0 | python | def get_worker_name(self, worker_id: str) -> str:
'\n \n '
db = self.get_mephisto_db()
return db.get_worker(worker_id)['worker_name'] | def get_worker_name(self, worker_id: str) -> str:
'\n \n '
db = self.get_mephisto_db()
return db.get_worker(worker_id)['worker_name']<|docstring|>Gets the global (AWS) id of a worker from their Mephisto worker_id.<|endoftext|> |
a783eca28d3d39433c9d32c34c98c2f6529c3dbc4917c7f60aefcec98453fa54 | def get_task_units(self, task_name: str) -> List[Unit]:
'\n Retrieves the list of work units from the Mephisto task.\n '
data_browser = self.get_mephisto_data_browser()
return data_browser.get_units_for_task_name(task_name) | Retrieves the list of work units from the Mephisto task. | parlai/crowdsourcing/utils/analysis.py | get_task_units | KaihuiLiang/ParlAI | 0 | python | def get_task_units(self, task_name: str) -> List[Unit]:
'\n \n '
data_browser = self.get_mephisto_data_browser()
return data_browser.get_units_for_task_name(task_name) | def get_task_units(self, task_name: str) -> List[Unit]:
'\n \n '
data_browser = self.get_mephisto_data_browser()
return data_browser.get_units_for_task_name(task_name)<|docstring|>Retrieves the list of work units from the Mephisto task.<|endoftext|> |
320e68d2624f3bdb1abe2f3a0a040aab39f43868a4e2101af411443d739ac21e | def get_units_data(self, task_units: List[Unit]) -> List[dict]:
'\n Retrieves task data for a list of Mephisto task units.\n '
data_browser = self.get_mephisto_data_browser()
task_data = []
for unit in task_units:
task_data.append(data_browser.get_data_from_unit(unit))
return task_data | Retrieves task data for a list of Mephisto task units. | parlai/crowdsourcing/utils/analysis.py | get_units_data | KaihuiLiang/ParlAI | 0 | python | def get_units_data(self, task_units: List[Unit]) -> List[dict]:
'\n \n '
data_browser = self.get_mephisto_data_browser()
task_data = []
for unit in task_units:
task_data.append(data_browser.get_data_from_unit(unit))
return task_data | def get_units_data(self, task_units: List[Unit]) -> List[dict]:
'\n \n '
data_browser = self.get_mephisto_data_browser()
task_data = []
for unit in task_units:
task_data.append(data_browser.get_data_from_unit(unit))
return task_data<|docstring|>Retrieves task data for a list of Mephisto task units.<|endoftext|> |
2c23e0e804e346a85f56b6918e559fd71b59aac5a4319ac9750b6c3a0a919b5b | @staticmethod
def coverage(value, user):
'amount that can be paid by user up to the given value'
return value | amount that can be paid by user up to the given value | wasch/payment.py | coverage | waschag-tvk/pywaschedv | 1 | python | @staticmethod
def coverage(value, user):
return value | @staticmethod
def coverage(value, user):
return value<|docstring|>amount that can be paid by user up to the given value<|endoftext|> |
2b4511c43cdff028fd7768f649edac329f83e14993ee7ee8eaaeb2ab9483aeec | @staticmethod
def refund(reference, value=None):
'\n :param reference str: reference of original payment\n :param value int: value to be refunded (<= original value);\n defaults to None, meaning the whole original amount\n '
return (value, '0000000001') | :param reference str: reference of original payment
:param value int: value to be refunded (<= original value);
defaults to None, meaning the whole original amount | wasch/payment.py | refund | waschag-tvk/pywaschedv | 1 | python | @staticmethod
def refund(reference, value=None):
'\n :param reference str: reference of original payment\n :param value int: value to be refunded (<= original value);\n defaults to None, meaning the whole original amount\n '
return (value, '0000000001') | @staticmethod
def refund(reference, value=None):
'\n :param reference str: reference of original payment\n :param value int: value to be refunded (<= original value);\n defaults to None, meaning the whole original amount\n '
return (value, '0000000001')<|docstring|>:param reference str: reference of original payment
:param value int: value to be refunded (<= original value);
defaults to None, meaning the whole original amount<|endoftext|> |
1ade1e252f7b2f62594f9b5c880f6828d8a57bba470d79d21f1e7eb2935e7247 | @staticmethod
def coverage(value, user):
'amount that can be paid by user up to the given value'
return 0 | amount that can be paid by user up to the given value | wasch/payment.py | coverage | waschag-tvk/pywaschedv | 1 | python | @staticmethod
def coverage(value, user):
return 0 | @staticmethod
def coverage(value, user):
return 0<|docstring|>amount that can be paid by user up to the given value<|endoftext|> |
bfeb79801a4c60294eec38ab9855d8bc83c5e153d7d032025f5fdf931b386c5b | @staticmethod
def refund(reference, value=None):
'\n :param reference str: reference of original payment\n :param value int: value to be refunded (<= original value);\n defaults to None, meaning the whole original amount\n '
raise PaymentError('Account is empty!') | :param reference str: reference of original payment
:param value int: value to be refunded (<= original value);
defaults to None, meaning the whole original amount | wasch/payment.py | refund | waschag-tvk/pywaschedv | 1 | python | @staticmethod
def refund(reference, value=None):
'\n :param reference str: reference of original payment\n :param value int: value to be refunded (<= original value);\n defaults to None, meaning the whole original amount\n '
raise PaymentError('Account is empty!') | @staticmethod
def refund(reference, value=None):
'\n :param reference str: reference of original payment\n :param value int: value to be refunded (<= original value);\n defaults to None, meaning the whole original amount\n '
raise PaymentError('Account is empty!')<|docstring|>:param reference str: reference of original payment
:param value int: value to be refunded (<= original value);
defaults to None, meaning the whole original amount<|endoftext|> |
230ea9b5f1e82c9cadb0e2bbabbec2ee796b2fe3ea2209a5f1bd216a493acc06 | def __init__(self, master_limit=1):
'\n ctor\n '
self._master_limit = master_limit
self._master_engines = []
self._slave_engines = [] | ctor | dbcluster/__init__.py | __init__ | krishardy/dbcluster | 0 | python | def __init__(self, master_limit=1):
'\n \n '
self._master_limit = master_limit
self._master_engines = []
self._slave_engines = [] | def __init__(self, master_limit=1):
'\n \n '
self._master_limit = master_limit
self._master_engines = []
self._slave_engines = []<|docstring|>ctor<|endoftext|> |
202dfea4874ebfdce2a613ce728207ccccf6bb6c2c332b8006038ebb8ab5a9cb | def get_max_sequence(self, timestamps, debug=False):
'\n Helper function that returns the longest consecutive\n sequence of timestamps separated by at most self.interval\n seconds.\n '
total_seqs = []
current_seq = []
prev_value = timestamps[0]
for value in timestamps:
if ((value - prev_value) > self.interval):
total_seqs.append(current_seq)
current_seq = []
current_seq.append(value)
prev_value = value
total_seqs.append(current_seq)
seq_lens = [len(seq) for seq in total_seqs]
index = seq_lens.index(max(seq_lens))
if debug:
print('Total')
print(timestamps)
print('Selected')
print(total_seqs)
return total_seqs[index][:self.max_count] | Helper function that returns the longest consecutive
sequence of timestamps separated by at most self.interval
seconds. | src/commands/actions.py | get_max_sequence | ellipses/Yaksha | 8 | python | def get_max_sequence(self, timestamps, debug=False):
'\n Helper function that returns the longest consecutive\n sequence of timestamps separated by at most self.interval\n seconds.\n '
total_seqs = []
current_seq = []
prev_value = timestamps[0]
for value in timestamps:
if ((value - prev_value) > self.interval):
total_seqs.append(current_seq)
current_seq = []
current_seq.append(value)
prev_value = value
total_seqs.append(current_seq)
seq_lens = [len(seq) for seq in total_seqs]
index = seq_lens.index(max(seq_lens))
if debug:
print('Total')
print(timestamps)
print('Selected')
print(total_seqs)
return total_seqs[index][:self.max_count] | def get_max_sequence(self, timestamps, debug=False):
'\n Helper function that returns the longest consecutive\n sequence of timestamps separated by at most self.interval\n seconds.\n '
total_seqs = []
current_seq = []
prev_value = timestamps[0]
for value in timestamps:
if ((value - prev_value) > self.interval):
total_seqs.append(current_seq)
current_seq = []
current_seq.append(value)
prev_value = value
total_seqs.append(current_seq)
seq_lens = [len(seq) for seq in total_seqs]
index = seq_lens.index(max(seq_lens))
if debug:
print('Total')
print(timestamps)
print('Selected')
print(total_seqs)
return total_seqs[index][:self.max_count]<|docstring|>Helper function that returns the longest consecutive
sequence of timestamps separated by at most self.interval
seconds.<|endoftext|> |
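To make the splitting concrete, here is the same helper as a stand-alone function with illustrative `interval`/`max_count` values (on the real class these are attributes):

```python
def get_max_sequence(timestamps, interval=5, max_count=3):
    """Longest run of timestamps whose consecutive gaps are <= interval."""
    total_seqs, current_seq = [], []
    prev_value = timestamps[0]
    for value in timestamps:
        if value - prev_value > interval:   # gap too large: start a new run
            total_seqs.append(current_seq)
            current_seq = []
        current_seq.append(value)
        prev_value = value
    total_seqs.append(current_seq)
    seq_lens = [len(seq) for seq in total_seqs]
    return total_seqs[seq_lens.index(max(seq_lens))][:max_count]

# 1..12 form one run (gaps <= 5); 100 is 88s after 12, so it starts a new run.
print(get_max_sequence(sorted([1, 3, 8, 12, 100, 103])))  # [1, 3, 8]
```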
e44a3c2542164aff60de3c1e57a4a1200ee2eae53d555195d1bf68ccaeeaaa0f | def get_episode(self, timestamps, debug=False):
'\n Finds the most relevant episode for the given timestamps.\n Current algorithm works by selecting the episode that\n has the longest sequence of consecutive timestamps\n separated by self.interval seconds.\n '
seq_list = [self.get_max_sequence(sorted(ts)) for (ep, ts) in timestamps.items()]
seq_len = [len(seq) for seq in seq_list]
if debug:
print('seq list')
print(seq_list)
print('seq len')
print(seq_len)
return list(timestamps)[seq_len.index(max(seq_len))] | Finds the most relevant episode for the given timestamps.
Current algorithm works by selecting the episode that
has the longest sequence of consecutive timestamps
separated by self.interval seconds. | src/commands/actions.py | get_episode | ellipses/Yaksha | 8 | python | def get_episode(self, timestamps, debug=False):
'\n Finds the most relevant episode for the given timestamps.\n Current algorithm works by selecting the episode that\n has the longest sequence of consecutive timestamps\n separated by self.interval seconds.\n '
seq_list = [self.get_max_sequence(sorted(ts)) for (ep, ts) in timestamps.items()]
seq_len = [len(seq) for seq in seq_list]
if debug:
print('seq list')
print(seq_list)
print('seq len')
print(seq_len)
return list(timestamps)[seq_len.index(max(seq_len))] | def get_episode(self, timestamps, debug=False):
'\n Finds the most relevant episode for the given timestamps.\n Current algorithm works by selecting the episode that\n has the longest sequence of consecutive timestamps\n separated by self.interval seconds.\n '
seq_list = [self.get_max_sequence(sorted(ts)) for (ep, ts) in timestamps.items()]
seq_len = [len(seq) for seq in seq_list]
if debug:
print('seq list')
print(seq_list)
print('seq len')
print(seq_len)
return list(timestamps)[seq_len.index(max(seq_len))]<|docstring|>Finds the most relevant episode for the given timestamps.
Current algorithm works by selecting the episode that
has the longest sequence of consecutive timestamps
separated by self.interval seconds.<|endoftext|> |
77cf3803e2486935da61d6ab9b556a73b1c39819c36e0a178f54a881be2aa4fc | def get_timestamps(self, screencaps, debug=False):
'\n Helper function that iterates through the list returned\n by the api endpoint to find the episode and the longest\n sequence of timestamps.\n '
episodes = {}
timestamps = {}
for screencap in screencaps:
episode = screencap['Episode']
timestamp = screencap['Timestamp']
if (episode in episodes):
episodes[episode] += 1
timestamps[episode].append(timestamp)
else:
episodes[episode] = 1
timestamps[episode] = [timestamp]
episode = self.get_episode(timestamps, debug=debug)
if debug:
print('episode count')
print(episodes)
print(episode)
print('screencaps')
print(screencaps)
print('timestamps')
print(timestamps)
max_seq = self.get_max_sequence(sorted(timestamps[episode]), debug=debug)
return (episode, max_seq) | Helper function that iterates through the list returned
by the api endpoint to find the episode and the longest
sequence of timestamps. | src/commands/actions.py | get_timestamps | ellipses/Yaksha | 8 | python | def get_timestamps(self, screencaps, debug=False):
'\n Helper function that iterates through the list returned\n by the api endpoint to find the episode and the longest\n sequence of timestamps.\n '
episodes = {}
timestamps = {}
for screencap in screencaps:
episode = screencap['Episode']
timestamp = screencap['Timestamp']
if (episode in episodes):
episodes[episode] += 1
timestamps[episode].append(timestamp)
else:
episodes[episode] = 1
timestamps[episode] = [timestamp]
episode = self.get_episode(timestamps, debug=debug)
if debug:
print('episode count')
print(episodes)
print(episode)
print('screencaps')
print(screencaps)
print('timestamps')
print(timestamps)
max_seq = self.get_max_sequence(sorted(timestamps[episode]), debug=debug)
return (episode, max_seq) | def get_timestamps(self, screencaps, debug=False):
'\n Helper function that iterates through the list returned\n by the api endpoint to find the episode and the longest\n sequence of timestamps.\n '
episodes = {}
timestamps = {}
for screencap in screencaps:
episode = screencap['Episode']
timestamp = screencap['Timestamp']
if (episode in episodes):
episodes[episode] += 1
timestamps[episode].append(timestamp)
else:
episodes[episode] = 1
timestamps[episode] = [timestamp]
episode = self.get_episode(timestamps, debug=debug)
if debug:
print('episode count')
print(episodes)
print(episode)
print('screencaps')
print(screencaps)
print('timestamps')
print(timestamps)
max_seq = self.get_max_sequence(sorted(timestamps[episode]), debug=debug)
return (episode, max_seq)<|docstring|>Helper function that iterates through the list returned
by the api endpoint to find the episode and the longest
sequence of timestamps.<|endoftext|> |
874edf0ea7b2bbbc90d880206590d2c104ed4b8c71dac68a41bf6db4700f0778 | def format_message(self, message):
'\n Formats the message by adding line breaks to prevent it\n from overflowing the gif boundary. Line breaks are added\n at the end of a word to prevent it from being split.\n '
char_buff = 0
formated_msg = ''
for word in message.split(' '):
char_buff += len(word)
formated_msg += (' %s' % word)
if (char_buff >= 18):
char_buff = 0
formated_msg += u'\n'
return formated_msg | Formats the message by adding line breaks to prevent it
from overflowing the gif boundary. Line breaks are added
at the end of a word to prevent it from being split. | src/commands/actions.py | format_message | ellipses/Yaksha | 8 | python | def format_message(self, message):
'\n Formats the message by adding line breaks to prevent it\n from overflowing the gif boundary. Line breaks are added\n at the end of a word to prevent it from being split.\n '
char_buff = 0
formated_msg = ''
for word in message.split(' '):
char_buff += len(word)
formated_msg += (' %s' % word)
if (char_buff >= 18):
char_buff = 0
formated_msg += u'\n'
return formated_msg | def format_message(self, message):
'\n Formats the message by adding line breaks to prevent it\n from overflowing the gif boundary. Line breaks are added\n at the end of a word to prevent it from being split.\n '
char_buff = 0
formated_msg = ''
for word in message.split(' '):
char_buff += len(word)
formated_msg += (' %s' % word)
if (char_buff >= 18):
char_buff = 0
formated_msg += u'\n'
return formated_msg<|docstring|>Formats the message by adding line breaks to prevent it
from overflowing the gif boundary. Line breaks are added
at the end of a word to prevent it from being split.<|endoftext|> |
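A stand-alone run of the wrapper shows the behaviour: the newline lands after the word that pushes the running count past 18 characters, so no word is ever split (sample text is illustrative):

```python
def format_message(message):
    """Insert a newline once ~18 characters of words have accumulated."""
    char_buff = 0
    formated_msg = ''
    for word in message.split(' '):
        char_buff += len(word)
        formated_msg += ' %s' % word
        if char_buff >= 18:
            char_buff = 0
            formated_msg += u'\n'
    return formated_msg

print(repr(format_message('the quick brown fox jumps over the lazy dog')))
# ' the quick brown fox jumps\n over the lazy dog'
```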
77f6b6e93b08afc6925459b9c060362955e15ab38127aa9f2351ffa8a0c46570 | async def get_gif(self, caption, user, *args, **kwargs):
'\n Method that is called when trying to get a Frinkiac URL.\n Does basic error handling and calls handle_caption\n which does most of the actual work.\n '
resp = (await self.handle_caption(caption))
if (not resp):
return 'Try fixing your quote.'
(episode, timestamps, caption) = resp
return (self.gif_url % (episode, timestamps[0], timestamps[(- 1)])) | Method that is called when trying to get a Frinkiac URL.
Does basic error handling and calls handle_caption
which does most of the actual work. | src/commands/actions.py | get_gif | ellipses/Yaksha | 8 | python | async def get_gif(self, caption, user, *args, **kwargs):
'\n Method that is called when trying to get a Frinkiac URL.\n Does basic error handling and calls handle_caption\n which does most of the actual work.\n '
resp = (await self.handle_caption(caption))
if (not resp):
return 'Try fixing your quote.'
(episode, timestamps, caption) = resp
return (self.gif_url % (episode, timestamps[0], timestamps[(- 1)])) | async def get_gif(self, caption, user, *args, **kwargs):
'\n Method that is called when trying to get a Frinkiac URL.\n Does basic error handling and calls handle_caption\n which does most of the actual work.\n '
resp = (await self.handle_caption(caption))
if (not resp):
return 'Try fixing your quote.'
(episode, timestamps, caption) = resp
return (self.gif_url % (episode, timestamps[0], timestamps[(- 1)]))<|docstring|>Method that is called when trying to get a Frinkiac URL.
Does basic error handling and calls handle_caption
which does most of the actual work.<|endoftext|> |
b27cb610c3cab8012bcb61024d3ed88a4a3659382497e7adc79789a62e2102b9 | async def get_captioned_gif(self, caption, user, *args, **kwargs):
'\n Method that is called when trying to get a gif with\n a caption. Does basic error handling and base 64\n encoding and formatting of the caption.\n '
resp = (await self.handle_caption(caption))
if (not resp):
return 'Try fixing your quote.'
(episode, timestamps, caption) = resp
caption = self.format_message(caption)
try:
encoded = str(base64.b64encode(str.encode(caption)), 'utf-8')
except TypeError:
encoded = str(base64.b64encode(str.encode(caption)))
return (self.caption_url % (episode, timestamps[0], timestamps[(- 1)], encoded)) | Method that is called when trying to get a gif with
a caption. Does basic error handling and base 64
encoding and formatting of the caption. | src/commands/actions.py | get_captioned_gif | ellipses/Yaksha | 8 | python | async def get_captioned_gif(self, caption, user, *args, **kwargs):
'\n Method that is called when trying to get a gif with\n a caption. Does basic error handling and base 64\n encoding and formatting of the caption.\n '
resp = (await self.handle_caption(caption))
if (not resp):
return 'Try fixing your quote.'
(episode, timestamps, caption) = resp
caption = self.format_message(caption)
try:
encoded = str(base64.b64encode(str.encode(caption)), 'utf-8')
except TypeError:
encoded = str(base64.b64encode(str.encode(caption)))
return (self.caption_url % (episode, timestamps[0], timestamps[(- 1)], encoded)) | async def get_captioned_gif(self, caption, user, *args, **kwargs):
'\n Method that is called when trying to get a gif with\n a caption. Does basic error handling and base 64\n encoding and formatting of the caption.\n '
resp = (await self.handle_caption(caption))
if (not resp):
return 'Try fixing your quote.'
(episode, timestamps, caption) = resp
caption = self.format_message(caption)
try:
encoded = str(base64.b64encode(str.encode(caption)), 'utf-8')
except TypeError:
encoded = str(base64.b64encode(str.encode(caption)))
return (self.caption_url % (episode, timestamps[0], timestamps[(- 1)], encoded))<|docstring|>Method that is called when trying to get a gif with
a caption. Does basic error handling and base 64
encoding and formatting of the caption.<|endoftext|> |
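The caption is base64-encoded before being interpolated into `self.caption_url`; a minimal round-trip sketch of that encoding step (the sample caption is illustrative, shaped like `format_message` output):

```python
import base64

caption = ' stupid sexy\n flanders'   # illustrative wrapped caption
encoded = str(base64.b64encode(str.encode(caption)), 'utf-8')

# Decoding confirms the text survives the trip into the URL parameter.
assert base64.b64decode(encoded).decode('utf-8') == caption
print(encoded)
```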
02ad0cf3483f15f547874228ab0187ec6ec6fc9675fe926207706717934567b1 | @register('!mymention')
async def get_my_mention(self, message, user, channel, client, *args):
'\n Shows the last message in the channel that mentioned the user\n that uses this command.\n\n If an optional numeric parameter is passed in, it returns\n the nth-to-last mention.\n '
regex_result = re.search(self.mention_regex, message)
count = 0
if regex_result:
count = int(regex_result.group(1))
async for message in client.logs_from(channel, limit=self.history_limit):
if (user in message.content):
if (count == 0):
username = message.author.display_name
response = ('%s _by %s_' % (message.content, username))
return response
else:
count -= 1
response = ('Sorry %s, I could not find any mention of you in the last %s messages of this channel.' % (user, self.history_limit))
return response | Shows the last message in the channel that mentioned the user
that uses this command.
If an optional numeric parameter is passed in, it returns
the nth-to-last mention. | src/commands/actions.py | get_my_mention | ellipses/Yaksha | 8 | python | @register('!mymention')
async def get_my_mention(self, message, user, channel, client, *args):
'\n Shows the last message in the channel that mentioned the user\n that uses this command.\n\n If an optional numeric parameter is passed in, it returns\n the nth-to-last mention.\n '
regex_result = re.search(self.mention_regex, message)
count = 0
if regex_result:
count = int(regex_result.group(1))
async for message in client.logs_from(channel, limit=self.history_limit):
if (user in message.content):
if (count == 0):
username = message.author.display_name
response = ('%s _by %s_' % (message.content, username))
return response
else:
count -= 1
response = ('Sorry %s, I could not find any mention of you in the last %s messages of this channel.' % (user, self.history_limit))
return response | @register('!mymention')
async def get_my_mention(self, message, user, channel, client, *args):
'\n Shows the last message in the channel that mentioned the user\n that uses this command.\n\n If an optional numeric parameter is passed in, it returns\n the nth-to-last mention.\n '
regex_result = re.search(self.mention_regex, message)
count = 0
if regex_result:
count = int(regex_result.group(1))
async for message in client.logs_from(channel, limit=self.history_limit):
if (user in message.content):
if (count == 0):
username = message.author.display_name
response = ('%s _by %s_' % (message.content, username))
return response
else:
count -= 1
response = ('Sorry %s, I could not find any mention of you in the last %s messages of this channel.' % (user, self.history_limit))
return response<|docstring|>Shows the last message in the channel that mentioned the user
that uses this command.
If an optional numeric parameter is passed in, it returns
the nth-to-last mention.<|endoftext|> |
621da8e4f0291e7b7076e8c33b79b34e3711eaeb4198dc07e0f29bded37557db | def remove_older_months(self, tourney_list):
'\n Deletes every month previous to the current one\n from the tourney_list.\n '
current_month = datetime.now().month
month_index = 0
for index in range(len(tourney_list)):
tourney = tourney_list[index]
first_date = tourney[1].contents[0]
if (current_month == int(first_date[:2])):
month_index = index
break
del tourney_list[:month_index]
return tourney_list | Deletes every month previous to the current one
from the tourney_list. | src/commands/actions.py | remove_older_months | ellipses/Yaksha | 8 | python | def remove_older_months(self, tourney_list):
'\n Deletes every month previous to the current one\n from the tourney_list.\n '
current_month = datetime.now().month
month_index = 0
for index in range(len(tourney_list)):
tourney = tourney_list[index]
first_date = tourney[1].contents[0]
if (current_month == int(first_date[:2])):
month_index = index
break
del tourney_list[:month_index]
return tourney_list | def remove_older_months(self, tourney_list):
'\n Deletes every month previous to the current one\n from the tourney_list.\n '
current_month = datetime.now().month
month_index = 0
for index in range(len(tourney_list)):
tourney = tourney_list[index]
first_date = tourney[1].contents[0]
if (current_month == int(first_date[:2])):
month_index = index
break
del tourney_list[:month_index]
return tourney_list<|docstring|>Deletes every month previous to the current one
from the tourney_list.<|endoftext|> |
124ef3847da9a5e880ea7ad6e00d6c48292844c053324a80c23d7c653f08499b | def remove_older_days(self, tourney_list):
'\n Deletes every tourney entry from the current month\n whose starting date was before today.\n '
curr_day = datetime.now().day
day_index = 0
for days in tourney_list[0][1::4]:
date = days.contents[0]
if (int(date[3:5]) > curr_day):
break
day_index += 1
del tourney_list[0][:(day_index * 4)]
return tourney_list | Deletes every tourney entry from the current month
whose starting date was before today. | src/commands/actions.py | remove_older_days | ellipses/Yaksha | 8 | python | def remove_older_days(self, tourney_list):
'\n Deletes every tourney entry from the current month\n whose starting date was before today.\n '
curr_day = datetime.now().day
day_index = 0
for days in tourney_list[0][1::4]:
date = days.contents[0]
if (int(date[3:5]) > curr_day):
break
day_index += 1
del tourney_list[0][:(day_index * 4)]
return tourney_list | def remove_older_days(self, tourney_list):
'\n Deletes every tourney entry from the current month\n whose starting date was before today.\n '
curr_day = datetime.now().day
day_index = 0
for days in tourney_list[0][1::4]:
date = days.contents[0]
if (int(date[3:5]) > curr_day):
break
day_index += 1
del tourney_list[0][:(day_index * 4)]
return tourney_list<|docstring|>Deletes every tourney entry from the current month
whose starting date was before today.<|endoftext|> |
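The stride-4 indexing above assumes each tournament occupies four consecutive table cells, with the MM/DD date string in the second cell of each group; a toy illustration of the same bookkeeping with hypothetical cell values:

```python
# Four cells per tournament (hypothetical values); dates sit at offset 1.
cells = ['Evo', '07/15', '10:00', 'Las Vegas',
         'CEO', '07/28', '12:00', 'Daytona']

curr_day = 20
day_index = 0
for date in cells[1::4]:               # walk only the date cells
    if int(date[3:5]) > curr_day:      # first start day after today
        break
    day_index += 1
del cells[:day_index * 4]              # drop whole 4-cell groups
print(cells)                           # ['CEO', '07/28', '12:00', 'Daytona']
```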
2ebd9b1bddef8a07990c7e6ab0ed0d1667190a7584aa82963f08d4e61f07de7f | @memoize(((60 * 60) * 24))
@register('!tourney')
async def get_tourneys(self, message, author, *args):
'\n Uses the list of tournaments on the srk page\n to return the upcoming tournaments.\n '
resp = (await get_request(self.tourney_url))
if resp:
soup = BeautifulSoup(resp, 'html.parser')
soup = soup.find_all('tbody')
tourney_list = [month.find_all('td') for month in soup]
tourney_list = self.remove_older_months(tourney_list)
tourney_list = self.remove_older_days(tourney_list)
tourneys = (tourney_list[0] + tourney_list[1])
if (len(tourneys) > 20):
tourneys = tourneys[:20]
tourney_list = [tourney.contents[0] for tourney in tourneys]
del tourney_list[3::4]
names = [time for time in tourney_list[0::3]]
times = [time for time in tourney_list[1::3]]
locations = [time for time in tourney_list[2::3]]
times = self.convert_times(times)
formated_tourneys = tuple(zip(names, times, locations))
formated_str = ''.join([(' %s (%s) [%s] | ' % tourney) for tourney in formated_tourneys])
return ('The upcoming tourneys are ' + formated_str[:(- 4)])
else:
return ('Got %s when trying to get list of tourneys' % resp.status_code) | Uses the list of tournaments on the srk page
to return the upcoming tournaments. | src/commands/actions.py | get_tourneys | ellipses/Yaksha | 8 | python | @memoize(((60 * 60) * 24))
@register('!tourney')
async def get_tourneys(self, message, author, *args):
'\n Uses the list of tournaments on the srk page\n to return the upcoming tournaments.\n '
resp = (await get_request(self.tourney_url))
if resp:
soup = BeautifulSoup(resp, 'html.parser')
soup = soup.find_all('tbody')
tourney_list = [month.find_all('td') for month in soup]
tourney_list = self.remove_older_months(tourney_list)
tourney_list = self.remove_older_days(tourney_list)
tourneys = (tourney_list[0] + tourney_list[1])
if (len(tourneys) > 20):
tourneys = tourneys[:20]
tourney_list = [tourney.contents[0] for tourney in tourneys]
del tourney_list[3::4]
names = [time for time in tourney_list[0::3]]
times = [time for time in tourney_list[1::3]]
locations = [time for time in tourney_list[2::3]]
times = self.convert_times(times)
formated_tourneys = tuple(zip(names, times, locations))
formated_str = ''.join([(' %s (%s) [%s] | ' % tourney) for tourney in formated_tourneys])
return ('The upcoming tourneys are ' + formated_str[:(- 4)])
else:
return ('Got %s when trying to get list of tourneys' % resp.status_code) | @memoize(((60 * 60) * 24))
@register('!tourney')
async def get_tourneys(self, message, author, *args):
'\n Uses the list of tournaments on the srk page\n to return the upcoming tournaments.\n '
resp = (await get_request(self.tourney_url))
if resp:
soup = BeautifulSoup(resp, 'html.parser')
soup = soup.find_all('tbody')
tourney_list = [month.find_all('td') for month in soup]
tourney_list = self.remove_older_months(tourney_list)
tourney_list = self.remove_older_days(tourney_list)
tourneys = (tourney_list[0] + tourney_list[1])
if (len(tourneys) > 20):
tourneys = tourneys[:20]
tourney_list = [tourney.contents[0] for tourney in tourneys]
del tourney_list[3::4]
names = [time for time in tourney_list[0::3]]
times = [time for time in tourney_list[1::3]]
locations = [time for time in tourney_list[2::3]]
times = self.convert_times(times)
formated_tourneys = tuple(zip(names, times, locations))
formated_str = ''.join([(' %s (%s) [%s] | ' % tourney) for tourney in formated_tourneys])
return ('The upcoming tourneys are ' + formated_str[:(- 4)])
else:
return ('Got %s when trying to get list of tourneys' % resp.status_code)<|docstring|>Uses the list of tournaments on the srk page
to return the upcoming tournaments.<|endoftext|>
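The scraping above flattens every tbody into its td cells before slicing out names, dates and locations; a hedged offline sketch of that bs4 pattern with inline HTML:
from bs4 import BeautifulSoup
html = '<table><tbody><tr><td>Evo</td><td>07/21/17</td><td>Las Vegas</td></tr></tbody></table>'
soup = BeautifulSoup(html, 'html.parser')
months = [tbody.find_all('td') for tbody in soup.find_all('tbody')]
cells = [td.contents[0] for td in months[0]]  # same .contents[0] access as the record above
print(cells)  # ['Evo', '07/21/17', 'Las Vegas']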
7123270686b857d3f753c7e1da6d77ac90b23b4db1b6590299d2b623e64becc4 | @register('!add')
async def add_command(self, msg, user, channel, *args, **kwargs):
'\n Main function that is called when a user\n tries to add a new command.\n '
split_msg = msg.split(' ')
command = split_msg[0]
actions = ' '.join(split_msg[1:])
if (await self.save_command(command, actions, channel, **kwargs)):
return ('The tag _%s_ has been added.' % command)
else:
return 'Failed to add tag.' | Main function that is called when a user
tries to add a new command. | src/commands/actions.py | add_command | ellipses/Yaksha | 8 | python | @register('!add')
async def add_command(self, msg, user, channel, *args, **kwargs):
'\n Main function that is called when a user\n tries to add a new command.\n '
split_msg = msg.split(' ')
command = split_msg[0]
actions = ' '.join(split_msg[1:])
if (await self.save_command(command, actions, channel, **kwargs)):
return ('The tag _%s_ has been added.' % command)
else:
return 'Failed to add tag.' | @register('!add')
async def add_command(self, msg, user, channel, *args, **kwargs):
'\n Main function that is called when a user\n tries to add a new command.\n '
split_msg = msg.split(' ')
command = split_msg[0]
actions = ' '.join(split_msg[1:])
if (await self.save_command(command, actions, channel, **kwargs)):
return ('The tag _%s_ has been added.' % command)
else:
return 'Failed to add tag.'<|docstring|>Main function that is called when a user
tries to add a new command.<|endoftext|> |
43efbe10bccd3e52ea41103ae4ce1e46377e536c5a07df68fe2ff6e7157563aa | async def send_reminder_start_msg(self, user, channel, client, time):
'\n Gives an acknowledgement that the reminder has been set.\n '
time = time.replace(microsecond=0)
msg = (":+1: %s I'll remind you at %s UTC." % (user, str(time)))
(await channel.send(msg)) | Gives an acknowledgement that the reminder has been set. | src/commands/actions.py | send_reminder_start_msg | ellipses/Yaksha | 8 | python | async def send_reminder_start_msg(self, user, channel, client, time):
'\n \n '
time = time.replace(microsecond=0)
msg = (":+1: %s I'll remind you at %s UTC." % (user, str(time)))
(await channel.send(msg)) | async def send_reminder_start_msg(self, user, channel, client, time):
'\n \n '
time = time.replace(microsecond=0)
msg = (":+1: %s I'll remind you at %s UTC." % (user, str(time)))
(await channel.send(msg))<|docstring|>Gives an acknowledgement that the reminder has been set.<|endoftext|> |
5f16cfb0335be873486965c86c33c1a735e8a912a2d149cd09dade2dcaa74972 | async def send_reminder_end_msg(self, user, channel, client, text):
'\n Sends the message when the reminder finishes with the text\n if it was passed in.\n '
if text:
msg = ('Hello %s, you asked me to remind you of **%s**.' % (user, text))
else:
msg = ('Hello %s, you asked me to remind you at this time.' % user)
(await channel.send(msg)) | Sends the message when the reminder finishes with the text
if it was passed in. | src/commands/actions.py | send_reminder_end_msg | ellipses/Yaksha | 8 | python | async def send_reminder_end_msg(self, user, channel, client, text):
'\n Sends the message when the reminder finishes with the text\n if it was passed in.\n '
if text:
msg = ('Hello %s, you asked me to remind you of **%s**.' % (user, text))
else:
msg = ('Hello %s, you asked me to remind you at this time.' % user)
(await channel.send(msg)) | async def send_reminder_end_msg(self, user, channel, client, text):
'\n Sends the message when the reminder finishes with the text\n if it was passed in.\n '
if text:
msg = ('Hello %s, you asked me to remind you of **%s**.' % (user, text))
else:
msg = ('Hello %s, you asked me to remind you at this time.' % user)
(await channel.send(msg))<|docstring|>Sends the message when the reminder finishes with the text
if it was passed in.<|endoftext|> |
03315c60ba52a1f3c019b083eb55966694e59dd4d8e94f47892b723e9cd7fef4 | async def start_reminder_sleep(self, delta, user, channel, client, text, time):
'\n Asynchronously sleeps for the reminder length.\n '
(await self.send_reminder_start_msg(user, channel, client, time))
(await asyncio.sleep(delta.total_seconds()))
(await self.send_reminder_end_msg(user, channel, client, text)) | Asynchronously sleeps for the reminder length. | src/commands/actions.py | start_reminder_sleep | ellipses/Yaksha | 8 | python | async def start_reminder_sleep(self, delta, user, channel, client, text, time):
'\n \n '
(await self.send_reminder_start_msg(user, channel, client, time))
(await asyncio.sleep(delta.total_seconds()))
(await self.send_reminder_end_msg(user, channel, client, text)) | async def start_reminder_sleep(self, delta, user, channel, client, text, time):
'\n \n '
(await self.send_reminder_start_msg(user, channel, client, time))
(await asyncio.sleep(delta.total_seconds()))
(await self.send_reminder_end_msg(user, channel, client, text))<|docstring|>Asynchronously sleeps for the reminder length.<|endoftext|>
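start_reminder_sleep is just bookkeeping around asyncio.sleep; a self-contained sketch of the same shape, with print standing in for the Discord channel.send calls:
import asyncio
from datetime import datetime, timedelta
async def remind(delta, text):
    # Announce the target time, sleep for the delta, then deliver the reminder.
    print('will remind at', (datetime.utcnow() + delta).replace(microsecond=0), 'UTC')
    await asyncio.sleep(delta.total_seconds())
    print('reminder:', text)
asyncio.run(remind(timedelta(seconds=1), 'stretch'))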
e1c96d538e8b43ad4128802599863e6cb3e91d0bd8e565edb69a4da341ed8068 | def apply_regex(self, msg):
'\n Applies the regex to check if the user passed\n in an optional string in square brackets.\n Returns the original message with the string\n removed and the captured msg.\n '
regex_result = re.search(self.regex, msg)
if regex_result:
msg = re.sub(self.regex, '', msg).strip()
return (msg, regex_result.group(1))
else:
return False | Applies the regex to check if the user passed
in an optional string in square brackets.
Returns the original message with the string
removed and the captured msg. | src/commands/actions.py | apply_regex | ellipses/Yaksha | 8 | python | def apply_regex(self, msg):
'\n Applies the regex to check if the user passed\n in an optional string in square brackets.\n Returns the original message with the string\n removed and the captured msg.\n '
regex_result = re.search(self.regex, msg)
if regex_result:
msg = re.sub(self.regex, '', msg).strip()
return (msg, regex_result.group(1))
else:
return False | def apply_regex(self, msg):
'\n Applies the regex to check if the user passed\n in an optional string in square brackets.\n Returns the original message with the string\n removed and the captured msg.\n '
regex_result = re.search(self.regex, msg)
if regex_result:
msg = re.sub(self.regex, '', msg).strip()
return (msg, regex_result.group(1))
else:
return False<|docstring|>Applies the regex to check if the user passed
in an optional string in square brackets.
Returns the original message with the string
removed and the captured msg.<|endoftext|> |
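self.regex is defined elsewhere in the class; assuming it captures a square-bracketed suffix (an assumption, not the repo's actual pattern), the split behaves like this sketch:
import re
REGEX = r'\[(.*?)\]'  # assumed pattern: capture the text inside square brackets
def split_optional(msg):
    # Mirror apply_regex: return (message-without-brackets, captured text) or False.
    match = re.search(REGEX, msg)
    if match:
        return re.sub(REGEX, '', msg).strip(), match.group(1)
    return False
print(split_optional('in 2 hours [buy milk]'))  # ('in 2 hours', 'buy milk')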
bad6766fff4058569764587a36bac338d06dbd26be04040534452a1f01e7cd29 | def parse_msg(self, msg, user):
'\n Parses the message passed along with the !remind command.\n Uses the dateparser library to check if the time string\n is valid\n Format: !remindme <time period> [optional string]\n '
parsed_time = self.parser.get_date_data(msg)['date_obj']
if (not parsed_time):
error_msg = ('I could not interpret your message %s, try specifying the time period in a different format.' % user)
return (False, error_msg)
now = datetime.utcnow()
if (parsed_time < now):
error_msg = ("Dont waste my time %s, you can't expect me to remind you of an event in the past." % user)
return (False, error_msg)
difference = (parsed_time - now)
return (True, difference, parsed_time) | Parses the message passed along with the !remind command.
Uses the dateparser library to check if the time string
is valid
Format: !remindme <time period> [optional string] | src/commands/actions.py | parse_msg | ellipses/Yaksha | 8 | python | def parse_msg(self, msg, user):
'\n Parses the message passed along with the !remind command.\n Uses the dateparser library to check if the time string\n is valid\n Format: !remindme <time period> [optional string]\n '
parsed_time = self.parser.get_date_data(msg)['date_obj']
if (not parsed_time):
error_msg = ('I could not interpret your message %s, try specifying the time period in a different format.' % user)
return (False, error_msg)
now = datetime.utcnow()
if (parsed_time < now):
error_msg = ("Dont waste my time %s, you can't expect me to remind you of an event in the past." % user)
return (False, error_msg)
difference = (parsed_time - now)
return (True, difference, parsed_time) | def parse_msg(self, msg, user):
'\n Parses the message passed along with the !remind command.\n Uses the dateparser library to check if the time string\n is valid\n Format: !remindme <time period> [optional string]\n '
parsed_time = self.parser.get_date_data(msg)['date_obj']
if (not parsed_time):
error_msg = ('I could not interpret your message %s, try specifying the time period in a different format.' % user)
return (False, error_msg)
now = datetime.utcnow()
if (parsed_time < now):
error_msg = ("Dont waste my time %s, you can't expect me to remind you of an event in the past." % user)
return (False, error_msg)
difference = (parsed_time - now)
return (True, difference, parsed_time)<|docstring|>Parses the message passed along with the !remind command.
Uses the dateparser library to check if the time string
is valid
Format: !remindme <time period> [optional string]<|endoftext|> |
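self.parser above is presumably a dateparser DateDataParser; get_date_data returns a mapping whose 'date_obj' entry is None when parsing fails. A hedged sketch (the settings shown are an assumption; newer dateparser versions return a DateData object that still supports ['date_obj']):
from dateparser.date import DateDataParser
parser = DateDataParser(settings={'PREFER_DATES_FROM': 'future'})
date_obj = parser.get_date_data('in 2 hours')['date_obj']
print(date_obj)  # a datetime roughly two hours ahead, or None if nothing parsed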
0d52cc73fe4460509bdc9db9f77aff9ec8af4ecf7f7ae12e4d003fffd6447859 | @register('!remindme')
async def set_reminder(self, msg, user, channel, client, *args, **kwargs):
'\n Main function that is called to set a reminder. Calls the\n helper functions to parse and to check if it's valid.\n\n If the message is valid, the asynchronous sleep function\n is called.\n\n Currently loses state on restart ;_; could write/load\n to a file.\n '
reminder_txt = None
optional_string = self.apply_regex(msg)
if optional_string:
(msg, reminder_txt) = optional_string
parsed_msg = self.parse_msg(msg, user)
if (not parsed_msg[0]):
return parsed_msg[1]
else:
(await self.start_reminder_sleep(parsed_msg[1], user, channel, client, reminder_txt, parsed_msg[2])) | Main function that is called to set a reminder. Calls the
helper functions to parse and to check if it's valid.
If the message is valid, the asynchronous sleep function
is called.
Currently loses state on restart ;_; could write/load
to a file. | src/commands/actions.py | set_reminder | ellipses/Yaksha | 8 | python | @register('!remindme')
async def set_reminder(self, msg, user, channel, client, *args, **kwargs):
'\n Main function that is called to set a reminder. Calls the\n helper functions to parse and to check if it's valid.\n\n If the message is valid, the asynchronous sleep function\n is called.\n\n Currently loses state on restart ;_; could write/load\n to a file.\n '
reminder_txt = None
optional_string = self.apply_regex(msg)
if optional_string:
(msg, reminder_txt) = optional_string
parsed_msg = self.parse_msg(msg, user)
if (not parsed_msg[0]):
return parsed_msg[1]
else:
(await self.start_reminder_sleep(parsed_msg[1], user, channel, client, reminder_txt, parsed_msg[2])) | @register('!remindme')
async def set_reminder(self, msg, user, channel, client, *args, **kwargs):
'\n Main function that is called to set a reminder. Calls the\n helper functions to parse and to check if it's valid.\n\n If the message is valid, the asynchronous sleep function\n is called.\n\n Currently loses state on restart ;_; could write/load\n to a file.\n '
reminder_txt = None
optional_string = self.apply_regex(msg)
if optional_string:
(msg, reminder_txt) = optional_string
parsed_msg = self.parse_msg(msg, user)
if (not parsed_msg[0]):
return parsed_msg[1]
else:
(await self.start_reminder_sleep(parsed_msg[1], user, channel, client, reminder_txt, parsed_msg[2]))<|docstring|>Main function that is called to set a reminder. Calls the
helper functions to parse and to check if it's valid.
If the message is valid, the asynchronous sleep function
is called.
Currently loses state on restart ;_; could write/load
to a file.<|endoftext|> |
466133304118fe06daaee7764d2a4fdd6431147674a81448f37dc392fd5b3971 | @register('!blacklist')
async def blacklist(self, message, *args, **kwargs):
"\n Blacklists the user by adding their 'uid' to the\n currently maintained list of blacklisted users and updates the file.\n "
blacklisted_users = kwargs['blacklisted_users']
users = message.split(' ')
users = [user for user in users if (user not in blacklisted_users)]
blacklisted_users.extend(users)
users = [(user + '\n') for user in users]
async with aiofiles.open(self.blacklist_file, mode='a') as f:
(await f.writelines(users)) | Blacklists the user by adding their 'uid' to the
currently maintained list of blacklisted users and updates the file. | src/commands/actions.py | blacklist | ellipses/Yaksha | 8 | python | @register('!blacklist')
async def blacklist(self, message, *args, **kwargs):
"\n Blacklists the user by adding their 'uid' to the\n currently maintained list of blacklisted users and updates the file.\n "
blacklisted_users = kwargs['blacklisted_users']
users = message.split(' ')
users = [user for user in users if (user not in blacklisted_users)]
blacklisted_users.extend(users)
users = [(user + '\n') for user in users]
async with aiofiles.open(self.blacklist_file, mode='a') as f:
(await f.writelines(users)) | @register('!blacklist')
async def blacklist(self, message, *args, **kwargs):
"\n Blacklists the user by adding their 'uid' to the\n currently maintained list of blacklisted users and updates the file.\n "
blacklisted_users = kwargs['blacklisted_users']
users = message.split(' ')
users = [user for user in users if (user not in blacklisted_users)]
blacklisted_users.extend(users)
users = [(user + '\n') for user in users]
async with aiofiles.open(self.blacklist_file, mode='a') as f:
(await f.writelines(users))<|docstring|>Blacklists the user by adding their 'uid' to the
currently maintained list of blacklisted users and updates the file.<|endoftext|> |
4b3640a33a52671fa1df30cfdb237faf4c4e23ce9310dbb9900f1f594e15dad1 | @register('!unblacklist')
async def unblacklist(self, message, *args, **kwargs):
"\n Unblacklists the user by removing their 'uid' from the currently maintained\n list of blacklisted users and removes it from the file.\n "
users = message.split(' ')
blacklisted_users = kwargs['blacklisted_users']
users = [user for user in users if (user in blacklisted_users)]
for user in users:
del blacklisted_users[blacklisted_users.index(user)]
users = [(user + '\n') for user in users]
async with aiofiles.open(self.blacklist_file, mode='r') as f:
saved_users = (await f.readlines())
for user in users:
del saved_users[saved_users.index(user)]
async with aiofiles.open(self.blacklist_file, mode='w') as f:
(await f.writelines(saved_users)) | Unblacklists the user by removing their 'uid' from the currently maintained
list of blacklisted users and removes it from the file. | src/commands/actions.py | unblacklist | ellipses/Yaksha | 8 | python | @register('!unblacklist')
async def unblacklist(self, message, *args, **kwargs):
"\n Unblacklists the user by removing their 'uid' from the currently maintained\n list of blacklisted users and removes it from the file.\n "
users = message.split(' ')
blacklisted_users = kwargs['blacklisted_users']
users = [user for user in users if (user in blacklisted_users)]
for user in users:
del blacklisted_users[blacklisted_users.index(user)]
users = [(user + '\n') for user in users]
async with aiofiles.open(self.blacklist_file, mode='r') as f:
saved_users = (await f.readlines())
for user in users:
del saved_users[saved_users.index(user)]
async with aiofiles.open(self.blacklist_file, mode='w') as f:
(await f.writelines(saved_users)) | @register('!unblacklist')
async def unblacklist(self, message, *args, **kwargs):
"\n Unblacklists the user by removing their 'uid' from the currently maintained\n list of blacklisted users and removes it from the file.\n "
users = message.split(' ')
blacklisted_users = kwargs['blacklisted_users']
users = [user for user in users if (user in blacklisted_users)]
for user in users:
del blacklisted_users[blacklisted_users.index(user)]
users = [(user + '\n') for user in users]
async with aiofiles.open(self.blacklist_file, mode='r') as f:
saved_users = (await f.readlines())
for user in users:
del saved_users[saved_users.index(user)]
async with aiofiles.open(self.blacklist_file, mode='w') as f:
(await f.writelines(saved_users))<|docstring|>Unblacklists the user by removing their 'uid' from the currently maintained
list of blacklisted users and removes it from the file.<|endoftext|> |
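Both commands keep the file and the in-memory list in step: append on blacklist, read-filter-rewrite on unblacklist. A synchronous sketch of the rewrite half (aiofiles mirrors these open/readlines/writelines calls behind await):
def remove_users(path, to_remove):
    # Drop each listed uid from the newline-delimited blacklist file.
    with open(path) as f:
        lines = f.readlines()
    lines = [line for line in lines if line.strip() not in to_remove]
    with open(path, 'w') as f:
        f.writelines(lines)
# remove_users('blacklist.txt', {'1234567890'})  # usage, given an existing file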
5fe5ef06375a87022536cda1acc8c24d72425da9ff1bcf8863c1a33040633acc | def test_file_download(flask_app):
'Tests report download process'
response = flask_app.post('/api/v1/download/download-sample.tsv')
assert (response.status_code == 200)
assert (response.mimetype == 'application/json')
response = json.loads(response.get_data(as_text=True))
assert (response['message'] is not None) | Tests report download process | tests/apitests/test_file_download.py | test_file_download | Muazzama/Device-Verification-Subsystem | 3 | python | def test_file_download(flask_app):
response = flask_app.post('/api/v1/download/download-sample.tsv')
assert (response.status_code == 200)
assert (response.mimetype == 'application/json')
response = json.loads(response.get_data(as_text=True))
assert (response['message'] is not None) | def test_file_download(flask_app):
response = flask_app.post('/api/v1/download/download-sample.tsv')
assert (response.status_code == 200)
assert (response.mimetype == 'application/json')
response = json.loads(response.get_data(as_text=True))
assert (response['message'] is not None)<|docstring|>Tests report download process<|endoftext|> |
e4d97fba6fbc00783f47ba73a0f314f8215508f5ffc375efb0456fa8165c8ae3 | def test_compliant_report_download(flask_app):
'Tests compliant report download'
task = CeleryTasks.get_summary(['01206400000001', '35332206000303', '12344321000020', '35499405000401', '35236005000001', '01368900000001'], 0)
response = flask_app.post(('/api/v1/download/' + task['response']['compliant_report_name']))
assert (response.status_code == 200)
assert (response.mimetype == 'text/tab-separated-values') | Tests compliant report download | tests/apitests/test_file_download.py | test_compliant_report_download | Muazzama/Device-Verification-Subsystem | 3 | python | def test_compliant_report_download(flask_app):
task = CeleryTasks.get_summary(['01206400000001', '35332206000303', '12344321000020', '35499405000401', '35236005000001', '01368900000001'], 0)
response = flask_app.post(('/api/v1/download/' + task['response']['compliant_report_name']))
assert (response.status_code == 200)
assert (response.mimetype == 'text/tab-separated-values') | def test_compliant_report_download(flask_app):
task = CeleryTasks.get_summary(['01206400000001', '35332206000303', '12344321000020', '35499405000401', '35236005000001', '01368900000001'], 0)
response = flask_app.post(('/api/v1/download/' + task['response']['compliant_report_name']))
assert (response.status_code == 200)
assert (response.mimetype == 'text/tab-separated-values')<|docstring|>Tests compliant report download<|endoftext|>
8979cb34c1c77546ff848ed233a97b055cf8350b4c616230dfc5b79020785dd0 | async def __aenter__(self):
"\n Moves the current tasks's execution to an executor thread.\n \n This method is a coroutine.\n \n Raises\n ------\n RuntimeError\n - Called from outside of an ``EventThread``.\n - Called from outside of a ``Task``.\n "
thread = current_thread()
if (not isinstance(thread, EventThread)):
raise RuntimeError(f'`{self.__class__.__name__}` entered outside of `{EventThread.__name__}`, at {thread!r}.')
task = thread.current_task
if (task is None):
raise RuntimeError(f'`{self.__class__.__name__}` entered outside of a `{Task.__name__}`.')
self._task = task
loop = task._loop
future = Future(loop)
self._enter_future = future
loop.call_soon(self._enter_executor)
(await future)
return self | Moves the current task's execution to an executor thread.
This method is a coroutine.
Raises
------
RuntimeError
- Called from outside of an ``EventThread``.
- Called from outside of a ``Task``. | scarletio/core/traps/task_thread_switcher.py | __aenter__ | HuyaneMatsu/scarletio | 3 | python | async def __aenter__(self):
"\n Moves the current tasks's execution to an executor thread.\n \n This method is a coroutine.\n \n Raises\n ------\n RuntimeError\n - Called from outside of an ``EventThread``.\n - Called from outside of a ``Task``.\n "
thread = current_thread()
if (not isinstance(thread, EventThread)):
raise RuntimeError(f'`{self.__class__.__name__}` entered outside of `{EventThread.__name__}`, at {thread!r}.')
task = thread.current_task
if (task is None):
raise RuntimeError(f'`{self.__class__.__name__}` entered outside of a `{Task.__name__}`.')
self._task = task
loop = task._loop
future = Future(loop)
self._enter_future = future
loop.call_soon(self._enter_executor)
(await future)
return self | async def __aenter__(self):
"\n Moves the current tasks's execution to an executor thread.\n \n This method is a coroutine.\n \n Raises\n ------\n RuntimeError\n - Called from outside of an ``EventThread``.\n - Called from outside of a ``Task``.\n "
thread = current_thread()
if (not isinstance(thread, EventThread)):
raise RuntimeError(f'`{self.__class__.__name__}` entered outside of `{EventThread.__name__}`, at {thread!r}.')
task = thread.current_task
if (task is None):
raise RuntimeError(f'`{self.__class__.__name__}` entered outside of a `{Task.__name__}`.')
self._task = task
loop = task._loop
future = Future(loop)
self._enter_future = future
loop.call_soon(self._enter_executor)
(await future)
return self<|docstring|>Moves the current task's execution to an executor thread.
This method is a coroutine.
Raises
------
RuntimeError
- Called from outside of an ``EventThread``.
- Called from outside of a ``Task``.<|endoftext|> |
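For contrast, plain asyncio offers a per-call version of this idea via run_in_executor; the class above instead migrates a whole async-with section onto the executor and back. A runnable asyncio sketch of the simpler per-call pattern (this is the stdlib analogue, not scarletio's API):
import asyncio, time
def blocking_io():
    time.sleep(0.1)  # stands in for any blocking call
    return 42
async def main():
    loop = asyncio.get_running_loop()
    print(await loop.run_in_executor(None, blocking_io))  # off-loop, then resume here
asyncio.run(main())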
3d2f31e5b847efed2553650c4406210dea5849ac7501abe48ea3a849af05c944 | async def __aexit__(self, exc_type, exc_val, exc_tb):
"\n Moves the current task's executor back from an executor thread.\n \n This method is a coroutine.\n "
(await self._exit_future)
self._enter_future = None
self._task = None
self._exit_future = None
self._waited_future = None
return False | Moves the current task's execution back from the executor thread.
This method is a coroutine. | scarletio/core/traps/task_thread_switcher.py | __aexit__ | HuyaneMatsu/scarletio | 3 | python | async def __aexit__(self, exc_type, exc_val, exc_tb):
"\n Moves the current task's executor back from an executor thread.\n \n This method is a coroutine.\n "
(await self._exit_future)
self._enter_future = None
self._task = None
self._exit_future = None
self._waited_future = None
return False | async def __aexit__(self, exc_type, exc_val, exc_tb):
"\n Moves the current task's executor back from an executor thread.\n \n This method is a coroutine.\n "
(await self._exit_future)
self._enter_future = None
self._task = None
self._exit_future = None
self._waited_future = None
return False<|docstring|>Moves the current task's execution back from the executor thread.
This method is a coroutine.<|endoftext|> |
f4cddd981fae848d30cd989f2249b3ccfb92d875e10ccea3cdb7d926590f0c35 | def _enter_executor(self):
"\n Moves the task's execution to an executor thread and wakes it up.\n "
callbacks = self._enter_future._callbacks
callbacks.clear()
task = self._task
task.add_done_callback(self._cancel_callback)
task._loop.run_in_executor(self._executor_task) | Moves the task's execution to an executor thread and wakes it up. | scarletio/core/traps/task_thread_switcher.py | _enter_executor | HuyaneMatsu/scarletio | 3 | python | def _enter_executor(self):
"\n \n "
callbacks = self._enter_future._callbacks
callbacks.clear()
task = self._task
task.add_done_callback(self._cancel_callback)
task._loop.run_in_executor(self._executor_task) | def _enter_executor(self):
"\n \n "
callbacks = self._enter_future._callbacks
callbacks.clear()
task = self._task
task.add_done_callback(self._cancel_callback)
task._loop.run_in_executor(self._executor_task)<|docstring|>Moves the task's execution to an executor thread and wakes it up.<|endoftext|> |
0793c8d2921becb76e6d4f3b2e9d4f9ad67b4d7b9338c73d93b185093f3a4ce8 | def _cancel_callback(self, future):
'\n Callback added to the wrapped task. If the wrapped task is cancelled, then the section running inside of the\n executor will be cancelled as well, whenever it gives back the context with an `await`.\n '
if (future._state != FUTURE_STATE_CANCELLED):
return
waited_future = self._waited_future
if (waited_future is None):
return
waited_future.cancel() | Callback added to the wrapped task. If the wrapped task is cancelled, then the section running inside of the
executor will be cancelled as well, whenever it gives back the context with an `await`. | scarletio/core/traps/task_thread_switcher.py | _cancel_callback | HuyaneMatsu/scarletio | 3 | python | def _cancel_callback(self, future):
'\n Callback added to the wrapped task. If the wrapped task is cancelled, then the section running inside of the\n executor will be cancelled as well, whenever it gives back the context with an `await`.\n '
if (future._state != FUTURE_STATE_CANCELLED):
return
waited_future = self._waited_future
if (waited_future is None):
return
waited_future.cancel() | def _cancel_callback(self, future):
'\n Callback added to the wrapped task. If the wrapped task is cancelled, then the section running inside of the\n executor will be cancelled as well, whenever it gives back the context with an `await`.\n '
if (future._state != FUTURE_STATE_CANCELLED):
return
waited_future = self._waited_future
if (waited_future is None):
return
waited_future.cancel()<|docstring|>Callback added to the wrapped task. If the wrapped task is cancelled, then the section running inside of the
executor will be cancelled as well, whenever it gives back the context with an `await`.<|endoftext|> |
0b0cd7690f665d26f9ed93c5c4a1bddb33e7f718b8468805cd211935105bbabf | def _executor_task(self):
"\n Wraps the tasks's section's running inside of an executor, still allowing it to use `await`-s.\n "
task = self._task
loop = task._loop
end_future = Future(loop)
task._waited_future = end_future
self._exit_future = end_future
self._enter_future.set_result(None)
exception = None
coroutine = task._coroutine
local_waited_future = None
try:
while True:
if task._must_cancel:
exception = task._must_exception(exception)
if (local_waited_future is not None):
if (local_waited_future is end_future):
end_future.set_result(None)
loop.call_soon_thread_safe(task._step, exception)
break
try:
self._waited_future = local_waited_future
if (type(exception) is CancelledError):
local_waited_future.cancel()
local_waited_future.sync_wrap().wait()
except CancelledError:
break
except BaseException as err:
exception = err
finally:
local_waited_future = None
self._waited_future = None
if (task._state != FUTURE_STATE_PENDING):
break
try:
if (exception is None):
result = coroutine.send(None)
else:
result = coroutine.throw(exception)
except StopIteration as exception:
if task._must_cancel:
task._must_cancel = False
Future.set_exception(task, CancelledError())
else:
Future.set_result(task, exception.value)
loop.wake_up()
break
except CancelledError:
Future.cancel(task)
loop.wake_up()
break
except BaseException as exception:
Future.set_exception(task, exception)
loop.wake_up()
break
else:
if (isinstance(result, Future) and result._blocking):
result._blocking = False
local_waited_future = result
if task._must_cancel:
if local_waited_future.cancel():
task._must_cancel = False
else:
continue
finally:
task.remove_done_callback(self._cancel_callback)
self = None
task = None | Wraps the task's section running inside of an executor, still allowing it to use `await`-s. | scarletio/core/traps/task_thread_switcher.py | _executor_task | HuyaneMatsu/scarletio | 3 | python | def _executor_task(self):
"\n \n "
task = self._task
loop = task._loop
end_future = Future(loop)
task._waited_future = end_future
self._exit_future = end_future
self._enter_future.set_result(None)
exception = None
coroutine = task._coroutine
local_waited_future = None
try:
while True:
if task._must_cancel:
exception = task._must_exception(exception)
if (local_waited_future is not None):
if (local_waited_future is end_future):
end_future.set_result(None)
loop.call_soon_thread_safe(task._step, exception)
break
try:
self._waited_future = local_waited_future
if (type(exception) is CancelledError):
local_waited_future.cancel()
local_waited_future.sync_wrap().wait()
except CancelledError:
break
except BaseException as err:
exception = err
finally:
local_waited_future = None
self._waited_future = None
if (task._state != FUTURE_STATE_PENDING):
break
try:
if (exception is None):
result = coroutine.send(None)
else:
result = coroutine.throw(exception)
except StopIteration as exception:
if task._must_cancel:
task._must_cancel = False
Future.set_exception(task, CancelledError())
else:
Future.set_result(task, exception.value)
loop.wake_up()
break
except CancelledError:
Future.cancel(task)
loop.wake_up()
break
except BaseException as exception:
Future.set_exception(task, exception)
loop.wake_up()
break
else:
if (isinstance(result, Future) and result._blocking):
result._blocking = False
local_waited_future = result
if task._must_cancel:
if local_waited_future.cancel():
task._must_cancel = False
else:
continue
finally:
task.remove_done_callback(self._cancel_callback)
self = None
task = None | def _executor_task(self):
"\n \n "
task = self._task
loop = task._loop
end_future = Future(loop)
task._waited_future = end_future
self._exit_future = end_future
self._enter_future.set_result(None)
exception = None
coroutine = task._coroutine
local_waited_future = None
try:
while True:
if task._must_cancel:
exception = task._must_exception(exception)
if (local_waited_future is not None):
if (local_waited_future is end_future):
end_future.set_result(None)
loop.call_soon_thread_safe(task._step, exception)
break
try:
self._waited_future = local_waited_future
if (type(exception) is CancelledError):
local_waited_future.cancel()
local_waited_future.sync_wrap().wait()
except CancelledError:
break
except BaseException as err:
exception = err
finally:
local_waited_future = None
self._waited_future = None
if (task._state != FUTURE_STATE_PENDING):
break
try:
if (exception is None):
result = coroutine.send(None)
else:
result = coroutine.throw(exception)
except StopIteration as exception:
if task._must_cancel:
task._must_cancel = False
Future.set_exception(task, CancelledError())
else:
Future.set_result(task, exception.value)
loop.wake_up()
break
except CancelledError:
Future.cancel(task)
loop.wake_up()
break
except BaseException as exception:
Future.set_exception(task, exception)
loop.wake_up()
break
else:
if (isinstance(result, Future) and result._blocking):
result._blocking = False
local_waited_future = result
if task._must_cancel:
if local_waited_future.cancel():
task._must_cancel = False
else:
continue
finally:
task.remove_done_callback(self._cancel_callback)
self = None
task = None<|docstring|>Wraps the task's section running inside of an executor, still allowing it to use `await`-s.<|endoftext|>
8d57fa7c35966c9f8e231dd291207d329b7e1524ebdc29799d99fc0672e05ce5 | def incident(kvec, point):
'\n Incident wave\n\n @param kvec incident wave vector\n @param point target point\n @return complex number\n '
return numpy.exp((1j * kvec.dot(point))) | Incident wave
@param kvec incident wave vector
@param point target point
@return complex number | multiproc/wave.py | incident | pletzer/scatter | 2 | python | def incident(kvec, point):
'\n Incident wave\n\n @param kvec incident wave vector\n @param point target point\n @return complex number\n '
return numpy.exp((1j * kvec.dot(point))) | def incident(kvec, point):
'\n Incident wave\n\n @param kvec incident wave vector\n @param point target point\n @return complex number\n '
return numpy.exp((1j * kvec.dot(point)))<|docstring|>Incident wave
@param kvec incident wave vector
@param point target point
@return complex number<|endoftext|> |
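The incident field is the plane wave exp(i k.x), so for real k it is a pure phase; a quick numeric check:
import numpy
kvec = numpy.array([2.0, 0.0])
point = numpy.array([0.3, 0.7])
print(abs(numpy.exp(1j * kvec.dot(point))))  # 1.0: unit modulus, phase only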
39cb124793c84dce13ed2f6df2b07c9d50a984d46de95c2aa3b119620fc0d40f | def gradIncident(nvec, kvec, point):
'\n Normal gradient of the incident wave, assumes incident wave is exp(1j * kvec.x)\n\n @param nvec normal vector pointing inwards\n @param kvec incident wave vector\n @param point (source) point\n @return complex number\n '
return ((1j * nvec.dot(kvec)) * incident(kvec, point)) | Normal gradient of the incident wave, assumes incident wave is exp(1j * kvec.x)
@param nvec normal vector pointing inwards
@param kvec incident wave vector
@param point (source) point
@return complex number | multiproc/wave.py | gradIncident | pletzer/scatter | 2 | python | def gradIncident(nvec, kvec, point):
'\n Normal gradient of the incident wave, assumes incident wave is exp(1j * kvec.x)\n\n @param nvec normal vector pointing inwards\n @param kvec incident wave vector\n @param point (source) point\n @return complex number\n '
return ((1j * nvec.dot(kvec)) * incident(kvec, point)) | def gradIncident(nvec, kvec, point):
'\n Normal gradient of the incident wave, assumes incident wave is exp(1j * kvec.x)\n\n @param nvec normal vector pointing inwards\n @param kvec incident wave vector\n @param point (source) point\n @return complex number\n '
return ((1j * nvec.dot(kvec)) * incident(kvec, point))<|docstring|>Normal gradient of the incident wave, assumes incident wave is exp(1j * kvec.x)
@param nvec normal vector pointing inwards
@param kvec incident wave vector
@param point (source) point
@return complex number<|endoftext|> |
2e4d299a734e30931f94d1750ed7f0b8eb5e2058a17c8c93e7874ea72bf67a6b | def computeScatteredWaveElement(kvec, p0, p1, point):
'\n Scattered wave contribution from a single segment\n @param kvec incident wave vector\n @param p0 starting point of the segment\n @param p1 end point of the segment\n @param point observer point\n @return complex value\n '
xdot = (p1 - p0)
pmid = (0.5 * (p0 + p1))
dsdt = numpy.sqrt(xdot.dot(xdot))
nvec = numpy.array([(- xdot[1]), xdot[0]])
nvec /= numpy.sqrt(nvec.dot(nvec))
rvec = (point - pmid)
r = numpy.sqrt(rvec.dot(rvec))
kmod = numpy.sqrt(kvec.dot(kvec))
kr = (kmod * r)
g = ((1j / 4.0) * hankel1(0, kr))
dgdn = ((((((- 1j) / 4.0) * hankel1(1, kr)) * kmod) * nvec.dot(rvec)) / r)
scattered_wave = (((- dsdt) * g) * gradIncident(nvec, kvec, pmid))
shadow = (2 * ((nvec.dot(kvec) > 0.0) - 0.5))
scattered_wave += (((shadow * dsdt) * dgdn) * incident(kvec, pmid))
return scattered_wave | Scattered wave contribution from a single segment
@param kvec incident wave vector
@param p0 starting point of the segment
@param p1 end point of the segment
@param point observer point
@return complex value | multiproc/wave.py | computeScatteredWaveElement | pletzer/scatter | 2 | python | def computeScatteredWaveElement(kvec, p0, p1, point):
'\n Scattered wave contribution from a single segment\n @param kvec incident wave vector\n @param p0 starting point of the segment\n @param p1 end point of the segment\n @param point observer point\n @return complex value\n '
xdot = (p1 - p0)
pmid = (0.5 * (p0 + p1))
dsdt = numpy.sqrt(xdot.dot(xdot))
nvec = numpy.array([(- xdot[1]), xdot[0]])
nvec /= numpy.sqrt(nvec.dot(nvec))
rvec = (point - pmid)
r = numpy.sqrt(rvec.dot(rvec))
kmod = numpy.sqrt(kvec.dot(kvec))
kr = (kmod * r)
g = ((1j / 4.0) * hankel1(0, kr))
dgdn = ((((((- 1j) / 4.0) * hankel1(1, kr)) * kmod) * nvec.dot(rvec)) / r)
scattered_wave = (((- dsdt) * g) * gradIncident(nvec, kvec, pmid))
shadow = (2 * ((nvec.dot(kvec) > 0.0) - 0.5))
scattered_wave += (((shadow * dsdt) * dgdn) * incident(kvec, pmid))
return scattered_wave | def computeScatteredWaveElement(kvec, p0, p1, point):
'\n Scattered wave contribution from a single segment\n @param kvec incident wave vector\n @param p0 starting point of the segment\n @param p1 end point of the segment\n @param point observer point\n @return complex value\n '
xdot = (p1 - p0)
pmid = (0.5 * (p0 + p1))
dsdt = numpy.sqrt(xdot.dot(xdot))
nvec = numpy.array([(- xdot[1]), xdot[0]])
nvec /= numpy.sqrt(nvec.dot(nvec))
rvec = (point - pmid)
r = numpy.sqrt(rvec.dot(rvec))
kmod = numpy.sqrt(kvec.dot(kvec))
kr = (kmod * r)
g = ((1j / 4.0) * hankel1(0, kr))
dgdn = ((((((- 1j) / 4.0) * hankel1(1, kr)) * kmod) * nvec.dot(rvec)) / r)
scattered_wave = (((- dsdt) * g) * gradIncident(nvec, kvec, pmid))
shadow = (2 * ((nvec.dot(kvec) > 0.0) - 0.5))
scattered_wave += (((shadow * dsdt) * dgdn) * incident(kvec, pmid))
return scattered_wave<|docstring|>Scattered wave contribution from a single segment
@param kvec incident wave vector
@param p0 starting point of the segment
@param p1 end point of the segment
@param point observer point
@return complex value<|endoftext|> |
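The g and dgdn terms above come from the 2D Helmholtz Green's function G(r) = (i/4) H0^(1)(kr) and its normal derivative, which uses the identity d/dx H0^(1)(x) = -H1^(1)(x); a small finite-difference check of that identity with scipy:
import numpy
from scipy.special import hankel1
x, h = 1.7, 1e-6
deriv = (hankel1(0, x + h) - hankel1(0, x - h)) / (2 * h)  # central difference
print(numpy.allclose(deriv, -hankel1(1, x)))  # True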
af7dedca06474d373e6d556bdd6e855370ab0eb6229f4f3234364b894003690e | def computeScatteredWave(kvec, xc, yc, point):
'\n Total scattered wave response, summing up \n contributions from each segment\n\n @param kvec incident wave vector\n @param xc list of x coordinates representing the contour, must close\n @param yc list of y coordinates representing the contour, must close\n @param point observer point\n @return complex value\n '
res = 0j
n = len(xc)
for i0 in range((n - 1)):
p0 = numpy.array([xc[i0], yc[i0]])
i1 = (i0 + 1)
p1 = numpy.array([xc[i1], yc[i1]])
res += computeScatteredWaveElement(kvec, p0, p1, point)
return res | Total scattered wave response, summing up
contributions from each segment
@param kvec incident wave vector
@param xc list of x coordinates representing the contour, must close
@param yc list of y coordinates representing the contour, must close
@param point observer point
@return complex value | multiproc/wave.py | computeScatteredWave | pletzer/scatter | 2 | python | def computeScatteredWave(kvec, xc, yc, point):
'\n Total scattered wave response, summing up \n contributions from each segment\n\n @param kvec incident wave vector\n @param xc list of x coordinates representing the contour, must close\n @param yc list of y coordinates representing the contour, must close\n @param point observer point\n @return complex value\n '
res = 0j
n = len(xc)
for i0 in range((n - 1)):
p0 = numpy.array([xc[i0], yc[i0]])
i1 = (i0 + 1)
p1 = numpy.array([xc[i1], yc[i1]])
res += computeScatteredWaveElement(kvec, p0, p1, point)
return res | def computeScatteredWave(kvec, xc, yc, point):
'\n Total scattered wave response, summing up \n contributions from each segment\n\n @param kvec incident wave vector\n @param xc list of x coordinates representing the contour, must close\n @param yc list of y coordinates representing the contour, must close\n @param point observer point\n @return complex value\n '
res = 0j
n = len(xc)
for i0 in range((n - 1)):
p0 = numpy.array([xc[i0], yc[i0]])
i1 = (i0 + 1)
p1 = numpy.array([xc[i1], yc[i1]])
res += computeScatteredWaveElement(kvec, p0, p1, point)
return res<|docstring|>Total scattered wave response, summing up
contributions from each segment
@param kvec incident wave vector
@param xc list of x coordinates representing the contour, must close
@param yc list of y coordinates representing the contour, must close
@param point observer point
@return complex value<|endoftext|> |
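A usage sketch: evaluate the scattered field of a unit circle at one exterior point, closing the contour by repeating the first point as the docstring requires (assumes the functions above are importable from multiproc/wave.py):
import numpy
n = 64
t = numpy.linspace(0.0, 2 * numpy.pi, n + 1)  # final point repeats the first: closed contour
xc, yc = numpy.cos(t), numpy.sin(t)
kvec = numpy.array([3.0, 0.0])
obs = numpy.array([5.0, 0.0])
print(computeScatteredWave(kvec, xc, yc, obs))  # complex field value at the observer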
18235ff4d9dfc4cfc253d84bafc8f0982f7ddefc4999fdcd618d730d9bf62d42 | def run(self, timestamp, ttd=None):
'Runs the planner.\n\n Note:\n The planner assumes that the world is up-to-date.\n\n Returns:\n :py:class:`~pylot.planning.waypoints.Waypoints`: Waypoints of the\n planned trajectory.\n '
obstacle_list = self._world.get_obstacle_list()
if (len(obstacle_list) == 0):
output_wps = self._world.follow_waypoints(self._flags.target_speed)
else:
self._logger.debug('@{}: Hyperparameters: {}'.format(timestamp, self._hyperparameters))
initial_conditions = self._compute_initial_conditions(obstacle_list)
self._logger.debug('@{}: Initial conditions: {}'.format(timestamp, initial_conditions))
(path_x, path_y, success) = apply_rrt_star(initial_conditions, self._hyperparameters)
if success:
self._logger.debug('@{}: RRT* succeeded'.format(timestamp))
speeds = ([self._flags.target_speed] * len(path_x))
self._logger.debug('@{}: RRT* Path X: {}'.format(timestamp, path_x.tolist()))
self._logger.debug('@{}: RRT* Path Y: {}'.format(timestamp, path_y.tolist()))
self._logger.debug('@{}: RRT* Speeds: {}'.format(timestamp, speeds))
output_wps = self.build_output_waypoints(path_x, path_y, speeds)
else:
self._logger.error('@{}: RRT* failed. Sending emergency stop.'.format(timestamp))
output_wps = self._world.follow_waypoints(0)
return output_wps | Runs the planner.
Note:
The planner assumes that the world is up-to-date.
Returns:
:py:class:`~pylot.planning.waypoints.Waypoints`: Waypoints of the
planned trajectory. | pylot/planning/rrt_star/rrt_star_planner.py | run | chirpyjh/pylot | 231 | python | def run(self, timestamp, ttd=None):
'Runs the planner.\n\n Note:\n The planner assumes that the world is up-to-date.\n\n Returns:\n :py:class:`~pylot.planning.waypoints.Waypoints`: Waypoints of the\n planned trajectory.\n '
obstacle_list = self._world.get_obstacle_list()
if (len(obstacle_list) == 0):
output_wps = self._world.follow_waypoints(self._flags.target_speed)
else:
self._logger.debug('@{}: Hyperparameters: {}'.format(timestamp, self._hyperparameters))
initial_conditions = self._compute_initial_conditions(obstacle_list)
self._logger.debug('@{}: Initial conditions: {}'.format(timestamp, initial_conditions))
(path_x, path_y, success) = apply_rrt_star(initial_conditions, self._hyperparameters)
if success:
self._logger.debug('@{}: RRT* succeeded'.format(timestamp))
speeds = ([self._flags.target_speed] * len(path_x))
self._logger.debug('@{}: RRT* Path X: {}'.format(timestamp, path_x.tolist()))
self._logger.debug('@{}: RRT* Path Y: {}'.format(timestamp, path_y.tolist()))
self._logger.debug('@{}: RRT* Speeds: {}'.format(timestamp, speeds))
output_wps = self.build_output_waypoints(path_x, path_y, speeds)
else:
self._logger.error('@{}: RRT* failed. Sending emergency stop.'.format(timestamp))
output_wps = self._world.follow_waypoints(0)
return output_wps | def run(self, timestamp, ttd=None):
'Runs the planner.\n\n Note:\n The planner assumes that the world is up-to-date.\n\n Returns:\n :py:class:`~pylot.planning.waypoints.Waypoints`: Waypoints of the\n planned trajectory.\n '
obstacle_list = self._world.get_obstacle_list()
if (len(obstacle_list) == 0):
output_wps = self._world.follow_waypoints(self._flags.target_speed)
else:
self._logger.debug('@{}: Hyperparameters: {}'.format(timestamp, self._hyperparameters))
initial_conditions = self._compute_initial_conditions(obstacle_list)
self._logger.debug('@{}: Initial conditions: {}'.format(timestamp, initial_conditions))
(path_x, path_y, success) = apply_rrt_star(initial_conditions, self._hyperparameters)
if success:
self._logger.debug('@{}: RRT* succeeded'.format(timestamp))
speeds = ([self._flags.target_speed] * len(path_x))
self._logger.debug('@{}: RRT* Path X: {}'.format(timestamp, path_x.tolist()))
self._logger.debug('@{}: RRT* Path Y: {}'.format(timestamp, path_y.tolist()))
self._logger.debug('@{}: RRT* Speeds: {}'.format(timestamp, speeds))
output_wps = self.build_output_waypoints(path_x, path_y, speeds)
else:
self._logger.error('@{}: RRT* failed. Sending emergency stop.'.format(timestamp))
output_wps = self._world.follow_waypoints(0)
return output_wps<|docstring|>Runs the planner.
Note:
The planner assumes that the world is up-to-date.
Returns:
:py:class:`~pylot.planning.waypoints.Waypoints`: Waypoints of the
planned trajectory.<|endoftext|> |
c91d691ba658c8abd67c5701c4c62fd6ec29ed980aa7dc80eb521e3fd907f76d | def __init__(self, name, container_config: DockerConfig, config_hash=0, min_instances=0, max_instances=None, growth=600, shrink=None, backlog=500, queue=None, shutdown_seconds=30):
'\n :param name: Name of the service to manage\n :param container_config: Instructions on how to start this service\n :param min_instances: The minimum number of copies of this service to keep running\n :param max_instances: The maximum number of copies permitted to be running\n :param growth: Delay before growing a service, unit-less, approximately seconds\n :param shrink: Delay before shrinking a service, unit-less, approximately seconds, defaults to -growth\n :param backlog: How long a queue backlog should be before it takes `growth` seconds to grow.\n :param queue: Queue name for monitoring\n '
self.name = name
self.queue: PriorityQueue = queue
self.container_config = container_config
self.target_duty_cycle = 0.9
self.shutdown_seconds = shutdown_seconds
self.config_hash = config_hash
self.min_instances = self._min_instances = max(0, int(min_instances))
self._max_instances = (max(0, int(max_instances)) if max_instances else float('inf'))
self.desired_instances: int = 0
self.running_instances: int = 0
self.pressure: float = 0
self.growth_threshold = abs(float(growth))
self.shrink_threshold = (((- self.growth_threshold) / 2) if (shrink is None) else (- abs(float(shrink))))
self.leak_rate: float = 0.1
self.backlog = int(backlog)
self.queue_length = 0
self.duty_cycle = 0
self.last_update = 0 | :param name: Name of the service to manage
:param container_config: Instructions on how to start this service
:param min_instances: The minimum number of copies of this service to keep running
:param max_instances: The maximum number of copies permitted to be running
:param growth: Delay before growing a service, unit-less, approximately seconds
:param shrink: Delay before shrinking a service, unit-less, approximately seconds, defaults to -growth
:param backlog: How long a queue backlog should be before it takes `growth` seconds to grow.
:param queue: Queue name for monitoring | assemblyline_core/scaler/scaler_server.py | __init__ | kryptoslogic/assemblyline-core | 0 | python | def __init__(self, name, container_config: DockerConfig, config_hash=0, min_instances=0, max_instances=None, growth=600, shrink=None, backlog=500, queue=None, shutdown_seconds=30):
'\n :param name: Name of the service to manage\n :param container_config: Instructions on how to start this service\n :param min_instances: The minimum number of copies of this service to keep running\n :param max_instances: The maximum number of copies permitted to be running\n :param growth: Delay before growing a service, unit-less, approximately seconds\n :param shrink: Delay before shrinking a service, unit-less, approximately seconds, defaults to -growth\n :param backlog: How long a queue backlog should be before it takes `growth` seconds to grow.\n :param queue: Queue name for monitoring\n '
self.name = name
self.queue: PriorityQueue = queue
self.container_config = container_config
self.target_duty_cycle = 0.9
self.shutdown_seconds = shutdown_seconds
self.config_hash = config_hash
self.min_instances = self._min_instances = max(0, int(min_instances))
self._max_instances = (max(0, int(max_instances)) if max_instances else float('inf'))
self.desired_instances: int = 0
self.running_instances: int = 0
self.pressure: float = 0
self.growth_threshold = abs(float(growth))
self.shrink_threshold = (((- self.growth_threshold) / 2) if (shrink is None) else (- abs(float(shrink))))
self.leak_rate: float = 0.1
self.backlog = int(backlog)
self.queue_length = 0
self.duty_cycle = 0
self.last_update = 0 | def __init__(self, name, container_config: DockerConfig, config_hash=0, min_instances=0, max_instances=None, growth=600, shrink=None, backlog=500, queue=None, shutdown_seconds=30):
'\n :param name: Name of the service to manage\n :param container_config: Instructions on how to start this service\n :param min_instances: The minimum number of copies of this service to keep running\n :param max_instances: The maximum number of copies permitted to be running\n :param growth: Delay before growing a service, unit-less, approximately seconds\n :param shrink: Delay before shrinking a service, unit-less, approximately seconds, defaults to -growth\n :param backlog: How long a queue backlog should be before it takes `growth` seconds to grow.\n :param queue: Queue name for monitoring\n '
self.name = name
self.queue: PriorityQueue = queue
self.container_config = container_config
self.target_duty_cycle = 0.9
self.shutdown_seconds = shutdown_seconds
self.config_hash = config_hash
self.min_instances = self._min_instances = max(0, int(min_instances))
self._max_instances = (max(0, int(max_instances)) if max_instances else float('inf'))
self.desired_instances: int = 0
self.running_instances: int = 0
self.pressure: float = 0
self.growth_threshold = abs(float(growth))
self.shrink_threshold = (((- self.growth_threshold) / 2) if (shrink is None) else (- abs(float(shrink))))
self.leak_rate: float = 0.1
self.backlog = int(backlog)
self.queue_length = 0
self.duty_cycle = 0
self.last_update = 0<|docstring|>:param name: Name of the service to manage
:param container_config: Instructions on how to start this service
:param min_instances: The minimum number of copies of this service to keep running
:param max_instances: The maximum number of copies permitted to be running
:param growth: Delay before growing a service, unit-less, approximately seconds
:param shrink: Delay before shrinking a service, unit-less, approximately seconds, defaults to -growth
:param backlog: How long a queue backlog should be before it takes `growth` seconds to grow.
:param queue: Queue name for monitoring<|endoftext|> |
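Note the defaulting arithmetic in __init__ above: with growth=600 and shrink omitted, shrink_threshold becomes -growth/2 = -300.0, and max_instances=None maps to infinity; a sketch of just that logic:
growth, shrink, max_instances = 600, None, None
growth_threshold = abs(float(growth))
shrink_threshold = -growth_threshold / 2 if shrink is None else -abs(float(shrink))
cap = max(0, int(max_instances)) if max_instances else float('inf')  # None -> unbounded
print(growth_threshold, shrink_threshold, cap)  # 600.0 -300.0 inf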
654c7335f620f94585960ae2cc6f9353e102e2049313e736f2f9fd1993406838 | def heartbeat(self):
'Periodically touch a file on disk.\n\n Since tasks are run serially, the delay between touches will be the maximum of\n HEARTBEAT_INTERVAL and the longest running task.\n '
if self.config.logging.heartbeat_file:
self.scheduler.enter(HEARTBEAT_INTERVAL, 0, self.heartbeat)
super().heartbeat() | Periodically touch a file on disk.
Since tasks are run serially, the delay between touches will be the maximum of
HEARTBEAT_INTERVAL and the longest running task. | assemblyline_core/scaler/scaler_server.py | heartbeat | kryptoslogic/assemblyline-core | 0 | python | def heartbeat(self):
'Periodically touch a file on disk.\n\n Since tasks are run serially, the delay between touches will be the maximum of\n HEARTBEAT_INTERVAL and the longest running task.\n '
if self.config.logging.heartbeat_file:
self.scheduler.enter(HEARTBEAT_INTERVAL, 0, self.heartbeat)
super().heartbeat() | def heartbeat(self):
'Periodically touch a file on disk.\n\n Since tasks are run serially, the delay between touches will be the maximum of\n HEARTBEAT_INTERVAL and the longest running task.\n '
if self.config.logging.heartbeat_file:
self.scheduler.enter(HEARTBEAT_INTERVAL, 0, self.heartbeat)
super().heartbeat()<|docstring|>Periodically touch a file on disk.
Since tasks are run serially, the delay between touches will be the maximum of
HEARTBEAT_INTERVAL and the longest running task.<|endoftext|> |
ba329b28a106d4d2b075ec690ae9c797c0ab4f77421d07d0c04bf708f6673649 | def update_scaling(self):
'Check if we need to scale any services up or down.'
self.scheduler.enter(SCALE_INTERVAL, 0, self.update_scaling)
try:
profiles: List[ServiceProfile] = list(self.profiles.values())
targets = {_p.name: self.controller.get_target(_p.name) for _p in profiles}
for (name, profile) in self.profiles.items():
self.log.debug(f'{name}')
self.log.debug(f'Instances {profile.min_instances} < {profile.desired_instances} | {targets[name]} < {profile.max_instances}')
self.log.debug(f'Pressure {profile.shrink_threshold} < {profile.pressure} < {profile.growth_threshold}')
for (name, profile) in self.profiles.items():
if (targets[name] > profile.desired_instances):
self.log.info(f'{name} wants less resources changing allocation {targets[name]} -> {profile.desired_instances}')
self.controller.set_target(name, profile.desired_instances)
targets[name] = profile.desired_instances
if (not self.running):
return
for (name, profile) in self.profiles.items():
if (targets[name] < profile.min_instances):
self.log.info(f"{name} isn't meeting minimum allocation {targets[name]} -> {profile.min_instances}")
self.controller.set_target(name, profile.min_instances)
targets[name] = profile.min_instances
free_cpu = self.controller.free_cpu()
free_memory = self.controller.free_memory()
def trim(prof: List[ServiceProfile]):
prof = [_p for _p in prof if (_p.desired_instances > targets[_p.name])]
drop = [_p for _p in prof if ((_p.cpu > free_cpu) or (_p.ram > free_memory))]
if drop:
drop = {_p.name: (_p.cpu, _p.ram) for _p in drop}
self.log.debug(f"Can't make more because not enough resources {drop}")
prof = [_p for _p in prof if ((_p.cpu <= free_cpu) and (_p.ram <= free_memory))]
return prof
profiles = trim(profiles)
while profiles:
if True:
profiles.sort(key=(lambda _p: self.controller.get_target(_p.name)))
free_memory -= profiles[0].container_config.ram_mb
free_cpu -= profiles[0].container_config.cpu_cores
targets[profiles[0].name] += 1
profiles = trim(profiles)
for (name, value) in targets.items():
old = self.controller.get_target(name)
if (value != old):
self.log.info(f'Scaling service {name}: {old} -> {value}')
self.controller.set_target(name, value)
if (not self.running):
return
except ServiceControlError as error:
self.log.exception('Error while scaling services.')
self.handle_service_error(error.service_name) | Check if we need to scale any services up or down. | assemblyline_core/scaler/scaler_server.py | update_scaling | kryptoslogic/assemblyline-core | 0 | python | def update_scaling(self):
self.scheduler.enter(SCALE_INTERVAL, 0, self.update_scaling)
try:
profiles: List[ServiceProfile] = list(self.profiles.values())
targets = {_p.name: self.controller.get_target(_p.name) for _p in profiles}
for (name, profile) in self.profiles.items():
self.log.debug(f'{name}')
self.log.debug(f'Instances {profile.min_instances} < {profile.desired_instances} | {targets[name]} < {profile.max_instances}')
self.log.debug(f'Pressure {profile.shrink_threshold} < {profile.pressure} < {profile.growth_threshold}')
for (name, profile) in self.profiles.items():
if (targets[name] > profile.desired_instances):
self.log.info(f'{name} wants less resources changing allocation {targets[name]} -> {profile.desired_instances}')
self.controller.set_target(name, profile.desired_instances)
targets[name] = profile.desired_instances
if (not self.running):
return
for (name, profile) in self.profiles.items():
if (targets[name] < profile.min_instances):
self.log.info(f"{name} isn't meeting minimum allocation {targets[name]} -> {profile.min_instances}")
self.controller.set_target(name, profile.min_instances)
targets[name] = profile.min_instances
free_cpu = self.controller.free_cpu()
free_memory = self.controller.free_memory()
def trim(prof: List[ServiceProfile]):
prof = [_p for _p in prof if (_p.desired_instances > targets[_p.name])]
drop = [_p for _p in prof if ((_p.cpu > free_cpu) or (_p.ram > free_memory))]
if drop:
drop = {_p.name: (_p.cpu, _p.ram) for _p in drop}
self.log.debug(f"Can't make more because not enough resources {drop}")
prof = [_p for _p in prof if ((_p.cpu <= free_cpu) and (_p.ram <= free_memory))]
return prof
profiles = trim(profiles)
while profiles:
                profiles.sort(key=(lambda _p: self.controller.get_target(_p.name)))
free_memory -= profiles[0].container_config.ram_mb
free_cpu -= profiles[0].container_config.cpu_cores
targets[profiles[0].name] += 1
profiles = trim(profiles)
for (name, value) in targets.items():
old = self.controller.get_target(name)
if (value != old):
self.log.info(f'Scaling service {name}: {old} -> {value}')
self.controller.set_target(name, value)
if (not self.running):
return
except ServiceControlError as error:
self.log.exception('Error while scaling services.')
self.handle_service_error(error.service_name) | def update_scaling(self):
self.scheduler.enter(SCALE_INTERVAL, 0, self.update_scaling)
try:
profiles: List[ServiceProfile] = list(self.profiles.values())
targets = {_p.name: self.controller.get_target(_p.name) for _p in profiles}
for (name, profile) in self.profiles.items():
self.log.debug(f'{name}')
self.log.debug(f'Instances {profile.min_instances} < {profile.desired_instances} | {targets[name]} < {profile.max_instances}')
self.log.debug(f'Pressure {profile.shrink_threshold} < {profile.pressure} < {profile.growth_threshold}')
for (name, profile) in self.profiles.items():
if (targets[name] > profile.desired_instances):
self.log.info(f'{name} wants less resources changing allocation {targets[name]} -> {profile.desired_instances}')
self.controller.set_target(name, profile.desired_instances)
targets[name] = profile.desired_instances
if (not self.running):
return
for (name, profile) in self.profiles.items():
if (targets[name] < profile.min_instances):
self.log.info(f"{name} isn't meeting minimum allocation {targets[name]} -> {profile.min_instances}")
self.controller.set_target(name, profile.min_instances)
targets[name] = profile.min_instances
free_cpu = self.controller.free_cpu()
free_memory = self.controller.free_memory()
def trim(prof: List[ServiceProfile]):
prof = [_p for _p in prof if (_p.desired_instances > targets[_p.name])]
drop = [_p for _p in prof if ((_p.cpu > free_cpu) or (_p.ram > free_memory))]
if drop:
drop = {_p.name: (_p.cpu, _p.ram) for _p in drop}
self.log.debug(f"Can't make more because not enough resources {drop}")
prof = [_p for _p in prof if ((_p.cpu <= free_cpu) and (_p.ram <= free_memory))]
return prof
profiles = trim(profiles)
while profiles:
                profiles.sort(key=(lambda _p: self.controller.get_target(_p.name)))
free_memory -= profiles[0].container_config.ram_mb
free_cpu -= profiles[0].container_config.cpu_cores
targets[profiles[0].name] += 1
profiles = trim(profiles)
for (name, value) in targets.items():
old = self.controller.get_target(name)
if (value != old):
self.log.info(f'Scaling service {name}: {old} -> {value}')
self.controller.set_target(name, value)
if (not self.running):
return
except ServiceControlError as error:
self.log.exception('Error while scaling services.')
self.handle_service_error(error.service_name)<|docstring|>Check if we need to scale any services up or down.<|endoftext|> |
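The grow phase above is a greedy round-robin: trim away profiles that are already satisfied or too expensive for the remaining headroom, then repeatedly hand one instance to the profile with the fewest. A standalone sketch with invented numbers (Profile here is a stand-in, not the real ServiceProfile):

from dataclasses import dataclass

@dataclass
class Profile:
    name: str
    cpu: float    # cost of one more instance
    ram: float
    desired: int  # desired_instances

targets = {'extract': 1, 'av': 1}
free_cpu, free_ram = 4.0, 4096.0
profiles = [Profile('extract', 1.0, 1024.0, 3), Profile('av', 2.0, 2048.0, 2)]

def trim(prof):
    # Drop profiles that are satisfied or no longer fit in the headroom.
    prof = [p for p in prof if p.desired > targets[p.name]]
    return [p for p in prof if p.cpu <= free_cpu and p.ram <= free_ram]

profiles = trim(profiles)
while profiles:
    # Always grow the service that currently has the fewest instances.
    profiles.sort(key=lambda p: targets[p.name])
    pick = profiles[0]
    free_cpu -= pick.cpu
    free_ram -= pick.ram
    targets[pick.name] += 1
    profiles = trim(profiles)

print(targets)  # {'extract': 3, 'av': 2} -- everything fit in this example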
fe5cba65ffcad1e8740c165fe1fe2abd83c739aa79010ec5cc4b65228361b5ee | def handle_service_error(self, service_name):
'Handle an error occurring in the *analysis* service.\n\n Errors for core systems should simply be logged, and a best effort to continue made.\n\n For analysis services, ignore the error a few times, then disable the service.\n '
self.error_count[service_name] = (self.error_count.get(service_name, 0) + 1)
if (self.error_count[service_name] >= MAXIMUM_SERVICE_ERRORS):
self.datastore.service_delta.update(service_name, [(self.datastore.service_delta.UPDATE_SET, 'enabled', False)])
del self.error_count[service_name] | Handle an error occurring in the *analysis* service.
Errors for core systems should simply be logged, and a best effort to continue made.
For analysis services, ignore the error a few times, then disable the service. | assemblyline_core/scaler/scaler_server.py | handle_service_error | kryptoslogic/assemblyline-core | 0 | python | def handle_service_error(self, service_name):
'Handle an error occurring in the *analysis* service.\n\n Errors for core systems should simply be logged, and a best effort to continue made.\n\n For analysis services, ignore the error a few times, then disable the service.\n '
self.error_count[service_name] = (self.error_count.get(service_name, 0) + 1)
if (self.error_count[service_name] >= MAXIMUM_SERVICE_ERRORS):
self.datastore.service_delta.update(service_name, [(self.datastore.service_delta.UPDATE_SET, 'enabled', False)])
del self.error_count[service_name] | def handle_service_error(self, service_name):
'Handle an error occurring in the *analysis* service.\n\n Errors for core systems should simply be logged, and a best effort to continue made.\n\n For analysis services, ignore the error a few times, then disable the service.\n '
self.error_count[service_name] = (self.error_count.get(service_name, 0) + 1)
if (self.error_count[service_name] >= MAXIMUM_SERVICE_ERRORS):
self.datastore.service_delta.update(service_name, [(self.datastore.service_delta.UPDATE_SET, 'enabled', False)])
del self.error_count[service_name]<|docstring|>Handle an error occurring in the *analysis* service.
Errors for core systems should simply be logged, and a best effort to continue made.
For analysis services, ignore the error a few times, then disable the service.<|endoftext|> |
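The pattern above tolerates a service failing a few times before flipping its enabled flag off; a compact standalone illustration (the disable callback stands in for the datastore update):

MAXIMUM_SERVICE_ERRORS = 5  # illustrative threshold
error_count = {}

def handle_service_error(service_name, disable):
    error_count[service_name] = error_count.get(service_name, 0) + 1
    if error_count[service_name] >= MAXIMUM_SERVICE_ERRORS:
        disable(service_name)          # stands in for the service_delta update
        del error_count[service_name]  # reset so a re-enabled service starts clean

for _ in range(5):
    handle_service_error('oletools', disable=lambda name: print(f'disabling {name}'))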
2f7407e61a05d79df9f4ff02d3004f787abbb99a65a5b71229fcacea6ba67c67 | def sync_metrics(self):
'Check if there are any pubsub messages we need.'
self.scheduler.enter(METRIC_SYNC_INTERVAL, 3, self.sync_metrics)
service_data = self.status_table.items()
for (host, (service, state, time_limit)) in service_data.items():
if (time.time() < time_limit):
self.state.update(service=service, host=host, throughput=0, busy_seconds=(METRIC_SYNC_INTERVAL if (state == ServiceStatus.Running) else 0))
if (time.time() > (time_limit + 600)):
self.status_table.pop(host)
export_interval = self.config.core.metrics.export_interval
for (profile_name, profile) in self.profiles.items():
update = self.state.read(profile_name)
if update:
delta = (time.time() - profile.last_update)
profile.update(delta=delta, backlog=profile.queue.length(), **update)
if ((self.controller.get_target(profile_name) == 0) and (profile.desired_instances == 0) and profile.queue):
queue_length = profile.queue.length()
if (queue_length > 0):
self.log.info(f'Service at zero instances has messages: {profile.name} ({queue_length} in queue)')
profile.update(delta=export_interval, instances=0, backlog=queue_length, duty_cycle=profile.target_duty_cycle) | Check if there are any pubsub messages we need. | assemblyline_core/scaler/scaler_server.py | sync_metrics | kryptoslogic/assemblyline-core | 0 | python | def sync_metrics(self):
self.scheduler.enter(METRIC_SYNC_INTERVAL, 3, self.sync_metrics)
service_data = self.status_table.items()
for (host, (service, state, time_limit)) in service_data.items():
if (time.time() < time_limit):
self.state.update(service=service, host=host, throughput=0, busy_seconds=(METRIC_SYNC_INTERVAL if (state == ServiceStatus.Running) else 0))
if (time.time() > (time_limit + 600)):
self.status_table.pop(host)
export_interval = self.config.core.metrics.export_interval
for (profile_name, profile) in self.profiles.items():
update = self.state.read(profile_name)
if update:
delta = (time.time() - profile.last_update)
profile.update(delta=delta, backlog=profile.queue.length(), **update)
if ((self.controller.get_target(profile_name) == 0) and (profile.desired_instances == 0) and profile.queue):
queue_length = profile.queue.length()
if (queue_length > 0):
self.log.info(f'Service at zero instances has messages: {profile.name} ({queue_length} in queue)')
profile.update(delta=export_interval, instances=0, backlog=queue_length, duty_cycle=profile.target_duty_cycle) | def sync_metrics(self):
self.scheduler.enter(METRIC_SYNC_INTERVAL, 3, self.sync_metrics)
service_data = self.status_table.items()
for (host, (service, state, time_limit)) in service_data.items():
if (time.time() < time_limit):
self.state.update(service=service, host=host, throughput=0, busy_seconds=(METRIC_SYNC_INTERVAL if (state == ServiceStatus.Running) else 0))
if (time.time() > (time_limit + 600)):
self.status_table.pop(host)
export_interval = self.config.core.metrics.export_interval
for (profile_name, profile) in self.profiles.items():
update = self.state.read(profile_name)
if update:
delta = (time.time() - profile.last_update)
profile.update(delta=delta, backlog=profile.queue.length(), **update)
if ((self.controller.get_target(profile_name) == 0) and (profile.desired_instances == 0) and profile.queue):
queue_length = profile.queue.length()
if (queue_length > 0):
self.log.info(f'Service at zero instances has messages: {profile.name} ({queue_length} in queue)')
profile.update(delta=export_interval, instances=0, backlog=queue_length, duty_cycle=profile.target_duty_cycle)<|docstring|>Check if there are any pubsub messages we need.<|endoftext|> |
98b3b6accc111a4812bbf22f7d684ebd48753d5154b2f0ce9b429c84b8eb7ba5 | def flush_service_status(self):
'The service status table may have references to containers that have crashed. Try to remove them all.'
self.scheduler.enter(SERVICE_STATUS_FLUSH, 0, self.flush_service_status)
names = set(self.controller.get_running_container_names())
for hostname in self.status_table.keys():
if (hostname not in names):
self.status_table.pop(hostname) | The service status table may have references to containers that have crashed. Try to remove them all. | assemblyline_core/scaler/scaler_server.py | flush_service_status | kryptoslogic/assemblyline-core | 0 | python | def flush_service_status(self):
self.scheduler.enter(SERVICE_STATUS_FLUSH, 0, self.flush_service_status)
names = set(self.controller.get_running_container_names())
for hostname in self.status_table.keys():
if (hostname not in names):
self.status_table.pop(hostname) | def flush_service_status(self):
self.scheduler.enter(SERVICE_STATUS_FLUSH, 0, self.flush_service_status)
names = set(self.controller.get_running_container_names())
for hostname in self.status_table.keys():
if (hostname not in names):
self.status_table.pop(hostname)<|docstring|>The service status table may have references to containers that have crashed. Try to remove them all.<|endoftext|> |
ceb4c146f252b61bdc50c5bc289fd9cccb6af422d66c1e1514df52bdc54fa988 | def log_container_events(self):
        'Periodically log new container events reported by the controller.'
self.scheduler.enter(CONTAINER_EVENTS_LOG_INTERVAL, 0, self.log_container_events)
for message in self.controller.new_events():
            self.log.warning(('Container Event :: ' + message)) | Periodically log new container events reported by the controller. | assemblyline_core/scaler/scaler_server.py | log_container_events | kryptoslogic/assemblyline-core | 0 | python | def log_container_events(self):
self.scheduler.enter(CONTAINER_EVENTS_LOG_INTERVAL, 0, self.log_container_events)
for message in self.controller.new_events():
self.log.warning(('Container Event :: ' + message)) | def log_container_events(self):
self.scheduler.enter(CONTAINER_EVENTS_LOG_INTERVAL, 0, self.log_container_events)
for message in self.controller.new_events():
            self.log.warning(('Container Event :: ' + message))<|docstring|>Periodically log new container events reported by the controller.<|endoftext|>
5ad597bd7d435c6927859560025e932423761b7895a1c3f23fcf9df7acc50ead | def hello(person):
'Says hello and returns the greeting\n\n Parameters\n ----------\n person : str\n the name of the person that we want to say hello to\n\n Returns\n -------\n str\n the greeting used to say hello'
greeting = f'Hello {person}!'
print(greeting)
return greeting | Says hello and returns the greeting
Parameters
----------
person : str
the name of the person that we want to say hello to
Returns
-------
str
the greeting used to say hello | Lecture Material/examples/07_functions_examples.py | hello | knherrera/pcc-cis-012-intro-to-programming-python | 23 | python | def hello(person):
'Says hello and returns the greeting\n\n Parameters\n ----------\n person : str\n the name of the person that we want to say hello to\n\n Returns\n -------\n str\n the greeting used to say hello'
greeting = f'Hello {person}!'
print(greeting)
return greeting | def hello(person):
'Says hello and returns the greeting\n\n Parameters\n ----------\n person : str\n the name of the person that we want to say hello to\n\n Returns\n -------\n str\n the greeting used to say hello'
greeting = f'Hello {person}!'
print(greeting)
return greeting<|docstring|>Says hello and returns the greeting
Parameters
----------
person : str
the name of the person that we want to say hello to
Returns
-------
str
the greeting used to say hello<|endoftext|> |
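Assuming the lecture function above is in scope, a quick usage check shows that the greeting is both printed and returned, and that help() renders the numpydoc-style docstring:

greeting = hello('Ada')          # prints: Hello Ada!
assert greeting == 'Hello Ada!'
help(hello)                      # renders the numpydoc-style docstring above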
09067fba4f40eacd6287a48975bf5c8e93f03057cde1bd496ba51b46d4c25da1 | def test_bot_parse_routine(self):
'\n Tests whether all links inside of the file can be successfully parsed.\n The key in the json has to either be "level", "ascendency", "skill" or any of the categories in the bot output.\n I.e. "Offense", "Defense", ...\n '
demo_author = None
json = get_builds()
for build in json['builds']:
with self.subTest(i=build['name']):
build_embed = parse_pob(demo_author, build['pastebin'])
self.assertTrue(isinstance(build_embed, Embed))
embed_dict = build_embed.to_dict()
print(embed_dict)
for assertion in build['assertions']:
print(assertion)
(term, value, negated) = (assertion.get('key', ''), assertion.get('value', ''), assertion.get('not', False))
if (('level' in term) or ('lvl' in term)):
assertion_succeeded = (value in embed_dict.get('title', ''))
self.assertTrue(assertion_succeeded, msg=f"Assertion ({term}:'{value}') in embed={embed_dict} failed.")
elif ('ascendency' in term):
assertion_succeeded = (value in embed_dict.get('title', ''))
                        assertion_succeeded = (assertion_succeeded or (value in embed_dict.get('thumbnail', {}).get('url', '')))
self.assertTrue(assertion_succeeded, msg=f"Assertion ({term}:'{value}') in embed={embed_dict} failed.")
elif ('skill' in term):
assertion_succeeded = (value in embed_dict.get('title', ''))
self.assertTrue(assertion_succeeded, msg=f"Assertion ({term}:'{value}') in embed={embed_dict} failed.")
else:
assertion_succeeded = False
for field in embed_dict['fields']:
assertion_succeeded = self.single_assert(field, term, value, negated)
if assertion_succeeded:
break | Tests whether all links inside of the file can be successfully parsed.
The key in the json has to either be "level", "ascendency", "skill" or any of the categories in the bot output.
I.e. "Offense", "Defense", ... | tests/test_bot_specific_builds.py | test_bot_parse_routine | enpinzolas/discord-pob | 10 | python | def test_bot_parse_routine(self):
'\n Tests whether all links inside of the file can be successfully parsed.\n The key in the json has to either be "level", "ascendency", "skill" or any of the categories in the bot output.\n I.e. "Offense", "Defense", ...\n '
demo_author = None
json = get_builds()
for build in json['builds']:
with self.subTest(i=build['name']):
build_embed = parse_pob(demo_author, build['pastebin'])
self.assertTrue(isinstance(build_embed, Embed))
embed_dict = build_embed.to_dict()
print(embed_dict)
for assertion in build['assertions']:
print(assertion)
                    (term, value, negated) = (assertion.get('key', ''), assertion.get('value', ''), assertion.get('not', False))
                    if (('level' in term) or ('lvl' in term)):
                        assertion_succeeded = (value in embed_dict.get('title', ''))
                        self.assertTrue(assertion_succeeded, msg=f"Assertion ({term}:'{value}') in embed={embed_dict} failed.")
                    elif ('ascendency' in term):
                        assertion_succeeded = (value in embed_dict.get('title', ''))
                        assertion_succeeded = (assertion_succeeded or (value in embed_dict.get('thumbnail', {}).get('url', '')))
                        self.assertTrue(assertion_succeeded, msg=f"Assertion ({term}:'{value}') in embed={embed_dict} failed.")
                    elif ('skill' in term):
                        assertion_succeeded = (value in embed_dict.get('title', ''))
self.assertTrue(assertion_succeeded, msg=f"Assertion ({term}:'{value}') in embed={embed_dict} failed.")
else:
assertion_succeeded = False
for field in embed_dict['fields']:
assertion_succeeded = self.single_assert(field, term, value, negated)
if assertion_succeeded:
break | def test_bot_parse_routine(self):
'\n Tests whether all links inside of the file can be successfully parsed.\n The key in the json has to either be "level", "ascendency", "skill" or any of the categories in the bot output.\n I.e. "Offense", "Defense", ...\n '
demo_author = None
json = get_builds()
for build in json['builds']:
with self.subTest(i=build['name']):
build_embed = parse_pob(demo_author, build['pastebin'])
self.assertTrue(isinstance(build_embed, Embed))
embed_dict = build_embed.to_dict()
print(embed_dict)
for assertion in build['assertions']:
print(assertion)
                    (term, value, negated) = (assertion.get('key', ''), assertion.get('value', ''), assertion.get('not', False))
                    if (('level' in term) or ('lvl' in term)):
                        assertion_succeeded = (value in embed_dict.get('title', ''))
                        self.assertTrue(assertion_succeeded, msg=f"Assertion ({term}:'{value}') in embed={embed_dict} failed.")
                    elif ('ascendency' in term):
                        assertion_succeeded = (value in embed_dict.get('title', ''))
                        assertion_succeeded = (assertion_succeeded or (value in embed_dict.get('thumbnail', {}).get('url', '')))
                        self.assertTrue(assertion_succeeded, msg=f"Assertion ({term}:'{value}') in embed={embed_dict} failed.")
                    elif ('skill' in term):
                        assertion_succeeded = (value in embed_dict.get('title', ''))
self.assertTrue(assertion_succeeded, msg=f"Assertion ({term}:'{value}') in embed={embed_dict} failed.")
else:
assertion_succeeded = False
for field in embed_dict['fields']:
assertion_succeeded = self.single_assert(field, term, value, negated)
if assertion_succeeded:
break<|docstring|>Tests whether all links inside of the file can be successfully parsed.
The key in the json has to either be "level", "ascendency", "skill" or any of the categories in the bot output.
I.e. "Offense", "Defense", ...<|endoftext|> |
6be29f91db9333238f5ce26e6215db8f323f73f814a1ddf1fbbec43fad7b6586 | def single_assert(self, field, term, value, negated):
'\n Assert that the term, value and whether the term is negated apply to the given field.\n :param field: embed field to search\n :param term: term we search for\n :param value: we want to match\n :param negated: negated\n :return: true if the assertion succeeded\n '
do_check = (term in field['name'])
print(f"searching title={field['name']}for {term},{value} - negated? {negated}")
if do_check:
print(f'searching for {term},{value} - negated? {negated}')
if (not negated):
self.assertTrue((value in field['value']), msg=f"Assertion ({term}:'{value}') in embed failed.")
elif negated:
self.assertTrue((value not in field['value']), msg=f"Assertion ({term}:'{value}') not in embed failed.") | Assert that the term, value and whether the term is negated apply to the given field.
:param field: embed field to search
:param term: term we search for
:param value: we want to match
:param negated: negated
:return: true if the assertion succeeded | tests/test_bot_specific_builds.py | single_assert | enpinzolas/discord-pob | 10 | python | def single_assert(self, field, term, value, negated):
'\n Assert that the term, value and whether the term is negated apply to the given field.\n :param field: embed field to search\n :param term: term we search for\n :param value: we want to match\n :param negated: negated\n :return: true if the assertion succeeded\n '
do_check = (term in field['name'])
print(f"searching title={field['name']}for {term},{value} - negated? {negated}")
if do_check:
print(f'searching for {term},{value} - negated? {negated}')
if (not negated):
self.assertTrue((value in field['value']), msg=f"Assertion ({term}:'{value}') in embed failed.")
elif negated:
self.assertTrue((value not in field['value']), msg=f"Assertion ({term}:'{value}') not in embed failed.") | def single_assert(self, field, term, value, negated):
'\n Assert that the term, value and whether the term is negated apply to the given field.\n :param field: embed field to search\n :param term: term we search for\n :param value: we want to match\n :param negated: negated\n :return: true if the assertion succeeded\n '
do_check = (term in field['name'])
print(f"searching title={field['name']}for {term},{value} - negated? {negated}")
if do_check:
print(f'searching for {term},{value} - negated? {negated}')
if (not negated):
self.assertTrue((value in field['value']), msg=f"Assertion ({term}:'{value}') in embed failed.")
elif negated:
self.assertTrue((value not in field['value']), msg=f"Assertion ({term}:'{value}') not in embed failed.")<|docstring|>Assert that the term, value and whether the term is negated apply to the given field.
:param field: embed field to search
:param term: term we search for
:param value: we want to match
:param negated: negated
:return: true if the assertion succeeded<|endoftext|> |
6abafdec3dbaffa91306aa31edc8d420a298323ff0b52c178187476fc3966002 | def _cat_collate(batch):
'concat if all tensors have same size.'
(img_ids, imgs) = [_ for _ in zip(*batch)]
imgs = [img[None] for img in imgs]
if all(((imgs[0].shape == img.shape) for img in imgs)):
imgs = [torch.cat(imgs, dim=0)]
return (img_ids, imgs) | concat if all tensors have same size. | src/data_utils.py | _cat_collate | Hidberg/Landmark2019-1st-and-3rd-Place-Solution | 430 | python | def _cat_collate(batch):
(img_ids, imgs) = [_ for _ in zip(*batch)]
imgs = [img[None] for img in imgs]
if all(((imgs[0].shape == img.shape) for img in imgs)):
imgs = [torch.cat(imgs, dim=0)]
return (img_ids, imgs) | def _cat_collate(batch):
(img_ids, imgs) = [_ for _ in zip(*batch)]
imgs = [img[None] for img in imgs]
if all(((imgs[0].shape == img.shape) for img in imgs)):
imgs = [torch.cat(imgs, dim=0)]
return (img_ids, imgs)<|docstring|>concat if all tensors have same size.<|endoftext|> |
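The collate above only concatenates when every image shares one shape; otherwise the per-image tensors are returned individually. Assuming _cat_collate and torch are in scope, a small demonstration:

import torch

same = [('a', torch.zeros(3, 8, 8)), ('b', torch.zeros(3, 8, 8))]
mixed = [('a', torch.zeros(3, 8, 8)), ('b', torch.zeros(3, 8, 16))]

ids, imgs = _cat_collate(same)
print(len(imgs), imgs[0].shape)  # 1 torch.Size([2, 3, 8, 8]) -- one stacked batch

ids, imgs = _cat_collate(mixed)
print(len(imgs), imgs[0].shape)  # 2 torch.Size([1, 3, 8, 8]) -- kept separate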
22ef586e5628c42d3972be53abc9dbf685182a378bdae1de3cfedb5fd1af724e | def make_predict_loaders(params, data_root, eval_transform=None, scale='S', splits=('index', 'test'), num_workers=4, n_blocks=1, block_id=0):
    '\n    :param splits: which dataset splits to load\n    :param block_id: ID of the block when the data is split into n_blocks\n    '
data_loaders = dict()
if ('2' in scale):
bins = [0.67, 0.77, 1.33, 1.5]
else:
bins = [0.77, 1.33]
for split in splits:
df = pd.read_pickle(f'{data_root}/{split}.pkl')
n_per_block = math.ceil((len(df) / n_blocks))
df = df.iloc[(block_id * n_per_block):((block_id + 1) * n_per_block)]
if ('path' not in df.columns):
if ('gld_v1' in data_root):
df['path'] = df['id'].apply((lambda x: f'{data_root}/{split}/{x}.jpg'))
if ('gld_v2' in data_root):
df['path'] = df['id'].apply((lambda x: f"{data_root}/{split}/{'/'.join(x[:3])}/{x}.jpg"))
if (scale == 'O'):
dataset = LandmarkDataset(paths=df['path'].values, transform=eval_transform)
data_loaders[split] = DataLoader(dataset=dataset, batch_size=1, shuffle=False, drop_last=False, pin_memory=True, num_workers=num_workers)
else:
df['aspect_ratio'] = (df['height'] / df['width'])
df['aspect_gid'] = _quantize(df['aspect_ratio'], bins=bins)
data_loaders[split] = prepare_grouped_loader_from_df(df, eval_transform, params['test_batch_size'], scale=scale, is_train=False, num_workers=num_workers)
    return data_loaders | :param splits: which dataset splits to load
:param block_id: ID of the block when the data is split into n_blocks | src/data_utils.py | make_predict_loaders | Hidberg/Landmark2019-1st-and-3rd-Place-Solution | 430 | python | def make_predict_loaders(params, data_root, eval_transform=None, scale='S', splits=('index', 'test'), num_workers=4, n_blocks=1, block_id=0):
    '\n    :param splits: which dataset splits to load\n    :param block_id: ID of the block when the data is split into n_blocks\n    '
data_loaders = dict()
if ('2' in scale):
bins = [0.67, 0.77, 1.33, 1.5]
else:
bins = [0.77, 1.33]
for split in splits:
df = pd.read_pickle(f'{data_root}/{split}.pkl')
n_per_block = math.ceil((len(df) / n_blocks))
df = df.iloc[(block_id * n_per_block):((block_id + 1) * n_per_block)]
if ('path' not in df.columns):
if ('gld_v1' in data_root):
df['path'] = df['id'].apply((lambda x: f'{data_root}/{split}/{x}.jpg'))
if ('gld_v2' in data_root):
df['path'] = df['id'].apply((lambda x: f"{data_root}/{split}/{'/'.join(x[:3])}/{x}.jpg"))
if (scale == 'O'):
dataset = LandmarkDataset(paths=df['path'].values, transform=eval_transform)
data_loaders[split] = DataLoader(dataset=dataset, batch_size=1, shuffle=False, drop_last=False, pin_memory=True, num_workers=num_workers)
else:
df['aspect_ratio'] = (df['height'] / df['width'])
df['aspect_gid'] = _quantize(df['aspect_ratio'], bins=bins)
data_loaders[split] = prepare_grouped_loader_from_df(df, eval_transform, params['test_batch_size'], scale=scale, is_train=False, num_workers=num_workers)
return data_loaders | def make_predict_loaders(params, data_root, eval_transform=None, scale='S', splits=('index', 'test'), num_workers=4, n_blocks=1, block_id=0):
    '\n    :param splits: which dataset splits to load\n    :param block_id: ID of the block when the data is split into n_blocks\n    '
data_loaders = dict()
if ('2' in scale):
bins = [0.67, 0.77, 1.33, 1.5]
else:
bins = [0.77, 1.33]
for split in splits:
df = pd.read_pickle(f'{data_root}/{split}.pkl')
n_per_block = math.ceil((len(df) / n_blocks))
df = df.iloc[(block_id * n_per_block):((block_id + 1) * n_per_block)]
if ('path' not in df.columns):
if ('gld_v1' in data_root):
df['path'] = df['id'].apply((lambda x: f'{data_root}/{split}/{x}.jpg'))
if ('gld_v2' in data_root):
df['path'] = df['id'].apply((lambda x: f"{data_root}/{split}/{'/'.join(x[:3])}/{x}.jpg"))
if (scale == 'O'):
dataset = LandmarkDataset(paths=df['path'].values, transform=eval_transform)
data_loaders[split] = DataLoader(dataset=dataset, batch_size=1, shuffle=False, drop_last=False, pin_memory=True, num_workers=num_workers)
else:
df['aspect_ratio'] = (df['height'] / df['width'])
df['aspect_gid'] = _quantize(df['aspect_ratio'], bins=bins)
data_loaders[split] = prepare_grouped_loader_from_df(df, eval_transform, params['test_batch_size'], scale=scale, is_train=False, num_workers=num_workers)
    return data_loaders<|docstring|>:param splits: which dataset splits to load
:param block_id: ID of the block when the data is split into n_blocks<|endoftext|>
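The aspect-ratio grouping above relies on a _quantize helper defined elsewhere; assuming it assigns bin ids like numpy's digitize over the listed edges, the groups come out as:

import numpy as np

bins = [0.77, 1.33]                 # the non-'2' scale edges used above
ratios = np.array([0.5, 1.0, 2.0])  # wide, square, tall images
print(np.digitize(ratios, bins))    # [0 1 2] -> one loader group per bin id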
676c969b843794d11e8fb26d86fab9a6d27b3a57ad4bf4163e128f4c4762a8e3 | def _map_to_pandas(rdds):
' Needs to be here due to pickling issues '
return [pd.DataFrame(list(rdds))] | Needs to be here due to pickling issues | spark_script/utils.py | _map_to_pandas | mikolaje/ZhihuDisplay | 18 | python | def _map_to_pandas(rdds):
' '
return [pd.DataFrame(list(rdds))] | def _map_to_pandas(rdds):
' '
return [pd.DataFrame(list(rdds))]<|docstring|>Needs to be here due to pickling issues<|endoftext|> |
fe78c915b04d42127f9fb46833bf28500b3df7fd287aef767b770041514e3fd5 | def toPandas(df, n_partitions=None):
'\n Returns the contents of `df` as a local `pandas.DataFrame` in a speedy fashion. The DataFrame is\n repartitioned if `n_partitions` is passed.\n :param df: pyspark.sql.DataFrame\n :param n_partitions: int or None\n :return: pandas.DataFrame\n '
if (n_partitions is not None):
df = df.repartition(n_partitions)
df_pand = df.rdd.mapPartitions(_map_to_pandas).collect()
df_pand = pd.concat(df_pand)
df_pand.columns = df.columns
return df_pand | Returns the contents of `df` as a local `pandas.DataFrame` in a speedy fashion. The DataFrame is
repartitioned if `n_partitions` is passed.
:param df: pyspark.sql.DataFrame
:param n_partitions: int or None
:return: pandas.DataFrame | spark_script/utils.py | toPandas | mikolaje/ZhihuDisplay | 18 | python | def toPandas(df, n_partitions=None):
'\n Returns the contents of `df` as a local `pandas.DataFrame` in a speedy fashion. The DataFrame is\n repartitioned if `n_partitions` is passed.\n :param df: pyspark.sql.DataFrame\n :param n_partitions: int or None\n :return: pandas.DataFrame\n '
if (n_partitions is not None):
df = df.repartition(n_partitions)
df_pand = df.rdd.mapPartitions(_map_to_pandas).collect()
df_pand = pd.concat(df_pand)
df_pand.columns = df.columns
return df_pand | def toPandas(df, n_partitions=None):
'\n Returns the contents of `df` as a local `pandas.DataFrame` in a speedy fashion. The DataFrame is\n repartitioned if `n_partitions` is passed.\n :param df: pyspark.sql.DataFrame\n :param n_partitions: int or None\n :return: pandas.DataFrame\n '
if (n_partitions is not None):
df = df.repartition(n_partitions)
df_pand = df.rdd.mapPartitions(_map_to_pandas).collect()
df_pand = pd.concat(df_pand)
df_pand.columns = df.columns
return df_pand<|docstring|>Returns the contents of `df` as a local `pandas.DataFrame` in a speedy fashion. The DataFrame is
repartitioned if `n_partitions` is passed.
:param df: pyspark.sql.DataFrame
:param n_partitions: int or None
:return: pandas.DataFrame<|endoftext|> |
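A usage sketch, assuming an existing SparkSession named spark: partitions are collected as small pandas frames in parallel instead of funneling one large serialization through the driver.

sdf = spark.range(1_000_000).withColumnRenamed('id', 'x')  # spark assumed to exist
pdf = toPandas(sdf, n_partitions=32)  # repartition, then mapPartitions -> pandas
print(len(pdf), list(pdf.columns))    # 1000000 ['x']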
5fd2216c2ea9bcba95be9e82930f0027e4b75ef30acfeacb42600f6e8bfa22fe | def check_if_all_integers(x):
'check a pandas.Series is made of all integers.'
return all((float(i).is_integer() for i in x.unique())) | check a pandas.Series is made of all integers. | pmlb/support_funcs.py | check_if_all_integers | EpistasisLab/penn-ml-benchmarks | 540 | python | def check_if_all_integers(x):
return all((float(i).is_integer() for i in x.unique())) | def check_if_all_integers(x):
return all((float(i).is_integer() for i in x.unique()))<|docstring|>check a pandas.Series is made of all integers.<|endoftext|> |
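Assuming the helper above is in scope, two quick checks show how whole-valued floats pass while fractional values fail:

import pandas as pd

print(check_if_all_integers(pd.Series([1.0, 2.0, 3.0])))  # True  -- whole-valued floats
print(check_if_all_integers(pd.Series([1.0, 2.5])))       # False -- 2.5 has a fraction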
c12a3ce2bde39d07333c29365507dfa4374de101a1c43bb9f06c0d4d94a1a22b | def generate_summarystats(dataset_name, dataset_stats, local_cache_dir=None, write_summary=False):
    'Generates summary stats for a given dataset in its summary_stats.tsv\n    file in a dataset local_cache_dir directory.\n    :param dataset_name: str\n        The name of the data set to load from PMLB.\n    :param local_cache_dir: str (required)\n        The directory on your local machine to store the data files.\n        If None, then the local data cache will not be used.\n    :param write_summary: bool\n        Whether new summary statistics should be written out to directory.\n    '
print('generating summary stats for', dataset_name)
feat = dataset_stats['feat']
mse = dataset_stats['mse']
stats_df = pd.DataFrame({'dataset': dataset_name, 'n_instances': dataset_stats['n_instances'], 'n_features': dataset_stats['n_features'], 'n_binary_features': feat[0], 'n_categorical_features': feat[1], 'n_continuous_features': feat[2], 'endpoint_type': dataset_stats['endpoint'], 'n_classes': mse[0], 'imbalance': mse[1], 'task': dataset_stats['yaml_task']}, index=[0])
if write_summary:
assert (local_cache_dir != None)
stats_df.to_csv(pathlib.Path(f'{local_cache_dir}{dataset_name}/summary_stats.tsv'), index=False, sep='\t')
    return stats_df | Generates summary stats for a given dataset in its summary_stats.tsv
file in a dataset local_cache_dir directory.
:param dataset_name: str
The name of the data set to load from PMLB.
:param local_cache_dir: str (required)
The directory on your local machine to store the data files.
If None, then the local data cache will not be used.
:param write_summary: bool
Whether new summary statistics should be written out to directory. | pmlb/support_funcs.py | generate_summarystats | EpistasisLab/penn-ml-benchmarks | 540 | python | def generate_summarystats(dataset_name, dataset_stats, local_cache_dir=None, write_summary=False):
    'Generates summary stats for a given dataset in its summary_stats.tsv\n    file in a dataset local_cache_dir directory.\n    :param dataset_name: str\n        The name of the data set to load from PMLB.\n    :param local_cache_dir: str (required)\n        The directory on your local machine to store the data files.\n        If None, then the local data cache will not be used.\n    :param write_summary: bool\n        Whether new summary statistics should be written out to directory.\n    '
print('generating summary stats for', dataset_name)
feat = dataset_stats['feat']
mse = dataset_stats['mse']
stats_df = pd.DataFrame({'dataset': dataset_name, 'n_instances': dataset_stats['n_instances'], 'n_features': dataset_stats['n_features'], 'n_binary_features': feat[0], 'n_categorical_features': feat[1], 'n_continuous_features': feat[2], 'endpoint_type': dataset_stats['endpoint'], 'n_classes': mse[0], 'imbalance': mse[1], 'task': dataset_stats['yaml_task']}, index=[0])
if write_summary:
assert (local_cache_dir != None)
stats_df.to_csv(pathlib.Path(f'{local_cache_dir}{dataset_name}/summary_stats.tsv'), index=False, sep='\t')
return stats_df | def generate_summarystats(dataset_name, dataset_stats, local_cache_dir=None, write_summary=False):
    'Generates summary stats for a given dataset in its summary_stats.tsv\n    file in a dataset local_cache_dir directory.\n    :param dataset_name: str\n        The name of the data set to load from PMLB.\n    :param local_cache_dir: str (required)\n        The directory on your local machine to store the data files.\n        If None, then the local data cache will not be used.\n    :param write_summary: bool\n        Whether new summary statistics should be written out to directory.\n    '
print('generating summary stats for', dataset_name)
feat = dataset_stats['feat']
mse = dataset_stats['mse']
stats_df = pd.DataFrame({'dataset': dataset_name, 'n_instances': dataset_stats['n_instances'], 'n_features': dataset_stats['n_features'], 'n_binary_features': feat[0], 'n_categorical_features': feat[1], 'n_continuous_features': feat[2], 'endpoint_type': dataset_stats['endpoint'], 'n_classes': mse[0], 'imbalance': mse[1], 'task': dataset_stats['yaml_task']}, index=[0])
if write_summary:
assert (local_cache_dir != None)
stats_df.to_csv(pathlib.Path(f'{local_cache_dir}{dataset_name}/summary_stats.tsv'), index=False, sep='\t')
    return stats_df<|docstring|>Generates summary stats for a given dataset in its summary_stats.tsv
file in a dataset local_cache_dir directory.
:param dataset_name: str
The name of the data set to load from PMLB.
:param local_cache_dir: str (required)
The directory on your local machine to store the data files.
If None, then the local data cache will not be used.
:param write_summary: bool
Whether new summary statistics should be written out to directory.<|endoftext|> |
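The dataset_stats dict consumed above bundles precomputed counts; a hypothetical call with the keys inferred from the accesses in the function (values invented, nothing written without write_summary=True):

example_stats = {
    'n_instances': 150,
    'n_features': 4,
    'feat': (0, 1, 3),   # (binary, categorical, continuous), e.g. from count_features_type
    'endpoint': 'categorical',
    'mse': (3, 0.0),     # (n_classes, imbalance), e.g. from compute_imbalance
    'yaml_task': 'classification',
}
df = generate_summarystats('iris', example_stats)  # no file written without write_summary=True
print(df[['dataset', 'n_classes', 'imbalance']])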
223da6fc251b209704347ef8cf241734550a5888d10d6b61758fbe17349bc956 | def compute_imbalance(data):
" Computes imbalance metric for a given dataset.\n Imbalance metric is equal to 0 when a dataset is perfectly balanced\n (i.e. number of in each class is exact).\n :param data : pandas.DataFrame\n A dataset in a panda's data frame\n :returns int\n A value of imbalance metric, where zero means that the dataset is\n perfectly balanced and the higher the value, the more imbalanced the\n dataset.\n "
if (not data):
return 0
imb = 0
num_classes = float(len(Counter(data)))
for x in Counter(data).values():
p_x = (float(x) / len(data))
if (p_x > 0):
imb += ((p_x - (1 / num_classes)) * (p_x - (1 / num_classes)))
worst_case = (((num_classes - 1) * pow((1 / num_classes), 2)) + pow((1 - (1 / num_classes)), 2))
return (num_classes, (imb / worst_case)) | Computes imbalance metric for a given dataset.
Imbalance metric is equal to 0 when a dataset is perfectly balanced
(i.e. the number of instances in each class is equal).
:param data : pandas.DataFrame
A dataset in a pandas data frame
:returns int
A value of imbalance metric, where zero means that the dataset is
perfectly balanced and the higher the value, the more imbalanced the
dataset. | pmlb/support_funcs.py | compute_imbalance | EpistasisLab/penn-ml-benchmarks | 540 | python | def compute_imbalance(data):
" Computes imbalance metric for a given dataset.\n Imbalance metric is equal to 0 when a dataset is perfectly balanced\n (i.e. number of in each class is exact).\n :param data : pandas.DataFrame\n A dataset in a panda's data frame\n :returns int\n A value of imbalance metric, where zero means that the dataset is\n perfectly balanced and the higher the value, the more imbalanced the\n dataset.\n "
if (not data):
return 0
imb = 0
num_classes = float(len(Counter(data)))
for x in Counter(data).values():
p_x = (float(x) / len(data))
if (p_x > 0):
imb += ((p_x - (1 / num_classes)) * (p_x - (1 / num_classes)))
worst_case = (((num_classes - 1) * pow((1 / num_classes), 2)) + pow((1 - (1 / num_classes)), 2))
return (num_classes, (imb / worst_case)) | def compute_imbalance(data):
" Computes imbalance metric for a given dataset.\n Imbalance metric is equal to 0 when a dataset is perfectly balanced\n (i.e. number of in each class is exact).\n :param data : pandas.DataFrame\n A dataset in a panda's data frame\n :returns int\n A value of imbalance metric, where zero means that the dataset is\n perfectly balanced and the higher the value, the more imbalanced the\n dataset.\n "
if (not data):
return 0
imb = 0
num_classes = float(len(Counter(data)))
for x in Counter(data).values():
p_x = (float(x) / len(data))
if (p_x > 0):
imb += ((p_x - (1 / num_classes)) * (p_x - (1 / num_classes)))
worst_case = (((num_classes - 1) * pow((1 / num_classes), 2)) + pow((1 - (1 / num_classes)), 2))
return (num_classes, (imb / worst_case))<|docstring|>Computes imbalance metric for a given dataset.
Imbalance metric is equal to 0 when a dataset is perfectly balanced
(i.e. the number of instances in each class is equal).
:param data : pandas.DataFrame
A dataset in a pandas data frame
:returns int
A value of imbalance metric, where zero means that the dataset is
perfectly balanced and the higher the value, the more imbalanced the
dataset.<|endoftext|> |
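A worked check of the metric above, assuming compute_imbalance is in scope: a perfectly balanced label list scores 0, while a 99:1 split lands near the two-class worst case ((0.49^2 + 0.49^2) / 0.5 = 0.9604):

balanced = [0, 1] * 50
skewed = ([0] * 99) + [1]

print(compute_imbalance(balanced))  # (2.0, 0.0)
print(compute_imbalance(skewed))    # (2.0, 0.9604...) -- close to the worst case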
3c97dc9b4b79770f52002ecc185b5258619fab5f210fe129caba25fdb8632486 | def count_features_type(types, include_binary=False):
' Counts two or three different types of features\n (binary (optional), categorical, continuous).\n :param types: list of types from get_type\n :returns a tuple (binary (optional), categorical, continuous)\n '
if include_binary:
return (types.count('binary'), types.count('categorical'), types.count('continuous'))
else:
return (types.count('categorical'), types.count('continuous')) | Counts two or three different types of features
(binary (optional), categorical, continuous).
:param types: list of types from get_type
:returns a tuple (binary (optional), categorical, continuous) | pmlb/support_funcs.py | count_features_type | EpistasisLab/penn-ml-benchmarks | 540 | python | def count_features_type(types, include_binary=False):
' Counts two or three different types of features\n (binary (optional), categorical, continuous).\n :param types: list of types from get_type\n :returns a tuple (binary (optional), categorical, continuous)\n '
if include_binary:
return (types.count('binary'), types.count('categorical'), types.count('continuous'))
else:
return (types.count('categorical'), types.count('continuous')) | def count_features_type(types, include_binary=False):
' Counts two or three different types of features\n (binary (optional), categorical, continuous).\n :param types: list of types from get_type\n :returns a tuple (binary (optional), categorical, continuous)\n '
if include_binary:
return (types.count('binary'), types.count('categorical'), types.count('continuous'))
else:
return (types.count('categorical'), types.count('continuous'))<|docstring|>Counts two or three different types of features
(binary (optional), categorical, continuous).
:param types: list of types from get_type
:returns a tuple (binary (optional), categorical, continuous)<|endoftext|> |
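Assuming the counter above is in scope, a quick demonstration of both return shapes:

types = ['binary', 'continuous', 'continuous', 'categorical']
print(count_features_type(types, include_binary=True))  # (1, 1, 2)
print(count_features_type(types))                       # (1, 2)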
c3ce54eed10b3cfe18056785beabc40cf22b41cf0ffc564d996bf53d7443b21a | def generate_metadata(df, dataset_name, dataset_stats, overwrite_existing=True, local_cache_dir=None):
'Generates description for a given dataset in its metadata.yaml file in a\n dataset local_cache_dir file.\n\n :param dataset_name: str\n The name of the data set to load from PMLB.\n :param local_cache_dir: str (required)\n The directory on your local machine to store the data files.\n If None, then the local data cache will not be used.\n '
metadata_template = '{header_to_print}\ndataset: {dataset_name}\ndescription: {none_yet}\nsource: {none_yet}\npublication: {none_yet}\ntask: {task}\nkeywords:\n -\n -\ntarget:\n type: {endpoint}\n description: {none_yet}\n code: {none_yet}\nfeatures:\n{all_features}'
feature_template = ' - name: {feat_name}\n type: {feat_type}\n'
feat_extra_template = ' description:\n code:\n transform:\n'
feat_extra_first = ' description: # optional but recommended, what the feature measures/indicates, unit\n code: # optional, coding information, e.g., Control = 0, Case = 1\n transform: # optional, any transformation performed on the feature, e.g., log scaled\n'
none_yet = 'None yet. See our contributing guide to help us add one.'
header_to_print = '# Reviewed by [your name here]'
assert (local_cache_dir != None)
meta_path = pathlib.Path(f'{local_cache_dir}{dataset_name}/metadata.yaml')
if meta_path.exists():
if (not overwrite_existing):
logger.warning(f'''Not writing {dataset_name}/metadata.yaml ; File exists (use overwrite_existing=True to override.
''')
return None
print(f'WARNING: {meta_path} exists. Overwriting...')
print('Generating metadata for', dataset_name)
all_features = ''
first = True
for (feature, feature_type) in zip(dataset_stats['feat_names'], dataset_stats['types']):
if (feature in protected_feature_names):
feature = f'"{feature}"'
all_features += feature_template.format(feat_name=feature, feat_type=feature_type)
if first:
all_features += feat_extra_first
first = False
else:
all_features += feat_extra_template
metadata = metadata_template.format(header_to_print=header_to_print, dataset_name=dataset_name, none_yet=none_yet, endpoint=dataset_stats['endpoint'], task=dataset_stats['task'], all_features=all_features)
try:
meta_path.write_text(metadata)
except IOError as err:
print(err) | Generates description for a given dataset in its metadata.yaml file in a
dataset local_cache_dir file.
:param dataset_name: str
The name of the data set to load from PMLB.
:param local_cache_dir: str (required)
The directory on your local machine to store the data files.
If None, then the local data cache will not be used. | pmlb/support_funcs.py | generate_metadata | EpistasisLab/penn-ml-benchmarks | 540 | python | def generate_metadata(df, dataset_name, dataset_stats, overwrite_existing=True, local_cache_dir=None):
'Generates description for a given dataset in its metadata.yaml file in a\n dataset local_cache_dir file.\n\n :param dataset_name: str\n The name of the data set to load from PMLB.\n :param local_cache_dir: str (required)\n The directory on your local machine to store the data files.\n If None, then the local data cache will not be used.\n '
metadata_template = '{header_to_print}\ndataset: {dataset_name}\ndescription: {none_yet}\nsource: {none_yet}\npublication: {none_yet}\ntask: {task}\nkeywords:\n -\n -\ntarget:\n type: {endpoint}\n description: {none_yet}\n code: {none_yet}\nfeatures:\n{all_features}'
feature_template = ' - name: {feat_name}\n type: {feat_type}\n'
feat_extra_template = ' description:\n code:\n transform:\n'
feat_extra_first = ' description: # optional but recommended, what the feature measures/indicates, unit\n code: # optional, coding information, e.g., Control = 0, Case = 1\n transform: # optional, any transformation performed on the feature, e.g., log scaled\n'
none_yet = 'None yet. See our contributing guide to help us add one.'
header_to_print = '# Reviewed by [your name here]'
assert (local_cache_dir != None)
meta_path = pathlib.Path(f'{local_cache_dir}{dataset_name}/metadata.yaml')
if meta_path.exists():
if (not overwrite_existing):
            logger.warning(f'''Not writing {dataset_name}/metadata.yaml ; File exists (use overwrite_existing=True to override.
''')
return None
print(f'WARNING: {meta_path} exists. Overwriting...')
print('Generating metadata for', dataset_name)
    all_features = ''
first = True
for (feature, feature_type) in zip(dataset_stats['feat_names'], dataset_stats['types']):
if (feature in protected_feature_names):
feature = f'"{feature}"'
all_features += feature_template.format(feat_name=feature, feat_type=feature_type)
if first:
all_features += feat_extra_first
first = False
else:
all_features += feat_extra_template
metadata = metadata_template.format(header_to_print=header_to_print, dataset_name=dataset_name, none_yet=none_yet, endpoint=dataset_stats['endpoint'], task=dataset_stats['task'], all_features=all_features)
try:
meta_path.write_text(metadata)
except IOError as err:
print(err) | def generate_metadata(df, dataset_name, dataset_stats, overwrite_existing=True, local_cache_dir=None):
'Generates description for a given dataset in its metadata.yaml file in a\n dataset local_cache_dir file.\n\n :param dataset_name: str\n The name of the data set to load from PMLB.\n :param local_cache_dir: str (required)\n The directory on your local machine to store the data files.\n If None, then the local data cache will not be used.\n '
metadata_template = '{header_to_print}\ndataset: {dataset_name}\ndescription: {none_yet}\nsource: {none_yet}\npublication: {none_yet}\ntask: {task}\nkeywords:\n -\n -\ntarget:\n type: {endpoint}\n description: {none_yet}\n code: {none_yet}\nfeatures:\n{all_features}'
feature_template = ' - name: {feat_name}\n type: {feat_type}\n'
feat_extra_template = ' description:\n code:\n transform:\n'
feat_extra_first = ' description: # optional but recommended, what the feature measures/indicates, unit\n code: # optional, coding information, e.g., Control = 0, Case = 1\n transform: # optional, any transformation performed on the feature, e.g., log scaled\n'
none_yet = 'None yet. See our contributing guide to help us add one.'
header_to_print = '# Reviewed by [your name here]'
assert (local_cache_dir != None)
meta_path = pathlib.Path(f'{local_cache_dir}{dataset_name}/metadata.yaml')
if meta_path.exists():
if (not overwrite_existing):
            logger.warning(f'''Not writing {dataset_name}/metadata.yaml ; File exists (use overwrite_existing=True to override.
''')
return None
print(f'WARNING: {meta_path} exists. Overwriting...')
print('Generating metadata for', dataset_name)
    all_features = ''
first = True
for (feature, feature_type) in zip(dataset_stats['feat_names'], dataset_stats['types']):
if (feature in protected_feature_names):
feature = f'"{feature}"'
all_features += feature_template.format(feat_name=feature, feat_type=feature_type)
if first:
all_features += feat_extra_first
first = False
else:
all_features += feat_extra_template
metadata = metadata_template.format(header_to_print=header_to_print, dataset_name=dataset_name, none_yet=none_yet, endpoint=dataset_stats['endpoint'], task=dataset_stats['task'], all_features=all_features)
try:
meta_path.write_text(metadata)
except IOError as err:
print(err)<|docstring|>Generates description for a given dataset in its metadata.yaml file in a
dataset local_cache_dir file.
:param dataset_name: str
The name of the data set to load from PMLB.
:param local_cache_dir: str (required)
The directory on your local machine to store the data files.
If None, then the local data cache will not be used.<|endoftext|> |
c46d569c89c33ffcbbe9b1e0aa7d67d581ac0bc4a9d79926c824c6be20910c74 | def last_commit_message() -> str:
'\n Get commit message from last commit, excluding merge commits\n '
command = 'git log --no-merges -1 --pretty=%B'.split()
message = subprocess.check_output(command, universal_newlines=True)
return message | Get commit message from last commit, excluding merge commits | pmlb/support_funcs.py | last_commit_message | EpistasisLab/penn-ml-benchmarks | 540 | python | def last_commit_message() -> str:
'\n \n '
command = 'git log --no-merges -1 --pretty=%B'.split()
message = subprocess.check_output(command, universal_newlines=True)
return message | def last_commit_message() -> str:
'\n \n '
command = 'git log --no-merges -1 --pretty=%B'.split()
message = subprocess.check_output(command, universal_newlines=True)
return message<|docstring|>Get commit message from last commit, excluding merge commits<|endoftext|> |
88599abf98854b848143745897223efc51229455250fc32d6eb1f8ff140efcd4 | def __init__(self, id=None, name=None, description=None, zone_id=None, zone_name=None, type=None, ttl=None, records=None, create_at=None, update_at=None, status=None, default=None, project_id=None, links=None, line=None, weight=None, health_check_id=None, alias_target=None):
'ShowRecordSetByZoneResp - a model defined in huaweicloud sdk'
self._id = None
self._name = None
self._description = None
self._zone_id = None
self._zone_name = None
self._type = None
self._ttl = None
self._records = None
self._create_at = None
self._update_at = None
self._status = None
self._default = None
self._project_id = None
self._links = None
self._line = None
self._weight = None
self._health_check_id = None
self._alias_target = None
self.discriminator = None
if (id is not None):
self.id = id
if (name is not None):
self.name = name
if (description is not None):
self.description = description
if (zone_id is not None):
self.zone_id = zone_id
if (zone_name is not None):
self.zone_name = zone_name
if (type is not None):
self.type = type
if (ttl is not None):
self.ttl = ttl
if (records is not None):
self.records = records
if (create_at is not None):
self.create_at = create_at
if (update_at is not None):
self.update_at = update_at
if (status is not None):
self.status = status
if (default is not None):
self.default = default
if (project_id is not None):
self.project_id = project_id
if (links is not None):
self.links = links
if (line is not None):
self.line = line
if (weight is not None):
self.weight = weight
if (health_check_id is not None):
self.health_check_id = health_check_id
if (alias_target is not None):
self.alias_target = alias_target | ShowRecordSetByZoneResp - a model defined in huaweicloud sdk | huaweicloud-sdk-dns/huaweicloudsdkdns/v2/model/show_record_set_by_zone_resp.py | __init__ | githubmilesma/huaweicloud-sdk-python-v3 | 1 | python | def __init__(self, id=None, name=None, description=None, zone_id=None, zone_name=None, type=None, ttl=None, records=None, create_at=None, update_at=None, status=None, default=None, project_id=None, links=None, line=None, weight=None, health_check_id=None, alias_target=None):
self._id = None
self._name = None
self._description = None
self._zone_id = None
self._zone_name = None
self._type = None
self._ttl = None
self._records = None
self._create_at = None
self._update_at = None
self._status = None
self._default = None
self._project_id = None
self._links = None
self._line = None
self._weight = None
self._health_check_id = None
self._alias_target = None
self.discriminator = None
if (id is not None):
self.id = id
if (name is not None):
self.name = name
if (description is not None):
self.description = description
if (zone_id is not None):
self.zone_id = zone_id
if (zone_name is not None):
self.zone_name = zone_name
if (type is not None):
self.type = type
if (ttl is not None):
self.ttl = ttl
if (records is not None):
self.records = records
if (create_at is not None):
self.create_at = create_at
if (update_at is not None):
self.update_at = update_at
if (status is not None):
self.status = status
if (default is not None):
self.default = default
if (project_id is not None):
self.project_id = project_id
if (links is not None):
self.links = links
if (line is not None):
self.line = line
if (weight is not None):
self.weight = weight
if (health_check_id is not None):
self.health_check_id = health_check_id
if (alias_target is not None):
self.alias_target = alias_target | def __init__(self, id=None, name=None, description=None, zone_id=None, zone_name=None, type=None, ttl=None, records=None, create_at=None, update_at=None, status=None, default=None, project_id=None, links=None, line=None, weight=None, health_check_id=None, alias_target=None):
self._id = None
self._name = None
self._description = None
self._zone_id = None
self._zone_name = None
self._type = None
self._ttl = None
self._records = None
self._create_at = None
self._update_at = None
self._status = None
self._default = None
self._project_id = None
self._links = None
self._line = None
self._weight = None
self._health_check_id = None
self._alias_target = None
self.discriminator = None
if (id is not None):
self.id = id
if (name is not None):
self.name = name
if (description is not None):
self.description = description
if (zone_id is not None):
self.zone_id = zone_id
if (zone_name is not None):
self.zone_name = zone_name
if (type is not None):
self.type = type
if (ttl is not None):
self.ttl = ttl
if (records is not None):
self.records = records
if (create_at is not None):
self.create_at = create_at
if (update_at is not None):
self.update_at = update_at
if (status is not None):
self.status = status
if (default is not None):
self.default = default
if (project_id is not None):
self.project_id = project_id
if (links is not None):
self.links = links
if (line is not None):
self.line = line
if (weight is not None):
self.weight = weight
if (health_check_id is not None):
self.health_check_id = health_check_id
if (alias_target is not None):
self.alias_target = alias_target<|docstring|>ShowRecordSetByZoneResp - a model defined in huaweicloud sdk<|endoftext|> |
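The model above routes every constructor argument through a property; the getter/setter records that follow all repeat one pattern, condensed here as an illustrative sketch (not the generated SDK code):

class MiniModel:
    def __init__(self, id=None):
        self._id = None
        if id is not None:
            self.id = id   # assignment goes through the setter below

    @property
    def id(self):
        return self._id

    @id.setter
    def id(self, id):
        self._id = id      # the generated setters validate/assign here

m = MiniModel(id='example-id')
print(m.id)  # example-id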
82a5065c926444c27d09d99338edd200aa6a8436d76b000d186a81be62bf0994 | @property
def id(self):
        'Gets the id of this ShowRecordSetByZoneResp.\n\n        The ID of the Record Set.\n\n        :return: The id of this ShowRecordSetByZoneResp.\n        :rtype: str\n        '
return self._id | Gets the id of this ShowRecordSetByZoneResp.
The ID of the Record Set.
:return: The id of this ShowRecordSetByZoneResp.
:rtype: str | huaweicloud-sdk-dns/huaweicloudsdkdns/v2/model/show_record_set_by_zone_resp.py | id | githubmilesma/huaweicloud-sdk-python-v3 | 1 | python | @property
def id(self):
        'Gets the id of this ShowRecordSetByZoneResp.\n\n        The ID of the Record Set.\n\n        :return: The id of this ShowRecordSetByZoneResp.\n        :rtype: str\n        '
return self._id | @property
def id(self):
        'Gets the id of this ShowRecordSetByZoneResp.\n\n        The ID of the Record Set.\n\n        :return: The id of this ShowRecordSetByZoneResp.\n        :rtype: str\n        '
return self._id<|docstring|>Gets the id of this ShowRecordSetByZoneResp.
The ID of the Record Set.
:return: The id of this ShowRecordSetByZoneResp.
:rtype: str<|endoftext|> |
38bce15bb7360ad98ee8c15c2c3d30288d04d8533491b5f001bbad0c966c4e33 | @id.setter
def id(self, id):
        'Sets the id of this ShowRecordSetByZoneResp.\n\n        The ID of the Record Set.\n\n        :param id: The id of this ShowRecordSetByZoneResp.\n        :type: str\n        '
self._id = id | Sets the id of this ShowRecordSetByZoneResp.
The ID of the Record Set.
:param id: The id of this ShowRecordSetByZoneResp.
:type: str | huaweicloud-sdk-dns/huaweicloudsdkdns/v2/model/show_record_set_by_zone_resp.py | id | githubmilesma/huaweicloud-sdk-python-v3 | 1 | python | @id.setter
def id(self, id):
        'Sets the id of this ShowRecordSetByZoneResp.\n\n        The ID of the Record Set.\n\n        :param id: The id of this ShowRecordSetByZoneResp.\n        :type: str\n        '
self._id = id | @id.setter
def id(self, id):
        'Sets the id of this ShowRecordSetByZoneResp.\n\n        The ID of the Record Set.\n\n        :param id: The id of this ShowRecordSetByZoneResp.\n        :type: str\n        '
self._id = id<|docstring|>Sets the id of this ShowRecordSetByZoneResp.
The ID of the Record Set.
:param id: The id of this ShowRecordSetByZoneResp.
:type: str<|endoftext|> |
991246a8dda2450b7c8d8a70abb536fccfb46803c89d73cd83276adfbf3b3816 | @property
def name(self):
        'Gets the name of this ShowRecordSetByZoneResp.\n\n        The name of the Record Set.\n\n        :return: The name of this ShowRecordSetByZoneResp.\n        :rtype: str\n        '
return self._name | Gets the name of this ShowRecordSetByZoneResp.
The name of the Record Set.
:return: The name of this ShowRecordSetByZoneResp.
:rtype: str | huaweicloud-sdk-dns/huaweicloudsdkdns/v2/model/show_record_set_by_zone_resp.py | name | githubmilesma/huaweicloud-sdk-python-v3 | 1 | python | @property
def name(self):
'Gets the name of this ShowRecordSetByZoneResp.\n\n Record Set的名称。\n\n :return: The name of this ShowRecordSetByZoneResp.\n :rtype: str\n '
return self._name | @property
def name(self):
'Gets the name of this ShowRecordSetByZoneResp.\n\n Record Set的名称。\n\n :return: The name of this ShowRecordSetByZoneResp.\n :rtype: str\n '
return self._name<|docstring|>Gets the name of this ShowRecordSetByZoneResp.
Record Set的名称。
:return: The name of this ShowRecordSetByZoneResp.
:rtype: str<|endoftext|> |
fd32bf18d1d6c67240e39345cb858040f13a82d77ceaa5a8d201717a9abb0fe8 | huaweicloud-sdk-dns/huaweicloudsdkdns/v2/model/show_record_set_by_zone_resp.py | name | githubmilesma/huaweicloud-sdk-python-v3 | 1 | python

@name.setter
def name(self, name):
    """Sets the name of this ShowRecordSetByZoneResp.

    The name of the Record Set.

    :param name: The name of this ShowRecordSetByZoneResp.
    :type: str
    """
    self._name = name
8eac5769efd2b874d40f317fa44f6b66734d8f3e3083dfb8a7739f7f7bcfebc9 | huaweicloud-sdk-dns/huaweicloudsdkdns/v2/model/show_record_set_by_zone_resp.py | description | githubmilesma/huaweicloud-sdk-python-v3 | 1 | python

@property
def description(self):
    """Gets the description of this ShowRecordSetByZoneResp.

    The description of the Record Set.

    :return: The description of this ShowRecordSetByZoneResp.
    :rtype: str
    """
    return self._description
8e9f4babb32050a147d95da2a87387015037445c2cfdb8ea045db9b5fd456d2c | huaweicloud-sdk-dns/huaweicloudsdkdns/v2/model/show_record_set_by_zone_resp.py | description | githubmilesma/huaweicloud-sdk-python-v3 | 1 | python

@description.setter
def description(self, description):
    """Sets the description of this ShowRecordSetByZoneResp.

    The description of the Record Set.

    :param description: The description of this ShowRecordSetByZoneResp.
    :type: str
    """
    self._description = description
9247ecd2db36997cb95c231181dbe1dd4013c8555da7511418145a35e1375333 | huaweicloud-sdk-dns/huaweicloudsdkdns/v2/model/show_record_set_by_zone_resp.py | zone_id | githubmilesma/huaweicloud-sdk-python-v3 | 1 | python

@property
def zone_id(self):
    """Gets the zone_id of this ShowRecordSetByZoneResp.

    The zone_id of the zone that hosts this record set.

    :return: The zone_id of this ShowRecordSetByZoneResp.
    :rtype: str
    """
    return self._zone_id
d4999ec4460e3fd305718ccb2e7231cd22ba0122486290eb3d75ca5261a01390 | huaweicloud-sdk-dns/huaweicloudsdkdns/v2/model/show_record_set_by_zone_resp.py | zone_id | githubmilesma/huaweicloud-sdk-python-v3 | 1 | python

@zone_id.setter
def zone_id(self, zone_id):
    """Sets the zone_id of this ShowRecordSetByZoneResp.

    The zone_id of the zone that hosts this record set.

    :param zone_id: The zone_id of this ShowRecordSetByZoneResp.
    :type: str
    """
    self._zone_id = zone_id
048f11af2b082bb38b2d4ce9a5b0b0a8599409ae2cde983e5cc9c7d3f3e8289d | huaweicloud-sdk-dns/huaweicloudsdkdns/v2/model/show_record_set_by_zone_resp.py | zone_name | githubmilesma/huaweicloud-sdk-python-v3 | 1 | python

@property
def zone_name(self):
    """Gets the zone_name of this ShowRecordSetByZoneResp.

    The zone_name of the zone that hosts this record set.

    :return: The zone_name of this ShowRecordSetByZoneResp.
    :rtype: str
    """
    return self._zone_name
145ca1985f3071c2148a8c48668e56fe4efa557a02209ec019412c6b99d9f968 | huaweicloud-sdk-dns/huaweicloudsdkdns/v2/model/show_record_set_by_zone_resp.py | zone_name | githubmilesma/huaweicloud-sdk-python-v3 | 1 | python

@zone_name.setter
def zone_name(self, zone_name):
    """Sets the zone_name of this ShowRecordSetByZoneResp.

    The zone_name of the zone that hosts this record set.

    :param zone_name: The zone_name of this ShowRecordSetByZoneResp.
    :type: str
    """
    self._zone_name = zone_name
d629f092584c14966d32ed679355a0081a7fea37eecb91a81dba3800dca1e7a6 | huaweicloud-sdk-dns/huaweicloudsdkdns/v2/model/show_record_set_by_zone_resp.py | type | githubmilesma/huaweicloud-sdk-python-v3 | 1 | python

@property
def type(self):
    """Gets the type of this ShowRecordSetByZoneResp.

    The record type. Valid values: A, AAAA, MX, CNAME, TXT, NS, SRV, CAA.

    :return: The type of this ShowRecordSetByZoneResp.
    :rtype: str
    """
    return self._type
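The value range documented above invites a client-side check before a record set is built; a hedged sketch follows (the helper name and constant are illustrative, not part of the SDK):

# Illustrative helper, not part of huaweicloud-sdk-python-v3.
VALID_RECORD_TYPES = {"A", "AAAA", "MX", "CNAME", "TXT", "NS", "SRV", "CAA"}

def is_supported_record_type(record_type: str) -> bool:
    """Return True if record_type is in the range documented for ShowRecordSetByZoneResp.type."""
    return record_type in VALID_RECORD_TYPES

assert is_supported_record_type("A")
assert not is_supported_record_type("SPF")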