function
stringlengths 11
56k
| repo_name
stringlengths 5
60
| features
sequence |
---|---|---|
def __init__(self, component):
self._component = component | rwl/muntjac | [
43,
14,
43,
5,
1316308871
] |
def __init__(self, component):
super(BackgroundProcess, self).__init__()
self._component = component | rwl/muntjac | [
43,
14,
43,
5,
1316308871
] |
def assume_role(
role_arn: str, aws_region: str, credentials: Optional[dict] = None | linkedin/WhereHows | [
7356,
2096,
7356,
166,
1447825660
] |
def get_session(self) -> Session:
if (
self.aws_access_key_id
and self.aws_secret_access_key
and self.aws_session_token
):
return Session(
aws_access_key_id=self.aws_access_key_id,
aws_secret_access_key=self.aws_secret_access_key,
aws_session_token=self.aws_session_token,
region_name=self.aws_region,
)
elif self.aws_access_key_id and self.aws_secret_access_key:
return Session(
aws_access_key_id=self.aws_access_key_id,
aws_secret_access_key=self.aws_secret_access_key,
region_name=self.aws_region,
)
elif self.aws_role:
if isinstance(self.aws_role, str):
credentials = assume_role(self.aws_role, self.aws_region)
else:
credentials = reduce(
lambda new_credentials, role_arn: assume_role(
role_arn, self.aws_region, new_credentials
),
self.aws_role,
{},
)
return Session(
aws_access_key_id=credentials["AccessKeyId"],
aws_secret_access_key=credentials["SecretAccessKey"],
aws_session_token=credentials["SessionToken"],
region_name=self.aws_region,
)
else:
return Session(region_name=self.aws_region) | linkedin/WhereHows | [
7356,
2096,
7356,
166,
1447825660
] |
def get_glue_client(self) -> "GlueClient":
return self.get_session().client("glue") | linkedin/WhereHows | [
7356,
2096,
7356,
166,
1447825660
] |
def __init__(self, message, cause=None):
super(CStorageError, self).__init__(message)
self.cause = cause | netheosgithub/pcs_api | [
8,
5,
8,
4,
1401784319
] |
def __init__(self, c_path, expected_blob, message=None):
""":param c_path: the problematic path
:param expected_blob: if True, a blob was expected but a folder was found.
if False, a folder was expected but a blob was found
:param message: optional message"""
if not message:
message = 'Invalid file type at %r (expected %s)' % \
(c_path, 'blob' if expected_blob else 'folder')
super(CInvalidFileTypeError, self).__init__(message)
self.path = c_path
self.expected_blob = expected_blob | netheosgithub/pcs_api | [
8,
5,
8,
4,
1401784319
] |
def __init__(self, cause, delay=None):
super(CRetriableError, self).__init__(message=None, cause=cause)
self.delay = delay | netheosgithub/pcs_api | [
8,
5,
8,
4,
1401784319
] |
def __init__(self, message, c_path):
super(CFileNotFoundError, self).__init__(message)
self.path = c_path | netheosgithub/pcs_api | [
8,
5,
8,
4,
1401784319
] |
def __init__(self, request_method,
request_path,
status_code, reason,
message=None):
super(CHttpError, self).__init__(message)
self.request_method = request_method
self.request_path = request_path
self.status_code = status_code
self.reason = reason | netheosgithub/pcs_api | [
8,
5,
8,
4,
1401784319
] |
def _get_registered_content(obj, method, template_context):
"""
Given an object and a PluginTemplateExtension method name and the template context, return all the
registered content for the object's model.
"""
html = ''
context = {
'object': obj,
'request': template_context['request'],
'settings': template_context['settings'],
'csrf_token': template_context['csrf_token'],
'perms': template_context['perms'],
}
model_name = obj._meta.label_lower
template_extensions = registry['plugin_template_extensions'].get(model_name, [])
for template_extension in template_extensions:
# If the class has not overridden the specified method, we can skip it (because we know it
# will raise NotImplementedError).
if getattr(template_extension, method) == getattr(PluginTemplateExtension, method):
continue
# Update context with plugin-specific configuration parameters
plugin_name = template_extension.__module__.split('.')[0]
context['config'] = settings.PLUGINS_CONFIG.get(plugin_name, {})
# Call the method to render content
instance = template_extension(context)
content = getattr(instance, method)()
html += content
return mark_safe(html) | digitalocean/netbox | [
12158,
2099,
12158,
303,
1456755346
] |
def plugin_buttons(context, obj):
"""
Render all buttons registered by plugins
"""
return _get_registered_content(obj, 'buttons', context) | digitalocean/netbox | [
12158,
2099,
12158,
303,
1456755346
] |
def plugin_left_page(context, obj):
"""
Render all left page content registered by plugins
"""
return _get_registered_content(obj, 'left_page', context) | digitalocean/netbox | [
12158,
2099,
12158,
303,
1456755346
] |
def plugin_right_page(context, obj):
"""
Render all right page content registered by plugins
"""
return _get_registered_content(obj, 'right_page', context) | digitalocean/netbox | [
12158,
2099,
12158,
303,
1456755346
] |
def send_email(token_id, user, domain):
try:
sparkpost = SparkPost()
default_app_name = "INFOSYSTEM"
default_email_use_sandbox = False
default_reset_url = 'http://objetorelacional.com.br/#/reset'
default_noreply_email = '[email protected]'
default_email_subject = 'INFOSYSTEM - CONFIRMAR email e CRIAR senha'
infosystem_app_name = os.environ.get(
'INFOSYSTEM_APP_NAME', default_app_name)
infosystem_reset_url = os.environ.get(
'INFOSYSTEM_RESET_URL', default_reset_url)
infosystem_noreply_email = os.environ.get(
'INFOSYSTEM_NOREPLY_EMAIL', default_noreply_email)
infosystem_email_subject = os.environ.get(
'INFOSYSTEM_EMAIL_SUBJECT', default_email_subject)
infosystem_email_use_sandbox = os.environ.get(
'INFOSYSTEM_EMAIL_USE_SANDBOX',
default_email_use_sandbox) == 'True'
url = infosystem_reset_url + '/' + token_id + '/' + domain.name
sparkpost.transmissions.send(
use_sandbox=infosystem_email_use_sandbox,
recipients=[user.email],
html=_HTML_EMAIL_TEMPLATE.format(
app_name=infosystem_app_name, reset_url=url),
from_email=infosystem_noreply_email,
subject=infosystem_email_subject
)
except Exception:
# TODO(fdoliveira): do something here!
pass | samueldmq/infosystem | [
3,
2,
3,
107,
1462652014
] |
def do(self, session, **kwargs):
self.entity = super().do(session, **kwargs)
self.token = self.manager.api.tokens.create(
session=session, user=self.entity)
self.domain = self.manager.api.domains.get(id=self.entity.domain_id)
if not self.domain:
raise exception.OperationBadRequest()
return self.entity | samueldmq/infosystem | [
3,
2,
3,
107,
1462652014
] |
def do(self, session, **kwargs):
password = kwargs.get('password', None)
if password:
kwargs['password'] = hashlib.sha256(
password.encode('utf-8')).hexdigest()
self.entity = super().do(session, **kwargs)
return self.entity | samueldmq/infosystem | [
3,
2,
3,
107,
1462652014
] |
def pre(self, **kwargs):
email = kwargs.get('email', None)
domain_name = kwargs.get('domain_name', None)
infosystem_reset_url = os.environ.get(
'INFOSYSTEM_RESET_URL', 'http://objetorelacional.com.br/#/reset/')
self.reset_url = kwargs.get('reset_url', infosystem_reset_url)
if not (domain_name and email and self.reset_url):
raise exception.OperationBadRequest()
domains = self.manager.api.domains.list(name=domain_name)
if not domains:
raise exception.OperationBadRequest()
self.domain = domains[0]
users = self.manager.api.users.list(
email=email, domain_id=self.domain.id)
if not users:
raise exception.OperationBadRequest()
self.user = users[0]
return True | samueldmq/infosystem | [
3,
2,
3,
107,
1462652014
] |
def pre(self, **kwargs):
self.token = flask.request.headers.get('token')
self.password = kwargs.get('password')
if not (self.token and self.password):
raise exception.OperationBadRequest()
return True | samueldmq/infosystem | [
3,
2,
3,
107,
1462652014
] |
def post(self):
self.manager.api.tokens.delete(id=self.token) | samueldmq/infosystem | [
3,
2,
3,
107,
1462652014
] |
def do(self, session, user_id, **kwargs):
grants = self.manager.api.grants.list(user_id=user_id)
grants_ids = [g.role_id for g in grants]
roles = self.manager.api.roles.list()
user_roles_id = [r.id for r in roles if r.id in grants_ids]
# FIXME(fdoliveira) Try to send user_roles_id as paramater on query
policies = self.manager.api.policies.list()
policies_capabilitys_id = [
p.capability_id for p in policies if p.role_id in user_roles_id]
user = self.manager.api.users.list(id=user_id)[0]
capabilities = self.manager.api.capabilities.list(
domain_id=user.domain_id)
policy_capabilities = [
c for c in capabilities if c.id in policies_capabilitys_id]
# NOTE(samueldmq): if there is no policy for a capabiltiy,
# then it's open! add it too!
restricted_capabilities = [p.capability_id for p in policies]
open_capabilities = [
c for c in capabilities if c.id not in restricted_capabilities]
user_routes = [self.manager.api.routes.get(id=c.route_id) for c in (
policy_capabilities + open_capabilities)]
bypass_routes = self.manager.api.routes.list(bypass=True)
return list(set(user_routes).union(set(bypass_routes))) | samueldmq/infosystem | [
3,
2,
3,
107,
1462652014
] |
def __init__(
self,
*,
host: str = DEFAULT_HOST,
credentials: ga_credentials.Credentials = None,
credentials_file: Optional[str] = None,
scopes: Optional[Sequence[str]] = None,
quota_project_id: Optional[str] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
always_use_jwt_access: Optional[bool] = False,
**kwargs, | googleapis/python-scheduler | [
28,
10,
28,
2,
1575936606
] |
def _prep_wrapped_messages(self, client_info):
# Precompute the wrapped methods.
self._wrapped_methods = {
self.list_jobs: gapic_v1.method.wrap_method(
self.list_jobs,
default_retry=retries.Retry(
initial=0.1,
maximum=60.0,
multiplier=1.3,
predicate=retries.if_exception_type(
core_exceptions.DeadlineExceeded,
core_exceptions.ServiceUnavailable,
),
deadline=600.0,
),
default_timeout=600.0,
client_info=client_info,
),
self.get_job: gapic_v1.method.wrap_method(
self.get_job,
default_retry=retries.Retry(
initial=0.1,
maximum=60.0,
multiplier=1.3,
predicate=retries.if_exception_type(
core_exceptions.DeadlineExceeded,
core_exceptions.ServiceUnavailable,
),
deadline=600.0,
),
default_timeout=600.0,
client_info=client_info,
),
self.create_job: gapic_v1.method.wrap_method(
self.create_job, default_timeout=600.0, client_info=client_info,
),
self.update_job: gapic_v1.method.wrap_method(
self.update_job, default_timeout=600.0, client_info=client_info,
),
self.delete_job: gapic_v1.method.wrap_method(
self.delete_job,
default_retry=retries.Retry(
initial=0.1,
maximum=60.0,
multiplier=1.3,
predicate=retries.if_exception_type(
core_exceptions.DeadlineExceeded,
core_exceptions.ServiceUnavailable,
),
deadline=600.0,
),
default_timeout=600.0,
client_info=client_info,
),
self.pause_job: gapic_v1.method.wrap_method(
self.pause_job,
default_retry=retries.Retry(
initial=0.1,
maximum=60.0,
multiplier=1.3,
predicate=retries.if_exception_type(
core_exceptions.DeadlineExceeded,
core_exceptions.ServiceUnavailable,
),
deadline=600.0,
),
default_timeout=600.0,
client_info=client_info,
),
self.resume_job: gapic_v1.method.wrap_method(
self.resume_job,
default_retry=retries.Retry(
initial=0.1,
maximum=60.0,
multiplier=1.3,
predicate=retries.if_exception_type(
core_exceptions.DeadlineExceeded,
core_exceptions.ServiceUnavailable,
),
deadline=600.0,
),
default_timeout=600.0,
client_info=client_info,
),
self.run_job: gapic_v1.method.wrap_method(
self.run_job, default_timeout=600.0, client_info=client_info,
),
} | googleapis/python-scheduler | [
28,
10,
28,
2,
1575936606
] |
def list_jobs(
self, | googleapis/python-scheduler | [
28,
10,
28,
2,
1575936606
] |
def get_job(
self, | googleapis/python-scheduler | [
28,
10,
28,
2,
1575936606
] |
def create_job(
self, | googleapis/python-scheduler | [
28,
10,
28,
2,
1575936606
] |
def update_job(
self, | googleapis/python-scheduler | [
28,
10,
28,
2,
1575936606
] |
def delete_job(
self, | googleapis/python-scheduler | [
28,
10,
28,
2,
1575936606
] |
def pause_job(
self, | googleapis/python-scheduler | [
28,
10,
28,
2,
1575936606
] |
def resume_job(
self, | googleapis/python-scheduler | [
28,
10,
28,
2,
1575936606
] |
def run_job(
self, | googleapis/python-scheduler | [
28,
10,
28,
2,
1575936606
] |
def __init__(self, db):
idtable.CachedIdTable.__init__(self, db, "Tags", "tagId", "tag") | sassoftware/conary | [
47,
9,
47,
4,
1396904066
] |
def get(self, vs, ts):
key = vs, ts
if self.has_key(key):
return self[key]
ts = [ float(x) for x in ts.split(":") ]
v = versions.VersionFromString(vs, timeStamps = ts)
self[key] = v
return v | sassoftware/conary | [
47,
9,
47,
4,
1396904066
] |
def get(self, frozen):
if self.has_key(frozen):
return self[frozen]
if frozen is None:
f = deps.deps.Flavor()
else:
f = deps.deps.ThawFlavor(frozen)
self[frozen] = f
return f | sassoftware/conary | [
47,
9,
47,
4,
1396904066
] |
def __init__(self, db):
self.db = db
schema.createDBTroveFiles(db)
self.tags = Tags(self.db) | sassoftware/conary | [
47,
9,
47,
4,
1396904066
] |
def getByInstanceId(self, instanceId, justPresent = True):
cu = self.db.cursor()
if justPresent:
cu.execute("SELECT path, stream FROM DBTroveFiles "
"WHERE instanceId=? and isPresent=1", instanceId)
else:
cu.execute("SELECT path, stream FROM DBTroveFiles "
"WHERE instanceId=?", instanceId)
for path, stream in cu:
yield (path, stream) | sassoftware/conary | [
47,
9,
47,
4,
1396904066
] |
def getFileByFileId(self, fileId, justPresent = True):
cu = self.db.cursor()
if justPresent:
cu.execute("SELECT path, stream FROM DBTroveFiles "
"WHERE fileId=? AND isPresent = 1", fileId)
else:
cu.execute("SELECT path, stream FROM DBTroveFiles "
"WHERE fileId=?", fileId)
# there could be multiple matches, but they should all be redundant
try:
path, stream = cu.next()
return (path, stream)
except StopIteration:
raise KeyError, fileId | sassoftware/conary | [
47,
9,
47,
4,
1396904066
] |
def iterPath(self, path):
cu = self.db.cursor()
cu.execute("SELECT instanceId FROM DBTroveFiles WHERE "
"isPresent=1 AND path=?", path)
for instanceId in cu:
yield instanceId[0] | sassoftware/conary | [
47,
9,
47,
4,
1396904066
] |
def _updatePathIdsPresent(self, instanceId, pathIdList, isPresent):
# Max number of bound params
chunkSize = 990
plen = len(pathIdList)
cu = self.db.cursor()
i = 0
while i < plen:
clen = min(chunkSize, plen - i)
bvals = [ isPresent, instanceId ] + pathIdList[i : i + clen]
bparams = ','.join('?' * clen)
cu.execute("UPDATE DBTroveFiles "
"SET isPresent=? "
"WHERE instanceId=? AND pathId in (%s)" % bparams,
bvals)
i += clen | sassoftware/conary | [
47,
9,
47,
4,
1396904066
] |
def restorePathIds(self, instanceId, pathIdList):
self._updatePathIdsPresent(instanceId, pathIdList, isPresent = 1) | sassoftware/conary | [
47,
9,
47,
4,
1396904066
] |
def __init__(self, db):
self.db = db
schema.createInstances(db) | sassoftware/conary | [
47,
9,
47,
4,
1396904066
] |
def hasName(self, name):
cu = self.db.cursor()
cu.execute("SELECT instanceId FROM Instances "
"WHERE troveName=? AND isPresent=1",
name)
return cu.fetchone() != None | sassoftware/conary | [
47,
9,
47,
4,
1396904066
] |
def addId(self, troveName, versionId, flavorId, timeStamps,
isPresent = True, pinned = False):
assert(min(timeStamps) > 0)
if isPresent:
isPresent = 1
else:
isPresent = 0
cu = self.db.cursor()
cu.execute("INSERT INTO Instances(troveName, versionId, flavorId, "
" timeStamps, isPresent, pinned) "
"VALUES (?, ?, ?, ?, ?, ?)",
(troveName, versionId, flavorId,
":".join([ "%.3f" % x for x in timeStamps]), isPresent,
pinned))
return cu.lastrowid | sassoftware/conary | [
47,
9,
47,
4,
1396904066
] |
def getId(self, theId, justPresent = True):
cu = self.db.cursor()
if justPresent:
pres = "AND isPresent=1"
else:
pres = ""
cu.execute("SELECT troveName, versionId, flavorId, isPresent "
"FROM Instances WHERE instanceId=? %s" % pres, theId)
try:
return cu.next()
except StopIteration:
raise KeyError, theId | sassoftware/conary | [
47,
9,
47,
4,
1396904066
] |
def idIsPresent(self, instanceId):
cu = self.db.cursor()
cu.execute("SELECT isPresent FROM Instances WHERE "
"instanceId=?", instanceId)
val = cu.fetchone()
if not val:
return 0
return val[0] | sassoftware/conary | [
47,
9,
47,
4,
1396904066
] |
def has_key(self, item):
cu = self.db.cursor()
cu.execute("SELECT instanceId FROM Instances WHERE "
"troveName=? AND versionId=? AND flavorId=?",
item)
return not(cu.fetchone() == None) | sassoftware/conary | [
47,
9,
47,
4,
1396904066
] |
def get(self, item, defValue, justPresent = True):
cu = self.db.cursor()
if justPresent:
pres = " AND isPresent=1"
else:
pres = ""
cu.execute("SELECT instanceId FROM Instances WHERE "
"troveName=? AND versionId=? AND "
"flavorId=? %s" % pres, item)
item = cu.fetchone()
if not item:
return defValue
return item[0] | sassoftware/conary | [
47,
9,
47,
4,
1396904066
] |
def addId(self, flavor):
return idtable.IdTable.addId(self, flavor.freeze()) | sassoftware/conary | [
47,
9,
47,
4,
1396904066
] |
def getId(self, flavorId):
return deps.deps.ThawFlavor(idtable.IdTable.getId(self, flavorId)) | sassoftware/conary | [
47,
9,
47,
4,
1396904066
] |
def __delitem__(self, flavor):
# XXX: We really should be testing for a deps.deps.Flavor
# instance, but the split of Flavor from DependencySet would
# cause too much code breakage right now....
assert(isinstance(flavor, deps.deps.DependencySet))
if flavor.isEmpty():
return
idtable.IdTable.__delitem__(self, flavor.freeze()) | sassoftware/conary | [
47,
9,
47,
4,
1396904066
] |
def __init__(self, db):
idtable.IdTable.__init__(self, db, "Flavors", "flavorId", "flavor")
cu = db.cursor()
cu.execute("SELECT FlavorID from Flavors")
if cu.fetchone() == None:
# reserve flavor 0 for "no flavor information"
cu.execute("INSERT INTO Flavors (flavorId, flavor) VALUES (0, NULL)") | sassoftware/conary | [
47,
9,
47,
4,
1396904066
] |
def __init__(self, db):
idtable.IdMapping.__init__(self, db, "DBFlavorMap", "instanceId",
"flavorId") | sassoftware/conary | [
47,
9,
47,
4,
1396904066
] |
def __init__(self, path, timeout = None):
if timeout is not None:
self.timeout = timeout
self.db = None
try:
self.db = dbstore.connect(path, driver = "sqlite",
timeout=self.timeout)
self.schemaVersion = self.db.getVersion().major
except sqlerrors.DatabaseLocked:
raise errors.DatabaseLockedError
self.db.dbh._BEGIN = "BEGIN"
try:
# dbstore? what's dbstore
cu = self.db.cursor()
cu.execute("BEGIN IMMEDIATE")
except sqlerrors.ReadOnlyDatabase:
readOnly = True
else:
readOnly = False
self.db.rollback()
if readOnly and self.schemaVersion < schema.VERSION:
raise OldDatabaseSchema(
"The Conary database on this system is too old. It will be \n"
"automatically converted as soon as you run Conary with \n"
"write permissions for the database (which normally means \n"
"as root). \n")
elif self.schemaVersion > schema.VERSION:
raise schema.NewDatabaseSchema()
self.db.loadSchema()
newCursor = self.schemaVersion < schema.VERSION
schema.checkVersion(self.db)
if newCursor:
cu = self.db.cursor()
if self.schemaVersion == 0:
schema.createSchema(self.db)
schema.setupTempDepTables(self.db, cu)
schema.setupTempTables(self.db, cu)
self.troveFiles = DBTroveFiles(self.db)
self.instances = DBInstanceTable(self.db)
self.versionTable = versiontable.VersionTable(self.db)
self.flavors = Flavors(self.db)
self.flavorMap = DBFlavorMap(self.db)
self.depTables = deptable.DependencyTables(self.db)
self.troveInfoTable = troveinfo.TroveInfoTable(self.db)
self.needsCleanup = False
self.addVersionCache = {}
self.flavorsNeeded = {} | sassoftware/conary | [
47,
9,
47,
4,
1396904066
] |
def begin(self):
"""
Force the database to begin a transaction; this locks the database
so no one can touch it until a commit() or rollback().
"""
return self.db.transaction() | sassoftware/conary | [
47,
9,
47,
4,
1396904066
] |
def iterAllTroveNames(self):
return self.instances.iterNames() | sassoftware/conary | [
47,
9,
47,
4,
1396904066
] |
def iterVersionByName(self, name, withFlavors):
cu = self.db.cursor()
if withFlavors:
flavorCol = "flavor"
flavorClause = """INNER JOIN Flavors ON
Flavors.flavorId = Instances.flavorId"""
else:
flavorCol = "NULL"
flavorClause = ""
cu.execute("""SELECT DISTINCT version, timeStamps, %s
FROM Instances NATURAL JOIN Versions
%s
WHERE troveName='%s' AND isPresent=1"""
% (flavorCol, flavorClause, name))
flavors = {}
for (match, timeStamps, flavorStr) in cu:
ts = [float(x) for x in timeStamps.split(':')]
version = versions.VersionFromString(match, timeStamps=ts)
if withFlavors:
f = flavors.get(flavorStr, None)
if f is None:
f = deps.deps.ThawFlavor(flavorStr)
flavors[flavorStr] = f
yield (version, f)
else:
yield (version) | sassoftware/conary | [
47,
9,
47,
4,
1396904066
] |
def iterAllTroves(self, withPins = False):
cu = self.db.cursor()
cu.execute("""
SELECT troveName, version, timeStamps, flavor, pinned
FROM Instances NATURAL JOIN Versions
INNER JOIN Flavors
ON Instances.flavorid = Flavors.flavorid
WHERE isPresent=1""")
versionCache = VersionCache()
flavorCache = FlavorCache()
for (troveName, version, timeStamps, flavor, pinned) in cu:
version = versionCache.get(version, timeStamps)
flavor = flavorCache.get(flavor)
nvf = TroveTuple(troveName, version, flavor)
if withPins:
yield nvf, (pinned != 0)
else:
yield nvf | sassoftware/conary | [
47,
9,
47,
4,
1396904066
] |
def trovesArePinned(self, troveList):
"""
Get a list of which troves in troveList are pinned
@param troveList: a list of troves in (name, version, flavor) form
@type troveList: list
@note:
This function makes database calls and may raise any exceptions
defined in L{conary.dbstore.sqlerrors}
@raises AssertionError:
"""
cu = self.db.cursor()
cu.execute("""
CREATE TEMPORARY TABLE tlList(
name %(STRING)s,
version %(STRING)s,
flavor %(STRING)s
)""" % self.db.keywords, start_transaction = False)
def _iter(tl):
for name, version, flavor in troveList:
yield (name, version.asString(), flavor.freeze())
cu.executemany("INSERT INTO tlList VALUES(?, ?, ?)", _iter(troveList),
start_transaction = False)
# count the number of items we're inserting
count = cu.execute('SELECT count(*) FROM tlList').next()[0]
cu.execute(""" | sassoftware/conary | [
47,
9,
47,
4,
1396904066
] |
def hasByName(self, name):
return self.instances.hasName(name) | sassoftware/conary | [
47,
9,
47,
4,
1396904066
] |
def getInstanceId(self, troveName, versionId, flavorId,
timeStamps, isPresent = True):
theId = self.instances.get((troveName, versionId, flavorId),
None)
if theId is None:
theId = self.instances.addId(troveName, versionId, flavorId,
timeStamps, isPresent = isPresent)
return theId | sassoftware/conary | [
47,
9,
47,
4,
1396904066
] |
def addTrove(self, trove, pin = False, oldTroveSpec = None):
cu = self.db.cursor()
troveName = trove.getName()
troveVersion = trove.getVersion()
troveVersionId = self.getVersionId(troveVersion, {})
self.addVersionCache[troveVersion] = troveVersionId
if oldTroveSpec is not None:
oldTroveId = self._findTroveInstanceId(cu, *oldTroveSpec)
else:
oldTroveId = None
troveFlavor = trove.getFlavor()
if not troveFlavor.isEmpty():
self.flavorsNeeded[troveFlavor] = True
for (name, version, flavor) in trove.iterTroveList(strongRefs=True,
weakRefs=True):
if not flavor.isEmpty():
self.flavorsNeeded[flavor] = True
if self.flavorsNeeded:
# create all of the flavor id's we'll need
cu.execute("""
CREATE TEMPORARY TABLE flavorsNeeded(
empty INTEGER,
flavor %(STRING)s
)""" % self.db.keywords)
for flavor in self.flavorsNeeded.keys():
cu.execute("INSERT INTO flavorsNeeded VALUES(?, ?)",
None, flavor.freeze())
cu.execute("""
INSERT INTO Flavors (flavorId, flavor)
SELECT flavorsNeeded.empty, flavorsNeeded.flavor
FROM flavorsNeeded LEFT OUTER JOIN Flavors USING(flavor)
WHERE Flavors.flavorId is NULL
""")
cu.execute("DROP TABLE flavorsNeeded")
self.flavorsNeeded = {}
# get all of the flavor id's we might need; this could be somewhat
# more efficient for an update, but it's not clear making it
# more efficient is actually a speedup (as we'd have to figure out
# which flavorId's we need). it could be that all of this code
# would get faster if we just added the files to a temporary table
# first and insert'd into the final table???
flavors = {}
if not troveFlavor.isEmpty():
flavors[troveFlavor] = True
for (name, version, flavor) in trove.iterTroveList(strongRefs=True,
weakRefs=True):
if not flavor.isEmpty():
flavors[flavor] = True
flavorMap = self.flavors.getItemDict(flavors.iterkeys())
del flavors
if troveFlavor.isEmpty():
troveFlavorId = 0
else:
troveFlavorId = flavorMap[troveFlavor.freeze()]
# the instance may already exist (it could be referenced by a package
# which has already been added, or it may be in the database as
# not present)
troveInstanceId = self.instances.get((troveName, troveVersionId,
troveFlavorId), None, justPresent = False)
if troveInstanceId:
self.instances.setPresent(troveInstanceId, 1, pinned=pin)
else:
assert(min(troveVersion.timeStamps()) > 0)
troveInstanceId = self.instances.addId(troveName, troveVersionId,
troveFlavorId, troveVersion.timeStamps(),
pinned = pin)
assert(cu.execute("SELECT COUNT(*) FROM TroveTroves WHERE "
"instanceId=?", troveInstanceId).next()[0] == 0)
cu.execute("""
CREATE TEMPORARY TABLE IncludedTroves(
troveName %(STRING)s,
versionId INTEGER,
flavorId INTEGER,
timeStamps %(STRING)s,
flags INTEGER
) """ % self.db.keywords)
def _iter(trove):
for (name, version, flavor), byDefault, isStrong \
in trove.iterTroveListInfo():
versionId = self.getVersionId(version, self.addVersionCache)
if flavor.isEmpty():
flavorId = 0
else:
flavorId = flavorMap[flavor.freeze()]
flags = 0
if not isStrong:
flags |= schema.TROVE_TROVES_WEAKREF
if byDefault:
flags |= schema.TROVE_TROVES_BYDEFAULT;
yield (name, versionId, flavorId,
":".join([ "%.3f" % x for x in version.timeStamps()]),
flags)
cu.executemany("INSERT INTO IncludedTroves VALUES(?, ?, ?, ?, ?)",
_iter(trove))
# make sure every trove we include has an instanceid
cu.execute("""
INSERT INTO Instances (troveName, versionId, flavorId,
timeStamps, isPresent, pinned)
SELECT IncludedTroves.troveName,
IncludedTroves.versionId,
IncludedTroves.flavorId,
IncludedTroves.timeStamps, 0, 0
FROM IncludedTroves LEFT OUTER JOIN Instances ON
IncludedTroves.troveName == Instances.troveName AND
IncludedTroves.versionId == Instances.versionId AND
IncludedTroves.flavorId == Instances.flavorId
WHERE
instanceId is NULL
""")
# now include the troves in this one
cu.execute("""
INSERT INTO TroveTroves(instanceId, includedId, flags, inPristine)
SELECT ?, instanceId, flags, ?
FROM IncludedTroves JOIN Instances ON
IncludedTroves.troveName == Instances.troveName AND
IncludedTroves.versionId == Instances.versionId AND
IncludedTroves.flavorId == Instances.flavorId
""", troveInstanceId, True)
cu.execute("DROP TABLE IncludedTroves")
trove.troveInfo.installTime.set(time.time())
self.depTables.add(cu, trove, troveInstanceId)
self.troveInfoTable.addInfo(cu, trove, troveInstanceId)
# these are collections that _could_ include trove (they have
# an empty slot where this trove might fit)
cu.execute('''SELECT TroveTroves.instanceId FROM Instances
JOIN TroveTroves
ON (Instances.instanceId = TroveTroves.includedId)
WHERE troveName = ? AND isPresent=0''', trove.getName())
collections = cu.fetchall()
cu.execute("select instanceId from trovetroves where includedid=?", troveInstanceId)
collections += cu.fetchall()
for x, in collections:
self._sanitizeTroveCollection(cu, x, nameHint = trove.getName())
self._sanitizeTroveCollection(cu, troveInstanceId)
cu.execute("""CREATE TEMPORARY TABLE NewFiles (
pathId BLOB,
versionId INTEGER,
path %(PATHTYPE)s,
fileId BLOB,
stream BLOB,
isPresent INTEGER)""" % self.db.keywords)
cu.execute("""CREATE TEMPORARY TABLE NewFileTags (
pathId BLOB,
tag %(STRING)s)""" % self.db.keywords)
stmt = cu.compile("""
INSERT INTO NewFiles (pathId, versionId, path, fileId,
stream, isPresent)
VALUES (?, ?, ?, ?, ?, ?)""")
return (cu, troveInstanceId, stmt, oldTroveId) | sassoftware/conary | [
47,
9,
47,
4,
1396904066
] |
def addFile(self, troveInfo, pathId, path, fileId, fileVersion,
fileStream = None, isPresent = True):
(cu, troveInstanceId, addFileStmt, oldInstanceId) = troveInfo
versionId = self.getVersionId(fileVersion, self.addVersionCache)
if fileStream:
cu.execstmt(addFileStmt, pathId, versionId, path, fileId,
fileStream, isPresent)
tags = files.frozenFileTags(fileStream)
if tags:
cu.executemany("INSERT INTO NewFileTags VALUES (?, ?)",
itertools.izip(itertools.repeat(pathId), tags))
else:
cu.execute("""
UPDATE DBTroveFiles
SET instanceId=?, isPresent=?, path=?, versionId=?
WHERE pathId=? AND instanceId=?""",
troveInstanceId, isPresent, path, versionId,
pathId, oldInstanceId) | sassoftware/conary | [
47,
9,
47,
4,
1396904066
] |
def markUserReplacedFiles(self, userReplaced):
cu = self.db.cursor()
cu.execute("""CREATE TEMPORARY TABLE UserReplaced(
name STRING, version STRING, flavor STRING,
pathId BLOB)""")
for (name, version, flavor), fileList in userReplaced.iteritems():
for pathId, content, fileObj in fileList:
flavorStr = flavor.freeze()
if not flavorStr:
flavorStr = None
cu.execute("""
INSERT INTO UserReplaced(name, version, flavor, pathId)
VALUES (?, ?, ?, ?)
""", name, version.asString(), flavorStr, pathId)
cu.execute("""
UPDATE DBTroveFiles SET isPresent = 0 WHERE
rowId IN (SELECT DBTroveFiles.rowId FROM UserReplaced
JOIN Versions ON
UserReplaced.version = Versions.version
JOIN Flavors ON
UserReplaced.flavor = Flavors.flavor OR
(UserReplaced.flavor IS NULL AND
Flavors.flavor IS NULL)
JOIN Instances ON
Instances.troveName = UserReplaced.name AND
Instances.versionId = versions.versionId AND
Instances.flavorId = flavors.flavorId
JOIN DBTroveFiles ON
DBTroveFiles.instanceId = Instances.instanceId AND
DBTroveFiles.pathId = UserReplaced.pathId)
""")
cu.execute("DROP TABLE UserReplaced") | sassoftware/conary | [
47,
9,
47,
4,
1396904066
] |
def getFile(self, pathId, fileId, pristine = False):
stream = self.troveFiles.getFileByFileId(fileId,
justPresent = not pristine)[1]
return files.ThawFile(stream, pathId) | sassoftware/conary | [
47,
9,
47,
4,
1396904066
] |
def findFileVersion(self, fileId):
cu = self.db.cursor()
cu.execute("""SELECT stream FROM DBTroveFiles
INNER JOIN Versions ON
DBTroveFiles.versionId == Versions.versionId
WHERE fileId == ?""", fileId)
for (stream,) in cu:
return files.ThawFile(stream, None)
return None | sassoftware/conary | [
47,
9,
47,
4,
1396904066
] |
def hasTroves(self, troveList):
instances = self._lookupTroves(troveList)
result = [ False ] * len(troveList)
for i, instanceId in enumerate(instances):
if instanceId is not None:
result[i] = True
return result | sassoftware/conary | [
47,
9,
47,
4,
1396904066
] |
def getTroveFiles(self, troveList, onlyDirectories = False):
instanceIds = self._lookupTroves(troveList)
if None in instanceIds:
raise KeyError
trvByInstanceId = dict([ (instId, trvInfo) for
instId, trvInfo in itertools.izip(instanceIds, troveList)
if instId is not None ])
instanceIds = trvByInstanceId.keys()
cu = self.db.cursor()
cu.execute("""CREATE TEMPORARY TABLE getTrovesTbl(
idx %(PRIMARYKEY)s,
instanceId INT)
""" % self.db.keywords, start_transaction = False)
cu.executemany("INSERT INTO getTrovesTbl VALUES (?, ?)",
list(enumerate(instanceIds)), start_transaction=False)
if onlyDirectories:
dirClause = "AND stream LIKE 'd%'"
else:
dirClause = ""
cu.execute("""SELECT instanceId, path, stream FROM getTrovesTbl JOIN
DBTroveFiles USING (instanceId)
WHERE isPresent = 1 %s
ORDER BY path""" % dirClause)
lastId = None
for instanceId, path, stream in cu:
yield trvByInstanceId[instanceId], path, stream
cu.execute("DROP TABLE getTrovesTbl", start_transaction = False) | sassoftware/conary | [
47,
9,
47,
4,
1396904066
] |
def _iter(tl):
for i, (name, version, flavor) in enumerate(tl):
yield (i, name, str(version), flavor.freeze()) | sassoftware/conary | [
47,
9,
47,
4,
1396904066
] |
def _iterTroves(self, pristine, instanceIds, withFiles = True,
                withDeps = True, errorOnMissing=True,
                withFileObjects = False):
    """
    Iterates over the troves associated with a list of instanceIds.

    @param pristine: Return the trove unmodified based on the local system.
    @type pristine: boolean
    @param instanceIds: Instance ids to iterate over.
    @type instanceIds: list of int
    @param withFiles: Include (pathId, path, fileId, version) information
    for the files referenced by troves.
    @type withFiles: boolean
    @param errorOnMissing: Raise an error on a missing instanceId,
    otherwise return None
    @type errorOnMissing: boolean
    @param withFileObjects: Return Trove objects w/ file objects included.
    @type withFileObjects: boolean
    """
    instanceIds = list(instanceIds)
    if withFileObjects:
        troveClass = trove.TroveWithFileObjects
    else:
        troveClass = trove.Trove
    cu = self.db.cursor()
    # Stage the requested ids in a temp table, remembering each one's
    # position so results come back in input order.
    cu.execute("""CREATE TEMPORARY TABLE getTrovesTbl(
                            idx %(PRIMARYKEY)s,
                            instanceId INT)
               """ % self.db.keywords, start_transaction = False)
    cu.executemany("INSERT INTO getTrovesTbl VALUES (?, ?)",
                   list(enumerate(instanceIds)), start_transaction=False)
    # Identify each instance (name/version/flavor) and build empty troves.
    cu.execute("""SELECT idx, troveName, version, flavor, timeStamps
                    FROM getTrovesTbl
                    JOIN Instances USING(instanceId)
                    JOIN Versions USING(versionId)
                    JOIN Flavors ON (Instances.flavorId = Flavors.flavorId)
                """)
    versionCache = VersionCache()
    flavorCache = FlavorCache()
    results = [ None for x in instanceIds ]
    for idx, troveName, versionStr, flavorStr, timeStamps in cu:
        troveFlavor = flavorCache.get(flavorStr)
        troveVersion = versionCache.get(versionStr, timeStamps)
        trv = troveClass(troveName, troveVersion, troveFlavor, None,
                         setVersion = False)
        results[idx] = trv
    # add all of the troves which are references from this trove; the
    # flavor cache is already complete
    cu = self.db.cursor()
    if pristine:
        pristineClause = "TroveTroves.inPristine = 1"
    else:
        pristineClause = "Instances.isPresent = 1"
    cu.execute("""
        SELECT idx, troveName, version, flags, timeStamps, flavor
        FROM getTrovesTbl
        JOIN TroveTroves USING(instanceId)
        JOIN Instances
        JOIN Versions ON
            Versions.versionId = Instances.versionId
        JOIN Flavors ON
            TroveTroves.includedId = Instances.instanceId AND
            Flavors.flavorId = Instances.flavorId
        WHERE %s
        ORDER BY idx
    """ % pristineClause)
    for idx, name, versionStr, flags, timeStamps, flavorStr in cu:
        version = versionCache.get(versionStr, timeStamps)
        flavor = flavorCache.get(flavorStr)
        # flags is a bitmask; decode byDefault/weakRef bits.
        byDefault = (flags & schema.TROVE_TROVES_BYDEFAULT) != 0
        weakRef = (flags & schema.TROVE_TROVES_WEAKREF) != 0
        results[idx].addTrove(name, version, flavor, byDefault = byDefault,
                              weakRef = weakRef)
    # Attach dependencies and troveinfo; without files, troves are
    # complete at this point and can be yielded immediately.
    for idx, instanceId in enumerate(instanceIds):
        trv = results[idx]
        if withDeps:
            self.depTables.get(cu, trv, instanceId)
        self.troveInfoTable.getInfo(cu, trv, instanceId)
        if not withFiles:
            yield trv
    if not pristine or withFiles:
        if withFileObjects:
            streamStr = "stream"
        else:
            streamStr = "NULL"
        cu.execute("""SELECT idx, pathId, path, version, fileId, isPresent,
                             %s
                        FROM getTrovesTbl
                        JOIN DBTroveFiles USING(instanceId)
                        JOIN Versions ON
                            Versions.versionId = DBTroveFiles.versionId
                        ORDER BY idx
                   """ % streamStr)
        # Rows arrive grouped by idx; yield each finished trove as soon as
        # the stream moves past its index.
        curIdx = 0
        for (idx, pathId, path, version, fileId, isPresent, stream) in cu:
            if not pristine and not isPresent:
                continue
            version = versions.VersionFromString(version)
            results[idx].addFile(pathId, path, version, fileId)
            if stream:
                results[idx].addFileObject(fileId,
                                           files.ThawFile(stream, pathId))
            while idx != curIdx:
                yield results[curIdx]
                curIdx += 1
        # Drain troves that had no file rows at all.
        while curIdx < len(results):
            if not pristine:
                # NOTE(review): 'idx' here is left over from the file loop;
                # this looks like it should be results[curIdx] — confirm
                # against upstream history before changing.
                results[idx].computePathHashes()
            if not withFiles:
                results[idx].removeAllFiles()
            yield results[curIdx]
            curIdx += 1
    cu.execute("DROP TABLE getTrovesTbl", start_transaction = False)
47,
9,
47,
4,
1396904066
] |
def commit(self):
    """Commit the current transaction, first garbage-collecting rows left
    behind by trove removals (troveinfo/instances marked not present, and
    versions used by neither an instance nor a file row)."""
    if self.needsCleanup:
        # this join could be slow; it would be much better if we could
        # restrict the select on Instances by instanceId, but that's
        # not so easy and may require multiple passes (since we may
        # now be able to remove a trove which was included by a trove
        # which was included by a trove which was removed; getting that
        # closure may have to be iterative?). that process may be faster
        # then the full join?
        # NOTE: if we could assume that we have weak references this
        # would be a two-step process
        cu = self.db.cursor()
        # Drop troveinfo rows belonging to troves no longer present.
        cu.execute("""DELETE FROM TroveInfo WHERE
                        instanceId IN (
                            SELECT Instances.instanceId FROM Instances
                                WHERE isPresent = 0)
                   """)
        # Remove absent instances that nothing references any more.
        cu.execute("""DELETE FROM Instances WHERE
                        instanceId IN (
                            SELECT Instances.instanceId
                            FROM
                                Instances LEFT OUTER JOIN TroveTroves
                                ON Instances.instanceId = TroveTroves.includedId
                                WHERE isPresent = 0 AND TroveTroves.includedId IS NULL)
                    """)
        # Purge versions queued in RemovedVersions once nothing uses them.
        cu.execute("""DELETE FROM Versions WHERE Versions.versionId IN
                        (SELECT rmvdVer FROM RemovedVersions
                            LEFT OUTER JOIN Instances ON
                                rmvdVer == Instances.versionId
                            LEFT OUTER JOIN DBTroveFiles ON
                                rmvdVer == DBTroveFiles.versionId
                            WHERE
                                Instances.versionId is NULL AND
                                DBTroveFiles.versionId is NULL)""")
        cu.execute("DROP TABLE RemovedVersions")
        self.needsCleanup = False
    self.db.commit()
    # Reset per-transaction caches.
    self.addVersionCache = {}
    self.flavorsNeeded = {}
47,
9,
47,
4,
1396904066
] |
def pathIsOwned(self, path):
    """True when at least one present trove claims ownership of *path*."""
    return any(self.instances.idIsPresent(instId)
               for instId in self.troveFiles.iterPath(path))
47,
9,
47,
4,
1396904066
] |
def pathsOwned(self, pathList):
    """Return a parallel list of booleans: True where some present trove
    owns the corresponding path of *pathList*."""
    if not pathList:
        return []
    cu = self.db.cursor()
    cu.execute("""
        CREATE TEMPORARY TABLE pathList(
            path %(STRING)s
        )""" % self.db.keywords, start_transaction = False)
    self.db.bulkload("pathList", [ (x,) for x in pathList ], [ "path" ],
                     start_transaction = False)
    cu.execute("""
        SELECT path FROM pathList JOIN DBTroveFiles USING(path) WHERE
            DBTroveFiles.isPresent = 1
    """)
    owned = set()
    for row in cu:
        owned.add(row[0])
    cu.execute("DROP TABLE pathList", start_transaction = False)
    return [ p in owned for p in pathList ]
47,
9,
47,
4,
1396904066
] |
def removeFileFromTrove(self, trove, path):
    """Drop *path* from the file list of the given installed trove."""
    verId = self.versionTable[trove.getVersion()]
    flvId = self.flavors[trove.getFlavor()]
    instId = self.instances[(trove.getName(), verId, flvId)]
    self.troveFiles.removePath(instId, path)
47,
9,
47,
4,
1396904066
] |
def restorePathIdsToTrove(self, troveName, troveVersion, troveFlavor,
                          pathIdList):
    """Mark the given pathIds as present again for the named trove."""
    verId = self.versionTable[troveVersion]
    flvId = self.flavors[troveFlavor]
    instId = self.instances[(troveName, verId, flvId)]
    self.troveFiles.restorePathIds(instId, pathIdList)
47,
9,
47,
4,
1396904066
] |
def mapPinnedTroves(self, mapList):
    """For each (name, pinnedInfo, mapInfo) entry, make every collection
    that references the mapped trove also reference the pinned one (as a
    non-pristine link), avoiding duplicate TroveTroves rows.

    pinnedInfo and mapInfo are (version, flavor) pairs; mapInfo's version
    must carry timestamps.
    """
    if not mapList:
        return
    cu = self.db.cursor()
    cu.execute("""
        CREATE TEMPORARY TABLE mlt(
            name %(STRING)s,
            pinnedVersion %(STRING)s,
            pinnedFlavor %(STRING)s,
            mappedVersion %(STRING)s,
            mappedTimestamps %(STRING)s,
            mappedFlavor %(STRING)s
        ) """ % self.db.keywords)
    def _iter(ml):
        # Flatten the map list into insertable rows; empty flavors are
        # stored as NULL so the SQL below can match them via IS NULL.
        for (name, pinnedInfo, mapInfo) in ml:
            assert(sum(mapInfo[0].timeStamps()) > 0)
            if pinnedInfo[1] is None or pinnedInfo[1].isEmpty():
                pinnedFlavor = None
            else:
                pinnedFlavor = pinnedInfo[1].freeze()
            if mapInfo[1] is None or mapInfo[1].isEmpty():
                mapFlavor = None
            else:
                mapFlavor = mapInfo[1].freeze()
            yield (name, pinnedInfo[0].asString(), pinnedFlavor,
                   mapInfo[0].asString(),
                   ":".join([ "%.3f" % x for x in mapInfo[0].timeStamps()]),
                   mapFlavor)
    cu.executemany("INSERT INTO mlt VALUES(?, ?, ?, ?, ?, ?)",
                   _iter(mapList))
    # now add link collections to these troves
    cu.execute("""INSERT INTO TroveTroves (instanceId, includedId,
                                           flags, inPristine)
                    SELECT TroveTroves.instanceId, pinnedInst.instanceId,
                           TroveTroves.flags, 0 FROM
                        mlt JOIN Flavors AS pinFlv ON
                            pinnedFlavor == pinFlv.flavor OR
                            pinnedFlavor IS NULL and pinFlv.flavor IS NULL
                        JOIN Versions AS pinVers ON
                            pinnedVersion == pinVers.version
                        JOIN Instances as pinnedInst ON
                            pinnedInst.troveName == mlt.name AND
                            pinnedInst.flavorId == pinFlv.flavorId AND
                            pinnedInst.versionId == pinVers.versionId
                        JOIN Flavors AS mapFlv ON
                            mappedFlavor == mapFlv.flavor OR
                            mappedFlavor IS NULL and mapFlv.flavor IS NULL
                        JOIN Versions AS mapVers ON
                            mappedVersion == mapVers.version
                        JOIN Instances as mapInst ON
                            mapInst.troveName == mlt.name AND
                            mapInst.flavorId == mapFlv.flavorId AND
                            mapInst.versionId == mapVers.versionId
                        JOIN TroveTroves ON
                            TroveTroves.includedId == mapInst.instanceId
                        LEFT JOIN TroveTroves AS dup ON
                            (dup.instanceId == TroveTroves.instanceId AND
                             dup.includedId == pinnedInst.instanceId)
                        WHERE dup.instanceId IS NULL
                """)
    cu.execute("DROP TABLE mlt")
47,
9,
47,
4,
1396904066
] |
def getTroveTroves(self, l, weakRefs = False, justPresent = False,
                   pristineOnly = True):
    """Look up the strong references of each trove in *l*.

    weakRefs additionally includes weakly-referenced troves; justPresent
    filters to troves actually present in the database; pristineOnly
    excludes inferred references.
    """
    return self._getTroveInclusions(
        l, True, weakRefs = weakRefs, justPresent = justPresent,
        pristineOnly = pristineOnly)
47,
9,
47,
4,
1396904066
] |
def _iter(infoList, resultList):
    # Emit (index, name, version-string, frozen-flavor) rows for DB
    # insertion while seeding resultList with one empty bucket per row.
    for pos, entry in enumerate(infoList):
        resultList.append([])
        yield (pos, entry[0], entry[1].asString(), entry[2].freeze())
47,
9,
47,
4,
1396904066
] |
def findTroveContainers(self, names):
    """For each trove name, list the (name, version, flavor) tuples of
    the troves that directly include it."""
    # XXX this fn could be factored out w/ getTroveContainers above
    cu = self.db.cursor()
    cu.execute("""
        CREATE TEMPORARY TABLE ftc(
            idx INTEGER,
            name %(STRING)s
        )""" % self.db.keywords, start_transaction = False)
    cu.executemany("INSERT INTO ftc VALUES(?, ?)", enumerate(names),
                   start_transaction = False)
    cu.execute("""SELECT idx, Instances.troveName, Versions.version,
                         Flavors.flavor
                    FROM ftc
                    JOIN Instances AS IncInst ON
                        ftc.name = IncInst.troveName
                    JOIN TroveTroves ON
                        IncInst.instanceId = TroveTroves.includedId
                    JOIN Instances ON
                        TroveTroves.instanceId = Instances.instanceId
                    JOIN Flavors ON
                        Instances.flavorId = Flavors.flavorId
                    JOIN Versions ON
                        Instances.versionId = Versions.versionId
                """)
    containers = [ [] for _ in names ]
    for row in cu:
        idx, trvName, trvVersion, trvFlavor = row
        containers[idx].append(
            (trvName, versions.VersionFromString(trvVersion),
             deps.deps.ThawFlavor(trvFlavor)))
    cu.execute("DROP TABLE ftc", start_transaction = False)
    return containers
47,
9,
47,
4,
1396904066
] |
def findUnreferencedTroves(self):
    """List non-local troves that no other trove includes."""
    cu = self.db.cursor()
    cu.execute("""
        SELECT troveName, version, flavor FROM Instances
            LEFT OUTER JOIN TroveTroves ON
                Instances.instanceId = TroveTroves.includedId
            JOIN Versions ON
                Instances.versionId = Versions.versionId
            JOIN Flavors ON
                Instances.flavorId = Flavors.flavorId
            WHERE
                includedid IS NULL AND
                version NOT LIKE "%/local@LOCAL:%"
    """)
    unreferenced = []
    for name, verStr, flvStr in cu:
        # A NULL flavor column means the empty flavor.
        if flvStr is None:
            flv = deps.deps.Flavor()
        else:
            flv = deps.deps.ThawFlavor(flvStr)
        unreferenced.append((name, versions.VersionFromString(verStr), flv))
    return unreferenced
47,
9,
47,
4,
1396904066
] |
def getAllTroveInfo(self, troveInfoTag):
    """Collect (TroveTuple, data) pairs for every trove carrying the
    given troveinfo tag."""
    cu = self.db.cursor()
    cu.execute("""
        SELECT troveName, version, timeStamps, flavor, data FROM TroveInfo
            JOIN Instances USING (instanceId)
            JOIN Flavors USING (flavorId)
            JOIN Versions ON Instances.versionId = Versions.versionId
        WHERE infoType = ?
    """, troveInfoTag)
    versionCache = VersionCache()
    flavorCache = FlavorCache()
    infoList = []
    for name, verStr, stamps, flvStr, data in cu:
        tup = TroveTuple(name=name,
                         version=versionCache.get(verStr, stamps),
                         flavor=flavorCache.get(flvStr))
        infoList.append((tup, data))
    return infoList
47,
9,
47,
4,
1396904066
] |
def _iter(tl, r):
    # Translate (name, version, flavor) tuples into
    # (index, name, versionId, flavorId) rows, skipping entries whose
    # flavor or version is unknown; grow r by one slot per emitted row.
    for pos, (trvName, trvVersion, trvFlavor) in enumerate(tl):
        flvId = self.flavors.get(trvFlavor, None)
        if flvId is None:
            continue
        verId = self.versionTable.get(trvVersion, None)
        if verId is None:
            continue
        r.append(None)
        yield (pos, trvName, verId, flvId)
47,
9,
47,
4,
1396904066
] |
def getPathHashesForTroveList(self, troveList):
    """Fetch the PATH_HASHES troveinfo for each trove in *troveList*."""
    tag = trove._TROVEINFO_TAG_PATH_HASHES
    return self._getTroveInfo(troveList, tag)
47,
9,
47,
4,
1396904066
] |
def getTroveScripts(self, troveList):
    """Fetch the SCRIPTS troveinfo (trove.TroveScripts objects) for each
    trove in *troveList*; entries without scripts come back as None."""
    tag = trove._TROVEINFO_TAG_SCRIPTS
    return self._getTroveInfo(troveList, tag)
47,
9,
47,
4,
1396904066
] |
def findRemovedByName(self, name):
    """Return (name, version, flavor) tuples for erased troves with the
    given name."""
    cu = self.db.cursor()
    cu.execute("""SELECT troveName, version, flavor FROM
                        Instances JOIN Versions ON
                            Instances.versionId = Versions.versionId
                        JOIN Flavors ON
                            Instances.flavorId = Flavors.flavorId
                        WHERE
                            isPresent = 0 AND
                            troveName = (?)""", name)
    removed = []
    for trvName, verStr, flvStr in cu:
        removed.append((trvName, versions.VersionFromString(verStr),
                        deps.deps.ThawFlavor(flvStr)))
    return removed
47,
9,
47,
4,
1396904066
] |
def troveIsIncomplete(self, name, version, flavor):
    """True when the named trove's INCOMPLETE troveinfo byte is non-zero
    (i.e. it was installed by a client that could not fully represent
    it)."""
    cu = self.db.cursor()
    # An empty (or non-Flavor) flavor is stored as NULL in the database.
    if isinstance(flavor, deps.deps.Flavor) and not flavor.isEmpty():
        flavorTest, flavorArgs = 'flavor = ?', [flavor.freeze()]
    else:
        flavorTest, flavorArgs = 'flavor IS NULL', []
    cu.execute("""
        SELECT data FROM Instances
            JOIN Versions USING (versionId)
            JOIN Flavors ON
                Instances.flavorId = Flavors.flavorId
            JOIN TroveInfo ON
                Instances.instanceId = TroveInfo.instanceId
        WHERE
            infoType = ? AND
            troveName = ? AND
            version = ? AND
            %s""" % flavorTest,
        [trove._TROVEINFO_TAG_INCOMPLETE,
         name, str(version)] + flavorArgs)
    frzIncomplete = cu.next()[0]
    return streams.ByteStream(frzIncomplete)() != 0
47,
9,
47,
4,
1396904066
] |
def getTrovesWithProvides(self, depSetList):
    """Map each dependency set to the installed troves providing it."""
    provides = self.depTables.getLocalProvides(depSetList)
    return provides
47,
9,
47,
4,
1396904066
] |
def getMissingPathIds(self, name, version, flavor):
    """Return the pathIds recorded for the named trove whose files are
    not present; raises KeyError when the trove is unknown."""
    cu = self.db.cursor()
    flvId = self.flavors.get(flavor, None)
    if flvId is None:
        raise KeyError
    verId = self.versionTable.get(version, None)
    if verId is None:
        raise KeyError
    cu.execute("""
        SELECT pathId FROM Instances JOIN DBTroveFiles USING (instanceId)
        WHERE Instances.troveName = ? AND Instances.versionId = ?
              AND Instances.flavorId = ? AND DBTroveFiles.isPresent = 0""",
        name, verId, flvId)
    return [ row[0] for row in cu ]
47,
9,
47,
4,
1396904066
] |
def getTransactionCounter(self):
    """Return the current value of the database transaction counter."""
    # _getTransactionCounter returns a pair; only the value is needed.
    return self._getTransactionCounter("transaction counter")[1]
47,
9,
47,
4,
1396904066
] |
def __init__(self, language='hr'):
    """Create a lexicon for the given language.

    :param language: language code; defaults to 'hr'.
    """
    super(Lexicon, self).__init__()
    # Remember the requested language so later lookups can use it.
    self.language = language
9,
3,
9,
3,
1474892492
] |
def get_3d_input(self):
    """Return a constant test volume of ones shaped
    (batch=2, 16, 16, 16, channels=8)."""
    return tf.ones((2, 16, 16, 16, 8))
1325,
408,
1325,
103,
1504079743
] |
def _test_nd_downsample_output_shape(self,
                                     rank,
                                     param_dict,
                                     output_shape):
    """Run DownSampleLayer(**param_dict) on a rank-2 or rank-3 test input
    and assert the pooled output has *output_shape*.

    :param rank: spatial rank of the test input; must be 2 or 3
    :param param_dict: keyword arguments forwarded to DownSampleLayer
    :param output_shape: expected output tensor shape
    :raises ValueError: if rank is neither 2 nor 3
    """
    if rank == 2:
        input_data = self.get_2d_input()
    elif rank == 3:
        input_data = self.get_3d_input()
    else:
        # Previously an unsupported rank crashed later with a NameError
        # (input_data undefined); fail fast with a clear message instead.
        raise ValueError('unsupported spatial rank: %r' % (rank,))
    downsample_layer = DownSampleLayer(**param_dict)
    output_data = downsample_layer(input_data)
    print(downsample_layer)
    with self.cached_session() as sess:
        sess.run(tf.global_variables_initializer())
        out = sess.run(output_data)
        self.assertAllClose(output_shape, out.shape)
1325,
408,
1325,
103,
1504079743
] |
def test_3d_avg_shape(self):
input_param = {'func': 'AVG',
'kernel_size': [3, 3, 2],
'stride': [3, 2, 1]}
self._test_nd_downsample_output_shape(rank=3,
param_dict=input_param,
output_shape=(2, 6, 8, 16, 8)) | NifTK/NiftyNet | [
1325,
408,
1325,
103,
1504079743
] |
def test_2d_max_shape(self):
input_param = {'func': 'CONSTANT',
'kernel_size': [1, 3],
'stride': 3}
self._test_nd_downsample_output_shape(rank=2,
param_dict=input_param,
output_shape=(2, 6, 6, 8)) | NifTK/NiftyNet | [
1325,
408,
1325,
103,
1504079743
] |
def test_2d_const_shape(self):
input_param = {'func': 'CONSTANT',
'kernel_size': [2, 3],
'stride': [2, 3]}
self._test_nd_downsample_output_shape(rank=2,
param_dict=input_param,
output_shape=(2, 8, 6, 8)) | NifTK/NiftyNet | [
1325,
408,
1325,
103,
1504079743
] |
def get_next_command_id():
    """Return the next value of the process-wide command id counter."""
    global _command_counter
    _command_counter = _command_counter + 1
    return _command_counter
46,
11,
46,
58,
1623839211
] |
def recursiveCompare(expected, actual, ignore_attributes=()):
    """Recursively assert structural equality of *expected* and *actual*.

    Lists are compared element-wise, dicts must have identical key sets
    and equal values (keys listed in *ignore_attributes* are skipped),
    and any other type falls back to ``==``.

    Raises AssertionError on the first mismatch.
    """
    # Types must match exactly; a list is never equal to a tuple here.
    assert type(expected) == type(actual)
    if type(expected) is list:
        assert len(expected) == len(actual)
        for index in range(len(expected)):
            recursiveCompare(expected[index], actual[index], ignore_attributes)
        return
    if type(expected) is dict:
        assert expected.keys() == actual.keys(), \
            f"Key sets should be the same: " \
            f"\nNot present: {set(expected.keys()) - set(actual.keys())}" \
            f"\nUnexpected: {set(actual.keys()) - set(expected.keys())}"
        # Iterate keys directly (the original's enumerate() produced an
        # unused index); the default () replaces a mutable default list.
        for key in expected:
            if key not in ignore_attributes:
                recursiveCompare(expected[key], actual[key], ignore_attributes)
        return
    assert expected == actual
46,
11,
46,
58,
1623839211
] |
def __init__(self,
             datacatalog_project_id,
             datacatalog_location_id,
             atlas_connection_args,
             atlas_entity_types=None,
             enable_monitoring=None):
    """Wire up the scraper, entry/tag factories and metrics for one
    Apache Atlas -> Data Catalog sync run.

    :param datacatalog_project_id: target Data Catalog project id
    :param datacatalog_location_id: target Data Catalog location id
    :param atlas_connection_args: dict of Atlas connection settings;
        must contain 'host' and may contain 'event_hook'
    :param atlas_entity_types: optional restriction on which Atlas
        entity types are processed
    :param enable_monitoring: forwarded to the metrics processor
    """
    self._project_id = datacatalog_project_id
    self._location_id = datacatalog_location_id
    self._atlas_connection_args = atlas_connection_args
    self._atlas_entity_types = atlas_entity_types
    event_hook = atlas_connection_args.get('event_hook')
    if not event_hook:
        # NOTE(review): _metadata_scraper only exists on the non
        # event-hook path; presumably event-hook mode obtains metadata
        # elsewhere — confirm before relying on this attribute.
        self._metadata_scraper = scrape.MetadataScraper(
            atlas_connection_args)
    self._tag_template_factory = prepare.DataCatalogTagTemplateFactory(
        project_id=datacatalog_project_id,
        location_id=datacatalog_location_id)
    self._instance_url = self._extract_instance_url(atlas_connection_args)
    self._assembled_entry_factory = prepare.AssembledEntryFactory(
        project_id=datacatalog_project_id,
        location_id=datacatalog_location_id,
        entry_group_id=self._ENTRY_GROUP_ID,
        user_specified_system=self._SPECIFIED_SYSTEM,
        instance_url=self._instance_url)
    # Short random id used to correlate metrics from this run.
    self._task_id = uuid.uuid4().hex[:8]
    self._metrics_processor = metrics_processor.MetricsProcessor(
        datacatalog_project_id, datacatalog_location_id,
        self._ENTRY_GROUP_ID, enable_monitoring, self._task_id)
20,
14,
20,
3,
1588024316
] |
def _extract_instance_url(cls, atlas_connection_args):
    """Return the Atlas instance URL, i.e. the 'host' connection arg."""
    host = atlas_connection_args['host']
    return host
20,
14,
20,
3,
1588024316
] |
def _sync_assembled_entries(self, assembled_entries, tag_templates_dict):
    """Map relationships between the assembled entries and ingest them,
    with their tag templates, into Data Catalog; a no-op (beyond logging)
    when there are no entries."""
    self._log_entries(assembled_entries)
    if not assembled_entries:
        return
    logging.info('')
    logging.info('===> Mapping Data Catalog entries relationships...')
    self._map_datacatalog_relationships(assembled_entries)
    logging.info('==== DONE ========================================')
    logging.info(
        '===> Synchronizing Apache Atlas :: Data Catalog metadata...')
    self._ingest_metadata(tag_templates_dict, assembled_entries)
    logging.info('==== DONE ========================================')
20,
14,
20,
3,
1588024316
] |
def _log_metadata(self, metadata):
    """Record the raw metadata payload size metric."""
    processor = self._metrics_processor
    processor.process_metadata_payload_bytes_metric(metadata)
20,
14,
20,
3,
1588024316
] |
Subsets and Splits