Dataset schema (column name, dtype, and value range — lengths for string columns):

    body_hash                stringlengths   64 .. 64
    body                     stringlengths   23 .. 109k
    docstring                stringlengths   1 .. 57k
    path                     stringlengths   4 .. 198
    name                     stringlengths   1 .. 115
    repository_name          stringlengths   7 .. 111
    repository_stars         float64         0 .. 191k
    lang                     stringclasses   1 value
    body_without_docstring   stringlengths   14 .. 108k
    unified                  stringlengths   45 .. 133k
ae77430b1fcb66dafba0b01e67e6495cc33c14d9bcc0ac83ddfca3e4ca89b2ca
@Sakuya.interactions(guild=MY_GUILD)
async def volume_(event, volume: ('int', 'Volume to set to.') = None):
    "Changes the player's volume."
    voice_client = event.voice_client
    if voice_client is None:
        return 'There is no voice client at your guild.'

    if volume is None:
        return f'{voice_client.volume * 100.0:.0f}%'

    if volume <= 0:
        volume = 0.0
    elif volume >= 200:
        volume = 2.0
    else:
        volume /= 100.0

    voice_client.volume = volume
    return f'Volume set to {volume * 100.0:.0f}%'
Changes the player's volume.
docs/examples/e11_basic_voice/main.py
volume_
monoidic/hata
173
python
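The percent-to-factor clamping above is self-contained and easy to verify in isolation; a minimal sketch of just that logic:

    # Percent in, playback factor out, clamped to [0.0, 2.0].
    def clamp_volume(percent):
        if percent <= 0:
            return 0.0
        if percent >= 200:
            return 2.0
        return percent / 100.0

    assert clamp_volume(-5) == 0.0
    assert clamp_volume(150) == 1.5
    assert clamp_volume(999) == 2.0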
10836e4265eeef0384270ed28d586a946dfea90019fef35e0c745842d79f5cf1
@Sakuya.interactions(guild=MY_GUILD)
async def disconnect(event):
    'Disconnects the bot from voice.'
    voice_client = event.voice_client
    if voice_client is None:
        return 'There is no voice client at your guild.'

    await voice_client.disconnect()
    return 'Disconnected.'
Disconnects the bot from voice.
docs/examples/e11_basic_voice/main.py
disconnect
monoidic/hata
173
python
609966d0773d59ec4050d7a8f608dc38d320acca014cc74257ff5b7091790571
@staticmethod
def test_connection_string(string1, string2):
    'tests if a database has the same instance and name'
    db_props1 = {k: v for k, v in (s.split('=') for s in string1.split(';'))}
    db_props2 = {k: v for k, v in (s.split('=') for s in string2.split(';'))}
    return (
        ';'.join([db_props1.get('DATABASE'), db_props1.get('INSTANCE', 'NULL')])
        == ';'.join([db_props2.get('DATABASE'), db_props2.get('INSTANCE', 'NULL')])
    )
tests if a database has the same instance and name
GP/python/restapi/admin/utils.py
test_connection_string
Bolton-and-Menk-GIS/Geoprocessing-Services-JSAPI-2018
2
python
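For reference, a minimal standalone sketch of the comparison this performs (the connection strings below are invented for illustration):

    # Only DATABASE and INSTANCE participate in the equality check.
    a = 'SERVER=gis1;INSTANCE=sde:sqlserver:gis1;DATABASE=parcels;VERSION=SDE.DEFAULT'
    b = 'SERVER=gis2;INSTANCE=sde:sqlserver:gis1;DATABASE=parcels;VERSION=DBO.edits'
    props_a = dict(s.split('=') for s in a.split(';'))
    props_b = dict(s.split('=') for s in b.split(';'))
    same = ((props_a['DATABASE'], props_a.get('INSTANCE', 'NULL'))
            == (props_b['DATABASE'], props_b.get('INSTANCE', 'NULL')))
    print(same)  # True: SERVER and VERSION differ, but DATABASE and INSTANCE match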
c6818211d6451ba26380cadcb655d63e66518780c4e4c5549d12f262501848a6
def find_services_containing(self, ws, fcs=[], stop=False):
    '''finds services containing an entire workspace and any specific feature classes

    Required:
        ws -- SDE workspace path
        fcs -- list of specific feature classes to search for

    Optional:
        stop -- option to stop service once item is found
    '''
    ws = self.find_ws(ws)
    con_str = self.form_connection_string(ws)
    service_map = {'workspace': [], 'feature_classes': {}}
    toStop = []
    for fc in fcs:
        service_map['feature_classes'][fc.split('.')[-1]] = []

    for service in self.ags.iter_services():
        if hasattr(service, 'type') and service.type == 'MapServer':
            manifest = service.manifest()
            if hasattr(manifest, 'databases'):
                for db in manifest.databases:
                    if (self.test_connection_string(con_str, db.onServerConnectionString)
                            or self.test_connection_string(con_str, db.onPremiseConnectionString)):
                        service_map['workspace'].append(MunchEncoder({'name': service.serviceName, 'service': service}))
                        if service not in toStop:
                            toStop.append(service)
                    for ds in db.datasets:
                        lyr_name = ds.onServerName
                        if lyr_name in service_map['feature_classes']:
                            service_map['feature_classes'][lyr_name].append(MunchEncoder({'name': service.serviceName, 'service': service}))
                            if service not in toStop:
                                toStop.append(service)

    if stop:
        for service in toStop:
            service.stop()
            print('Stopped service: "{}"'.format(service.serviceName))
            self.__stopped_services.append(service)

    return ServerResources(service_map)
finds services containing an entire workspace and any specific feature classes Required: ws -- SDE workspace path fcs -- list of specific feature classes to search for Optional: stop -- option to stop service once item is found
GP/python/restapi/admin/utils.py
find_services_containing
Bolton-and-Menk-GIS/Geoprocessing-Services-JSAPI-2018
2
python
bc920c588d6ae29cbedf736582e7e6e4468672ebff063a2db9d76fa876a84b18
def startStoppedServices(self):
    '''start all stopped services that are in this instance's cache, meaning those
    that have been stopped from this instance
    '''
    # iterate over a copy: removing items from the list while iterating it
    # directly would silently skip every other service
    for s in list(self.__stopped_services):
        s.start()
        print('Started service: "{}"'.format(s.serviceName))
        self.__stopped_services.remove(s)
        self.__started_services.append(s)
start all stopped services that are in this instance's cache, meaning those that have been stopped from this instance
GP/python/restapi/admin/utils.py
startStoppedServices
Bolton-and-Menk-GIS/Geoprocessing-Services-JSAPI-2018
2
python
c818f8471ffb61fc36b938dc24f1a887cb34e6403b3369daf00bb6b03167c764
# note: Python 2 code (xrange; filter() returning a list)
@staticmethod
def find_ws(path, ws_type='', return_type=False):
    '''finds a valid workspace path for an arcpy.da.Editor() Session

    Required:
        path -- path to features or workspace

    Optional:
        ws_type -- option to find specific workspace type (FileSystem|LocalDatabase|RemoteDatabase)
        return_type -- option to return workspace type as well. If this option is selected, a tuple
            of the full workspace path and type are returned
    '''
    def find_existing(path):
        if arcpy.Exists(path):
            return path
        return find_existing(os.path.dirname(path))

    if isinstance(path, (arcpy.mapping.Layer, arcpy.mapping.TableView)):
        path = path.dataSource

    if os.sep not in str(path):
        if hasattr(path, 'dataSource'):
            path = path.dataSource
        else:
            path = arcpy.Describe(path).catalogPath

    path = find_existing(path)
    desc = arcpy.Describe(path)
    if hasattr(desc, 'workspaceType'):
        if ws_type == desc.workspaceType:
            if return_type:
                return (path, desc.workspaceType)
            else:
                return path
        elif return_type:
            return (path, desc.workspaceType)
        else:
            return path

    path = str(path)
    split = filter(None, str(path).split(os.sep))
    if path.startswith('\\\\'):
        split[0] = '\\\\{0}'.format(split[0])

    for i in xrange(1, len(split)):
        sub_dir = os.sep.join(split[:-i])
        desc = arcpy.Describe(sub_dir)
        if hasattr(desc, 'workspaceType'):
            if ws_type == desc.workspaceType:
                if return_type:
                    return (sub_dir, desc.workspaceType)
                else:
                    return sub_dir
            elif return_type:
                return (sub_dir, desc.workspaceType)
            else:
                return sub_dir
finds a valid workspace path for an arcpy.da.Editor() Session Required: path -- path to features or workspace Optional: ws_type -- option to find specific workspace type (FileSystem|LocalDatabase|RemoteDatabase) return_type -- option to return workspace type as well. If this option is selected, a tuple of the full workspace path and type are returned
GP/python/restapi/admin/utils.py
find_ws
Bolton-and-Menk-GIS/Geoprocessing-Services-JSAPI-2018
2
python
9b9e233c5f1c0f128791157132987a38aeeaa7d0fe9ae83c471c96c1b78a4693
@staticmethod
def form_connection_string(ws):
    "esri's describe workspace connection string does not work at 10.4, bug???"
    desc = arcpy.Describe(ws)
    if 'SdeWorkspaceFactory' in desc.workspaceFactoryProgID:
        cp = desc.connectionProperties
        props = ['server', 'instance', 'database', 'version', 'authentication_mode']
        db_client = cp.instance.split(':')[1]
        parts = ['{}={}'.format(prop.upper(), getattr(cp, prop)) for prop in props]
        parts.insert(2, 'DBCLIENT={}'.format(db_client))
        parts.insert(3, 'DB_CONNECTION_PROPERTIES={}'.format(cp.server))
        return ';'.join(parts)
    else:
        return 'DATABASE=' + ws
esri's describe workspace connection string does not work at 10.4, bug???
GP/python/restapi/admin/utils.py
form_connection_string
Bolton-and-Menk-GIS/Geoprocessing-Services-JSAPI-2018
2
python
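For a hypothetical SDE workspace, the resulting string has this shape (values invented; key order follows the insert() calls above):

    example = ('SERVER=gisprod;INSTANCE=sde:sqlserver:gisprod;DBCLIENT=sqlserver;'
               'DB_CONNECTION_PROPERTIES=gisprod;DATABASE=parcels;VERSION=SDE.DEFAULT;'
               'AUTHENTICATION_MODE=OSA')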
082d4b18956a9d2e674bc551cc73920e3fa60111d931be045c8af027901a24b7
def stopServiceAndCompressDatabase(self, sde_loc, service_url_or_name):
    '''will stop a service and compress all SDE databases within the map service

    Required:
        sde_loc -- location containing .sde connections
        service_url_or_name -- full path to REST endpoint or service name
    '''
    service = self.ags.service(service_url_or_name)
    workspaces = []
    manifest = service.manifest()
    if hasattr(manifest, 'databases'):
        for db in manifest.databases:
            dbType = db.onServerWorkspaceFactoryProgID
            if 'SdeWorkspaceFactory' in dbType:
                cs = db.onServerConnectionString or db.onPremiseConnectionString
                db_name = {k: v for k, v in (s.split('=') for s in cs.split(';'))}['DATABASE']
                sde = os.path.join(sde_loc, db_name + '.sde')
                workspaces.append(sde)

    if workspaces:
        service.stop()
        self.__stopped_services.append(service)
        print('Stopped Service...\n')
        for ws in workspaces:
            arcpy.management.Compress(ws)
        service.start()
        self.__started_services.append(service)
        print('\nStarted Service')

    return workspaces
will stop a service and compress all SDE databases within the map service Required: sde_loc -- location containing .sde connections service_url_or_name -- full path to REST endpoint or service name
GP/python/restapi/admin/utils.py
stopServiceAndCompressDatabase
Bolton-and-Menk-GIS/Geoprocessing-Services-JSAPI-2018
2
python
2abe968e32abe3fb64b4d2d26decf7a92c316b727d621e69305c57a680306eca
@staticmethod
def find_ws(path, ws_type='', return_type=False):
    '''finds a valid workspace path for an arcpy.da.Editor() Session

    Required:
        path -- path to features or workspace

    Optional:
        ws_type -- option to find specific workspace type (FileSystem|LocalDatabase|RemoteDatabase)
        return_type -- option to return workspace type as well. If this option is selected, a tuple
            of the full workspace path and type are returned
    '''
    if os.path.splitext(path)[1] in ('.gdb', '.mdb', '.sde') and ws_type != 'FileSystem':
        if return_type:
            return (path, 'RemoteDatabase' if os.path.splitext(path)[1] == '.sde' else 'LocalDatabase')
        return path
    elif os.path.isdir(path):
        if return_type:
            return (path, 'FileSystem')
        return path
    elif os.path.isfile(path):
        return find_ws(os.path.dirname(path))
finds a valid workspace path for an arcpy.da.Editor() Session Required: path -- path to features or workspace Optional: ws_type -- option to find specific workspace type (FileSystem|LocalDatabase|RemoteDatabase) return_type -- option to return workspace type as well. If this option is selected, a tuple of the full workspace path and type are returned
GP/python/restapi/admin/utils.py
find_ws
Bolton-and-Menk-GIS/Geoprocessing-Services-JSAPI-2018
2
python
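The walk-up behavior here is essentially "take dirname until a workspace extension appears"; a filesystem-free sketch of that core loop (the path is invented):

    import os

    path = 'C:/data/city.gdb/Roads'
    while path and os.path.splitext(path)[1] not in ('.gdb', '.mdb', '.sde'):
        path = os.path.dirname(path)
    print(path)  # C:/data/city.gdb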
1ae6c01ee093b8f8a6e9a4d127226a54479e5558e761fd026f158cb8ed6b5b41
@staticmethod
def form_connection_string(ws):
    'form connection string by parsing .sde connection files'
    if ws.endswith('.sde'):
        with open(ws, 'rb') as f:
            data = f.read()
        datastr = data.replace('\x00', '')
        server = datastr.split('SERVER\x08\x0e')[1].split('\x12')[0]
        instance = datastr.split('INSTANCE\x08*')[1].split('\x12')[0]
        dbclient = ''.join(s for s in datastr.split('DBCLIENT\x08\x14')[1].split('DB_CONNECTION')[0] if s.isalpha())
        db_connection_properties = datastr.split('DB_CONNECTION_PROPERTIES\x08\x0e')[1].split('\x12')[0]
        database = datastr.split('DATABASE\x08\x16')[1].split('\x1e')[0]
        version = datastr.split('VERSION\x08\x18')[1].split('\x1a')[0]
        authentication_mode = datastr.split('AUTHENTICATION_MODE\x08\x08')[1].split('\x10')[0]
        parts = [server, instance, dbclient, db_connection_properties, database, version, authentication_mode]
        props = ['SERVER', 'INSTANCE', 'DBCLIENT', 'DB_CONNECTION_PROPERTIES', 'DATABASE', 'VERSION', 'AUTHENTICATION_MODE']
        return ';'.join('{}={}'.format(p, v) for p, v in zip(props, parts))
    else:
        return 'DATABASE=' + ws
form connection string by parsing .sde connection files
GP/python/restapi/admin/utils.py
form_connection_string
Bolton-and-Menk-GIS/Geoprocessing-Services-JSAPI-2018
2
python
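The extraction technique is plain string surgery: split on a marker that precedes the value, then split on the terminator that follows it. A toy illustration on an invented blob:

    blob = 'junkSERVER\x08\x0egisprod\x12morejunk'
    server = blob.split('SERVER\x08\x0e')[1].split('\x12')[0]
    print(server)  # gisprod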
bab72eced519c4905eabd4212217667bf0c55b9e8c8c3efe18f81e90be9a8a47
def stopServiceAndCompressDatabase(self, sde_loc, service_url_or_name):
    'stops service and compresses all associated databases'
    raise NotImplementedError('No access to the Arcpy Module!')
stops service and compresses all associated databases
GP/python/restapi/admin/utils.py
stopServiceAndCompressDatabase
Bolton-and-Menk-GIS/Geoprocessing-Services-JSAPI-2018
2
python
b3866250990f8161b60d330d11276905aa8c8cea94809f080853f094090920c7
def test_get_resources_200(self):
    'Test get resources with status 200.'
    async def go():
        await self.start_fake_server()
        scenes = Scenes(self.request)
        res = await scenes.get_resources()
        return res

    resources = self.loop.run_until_complete(go())
    self.assertEqual(2, len(resources['sceneIds']))
    self.assertEqual(2, len(resources['sceneData']))
    self.assertEqual('Dining Vanes Open', resources['sceneData'][0]['name_unicode'])
    self.assertEqual('Master Open', resources['sceneData'][1]['name_unicode'])
Test get resources with status 200.
tests/test_scenes.py
test_get_resources_200
bdraco/aio-powerview-api
6
python
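These tests share one shape: wrap the awaits in a local coroutine and drive it with loop.run_until_complete from synchronous unittest code. Stripped of the PowerView specifics, the pattern is roughly:

    import asyncio

    async def go():
        await asyncio.sleep(0)       # stand-in for the fake-server round trip
        return {'sceneIds': [1, 2]}

    loop = asyncio.new_event_loop()
    resources = loop.run_until_complete(go())
    loop.close()
    assert len(resources['sceneIds']) == 2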
f00f710f47256a7e7b755048f8eadca178cf77112cb20379aff3ef3f2f1bfaf6
def test_create_scene_201(self):
    'Tests create new scene.'
    async def go():
        await self.start_fake_server()
        scenes = Scenes(self.request)
        res = await scenes.create_scene(12372, 'New scene', color_id=1, icon_id=2)
        return res

    resp = self.loop.run_until_complete(go())
    self.assertEqual({'scene': {'roomId': 12372, 'name': 'TmV3IHNjZW5l', 'colorId': 1, 'iconId': 2}}, resp)
Tests create new scene.
tests/test_scenes.py
test_create_scene_201
bdraco/aio-powerview-api
6
python
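The expected 'name' value is the base64 encoding of the scene name, which the library evidently applies before posting; this is easy to confirm:

    import base64
    print(base64.b64encode('New scene'.encode()).decode())  # TmV3IHNjZW5l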
c3e3e1770247b7e6334791d6cb086507b1db662da62c6937dc1c992cd024c263
def test_no_render_in_subquery(self):
    'test #6378'
    users, Address, addresses, User = (
        self.tables.users, self.classes.Address,
        self.tables.addresses, self.classes.User,
    )
    self.mapper_registry.map_imperatively(
        User, users,
        properties={
            'addresses': relationship(
                self.mapper_registry.map_imperatively(Address, addresses),
                lazy='joined', order_by=Address.id,
            )
        },
    )
    stmt = select(User)
    self.assert_compile(
        select(stmt.subquery()),
        'SELECT anon_1.id, anon_1.name FROM '
        '(SELECT users.id AS id, users.name AS name FROM users) AS anon_1',
    )
    self.assert_compile(
        stmt,
        'SELECT users.id, users.name, addresses_1.id AS id_1, '
        'addresses_1.user_id, addresses_1.email_address FROM users '
        'LEFT OUTER JOIN addresses AS addresses_1 '
        'ON users.id = addresses_1.user_id ORDER BY addresses_1.id',
    )
test #6378
test/orm/test_eager_relations.py
test_no_render_in_subquery
ToGoBananas/sqlalchemy
5,383
python
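A minimal, self-contained illustration of the behavior under test: a lazy='joined' relationship adds its LEFT OUTER JOIN to the top-level SELECT, but the eager load is not rendered when the statement is wrapped in a subquery. This sketch uses the SQLAlchemy 1.4+ declarative API with invented table names, not the test suite's fixtures:

    from sqlalchemy import Column, ForeignKey, Integer, String, select
    from sqlalchemy.orm import declarative_base, relationship

    Base = declarative_base()

    class User(Base):
        __tablename__ = 'users'
        id = Column(Integer, primary_key=True)
        name = Column(String)
        addresses = relationship('Address', lazy='joined', order_by='Address.id')

    class Address(Base):
        __tablename__ = 'addresses'
        id = Column(Integer, primary_key=True)
        user_id = Column(Integer, ForeignKey('users.id'))

    stmt = select(User)
    print(stmt)                     # includes LEFT OUTER JOIN addresses ...
    print(select(stmt.subquery()))  # plain SELECT over the subquery, no eager join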
c418790e269fb2818372afc97ebcebf195447f3af93056887ff739327adbfe57
def test_no_orphan(self):
    'An eagerly loaded child object is not marked as an orphan'
    users, Address, addresses, User = (
        self.tables.users, self.classes.Address,
        self.tables.addresses, self.classes.User,
    )
    self.mapper_registry.map_imperatively(
        User, users,
        properties={'addresses': relationship(Address, cascade='all,delete-orphan', lazy='joined')},
    )
    self.mapper_registry.map_imperatively(Address, addresses)
    sess = fixture_session()
    user = sess.query(User).get(7)
    assert getattr(User, 'addresses').hasparent(
        sa.orm.attributes.instance_state(user.addresses[0]), optimistic=True
    )
    assert not sa.orm.class_mapper(Address)._is_orphan(
        sa.orm.attributes.instance_state(user.addresses[0])
    )
An eagerly loaded child object is not marked as an orphan
test/orm/test_eager_relations.py
test_no_orphan
ToGoBananas/sqlalchemy
5,383
python
23e610936c7f2d94a0b3793c8c21b8fca4007b5d3d0eedc498efd5a5ea37cb02
def test_orderby_related(self):
    '''A regular mapper select on a single table can
    order by a relationship to a second table'''
    Address, addresses, users, User = (
        self.classes.Address, self.tables.addresses,
        self.tables.users, self.classes.User,
    )
    self.mapper_registry.map_imperatively(Address, addresses)
    self.mapper_registry.map_imperatively(
        User, users,
        properties=dict(addresses=relationship(Address, lazy='joined', order_by=addresses.c.id)),
    )
    q = fixture_session().query(User)
    result = q.filter(User.id == Address.user_id).order_by(Address.email_address).all()
    eq_(
        [
            User(id=8, addresses=[
                Address(id=2, email_address='[email protected]'),
                Address(id=3, email_address='[email protected]'),
                Address(id=4, email_address='[email protected]'),
            ]),
            User(id=9, addresses=[Address(id=5)]),
            User(id=7, addresses=[Address(id=1)]),
        ],
        result,
    )
A regular mapper select on a single table can order by a relationship to a second table
test/orm/test_eager_relations.py
test_orderby_related
ToGoBananas/sqlalchemy
5,383
python
4eed48f07de503edf8caa8aaefc21f4f14137d544d7a6717f6adba2cd2ddbdb5
def test_no_ad_hoc_orderby(self):
    """part of #2992; make sure string label references can't
    access an eager loader, else an eager load can corrupt the query.
    """
    Address, addresses, users, User = (
        self.classes.Address, self.tables.addresses,
        self.tables.users, self.classes.User,
    )
    self.mapper_registry.map_imperatively(Address, addresses)
    self.mapper_registry.map_imperatively(
        User, users, properties=dict(addresses=relationship(Address)),
    )
    sess = fixture_session()

    q = (
        sess.query(User)
        .join(User.addresses)
        .options(joinedload(User.addresses))
        .order_by('email_address')
    )
    self.assert_compile(
        q,
        'SELECT users.id AS users_id, users.name AS users_name, '
        'addresses_1.id AS addresses_1_id, '
        'addresses_1.user_id AS addresses_1_user_id, '
        'addresses_1.email_address AS addresses_1_email_address '
        'FROM users JOIN addresses ON users.id = addresses.user_id '
        'LEFT OUTER JOIN addresses AS addresses_1 '
        'ON users.id = addresses_1.user_id ORDER BY addresses.email_address',
    )

    q = sess.query(User).options(joinedload(User.addresses)).order_by('email_address')
    assert_raises_message(
        sa.exc.CompileError,
        "Can't resolve label reference for ORDER BY / GROUP BY.",
        q.all,
    )
part of #2992; make sure string label references can't access an eager loader, else an eager load can corrupt the query.
test/orm/test_eager_relations.py
test_no_ad_hoc_orderby
ToGoBananas/sqlalchemy
5,383
python
6e5f272076d120daac45e6107a1069ce3f8db0dd89e0a683d9d259c314551230
def test_we_adapt_for_compound_for_getter(self):
    '''test #6596.

    Ensure loading.py uses the compound eager adapter on the target
    column before looking for a populator, rather than creating
    a new populator.
    '''
    User, Address = self.classes('User', 'Address')
    users, addresses = self.tables('users', 'addresses')
    self.mapper_registry.map_imperatively(
        User, users,
        properties={'addresses': relationship(Address, order_by=addresses.c.id)},
    )
    self.mapper_registry.map_imperatively(Address, addresses)
    s = fixture_session()
    q = select(User).options(joinedload(User.addresses)).order_by(User.id).limit(2)

    def strict_getter(self, key, raiseerr=True):
        try:
            rec = self._keymap[key]
        except KeyError:
            assert False
        index = rec[0]
        return operator.itemgetter(index)

    with mock.patch('sqlalchemy.engine.result.ResultMetaData._getter', strict_getter):
        result = s.execute(q).unique().scalars().all()

    eq_(result, self.static.user_address_result[0:2])
test #6596. Ensure loading.py uses the compound eager adapter on the target column before looking for a populator, rather than creating a new populator.
test/orm/test_eager_relations.py
test_we_adapt_for_compound_for_getter
ToGoBananas/sqlalchemy
5,383
python
ac23c749fc818c73b2902818ab3c497045f6e2527295561861676fdf6886ef3a
def test_disable_dynamic(self):
    'test no joined option on a dynamic.'
    users, Address, addresses, User = (
        self.tables.users, self.classes.Address,
        self.tables.addresses, self.classes.User,
    )
    self.mapper_registry.map_imperatively(
        User, users,
        properties={'addresses': relationship(Address, lazy='dynamic')},
    )
    self.mapper_registry.map_imperatively(Address, addresses)
    sess = fixture_session()
    assert_raises_message(
        sa.exc.InvalidRequestError,
        "User.addresses' does not support object population - eager loading cannot be applied.",
        sess.query(User).options(joinedload(User.addresses)).first,
    )
test no joined option on a dynamic.
test/orm/test_eager_relations.py
test_disable_dynamic
ToGoBananas/sqlalchemy
5,383
python
9d9675e709b297ea89d95522eba69468d88208a84919a2034187b3563dc821db
def test_cyclical(self):
    'A circular eager relationship breaks the cycle with a lazy loader'
    Address, addresses, users, User = (
        self.classes.Address, self.tables.addresses,
        self.tables.users, self.classes.User,
    )
    self.mapper_registry.map_imperatively(Address, addresses)
    self.mapper_registry.map_imperatively(
        User, users,
        properties=dict(
            addresses=relationship(
                Address, lazy='joined',
                backref=sa.orm.backref('user', lazy='joined'),
                order_by=Address.id,
            )
        ),
    )
    eq_(sa.orm.class_mapper(User).get_property('addresses').lazy, 'joined')
    eq_(sa.orm.class_mapper(Address).get_property('user').lazy, 'joined')
    sess = fixture_session()
    eq_(self.static.user_address_result, sess.query(User).order_by(User.id).all())
A circular eager relationship breaks the cycle with a lazy loader
test/orm/test_eager_relations.py
test_cyclical
ToGoBananas/sqlalchemy
5,383
python
6b401755b5b6c19d5c85d663f2a2189af99d080bff2c07508b352767b2377d6e
def test_double_w_ac(self):
    '''Eager loading with two relationships simultaneously,
    from the same table, using aliases.'''
    (users, orders, User, Address, Order, addresses, Item, items, order_items) = (
        self.tables.users, self.tables.orders, self.classes.User,
        self.classes.Address, self.classes.Order, self.tables.addresses,
        self.classes.Item, self.tables.items, self.tables.order_items,
    )
    self.mapper_registry.map_imperatively(Address, addresses)
    self.mapper_registry.map_imperatively(
        Order, orders,
        properties={'items': relationship(Item, secondary=order_items, lazy='joined', order_by=items.c.id)},
    )
    self.mapper_registry.map_imperatively(Item, items)

    open_mapper = aliased(Order, orders)
    closed_mapper = aliased(Order, orders)

    self.mapper_registry.map_imperatively(
        User, users,
        properties=dict(
            addresses=relationship(Address, lazy='joined', order_by=addresses.c.id),
            open_orders=relationship(
                open_mapper,
                primaryjoin=sa.and_(open_mapper.isopen == 1, users.c.id == open_mapper.user_id),
                lazy='joined', order_by=open_mapper.id, viewonly=True,
            ),
            closed_orders=relationship(
                closed_mapper,
                primaryjoin=sa.and_(closed_mapper.isopen == 0, users.c.id == closed_mapper.user_id),
                lazy='joined', order_by=closed_mapper.id, viewonly=True,
            ),
        ),
    )
    self._run_double_test()
Eager loading with two relationships simultaneously, from the same table, using aliases.
test/orm/test_eager_relations.py
test_double_w_ac
ToGoBananas/sqlalchemy
5,383
python
21a86a45e3e95e352c6555397a83d53b539af23dac73e880544807503e0a19e5
def test_double_w_ac_against_subquery(self):
    '''Eager loading with two relationships simultaneously,
    from the same table, using aliases.'''
    (users, orders, User, Address, Order, addresses, Item, items, order_items) = (
        self.tables.users, self.tables.orders, self.classes.User,
        self.classes.Address, self.classes.Order, self.tables.addresses,
        self.classes.Item, self.tables.items, self.tables.order_items,
    )
    self.mapper_registry.map_imperatively(Address, addresses)
    self.mapper_registry.map_imperatively(
        Order, orders,
        properties={'items': relationship(Item, secondary=order_items, lazy='joined', order_by=items.c.id)},
    )
    self.mapper_registry.map_imperatively(Item, items)

    open_mapper = aliased(Order, select(orders).where(orders.c.isopen == 1).alias())
    closed_mapper = aliased(Order, select(orders).where(orders.c.isopen == 0).alias())

    self.mapper_registry.map_imperatively(
        User, users,
        properties=dict(
            addresses=relationship(Address, lazy='joined', order_by=addresses.c.id),
            open_orders=relationship(open_mapper, lazy='joined', order_by=open_mapper.id),
            closed_orders=relationship(closed_mapper, lazy='joined', order_by=closed_mapper.id),
        ),
    )
    self._run_double_test()
Eager loading with two relationships simultaneously, from the same table, using aliases.
test/orm/test_eager_relations.py
test_double_w_ac_against_subquery
ToGoBananas/sqlalchemy
5,383
python
def test_double_w_ac_against_subquery(self): 'Eager loading with two relationships simultaneously,\n from the same table, using aliases.' (users, orders, User, Address, Order, addresses, Item, items, order_items) = (self.tables.users, self.tables.orders, self.classes.User, self.classes.Address, self.classes.Order, self.tables.addresses, self.classes.Item, self.tables.items, self.tables.order_items) self.mapper_registry.map_imperatively(Address, addresses) self.mapper_registry.map_imperatively(Order, orders, properties={'items': relationship(Item, secondary=order_items, lazy='joined', order_by=items.c.id)}) self.mapper_registry.map_imperatively(Item, items) open_mapper = aliased(Order, select(orders).where((orders.c.isopen == 1)).alias()) closed_mapper = aliased(Order, select(orders).where((orders.c.isopen == 0)).alias()) self.mapper_registry.map_imperatively(User, users, properties=dict(addresses=relationship(Address, lazy='joined', order_by=addresses.c.id), open_orders=relationship(open_mapper, lazy='joined', order_by=open_mapper.id), closed_orders=relationship(closed_mapper, lazy='joined', order_by=closed_mapper.id))) self._run_double_test()
def test_double_w_ac_against_subquery(self): 'Eager loading with two relationships simultaneously,\n from the same table, using aliases.' (users, orders, User, Address, Order, addresses, Item, items, order_items) = (self.tables.users, self.tables.orders, self.classes.User, self.classes.Address, self.classes.Order, self.tables.addresses, self.classes.Item, self.tables.items, self.tables.order_items) self.mapper_registry.map_imperatively(Address, addresses) self.mapper_registry.map_imperatively(Order, orders, properties={'items': relationship(Item, secondary=order_items, lazy='joined', order_by=items.c.id)}) self.mapper_registry.map_imperatively(Item, items) open_mapper = aliased(Order, select(orders).where((orders.c.isopen == 1)).alias()) closed_mapper = aliased(Order, select(orders).where((orders.c.isopen == 0)).alias()) self.mapper_registry.map_imperatively(User, users, properties=dict(addresses=relationship(Address, lazy='joined', order_by=addresses.c.id), open_orders=relationship(open_mapper, lazy='joined', order_by=open_mapper.id), closed_orders=relationship(closed_mapper, lazy='joined', order_by=closed_mapper.id))) self._run_double_test()<|docstring|>Eager loading with two relationships simultaneously, from the same table, using aliases.<|endoftext|>
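test_double_w_ac_against_subquery swaps the plain-table aliases for aliases built against pre-filtered subqueries, which removes the need for an explicit primaryjoin. A hedged sketch, reusing the users/orders tables and classes from the previous block; this mapping is an alternative to the one above, not an addition to it.

from sqlalchemy import select
from sqlalchemy.orm import aliased, relationship

# each alias wraps a SELECT that already filters on isopen, so the
# relationship needs no primaryjoin: the criterion lives in the subquery
open_orders = aliased(Order, select(orders).where(orders.c.isopen == 1).alias())
closed_orders = aliased(Order, select(orders).where(orders.c.isopen == 0).alias())

reg.map_imperatively(User, users, properties={
    "open_orders": relationship(open_orders, lazy="joined",
                                order_by=open_orders.id),
    "closed_orders": relationship(closed_orders, lazy="joined",
                                  order_by=closed_orders.id),
})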
2b61dc77fb52fcdc1d57477b319bdb32741c4b708a5fdcc5a405e06249b31702
def test_double_same_mappers(self): 'Eager loading with two relationships simultaneously,\n from the same table, using aliases.' (addresses, items, order_items, orders, Item, User, Address, Order, users) = (self.tables.addresses, self.tables.items, self.tables.order_items, self.tables.orders, self.classes.Item, self.classes.User, self.classes.Address, self.classes.Order, self.tables.users) self.mapper_registry.map_imperatively(Address, addresses) self.mapper_registry.map_imperatively(Order, orders, properties={'items': relationship(Item, secondary=order_items, lazy='joined', order_by=items.c.id)}) self.mapper_registry.map_imperatively(Item, items) self.mapper_registry.map_imperatively(User, users, properties=dict(addresses=relationship(Address, lazy='joined', order_by=addresses.c.id), open_orders=relationship(Order, primaryjoin=sa.and_((orders.c.isopen == 1), (users.c.id == orders.c.user_id)), lazy='joined', order_by=orders.c.id, viewonly=True), closed_orders=relationship(Order, primaryjoin=sa.and_((orders.c.isopen == 0), (users.c.id == orders.c.user_id)), lazy='joined', order_by=orders.c.id, viewonly=True))) self._run_double_test()
Eager loading with two relationships simultaneously, from the same table, using aliases.
test/orm/test_eager_relations.py
test_double_same_mappers
ToGoBananas/sqlalchemy
5,383
python
def test_double_same_mappers(self): 'Eager loading with two relationships simultaneously,\n from the same table, using aliases.' (addresses, items, order_items, orders, Item, User, Address, Order, users) = (self.tables.addresses, self.tables.items, self.tables.order_items, self.tables.orders, self.classes.Item, self.classes.User, self.classes.Address, self.classes.Order, self.tables.users) self.mapper_registry.map_imperatively(Address, addresses) self.mapper_registry.map_imperatively(Order, orders, properties={'items': relationship(Item, secondary=order_items, lazy='joined', order_by=items.c.id)}) self.mapper_registry.map_imperatively(Item, items) self.mapper_registry.map_imperatively(User, users, properties=dict(addresses=relationship(Address, lazy='joined', order_by=addresses.c.id), open_orders=relationship(Order, primaryjoin=sa.and_((orders.c.isopen == 1), (users.c.id == orders.c.user_id)), lazy='joined', order_by=orders.c.id, viewonly=True), closed_orders=relationship(Order, primaryjoin=sa.and_((orders.c.isopen == 0), (users.c.id == orders.c.user_id)), lazy='joined', order_by=orders.c.id, viewonly=True))) self._run_double_test()
def test_double_same_mappers(self): 'Eager loading with two relationships simultaneously,\n from the same table, using aliases.' (addresses, items, order_items, orders, Item, User, Address, Order, users) = (self.tables.addresses, self.tables.items, self.tables.order_items, self.tables.orders, self.classes.Item, self.classes.User, self.classes.Address, self.classes.Order, self.tables.users) self.mapper_registry.map_imperatively(Address, addresses) self.mapper_registry.map_imperatively(Order, orders, properties={'items': relationship(Item, secondary=order_items, lazy='joined', order_by=items.c.id)}) self.mapper_registry.map_imperatively(Item, items) self.mapper_registry.map_imperatively(User, users, properties=dict(addresses=relationship(Address, lazy='joined', order_by=addresses.c.id), open_orders=relationship(Order, primaryjoin=sa.and_((orders.c.isopen == 1), (users.c.id == orders.c.user_id)), lazy='joined', order_by=orders.c.id, viewonly=True), closed_orders=relationship(Order, primaryjoin=sa.and_((orders.c.isopen == 0), (users.c.id == orders.c.user_id)), lazy='joined', order_by=orders.c.id, viewonly=True))) self._run_double_test()<|docstring|>Eager loading with two relationships simultaneously, from the same table, using aliases.<|endoftext|>
6821838bd7e2a6db720847ca3483d69a5ffea72789c11baf7197486e0299910f
def test_no_false_hits(self): "Eager loaders don't interpret main table columns as\n part of their eager load." (addresses, orders, User, Address, Order, users) = (self.tables.addresses, self.tables.orders, self.classes.User, self.classes.Address, self.classes.Order, self.tables.users) self.mapper_registry.map_imperatively(User, users, properties={'addresses': relationship(Address, lazy='joined'), 'orders': relationship(Order, lazy='joined')}) self.mapper_registry.map_imperatively(Address, addresses) self.mapper_registry.map_imperatively(Order, orders) self.allusers = fixture_session().query(User).all() noeagers = fixture_session().query(User).from_statement(text('select * from users')).all() assert ('orders' not in noeagers[0].__dict__) assert ('addresses' not in noeagers[0].__dict__)
Eager loaders don't interpret main table columns as part of their eager load.
test/orm/test_eager_relations.py
test_no_false_hits
ToGoBananas/sqlalchemy
5,383
python
def test_no_false_hits(self): "Eager loaders don't interpret main table columns as\n part of their eager load." (addresses, orders, User, Address, Order, users) = (self.tables.addresses, self.tables.orders, self.classes.User, self.classes.Address, self.classes.Order, self.tables.users) self.mapper_registry.map_imperatively(User, users, properties={'addresses': relationship(Address, lazy='joined'), 'orders': relationship(Order, lazy='joined')}) self.mapper_registry.map_imperatively(Address, addresses) self.mapper_registry.map_imperatively(Order, orders) self.allusers = fixture_session().query(User).all() noeagers = fixture_session().query(User).from_statement(text('select * from users')).all() assert ('orders' not in noeagers[0].__dict__) assert ('addresses' not in noeagers[0].__dict__)
def test_no_false_hits(self): "Eager loaders don't interpret main table columns as\n part of their eager load." (addresses, orders, User, Address, Order, users) = (self.tables.addresses, self.tables.orders, self.classes.User, self.classes.Address, self.classes.Order, self.tables.users) self.mapper_registry.map_imperatively(User, users, properties={'addresses': relationship(Address, lazy='joined'), 'orders': relationship(Order, lazy='joined')}) self.mapper_registry.map_imperatively(Address, addresses) self.mapper_registry.map_imperatively(Order, orders) self.allusers = fixture_session().query(User).all() noeagers = fixture_session().query(User).from_statement(text('select * from users')).all() assert ('orders' not in noeagers[0].__dict__) assert ('addresses' not in noeagers[0].__dict__)<|docstring|>Eager loaders don't interpret main table columns as part of their eager load.<|endoftext|>
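test_no_false_hits pins down a quiet guarantee: a raw textual SELECT that returns only the parent table's columns never tricks a joined eager loader into treating them as eager-load data. A self-contained sketch under assumed model names:

from sqlalchemy import Column, Integer, String, ForeignKey, create_engine, text
from sqlalchemy.orm import declarative_base, relationship, Session

Base = declarative_base()

class User(Base):
    __tablename__ = "users"
    id = Column(Integer, primary_key=True)
    name = Column(String)
    addresses = relationship("Address", lazy="joined")

class Address(Base):
    __tablename__ = "addresses"
    id = Column(Integer, primary_key=True)
    user_id = Column(Integer, ForeignKey("users.id"))

engine = create_engine("sqlite://")
Base.metadata.create_all(engine)
with Session(engine) as session:
    session.add(User(id=1, name="ed"))
    session.commit()
    u = session.query(User).from_statement(text("SELECT * FROM users")).all()[0]
    # the textual SELECT carried no joined-address columns, so the
    # collection stays unloaded rather than being populated with junk
    assert "addresses" not in u.__dict__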
cd6460cd8612277da75056c0940600fc9c45d7e42e1244b982a5058c2e141e40
def test_limit(self): 'Limit operations combined with lazy-load relationships.' (users, items, order_items, orders, Item, User, Address, Order, addresses) = (self.tables.users, self.tables.items, self.tables.order_items, self.tables.orders, self.classes.Item, self.classes.User, self.classes.Address, self.classes.Order, self.tables.addresses) self.mapper_registry.map_imperatively(Item, items) self.mapper_registry.map_imperatively(Order, orders, properties={'items': relationship(Item, secondary=order_items, lazy='joined', order_by=items.c.id)}) self.mapper_registry.map_imperatively(User, users, properties={'addresses': relationship(self.mapper_registry.map_imperatively(Address, addresses), lazy='joined', order_by=addresses.c.id), 'orders': relationship(Order, lazy='select', order_by=orders.c.id)}) sess = fixture_session() q = sess.query(User) result = q.order_by(User.id).limit(2).offset(1).all() eq_(self.static.user_all_result[1:3], result)
Limit operations combined with lazy-load relationships.
test/orm/test_eager_relations.py
test_limit
ToGoBananas/sqlalchemy
5,383
python
def test_limit(self): (users, items, order_items, orders, Item, User, Address, Order, addresses) = (self.tables.users, self.tables.items, self.tables.order_items, self.tables.orders, self.classes.Item, self.classes.User, self.classes.Address, self.classes.Order, self.tables.addresses) self.mapper_registry.map_imperatively(Item, items) self.mapper_registry.map_imperatively(Order, orders, properties={'items': relationship(Item, secondary=order_items, lazy='joined', order_by=items.c.id)}) self.mapper_registry.map_imperatively(User, users, properties={'addresses': relationship(self.mapper_registry.map_imperatively(Address, addresses), lazy='joined', order_by=addresses.c.id), 'orders': relationship(Order, lazy='select', order_by=orders.c.id)}) sess = fixture_session() q = sess.query(User) result = q.order_by(User.id).limit(2).offset(1).all() eq_(self.static.user_all_result[1:3], result)
def test_limit(self): (users, items, order_items, orders, Item, User, Address, Order, addresses) = (self.tables.users, self.tables.items, self.tables.order_items, self.tables.orders, self.classes.Item, self.classes.User, self.classes.Address, self.classes.Order, self.tables.addresses) self.mapper_registry.map_imperatively(Item, items) self.mapper_registry.map_imperatively(Order, orders, properties={'items': relationship(Item, secondary=order_items, lazy='joined', order_by=items.c.id)}) self.mapper_registry.map_imperatively(User, users, properties={'addresses': relationship(self.mapper_registry.map_imperatively(Address, addresses), lazy='joined', order_by=addresses.c.id), 'orders': relationship(Order, lazy='select', order_by=orders.c.id)}) sess = fixture_session() q = sess.query(User) result = q.order_by(User.id).limit(2).offset(1).all() eq_(self.static.user_all_result[1:3], result)<|docstring|>Limit operations combined with lazy-load relationships.<|endoftext|>
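test_limit combines joined eager loading with LIMIT/OFFSET, which is where the loader must apply the limit to parent rows rather than to joined rows. Continuing the declarative sketch above, the compiled SQL makes the mechanism visible:

from sqlalchemy.orm import joinedload

q = (
    session.query(User)
    .options(joinedload(User.addresses))
    .order_by(User.id)
    .limit(2)
    .offset(1)
)
print(q)
# roughly: SELECT anon_1.*, addresses_1.* FROM (SELECT ... FROM users
# ORDER BY users.id LIMIT ? OFFSET ?) AS anon_1
# LEFT OUTER JOIN addresses AS addresses_1 ON ...
# the LIMIT binds to distinct users, so joined address rows cannot
# shrink or inflate the page of parent objects
rows = q.all()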
65e2460ab5e628af61442ecb8712718ea9c539aeda6655f557d488512e7da098
def test_limit_3(self): "test that the ORDER BY is propagated from the inner\n select to the outer select, when using the\n 'wrapped' select statement resulting from the combination of\n eager loading and limit/offset clauses." (addresses, items, order_items, orders, Item, User, Address, Order, users) = (self.tables.addresses, self.tables.items, self.tables.order_items, self.tables.orders, self.classes.Item, self.classes.User, self.classes.Address, self.classes.Order, self.tables.users) self.mapper_registry.map_imperatively(Item, items) self.mapper_registry.map_imperatively(Order, orders, properties=dict(items=relationship(Item, secondary=order_items, lazy='joined'))) self.mapper_registry.map_imperatively(Address, addresses) self.mapper_registry.map_imperatively(User, users, properties=dict(addresses=relationship(Address, lazy='joined', order_by=addresses.c.id), orders=relationship(Order, lazy='joined', order_by=orders.c.id))) sess = fixture_session() q = sess.query(User) if (not testing.against('mssql')): result = q.join('orders').order_by(Order.user_id.desc()).limit(2).offset(1) eq_([User(id=9, orders=[Order(id=2), Order(id=4)], addresses=[Address(id=5)]), User(id=7, orders=[Order(id=1), Order(id=3), Order(id=5)], addresses=[Address(id=1)])], result.all()) result = q.join('addresses').order_by(Address.email_address.desc()).limit(1).offset(0) eq_([User(id=7, orders=[Order(id=1), Order(id=3), Order(id=5)], addresses=[Address(id=1)])], result.all())
test that the ORDER BY is propagated from the inner select to the outer select, when using the 'wrapped' select statement resulting from the combination of eager loading and limit/offset clauses.
test/orm/test_eager_relations.py
test_limit_3
ToGoBananas/sqlalchemy
5,383
python
def test_limit_3(self): "test that the ORDER BY is propagated from the inner\n select to the outer select, when using the\n 'wrapped' select statement resulting from the combination of\n eager loading and limit/offset clauses." (addresses, items, order_items, orders, Item, User, Address, Order, users) = (self.tables.addresses, self.tables.items, self.tables.order_items, self.tables.orders, self.classes.Item, self.classes.User, self.classes.Address, self.classes.Order, self.tables.users) self.mapper_registry.map_imperatively(Item, items) self.mapper_registry.map_imperatively(Order, orders, properties=dict(items=relationship(Item, secondary=order_items, lazy='joined'))) self.mapper_registry.map_imperatively(Address, addresses) self.mapper_registry.map_imperatively(User, users, properties=dict(addresses=relationship(Address, lazy='joined', order_by=addresses.c.id), orders=relationship(Order, lazy='joined', order_by=orders.c.id))) sess = fixture_session() q = sess.query(User) if (not testing.against('mssql')): result = q.join('orders').order_by(Order.user_id.desc()).limit(2).offset(1) eq_([User(id=9, orders=[Order(id=2), Order(id=4)], addresses=[Address(id=5)]), User(id=7, orders=[Order(id=1), Order(id=3), Order(id=5)], addresses=[Address(id=1)])], result.all()) result = q.join('addresses').order_by(Address.email_address.desc()).limit(1).offset(0) eq_([User(id=7, orders=[Order(id=1), Order(id=3), Order(id=5)], addresses=[Address(id=1)])], result.all())
def test_limit_3(self): "test that the ORDER BY is propagated from the inner\n select to the outer select, when using the\n 'wrapped' select statement resulting from the combination of\n eager loading and limit/offset clauses." (addresses, items, order_items, orders, Item, User, Address, Order, users) = (self.tables.addresses, self.tables.items, self.tables.order_items, self.tables.orders, self.classes.Item, self.classes.User, self.classes.Address, self.classes.Order, self.tables.users) self.mapper_registry.map_imperatively(Item, items) self.mapper_registry.map_imperatively(Order, orders, properties=dict(items=relationship(Item, secondary=order_items, lazy='joined'))) self.mapper_registry.map_imperatively(Address, addresses) self.mapper_registry.map_imperatively(User, users, properties=dict(addresses=relationship(Address, lazy='joined', order_by=addresses.c.id), orders=relationship(Order, lazy='joined', order_by=orders.c.id))) sess = fixture_session() q = sess.query(User) if (not testing.against('mssql')): result = q.join('orders').order_by(Order.user_id.desc()).limit(2).offset(1) eq_([User(id=9, orders=[Order(id=2), Order(id=4)], addresses=[Address(id=5)]), User(id=7, orders=[Order(id=1), Order(id=3), Order(id=5)], addresses=[Address(id=1)])], result.all()) result = q.join('addresses').order_by(Address.email_address.desc()).limit(1).offset(0) eq_([User(id=7, orders=[Order(id=1), Order(id=3), Order(id=5)], addresses=[Address(id=1)])], result.all())<|docstring|>test that the ORDER BY is propagated from the inner select to the outer select, when using the 'wrapped' select statement resulting from the combination of eager loading and limit/offset clauses.<|endoftext|>
69a4c3081c4a9a7e478448bc74db51ab29ac3825c0abff6e18aed0f2d6df4bb7
def test_useget_cancels_eager(self): 'test that a one to many lazyload cancels the unnecessary\n eager many-to-one join on the other side.' (users, Address, addresses, User) = (self.tables.users, self.classes.Address, self.tables.addresses, self.classes.User) self.mapper_registry.map_imperatively(User, users) self.mapper_registry.map_imperatively(Address, addresses, properties={'user': relationship(User, lazy='joined', backref='addresses')}) sess = fixture_session() u1 = sess.query(User).filter((User.id == 8)).one() def go(): eq_(u1.addresses[0].user, u1) with testing.expect_warnings(raise_on_any_unexpected=True): self.assert_sql_execution(testing.db, go, CompiledSQL('SELECT addresses.id AS addresses_id, addresses.user_id AS addresses_user_id, addresses.email_address AS addresses_email_address FROM addresses WHERE :param_1 = addresses.user_id', {'param_1': 8}))
test that a one to many lazyload cancels the unnecessary eager many-to-one join on the other side.
test/orm/test_eager_relations.py
test_useget_cancels_eager
ToGoBananas/sqlalchemy
5,383
python
def test_useget_cancels_eager(self): 'test that a one to many lazyload cancels the unnecessary\n eager many-to-one join on the other side.' (users, Address, addresses, User) = (self.tables.users, self.classes.Address, self.tables.addresses, self.classes.User) self.mapper_registry.map_imperatively(User, users) self.mapper_registry.map_imperatively(Address, addresses, properties={'user': relationship(User, lazy='joined', backref='addresses')}) sess = fixture_session() u1 = sess.query(User).filter((User.id == 8)).one() def go(): eq_(u1.addresses[0].user, u1) with testing.expect_warnings(raise_on_any_unexpected=True): self.assert_sql_execution(testing.db, go, CompiledSQL('SELECT addresses.id AS addresses_id, addresses.user_id AS addresses_user_id, addresses.email_address AS addresses_email_address FROM addresses WHERE :param_1 = addresses.user_id', {'param_1': 8}))
def test_useget_cancels_eager(self): 'test that a one to many lazyload cancels the unnecessary\n eager many-to-one join on the other side.' (users, Address, addresses, User) = (self.tables.users, self.classes.Address, self.tables.addresses, self.classes.User) self.mapper_registry.map_imperatively(User, users) self.mapper_registry.map_imperatively(Address, addresses, properties={'user': relationship(User, lazy='joined', backref='addresses')}) sess = fixture_session() u1 = sess.query(User).filter((User.id == 8)).one() def go(): eq_(u1.addresses[0].user, u1) with testing.expect_warnings(raise_on_any_unexpected=True): self.assert_sql_execution(testing.db, go, CompiledSQL('SELECT addresses.id AS addresses_id, addresses.user_id AS addresses_user_id, addresses.email_address AS addresses_email_address FROM addresses WHERE :param_1 = addresses.user_id', {'param_1': 8}))<|docstring|>test that a one to many lazyload cancels the unnecessary eager many-to-one join on the other side.<|endoftext|>
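test_useget_cancels_eager asserts that the many-to-one side of a bidirectional pair does not issue a redundant join for a parent that is already in the identity map. In sketch form, assuming Address.user is mapped relationship(User, lazy='joined', backref='addresses') as in the record:

u = session.query(User).filter(User.id == 8).one()
a = u.addresses[0]   # exactly one SELECT, against addresses only
assert a.user is u   # no eager join ran here: the lazy loader saw a
                     # simple primary-key get() and satisfied it from
                     # the identity map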
cd11cd6d1cbfa8606bc16bc0e5efefebe5b084fac20a1a19916129cc77b061a2
def test_useget_cancels_eager_propagated_present(self): 'test that a one to many lazyload cancels the unnecessary\n eager many-to-one join on the other side, even when a propagated\n option is present.' (users, Address, addresses, User) = (self.tables.users, self.classes.Address, self.tables.addresses, self.classes.User) self.mapper_registry.map_imperatively(User, users) self.mapper_registry.map_imperatively(Address, addresses, properties={'user': relationship(User, lazy='joined', backref='addresses')}) from sqlalchemy.orm.interfaces import MapperOption class MyBogusOption(MapperOption): propagate_to_loaders = True sess = fixture_session() u1 = sess.query(User).options(MyBogusOption()).filter((User.id == 8)).one() def go(): eq_(u1.addresses[0].user, u1) with testing.expect_warnings(raise_on_any_unexpected=True): self.assert_sql_execution(testing.db, go, CompiledSQL('SELECT addresses.id AS addresses_id, addresses.user_id AS addresses_user_id, addresses.email_address AS addresses_email_address FROM addresses WHERE :param_1 = addresses.user_id', {'param_1': 8}))
test that a one to many lazyload cancels the unnecessary eager many-to-one join on the other side, even when a propagated option is present.
test/orm/test_eager_relations.py
test_useget_cancels_eager_propagated_present
ToGoBananas/sqlalchemy
5,383
python
def test_useget_cancels_eager_propagated_present(self): 'test that a one to many lazyload cancels the unnecessary\n eager many-to-one join on the other side, even when a propagated\n option is present.' (users, Address, addresses, User) = (self.tables.users, self.classes.Address, self.tables.addresses, self.classes.User) self.mapper_registry.map_imperatively(User, users) self.mapper_registry.map_imperatively(Address, addresses, properties={'user': relationship(User, lazy='joined', backref='addresses')}) from sqlalchemy.orm.interfaces import MapperOption class MyBogusOption(MapperOption): propagate_to_loaders = True sess = fixture_session() u1 = sess.query(User).options(MyBogusOption()).filter((User.id == 8)).one() def go(): eq_(u1.addresses[0].user, u1) with testing.expect_warnings(raise_on_any_unexpected=True): self.assert_sql_execution(testing.db, go, CompiledSQL('SELECT addresses.id AS addresses_id, addresses.user_id AS addresses_user_id, addresses.email_address AS addresses_email_address FROM addresses WHERE :param_1 = addresses.user_id', {'param_1': 8}))
def test_useget_cancels_eager_propagated_present(self): 'test that a one to many lazyload cancels the unnecessary\n eager many-to-one join on the other side, even when a propagated\n option is present.' (users, Address, addresses, User) = (self.tables.users, self.classes.Address, self.tables.addresses, self.classes.User) self.mapper_registry.map_imperatively(User, users) self.mapper_registry.map_imperatively(Address, addresses, properties={'user': relationship(User, lazy='joined', backref='addresses')}) from sqlalchemy.orm.interfaces import MapperOption class MyBogusOption(MapperOption): propagate_to_loaders = True sess = fixture_session() u1 = sess.query(User).options(MyBogusOption()).filter((User.id == 8)).one() def go(): eq_(u1.addresses[0].user, u1) with testing.expect_warnings(raise_on_any_unexpected=True): self.assert_sql_execution(testing.db, go, CompiledSQL('SELECT addresses.id AS addresses_id, addresses.user_id AS addresses_user_id, addresses.email_address AS addresses_email_address FROM addresses WHERE :param_1 = addresses.user_id', {'param_1': 8}))<|docstring|>test that a one to many lazyload cancels the unnecessary eager many-to-one join on the other side, even when a propagated option is present.<|endoftext|>
ada6c3d2ef320779f591f05437a3389f731d39067e49d6c66e060346de7b21d5
def test_manytoone_limit(self): 'test that the subquery wrapping only occurs with\n limit/offset and m2m or o2m joins present.' (users, items, order_items, Order, Item, User, Address, orders, addresses) = (self.tables.users, self.tables.items, self.tables.order_items, self.classes.Order, self.classes.Item, self.classes.User, self.classes.Address, self.tables.orders, self.tables.addresses) self.mapper_registry.map_imperatively(User, users, properties=odict(orders=relationship(Order, backref='user'))) self.mapper_registry.map_imperatively(Order, orders, properties=odict([('items', relationship(Item, secondary=order_items, backref='orders')), ('address', relationship(Address))])) self.mapper_registry.map_imperatively(Address, addresses) self.mapper_registry.map_imperatively(Item, items) sess = fixture_session() self.assert_compile(sess.query(User).options(joinedload(User.orders)).limit(10), 'SELECT anon_1.users_id AS anon_1_users_id, anon_1.users_name AS anon_1_users_name, orders_1.id AS orders_1_id, orders_1.user_id AS orders_1_user_id, orders_1.address_id AS orders_1_address_id, orders_1.description AS orders_1_description, orders_1.isopen AS orders_1_isopen FROM (SELECT users.id AS users_id, users.name AS users_name FROM users LIMIT :param_1) AS anon_1 LEFT OUTER JOIN orders AS orders_1 ON anon_1.users_id = orders_1.user_id', {'param_1': 10}) self.assert_compile(sess.query(Order).options(joinedload(Order.user)).limit(10), 'SELECT orders.id AS orders_id, orders.user_id AS orders_user_id, orders.address_id AS orders_address_id, orders.description AS orders_description, orders.isopen AS orders_isopen, users_1.id AS users_1_id, users_1.name AS users_1_name FROM orders LEFT OUTER JOIN users AS users_1 ON users_1.id = orders.user_id LIMIT :param_1', {'param_1': 10}) self.assert_compile(sess.query(Order).options(joinedload(Order.user, innerjoin=True)).limit(10), 'SELECT orders.id AS orders_id, orders.user_id AS orders_user_id, orders.address_id AS orders_address_id, orders.description AS orders_description, orders.isopen AS orders_isopen, users_1.id AS users_1_id, users_1.name AS users_1_name FROM orders JOIN users AS users_1 ON users_1.id = orders.user_id LIMIT :param_1', {'param_1': 10}) self.assert_compile(sess.query(User).options(joinedload(User.orders).joinedload(Order.address)).limit(10), 'SELECT anon_1.users_id AS anon_1_users_id, anon_1.users_name AS anon_1_users_name, addresses_1.id AS addresses_1_id, addresses_1.user_id AS addresses_1_user_id, addresses_1.email_address AS addresses_1_email_address, orders_1.id AS orders_1_id, orders_1.user_id AS orders_1_user_id, orders_1.address_id AS orders_1_address_id, orders_1.description AS orders_1_description, orders_1.isopen AS orders_1_isopen FROM (SELECT users.id AS users_id, users.name AS users_name FROM users LIMIT :param_1) AS anon_1 LEFT OUTER JOIN orders AS orders_1 ON anon_1.users_id = orders_1.user_id LEFT OUTER JOIN addresses AS addresses_1 ON addresses_1.id = orders_1.address_id', {'param_1': 10}) self.assert_compile(sess.query(User).options(joinedload(User.orders).joinedload(Order.items), joinedload(User.orders).joinedload(Order.address)), 'SELECT users.id AS users_id, users.name AS users_name, items_1.id AS items_1_id, items_1.description AS items_1_description, addresses_1.id AS addresses_1_id, addresses_1.user_id AS addresses_1_user_id, addresses_1.email_address AS addresses_1_email_address, orders_1.id AS orders_1_id, orders_1.user_id AS orders_1_user_id, orders_1.address_id AS orders_1_address_id, orders_1.description AS orders_1_description, orders_1.isopen AS orders_1_isopen FROM users LEFT OUTER JOIN orders AS orders_1 ON users.id = orders_1.user_id LEFT OUTER JOIN (order_items AS order_items_1 JOIN items AS items_1 ON items_1.id = order_items_1.item_id) ON orders_1.id = order_items_1.order_id LEFT OUTER JOIN addresses AS addresses_1 ON addresses_1.id = orders_1.address_id') self.assert_compile(sess.query(User).options(joinedload(User.orders), joinedload(User.orders, Order.address, innerjoin=True)).limit(10), 'SELECT anon_1.users_id AS anon_1_users_id, anon_1.users_name AS anon_1_users_name, addresses_1.id AS addresses_1_id, addresses_1.user_id AS addresses_1_user_id, addresses_1.email_address AS addresses_1_email_address, orders_1.id AS orders_1_id, orders_1.user_id AS orders_1_user_id, orders_1.address_id AS orders_1_address_id, orders_1.description AS orders_1_description, orders_1.isopen AS orders_1_isopen FROM (SELECT users.id AS users_id, users.name AS users_name FROM users LIMIT :param_1) AS anon_1 LEFT OUTER JOIN (orders AS orders_1 JOIN addresses AS addresses_1 ON addresses_1.id = orders_1.address_id) ON anon_1.users_id = orders_1.user_id', {'param_1': 10}) self.assert_compile(sess.query(User).options(joinedload(User.orders, innerjoin=True), joinedload(User.orders, Order.address, innerjoin=True)).limit(10), 'SELECT anon_1.users_id AS anon_1_users_id, anon_1.users_name AS anon_1_users_name, addresses_1.id AS addresses_1_id, addresses_1.user_id AS addresses_1_user_id, addresses_1.email_address AS addresses_1_email_address, orders_1.id AS orders_1_id, orders_1.user_id AS orders_1_user_id, orders_1.address_id AS orders_1_address_id, orders_1.description AS orders_1_description, orders_1.isopen AS orders_1_isopen FROM (SELECT users.id AS users_id, users.name AS users_name FROM users LIMIT :param_1) AS anon_1 JOIN orders AS orders_1 ON anon_1.users_id = orders_1.user_id JOIN addresses AS addresses_1 ON addresses_1.id = orders_1.address_id', {'param_1': 10})
test that the subquery wrapping only occurs with limit/offset and m2m or o2m joins present.
test/orm/test_eager_relations.py
test_manytoone_limit
ToGoBananas/sqlalchemy
5,383
python
def test_manytoone_limit(self): 'test that the subquery wrapping only occurs with\n limit/offset and m2m or o2m joins present.' (users, items, order_items, Order, Item, User, Address, orders, addresses) = (self.tables.users, self.tables.items, self.tables.order_items, self.classes.Order, self.classes.Item, self.classes.User, self.classes.Address, self.tables.orders, self.tables.addresses) self.mapper_registry.map_imperatively(User, users, properties=odict(orders=relationship(Order, backref='user'))) self.mapper_registry.map_imperatively(Order, orders, properties=odict([('items', relationship(Item, secondary=order_items, backref='orders')), ('address', relationship(Address))])) self.mapper_registry.map_imperatively(Address, addresses) self.mapper_registry.map_imperatively(Item, items) sess = fixture_session() self.assert_compile(sess.query(User).options(joinedload(User.orders)).limit(10), 'SELECT anon_1.users_id AS anon_1_users_id, anon_1.users_name AS anon_1_users_name, orders_1.id AS orders_1_id, orders_1.user_id AS orders_1_user_id, orders_1.address_id AS orders_1_address_id, orders_1.description AS orders_1_description, orders_1.isopen AS orders_1_isopen FROM (SELECT users.id AS users_id, users.name AS users_name FROM users LIMIT :param_1) AS anon_1 LEFT OUTER JOIN orders AS orders_1 ON anon_1.users_id = orders_1.user_id', {'param_1': 10}) self.assert_compile(sess.query(Order).options(joinedload(Order.user)).limit(10), 'SELECT orders.id AS orders_id, orders.user_id AS orders_user_id, orders.address_id AS orders_address_id, orders.description AS orders_description, orders.isopen AS orders_isopen, users_1.id AS users_1_id, users_1.name AS users_1_name FROM orders LEFT OUTER JOIN users AS users_1 ON users_1.id = orders.user_id LIMIT :param_1', {'param_1': 10}) self.assert_compile(sess.query(Order).options(joinedload(Order.user, innerjoin=True)).limit(10), 'SELECT orders.id AS orders_id, orders.user_id AS orders_user_id, orders.address_id AS orders_address_id, orders.description AS orders_description, orders.isopen AS orders_isopen, users_1.id AS users_1_id, users_1.name AS users_1_name FROM orders JOIN users AS users_1 ON users_1.id = orders.user_id LIMIT :param_1', {'param_1': 10}) self.assert_compile(sess.query(User).options(joinedload(User.orders).joinedload(Order.address)).limit(10), 'SELECT anon_1.users_id AS anon_1_users_id, anon_1.users_name AS anon_1_users_name, addresses_1.id AS addresses_1_id, addresses_1.user_id AS addresses_1_user_id, addresses_1.email_address AS addresses_1_email_address, orders_1.id AS orders_1_id, orders_1.user_id AS orders_1_user_id, orders_1.address_id AS orders_1_address_id, orders_1.description AS orders_1_description, orders_1.isopen AS orders_1_isopen FROM (SELECT users.id AS users_id, users.name AS users_name FROM users LIMIT :param_1) AS anon_1 LEFT OUTER JOIN orders AS orders_1 ON anon_1.users_id = orders_1.user_id LEFT OUTER JOIN addresses AS addresses_1 ON addresses_1.id = orders_1.address_id', {'param_1': 10}) self.assert_compile(sess.query(User).options(joinedload(User.orders).joinedload(Order.items), joinedload(User.orders).joinedload(Order.address)), 'SELECT users.id AS users_id, users.name AS users_name, items_1.id AS items_1_id, items_1.description AS items_1_description, addresses_1.id AS addresses_1_id, addresses_1.user_id AS addresses_1_user_id, addresses_1.email_address AS addresses_1_email_address, orders_1.id AS orders_1_id, orders_1.user_id AS orders_1_user_id, orders_1.address_id AS orders_1_address_id, orders_1.description AS orders_1_description, orders_1.isopen AS orders_1_isopen FROM users LEFT OUTER JOIN orders AS orders_1 ON users.id = orders_1.user_id LEFT OUTER JOIN (order_items AS order_items_1 JOIN items AS items_1 ON items_1.id = order_items_1.item_id) ON orders_1.id = order_items_1.order_id LEFT OUTER JOIN addresses AS addresses_1 ON addresses_1.id = orders_1.address_id') self.assert_compile(sess.query(User).options(joinedload(User.orders), joinedload(User.orders, Order.address, innerjoin=True)).limit(10), 'SELECT anon_1.users_id AS anon_1_users_id, anon_1.users_name AS anon_1_users_name, addresses_1.id AS addresses_1_id, addresses_1.user_id AS addresses_1_user_id, addresses_1.email_address AS addresses_1_email_address, orders_1.id AS orders_1_id, orders_1.user_id AS orders_1_user_id, orders_1.address_id AS orders_1_address_id, orders_1.description AS orders_1_description, orders_1.isopen AS orders_1_isopen FROM (SELECT users.id AS users_id, users.name AS users_name FROM users LIMIT :param_1) AS anon_1 LEFT OUTER JOIN (orders AS orders_1 JOIN addresses AS addresses_1 ON addresses_1.id = orders_1.address_id) ON anon_1.users_id = orders_1.user_id', {'param_1': 10}) self.assert_compile(sess.query(User).options(joinedload(User.orders, innerjoin=True), joinedload(User.orders, Order.address, innerjoin=True)).limit(10), 'SELECT anon_1.users_id AS anon_1_users_id, anon_1.users_name AS anon_1_users_name, addresses_1.id AS addresses_1_id, addresses_1.user_id AS addresses_1_user_id, addresses_1.email_address AS addresses_1_email_address, orders_1.id AS orders_1_id, orders_1.user_id AS orders_1_user_id, orders_1.address_id AS orders_1_address_id, orders_1.description AS orders_1_description, orders_1.isopen AS orders_1_isopen FROM (SELECT users.id AS users_id, users.name AS users_name FROM users LIMIT :param_1) AS anon_1 JOIN orders AS orders_1 ON anon_1.users_id = orders_1.user_id JOIN addresses AS addresses_1 ON addresses_1.id = orders_1.address_id', {'param_1': 10})
def test_manytoone_limit(self): 'test that the subquery wrapping only occurs with\n limit/offset and m2m or o2m joins present.' (users, items, order_items, Order, Item, User, Address, orders, addresses) = (self.tables.users, self.tables.items, self.tables.order_items, self.classes.Order, self.classes.Item, self.classes.User, self.classes.Address, self.tables.orders, self.tables.addresses) self.mapper_registry.map_imperatively(User, users, properties=odict(orders=relationship(Order, backref='user'))) self.mapper_registry.map_imperatively(Order, orders, properties=odict([('items', relationship(Item, secondary=order_items, backref='orders')), ('address', relationship(Address))])) self.mapper_registry.map_imperatively(Address, addresses) self.mapper_registry.map_imperatively(Item, items) sess = fixture_session() self.assert_compile(sess.query(User).options(joinedload(User.orders)).limit(10), 'SELECT anon_1.users_id AS anon_1_users_id, anon_1.users_name AS anon_1_users_name, orders_1.id AS orders_1_id, orders_1.user_id AS orders_1_user_id, orders_1.address_id AS orders_1_address_id, orders_1.description AS orders_1_description, orders_1.isopen AS orders_1_isopen FROM (SELECT users.id AS users_id, users.name AS users_name FROM users LIMIT :param_1) AS anon_1 LEFT OUTER JOIN orders AS orders_1 ON anon_1.users_id = orders_1.user_id', {'param_1': 10}) self.assert_compile(sess.query(Order).options(joinedload(Order.user)).limit(10), 'SELECT orders.id AS orders_id, orders.user_id AS orders_user_id, orders.address_id AS orders_address_id, orders.description AS orders_description, orders.isopen AS orders_isopen, users_1.id AS users_1_id, users_1.name AS users_1_name FROM orders LEFT OUTER JOIN users AS users_1 ON users_1.id = orders.user_id LIMIT :param_1', {'param_1': 10}) self.assert_compile(sess.query(Order).options(joinedload(Order.user, innerjoin=True)).limit(10), 'SELECT orders.id AS orders_id, orders.user_id AS orders_user_id, orders.address_id AS orders_address_id, orders.description AS orders_description, orders.isopen AS orders_isopen, users_1.id AS users_1_id, users_1.name AS users_1_name FROM orders JOIN users AS users_1 ON users_1.id = orders.user_id LIMIT :param_1', {'param_1': 10}) self.assert_compile(sess.query(User).options(joinedload(User.orders).joinedload(Order.address)).limit(10), 'SELECT anon_1.users_id AS anon_1_users_id, anon_1.users_name AS anon_1_users_name, addresses_1.id AS addresses_1_id, addresses_1.user_id AS addresses_1_user_id, addresses_1.email_address AS addresses_1_email_address, orders_1.id AS orders_1_id, orders_1.user_id AS orders_1_user_id, orders_1.address_id AS orders_1_address_id, orders_1.description AS orders_1_description, orders_1.isopen AS orders_1_isopen FROM (SELECT users.id AS users_id, users.name AS users_name FROM users LIMIT :param_1) AS anon_1 LEFT OUTER JOIN orders AS orders_1 ON anon_1.users_id = orders_1.user_id LEFT OUTER JOIN addresses AS addresses_1 ON addresses_1.id = orders_1.address_id', {'param_1': 10}) self.assert_compile(sess.query(User).options(joinedload(User.orders).joinedload(Order.items), joinedload(User.orders).joinedload(Order.address)), 'SELECT users.id AS users_id, users.name AS users_name, items_1.id AS items_1_id, items_1.description AS items_1_description, addresses_1.id AS addresses_1_id, addresses_1.user_id AS addresses_1_user_id, addresses_1.email_address AS addresses_1_email_address, orders_1.id AS orders_1_id, orders_1.user_id AS orders_1_user_id, orders_1.address_id AS orders_1_address_id, orders_1.description AS orders_1_description, orders_1.isopen AS orders_1_isopen FROM users LEFT OUTER JOIN orders AS orders_1 ON users.id = orders_1.user_id LEFT OUTER JOIN (order_items AS order_items_1 JOIN items AS items_1 ON items_1.id = order_items_1.item_id) ON orders_1.id = order_items_1.order_id LEFT OUTER JOIN addresses AS addresses_1 ON addresses_1.id = orders_1.address_id') self.assert_compile(sess.query(User).options(joinedload(User.orders), joinedload(User.orders, Order.address, innerjoin=True)).limit(10), 'SELECT anon_1.users_id AS anon_1_users_id, anon_1.users_name AS anon_1_users_name, addresses_1.id AS addresses_1_id, addresses_1.user_id AS addresses_1_user_id, addresses_1.email_address AS addresses_1_email_address, orders_1.id AS orders_1_id, orders_1.user_id AS orders_1_user_id, orders_1.address_id AS orders_1_address_id, orders_1.description AS orders_1_description, orders_1.isopen AS orders_1_isopen FROM (SELECT users.id AS users_id, users.name AS users_name FROM users LIMIT :param_1) AS anon_1 LEFT OUTER JOIN (orders AS orders_1 JOIN addresses AS addresses_1 ON addresses_1.id = orders_1.address_id) ON anon_1.users_id = orders_1.user_id', {'param_1': 10}) self.assert_compile(sess.query(User).options(joinedload(User.orders, innerjoin=True), joinedload(User.orders, Order.address, innerjoin=True)).limit(10), 'SELECT anon_1.users_id AS anon_1_users_id, anon_1.users_name AS anon_1_users_name, addresses_1.id AS addresses_1_id, addresses_1.user_id AS addresses_1_user_id, addresses_1.email_address AS addresses_1_email_address, orders_1.id AS orders_1_id, orders_1.user_id AS orders_1_user_id, orders_1.address_id AS orders_1_address_id, orders_1.description AS orders_1_description, orders_1.isopen AS orders_1_isopen FROM (SELECT users.id AS users_id, users.name AS users_name FROM users LIMIT :param_1) AS anon_1 JOIN orders AS orders_1 ON anon_1.users_id = orders_1.user_id JOIN addresses AS addresses_1 ON addresses_1.id = orders_1.address_id', {'param_1': 10})<|docstring|>test that the subquery wrapping only occurs with limit/offset and m2m or o2m joins present.<|endoftext|>
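The long assert_compile record above reduces to one rule: with LIMIT/OFFSET present, joined eager loading wraps the parent query in a subquery only when the joined relationship can multiply parent rows (one-to-many or many-to-many); a many-to-one join cannot, so no wrapping occurs. A quick way to see both shapes, assuming joined-eager User.orders and Order.user mappings like the fixtures use:

from sqlalchemy.orm import joinedload

# many-to-one: at most one users row per order, the LIMIT stays on the join
print(session.query(Order).options(joinedload(Order.user)).limit(10))

# one-to-many: orders can fan out the users rows, so users is wrapped in a
# subquery first and the LEFT OUTER JOIN happens against that
print(session.query(User).options(joinedload(User.orders)).limit(10))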
044d2bd23f8ab1743a0ac165e02f69a0b59a87521516e855baf9a7510fe4d341
def test_many_to_one_null(self): 'test that a many-to-one eager load which loads None does\n not later trigger a lazy load.\n\n ' (Order, Address, addresses, orders) = (self.classes.Order, self.classes.Address, self.tables.addresses, self.tables.orders) self.mapper_registry.map_imperatively(Order, orders, properties=dict(address=relationship(self.mapper_registry.map_imperatively(Address, addresses), primaryjoin=and_((addresses.c.id == orders.c.address_id), (addresses.c.email_address != None)), lazy='joined'))) sess = fixture_session() def go(): o1 = sess.query(Order).options(lazyload(Order.address)).filter((Order.id == 5)).one() eq_(o1.address, None) self.assert_sql_count(testing.db, go, 2) sess.expunge_all() def go(): o1 = sess.query(Order).filter((Order.id == 5)).one() eq_(o1.address, None) self.assert_sql_count(testing.db, go, 1)
test that a many-to-one eager load which loads None does not later trigger a lazy load.
test/orm/test_eager_relations.py
test_many_to_one_null
ToGoBananas/sqlalchemy
5,383
python
def test_many_to_one_null(self): 'test that a many-to-one eager load which loads None does\n not later trigger a lazy load.\n\n ' (Order, Address, addresses, orders) = (self.classes.Order, self.classes.Address, self.tables.addresses, self.tables.orders) self.mapper_registry.map_imperatively(Order, orders, properties=dict(address=relationship(self.mapper_registry.map_imperatively(Address, addresses), primaryjoin=and_((addresses.c.id == orders.c.address_id), (addresses.c.email_address != None)), lazy='joined'))) sess = fixture_session() def go(): o1 = sess.query(Order).options(lazyload(Order.address)).filter((Order.id == 5)).one() eq_(o1.address, None) self.assert_sql_count(testing.db, go, 2) sess.expunge_all() def go(): o1 = sess.query(Order).filter((Order.id == 5)).one() eq_(o1.address, None) self.assert_sql_count(testing.db, go, 1)
def test_many_to_one_null(self): 'test that a many-to-one eager load which loads None does\n not later trigger a lazy load.\n\n ' (Order, Address, addresses, orders) = (self.classes.Order, self.classes.Address, self.tables.addresses, self.tables.orders) self.mapper_registry.map_imperatively(Order, orders, properties=dict(address=relationship(self.mapper_registry.map_imperatively(Address, addresses), primaryjoin=and_((addresses.c.id == orders.c.address_id), (addresses.c.email_address != None)), lazy='joined'))) sess = fixture_session() def go(): o1 = sess.query(Order).options(lazyload(Order.address)).filter((Order.id == 5)).one() eq_(o1.address, None) self.assert_sql_count(testing.db, go, 2) sess.expunge_all() def go(): o1 = sess.query(Order).filter((Order.id == 5)).one() eq_(o1.address, None) self.assert_sql_count(testing.db, go, 1)<|docstring|>test that a many-to-one eager load which loads None does not later trigger a lazy load.<|endoftext|>
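test_many_to_one_null guards a regression path: a joined eager load that finds no related row must record the miss, so the attribute resolves to None without a later lazy SELECT. Sketched, assuming Order.address is mapped lazy='joined' as in the record:

o = session.query(Order).filter(Order.id == 5).one()  # one SELECT with a
                                                      # LEFT OUTER JOIN
assert o.address is None  # no second query: the eager load already
                          # established that no address exists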
9c8c95699f7abdf1983e81d3140610b152d5ebdbb8fff8243e15ed1d631acf64
def test_one_and_many(self): 'tests eager load for a parent object with a child object that\n contains a many-to-many relationship to a third object.' (users, items, order_items, orders, Item, User, Order) = (self.tables.users, self.tables.items, self.tables.order_items, self.tables.orders, self.classes.Item, self.classes.User, self.classes.Order) self.mapper_registry.map_imperatively(User, users, properties={'orders': relationship(Order, lazy='joined', order_by=orders.c.id)}) self.mapper_registry.map_imperatively(Item, items) self.mapper_registry.map_imperatively(Order, orders, properties=dict(items=relationship(Item, secondary=order_items, lazy='joined', order_by=items.c.id))) q = fixture_session().query(User) result = q.filter(text('users.id in (7, 8, 9)')).order_by(text('users.id')) def go(): eq_(self.static.user_order_result[0:3], result.all()) self.assert_sql_count(testing.db, go, 1)
tests eager load for a parent object with a child object that contains a many-to-many relationship to a third object.
test/orm/test_eager_relations.py
test_one_and_many
ToGoBananas/sqlalchemy
5,383
python
def test_one_and_many(self): 'tests eager load for a parent object with a child object that\n contains a many-to-many relationship to a third object.' (users, items, order_items, orders, Item, User, Order) = (self.tables.users, self.tables.items, self.tables.order_items, self.tables.orders, self.classes.Item, self.classes.User, self.classes.Order) self.mapper_registry.map_imperatively(User, users, properties={'orders': relationship(Order, lazy='joined', order_by=orders.c.id)}) self.mapper_registry.map_imperatively(Item, items) self.mapper_registry.map_imperatively(Order, orders, properties=dict(items=relationship(Item, secondary=order_items, lazy='joined', order_by=items.c.id))) q = fixture_session().query(User) result = q.filter(text('users.id in (7, 8, 9)')).order_by(text('users.id')) def go(): eq_(self.static.user_order_result[0:3], result.all()) self.assert_sql_count(testing.db, go, 1)
def test_one_and_many(self): 'tests eager load for a parent object with a child object that\n contains a many-to-many relationship to a third object.' (users, items, order_items, orders, Item, User, Order) = (self.tables.users, self.tables.items, self.tables.order_items, self.tables.orders, self.classes.Item, self.classes.User, self.classes.Order) self.mapper_registry.map_imperatively(User, users, properties={'orders': relationship(Order, lazy='joined', order_by=orders.c.id)}) self.mapper_registry.map_imperatively(Item, items) self.mapper_registry.map_imperatively(Order, orders, properties=dict(items=relationship(Item, secondary=order_items, lazy='joined', order_by=items.c.id))) q = fixture_session().query(User) result = q.filter(text('users.id in (7, 8, 9)')).order_by(text('users.id')) def go(): eq_(self.static.user_order_result[0:3], result.all()) self.assert_sql_count(testing.db, go, 1)<|docstring|>tests eager load for a parent object with a child object that contains a many-to-many relationship to a third object.<|endoftext|>
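test_one_and_many chains a one-to-many eager load into a many-to-many one and still expects a single statement. The shape of such a mapping, as a self-contained declarative sketch whose names are placeholders for the fixture tables:

from sqlalchemy import Table, Column, Integer, ForeignKey
from sqlalchemy.orm import declarative_base, relationship

Base = declarative_base()

order_items = Table(
    "order_items", Base.metadata,
    Column("order_id", ForeignKey("orders.id"), primary_key=True),
    Column("item_id", ForeignKey("items.id"), primary_key=True),
)

class Item(Base):
    __tablename__ = "items"
    id = Column(Integer, primary_key=True)

class Order(Base):
    __tablename__ = "orders"
    id = Column(Integer, primary_key=True)
    user_id = Column(Integer, ForeignKey("users.id"))
    items = relationship(Item, secondary=order_items, lazy="joined")

class User(Base):
    __tablename__ = "users"
    id = Column(Integer, primary_key=True)
    orders = relationship(Order, lazy="joined")

# querying User joins users -> orders -> order_items -> items in one SELECT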
2535f4b143a2aae1e9d18bda89ee255dba10c14520bfaf0cadd6a68d401546d4
def test_uselist_false_warning(self): 'test that multiple rows received by a\n uselist=False raises a warning.' (User, users, orders, Order) = (self.classes.User, self.tables.users, self.tables.orders, self.classes.Order) self.mapper_registry.map_imperatively(User, users, properties={'order': relationship(Order, uselist=False)}) self.mapper_registry.map_imperatively(Order, orders) s = fixture_session() assert_raises(sa.exc.SAWarning, s.query(User).options(joinedload(User.order)).all)
test that multiple rows received by a uselist=False raises a warning.
test/orm/test_eager_relations.py
test_uselist_false_warning
ToGoBananas/sqlalchemy
5,383
python
def test_uselist_false_warning(self): 'test that multiple rows received by a\n uselist=False raises a warning.' (User, users, orders, Order) = (self.classes.User, self.tables.users, self.tables.orders, self.classes.Order) self.mapper_registry.map_imperatively(User, users, properties={'order': relationship(Order, uselist=False)}) self.mapper_registry.map_imperatively(Order, orders) s = fixture_session() assert_raises(sa.exc.SAWarning, s.query(User).options(joinedload(User.order)).all)
def test_uselist_false_warning(self): 'test that multiple rows received by a\n uselist=False raises a warning.' (User, users, orders, Order) = (self.classes.User, self.tables.users, self.tables.orders, self.classes.Order) self.mapper_registry.map_imperatively(User, users, properties={'order': relationship(Order, uselist=False)}) self.mapper_registry.map_imperatively(Order, orders) s = fixture_session() assert_raises(sa.exc.SAWarning, s.query(User).options(joinedload(User.order)).all)<|docstring|>test that multiple rows received by a uselist=False raises a warning.<|endoftext|>
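test_uselist_false_warning covers the scalar-relationship hazard: when a joined eager load returns multiple child rows for a relationship declared uselist=False, SQLAlchemy emits SAWarning instead of silently discarding rows. A hedged way to observe it, assuming a User.order mapping declared uselist=False and a user that actually owns several orders:

import warnings
from sqlalchemy import exc as sa_exc
from sqlalchemy.orm import joinedload

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    session.query(User).options(joinedload(User.order)).all()
assert any(issubclass(w.category, sa_exc.SAWarning) for w in caught)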
e32c66af17d40c07a30adc8a351abe170b60a0666609b0bd25afecf99ec9f0ea
def test_against_select(self): 'test eager loading of a mapper which is against a select' (users, items, order_items, orders, Item, User, Order) = (self.tables.users, self.tables.items, self.tables.order_items, self.tables.orders, self.classes.Item, self.classes.User, self.classes.Order) s = sa.select(orders).where((orders.c.isopen == 1)).alias('openorders') self.mapper_registry.map_imperatively(Order, s, properties={'user': relationship(User, lazy='joined')}) self.mapper_registry.map_imperatively(User, users) self.mapper_registry.map_imperatively(Item, items) q = fixture_session().query(Order) eq_([Order(id=3, user=User(id=7)), Order(id=4, user=User(id=9))], q.all()) q = q.select_from(s.join(order_items).join(items)).filter((~ Item.id.in_([1, 2, 5]))) eq_([Order(id=3, user=User(id=7))], q.all())
test eager loading of a mapper which is against a select
test/orm/test_eager_relations.py
test_against_select
ToGoBananas/sqlalchemy
5,383
python
def test_against_select(self): (users, items, order_items, orders, Item, User, Order) = (self.tables.users, self.tables.items, self.tables.order_items, self.tables.orders, self.classes.Item, self.classes.User, self.classes.Order) s = sa.select(orders).where((orders.c.isopen == 1)).alias('openorders') self.mapper_registry.map_imperatively(Order, s, properties={'user': relationship(User, lazy='joined')}) self.mapper_registry.map_imperatively(User, users) self.mapper_registry.map_imperatively(Item, items) q = fixture_session().query(Order) eq_([Order(id=3, user=User(id=7)), Order(id=4, user=User(id=9))], q.all()) q = q.select_from(s.join(order_items).join(items)).filter((~ Item.id.in_([1, 2, 5]))) eq_([Order(id=3, user=User(id=7))], q.all())
def test_against_select(self): (users, items, order_items, orders, Item, User, Order) = (self.tables.users, self.tables.items, self.tables.order_items, self.tables.orders, self.classes.Item, self.classes.User, self.classes.Order) s = sa.select(orders).where((orders.c.isopen == 1)).alias('openorders') self.mapper_registry.map_imperatively(Order, s, properties={'user': relationship(User, lazy='joined')}) self.mapper_registry.map_imperatively(User, users) self.mapper_registry.map_imperatively(Item, items) q = fixture_session().query(Order) eq_([Order(id=3, user=User(id=7)), Order(id=4, user=User(id=9))], q.all()) q = q.select_from(s.join(order_items).join(items)).filter((~ Item.id.in_([1, 2, 5]))) eq_([Order(id=3, user=User(id=7))], q.all())<|docstring|>test eager loading of a mapper which is against a select<|endoftext|>
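test_against_select maps Order not to the orders table but to a filtered SELECT of it, and joined eager loading still operates through that alias. The core move, imperatively, assuming unmapped User/Order classes and the users/orders tables from the first sketch plus a fresh registry:

import sqlalchemy as sa
from sqlalchemy.orm import registry, relationship

openorders = sa.select(orders).where(orders.c.isopen == 1).alias("openorders")

reg = registry()
reg.map_imperatively(User, users)
reg.map_imperatively(
    Order,
    openorders,  # the class is mapped against the alias, not the table
    properties={"user": relationship(User, lazy="joined")},
)
# session.query(Order) now only ever sees open orders, with the owning
# User joined in eagerly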
5ffb3af899506fe53219b5fd798895dd6951bb516bf532da89c4cfa6e27314c4
def test_aliasing(self): 'test that eager loading uses aliases to insulate the eager\n load from regular criterion against those tables.' (Address, addresses, users, User) = (self.classes.Address, self.tables.addresses, self.tables.users, self.classes.User) self.mapper_registry.map_imperatively(User, users, properties=dict(addresses=relationship(self.mapper_registry.map_imperatively(Address, addresses), lazy='joined', order_by=addresses.c.id))) q = fixture_session().query(User) result = q.filter((addresses.c.email_address == '[email protected]')).filter((Address.user_id == User.id)).order_by(User.id) eq_(self.static.user_address_result[1:2], result.all())
test that eager loading uses aliases to insulate the eager load from regular criterion against those tables.
test/orm/test_eager_relations.py
test_aliasing
ToGoBananas/sqlalchemy
5,383
python
def test_aliasing(self): 'test that eager loading uses aliases to insulate the eager\n load from regular criterion against those tables.' (Address, addresses, users, User) = (self.classes.Address, self.tables.addresses, self.tables.users, self.classes.User) self.mapper_registry.map_imperatively(User, users, properties=dict(addresses=relationship(self.mapper_registry.map_imperatively(Address, addresses), lazy='joined', order_by=addresses.c.id))) q = fixture_session().query(User) result = q.filter((addresses.c.email_address == '[email protected]')).filter((Address.user_id == User.id)).order_by(User.id) eq_(self.static.user_address_result[1:2], result.all())
def test_aliasing(self): 'test that eager loading uses aliases to insulate the eager\n load from regular criterion against those tables.' (Address, addresses, users, User) = (self.classes.Address, self.tables.addresses, self.tables.users, self.classes.User) self.mapper_registry.map_imperatively(User, users, properties=dict(addresses=relationship(self.mapper_registry.map_imperatively(Address, addresses), lazy='joined', order_by=addresses.c.id))) q = fixture_session().query(User) result = q.filter((addresses.c.email_address == '[email protected]')).filter((Address.user_id == User.id)).order_by(User.id) eq_(self.static.user_address_result[1:2], result.all())<|docstring|>test that eager loading uses aliases to insulate the eager load from regular criterion against those tables.<|endoftext|>
de1d8425cf20d7bc97f2cf947a266637103f61e40f1ec3740f27f60c005f7126
@testing.combinations((True, 'score'), (True, None), (False, None)) def test_label_anonymizing(self, labeled, labelname): "Eager loading works with subqueries with labels,\n\n Even if an explicit labelname which conflicts with a label on the\n parent.\n\n There's not much reason a column_property() would ever need to have a\n label of a specific name (and they don't even need labels these days),\n unless you'd like the name to line up with a name that you may be\n using for a straight textual statement used for loading instances of\n that type.\n\n " (tags_table, users_table) = (self.tables.tags_table, self.tables.users_table) class User(fixtures.ComparableEntity): @property def prop_score(self): return sum([tag.prop_score for tag in self.tags]) class Tag(fixtures.ComparableEntity): @property def prop_score(self): return (self.score1 * self.score2) tag_score = (tags_table.c.score1 * tags_table.c.score2) user_score = sa.select(sa.func.sum((tags_table.c.score1 * tags_table.c.score2))).where((tags_table.c.user_id == users_table.c.id)) if labeled: tag_score = tag_score.label(labelname) user_score = user_score.label(labelname) else: user_score = user_score.scalar_subquery() self.mapper_registry.map_imperatively(Tag, tags_table, properties={'query_score': sa.orm.column_property(tag_score)}) self.mapper_registry.map_imperatively(User, users_table, properties={'tags': relationship(Tag, backref='user', lazy='joined'), 'query_score': sa.orm.column_property(user_score)}) session = fixture_session() session.add(User(name='joe', tags=[Tag(score1=5.0, score2=3.0), Tag(score1=55.0, score2=1.0)])) session.add(User(name='bar', tags=[Tag(score1=5.0, score2=4.0), Tag(score1=50.0, score2=1.0), Tag(score1=15.0, score2=2.0)])) session.flush() session.expunge_all() for user in session.query(User).all(): eq_(user.query_score, user.prop_score) def go(): u = session.query(User).filter_by(name='joe').one() eq_(u.query_score, u.prop_score) self.assert_sql_count(testing.db, go, 1)
Eager loading works with subqueries with labels, Even if an explicit labelname which conflicts with a label on the parent. There's not much reason a column_property() would ever need to have a label of a specific name (and they don't even need labels these days), unless you'd like the name to line up with a name that you may be using for a straight textual statement used for loading instances of that type.
test/orm/test_eager_relations.py
test_label_anonymizing
ToGoBananas/sqlalchemy
5,383
python
@testing.combinations((True, 'score'), (True, None), (False, None)) def test_label_anonymizing(self, labeled, labelname): "Eager loading works with subqueries with labels,\n\n Even if an explicit labelname which conflicts with a label on the\n parent.\n\n There's not much reason a column_property() would ever need to have a\n label of a specific name (and they don't even need labels these days),\n unless you'd like the name to line up with a name that you may be\n using for a straight textual statement used for loading instances of\n that type.\n\n " (tags_table, users_table) = (self.tables.tags_table, self.tables.users_table) class User(fixtures.ComparableEntity): @property def prop_score(self): return sum([tag.prop_score for tag in self.tags]) class Tag(fixtures.ComparableEntity): @property def prop_score(self): return (self.score1 * self.score2) tag_score = (tags_table.c.score1 * tags_table.c.score2) user_score = sa.select(sa.func.sum((tags_table.c.score1 * tags_table.c.score2))).where((tags_table.c.user_id == users_table.c.id)) if labeled: tag_score = tag_score.label(labelname) user_score = user_score.label(labelname) else: user_score = user_score.scalar_subquery() self.mapper_registry.map_imperatively(Tag, tags_table, properties={'query_score': sa.orm.column_property(tag_score)}) self.mapper_registry.map_imperatively(User, users_table, properties={'tags': relationship(Tag, backref='user', lazy='joined'), 'query_score': sa.orm.column_property(user_score)}) session = fixture_session() session.add(User(name='joe', tags=[Tag(score1=5.0, score2=3.0), Tag(score1=55.0, score2=1.0)])) session.add(User(name='bar', tags=[Tag(score1=5.0, score2=4.0), Tag(score1=50.0, score2=1.0), Tag(score1=15.0, score2=2.0)])) session.flush() session.expunge_all() for user in session.query(User).all(): eq_(user.query_score, user.prop_score) def go(): u = session.query(User).filter_by(name='joe').one() eq_(u.query_score, u.prop_score) self.assert_sql_count(testing.db, go, 1)
@testing.combinations((True, 'score'), (True, None), (False, None)) def test_label_anonymizing(self, labeled, labelname): "Eager loading works with subqueries with labels,\n\n Even if an explicit labelname which conflicts with a label on the\n parent.\n\n There's not much reason a column_property() would ever need to have a\n label of a specific name (and they don't even need labels these days),\n unless you'd like the name to line up with a name that you may be\n using for a straight textual statement used for loading instances of\n that type.\n\n " (tags_table, users_table) = (self.tables.tags_table, self.tables.users_table) class User(fixtures.ComparableEntity): @property def prop_score(self): return sum([tag.prop_score for tag in self.tags]) class Tag(fixtures.ComparableEntity): @property def prop_score(self): return (self.score1 * self.score2) tag_score = (tags_table.c.score1 * tags_table.c.score2) user_score = sa.select(sa.func.sum((tags_table.c.score1 * tags_table.c.score2))).where((tags_table.c.user_id == users_table.c.id)) if labeled: tag_score = tag_score.label(labelname) user_score = user_score.label(labelname) else: user_score = user_score.scalar_subquery() self.mapper_registry.map_imperatively(Tag, tags_table, properties={'query_score': sa.orm.column_property(tag_score)}) self.mapper_registry.map_imperatively(User, users_table, properties={'tags': relationship(Tag, backref='user', lazy='joined'), 'query_score': sa.orm.column_property(user_score)}) session = fixture_session() session.add(User(name='joe', tags=[Tag(score1=5.0, score2=3.0), Tag(score1=55.0, score2=1.0)])) session.add(User(name='bar', tags=[Tag(score1=5.0, score2=4.0), Tag(score1=50.0, score2=1.0), Tag(score1=15.0, score2=2.0)])) session.flush() session.expunge_all() for user in session.query(User).all(): eq_(user.query_score, user.prop_score) def go(): u = session.query(User).filter_by(name='joe').one() eq_(u.query_score, u.prop_score) self.assert_sql_count(testing.db, go, 1)<|docstring|>Eager loading works with subqueries with labels, Even if an explicit labelname which conflicts with a label on the parent. There's not much reason a column_property() would ever need to have a label of a specific name (and they don't even need labels these days), unless you'd like the name to line up with a name that you may be using for a straight textual statement used for loading instances of that type.<|endoftext|>
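test_label_anonymizing stresses column_property() expressions whose explicit labels collide between the parent and the eagerly joined child. A reduced declarative sketch of the two clashing properties; the column names are illustrative:

import sqlalchemy as sa
from sqlalchemy import Column, Integer, Float, ForeignKey
from sqlalchemy.orm import declarative_base, relationship, column_property

Base = declarative_base()

class Tag(Base):
    __tablename__ = "tags"
    id = Column(Integer, primary_key=True)
    user_id = Column(Integer, ForeignKey("users.id"))
    score1 = Column(Float)
    score2 = Column(Float)
    # same label name as the User property below, on purpose
    query_score = column_property((score1 * score2).label("score"))

class User(Base):
    __tablename__ = "users"
    id = Column(Integer, primary_key=True)
    tags = relationship(Tag, lazy="joined")
    query_score = column_property(
        sa.select(sa.func.sum(Tag.score1 * Tag.score2))
        .where(Tag.user_id == id)
        .scalar_subquery()
        .label("score")
    )

# the joined eager load anonymizes the clashing "score" labels instead of
# letting one column shadow the other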
5a7c5d6f60f7395b6b610938e023cbbae6e1d847489b07a27b99a932e59823db
def test_deep_options_2(self): 'test (joined|subquery)load_all() options' (User, Order, Item) = self.classes('User', 'Order', 'Item') sess = fixture_session() result = sess.query(User).order_by(User.id).options(sa.orm.joinedload(User.orders).joinedload(Order.items).joinedload(Item.keywords)).all() def go(): result[0].orders[1].items[0].keywords[1] self.sql_count_(0, go) sess = fixture_session() result = sess.query(User).options(sa.orm.subqueryload(User.orders).subqueryload(Order.items).subqueryload(Item.keywords)).all() def go(): result[0].orders[1].items[0].keywords[1] self.sql_count_(0, go)
test (joined|subquery)load_all() options
test/orm/test_eager_relations.py
test_deep_options_2
ToGoBananas/sqlalchemy
5383
python
def test_deep_options_2(self): (User, Order, Item) = self.classes('User', 'Order', 'Item') sess = fixture_session() result = sess.query(User).order_by(User.id).options(sa.orm.joinedload(User.orders).joinedload(Order.items).joinedload(Item.keywords)).all() def go(): result[0].orders[1].items[0].keywords[1] self.sql_count_(0, go) sess = fixture_session() result = sess.query(User).options(sa.orm.subqueryload(User.orders).subqueryload(Order.items).subqueryload(Item.keywords)).all() def go(): result[0].orders[1].items[0].keywords[1] self.sql_count_(0, go)
def test_deep_options_2(self): (User, Order, Item) = self.classes('User', 'Order', 'Item') sess = fixture_session() result = sess.query(User).order_by(User.id).options(sa.orm.joinedload(User.orders).joinedload(Order.items).joinedload(Item.keywords)).all() def go(): result[0].orders[1].items[0].keywords[1] self.sql_count_(0, go) sess = fixture_session() result = sess.query(User).options(sa.orm.subqueryload(User.orders).subqueryload(Order.items).subqueryload(Item.keywords)).all() def go(): result[0].orders[1].items[0].keywords[1] self.sql_count_(0, go)<|docstring|>test (joined|subquery)load_all() options<|endoftext|>
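The chained loader-option calls in this test generalize as below; a self-contained sketch with hypothetical declarative models (class names mirror the test, the schema itself is invented here).

import sqlalchemy as sa
from sqlalchemy import orm

Base = orm.declarative_base()

class Keyword(Base):
    __tablename__ = 'keywords'
    id = sa.Column(sa.Integer, primary_key=True)
    item_id = sa.Column(sa.ForeignKey('items.id'))

class Item(Base):
    __tablename__ = 'items'
    id = sa.Column(sa.Integer, primary_key=True)
    order_id = sa.Column(sa.ForeignKey('orders.id'))
    keywords = orm.relationship(Keyword)

class Order(Base):
    __tablename__ = 'orders'
    id = sa.Column(sa.Integer, primary_key=True)
    user_id = sa.Column(sa.ForeignKey('users.id'))
    items = orm.relationship(Item)

class User(Base):
    __tablename__ = 'users'
    id = sa.Column(sa.Integer, primary_key=True)
    orders = orm.relationship(Order)

# Each chained .joinedload() extends the previous one, so the whole
# User -> orders -> items -> keywords path is eagerly loaded at once.
stmt = sa.select(User).options(
    orm.joinedload(User.orders).joinedload(Order.items).joinedload(Item.keywords)
)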
f07881f35f0883601b684e209f3dc1fe0a9115fbed1954a752a4f57a02e92cc2
def assert_vws_failure(response: Response, status_code: int, result_code: ResultCodes) -> None: '\n Assert that a VWS failure response is as expected.\n\n Args:\n response: The response returned by a request to VWS.\n status_code: The expected status code of the response.\n result_code: The expected result code of the response.\n\n Raises:\n AssertionError: The response is not in the expected VWS error format\n for the given codes.\n ' assert (response.json().keys() == {'transaction_id', 'result_code'}) assert_vws_response(response=response, status_code=status_code, result_code=result_code)
Assert that a VWS failure response is as expected. Args: response: The response returned by a request to VWS. status_code: The expected status code of the response. result_code: The expected result code of the response. Raises: AssertionError: The response is not in the expected VWS error format for the given codes.
tests/mock_vws/utils/assertions.py
assert_vws_failure
admdev8/vws-python-mock
1
python
def assert_vws_failure(response: Response, status_code: int, result_code: ResultCodes) -> None: '\n Assert that a VWS failure response is as expected.\n\n Args:\n response: The response returned by a request to VWS.\n status_code: The expected status code of the response.\n result_code: The expected result code of the response.\n\n Raises:\n AssertionError: The response is not in the expected VWS error format\n for the given codes.\n ' assert (response.json().keys() == {'transaction_id', 'result_code'}) assert_vws_response(response=response, status_code=status_code, result_code=result_code)
def assert_vws_failure(response: Response, status_code: int, result_code: ResultCodes) -> None: '\n Assert that a VWS failure response is as expected.\n\n Args:\n response: The response returned by a request to VWS.\n status_code: The expected status code of the response.\n result_code: The expected result code of the response.\n\n Raises:\n AssertionError: The response is not in the expected VWS error format\n for the given codes.\n ' assert (response.json().keys() == {'transaction_id', 'result_code'}) assert_vws_response(response=response, status_code=status_code, result_code=result_code)<|docstring|>Assert that a VWS failure response is as expected. Args: response: The response returned by a request to VWS. status_code: The expected status code of the response. result_code: The expected result code of the response. Raises: AssertionError: The response is not in the expected VWS error format for the given codes.<|endoftext|>
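Hypothetical usage inside a test body; the URL is a placeholder and the request is deliberately unauthenticated, assuming ResultCodes is the enum imported by this module.

import requests
from http import HTTPStatus

# Placeholder request with no VWS auth headers, so the service rejects it.
response = requests.get('https://vws.vuforia.com/targets/some-target-id')
assert_vws_failure(
    response=response,
    status_code=HTTPStatus.UNAUTHORIZED,
    result_code=ResultCodes.AUTHENTICATION_FAILURE,
)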
e48106ab7d0345ee8e3844972f16db483c7d765281aa885f18c7e8bb282f340d
def assert_valid_date_header(response: Response) -> None: '\n Assert that a response includes a `Date` header which is within two minutes\n of "now".\n\n Args:\n response: The response returned by a request to a Vuforia service.\n\n Raises:\n AssertionError: The response does not include a `Date` header which is\n within two minutes of "now".\n ' date_response = response.headers['Date'] date_from_response = email.utils.parsedate(date_response) assert (date_from_response is not None) (year, month, day, hour, minute, second, _, _, _) = date_from_response gmt = ZoneInfo('GMT') datetime_from_response = datetime.datetime(year=year, month=month, day=day, hour=hour, minute=minute, second=second, tzinfo=gmt) current_date = datetime.datetime.now(tz=gmt) time_difference = abs((current_date - datetime_from_response)) assert (time_difference < datetime.timedelta(minutes=2))
Assert that a response includes a `Date` header which is within two minutes of "now". Args: response: The response returned by a request to a Vuforia service. Raises: AssertionError: The response does not include a `Date` header which is within two minutes of "now".
tests/mock_vws/utils/assertions.py
assert_valid_date_header
admdev8/vws-python-mock
1
python
def assert_valid_date_header(response: Response) -> None: '\n Assert that a response includes a `Date` header which is within two minutes\n of "now".\n\n Args:\n response: The response returned by a request to a Vuforia service.\n\n Raises:\n AssertionError: The response does not include a `Date` header which is\n within two minutes of "now".\n ' date_response = response.headers['Date'] date_from_response = email.utils.parsedate(date_response) assert (date_from_response is not None) (year, month, day, hour, minute, second, _, _, _) = date_from_response gmt = ZoneInfo('GMT') datetime_from_response = datetime.datetime(year=year, month=month, day=day, hour=hour, minute=minute, second=second, tzinfo=gmt) current_date = datetime.datetime.now(tz=gmt) time_difference = abs((current_date - datetime_from_response)) assert (time_difference < datetime.timedelta(minutes=2))
def assert_valid_date_header(response: Response) -> None: '\n Assert that a response includes a `Date` header which is within two minutes\n of "now".\n\n Args:\n response: The response returned by a request to a Vuforia service.\n\n Raises:\n AssertionError: The response does not include a `Date` header which is\n within two minutes of "now".\n ' date_response = response.headers['Date'] date_from_response = email.utils.parsedate(date_response) assert (date_from_response is not None) (year, month, day, hour, minute, second, _, _, _) = date_from_response gmt = ZoneInfo('GMT') datetime_from_response = datetime.datetime(year=year, month=month, day=day, hour=hour, minute=minute, second=second, tzinfo=gmt) current_date = datetime.datetime.now(tz=gmt) time_difference = abs((current_date - datetime_from_response)) assert (time_difference < datetime.timedelta(minutes=2))<|docstring|>Assert that a response includes a `Date` header which is within two minutes of "now". Args: response: The response returned by a request to a Vuforia service. Raises: AssertionError: The response does not include a `Date` header which is within two minutes of "now".<|endoftext|>

a894890705330d786acceb0932d8661e0232c427f67f4739403ec45eb5052d68
def assert_valid_transaction_id(response: Response) -> None: '\n Assert that a response includes a valid transaction ID.\n\n Args:\n response: The response returned by a request to a Vuforia service.\n\n Raises:\n AssertionError: The response does not include a valid transaction ID.\n ' transaction_id = response.json()['transaction_id'] assert (len(transaction_id) == 32) assert all(((char in hexdigits) for char in transaction_id))
Assert that a response includes a valid transaction ID. Args: response: The response returned by a request to a Vuforia service. Raises: AssertionError: The response does not include a valid transaction ID.
tests/mock_vws/utils/assertions.py
assert_valid_transaction_id
admdev8/vws-python-mock
1
python
def assert_valid_transaction_id(response: Response) -> None: '\n Assert that a response includes a valid transaction ID.\n\n Args:\n response: The response returned by a request to a Vuforia service.\n\n Raises:\n AssertionError: The response does not include a valid transaction ID.\n ' transaction_id = response.json()['transaction_id'] assert (len(transaction_id) == 32) assert all(((char in hexdigits) for char in transaction_id))
def assert_valid_transaction_id(response: Response) -> None: '\n Assert that a response includes a valid transaction ID.\n\n Args:\n response: The response returned by a request to a Vuforia service.\n\n Raises:\n AssertionError: The response does not include a valid transaction ID.\n ' transaction_id = response.json()['transaction_id'] assert (len(transaction_id) == 32) assert all(((char in hexdigits) for char in transaction_id))<|docstring|>Assert that a response includes a valid transaction ID. Args: response: The response returned by a request to a Vuforia service. Raises: AssertionError: The response does not include a valid transaction ID.<|endoftext|>
10b0490a4169dd8f9b89991c3cb3da15fdf48df985517c2c53dcadaef64dd8b6
def assert_json_separators(response: Response) -> None: '\n Assert that a JSON response is formatted correctly.\n\n Args:\n response: The response returned by a request to a Vuforia service.\n\n Raises:\n AssertionError: The response JSON is not formatted correctly.\n ' assert (response.text == json.dumps(obj=response.json(), separators=(',', ':')))
Assert that a JSON response is formatted correctly. Args: response: The response returned by a request to a Vuforia service. Raises: AssertionError: The response JSON is not formatted correctly.
tests/mock_vws/utils/assertions.py
assert_json_separators
admdev8/vws-python-mock
1
python
def assert_json_separators(response: Response) -> None: '\n Assert that a JSON response is formatted correctly.\n\n Args:\n response: The response returned by a request to a Vuforia service.\n\n Raises:\n AssertionError: The response JSON is not formatted correctly.\n ' assert (response.text == json.dumps(obj=response.json(), separators=(',', ':')))
def assert_json_separators(response: Response) -> None: '\n Assert that a JSON response is formatted correctly.\n\n Args:\n response: The response returned by a request to a Vuforia service.\n\n Raises:\n AssertionError: The response JSON is not formatted correctly.\n ' assert (response.text == json.dumps(obj=response.json(), separators=(',', ':')))<|docstring|>Assert that a JSON response is formatted correctly. Args: response: The response returned by a request to a Vuforia service. Raises: AssertionError: The response JSON is not formatted correctly.<|endoftext|>
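The separators argument is what makes the formatting check above strict; a quick runnable illustration of the compact form it expects:

import json

payload = {'result_code': 'Success', 'transaction_id': '0' * 32}
compact = json.dumps(payload, separators=(',', ':'))
default = json.dumps(payload)
print(compact)  # {"result_code":"Success","transaction_id":"000...0"} - no spaces
print(default)  # {"result_code": "Success", ...} - spaces after ':' and ','
assert compact != default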
d2af5adfd891989110bd50ce48fd5a3d32b7b47e1817f9d6fc6b8ce5f087b874
def assert_vws_response(response: Response, status_code: int, result_code: ResultCodes) -> None: '\n Assert that a VWS response is as expected, at least in part.\n\n https://library.vuforia.com/articles/Solution/How-To-Use-the-Vuforia-Web-Services-API.html#How-To-Interperete-VWS-API-Result-Codes\n implies that the expected status code can be worked out from the result\n code. However, this is not the case as the real results differ from the\n documentation.\n\n For example, it is possible to get a "Fail" result code and a 400 error.\n\n Args:\n response: The response returned by a request to VWS.\n status_code: The expected status code of the response.\n result_code: The expected result code of the response.\n\n Raises:\n AssertionError: The response is not in the expected VWS format for the\n given codes.\n ' assert (response.status_code == status_code) response_result_code = response.json()['result_code'] assert (response_result_code == result_code.value) response_header_keys = {'Connection', 'Content-Length', 'Content-Type', 'Date', 'Server'} assert (response.headers.keys() == response_header_keys) assert (response.headers['Connection'] == 'keep-alive') assert (response.headers['Content-Length'] == str(len(response.text))) assert (response.headers['Content-Type'] == 'application/json') assert (response.headers['Server'] == 'nginx') assert_json_separators(response=response) assert_valid_transaction_id(response=response) assert_valid_date_header(response=response)
Assert that a VWS response is as expected, at least in part. https://library.vuforia.com/articles/Solution/How-To-Use-the-Vuforia-Web-Services-API.html#How-To-Interperete-VWS-API-Result-Codes implies that the expected status code can be worked out from the result code. However, this is not the case as the real results differ from the documentation. For example, it is possible to get a "Fail" result code and a 400 error. Args: response: The response returned by a request to VWS. status_code: The expected status code of the response. result_code: The expected result code of the response. Raises: AssertionError: The response is not in the expected VWS format for the given codes.
tests/mock_vws/utils/assertions.py
assert_vws_response
admdev8/vws-python-mock
1
python
def assert_vws_response(response: Response, status_code: int, result_code: ResultCodes) -> None: '\n Assert that a VWS response is as expected, at least in part.\n\n https://library.vuforia.com/articles/Solution/How-To-Use-the-Vuforia-Web-Services-API.html#How-To-Interperete-VWS-API-Result-Codes\n implies that the expected status code can be worked out from the result\n code. However, this is not the case as the real results differ from the\n documentation.\n\n For example, it is possible to get a "Fail" result code and a 400 error.\n\n Args:\n response: The response returned by a request to VWS.\n status_code: The expected status code of the response.\n result_code: The expected result code of the response.\n\n Raises:\n AssertionError: The response is not in the expected VWS format for the\n given codes.\n ' assert (response.status_code == status_code) response_result_code = response.json()['result_code'] assert (response_result_code == result_code.value) response_header_keys = {'Connection', 'Content-Length', 'Content-Type', 'Date', 'Server'} assert (response.headers.keys() == response_header_keys) assert (response.headers['Connection'] == 'keep-alive') assert (response.headers['Content-Length'] == str(len(response.text))) assert (response.headers['Content-Type'] == 'application/json') assert (response.headers['Server'] == 'nginx') assert_json_separators(response=response) assert_valid_transaction_id(response=response) assert_valid_date_header(response=response)
def assert_vws_response(response: Response, status_code: int, result_code: ResultCodes) -> None: '\n Assert that a VWS response is as expected, at least in part.\n\n https://library.vuforia.com/articles/Solution/How-To-Use-the-Vuforia-Web-Services-API.html#How-To-Interperete-VWS-API-Result-Codes\n implies that the expected status code can be worked out from the result\n code. However, this is not the case as the real results differ from the\n documentation.\n\n For example, it is possible to get a "Fail" result code and a 400 error.\n\n Args:\n response: The response returned by a request to VWS.\n status_code: The expected status code of the response.\n result_code: The expected result code of the response.\n\n Raises:\n AssertionError: The response is not in the expected VWS format for the\n given codes.\n ' assert (response.status_code == status_code) response_result_code = response.json()['result_code'] assert (response_result_code == result_code.value) response_header_keys = {'Connection', 'Content-Length', 'Content-Type', 'Date', 'Server'} assert (response.headers.keys() == response_header_keys) assert (response.headers['Connection'] == 'keep-alive') assert (response.headers['Content-Length'] == str(len(response.text))) assert (response.headers['Content-Type'] == 'application/json') assert (response.headers['Server'] == 'nginx') assert_json_separators(response=response) assert_valid_transaction_id(response=response) assert_valid_date_header(response=response)<|docstring|>Assert that a VWS response is as expected, at least in part. https://library.vuforia.com/articles/Solution/How-To-Use-the-Vuforia-Web-Services-API.html#How-To-Interperete-VWS-API-Result-Codes implies that the expected status code can be worked out from the result code. However, this is not the case as the real results differ from the documentation. For example, it is possible to get a "Fail" result code and a 400 error. Args: response: The response returned by a request to VWS. status_code: The expected status code of the response. result_code: The expected result code of the response. Raises: AssertionError: The response is not in the expected VWS format for the given codes.<|endoftext|>
dcb4f501d2fb47c9f26bb3f5f9abf6e51137be49aa5aacd714e89c7ac6431294
def assert_query_success(response: Response) -> None: '\n Assert that the given response is a success response for performing an\n image recognition query.\n\n Raises:\n AssertionError: The given response is not a valid success response\n for performing an image recognition query.\n ' assert (response.status_code == HTTPStatus.OK) assert (response.json().keys() == {'result_code', 'results', 'query_id'}) query_id = response.json()['query_id'] assert (len(query_id) == 32) assert all(((char in hexdigits) for char in query_id)) assert (response.json()['result_code'] == 'Success') assert_valid_date_header(response=response) copied_response_headers = dict(copy.deepcopy(response.headers)) copied_response_headers.pop('Date') content_encoding = copied_response_headers.pop('Content-Encoding', None) assert (content_encoding in (None, 'gzip')) expected_response_header_not_chunked = {'Connection': 'keep-alive', 'Content-Length': str(response.raw.tell()), 'Content-Type': 'application/json', 'Server': 'nginx'} expected_response_header_chunked = {'Connection': 'keep-alive', 'Content-Type': 'application/json', 'Server': 'nginx', 'transfer-encoding': 'chunked'} assert (copied_response_headers in (expected_response_header_chunked, expected_response_header_not_chunked))
Assert that the given response is a success response for performing an image recognition query. Raises: AssertionError: The given response is not a valid success response for performing an image recognition query.
tests/mock_vws/utils/assertions.py
assert_query_success
admdev8/vws-python-mock
1
python
def assert_query_success(response: Response) -> None: '\n Assert that the given response is a success response for performing an\n image recognition query.\n\n Raises:\n AssertionError: The given response is not a valid success response\n for performing an image recognition query.\n ' assert (response.status_code == HTTPStatus.OK) assert (response.json().keys() == {'result_code', 'results', 'query_id'}) query_id = response.json()['query_id'] assert (len(query_id) == 32) assert all(((char in hexdigits) for char in query_id)) assert (response.json()['result_code'] == 'Success') assert_valid_date_header(response=response) copied_response_headers = dict(copy.deepcopy(response.headers)) copied_response_headers.pop('Date') content_encoding = copied_response_headers.pop('Content-Encoding', None) assert (content_encoding in (None, 'gzip')) expected_response_header_not_chunked = {'Connection': 'keep-alive', 'Content-Length': str(response.raw.tell()), 'Content-Type': 'application/json', 'Server': 'nginx'} expected_response_header_chunked = {'Connection': 'keep-alive', 'Content-Type': 'application/json', 'Server': 'nginx', 'transfer-encoding': 'chunked'} assert (copied_response_headers in (expected_response_header_chunked, expected_response_header_not_chunked))
def assert_query_success(response: Response) -> None: '\n Assert that the given response is a success response for performing an\n image recognition query.\n\n Raises:\n AssertionError: The given response is not a valid success response\n for performing an image recognition query.\n ' assert (response.status_code == HTTPStatus.OK) assert (response.json().keys() == {'result_code', 'results', 'query_id'}) query_id = response.json()['query_id'] assert (len(query_id) == 32) assert all(((char in hexdigits) for char in query_id)) assert (response.json()['result_code'] == 'Success') assert_valid_date_header(response=response) copied_response_headers = dict(copy.deepcopy(response.headers)) copied_response_headers.pop('Date') content_encoding = copied_response_headers.pop('Content-Encoding', None) assert (content_encoding in (None, 'gzip')) expected_response_header_not_chunked = {'Connection': 'keep-alive', 'Content-Length': str(response.raw.tell()), 'Content-Type': 'application/json', 'Server': 'nginx'} expected_response_header_chunked = {'Connection': 'keep-alive', 'Content-Type': 'application/json', 'Server': 'nginx', 'transfer-encoding': 'chunked'} assert (copied_response_headers in (expected_response_header_chunked, expected_response_header_not_chunked))<|docstring|>Assert that the given response is a success response for performing an image recognition query. Raises: AssertionError: The given response is not a valid success response for performing an image recognition query.<|endoftext|>
4d5a1f4dbdbec265bc8d98222602a41589b4ef848c9ebb172c685cd154d6fbcc
def assert_vwq_failure(response: Response, status_code: int, content_type: Optional[str]) -> None: '\n Assert that a VWQ failure response is as expected.\n\n Args:\n response: The response returned by a request to VWQ.\n content_type: The expected Content-Type header.\n status_code: The expected status code of the response.\n\n Raises:\n AssertionError: The response is not in the expected VWQ error format\n for the given codes.\n ' assert (response.status_code == status_code) response_header_keys = {'Connection', 'Content-Length', 'Date', 'Server'} if (status_code == HTTPStatus.INTERNAL_SERVER_ERROR): response_header_keys.add('Cache-Control') cache_control = 'must-revalidate,no-cache,no-store' assert (response.headers['Cache-Control'] == cache_control) if (content_type is not None): response_header_keys.add('Content-Type') assert (response.headers['Content-Type'] == content_type) if (status_code == HTTPStatus.UNAUTHORIZED): response_header_keys.add('WWW-Authenticate') assert (response.headers['WWW-Authenticate'] == 'VWS') assert (response.headers.keys() == response_header_keys) assert (response.headers['Connection'] == 'keep-alive') assert (response.headers['Content-Length'] == str(len(response.text))) assert_valid_date_header(response=response) assert (response.headers['Server'] == 'nginx')
Assert that a VWQ failure response is as expected. Args: response: The response returned by a request to VWQ. content_type: The expected Content-Type header. status_code: The expected status code of the response. Raises: AssertionError: The response is not in the expected VWQ error format for the given codes.
tests/mock_vws/utils/assertions.py
assert_vwq_failure
admdev8/vws-python-mock
1
python
def assert_vwq_failure(response: Response, status_code: int, content_type: Optional[str]) -> None: '\n Assert that a VWQ failure response is as expected.\n\n Args:\n response: The response returned by a request to VWQ.\n content_type: The expected Content-Type header.\n status_code: The expected status code of the response.\n\n Raises:\n AssertionError: The response is not in the expected VWQ error format\n for the given codes.\n ' assert (response.status_code == status_code) response_header_keys = {'Connection', 'Content-Length', 'Date', 'Server'} if (status_code == HTTPStatus.INTERNAL_SERVER_ERROR): response_header_keys.add('Cache-Control') cache_control = 'must-revalidate,no-cache,no-store' assert (response.headers['Cache-Control'] == cache_control) if (content_type is not None): response_header_keys.add('Content-Type') assert (response.headers['Content-Type'] == content_type) if (status_code == HTTPStatus.UNAUTHORIZED): response_header_keys.add('WWW-Authenticate') assert (response.headers['WWW-Authenticate'] == 'VWS') assert (response.headers.keys() == response_header_keys) assert (response.headers['Connection'] == 'keep-alive') assert (response.headers['Content-Length'] == str(len(response.text))) assert_valid_date_header(response=response) assert (response.headers['Server'] == 'nginx')
def assert_vwq_failure(response: Response, status_code: int, content_type: Optional[str]) -> None: '\n Assert that a VWQ failure response is as expected.\n\n Args:\n response: The response returned by a request to VWQ.\n content_type: The expected Content-Type header.\n status_code: The expected status code of the response.\n\n Raises:\n AssertionError: The response is not in the expected VWQ error format\n for the given codes.\n ' assert (response.status_code == status_code) response_header_keys = {'Connection', 'Content-Length', 'Date', 'Server'} if (status_code == HTTPStatus.INTERNAL_SERVER_ERROR): response_header_keys.add('Cache-Control') cache_control = 'must-revalidate,no-cache,no-store' assert (response.headers['Cache-Control'] == cache_control) if (content_type is not None): response_header_keys.add('Content-Type') assert (response.headers['Content-Type'] == content_type) if (status_code == HTTPStatus.UNAUTHORIZED): response_header_keys.add('WWW-Authenticate') assert (response.headers['WWW-Authenticate'] == 'VWS') assert (response.headers.keys() == response_header_keys) assert (response.headers['Connection'] == 'keep-alive') assert (response.headers['Content-Length'] == str(len(response.text))) assert_valid_date_header(response=response) assert (response.headers['Server'] == 'nginx')<|docstring|>Assert that a VWQ failure response is as expected. Args: response: The response returned by a request to VWQ. content_type: The expected Content-Type header. status_code: The expected status code of the response. Raises: AssertionError: The response is not in the expected VWQ error format for the given codes.<|endoftext|>
933fc0918b9357e7f848542b7d3d9c598e7462097ba6227d186ff34e976960bc
def shift1d(tensor: T, shift: int) -> T: '\n Shifts a Tensor to the left or right and pads with zeros.\n\n Args:\n tensor: Input Tensor [N×C×L]\n shift: the shift, negative for left shift, positive for right\n\n Returns:\n the shifted tensor, same size\n ' assert (tensor.ndimension() == 3) L = tensor.shape[2] pad = [(- min(shift, 0)), max(shift, 0)] out = F.pad(tensor, pad) out = out[:, :, pad[1]:(pad[1] + L)] return out.contiguous()
Shifts a Tensor to the left or right and pads with zeros. Args: tensor: Input Tensor [N×C×L] shift: the shift, negative for left shift, positive for right Returns: the shifted tensor, same size
thesis/functional.py
shift1d
morris-frank/unnamed-source-separation
2
python
def shift1d(tensor: T, shift: int) -> T: '\n Shifts a Tensor to the left or right and pads with zeros.\n\n Args:\n tensor: Input Tensor [N×C×L]\n shift: the shift, negative for left shift, positive for right\n\n Returns:\n the shifted tensor, same size\n ' assert (tensor.ndimension() == 3) L = tensor.shape[2] pad = [(- min(shift, 0)), max(shift, 0)] out = F.pad(tensor, pad) out = out[:, :, pad[1]:(pad[1] + L)] return out.contiguous()
def shift1d(tensor: T, shift: int) -> T: '\n Shifts a Tensor to the left or right and pads with zeros.\n\n Args:\n tensor: Input Tensor [N×C×L]\n shift: the shift, negative for left shift, positive for right\n\n Returns:\n the shifted tensor, same size\n ' assert (tensor.ndimension() == 3) L = tensor.shape[2] pad = [(- min(shift, 0)), max(shift, 0)] out = F.pad(tensor, pad) out = out[:, :, pad[1]:(pad[1] + L)] return out.contiguous()<|docstring|>Shifts a Tensor to the left or right and pads with zeros. Args: tensor: Input Tensor [N×C×L] shift: the shift, negative for left shift, positive for right Returns: the shifted tensor, same size<|endoftext|>
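A quick sketch of shift1d on a toy tensor, assuming the definition above (with torch.nn.functional imported as F) is in scope; the commented values follow directly from the padding-then-slicing logic.

import torch
import torch.nn.functional as F  # required by shift1d

x = torch.arange(6.0).reshape(1, 1, 6)  # N=1, C=1, L=6: [[[0, 1, 2, 3, 4, 5]]]
y_neg = shift1d(x, -2)  # tensor([[[0., 0., 0., 1., 2., 3.]]])
y_pos = shift1d(x, 2)   # tensor([[[2., 3., 4., 5., 0., 0.]]])
assert y_neg.shape == x.shape == y_pos.shape  # length is preserved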
0b52f165cb560c1098eebd4931511a934d006c76ec08b62f44d13f89a70635cc
def normalize(waveform: T): '\n Removes any DC offset and scales to range [-1, 1]\n\n Args:\n waveform:\n\n Returns:\n scaled waveform\n ' waveform = (waveform - waveform.mean(dim=(- 1), keepdim=True)) waveform = F.normalize(waveform, p=float('inf'), dim=(- 1)) return waveform
Removes any DC offset and scales to range [-1, 1] Args: waveform: Returns: scaled waveform
thesis/functional.py
normalize
morris-frank/unnamed-source-separation
2
python
def normalize(waveform: T): '\n Removes any DC offset and scales to range [-1, 1]\n\n Args:\n waveform:\n\n Returns:\n scaled waveform\n ' waveform = (waveform - waveform.mean(dim=(- 1), keepdim=True)) waveform = F.normalize(waveform, p=float('inf'), dim=(- 1)) return waveform
def normalize(waveform: T): '\n Removes any DC offset and scales to range [-1, 1]\n\n Args:\n waveform:\n\n Returns:\n scaled waveform\n ' waveform = (waveform - waveform.mean(dim=(- 1), keepdim=True)) waveform = F.normalize(waveform, p=float('inf'), dim=(- 1)) return waveform<|docstring|>Removes any DC offset and scales to range [-1, 1] Args: waveform: Returns: scaled waveform<|endoftext|>
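Worked numbers for normalize, assuming the definition above is in scope: the mean (DC offset) is subtracted, then the result is divided by its largest absolute value.

import torch

wav = torch.tensor([[1.0, 2.0, 3.0, 4.0]])  # DC offset of 2.5
out = normalize(wav)
# Mean-centred: [-1.5, -0.5, 0.5, 1.5]; inf-norm is 1.5, so the result is
# [-1.0, -1/3, 1/3, 1.0] - always inside [-1, 1].
print(out)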
ed3764ddafbfcc4c90cca487169988f14f01716328a29814d249d32a73d6a8c3
def apply(df: pd.DataFrame, paths: List[Tuple[(str, str)]], parameters: Optional[Dict[(Union[(str, Parameters)], Any)]]=None) -> pd.DataFrame: '\n Apply a filter on traces containing / not containing a path\n\n Parameters\n ----------\n df\n Dataframe\n paths\n Paths to filter on\n parameters\n Possible parameters of the algorithm, including:\n Parameters.CASE_ID_KEY -> Case ID column in the dataframe\n Parameters.ATTRIBUTE_KEY -> Attribute we want to filter\n Parameters.POSITIVE -> Specifies if the filter should be applied including traces (positive=True)\n or excluding traces (positive=False)\n Returns\n ----------\n df\n Filtered dataframe\n ' if (parameters is None): parameters = {} case_id_glue = exec_utils.get_param_value(Parameters.CASE_ID_KEY, parameters, CASE_CONCEPT_NAME) attribute_key = exec_utils.get_param_value(Parameters.ATTRIBUTE_KEY, parameters, DEFAULT_NAME_KEY) timestamp_key = exec_utils.get_param_value(Parameters.TIMESTAMP_KEY, parameters, DEFAULT_TIMESTAMP_KEY) positive = exec_utils.get_param_value(Parameters.POSITIVE, parameters, True) paths = [((path[0] + DEFAULT_VARIANT_SEP) + path[1]) for path in paths] df = df.sort_values([case_id_glue, timestamp_key]) filt_df = df[[case_id_glue, attribute_key]] filt_dif_shifted = filt_df.shift((- 1)) filt_dif_shifted.columns = [(str(col) + '_2') for col in filt_dif_shifted.columns] stacked_df = pd.concat([filt_df, filt_dif_shifted], axis=1) stacked_df = stacked_df[(stacked_df[case_id_glue] == stacked_df[(case_id_glue + '_2')])] stacked_df['@@path'] = ((stacked_df[attribute_key] + DEFAULT_VARIANT_SEP) + stacked_df[(attribute_key + '_2')]) stacked_df = stacked_df[stacked_df['@@path'].isin(paths)] i1 = df.set_index(case_id_glue).index i2 = stacked_df.set_index(case_id_glue).index if positive: ret = df[i1.isin(i2)] else: ret = df[(~ i1.isin(i2))] ret.attrs = (copy(df.attrs) if hasattr(df, 'attrs') else {}) return ret
Apply a filter on traces containing / not containing a path Parameters ---------- df Dataframe paths Paths to filter on parameters Possible parameters of the algorithm, including: Parameters.CASE_ID_KEY -> Case ID column in the dataframe Parameters.ATTRIBUTE_KEY -> Attribute we want to filter Parameters.POSITIVE -> Specifies if the filter should be applied including traces (positive=True) or excluding traces (positive=False) Returns ---------- df Filtered dataframe
ws2122-lspm/Lib/site-packages/pm4py/algo/filtering/pandas/paths/paths_filter.py
apply
Malekhy/ws2122-lspm
1
python
def apply(df: pd.DataFrame, paths: List[Tuple[(str, str)]], parameters: Optional[Dict[(Union[(str, Parameters)], Any)]]=None) -> pd.DataFrame: '\n Apply a filter on traces containing / not containing a path\n\n Parameters\n ----------\n df\n Dataframe\n paths\n Paths to filter on\n parameters\n Possible parameters of the algorithm, including:\n Parameters.CASE_ID_KEY -> Case ID column in the dataframe\n Parameters.ATTRIBUTE_KEY -> Attribute we want to filter\n Parameters.POSITIVE -> Specifies if the filter should be applied including traces (positive=True)\n or excluding traces (positive=False)\n Returns\n ----------\n df\n Filtered dataframe\n ' if (parameters is None): parameters = {} case_id_glue = exec_utils.get_param_value(Parameters.CASE_ID_KEY, parameters, CASE_CONCEPT_NAME) attribute_key = exec_utils.get_param_value(Parameters.ATTRIBUTE_KEY, parameters, DEFAULT_NAME_KEY) timestamp_key = exec_utils.get_param_value(Parameters.TIMESTAMP_KEY, parameters, DEFAULT_TIMESTAMP_KEY) positive = exec_utils.get_param_value(Parameters.POSITIVE, parameters, True) paths = [((path[0] + DEFAULT_VARIANT_SEP) + path[1]) for path in paths] df = df.sort_values([case_id_glue, timestamp_key]) filt_df = df[[case_id_glue, attribute_key]] filt_dif_shifted = filt_df.shift((- 1)) filt_dif_shifted.columns = [(str(col) + '_2') for col in filt_dif_shifted.columns] stacked_df = pd.concat([filt_df, filt_dif_shifted], axis=1) stacked_df = stacked_df[(stacked_df[case_id_glue] == stacked_df[(case_id_glue + '_2')])] stacked_df['@@path'] = ((stacked_df[attribute_key] + DEFAULT_VARIANT_SEP) + stacked_df[(attribute_key + '_2')]) stacked_df = stacked_df[stacked_df['@@path'].isin(paths)] i1 = df.set_index(case_id_glue).index i2 = stacked_df.set_index(case_id_glue).index if positive: ret = df[i1.isin(i2)] else: ret = df[(~ i1.isin(i2))] ret.attrs = (copy(df.attrs) if hasattr(df, 'attrs') else {}) return ret
def apply(df: pd.DataFrame, paths: List[Tuple[(str, str)]], parameters: Optional[Dict[(Union[(str, Parameters)], Any)]]=None) -> pd.DataFrame: '\n Apply a filter on traces containing / not containing a path\n\n Parameters\n ----------\n df\n Dataframe\n paths\n Paths to filter on\n parameters\n Possible parameters of the algorithm, including:\n Parameters.CASE_ID_KEY -> Case ID column in the dataframe\n Parameters.ATTRIBUTE_KEY -> Attribute we want to filter\n Parameters.POSITIVE -> Specifies if the filter should be applied including traces (positive=True)\n or excluding traces (positive=False)\n Returns\n ----------\n df\n Filtered dataframe\n ' if (parameters is None): parameters = {} case_id_glue = exec_utils.get_param_value(Parameters.CASE_ID_KEY, parameters, CASE_CONCEPT_NAME) attribute_key = exec_utils.get_param_value(Parameters.ATTRIBUTE_KEY, parameters, DEFAULT_NAME_KEY) timestamp_key = exec_utils.get_param_value(Parameters.TIMESTAMP_KEY, parameters, DEFAULT_TIMESTAMP_KEY) positive = exec_utils.get_param_value(Parameters.POSITIVE, parameters, True) paths = [((path[0] + DEFAULT_VARIANT_SEP) + path[1]) for path in paths] df = df.sort_values([case_id_glue, timestamp_key]) filt_df = df[[case_id_glue, attribute_key]] filt_dif_shifted = filt_df.shift((- 1)) filt_dif_shifted.columns = [(str(col) + '_2') for col in filt_dif_shifted.columns] stacked_df = pd.concat([filt_df, filt_dif_shifted], axis=1) stacked_df = stacked_df[(stacked_df[case_id_glue] == stacked_df[(case_id_glue + '_2')])] stacked_df['@@path'] = ((stacked_df[attribute_key] + DEFAULT_VARIANT_SEP) + stacked_df[(attribute_key + '_2')]) stacked_df = stacked_df[stacked_df['@@path'].isin(paths)] i1 = df.set_index(case_id_glue).index i2 = stacked_df.set_index(case_id_glue).index if positive: ret = df[i1.isin(i2)] else: ret = df[(~ i1.isin(i2))] ret.attrs = (copy(df.attrs) if hasattr(df, 'attrs') else {}) return ret<|docstring|>Apply a filter on traces containing / not containing a path Parameters ---------- df Dataframe paths Paths to filter on parameters Possible parameters of the algorithm, including: Parameters.CASE_ID_KEY -> Case ID column in the dataframe Parameters.ATTRIBUTE_KEY -> Attribute we want to filter Parameters.POSITIVE -> Specifies if the filter should be applied including traces (positive=True) or excluding traces (positive=False) Returns ---------- df Filtered dataframe<|endoftext|>
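A toy event log run through the filter above, using pm4py's default column names; the cases and timestamps are invented for illustration.

import pandas as pd

df = pd.DataFrame({
    'case:concept:name': ['c1', 'c1', 'c2', 'c2'],
    'concept:name': ['register', 'pay', 'register', 'cancel'],
    'time:timestamp': pd.to_datetime(
        ['2023-01-01', '2023-01-02', '2023-01-01', '2023-01-02']
    ),
})
# Keep only cases containing the directly-follows path register -> pay.
filtered = apply(df, [('register', 'pay')])
print(filtered['case:concept:name'].unique())  # ['c1']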
601736d28a43dd210a76d2867ec49098b2624d1e5eae726463ae0f51b538e3e4
def apply_performance(df: pd.DataFrame, provided_path: Tuple[(str, str)], parameters: Optional[Dict[(Union[(str, Parameters)], Any)]]=None) -> pd.DataFrame: '\n Filters the cases of a dataframe where there is at least one occurrence of the provided path\n occurring in the defined timedelta range.\n\n Parameters\n ----------\n df\n Dataframe\n paths\n Paths to filter on\n parameters\n Possible parameters of the algorithm, including:\n Parameters.CASE_ID_KEY -> Case ID column in the dataframe\n Parameters.ATTRIBUTE_KEY -> Attribute we want to filter\n Parameters.TIMESTAMP_KEY -> Attribute identifying the timestamp in the log\n Parameters.POSITIVE -> Specifies if the filter should be applied including traces (positive=True)\n or excluding traces (positive=False)\n Parameters.MIN_PERFORMANCE -> Minimal allowed performance of the provided path\n Parameters.MAX_PERFORMANCE -> Maximal allowed performance of the provided path\n\n Returns\n ----------\n df\n Filtered dataframe\n ' if (parameters is None): parameters = {} case_id_glue = exec_utils.get_param_value(Parameters.CASE_ID_KEY, parameters, CASE_CONCEPT_NAME) attribute_key = exec_utils.get_param_value(Parameters.ATTRIBUTE_KEY, parameters, DEFAULT_NAME_KEY) timestamp_key = exec_utils.get_param_value(Parameters.TIMESTAMP_KEY, parameters, DEFAULT_TIMESTAMP_KEY) positive = exec_utils.get_param_value(Parameters.POSITIVE, parameters, True) provided_path = ((provided_path[0] + DEFAULT_VARIANT_SEP) + provided_path[1]) min_performance = exec_utils.get_param_value(Parameters.MIN_PERFORMANCE, parameters, 0) max_performance = exec_utils.get_param_value(Parameters.MAX_PERFORMANCE, parameters, sys.maxsize) df = df.sort_values([case_id_glue, timestamp_key]) filt_df = df[[case_id_glue, attribute_key, timestamp_key]] filt_dif_shifted = filt_df.shift((- 1)) filt_dif_shifted.columns = [(str(col) + '_2') for col in filt_dif_shifted.columns] stacked_df = pd.concat([filt_df, filt_dif_shifted], axis=1) stacked_df['@@path'] = ((stacked_df[attribute_key] + DEFAULT_VARIANT_SEP) + stacked_df[(attribute_key + '_2')]) stacked_df = stacked_df[(stacked_df['@@path'] == provided_path)] stacked_df['@@timedelta'] = (stacked_df[(timestamp_key + '_2')] - stacked_df[timestamp_key]).astype('timedelta64[s]') stacked_df = stacked_df[(stacked_df['@@timedelta'] >= min_performance)] stacked_df = stacked_df[(stacked_df['@@timedelta'] <= max_performance)] i1 = df.set_index(case_id_glue).index i2 = stacked_df.set_index(case_id_glue).index if positive: ret = df[i1.isin(i2)] else: ret = df[(~ i1.isin(i2))] ret.attrs = (copy(df.attrs) if hasattr(df, 'attrs') else {}) return ret
Filters the cases of a dataframe where there is at least one occurrence of the provided path occurring in the defined timedelta range. Parameters ---------- df Dataframe paths Paths to filter on parameters Possible parameters of the algorithm, including: Parameters.CASE_ID_KEY -> Case ID column in the dataframe Parameters.ATTRIBUTE_KEY -> Attribute we want to filter Parameters.TIMESTAMP_KEY -> Attribute identifying the timestamp in the log Parameters.POSITIVE -> Specifies if the filter should be applied including traces (positive=True) or excluding traces (positive=False) Parameters.MIN_PERFORMANCE -> Minimal allowed performance of the provided path Parameters.MAX_PERFORMANCE -> Maximal allowed performance of the provided path Returns ---------- df Filtered dataframe
ws2122-lspm/Lib/site-packages/pm4py/algo/filtering/pandas/paths/paths_filter.py
apply_performance
Malekhy/ws2122-lspm
1
python
def apply_performance(df: pd.DataFrame, provided_path: Tuple[(str, str)], parameters: Optional[Dict[(Union[(str, Parameters)], Any)]]=None) -> pd.DataFrame: '\n Filters the cases of a dataframe where there is at least one occurrence of the provided path\n occurring in the defined timedelta range.\n\n Parameters\n ----------\n df\n Dataframe\n paths\n Paths to filter on\n parameters\n Possible parameters of the algorithm, including:\n Parameters.CASE_ID_KEY -> Case ID column in the dataframe\n Parameters.ATTRIBUTE_KEY -> Attribute we want to filter\n Parameters.TIMESTAMP_KEY -> Attribute identifying the timestamp in the log\n Parameters.POSITIVE -> Specifies if the filter should be applied including traces (positive=True)\n or excluding traces (positive=False)\n Parameters.MIN_PERFORMANCE -> Minimal allowed performance of the provided path\n Parameters.MAX_PERFORMANCE -> Maximal allowed performance of the provided path\n\n Returns\n ----------\n df\n Filtered dataframe\n ' if (parameters is None): parameters = {} case_id_glue = exec_utils.get_param_value(Parameters.CASE_ID_KEY, parameters, CASE_CONCEPT_NAME) attribute_key = exec_utils.get_param_value(Parameters.ATTRIBUTE_KEY, parameters, DEFAULT_NAME_KEY) timestamp_key = exec_utils.get_param_value(Parameters.TIMESTAMP_KEY, parameters, DEFAULT_TIMESTAMP_KEY) positive = exec_utils.get_param_value(Parameters.POSITIVE, parameters, True) provided_path = ((provided_path[0] + DEFAULT_VARIANT_SEP) + provided_path[1]) min_performance = exec_utils.get_param_value(Parameters.MIN_PERFORMANCE, parameters, 0) max_performance = exec_utils.get_param_value(Parameters.MAX_PERFORMANCE, parameters, sys.maxsize) df = df.sort_values([case_id_glue, timestamp_key]) filt_df = df[[case_id_glue, attribute_key, timestamp_key]] filt_dif_shifted = filt_df.shift((- 1)) filt_dif_shifted.columns = [(str(col) + '_2') for col in filt_dif_shifted.columns] stacked_df = pd.concat([filt_df, filt_dif_shifted], axis=1) stacked_df['@@path'] = ((stacked_df[attribute_key] + DEFAULT_VARIANT_SEP) + stacked_df[(attribute_key + '_2')]) stacked_df = stacked_df[(stacked_df['@@path'] == provided_path)] stacked_df['@@timedelta'] = (stacked_df[(timestamp_key + '_2')] - stacked_df[timestamp_key]).astype('timedelta64[s]') stacked_df = stacked_df[(stacked_df['@@timedelta'] >= min_performance)] stacked_df = stacked_df[(stacked_df['@@timedelta'] <= max_performance)] i1 = df.set_index(case_id_glue).index i2 = stacked_df.set_index(case_id_glue).index if positive: ret = df[i1.isin(i2)] else: ret = df[(~ i1.isin(i2))] ret.attrs = (copy(df.attrs) if hasattr(df, 'attrs') else {}) return ret
def apply_performance(df: pd.DataFrame, provided_path: Tuple[(str, str)], parameters: Optional[Dict[(Union[(str, Parameters)], Any)]]=None) -> pd.DataFrame: '\n Filters the cases of a dataframe where there is at least one occurrence of the provided path\n occurring in the defined timedelta range.\n\n Parameters\n ----------\n df\n Dataframe\n paths\n Paths to filter on\n parameters\n Possible parameters of the algorithm, including:\n Parameters.CASE_ID_KEY -> Case ID column in the dataframe\n Parameters.ATTRIBUTE_KEY -> Attribute we want to filter\n Parameters.TIMESTAMP_KEY -> Attribute identifying the timestamp in the log\n Parameters.POSITIVE -> Specifies if the filter should be applied including traces (positive=True)\n or excluding traces (positive=False)\n Parameters.MIN_PERFORMANCE -> Minimal allowed performance of the provided path\n Parameters.MAX_PERFORMANCE -> Maximal allowed performance of the provided path\n\n Returns\n ----------\n df\n Filtered dataframe\n ' if (parameters is None): parameters = {} case_id_glue = exec_utils.get_param_value(Parameters.CASE_ID_KEY, parameters, CASE_CONCEPT_NAME) attribute_key = exec_utils.get_param_value(Parameters.ATTRIBUTE_KEY, parameters, DEFAULT_NAME_KEY) timestamp_key = exec_utils.get_param_value(Parameters.TIMESTAMP_KEY, parameters, DEFAULT_TIMESTAMP_KEY) positive = exec_utils.get_param_value(Parameters.POSITIVE, parameters, True) provided_path = ((provided_path[0] + DEFAULT_VARIANT_SEP) + provided_path[1]) min_performance = exec_utils.get_param_value(Parameters.MIN_PERFORMANCE, parameters, 0) max_performance = exec_utils.get_param_value(Parameters.MAX_PERFORMANCE, parameters, sys.maxsize) df = df.sort_values([case_id_glue, timestamp_key]) filt_df = df[[case_id_glue, attribute_key, timestamp_key]] filt_dif_shifted = filt_df.shift((- 1)) filt_dif_shifted.columns = [(str(col) + '_2') for col in filt_dif_shifted.columns] stacked_df = pd.concat([filt_df, filt_dif_shifted], axis=1) stacked_df['@@path'] = ((stacked_df[attribute_key] + DEFAULT_VARIANT_SEP) + stacked_df[(attribute_key + '_2')]) stacked_df = stacked_df[(stacked_df['@@path'] == provided_path)] stacked_df['@@timedelta'] = (stacked_df[(timestamp_key + '_2')] - stacked_df[timestamp_key]).astype('timedelta64[s]') stacked_df = stacked_df[(stacked_df['@@timedelta'] >= min_performance)] stacked_df = stacked_df[(stacked_df['@@timedelta'] <= max_performance)] i1 = df.set_index(case_id_glue).index i2 = stacked_df.set_index(case_id_glue).index if positive: ret = df[i1.isin(i2)] else: ret = df[(~ i1.isin(i2))] ret.attrs = (copy(df.attrs) if hasattr(df, 'attrs') else {}) return ret<|docstring|>Filters the cases of a dataframe where there is at least one occurrence of the provided path occurring in the defined timedelta range. Parameters ---------- df Dataframe paths Paths to filter on parameters Possible parameters of the algorithm, including: Parameters.CASE_ID_KEY -> Case ID column in the dataframe Parameters.ATTRIBUTE_KEY -> Attribute we want to filter Parameters.TIMESTAMP_KEY -> Attribute identifying the timestamp in the log Parameters.POSITIVE -> Specifies if the filter should be applied including traces (positive=True) or excluding traces (positive=False) Parameters.MIN_PERFORMANCE -> Minimal allowed performance of the provided path Parameters.MAX_PERFORMANCE -> Maximal allowed performance of the provided path Returns ---------- df Filtered dataframe<|endoftext|>
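Reusing the toy dataframe from the previous sketch, the performance variant keeps only cases where the register -> pay step took between one hour and one day (bounds are in seconds, matching the timedelta64[s] cast above):

filtered = apply_performance(
    df,
    ('register', 'pay'),
    parameters={
        Parameters.MIN_PERFORMANCE: 60 * 60,       # at least 1 hour
        Parameters.MAX_PERFORMANCE: 24 * 60 * 60,  # at most 1 day
    },
)
# c1's register -> pay gap is exactly one day, so it passes the filter.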
e84690ca664d712e20c398e0429b43e61b5d71efea211f5063aeb7dec8e49885
def get_schema_fields(schema, pointer=None, path=None, definition_pointer=None, definition_path=None, deprecated=None): "\n Yields a ``Field`` object for each field (whether under ``properties`` or ``patternProperties``) in a JSON schema.\n\n :param dict schema: a JSON schema\n :param tuple pointer: the JSON pointer to the field in the schema, e.g.\n ``('properties', 'tender', 'properties', 'id')``\n :param tuple path: the path to the field in data, e.g.\n ``('tender', 'id')``\n :param tuple definition_pointer: the JSON pointer to the definition in which the field is defined, e.g.\n ``('definitions', 'Item')``\n :param tuple definition_path: the path to the definition in which the field is defined, e.g.\n ``('Item')``\n :param dict deprecated: if the field, or an ancestor of the field, sets ``deprecated``, the ``deprecated``\n object\n " if (pointer is None): pointer = () if (path is None): path = () if (definition_pointer is None): definition_pointer = () if (definition_path is None): definition_path = () multilingual = set() hidden = set() for (key, value) in schema.get('patternProperties', {}).items(): for offset in (2, 1): end = ((- LANGUAGE_CODE_SUFFIX_LEN) - offset) if ((key[end:(- offset)] == LANGUAGE_CODE_SUFFIX) and (key[:offset] == '^('[:offset]) and (key[(- offset):] == ')$'[(- offset):])): multilingual.add(key[offset:end]) hidden.add(key) break for (key, value) in schema.get('properties', {}).items(): new_pointer = (pointer + ('properties', key)) new_path = (path + (key,)) required = schema.get('required', []) (yield from _get_schema_field(key, value, new_pointer, new_path, definition_pointer, definition_path, required, (deprecated or _deprecated(value)), multilingual)) for (key, value) in schema.get('definitions', {}).items(): new_pointer = (pointer + ('definitions', key)) (yield from get_schema_fields(value, pointer=new_pointer, path=(), definition_pointer=new_pointer, definition_path=(key,), deprecated=deprecated)) for (key, value) in schema.get('patternProperties', {}).items(): if (key not in hidden): new_pointer = (pointer + ('patternProperties', key)) new_path = (path + (f'({key})',)) (yield Field(schema=value, pointer=new_pointer, path=new_path, definition_pointer=definition_pointer, definition_path=definition_path, required=False, deprecated=(deprecated or _deprecated(value)), multilingual=False))
Yields a ``Field`` object for each field (whether under ``properties`` or ``patternProperties``) in a JSON schema. :param dict schema: a JSON schema :param tuple pointer: the JSON pointer to the field in the schema, e.g. ``('properties', 'tender', 'properties', 'id')`` :param tuple path: the path to the field in data, e.g. ``('tender', 'id')`` :param tuple definition_pointer: the JSON pointer to the definition in which the field is defined, e.g. ``('definitions', 'Item')`` :param tuple definition_path: the path to the definition in which the field is defined, e.g. ``('Item')`` :param dict deprecated: if the field, or an ancestor of the field, sets ``deprecated``, the ``deprecated`` object
ocdskit/schema.py
get_schema_fields
open-contracting/ocdsk
12
python
def get_schema_fields(schema, pointer=None, path=None, definition_pointer=None, definition_path=None, deprecated=None): "\n Yields a ``Field`` object for each field (whether under ``properties`` or ``patternProperties``) in a JSON schema.\n\n :param dict schema: a JSON schema\n :param tuple pointer: the JSON pointer to the field in the schema, e.g.\n ``('properties', 'tender', 'properties', 'id')``\n :param tuple path: the path to the field in data, e.g.\n ``('tender', 'id')``\n :param tuple definition_pointer: the JSON pointer to the definition in which the field is defined, e.g.\n ``('definitions', 'Item')``\n :param tuple definition_path: the path to the definition in which the field is defined, e.g.\n ``('Item')``\n :param dict deprecated: if the field, or an ancestor of the field, sets ``deprecated``, the ``deprecated``\n object\n " if (pointer is None): pointer = () if (path is None): path = () if (definition_pointer is None): definition_pointer = () if (definition_path is None): definition_path = () multilingual = set() hidden = set() for (key, value) in schema.get('patternProperties', {}).items(): for offset in (2, 1): end = ((- LANGUAGE_CODE_SUFFIX_LEN) - offset) if ((key[end:(- offset)] == LANGUAGE_CODE_SUFFIX) and (key[:offset] == '^('[:offset]) and (key[(- offset):] == ')$'[(- offset):])): multilingual.add(key[offset:end]) hidden.add(key) break for (key, value) in schema.get('properties', {}).items(): new_pointer = (pointer + ('properties', key)) new_path = (path + (key,)) required = schema.get('required', []) (yield from _get_schema_field(key, value, new_pointer, new_path, definition_pointer, definition_path, required, (deprecated or _deprecated(value)), multilingual)) for (key, value) in schema.get('definitions', {}).items(): new_pointer = (pointer + ('definitions', key)) (yield from get_schema_fields(value, pointer=new_pointer, path=(), definition_pointer=new_pointer, definition_path=(key,), deprecated=deprecated)) for (key, value) in schema.get('patternProperties', {}).items(): if (key not in hidden): new_pointer = (pointer + ('patternProperties', key)) new_path = (path + (f'({key})',)) (yield Field(schema=value, pointer=new_pointer, path=new_path, definition_pointer=definition_pointer, definition_path=definition_path, required=False, deprecated=(deprecated or _deprecated(value)), multilingual=False))
def get_schema_fields(schema, pointer=None, path=None, definition_pointer=None, definition_path=None, deprecated=None): "\n Yields a ``Field`` object for each field (whether under ``properties`` or ``patternProperties``) in a JSON schema.\n\n :param dict schema: a JSON schema\n :param tuple pointer: the JSON pointer to the field in the schema, e.g.\n ``('properties', 'tender', 'properties', 'id')``\n :param tuple path: the path to the field in data, e.g.\n ``('tender', 'id')``\n :param tuple definition_pointer: the JSON pointer to the definition in which the field is defined, e.g.\n ``('definitions', 'Item')``\n :param tuple definition_path: the path to the definition in which the field is defined, e.g.\n ``('Item')``\n :param dict deprecated: if the field, or an ancestor of the field, sets ``deprecated``, the ``deprecated``\n object\n " if (pointer is None): pointer = () if (path is None): path = () if (definition_pointer is None): definition_pointer = () if (definition_path is None): definition_path = () multilingual = set() hidden = set() for (key, value) in schema.get('patternProperties', {}).items(): for offset in (2, 1): end = ((- LANGUAGE_CODE_SUFFIX_LEN) - offset) if ((key[end:(- offset)] == LANGUAGE_CODE_SUFFIX) and (key[:offset] == '^('[:offset]) and (key[(- offset):] == ')$'[(- offset):])): multilingual.add(key[offset:end]) hidden.add(key) break for (key, value) in schema.get('properties', {}).items(): new_pointer = (pointer + ('properties', key)) new_path = (path + (key,)) required = schema.get('required', []) (yield from _get_schema_field(key, value, new_pointer, new_path, definition_pointer, definition_path, required, (deprecated or _deprecated(value)), multilingual)) for (key, value) in schema.get('definitions', {}).items(): new_pointer = (pointer + ('definitions', key)) (yield from get_schema_fields(value, pointer=new_pointer, path=(), definition_pointer=new_pointer, definition_path=(key,), deprecated=deprecated)) for (key, value) in schema.get('patternProperties', {}).items(): if (key not in hidden): new_pointer = (pointer + ('patternProperties', key)) new_path = (path + (f'({key})',)) (yield Field(schema=value, pointer=new_pointer, path=new_path, definition_pointer=definition_pointer, definition_path=definition_path, required=False, deprecated=(deprecated or _deprecated(value)), multilingual=False))<|docstring|>Yields a ``Field`` object for each field (whether under ``properties`` or ``patternProperties``) in a JSON schema. :param dict schema: a JSON schema :param tuple pointer: the JSON pointer to the field in the schema, e.g. ``('properties', 'tender', 'properties', 'id')`` :param tuple path: the path to the field in data, e.g. ``('tender', 'id')`` :param tuple definition_pointer: the JSON pointer to the definition in which the field is defined, e.g. ``('definitions', 'Item')`` :param tuple definition_path: the path to the definition in which the field is defined, e.g. ``('Item')`` :param dict deprecated: if the field, or an ancestor of the field, sets ``deprecated``, the ``deprecated`` object<|endoftext|>
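A tiny hypothetical schema walked with get_schema_fields; the pointers noted below follow from the traversal above (the exact yield order, and any extra rows from recursion inside _get_schema_field, may differ).

schema = {
    'properties': {
        'tender': {
            'properties': {'id': {'type': 'string'}},
        },
    },
    'definitions': {
        'Item': {
            'properties': {'quantity': {'type': 'number'}},
        },
    },
}
for field in get_schema_fields(schema):
    print(field.pointer_components, field.path_components)
# Yields include ('properties', 'tender', 'properties', 'id') with path
# ('tender', 'id'), and ('definitions', 'Item', 'properties', 'quantity')
# with definition path ('Item',).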
41126afb3562e2c5aea44a762979c01a729d3e18ed16c5fb9d8b942db5b07a94
def add_validation_properties(schema, unique_items=True, coordinates=False): '\n Adds "minItems" and "uniqueItems" if an array, adds "minProperties" if an object, and adds "minLength" if a string\n and if "enum", "format" and "pattern" aren\'t set.\n\n :param dict schema: a JSON schema\n :param bool unique_items: whether to add "uniqueItems" properties to array fields\n :param bool coordinates: whether the parent is a geospatial coordinates field\n ' if isinstance(schema, list): for item in schema: add_validation_properties(item, unique_items=unique_items) elif isinstance(schema, dict): if ('type' in schema): if (('string' in schema['type']) and ('enum' not in schema) and ('format' not in schema) and ('pattern' not in schema)): schema.setdefault('minLength', 1) if ('array' in schema['type']): schema.setdefault('minItems', 1) if (sorted(schema.get('items', {}).get('type', [])) == ['array', 'number']): coordinates = True if (unique_items and (not coordinates)): schema.setdefault('uniqueItems', True) if ('object' in schema['type']): schema.setdefault('minProperties', 1) for value in schema.values(): add_validation_properties(value, unique_items=unique_items, coordinates=coordinates)
Adds "minItems" and "uniqueItems" if an array, adds "minProperties" if an object, and adds "minLength" if a string and if "enum", "format" and "pattern" aren't set. :param dict schema: a JSON schema :param bool unique_items: whether to add "uniqueItems" properties to array fields :param bool coordinates: whether the parent is a geospatial coordinates field
ocdskit/schema.py
add_validation_properties
open-contracting/ocdsk
12
python
def add_validation_properties(schema, unique_items=True, coordinates=False): '\n Adds "minItems" and "uniqueItems" if an array, adds "minProperties" if an object, and adds "minLength" if a string\n and if "enum", "format" and "pattern" aren\'t set.\n\n :param dict schema: a JSON schema\n :param bool unique_items: whether to add "uniqueItems" properties to array fields\n :param bool coordinates: whether the parent is a geospatial coordinates field\n ' if isinstance(schema, list): for item in schema: add_validation_properties(item, unique_items=unique_items) elif isinstance(schema, dict): if ('type' in schema): if (('string' in schema['type']) and ('enum' not in schema) and ('format' not in schema) and ('pattern' not in schema)): schema.setdefault('minLength', 1) if ('array' in schema['type']): schema.setdefault('minItems', 1) if (sorted(schema.get('items', {}).get('type', [])) == ['array', 'number']): coordinates = True if (unique_items and (not coordinates)): schema.setdefault('uniqueItems', True) if ('object' in schema['type']): schema.setdefault('minProperties', 1) for value in schema.values(): add_validation_properties(value, unique_items=unique_items, coordinates=coordinates)
def add_validation_properties(schema, unique_items=True, coordinates=False): '\n Adds "minItems" and "uniqueItems" if an array, adds "minProperties" if an object, and adds "minLength" if a string\n and if "enum", "format" and "pattern" aren\'t set.\n\n :param dict schema: a JSON schema\n :param bool unique_items: whether to add "uniqueItems" properties to array fields\n :param bool coordinates: whether the parent is a geospatial coordinates field\n ' if isinstance(schema, list): for item in schema: add_validation_properties(item, unique_items=unique_items) elif isinstance(schema, dict): if ('type' in schema): if (('string' in schema['type']) and ('enum' not in schema) and ('format' not in schema) and ('pattern' not in schema)): schema.setdefault('minLength', 1) if ('array' in schema['type']): schema.setdefault('minItems', 1) if (sorted(schema.get('items', {}).get('type', [])) == ['array', 'number']): coordinates = True if (unique_items and (not coordinates)): schema.setdefault('uniqueItems', True) if ('object' in schema['type']): schema.setdefault('minProperties', 1) for value in schema.values(): add_validation_properties(value, unique_items=unique_items, coordinates=coordinates)<|docstring|>Adds "minItems" and "uniqueItems" if an array, adds "minProperties" if an object, and adds "minLength" if a string and if "enum", "format" and "pattern" aren't set. :param dict schema: a JSON schema :param bool unique_items: whether to add "uniqueItems" properties to array fields :param bool coordinates: whether the parent is a geospatial coordinates field<|endoftext|>
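Because add_validation_properties mutates the schema in place with setdefault, explicit values are preserved and only missing constraints are filled in. A self-contained example, fully derivable from the function body above:

    schema = {'type': 'object', 'properties': {'title': {'type': 'string'}}}
    add_validation_properties(schema)
    # The top-level object gains minProperties, the string field gains minLength:
    # schema == {'type': 'object', 'minProperties': 1,
    #            'properties': {'title': {'type': 'string', 'minLength': 1}}}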
4528f87a2eba800e421e56fbcf516fdd4a3bc0bad870560809d8cde9f32eea96
def __init__(self, schema=None, pointer=None, path=None, definition_pointer=None, definition_path=None, required=None, deprecated=None, multilingual=None, sep='.'): "\n Initializes a schema field object.\n\n :param dict schema: the field's schema\n :param tuple pointer: the JSON pointer to the field in the schema, e.g.\n ``('properties', 'tender', 'properties', 'id')``\n :param tuple path: the path to the field in data, e.g.\n ``('tender', 'id')``\n :param tuple definition_pointer: the JSON pointer to the definition in which the field is defined, e.g.\n ``('definitions', 'Item')``\n :param tuple definition_path: the path to the definition in which the field is defined, e.g.\n ``('Item')``\n :param bool required: whether the field is listed in the schema's ``required``\n :param dict deprecated: if the field, or an ancestor of the field, sets ``deprecated``, the ``deprecated``\n object\n :param bool multilingual: whether the field has a corresponding field in the schema's ``patternProperties``\n :param str sep: the separator to use in string representations of paths\n " self.schema = schema self.pointer_components = pointer self.path_components = path self.definition_pointer_components = definition_pointer self.definition_path_components = definition_path self.required = required self.deprecated = deprecated self.multilingual = multilingual self._sep = sep
Initializes a schema field object. :param dict schema: the field's schema :param tuple pointer: the JSON pointer to the field in the schema, e.g. ``('properties', 'tender', 'properties', 'id')`` :param tuple path: the path to the field in data, e.g. ``('tender', 'id')`` :param tuple definition_pointer: the JSON pointer to the definition in which the field is defined, e.g. ``('definitions', 'Item')`` :param tuple definition_path: the path to the definition in which the field is defined, e.g. ``('Item')`` :param bool required: whether the field is listed in the schema's ``required`` :param dict deprecated: if the field, or an ancestor of the field, sets ``deprecated``, the ``deprecated`` object :param bool multilingual: whether the field has a corresponding field in the schema's ``patternProperties`` :param str sep: the separator to use in string representations of paths
ocdskit/schema.py
__init__
open-contracting/ocdsk
12
python
def __init__(self, schema=None, pointer=None, path=None, definition_pointer=None, definition_path=None, required=None, deprecated=None, multilingual=None, sep='.'): "\n Initializes a schema field object.\n\n :param dict schema: the field's schema\n :param tuple pointer: the JSON pointer to the field in the schema, e.g.\n ``('properties', 'tender', 'properties', 'id')``\n :param tuple path: the path to the field in data, e.g.\n ``('tender', 'id')``\n :param tuple definition_pointer: the JSON pointer to the definition in which the field is defined, e.g.\n ``('definitions', 'Item')``\n :param tuple definition_path: the path to the definition in which the field is defined, e.g.\n ``('Item')``\n :param bool required: whether the field is listed in the schema's ``required``\n :param dict deprecated: if the field, or an ancestor of the field, sets ``deprecated``, the ``deprecated``\n object\n :param bool multilingual: whether the field has a corresponding field in the schema's ``patternProperties``\n :param str sep: the separator to use in string representations of paths\n " self.schema = schema self.pointer_components = pointer self.path_components = path self.definition_pointer_components = definition_pointer self.definition_path_components = definition_path self.required = required self.deprecated = deprecated self.multilingual = multilingual self._sep = sep
def __init__(self, schema=None, pointer=None, path=None, definition_pointer=None, definition_path=None, required=None, deprecated=None, multilingual=None, sep='.'): "\n Initializes a schema field object.\n\n :param dict schema: the field's schema\n :param tuple pointer: the JSON pointer to the field in the schema, e.g.\n ``('properties', 'tender', 'properties', 'id')``\n :param tuple path: the path to the field in data, e.g.\n ``('tender', 'id')``\n :param tuple definition_pointer: the JSON pointer to the definition in which the field is defined, e.g.\n ``('definitions', 'Item')``\n :param tuple definition_path: the path to the definition in which the field is defined, e.g.\n ``('Item')``\n :param bool required: whether the field is listed in the schema's ``required``\n :param dict deprecated: if the field, or an ancestor of the field, sets ``deprecated``, the ``deprecated``\n object\n :param bool multilingual: whether the field has a corresponding field in the schema's ``patternProperties``\n :param str sep: the separator to use in string representations of paths\n " self.schema = schema self.pointer_components = pointer self.path_components = path self.definition_pointer_components = definition_pointer self.definition_path_components = definition_path self.required = required self.deprecated = deprecated self.multilingual = multilingual self._sep = sep<|docstring|>Initializes a schema field object. :param dict schema: the field's schema :param tuple pointer: the JSON pointer to the field in the schema, e.g. ``('properties', 'tender', 'properties', 'id')`` :param tuple path: the path to the field in data, e.g. ``('tender', 'id')`` :param tuple definition_pointer: the JSON pointer to the definition in which the field is defined, e.g. ``('definitions', 'Item')`` :param tuple definition_path: the path to the definition in which the field is defined, e.g. ``('Item')`` :param bool required: whether the field is listed in the schema's ``required`` :param dict deprecated: if the field, or an ancestor of the field, sets ``deprecated``, the ``deprecated`` object :param bool multilingual: whether the field has a corresponding field in the schema's ``patternProperties`` :param str sep: the separator to use in string representations of paths<|endoftext|>
f9c26265c491472701333dd71b060d6db213267321f8e3d04f992ae20ce47015
@property def pointer(self): '\n Returns the JSON pointer to the field in the schema, e.g. ``/properties/tender/properties/id``.\n ' return _join_sep('/', self.pointer_components)
Returns the JSON pointer to the field in the schema, e.g. ``/properties/tender/properties/id``.
ocdskit/schema.py
pointer
open-contracting/ocdsk
12
python
@property def pointer(self): '\n \n ' return _join_sep('/', self.pointer_components)
@property def pointer(self): '\n \n ' return _join_sep('/', self.pointer_components)<|docstring|>Returns the JSON pointer to the field in the schema, e.g. ``/properties/tender/properties/id``.<|endoftext|>
88c68686e7cc31fc19b87be18ecd3014d703acdd64cfb3f7c53aed0701aed638
@property def definition_pointer(self): '\n Returns the JSON pointer to the definition in which the field is defined, e.g. ``/definitions/Item``.\n ' return _join_sep('/', self.definition_pointer_components)
Returns the JSON pointer to the definition in which the field is defined, e.g. ``/definitions/Item``.
ocdskit/schema.py
definition_pointer
open-contracting/ocdsk
12
python
@property def definition_pointer(self): '\n \n ' return _join_sep('/', self.definition_pointer_components)
@property def definition_pointer(self): '\n \n ' return _join_sep('/', self.definition_pointer_components)<|docstring|>Returns the JSON pointer to the definition in which the field is defined, e.g. ``/definitions/Item``.<|endoftext|>
7c73702361a90e0144fadc2a795fdf3e1b11ff902441ecdff328731db4ba5a5b
@property def path(self): '\n Returns the path to the field in data with ``self.sep`` as separator, e.g. ``tender.id``.\n ' return self._sep.join(self.path_components)
Returns the path to the field in data with ``self.sep`` as separator, e.g. ``tender.id``.
ocdskit/schema.py
path
open-contracting/ocdsk
12
python
@property def path(self): '\n \n ' return self._sep.join(self.path_components)
@property def path(self): '\n \n ' return self._sep.join(self.path_components)<|docstring|>Returns the path to the field in data with ``self.sep`` as separator, e.g. ``tender.id``.<|endoftext|>
dfc4ee5becc00b108f6a4d4fe0dc0936745b78d5e4d87409a0edbd18bee5ce62
@property def definition_path(self): '\n Returns the path to the definition in which the field is defined with ``self.sep`` as separator, e.g. ``Item``.\n ' return self._sep.join(self.definition_path_components)
Returns the path to the definition in which the field is defined with ``self.sep`` as separator, e.g. ``Item``.
ocdskit/schema.py
definition_path
open-contracting/ocdsk
12
python
@property def definition_path(self): '\n \n ' return self._sep.join(self.definition_path_components)
@property def definition_path(self): '\n \n ' return self._sep.join(self.definition_path_components)<|docstring|>Returns the path to the definition in which the field is defined with ``self.sep`` as separator, e.g. ``Item``.<|endoftext|>
0b7fbcca3471ccec9e5b198e517bb278e1fa5d7c05ec1723d49645834423d842
@property def sep(self): '\n Returns the separator to use in string representations of paths.\n ' return self._sep
Returns the separator to use in string representations of paths.
ocdskit/schema.py
sep
open-contracting/ocdsk
12
python
@property def sep(self): '\n \n ' return self._sep
@property def sep(self): '\n \n ' return self._sep<|docstring|>Returns the separator to use in string representations of paths.<|endoftext|>
c760605867f0ee582640c9181873dc4d6b16df025335e41af616030b15180d80
@sep.setter def sep(self, sep): '\n Sets the separator to use in string representations of paths.\n ' self._sep = sep
Sets the separator to use in string representations of paths.
ocdskit/schema.py
sep
open-contracting/ocdsk
12
python
@sep.setter def sep(self, sep): '\n \n ' self._sep = sep
@sep.setter def sep(self, sep): '\n \n ' self._sep = sep<|docstring|>Sets the separator to use in string representations of paths.<|endoftext|>
c05f68b7f19a938a1fd5698eb275b1334aede2a5f43d6869b2445e8edc043e4d
def asdict(self, sep=None, exclude=None): '\n Returns the field as a dict, with keys for: ``schema``, ``pointer``, ``path``,\n ``definition_pointer``, ``definition_path``, ``required``, ``deprecated``, ``multilingual``.\n\n :param list sep: the separator to use in string representations of paths, overriding ``self.sep``\n :param list exclude: a list of keys to exclude from the dict\n ' data = {} exclude = (exclude or ()) sep = (sep or self.sep) for (key, value) in self.__dict__.items(): if ((key not in exclude) and (not key.startswith('_')) and (not key.endswith('_components'))): data[key] = value for key in ('pointer', 'definition_pointer'): if (key not in exclude): data[key] = getattr(self, key) for key in ('path', 'definition_path'): if (key not in exclude): data[key] = sep.join(getattr(self, f'{key}_components')) return data
Returns the field as a dict, with keys for: ``schema``, ``pointer``, ``path``, ``definition_pointer``, ``definition_path``, ``required``, ``deprecated``, ``multilingual``. :param list sep: the separator to use in string representations of paths, overriding ``self.sep`` :param list exclude: a list of keys to exclude from the dict
ocdskit/schema.py
asdict
open-contracting/ocdsk
12
python
def asdict(self, sep=None, exclude=None): '\n Returns the field as a dict, with keys for: ``schema``, ``pointer``, ``path``,\n ``definition_pointer``, ``definition_path``, ``required``, ``deprecated``, ``multilingual``.\n\n :param list sep: the separator to use in string representations of paths, overriding ``self.sep``\n :param list exclude: a list of keys to exclude from the dict\n ' data = {} exclude = (exclude or ()) sep = (sep or self.sep) for (key, value) in self.__dict__.items(): if ((key not in exclude) and (not key.startswith('_')) and (not key.endswith('_components'))): data[key] = value for key in ('pointer', 'definition_pointer'): if (key not in exclude): data[key] = getattr(self, key) for key in ('path', 'definition_path'): if (key not in exclude): data[key] = sep.join(getattr(self, f'{key}_components')) return data
def asdict(self, sep=None, exclude=None): '\n Returns the field as a dict, with keys for: ``schema``, ``pointer``, ``path``,\n ``definition_pointer``, ``definition_path``, ``required``, ``deprecated``, ``multilingual``.\n\n :param list sep: the separator to use in string representations of paths, overriding ``self.sep``\n :param list exclude: a list of keys to exclude from the dict\n ' data = {} exclude = (exclude or ()) sep = (sep or self.sep) for (key, value) in self.__dict__.items(): if ((key not in exclude) and (not key.startswith('_')) and (not key.endswith('_components'))): data[key] = value for key in ('pointer', 'definition_pointer'): if (key not in exclude): data[key] = getattr(self, key) for key in ('path', 'definition_path'): if (key not in exclude): data[key] = sep.join(getattr(self, f'{key}_components')) return data<|docstring|>Returns the field as a dict, with keys for: ``schema``, ``pointer``, ``path``, ``definition_pointer``, ``definition_path``, ``required``, ``deprecated``, ``multilingual``. :param list sep: the separator to use in string representations of paths, overriding ``self.sep`` :param list exclude: a list of keys to exclude from the dict<|endoftext|>
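Putting the Field class together: path strings are derived lazily from the stored tuples, so changing sep changes every string representation. Excluding the pointer keys below also sidesteps the module-private _join_sep helper, which is not shown here:

    field = Field(schema={'type': 'string'},
                  pointer=('properties', 'tender', 'properties', 'id'),
                  path=('tender', 'id'), definition_pointer=(), definition_path=(),
                  required=True, deprecated=None, multilingual=False)
    print(field.path)  # tender.id
    field.sep = '/'
    print(field.path)  # tender/id
    print(field.asdict(exclude=['schema', 'pointer', 'definition_pointer']))
    # {'required': True, 'deprecated': None, 'multilingual': False,
    #  'path': 'tender/id', 'definition_path': ''}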
2c778391220a72769e9ba99b29366a0ca103d77e316208c22142443299caf110
def create_app(object_name): 'Create the app instance via `Factory Method`' app = Flask(__name__) app.config.from_object(object_name) db.init_app(app) bcrypt.init_app(app) app.register_blueprint(blog.blog_blueprint) app.register_blueprint(main.main_blueprint) return app
Create the app instance via `Factory Method`
flaskblog/__init__.py
create_app
indicolite/flask-blog
0
python
def create_app(object_name): app = Flask(__name__) app.config.from_object(object_name) db.init_app(app) bcrypt.init_app(app) app.register_blueprint(blog.blog_blueprint) app.register_blueprint(main.main_blueprint) return app
def create_app(object_name): app = Flask(__name__) app.config.from_object(object_name) db.init_app(app) bcrypt.init_app(app) app.register_blueprint(blog.blog_blueprint) app.register_blueprint(main.main_blueprint) return app<|docstring|>Create the app instance via `Factory Method`<|endoftext|>
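Typical use of the factory above; the dotted config path is a hypothetical settings object for this app, not something defined in the record:

    # 'config.DevConfig' is an assumed config object name for illustration.
    app = create_app('config.DevConfig')
    if __name__ == '__main__':
        app.run()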
dac137c0f8eb0392a1f0c7f695526f5ccac4720ea1782e4a7d63c82858503915
@skipUnlessDarwin @dsym_test def test_with_dsym_and_run_command(self): 'Test that static methods are properly distinguished from regular methods' self.buildDsym() self.static_method_commands()
Test that static methods are properly distinguished from regular methods
3.7.0/lldb-3.7.0.src/test/lang/cpp/static_methods/TestCPPStaticMethods.py
test_with_dsym_and_run_command
androm3da/clang_sles
3
python
@skipUnlessDarwin @dsym_test def test_with_dsym_and_run_command(self): self.buildDsym() self.static_method_commands()
@skipUnlessDarwin @dsym_test def test_with_dsym_and_run_command(self): self.buildDsym() self.static_method_commands()<|docstring|>Test that static methods are properly distinguished from regular methods<|endoftext|>
048e03a4e78fd404bb31e03092cd47894e233e0484a8d315e02282fb652d962e
@dwarf_test def test_with_dwarf_and_run_command(self): 'Test that static methods are properly distinguished from regular methods' self.buildDwarf() self.static_method_commands()
Test that static methods are properly distinguished from regular methods
3.7.0/lldb-3.7.0.src/test/lang/cpp/static_methods/TestCPPStaticMethods.py
test_with_dwarf_and_run_command
androm3da/clang_sles
3
python
@dwarf_test def test_with_dwarf_and_run_command(self): self.buildDwarf() self.static_method_commands()
@dwarf_test def test_with_dwarf_and_run_command(self): self.buildDwarf() self.static_method_commands()<|docstring|>Test that static methods are properly distinguished from regular methods<|endoftext|>
dea1aebd0a3fcbcbf88c10ac4475e7c12c844122558ecf126ca56a91a4604d01
def static_method_commands(self): 'Test that static methods are properly distinguished from regular methods' self.runCmd('file a.out', CURRENT_EXECUTABLE_SET) lldbutil.run_break_set_by_file_and_line(self, 'main.cpp', self.line, num_expected_locations=1, loc_exact=True) self.runCmd('process launch', RUN_SUCCEEDED) self.expect('thread list', STOPPED_DUE_TO_BREAKPOINT, substrs=['stopped', 'stop reason = breakpoint']) self.expect('expression -- A::getStaticValue()', startstr='(int) $0 = 5') self.expect('expression -- my_a.getMemberValue()', startstr='(int) $1 = 3')
Test that static methods are properly distinguished from regular methods
3.7.0/lldb-3.7.0.src/test/lang/cpp/static_methods/TestCPPStaticMethods.py
static_method_commands
androm3da/clang_sles
3
python
def static_method_commands(self): self.runCmd('file a.out', CURRENT_EXECUTABLE_SET) lldbutil.run_break_set_by_file_and_line(self, 'main.cpp', self.line, num_expected_locations=1, loc_exact=True) self.runCmd('process launch', RUN_SUCCEEDED) self.expect('thread list', STOPPED_DUE_TO_BREAKPOINT, substrs=['stopped', 'stop reason = breakpoint']) self.expect('expression -- A::getStaticValue()', startstr='(int) $0 = 5') self.expect('expression -- my_a.getMemberValue()', startstr='(int) $1 = 3')
def static_method_commands(self): self.runCmd('file a.out', CURRENT_EXECUTABLE_SET) lldbutil.run_break_set_by_file_and_line(self, 'main.cpp', self.line, num_expected_locations=1, loc_exact=True) self.runCmd('process launch', RUN_SUCCEEDED) self.expect('thread list', STOPPED_DUE_TO_BREAKPOINT, substrs=['stopped', 'stop reason = breakpoint']) self.expect('expression -- A::getStaticValue()', startstr='(int) $0 = 5') self.expect('expression -- my_a.getMemberValue()', startstr='(int) $1 = 3')<|docstring|>Test that static methods are properly distinguished from regular methods<|endoftext|>
a8b7959996fc83ceef23dedb0757e9e08aa165a8ab8103098a5b02b1f6b7d818
def atualizar(self, mensagem): 'Do whatever you want with the message.' print(f'|{self.nome} recebeu| {mensagem}')
Do whatever you want with the message.
codigo/Live115/exemplo_1.py
atualizar
Engcompaulo/live-de-python
572
python
def atualizar(self, mensagem): print(f'|{self.nome} recebeu| {mensagem}')
def atualizar(self, mensagem): print(f'|{self.nome} recebeu| {mensagem}')<|docstring|>Do whatever you want with the message.<|endoftext|>
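atualizar is the observer-side update hook of an observer pattern; a minimal sketch of how such an observer might be used (the Observador class name and call site are hypothetical, only the method comes from the record above):

    class Observador:
        def __init__(self, nome):
            self.nome = nome

        def atualizar(self, mensagem):
            # Print which observer received which message.
            print(f'|{self.nome} recebeu| {mensagem}')

    Observador('Canal A').atualizar('nova mensagem')  # |Canal A recebeu| nova mensagem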
4ed5e25f27d801d39df4fa335e95ef0cb03b7fb2e39636f2ea4ad3e617c9744a
def surface_disque(rayon): '\n function that computes and returns the area of the disc\n :param rayon: the radius of the disc\n :type rayon: int or float\n :return: the area of the disc (in the squared unit of the radius)\n :rtype: float\n ' return (math.pi * (rayon ** 2))
function that computes and returns the area of the disc :param rayon: the radius of the disc :type rayon: int or float :return: the area of the disc (in the squared unit of the radius) :rtype: float
module_surfaces.py
surface_disque
laboiteaphysique/conda
0
python
def surface_disque(rayon): '\n function that computes and returns the area of the disc\n :param rayon: the radius of the disc\n :type rayon: int or float\n :return: the area of the disc (in the squared unit of the radius)\n :rtype: float\n ' return (math.pi * (rayon ** 2))
def surface_disque(rayon): '\n function that computes and returns the area of the disc\n :param rayon: the radius of the disc\n :type rayon: int or float\n :return: the area of the disc (in the squared unit of the radius)\n :rtype: float\n ' return (math.pi * (rayon ** 2))<|docstring|>function that computes and returns the area of the disc :param rayon: the radius of the disc :type rayon: int or float :return: the area of the disc (in the squared unit of the radius) :rtype: float<|endoftext|>
2e0b3242b9677d29084055672499a20240060df64bcedd1d856c82b73abf7d3d
def surface_carre(arete): "\n function that computes and returns the area of a square\n :param arete: the length of one side of the square\n :type arete: int or float\n :return: the area of the square in the squared unit of arete\n :rtype: int or float\n " return (arete * arete)
function that computes and returns the area of a square :param arete: the length of one side of the square :type arete: int or float :return: the area of the square in the squared unit of arete :rtype: int or float
module_surfaces.py
surface_carre
laboiteaphysique/conda
0
python
def surface_carre(arete): "\n function that computes and returns the area of a square\n :param arete: the length of one side of the square\n :type arete: int or float\n :return: the area of the square in the squared unit of arete\n :rtype: int or float\n " return (arete * arete)
def surface_carre(arete): "\n function that computes and returns the area of a square\n :param arete: the length of one side of the square\n :type arete: int or float\n :return: the area of the square in the squared unit of arete\n :rtype: int or float\n " return (arete * arete)<|docstring|>function that computes and returns the area of a square :param arete: the length of one side of the square :type arete: int or float :return: the area of the square in the squared unit of arete :rtype: int or float<|endoftext|>
762820a4621edcf65323aa030205d36220faeb75e4d4741eb982e3b743ef1d3b
def surface_rectangle(longueur, largeur): "\n function that computes and returns the area of a rectangle\n :param longueur: the length of the rectangle\n :type longueur: int or float\n :param largeur: the width of the rectangle\n :type largeur: int or float\n :return: the area of the rectangle\n :rtype: int or float\n \n :CU: the length and the width must be in the same unit\n " return (longueur * largeur)
function that computes and returns the area of a rectangle :param longueur: the length of the rectangle :type longueur: int or float :param largeur: the width of the rectangle :type largeur: int or float :return: the area of the rectangle :rtype: int or float :CU: the length and the width must be in the same unit
module_surfaces.py
surface_rectangle
laboiteaphysique/conda
0
python
def surface_rectangle(longueur, largeur): "\n function that computes and returns the area of a rectangle\n :param longueur: the length of the rectangle\n :type longueur: int or float\n :param largeur: the width of the rectangle\n :type largeur: int or float\n :return: the area of the rectangle\n :rtype: int or float\n \n :CU: the length and the width must be in the same unit\n " return (longueur * largeur)
def surface_rectangle(longueur, largeur): "\n function that computes and returns the area of a rectangle\n :param longueur: the length of the rectangle\n :type longueur: int or float\n :param largeur: the width of the rectangle\n :type largeur: int or float\n :return: the area of the rectangle\n :rtype: int or float\n \n :CU: the length and the width must be in the same unit\n " return (longueur * largeur)<|docstring|>function that computes and returns the area of a rectangle :param longueur: the length of the rectangle :type longueur: int or float :param largeur: the width of the rectangle :type largeur: int or float :return: the area of the rectangle :rtype: int or float :CU: the length and the width must be in the same unit<|endoftext|>
5492c2e128131809a19afd715b38ca09de18b9824d74dfd01f73ed9f77d3fd95
def surface_triangle(base, hauteur): "\n function that computes and returns the area of a triangle\n :param base: the base of the triangle\n :type base: int or float\n :param hauteur: the height of the triangle\n :type hauteur: int or float\n :return: the area of the triangle\n :rtype: int or float\n \n :CU: the base and the height must be in the same unit\n " return ((base * hauteur) / 2)
function that computes and returns the area of a triangle :param base: the base of the triangle :type base: int or float :param hauteur: the height of the triangle :type hauteur: int or float :return: the area of the triangle :rtype: int or float :CU: the base and the height must be in the same unit
module_surfaces.py
surface_triangle
laboiteaphysique/conda
0
python
def surface_triangle(base, hauteur): "\n function that computes and returns the area of a triangle\n :param base: the base of the triangle\n :type base: int or float\n :param hauteur: the height of the triangle\n :type hauteur: int or float\n :return: the area of the triangle\n :rtype: int or float\n \n :CU: the base and the height must be in the same unit\n " return ((base * hauteur) / 2)
def surface_triangle(base, hauteur): "\n function that computes and returns the area of a triangle\n :param base: the base of the triangle\n :type base: int or float\n :param hauteur: the height of the triangle\n :type hauteur: int or float\n :return: the area of the triangle\n :rtype: int or float\n \n :CU: the base and the height must be in the same unit\n " return ((base * hauteur) / 2)<|docstring|>function that computes and returns the area of a triangle :param base: the base of the triangle :type base: int or float :param hauteur: the height of the triangle :type hauteur: int or float :return: the area of the triangle :rtype: int or float :CU: the base and the height must be in the same unit<|endoftext|>
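Quick checks for the four area helpers above (they assume module_surfaces imports math at module level):

    print(surface_disque(2))          # 12.566370614359172 (pi * 2**2)
    print(surface_carre(3))           # 9
    print(surface_rectangle(4, 2.5))  # 10.0
    print(surface_triangle(6, 3))     # 9.0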
64155b8c146a91631e81dee831dcac14d6874773a3502d958cb4a83de7d21c02
def hover(self, xpath: str, timeout: int=timeout.DEFAULT) -> None: 'Simulate moving the mouse cursor over the middle of an element.' mouse_hover(self._driver, xpath, timeout)
Simulate moving the mouse cursor over the middle of an element.
src/browserist/browser/mouse/__main__.py
hover
jakob-bagterp/browserist
2
python
def hover(self, xpath: str, timeout: int=timeout.DEFAULT) -> None: mouse_hover(self._driver, xpath, timeout)
def hover(self, xpath: str, timeout: int=timeout.DEFAULT) -> None: mouse_hover(self._driver, xpath, timeout)<|docstring|>Simulate moving the mouse cursor over the middle of an element.<|endoftext|>
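Given the module path src/browserist/browser/mouse/__main__.py, this method presumably surfaces as browser.mouse.hover; a hedged usage sketch (the URL and XPath are illustrative, and the open.url call is the assumed Browserist entry point):

    from browserist import Browser

    with Browser() as browser:
        browser.open.url('https://example.com')  # assumed Browserist API
        browser.mouse.hover('//*[@id="menu"]')   # moves the cursor to the element's middle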
57a2926ce9cd7635e68db4b4c227adf87e38a3d63f74aacbe6defbbc60706a37
def adjust_objectives(self, objs): 'adjust objectives based on optimization goal' optim_goals = self.config.obj_goals adjusted_objs = np.empty(objs.shape) for (obj_index, obj_goal) in enumerate(optim_goals): if (obj_goal == 'minimize'): adjusted_objs[:, obj_index] = objs[:, obj_index] elif (obj_goal == 'maximize'): adjusted_objs[:, obj_index] = (- objs[:, obj_index]) else: GryffinUnknownSettingsError(('did not understand objective goal: "%s" for objective "%s".\n\tChoose from "minimize" or "maximize"' % (obj_goal, self.config.obj_names[obj_index]))) return adjusted_objs
adjust objectives based on optimization goal
src/gryffin/observation_processor/observation_processor.py
adjust_objectives
sustainable-processes/gryffin
20
python
def adjust_objectives(self, objs): optim_goals = self.config.obj_goals adjusted_objs = np.empty(objs.shape) for (obj_index, obj_goal) in enumerate(optim_goals): if (obj_goal == 'minimize'): adjusted_objs[:, obj_index] = objs[:, obj_index] elif (obj_goal == 'maximize'): adjusted_objs[:, obj_index] = (- objs[:, obj_index]) else: GryffinUnknownSettingsError(('did not understand objective goal: "%s" for objective "%s".\n\tChoose from "minimize" or "maximize"' % (obj_goal, self.config.obj_names[obj_index]))) return adjusted_objs
def adjust_objectives(self, objs): optim_goals = self.config.obj_goals adjusted_objs = np.empty(objs.shape) for (obj_index, obj_goal) in enumerate(optim_goals): if (obj_goal == 'minimize'): adjusted_objs[:, obj_index] = objs[:, obj_index] elif (obj_goal == 'maximize'): adjusted_objs[:, obj_index] = (- objs[:, obj_index]) else: GryffinUnknownSettingsError(('did not understand objective goal: "%s" for objective "%s".\n\tChoose from "minimize" or "maximize"' % (obj_goal, self.config.obj_names[obj_index]))) return adjusted_objs<|docstring|>adjust objectives based on optimization goal<|endoftext|>
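The sign flip is what lets a single minimiser handle both goals: maximising f is the same as minimising -f. A standalone illustration of the column-wise adjustment:

    import numpy as np

    objs = np.array([[1.0, 10.0],
                     [2.0,  5.0]])
    goals = ['minimize', 'maximize']
    adjusted = np.empty(objs.shape)
    for i, goal in enumerate(goals):
        adjusted[:, i] = objs[:, i] if goal == 'minimize' else -objs[:, i]
    # adjusted == [[1., -10.], [2., -5.]]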
3fb437239763d227a415e15e17207bf0b03eb095a6c7a60870e71eaea04cb866
def iterate_links(self, channel_data: dict, interaction: discord.Interaction) -> str: '\n Iterates through:\n - Roles\n - Reverse Roles\n - Suffix\n ' chunks = [] for role_id in channel_data['roles']: role = interaction.guild.get_role(int(role_id)) chunks.append(f'{(role.mention if role else role_id)}, ') for role_id in channel_data['reverse_roles']: role = interaction.guild.get_role(int(role_id)) chunks.append(f'R{(role.mention if role else role_id)}, ') chunks.append((f"`{channel_data['suffix']}`" if channel_data['suffix'] else '')) content = ''.join(chunks) content = (content.removesuffix(', ') + '\n') return content
Iterates through: - Roles - Reverse Roles - Suffix
cogs/linked.py
iterate_links
CDESamBotDev/VCRoles
3
python
def iterate_links(self, channel_data: dict, interaction: discord.Interaction) -> str: '\n Iterates through:\n - Roles\n - Reverse Roles\n - Suffix\n ' chunks = [] for role_id in channel_data['roles']: role = interaction.guild.get_role(int(role_id)) chunks.append(f'{(role.mention if role else role_id)}, ') for role_id in channel_data['reverse_roles']: role = interaction.guild.get_role(int(role_id)) chunks.append(f'R{(role.mention if role else role_id)}, ') chunks.append((f"`{channel_data['suffix']}`" if channel_data['suffix'] else '')) content = ''.join(chunks) content = (content.removesuffix(', ') + '\n') return content
def iterate_links(self, channel_data: dict, interaction: discord.Interaction) -> str: '\n Iterates through:\n - Roles\n - Reverse Roles\n - Suffix\n ' chunks = [] for role_id in channel_data['roles']: role = interaction.guild.get_role(int(role_id)) chunks.append(f'{(role.mention if role else role_id)}, ') for role_id in channel_data['reverse_roles']: role = interaction.guild.get_role(int(role_id)) chunks.append(f'R{(role.mention if role else role_id)}, ') chunks.append((f"`{channel_data['suffix']}`" if channel_data['suffix'] else '')) content = ''.join(chunks) content = (content.removesuffix(', ') + '\n') return content<|docstring|>Iterates through: - Roles - Reverse Roles - Suffix<|endoftext|>
18ee2ba350e08c7871000727ac23e96815f6463d34d67400c54df1d814d1e504
@app_commands.command() @check_any(command_available, is_owner) @app_commands.checks.has_permissions(administrator=True) async def linked(self, interaction: discord.Interaction): 'Displays the linked roles, channels & categories' linked_embed = discord.Embed(colour=discord.Colour.blue(), title=f'The linked roles, channels & categories in {interaction.guild.name}:', description='Note: \n- R before a role indicates a reverse link\n- Text like `this` shows linked suffixes') for channel_type in ['Voice', 'Stage', 'Category', 'Permanent']: content = self.construct_linked_content(self.client.redis.get_linked(channel_type.lower(), interaction.guild_id), interaction) if content: linked_embed.add_field(name=f'{channel_type} Channels:', value=content, inline=False) all_dict = self.client.redis.get_linked('all', interaction.guild_id) all_content = self.iterate_links(all_dict, interaction) chunks = [all_content] if ('except' in all_dict): if all_dict['except']: chunks.append('All-link exceptions: ') for exception_id in all_dict['except']: try: channel = self.client.get_channel(int(exception_id)) chunks.append(f'{channel.mention}, ') except: chunks.append(f'Not Found - ID `{exception_id}`') all_content = ''.join(chunks).removesuffix(', ') if all_content.strip(): linked_embed.add_field(name='All Link:', value=all_content.strip(), inline=False) if linked_embed.fields: (await interaction.response.send_message(embed=linked_embed)) else: (await interaction.response.send_message('Nothing is linked')) return self.client.incr_counter('linked')
Displays the linked roles, channels & categories
cogs/linked.py
linked
CDESamBotDev/VCRoles
3
python
@app_commands.command() @check_any(command_available, is_owner) @app_commands.checks.has_permissions(administrator=True) async def linked(self, interaction: discord.Interaction): linked_embed = discord.Embed(colour=discord.Colour.blue(), title=f'The linked roles, channels & categories in {interaction.guild.name}:', description='Note: \n- R before a role indicates a reverse link\n- Text like `this` shows linked suffixes') for channel_type in ['Voice', 'Stage', 'Category', 'Permanent']: content = self.construct_linked_content(self.client.redis.get_linked(channel_type.lower(), interaction.guild_id), interaction) if content: linked_embed.add_field(name=f'{channel_type} Channels:', value=content, inline=False) all_dict = self.client.redis.get_linked('all', interaction.guild_id) all_content = self.iterate_links(all_dict, interaction) chunks = [all_content] if ('except' in all_dict): if all_dict['except']: chunks.append('All-link exceptions: ') for exception_id in all_dict['except']: try: channel = self.client.get_channel(int(exception_id)) chunks.append(f'{channel.mention}, ') except: chunks.append(f'Not Found - ID `{exception_id}`') all_content = ''.join(chunks).removesuffix(', ') if all_content.strip(): linked_embed.add_field(name='All Link:', value=all_content.strip(), inline=False) if linked_embed.fields: (await interaction.response.send_message(embed=linked_embed)) else: (await interaction.response.send_message('Nothing is linked')) return self.client.incr_counter('linked')
@app_commands.command() @check_any(command_available, is_owner) @app_commands.checks.has_permissions(administrator=True) async def linked(self, interaction: discord.Interaction): linked_embed = discord.Embed(colour=discord.Colour.blue(), title=f'The linked roles, channels & categories in {interaction.guild.name}:', description='Note: \n- R before a role indicates a reverse link\n- Text like `this` shows linked suffixes') for channel_type in ['Voice', 'Stage', 'Category', 'Permanent']: content = self.construct_linked_content(self.client.redis.get_linked(channel_type.lower(), interaction.guild_id), interaction) if content: linked_embed.add_field(name=f'{channel_type} Channels:', value=content, inline=False) all_dict = self.client.redis.get_linked('all', interaction.guild_id) all_content = self.iterate_links(all_dict, interaction) chunks = [all_content] if ('except' in all_dict): if all_dict['except']: chunks.append('All-link exceptions: ') for exception_id in all_dict['except']: try: channel = self.client.get_channel(int(exception_id)) chunks.append(f'{channel.mention}, ') except: chunks.append(f'Not Found - ID `{exception_id}`') all_content = ''.join(chunks).removesuffix(', ') if all_content.strip(): linked_embed.add_field(name='All Link:', value=all_content.strip(), inline=False) if linked_embed.fields: (await interaction.response.send_message(embed=linked_embed)) else: (await interaction.response.send_message('Nothing is linked')) return self.client.incr_counter('linked')<|docstring|>Displays the linked roles, channels & categories<|endoftext|>
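The chunk-joining pattern used by both commands above yields a clean comma-separated line; stripped down to stdlib strings (the role mentions are illustrative Discord mention syntax, and the last chunk is empty when no suffix is set):

    chunks = ['<@&1>, ', 'R<@&2>, ', '']
    line = ''.join(chunks).removesuffix(', ') + '\n'
    print(repr(line))  # '<@&1>, R<@&2>\n'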
cd79ea03f1ce21c887f672e5fbed9d398c9d36df30899cd94f81a73df16ba867
@fixture() def kwd_attributes(): 'Keyword attributes.' return {'args': ('Kw Body Start',), 'assign': (), 'doc': 'Logs the given message with the given level.', 'kwname': 'Log', 'libname': 'BuiltIn', 'starttime': '1621947055434', 'tags': [], 'type': 'Keyword'}
Keyword attributes.
tests/unit/conftest.py
kwd_attributes
godiabolo/agent-Python-RobotFramework
42
python
@fixture() def kwd_attributes(): return {'args': ('Kw Body Start',), 'assign': (), 'doc': 'Logs the given message with the given level.', 'kwname': 'Log', 'libname': 'BuiltIn', 'starttime': '1621947055434', 'tags': [], 'type': 'Keyword'}
@fixture() def kwd_attributes(): return {'args': ('Kw Body Start',), 'assign': (), 'doc': 'Logs the given message with the given level.', 'kwname': 'Log', 'libname': 'BuiltIn', 'starttime': '1621947055434', 'tags': [], 'type': 'Keyword'}<|docstring|>Keyword attributes.<|endoftext|>
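A pytest test can take the fixture by name and receive the dict directly; for example:

    def test_keyword_is_builtin_log(kwd_attributes):
        assert kwd_attributes['kwname'] == 'Log'
        assert kwd_attributes['libname'] == 'BuiltIn'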
3d7fc55756b3a9cf48c280c81e71eca44971e1cf4c20fa672d2711f20d64289b
def __init__(self): 'Initialises the provider with the YTS url' super(YTS, self).__init__('YTS', 'https://yts.to/api/v2/list_movies.json')
Initialises the provider with the YTS url
streams/search/providers/yts.py
__init__
robalar/Streams
1
python
def __init__(self): super(YTS, self).__init__('YTS', 'https://yts.to/api/v2/list_movies.json')
def __init__(self): super(YTS, self).__init__('YTS', 'https://yts.to/api/v2/list_movies.json')<|docstring|>Initialises the provider with the YTS url<|endoftext|>
c4bc4095147747c8350d0e55215fc5d07a9533a7d4e62869587af1a41707365e
def do_search(self, search_term): 'Gets all pages of data from YTS' limit = 50 parameters = {'query_term': search_term, 'limit': limit, 'page': 1} requests_session = requesocks.session() requests_session.proxies = streams.PROXIES request = requests_session.get(url=self.url, params=parameters) data = json.loads(request.text) if (data['status'] == 'error'): return [] data = data['data'] current_page = data['page_number'] movie_count = data['movie_count'] page_count = math.ceil((float(movie_count) / float(limit))) movie_list = self.create_movie_list(data) while (current_page < page_count): parameters['page'] += 1 request = requests_session.get(url=self.url, params=parameters) _data = json.loads(request.text) _data = _data['data'] current_page = _data['page_number'] movie_list += self.create_movie_list(_data) for _movie in movie_list: _movie.torrents[:] = [torrent.Torrent(**_torrent) for _torrent in _movie.torrents] return movie_list
Gets all pages of data from YTS
streams/search/providers/yts.py
do_search
robalar/Streams
1
python
def do_search(self, search_term): limit = 50 parameters = {'query_term': search_term, 'limit': limit, 'page': 1} requests_session = requesocks.session() requests_session.proxies = streams.PROXIES request = requests_session.get(url=self.url, params=parameters) data = json.loads(request.text) if (data['status'] == 'error'): return [] data = data['data'] current_page = data['page_number'] movie_count = data['movie_count'] page_count = math.ceil((float(movie_count) / float(limit))) movie_list = self.create_movie_list(data) while (current_page < page_count): parameters['page'] += 1 request = requests_session.get(url=self.url, params=parameters) _data = json.loads(request.text) _data = _data['data'] current_page = _data['page_number'] movie_list += self.create_movie_list(_data) for _movie in movie_list: _movie.torrents[:] = [torrent.Torrent(**_torrent) for _torrent in _movie.torrents] return movie_list
def do_search(self, search_term): limit = 50 parameters = {'query_term': search_term, 'limit': limit, 'page': 1} requests_session = requesocks.session() requests_session.proxies = streams.PROXIES request = requests_session.get(url=self.url, params=parameters) data = json.loads(request.text) if (data['status'] == 'error'): return [] data = data['data'] current_page = data['page_number'] movie_count = data['movie_count'] page_count = math.ceil((float(movie_count) / float(limit))) movie_list = self.create_movie_list(data) while (current_page < page_count): parameters['page'] += 1 request = requests_session.get(url=self.url, params=parameters) _data = json.loads(request.text) _data = _data['data'] current_page = _data['page_number'] movie_list += self.create_movie_list(_data) for _movie in movie_list: _movie.torrents[:] = [torrent.Torrent(**_torrent) for _torrent in _movie.torrents] return movie_list<|docstring|>Gets all pages of data from YTS<|endoftext|>
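The paging arithmetic in do_search rounds the movie count up to whole pages of size limit; in isolation:

    import math

    movie_count, limit = 117, 50
    page_count = math.ceil(float(movie_count) / float(limit))
    print(page_count)  # 3 pages: 50 + 50 + 17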
80cda09f6cebe7aa3d85b0252bb5e425d0d527a4d71b9ce6797d5cacdc4917b4
def update(self, d): '\n Updates this dictionary.\n\n Parameters\n ----------\n d : dict\n key-value pairs to add to or update in this dictionary.\n\n Returns\n -------\n None\n ' invalid_keys = [k for k in d.keys() if (k not in self.valid_keys)] if invalid_keys: raise ValueError(("Invalid keys '%s'. Valid keys are: '%s'" % ("', '".join(invalid_keys), "', '".join(sorted(self.valid_keys))))) super().update(d)
Updates this dictionary. Parameters ---------- d : dict key-value pairs to add to or update in this dictionary. Returns ------- None
pygsti/baseobjs/advancedoptions.py
update
maij/pyGSTi
73
python
def update(self, d): '\n Updates this dictionary.\n\n Parameters\n ----------\n d : dict\n key-value pairs to add to or update in this dictionary.\n\n Returns\n -------\n None\n ' invalid_keys = [k for k in d.keys() if (k not in self.valid_keys)] if invalid_keys: raise ValueError(("Invalid keys '%s'. Valid keys are: '%s'" % ("', '".join(invalid_keys), "', '".join(sorted(self.valid_keys))))) super().update(d)
def update(self, d): '\n Updates this dictionary.\n\n Parameters\n ----------\n d : dict\n key-value pairs to add to or update in this dictionary.\n\n Returns\n -------\n None\n ' invalid_keys = [k for k in d.keys() if (k not in self.valid_keys)] if invalid_keys: raise ValueError(("Invalid keys '%s'. Valid keys are: '%s'" % ("', '".join(invalid_keys), "', '".join(sorted(self.valid_keys))))) super().update(d)<|docstring|>Updates this dictionary. Parameters ---------- d : dict key-value pairs to add to or update in this dictionary. Returns ------- None<|endoftext|>
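A minimal sketch of the validated-dict pattern, assuming a valid_keys set on the class (Options and its keys here are stand-ins for illustration, not pyGSTi's actual class):

    class Options(dict):
        valid_keys = {'tolerance', 'max_iterations'}  # hypothetical keys

        def update(self, d):
            invalid_keys = [k for k in d if k not in self.valid_keys]
            if invalid_keys:
                raise ValueError("Invalid keys '%s'. Valid keys are: '%s'"
                                 % ("', '".join(invalid_keys),
                                    "', '".join(sorted(self.valid_keys))))
            super().update(d)

    opts = Options()
    opts.update({'tolerance': 1e-6})  # accepted
    # opts.update({'tol': 1e-6})      # would raise ValueError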
4bc90bbc21a6d1a2d5703745f0234f690b897b62f389a72f4fa6a6c4619ab1bd
def get_stories(self, limit: int=10) -> List[Story]: '\n Get a list of current stories from Wikipedia.\n ' feed = feedparser.parse('https://www.to-rss.xyz/wikipedia/current_events/') title = "Today's Current Events" content = bs4.BeautifulSoup(feed.entries[0].summary, 'lxml') for a in content.find_all('a'): while a.find('li'): a.find('li').replace_with_children() while a.find('ul'): a.find('ul').replace_with_children() a.replace_with_children() while content.find('dl'): content.find('dl').name = 'h3' return [Story(headline=title, body_html=str(content), byline='Wikipedia Current Events', placement_preference=PlacementPreference.BANNER)]
Get a list of current stories from Wikipedia.
goosepaper/wikipedia.py
get_stories
traysh/goosepaper
178
python
def get_stories(self, limit: int=10) -> List[Story]: '\n \n ' feed = feedparser.parse('https://www.to-rss.xyz/wikipedia/current_events/') title = "Today's Current Events" content = bs4.BeautifulSoup(feed.entries[0].summary, 'lxml') for a in content.find_all('a'): while a.find('li'): a.find('li').replace_with_children() while a.find('ul'): a.find('ul').replace_with_children() a.replace_with_children() while content.find('dl'): content.find('dl').name = 'h3' return [Story(headline=title, body_html=str(content), byline='Wikipedia Current Events', placement_preference=PlacementPreference.BANNER)]
def get_stories(self, limit: int=10) -> List[Story]: '\n \n ' feed = feedparser.parse('https://www.to-rss.xyz/wikipedia/current_events/') title = "Today's Current Events" content = bs4.BeautifulSoup(feed.entries[0].summary, 'lxml') for a in content.find_all('a'): while a.find('li'): a.find('li').replace_with_children() while a.find('ul'): a.find('ul').replace_with_children() a.replace_with_children() while content.find('dl'): content.find('dl').name = 'h3' return [Story(headline=title, body_html=str(content), byline='Wikipedia Current Events', placement_preference=PlacementPreference.BANNER)]<|docstring|>Get a list of current stories from Wikipedia.<|endoftext|>
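The while-loop tag renaming in get_stories works because assigning to .name rewrites the tag in place, so the next find('dl') moves past it; a self-contained demonstration with html.parser (which, unlike lxml, adds no wrapper tags):

    from bs4 import BeautifulSoup

    content = BeautifulSoup('<dl><dt>May 1</dt></dl><dl><dt>May 2</dt></dl>', 'html.parser')
    while content.find('dl'):
        content.find('dl').name = 'h3'
    print(content)  # <h3><dt>May 1</dt></h3><h3><dt>May 2</dt></h3>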
f3e5e38b1fd31f81e9e647ff5e5582b76586c73ac188e22621277b562693e087
def get_element(self, req, style, storable): '\n\t\t@see: L{modu.editable.define.definition.get_element()}\n\t\t' store = storable.get_store() value = self['fvalue'] label = self['flabel'] table = self['ftable'] where = self.get('fwhere', ('WHERE %s = %%s' % q(value))) args = [getattr(storable, self.get_column_name(), None)] if callable(where): where = where(req, storable) args = [] if isinstance(where, dict): where = sql.build_where(where) args = [] foreign_label_query = ('SELECT %s, %s FROM %s %s' % (q(value), q(label), q(table), where)) foreign_label_query = sql.interp(foreign_label_query, *args) results = store.pool.runQuery(foreign_label_query) frm = form.FormNode(self.name) frm(type='label') if results: frm(value=results[0][label]) return frm
@see: L{modu.editable.define.definition.get_element()}
src/modu/editable/datatypes/relational.py
get_element
philchristensen/modu
0
python
def get_element(self, req, style, storable): '\n\t\t\n\t\t' store = storable.get_store() value = self['fvalue'] label = self['flabel'] table = self['ftable'] where = self.get('fwhere', ('WHERE %s = %%s' % q(value))) args = [getattr(storable, self.get_column_name(), None)] if callable(where): where = where(req, storable) args = [] if isinstance(where, dict): where = sql.build_where(where) args = [] foreign_label_query = ('SELECT %s, %s FROM %s %s' % (q(value), q(label), q(table), where)) foreign_label_query = sql.interp(foreign_label_query, *args) results = store.pool.runQuery(foreign_label_query) frm = form.FormNode(self.name) frm(type='label') if results: frm(value=results[0][label]) return frm
def get_element(self, req, style, storable): '\n\t\t\n\t\t' store = storable.get_store() value = self['fvalue'] label = self['flabel'] table = self['ftable'] where = self.get('fwhere', ('WHERE %s = %%s' % q(value))) args = [getattr(storable, self.get_column_name(), None)] if callable(where): where = where(req, storable) args = [] if isinstance(where, dict): where = sql.build_where(where) args = [] foreign_label_query = ('SELECT %s, %s FROM %s %s' % (q(value), q(label), q(table), where)) foreign_label_query = sql.interp(foreign_label_query, *args) results = store.pool.runQuery(foreign_label_query) frm = form.FormNode(self.name) frm(type='label') if results: frm(value=results[0][label]) return frm<|docstring|>@see: L{modu.editable.define.definition.get_element()}<|endoftext|>
a96bc73656ee6e497749356bbd5e3c612ad7ee96519f45fc63a6d58cd0f79fc1
def get_element(self, req, style, storable): '\n\t\t@see: L{modu.editable.define.definition.get_element()}\n\t\t' store = storable.get_store() label = None value = None label_col = self.get('flabel', 'title') value_col = 'id' table = getattr(storable, 'item_table', None) if (not table): table = self.get('ftable') item_value = getattr(storable, self.get_column_name(), None) if ((table is None) or (item_value is None)): results = None else: foreign_label_query = ('SELECT * FROM %s WHERE %s = %%s' % (table, value_col)) foreign_label_query = sql.interp(foreign_label_query, [item_value]) results = store.pool.runQuery(foreign_label_query) if results: value = results[0][value_col] label = results[0].get(label_col, '(label not found)') frm = form.FormNode(self.name) suffix = '' prefix = '' if (style == 'listing'): frm(type='hidden', value=value) if (table and value): label = tags.a(href=req.get_path(req.prepath, 'detail', table, value))[label] frm(type='label', value=label) else: if (not label): label = '(no link available)' frm(type='hidden', value=value) if (table and value): prefix = tags.a(href=req.get_path(req.prepath, 'detail', table, value))[label] else: prefix = label frm(prefix=prefix, suffix=suffix) return frm
@see: L{modu.editable.define.definition.get_element()}
src/modu/editable/datatypes/relational.py
get_element
philchristensen/modu
0
python
def get_element(self, req, style, storable): '\n\t\t\n\t\t' store = storable.get_store() label = None value = None label_col = self.get('flabel', 'title') value_col = 'id' table = getattr(storable, 'item_table', None) if (not table): table = self.get('ftable') item_value = getattr(storable, self.get_column_name(), None) if ((table is None) or (item_value is None)): results = None else: foreign_label_query = ('SELECT * FROM %s WHERE %s = %%s' % (table, value_col)) foreign_label_query = sql.interp(foreign_label_query, [item_value]) results = store.pool.runQuery(foreign_label_query) if results: value = results[0][value_col] label = results[0].get(label_col, '(label not found)') frm = form.FormNode(self.name) suffix = '' prefix = '' if (style == 'listing'): frm(type='hidden', value=value) if (table and value): label = tags.a(href=req.get_path(req.prepath, 'detail', table, value))[label] frm(type='label', value=label) else: if (not label): label = '(no link available)' frm(type='hidden', value=value) if (table and value): prefix = tags.a(href=req.get_path(req.prepath, 'detail', table, value))[label] else: prefix = label frm(prefix=prefix, suffix=suffix) return frm
def get_element(self, req, style, storable): '\n\t\t\n\t\t' store = storable.get_store() label = None value = None label_col = self.get('flabel', 'title') value_col = 'id' table = getattr(storable, 'item_table', None) if (not table): table = self.get('ftable') item_value = getattr(storable, self.get_column_name(), None) if ((table is None) or (item_value is None)): results = None else: foreign_label_query = ('SELECT * FROM %s WHERE %s = %%s' % (table, value_col)) foreign_label_query = sql.interp(foreign_label_query, [item_value]) results = store.pool.runQuery(foreign_label_query) if results: value = results[0][value_col] label = results[0].get(label_col, '(label not found)') frm = form.FormNode(self.name) suffix = '' prefix = '' if (style == 'listing'): frm(type='hidden', value=value) if (table and value): label = tags.a(href=req.get_path(req.prepath, 'detail', table, value))[label] frm(type='label', value=label) else: if (not label): label = '(no link available)' frm(type='hidden', value=value) if (table and value): prefix = tags.a(href=req.get_path(req.prepath, 'detail', table, value))[label] else: prefix = label frm(prefix=prefix, suffix=suffix) return frm<|docstring|>@see: L{modu.editable.define.definition.get_element()}<|endoftext|>
ac4ece8ea67715a04fc834b4d84eb4c7c2e4f0444d0ba983da05ee610d4a15eb
def update_storable(self, req, form, storable): '\n\t\tNo operation.\n\t\t\n\t\t@see: L{modu.editable.define.definition.update_storable()}\n\t\t' pass
No operation. @see: L{modu.editable.define.definition.update_storable()}
src/modu/editable/datatypes/relational.py
update_storable
philchristensen/modu
0
python
def update_storable(self, req, form, storable): '\n\t\tNo operation.\n\t\t\n\t\t@see: L{modu.editable.define.definition.update_storable()}\n\t\t' pass
def update_storable(self, req, form, storable): '\n\t\tNo operation.\n\t\t\n\t\t@see: L{modu.editable.define.definition.update_storable()}\n\t\t' pass<|docstring|>No operation. @see: L{modu.editable.define.definition.update_storable()}<|endoftext|>
399a91b0cace2fc8fa6c7f43ce620477ce9872c8cabacd2ad6326346d04c4b1b
def get_element(self, req, style, storable): '\n\t\t@see: L{modu.editable.define.definition.get_element()}\n\t\t' store = storable.get_store() value = self['fvalue'] label = self['flabel'] table = self['ftable'] where = self.get('fwhere', '') order_by = self.get('order_by', None) if callable(where): where = where(req, storable) if isinstance(where, dict): where = sql.build_where(where) foreign_query = ('SELECT %s, %s FROM %s ' % (q(value), q(label), q(table))) if where: foreign_query += where if order_by: foreign_query += ('ORDER BY %s' % order_by) results = store.pool.runQuery(foreign_query) options = OrderedDict([(item[value], item[label]) for item in results]) frm = form.FormNode(self.name) if ((style == 'listing') or self.get('read_only', False)): foreign_value = getattr(storable, self.get_column_name(), None) if (foreign_value in options): frm(type='label', value=options[foreign_value]) else: frm(type='label', value='') else: frm(type='select', value=getattr(storable, self.get_column_name(), None), options=options) return frm
@see: L{modu.editable.define.definition.get_element()}
src/modu/editable/datatypes/relational.py
get_element
philchristensen/modu
0
python
def get_element(self, req, style, storable): '\n\t\t\n\t\t' store = storable.get_store() value = self['fvalue'] label = self['flabel'] table = self['ftable'] where = self.get('fwhere', '') order_by = self.get('order_by', None) if callable(where): where = where(req, storable) if isinstance(where, dict): where = sql.build_where(where) foreign_query = ('SELECT %s, %s FROM %s ' % (q(value), q(label), q(table))) if where: foreign_query += where if order_by: foreign_query += ('ORDER BY %s' % order_by) results = store.pool.runQuery(foreign_query) options = OrderedDict([(item[value], item[label]) for item in results]) frm = form.FormNode(self.name) if ((style == 'listing') or self.get('read_only', False)): foreign_value = getattr(storable, self.get_column_name(), None) if (foreign_value in options): frm(type='label', value=options[foreign_value]) else: frm(type='label', value='') else: frm(type='select', value=getattr(storable, self.get_column_name(), None), options=options) return frm
def get_element(self, req, style, storable): '\n\t\t\n\t\t' store = storable.get_store() value = self['fvalue'] label = self['flabel'] table = self['ftable'] where = self.get('fwhere', '') order_by = self.get('order_by', None) if callable(where): where = where(req, storable) if isinstance(where, dict): where = sql.build_where(where) foreign_query = ('SELECT %s, %s FROM %s ' % (q(value), q(label), q(table))) if where: foreign_query += where if order_by: foreign_query += ('ORDER BY %s' % order_by) results = store.pool.runQuery(foreign_query) options = OrderedDict([(item[value], item[label]) for item in results]) frm = form.FormNode(self.name) if ((style == 'listing') or self.get('read_only', False)): foreign_value = getattr(storable, self.get_column_name(), None) if (foreign_value in options): frm(type='label', value=options[foreign_value]) else: frm(type='label', value='') else: frm(type='select', value=getattr(storable, self.get_column_name(), None), options=options) return frm<|docstring|>@see: L{modu.editable.define.definition.get_element()}<|endoftext|>
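The options mapping for the select element is just id-to-label pairs built from the query rows; in isolation (the rows are illustrative):

    from collections import OrderedDict

    results = [{'id': 1, 'title': 'Draft'}, {'id': 2, 'title': 'Published'}]
    value, label = 'id', 'title'
    options = OrderedDict([(item[value], item[label]) for item in results])
    # OrderedDict([(1, 'Draft'), (2, 'Published')])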
04781f316d6dd5bca0e589288f65ad626a8b0d0488bc49fc1d656dcb8478fe8a
def get_element(self, req, style, storable): '\n\t\t@see: L{modu.editable.define.definition.get_element()}\n\t\t' form_name = ('%s-form' % storable.get_table()) ac_id = ('%s-%s-autocomplete' % (form_name, self.name)) ac_cb_id = ('%s-%s-ac-callback' % (form_name, self.name)) ac_url = req.get_path(req.prepath, 'autocomplete', storable.get_table(), self.name) prefs = ('\n\t\t\t\t\tautoFill:1,\n\t\t\t\t\tselectFirst:1,\n\t\t\t\t\tmatchSubset:0,\n\t\t\t\t\tselectOnly:1,\n\t\t\t\t\tformatItem:formatItem,\n\t\t\t\t\textraParams:{t:%d}, minChars:%d' % (int(time.time()), self.get('min_chars', 3))) ac_javascript = ('$("#%s").autocomplete("%s", {%s});' % (ac_id, ac_url, prefs)) ac_javascript += ('$("#%s").result(select_item_handler("%s"));' % (ac_id, ac_cb_id)) ac_javascript = tags.script(type='text/javascript')[ac_javascript] ac_field = form.FormNode(('%s-autocomplete' % self.name)) ac_field(type='textfield', weight=0, attributes={'id': ac_id}, suffix=ac_javascript) value_field = form.FormNode(self.name) value_field(type='hidden', weight=2, value=getattr(storable, self.get_column_name(), None), attributes={'id': ac_cb_id}) store = storable.get_store() value = self['fvalue'] label = self['flabel'] table = self['ftable'] if hasattr(storable, self.get_column_name()): query = ('SELECT %s FROM %s WHERE %s = %%s' % (q(label), q(table), q(value))) field_value = getattr(storable, self.get_column_name()) if (field_value is not None): results = store.pool.runQuery(sql.interp(query, field_value)) if results: ac_field(value=results[0][label]) else: value_field(value=0) else: value_field(value=0) if ((style == 'listing') or self.get('read_only', False)): return form.FormNode(self.name)(type='label', value=ac_field.attr('value', '')) req.content.report('header', tags.style(type='text/css')[("@import '%s';" % req.get_path('/assets/jquery/jquery.autocomplete.css'))]) req.content.report('header', tags.script(type='text/javascript')["\n\t\t\tfunction formatItem(item, index, totalItems){\n\t\t\t\treturn item[0].replace('<', '&lt;').replace('>', '&gt;')\n\t\t\t}\n\t\t\t"]) assets.activate_jquery(req) req.content.report('header', tags.script(type='text/javascript', src=req.get_path('/assets/jquery/jquery.autocomplete.js'))['']) req.content.report('header', tags.script(type='text/javascript', src=req.get_path('/assets/editable-autocomplete.js'))['']) frm = form.FormNode(('%s-ac-fieldset' % self.name))(type='fieldset', style='brief') frm[ac_field.name] = ac_field frm[value_field.name] = value_field return frm
@see: L{modu.editable.define.definition.get_element()}
src/modu/editable/datatypes/relational.py
get_element
philchristensen/modu
0
python
def get_element(self, req, style, storable): '\n\t\t\n\t\t' form_name = ('%s-form' % storable.get_table()) ac_id = ('%s-%s-autocomplete' % (form_name, self.name)) ac_cb_id = ('%s-%s-ac-callback' % (form_name, self.name)) ac_url = req.get_path(req.prepath, 'autocomplete', storable.get_table(), self.name) prefs = ('\n\t\t\t\t\tautoFill:1,\n\t\t\t\t\tselectFirst:1,\n\t\t\t\t\tmatchSubset:0,\n\t\t\t\t\tselectOnly:1,\n\t\t\t\t\tformatItem:formatItem,\n\t\t\t\t\textraParams:{t:%d}, minChars:%d' % (int(time.time()), self.get('min_chars', 3))) ac_javascript = ('$("#%s").autocomplete("%s", {%s});' % (ac_id, ac_url, prefs)) ac_javascript += ('$("#%s").result(select_item_handler("%s"));' % (ac_id, ac_cb_id)) ac_javascript = tags.script(type='text/javascript')[ac_javascript] ac_field = form.FormNode(('%s-autocomplete' % self.name)) ac_field(type='textfield', weight=0, attributes={'id': ac_id}, suffix=ac_javascript) value_field = form.FormNode(self.name) value_field(type='hidden', weight=2, value=getattr(storable, self.get_column_name(), None), attributes={'id': ac_cb_id}) store = storable.get_store() value = self['fvalue'] label = self['flabel'] table = self['ftable'] if hasattr(storable, self.get_column_name()): query = ('SELECT %s FROM %s WHERE %s = %%s' % (q(label), q(table), q(value))) field_value = getattr(storable, self.get_column_name()) if (field_value is not None): results = store.pool.runQuery(sql.interp(query, field_value)) if results: ac_field(value=results[0][label]) else: value_field(value=0) else: value_field(value=0) if ((style == 'listing') or self.get('read_only', False)): return form.FormNode(self.name)(type='label', value=ac_field.attr('value', '')) req.content.report('header', tags.style(type='text/css')[("@import '%s';" % req.get_path('/assets/jquery/jquery.autocomplete.css'))]) req.content.report('header', tags.script(type='text/javascript')["\n\t\t\tfunction formatItem(item, index, totalItems){\n\t\t\t\treturn item[0].replace('<', '&lt;').replace('>', '&gt;')\n\t\t\t}\n\t\t\t"]) assets.activate_jquery(req) req.content.report('header', tags.script(type='text/javascript', src=req.get_path('/assets/jquery/jquery.autocomplete.js'))['']) req.content.report('header', tags.script(type='text/javascript', src=req.get_path('/assets/editable-autocomplete.js'))['']) frm = form.FormNode(('%s-ac-fieldset' % self.name))(type='fieldset', style='brief') frm[ac_field.name] = ac_field frm[value_field.name] = value_field return frm
def get_element(self, req, style, storable): '\n\t\t\n\t\t' form_name = ('%s-form' % storable.get_table()) ac_id = ('%s-%s-autocomplete' % (form_name, self.name)) ac_cb_id = ('%s-%s-ac-callback' % (form_name, self.name)) ac_url = req.get_path(req.prepath, 'autocomplete', storable.get_table(), self.name) prefs = ('\n\t\t\t\t\tautoFill:1,\n\t\t\t\t\tselectFirst:1,\n\t\t\t\t\tmatchSubset:0,\n\t\t\t\t\tselectOnly:1,\n\t\t\t\t\tformatItem:formatItem,\n\t\t\t\t\textraParams:{t:%d}, minChars:%d' % (int(time.time()), self.get('min_chars', 3))) ac_javascript = ('$("#%s").autocomplete("%s", {%s});' % (ac_id, ac_url, prefs)) ac_javascript += ('$("#%s").result(select_item_handler("%s"));' % (ac_id, ac_cb_id)) ac_javascript = tags.script(type='text/javascript')[ac_javascript] ac_field = form.FormNode(('%s-autocomplete' % self.name)) ac_field(type='textfield', weight=0, attributes={'id': ac_id}, suffix=ac_javascript) value_field = form.FormNode(self.name) value_field(type='hidden', weight=2, value=getattr(storable, self.get_column_name(), None), attributes={'id': ac_cb_id}) store = storable.get_store() value = self['fvalue'] label = self['flabel'] table = self['ftable'] if hasattr(storable, self.get_column_name()): query = ('SELECT %s FROM %s WHERE %s = %%s' % (q(label), q(table), q(value))) field_value = getattr(storable, self.get_column_name()) if (field_value is not None): results = store.pool.runQuery(sql.interp(query, field_value)) if results: ac_field(value=results[0][label]) else: value_field(value=0) else: value_field(value=0) if ((style == 'listing') or self.get('read_only', False)): return form.FormNode(self.name)(type='label', value=ac_field.attr('value', '')) req.content.report('header', tags.style(type='text/css')[("@import '%s';" % req.get_path('/assets/jquery/jquery.autocomplete.css'))]) req.content.report('header', tags.script(type='text/javascript')["\n\t\t\tfunction formatItem(item, index, totalItems){\n\t\t\t\treturn item[0].replace('<', '&lt;').replace('>', '&gt;')\n\t\t\t}\n\t\t\t"]) assets.activate_jquery(req) req.content.report('header', tags.script(type='text/javascript', src=req.get_path('/assets/jquery/jquery.autocomplete.js'))['']) req.content.report('header', tags.script(type='text/javascript', src=req.get_path('/assets/editable-autocomplete.js'))['']) frm = form.FormNode(('%s-ac-fieldset' % self.name))(type='fieldset', style='brief') frm[ac_field.name] = ac_field frm[value_field.name] = value_field return frm<|docstring|>@see: L{modu.editable.define.definition.get_element()}<|endoftext|>
b67e384b161998bda142b37940566c877d1d5a264b6e1517ad38f795d16359e3
def update_storable(self, req, frm, storable): '\n\t\t@see: L{modu.editable.define.definition.get_element()}\n\t\t' form_name = ('%s-form' % storable.get_table()) if (form_name in req.data): form_data = req.data[form_name] if (not form_data.get(self.name, {}).get(('%s-autocomplete' % self.name), None).value): setattr(storable, self.get_column_name(), None) elif ((self.name in form_data) and (self.name in form_data[self.name])): setattr(storable, self.get_column_name(), form_data[self.name][self.name].value) return True
@see: L{modu.editable.define.definition.get_element()}
src/modu/editable/datatypes/relational.py
update_storable
philchristensen/modu
0
python
def update_storable(self, req, frm, storable): '\n\t\t\n\t\t' form_name = ('%s-form' % storable.get_table()) if (form_name in req.data): form_data = req.data[form_name] if (not form_data.get(self.name, {}).get(('%s-autocomplete' % self.name), None).value): setattr(storable, self.get_column_name(), None) elif ((self.name in form_data) and (self.name in form_data[self.name])): setattr(storable, self.get_column_name(), form_data[self.name][self.name].value) return True
def update_storable(self, req, frm, storable): '\n\t\t\n\t\t' form_name = ('%s-form' % storable.get_table()) if (form_name in req.data): form_data = req.data[form_name] if (not form_data.get(self.name, {}).get(('%s-autocomplete' % self.name), None).value): setattr(storable, self.get_column_name(), None) elif ((self.name in form_data) and (self.name in form_data[self.name])): setattr(storable, self.get_column_name(), form_data[self.name][self.name].value) return True<|docstring|>@see: L{modu.editable.define.definition.get_element()}<|endoftext|>
7593bb75783dc63869e990c68376b1627b3c31d9dbc6da622592ba036f173bb2
def get_element(self, req, style, storable): '\n\t\t@see: L{modu.editable.define.definition.get_element()}\n\t\t' mlabel = self.get('flabel', '') if (mlabel.find('.') == (- 1)): mlabel = ('m.%s' % q(mlabel)) mlabel = self.get('flabel_sql', mlabel) where = self.get('fwhere', '') if callable(where): where = where(req, storable) if isinstance(where, dict): where = sql.build_where(where) ntom_query = ('SELECT m.%s AS value, %s AS label, COALESCE(n2m.%s, n2m.%s = 1, 0) AS selected\n\t\t\t\t\t\tFROM %s m\n\t\t\t\t\t\tLEFT JOIN %s n2m ON m.%s = n2m.%s AND n2m.%s = %%s\n\t\t\t\t\t\t%s\n\t\t\t\t\t\tORDER BY label' % (self['fvalue'], mlabel, self['ntof_f_id'], self['ntof_f_id'], q(self['ftable']), q(self['ntof']), self.get('fvalue', 'id'), self['ntof_f_id'], self['ntof_n_id'], where)) store = storable.get_store() results = store.pool.runQuery(sql.interp(ntom_query, storable.get_id())) if ((style == 'listing') or self.get('read_only', False)): def _default_formatter(req_ignored, style_ignored, storable_ignored, result): return ', '.join([item['label'] for item in result if item['selected']]) formatter = self.get('formatter', _default_formatter) label_value = formatter(req, style, storable, results) return form.FormNode(self.name)(type='label', value=label_value) values = [item['value'] for item in results if item['selected']] options = OrderedDict([(item['value'], item['label']) for item in results]) frm = form.FormNode(self.name) frm(type='select', multiple=True, value=values, options=options) return frm
@see: L{modu.editable.define.definition.get_element()}
src/modu/editable/datatypes/relational.py
get_element
philchristensen/modu
0
python
def get_element(self, req, style, storable): '\n\t\t\n\t\t' mlabel = self.get('flabel', '') if (mlabel.find('.') == (- 1)): mlabel = ('m.%s' % q(mlabel)) mlabel = self.get('flabel_sql', mlabel) where = self.get('fwhere', '') if callable(where): where = where(req, storable) if isinstance(where, dict): where = sql.build_where(where) ntom_query = ('SELECT m.%s AS value, %s AS label, COALESCE(n2m.%s, n2m.%s = 1, 0) AS selected\n\t\t\t\t\t\tFROM %s m\n\t\t\t\t\t\tLEFT JOIN %s n2m ON m.%s = n2m.%s AND n2m.%s = %%s\n\t\t\t\t\t\t%s\n\t\t\t\t\t\tORDER BY label' % (self['fvalue'], mlabel, self['ntof_f_id'], self['ntof_f_id'], q(self['ftable']), q(self['ntof']), self.get('fvalue', 'id'), self['ntof_f_id'], self['ntof_n_id'], where)) store = storable.get_store() results = store.pool.runQuery(sql.interp(ntom_query, storable.get_id())) if ((style == 'listing') or self.get('read_only', False)): def _default_formatter(req_ignored, style_ignored, storable_ignored, result): return ', '.join([item['label'] for item in result if item['selected']]) formatter = self.get('formatter', _default_formatter) label_value = formatter(req, style, storable, results) return form.FormNode(self.name)(type='label', value=label_value) values = [item['value'] for item in results if item['selected']] options = OrderedDict([(item['value'], item['label']) for item in results]) frm = form.FormNode(self.name) frm(type='select', multiple=True, value=values, options=options) return frm
def get_element(self, req, style, storable): '\n\t\t\n\t\t' mlabel = self.get('flabel', '') if (mlabel.find('.') == (- 1)): mlabel = ('m.%s' % q(mlabel)) mlabel = self.get('flabel_sql', mlabel) where = self.get('fwhere', '') if callable(where): where = where(req, storable) if isinstance(where, dict): where = sql.build_where(where) ntom_query = ('SELECT m.%s AS value, %s AS label, COALESCE(n2m.%s, n2m.%s = 1, 0) AS selected\n\t\t\t\t\t\tFROM %s m\n\t\t\t\t\t\tLEFT JOIN %s n2m ON m.%s = n2m.%s AND n2m.%s = %%s\n\t\t\t\t\t\t%s\n\t\t\t\t\t\tORDER BY label' % (self['fvalue'], mlabel, self['ntof_f_id'], self['ntof_f_id'], q(self['ftable']), q(self['ntof']), self.get('fvalue', 'id'), self['ntof_f_id'], self['ntof_n_id'], where)) store = storable.get_store() results = store.pool.runQuery(sql.interp(ntom_query, storable.get_id())) if ((style == 'listing') or self.get('read_only', False)): def _default_formatter(req_ignored, style_ignored, storable_ignored, result): return ', '.join([item['label'] for item in result if item['selected']]) formatter = self.get('formatter', _default_formatter) label_value = formatter(req, style, storable, results) return form.FormNode(self.name)(type='label', value=label_value) values = [item['value'] for item in results if item['selected']] options = OrderedDict([(item['value'], item['label']) for item in results]) frm = form.FormNode(self.name) frm(type='select', multiple=True, value=values, options=options) return frm<|docstring|>@see: L{modu.editable.define.definition.get_element()}<|endoftext|>
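The query assembled in the record above follows a common relational pattern: select every candidate row from the foreign table, LEFT JOIN the n-to-m link table restricted to the current storable's id, and read the join's success as a selected flag, so a single round trip yields both the option list and the current selection. A minimal standalone sketch of the pattern; the table and column names (`category`, `page_category`, `page_id`, `category_id`) are hypothetical, not taken from the record, and the boolean-as-0/1 projection assumes a MySQL-style dialect:

```python
# One query returns all options plus a 0/1 selected flag per option.
# Rows with no matching link row survive the LEFT JOIN with NULLs,
# so "selected" is simply whether the join key came back non-NULL.
NTOM_PATTERN = """
    SELECT m.id AS value, m.title AS label,
           (n2m.category_id IS NOT NULL) AS selected
    FROM category m
    LEFT JOIN page_category n2m
        ON m.id = n2m.category_id AND n2m.page_id = %s
    ORDER BY label
"""
```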
0433c31533b911a0ab75ce0c2f12e405c30e20b290ad150b8327beca0c3ffc06
def update_storable(self, req, form, storable): '\n\t\t@see: L{modu.editable.define.definition.get_element()}\n\t\t' form_data = req.data[form.name] store = storable.get_store() item_id = storable.get_id() delete_query = sql.build_delete(self['ntof'], {self['ntof_n_id']: item_id}) store.pool.runOperation(delete_query) if (self.name in form_data): values = form_data[self.name].value if isinstance(values, dict): values = values[(self.name + '-autocomplete')] if (not isinstance(values, list)): values = [values] data = [{self['ntof_n_id']: item_id, self['ntof_f_id']: getattr(val, 'value', val)} for val in values] insert_query = sql.build_insert(self['ntof'], data, **self.get('ntof_extras', {})) store.pool.runOperation(insert_query) elif self.get('required', False): return False return True
@see: L{modu.editable.define.definition.get_element()}
src/modu/editable/datatypes/relational.py
update_storable
philchristensen/modu
0
python
def update_storable(self, req, form, storable): '\n\t\t\n\t\t' form_data = req.data[form.name] store = storable.get_store() item_id = storable.get_id() delete_query = sql.build_delete(self['ntof'], {self['ntof_n_id']: item_id}) store.pool.runOperation(delete_query) if (self.name in form_data): values = form_data[self.name].value if isinstance(values, dict): values = values[(self.name + '-autocomplete')] if (not isinstance(values, list)): values = [values] data = [{self['ntof_n_id']: item_id, self['ntof_f_id']: getattr(val, 'value', val)} for val in values] insert_query = sql.build_insert(self['ntof'], data, **self.get('ntof_extras', {})) store.pool.runOperation(insert_query) elif self.get('required', False): return False return True
def update_storable(self, req, form, storable): '\n\t\t\n\t\t' form_data = req.data[form.name] store = storable.get_store() item_id = storable.get_id() delete_query = sql.build_delete(self['ntof'], {self['ntof_n_id']: item_id}) store.pool.runOperation(delete_query) if (self.name in form_data): values = form_data[self.name].value if isinstance(values, dict): values = values[(self.name + '-autocomplete')] if (not isinstance(values, list)): values = [values] data = [{self['ntof_n_id']: item_id, self['ntof_f_id']: getattr(val, 'value', val)} for val in values] insert_query = sql.build_insert(self['ntof'], data, **self.get('ntof_extras', {})) store.pool.runOperation(insert_query) elif self.get('required', False): return False return True<|docstring|>@see: L{modu.editable.define.definition.get_element()}<|endoftext|>
8a50c85793e348ed8053fc911d7bdba533ed8a9cf1d7ec96892bf8196c528a96
def is_postwrite_field(self): '\n\t\t@see: L{modu.editable.define.definition.get_element()}\n\t\t' return True
@see: L{modu.editable.define.definition.get_element()}
src/modu/editable/datatypes/relational.py
is_postwrite_field
philchristensen/modu
0
python
def is_postwrite_field(self): '\n\t\t\n\t\t' return True
def is_postwrite_field(self): '\n\t\t\n\t\t' return True<|docstring|>@see: L{modu.editable.define.definition.get_element()}<|endoftext|>
64a98f5e024c88226072a53a08802193d81fda4f4c0e19d7786a570d8b3bc176
def get_element(self, req, style, storable): '\n\t\t@see: L{modu.editable.define.definition.get_element()}\n\t\t' mlabel = self.get('flabel', '') if (mlabel.find('.') == (- 1)): mlabel = ('m.%s' % mlabel) mlabel = self.get('flabel_sql', mlabel) where = self.get('fwhere', '') if callable(where): where = where(storable) elif isinstance(where, dict): where = sql.build_where(where) limit = ('LIMIT %d' % self.get('limit_choices', 20)) ntom_query = ('SELECT m.%s AS value, %s AS label\n\t\t\t\t\t\tFROM %s m\n\t\t\t\t\t\tINNER JOIN %s n2m ON m.%s = n2m.%s AND n2m.%s = %%s\n\t\t\t\t\t\t%s\n\t\t\t\t\t\tORDER BY label\n\t\t\t\t\t\t%s' % (self['fvalue'], q(mlabel), q(self['ftable']), q(self['ntof']), self.get('fvalue', 'id'), self['ntof_f_id'], self['ntof_n_id'], where, limit)) store = storable.get_store() results = store.pool.runQuery(sql.interp(ntom_query, storable.get_id())) if ((style == 'listing') or self.get('read_only', False)): label_value = ', '.join([result['label'] for result in results]) return form.FormNode(self.name)(type='label', value=label_value) options = dict([(str(result['value']), result['label']) for result in results]) form_name = ('%s-form' % storable.get_table()) ac_id = ('%s-%s-autocomplete' % (form_name, self.name)) select_id = ('%s-foreign-select' % self.name) ac_url = ((req.get_path(req.prepath, 'autocomplete', storable.get_table(), self.name) + '?time=') + str(time.time())) hidden_options = '' for value in options: hidden_options += tags.input(type='hidden', name=('%s[%s]' % (form_name, self.name)), value=value) select_frm = form.FormNode(('%s-select-view' % self.name)) select_frm(type='select', options=options, size=self.get('size', 5), multiple=None, suffix=(hidden_options + '<br/>'), attributes={'id': select_id}) prefs = ('autoFill:1, selectFirst:1, matchSubset:0, selectOnly:1, extraParams:{t:%d}, minChars:%d' % (int(time.time()), self.get('min_chars', 3))) ac_js = ('\n\t\t\t$(document).ready(function(){\n\t\t\t\t$("#%s").autocomplete("%s", {%s});\n\t\t\t\t$("#%s").result(add_foreign_item("%s", "%s"));\n\t\t\t});\n\t\t' % (ac_id, ac_url, prefs, ac_id, form_name, self.name)) ac_controls = tags.script(type='text/javascript')[ac_js] ac_field = form.FormNode(('%s-autocomplete' % self.name)) ac_field(type='textfield', weight=10, attributes={'id': ac_id}, suffix=ac_controls) req.content.report('header', tags.style(type='text/css')[("@import '%s';" % req.get_path('/assets/jquery/jquery.autocomplete.css'))]) assets.activate_jquery(req) req.content.report('header', tags.script(type='text/javascript', src=req.get_path('/assets/jquery/jquery.autocomplete.js'))['']) req.content.report('header', tags.script(type='text/javascript', src=req.get_path('/assets/editable-autocomplete.js'))['']) frm = form.FormNode(('%s-ac-fieldset' % self.name))(type='fieldset', style='brief') frm[select_frm.name] = select_frm frm[ac_field.name] = ac_field return frm
@see: L{modu.editable.define.definition.get_element()}
src/modu/editable/datatypes/relational.py
get_element
philchristensen/modu
0
python
def get_element(self, req, style, storable): '\n\t\t\n\t\t' mlabel = self.get('flabel', '') if (mlabel.find('.') == (- 1)): mlabel = ('m.%s' % mlabel) mlabel = self.get('flabel_sql', mlabel) where = self.get('fwhere', '') if callable(where): where = where(storable) elif isinstance(where, dict): where = sql.build_where(where) limit = ('LIMIT %d' % self.get('limit_choices', 20)) ntom_query = ('SELECT m.%s AS value, %s AS label\n\t\t\t\t\t\tFROM %s m\n\t\t\t\t\t\tINNER JOIN %s n2m ON m.%s = n2m.%s AND n2m.%s = %%s\n\t\t\t\t\t\t%s\n\t\t\t\t\t\tORDER BY label\n\t\t\t\t\t\t%s' % (self['fvalue'], q(mlabel), q(self['ftable']), q(self['ntof']), self.get('fvalue', 'id'), self['ntof_f_id'], self['ntof_n_id'], where, limit)) store = storable.get_store() results = store.pool.runQuery(sql.interp(ntom_query, storable.get_id())) if ((style == 'listing') or self.get('read_only', False)): label_value = ', '.join([result['label'] for result in results]) return form.FormNode(self.name)(type='label', value=label_value) options = dict([(str(result['value']), result['label']) for result in results]) form_name = ('%s-form' % storable.get_table()) ac_id = ('%s-%s-autocomplete' % (form_name, self.name)) select_id = ('%s-foreign-select' % self.name) ac_url = ((req.get_path(req.prepath, 'autocomplete', storable.get_table(), self.name) + '?time=') + str(time.time())) hidden_options = '' for value in options: hidden_options += tags.input(type='hidden', name=('%s[%s]' % (form_name, self.name)), value=value) select_frm = form.FormNode(('%s-select-view' % self.name)) select_frm(type='select', options=options, size=self.get('size', 5), multiple=None, suffix=(hidden_options + '<br/>'), attributes={'id': select_id}) prefs = ('autoFill:1, selectFirst:1, matchSubset:0, selectOnly:1, extraParams:{t:%d}, minChars:%d' % (int(time.time()), self.get('min_chars', 3))) ac_js = ('\n\t\t\t$(document).ready(function(){\n\t\t\t\t$("#%s").autocomplete("%s", {%s});\n\t\t\t\t$("#%s").result(add_foreign_item("%s", "%s"));\n\t\t\t});\n\t\t' % (ac_id, ac_url, prefs, ac_id, form_name, self.name)) ac_controls = tags.script(type='text/javascript')[ac_js] ac_field = form.FormNode(('%s-autocomplete' % self.name)) ac_field(type='textfield', weight=10, attributes={'id': ac_id}, suffix=ac_controls) req.content.report('header', tags.style(type='text/css')[("@import '%s';" % req.get_path('/assets/jquery/jquery.autocomplete.css'))]) assets.activate_jquery(req) req.content.report('header', tags.script(type='text/javascript', src=req.get_path('/assets/jquery/jquery.autocomplete.js'))['']) req.content.report('header', tags.script(type='text/javascript', src=req.get_path('/assets/editable-autocomplete.js'))['']) frm = form.FormNode(('%s-ac-fieldset' % self.name))(type='fieldset', style='brief') frm[select_frm.name] = select_frm frm[ac_field.name] = ac_field return frm
def get_element(self, req, style, storable): '\n\t\t\n\t\t' mlabel = self.get('flabel', '') if (mlabel.find('.') == (- 1)): mlabel = ('m.%s' % mlabel) mlabel = self.get('flabel_sql', mlabel) where = self.get('fwhere', '') if callable(where): where = where(storable) elif isinstance(where, dict): where = sql.build_where(where) limit = ('LIMIT %d' % self.get('limit_choices', 20)) ntom_query = ('SELECT m.%s AS value, %s AS label\n\t\t\t\t\t\tFROM %s m\n\t\t\t\t\t\tINNER JOIN %s n2m ON m.%s = n2m.%s AND n2m.%s = %%s\n\t\t\t\t\t\t%s\n\t\t\t\t\t\tORDER BY label\n\t\t\t\t\t\t%s' % (self['fvalue'], q(mlabel), q(self['ftable']), q(self['ntof']), self.get('fvalue', 'id'), self['ntof_f_id'], self['ntof_n_id'], where, limit)) store = storable.get_store() results = store.pool.runQuery(sql.interp(ntom_query, storable.get_id())) if ((style == 'listing') or self.get('read_only', False)): label_value = ', '.join([result['label'] for result in results]) return form.FormNode(self.name)(type='label', value=label_value) options = dict([(str(result['value']), result['label']) for result in results]) form_name = ('%s-form' % storable.get_table()) ac_id = ('%s-%s-autocomplete' % (form_name, self.name)) select_id = ('%s-foreign-select' % self.name) ac_url = ((req.get_path(req.prepath, 'autocomplete', storable.get_table(), self.name) + '?time=') + str(time.time())) hidden_options = '' for value in options: hidden_options += tags.input(type='hidden', name=('%s[%s]' % (form_name, self.name)), value=value) select_frm = form.FormNode(('%s-select-view' % self.name)) select_frm(type='select', options=options, size=self.get('size', 5), multiple=None, suffix=(hidden_options + '<br/>'), attributes={'id': select_id}) prefs = ('autoFill:1, selectFirst:1, matchSubset:0, selectOnly:1, extraParams:{t:%d}, minChars:%d' % (int(time.time()), self.get('min_chars', 3))) ac_js = ('\n\t\t\t$(document).ready(function(){\n\t\t\t\t$("#%s").autocomplete("%s", {%s});\n\t\t\t\t$("#%s").result(add_foreign_item("%s", "%s"));\n\t\t\t});\n\t\t' % (ac_id, ac_url, prefs, ac_id, form_name, self.name)) ac_controls = tags.script(type='text/javascript')[ac_js] ac_field = form.FormNode(('%s-autocomplete' % self.name)) ac_field(type='textfield', weight=10, attributes={'id': ac_id}, suffix=ac_controls) req.content.report('header', tags.style(type='text/css')[("@import '%s';" % req.get_path('/assets/jquery/jquery.autocomplete.css'))]) assets.activate_jquery(req) req.content.report('header', tags.script(type='text/javascript', src=req.get_path('/assets/jquery/jquery.autocomplete.js'))['']) req.content.report('header', tags.script(type='text/javascript', src=req.get_path('/assets/editable-autocomplete.js'))['']) frm = form.FormNode(('%s-ac-fieldset' % self.name))(type='fieldset', style='brief') frm[select_frm.name] = select_frm frm[ac_field.name] = ac_field return frm<|docstring|>@see: L{modu.editable.define.definition.get_element()}<|endoftext|>
9841cda31fa44324a73d04e0d2a50790ab71e9e814528f2e3f63a77912a65c8f
def calculate_size(credentials, uuid, owner_uuid, is_owner_connection, client_type, serialization_version, client_hazelcast_version): ' Calculates the request payload size' data_size = 0 data_size += calculate_size_data(credentials) data_size += BOOLEAN_SIZE_IN_BYTES if (uuid is not None): data_size += calculate_size_str(uuid) data_size += BOOLEAN_SIZE_IN_BYTES if (owner_uuid is not None): data_size += calculate_size_str(owner_uuid) data_size += BOOLEAN_SIZE_IN_BYTES data_size += calculate_size_str(client_type) data_size += BYTE_SIZE_IN_BYTES data_size += calculate_size_str(client_hazelcast_version) return data_size
Calculates the request payload size
hazelcast/protocol/codec/client_authentication_custom_codec.py
calculate_size
buraksezer/hazelcast-python-client
3
python
def calculate_size(credentials, uuid, owner_uuid, is_owner_connection, client_type, serialization_version, client_hazelcast_version): ' ' data_size = 0 data_size += calculate_size_data(credentials) data_size += BOOLEAN_SIZE_IN_BYTES if (uuid is not None): data_size += calculate_size_str(uuid) data_size += BOOLEAN_SIZE_IN_BYTES if (owner_uuid is not None): data_size += calculate_size_str(owner_uuid) data_size += BOOLEAN_SIZE_IN_BYTES data_size += calculate_size_str(client_type) data_size += BYTE_SIZE_IN_BYTES data_size += calculate_size_str(client_hazelcast_version) return data_size
def calculate_size(credentials, uuid, owner_uuid, is_owner_connection, client_type, serialization_version, client_hazelcast_version): ' ' data_size = 0 data_size += calculate_size_data(credentials) data_size += BOOLEAN_SIZE_IN_BYTES if (uuid is not None): data_size += calculate_size_str(uuid) data_size += BOOLEAN_SIZE_IN_BYTES if (owner_uuid is not None): data_size += calculate_size_str(owner_uuid) data_size += BOOLEAN_SIZE_IN_BYTES data_size += calculate_size_str(client_type) data_size += BYTE_SIZE_IN_BYTES data_size += calculate_size_str(client_hazelcast_version) return data_size<|docstring|>Calculates the request payload size<|endoftext|>
668c32644e4f5ea44e3742c167b71e5d75987ddcf1b4078fd866391b526c29d2
def encode_request(credentials, uuid, owner_uuid, is_owner_connection, client_type, serialization_version, client_hazelcast_version): ' Encode request into client_message' client_message = ClientMessage(payload_size=calculate_size(credentials, uuid, owner_uuid, is_owner_connection, client_type, serialization_version, client_hazelcast_version)) client_message.set_message_type(REQUEST_TYPE) client_message.set_retryable(RETRYABLE) client_message.append_data(credentials) client_message.append_bool((uuid is None)) if (uuid is not None): client_message.append_str(uuid) client_message.append_bool((owner_uuid is None)) if (owner_uuid is not None): client_message.append_str(owner_uuid) client_message.append_bool(is_owner_connection) client_message.append_str(client_type) client_message.append_byte(serialization_version) client_message.append_str(client_hazelcast_version) client_message.update_frame_length() return client_message
Encode request into client_message
hazelcast/protocol/codec/client_authentication_custom_codec.py
encode_request
buraksezer/hazelcast-python-client
3
python
def encode_request(credentials, uuid, owner_uuid, is_owner_connection, client_type, serialization_version, client_hazelcast_version): ' ' client_message = ClientMessage(payload_size=calculate_size(credentials, uuid, owner_uuid, is_owner_connection, client_type, serialization_version, client_hazelcast_version)) client_message.set_message_type(REQUEST_TYPE) client_message.set_retryable(RETRYABLE) client_message.append_data(credentials) client_message.append_bool((uuid is None)) if (uuid is not None): client_message.append_str(uuid) client_message.append_bool((owner_uuid is None)) if (owner_uuid is not None): client_message.append_str(owner_uuid) client_message.append_bool(is_owner_connection) client_message.append_str(client_type) client_message.append_byte(serialization_version) client_message.append_str(client_hazelcast_version) client_message.update_frame_length() return client_message
def encode_request(credentials, uuid, owner_uuid, is_owner_connection, client_type, serialization_version, client_hazelcast_version): ' ' client_message = ClientMessage(payload_size=calculate_size(credentials, uuid, owner_uuid, is_owner_connection, client_type, serialization_version, client_hazelcast_version)) client_message.set_message_type(REQUEST_TYPE) client_message.set_retryable(RETRYABLE) client_message.append_data(credentials) client_message.append_bool((uuid is None)) if (uuid is not None): client_message.append_str(uuid) client_message.append_bool((owner_uuid is None)) if (owner_uuid is not None): client_message.append_str(owner_uuid) client_message.append_bool(is_owner_connection) client_message.append_str(client_type) client_message.append_byte(serialization_version) client_message.append_str(client_hazelcast_version) client_message.update_frame_length() return client_message<|docstring|>Encode request into client_message<|endoftext|>
79ac8e5a98c9b2f2e2920fe19b2bd2c45f352dbd9e975e542a64663ab6d8a825
def decode_response(client_message, to_object=None): ' Decode response from client message' parameters = dict(status=None, address=None, uuid=None, owner_uuid=None, serialization_version=None, server_hazelcast_version=None, client_unregistered_members=None) parameters['status'] = client_message.read_byte() if (not client_message.read_bool()): parameters['address'] = AddressCodec.decode(client_message, to_object) if (not client_message.read_bool()): parameters['uuid'] = client_message.read_str() if (not client_message.read_bool()): parameters['owner_uuid'] = client_message.read_str() parameters['serialization_version'] = client_message.read_byte() if client_message.is_complete(): return parameters parameters['server_hazelcast_version'] = client_message.read_str() if (not client_message.read_bool()): client_unregistered_members_size = client_message.read_int() client_unregistered_members = [] for _ in range(0, client_unregistered_members_size): client_unregistered_members_item = MemberCodec.decode(client_message, to_object) client_unregistered_members.append(client_unregistered_members_item) parameters['client_unregistered_members'] = ImmutableLazyDataList(client_unregistered_members, to_object) return parameters
Decode response from client message
hazelcast/protocol/codec/client_authentication_custom_codec.py
decode_response
buraksezer/hazelcast-python-client
3
python
def decode_response(client_message, to_object=None): ' ' parameters = dict(status=None, address=None, uuid=None, owner_uuid=None, serialization_version=None, server_hazelcast_version=None, client_unregistered_members=None) parameters['status'] = client_message.read_byte() if (not client_message.read_bool()): parameters['address'] = AddressCodec.decode(client_message, to_object) if (not client_message.read_bool()): parameters['uuid'] = client_message.read_str() if (not client_message.read_bool()): parameters['owner_uuid'] = client_message.read_str() parameters['serialization_version'] = client_message.read_byte() if client_message.is_complete(): return parameters parameters['server_hazelcast_version'] = client_message.read_str() if (not client_message.read_bool()): client_unregistered_members_size = client_message.read_int() client_unregistered_members = [] for _ in range(0, client_unregistered_members_size): client_unregistered_members_item = MemberCodec.decode(client_message, to_object) client_unregistered_members.append(client_unregistered_members_item) parameters['client_unregistered_members'] = ImmutableLazyDataList(client_unregistered_members, to_object) return parameters
def decode_response(client_message, to_object=None): ' ' parameters = dict(status=None, address=None, uuid=None, owner_uuid=None, serialization_version=None, server_hazelcast_version=None, client_unregistered_members=None) parameters['status'] = client_message.read_byte() if (not client_message.read_bool()): parameters['address'] = AddressCodec.decode(client_message, to_object) if (not client_message.read_bool()): parameters['uuid'] = client_message.read_str() if (not client_message.read_bool()): parameters['owner_uuid'] = client_message.read_str() parameters['serialization_version'] = client_message.read_byte() if client_message.is_complete(): return parameters parameters['server_hazelcast_version'] = client_message.read_str() if (not client_message.read_bool()): client_unregistered_members_size = client_message.read_int() client_unregistered_members = [] for _ in range(0, client_unregistered_members_size): client_unregistered_members_item = MemberCodec.decode(client_message, to_object) client_unregistered_members.append(client_unregistered_members_item) parameters['client_unregistered_members'] = ImmutableLazyDataList(client_unregistered_members, to_object) return parameters<|docstring|>Decode response from client message<|endoftext|>
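The three codec records share one invariant: `calculate_size` must visit the parameters in exactly the order `encode_request` appends them, and every optional field travels as a null flag followed by the value only when present, which is why `decode_response` guards each optional read with `read_bool()`. A toy sketch of that optional-field convention; it illustrates the pattern only and is not the real Hazelcast wire format:

```python
BOOLEAN_SIZE_IN_BYTES = 1
INT_SIZE_IN_BYTES = 4

def calculate_size_optional_str(value):
    # The null flag is always counted; length prefix plus payload only when present.
    size = BOOLEAN_SIZE_IN_BYTES
    if value is not None:
        size += INT_SIZE_IN_BYTES + len(value.encode('utf-8'))
    return size

def encode_optional_str(buf, value):
    # Mirrors calculate_size_optional_str field for field.
    buf.append(1 if value is None else 0)  # null flag, like append_bool(uuid is None)
    if value is not None:
        data = value.encode('utf-8')
        buf.extend(len(data).to_bytes(4, 'little'))
        buf.extend(data)

buf = bytearray()
encode_optional_str(buf, 'owner-1234')
assert len(buf) == calculate_size_optional_str('owner-1234')
```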
f80e983f1eae93a1cbf03f59b8e4232c461ac70e629c177e900b6842f447b02a
def left(self, node: rb_tree.Node) -> None: 'Rotates the node with its right child\n \n Args:\n node: the parent node to rotate\n ' new_parent = node.right node.right = new_parent.left new_parent.parent = node.parent if node.is_root: self.tree.root = new_parent new_parent.left = node return
Rotates the node with its right child Args: node: the parent node to rotate
bowling/data_structures/red_black_tree/rotator.py
left
necromuralist/Bowling-For-Data
0
python
def left(self, node: rb_tree.Node) -> None: 'Rotates the node with its right child\n \n Args:\n node: the parent node to rotate\n ' new_parent = node.right node.right = new_parent.left new_parent.parent = node.parent if node.is_root: self.tree.root = new_parent new_parent.left = node return
def left(self, node: rb_tree.Node) -> None: 'Rotates the node with its right child\n \n Args:\n node: the parent node to rotate\n ' new_parent = node.right node.right = new_parent.left new_parent.parent = node.parent if node.is_root: self.tree.root = new_parent new_parent.left = node return<|docstring|>Rotates the node with its right child Args: node: the parent node to rotate<|endoftext|>
57ec5c743679d3d25b3e67cf272cfc9957491fe9a6fc596cf5187a72dccdd723
def right(self, node: rb_tree.Node) -> None: 'Rotates the node with its left child\n \n Args:\n node: the parent node to rotate\n ' previous_child = node.left node.left = previous_child.right previous_child.parent = node.parent if node.is_root: self.tree.root = previous_child previous_child.right = node return
Rotates the node with its left child Args: node: the parent node to rotate
bowling/data_structures/red_black_tree/rotator.py
right
necromuralist/Bowling-For-Data
0
python
def right(self, node: rb_tree.Node) -> None: 'Rotates the node with its left child\n \n Args:\n node: the parent node to rotate\n ' previous_child = node.left node.left = previous_child.right previous_child.parent = node.parent if node.is_root: self.tree.root = previous_child previous_child.right = node return
def right(self, node: rb_tree.Node) -> None: 'Rotates the node with its left child\n \n Args:\n node: the parent node to rotate\n ' previous_child = node.left node.left = previous_child.right previous_child.parent = node.parent if node.is_root: self.tree.root = previous_child previous_child.right = node return<|docstring|>Rotates the node with its left child Args: node: the parent node to rotate<|endoftext|>
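Both rotation records appear to rely on the repository's `rb_tree.Node` doing parent-pointer bookkeeping inside its property setters; note that neither function ever assigns `node.parent` directly. For comparison, here is a self-contained sketch of a full left rotation with every pointer update made explicit, using a minimal stand-in node class rather than the repository's:

```python
class PlainNode:
    """Bare node with no smart setters; every pointer is updated by hand."""
    def __init__(self, key):
        self.key = key
        self.parent = self.left = self.right = None

def rotate_left(root, node):
    """Rotate node with its right child and return the (possibly new) root."""
    new_parent = node.right
    node.right = new_parent.left             # adopt the child's left subtree
    if new_parent.left is not None:
        new_parent.left.parent = node
    new_parent.parent = node.parent          # splice new_parent into node's slot
    if node.parent is None:
        root = new_parent
    elif node is node.parent.left:
        node.parent.left = new_parent
    else:
        node.parent.right = new_parent
    new_parent.left = node
    node.parent = new_parent
    return root
```

The right rotation is the mirror image, swapping every `left` for `right`.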
e814d40d73c24ed13f9d78806e8e761665b9d00f1bdd005868558ec2e1c7d0bc
def init_game(): 'Initialize the game array; called when starting a new game or resetting the game.' gameArray = [[0 for i in range(setting.WINDOW_BLOCK_NUM)] for j in range(setting.WINDOW_BLOCK_NUM)] random_elem = 0 while (random_elem < 2): add_elem(gameArray) random_elem += 1 return gameArray
Initialize the game array; called when starting a new game or resetting the game.
game2048/GameFunction.py
init_game
seancheng33/PygameProject
0
python
def init_game(): gameArray = [[0 for i in range(setting.WINDOW_BLOCK_NUM)] for j in range(setting.WINDOW_BLOCK_NUM)] random_elem = 0 while (random_elem < 2): add_elem(gameArray) random_elem += 1 return gameArray
def init_game(): gameArray = [[0 for i in range(setting.WINDOW_BLOCK_NUM)] for j in range(setting.WINDOW_BLOCK_NUM)] random_elem = 0 while (random_elem < 2): add_elem(gameArray) random_elem += 1 return gameArray<|docstring|>Initialize the game array; called when starting a new game or resetting the game.<|endoftext|>
cad2b6cda9c1c9251ff6ec13a2c6b1a781e62511324240f048036aa8238e2431
def add_elem(gameArray): 'Adds a new element to the board.' while can_add_elem(gameArray): x = random.randint(0, (setting.WINDOW_BLOCK_NUM - 1)) y = random.randint(0, (setting.WINDOW_BLOCK_NUM - 1)) if (gameArray[x][y] == 0): gameArray[x][y] = 2 break
Adds a new element to the board.
game2048/GameFunction.py
add_elem
seancheng33/PygameProject
0
python
def add_elem(gameArray): while can_add_elem(gameArray): x = random.randint(0, (setting.WINDOW_BLOCK_NUM - 1)) y = random.randint(0, (setting.WINDOW_BLOCK_NUM - 1)) if (gameArray[x][y] == 0): gameArray[x][y] = 2 break
def add_elem(gameArray): while can_add_elem(gameArray): x = random.randint(0, (setting.WINDOW_BLOCK_NUM - 1)) y = random.randint(0, (setting.WINDOW_BLOCK_NUM - 1)) if (gameArray[x][y] == 0): gameArray[x][y] = 2 break<|docstring|>Adds a new element to the board.<|endoftext|>
839d6145c0aa8627a5fe144c8e20de4717f7aef1811a5116f6079d8c57577783
def can_add_elem(self, gameArray): 'Initialize the count of zero-valued cells to 16. For each non-zero element found, decrement the count by 1. If no cell is zero, there is no need to add a new element.' num_zero = 16 for i in range(self.WINDOW_BLOCK_NUM): for j in range(self.WINDOW_BLOCK_NUM): if (gameArray[i][j] != 0): num_zero -= 1 if (num_zero == 0): return False return True
Initialize the count of zero-valued cells to 16. For each non-zero element found, decrement the count by 1. If no cell is zero, there is no need to add a new element.
game2048/GameFunction.py
can_add_elem
seancheng33/PygameProject
0
python
def can_add_elem(self, gameArray): num_zero = 16 for i in range(self.WINDOW_BLOCK_NUM): for j in range(self.WINDOW_BLOCK_NUM): if (gameArray[i][j] != 0): num_zero -= 1 if (num_zero == 0): return False return True
def can_add_elem(self, gameArray): num_zero = 16 for i in range(self.WINDOW_BLOCK_NUM): for j in range(self.WINDOW_BLOCK_NUM): if (gameArray[i][j] != 0): num_zero -= 1 if (num_zero == 0): return False return True<|docstring|>Initialize the count of zero-valued cells to 16. For each non-zero element found, decrement the count by 1. If no cell is zero, there is no need to add a new element.<|endoftext|>
b264e6c96d7a2f23d30d91363565d441dd1c7d70371ffddd79b0129fb1834347
def can_add_elem(gameArray): 'Initialize the count of zero-valued cells to 16. For each non-zero element found, decrement the count by 1. If no cell is zero, there is no need to add a new element.' num_zero = 16 for i in range(setting.WINDOW_BLOCK_NUM): for j in range(setting.WINDOW_BLOCK_NUM): if (gameArray[i][j] != 0): num_zero -= 1 if (num_zero == 0): return False return True
Initialize the count of zero-valued cells to 16. For each non-zero element found, decrement the count by 1. If no cell is zero, there is no need to add a new element.
game2048/GameFunction.py
can_add_elem
seancheng33/PygameProject
0
python
def can_add_elem(gameArray): num_zero = 16 for i in range(setting.WINDOW_BLOCK_NUM): for j in range(setting.WINDOW_BLOCK_NUM): if (gameArray[i][j] != 0): num_zero -= 1 if (num_zero == 0): return False return True
def can_add_elem(gameArray): num_zero = 16 for i in range(setting.WINDOW_BLOCK_NUM): for j in range(setting.WINDOW_BLOCK_NUM): if (gameArray[i][j] != 0): num_zero -= 1 if (num_zero == 0): return False return True<|docstring|>Initialize the count of zero-valued cells to 16. For each non-zero element found, decrement the count by 1. If no cell is zero, there is no need to add a new element.<|endoftext|>
c350a79fa160cb50f27bd3ed86c38b0ddf7f19fc382ea9ecb7d208665d001ee9
def can_move(gameArray): 'Check whether a move is possible. The logic is fairly simple: check each element in turn to see whether a neighbouring cell (up, down, left or right) holds the same value; if such a pair exists, a move is possible, otherwise it is not.' num = setting.WINDOW_BLOCK_NUM for i in range(num): for j in range(num): if (gameArray[i][j] == 0): return True if ((j + 1 < num) and (gameArray[i][j] == gameArray[i][(j + 1)])): return True if ((i + 1 < num) and (gameArray[i][j] == gameArray[(i + 1)][j])): return True return False
Check whether a move is possible. The logic is fairly simple: check each element in turn to see whether a neighbouring cell (up, down, left or right) holds the same value; if such a pair exists, a move is possible, otherwise it is not.
game2048/GameFunction.py
can_move
seancheng33/PygameProject
0
python
def can_move(gameArray): num = setting.WINDOW_BLOCK_NUM for i in range(num): for j in range(num): if (gameArray[i][j] == 0): return True if ((j + 1 < num) and (gameArray[i][j] == gameArray[i][(j + 1)])): return True if ((i + 1 < num) and (gameArray[i][j] == gameArray[(i + 1)][j])): return True return False
def can_move(gameArray): num = setting.WINDOW_BLOCK_NUM for i in range(num): for j in range(num): if (gameArray[i][j] == 0): return True if ((j + 1 < num) and (gameArray[i][j] == gameArray[i][(j + 1)])): return True if ((i + 1 < num) and (gameArray[i][j] == gameArray[(i + 1)][j])): return True return False<|docstring|>Check whether a move is possible. The logic is fairly simple: check each element in turn to see whether a neighbouring cell (up, down, left or right) holds the same value; if such a pair exists, a move is possible, otherwise it is not.<|endoftext|>
3edcb1036f5aff22329b5ffb34e0b1935d12606c6fffc3b9831a9283e3f035dd
def is_win(gameArray): 'Traverse the array; if any element equals 2048, the game is won.' for i in range(setting.WINDOW_BLOCK_NUM): for j in range(setting.WINDOW_BLOCK_NUM): if (gameArray[i][j] == 2048): return True return False
Traverse the array; if any element equals 2048, the game is won.
game2048/GameFunction.py
is_win
seancheng33/PygameProject
0
python
def is_win(gameArray): for i in range(setting.WINDOW_BLOCK_NUM): for j in range(setting.WINDOW_BLOCK_NUM): if (gameArray[i][j] == 2048): return True return False
def is_win(gameArray): for i in range(setting.WINDOW_BLOCK_NUM): for j in range(setting.WINDOW_BLOCK_NUM): if (gameArray[i][j] == 2048): return True return False<|docstring|>Traverse the array; if any element equals 2048, the game is won.<|endoftext|>
002dd660c224875ede24aedfbefc480ebf28c8b1ba57e15b91cb9a1aaead4bea
def win_or_lost(gameArray): "Determine win or loss: if no move is possible, return 'lost'; if the game is won, return 'win'; if neither, return 'play' and the game continues." if (not can_move(gameArray)): return 'lost' if is_win(gameArray): return 'win' return 'play'
Determine win or loss: if no move is possible, return 'lost'; if the game is won, return 'win'; if neither, return 'play' and the game continues.
game2048/GameFunction.py
win_or_lost
seancheng33/PygameProject
0
python
def win_or_lost(gameArray): " " if (not can_move(gameArray)): return 'lost' if is_win(gameArray): return 'win' return 'play'
def win_or_lost(gameArray): " " if (not can_move(gameArray)): return 'lost' if is_win(gameArray): return 'win' return 'play'<|docstring|>Determine win or loss: if no move is possible, return 'lost'; if the game is won, return 'win'; if neither, return 'play' and the game continues.<|endoftext|>
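Taken together, the three predicates above form a small state machine over the board. A standalone restatement, decoupled from the `setting` module so it runs as-is; it mirrors the records' intent but is not their code, and the board size is inferred from the array:

```python
def can_move(board):
    n = len(board)
    for i in range(n):
        for j in range(n):
            if board[i][j] == 0:
                return True  # an empty cell always permits a move
            # checking only right and down visits every adjacent pair once
            if j + 1 < n and board[i][j] == board[i][j + 1]:
                return True
            if i + 1 < n and board[i][j] == board[i + 1][j]:
                return True
    return False

def is_win(board):
    return any(2048 in row for row in board)

def win_or_lost(board):
    if not can_move(board):
        return 'lost'
    if is_win(board):
        return 'win'
    return 'play'

assert win_or_lost([[2, 4], [4, 2]]) == 'lost'
assert win_or_lost([[2048, 2], [4, 0]]) == 'win'
assert win_or_lost([[2, 2], [4, 8]]) == 'play'
```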
c2d606c3507761d9ed356767ffd8f0ff06f6db619d4c83cc9b8c43bcee820cd2
def notas(*n, sit=False): '\n -> Function to analyse the grades and situations of several students.\n :param n: one or more grades of the students.\n :param sit: optional value, to show the situation of the student.\n :return: dictionary with assorted information about the situation of the class.\n ' nota = dict() nota['total'] = len(n) maior = menor = n[0] soma = 0 for c in n: soma += c if (c > maior): maior = c if (c < menor): menor = c nota['maior'] = maior nota['menor'] = menor nota['média'] = (soma / len(n)) if (sit is True): if (nota['média'] > 7): nota['situação'] = 'Aprovado' else: nota['situação'] = 'Recuperação' return nota
-> Function to analyse the grades and situations of several students. :param n: one or more grades of the students. :param sit: optional value, to show the situation of the student. :return: dictionary with assorted information about the situation of the class.
python-mundo3/ex105.py
notas
abm-astro/estudos-python
1
python
def notas(*n, sit=False): '\n -> Function to analyse the grades and situations of several students.\n :param n: one or more grades of the students.\n :param sit: optional value, to show the situation of the student.\n :return: dictionary with assorted information about the situation of the class.\n ' nota = dict() nota['total'] = len(n) maior = menor = n[0] soma = 0 for c in n: soma += c if (c > maior): maior = c if (c < menor): menor = c nota['maior'] = maior nota['menor'] = menor nota['média'] = (soma / len(n)) if (sit is True): if (nota['média'] > 7): nota['situação'] = 'Aprovado' else: nota['situação'] = 'Recuperação' return nota
def notas(*n, sit=False): '\n -> Function to analyse the grades and situations of several students.\n :param n: one or more grades of the students.\n :param sit: optional value, to show the situation of the student.\n :return: dictionary with assorted information about the situation of the class.\n ' nota = dict() nota['total'] = len(n) maior = menor = n[0] soma = 0 for c in n: soma += c if (c > maior): maior = c if (c < menor): menor = c nota['maior'] = maior nota['menor'] = menor nota['média'] = (soma / len(n)) if (sit is True): if (nota['média'] > 7): nota['situação'] = 'Aprovado' else: nota['situação'] = 'Recuperação' return nota<|docstring|>-> Function to analyse the grades and situations of several students. :param n: one or more grades of the students. :param sit: optional value, to show the situation of the student. :return: dictionary with assorted information about the situation of the class.<|endoftext|>
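A quick usage sketch for the record above; the expected output is computed by hand from the function's own logic:

```python
result = notas(5.5, 9.0, 8.0, sit=True)
# soma = 22.5, so 'média' = 7.5 > 7 and the class status is 'Aprovado'
print(result)
# {'total': 3, 'maior': 9.0, 'menor': 5.5, 'média': 7.5, 'situação': 'Aprovado'}
```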
98371d941a65846e643a13d91b73aaedbdb04ec2bf8ac70138d4c31723dc9ab3
def ortho(model, strength=0.0001, blacklist=[]): 'Apply modified ortho reg to a model.\n\n This function is an optimized version that directly computes the gradient,\n instead of computing and then differentiating the loss.\n ' with torch.no_grad(): for param in model.parameters(): if ((len(param.shape) < 2) or any([(param is item) for item in blacklist])): continue w = param.view(param.shape[0], (- 1)) grad = (2 * torch.mm((torch.mm(w, w.t()) * (1.0 - torch.eye(w.shape[0], device=w.device))), w)) param.grad.data += (strength * grad.view(param.shape))
Apply modified ortho reg to a model. This function is an optimized version that directly computes the gradient, instead of computing and then differentiating the loss.
src/deps/pretorched/models/utils/core.py
ortho
ericotjo001/neuron-descriptions
5
python
def ortho(model, strength=0.0001, blacklist=[]): 'Apply modified ortho reg to a model.\n\n This function is an optimized version that directly computes the gradient,\n instead of computing and then differentiating the loss.\n ' with torch.no_grad(): for param in model.parameters(): if ((len(param.shape) < 2) or any([(param is item) for item in blacklist])): continue w = param.view(param.shape[0], (- 1)) grad = (2 * torch.mm((torch.mm(w, w.t()) * (1.0 - torch.eye(w.shape[0], device=w.device))), w)) param.grad.data += (strength * grad.view(param.shape))
def ortho(model, strength=0.0001, blacklist=[]): 'Apply modified ortho reg to a model.\n\n This function is an optimized version that directly computes the gradient,\n instead of computing and then differentiating the loss.\n ' with torch.no_grad(): for param in model.parameters(): if ((len(param.shape) < 2) or any([(param is item) for item in blacklist])): continue w = param.view(param.shape[0], (- 1)) grad = (2 * torch.mm((torch.mm(w, w.t()) * (1.0 - torch.eye(w.shape[0], device=w.device))), w)) param.grad.data += (strength * grad.view(param.shape))<|docstring|>Apply modified ortho reg to a model. This function is an optimized version that directly computes the gradient, instead of computing and then differentiating the loss.<|endoftext|>
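The docstring's claim can be checked directly: the hand-written update `2 * ((W Wᵀ) ⊙ (1 − I)) W` is the gradient of the penalty `0.5 * ||(W Wᵀ) ⊙ (1 − I)||_F²`, so it should agree with what autograd computes on that loss. A small verification sketch, assuming PyTorch is available; this is not part of the record itself:

```python
import torch

torch.manual_seed(0)
w = torch.randn(4, 7, requires_grad=True)
mask = 1.0 - torch.eye(4)

# Closed-form direction used by ortho() (strength factored out):
manual = (2 * ((w @ w.t()) * mask) @ w).detach()

# Autograd on the corresponding penalty:
loss = 0.5 * ((w @ w.t()) * mask).pow(2).sum()
loss.backward()
assert torch.allclose(manual, w.grad, atol=1e-5)
```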
67ee28a98227468b7750c4b39432784493c5f5a8b43f835bca367605d582f2b6
def default_ortho(model, strength=0.0001, blacklist=[]): 'Default ortho regularization.\n\n This function is an optimized version that directly computes the gradient,\n instead of computing and then differentiating the loss.\n ' with torch.no_grad(): for param in model.parameters(): if ((len(param.shape) < 2) or (param in blacklist)): continue w = param.view(param.shape[0], (- 1)) grad = (2 * torch.mm((torch.mm(w, w.t()) - torch.eye(w.shape[0], device=w.device)), w)) param.grad.data += (strength * grad.view(param.shape))
Default ortho regularization. This function is an optimized version that directly computes the gradient, instead of computing and then differentiating the loss.
src/deps/pretorched/models/utils/core.py
default_ortho
ericotjo001/neuron-descriptions
5
python
def default_ortho(model, strength=0.0001, blacklist=[]): 'Default ortho regularization.\n\n This function is an optimized version that directly computes the gradient,\n instead of computing and then differentiating the loss.\n ' with torch.no_grad(): for param in model.parameters(): if ((len(param.shape) < 2) or (param in blacklist)): continue w = param.view(param.shape[0], (- 1)) grad = (2 * torch.mm((torch.mm(w, w.t()) - torch.eye(w.shape[0], device=w.device)), w)) param.grad.data += (strength * grad.view(param.shape))
def default_ortho(model, strength=0.0001, blacklist=[]): 'Default ortho regularization.\n\n This function is an optimized version that directly computes the gradient,\n instead of computing and then differentiating the loss.\n ' with torch.no_grad(): for param in model.parameters(): if ((len(param.shape) < 2) or (param in blacklist)): continue w = param.view(param.shape[0], (- 1)) grad = (2 * torch.mm((torch.mm(w, w.t()) - torch.eye(w.shape[0], device=w.device)), w)) param.grad.data += (strength * grad.view(param.shape))<|docstring|>Default ortho regularization. This function is an optimized version that directly computes the gradient, instead of computing and then differentiating the loss.<|endoftext|>
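The only change from the modified variant is the regularization target: `default_ortho` pushes the whole Gram matrix toward the identity (its update is the gradient of `0.5 * ||W Wᵀ − I||_F²`), while the modified `ortho` masks the diagonal and so leaves row norms unconstrained. The two directions differ only by a per-row rescaling, as this small sketch (same PyTorch assumption as the previous one) makes explicit:

```python
import torch

torch.manual_seed(0)
w = torch.randn(4, 7)
gram = w @ w.t()
eye = torch.eye(4)

default = 2 * (gram - eye) @ w            # default_ortho direction
modified = 2 * (gram * (1.0 - eye)) @ w   # modified ortho direction
# (G - I) and G * (1 - I) differ only on the diagonal, so the gap is a
# row-wise term driven by how far each row norm is from 1:
row_term = 2 * (gram.diagonal() - 1.0).unsqueeze(1) * w
assert torch.allclose(default - modified, row_term, atol=1e-5)
```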