blob_id (stringlengths 40 to 40) | directory_id (stringlengths 40 to 40) | path (stringlengths 3 to 616) | content_id (stringlengths 40 to 40) | detected_licenses (sequencelengths 0 to 112) | license_type (stringclasses, 2 values) | repo_name (stringlengths 5 to 115) | snapshot_id (stringlengths 40 to 40) | revision_id (stringlengths 40 to 40) | branch_name (stringclasses, 777 values) | visit_date (timestamp[us], 2015-08-06 10:31:46 to 2023-09-06 10:44:38) | revision_date (timestamp[us], 1970-01-01 02:38:32 to 2037-05-03 13:00:00) | committer_date (timestamp[us], 1970-01-01 02:38:32 to 2023-09-06 01:08:06) | github_id (int64, 4.92k to 681M, nullable ⌀) | star_events_count (int64, 0 to 209k) | fork_events_count (int64, 0 to 110k) | gha_license_id (stringclasses, 22 values) | gha_event_created_at (timestamp[us], 2012-06-04 01:52:49 to 2023-09-14 21:59:50, nullable ⌀) | gha_created_at (timestamp[us], 2008-05-22 07:58:19 to 2023-08-21 12:35:19, nullable ⌀) | gha_language (stringclasses, 149 values) | src_encoding (stringclasses, 26 values) | language (stringclasses, 1 value) | is_vendor (bool, 2 classes) | is_generated (bool, 2 classes) | length_bytes (int64, 3 to 10.2M) | extension (stringclasses, 188 values) | content (stringlengths 3 to 10.2M) | authors (sequencelengths 1 to 1) | author_id (stringlengths 1 to 132) |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
1ea9b9d12aa2d29c8695d3633efd804228328fd3 | 498fcf34fa4482be5c9fefc488666e60edcf46c7 | /math/0x00-linear_algebra/100-slice_like_a_ninja.py | 528b04754521035294e7fdf6a8121f20db0302c4 | [] | no_license | MansourKef/holbertonschool-machine_learning | 7dbc465def04c311c1afb0e8b8903cbe34c72ad3 | 19f78fc09f0ebeb9f27f3f76b98e7a0e9212fd22 | refs/heads/main | 2023-03-12T16:18:08.919099 | 2021-03-05T09:42:09 | 2021-03-05T09:42:09 | 317,303,125 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 435 | py | #!/usr/bin/env python3
"""
np_slice.
"""
def np_slice(matrix, axes={}):
"""Slices a matrix."""
len_dic = len(axes)
args = []
list_axes = []
max_val = max(axes.keys())
for j in range(max_val + 1):
list_axes.append(j)
for i in list_axes:
if i in axes.keys():
sl = slice(*axes[i])
else:
sl = slice(None)
args.append(sl)
return matrix[tuple(args)]
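# Illustrative usage (added example, not part of the original file; the values
# are hypothetical). Each key in `axes` is an axis index and each value is a
# tuple of slice() arguments for that axis:
#   >>> import numpy as np
#   >>> mat = np.array([[1, 2, 3, 4], [5, 6, 7, 8]])
#   >>> np_slice(mat, axes={1: (1, 3)})
#   array([[2, 3],
#          [6, 7]])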
| [
"[email protected]"
] | |
cb036973b6b10d9a97cf5c01c03aa9f66500336d | 77de000e7d9f11a5c00ec8f6a9502c9c772bbe65 | /paying_for_college/admin.py | c20f0c8dd6e545bd09c7cbbdf82b523da4dd7fad | [
"CC0-1.0"
] | permissive | mistergone/college-costs | 5081ceedfd2bb560adfb6ac412471d79dc3e4b80 | 7fcb9155d23f363d7d1a22da4df8887996c4b8a6 | refs/heads/master | 2021-01-17T07:52:45.351978 | 2016-05-27T18:21:01 | 2016-05-27T18:21:01 | 45,491,267 | 0 | 0 | null | 2015-11-03T19:45:04 | 2015-11-03T19:45:04 | null | UTF-8 | Python | false | false | 1,661 | py | #!/usr/bin/env python
from __future__ import absolute_import
from django.contrib import admin
from .models import School, Program, Alias, Nickname, Contact, Disclosure
from .models import BAHRate, Feedback, Worksheet, ConstantRate, ConstantCap
class DisclosureAdmin(admin.ModelAdmin):
list_display = ('name', 'institution', 'text')
class ConstantRateAdmin(admin.ModelAdmin):
list_display = ('name', 'slug', 'value', 'updated')
list_editable = ['value']
class ConstantCapAdmin(admin.ModelAdmin):
list_display = ('name', 'slug', 'value', 'updated')
list_editable = ['value']
class SchoolAdmin(admin.ModelAdmin):
list_display = ('primary_alias',
'school_id',
'city',
'state',
'settlement_school')
list_filter = ('settlement_school', 'state')
list_editable = ('settlement_school',)
search_fields = ['school_id', 'city', 'state']
ordering = ['state']
class AliasAdmin(admin.ModelAdmin):
list_display = ('alias', 'institution', 'is_primary')
search_fields = ['alias']
class NicknameAdmin(admin.ModelAdmin):
list_display = ('nickname', 'institution', 'is_female')
search_fields = ['nickname']
admin.site.register(Disclosure, DisclosureAdmin)
admin.site.register(ConstantRate, ConstantRateAdmin)
admin.site.register(ConstantCap, ConstantCapAdmin)
admin.site.register(School, SchoolAdmin)
admin.site.register(Alias, AliasAdmin)
admin.site.register(BAHRate)
admin.site.register(Feedback)
admin.site.register(Worksheet)
admin.site.register(Contact)
admin.site.register(Nickname, NicknameAdmin)
admin.site.register(Program)
| [
"[email protected]"
] | |
5666f9c55f3024b927bcbc8e0fdb60ceb72db8de | 12485bb945ab8af6ff6a5f3d9d4c542a7bcf95f8 | /server/src/uds/services/Xen/xen_client/__init__.py | 8c47e66b14c2d59e66552db5c9142effdefdc5b3 | [] | no_license | morfeuj/openuds | 6ef0c4bed624def0090efa6abdd2600b9be81a8b | 26e429019e5fe5b01ee1a476c879d8f8333b0ab0 | refs/heads/master | 2020-12-15T15:11:33.598430 | 2020-01-20T16:42:33 | 2020-01-20T16:42:33 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 20,495 | py | # -*- coding: utf-8 -*-
#
# Copyright (c) 2014-2019 Virtual Cable S.L.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of Virtual Cable S.L. nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import ssl
import enum
import xmlrpc.client
import logging
import typing
from . import XenAPI
logger = logging.getLogger(__name__)
TAG_TEMPLATE = "uds-template"
TAG_MACHINE = "uds-machine"
class XenFault(Exception):
pass
class XenFailure(XenAPI.Failure, XenFault):
exBadVmPowerState = 'VM_BAD_POWER_STATE'
exVmMissingPVDrivers = 'VM_MISSING_PV_DRIVERS'
exHandleInvalid = 'HANDLE_INVALID'
exHostIsSlave = 'HOST_IS_SLAVE'
exSRError = 'SR_BACKEND_FAILURE_44'
def __init__(self, details: typing.Optional[typing.List] = None):
details = [] if details is None else details
super(XenFailure, self).__init__(details)
def isHandleInvalid(self) -> bool:
return self.details[0] == XenFailure.exHandleInvalid
def needsXenTools(self) -> bool:
return self.details[0] == XenFailure.exVmMissingPVDrivers
def badPowerState(self) -> bool:
return self.details[0] == XenFailure.exBadVmPowerState
def isSlave(self) -> bool:
return self.details[0] == XenFailure.exHostIsSlave
def asHumanReadable(self) -> str:
try:
errList = {
XenFailure.exBadVmPowerState: 'Machine state is invalid for requested operation (needs {2} and state is {3})',
XenFailure.exVmMissingPVDrivers: 'Machine needs Xen Server Tools to allow requested operation',
XenFailure.exHostIsSlave: 'The connected host is a slave, try to connect to {1}',
XenFailure.exSRError: 'Error on SR: {2}',
XenFailure.exHandleInvalid: 'Invalid reference to {1}',
}
err = errList.get(self.details[0], 'Error {0}')
return err.format(*self.details)
except Exception:
return 'Unknown exception: {0}'.format(self.details)
def __str__(self) -> str:
return self.asHumanReadable()
class XenException(XenFault):
def __init__(self, message: typing.Any):
XenFault.__init__(self, message)
logger.debug('Exception create: %s', message)
class XenPowerState: # pylint: disable=too-few-public-methods
halted: str = 'Halted'
running: str = 'Running'
suspended: str = 'Suspended'
paused: str = 'Paused'
class XenServer: # pylint: disable=too-many-public-methods
_originalHost: str
_host: str
_port: str
_useSSL: bool
_verifySSL: bool
_protocol: str
_url: str
_loggedIn: bool
_username: str
_password: str
_session: typing.Any
_poolName: str
_apiVersion: str
def __init__(self, host: str, port: int, username: str, password: str, useSSL: bool = False, verifySSL: bool = False):
self._originalHost = self._host = host
self._port = str(port)
self._useSSL = bool(useSSL)
self._verifySSL = bool(verifySSL)
self._protocol = 'http' + ('s' if self._useSSL else '') + '://'
self._url = ''
self._loggedIn = False
self._username = username
self._password = password
self._session = None
self._poolName = self._apiVersion = ''
@staticmethod
def toMb(number: typing.Union[str, int]) -> int:
return int(number) // (1024 * 1024)
def checkLogin(self) -> bool:
if not self._loggedIn:
self.login()
return self._loggedIn
def getXenapiProperty(self, prop: str) -> typing.Any:
if not self.checkLogin():
raise Exception("Can't log in")
return getattr(self._session.xenapi, prop)
# Properties to fast access XenApi classes
Async = property(lambda self: self.getXenapiProperty('Async'))
task = property(lambda self: self.getXenapiProperty('task'))
VM = property(lambda self: self.getXenapiProperty('VM'))
SR = property(lambda self: self.getXenapiProperty('SR'))
pool = property(lambda self: self.getXenapiProperty('pool'))
host = property(lambda self: self.getXenapiProperty('host'))
network = property(lambda self: self.getXenapiProperty('network'))
VIF = property(lambda self: self.getXenapiProperty('VIF')) # Virtual Interface
VDI = property(lambda self: self.getXenapiProperty('VDI')) # Virtual Disk Image
VBD = property(lambda self: self.getXenapiProperty('VBD')) # Virtual Block Device
# Properties to access private vars
poolName = property(lambda self: self.checkLogin() and self._poolName)
hasPool = property(lambda self: self.checkLogin() and self._poolName != '')
def getPoolName(self) -> str:
pool = self.pool.get_all()[0]
return self.pool.get_name_label(pool)
# Login/Logout
def login(self, switchToMaster: bool = False) -> None:
try:
# We recalculate here url, because we can "switch host" on any moment
self._url = self._protocol + self._host + ':' + self._port
transport = None
if self._useSSL:
context = ssl.SSLContext(ssl.PROTOCOL_TLS)
if self._verifySSL is False:
context.verify_mode = ssl.CERT_NONE
else:
context.verify_mode = ssl.CERT_REQUIRED
context.check_hostname = True
transport = xmlrpc.client.SafeTransport(context=context)
logger.debug('Transport: %s', transport)
self._session = XenAPI.Session(self._url, transport=transport)
self._session.xenapi.login_with_password(self._username, self._password)
self._loggedIn = True
self._apiVersion = self._session.API_version
self._poolName = str(self.getPoolName())
except XenAPI.Failure as e: # XenAPI.Failure: ['HOST_IS_SLAVE', '172.27.0.29'] indicates that this host is a slave of 172.27.0.29, connect to it...
if switchToMaster and e.details[0] == 'HOST_IS_SLAVE':
logger.info('%s is a slave, connecting to master at %s because switchToMaster is True', self._host, e.details[1])
self._host = e.details[1]
self.login()
else:
raise XenFailure(e.details)
except Exception as e:
logger.exception('Unrecognized xenapi exception')
raise
def test(self) -> None:
self.login(False)
def logout(self) -> None:
self._session.logout()
self._loggedIn = False
self._session = None
self._poolName = self._apiVersion = ''
def getHost(self) -> str:
return self._host
def setHost(self, host: str) -> None:
self._host = host
def getTaskInfo(self, task: str) -> typing.MutableMapping[str, typing.Any]:
progress = 0
result = None
destroyTask = False
try:
status = self.task.get_status(task)
logger.debug('Task %s in state %s', task, status)
if status == 'pending':
status = 'running'
progress = int(self.task.get_progress(task) * 100)
elif status == 'success':
result = self.task.get_result(task)
destroyTask = True
elif status == 'failure':
result = XenFailure(self.task.get_error_info(task))
destroyTask = True
except XenAPI.Failure as e:
logger.debug('XenServer Failure: %s', e.details[0])
if e.details[0] == 'HANDLE_INVALID':
result = None
status = 'unknown'
progress = 0
else:
destroyTask = True
result = e.details[0]
status = 'failure'
except Exception as e:
logger.exception('Unexpected exception!')
result = str(e)
status = 'failure'
# Removes <value></value> if present
if result and not isinstance(result, XenFailure) and result.startswith('<value>'):
result = result[7:-8]
if destroyTask:
try:
self.task.destroy(task)
except Exception as e:
logger.warning('Destroy task %s returned error %s', task, str(e))
return {'result': result, 'progress': progress, 'status':str(status)}
def getSRs(self) -> typing.Iterable[typing.MutableMapping[str, typing.Any]]:
for srId in self.SR.get_all():
# Only valid SR shared, non iso
name_label = self.SR.get_name_label(srId)
if self.SR.get_content_type(srId) == 'iso' or \
self.SR.get_shared(srId) is False or \
name_label == '':
continue
valid = True
allowed_ops = self.SR.get_allowed_operations(srId)
for v in ['vdi_create', 'vdi_clone', 'vdi_snapshot', 'vdi_destroy']:
if v not in allowed_ops:
valid = False
if not valid:
return
yield {
'id': srId,
'name': name_label,
'size': XenServer.toMb(self.SR.get_physical_size(srId)),
'used': XenServer.toMb(self.SR.get_physical_utilisation(srId))
}
def getSRInfo(self, srId: str) -> typing.MutableMapping[str, typing.Any]:
return {
'id': srId,
'name': self.SR.get_name_label(srId),
'size': XenServer.toMb(self.SR.get_physical_size(srId)),
'used': XenServer.toMb(self.SR.get_physical_utilisation(srId))
}
def getNetworks(self) -> typing.Iterable[typing.MutableMapping[str, typing.Any]]:
for netId in self.network.get_all():
if self.network.get_other_config(netId).get('is_host_internal_management_network', False) is False:
yield {
'id': netId,
'name': self.network.get_name_label(netId),
}
def getNetworkInfo(self, netId: str) -> typing.MutableMapping[str, typing.Any]:
return {
'id': netId,
'name': self.network.get_name_label(netId)
}
def getVMs(self) -> typing.Iterable[typing.MutableMapping[str, typing.Any]]:
try:
vms = self.VM.get_all()
for vm in vms:
# if self.VM.get_is_a_template(vm): # Sample set_tags, easy..
# self.VM.set_tags(vm, ['template'])
# continue
if self.VM.get_is_control_domain(vm) or \
self.VM.get_is_a_template(vm):
continue
yield {'id': vm, 'name': self.VM.get_name_label(vm)}
except XenAPI.Failure as e:
raise XenFailure(e.details)
except Exception as e:
raise XenException(str(e))
def getVMPowerState(self, vmId: str) -> str:
try:
power_state = self.VM.get_power_state(vmId)
logger.debug('Power state of %s: %s', vmId, power_state)
return power_state
except XenAPI.Failure as e:
raise XenFailure(e.details)
def getVMInfo(self, vmId: str) -> typing.Any:
try:
return self.VM.get_record(vmId)
except XenAPI.Failure as e:
raise XenFailure(e.details)
def startVM(self, vmId: str, asnc: bool = True) -> typing.Optional[str]:
vmState = self.getVMPowerState(vmId)
if vmState == XenPowerState.running:
return None # Already powered on
if vmState == XenPowerState.suspended:
return self.resumeVM(vmId, asnc)
if asnc:
return self.Async.VM.start(vmId, False, False)
return self.VM.start(vmId, False, False)
def stopVM(self, vmId: str, asnc: bool = True) -> typing.Optional[str]:
vmState = self.getVMPowerState(vmId)
if vmState in (XenPowerState.suspended, XenPowerState.halted):
return None # Already powered off
if asnc:
return self.Async.VM.hard_shutdown(vmId)
return self.VM.hard_shutdown(vmId)
def resetVM(self, vmId, asnc=True) -> typing.Optional[str]:
vmState = self.getVMPowerState(vmId)
if vmState in (XenPowerState.suspended, XenPowerState.halted):
return None # Already powered off, cannot reboot
if asnc:
return self.Async.VM.hard_reboot(vmId)
return self.VM.hard_reboot(vmId)
def canSuspendVM(self, vmId: str) -> bool:
operations = self.VM.get_allowed_operations(vmId)
logger.debug('Operations: %s', operations)
return 'suspend' in operations
def suspendVM(self, vmId: str, asnc: bool = True) -> typing.Optional[str]:
vmState = self.getVMPowerState(vmId)
if vmState == XenPowerState.suspended:
return None
if asnc:
return self.Async.VM.suspend(vmId)
return self.VM.suspend(vmId)
def resumeVM(self, vmId: str, asnc: bool = True) -> typing.Optional[str]:
vmState = self.getVMPowerState(vmId)
if vmState != XenPowerState.suspended:
return None
if asnc:
return self.Async.VM.resume(vmId, False, False)
return self.VM.resume(vmId, False, False)
def cloneVM(self, vmId: str, targetName: str, targetSR: typing.Optional[str] = None) -> str:
"""
If targetSR is NONE:
Clones the specified VM, making a new VM.
Clone automatically exploits the capabilities of the underlying storage repository
in which the VM's disk images are stored (e.g. Copy on Write).
Else:
Copies the specified VM, making a new VM. Unlike clone, copy does not exploit the capabilities
of the underlying storage repository in which the VM's disk images are stored.
Instead, copy guarantees that the disk images of the newly created VM will be
'full disks' - i.e. not part of a CoW chain.
This function can only be called when the VM is in the Halted State.
"""
logger.debug('Cloning VM %s to %s on sr %s', vmId, targetName, targetSR)
operations = self.VM.get_allowed_operations(vmId)
logger.debug('Allowed operations: %s', operations)
try:
if targetSR:
if 'copy' not in operations:
raise XenException('Copy is not supported for this machine')
task = self.Async.VM.copy(vmId, targetName, targetSR)
else:
if 'clone' not in operations:
raise XenException('Clone is not supported for this machine')
task = self.Async.VM.clone(vmId, targetName)
return task
except XenAPI.Failure as e:
raise XenFailure(e.details)
def removeVM(self, vmId: str) -> None:
logger.debug('Removing machine')
vdisToDelete = []
for vdb in self.VM.get_VBDs(vmId):
vdi = None
try:
vdi = self.VBD.get_VDI(vdb)
if vdi == 'OpaqueRef:NULL':
logger.debug('VDB without VDI')
continue
logger.debug('VDI: %s', vdi)
except Exception:
logger.exception('Exception getting VDI from VDB')
if self.VDI.get_read_only(vdi) is True:
logger.debug('%s is read only, skipping', vdi)
continue
logger.debug('VDI to delete: %s', vdi)
vdisToDelete.append(vdi)
self.VM.destroy(vmId)
for vdi in vdisToDelete:
self.VDI.destroy(vdi)
def configureVM(self, vmId: str, **kwargs):
"""
Optional args:
mac = { 'network': netId, 'mac': mac }
memory = MEM in MB, minimal is 128
Mac address should be in the range 02:xx:xx:xx:xx (recommended, but not a "have to")
"""
mac: typing.Optional[typing.Dict[str, str]] = kwargs.get('mac', None)
memory: typing.Optional[typing.Union[str, int]] = kwargs.get('memory', None)
# If requested mac address change
try:
if mac is not None:
for vifId in self.VM.get_VIFs(vmId):
vif = self.VIF.get_record(vifId)
if vif['network'] == mac['network']:
logger.debug('Found VIF: %s', vif['network'])
self.VIF.destroy(vifId)
# for k in ['status_code', 'status_detail', 'uuid']:
# try:
# del vif[k]
# except:
# logger.exception('Erasing property {0}'.format(k))
vif['MAC'] = mac['mac']
vif['MAC_autogenerated'] = False
self.VIF.create(vif)
# If requested memory change
if memory:
logger.debug('Setting up memory to %s MB', memory)
# Convert memory from MB to bytes
memory = str(int(memory) * 1024 * 1024)
self.VM.set_memory_limits(vmId, memory, memory, memory, memory)
except XenAPI.Failure as e:
raise XenFailure(e.details)
def provisionVM(self, vmId: str, **kwargs):
tags = self.VM.get_tags(vmId)
try:
del tags[tags.index(TAG_TEMPLATE)]
except Exception:
pass
tags.append(TAG_MACHINE)
self.VM.set_tags(vmId, tags)
if kwargs.get('asnc', True) is True:
return self.Async.VM.provision(vmId)
return self.VM.provision(vmId)
def convertToTemplate(self, vmId: str, shadowMultiplier: int = 4) -> None:
try:
operations = self.VM.get_allowed_operations(vmId)
logger.debug('Allowed operations: %s', operations)
if 'make_into_template' not in operations:
raise XenException('Convert in template is not supported for this machine')
self.VM.set_is_a_template(vmId, True)
# Mark it as an "UDS Template" by tagging it
tags = self.VM.get_tags(vmId)
try:
del tags[tags.index(TAG_MACHINE)]
except Exception:
pass
tags.append(TAG_TEMPLATE)
self.VM.set_tags(vmId, tags)
# Set multiplier
try:
self.VM.set_HVM_shadow_multiplier(vmId, float(shadowMultiplier))
except Exception:
pass # Can't set shadowMultiplier, nothing happens
except XenAPI.Failure as e:
raise XenFailure(e.details)
def removeTemplate(self, templateId: str) -> None:
self.removeVM(templateId)
def cloneTemplate(self, templateId: str, targetName: str) -> str:
"""
After cloning template, we must deploy the VM so it's a full usable VM
"""
return self.cloneVM(templateId, targetName)
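# Minimal usage sketch (added for illustration, not part of the original module;
# the host, credentials and VM reference below are placeholders and error
# handling is omitted):
#
#   server = XenServer('xen.example.org', 443, 'admin', 'secret', useSSL=True)
#   server.login(switchToMaster=True)
#   task = server.cloneVM('OpaqueRef:source-vm', 'uds-clone-01')
#   info = server.getTaskInfo(task)  # poll until info['status'] is no longer 'running'
#   server.configureVM(info['result'], memory=2048)  # 'result' holds the new VM reference on success
#   server.logout()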
| [
"[email protected]"
] | |
efa9169a5663a0c34cd035eaf9a6c8eb588d6625 | 0485a490f466bd1d02eaae96d277888781208c0e | /tests/single_instruction_translation_validation/mcsema/register-variants/xaddb_r8_r8/Output/test-z3.py | 0f72e6bf891b173f5c8d4ce6b1ee4ac725000960 | [
"LicenseRef-scancode-unknown-license-reference",
"NCSA"
] | permissive | Mthandazo42/validating-binary-decompilation | c0e2d54cd79e609bfa35802975bddfa52e646fad | c0fcd6f099e38195dcbbac9e8c13a825865c5cb5 | refs/heads/master | 2022-11-11T13:18:13.033044 | 2020-06-25T05:49:01 | 2020-06-25T05:49:01 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 24,327 | py | #############################################
######## Auto Generated Proof Scripts #######
#############################################
import z3
import sys
status=True
test_name="UnK"
if(len(sys.argv) > 1):
test_name = sys.argv[1]
def solve(msg, lvar, xvar, s):
global status
s.set("timeout", 60000)
res = s.check()
if(z3.unknown == res):
print(test_name + "::" + msg + "::unk")
status = "Unknown"
if(z3.sat == res):
if("UNDEF" in xvar.sexpr()):
print(test_name + "::" + msg + "::undef-sat")
else:
m = s.model()
print(test_name + "::" + msg + "::sat")
print("\n")
print("query", s)
print("\n")
print("model", m)
print("\n")
print("xvar =", m.evaluate(xvar))
print("lvar =", m.evaluate(lvar))
print("\n")
status = False
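# Note on how the generated checks below work (added comment, not part of the
# original script): for each output register or flag, the script builds `lvar`
# (the lifted/decompiled semantics over the VL_* variables) and `xvar` (the
# reference x86 semantics over the VX_* variables), asserts lvar != xvar, and
# calls solve(). An unsat result means the two definitions agree on all inputs;
# a sat result yields a counterexample model and the test is reported as failed
# at the bottom of the script.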
##############################
## X86 specific variables ####
##############################
### GPRs
VX_RAX = z3.BitVec('VX_RAX',64)
VX_RBX = z3.BitVec('VX_RBX',64)
VX_RCX = z3.BitVec('VX_RCX',64)
VX_RDX = z3.BitVec('VX_RDX',64)
VX_RSI = z3.BitVec('VX_RSI',64)
VX_RDI = z3.BitVec('VX_RDI',64)
### Flags
VX_CF = z3.BitVec('VX_CF',1)
VX_PF = z3.BitVec('VX_PF',1)
VX_ZF = z3.BitVec('VX_ZF',1)
VX_SF = z3.BitVec('VX_SF',1)
VX_AF = z3.BitVec('VX_AF',1)
VX_OF = z3.BitVec('VX_OF',1)
### YMM Registers
VX_YMM1 = z3.BitVec('VX_YMM1', 256)
VX_YMM2 = z3.BitVec('VX_YMM2', 256)
## Undef
VX_UNDEF_1 = z3.BitVec('VX_UNDEF_1', 1)
VX_UNDEF_BOOL = z3.Bool('VX_UNDEF_BOOL')
##############################
## X86 specific variables ####
##############################
### GPRs
VL_RAX = z3.BitVec('VL_RAX',64)
VL_RBX = z3.BitVec('VL_RBX',64)
VL_RCX = z3.BitVec('VL_RCX',64)
VL_RDX = z3.BitVec('VL_RDX',64)
VL_RSI = z3.BitVec('VL_RSI',64)
VL_RDI = z3.BitVec('VL_RDI',64)
### Flags
VL_CF = z3.BitVec('VL_CF',8)
VL_PF = z3.BitVec('VL_PF',8)
VL_ZF = z3.BitVec('VL_ZF',8)
VL_SF = z3.BitVec('VL_SF',8)
VL_AF = z3.BitVec('VL_AF',8)
VL_OF = z3.BitVec('VL_OF',8)
### YMM Registers
VL_YMM1_0 = z3.BitVec('VL_YMM1_0', 64)
VL_YMM1_1 = z3.BitVec('VL_YMM1_1', 64)
VL_YMM1_2 = z3.BitVec('VL_YMM1_2', 64)
VL_YMM1_3 = z3.BitVec('VL_YMM1_3', 64)
VL_YMM2_0 = z3.BitVec('VL_YMM2_0', 64)
VL_YMM2_1 = z3.BitVec('VL_YMM2_1', 64)
VL_YMM2_2 = z3.BitVec('VL_YMM2_2', 64)
VL_YMM2_3 = z3.BitVec('VL_YMM2_3', 64)
##############################
## Proof variables ###########
##############################
V_R = z3.BitVec('V_R',64)
V_F = z3.BitVec('V_F',1)
V_Y = z3.BitVec('V_Y',256)
## Solver instance
s = z3.Solver()
##############################
## Default constraints #######
##############################
### GPRs
s.add(VX_RAX == VL_RAX)
s.add(VX_RBX == VL_RBX)
s.add(VX_RCX == VL_RCX)
s.add(VX_RDX == VL_RDX)
s.add(VX_RDI == VL_RDI)
s.add(VX_RSI == VL_RSI)
### Flags
s.add(z3.Or(VL_CF == 0, VL_CF == 1))
s.add(z3.Or(VL_ZF == 0, VL_ZF == 1))
s.add(z3.Or(VL_PF == 0, VL_PF == 1))
s.add(z3.Or(VL_SF == 0, VL_SF == 1))
s.add(z3.Or(VL_AF == 0, VL_AF == 1))
s.add(z3.Or(VL_OF == 0, VL_OF == 1))
s.add(z3.Extract(0,0, VL_CF) == VX_CF)
s.add(z3.Extract(0,0, VL_SF) == VX_SF)
s.add(z3.Extract(0,0, VL_ZF) == VX_ZF)
s.add(z3.Extract(0,0, VL_PF) == VX_PF)
s.add(z3.Extract(0,0, VL_AF) == VX_AF)
s.add(z3.Extract(0,0, VL_OF) == VX_OF)
### Ymms
s.add(z3.Concat(VL_YMM1_3, VL_YMM1_2, VL_YMM1_1, VL_YMM1_0) == VX_YMM1)
s.add(z3.Concat(VL_YMM2_3, VL_YMM2_2, VL_YMM2_1, VL_YMM2_0) == VX_YMM2)
## =******= AF =******=
s.push()
lvar = (V_F == z3.Extract(0, 0, z3.Extract(7, 0, (((z3.LShR(((((z3.Concat(z3.BitVecVal( 0, 56), z3.Extract(7, 0, VL_RBX)) ^ (z3.Concat(z3.BitVecVal(0, 56), z3.Extract(7, 0, VL_RCX)) & z3.BitVecVal(256 - 1, 64))) & z3.BitVecVal(256 - 1, 64)) ^ ((z3.Concat(z3.BitVecVal( 0, 56), z3.Extract(7, 0, VL_RBX)) + (z3.Concat(z3.BitVecVal(0, 56), z3.Extract(7, 0, VL_RCX)) & z3.BitVecVal(256 - 1, 64))) & z3.BitVecVal(256 - 1, 64))) & z3.BitVecVal(256 - 1, 64)), z3.BitVecVal(4, 64)) & z3.BitVecVal(256 - 1, 64)) & z3.BitVecVal(1, 64)) & z3.BitVecVal(256 - 1, 64)))))
xvar = (V_F == ((z3.Extract(4, 4, VX_RCX) ^ z3.Extract(4, 4, VX_RBX)) ^ z3.Extract(4, 4, (z3.Concat(z3.BitVecVal(0, 1), z3.Extract(7, 0, VX_RCX)) + z3.Concat(z3.BitVecVal(0, 1), z3.Extract(7, 0, VX_RBX))))))
s.add(lvar != xvar)
solve("AF", lvar, xvar, s)
s.pop()
## =******= CF =******=
s.push()
lvar = (V_F == z3.Extract(0, 0, z3.Extract(7, 0, z3.Concat(z3.BitVecVal(0, 7), z3.Extract(0, 0, ((z3.If(z3.ULT(((z3.Concat(z3.BitVecVal( 0, 56), z3.Extract(7, 0, VL_RBX)) + (z3.Concat(z3.BitVecVal(0, 56), z3.Extract(7, 0, VL_RCX)) & z3.BitVecVal(256 - 1, 64))) & z3.BitVecVal(256 - 1, 64)), (z3.Concat(z3.BitVecVal(0, 56), z3.Extract(7, 0, VL_RCX)) & z3.BitVecVal(256 - 1, 64))), z3.BitVecVal(1, 8), z3.BitVecVal(0, 8)) | z3.If(z3.ULT(((z3.Concat(z3.BitVecVal( 0, 56), z3.Extract(7, 0, VL_RBX)) + (z3.Concat(z3.BitVecVal(0, 56), z3.Extract(7, 0, VL_RCX)) & z3.BitVecVal(256 - 1, 64))) & z3.BitVecVal(256 - 1, 64)), z3.Concat(z3.BitVecVal( 0, 56), z3.Extract(7, 0, VL_RBX))), z3.BitVecVal(1, 8), z3.BitVecVal(0, 8))) & z3.BitVecVal(2 - 1, 8)))))))
xvar = (V_F == z3.Extract(8, 8, (z3.Concat(z3.BitVecVal(0, 1), z3.Extract(7, 0, VX_RCX)) + z3.Concat(z3.BitVecVal(0, 1), z3.Extract(7, 0, VX_RBX)))))
s.add(lvar != xvar)
solve("CF", lvar, xvar, s)
s.pop()
## =******= OF =******=
s.push()
lvar = (V_F == z3.Extract(0, 0, z3.Extract(7, 0, z3.Concat(z3.BitVecVal(0, 7), z3.Extract(0, 0, z3.If(((((((z3.LShR(((z3.Concat(z3.BitVecVal( 0, 56), z3.Extract(7, 0, VL_RBX)) + (z3.Concat(z3.BitVecVal(0, 56), z3.Extract(7, 0, VL_RCX)) & z3.BitVecVal(256 - 1, 64))) & z3.BitVecVal(256 - 1, 64)), z3.BitVecVal(7, 64)) & z3.BitVecVal(256 - 1, 64)) ^ (z3.LShR((z3.Concat(z3.BitVecVal(0, 56), z3.Extract(7, 0, VL_RCX)) & z3.BitVecVal(256 - 1, 64)), z3.BitVecVal(7, 64)) & z3.BitVecVal(256 - 1, 64))) & z3.BitVecVal(256 - 1, 64)) + (((z3.LShR(((z3.Concat(z3.BitVecVal( 0, 56), z3.Extract(7, 0, VL_RBX)) + (z3.Concat(z3.BitVecVal(0, 56), z3.Extract(7, 0, VL_RCX)) & z3.BitVecVal(256 - 1, 64))) & z3.BitVecVal(256 - 1, 64)), z3.BitVecVal(7, 64)) & z3.BitVecVal(256 - 1, 64)) ^ z3.Concat(z3.BitVecVal( 0, 56), (z3.LShR(z3.Extract(7, 0, VL_RBX), z3.BitVecVal(7, 8)) & z3.BitVecVal(256 - 1, 8)))) & z3.BitVecVal(256 - 1, 64))) & z3.BitVecVal(256 - 1, 64)) == z3.BitVecVal(2, 64)), z3.BitVecVal(1, 8), z3.BitVecVal(0, 8)))))))
xvar = (V_F == z3.If(z3.And(((z3.Extract(7, 7, VX_RCX) == z3.BitVecVal(1, 1)) == (z3.Extract(7, 7, VX_RBX) == z3.BitVecVal(1, 1))), z3.Not(((z3.Extract(7, 7, VX_RCX) == z3.BitVecVal(1, 1)) == (z3.Extract(7, 7, (z3.Concat(z3.BitVecVal(0, 1), z3.Extract(7, 0, VX_RCX)) + z3.Concat(z3.BitVecVal(0, 1), z3.Extract(7, 0, VX_RBX)))) == z3.BitVecVal(1, 1))))), z3.BitVecVal(1, 1), z3.BitVecVal(0, 1)))
s.add(lvar != xvar)
solve("OF", lvar, xvar, s)
s.pop()
## =******= PF =******=
s.push()
lvar = (V_F == z3.Extract(0, 0, z3.Extract(7, 0, ((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((z3.LShR(z3.Concat(z3.BitVecVal(0, 24), z3.Extract(7, 0, ((z3.Concat(z3.BitVecVal( 0, 56), z3.Extract(7, 0, VL_RBX)) + (z3.Concat(z3.BitVecVal(0, 56), z3.Extract(7, 0, VL_RCX)) & z3.BitVecVal(256 - 1, 64))) & z3.BitVecVal(256 - 1, 64)))), z3.BitVecVal(31, 32)) & z3.BitVecVal(4294967296 - 1, 32)) + ((z3.Concat(z3.BitVecVal(0, 24), z3.Extract(7, 0, ((z3.Concat(z3.BitVecVal( 0, 56), z3.Extract(7, 0, VL_RBX)) + (z3.Concat(z3.BitVecVal(0, 56), z3.Extract(7, 0, VL_RCX)) & z3.BitVecVal(256 - 1, 64))) & z3.BitVecVal(256 - 1, 64)))) & z3.BitVecVal(1, 32)) & z3.BitVecVal(4294967296 - 1, 32))) & z3.BitVecVal(4294967296 - 1, 32)) + (((z3.LShR(z3.Concat(z3.BitVecVal(0, 24), z3.Extract(7, 0, ((z3.Concat(z3.BitVecVal( 0, 56), z3.Extract(7, 0, VL_RBX)) + (z3.Concat(z3.BitVecVal(0, 56), z3.Extract(7, 0, VL_RCX)) & z3.BitVecVal(256 - 1, 64))) & z3.BitVecVal(256 - 1, 64)))), z3.BitVecVal(1, 32)) & z3.BitVecVal(4294967296 - 1, 32)) & z3.BitVecVal(1, 32)) & z3.BitVecVal(4294967296 - 1, 32))) & z3.BitVecVal(4294967296 - 1, 32)) + (((z3.LShR(z3.Concat(z3.BitVecVal(0, 24), z3.Extract(7, 0, ((z3.Concat(z3.BitVecVal( 0, 56), z3.Extract(7, 0, VL_RBX)) + (z3.Concat(z3.BitVecVal(0, 56), z3.Extract(7, 0, VL_RCX)) & z3.BitVecVal(256 - 1, 64))) & z3.BitVecVal(256 - 1, 64)))), z3.BitVecVal(2, 32)) & z3.BitVecVal(4294967296 - 1, 32)) & z3.BitVecVal(1, 32)) & z3.BitVecVal(4294967296 - 1, 32))) & z3.BitVecVal(4294967296 - 1, 32)) + (((z3.LShR(z3.Concat(z3.BitVecVal(0, 24), z3.Extract(7, 0, ((z3.Concat(z3.BitVecVal( 0, 56), z3.Extract(7, 0, VL_RBX)) + (z3.Concat(z3.BitVecVal(0, 56), z3.Extract(7, 0, VL_RCX)) & z3.BitVecVal(256 - 1, 64))) & z3.BitVecVal(256 - 1, 64)))), z3.BitVecVal(3, 32)) & z3.BitVecVal(4294967296 - 1, 32)) & z3.BitVecVal(1, 32)) & z3.BitVecVal(4294967296 - 1, 32))) & z3.BitVecVal(4294967296 - 1, 32)) + (((z3.LShR(z3.Concat(z3.BitVecVal(0, 24), z3.Extract(7, 0, ((z3.Concat(z3.BitVecVal( 0, 56), z3.Extract(7, 0, VL_RBX)) + (z3.Concat(z3.BitVecVal(0, 56), z3.Extract(7, 0, VL_RCX)) & z3.BitVecVal(256 - 1, 64))) & z3.BitVecVal(256 - 1, 64)))), z3.BitVecVal(4, 32)) & z3.BitVecVal(4294967296 - 1, 32)) & z3.BitVecVal(1, 32)) & z3.BitVecVal(4294967296 - 1, 32))) & z3.BitVecVal(4294967296 - 1, 32)) + (((z3.LShR(z3.Concat(z3.BitVecVal(0, 24), z3.Extract(7, 0, ((z3.Concat(z3.BitVecVal( 0, 56), z3.Extract(7, 0, VL_RBX)) + (z3.Concat(z3.BitVecVal(0, 56), z3.Extract(7, 0, VL_RCX)) & z3.BitVecVal(256 - 1, 64))) & z3.BitVecVal(256 - 1, 64)))), z3.BitVecVal(5, 32)) & z3.BitVecVal(4294967296 - 1, 32)) & z3.BitVecVal(1, 32)) & z3.BitVecVal(4294967296 - 1, 32))) & z3.BitVecVal(4294967296 - 1, 32)) + (((z3.LShR(z3.Concat(z3.BitVecVal(0, 24), z3.Extract(7, 0, ((z3.Concat(z3.BitVecVal( 0, 56), z3.Extract(7, 0, VL_RBX)) + (z3.Concat(z3.BitVecVal(0, 56), z3.Extract(7, 0, VL_RCX)) & z3.BitVecVal(256 - 1, 64))) & z3.BitVecVal(256 - 1, 64)))), z3.BitVecVal(6, 32)) & z3.BitVecVal(4294967296 - 1, 32)) & z3.BitVecVal(1, 32)) & z3.BitVecVal(4294967296 - 1, 32))) & z3.BitVecVal(4294967296 - 1, 32)) + (((z3.LShR(z3.Concat(z3.BitVecVal(0, 24), z3.Extract(7, 0, ((z3.Concat(z3.BitVecVal( 0, 56), z3.Extract(7, 0, VL_RBX)) + (z3.Concat(z3.BitVecVal(0, 56), z3.Extract(7, 0, VL_RCX)) & z3.BitVecVal(256 - 1, 64))) & z3.BitVecVal(256 - 1, 64)))), z3.BitVecVal(7, 32)) & z3.BitVecVal(4294967296 - 1, 32)) & z3.BitVecVal(1, 32)) & z3.BitVecVal(4294967296 - 1, 32))) & z3.BitVecVal(4294967296 - 1, 32)) + (((z3.LShR(z3.Concat(z3.BitVecVal(0, 
24), z3.Extract(7, 0, ((z3.Concat(z3.BitVecVal( 0, 56), z3.Extract(7, 0, VL_RBX)) + (z3.Concat(z3.BitVecVal(0, 56), z3.Extract(7, 0, VL_RCX)) & z3.BitVecVal(256 - 1, 64))) & z3.BitVecVal(256 - 1, 64)))), z3.BitVecVal(8, 32)) & z3.BitVecVal(4294967296 - 1, 32)) & z3.BitVecVal(1, 32)) & z3.BitVecVal(4294967296 - 1, 32))) & z3.BitVecVal(4294967296 - 1, 32)) + (((z3.LShR(z3.Concat(z3.BitVecVal(0, 24), z3.Extract(7, 0, ((z3.Concat(z3.BitVecVal( 0, 56), z3.Extract(7, 0, VL_RBX)) + (z3.Concat(z3.BitVecVal(0, 56), z3.Extract(7, 0, VL_RCX)) & z3.BitVecVal(256 - 1, 64))) & z3.BitVecVal(256 - 1, 64)))), z3.BitVecVal(9, 32)) & z3.BitVecVal(4294967296 - 1, 32)) & z3.BitVecVal(1, 32)) & z3.BitVecVal(4294967296 - 1, 32))) & z3.BitVecVal(4294967296 - 1, 32)) + (((z3.LShR(z3.Concat(z3.BitVecVal(0, 24), z3.Extract(7, 0, ((z3.Concat(z3.BitVecVal( 0, 56), z3.Extract(7, 0, VL_RBX)) + (z3.Concat(z3.BitVecVal(0, 56), z3.Extract(7, 0, VL_RCX)) & z3.BitVecVal(256 - 1, 64))) & z3.BitVecVal(256 - 1, 64)))), z3.BitVecVal(10, 32)) & z3.BitVecVal(4294967296 - 1, 32)) & z3.BitVecVal(1, 32)) & z3.BitVecVal(4294967296 - 1, 32))) & z3.BitVecVal(4294967296 - 1, 32)) + (((z3.LShR(z3.Concat(z3.BitVecVal(0, 24), z3.Extract(7, 0, ((z3.Concat(z3.BitVecVal( 0, 56), z3.Extract(7, 0, VL_RBX)) + (z3.Concat(z3.BitVecVal(0, 56), z3.Extract(7, 0, VL_RCX)) & z3.BitVecVal(256 - 1, 64))) & z3.BitVecVal(256 - 1, 64)))), z3.BitVecVal(11, 32)) & z3.BitVecVal(4294967296 - 1, 32)) & z3.BitVecVal(1, 32)) & z3.BitVecVal(4294967296 - 1, 32))) & z3.BitVecVal(4294967296 - 1, 32)) + (((z3.LShR(z3.Concat(z3.BitVecVal(0, 24), z3.Extract(7, 0, ((z3.Concat(z3.BitVecVal( 0, 56), z3.Extract(7, 0, VL_RBX)) + (z3.Concat(z3.BitVecVal(0, 56), z3.Extract(7, 0, VL_RCX)) & z3.BitVecVal(256 - 1, 64))) & z3.BitVecVal(256 - 1, 64)))), z3.BitVecVal(12, 32)) & z3.BitVecVal(4294967296 - 1, 32)) & z3.BitVecVal(1, 32)) & z3.BitVecVal(4294967296 - 1, 32))) & z3.BitVecVal(4294967296 - 1, 32)) + (((z3.LShR(z3.Concat(z3.BitVecVal(0, 24), z3.Extract(7, 0, ((z3.Concat(z3.BitVecVal( 0, 56), z3.Extract(7, 0, VL_RBX)) + (z3.Concat(z3.BitVecVal(0, 56), z3.Extract(7, 0, VL_RCX)) & z3.BitVecVal(256 - 1, 64))) & z3.BitVecVal(256 - 1, 64)))), z3.BitVecVal(13, 32)) & z3.BitVecVal(4294967296 - 1, 32)) & z3.BitVecVal(1, 32)) & z3.BitVecVal(4294967296 - 1, 32))) & z3.BitVecVal(4294967296 - 1, 32)) + (((z3.LShR(z3.Concat(z3.BitVecVal(0, 24), z3.Extract(7, 0, ((z3.Concat(z3.BitVecVal( 0, 56), z3.Extract(7, 0, VL_RBX)) + (z3.Concat(z3.BitVecVal(0, 56), z3.Extract(7, 0, VL_RCX)) & z3.BitVecVal(256 - 1, 64))) & z3.BitVecVal(256 - 1, 64)))), z3.BitVecVal(14, 32)) & z3.BitVecVal(4294967296 - 1, 32)) & z3.BitVecVal(1, 32)) & z3.BitVecVal(4294967296 - 1, 32))) & z3.BitVecVal(4294967296 - 1, 32)) + (((z3.LShR(z3.Concat(z3.BitVecVal(0, 24), z3.Extract(7, 0, ((z3.Concat(z3.BitVecVal( 0, 56), z3.Extract(7, 0, VL_RBX)) + (z3.Concat(z3.BitVecVal(0, 56), z3.Extract(7, 0, VL_RCX)) & z3.BitVecVal(256 - 1, 64))) & z3.BitVecVal(256 - 1, 64)))), z3.BitVecVal(15, 32)) & z3.BitVecVal(4294967296 - 1, 32)) & z3.BitVecVal(1, 32)) & z3.BitVecVal(4294967296 - 1, 32))) & z3.BitVecVal(4294967296 - 1, 32)) + (((z3.LShR(z3.Concat(z3.BitVecVal(0, 24), z3.Extract(7, 0, ((z3.Concat(z3.BitVecVal( 0, 56), z3.Extract(7, 0, VL_RBX)) + (z3.Concat(z3.BitVecVal(0, 56), z3.Extract(7, 0, VL_RCX)) & z3.BitVecVal(256 - 1, 64))) & z3.BitVecVal(256 - 1, 64)))), z3.BitVecVal(16, 32)) & z3.BitVecVal(4294967296 - 1, 32)) & z3.BitVecVal(1, 32)) & z3.BitVecVal(4294967296 - 1, 32))) & z3.BitVecVal(4294967296 - 1, 32)) + 
(((z3.LShR(z3.Concat(z3.BitVecVal(0, 24), z3.Extract(7, 0, ((z3.Concat(z3.BitVecVal( 0, 56), z3.Extract(7, 0, VL_RBX)) + (z3.Concat(z3.BitVecVal(0, 56), z3.Extract(7, 0, VL_RCX)) & z3.BitVecVal(256 - 1, 64))) & z3.BitVecVal(256 - 1, 64)))), z3.BitVecVal(17, 32)) & z3.BitVecVal(4294967296 - 1, 32)) & z3.BitVecVal(1, 32)) & z3.BitVecVal(4294967296 - 1, 32))) & z3.BitVecVal(4294967296 - 1, 32)) + (((z3.LShR(z3.Concat(z3.BitVecVal(0, 24), z3.Extract(7, 0, ((z3.Concat(z3.BitVecVal( 0, 56), z3.Extract(7, 0, VL_RBX)) + (z3.Concat(z3.BitVecVal(0, 56), z3.Extract(7, 0, VL_RCX)) & z3.BitVecVal(256 - 1, 64))) & z3.BitVecVal(256 - 1, 64)))), z3.BitVecVal(18, 32)) & z3.BitVecVal(4294967296 - 1, 32)) & z3.BitVecVal(1, 32)) & z3.BitVecVal(4294967296 - 1, 32))) & z3.BitVecVal(4294967296 - 1, 32)) + (((z3.LShR(z3.Concat(z3.BitVecVal(0, 24), z3.Extract(7, 0, ((z3.Concat(z3.BitVecVal( 0, 56), z3.Extract(7, 0, VL_RBX)) + (z3.Concat(z3.BitVecVal(0, 56), z3.Extract(7, 0, VL_RCX)) & z3.BitVecVal(256 - 1, 64))) & z3.BitVecVal(256 - 1, 64)))), z3.BitVecVal(19, 32)) & z3.BitVecVal(4294967296 - 1, 32)) & z3.BitVecVal(1, 32)) & z3.BitVecVal(4294967296 - 1, 32))) & z3.BitVecVal(4294967296 - 1, 32)) + (((z3.LShR(z3.Concat(z3.BitVecVal(0, 24), z3.Extract(7, 0, ((z3.Concat(z3.BitVecVal( 0, 56), z3.Extract(7, 0, VL_RBX)) + (z3.Concat(z3.BitVecVal(0, 56), z3.Extract(7, 0, VL_RCX)) & z3.BitVecVal(256 - 1, 64))) & z3.BitVecVal(256 - 1, 64)))), z3.BitVecVal(20, 32)) & z3.BitVecVal(4294967296 - 1, 32)) & z3.BitVecVal(1, 32)) & z3.BitVecVal(4294967296 - 1, 32))) & z3.BitVecVal(4294967296 - 1, 32)) + (((z3.LShR(z3.Concat(z3.BitVecVal(0, 24), z3.Extract(7, 0, ((z3.Concat(z3.BitVecVal( 0, 56), z3.Extract(7, 0, VL_RBX)) + (z3.Concat(z3.BitVecVal(0, 56), z3.Extract(7, 0, VL_RCX)) & z3.BitVecVal(256 - 1, 64))) & z3.BitVecVal(256 - 1, 64)))), z3.BitVecVal(21, 32)) & z3.BitVecVal(4294967296 - 1, 32)) & z3.BitVecVal(1, 32)) & z3.BitVecVal(4294967296 - 1, 32))) & z3.BitVecVal(4294967296 - 1, 32)) + (((z3.LShR(z3.Concat(z3.BitVecVal(0, 24), z3.Extract(7, 0, ((z3.Concat(z3.BitVecVal( 0, 56), z3.Extract(7, 0, VL_RBX)) + (z3.Concat(z3.BitVecVal(0, 56), z3.Extract(7, 0, VL_RCX)) & z3.BitVecVal(256 - 1, 64))) & z3.BitVecVal(256 - 1, 64)))), z3.BitVecVal(22, 32)) & z3.BitVecVal(4294967296 - 1, 32)) & z3.BitVecVal(1, 32)) & z3.BitVecVal(4294967296 - 1, 32))) & z3.BitVecVal(4294967296 - 1, 32)) + (((z3.LShR(z3.Concat(z3.BitVecVal(0, 24), z3.Extract(7, 0, ((z3.Concat(z3.BitVecVal( 0, 56), z3.Extract(7, 0, VL_RBX)) + (z3.Concat(z3.BitVecVal(0, 56), z3.Extract(7, 0, VL_RCX)) & z3.BitVecVal(256 - 1, 64))) & z3.BitVecVal(256 - 1, 64)))), z3.BitVecVal(23, 32)) & z3.BitVecVal(4294967296 - 1, 32)) & z3.BitVecVal(1, 32)) & z3.BitVecVal(4294967296 - 1, 32))) & z3.BitVecVal(4294967296 - 1, 32)) + (((z3.LShR(z3.Concat(z3.BitVecVal(0, 24), z3.Extract(7, 0, ((z3.Concat(z3.BitVecVal( 0, 56), z3.Extract(7, 0, VL_RBX)) + (z3.Concat(z3.BitVecVal(0, 56), z3.Extract(7, 0, VL_RCX)) & z3.BitVecVal(256 - 1, 64))) & z3.BitVecVal(256 - 1, 64)))), z3.BitVecVal(24, 32)) & z3.BitVecVal(4294967296 - 1, 32)) & z3.BitVecVal(1, 32)) & z3.BitVecVal(4294967296 - 1, 32))) & z3.BitVecVal(4294967296 - 1, 32)) + (((z3.LShR(z3.Concat(z3.BitVecVal(0, 24), z3.Extract(7, 0, ((z3.Concat(z3.BitVecVal( 0, 56), z3.Extract(7, 0, VL_RBX)) + (z3.Concat(z3.BitVecVal(0, 56), z3.Extract(7, 0, VL_RCX)) & z3.BitVecVal(256 - 1, 64))) & z3.BitVecVal(256 - 1, 64)))), z3.BitVecVal(25, 32)) & z3.BitVecVal(4294967296 - 1, 32)) & z3.BitVecVal(1, 32)) & z3.BitVecVal(4294967296 - 1, 32))) & 
z3.BitVecVal(4294967296 - 1, 32)) + (((z3.LShR(z3.Concat(z3.BitVecVal(0, 24), z3.Extract(7, 0, ((z3.Concat(z3.BitVecVal( 0, 56), z3.Extract(7, 0, VL_RBX)) + (z3.Concat(z3.BitVecVal(0, 56), z3.Extract(7, 0, VL_RCX)) & z3.BitVecVal(256 - 1, 64))) & z3.BitVecVal(256 - 1, 64)))), z3.BitVecVal(26, 32)) & z3.BitVecVal(4294967296 - 1, 32)) & z3.BitVecVal(1, 32)) & z3.BitVecVal(4294967296 - 1, 32))) & z3.BitVecVal(4294967296 - 1, 32)) + (((z3.LShR(z3.Concat(z3.BitVecVal(0, 24), z3.Extract(7, 0, ((z3.Concat(z3.BitVecVal( 0, 56), z3.Extract(7, 0, VL_RBX)) + (z3.Concat(z3.BitVecVal(0, 56), z3.Extract(7, 0, VL_RCX)) & z3.BitVecVal(256 - 1, 64))) & z3.BitVecVal(256 - 1, 64)))), z3.BitVecVal(27, 32)) & z3.BitVecVal(4294967296 - 1, 32)) & z3.BitVecVal(1, 32)) & z3.BitVecVal(4294967296 - 1, 32))) & z3.BitVecVal(4294967296 - 1, 32)) + (((z3.LShR(z3.Concat(z3.BitVecVal(0, 24), z3.Extract(7, 0, ((z3.Concat(z3.BitVecVal( 0, 56), z3.Extract(7, 0, VL_RBX)) + (z3.Concat(z3.BitVecVal(0, 56), z3.Extract(7, 0, VL_RCX)) & z3.BitVecVal(256 - 1, 64))) & z3.BitVecVal(256 - 1, 64)))), z3.BitVecVal(28, 32)) & z3.BitVecVal(4294967296 - 1, 32)) & z3.BitVecVal(1, 32)) & z3.BitVecVal(4294967296 - 1, 32))) & z3.BitVecVal(4294967296 - 1, 32)) + (((z3.LShR(z3.Concat(z3.BitVecVal(0, 24), z3.Extract(7, 0, ((z3.Concat(z3.BitVecVal( 0, 56), z3.Extract(7, 0, VL_RBX)) + (z3.Concat(z3.BitVecVal(0, 56), z3.Extract(7, 0, VL_RCX)) & z3.BitVecVal(256 - 1, 64))) & z3.BitVecVal(256 - 1, 64)))), z3.BitVecVal(29, 32)) & z3.BitVecVal(4294967296 - 1, 32)) & z3.BitVecVal(1, 32)) & z3.BitVecVal(4294967296 - 1, 32))) & z3.BitVecVal(4294967296 - 1, 32)) + (((z3.LShR(z3.Concat(z3.BitVecVal(0, 24), z3.Extract(7, 0, ((z3.Concat(z3.BitVecVal( 0, 56), z3.Extract(7, 0, VL_RBX)) + (z3.Concat(z3.BitVecVal(0, 56), z3.Extract(7, 0, VL_RCX)) & z3.BitVecVal(256 - 1, 64))) & z3.BitVecVal(256 - 1, 64)))), z3.BitVecVal(30, 32)) & z3.BitVecVal(4294967296 - 1, 32)) & z3.BitVecVal(1, 32)) & z3.BitVecVal(4294967296 - 1, 32))) & z3.BitVecVal(4294967296 - 1, 32)) & z3.BitVecVal(256 - 1, 32)) & z3.BitVecVal(1, 32)) & z3.BitVecVal(256 - 1, 32)) ^ z3.BitVecVal(1, 32)) & z3.BitVecVal(256 - 1, 32)))))
xvar = (V_F == z3.If(z3.Not(z3.Xor(z3.Xor(z3.Xor(z3.Xor(z3.Xor(z3.Xor(z3.Xor((z3.Extract(0, 0, (z3.Concat(z3.BitVecVal(0, 1), z3.Extract(7, 0, VX_RCX)) + z3.Concat(z3.BitVecVal(0, 1), z3.Extract(7, 0, VX_RBX)))) == z3.BitVecVal(1, 1)), (z3.Extract(1, 1, (z3.Concat(z3.BitVecVal(0, 1), z3.Extract(7, 0, VX_RCX)) + z3.Concat(z3.BitVecVal(0, 1), z3.Extract(7, 0, VX_RBX)))) == z3.BitVecVal(1, 1))), (z3.Extract(2, 2, (z3.Concat(z3.BitVecVal(0, 1), z3.Extract(7, 0, VX_RCX)) + z3.Concat(z3.BitVecVal(0, 1), z3.Extract(7, 0, VX_RBX)))) == z3.BitVecVal(1, 1))), (z3.Extract(3, 3, (z3.Concat(z3.BitVecVal(0, 1), z3.Extract(7, 0, VX_RCX)) + z3.Concat(z3.BitVecVal(0, 1), z3.Extract(7, 0, VX_RBX)))) == z3.BitVecVal(1, 1))), (z3.Extract(4, 4, (z3.Concat(z3.BitVecVal(0, 1), z3.Extract(7, 0, VX_RCX)) + z3.Concat(z3.BitVecVal(0, 1), z3.Extract(7, 0, VX_RBX)))) == z3.BitVecVal(1, 1))), (z3.Extract(5, 5, (z3.Concat(z3.BitVecVal(0, 1), z3.Extract(7, 0, VX_RCX)) + z3.Concat(z3.BitVecVal(0, 1), z3.Extract(7, 0, VX_RBX)))) == z3.BitVecVal(1, 1))), (z3.Extract(6, 6, (z3.Concat(z3.BitVecVal(0, 1), z3.Extract(7, 0, VX_RCX)) + z3.Concat(z3.BitVecVal(0, 1), z3.Extract(7, 0, VX_RBX)))) == z3.BitVecVal(1, 1))), (z3.Extract(7, 7, (z3.Concat(z3.BitVecVal(0, 1), z3.Extract(7, 0, VX_RCX)) + z3.Concat(z3.BitVecVal(0, 1), z3.Extract(7, 0, VX_RBX)))) == z3.BitVecVal(1, 1)))), z3.BitVecVal(1, 1), z3.BitVecVal(0, 1)))
s.add(lvar != xvar)
solve("PF", lvar, xvar, s)
s.pop()
## =******= RAX =******=
s.push()
lvar = (V_R == z3.Concat(z3.Extract(63, 56, VL_RAX), z3.Extract(55, 48, VL_RAX), z3.Extract(47, 40, VL_RAX), z3.Extract(39, 32, VL_RAX), z3.Extract(31, 24, VL_RAX), z3.Extract(23, 16, VL_RAX), z3.Extract(15, 8, VL_RAX), z3.Extract(7, 0, VL_RAX)))
xvar = (V_R == VX_RAX)
s.add(lvar != xvar)
solve("RAX", lvar, xvar, s)
s.pop()
## =******= RBX =******=
s.push()
lvar = (V_R == z3.Concat(z3.Extract(63, 56, VL_RBX), z3.Extract(55, 48, VL_RBX), z3.Extract(47, 40, VL_RBX), z3.Extract(39, 32, VL_RBX), z3.Extract(31, 24, VL_RBX), z3.Extract(23, 16, VL_RBX), z3.Extract(15, 8, VL_RBX), z3.Extract(7, 0, ((z3.Concat(z3.BitVecVal( 0, 56), z3.Extract(7, 0, VL_RBX)) + (z3.Concat(z3.BitVecVal(0, 56), z3.Extract(7, 0, VL_RCX)) & z3.BitVecVal(256 - 1, 64))) & z3.BitVecVal(256 - 1, 64)))))
xvar = (V_R == z3.Concat(z3.Extract(63, 8, VX_RBX), z3.Extract(7, 0, (z3.Concat(z3.BitVecVal(0, 1), z3.Extract(7, 0, VX_RCX)) + z3.Concat(z3.BitVecVal(0, 1), z3.Extract(7, 0, VX_RBX))))))
s.add(lvar != xvar)
solve("RBX", lvar, xvar, s)
s.pop()
## =******= RCX =******=
s.push()
lvar = (V_R == z3.Concat(z3.Extract(63, 56, VL_RCX), z3.Extract(55, 48, VL_RCX), z3.Extract(47, 40, VL_RCX), z3.Extract(39, 32, VL_RCX), z3.Extract(31, 24, VL_RCX), z3.Extract(23, 16, VL_RCX), z3.Extract(15, 8, VL_RCX), z3.Extract(7, 0, z3.Extract(7, 0, VL_RBX))))
xvar = (V_R == z3.Concat(z3.Extract(63, 8, VX_RCX), z3.Extract(7, 0, VX_RBX)))
s.add(lvar != xvar)
solve("RCX", lvar, xvar, s)
s.pop()
## =******= RDX =******=
s.push()
lvar = (V_R == z3.Concat(z3.Extract(63, 56, VL_RDX), z3.Extract(55, 48, VL_RDX), z3.Extract(47, 40, VL_RDX), z3.Extract(39, 32, VL_RDX), z3.Extract(31, 24, VL_RDX), z3.Extract(23, 16, VL_RDX), z3.Extract(15, 8, VL_RDX), z3.Extract(7, 0, VL_RDX)))
xvar = (V_R == VX_RDX)
s.add(lvar != xvar)
solve("RDX", lvar, xvar, s)
s.pop()
## =******= SF =******=
s.push()
lvar = (V_F == z3.Extract(0, 0, z3.Extract(7, 0, (z3.LShR(((z3.Concat(z3.BitVecVal( 0, 56), z3.Extract(7, 0, VL_RBX)) + (z3.Concat(z3.BitVecVal(0, 56), z3.Extract(7, 0, VL_RCX)) & z3.BitVecVal(256 - 1, 64))) & z3.BitVecVal(256 - 1, 64)), z3.BitVecVal(7, 64)) & z3.BitVecVal(256 - 1, 64)))))
xvar = (V_F == z3.Extract(7, 7, (z3.Concat(z3.BitVecVal(0, 1), z3.Extract(7, 0, VX_RCX)) + z3.Concat(z3.BitVecVal(0, 1), z3.Extract(7, 0, VX_RBX)))))
s.add(lvar != xvar)
solve("SF", lvar, xvar, s)
s.pop()
## =******= ZF =******=
s.push()
lvar = (V_F == z3.Extract(0, 0, z3.Extract(7, 0, z3.Concat(z3.BitVecVal(0, 7), z3.Extract(0, 0, z3.If((((z3.Concat(z3.BitVecVal( 0, 56), z3.Extract(7, 0, VL_RBX)) + (z3.Concat(z3.BitVecVal(0, 56), z3.Extract(7, 0, VL_RCX)) & z3.BitVecVal(256 - 1, 64))) & z3.BitVecVal(256 - 1, 64)) == z3.BitVecVal(0, 64)), z3.BitVecVal(1, 8), z3.BitVecVal(0, 8)))))))
xvar = (V_F == z3.If((z3.Extract(7, 0, (z3.Concat(z3.BitVecVal(0, 1), z3.Extract(7, 0, VX_RCX)) + z3.Concat(z3.BitVecVal(0, 1), z3.Extract(7, 0, VX_RBX)))) == z3.BitVecVal(0, 8)), z3.BitVecVal(1, 1), z3.BitVecVal(0, 1)))
s.add(lvar != xvar)
solve("ZF", lvar, xvar, s)
s.pop()
if(status == True):
print('\033[6;30;42m' + 'Test-Pass: ' + '\033[0m' + test_name)
else:
if(status == False):
print('\033[0;30;41m' + 'Test-Fail: ' + '\033[0m' + test_name)
else:
print('\033[6;30;47m' + 'Test-Unk: ' + '\033[0m' + test_name)
| [
"[email protected]"
] | |
f4bfb83c2408bd57b3ce899378717e5b6fe39407 | 5552380060fd8be832a61d1cc4020c16dde40452 | /home/migrations/0004_bank_branch_file.py | 244237a150562d68ccb2eac9f7f5cf4d1998f799 | [] | no_license | rayhancse08/LawChamber | a1f4121a911d92cdf7cd4c885767b2080fa5fbb0 | 40f2306d50bc9c2d2b7c08b44e527ab45a7dc756 | refs/heads/master | 2021-09-10T06:32:48.473590 | 2018-03-21T16:12:58 | 2018-03-21T16:12:58 | 125,860,987 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,087 | py | # Generated by Django 2.0.3 on 2018-03-19 13:31
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('home', '0003_contact'),
]
operations = [
migrations.CreateModel(
name='Bank',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(help_text='Enter a Bank name (e.g. Brack,DutchBangla)', max_length=200)),
],
),
migrations.CreateModel(
name='Branch',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(help_text='Enter branch name', max_length=200, verbose_name='Branch Name')),
('address', models.CharField(help_text='Enter branch address', max_length=200, verbose_name='Branch Address')),
('bank', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='home.Bank')),
],
),
migrations.CreateModel(
name='File',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(help_text='Enter File Name', max_length=200, verbose_name='File Name')),
('bill', models.CharField(help_text='Enter Bill Amount', max_length=200, verbose_name='Bill Amount')),
('account_no', models.CharField(help_text='Enter Account No', max_length=200, verbose_name='Account No')),
('file', models.FileField(default=False, help_text='Upload File', upload_to='')),
('bank', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='home.Bank')),
('branch', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='home.Branch')),
],
),
]
| [
"[email protected]"
] | |
5ca7bb44464eb2b8bc4a97eed49157497214cee6 | aced407b41f6669f69e9eb8bd599260d50c0bd3f | /server/libs/top/api/rest/TaobaokeItemsDetailGetRequest.py | 13b1b69b497043b5f31870c254d245496fe97a64 | [] | no_license | alswl/music_sofa | 42f7d15431f11b97bf67b604cfde0a0e9e3860cc | c4e5425ef6c80c3e57c91ba568f7cbfe63faa378 | refs/heads/master | 2016-09-12T18:37:34.357510 | 2016-05-20T11:49:52 | 2016-05-20T11:49:52 | 58,946,171 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 472 | py | '''
Created by auto_sdk on 2013-11-07 12:53:22
'''
from top.api.base import RestApi
class TaobaokeItemsDetailGetRequest(RestApi):
def __init__(self,domain='gw.api.taobao.com',port=80):
RestApi.__init__(self,domain, port)
self.fields = None
self.is_mobile = None
self.nick = None
self.num_iids = None
self.outer_code = None
self.pid = None
self.refer_type = None
self.track_iids = None
def getapiname(self):
return 'taobao.taobaoke.items.detail.get'
| [
"[email protected]"
] | |
93d70c998b5ffa9aa7583bfab766cfc5b0a7927b | bc82de9237a6aa28fd7623a27b35c02ae8416702 | /allennlp/modules/seq2seq_decoders/decoder_net.py | aa73863dd2f546fc9ee2b7c5a40ada2af3ff2e00 | [
"Apache-2.0"
] | permissive | Snnzhao/GrailQA | 78190a8a5bae934c07f4035786f658ef4764c510 | e89e66380402e51bac56f59c7d24d4400bcd11b6 | refs/heads/main | 2023-04-26T19:49:21.683922 | 2021-04-11T09:40:34 | 2021-04-11T09:40:34 | 370,937,323 | 1 | 0 | Apache-2.0 | 2021-05-26T07:00:21 | 2021-05-26T07:00:20 | null | UTF-8 | Python | false | false | 4,117 | py | from typing import Tuple, Dict, Optional
import torch
from allennlp.common import Registrable
class DecoderNet(torch.nn.Module, Registrable):
# pylint: disable=abstract-method
"""
This class abstracts the neural architectures for decoding the encoded states and
embedded previous step prediction vectors into a new sequence of output vectors.
Implementations of ``DecoderNet`` are used by implementations of
``allennlp.modules.seq2seq_decoders.seq_decoder.SeqDecoder`` such as
``allennlp.modules.seq2seq_decoders.seq_decoder.auto_regressive_seq_decoder.AutoRegressiveSeqDecoder``.
The outputs of this module are then used by ``allennlp.modules.seq2seq_decoders.seq_decoder.SeqDecoder``
to apply the final output feedforward layer and softmax.
Parameters
----------
decoding_dim : ``int``, required
Defines dimensionality of output vectors.
target_embedding_dim : ``int``, required
Defines dimensionality of target embeddings. Since this model takes its output from a previous step
as input to the following step, this is also an input dimensionality.
decodes_parallel : ``bool``, required
Defines whether the decoder generates multiple next-step predictions in a single `forward`.
"""
def __init__(self,
decoding_dim: int,
target_embedding_dim: int,
decodes_parallel: bool) -> None:
super().__init__()
self.target_embedding_dim = target_embedding_dim
self.decoding_dim = decoding_dim
self.decodes_parallel = decodes_parallel
def get_output_dim(self) -> int:
"""
Returns the dimension of each vector in the sequence output by this ``DecoderNet``.
This is `not` the shape of the returned tensor, but the last element of that shape.
"""
return self.decoding_dim
def init_decoder_state(self, encoder_out: Dict[str, torch.LongTensor]) -> Dict[str, torch.Tensor]:
"""
Initialize the encoded state to be passed to the first decoding time step.
Parameters
----------
encoder_out : ``Dict[str, torch.LongTensor]``
Dictionary of encoded state produced by the encoder (for example the final
encoder outputs and the source mask), used to build the initial decoder state.
Returns
-------
``Dict[str, torch.Tensor]``
Initial state
"""
raise NotImplementedError()
def forward(self,
previous_state: Dict[str, torch.Tensor],
encoder_outputs: torch.Tensor,
source_mask: torch.Tensor,
previous_steps_predictions: torch.Tensor,
previous_steps_mask: Optional[torch.Tensor] = None) -> Tuple[Dict[str, torch.Tensor],
torch.Tensor]:
# pylint: disable=arguments-differ
"""
Performs a decoding step, and returns dictionary with decoder hidden state or cache and the decoder output.
The decoder output is a 3d tensor (group_size, steps_count, decoder_output_dim)
if `self.decodes_parallel` is True, else it is a 2d tensor with (group_size, decoder_output_dim).
Parameters
----------
previous_steps_predictions : ``torch.Tensor``, required
Embeddings of predictions on previous step.
Shape: (group_size, steps_count, decoder_output_dim)
encoder_outputs : ``torch.Tensor``, required
Vectors of all encoder outputs.
Shape: (group_size, max_input_sequence_length, encoder_output_dim)
source_mask : ``torch.Tensor``, required
This tensor contains mask for each input sequence.
Shape: (group_size, max_input_sequence_length)
previous_state : ``Dict[str, torch.Tensor]``, required
previous state of decoder
Returns
-------
Tuple[Dict[str, torch.Tensor], torch.Tensor]
Tuple of new decoder state and decoder output. The output should be used to generate the output sequence elements.
"""
raise NotImplementedError()
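# Sketch of how a SeqDecoder implementation typically drives this interface
# (added for illustration, not part of the original file; the tensors and the
# exact keys of the state dictionary are assumptions of this example):
#
#   state = decoder_net.init_decoder_state(
#       {"encoder_outputs": encoder_outputs, "source_mask": source_mask})
#   state, decoder_output = decoder_net(
#       previous_state=state,
#       encoder_outputs=encoder_outputs,
#       source_mask=source_mask,
#       previous_steps_predictions=previous_target_embeddings)
#   # decoder_output then goes through the output projection and softmax in the
#   # calling SeqDecoder.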
| [
"[email protected]"
] | |
9ca6d92b19008f566d7bad5323b3263b78cd3d47 | 09e0efb05c9cd29c7aa4301cec7fb95d24d2bdc2 | /nionui_app/nionui_examples/ui_demo/StatusBar.py | 99f1fd1396a4fbf0ae59bae7916bfa90a1e1e115 | [
"Apache-2.0"
] | permissive | AEljarrat/nionui | 4c452037f7e3ee6710c19b18afeb45e35e9ec361 | 3714a54d56f472a8a0f7b9f8a8240103ca790374 | refs/heads/master | 2022-11-30T19:54:10.714060 | 2020-08-06T21:37:47 | 2020-08-06T21:37:47 | 286,014,692 | 0 | 0 | NOASSERTION | 2020-08-08T09:40:31 | 2020-08-08T09:40:30 | null | UTF-8 | Python | false | false | 686 | py | from nion.utils import Model
class Handler:
slider_value_model = Model.PropertyModel(50)
def reset(self, widget):
self.slider_value_model.value = 50
def construct_ui(ui):
label = ui.create_label(text="@binding(slider_value_model.value)")
button = ui.create_push_button(text="Reset to 50", on_clicked="reset")
content = ui.create_column(label, button, spacing=8)
left = ui.create_label(text="LEFT")
right = ui.create_label(text="RIGHT")
group_row = ui.create_row(left, ui.create_stretch(), right, spacing=8)
status_bar = ui.create_group(group_row)
return ui.create_column(content, ui.create_stretch(), status_bar, spacing=8)
| [
"[email protected]"
] | |
fa40c9f5c291d466245994d71e3b05bde8d0668f | 976230c7d6270f9fbec052e01f9799e13059f79c | /tests/spatio_temporal/test_daily.py | 0c473f4a8bf1f201c13a2cf6200948d00b171507 | [
"MIT"
] | permissive | amet123/jyotisha | 01cd60884f729814e6757d2a7186a6a4e3941a1f | 83930880ca74d6ddc95cc72f7026ef63a2e1c3be | refs/heads/master | 2020-03-16T09:44:13.310195 | 2018-02-10T02:43:19 | 2018-02-10T02:43:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,037 | py | import logging
from jyotisha.panchangam.spatio_temporal import City
logging.basicConfig(
level=logging.DEBUG,
format="%(levelname)s: %(asctime)s {%(filename)s:%(lineno)d}: %(message)s "
)
def test_solar_day():
from jyotisha.panchangam.spatio_temporal import daily
panchangam = daily.Panchangam(city=City('Chennai', '13:05:24', '80:16:12', 'Asia/Calcutta'), julian_day=2457023.27)
panchangam.compute_solar_day()
logging.debug(str(panchangam))
assert panchangam.solar_month_day == 17
assert panchangam.solar_month == 9
def test_tb_muhuurta():
from jyotisha.panchangam.spatio_temporal import daily
panchangam = daily.Panchangam(city=City('Chennai', '13:05:24', '80:16:12', 'Asia/Calcutta'), julian_day=2457023.27)
panchangam.compute_tb_muhuurtas()
logging.debug(str(panchangam))
assert len(panchangam.tb_muhuurtas) == 15
assert panchangam.tb_muhuurtas[0].jd_start == panchangam.jd_sunrise
import numpy.testing
numpy.testing.assert_approx_equal(panchangam.tb_muhuurtas[14].jd_end, panchangam.jd_sunrise)
| [
"[email protected]"
] | |
c1c2f08f213c1883d5c0d7e36a037127caeebd14 | 29ab920cf5c02ca6351e1c98824b8ebdb08933b0 | /src/core/models.py | d6b1641b0977a3f01e92bd8e2d76097fcb2dff09 | [
"MIT"
] | permissive | yordan-marinov/su_dj_todo_app | 237c8a14c17d349a75a437ab810c396269d3b571 | 34adcf3c17fcda30e57d76099780d0cf5f3f0fe6 | refs/heads/main | 2023-05-15T12:05:08.948196 | 2021-06-16T08:01:03 | 2021-06-16T08:01:03 | 375,369,149 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 531 | py | from django.db import models
from django.contrib.auth.models import User
# Create your models here.
class Task(models.Model):
user = models.ForeignKey(User, on_delete=models.CASCADE, null=True, blank=True)
title = models.CharField(max_length=120)
description = models.TextField(null=True, blank=True)
complete = models.BooleanField(default=False)
created_on = models.DateTimeField(auto_now_add=True)
class Meta:
ordering = ['complete']
def __str__(self):
return self.title
| [
"[email protected]"
] | |
47eccdae8acbd25874bd94626617616ed0ee61fe | 5fdcf15f818eb2d0c7b5dd39443064d5bc42aff9 | /lc_reorganizing_string.py | 8ab639672dbe010d9b0fc8e9842a0ca17c1438dc | [] | no_license | vincentt117/coding_challenge | acf3664034a71ffd70c5f1ac0f6a66768e097a6e | 5deff070bb9f6b19a1cfc0a6086ac155496fbb78 | refs/heads/master | 2021-07-02T05:43:08.007851 | 2020-08-27T02:16:19 | 2020-08-27T02:16:19 | 146,027,883 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 886 | py | # Optimal solution retrieved from Leetcode - https://leetcode.com/problems/reorganize-string/
import heapq


class Solution:
def reorganizeString(self, S: str) -> str:
pq = [(-S.count(x), x) for x in set(S)]
heapq.heapify(pq)
if any(-nc > (len(S) + 1) / 2 for nc, x in pq):
return ""
ans = []
while len(pq) >= 2:
nct1, ch1 = heapq.heappop(pq)
nct2, ch2 = heapq.heappop(pq)
#This code turns out to be superfluous, but explains what is happening
#if not ans or ch1 != ans[-1]:
# ans.extend([ch1, ch2])
#else:
# ans.extend([ch2, ch1])
ans.extend([ch1, ch2])
if nct1 + 1: heapq.heappush(pq, (nct1 + 1, ch1))
if nct2 + 1: heapq.heappush(pq, (nct2 + 1, ch2))
return "".join(ans) + (pq[0][1] if pq else '')
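# Illustrative usage (added sketch, not part of the original Leetcode submission):
#   Solution().reorganizeString("aab")  -> "aba"
#   Solution().reorganizeString("aaab") -> ""  (impossible: 'a' exceeds (len(S) + 1) / 2)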
| [
"[email protected]"
] | |
97c63ac9e665a2d66342e42e241c92b0489a2ee3 | 18d51ac0a6ca14c8221c26f0dacd8d3721ca28e9 | /73_hun.py | 5a18a272bbd749e2a12f95c01d715dc788f4067d | [] | no_license | mahakalai/mahak | 05f96d52880ed7b2e5eb70dd1dbf14fc533236e8 | 613be9df7743ef59b1f0e07b7df987d29bb23ec7 | refs/heads/master | 2020-04-15T05:01:58.541930 | 2019-07-15T16:28:32 | 2019-07-15T16:28:32 | 164,406,486 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 176 | py | s=input()
s1=input()
l=[]
if s==s1:
print(s)
else:
for i in range(0,len(s)):
for j in range(i+1,len(s)):
k=s1[i:j]
if k in s:
l.append(k)
print(max(l, key=len))
| [
"[email protected]"
] | |
21e0a4a5c520b5364f7115b1ed1387993123f14b | bea2c1ad58a1d45705ba8f9e4527e4ffac4969cb | /sfepy/discrete/parse_regions.py | c0b71f0ee8b1d0bc7fd05dcd16232c367e61ac0b | [
"BSD-3-Clause"
] | permissive | fethio/sfepy | 8423e9083e3144e1fd97d24c31eda1646167ff5b | 78efe51d6038dc92f5bfeac4c57614746fe28117 | refs/heads/master | 2021-01-16T20:59:29.459626 | 2016-07-01T15:06:11 | 2016-07-01T15:06:11 | 61,715,917 | 1 | 1 | null | 2016-07-01T15:06:11 | 2016-06-22T12:06:07 | Python | UTF-8 | Python | false | false | 6,738 | py | """
Grammar for selecting regions of a domain.
Regions serve for selection of certain parts of the computational domain
represented as a finite element mesh. They are used to define the boundary
conditions, the domains of terms and materials etc.
Notes
-----
History: pre-git versions already from 13.06.2006.
"""
from pyparsing import Literal, CaselessLiteral, Word, delimitedList,\
Group, Optional, ZeroOrMore, nums, alphas, alphanums,\
Combine, StringStart, StringEnd, Forward, oneOf, ParseException
ParseException # Needed for importing elsewhere.
op_codes = ['OA_SubV', 'OA_SubE', 'OA_SubF', 'OA_SubC', 'OA_SubS',
'OA_AddV', 'OA_AddE', 'OA_AddF', 'OA_AddC', 'OA_AddS',
'OA_IntersectV', 'OA_IntersectE', 'OA_IntersectF',
'OA_IntersectC', 'OA_IntersectS']
eval_codes = ['E_VIR', 'E_VOS', 'E_VBF', 'E_VOG', 'E_OVIR', 'E_VI', 'E_VOSET',
'E_CBF', 'E_COG', 'E_CI', 'E_COSET']
kw_codes = ['KW_All', 'KW_Region']
def to_stack(stack):
def push_first(str, loc, toks):
if toks:
stack.append(toks[0])
return toks
return push_first
def replace(what, keep=False):
def _replace(str, loc, toks):
ret = {'token' : what, 'orig' : []}
if keep:
ret['orig'] = list(toks[0])
return ret
return _replace
def replace_with_region(what, r_index):
def _replace(str, loc, toks):
ret = {'token' : what, 'orig' : []}
orig = toks[0]
r_orig = orig[r_index]
if isinstance(r_orig, dict) and (r_orig['token'] == 'KW_Region'):
orig = list(orig[:r_index]) + r_orig['orig']
ret['orig'] = orig
return ret
return _replace
def join_tokens(str, loc, toks):
return [" ".join(toks[0])]
def visit_stack(stack, op_visitor, leaf_visitor):
def visit(stack, level):
op = stack.pop()
token = op['token']
if token in op_codes:
res2 = visit(stack, level + 1)
res1 = visit(stack, level + 1)
return op_visitor(level, op, res1, res2)
elif token in eval_codes:
return leaf_visitor(level, op)
elif token in kw_codes:
return leaf_visitor(level, op)
else:
raise ValueError, token
return visit(stack, 0)
def print_op(level, op, item1, item2):
print level * ' ' + (': %s' % op)
def print_leaf(level, op):
print level * ' ' + ('< %s' % op)
def print_stack(stack):
visit_stack(stack, print_op, print_leaf)
def create_bnf(stack):
point = Literal(".")
e = CaselessLiteral("E")
inumber = Word(nums)
fnumber = Combine(Word("+-"+nums, nums) +
Optional(point + Optional(Word(nums))) +
Optional(e + Word("+-"+nums, nums)))
_of = Literal('of')
_in = Literal('in')
_by = Literal('by')
_copy = Literal('copy')
_mv = Literal('-v').setParseAction(replace('OA_SubV'))
_me = Literal('-e').setParseAction(replace('OA_SubE'))
_mf = Literal('-f').setParseAction(replace('OA_SubF'))
_mc = Literal('-c').setParseAction(replace('OA_SubC'))
_ms = Literal('-s').setParseAction(replace('OA_SubS'))
_pv = Literal('+v').setParseAction(replace('OA_AddV'))
_pe = Literal('+e').setParseAction(replace('OA_AddE'))
_pf = Literal('+f').setParseAction(replace('OA_AddF'))
_pc = Literal('+c').setParseAction(replace('OA_AddC'))
_ps = Literal('+s').setParseAction(replace('OA_AddS'))
_inv = Literal('*v').setParseAction(replace('OA_IntersectV'))
_ine = Literal('*e').setParseAction(replace('OA_IntersectE'))
_inf = Literal('*f').setParseAction(replace('OA_IntersectF'))
_inc = Literal('*c').setParseAction(replace('OA_IntersectC'))
_ins = Literal('*s').setParseAction(replace('OA_IntersectS'))
regop = (_mv | _me | _mf | _mc | _ms |
_pv | _pe | _pf | _pc | _ps |
_inv | _ine | _inf | _inc | _ins)
lpar = Literal("(").suppress()
rpar = Literal(")").suppress()
_all = Literal('all').setParseAction(replace('KW_All'))
vertex = Literal('vertex')
vertices = Literal('vertices')
cell = Literal('cell')
cells = Literal('cells')
group = Literal('group')
_set = Literal('set')
surface = Literal('surface')
ident = Word(alphas + '_.', alphanums + '_.')
set_name = Word(nums) | ident
function = Word(alphas + '_', alphanums + '_')
function = Group(function).setParseAction(join_tokens)
region = Combine(Literal('r.') + Word(alphas + '_',
'_' + alphas + nums + '.'))
region = Group(Optional(_copy, default='nocopy') + region)
region.setParseAction(replace('KW_Region', keep=True))
coor = oneOf('x y z')
boolop = oneOf('& |')
relop = oneOf('< > <= >= != ==')
bool_term = (ZeroOrMore('(') + (coor | fnumber) + relop + (coor | fnumber)
+ ZeroOrMore(')'))
relation = Forward()
relation << (ZeroOrMore('(')
+ bool_term + ZeroOrMore(boolop + relation)
+ ZeroOrMore(')'))
relation = Group(relation).setParseAction(join_tokens)
nos = Group(vertices + _of + surface).setParseAction(replace('E_VOS'))
nir = Group(vertices + _in + relation).setParseAction(
replace('E_VIR', keep=True))
nbf = Group(vertices + _by + function).setParseAction(
replace('E_VBF', keep=True))
ebf = Group(cells + _by + function).setParseAction(
replace('E_CBF', keep=True))
eog = Group(cells + _of + group + Word(nums)).setParseAction(
replace('E_COG', keep=True))
nog = Group(vertices + _of + group + Word(nums)).setParseAction(
replace('E_VOG', keep=True))
onir = Group(vertex + _in + region).setParseAction(
replace_with_region('E_OVIR', 2))
ni = Group(vertex + delimitedList(inumber)).setParseAction(
replace('E_VI', keep=True))
ei = Group(cell + delimitedList(inumber)).setParseAction(
replace('E_CI', keep=True))
noset = Group(vertices + _of + _set + set_name).setParseAction(
replace('E_VOSET', keep=True))
eoset = Group(cells + _of + _set + set_name).setParseAction(
replace('E_COSET', keep=True))
region_expression = Forward()
atom1 = (_all | region | ni | onir | nos | nir | nbf
| ei | ebf | eog | nog | noset | eoset)
atom1.setParseAction(to_stack(stack))
atom2 = (lpar + region_expression.suppress() + rpar)
atom = (atom1 | atom2)
aux = (regop + region_expression)
aux.setParseAction(to_stack(stack))
region_expression << atom + ZeroOrMore(aux)
region_expression = StringStart() + region_expression + StringEnd()
return region_expression
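# Usage sketch (added for illustration; the selector string and the use of
# print_stack() on a copy of the stack are assumptions, not part of the module):
#
#     stack = []
#     bnf = create_bnf(stack)
#     bnf.parseString('vertices in (x < 0.5) +v vertices of group 3')
#     print_stack(list(stack))  # walks the parsed operator/leaf stack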
| [
"[email protected]"
] | |
e678647f567f69e613f9801f9ae2ac7ac80e1659 | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/63/usersdata/185/30193/submittedfiles/swamee.py | cf9c5669d5d51a92ee3a60133f07bc131b3da0cc | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 352 | py | # -*- coding: utf-8 -*-
import math
#START YOUR CODE HERE
f=float(input('enter f:'))
L=float(input('enter L:'))
Q=float(input('enter Q:'))
DeltaH=float(input('enter DeltaH:'))
V=float(input('enter V:'))
g=9.81
E=0.000002
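# Interpretive comments (added; not in the original submission):
#   D   - pipe diameter from the Swamee head-loss relation,
#         D = (8*f*L*Q^2 / (pi^2 * g * DeltaH))^(1/5)
#   Rey - Reynolds number, Re = 4*Q / (pi * D * V), with V read as kinematic viscosity
#   K   - Swamee-Jain friction factor, 0.25 / (log10(E/(3.7*D) + 5.74/Re^0.9))^2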
D=((8*f*L*(Q*Q))/((math.pi**2)*g*DeltaH))**(1/5)
Rey=(4*Q)/(math.pi*D*V)
K=0.25/(math.log10((E/(3.7*D))+(5.74/(Rey**0.9)))**2)
"[email protected]"
] | |
1d1b706b79d817045a4458163d9e0a30057bb120 | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/adjectives/_nets.py | 9595b4e462c02417b87f949ef703a5e57f707413 | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 227 | py |
from xai.brain.wordbase.adjectives._net import _NET
#calss header
class _NETS(_NET, ):
def __init__(self,):
_NET.__init__(self)
self.name = "NETS"
self.specie = 'adjectives'
self.basic = "net"
self.jsondata = {}
| [
"[email protected]"
] | |
dcb8464506d3eb9dc51cf3f6fac3227e35f31256 | 932c30383a5a3b8f70f182fb160e172826531b2c | /tools/sapp/sapp/ui/tests/interactive_test.py | 88e66ed924672725b2e7e540da16bcbba6688a25 | [
"MIT"
] | permissive | fylux/pyre-check | 78581f8ed68764bb46be6f686d74cbbc25828556 | a675dc272a973972544e595c567bf432993c2b3a | refs/heads/master | 2023-01-01T23:38:11.453527 | 2020-10-24T17:14:28 | 2020-10-24T17:14:28 | 298,233,128 | 0 | 1 | null | 2020-09-24T09:36:45 | 2020-09-24T09:36:45 | null | UTF-8 | Python | false | false | 84,132 | py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# pyre-unsafe
import os
import sys
from datetime import datetime
from io import StringIO
from typing import List
from unittest import TestCase
from unittest.mock import mock_open, patch
from sqlalchemy.orm import Session
from ...db import DB, DBType
from ...decorators import UserError
from ...models import (
DBID,
Issue,
IssueInstance,
IssueInstanceSharedTextAssoc,
IssueInstanceTraceFrameAssoc,
Run,
RunStatus,
SharedText,
SharedTextKind,
SourceLocation,
TraceFrame,
TraceFrameLeafAssoc,
TraceKind,
create as create_models,
)
from ...pipeline.pysa_taint_parser import Parser
from ...tests.fake_object_generator import FakeObjectGenerator
from ..interactive import (
Interactive,
IssueQueryResult,
TraceFrameQueryResult,
TraceTuple,
)
class InteractiveTest(TestCase):
def setUp(self) -> None:
self.db = DB(DBType.MEMORY)
create_models(self.db)
self.interactive = Interactive(
database=self.db, repository_directory="", parser_class=Parser
)
self.stdout = StringIO()
self.stderr = StringIO()
sys.stdout = self.stdout # redirect output
sys.stderr = self.stderr # redirect output
self.fakes = FakeObjectGenerator()
def tearDown(self) -> None:
sys.stdout = sys.__stdout__ # reset redirect
sys.stderr = sys.__stderr__ # reset redirect
def _clear_stdout(self):
self.stdout = StringIO()
sys.stdout = self.stdout
def _add_to_session(self, session, data):
if not isinstance(data, list):
session.add(data)
return
for row in data:
session.add(row)
def _frame_to_query_result(
self, session: Session, trace_frame: TraceFrame
) -> TraceFrameQueryResult:
caller = (
session.query(SharedText.contents)
.filter(SharedText.id == trace_frame.caller_id)
.scalar()
)
callee = (
session.query(SharedText.contents)
.filter(SharedText.id == trace_frame.callee_id)
.scalar()
)
filename = (
session.query(SharedText.contents)
.filter(SharedText.id == trace_frame.filename_id)
.scalar()
)
return TraceFrameQueryResult(
id=trace_frame.id,
caller=caller,
caller_port=trace_frame.caller_port,
callee=callee,
callee_port=trace_frame.callee_port,
caller_id=trace_frame.caller_id,
callee_id=trace_frame.callee_id,
callee_location=trace_frame.callee_location,
kind=trace_frame.kind,
filename=filename,
)
def testState(self):
self.interactive.current_run_id = 1
self.interactive.current_issue_instance_id = 2
self.interactive.current_frame_id = 3
self.interactive.sources = {1}
self.interactive.sinks = {2}
self.interactive.state()
output = self.stdout.getvalue()
self.assertIn("Database: memory:sapp.db", output)
self.assertIn("Repository directory: ", output)
self.assertIn("Current run: 1", output)
self.assertIn("Current issue instance: 2", output)
self.assertIn("Current trace frame: 3", output)
self.assertIn("Sources filter: {1}", output)
self.assertIn("Sinks filter: {2}", output)
def testListIssuesBasic(self):
run = self.fakes.run()
self.fakes.issue()
self.fakes.instance(
message="message1", filename="file.py", callable="module.function1"
)
self.fakes.save_all(self.db)
with self.db.make_session() as session:
session.add(run)
session.commit()
self.interactive.setup()
self.interactive.issues()
output = self.stdout.getvalue().strip()
self.assertIn("Issue 1", output)
self.assertIn("Code: 6016", output)
self.assertIn("Message: message1", output)
self.assertIn("Callable: module.function1", output)
self.assertIn("Location: file.py:6|7|8", output)
def testListIssuesFromLatestRun(self):
self.fakes.issue()
run1 = self.fakes.run()
self.fakes.instance() # part of run1
self.fakes.save_all(self.db) # early flush to resolve DBID's
run2 = self.fakes.run()
self.fakes.instance() # part of run2
self.fakes.save_all(self.db)
with self.db.make_session() as session:
session.add(run1)
session.add(run2)
session.commit()
self.interactive.setup()
self.interactive.issues()
output = self.stdout.getvalue().strip()
self.assertNotIn("Issue 1", output)
self.assertIn("Issue 2", output)
def _list_issues_filter_setup(self):
run = self.fakes.run()
issue1 = self.fakes.issue()
self.fakes.instance(
issue_id=issue1.id,
callable="module.sub.function1",
filename="module/sub.py",
min_trace_length_to_sources=1,
min_trace_length_to_sinks=1,
)
self.fakes.save_all(self.db)
issue2 = self.fakes.issue()
self.fakes.instance(
issue_id=issue2.id,
callable="module.sub.function2",
filename="module/sub.py",
min_trace_length_to_sources=2,
min_trace_length_to_sinks=2,
)
self.fakes.save_all(self.db)
issue3 = self.fakes.issue()
self.fakes.instance(
issue_id=issue3.id,
callable="module.function3",
filename="module/__init__.py",
min_trace_length_to_sources=3,
min_trace_length_to_sinks=3,
)
self.fakes.save_all(self.db)
with self.db.make_session() as session:
session.add(run)
session.commit()
def testListIssuesFilterCodes(self):
self._list_issues_filter_setup()
self.interactive.setup()
self.interactive.issues(codes="a string")
stderr = self.stderr.getvalue().strip()
self.assertIn("'codes' should be", stderr)
self.interactive.issues(codes=6016)
output = self.stdout.getvalue().strip()
self.assertIn("Issue 1", output)
self.assertNotIn("Issue 2", output)
self.assertNotIn("Issue 3", output)
self._clear_stdout()
self.interactive.issues(codes=[6017, 6018])
output = self.stdout.getvalue().strip()
self.assertNotIn("Issue 1", output)
self.assertIn("Issue 2", output)
self.assertIn("Issue 3", output)
def testListIssuesFilterCallables(self):
self._list_issues_filter_setup()
self.interactive.setup()
self.interactive.issues(callables=1234)
stderr = self.stderr.getvalue().strip()
self.assertIn("'callables' should be", stderr)
self.interactive.issues(callables="%sub%")
output = self.stdout.getvalue().strip()
self.assertIn("Issue 1", output)
self.assertIn("Issue 2", output)
self.assertNotIn("Issue 3", output)
self._clear_stdout()
self.interactive.issues(callables=["%function3"])
output = self.stdout.getvalue().strip()
self.assertNotIn("Issue 1", output)
self.assertNotIn("Issue 2", output)
self.assertIn("Issue 3", output)
def testListIssuesFilterFilenames(self):
self._list_issues_filter_setup()
self.interactive.setup()
self.interactive.issues(filenames=1234)
stderr = self.stderr.getvalue().strip()
self.assertIn("'filenames' should be", stderr)
self.interactive.issues(filenames="module/s%")
output = self.stdout.getvalue().strip()
self.assertIn("Issue 1", output)
self.assertIn("Issue 2", output)
self.assertNotIn("Issue 3", output)
self._clear_stdout()
self.interactive.issues(filenames=["%__init__.py"])
output = self.stdout.getvalue().strip()
self.assertNotIn("Issue 1", output)
self.assertNotIn("Issue 2", output)
self.assertIn("Issue 3", output)
def testListIssuesFilterMinTraceLength(self):
self._list_issues_filter_setup()
self.interactive.setup()
self.interactive.issues(exact_trace_length_to_sources="1")
stderr = self.stderr.getvalue().strip()
self.assertIn("'exact_trace_length_to_sources' should be", stderr)
self._clear_stdout()
self.interactive.issues(exact_trace_length_to_sinks="1")
stderr = self.stderr.getvalue().strip()
self.assertIn("'exact_trace_length_to_sinks' should be", stderr)
self._clear_stdout()
self.interactive.issues(max_trace_length_to_sources="1")
stderr = self.stderr.getvalue().strip()
self.assertIn("'max_trace_length_to_sources' should be", stderr)
self._clear_stdout()
self.interactive.issues(max_trace_length_to_sinks="1")
stderr = self.stderr.getvalue().strip()
self.assertIn("'max_trace_length_to_sinks' should be", stderr)
self._clear_stdout()
self.interactive.issues(
exact_trace_length_to_sources=1, max_trace_length_to_sources=1
)
stderr = self.stderr.getvalue().strip()
self.assertIn("can't be set together", stderr)
self._clear_stdout()
self.interactive.issues(
exact_trace_length_to_sinks=1, max_trace_length_to_sinks=1
)
stderr = self.stderr.getvalue().strip()
self.assertIn("can't be set together", stderr)
self._clear_stdout()
self.interactive.issues(exact_trace_length_to_sources=1)
output = self.stdout.getvalue().strip()
self.assertIn("Issue 1", output)
self.assertNotIn("Issue 2", output)
self.assertNotIn("Issue 3", output)
self._clear_stdout()
self.interactive.issues(exact_trace_length_to_sinks=1)
output = self.stdout.getvalue().strip()
self.assertIn("Issue 1", output)
self.assertNotIn("Issue 2", output)
self.assertNotIn("Issue 3", output)
self._clear_stdout()
self.interactive.issues(max_trace_length_to_sources=1)
output = self.stdout.getvalue().strip()
self.assertIn("Issue 1", output)
self.assertNotIn("Issue 2", output)
self.assertNotIn("Issue 3", output)
self._clear_stdout()
self.interactive.issues(max_trace_length_to_sinks=1)
output = self.stdout.getvalue().strip()
self.assertIn("Issue 1", output)
self.assertNotIn("Issue 2", output)
self.assertNotIn("Issue 3", output)
self._clear_stdout()
self.interactive.issues(max_trace_length_to_sources=2)
output = self.stdout.getvalue().strip()
self.assertIn("Issue 1", output)
self.assertIn("Issue 2", output)
self.assertNotIn("Issue 3", output)
self._clear_stdout()
self.interactive.issues(max_trace_length_to_sinks=2)
output = self.stdout.getvalue().strip()
self.assertIn("Issue 1", output)
self.assertIn("Issue 2", output)
self.assertNotIn("Issue 3", output)
self._clear_stdout()
self.interactive.issues(
max_trace_length_to_sources=1, max_trace_length_to_sinks=1
)
output = self.stdout.getvalue().strip()
self.assertIn("Issue 1", output)
self.assertNotIn("Issue 2", output)
self.assertNotIn("Issue 3", output)
self._clear_stdout()
self.interactive.issues(
max_trace_length_to_sources=1, max_trace_length_to_sinks=2
)
output = self.stdout.getvalue().strip()
self.assertIn("Issue 1", output)
self.assertNotIn("Issue 2", output)
self.assertNotIn("Issue 3", output)
self._clear_stdout()
def testListIssuesFilterAllFeature(self):
self._list_issues_filter_setup()
self.fakes.instance()
feature1 = self.fakes.feature("via:feature1")
feature2 = self.fakes.feature("via:feature2")
self.fakes.feature("via:feature3")
self.fakes.save_all(self.db)
assocs = [
IssueInstanceSharedTextAssoc(
shared_text_id=feature1.id, issue_instance_id=1
),
IssueInstanceSharedTextAssoc(
shared_text_id=feature2.id, issue_instance_id=1
),
]
with self.db.make_session() as session:
self._add_to_session(session, assocs)
session.commit()
self.interactive.setup()
self.interactive.issues(all_features="via:feature1")
output = self.stdout.getvalue().strip()
self.assertIn("Issue 1", output)
self._clear_stdout()
self.interactive.issues(all_features=["via:feature1", "via:feature2"])
output = self.stdout.getvalue().strip()
self.assertIn("Issue 1", output)
self._clear_stdout()
self.interactive.issues(all_features=["via:feature3"])
output = self.stdout.getvalue().strip()
self.assertNotIn("Issue 1", output)
self._clear_stdout()
self.interactive.issues(all_features=["via:feature1", "via:feature3"])
output = self.stdout.getvalue().strip()
self.assertNotIn("Issue 1", output)
def testListIssuesFilterAnyFeature(self):
self._list_issues_filter_setup()
self.fakes.instance()
feature1 = self.fakes.feature("via:feature1")
feature2 = self.fakes.feature("via:feature2")
self.fakes.feature("via:feature3")
self.fakes.save_all(self.db)
assocs = [
IssueInstanceSharedTextAssoc(
shared_text_id=feature1.id, issue_instance_id=1
),
IssueInstanceSharedTextAssoc(
shared_text_id=feature2.id, issue_instance_id=1
),
]
with self.db.make_session() as session:
self._add_to_session(session, assocs)
session.commit()
self.interactive.setup()
self.interactive.issues(any_features="via:feature1")
output = self.stdout.getvalue().strip()
self.assertIn("Issue 1", output)
self._clear_stdout()
self.interactive.issues(any_features=["via:feature1", "via:feature2"])
output = self.stdout.getvalue().strip()
self.assertIn("Issue 1", output)
self._clear_stdout()
self.interactive.issues(any_features=["via:feature1", "via:feature3"])
output = self.stdout.getvalue().strip()
self.assertIn("Issue 1", output)
self._clear_stdout()
self.interactive.issues(any_features=["via:feature3"])
output = self.stdout.getvalue().strip()
self.assertNotIn("Issue 1", output)
def testListIssuesFilterExcludeFeature(self):
self._list_issues_filter_setup()
self.fakes.instance()
feature1 = self.fakes.feature("via:feature1")
feature2 = self.fakes.feature("via:feature2")
self.fakes.feature("via:feature3")
self.fakes.save_all(self.db)
assocs = [
IssueInstanceSharedTextAssoc(
shared_text_id=feature1.id, issue_instance_id=1
),
IssueInstanceSharedTextAssoc(
shared_text_id=feature2.id, issue_instance_id=1
),
]
with self.db.make_session() as session:
self._add_to_session(session, assocs)
session.commit()
self.interactive.setup()
self.interactive.issues(exclude_features="via:feature1")
output = self.stdout.getvalue().strip()
self.assertNotIn("Issue 1", output)
self._clear_stdout()
self.interactive.issues(exclude_features=["via:feature1", "via:feature2"])
output = self.stdout.getvalue().strip()
self.assertNotIn("Issue 1", output)
self._clear_stdout()
self.interactive.issues(exclude_features=["via:feature1", "via:feature3"])
output = self.stdout.getvalue().strip()
self.assertNotIn("Issue 1", output)
self._clear_stdout()
self.interactive.issues(exclude_features=["via:feature3"])
output = self.stdout.getvalue().strip()
self.assertIn("Issue 1", output)
def testListIssuesFilterAllFeatureAndAnyFeature(self):
self._list_issues_filter_setup()
feature1 = self.fakes.feature("via:feature1")
feature2 = self.fakes.feature("via:feature2")
feature3 = self.fakes.feature("via:feature3")
self.fakes.save_all(self.db)
with self.db.make_session() as session:
self._add_to_session(
session,
[
IssueInstanceSharedTextAssoc(
shared_text_id=feature1.id, issue_instance_id=1
),
IssueInstanceSharedTextAssoc(
shared_text_id=feature2.id, issue_instance_id=1
),
IssueInstanceSharedTextAssoc(
shared_text_id=feature3.id, issue_instance_id=1
),
IssueInstanceSharedTextAssoc(
shared_text_id=feature1.id, issue_instance_id=2
),
IssueInstanceSharedTextAssoc(
shared_text_id=feature2.id, issue_instance_id=2
),
],
)
session.commit()
self.interactive.setup()
self.interactive.issues(
any_features=["via:feature2", "via:feature3"],
all_features="via:feature1",
)
output = self.stdout.getvalue().strip()
self.assertIn("Issue 1", output)
self.assertIn("Issue 2", output)
def testNoRunsFound(self):
self.interactive.setup()
stderr = self.stderr.getvalue().strip()
self.assertIn("No runs found.", stderr)
def testListRuns(self):
runs = [
Run(id=1, date=datetime.now(), status=RunStatus.FINISHED),
Run(id=2, date=datetime.now(), status=RunStatus.INCOMPLETE),
Run(id=3, date=datetime.now(), status=RunStatus.FINISHED),
]
with self.db.make_session() as session:
self._add_to_session(session, runs)
session.commit()
self.interactive.setup()
self.interactive.runs()
output = self.stdout.getvalue().strip()
self.assertIn("Run 1", output)
self.assertNotIn("Run 2", output)
self.assertIn("Run 3", output)
def testSetRun(self):
self.fakes.issue()
run1 = self.fakes.run()
self.fakes.instance(message="Issue message")
self.fakes.save_all(self.db)
run2 = self.fakes.run()
self.fakes.instance(message="Issue message")
self.fakes.save_all(self.db)
with self.db.make_session() as session:
session.add(run1)
session.add(run2)
session.commit()
self.interactive.setup()
self.interactive.run(1)
self.interactive.issues()
output = self.stdout.getvalue().strip()
self.assertIn("Issue 1", output)
self.assertNotIn("Issue 2", output)
def testSetRunNonExistent(self):
runs = [
Run(id=1, date=datetime.now(), status=RunStatus.FINISHED),
Run(id=2, date=datetime.now(), status=RunStatus.INCOMPLETE),
]
with self.db.make_session() as session:
self._add_to_session(session, runs)
session.commit()
self.interactive.setup()
self.interactive.run(2)
self.interactive.run(3)
stderr = self.stderr.getvalue().strip()
self.assertIn("Run 2 doesn't exist", stderr)
self.assertIn("Run 3 doesn't exist", stderr)
def testSetLatestRun(self):
runs = [
Run(id=1, date=datetime.now(), status=RunStatus.FINISHED, kind="a"),
Run(id=2, date=datetime.now(), status=RunStatus.FINISHED, kind="a"),
Run(id=3, date=datetime.now(), status=RunStatus.FINISHED, kind="a"),
Run(id=4, date=datetime.now(), status=RunStatus.FINISHED, kind="b"),
Run(id=5, date=datetime.now(), status=RunStatus.FINISHED, kind="b"),
Run(id=6, date=datetime.now(), status=RunStatus.FINISHED, kind="c"),
]
with self.db.make_session() as session:
self._add_to_session(session, runs)
session.commit()
self.interactive.latest_run("c")
self.assertEqual(self.interactive.current_run_id, 6)
self.interactive.latest_run("b")
self.assertEqual(self.interactive.current_run_id, 5)
self.interactive.latest_run("a")
self.assertEqual(self.interactive.current_run_id, 3)
self.interactive.latest_run("d")
self.assertEqual(self.interactive.current_run_id, 3)
self.assertIn("No runs with kind 'd'", self.stderr.getvalue())
def testSetIssue(self):
run = self.fakes.run()
self.fakes.issue()
self.fakes.instance(message="Issue message")
self.fakes.instance(message="Issue message")
self.fakes.instance(message="Issue message")
self.fakes.save_all(self.db)
with self.db.make_session() as session:
session.add(run)
session.commit()
self.interactive.setup()
self.interactive.issue(2)
self.assertEqual(self.interactive.current_issue_instance_id, 2)
stdout = self.stdout.getvalue().strip()
self.assertNotIn("Issue 1", stdout)
self.assertIn("Issue 2", stdout)
self.assertNotIn("Issue 3", stdout)
self.interactive.issue(1)
self.assertEqual(self.interactive.current_issue_instance_id, 1)
stdout = self.stdout.getvalue().strip()
self.assertIn("Issue 1", stdout)
self.assertNotIn("Issue 3", stdout)
def testSetIssueNonExistent(self):
run = self.fakes.run()
with self.db.make_session() as session:
session.add(run)
session.commit()
self.interactive.setup()
self.interactive.issue(1)
stderr = self.stderr.getvalue().strip()
self.assertIn("Issue 1 doesn't exist", stderr)
def testSetIssueUpdatesRun(self):
self.fakes.issue()
run1 = self.fakes.run()
self.fakes.instance()
self.fakes.save_all(self.db)
run2 = self.fakes.run()
self.fakes.instance()
self.fakes.save_all(self.db)
with self.db.make_session() as session:
session.add(run1)
session.add(run2)
session.commit()
self.interactive.setup()
self.assertEqual(int(self.interactive.current_run_id), 2)
self.interactive.issue(1)
self.assertEqual(int(self.interactive.current_run_id), 1)
def testGetSources(self):
self.fakes.instance()
source1 = self.fakes.source("source1")
source2 = self.fakes.source("source2")
self.fakes.source("source3")
self.fakes.save_all(self.db)
assocs = [
IssueInstanceSharedTextAssoc(
shared_text_id=source1.id, issue_instance_id=1
),
IssueInstanceSharedTextAssoc(
shared_text_id=source2.id, issue_instance_id=1
),
]
with self.db.make_session() as session:
self._add_to_session(session, assocs)
session.commit()
self.interactive.setup()
sources = self.interactive._get_leaves_issue_instance(
session, 1, SharedTextKind.SOURCE
)
self.assertEqual(len(sources), 2)
self.assertIn("source1", sources)
self.assertIn("source2", sources)
def testGetSinks(self):
self.fakes.instance()
sink1 = self.fakes.sink("sink1")
sink2 = self.fakes.sink("sink2")
self.fakes.sink("sink3")
self.fakes.save_all(self.db)
assocs = [
IssueInstanceSharedTextAssoc(shared_text_id=sink1.id, issue_instance_id=1),
IssueInstanceSharedTextAssoc(shared_text_id=sink2.id, issue_instance_id=1),
]
with self.db.make_session() as session:
self._add_to_session(session, assocs)
session.commit()
self.interactive.setup()
sinks = self.interactive._get_leaves_issue_instance(
session, 1, SharedTextKind.SINK
)
self.assertEqual(len(sinks), 2)
self.assertIn("sink1", sinks)
self.assertIn("sink2", sinks)
def testGetFeatures(self):
self.fakes.instance()
feature1 = self.fakes.feature("via:feature1")
feature2 = self.fakes.feature("via:feature2")
self.fakes.feature("via:feature3")
self.fakes.save_all(self.db)
assocs = [
IssueInstanceSharedTextAssoc(
shared_text_id=feature1.id, issue_instance_id=1
),
IssueInstanceSharedTextAssoc(
shared_text_id=feature2.id, issue_instance_id=1
),
]
with self.db.make_session() as session:
self._add_to_session(session, assocs)
session.commit()
self.interactive.setup()
features = self.interactive._get_leaves_issue_instance(
session, 1, SharedTextKind.FEATURE
)
self.assertEqual(len(features), 2)
self.assertIn("via:feature1", features)
self.assertIn("via:feature2", features)
def _basic_trace_frames(self):
return [
self.fakes.precondition(
caller="call1",
caller_port="root",
callee="call2",
callee_port="param0",
location=(1, 1, 1),
),
self.fakes.precondition(
caller="call2",
caller_port="param0",
callee="leaf",
callee_port="sink",
location=(1, 2, 1),
),
]
def testCreateTraceTuples(self):
# reverse order
postcondition_traces = [
(
TraceFrameQueryResult(
id=DBID(1),
callee="call3",
callee_port="result",
filename="file3.py",
callee_location=SourceLocation(1, 1, 3),
caller="main",
caller_port="root",
),
1,
),
(
TraceFrameQueryResult(
id=DBID(2),
callee="call2",
callee_port="result",
caller="dummy caller",
caller_port="dummy caller",
filename="file2.py",
callee_location=SourceLocation(1, 1, 2),
),
2,
),
(
TraceFrameQueryResult(
id=DBID(3),
callee="leaf",
callee_port="source",
caller="dummy caller",
caller_port="dummy caller",
filename="file1.py",
callee_location=SourceLocation(1, 1, 1),
),
3,
),
]
trace_tuples = self.interactive._create_trace_tuples(postcondition_traces)
self.assertEqual(len(trace_tuples), 3)
self.assertEqual(
trace_tuples,
[
TraceTuple(postcondition_traces[0][0], 1),
TraceTuple(postcondition_traces[1][0], 2),
TraceTuple(postcondition_traces[2][0], 3),
],
)
def testOutputTraceTuples(self):
trace_tuples = [
TraceTuple(
trace_frame=TraceFrameQueryResult(
id=DBID(1),
caller="unused",
caller_port="unused",
callee="leaf",
callee_port="source",
filename="file1.py",
callee_location=SourceLocation(1, 1, 1),
)
),
TraceTuple(
trace_frame=TraceFrameQueryResult(
id=DBID(2),
caller="unused",
caller_port="unused",
callee="call2",
callee_port="result",
filename="file2.py",
callee_location=SourceLocation(1, 1, 2),
)
),
TraceTuple(
trace_frame=TraceFrameQueryResult(
id=DBID(3),
caller="unused",
caller_port="unused",
callee="call3",
callee_port="result",
filename="file3.py",
callee_location=SourceLocation(1, 1, 3),
)
),
TraceTuple(
trace_frame=TraceFrameQueryResult(
id=DBID(4),
caller="unused",
caller_port="unused",
callee="main",
callee_port="root",
filename="file4.py",
callee_location=SourceLocation(1, 1, 4),
)
),
TraceTuple(
trace_frame=TraceFrameQueryResult(
id=DBID(5),
caller="unused",
caller_port="unused",
callee="call4",
callee_port="param0",
filename="file4.py",
callee_location=SourceLocation(1, 1, 4),
)
),
TraceTuple(
trace_frame=TraceFrameQueryResult(
id=DBID(6),
caller="unused",
caller_port="unused",
callee="call5",
callee_port="param1",
filename="file5.py",
callee_location=SourceLocation(1, 1, 5),
)
),
TraceTuple(
trace_frame=TraceFrameQueryResult(
id=DBID(7),
caller="unused",
caller_port="unused",
callee="leaf",
callee_port="sink",
filename="file6.py",
callee_location=SourceLocation(1, 1, 6),
)
),
]
self.interactive.current_trace_frame_index = 1
self.interactive._output_trace_tuples(trace_tuples)
output = self.stdout.getvalue()
self.assertEqual(
output.split("\n"),
[
" # ⎇ [callable] [port] [location]",
" 1 leaf source file1.py:1|1|1",
" --> 2 call2 result file2.py:1|1|2",
" 3 call3 result file3.py:1|1|3",
" 4 main root file4.py:1|1|4",
" 5 call4 param0 file4.py:1|1|4",
" 6 call5 param1 file5.py:1|1|5",
" 7 leaf sink file6.py:1|1|6",
"",
],
)
self._clear_stdout()
self.interactive.current_trace_frame_index = 4
self.interactive._output_trace_tuples(trace_tuples)
output = self.stdout.getvalue()
self.assertEqual(
output.split("\n"),
[
" # ⎇ [callable] [port] [location]",
" 1 leaf source file1.py:1|1|1",
" 2 call2 result file2.py:1|1|2",
" 3 call3 result file3.py:1|1|3",
" 4 main root file4.py:1|1|4",
" --> 5 call4 param0 file4.py:1|1|4",
" 6 call5 param1 file5.py:1|1|5",
" 7 leaf sink file6.py:1|1|6",
"",
],
)
def testTraceFromIssue(self):
run = self.fakes.run()
self.fakes.issue()
instance = self.fakes.instance()
source = self.fakes.source()
frames = [
self.fakes.postcondition(
caller="call1",
caller_port="root",
callee="leaf",
callee_port="source",
location=(1, 1, 1),
),
self.fakes.precondition(
caller="call1",
caller_port="root",
callee="leaf",
callee_port="sink",
location=(1, 1, 2),
),
]
self.fakes.saver.add_all(
[
IssueInstanceTraceFrameAssoc.Record(
trace_frame_id=frames[0].id, issue_instance_id=instance.id
),
IssueInstanceTraceFrameAssoc.Record(
trace_frame_id=frames[1].id, issue_instance_id=instance.id
),
]
)
self.fakes.saver.add_all(
[
TraceFrameLeafAssoc.Record(
trace_frame_id=frames[0].id, leaf_id=source.id, trace_length=0
),
TraceFrameLeafAssoc.Record(
trace_frame_id=frames[1].id, leaf_id=source.id, trace_length=0
),
]
)
self.fakes.save_all(self.db)
with self.db.make_session() as session:
session.add(run)
session.commit()
self.interactive.setup()
self.interactive.trace()
stderr = self.stderr.getvalue().strip()
self.assertIn("Use 'issue ID' or 'frame ID'", stderr)
self.interactive.issue(1)
self._clear_stdout()
self.interactive.trace()
self.assertEqual(
self.stdout.getvalue().split("\n"),
[
" # ⎇ [callable] [port] [location]",
" 1 leaf source lib/server/posts/response.py:1|1|1",
" --> 2 Foo.barMethod root /r/some/filename.py:6|7|8",
" 3 leaf sink lib/server/posts/request.py:1|1|2",
"",
],
)
def testTraceFromFrame(self):
run = self.fakes.run()
frames = self._basic_trace_frames()
sink = self.fakes.sink("sink")
self.fakes.saver.add_all(
[
TraceFrameLeafAssoc.Record(
trace_frame_id=frames[0].id, leaf_id=sink.id, trace_length=1
),
TraceFrameLeafAssoc.Record(
trace_frame_id=frames[1].id, leaf_id=sink.id, trace_length=0
),
]
)
self.fakes.save_all(self.db)
with self.db.make_session() as session:
session.add(run)
session.commit()
self.interactive.setup()
self.interactive.frame(int(frames[0].id))
self._clear_stdout()
self.interactive.trace()
self.assertEqual(self.interactive.sinks, {"sink"})
self.assertEqual(
self.stdout.getvalue().split("\n"),
[
" # ⎇ [callable] [port] [location]",
" --> 1 call1 root lib/server/posts/request.py:1|1|1",
" 2 call2 param0 lib/server/posts/request.py:1|1|1",
" 3 leaf sink lib/server/posts/request.py:1|2|1",
"",
],
)
def testTraceMissingFrames(self):
run = self.fakes.run()
self.fakes.issue()
instance = self.fakes.instance()
source = self.fakes.source()
frames = [
self.fakes.postcondition(
caller="call1",
caller_port="root",
callee="leaf",
callee_port="source",
location=(1, 1, 1),
),
self.fakes.precondition(
caller="call1",
caller_port="root",
callee="call2",
callee_port="param0",
location=(1, 1, 1),
),
]
self.fakes.saver.add_all(
[
IssueInstanceTraceFrameAssoc.Record(
trace_frame_id=frames[0].id, issue_instance_id=instance.id
),
IssueInstanceTraceFrameAssoc.Record(
trace_frame_id=frames[1].id, issue_instance_id=instance.id
),
]
)
self.fakes.saver.add_all(
[
TraceFrameLeafAssoc.Record(
trace_frame_id=frames[0].id, leaf_id=source.id, trace_length=0
),
TraceFrameLeafAssoc.Record(
trace_frame_id=frames[1].id, leaf_id=source.id, trace_length=0
),
]
)
self.fakes.save_all(self.db)
with self.db.make_session() as session:
session.add(run)
session.commit()
self.interactive.setup()
self.interactive.issue(1)
self.interactive.trace()
stdout = self.stdout.getvalue().strip()
self.assertIn("Missing trace frame: call2:param0", stdout)
def testTraceCursorLocation(self):
run = self.fakes.run()
self.fakes.issue()
instance = self.fakes.instance(callable="Issue callable")
source = self.fakes.source()
frames = [
self.fakes.postcondition(
caller="call1",
caller_port="root",
callee="leaf",
callee_port="source",
location=(1, 1, 1),
),
self.fakes.precondition(
caller="call1",
caller_port="root",
callee="leaf",
callee_port="sink",
location=(1, 2, 1),
),
]
self.fakes.saver.add_all(
[
IssueInstanceTraceFrameAssoc.Record(
trace_frame_id=frames[0].id, issue_instance_id=instance.id
),
IssueInstanceTraceFrameAssoc.Record(
trace_frame_id=frames[1].id, issue_instance_id=instance.id
),
]
)
self.fakes.saver.add_all(
[
TraceFrameLeafAssoc.Record(
trace_frame_id=frames[0].id, leaf_id=source.id, trace_length=0
),
TraceFrameLeafAssoc.Record(
trace_frame_id=frames[1].id, leaf_id=source.id, trace_length=0
),
]
)
self.fakes.save_all(self.db)
with self.db.make_session() as session:
session.add(run)
session.commit()
self.interactive.setup()
self.assertIsNone(self.interactive.callable())
self.interactive.issue(1)
self.assertEqual(self.interactive.callable(), "Issue callable")
self.assertEqual(self.interactive.current_trace_frame_index, 1)
self.interactive.next_cursor_location()
self.assertEqual(self.interactive.current_trace_frame_index, 2)
self.assertEqual(self.interactive.callable(), "leaf")
self.interactive.next_cursor_location()
self.assertEqual(self.interactive.current_trace_frame_index, 2)
self.interactive.prev_cursor_location()
self.assertEqual(self.interactive.current_trace_frame_index, 1)
self.interactive.prev_cursor_location()
self.assertEqual(self.interactive.current_trace_frame_index, 0)
self.interactive.prev_cursor_location()
self.assertEqual(self.interactive.current_trace_frame_index, 0)
def testJumpToLocation(self):
run = self.fakes.run()
self.fakes.issue()
instance = self.fakes.instance()
source = self.fakes.source()
frames = [
self.fakes.postcondition(
caller="call1", caller_port="root", callee="leaf", callee_port="source"
),
self.fakes.precondition(
caller="call1", caller_port="root", callee="leaf", callee_port="sink"
),
]
self.fakes.saver.add_all(
[
IssueInstanceTraceFrameAssoc.Record(
trace_frame_id=frames[0].id, issue_instance_id=instance.id
),
IssueInstanceTraceFrameAssoc.Record(
trace_frame_id=frames[1].id, issue_instance_id=instance.id
),
]
)
self.fakes.saver.add_all(
[
TraceFrameLeafAssoc.Record(
trace_frame_id=frames[0].id, leaf_id=source.id, trace_length=0
),
TraceFrameLeafAssoc.Record(
trace_frame_id=frames[1].id, leaf_id=source.id, trace_length=0
),
]
)
self.fakes.save_all(self.db)
with self.db.make_session() as session:
session.add(run)
session.commit()
self.interactive.setup()
self.interactive.issue(1)
self.assertEqual(self.interactive.current_trace_frame_index, 1)
self.interactive.jump(1)
self.assertEqual(self.interactive.current_trace_frame_index, 0)
self.interactive.jump(3)
self.assertEqual(self.interactive.current_trace_frame_index, 2)
self.interactive.jump(4)
self.assertEqual(self.interactive.current_trace_frame_index, 2)
self.interactive.jump(0)
self.assertEqual(self.interactive.current_trace_frame_index, 2)
def testTraceNoSinks(self):
run = self.fakes.run()
self.fakes.issue()
instance = self.fakes.instance()
source = self.fakes.source("source1")
frame = self.fakes.postcondition(
caller="call1", caller_port="root", callee="leaf", callee_port="source"
)
self.fakes.saver.add(
IssueInstanceTraceFrameAssoc.Record(
trace_frame_id=frame.id, issue_instance_id=instance.id
)
)
self.fakes.saver.add(
TraceFrameLeafAssoc.Record(
trace_frame_id=frame.id, leaf_id=source.id, trace_length=0
)
)
self.fakes.save_all(self.db)
with self.db.make_session() as session:
session.add(run)
session.commit()
self.interactive.setup()
self.interactive.sources = {"source1"}
self.interactive.issue(1)
self._clear_stdout()
self.interactive.trace()
self.assertEqual(
self.stdout.getvalue().split("\n"),
[
" # ⎇ [callable] [port] [location]",
" 1 leaf source lib/server/posts/response.py:4|5|6",
" --> 2 Foo.barMethod root /r/some/filename.py:6|7|8",
"",
],
)
def _set_up_branched_trace(self) -> List[TraceFrame]:
run = self.fakes.run()
self.fakes.issue()
instance = self.fakes.instance()
source = self.fakes.source("source1")
sink = self.fakes.sink("sink1")
self.fakes.saver.add_all(
[
IssueInstanceSharedTextAssoc.Record(
issue_instance_id=instance.id, shared_text_id=source.id
),
IssueInstanceSharedTextAssoc.Record(
issue_instance_id=instance.id, shared_text_id=sink.id
),
]
)
frames = []
for i in range(6):
if i < 2: # 2 postconditions
frames.append(
self.fakes.postcondition(
caller="call1",
caller_port="root",
callee="leaf",
callee_port="source",
location=(i, i, i),
)
)
self.fakes.saver.add(
TraceFrameLeafAssoc.Record(
trace_frame_id=frames[-1].id, leaf_id=source.id, trace_length=i
)
)
self.fakes.saver.add(
IssueInstanceTraceFrameAssoc.Record(
trace_frame_id=frames[-1].id, issue_instance_id=instance.id
)
)
elif i < 4:
frames.append(
self.fakes.precondition(
caller="call1",
caller_port="root",
callee="call2",
callee_port="param2",
location=(i, i, i),
)
)
self.fakes.saver.add(
TraceFrameLeafAssoc.Record(
trace_frame_id=frames[-1].id, leaf_id=sink.id, trace_length=i
)
)
self.fakes.saver.add(
IssueInstanceTraceFrameAssoc.Record(
trace_frame_id=frames[-1].id, issue_instance_id=instance.id
)
)
else:
frames.append(
self.fakes.precondition(
caller="call2",
caller_port="param2",
callee="leaf",
callee_port="sink",
location=(i, i, i),
)
)
self.fakes.saver.add(
TraceFrameLeafAssoc.Record(
trace_frame_id=frames[-1].id,
leaf_id=sink.id,
trace_length=5 - i,
)
)
self.fakes.save_all(self.db)
with self.db.make_session() as session:
session.add(run)
session.commit()
return frames
def testTraceBranchNumber(self):
self._set_up_branched_trace()
self.interactive.setup()
self.interactive.issue(1)
self.assertEqual(self.interactive.sources, {"source1"})
self.assertEqual(self.interactive.sinks, {"sink1"})
self._clear_stdout()
self.interactive.trace()
self.assertEqual(
self.stdout.getvalue().split("\n"),
[
" # ⎇ [callable] [port] [location]",
" 1 +2 leaf source lib/server/posts/response.py:0|0|0",
" --> 2 Foo.barMethod root /r/some/filename.py:6|7|8",
" 3 +2 call2 param2 lib/server/posts/request.py:2|2|2",
" 4 +2 leaf sink lib/server/posts/request.py:5|5|5",
"",
],
)
def testShowBranches(self):
self._set_up_branched_trace()
self.interactive.setup()
self.interactive.issue(1)
# Parent at root
self.interactive.prev_cursor_location()
with patch("click.prompt", return_value=0):
self.interactive.branch()
output = self.stdout.getvalue().strip()
self.assertIn(
"[*] leaf : source\n"
" [0 hops: source1]\n"
" [lib/server/posts/response.py:0|0|0]",
output,
)
self.assertIn(
"[2] leaf : source\n"
" [1 hops: source1]\n"
" [lib/server/posts/response.py:1|1|1]",
output,
)
self._clear_stdout()
# Move to call2:param2
self.interactive.next_cursor_location()
self.interactive.next_cursor_location()
with patch("click.prompt", return_value=0):
self.interactive.branch()
output = self.stdout.getvalue().strip()
self.assertIn(
"[*] call2 : param2\n"
" [2 hops: sink1]\n"
" [lib/server/posts/request.py:2|2|2]",
output,
)
self.assertIn(
"[2] call2 : param2\n"
" [3 hops: sink1]\n"
" [lib/server/posts/request.py:3|3|3]",
output,
)
self._clear_stdout()
# Move to leaf:sink
self.interactive.next_cursor_location()
with patch("click.prompt", return_value=0):
self.interactive.branch()
output = self.stdout.getvalue().strip()
self.assertIn(
"[*] leaf : sink\n"
" [0 hops: sink1]\n"
" [lib/server/posts/request.py:5|5|5]",
output,
)
self.assertIn(
"[2] leaf : sink\n"
" [1 hops: sink1]\n"
" [lib/server/posts/request.py:4|4|4]",
output,
)
def testGetTraceFrameBranches(self):
frames = self._set_up_branched_trace()
self.interactive.setup()
self.interactive.issue(1)
# Parent at root
self.interactive.prev_cursor_location()
with self.db.make_session() as session:
branches = self.interactive._get_trace_frame_branches(session)
self.assertEqual(len(branches), 2)
self.assertEqual(int(branches[0].id), int(frames[0].id))
self.assertEqual(int(branches[1].id), int(frames[1].id))
# Parent is no longer root
self.interactive.next_cursor_location()
self.interactive.next_cursor_location()
self.interactive.next_cursor_location()
branches = self.interactive._get_trace_frame_branches(session)
self.assertEqual(len(branches), 2)
self.assertEqual(int(branches[0].id), int(frames[5].id))
self.assertEqual(int(branches[1].id), int(frames[4].id))
def testBranch(self):
self._set_up_branched_trace()
self.interactive.setup()
self.interactive.issue(1)
self.interactive.prev_cursor_location()
# We are testing for the source location, which differs between branches
self._clear_stdout()
self.interactive.branch(2) # location 0|0|0 -> 1|1|1
output = self.stdout.getvalue().strip()
self.assertIn(
" --> 1 +2 leaf source lib/server/posts/response.py:1|1|1", output
)
self._clear_stdout()
self.interactive.branch(1) # location 1|1|1 -> 0|0|0
output = self.stdout.getvalue().strip()
self.assertIn(
" --> 1 +2 leaf source lib/server/posts/response.py:0|0|0", output
)
self.interactive.next_cursor_location()
self.interactive.next_cursor_location()
self._clear_stdout()
self.interactive.branch(2) # location 2|2|2 -> 3|3|3
output = self.stdout.getvalue().strip()
self.assertIn(
" --> 3 +2 call2 param2 lib/server/posts/request.py:3|3|3", output
)
self.interactive.next_cursor_location()
self._clear_stdout()
self.interactive.branch(2) # location 4|4|4 -> 5|5|5
output = self.stdout.getvalue().strip()
self.assertIn(
" 3 +2 call2 param2 lib/server/posts/request.py:3|3|3", output
)
self.assertIn(
" --> 4 +2 leaf sink lib/server/posts/request.py:4|4|4", output
)
self.interactive.branch(3) # location 4|4|4 -> 5|5|5
stderr = self.stderr.getvalue().strip()
self.assertIn("Branch number invalid", stderr)
def testBranchPrefixLengthChanges(self):
run = self.fakes.run()
self.fakes.issue()
instance = self.fakes.instance()
source = self.fakes.source("source1")
sink = self.fakes.sink("sink1")
frames = [
self.fakes.postcondition(
caller="call1", caller_port="root", callee="leaf", callee_port="source"
),
self.fakes.postcondition(
caller="call1",
caller_port="root",
callee="prev_call",
callee_port="result",
),
self.fakes.postcondition(
caller="prev_call",
caller_port="result",
callee="leaf",
callee_port="source",
),
self.fakes.precondition(
caller="call1", caller_port="root", callee="leaf", callee_port="sink"
),
]
self.fakes.saver.add_all(
[
IssueInstanceSharedTextAssoc.Record(
issue_instance_id=instance.id, shared_text_id=source.id
),
IssueInstanceSharedTextAssoc.Record(
issue_instance_id=instance.id, shared_text_id=sink.id
),
]
)
self.fakes.saver.add_all(
[
IssueInstanceTraceFrameAssoc.Record(
issue_instance_id=instance.id, trace_frame_id=frames[0].id
),
IssueInstanceTraceFrameAssoc.Record(
issue_instance_id=instance.id, trace_frame_id=frames[1].id
),
IssueInstanceTraceFrameAssoc.Record(
issue_instance_id=instance.id, trace_frame_id=frames[3].id
),
]
)
self.fakes.saver.add_all(
[
TraceFrameLeafAssoc.Record(
trace_frame_id=frames[0].id, leaf_id=source.id, trace_length=0
),
TraceFrameLeafAssoc.Record(
trace_frame_id=frames[1].id, leaf_id=source.id, trace_length=1
),
TraceFrameLeafAssoc.Record(
trace_frame_id=frames[2].id, leaf_id=source.id, trace_length=0
),
TraceFrameLeafAssoc.Record(
trace_frame_id=frames[3].id, leaf_id=sink.id, trace_length=0
),
]
)
self.fakes.save_all(self.db)
with self.db.make_session() as session:
session.add(run)
session.commit()
self.interactive.setup()
self.interactive.issue(1)
self._clear_stdout()
self.interactive.prev_cursor_location()
self.assertEqual(
self.stdout.getvalue().split("\n"),
[
" # ⎇ [callable] [port] [location]",
" --> 1 +2 leaf source lib/server/posts/response.py:4|5|6",
" 2 Foo.barMethod root /r/some/filename.py:6|7|8",
" 3 leaf sink lib/server/posts/request.py:4|5|6",
"",
],
)
self._clear_stdout()
self.interactive.branch(2)
self.assertEqual(
self.stdout.getvalue().split("\n"),
[
" # ⎇ [callable] [port] [location]",
" 1 leaf source lib/server/posts/response.py:4|5|6",
" --> 2 +2 prev_call result lib/server/posts/response.py:4|5|6",
" 3 Foo.barMethod root /r/some/filename.py:6|7|8",
" 4 leaf sink lib/server/posts/request.py:4|5|6",
"",
],
)
self._clear_stdout()
with patch("click.prompt", return_value=0):
self.interactive.branch()
output = self.stdout.getvalue().strip()
self.assertIn("[*] prev_call : result", output)
self.assertIn(" [1 hops: source1]", output)
def testCurrentBranchIndex(self):
trace_frames = [TraceFrame(id=1), TraceFrame(id=2), TraceFrame(id=3)]
self.interactive.current_trace_frame_index = 0
self.interactive.trace_tuples = [TraceTuple(trace_frame=TraceFrame(id=1))]
self.assertEqual(0, self.interactive._current_branch_index(trace_frames))
self.interactive.trace_tuples[0].trace_frame.id = 2
self.assertEqual(1, self.interactive._current_branch_index(trace_frames))
self.interactive.trace_tuples[0].trace_frame.id = 3
self.assertEqual(2, self.interactive._current_branch_index(trace_frames))
self.interactive.trace_tuples[0].trace_frame.id = 4
self.assertEqual(-1, self.interactive._current_branch_index(trace_frames))
def testVerifyEntrypointSelected(self):
self.interactive.current_issue_instance_id = -1
self.interactive.current_frame_id = -1
with self.assertRaises(UserError):
self.interactive._verify_entrypoint_selected()
self.interactive.current_issue_instance_id = 1
try:
self.interactive._verify_entrypoint_selected()
except UserError:
self.fail("Unexpected UserError")
self.interactive.current_issue_instance_id = -1
self.interactive.current_frame_id = 1
try:
self.interactive._verify_entrypoint_selected()
except UserError:
self.fail("Unexpected UserError")
self.interactive.current_issue_instance_id = 1
with self.assertRaises(AssertionError):
self.interactive._verify_entrypoint_selected()
def testVerifyMultipleBranches(self):
self.interactive.current_trace_frame_index = 0
self.interactive.trace_tuples = [
TraceTuple(trace_frame=TraceFrame(id=1), branches=1),
TraceTuple(trace_frame=TraceFrame(id=2), branches=2),
]
with self.assertRaises(UserError):
self.interactive._verify_multiple_branches()
self.interactive.current_trace_frame_index = 1
try:
self.interactive._verify_multiple_branches()
except UserError:
self.fail("Unexpected UserError")
def testAddListOrElementFilterErrors(self):
with self.assertRaises(UserError):
self.interactive._add_list_or_element_filter_to_query(
"not a list", None, None, "arg0", int
)
with self.assertRaises(UserError):
self.interactive._add_list_or_element_filter_to_query(
[], None, None, "arg0", str
)
def testAddListOrStringFilterToQuery(self):
shared_texts = [
SharedText(id=1, contents="prefix"),
SharedText(id=2, contents="suffix"),
SharedText(id=3, contents="prefix_suffix"),
SharedText(id=4, contents="fix"),
]
with self.db.make_session() as session:
self._add_to_session(session, shared_texts)
session.commit()
query = session.query(SharedText.contents)
self.assertEqual(
self.interactive._add_list_or_string_filter_to_query(
["prefix", "suffix"], query, SharedText.contents, "contents"
).all(),
[("prefix",), ("suffix",)],
)
self.assertEqual(
self.interactive._add_list_or_string_filter_to_query(
["%prefix%"], query, SharedText.contents, "contents"
).all(),
[("prefix",), ("prefix_suffix",)],
)
self.assertEqual(
self.interactive._add_list_or_string_filter_to_query(
["%fix%"], query, SharedText.contents, "contents"
).all(),
[("prefix",), ("suffix",), ("prefix_suffix",), ("fix",)],
)
def testCreateIssueOutputStringNoSourcesNoSinks(self):
issue = IssueQueryResult(
id=1,
filename="module.py",
location=SourceLocation(1, 2, 3),
code=1000,
callable="module.function1",
message="root",
min_trace_length_to_sources=1,
min_trace_length_to_sinks=1,
)
sources = []
sinks = ["sink1", "sink2"]
features = []
result = self.interactive._create_issue_output_string(
issue, sources, sinks, features
)
self.assertIn("Sources: No sources", result)
self.assertIn("Sinks: sink1", result)
sources = ["source1", "source2"]
sinks = []
result = self.interactive._create_issue_output_string(
issue, sources, sinks, features
)
self.assertIn("Sources: source1", result)
self.assertIn("Sinks: No sinks", result)
def testCreateIssueOutputStringNoFeatures(self):
issue = IssueQueryResult(
id=1,
filename="module.py",
location=SourceLocation(1, 2, 3),
code=1000,
callable="module.function1",
message="root",
min_trace_length_to_sources=1,
min_trace_length_to_sinks=1,
)
sources = []
sinks = ["sink1"]
features = []
result = self.interactive._create_issue_output_string(
issue, sources, sinks, features
)
self.assertIn("Features: No features", result)
sources = []
sinks = ["sink1"]
features = ["via:feature1"]
result = self.interactive._create_issue_output_string(
issue, sources, sinks, features
)
self.assertIn("Features: via:feature1", result)
def testCreateIssueOutputStringTraceLength(self):
issue1 = IssueQueryResult(
id=1,
filename="module.py",
location=SourceLocation(1, 2, 3),
code=1000,
callable="module.function1",
message="root",
min_trace_length_to_sources=0,
min_trace_length_to_sinks=6,
)
sources = []
sinks = ["sink1", "sink2"]
features = []
result = self.interactive._create_issue_output_string(
issue1, sources, sinks, features
)
self.assertIn("Min Trace Length: Source (0) | Sink (6)", result)
issue2 = IssueQueryResult(
id=1,
filename="module.py",
location=SourceLocation(1, 2, 3),
code=1000,
callable="module.function1",
message="root",
min_trace_length_to_sources=3,
min_trace_length_to_sinks=1,
)
sources = []
sinks = ["sink1", "sink2"]
result = self.interactive._create_issue_output_string(
issue2, sources, sinks, features
)
self.assertIn("Min Trace Length: Source (3) | Sink (1)", result)
def testListSourceCode(self):
mock_data = """if this_is_true:
print("This was true")
else:
print("This was false")
"""
self.interactive.setup()
self.interactive.current_issue_instance_id = 1
self.interactive.current_trace_frame_index = 0
self.interactive.trace_tuples = [
TraceTuple(
trace_frame=TraceFrameQueryResult(
id=DBID(0),
filename="file.py",
caller="",
caller_port="",
callee="callee",
callee_port="",
callee_location=SourceLocation(2, 10, 25),
),
placeholder=True,
)
]
with patch("builtins.open", mock_open(read_data=mock_data)) as mock_file:
self._clear_stdout()
self.interactive.list_source_code(2)
mock_file.assert_called_once_with(f"{os.getcwd()}/file.py", "r")
output = self.stdout.getvalue()
self.assertEqual(
output.split("\n"),
[
"In callee [file.py:2|10|25]",
" 1 if this_is_true:",
' --> 2 print("This was true")',
" ^^^^^^^^^^^^^^^",
" 3 else:",
' 4 print("This was false")',
"",
],
)
mock_file.reset_mock()
self._clear_stdout()
self.interactive.list_source_code(1)
mock_file.assert_called_once_with(f"{os.getcwd()}/file.py", "r")
output = self.stdout.getvalue()
self.assertEqual(
output.split("\n"),
[
"In callee [file.py:2|10|25]",
" 1 if this_is_true:",
' --> 2 print("This was true")',
" ^^^^^^^^^^^^^^^",
" 3 else:",
"",
],
)
def testListSourceCodeFileNotFound(self):
self.interactive.setup()
self.interactive.current_issue_instance_id = 1
self.interactive.current_trace_frame_index = 0
self.interactive.trace_tuples = [
TraceTuple(
trace_frame=TraceFrameQueryResult(
id=DBID(0),
caller="",
caller_port="",
callee="",
callee_port="",
filename="file.py",
callee_location=SourceLocation(2, 1, 1),
)
)
]
with patch("builtins.open", mock_open(read_data="not read")) as mock_file:
mock_file.side_effect = FileNotFoundError()
self.interactive.list_source_code()
self.assertIn("Couldn't open", self.stderr.getvalue())
self.assertNotIn("file.py", self.stdout.getvalue())
def testGroupTraceFrames(self):
trace_frames = [
TraceFrameQueryResult(
id=DBID(1),
caller="caller1",
caller_port="port1",
callee="",
callee_port="",
),
TraceFrameQueryResult(
id=DBID(2),
caller="caller1",
caller_port="port1",
callee="",
callee_port="",
),
TraceFrameQueryResult(
id=DBID(3),
caller="caller2",
caller_port="port2",
callee="",
callee_port="",
),
TraceFrameQueryResult(
id=DBID(4),
caller="caller2",
caller_port="port2",
callee="",
callee_port="",
),
TraceFrameQueryResult(
id=DBID(5),
caller="caller2",
caller_port="port3",
callee="",
callee_port="",
),
]
buckets = self.interactive._group_trace_frames(trace_frames, 5)
self.assertEqual(3, len(buckets.keys()))
self.assertIn(("caller1", "port1"), buckets.keys())
self.assertIn(("caller2", "port2"), buckets.keys())
self.assertIn(("caller2", "port3"), buckets.keys())
self.assertEqual(
[1, 2], [int(frame.id) for frame in buckets[("caller1", "port1")]]
)
self.assertEqual(
[3, 4], [int(frame.id) for frame in buckets[("caller2", "port2")]]
)
self.assertEqual(
[5], [int(frame.id) for frame in buckets[("caller2", "port3")]]
)
def testListTracesBasic(self):
self.fakes.run()
post1 = self.fakes.postcondition(
caller="caller1", caller_port="port1", callee="callee1", callee_port="port1"
)
post2 = self.fakes.postcondition(
caller="caller1", caller_port="port1", callee="callee2", callee_port="port2"
)
post3 = self.fakes.postcondition(
caller="caller2", caller_port="port2", callee="callee3", callee_port="port3"
)
post4 = self.fakes.postcondition(
caller="caller2", caller_port="port2", callee="callee4", callee_port="port4"
)
post5 = self.fakes.postcondition(
caller="caller2", caller_port="port3", callee="callee5", callee_port="port5"
)
self.fakes.save_all(self.db)
self.interactive.current_run_id = 1
self._clear_stdout()
self.interactive.frames(kind=TraceKind.POSTCONDITION)
self.assertEqual(
self.stdout.getvalue().split("\n"),
[
"[id] [caller:caller_port -> callee:callee_port]",
"---- caller1:port1 ->",
f"{post1.id} callee1:port1",
f"{post2.id} callee2:port2",
"---- caller2:port2 ->",
f"{post3.id} callee3:port3",
f"{post4.id} callee4:port4",
"---- caller2:port3 ->",
f"{post5.id} callee5:port5",
"",
],
)
self._clear_stdout()
self.interactive.frames(kind=TraceKind.PRECONDITION)
self.assertEqual(self.stdout.getvalue().strip(), "No trace frames found.")
def testListTracesFilterCallersCallees(self):
run = self.fakes.run()
frames = self._basic_trace_frames()
self.fakes.save_all(self.db)
with self.db.make_session() as session:
session.add(run)
session.commit()
self.interactive.current_run_id = 1
self._clear_stdout()
self.interactive.frames(callers=["call2"])
self.assertEqual(
self.stdout.getvalue().split("\n"),
[
"[id] [caller:caller_port -> callee:callee_port]",
"---- call2:param0 ->",
f"{frames[1].id} leaf:sink",
"",
],
)
self._clear_stdout()
self.interactive.frames(callees=["call2"])
self.assertEqual(
self.stdout.getvalue().split("\n"),
[
"[id] [caller:caller_port -> callee:callee_port]",
"---- call1:root ->",
f"{frames[0].id} call2:param0",
"",
],
)
def testListFramesWithLimit(self):
frames = self._set_up_branched_trace()
self.interactive.run(1)
self._clear_stdout()
self.interactive.frames(limit=3)
self.assertEqual(
self.stdout.getvalue().split("\n"),
[
"[id] [caller:caller_port -> callee:callee_port]",
"---- call1:root ->",
f"{frames[3].id} call2:param2",
f"{frames[2].id} call2:param2",
f"{frames[1].id} leaf:source",
"...",
"Showing 3/6 matching frames. To see more, call 'frames' with "
"the 'limit' argument.",
"",
],
)
def testSetFrame(self):
frames = self._basic_trace_frames()
sink = self.fakes.sink("sink")
self.fakes.saver.add_all(
[
TraceFrameLeafAssoc.Record(
trace_frame_id=frames[0].id, leaf_id=sink.id, trace_length=1
),
TraceFrameLeafAssoc.Record(
trace_frame_id=frames[1].id, leaf_id=sink.id, trace_length=0
),
]
)
self.fakes.save_all(self.db)
self.interactive.setup()
self.interactive.frame(0)
self.assertIn("Trace frame 0 doesn't exist.", self.stderr.getvalue())
self._clear_stdout()
self.interactive.frame(1)
self.assertIn("Trace frame 1", self.stdout.getvalue())
self.assertNotIn("Trace frame 2", self.stdout.getvalue())
self._clear_stdout()
self.interactive.frame(2)
self.assertNotIn("Trace frame 1", self.stdout.getvalue())
self.assertIn("Trace frame 2", self.stdout.getvalue())
def testSetFrameUpdatesRun(self):
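        # Selecting a frame that belongs to an older run should switch
        # current_run_id from the latest run to that frame's run.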
run1 = self.fakes.run()
frames = [
self.fakes.precondition(
caller="call1",
caller_port="root",
callee="call2",
callee_port="param0",
location=(1, 1, 1),
),
self.fakes.precondition(
caller="call2",
caller_port="param1",
callee="call3",
callee_port="param2",
location=(1, 1, 1),
),
]
run2 = self.fakes.run()
sink = self.fakes.sink("sink1")
self.fakes.saver.add_all(
[
TraceFrameLeafAssoc.Record(
trace_frame_id=frames[0].id, leaf_id=sink.id, trace_length=1
),
TraceFrameLeafAssoc.Record(
trace_frame_id=frames[1].id, leaf_id=sink.id, trace_length=0
),
]
)
self.fakes.save_all(self.db)
with self.db.make_session() as session:
session.add(run1)
session.add(run2)
session.commit()
self.interactive.setup()
self.assertEqual(int(self.interactive.current_run_id), 2)
self.interactive.frame(int(frames[0].id))
self.assertEqual(int(self.interactive.current_run_id), 1)
def testIsBeforeRoot(self):
self.interactive.trace_tuples = [
TraceTuple(trace_frame=TraceFrame(kind=TraceKind.POSTCONDITION)),
TraceTuple(trace_frame=TraceFrame(kind=TraceKind.PRECONDITION)),
]
self.interactive.current_trace_frame_index = 0
self.assertTrue(self.interactive._is_before_root())
self.interactive.current_trace_frame_index = 1
self.assertFalse(self.interactive._is_before_root())
def testIsRootTraceTuple(self):
trace_tuple = TraceTuple(trace_frame=TraceFrame(callee_port="root"))
self.assertTrue(self.interactive._is_root_trace_tuple(trace_tuple))
trace_tuple = TraceTuple(trace_frame=TraceFrame(callee_port="not_root"))
self.assertFalse(self.interactive._is_root_trace_tuple(trace_tuple))
def testParents(self):
self._set_up_branched_trace()
self.interactive.setup()
self.interactive.frame(3)
self.interactive.current_trace_frame_index = 1
self._clear_stdout()
with patch("click.prompt", return_value=0):
self.interactive.parents()
self.assertEqual(
self.stdout.getvalue().split("\n"),
["[1] call1 : root", "[2] call1 : root", ""],
)
self._clear_stdout()
self.interactive.current_trace_frame_index = 0
self.interactive.parents()
self.assertIn("No parents calling", self.stdout.getvalue())
self.interactive.current_trace_frame_index = 2
self.interactive.parents()
self.assertIn("Try running from a non-leaf node", self.stderr.getvalue())
def testParentsSelectParent(self):
self._set_up_branched_trace()
self.interactive.setup()
self.interactive.frame(3)
self.interactive.current_trace_frame_index = 1
self._clear_stdout()
with patch("click.prompt", return_value=1):
self.interactive.parents()
self.assertEqual(
self.stdout.getvalue().split("\n"),
[
"[1] call1 : root",
"[2] call1 : root",
"",
" # ⎇ [callable] [port] [location]",
" --> 1 call1 root lib/server/posts/request.py:2|2|2",
" 2 call2 param2 lib/server/posts/request.py:2|2|2",
" 3 +2 leaf sink lib/server/posts/request.py:5|5|5",
"",
],
)
def testUpdateTraceTuplesNewParent(self):
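        # A new parent postcondition frame should replace the tail of the
        # trace (keeping the tuples before the current index), while a new
        # parent precondition frame should replace the head; the placeholder
        # tuple marks the synthetic caller entry that was added.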
frames = [
self.fakes.postcondition(callee="A"),
self.fakes.postcondition(callee="B"),
self.fakes.postcondition(callee="C"),
self.fakes.postcondition(callee="D"),
self.fakes.postcondition(callee="E"),
]
self.fakes.save_all(self.db)
self.interactive.setup()
# Test postcondition
self.interactive.current_trace_frame_index = 2
with self.db.make_session() as session:
self.interactive.trace_tuples = [
TraceTuple(trace_frame=self._frame_to_query_result(session, frames[0])),
TraceTuple(trace_frame=self._frame_to_query_result(session, frames[1])),
TraceTuple(trace_frame=self._frame_to_query_result(session, frames[2])),
TraceTuple(trace_frame=self._frame_to_query_result(session, frames[3])),
TraceTuple(trace_frame=self._frame_to_query_result(session, frames[4])),
]
trace_frame = TraceFrameQueryResult(
id=DBID(0),
caller="caller",
caller_port="caller_port",
callee="F",
callee_port="callee_port",
filename="file.py",
callee_location=SourceLocation(1, 1, 1),
kind=TraceKind.POSTCONDITION,
)
self.interactive._update_trace_tuples_new_parent(trace_frame)
self.assertEqual(self.interactive.current_trace_frame_index, 3)
self.assertEqual(
[
self.interactive._get_callable_from_trace_tuple(trace_tuple)[0]
for trace_tuple in self.interactive.trace_tuples
],
["A", "B", "F", "caller"],
)
self.assertTrue(self.interactive.trace_tuples[-1].placeholder)
# Test precondition
self.interactive.current_trace_frame_index = 2
with self.db.make_session() as session:
self.interactive.trace_tuples = [
TraceTuple(trace_frame=self._frame_to_query_result(session, frames[0])),
TraceTuple(trace_frame=self._frame_to_query_result(session, frames[1])),
TraceTuple(trace_frame=self._frame_to_query_result(session, frames[2])),
TraceTuple(trace_frame=self._frame_to_query_result(session, frames[3])),
TraceTuple(trace_frame=self._frame_to_query_result(session, frames[4])),
]
trace_frame = TraceFrameQueryResult(
id=DBID(0),
caller="caller",
caller_port="caller_port",
callee="F",
callee_port="callee_port",
filename="file.py",
callee_location=SourceLocation(1, 1, 1),
kind=TraceKind.PRECONDITION,
)
self.interactive._update_trace_tuples_new_parent(trace_frame)
self.assertEqual(self.interactive.current_trace_frame_index, 0)
self.assertEqual(
[
self.interactive._get_callable_from_trace_tuple(trace_tuple)[0]
for trace_tuple in self.interactive.trace_tuples
],
["caller", "F", "D", "E"],
)
self.assertTrue(self.interactive.trace_tuples[0].placeholder)
def testAllLeavesByKind(self):
shared_texts = [
SharedText(id=1, contents="source1", kind=SharedTextKind.SOURCE),
SharedText(id=2, contents="source2", kind=SharedTextKind.SOURCE),
SharedText(id=3, contents="source3", kind=SharedTextKind.SOURCE),
SharedText(id=4, contents="sink4", kind=SharedTextKind.SINK),
SharedText(id=5, contents="sink5", kind=SharedTextKind.SINK),
]
with self.db.make_session() as session:
self._add_to_session(session, shared_texts)
session.commit()
self.assertEqual(
self.interactive._all_leaves_by_kind(session, SharedTextKind.SOURCE),
{1: "source1", 2: "source2", 3: "source3"},
)
self.assertEqual(
self.interactive._all_leaves_by_kind(session, SharedTextKind.SINK),
{4: "sink4", 5: "sink5"},
)
def testDetails(self):
run = self.fakes.run()
frames = [
self.fakes.precondition(
caller="call1",
caller_port="root",
callee="call2",
callee_port="param0",
location=(1, 1, 1),
),
self.fakes.precondition(
caller="call2",
caller_port="param1",
callee="call3",
callee_port="param2",
location=(1, 1, 1),
),
]
issues = [self.fakes.issue(), self.fakes.issue(), self.fakes.issue()]
self.fakes.instance(issue_id=issues[0].id, callable="call2"),
self.fakes.instance(issue_id=issues[1].id, callable="call3"),
self.fakes.instance(issue_id=issues[2].id, callable="call2"),
self.fakes.save_all(self.db)
with self.db.make_session(expire_on_commit=False) as session:
session.add(run)
session.commit()
self.interactive.setup()
with self.db.make_session() as session:
self.interactive.trace_tuples = [
TraceTuple(trace_frame=self._frame_to_query_result(session, frames[0]))
]
self.interactive.current_issue_instance_id = 1
self.interactive.current_trace_frame_index = 0
self._clear_stdout()
self.interactive.details()
self.assertEqual(
self.stdout.getvalue().split("\n"),
[
f"Trace frame {frames[0].id}",
" Caller: call1 : root",
" Callee: call2 : param0",
" Kind: TraceKind.precondition",
" Sinks: ",
" Location: lib/server/posts/request.py:1|1|1",
"",
"Issues in callable (call2): 2",
"",
"Postconditions with caller (call2):",
"No trace frames found.",
"",
"Preconditions with caller (call2):",
"[id] [caller:caller_port -> callee:callee_port]",
"---- call2:param1 ->",
f"{frames[1].id} call3:param2",
"",
],
)
def mock_pager(self, output_string):
self.pager_calls += 1
def testPager(self):
run = self.fakes.run()
self.fakes.issue()
self.fakes.instance()
self.fakes.save_all(self.db)
with self.db.make_session() as session:
session.add(run)
session.commit()
# Default is no pager in tests
self.pager_calls = 0
with patch("IPython.core.page.page", self.mock_pager):
self.interactive.setup()
self.interactive.issues(use_pager=False)
self.interactive.runs(use_pager=False)
self.assertEqual(self.pager_calls, 0)
self.pager_calls = 0
with patch("IPython.core.page.page", self.mock_pager):
self.interactive.setup()
self.interactive.issues(use_pager=True)
self.interactive.runs(use_pager=True)
self.assertEqual(self.pager_calls, 2)
| [
"[email protected]"
] | |
7967013853faa251a7c164a1774ad60bcab38057 | 84a240d463a7c286a4ff3b0c344a0e80e681ce38 | /src/chap03/10_time_dependencies/test.py | 1ae0ad5dbbf3e72fb61a6fcfb782ede2deaa843d | [] | no_license | perillaroc/ecflow-tutorial-code | 87f12b11f942b2323e45f8f8c205f665ceaeca7f | e00d0556a161a4c221b854f99dbbd0898ee14762 | refs/heads/master | 2021-09-06T04:30:48.812500 | 2018-02-02T06:48:41 | 2018-02-02T06:48:41 | 119,969,899 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,034 | py | import os
from pathlib import Path
from ecflow import Defs, Suite, Task, Family, Edit, Trigger, Event, Complete, Meter, Time, Day, Date
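# Time attributes used below (standard ecflow meanings, noted for reference):
#   Time("03:00 23:00 00:30")     - time series: run every 30 minutes from 03:00 to 23:00
#   Day("sunday")                 - run on Sundays
#   Date("1.*.*") + Time("12:00") - run at 12:00 on the 1st of every month
#   Time("+00:02")                - relative: 2 minutes after the suite begins
#   Time("00:02")                 - absolute suite time 00:02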
def create_family_f1():
return Family(
"f1",
Edit(SLEEP=20),
Task("t1",
Time("03:00 23:00 00:30")),
Task("t2",
Day("sunday")),
Task("t3",
Date("1.*.*"),
Time("12:00")
),
Task("t4",
Time("+00:02")),
Task("t5",
Time("00:02"))
)
print("Creating suite definition")
home = os.path.abspath(Path(Path(__file__).parent, "../../../build/course"))
defs = Defs(
Suite('test',
Edit(ECF_INCLUDE=home, ECF_HOME=home),
create_family_f1()))
print(defs)
print("Checking job creation: .ecf -> .job0")
print(defs.check_job_creation())
print("Saving definition to file 'test.def'")
defs.save_as_defs(str(Path(home, "test.def")))
# To restore the definition from file 'test.def' we can use:
# restored_defs = ecflow.Defs("test.def")
| [
"[email protected]"
] | |
befb49d4dbdb7cb938cabe95b0ce64fc371134a3 | 93a10a77cfed19f6f43987d5f7333c7599990ab1 | /vpy27/Lib/site-packages/cms/cms_toolbars.py | 963e7378c772e5cbb3cd448e37d3d387bf6db174 | [] | no_license | zfanai/vpy27 | 8cd00a49cadccd462276f685dfa30d51cfdfe3d6 | 57ae83d393c569cb632b1ad0bb093a13851e10ed | refs/heads/master | 2021-07-15T20:25:41.383490 | 2017-10-21T02:18:50 | 2017-10-21T02:18:50 | 107,623,345 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 27,153 | py | # -*- coding: utf-8 -*-
from classytags.utils import flatten_context
from django.conf import settings
from django.contrib import admin
from django.contrib.auth import get_permission_codename, get_user_model
from django.contrib.auth.models import AnonymousUser
from django.contrib.sites.models import Site
from django.core.urlresolvers import reverse, NoReverseMatch, resolve, Resolver404
from django.template.loader import render_to_string
from django.utils.safestring import mark_safe
from django.utils.translation import ugettext_lazy as _
from cms.api import get_page_draft, can_change_page
from cms.constants import TEMPLATE_INHERITANCE_MAGIC, PUBLISHER_STATE_PENDING
from cms.models import CMSPlugin, Title, Page
from cms.toolbar.items import TemplateItem, REFRESH_PAGE
from cms.toolbar_base import CMSToolbar
from cms.toolbar_pool import toolbar_pool
from cms.utils.i18n import get_language_tuple, force_language, get_language_dict
from cms.utils.compat.dj import is_installed
from cms.utils import get_cms_setting
from cms.utils.permissions import (
get_user_sites_queryset,
has_auth_page_permission,
)
from cms.utils.urlutils import add_url_parameters, admin_reverse
from menus.utils import DefaultLanguageChanger
# Identifiers for search
ADMIN_MENU_IDENTIFIER = 'admin-menu'
LANGUAGE_MENU_IDENTIFIER = 'language-menu'
TEMPLATE_MENU_BREAK = 'Template Menu Break'
PAGE_MENU_IDENTIFIER = 'page'
PAGE_MENU_ADD_IDENTIFIER = 'add_page'
PAGE_MENU_FIRST_BREAK = 'Page Menu First Break'
PAGE_MENU_SECOND_BREAK = 'Page Menu Second Break'
PAGE_MENU_THIRD_BREAK = 'Page Menu Third Break'
PAGE_MENU_FOURTH_BREAK = 'Page Menu Fourth Break'
PAGE_MENU_LAST_BREAK = 'Page Menu Last Break'
HISTORY_MENU_IDENTIFIER = 'history'
HISTORY_MENU_BREAK = 'History Menu Break'
MANAGE_PAGES_BREAK = 'Manage Pages Break'
ADMIN_SITES_BREAK = 'Admin Sites Break'
ADMINISTRATION_BREAK = 'Administration Break'
CLIPBOARD_BREAK = 'Clipboard Break'
USER_SETTINGS_BREAK = 'User Settings Break'
ADD_PAGE_LANGUAGE_BREAK = "Add page language Break"
REMOVE_PAGE_LANGUAGE_BREAK = "Remove page language Break"
COPY_PAGE_LANGUAGE_BREAK = "Copy page language Break"
TOOLBAR_DISABLE_BREAK = 'Toolbar disable Break'
@toolbar_pool.register
class PlaceholderToolbar(CMSToolbar):
"""
Adds placeholder edit buttons if placeholders or static placeholders are detected in the template
"""
def init_from_request(self):
self.page = get_page_draft(self.request.current_page)
def init_placeholders_from_request(self):
self.placeholders = getattr(self.request, 'placeholders', [])
self.statics = getattr(self.request, 'static_placeholders', [])
def populate(self):
self.init_from_request()
def post_template_populate(self):
self.init_placeholders_from_request()
self.add_wizard_button()
self.add_structure_mode()
def add_structure_mode(self):
if self.page and not self.page.application_urls:
if self.page.has_change_permission(self.request):
return self.add_structure_mode_item()
elif self.placeholders:
return self.add_structure_mode_item()
for sp in self.statics:
if sp.has_change_permission(self.request):
return self.add_structure_mode_item()
def add_structure_mode_item(self, extra_classes=('cms-toolbar-item-cms-mode-switcher',)):
build_mode = self.toolbar.build_mode
build_url = '?%s' % get_cms_setting('CMS_TOOLBAR_URL__BUILD')
edit_url = '?%s' % get_cms_setting('CMS_TOOLBAR_URL__EDIT_ON')
if self.request.user.has_perm("cms.use_structure"):
switcher = self.toolbar.add_button_list('Mode Switcher', side=self.toolbar.RIGHT,
extra_classes=extra_classes)
switcher.add_button(_('Structure'), build_url, active=build_mode, disabled=False)
switcher.add_button(_('Content'), edit_url, active=not build_mode, disabled=False)
def add_wizard_button(self):
from cms.wizards.wizard_pool import entry_choices
title = _("Create")
try:
page_pk = self.page.pk
except AttributeError:
page_pk = ''
user = getattr(self.request, "user", None)
disabled = user and hasattr(self, "page") and len(
list(entry_choices(user, self.page))) == 0
url = '{url}?page={page}&edit'.format(
url=reverse("cms_wizard_create"),
page=page_pk
)
self.toolbar.add_modal_button(title, url,
side=self.toolbar.RIGHT,
disabled=disabled,
on_close=REFRESH_PAGE)
@toolbar_pool.register
class BasicToolbar(CMSToolbar):
"""
Basic Toolbar for site and languages menu
"""
page = None
_language_menu = None
_admin_menu = None
def init_from_request(self):
self.page = get_page_draft(self.request.current_page)
def populate(self):
if not self.page:
self.init_from_request()
user_settings = self.request.toolbar.get_user_settings()
self.clipboard = user_settings.clipboard
self.add_admin_menu()
self.add_language_menu()
def add_admin_menu(self):
if not self._admin_menu:
self._admin_menu = self.toolbar.get_or_create_menu(ADMIN_MENU_IDENTIFIER, self.current_site.name)
# Users button
self.add_users_button(self._admin_menu)
# sites menu
if get_cms_setting('PERMISSION'):
sites_queryset = get_user_sites_queryset(self.request.user)
else:
sites_queryset = Site.objects.all()
if len(sites_queryset) > 1:
sites_menu = self._admin_menu.get_or_create_menu('sites', _('Sites'))
sites_menu.add_sideframe_item(_('Admin Sites'), url=admin_reverse('sites_site_changelist'))
sites_menu.add_break(ADMIN_SITES_BREAK)
for site in sites_queryset:
sites_menu.add_link_item(site.name, url='http://%s' % site.domain,
active=site.pk == self.current_site.pk)
# admin
self._admin_menu.add_sideframe_item(_('Administration'), url=admin_reverse('index'))
self._admin_menu.add_break(ADMINISTRATION_BREAK)
# cms users settings
self._admin_menu.add_sideframe_item(_('User settings'), url=admin_reverse('cms_usersettings_change'))
self._admin_menu.add_break(USER_SETTINGS_BREAK)
# clipboard
if self.toolbar.edit_mode or self.toolbar.build_mode:
# True if the clipboard exists and there's plugins in it.
clipboard_is_bound = self.get_clipboard_plugins().exists()
self._admin_menu.add_link_item(_('Clipboard...'), url='#',
extra_classes=['cms-clipboard-trigger'],
disabled=not clipboard_is_bound)
self._admin_menu.add_link_item(_('Clear clipboard'), url='#',
extra_classes=['cms-clipboard-empty'],
disabled=not clipboard_is_bound)
self._admin_menu.add_break(CLIPBOARD_BREAK)
# Disable toolbar
self._admin_menu.add_link_item(_('Disable toolbar'), url='?%s' % get_cms_setting('CMS_TOOLBAR_URL__DISABLE'))
self._admin_menu.add_break(TOOLBAR_DISABLE_BREAK)
# logout
self.add_logout_button(self._admin_menu)
def add_users_button(self, parent):
User = get_user_model()
if User in admin.site._registry:
opts = User._meta
if self.request.user.has_perm('%s.%s' % (opts.app_label, get_permission_codename('change', opts))):
user_changelist_url = admin_reverse('%s_%s_changelist' % (opts.app_label, opts.model_name))
parent.add_sideframe_item(_('Users'), url=user_changelist_url)
def add_logout_button(self, parent):
# If current page is not published or has view restrictions user is redirected to the home page:
# * published page: no redirect
# * unpublished page: redirect to the home page
# * published page with login_required: redirect to the home page
# * published page with view permissions: redirect to the home page
if (self.page and self.page.is_published(self.current_lang) and not self.page.login_required and
self.page.has_view_permission(self.request, AnonymousUser())):
on_success = self.toolbar.REFRESH_PAGE
else:
on_success = '/'
# We'll show "Logout Joe Bloggs" if the name fields in auth.User are completed, else "Logout jbloggs". If
# anything goes wrong, it'll just be "Logout".
user_name = self.get_username()
logout_menu_text = _('Logout %s') % user_name if user_name else _('Logout')
parent.add_ajax_item(logout_menu_text, action=admin_reverse('logout'), active=True, on_success=on_success)
def add_language_menu(self):
if settings.USE_I18N and not self._language_menu:
self._language_menu = self.toolbar.get_or_create_menu(LANGUAGE_MENU_IDENTIFIER, _('Language'))
language_changer = getattr(self.request, '_language_changer', DefaultLanguageChanger(self.request))
for code, name in get_language_tuple(self.current_site.pk):
try:
url = language_changer(code)
except NoReverseMatch:
url = DefaultLanguageChanger(self.request)(code)
self._language_menu.add_link_item(name, url=url, active=self.current_lang == code)
def get_username(self, user=None, default=''):
user = user or self.request.user
try:
name = user.get_full_name()
if name:
return name
else:
return user.get_username()
except (AttributeError, NotImplementedError):
return default
def get_clipboard_plugins(self):
self.populate()
if not hasattr(self, "clipboard"):
return CMSPlugin.objects.none()
return self.clipboard.get_plugins()
def render_addons(self, context):
context.push()
context['local_toolbar'] = self
clipboard = mark_safe(render_to_string('cms/toolbar/clipboard.html', flatten_context(context)))
context.pop()
return [clipboard]
@toolbar_pool.register
class PageToolbar(CMSToolbar):
_changed_admin_menu = None
watch_models = [Page]
# Helpers
def init_from_request(self):
self.page = get_page_draft(self.request.current_page)
self.title = self.get_title()
self.permissions_activated = get_cms_setting('PERMISSION')
def init_placeholders_from_request(self):
self.placeholders = getattr(self.request, 'placeholders', [])
self.statics = getattr(self.request, 'static_placeholders', [])
self.dirty_statics = [sp for sp in self.statics if sp.dirty]
def get_title(self):
try:
return Title.objects.get(page=self.page, language=self.current_lang, publisher_is_draft=True)
except Title.DoesNotExist:
return None
def has_publish_permission(self):
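        # Cached on the toolbar instance: publishing requires permission on the
        # current page (if any) and on every dirty static placeholder.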
if not hasattr(self, 'publish_permission'):
publish_permission = bool(self.page or self.statics)
if self.page:
publish_permission = self.page.has_publish_permission(self.request)
if self.statics:
publish_permission &= all(sp.has_publish_permission(self.request) for sp in self.dirty_statics)
self.publish_permission = publish_permission
return self.publish_permission
def has_page_change_permission(self):
if not hasattr(self, 'page_change_permission'):
if not self.page and not get_cms_setting('PERMISSION'):
# We can't check permissions for an individual page
# and can't check global cms permissions because
# user opted out of them.
# So just check django auth permissions.
user = self.request.user
can_change = has_auth_page_permission(user, action='change')
else:
can_change = can_change_page(self.request)
self.page_change_permission = can_change
return self.page_change_permission
def page_is_pending(self, page, language):
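        # True when a public version exists but its publisher state is still
        # PENDING (e.g. waiting for an unpublished ancestor).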
return (page.publisher_public_id and
page.publisher_public.get_publisher_state(language) == PUBLISHER_STATE_PENDING)
def in_apphook(self):
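        # True when the current URL resolves to a view other than the CMS page
        # detail view, i.e. an apphook is serving this path.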
with force_language(self.toolbar.language):
try:
resolver = resolve(self.request.path_info)
except Resolver404:
return False
else:
from cms.views import details
return resolver.func != details
def get_on_delete_redirect_url(self):
parent, language = self.page.parent, self.current_lang
# if the current page has a parent in the request's current language redirect to it
if parent and language in parent.get_languages():
with force_language(language):
return parent.get_absolute_url(language=language)
# else redirect to root, do not redirect to Page.objects.get_home() because user could have deleted the last
# page, if DEBUG == False this could cause a 404
return reverse('pages-root')
# Populate
def populate(self):
self.init_from_request()
self.change_admin_menu()
self.add_page_menu()
self.add_history_menu()
self.change_language_menu()
def post_template_populate(self):
self.init_placeholders_from_request()
self.add_draft_live()
self.add_publish_button()
# Buttons
def add_publish_button(self, classes=('cms-btn-action', 'cms-btn-publish',)):
# only do dirty lookups if publish permission is granted else button isn't added anyway
if self.toolbar.edit_mode and self.has_publish_permission():
classes = list(classes or [])
pk = self.page.pk if self.page else 0
dirty = (bool(self.dirty_statics) or
(self.page and (self.page.is_dirty(self.current_lang) or
self.page_is_pending(self.page, self.current_lang))))
if dirty:
classes.append('cms-btn-publish-active')
if self.dirty_statics or (self.page and self.page.is_published(self.current_lang)):
title = _('Publish changes')
else:
title = _('Publish page now')
classes.append('cms-publish-page')
params = {}
if self.dirty_statics:
params['statics'] = ','.join(str(sp.pk) for sp in self.dirty_statics)
if self.in_apphook():
params['redirect'] = self.request.path_info
with force_language(self.current_lang):
url = admin_reverse('cms_page_publish_page', args=(pk, self.current_lang))
url = add_url_parameters(url, params)
self.toolbar.add_button(title, url=url, extra_classes=classes,
side=self.toolbar.RIGHT, disabled=not dirty)
def add_draft_live(self):
if self.page:
if self.toolbar.edit_mode and not self.title:
self.add_page_settings_button()
if self.page.has_change_permission(self.request) and self.page.is_published(self.current_lang):
return self.add_draft_live_item()
elif self.placeholders:
return self.add_draft_live_item()
for sp in self.statics:
if sp.has_change_permission(self.request):
return self.add_draft_live_item()
def add_draft_live_item(self, template='cms/toolbar/items/live_draft.html', extra_context=None):
context = {'request': self.request}
context.update(extra_context or {})
pos = len(self.toolbar.right_items)
self.toolbar.add_item(TemplateItem(template, extra_context=context, side=self.toolbar.RIGHT), position=pos)
def add_page_settings_button(self, extra_classes=('cms-btn-action',)):
url = '%s?language=%s' % (admin_reverse('cms_page_change', args=[self.page.pk]), self.toolbar.language)
self.toolbar.add_modal_button(_('Page settings'), url, side=self.toolbar.RIGHT, extra_classes=extra_classes)
# Menus
def change_language_menu(self):
if self.toolbar.edit_mode and self.page:
language_menu = self.toolbar.get_menu(LANGUAGE_MENU_IDENTIFIER)
if not language_menu:
return None
languages = get_language_dict(self.current_site.pk)
remove = [(code, languages.get(code, code)) for code in self.page.get_languages() if code in languages]
add = [l for l in languages.items() if l not in remove]
copy = [(code, name) for code, name in languages.items() if code != self.current_lang and (code, name) in remove]
if add:
language_menu.add_break(ADD_PAGE_LANGUAGE_BREAK)
page_change_url = admin_reverse('cms_page_change', args=(self.page.pk,))
title = _('Add %(language)s Translation')
for code, name in add:
url = add_url_parameters(page_change_url, language=code)
language_menu.add_modal_item(title % {'language': name}, url=url)
if remove:
language_menu.add_break(REMOVE_PAGE_LANGUAGE_BREAK)
translation_delete_url = admin_reverse('cms_page_delete_translation', args=(self.page.pk,))
title = _('Delete %(language)s Translation')
disabled = len(remove) == 1
for code, name in remove:
url = add_url_parameters(translation_delete_url, language=code)
language_menu.add_modal_item(title % {'language': name}, url=url, disabled=disabled)
if copy:
language_menu.add_break(COPY_PAGE_LANGUAGE_BREAK)
page_copy_url = admin_reverse('cms_page_copy_language', args=(self.page.pk,))
title = _('Copy all plugins from %s')
question = _('Are you sure you want copy all plugins from %s?')
for code, name in copy:
language_menu.add_ajax_item(title % name, action=page_copy_url,
data={'source_language': code, 'target_language': self.current_lang},
question=question % name, on_success=self.toolbar.REFRESH_PAGE)
def change_admin_menu(self):
if not self._changed_admin_menu and self.has_page_change_permission():
admin_menu = self.toolbar.get_or_create_menu(ADMIN_MENU_IDENTIFIER)
url = admin_reverse('cms_page_changelist') # cms page admin
params = {'language': self.toolbar.language}
if self.page:
params['page_id'] = self.page.pk
url = add_url_parameters(url, params)
admin_menu.add_sideframe_item(_('Pages'), url=url, position=0)
# Used to prevent duplicates
self._changed_admin_menu = True
def add_page_menu(self):
if self.page and self.has_page_change_permission():
edit_mode = self.toolbar.edit_mode
refresh = self.toolbar.REFRESH_PAGE
# menu for current page
current_page_menu = self.toolbar.get_or_create_menu(PAGE_MENU_IDENTIFIER, _('Page'), position=1)
# page operations menu
add_page_menu = current_page_menu.get_or_create_menu(PAGE_MENU_ADD_IDENTIFIER, _('Add Page'))
app_page_url = admin_reverse('cms_page_add')
add_page_menu_modal_items = (
(_('New Page'), {'edit': 1, 'position': 'last-child', 'target': self.page.parent_id or ''}),
(_('New Sub Page'), {'edit': 1, 'position': 'last-child', 'target': self.page.pk}),
(_('Duplicate this Page'), {'copy_target': self.page.pk})
)
for title, params in add_page_menu_modal_items:
params.update(language=self.toolbar.language)
add_page_menu.add_modal_item(title, url=add_url_parameters(app_page_url, params))
# first break
current_page_menu.add_break(PAGE_MENU_FIRST_BREAK)
# page edit
page_edit_url = '?%s' % get_cms_setting('CMS_TOOLBAR_URL__EDIT_ON')
current_page_menu.add_link_item(_('Edit this Page'), disabled=edit_mode, url=page_edit_url)
# page settings
page_settings_url = admin_reverse('cms_page_change', args=(self.page.pk,))
page_settings_url = add_url_parameters(page_settings_url, language=self.toolbar.language)
current_page_menu.add_modal_item(_('Page settings'), url=page_settings_url, disabled=not edit_mode,
on_close=refresh)
# templates menu
if self.toolbar.build_mode or edit_mode:
templates_menu = current_page_menu.get_or_create_menu('templates', _('Templates'))
action = admin_reverse('cms_page_change_template', args=(self.page.pk,))
for path, name in get_cms_setting('TEMPLATES'):
active = self.page.template == path
if path == TEMPLATE_INHERITANCE_MAGIC:
templates_menu.add_break(TEMPLATE_MENU_BREAK)
templates_menu.add_ajax_item(name, action=action, data={'template': path}, active=active,
on_success=refresh)
# second break
current_page_menu.add_break(PAGE_MENU_SECOND_BREAK)
# advanced settings
advanced_url = admin_reverse('cms_page_advanced', args=(self.page.pk,))
advanced_url = add_url_parameters(advanced_url, language=self.toolbar.language)
advanced_disabled = not self.page.has_advanced_settings_permission(self.request) or not edit_mode
current_page_menu.add_modal_item(_('Advanced settings'), url=advanced_url, disabled=advanced_disabled)
# permissions
if self.permissions_activated:
permissions_url = admin_reverse('cms_page_permissions', args=(self.page.pk,))
permission_disabled = not edit_mode or not self.page.has_change_permissions_permission(self.request)
current_page_menu.add_modal_item(_('Permissions'), url=permissions_url, disabled=permission_disabled)
# dates settings
dates_url = admin_reverse('cms_page_dates', args=(self.page.pk,))
current_page_menu.add_modal_item(_('Publishing dates'), url=dates_url, disabled=not edit_mode)
# third break
current_page_menu.add_break(PAGE_MENU_THIRD_BREAK)
# navigation toggle
nav_title = _('Hide in navigation') if self.page.in_navigation else _('Display in navigation')
nav_action = admin_reverse('cms_page_change_innavigation', args=(self.page.pk,))
current_page_menu.add_ajax_item(nav_title, action=nav_action, disabled=not edit_mode, on_success=refresh)
# publisher
if self.title:
if self.title.published:
publish_title = _('Unpublish page')
publish_url = admin_reverse('cms_page_unpublish', args=(self.page.pk, self.current_lang))
else:
publish_title = _('Publish page')
publish_url = admin_reverse('cms_page_publish_page', args=(self.page.pk, self.current_lang))
current_page_menu.add_ajax_item(publish_title, action=publish_url, disabled=not edit_mode,
on_success=refresh)
# fourth break
current_page_menu.add_break(PAGE_MENU_FOURTH_BREAK)
# delete
delete_url = admin_reverse('cms_page_delete', args=(self.page.pk,))
on_delete_redirect_url = self.get_on_delete_redirect_url()
current_page_menu.add_modal_item(_('Delete page'), url=delete_url, on_close=on_delete_redirect_url,
disabled=not edit_mode)
# last break
current_page_menu.add_break(PAGE_MENU_LAST_BREAK)
# page type
page_type_url = admin_reverse('cms_page_add_page_type')
page_type_url = add_url_parameters(page_type_url, copy_target=self.page.pk, language=self.toolbar.language)
current_page_menu.add_modal_item(_('Save as Page Type'), page_type_url, disabled=not edit_mode)
def add_history_menu(self):
if self.toolbar.edit_mode and self.page:
refresh = self.toolbar.REFRESH_PAGE
history_menu = self.toolbar.get_or_create_menu(HISTORY_MENU_IDENTIFIER, _('History'), position=2)
if is_installed('reversion'):
from cms.utils.reversion_hacks import reversion, Revision
versions = reversion.get_for_object(self.page)
if self.page.revision_id:
current_revision = Revision.objects.get(pk=self.page.revision_id)
has_undo = versions.filter(revision__pk__lt=current_revision.pk).exists()
has_redo = versions.filter(revision__pk__gt=current_revision.pk).exists()
else:
has_redo = False
has_undo = versions.count() > 1
undo_action = admin_reverse('cms_page_undo', args=(self.page.pk,))
redo_action = admin_reverse('cms_page_redo', args=(self.page.pk,))
history_menu.add_ajax_item(_('Undo'), action=undo_action, disabled=not has_undo, on_success=refresh)
history_menu.add_ajax_item(_('Redo'), action=redo_action, disabled=not has_redo, on_success=refresh)
history_menu.add_break(HISTORY_MENU_BREAK)
revert_action = admin_reverse('cms_page_revert_page', args=(self.page.pk, self.current_lang))
revert_question = _('Are you sure you want to revert to live?')
is_enabled = self.page.is_dirty(self.current_lang) and self.page.publisher_public
history_menu.add_ajax_item(_('Revert to live'), action=revert_action, question=revert_question,
disabled=not is_enabled,
on_success=refresh, extra_classes=('cms-toolbar-revert',))
history_menu.add_modal_item(_('View history'), url=admin_reverse('cms_page_history', args=(self.page.pk,)))
| [
"[email protected]"
] | |
b8993aa7ccaea8513fd07124d6005cdbd77c5068 | dd6cf539f20a0143acbdda1ed3f64b18b08a29b5 | /whiteList_v5/Config/2019第一季度脚本/config_fangtoo.py | 8af88ee3c64feb458b58b8f158acd47a212eef70 | [
"MIT"
] | permissive | myirelias/white_list | eec6a900cc3eea0f227425327ab55a6b46ba873d | 2bf344d2dc8f5cb3afbde71e248c019651ee3a7a | refs/heads/master | 2020-04-15T11:10:44.850942 | 2019-11-05T01:51:41 | 2019-11-05T01:51:41 | 164,618,155 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,178 | py | # 房途网
TASK_NAME = 'fangtoo'
# Start URL
START_URL = 'http://www.fangtoo.com/'
# Domain restriction; must be a list
DOMAIN = ['fangtoo']
# Request headers
HEADERS = {
'Host': 'www.fangtoo.com',
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:53.0) Gecko/20100101 Firefox/53.0',
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
'Accept-Language': 'zh-CN,zh;q=0.8,en-US;q=0.5,en;q=0.3',
'Accept-Encoding': 'gzip, deflate',
'Connection': 'keep-alive',
'Upgrade-Insecure-Requests': '1',
'If-Modified-Since': 'Tue, 26 Feb 2019 06:20:30 GMT',
'Cache-Control': 'max-age=0',
}
# XPath rules
XPATHER_HREF = ".//*/@href"
# Field template
# {
# "title": "",
# "news_date": "",
# "source": "",
# "author": "",
# "navigation": "",
# "content": "",
# "editor": "",
# "tags": ""
# },
XPATHER_NEWS_LIST = [
{
"title": "normalize-space(.//*[@class='main-text-cnt']/h1)",
"news_date": "substring(normalize-space(.//*[contains(@class,'time-source')]),1,20)",
"source": "substring-before(substring-after(normalize-space(.//*[contains(@class,'time-source')]),'来源:'),'编辑')",
"author": "",
"navigation": "normalize-space(.//*[@class='urhere'])",
"content": ".//*[@class='main-text']/descendant::text()",
"editor": "substring-after(normalize-space(.//*[contains(@class,'time-source')]),'编辑:')",
"tags": ".//*[@name='keywords']/@content"
},
{
"title": "normalize-space(.//*[@class='title'])",
"news_date": "normalize-space(.//*[@class='info']/span[1])",
"source": "normalize-space(.//*[@class='info']/span[3])",
"author": "",
"navigation": "normalize-space(.//*[@class='channel-name'])",
"content": ".//article/descendant::p/text()",
"editor": "substring-after(normalize-space(.//*[@class='info']/span[2]),'编辑:')",
"tags": ".//*[@name='keywords']/@content"
},
]
# Regex matching rule: the regular expression for static page URLs; pages whose URLs match this pattern will have their news content scraped
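# e.g. it would match paths like /2019-3-c123456.shtml or /2019-12-0456.htm (illustrative examples)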
REGEX_URL = r'/\d*-\d*-\w*\d*\.[s]*htm[l]*'
| [
"[email protected]"
] | |
7e143995d7e137dcb1934363b26de9cf57999bab | 62e58c051128baef9452e7e0eb0b5a83367add26 | /x12/5040/404005040.py | 9579f1c988fb1681ddca7d2da3451a5a5e882284 | [] | no_license | dougvanhorn/bots-grammars | 2eb6c0a6b5231c14a6faf194b932aa614809076c | 09db18d9d9bd9d92cefbf00f1c0de1c590fe3d0d | refs/heads/master | 2021-05-16T12:55:58.022904 | 2019-05-17T15:22:23 | 2019-05-17T15:22:23 | 105,274,633 | 0 | 0 | null | 2017-09-29T13:21:21 | 2017-09-29T13:21:21 | null | UTF-8 | Python | false | false | 3,935 | py | from bots.botsconfig import *
from records005040 import recorddefs
syntax = {
'version': '00504',
'functionalgroup': 'SR',
}
structure = [
{ID: 'ST', MIN: 1, MAX: 1, LEVEL: [
{ID: 'ZC1', MIN: 0, MAX: 1},
{ID: 'BX', MIN: 0, MAX: 1},
{ID: 'BNX', MIN: 0, MAX: 1},
{ID: 'M3', MIN: 1, MAX: 1},
{ID: 'N9', MIN: 1, MAX: 30},
{ID: 'CM', MIN: 0, MAX: 2},
{ID: 'M1', MIN: 0, MAX: 1},
{ID: 'DTM', MIN: 0, MAX: 5},
{ID: 'N7', MIN: 1, MAX: 500, LEVEL: [
{ID: 'EM', MIN: 0, MAX: 1},
{ID: 'VC', MIN: 0, MAX: 36, LEVEL: [
{ID: 'N1', MIN: 0, MAX: 2, LEVEL: [
{ID: 'N3', MIN: 0, MAX: 2},
{ID: 'N4', MIN: 0, MAX: 1},
{ID: 'H3', MIN: 0, MAX: 1},
]},
]},
{ID: 'M7', MIN: 0, MAX: 5},
{ID: 'N5', MIN: 0, MAX: 1},
{ID: 'IC', MIN: 0, MAX: 1},
{ID: 'IM', MIN: 0, MAX: 1},
{ID: 'M12', MIN: 0, MAX: 2},
{ID: 'E1', MIN: 0, MAX: 2, LEVEL: [
{ID: 'E4', MIN: 0, MAX: 1},
{ID: 'E5', MIN: 0, MAX: 13},
{ID: 'PI', MIN: 0, MAX: 1},
]},
{ID: 'GA', MIN: 0, MAX: 15},
{ID: 'REF', MIN: 0, MAX: 99, LEVEL: [
{ID: 'N9', MIN: 0, MAX: 10},
{ID: 'N10', MIN: 0, MAX: 15},
{ID: 'N1', MIN: 0, MAX: 5, LEVEL: [
{ID: 'N3', MIN: 0, MAX: 1},
{ID: 'N4', MIN: 0, MAX: 1},
]},
]},
]},
{ID: 'NA', MIN: 0, MAX: 10},
{ID: 'F9', MIN: 1, MAX: 1},
{ID: 'D9', MIN: 1, MAX: 1},
{ID: 'N1', MIN: 1, MAX: 15, LEVEL: [
{ID: 'N2', MIN: 0, MAX: 2},
{ID: 'N3', MIN: 0, MAX: 2},
{ID: 'N4', MIN: 0, MAX: 1},
{ID: 'REF', MIN: 0, MAX: 2},
{ID: 'PER', MIN: 0, MAX: 2},
{ID: 'BL', MIN: 0, MAX: 12},
]},
{ID: 'S1', MIN: 0, MAX: 12, LEVEL: [
{ID: 'S2', MIN: 0, MAX: 2},
{ID: 'S9', MIN: 0, MAX: 1},
{ID: 'N1', MIN: 0, MAX: 1},
{ID: 'N2', MIN: 0, MAX: 1},
{ID: 'N3', MIN: 0, MAX: 1},
{ID: 'N4', MIN: 0, MAX: 1},
{ID: 'PER', MIN: 0, MAX: 1},
]},
{ID: 'R2', MIN: 1, MAX: 13},
{ID: 'R9', MIN: 0, MAX: 1},
{ID: 'E1', MIN: 0, MAX: 2, LEVEL: [
{ID: 'E4', MIN: 0, MAX: 1},
{ID: 'E5', MIN: 0, MAX: 13},
{ID: 'PI', MIN: 0, MAX: 1},
]},
{ID: 'H3', MIN: 0, MAX: 20},
{ID: 'PS', MIN: 0, MAX: 5},
{ID: 'LX', MIN: 1, MAX: 25, LEVEL: [
{ID: 'L5', MIN: 1, MAX: 15},
{ID: 'L0', MIN: 0, MAX: 25, LEVEL: [
{ID: 'MEA', MIN: 0, MAX: 3},
{ID: 'L1', MIN: 0, MAX: 10},
{ID: 'PI', MIN: 0, MAX: 30, LEVEL: [
{ID: 'CD', MIN: 0, MAX: 10},
]},
]},
{ID: 'X1', MIN: 0, MAX: 6},
]},
{ID: 'T1', MIN: 0, MAX: 64, LEVEL: [
{ID: 'T2', MIN: 0, MAX: 30},
{ID: 'T3', MIN: 0, MAX: 12},
{ID: 'T6', MIN: 0, MAX: 1},
{ID: 'T8', MIN: 0, MAX: 99},
]},
{ID: 'L3', MIN: 0, MAX: 1},
{ID: 'LS', MIN: 0, MAX: 1, LEVEL: [
{ID: 'LH1', MIN: 1, MAX: 1000, LEVEL: [
{ID: 'LH2', MIN: 0, MAX: 4},
{ID: 'LH3', MIN: 0, MAX: 10},
{ID: 'LFH', MIN: 0, MAX: 20},
{ID: 'LEP', MIN: 0, MAX: 3},
{ID: 'LH4', MIN: 0, MAX: 4},
{ID: 'LHT', MIN: 0, MAX: 3},
{ID: 'LHR', MIN: 0, MAX: 5},
{ID: 'PER', MIN: 0, MAX: 5},
{ID: 'N1', MIN: 0, MAX: 10, LEVEL: [
{ID: 'N3', MIN: 0, MAX: 2},
{ID: 'N4', MIN: 0, MAX: 1},
{ID: 'PER', MIN: 0, MAX: 2},
]},
]},
{ID: 'LE', MIN: 1, MAX: 1},
]},
{ID: 'PER', MIN: 0, MAX: 5},
{ID: 'LH2', MIN: 0, MAX: 6},
{ID: 'LHR', MIN: 0, MAX: 1},
{ID: 'LH6', MIN: 0, MAX: 5},
{ID: 'XH', MIN: 0, MAX: 1},
{ID: 'X7', MIN: 0, MAX: 10},
{ID: 'SE', MIN: 1, MAX: 1},
]}
]
| [
"[email protected]"
] | |
451b171eedc679c5013c736a0918b085ca36b46c | fbea3e5d1ecab07b15887b321cd650349f22df95 | /peterklepec_webpage/cms/dashboard.py | 4df6b11b0df775c579e8ece440fb36e4e21e822a | [] | no_license | simonrakovic/peterklepec | 2e2004c04450898107da61314ec2ba03ee93bbe7 | 58114cfbd4f85d08a4749aa34492f52e11a9925e | refs/heads/master | 2021-01-21T07:53:35.429724 | 2016-09-21T12:34:52 | 2016-09-21T12:34:52 | 24,656,406 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,632 | py | """
This file was generated with the customdashboard management command and
contains the class for the main dashboard.
To activate your index dashboard add the following to your settings.py::
GRAPPELLI_INDEX_DASHBOARD = 'peterklepec_webpage.dashboard.CustomIndexDashboard'
"""
from django.utils.translation import ugettext_lazy as _
from django.core.urlresolvers import reverse
from grappelli.dashboard import modules, Dashboard
from grappelli.dashboard.utils import get_admin_site_name
class CustomIndexDashboard(Dashboard):
"""
Custom index dashboard for www.
"""
def init_with_context(self, context):
site_name = get_admin_site_name(context)
# append a group for "Administration" & "Applications"
self.children.append(modules.Group(
_('Group: Administration & Applications'),
column=1,
collapsible=True,
children = [
modules.AppList(
_('Administration'),
column=1,
collapsible=False,
models=('django.contrib.*',),
),
modules.AppList(
_('Applications'),
column=1,
css_classes=('collapse closed',),
exclude=('django.contrib.*',),
)
]
))
# append an app list module for "Applications"
self.children.append(modules.AppList(
_('AppList: Applications'),
collapsible=True,
column=1,
css_classes=('collapse closed',),
exclude=('django.contrib.*',),
))
# append an app list module for "Administration"
self.children.append(modules.ModelList(
_('ModelList: Administration'),
column=1,
collapsible=False,
models=('django.contrib.*',),
))
# append another link list module for "support".
self.children.append(modules.LinkList(
_('Media Management'),
column=2,
children=[
{
'title': _('FileBrowser'),
'url': '/admin/filebrowser/browse/',
'external': False,
},
]
))
# append another link list module for "support".
self.children.append(modules.LinkList(
_('Support'),
column=2,
children=[
{
'title': _('Django Documentation'),
'url': 'http://docs.djangoproject.com/',
'external': True,
},
{
'title': _('Grappelli Documentation'),
'url': 'http://packages.python.org/django-grappelli/',
'external': True,
},
{
'title': _('Grappelli Google-Code'),
'url': 'http://code.google.com/p/django-grappelli/',
'external': True,
},
]
))
# append a feed module
self.children.append(modules.Feed(
_('Latest Django News'),
column=2,
feed_url='http://www.djangoproject.com/rss/weblog/',
limit=5
))
# append a recent actions module
self.children.append(modules.RecentActions(
_('Recent Actions'),
limit=5,
collapsible=False,
column=3,
))
| [
"[email protected]"
] | |
9500c48e7a7fbda723ca05e82f6932e50a80cc2a | 53ba1c29d6122b8afeb6578f1a338621c61f507d | /FCCeeAnalyses/ZH_Zmumu/dataframe/plots.py | 1478a35a3cf293e86dc3db7e5dd11aed9830304d | [] | no_license | selvaggi/FCChhAnalyses | dd420f5bdba60147322cc16b49479ca200e8a54c | 8f397c77229c68ad87947e6912466da4b0a6654b | refs/heads/master | 2021-04-27T17:08:28.824726 | 2020-05-21T07:42:43 | 2020-05-21T07:42:43 | 136,484,120 | 1 | 0 | null | 2019-05-02T13:12:39 | 2018-06-07T13:49:23 | Python | UTF-8 | Python | false | false | 1,547 | py | import ROOT
# global parameters
intLumi = 5.0e+06 #in pb-1
ana_tex = "e^{+}e^{-} #rightarrow ZH #rightarrow #mu^{+}#mu^{-} + X"
delphesVersion = "3.4.2"
energy = 240.0
collider = "FCC-ee"
inputDir = "Outputs/FCCee/ZH_Zmumu/"
formats = ['png','pdf']
yaxis = ['lin','log']
stacksig = ['stack','nostack']
outdir = 'Outputs/FCCee/ZH_Zmumu/plots/'
variables = ['mz','mz_zoom','nbjets','leptonic_recoil_m','leptonic_recoil_m_zoom']
###Dictonnary with the analysis name as a key, and the list of selections to be plotted for this analysis. The name of the selections should be the same than in the final selection
selections = {}
selections['ZH'] = ["sel0","sel1","sel2"]
selections['ZH_2'] = ["sel0","sel2"]
extralabel = {}
extralabel['sel0'] = "Selection: N_{Z} = 1"
extralabel['sel1'] = "Selection: N_{Z} = 1; 80 GeV < m_{Z} < 100 GeV"
extralabel['sel2'] = "Selection: N_{Z} = 1; 80 GeV < m_{Z} < 100 GeV; N_{b} = 2"
colors = {}
colors['ZH'] = ROOT.kRed
colors['WW'] = ROOT.kBlue+1
colors['ZZ'] = ROOT.kGreen+2
colors['VV'] = ROOT.kGreen+3
plots = {}
plots['ZH'] = {'signal':{'ZH':['p8_ee_ZH_ecm240']},
'backgrounds':{'WW':['p8_ee_WW_ecm240'],
'ZZ':['p8_ee_ZZ_ecm240']}
}
plots['ZH_2'] = {'signal':{'ZH':['p8_ee_ZH_ecm240']},
'backgrounds':{'VV':['p8_ee_WW_ecm240','p8_ee_ZZ_ecm240']}
}
legend = {}
legend['ZH'] = 'ZH'
legend['WW'] = 'WW'
legend['ZZ'] = 'ZZ'
legend['VV'] = 'VV boson'
| [
"[email protected]"
] | |
51cf6cb246c8fc7206420f5ca0b4ca38daf503b7 | 9a2413b572c0f89b1f80899a10237657d9393bd6 | /sdk/python/pulumi_keycloak/generic_client_role_mapper.py | 7482298398ca25d54904f73e31411f577ced592b | [
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference",
"MIT"
] | permissive | jaxxstorm/pulumi-keycloak | 5c25363ece6af49dad40bd693ce07b1fa0dedd74 | 2fc7b1060b725a40d2ada745aa0d10130243a0b5 | refs/heads/master | 2022-10-10T13:11:04.290703 | 2020-06-05T19:11:19 | 2020-06-05T19:11:19 | 270,870,883 | 0 | 0 | NOASSERTION | 2020-06-09T01:08:56 | 2020-06-09T01:08:55 | null | UTF-8 | Python | false | false | 4,797 | py | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import json
import warnings
import pulumi
import pulumi.runtime
from typing import Union
from . import utilities, tables
class GenericClientRoleMapper(pulumi.CustomResource):
client_id: pulumi.Output[str]
"""
The destination client of the client role. Cannot be used at the same time as client_scope_id.
"""
client_scope_id: pulumi.Output[str]
"""
The destination client scope of the client role. Cannot be used at the same time as client_id.
"""
realm_id: pulumi.Output[str]
"""
The realm id where the associated client or client scope exists.
"""
role_id: pulumi.Output[str]
"""
Id of the role to assign
"""
def __init__(__self__, resource_name, opts=None, client_id=None, client_scope_id=None, realm_id=None, role_id=None, __props__=None, __name__=None, __opts__=None):
"""
Create a GenericClientRoleMapper resource with the given unique name, props, and options.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] client_id: The destination client of the client role. Cannot be used at the same time as client_scope_id.
:param pulumi.Input[str] client_scope_id: The destination client scope of the client role. Cannot be used at the same time as client_id.
:param pulumi.Input[str] realm_id: The realm id where the associated client or client scope exists.
:param pulumi.Input[str] role_id: Id of the role to assign
"""
if __name__ is not None:
warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
resource_name = __name__
if __opts__ is not None:
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = dict()
__props__['client_id'] = client_id
__props__['client_scope_id'] = client_scope_id
if realm_id is None:
raise TypeError("Missing required property 'realm_id'")
__props__['realm_id'] = realm_id
if role_id is None:
raise TypeError("Missing required property 'role_id'")
__props__['role_id'] = role_id
super(GenericClientRoleMapper, __self__).__init__(
'keycloak:index/genericClientRoleMapper:GenericClientRoleMapper',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name, id, opts=None, client_id=None, client_scope_id=None, realm_id=None, role_id=None):
"""
Get an existing GenericClientRoleMapper resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param str id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] client_id: The destination client of the client role. Cannot be used at the same time as client_scope_id.
:param pulumi.Input[str] client_scope_id: The destination client scope of the client role. Cannot be used at the same time as client_id.
:param pulumi.Input[str] realm_id: The realm id where the associated client or client scope exists.
:param pulumi.Input[str] role_id: Id of the role to assign
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = dict()
__props__["client_id"] = client_id
__props__["client_scope_id"] = client_scope_id
__props__["realm_id"] = realm_id
__props__["role_id"] = role_id
return GenericClientRoleMapper(resource_name, opts=opts, __props__=__props__)
def translate_output_property(self, prop):
return tables._CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
def translate_input_property(self, prop):
return tables._SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
| [
"[email protected]"
] | |
4c10cbd3c907d2b700a4587d67043ec7b66c5280 | f098c361ee79bb8b7a8402fcf20b37f17fb36983 | /Back-End/Python/Basics/Part -3- Hash Maps/04- Serialization and Deserialization/_04_JSON_singledispatch.py | 5bac467735833efd499c104e8e24e933427f5fe3 | [
"MIT",
"Apache-2.0",
"LicenseRef-scancode-public-domain"
] | permissive | rnsdoodi/Programming-CookBook | 4d619537a6875ffbcb42cbdaf01d80db1feba9b4 | 9bd9c105fdd823aea1c3f391f5018fd1f8f37182 | refs/heads/master | 2023-09-05T22:09:08.282385 | 2021-10-31T11:57:40 | 2021-10-31T11:57:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,389 | py | import json
from datetime import datetime
log_record = {
'time': datetime.utcnow(),
'message': 'Testing...',
'other': {'a', 'b', 'c'}
}
def custom_json_formatter(arg):
if isinstance(arg, datetime):
return arg.isoformat()
elif isinstance(arg, set):
return list(arg)
print(json.dumps(log_record, default=custom_json_formatter))
#{"time": "2020-11-06T03:10:44.482803", "message": "Testing...", "other": ["a", "b", "c"]}
class Person:
def __init__(self, name, age):
self.name = name
self.age = age
self.create_dt = datetime.utcnow()
def __repr__(self):
return f'Person(name={self.name}, age={self.age})'
def toJSON(self):
return {
'name': self.name,
'age': self.age,
'create_dt': self.create_dt.isoformat()
}
p = Person('John', 82)
print(p)
print(p.toJSON())
# {"time": "2020-11-06T03:11:08.677000", "message": "Testing...", "other": ["b", "a", "c"]}
# Person(name=John, age=82)
# {'name': 'John', 'age': 82, 'create_dt': '2020-11-06T03:11:08.677000'}
def custom_json_formatter(arg):
if isinstance(arg, datetime):
return arg.isoformat()
elif isinstance(arg, set):
return list(arg)
elif isinstance(arg, Person):
return arg.toJSON()
log_record = dict(time=datetime.utcnow(),
message='Created new person record',
person=p)
print(json.dumps(log_record, default=custom_json_formatter, indent=2))
# {
# "time": "2020-11-06T03:12:12.624757",
# "message": "Created new person record",
# "person": {
# "name": "John",
# "age": 82,
# "create_dt": "2020-11-06T03:12:12.624757"
# }
# }
class Person:
def __init__(self, name, age):
self.name = name
self.age = age
self.create_dt = datetime.utcnow()
def __repr__(self):
return f'Person(name={self.name}, age={self.age})'
def toJSON(self):
return {
'name': self.name,
'age': self.age,
'create_dt': self.create_dt
}
p = Person('Monty', 100)
log_record = dict(time=datetime.utcnow(),
message='Created new person record',
person=p)
print(json.dumps(log_record, default=custom_json_formatter, indent=2))
# {
# "time": "2020-11-06T03:13:01.412670",
# "message": "Created new person record",
# "person": {
# "name": "Monty",
# "age": 100,
# "create_dt": "2020-11-06T03:13:01.412670"
# }
# }
class Person:
def __init__(self, name, age):
self.name = name
self.age = age
self.create_dt = datetime.utcnow()
def __repr__(self):
return f'Person(name={self.name}, age={self.age})'
def toJSON(self):
return vars(self)
p = Person('Python', 27)
print(p.toJSON())
# {'name': 'Python', 'age': 27, 'create_dt': datetime.datetime(2020, 11, 6, 3, 13, 34, 452006)}
log_record['person'] = p
print(log_record)
#{'time': datetime.datetime(2020, 11, 6, 3, 14, 6, 399677), 'message': 'Created new person record', 'person': Person(name=Python, age=27)}
print(json.dumps(log_record, default=custom_json_formatter, indent=2))
# {
# "time": "2020-11-06T03:14:19.885341",
# "message": "Created new person record",
# "person": {
# "name": "Python",
# "age": 27,
# "create_dt": "2020-11-06T03:14:19.885341"
# }
# }
def custom_json_formatter(arg):
if isinstance(arg, datetime):
return arg.isoformat()
elif isinstance(arg, set):
return list(arg)
else:
try:
return arg.toJSON()
except AttributeError:
try:
return vars(arg)
except TypeError:
return str(arg)
class Point:
def __init__(self, x, y):
self.x = x
self.y = y
def __repr__(self):
return f'Point(x={self.x}, y={self.y})'
pt1 = Point(10, 10)
log_record = dict(time=datetime.utcnow(),
message='Created new point',
point=pt1,
created_by=p)
print(json.dumps(log_record, default=custom_json_formatter, indent=2))
# {
# "time": "2020-11-06T03:18:39.272100",
# "message": "Created new point",
# "point": {
# "x": 10,
# "y": 10
# },
# "created_by": {
# "name": "Python",
# "age": 27,
# "create_dt": "2020-11-06T03:18:39.272100"
# }
# }
| [
"[email protected]"
] | |
cee671906f006b2298a9e9071b8bf2c43320fd39 | 5c7b6f96aef9a2c605c8e16eb0e3f6e2ab958947 | /settings.py | 454c3f98b426329036c61e8949009c6ba0b54d30 | [] | no_license | chapkovski/progressbaracrossrounds | 0137ce4a552edf9027eb092f1d9e0abde5cc6e8e | 62905f17d456fc6a7a57fa1fe91b510593740518 | refs/heads/master | 2021-07-03T20:53:58.758511 | 2020-09-03T22:19:49 | 2020-09-03T22:19:49 | 87,289,409 | 0 | 0 | null | 2021-06-10T20:38:02 | 2017-04-05T09:07:49 | Python | UTF-8 | Python | false | false | 1,797 | py | import os
from os import environ
import dj_database_url
from boto.mturk import qualification
import otree.settings
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
# the environment variable OTREE_PRODUCTION controls whether Django runs in
# DEBUG mode. If OTREE_PRODUCTION==1, then DEBUG=False
if environ.get('OTREE_PRODUCTION') not in {None, '', '0'}:
DEBUG = False
else:
DEBUG = True
ADMIN_USERNAME = 'admin'
# for security, best to set admin password in an environment variable
ADMIN_PASSWORD = environ.get('OTREE_ADMIN_PASSWORD')
AUTH_LEVEL = environ.get('OTREE_AUTH_LEVEL')
# e.g. EUR, CAD, GBP, CHF, CNY, JPY
REAL_WORLD_CURRENCY_CODE = 'USD'
USE_POINTS = True
POINTS_DECIMAL_PLACES = 2
# e.g. en, de, fr, it, ja, zh-hans
# see: https://docs.djangoproject.com/en/1.9/topics/i18n/#term-language-code
LANGUAGE_CODE = 'en'
# if an app is included in SESSION_CONFIGS, you don't need to list it here
INSTALLED_APPS = ['otree', ]
DEMO_PAGE_INTRO_TEXT = """
oTree games
"""
# from here on are qualifications requirements for workers
# see description for requirements on Amazon Mechanical Turk website:
# http://docs.aws.amazon.com/AWSMechTurk/latest/AWSMturkAPI/ApiReference_QualificationRequirementDataStructureArticle.html
# and also in docs for boto:
# https://boto.readthedocs.org/en/latest/ref/mturk.html?highlight=mturk#module-boto.mturk.qualification
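# Illustrative sketch only (not used by this study): a typical oTree
# 'mturk_hit_settings' block with boto qualification requirements would look
# roughly like the following; key names may differ across oTree versions.
# mturk_hit_settings = {
#     'keywords': ['bonus', 'study'],
#     'title': 'Title for your experiment',
#     'description': 'Description for your experiment',
#     'frame_height': 500,
#     'preview_template': 'global/MTurkPreview.html',
#     'minutes_allotted_per_assignment': 60,
#     'expiration_hours': 7 * 24,
#     'qualification_requirements': [
#         qualification.LocaleRequirement("EqualTo", "US"),
#         qualification.PercentAssignmentsApprovedRequirement("GreaterThanOrEqualTo", 50),
#         qualification.NumberHitsApprovedRequirement("GreaterThanOrEqualTo", 5),
#     ],
# }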
SESSION_CONFIG_DEFAULTS = {
'real_world_currency_per_point': 0.000,
'participation_fee': 0.00,
'doc': "",
}
SESSION_CONFIGS = [
{
'name': 'bigfive',
'display_name': 'Progress bar across rounds',
'num_demo_participants': 1,
'app_sequence': ['bigfive'],
},
]
SECRET_KEY = 'whatever' | [
"[email protected]"
] | |
6372a17f6b0d06d1112a8594098c1279fd098c30 | 6eb58e32b469c37428185ab4456184905a5b4fb5 | /analysis_code/newdata_dataread_BP_MHTSTRATEGY_v2_hacktemp.py | c830e7ec48f7165d32ca2acd9803182abd11b3b1 | [] | no_license | rchenmit/mht_analysis | 0b8bfff7730df835975c7c41d65f007ad269e3a9 | 678d4419bdaed9ed9d0041df3a2cd8638074590f | refs/heads/master | 2020-04-06T03:40:41.577209 | 2015-01-12T00:14:48 | 2015-01-12T00:14:48 | 19,548,658 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,094 | py | ## Robert Chen
## Monday 3/12/2014
##
## trying to parse this in python
##
import os
import sys
if os.name == 'nt': #'nt' = windows
sys.path.append('C:\\anaconda\\lib\\site-packages') #in windows, alot of modules were installed with Anaconda
import pandas as pd
import numpy as np
import math
import copy
import csv
import scipy as s
import openpyxl
from openpyxl import load_workbook
import matplotlib.pyplot as plt
import matplotlib
from datetime import datetime
from dateutil import parser
from collections import Counter
#import pickle
if sys.version_info.major == 3:
import pickle
else:
import cPickle as pickle
## ENTER import files ##########################################################################################################
datadir = '../../data/new_data_20140416/Data_20140409/'
filename = datadir + 'Meds_DD_04082014.csv'
file_classes = datadir + 'MedClasses.xlsx'
file_BP_clinician = datadir + 'mht_strategy_20140407.txt'
file_BP_record = datadir + 'BP_04082014.csv'
file_eGFR_record = datadir + 'EGFR_04082014.csv'
pickle_dir = '../analysis_output/pickle/'
if not os.path.exists(pickle_dir):
os.makedirs(pickle_dir)
#pickle
with open(pickle_dir + "df_bp_clinician.pickle", "wb") as output_file:
pickle.dump(df_bp_clinician, output_file)
output_file.close()
with open(pickle_dir + "df_bp_record.pickle", "wb") as output_file:
pickle.dump(df_bp_record, output_file)
output_file.close()
with open(pickle_dir + "df_egfr_record.pickle", "wb") as output_file:
pickle.dump(df_egfr_record, output_file)
output_file.close()
## analyze recorded BP's: using BP.txt (reported numbers)#################################################################################
list_ruid = list(set(df_bp_clinician.index.values)) #list of floats
#earliest and latest possible date : for throwing out bad data
early_date = datetime(1990,1,1)
late_date = datetime.today()
#make dictionary of BP's key'd by RUID
d_bp_record = dict()
cnt = 0
print("bulding dictionary of recorded BP's (346K lines total)-----------------\n")
for i in range(len(df_bp_record)):
cnt+=1
if (cnt % 10000 == 0):
print(cnt)
key = df_bp_record.index[i]
indexes_for_df = np.array([])
data = []
this_date = parser.parse(df_bp_record.iloc[i]['MEASURE_DATE']) ##PARSE THE DATE OUT!
bool_this_date_good = this_date > early_date and this_date < late_date
indexes_for_df = np.append(indexes_for_df, this_date)
if df_bp_record.iloc[i]['SYSTOLIC'].isdigit() and df_bp_record.iloc[i]['DIASTOLIC'].isdigit() and bool_this_date_good:
data.append([int(df_bp_record.iloc[i]['SYSTOLIC']), int(df_bp_record.iloc[i]['DIASTOLIC'])]) #CAST ELEMENTS AS INTEGERS!!!!
if key in d_bp_record: #then append
d_bp_record[key] = d_bp_record[key].append(pd.DataFrame(data, index = indexes_for_df, columns = ['SYSTOLIC', 'DIASTOLIC']))
else: #then initialize
d_bp_record[key] = pd.DataFrame(data, index = indexes_for_df, columns = ['SYSTOLIC', 'DIASTOLIC'])
#add in status at each time point
print("calculating BP control status from recorded numbers: \n")
for key in d_bp_record: #loop thru the keys in dictionary
d_bp_record[key]['STATUS'] = 0
bool_condition_systolic = d_bp_record[key]['SYSTOLIC'] < 140 #in control if SYSTOLIC < 140
bool_condition_diastolic = d_bp_record[key]['DIASTOLIC'] < 90 #in control if DIASTOLIC < 90
bool_condition_INCONTROL = bool_condition_systolic & bool_condition_diastolic
    d_bp_record[key].loc[bool_condition_INCONTROL, 'STATUS'] = 1 # 1 => IN CONTROL
    d_bp_record[key].loc[~bool_condition_INCONTROL, 'STATUS'] = -1 # -1 => OUT OF CONTROL
#make dictionary of BP Control Status (at the patient level, ie mostly in control or out of control)
print("calculating intervals of in control vs out of control from recorded numbers: \n")
d_bp_status_pt_level = dict()
for key in d_bp_record:
d_days_in_out = {-1: 0, 1:0}
ts_status_this_pt = d_bp_record[key]['STATUS'].sort_index()
last_status = ts_status_this_pt[0]
last_timestamp = ts_status_this_pt.index[0]
    if len(ts_status_this_pt) > 1 and (max(ts_status_this_pt.index) - min(ts_status_this_pt.index)).days > 1: # if there is more than one entry and they span more than one day (i.e. they're not all on the same day)
#loop thru the timeSeries of status for this patient
for timestamp in ts_status_this_pt.index:
time_delta = (timestamp - last_timestamp).days
d_days_in_out[last_status] += time_delta #add the time that has passed
if ts_status_this_pt[timestamp].size > 1:
status_at_this_timestamp = ts_status_this_pt[timestamp][-1] #pick the last recorded status for this timestamp
if status_at_this_timestamp != last_status: #if the status changed
last_status = status_at_this_timestamp
else:
status_at_this_timestamp = ts_status_this_pt[timestamp]
if status_at_this_timestamp != last_status: #if the status changed
last_status = status_at_this_timestamp #then change last_status to reflect this so that you add to the right status for the next timestamp
last_timestamp = timestamp
        #now count how many days in/out and determine if mostly in, mostly out, or mixed
num_in = d_days_in_out[1]
num_out = d_days_in_out[-1]
else: #if only one BP measurement was taken for the patient
if last_status == 1:
num_in = 1
num_out = 0
else:
num_in = 0
num_out = 1
    # classify the patient: 1 = mostly in control, -1 = mostly out of control, 0 = mixed
    if num_in == 0 and num_out == 0:
        print("ERROR 0: no days in or out! " + str(key))
        d_bp_status_pt_level[key] = 0
    elif num_out == 0: # only in-control days observed
        if num_in > num_out:
            d_bp_status_pt_level[key] = 1
        else:
            print("ERROR1 - check!")
    elif num_in == 0: # only out-of-control days observed
        if num_out > num_in:
            d_bp_status_pt_level[key] = -1
        else:
            print("ERROR2 - check!")
    elif num_in / float(num_out) > 1.5: # substantially more days in control
        d_bp_status_pt_level[key] = 1
    elif num_out / float(num_in) > 1.5: # substantially more days out of control
        d_bp_status_pt_level[key] = -1
    else:
        d_bp_status_pt_level[key] = 0 # mixed: neither status dominates
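# e.g. num_in=200, num_out=100 -> ratio 2.0 > 1.5 -> labeled 1 (mostly in control);
#      num_in=120, num_out=100 -> neither ratio exceeds 1.5 -> labeled 0 (mixed).
# (illustrative numbers only, not taken from the data)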
#print counts
print("number patients with each control class (from numbers: ")
counter_control_status = Counter(val for val in d_bp_status_pt_level.values())
print(counter_control_status)
#pickle:
with open(pickle_dir + "d_bp_record.pickle", "wb") as output_file:
pickle.dump(d_bp_record, output_file)
output_file.close()
with open(pickle_dir + "d_bp_status_pt_level.pickle", "wb") as output_file:
pickle.dump(d_bp_status_pt_level, output_file)
output_file.close()
with open(pickle_dir + "list_ruid.pickle", "wb") as output_file:
pickle.dump(list_ruid, output_file)
output_file.close()
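# Downstream scripts can reload these objects, e.g. (illustrative only):
# with open(pickle_dir + "d_bp_record.pickle", "rb") as f:
#     d_bp_record = pickle.load(f)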
| [
"[email protected]"
] | |
e410730572e9364ca72db41147389438c4725f12 | 38c10c01007624cd2056884f25e0d6ab85442194 | /extensions/extensions.gypi | d7306813a707108e1338e08175390f6649ec65d3 | [
"BSD-3-Clause"
] | permissive | zenoalbisser/chromium | 6ecf37b6c030c84f1b26282bc4ef95769c62a9b2 | e71f21b9b4b9b839f5093301974a45545dad2691 | refs/heads/master | 2022-12-25T14:23:18.568575 | 2016-07-14T21:49:52 | 2016-07-23T08:02:51 | 63,980,627 | 0 | 2 | BSD-3-Clause | 2022-12-12T12:43:41 | 2016-07-22T20:14:04 | null | UTF-8 | Python | false | false | 51,840 | gypi | # Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
{
'variables': {
'extensions_common_constants_sources': [
'common/constants.cc',
'common/constants.h',
],
'extensions_common_mojo_sources': [
'common/mojo/keep_alive.mojom',
'common/mojo/stash.mojom',
],
'extensions_common_sources': [
'common/api/bluetooth/bluetooth_manifest_data.cc',
'common/api/bluetooth/bluetooth_manifest_data.h',
'common/api/bluetooth/bluetooth_manifest_handler.cc',
'common/api/bluetooth/bluetooth_manifest_handler.h',
'common/api/bluetooth/bluetooth_manifest_permission.cc',
'common/api/bluetooth/bluetooth_manifest_permission.h',
'common/api/declarative/declarative_manifest_data.cc',
'common/api/declarative/declarative_manifest_data.h',
'common/api/declarative/declarative_manifest_handler.cc',
'common/api/declarative/declarative_manifest_handler.h',
'common/api/messaging/message.h',
'common/api/printer_provider/usb_printer_manifest_data.cc',
'common/api/printer_provider/usb_printer_manifest_data.h',
'common/api/printer_provider/usb_printer_manifest_handler.cc',
'common/api/printer_provider/usb_printer_manifest_handler.h',
'common/api/sockets/sockets_manifest_data.cc',
'common/api/sockets/sockets_manifest_data.h',
'common/api/sockets/sockets_manifest_handler.cc',
'common/api/sockets/sockets_manifest_handler.h',
'common/api/sockets/sockets_manifest_permission.cc',
'common/api/sockets/sockets_manifest_permission.h',
'common/cast/cast_cert_validator.cc',
'common/cast/cast_cert_validator.h',
'common/common_manifest_handlers.cc',
'common/common_manifest_handlers.h',
'common/csp_validator.cc',
'common/csp_validator.h',
'common/dom_action_types.h',
'common/draggable_region.cc',
'common/draggable_region.h',
'common/error_utils.cc',
'common/error_utils.h',
'common/event_filter.cc',
'common/event_filter.h',
'common/event_filtering_info.cc',
'common/event_filtering_info.h',
'common/event_matcher.cc',
'common/event_matcher.h',
'common/extension.cc',
'common/extension.h',
'common/extension_api.cc',
'common/extension_api.h',
'common/extension_icon_set.cc',
'common/extension_icon_set.h',
'common/extension_l10n_util.cc',
'common/extension_l10n_util.h',
'common/extension_message_generator.cc',
'common/extension_message_generator.h',
'common/extension_messages.cc',
'common/extension_messages.h',
'common/extension_paths.cc',
'common/extension_paths.h',
'common/extension_resource.cc',
'common/extension_resource.h',
'common/extension_set.cc',
'common/extension_set.h',
'common/extension_urls.cc',
'common/extension_urls.h',
'common/extension_utility_messages.h',
'common/extensions_client.cc',
'common/extensions_client.h',
'common/feature_switch.cc',
'common/feature_switch.h',
'common/features/api_feature.cc',
'common/features/api_feature.h',
'common/features/base_feature_provider.cc',
'common/features/base_feature_provider.h',
'common/features/behavior_feature.cc',
'common/features/behavior_feature.h',
'common/features/complex_feature.cc',
'common/features/complex_feature.h',
'common/features/feature.cc',
'common/features/feature.h',
'common/features/feature_provider.cc',
'common/features/feature_provider.h',
'common/features/feature_util.h',
'common/features/json_feature_provider_source.cc',
'common/features/json_feature_provider_source.h',
'common/features/manifest_feature.cc',
'common/features/manifest_feature.h',
'common/features/permission_feature.cc',
'common/features/permission_feature.h',
'common/features/simple_feature.cc',
'common/features/simple_feature.h',
'common/features/simple_feature_filter.cc',
'common/features/simple_feature_filter.h',
'common/file_util.cc',
'common/file_util.h',
'common/guest_view/extensions_guest_view_messages.h',
'common/host_id.cc',
'common/host_id.h',
'common/image_util.cc',
'common/image_util.h',
'common/install_warning.cc',
'common/install_warning.h',
'common/manifest.cc',
'common/manifest.h',
'common/manifest_constants.cc',
'common/manifest_constants.h',
'common/manifest_handler.cc',
'common/manifest_handler.h',
'common/manifest_handler_helpers.cc',
'common/manifest_handler_helpers.h',
'common/manifest_handlers/app_isolation_info.cc',
'common/manifest_handlers/app_isolation_info.h',
'common/manifest_handlers/background_info.cc',
'common/manifest_handlers/background_info.h',
'common/manifest_handlers/content_capabilities_handler.cc',
'common/manifest_handlers/content_capabilities_handler.h',
'common/manifest_handlers/csp_info.cc',
'common/manifest_handlers/csp_info.h',
'common/manifest_handlers/default_locale_handler.cc',
'common/manifest_handlers/default_locale_handler.h',
'common/manifest_handlers/externally_connectable.cc',
'common/manifest_handlers/externally_connectable.h',
'common/manifest_handlers/file_handler_info.cc',
'common/manifest_handlers/file_handler_info.h',
'common/manifest_handlers/icons_handler.cc',
'common/manifest_handlers/icons_handler.h',
'common/manifest_handlers/incognito_info.cc',
'common/manifest_handlers/incognito_info.h',
'common/manifest_handlers/kiosk_mode_info.cc',
'common/manifest_handlers/kiosk_mode_info.h',
'common/manifest_handlers/launcher_page_info.cc',
'common/manifest_handlers/launcher_page_info.h',
'common/manifest_handlers/mime_types_handler.cc',
'common/manifest_handlers/mime_types_handler.h',
'common/manifest_handlers/oauth2_manifest_handler.cc',
'common/manifest_handlers/oauth2_manifest_handler.h',
'common/manifest_handlers/offline_enabled_info.cc',
'common/manifest_handlers/offline_enabled_info.h',
'common/manifest_handlers/options_page_info.cc',
'common/manifest_handlers/options_page_info.h',
'common/manifest_handlers/permissions_parser.cc',
'common/manifest_handlers/permissions_parser.h',
'common/manifest_handlers/requirements_info.cc',
'common/manifest_handlers/requirements_info.h',
'common/manifest_handlers/sandboxed_page_info.cc',
'common/manifest_handlers/sandboxed_page_info.h',
'common/manifest_handlers/shared_module_info.cc',
'common/manifest_handlers/shared_module_info.h',
'common/manifest_handlers/web_accessible_resources_info.cc',
'common/manifest_handlers/web_accessible_resources_info.h',
'common/manifest_handlers/webview_info.cc',
'common/manifest_handlers/webview_info.h',
'common/manifest_url_handlers.cc',
'common/manifest_url_handlers.h',
'common/message_bundle.cc',
'common/message_bundle.h',
'common/one_shot_event.cc',
'common/one_shot_event.h',
'common/permissions/api_permission.cc',
'common/permissions/api_permission.h',
'common/permissions/api_permission_set.cc',
'common/permissions/api_permission_set.h',
'common/permissions/base_set_operators.h',
'common/permissions/extensions_api_permissions.cc',
'common/permissions/extensions_api_permissions.h',
'common/permissions/manifest_permission.cc',
'common/permissions/manifest_permission.h',
'common/permissions/manifest_permission_set.cc',
'common/permissions/manifest_permission_set.h',
'common/permissions/media_galleries_permission.cc',
'common/permissions/media_galleries_permission.h',
'common/permissions/media_galleries_permission_data.cc',
'common/permissions/media_galleries_permission_data.h',
'common/permissions/permission_message.cc',
'common/permissions/permission_message.h',
'common/permissions/permission_message_provider.cc',
'common/permissions/permission_message_provider.h',
'common/permissions/permission_message_util.cc',
'common/permissions/permission_message_util.h',
'common/permissions/permission_set.cc',
'common/permissions/permission_set.h',
'common/permissions/permissions_data.cc',
'common/permissions/permissions_data.h',
'common/permissions/permissions_info.cc',
'common/permissions/permissions_info.h',
'common/permissions/permissions_provider.h',
'common/permissions/set_disjunction_permission.h',
'common/permissions/settings_override_permission.cc',
'common/permissions/settings_override_permission.h',
'common/permissions/socket_permission.cc',
'common/permissions/socket_permission.h',
'common/permissions/socket_permission_data.cc',
'common/permissions/socket_permission_data.h',
'common/permissions/socket_permission_entry.cc',
'common/permissions/socket_permission_entry.h',
'common/permissions/usb_device_permission.cc',
'common/permissions/usb_device_permission.h',
'common/permissions/usb_device_permission_data.cc',
'common/permissions/usb_device_permission_data.h',
'common/stack_frame.cc',
'common/stack_frame.h',
'common/switches.cc',
'common/switches.h',
'common/update_manifest.cc',
'common/update_manifest.h',
'common/url_pattern.cc',
'common/url_pattern.h',
'common/url_pattern_set.cc',
'common/url_pattern_set.h',
'common/user_script.cc',
'common/user_script.h',
'common/value_builder.cc',
'common/value_builder.h',
'common/value_counter.cc',
'common/value_counter.h',
'common/view_type.cc',
'common/view_type.h',
],
'extensions_common_sources_nacl': [
'common/manifest_handlers/nacl_modules_handler.cc',
'common/manifest_handlers/nacl_modules_handler.h',
],
'extensions_browser_sources': [
# NOTE: When moving an API out of Chrome be sure to verify that the
# Android build still compiles. See conditions below.
'browser/api/activity_log/web_request_constants.cc',
'browser/api/activity_log/web_request_constants.h',
'browser/api/alarms/alarm_manager.cc',
'browser/api/alarms/alarm_manager.h',
'browser/api/alarms/alarms_api.cc',
'browser/api/alarms/alarms_api.h',
'browser/api/api_resource.cc',
'browser/api/api_resource.h',
'browser/api/api_resource_manager.h',
'browser/api/app_current_window_internal/app_current_window_internal_api.cc',
'browser/api/app_current_window_internal/app_current_window_internal_api.h',
'browser/api/app_runtime/app_runtime_api.cc',
'browser/api/app_runtime/app_runtime_api.h',
'browser/api/app_window/app_window_api.cc',
'browser/api/app_window/app_window_api.h',
'browser/api/async_api_function.cc',
'browser/api/async_api_function.h',
'browser/api/audio/audio_api.cc',
'browser/api/audio/audio_api.h',
'browser/api/audio/audio_service.h',
'browser/api/bluetooth/bluetooth_api.cc',
'browser/api/bluetooth/bluetooth_api.h',
'browser/api/bluetooth/bluetooth_api_pairing_delegate.cc',
'browser/api/bluetooth/bluetooth_api_pairing_delegate.h',
'browser/api/bluetooth/bluetooth_api_utils.cc',
'browser/api/bluetooth/bluetooth_api_utils.h',
'browser/api/bluetooth/bluetooth_event_router.cc',
'browser/api/bluetooth/bluetooth_event_router.h',
'browser/api/bluetooth/bluetooth_extension_function.cc',
'browser/api/bluetooth/bluetooth_extension_function.h',
'browser/api/bluetooth/bluetooth_private_api.cc',
'browser/api/bluetooth/bluetooth_private_api.h',
'browser/api/bluetooth_socket/bluetooth_api_socket.cc',
'browser/api/bluetooth_socket/bluetooth_api_socket.h',
'browser/api/bluetooth_socket/bluetooth_socket_api.cc',
'browser/api/bluetooth_socket/bluetooth_socket_api.h',
'browser/api/bluetooth_socket/bluetooth_socket_event_dispatcher.cc',
'browser/api/bluetooth_socket/bluetooth_socket_event_dispatcher.h',
'browser/api/capture_web_contents_function.cc',
'browser/api/capture_web_contents_function.h',
'browser/api/cast_channel/cast_auth_ica.cc',
'browser/api/cast_channel/cast_auth_ica.h',
'browser/api/cast_channel/cast_auth_util.cc',
'browser/api/cast_channel/cast_auth_util.h',
'browser/api/cast_channel/cast_channel_api.cc',
'browser/api/cast_channel/cast_channel_api.h',
'browser/api/cast_channel/cast_framer.cc',
'browser/api/cast_channel/cast_framer.h',
'browser/api/cast_channel/cast_message_util.cc',
'browser/api/cast_channel/cast_message_util.h',
'browser/api/cast_channel/cast_socket.cc',
'browser/api/cast_channel/cast_socket.h',
'browser/api/cast_channel/cast_transport.cc',
'browser/api/cast_channel/cast_transport.h',
'browser/api/cast_channel/keep_alive_delegate.cc',
'browser/api/cast_channel/keep_alive_delegate.h',
'browser/api/cast_channel/logger.cc',
'browser/api/cast_channel/logger.h',
'browser/api/cast_channel/logger_util.cc',
'browser/api/cast_channel/logger_util.h',
'browser/api/declarative/declarative_api.cc',
'browser/api/declarative/declarative_api.h',
'browser/api/declarative/declarative_rule.h',
'browser/api/declarative/deduping_factory.h',
'browser/api/declarative/rules_cache_delegate.cc',
'browser/api/declarative/rules_cache_delegate.h',
'browser/api/declarative/rules_registry.cc',
'browser/api/declarative/rules_registry.h',
'browser/api/declarative/rules_registry_service.cc',
'browser/api/declarative/rules_registry_service.h',
'browser/api/declarative/test_rules_registry.cc',
'browser/api/declarative/test_rules_registry.h',
'browser/api/declarative_content/content_rules_registry.h',
'browser/api/declarative_webrequest/request_stage.cc',
'browser/api/declarative_webrequest/request_stage.h',
'browser/api/declarative_webrequest/webrequest_action.cc',
'browser/api/declarative_webrequest/webrequest_action.h',
'browser/api/declarative_webrequest/webrequest_condition.cc',
'browser/api/declarative_webrequest/webrequest_condition.h',
'browser/api/declarative_webrequest/webrequest_condition_attribute.cc',
'browser/api/declarative_webrequest/webrequest_condition_attribute.h',
'browser/api/declarative_webrequest/webrequest_constants.cc',
'browser/api/declarative_webrequest/webrequest_constants.h',
'browser/api/declarative_webrequest/webrequest_rules_registry.cc',
'browser/api/declarative_webrequest/webrequest_rules_registry.h',
'browser/api/device_permissions_manager.cc',
'browser/api/device_permissions_manager.h',
'browser/api/device_permissions_prompt.cc',
'browser/api/device_permissions_prompt.h',
'browser/api/dns/dns_api.cc',
'browser/api/dns/dns_api.h',
'browser/api/dns/host_resolver_wrapper.cc',
'browser/api/dns/host_resolver_wrapper.h',
'browser/api/document_scan/document_scan_api.cc',
'browser/api/document_scan/document_scan_api.h',
'browser/api/document_scan/document_scan_interface.cc',
'browser/api/document_scan/document_scan_interface.h',
'browser/api/document_scan/document_scan_interface_chromeos.cc',
'browser/api/execute_code_function.cc',
'browser/api/execute_code_function.h',
'browser/api/extensions_api_client.cc',
'browser/api/extensions_api_client.h',
'browser/api/guest_view/app_view/app_view_guest_internal_api.cc',
'browser/api/guest_view/app_view/app_view_guest_internal_api.h',
'browser/api/guest_view/extension_view/extension_view_internal_api.cc',
'browser/api/guest_view/extension_view/extension_view_internal_api.h',
'browser/api/guest_view/guest_view_internal_api.cc',
'browser/api/guest_view/guest_view_internal_api.h',
'browser/api/guest_view/web_view/web_view_internal_api.cc',
'browser/api/guest_view/web_view/web_view_internal_api.h',
'browser/api/hid/hid_api.cc',
'browser/api/hid/hid_api.h',
'browser/api/hid/hid_connection_resource.cc',
'browser/api/hid/hid_connection_resource.h',
'browser/api/hid/hid_device_manager.cc',
'browser/api/hid/hid_device_manager.h',
'browser/api/idle/idle_api.cc',
'browser/api/idle/idle_api.h',
'browser/api/idle/idle_api_constants.cc',
'browser/api/idle/idle_api_constants.h',
'browser/api/idle/idle_manager.cc',
'browser/api/idle/idle_manager.h',
'browser/api/idle/idle_manager_factory.cc',
'browser/api/idle/idle_manager_factory.h',
'browser/api/management/management_api.cc',
'browser/api/management/management_api.h',
'browser/api/management/management_api_constants.cc',
'browser/api/management/management_api_constants.h',
'browser/api/management/management_api_delegate.h',
'browser/api/messaging/native_message_host.cc',
'browser/api/mime_handler_private/mime_handler_private.cc',
'browser/api/mime_handler_private/mime_handler_private.h',
'browser/api/networking_private/networking_private_api.cc',
'browser/api/networking_private/networking_private_api.h',
'browser/api/networking_private/networking_private_chromeos.cc',
'browser/api/networking_private/networking_private_chromeos.h',
'browser/api/networking_private/networking_private_delegate.cc',
'browser/api/networking_private/networking_private_delegate.h',
'browser/api/networking_private/networking_private_delegate_factory.cc',
'browser/api/networking_private/networking_private_delegate_factory.h',
'browser/api/networking_private/networking_private_delegate_observer.h',
'browser/api/networking_private/networking_private_event_router.h',
'browser/api/networking_private/networking_private_event_router_chromeos.cc',
'browser/api/networking_private/networking_private_event_router_factory.cc',
'browser/api/networking_private/networking_private_event_router_factory.h',
'browser/api/power/power_api.cc',
'browser/api/power/power_api.h',
'browser/api/printer_provider/printer_provider_api.cc',
'browser/api/printer_provider/printer_provider_api.h',
'browser/api/printer_provider/printer_provider_api_factory.cc',
'browser/api/printer_provider/printer_provider_api_factory.h',
'browser/api/printer_provider/printer_provider_print_job.cc',
'browser/api/printer_provider/printer_provider_print_job.h',
'browser/api/printer_provider_internal/printer_provider_internal_api.cc',
'browser/api/printer_provider_internal/printer_provider_internal_api.h',
'browser/api/printer_provider_internal/printer_provider_internal_api_observer.h',
'browser/api/runtime/runtime_api.cc',
'browser/api/runtime/runtime_api.h',
'browser/api/runtime/runtime_api_delegate.cc',
'browser/api/runtime/runtime_api_delegate.h',
'browser/api/serial/serial_api.cc',
'browser/api/serial/serial_api.h',
'browser/api/serial/serial_connection.cc',
'browser/api/serial/serial_connection.h',
'browser/api/serial/serial_event_dispatcher.cc',
'browser/api/serial/serial_event_dispatcher.h',
'browser/api/serial/serial_service_factory.cc',
'browser/api/serial/serial_service_factory.h',
'browser/api/socket/socket.cc',
'browser/api/socket/socket.h',
'browser/api/socket/socket_api.cc',
'browser/api/socket/socket_api.h',
'browser/api/socket/tcp_socket.cc',
'browser/api/socket/tcp_socket.h',
'browser/api/socket/tls_socket.cc',
'browser/api/socket/tls_socket.h',
'browser/api/socket/udp_socket.cc',
'browser/api/socket/udp_socket.h',
'browser/api/sockets_tcp/sockets_tcp_api.cc',
'browser/api/sockets_tcp/sockets_tcp_api.h',
'browser/api/sockets_tcp/tcp_socket_event_dispatcher.cc',
'browser/api/sockets_tcp/tcp_socket_event_dispatcher.h',
'browser/api/sockets_tcp_server/sockets_tcp_server_api.cc',
'browser/api/sockets_tcp_server/sockets_tcp_server_api.h',
'browser/api/sockets_tcp_server/tcp_server_socket_event_dispatcher.cc',
'browser/api/sockets_tcp_server/tcp_server_socket_event_dispatcher.h',
'browser/api/sockets_udp/sockets_udp_api.cc',
'browser/api/sockets_udp/sockets_udp_api.h',
'browser/api/sockets_udp/udp_socket_event_dispatcher.cc',
'browser/api/sockets_udp/udp_socket_event_dispatcher.h',
'browser/api/storage/leveldb_settings_storage_factory.cc',
'browser/api/storage/leveldb_settings_storage_factory.h',
'browser/api/storage/local_value_store_cache.cc',
'browser/api/storage/local_value_store_cache.h',
'browser/api/storage/settings_namespace.cc',
'browser/api/storage/settings_namespace.h',
'browser/api/storage/settings_observer.h',
'browser/api/storage/settings_storage_factory.h',
'browser/api/storage/settings_storage_quota_enforcer.cc',
'browser/api/storage/settings_storage_quota_enforcer.h',
'browser/api/storage/storage_api.cc',
'browser/api/storage/storage_api.h',
'browser/api/storage/storage_frontend.cc',
'browser/api/storage/storage_frontend.h',
'browser/api/storage/value_store_cache.cc',
'browser/api/storage/value_store_cache.h',
'browser/api/storage/weak_unlimited_settings_storage.cc',
'browser/api/storage/weak_unlimited_settings_storage.h',
'browser/api/system_cpu/cpu_info_provider.cc',
'browser/api/system_cpu/cpu_info_provider.h',
'browser/api/system_cpu/cpu_info_provider_linux.cc',
'browser/api/system_cpu/cpu_info_provider_mac.cc',
'browser/api/system_cpu/cpu_info_provider_win.cc',
'browser/api/system_cpu/system_cpu_api.cc',
'browser/api/system_cpu/system_cpu_api.h',
'browser/api/system_display/display_info_provider.cc',
'browser/api/system_display/display_info_provider.h',
'browser/api/system_display/system_display_api.cc',
'browser/api/system_display/system_display_api.h',
'browser/api/system_info/system_info_api.cc',
'browser/api/system_info/system_info_api.h',
'browser/api/system_info/system_info_provider.cc',
'browser/api/system_info/system_info_provider.h',
'browser/api/system_memory/memory_info_provider.cc',
'browser/api/system_memory/memory_info_provider.h',
'browser/api/system_memory/system_memory_api.cc',
'browser/api/system_memory/system_memory_api.h',
'browser/api/system_network/system_network_api.cc',
'browser/api/system_network/system_network_api.h',
'browser/api/system_storage/storage_info_provider.cc',
'browser/api/system_storage/storage_info_provider.h',
'browser/api/system_storage/system_storage_api.cc',
'browser/api/system_storage/system_storage_api.h',
'browser/api/test/test_api.cc',
'browser/api/test/test_api.h',
'browser/api/usb/usb_api.cc',
'browser/api/usb/usb_api.h',
'browser/api/usb/usb_device_resource.cc',
'browser/api/usb/usb_device_resource.h',
'browser/api/usb/usb_event_router.cc',
'browser/api/usb/usb_event_router.h',
'browser/api/usb/usb_guid_map.cc',
'browser/api/usb/usb_guid_map.h',
'browser/api/virtual_keyboard_private/virtual_keyboard_delegate.h',
'browser/api/virtual_keyboard_private/virtual_keyboard_private_api.cc',
'browser/api/virtual_keyboard_private/virtual_keyboard_private_api.h',
'browser/api/web_request/form_data_parser.cc',
'browser/api/web_request/form_data_parser.h',
'browser/api/web_request/upload_data_presenter.cc',
'browser/api/web_request/upload_data_presenter.h',
'browser/api/web_request/web_request_api.cc',
'browser/api/web_request/web_request_api.h',
'browser/api/web_request/web_request_api_constants.cc',
'browser/api/web_request/web_request_api_constants.h',
'browser/api/web_request/web_request_api_helpers.cc',
'browser/api/web_request/web_request_api_helpers.h',
'browser/api/web_request/web_request_event_router_delegate.cc',
'browser/api/web_request/web_request_event_router_delegate.h',
'browser/api/web_request/web_request_permissions.cc',
'browser/api/web_request/web_request_permissions.h',
'browser/api/web_request/web_request_time_tracker.cc',
'browser/api/web_request/web_request_time_tracker.h',
'browser/api_activity_monitor.h',
'browser/app_sorting.h',
'browser/app_window/app_delegate.h',
'browser/app_window/app_web_contents_helper.cc',
'browser/app_window/app_web_contents_helper.h',
'browser/app_window/app_window.cc',
'browser/app_window/app_window.h',
'browser/app_window/app_window_client.cc',
'browser/app_window/app_window_client.h',
'browser/app_window/app_window_contents.cc',
'browser/app_window/app_window_contents.h',
'browser/app_window/app_window_geometry_cache.cc',
'browser/app_window/app_window_geometry_cache.h',
'browser/app_window/app_window_registry.cc',
'browser/app_window/app_window_registry.h',
'browser/app_window/native_app_window.h',
'browser/app_window/size_constraints.cc',
'browser/app_window/size_constraints.h',
'browser/bad_message.cc',
'browser/bad_message.h',
'browser/blacklist_state.h',
'browser/blob_holder.cc',
'browser/blob_holder.h',
'browser/browser_context_keyed_api_factory.h',
'browser/browser_context_keyed_service_factories.cc',
'browser/browser_context_keyed_service_factories.h',
'browser/component_extension_resource_manager.h',
'browser/computed_hashes.cc',
'browser/computed_hashes.h',
'browser/content_hash_fetcher.cc',
'browser/content_hash_fetcher.h',
'browser/content_hash_reader.cc',
'browser/content_hash_reader.h',
'browser/content_hash_tree.cc',
'browser/content_hash_tree.h',
'browser/content_verifier.cc',
'browser/content_verifier.h',
'browser/content_verifier_delegate.h',
'browser/content_verifier_io_data.cc',
'browser/content_verifier_io_data.h',
'browser/content_verify_job.cc',
'browser/content_verify_job.h',
'browser/crx_file_info.cc',
'browser/crx_file_info.h',
'browser/declarative_user_script_manager.cc',
'browser/declarative_user_script_manager.h',
'browser/declarative_user_script_manager_factory.cc',
'browser/declarative_user_script_manager_factory.h',
'browser/declarative_user_script_master.cc',
'browser/declarative_user_script_master.h',
'browser/deferred_start_render_host.h',
'browser/deferred_start_render_host_observer.h',
'browser/error_map.cc',
'browser/error_map.h',
'browser/event_listener_map.cc',
'browser/event_listener_map.h',
'browser/event_page_tracker.h',
'browser/event_router.cc',
'browser/event_router.h',
'browser/event_router_factory.cc',
'browser/event_router_factory.h',
'browser/extension_dialog_auto_confirm.cc',
'browser/extension_dialog_auto_confirm.h',
'browser/extension_error.cc',
'browser/extension_error.h',
'browser/extension_function.cc',
'browser/extension_function.h',
'browser/extension_function_dispatcher.cc',
'browser/extension_function_dispatcher.h',
'browser/extension_function_registry.cc',
'browser/extension_function_registry.h',
'browser/extension_function_util.cc',
'browser/extension_function_util.h',
'browser/extension_host.cc',
'browser/extension_host.h',
'browser/extension_host_delegate.h',
'browser/extension_host_observer.h',
'browser/extension_host_queue.h',
'browser/extension_icon_image.cc',
'browser/extension_icon_image.h',
'browser/extension_icon_placeholder.cc',
'browser/extension_icon_placeholder.h',
'browser/extension_message_filter.cc',
'browser/extension_message_filter.h',
'browser/extension_pref_store.cc',
'browser/extension_pref_store.h',
'browser/extension_pref_value_map.cc',
'browser/extension_pref_value_map.h',
'browser/extension_pref_value_map_factory.cc',
'browser/extension_pref_value_map_factory.h',
'browser/extension_prefs.cc',
'browser/extension_prefs.h',
'browser/extension_prefs_factory.cc',
'browser/extension_prefs_factory.h',
'browser/extension_prefs_observer.h',
'browser/extension_prefs_scope.h',
'browser/extension_protocols.cc',
'browser/extension_protocols.h',
'browser/extension_registry.cc',
'browser/extension_registry.h',
'browser/extension_registry_factory.cc',
'browser/extension_registry_factory.h',
'browser/extension_registry_observer.h',
'browser/extension_request_limiting_throttle.cc',
'browser/extension_request_limiting_throttle.h',
'browser/extension_scoped_prefs.h',
'browser/extension_system.cc',
'browser/extension_system.h',
'browser/extension_system_provider.cc',
'browser/extension_system_provider.h',
'browser/extension_throttle_entry.cc',
'browser/extension_throttle_entry.h',
'browser/extension_throttle_entry_interface.h',
'browser/extension_throttle_manager.cc',
'browser/extension_throttle_manager.h',
'browser/extension_user_script_loader.cc',
'browser/extension_user_script_loader.h',
'browser/extension_util.cc',
'browser/extension_util.h',
'browser/extension_web_contents_observer.cc',
'browser/extension_web_contents_observer.h',
'browser/extension_zoom_request_client.cc',
'browser/extension_zoom_request_client.h',
'browser/extensions_browser_client.cc',
'browser/extensions_browser_client.h',
'browser/external_provider_interface.h',
'browser/file_highlighter.cc',
'browser/file_highlighter.h',
'browser/file_reader.cc',
'browser/file_reader.h',
'browser/granted_file_entry.cc',
'browser/granted_file_entry.h',
'browser/guest_view/app_view/app_view_constants.cc',
'browser/guest_view/app_view/app_view_constants.h',
'browser/guest_view/app_view/app_view_guest.cc',
'browser/guest_view/app_view/app_view_guest.h',
'browser/guest_view/app_view/app_view_guest_delegate.cc',
'browser/guest_view/app_view/app_view_guest_delegate.h',
'browser/guest_view/extension_options/extension_options_constants.cc',
'browser/guest_view/extension_options/extension_options_constants.h',
'browser/guest_view/extension_options/extension_options_guest.cc',
'browser/guest_view/extension_options/extension_options_guest.h',
'browser/guest_view/extension_options/extension_options_guest_delegate.cc',
'browser/guest_view/extension_options/extension_options_guest_delegate.h',
'browser/guest_view/extension_view/extension_view_constants.cc',
'browser/guest_view/extension_view/extension_view_constants.h',
'browser/guest_view/extension_view/extension_view_guest.cc',
'browser/guest_view/extension_view/extension_view_guest.h',
'browser/guest_view/extensions_guest_view_manager_delegate.cc',
'browser/guest_view/extensions_guest_view_manager_delegate.h',
'browser/guest_view/extensions_guest_view_message_filter.cc',
'browser/guest_view/extensions_guest_view_message_filter.h',
'browser/guest_view/guest_view_events.cc',
'browser/guest_view/guest_view_events.h',
'browser/guest_view/mime_handler_view/mime_handler_stream_manager.cc',
'browser/guest_view/mime_handler_view/mime_handler_stream_manager.h',
'browser/guest_view/mime_handler_view/mime_handler_view_constants.cc',
'browser/guest_view/mime_handler_view/mime_handler_view_constants.h',
'browser/guest_view/mime_handler_view/mime_handler_view_guest.cc',
'browser/guest_view/mime_handler_view/mime_handler_view_guest.h',
'browser/guest_view/mime_handler_view/mime_handler_view_guest_delegate.cc',
'browser/guest_view/mime_handler_view/mime_handler_view_guest_delegate.h',
'browser/guest_view/web_view/javascript_dialog_helper.cc',
'browser/guest_view/web_view/javascript_dialog_helper.h',
'browser/guest_view/web_view/web_ui/web_ui_url_fetcher.cc',
'browser/guest_view/web_view/web_ui/web_ui_url_fetcher.h',
'browser/guest_view/web_view/web_view_constants.cc',
'browser/guest_view/web_view/web_view_constants.h',
'browser/guest_view/web_view/web_view_content_script_manager.cc',
'browser/guest_view/web_view/web_view_content_script_manager.h',
'browser/guest_view/web_view/web_view_find_helper.cc',
'browser/guest_view/web_view/web_view_find_helper.h',
'browser/guest_view/web_view/web_view_guest.cc',
'browser/guest_view/web_view/web_view_guest.h',
'browser/guest_view/web_view/web_view_guest_delegate.h',
'browser/guest_view/web_view/web_view_permission_helper.cc',
'browser/guest_view/web_view/web_view_permission_helper.h',
'browser/guest_view/web_view/web_view_permission_helper_delegate.cc',
'browser/guest_view/web_view/web_view_permission_helper_delegate.h',
'browser/guest_view/web_view/web_view_permission_types.h',
'browser/guest_view/web_view/web_view_renderer_state.cc',
'browser/guest_view/web_view/web_view_renderer_state.h',
'browser/image_loader.cc',
'browser/image_loader.h',
'browser/image_loader_factory.cc',
'browser/image_loader_factory.h',
'browser/info_map.cc',
'browser/info_map.h',
'browser/install/crx_install_error.h',
'browser/install/extension_install_ui.cc',
'browser/install/extension_install_ui.h',
'browser/install_flag.h',
'browser/io_thread_extension_message_filter.cc',
'browser/io_thread_extension_message_filter.h',
'browser/lazy_background_task_queue.cc',
'browser/lazy_background_task_queue.h',
'browser/lazy_background_task_queue_factory.cc',
'browser/lazy_background_task_queue_factory.h',
'browser/load_monitoring_extension_host_queue.cc',
'browser/load_monitoring_extension_host_queue.h',
'browser/management_policy.cc',
'browser/management_policy.h',
'browser/mojo/keep_alive_impl.cc',
'browser/mojo/keep_alive_impl.h',
'browser/mojo/service_registration.cc',
'browser/mojo/service_registration.h',
'browser/mojo/stash_backend.cc',
'browser/mojo/stash_backend.h',
'browser/notification_types.cc',
'browser/notification_types.h',
'browser/null_app_sorting.cc',
'browser/null_app_sorting.h',
'browser/pref_names.cc',
'browser/pref_names.h',
'browser/process_manager.cc',
'browser/process_manager.h',
'browser/process_manager_delegate.h',
'browser/process_manager_factory.cc',
'browser/process_manager_factory.h',
'browser/process_manager_observer.h',
'browser/process_map.cc',
'browser/process_map.h',
'browser/process_map_factory.cc',
'browser/process_map_factory.h',
'browser/quota_service.cc',
'browser/quota_service.h',
'browser/renderer_startup_helper.cc',
'browser/renderer_startup_helper.h',
'browser/requirements_checker.h',
'browser/runtime_data.cc',
'browser/runtime_data.h',
'browser/sandboxed_unpacker.cc',
'browser/sandboxed_unpacker.h',
'browser/script_execution_observer.h',
'browser/script_executor.cc',
'browser/script_executor.h',
'browser/serial_extension_host_queue.cc',
'browser/serial_extension_host_queue.h',
'browser/service_worker_manager.cc',
'browser/service_worker_manager.h',
'browser/state_store.cc',
'browser/state_store.h',
'browser/suggest_permission_util.cc',
'browser/suggest_permission_util.h',
'browser/uninstall_reason.h',
'browser/update_observer.h',
'browser/updater/extension_cache.h',
'browser/updater/extension_downloader.cc',
'browser/updater/extension_downloader.h',
'browser/updater/extension_downloader_delegate.cc',
'browser/updater/extension_downloader_delegate.h',
'browser/updater/manifest_fetch_data.cc',
'browser/updater/manifest_fetch_data.h',
'browser/updater/null_extension_cache.cc',
'browser/updater/null_extension_cache.h',
'browser/updater/request_queue.h',
'browser/updater/request_queue_impl.h',
'browser/updater/safe_manifest_parser.cc',
'browser/updater/safe_manifest_parser.h',
'browser/updater/update_client_config.cc',
'browser/updater/update_client_config.h',
'browser/updater/update_data_provider.cc',
'browser/updater/update_data_provider.h',
'browser/updater/update_install_shim.cc',
'browser/updater/update_install_shim.h',
'browser/updater/update_service.cc',
'browser/updater/update_service.h',
'browser/updater/update_service_factory.cc',
'browser/updater/update_service_factory.h',
'browser/url_request_util.cc',
'browser/url_request_util.h',
'browser/user_script_loader.cc',
'browser/user_script_loader.h',
'browser/value_store/leveldb_value_store.cc',
'browser/value_store/leveldb_value_store.h',
'browser/value_store/testing_value_store.cc',
'browser/value_store/testing_value_store.h',
'browser/value_store/value_store.cc',
'browser/value_store/value_store.h',
'browser/value_store/value_store_change.cc',
'browser/value_store/value_store_change.h',
'browser/value_store/value_store_frontend.cc',
'browser/value_store/value_store_frontend.h',
'browser/value_store/value_store_util.cc',
'browser/value_store/value_store_util.h',
'browser/verified_contents.cc',
'browser/verified_contents.h',
'browser/view_type_utils.cc',
'browser/view_type_utils.h',
'browser/warning_service.cc',
'browser/warning_service.h',
'browser/warning_service_factory.cc',
'browser/warning_service_factory.h',
'browser/warning_set.cc',
'browser/warning_set.h',
'browser/web_ui_user_script_loader.cc',
'browser/web_ui_user_script_loader.h',
],
'extensions_browser_sources_chromeos': [
'browser/api/audio/audio_service_chromeos.cc',
'browser/api/diagnostics/diagnostics_api.cc',
'browser/api/diagnostics/diagnostics_api.h',
'browser/api/diagnostics/diagnostics_api_chromeos.cc',
'browser/api/networking_config/networking_config_api.cc',
'browser/api/networking_config/networking_config_api.h',
'browser/api/networking_config/networking_config_service.cc',
'browser/api/networking_config/networking_config_service.h',
'browser/api/networking_config/networking_config_service_factory.cc',
'browser/api/networking_config/networking_config_service_factory.h',
'browser/api/socket/app_firewall_hole_manager.cc',
'browser/api/socket/app_firewall_hole_manager.h',
'browser/api/vpn_provider/vpn_provider_api.cc',
'browser/api/vpn_provider/vpn_provider_api.h',
'browser/api/vpn_provider/vpn_service.cc',
'browser/api/vpn_provider/vpn_service.h',
'browser/api/vpn_provider/vpn_service_factory.h',
'browser/api/webcam_private/v4l2_webcam.cc',
'browser/api/webcam_private/v4l2_webcam.h',
'browser/api/webcam_private/visca_webcam.cc',
'browser/api/webcam_private/visca_webcam.h',
'browser/api/webcam_private/webcam.cc',
'browser/api/webcam_private/webcam.h',
'browser/api/webcam_private/webcam_private_api.h',
'browser/api/webcam_private/webcam_private_api_chromeos.cc',
],
'extensions_browser_sources_nonchromeos': [
'browser/api/audio/audio_service.cc',
'browser/api/document_scan/document_scan_interface_nonchromeos.cc',
],
'extensions_browser_sources_win_or_mac': [
'browser/api/networking_private/networking_private_event_router_nonchromeos.cc',
'browser/api/networking_private/networking_private_service_client.cc',
'browser/api/networking_private/networking_private_service_client.h',
],
'extensions_browser_sources_linux_nonchromeos': [
'browser/api/networking_private/network_config_dbus_constants_linux.cc',
'browser/api/networking_private/network_config_dbus_constants_linux.h',
'browser/api/networking_private/networking_private_event_router_nonchromeos.cc',
'browser/api/networking_private/networking_private_linux.cc',
'browser/api/networking_private/networking_private_linux.h',
],
'extensions_renderer_sources': [
'renderer/activity_log_converter_strategy.cc',
'renderer/activity_log_converter_strategy.h',
'renderer/api/automation/automation_api_helper.cc',
'renderer/api/automation/automation_api_helper.h',
'renderer/api_activity_logger.cc',
'renderer/api_activity_logger.h',
'renderer/api_definitions_natives.cc',
'renderer/api_definitions_natives.h',
'renderer/app_window_custom_bindings.cc',
'renderer/app_window_custom_bindings.h',
'renderer/binding_generating_native_handler.cc',
'renderer/binding_generating_native_handler.h',
'renderer/blob_native_handler.cc',
'renderer/blob_native_handler.h',
'renderer/console.cc',
'renderer/console.h',
'renderer/content_watcher.cc',
'renderer/content_watcher.h',
'renderer/context_menus_custom_bindings.cc',
'renderer/context_menus_custom_bindings.h',
'renderer/css_native_handler.cc',
'renderer/css_native_handler.h',
'renderer/dispatcher.cc',
'renderer/dispatcher.h',
'renderer/dispatcher_delegate.h',
'renderer/document_custom_bindings.cc',
'renderer/document_custom_bindings.h',
'renderer/dom_activity_logger.cc',
'renderer/dom_activity_logger.h',
'renderer/event_bindings.cc',
'renderer/event_bindings.h',
'renderer/extension_frame_helper.cc',
'renderer/extension_frame_helper.h',
'renderer/extension_groups.h',
'renderer/extension_helper.cc',
'renderer/extension_helper.h',
'renderer/extension_injection_host.cc',
'renderer/extension_injection_host.h',
'renderer/extensions_render_frame_observer.cc',
'renderer/extensions_render_frame_observer.h',
'renderer/extensions_renderer_client.cc',
'renderer/extensions_renderer_client.h',
'renderer/file_system_natives.cc',
'renderer/file_system_natives.h',
'renderer/gc_callback.cc',
'renderer/gc_callback.h',
'renderer/guest_view/extensions_guest_view_container.cc',
'renderer/guest_view/extensions_guest_view_container.h',
'renderer/guest_view/extensions_guest_view_container_dispatcher.cc',
'renderer/guest_view/extensions_guest_view_container_dispatcher.h',
'renderer/guest_view/guest_view_internal_custom_bindings.cc',
'renderer/guest_view/guest_view_internal_custom_bindings.h',
'renderer/guest_view/mime_handler_view/mime_handler_view_container.cc',
'renderer/guest_view/mime_handler_view/mime_handler_view_container.h',
'renderer/i18n_custom_bindings.cc',
'renderer/i18n_custom_bindings.h',
'renderer/id_generator_custom_bindings.cc',
'renderer/id_generator_custom_bindings.h',
'renderer/injection_host.cc',
'renderer/injection_host.h',
'renderer/lazy_background_page_native_handler.cc',
'renderer/lazy_background_page_native_handler.h',
'renderer/logging_native_handler.cc',
'renderer/logging_native_handler.h',
'renderer/messaging_bindings.cc',
'renderer/messaging_bindings.h',
'renderer/module_system.cc',
'renderer/module_system.h',
'renderer/native_handler.cc',
'renderer/native_handler.h',
'renderer/object_backed_native_handler.cc',
'renderer/object_backed_native_handler.h',
'renderer/print_native_handler.cc',
'renderer/print_native_handler.h',
'renderer/process_info_native_handler.cc',
'renderer/process_info_native_handler.h',
'renderer/programmatic_script_injector.cc',
'renderer/programmatic_script_injector.h',
'renderer/render_frame_observer_natives.cc',
'renderer/renderer_extension_registry.cc',
'renderer/renderer_extension_registry.h',
'renderer/request_sender.cc',
'renderer/request_sender.h',
'renderer/resource_bundle_source_map.cc',
'renderer/resource_bundle_source_map.h',
'renderer/resources/app_runtime_custom_bindings.js',
'renderer/resources/app_window_custom_bindings.js',
'renderer/resources/binding.js',
'renderer/resources/context_menus_custom_bindings.js',
'renderer/resources/declarative_webrequest_custom_bindings.js',
'renderer/resources/entry_id_manager.js',
'renderer/resources/event.js',
'renderer/resources/extension.css',
'renderer/resources/extension_custom_bindings.js',
'renderer/resources/extension_fonts.css',
'renderer/resources/greasemonkey_api.js',
'renderer/resources/i18n_custom_bindings.js',
'renderer/resources/image_util.js',
'renderer/resources/json_schema.js',
'renderer/resources/last_error.js',
'renderer/resources/messaging.js',
'renderer/resources/messaging_utils.js',
'renderer/resources/permissions_custom_bindings.js',
'renderer/resources/platform_app.css',
'renderer/resources/platform_app.js',
'renderer/resources/runtime_custom_bindings.js',
'renderer/resources/schema_utils.js',
'renderer/resources/send_request.js',
'renderer/resources/set_icon.js',
'renderer/resources/storage_area.js',
'renderer/resources/test_custom_bindings.js',
'renderer/resources/uncaught_exception_handler.js',
'renderer/resources/utils.js',
'renderer/resources/guest_view/app_view/app_view.js',
'renderer/resources/guest_view/extension_options/extension_options.js',
'renderer/resources/guest_view/extension_view/extension_view.js',
'renderer/resources/guest_view/web_view/web_view.js',
'renderer/resources/guest_view/web_view/web_view_events.js',
'renderer/resources/guest_view/web_view/web_view_iframe.js',
'renderer/resources/web_request_custom_bindings.js',
'renderer/resources/web_request_internal_custom_bindings.js',
'renderer/runtime_custom_bindings.cc',
'renderer/runtime_custom_bindings.h',
'renderer/safe_builtins.cc',
'renderer/safe_builtins.h',
'renderer/script_context.cc',
'renderer/script_context.h',
'renderer/script_context_set.cc',
'renderer/script_context_set.h',
'renderer/script_injection.cc',
'renderer/script_injection.h',
'renderer/script_injection_callback.cc',
'renderer/script_injection_callback.h',
'renderer/script_injection_manager.cc',
'renderer/script_injection_manager.h',
'renderer/script_injector.h',
'renderer/scripts_run_info.cc',
'renderer/scripts_run_info.h',
'renderer/send_request_natives.cc',
'renderer/send_request_natives.h',
'renderer/set_icon_natives.cc',
'renderer/set_icon_natives.h',
'renderer/static_v8_external_one_byte_string_resource.cc',
'renderer/static_v8_external_one_byte_string_resource.h',
'renderer/test_features_native_handler.cc',
'renderer/test_features_native_handler.h',
'renderer/test_native_handler.cc',
'renderer/test_native_handler.h',
'renderer/user_gestures_native_handler.cc',
'renderer/user_gestures_native_handler.h',
'renderer/user_script_injector.cc',
'renderer/user_script_injector.h',
'renderer/user_script_set.cc',
'renderer/user_script_set.h',
'renderer/user_script_set_manager.cc',
'renderer/user_script_set_manager.h',
'renderer/utils_native_handler.cc',
'renderer/utils_native_handler.h',
'renderer/v8_context_native_handler.cc',
'renderer/v8_context_native_handler.h',
'renderer/v8_helpers.h',
'renderer/v8_schema_registry.cc',
'renderer/v8_schema_registry.h',
'renderer/wake_event_page.cc',
'renderer/wake_event_page.h',
'renderer/web_ui_injection_host.cc',
'renderer/web_ui_injection_host.h',
'renderer/worker_script_context_set.cc',
'renderer/worker_script_context_set.h',
],
'extensions_utility_sources': [
'utility/unpacker.cc',
'utility/unpacker.h',
'utility/utility_handler.cc',
'utility/utility_handler.h',
],
'extensions_test_support_sources': [
'browser/api/cast_channel/cast_test_util.cc',
'browser/api/cast_channel/cast_test_util.h',
'browser/api/dns/mock_host_resolver_creator.cc',
'browser/api/dns/mock_host_resolver_creator.h',
'browser/api/storage/settings_test_util.cc',
'browser/api/storage/settings_test_util.h',
'browser/api_test_utils.cc',
'browser/api_test_utils.h',
'browser/api_unittest.cc',
'browser/api_unittest.h',
'browser/app_window/test_app_window_contents.cc',
'browser/app_window/test_app_window_contents.h',
'browser/extension_error_test_util.cc',
'browser/extension_error_test_util.h',
'browser/extensions_test.cc',
'browser/extensions_test.h',
'browser/guest_view/mime_handler_view/test_mime_handler_view_guest.cc',
'browser/guest_view/mime_handler_view/test_mime_handler_view_guest.h',
'browser/mock_extension_system.cc',
'browser/mock_extension_system.h',
'browser/test_extension_registry_observer.cc',
'browser/test_extension_registry_observer.h',
'browser/test_extensions_browser_client.cc',
'browser/test_extensions_browser_client.h',
'browser/test_image_loader.cc',
'browser/test_image_loader.h',
'browser/test_management_policy.cc',
'browser/test_management_policy.h',
'browser/test_runtime_api_delegate.cc',
'browser/test_runtime_api_delegate.h',
'common/extension_builder.cc',
'common/extension_builder.h',
'common/manifest_test.cc',
'common/manifest_test.h',
'common/permissions/permission_message_test_util.cc',
'common/permissions/permission_message_test_util.h',
'common/test_util.cc',
'common/test_util.h',
'renderer/test_extensions_renderer_client.cc',
'renderer/test_extensions_renderer_client.h',
'test/background_page_watcher.cc',
'test/background_page_watcher.h',
'test/extension_test_message_listener.cc',
'test/extension_test_message_listener.h',
'test/result_catcher.cc',
'test/result_catcher.h',
'test/test_content_utility_client.cc',
'test/test_content_utility_client.h',
'test/test_extensions_client.cc',
'test/test_extensions_client.h',
'test/test_permission_message_provider.cc',
'test/test_permission_message_provider.h',
],
},
}
| [
"[email protected]"
] | |
b19d4c48e98253f0c798e34a47492d4728a86040 | 440e4f13d6b3939f0290931bcd984591f191f9f9 | /demo/XmlResourceSubclass.py | 3ffb043f8b415e55275f9873d23bd8a36a423eb2 | [] | no_license | AlfiyaZi/Phoenix | b445e6906eb61d037c83957ce601d731dc04acfa | c524ed1a3794ec4d2baaba6b12d6d7ef37aa7695 | refs/heads/master | 2020-06-16T18:04:42.696173 | 2016-11-24T18:39:47 | 2016-11-24T18:39:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,717 | py | #!/usr/bin/env python
import wx
import wx.xrc as xrc
#----------------------------------------------------------------------
resourceText = br'''<?xml version="1.0"?>
<resource>
<!-- Notice that the class IS a standard wx class, and a custom
subclass is specified as "moduleName.ClassName" Try changing
the classname to one that does not exist and see what happens -->
<object class="wxPanel" subclass="XmlResourceSubclass.MyCustomPanel" name="MyPanel">
<size>200,100</size>
<object class="wxStaticText" name="label1">
<label>This panel is a custom class derived from wx.Panel,\nand is loaded by a custom XmlResourceHandler.</label>
<pos>10,10</pos>
</object>
</object>
</resource>
'''
#----------------------------------------------------------------------
class MyCustomPanel(wx.Panel):
def __init__(self):
wx.Panel.__init__(self)
# the Create step is done by XRC.
self.Bind(wx.EVT_WINDOW_CREATE, self.OnCreate)
self.Bind(wx.EVT_SIZE, self.OnSize)
def OnCreate(self, evt):
# This is the little bit of customization that we do for this
# silly example. It could just as easily have been done in
# the resource. We do it in the EVT_WINDOW_CREATE handler
# because the window doesn't really exist yet in the __init__.
if self is evt.GetEventObject():
t = wx.StaticText(self, -1, "MyCustomPanel")
f = t.GetFont()
f.SetWeight(wx.FONTWEIGHT_BOLD)
f.SetPointSize(f.GetPointSize()+2)
t.SetFont(f)
self.t = t
# On OSX the EVT_SIZE happens before EVT_WINDOW_CREATE !?!
# so give it another kick
wx.CallAfter(self.OnSize, None)
evt.Skip()
def OnSize(self, evt):
if hasattr(self, 't'):
sz = self.GetSize()
w, h = self.t.GetTextExtent(self.t.GetLabel())
            self.t.SetPosition(((sz.width-w)//2, (sz.height-h)//2))  # integer division: positions must be ints
#----------------------------------------------------------------------
class TestPanel(wx.Panel):
def __init__(self, parent, log):
self.log = log
wx.Panel.__init__(self, parent, -1)
# make the components
label = wx.StaticText(self, -1, "The lower panel was built from this XML:")
label.SetFont(wx.Font(12, wx.FONTFAMILY_SWISS, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_BOLD))
text = wx.TextCtrl(self, -1, resourceText,
style=wx.TE_READONLY|wx.TE_MULTILINE)
text.SetInsertionPoint(0)
line = wx.StaticLine(self, -1)
# Load the resource
res = xrc.XmlResource()
res.LoadFromBuffer(resourceText)
# Now create a panel from the resource data
panel = res.LoadPanel(self, "MyPanel")
# and do the layout
sizer = wx.BoxSizer(wx.VERTICAL)
sizer.Add(label, 0, wx.EXPAND|wx.TOP|wx.LEFT, 5)
sizer.Add(text, 1, wx.EXPAND|wx.ALL, 5)
sizer.Add(line, 0, wx.EXPAND)
sizer.Add(panel, 1, wx.EXPAND|wx.ALL, 5)
self.SetSizer(sizer)
#----------------------------------------------------------------------
def runTest(frame, nb, log):
win = TestPanel(nb, log)
return win
#----------------------------------------------------------------------
overview = """<html><body>
<h2><center>wx.XmlResourceSubclass</center></h2>
Sometimes it is necessary to use custom classes, but you still want
them to be created from XRC. The subclass XRC attribute allows you to
do that.
</body></html>
"""
if __name__ == '__main__':
import sys,os
import run
run.main(['', os.path.basename(sys.argv[0])] + sys.argv[1:])
| [
"[email protected]"
] | |
d72b7ab5be64ba723348d6436a369f88a256597d | bce29ac8dccfc51983dcc00c433287866e9d63e7 | /1222-5223.py | 2fd13043ddeebbd63a8be746dccad6b295a1017f | [] | no_license | aiifabbf/leetcode-memo | b2c3f110073367b4b6db95722e96a794b5fe0453 | 5be09b4a804cb600e61e24617b9b2a1cc78fab3f | refs/heads/master | 2021-06-11T18:41:16.550017 | 2021-04-20T15:15:50 | 2021-04-20T15:15:50 | 175,244,504 | 10 | 2 | null | null | null | null | UTF-8 | Python | false | false | 2,916 | py | """
Given an 8x8 chessboard, the positions of the queens, and the position of the king, find which queens can attack the king.

The idea is simple: starting from the king, search outward in each of the eight directions (up, down, left, right, and the four diagonals); the first queen met in each direction is one that can attack the king. Note that if another queen sits between the king and a farther queen, e.g.

::

    king queen1 queen2

then queen2 has no way to attack the king.

The tricky part is writing it cleanly... I ended up with eight for loops.
"""
from typing import *
class Solution:
def queensAttacktheKing(self, queens: List[List[int]], king: List[int]) -> List[List[int]]:
        queens = set(map(tuple, queens)) # convert the queen positions to tuples and store them in a set, so each membership check is O(1)
res = []
        for delta in range(1, 9): # look to the right
temp = (king[0] + delta, king[1])
            if temp in queens: # first queen encountered in this direction
                res.append(temp) # add it to the result
                break # no need to look further; anything behind is blocked by this first queen and cannot reach the king
        for delta in range(1, 9): # look down
temp = (king[0], king[1] + delta)
if temp in queens:
res.append(temp)
break
        for delta in range(1, 9): # look down-right
temp = (king[0] + delta, king[1] + delta)
if temp in queens:
res.append(temp)
break
        for delta in range(1, 9): # look to the left
temp = (king[0] - delta, king[1])
if temp in queens:
res.append(temp)
break
        for delta in range(1, 9): # look up
temp = (king[0], king[1] - delta)
if temp in queens:
res.append(temp)
break
        for delta in range(1, 9): # look up-left
temp = (king[0] - delta, king[1] - delta)
if temp in queens:
res.append(temp)
break
        for delta in range(1, 9): # look up-right
temp = (king[0] + delta, king[1] - delta)
if temp in queens:
res.append(temp)
break
        for delta in range(1, 9): # look down-left
temp = (king[0] - delta, king[1] + delta)
if temp in queens:
res.append(temp)
break
return list(map(list, res))
# s = Solution()
# print(s.queensAttacktheKing(queens = [[0,1],[1,0],[4,0],[0,4],[3,3],[2,4]], king = [0,0]))
# print(s.queensAttacktheKing(queens = [[0,0],[1,1],[2,2],[3,4],[3,5],[4,4],[4,5]], king = [3,3]))
# print(s.queensAttacktheKing(queens = [[5,6],[7,7],[2,1],[0,7],[1,6],[5,1],[3,7],[0,3],[4,0],[1,2],[6,3],[5,0],[0,4],[2,2],[1,1],[6,4],[5,4],[0,0],[2,6],[4,5],[5,2],[1,4],[7,5],[2,3],[0,5],[4,2],[1,0],[2,7],[0,1],[4,6],[6,1],[0,6],[4,3],[1,7]], king = [3,4])) | [
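# Illustrative sketch (not part of the original solution): the eight nearly
# identical loops above can be collapsed by iterating over the eight direction
# vectors; the logic is otherwise the same.
def queens_attack_the_king_compact(queens: List[List[int]], king: List[int]) -> List[List[int]]:
    queen_set = set(map(tuple, queens))  # O(1) membership checks
    res = []
    directions = [(1, 0), (-1, 0), (0, 1), (0, -1), (1, 1), (1, -1), (-1, 1), (-1, -1)]
    for dx, dy in directions:
        for delta in range(1, 9):
            temp = (king[0] + dx * delta, king[1] + dy * delta)
            if temp in queen_set:  # first queen met in this direction can attack the king
                res.append(list(temp))
                break  # queens farther away in this direction are blocked
    return res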
"[email protected]"
] | |
4d3e25f526d32e3c523a613ec868b4d3bf6994d0 | 3e9856fd60d797f35dc54a4e0092ceb611f5f1da | /train_gnn_s8_nodp01.py | d0562884ca0c8af89d85da494252ac13c1dc36cf | [] | no_license | yougoforward/parse2 | d18a7f5654c10edbb716dda3ae9615b10d289d8e | 2d21b6f0968b6b05e6c2453bbf46a7cebbd69220 | refs/heads/master | 2021-05-22T23:23:18.046316 | 2020-12-22T09:47:37 | 2020-12-22T09:47:37 | 253,138,404 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 14,236 | py | import argparse
import os
import random
import sys
import time
import torch.backends.cudnn as cudnn
import torch.optim as optim
import torchvision
from tensorboardX import SummaryWriter
from torch.nn import functional as F
from torch.nn.parallel.scatter_gather import gather
from torch.utils import data
from dataset.data_pascal import DataGenerator
# from dataset.datasets import DatasetGenerator
from network.gnn_s8_nodp01 import get_model
# from network.abrnet import get_model
from progress.bar import Bar
from utils.gnn_loss import gnn_loss_noatt as ABRLovaszLoss
from utils.metric import *
from utils.parallel import DataParallelModel, DataParallelCriterion
from utils.visualize import inv_preprocess, decode_predictions
def parse_args():
parser = argparse.ArgumentParser(description='PyTorch Segmentation')
parser.add_argument('--method', type=str, default='abr')
# Datasets
parser.add_argument('--root', default='./data/Person', type=str)
parser.add_argument('--val-root', default='./data/Person', type=str)
parser.add_argument('--lst', default='./dataset/Pascal/train_id.txt', type=str)
parser.add_argument('--val-lst', default='./dataset/Pascal/val_id.txt', type=str)
parser.add_argument('--crop-size', type=int, default=473)
parser.add_argument('--num-classes', type=int, default=7)
parser.add_argument('--hbody-cls', type=int, default=3)
parser.add_argument('--fbody-cls', type=int, default=2)
# Optimization options
parser.add_argument('--epochs', default=150, type=int)
parser.add_argument('--batch-size', default=20, type=int)
parser.add_argument('--learning-rate', default=1e-2, type=float)
parser.add_argument('--lr-mode', type=str, default='poly')
parser.add_argument('--ignore-label', type=int, default=255)
# Checkpoints
# parser.add_argument('--restore-from', default='./checkpoints/init/resnet152_stem.pth', type=str)
parser.add_argument('--restore-from', default='./checkpoints/init/resnet101_stem.pth', type=str)
# parser.add_argument('--restore-from', default='./checkpoints/init/resnet50_stem.pth', type=str)
parser.add_argument('--snapshot_dir', type=str, default='./checkpoints/exp/')
parser.add_argument('--log-dir', type=str, default='./runs/')
parser.add_argument('--init', action="store_true")
parser.add_argument('--save-num', type=int, default=2)
# Misc
parser.add_argument('--seed', type=int, default=123)
args = parser.parse_args()
return args
def adjust_learning_rate(optimizer, epoch, i_iter, iters_per_epoch, method='poly'):
if method == 'poly':
current_step = epoch * iters_per_epoch + i_iter
max_step = args.epochs * iters_per_epoch
lr = args.learning_rate * ((1 - current_step / max_step) ** 0.9)
else:
lr = args.learning_rate
optimizer.param_groups[0]['lr'] = lr
return lr
def main(args):
# initialization
print("Input arguments:")
for key, val in vars(args).items():
print("{:16} {}".format(key, val))
if not os.path.exists(args.snapshot_dir):
os.makedirs(args.snapshot_dir)
writer = SummaryWriter(log_dir=os.path.join(args.log_dir, args.method))
random.seed(args.seed)
torch.manual_seed(args.seed)
cudnn.enabled = True
cudnn.benchmark = True
# conduct seg network
seg_model = get_model(num_classes=args.num_classes)
saved_state_dict = torch.load(args.restore_from)
new_params = seg_model.state_dict().copy()
if args.init:
for i in saved_state_dict:
i_parts = i.split('.')
if not i_parts[0] == 'fc':
new_params['encoder.' + '.'.join(i_parts[:])] = saved_state_dict[i]
seg_model.load_state_dict(new_params)
print('loading params w/o fc')
else:
seg_model.load_state_dict(saved_state_dict)
print('loading params all')
model = DataParallelModel(seg_model)
model.float()
model.cuda()
# define dataloader
train_loader = data.DataLoader(DataGenerator(root=args.root, list_path=args.lst,
crop_size=args.crop_size, training=True),
batch_size=args.batch_size, shuffle=True, num_workers=4, pin_memory=True, drop_last=True)
val_loader = data.DataLoader(DataGenerator(root=args.val_root, list_path=args.val_lst,
crop_size=args.crop_size, training=False),
batch_size=args.batch_size, shuffle=False, num_workers=4, pin_memory=True)
# define criterion & optimizer
criterion = ABRLovaszLoss(adj_matrix = torch.tensor(
[[0, 1, 0, 0, 0, 0], [1, 0, 1, 0, 1, 0], [0, 1, 0, 1, 0, 0], [0, 0, 1, 0, 0, 0], [0, 1, 0, 0, 0, 1],
[0, 0, 0, 0, 1, 0]]), ignore_index=args.ignore_label, only_present=True, upper_part_list=[1, 2, 3, 4], lower_part_list=[5, 6], cls_p= args.num_classes, cls_h= args.hbody_cls, cls_f= args.fbody_cls)
criterion = DataParallelCriterion(criterion).cuda()
optimizer = optim.SGD(
[{'params': filter(lambda p: p.requires_grad, seg_model.parameters()), 'lr': args.learning_rate}],
lr=args.learning_rate, momentum=0.9, weight_decay=5e-4)
# key points
best_val_mIoU = 0
best_val_pixAcc = 0
start = time.time()
for epoch in range(0, args.epochs):
print('\n{} | {}'.format(epoch, args.epochs - 1))
# training
_ = train(model, train_loader, epoch, criterion, optimizer, writer)
# validation
        if epoch % 10 == 0 or epoch > args.epochs - 10:
# val_pixacc0, val_miou0 = validation0(model, val_loader, epoch, writer)
val_pixacc, val_miou = validation(model, val_loader, epoch, writer)
# save model
if val_pixacc > best_val_pixAcc:
best_val_pixAcc = val_pixacc
if val_miou > best_val_mIoU:
best_val_mIoU = val_miou
model_dir = os.path.join(args.snapshot_dir, args.method + '_miou.pth')
torch.save(seg_model.state_dict(), model_dir)
print('Model saved to %s' % model_dir)
os.rename(model_dir, os.path.join(args.snapshot_dir, args.method + '_miou'+str(best_val_mIoU)+'.pth'))
print('Complete using', time.time() - start, 'seconds')
print('Best pixAcc: {} | Best mIoU: {}'.format(best_val_pixAcc, best_val_mIoU))
def train(model, train_loader, epoch, criterion, optimizer, writer):
# set training mode
model.train()
train_loss = 0.0
iter_num = 0
# Iterate over data.
# bar = Bar('Processing | {}'.format('train'), max=len(train_loader))
# bar.check_tty = False
from tqdm import tqdm
tbar = tqdm(train_loader)
for i_iter, batch in enumerate(tbar):
sys.stdout.flush()
start_time = time.time()
iter_num += 1
# adjust learning rate
iters_per_epoch = len(train_loader)
lr = adjust_learning_rate(optimizer, epoch, i_iter, iters_per_epoch, method=args.lr_mode)
# print("\n=>epoch %d, learning_rate = %f" % (epoch, lr))
image, label, hlabel, flabel, _ = batch
images, labels, hlabel, flabel = image.cuda(), label.long().cuda(), hlabel.cuda(), flabel.cuda()
torch.set_grad_enabled(True)
# zero the parameter gradients
optimizer.zero_grad()
# compute output loss
preds = model(images)
loss = criterion(preds, [labels, hlabel, flabel]) # batch mean
train_loss += loss.item()
# compute gradient and do SGD step
loss.backward()
optimizer.step()
if i_iter % 10 == 0:
writer.add_scalar('learning_rate', lr, iter_num + epoch * len(train_loader))
writer.add_scalar('train_loss', train_loss / iter_num, iter_num + epoch * len(train_loader))
batch_time = time.time() - start_time
# plot progress
tbar.set_description('{} / {} | Time: {batch_time:.4f} | Loss: {loss:.4f}'.format(iter_num, len(train_loader),
batch_time=batch_time,
loss=train_loss / iter_num))
# bar.suffix = '{} / {} | Time: {batch_time:.4f} | Loss: {loss:.4f}'.format(iter_num, len(train_loader),
# batch_time=batch_time,
# loss=train_loss / iter_num)
# bar.next()
epoch_loss = train_loss / iter_num
writer.add_scalar('train_epoch_loss', epoch_loss, epoch)
tbar.close()
# bar.finish()
return epoch_loss
def validation(model, val_loader, epoch, writer):
# set evaluate mode
model.eval()
total_correct, total_label = 0, 0
total_correct_hb, total_label_hb = 0, 0
total_correct_fb, total_label_fb = 0, 0
hist = np.zeros((args.num_classes, args.num_classes))
hist_hb = np.zeros((args.hbody_cls, args.hbody_cls))
hist_fb = np.zeros((args.fbody_cls, args.fbody_cls))
# Iterate over data.
from tqdm import tqdm
tbar = tqdm(val_loader)
for idx, batch in enumerate(tbar):
image, target, hlabel, flabel, _ = batch
image, target, hlabel, flabel = image.cuda(), target.cuda(), hlabel.cuda(), flabel.cuda()
with torch.no_grad():
h, w = target.size(1), target.size(2)
outputs = model(image)
outputs = gather(outputs, 0, dim=0)
preds = F.interpolate(input=outputs[0][-1], size=(h, w), mode='bilinear', align_corners=True)
preds_hb = F.interpolate(input=outputs[1][-1], size=(h, w), mode='bilinear', align_corners=True)
preds_fb = F.interpolate(input=outputs[2][-1], size=(h, w), mode='bilinear', align_corners=True)
# pixelAcc
correct, labeled = batch_pix_accuracy(preds.data, target)
correct_hb, labeled_hb = batch_pix_accuracy(preds_hb.data, hlabel)
correct_fb, labeled_fb = batch_pix_accuracy(preds_fb.data, flabel)
# mIoU
hist += fast_hist(preds, target, args.num_classes)
hist_hb += fast_hist(preds_hb, hlabel, args.hbody_cls)
hist_fb += fast_hist(preds_fb, flabel, args.fbody_cls)
total_correct += correct
total_correct_hb += correct_hb
total_correct_fb += correct_fb
total_label += labeled
total_label_hb += labeled_hb
total_label_fb += labeled_fb
pixAcc = 1.0 * total_correct / (np.spacing(1) + total_label)
IoU = round(np.nanmean(per_class_iu(hist)) * 100, 2)
pixAcc_hb = 1.0 * total_correct_hb / (np.spacing(1) + total_label_hb)
IoU_hb = round(np.nanmean(per_class_iu(hist_hb)) * 100, 2)
pixAcc_fb = 1.0 * total_correct_fb / (np.spacing(1) + total_label_fb)
IoU_fb = round(np.nanmean(per_class_iu(hist_fb)) * 100, 2)
# plot progress
tbar.set_description('{} / {} | {pixAcc:.4f}, {IoU:.4f} |' \
'{pixAcc_hb:.4f}, {IoU_hb:.4f} |' \
'{pixAcc_fb:.4f}, {IoU_fb:.4f}'.format(idx + 1, len(val_loader), pixAcc=pixAcc, IoU=IoU,pixAcc_hb=pixAcc_hb, IoU_hb=IoU_hb,pixAcc_fb=pixAcc_fb, IoU_fb=IoU_fb))
tbar.close()
return pixAcc, IoU
def validation0(model, val_loader, epoch, writer):
# set evaluate mode
model.eval()
total_correct, total_label = 0, 0
total_correct_hb, total_label_hb = 0, 0
total_correct_fb, total_label_fb = 0, 0
hist = np.zeros((args.num_classes, args.num_classes))
hist_hb = np.zeros((args.hbody_cls, args.hbody_cls))
hist_fb = np.zeros((args.fbody_cls, args.fbody_cls))
# Iterate over data.
from tqdm import tqdm
tbar = tqdm(val_loader)
for idx, batch in enumerate(tbar):
image, target, hlabel, flabel, _ = batch
image, target, hlabel, flabel = image.cuda(), target.cuda(), hlabel.cuda(), flabel.cuda()
with torch.no_grad():
h, w = target.size(1), target.size(2)
outputs = model(image)
outputs = gather(outputs, 0, dim=0)
preds = F.interpolate(input=outputs[0][0], size=(h, w), mode='bilinear', align_corners=True)
preds_hb = F.interpolate(input=outputs[1][0], size=(h, w), mode='bilinear', align_corners=True)
preds_fb = F.interpolate(input=outputs[2][0], size=(h, w), mode='bilinear', align_corners=True)
# pixelAcc
correct, labeled = batch_pix_accuracy(preds.data, target)
correct_hb, labeled_hb = batch_pix_accuracy(preds_hb.data, hlabel)
correct_fb, labeled_fb = batch_pix_accuracy(preds_fb.data, flabel)
# mIoU
hist += fast_hist(preds, target, args.num_classes)
hist_hb += fast_hist(preds_hb, hlabel, args.hbody_cls)
hist_fb += fast_hist(preds_fb, flabel, args.fbody_cls)
total_correct += correct
total_correct_hb += correct_hb
total_correct_fb += correct_fb
total_label += labeled
total_label_hb += labeled_hb
total_label_fb += labeled_fb
pixAcc = 1.0 * total_correct / (np.spacing(1) + total_label)
IoU = round(np.nanmean(per_class_iu(hist)) * 100, 2)
pixAcc_hb = 1.0 * total_correct_hb / (np.spacing(1) + total_label_hb)
IoU_hb = round(np.nanmean(per_class_iu(hist_hb)) * 100, 2)
pixAcc_fb = 1.0 * total_correct_fb / (np.spacing(1) + total_label_fb)
IoU_fb = round(np.nanmean(per_class_iu(hist_fb)) * 100, 2)
# plot progress
tbar.set_description('{} / {} | {pixAcc:.4f}, {IoU:.4f} |' \
'{pixAcc_hb:.4f}, {IoU_hb:.4f} |' \
'{pixAcc_fb:.4f}, {IoU_fb:.4f}'.format(idx + 1, len(val_loader), pixAcc=pixAcc, IoU=IoU,pixAcc_hb=pixAcc_hb, IoU_hb=IoU_hb,pixAcc_fb=pixAcc_fb, IoU_fb=IoU_fb))
tbar.close()
return pixAcc, IoU
if __name__ == '__main__':
args = parse_args()
main(args)
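# Illustrative worked example (not part of the original script): the 'poly'
# schedule in adjust_learning_rate decays the learning rate as
# lr = base_lr * (1 - step / max_step) ** 0.9. With the default base LR of
# 1e-2 this gives ~1.0e-2 at step 0, ~5.4e-3 at the halfway point, and a value
# approaching 0 near the end of training.
def _poly_lr_example(base_lr=1e-2, epochs=150, iters_per_epoch=100):
    max_step = epochs * iters_per_epoch
    for step in (0, max_step // 2, max_step - 1):
        print(step, base_lr * ((1 - step / max_step) ** 0.9))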
| [
"[email protected]"
] | |
36820395946cd35b82cf0890ad00f03f16354844 | 906b3df6f4cb2141910c19dbe8e29fe493205ed2 | /contracts.py | b39f4cb339e5a70daf16f74cbf15b0b6b99822c0 | [] | no_license | webclinic017/IB_to_XCEL_Python | 6af80ed3855e97664f38c50a945554678cc1f834 | 66dfefdb047d373486434e065324e676a9f7618e | refs/heads/main | 2023-06-18T20:38:33.784478 | 2021-07-15T15:29:31 | 2021-07-15T15:29:31 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,991 | py | from ibapi.contract import Contract
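#Create July VIX future object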
july_vixFuture_contract = Contract()
july_vixFuture_contract.symbol = 'VXK1'
july_vixFuture_contract.secType = 'FUT'
july_vixFuture_contract.tradingClass = 'VX'
july_vixFuture_contract.exchange = 'CFE'
july_vixFuture_contract.currency = 'USD'
july_vixFuture_contract.lastTradeDateOrContractMonth = '20210721'
#Create August VIX future object
august_vixFuture_contract = Contract()
august_vixFuture_contract.symbol = 'VXK1'
august_vixFuture_contract.secType = 'FUT'
august_vixFuture_contract.tradingClass = 'VX'
august_vixFuture_contract.exchange = 'CFE'
august_vixFuture_contract.currency = 'USD'
august_vixFuture_contract.lastTradeDateOrContractMonth = '20210818'
#Create September VIX future object
september_vixFuture_contract = Contract()
september_vixFuture_contract.symbol = 'VXK1'
september_vixFuture_contract.secType = 'FUT'
september_vixFuture_contract.tradingClass = 'VX'
september_vixFuture_contract.exchange = 'CFE'
september_vixFuture_contract.currency = 'USD'
september_vixFuture_contract.lastTradeDateOrContractMonth = '20210915'
#Create October VIX future object
october_vixFuture_contract = Contract()
october_vixFuture_contract.symbol = 'VXK1'
october_vixFuture_contract.secType = 'FUT'
october_vixFuture_contract.tradingClass = 'VX'
october_vixFuture_contract.exchange = 'CFE'
october_vixFuture_contract.currency = 'USD'
october_vixFuture_contract.lastTradeDateOrContractMonth = '20211020'
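#Create November VIX future object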
november_vixFuture_contract = Contract()
november_vixFuture_contract.symbol = 'VXK1'
november_vixFuture_contract.secType = 'FUT'
november_vixFuture_contract.tradingClass = 'VX'
november_vixFuture_contract.exchange = 'CFE'
november_vixFuture_contract.currency = 'USD'
november_vixFuture_contract.lastTradeDateOrContractMonth = '20211117'
contract_list = [july_vixFuture_contract, august_vixFuture_contract, september_vixFuture_contract, october_vixFuture_contract, november_vixFuture_contract]
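# Illustrative sketch (not part of the original script): the five contract
# blocks above differ only in their expiry date, so an equivalent list can be
# built with a small helper. The symbol, trading class, exchange, currency,
# and expiry dates below are simply copied from the definitions above.
def make_vix_future(expiry):
    c = Contract()
    c.symbol = 'VXK1'
    c.secType = 'FUT'
    c.tradingClass = 'VX'
    c.exchange = 'CFE'
    c.currency = 'USD'
    c.lastTradeDateOrContractMonth = expiry
    return c
alt_contract_list = [make_vix_future(d) for d in
                     ('20210721', '20210818', '20210915', '20211020', '20211117')]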
| [
"[email protected]"
] | |
8a221e3053dcc8502c46d16f04827b083c3b5bd0 | 8159c2c650d53fb188a26b508dfff524296707d1 | /lore/deities/alki_l.py | 905ce33ad3e1bc0fadf8acc8293b6ce5991ca540 | [] | no_license | Teifion/Rob3 | 501663bf5077da0c28a7db4960c74c4477dd11bf | 81fc2f9930434b5a4de52b75eb6a8d78dd708f77 | refs/heads/master | 2021-01-16T19:20:29.666426 | 2011-05-30T13:34:17 | 2011-05-30T13:34:17 | 1,819,452 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 278 | py | data = {
"cat": "deities",
"page": "alki",
}
blocks = [
{
"id": "summary",
"text": "Summary text"
},
{
"id": "Backstory",
"text": """"""
},
{
"level": "secret",
"text": "Secret info"
},
{
"id": "gm_notes",
"level": "gm",
"text": "GM info"
}
] | [
"[email protected]"
] | |
37f6724585a71b87339e498274d17d2f828f55ce | e29faa10be00e8c839bf909922ace176b05826bb | /misc/metric_loss_ops.py | 272388b01a7837eecc308939289b84d6c1ef0bac | [] | no_license | weiyinfu/learnTensorflow | 666e46259da435c19c06f3abbaa8a00ae37431ce | 98cb7a978dd682ec8f651f9da57e2f23d47c21a4 | refs/heads/master | 2022-01-01T08:10:40.215554 | 2021-12-26T06:24:40 | 2021-12-26T06:24:40 | 145,659,381 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 41,466 | py | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Implements various metric learning losses."""
from __future__ import absolute_import
from __future__ import division
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import logging_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import script_ops
from tensorflow.python.ops import sparse_ops
from tensorflow.python.summary import summary
try:
# pylint: disable=g-import-not-at-top
from sklearn import metrics
HAS_SKLEARN = True
except ImportError:
HAS_SKLEARN = False
def pairwise_distance(feature, squared=False):
"""Computes the pairwise distance matrix with numerical stability.
output[i, j] = || feature[i, :] - feature[j, :] ||_2
Args:
feature: 2-D Tensor of size [number of data, feature dimension].
squared: Boolean, whether or not to square the pairwise distances.
Returns:
pairwise_distances: 2-D Tensor of size [number of data, number of data].
"""
pairwise_distances_squared = math_ops.add(
math_ops.reduce_sum(math_ops.square(feature), axis=[1], keepdims=True),
math_ops.reduce_sum(
math_ops.square(array_ops.transpose(feature)),
axis=[0],
keepdims=True)) - 2.0 * math_ops.matmul(feature,
array_ops.transpose(feature))
# Deal with numerical inaccuracies. Set small negatives to zero.
pairwise_distances_squared = math_ops.maximum(pairwise_distances_squared, 0.0)
# Get the mask where the zero distances are at.
error_mask = math_ops.less_equal(pairwise_distances_squared, 0.0)
# Optionally take the sqrt.
if squared:
pairwise_distances = pairwise_distances_squared
else:
pairwise_distances = math_ops.sqrt(
pairwise_distances_squared + math_ops.to_float(error_mask) * 1e-16)
# Undo conditionally adding 1e-16.
pairwise_distances = math_ops.multiply(
pairwise_distances, math_ops.to_float(math_ops.logical_not(error_mask)))
num_data = array_ops.shape(feature)[0]
# Explicitly set diagonals to zero.
mask_offdiagonals = array_ops.ones_like(pairwise_distances) - array_ops.diag(
array_ops.ones([num_data]))
pairwise_distances = math_ops.multiply(pairwise_distances, mask_offdiagonals)
return pairwise_distances
def contrastive_loss(labels, embeddings_anchor, embeddings_positive,
margin=1.0):
"""Computes the contrastive loss.
This loss encourages the embedding to be close to each other for
the samples of the same label and the embedding to be far apart at least
by the margin constant for the samples of different labels.
See: http://yann.lecun.com/exdb/publis/pdf/hadsell-chopra-lecun-06.pdf
Args:
labels: 1-D tf.int32 `Tensor` with shape [batch_size] of
binary labels indicating positive vs negative pair.
embeddings_anchor: 2-D float `Tensor` of embedding vectors for the anchor
images. Embeddings should be l2 normalized.
embeddings_positive: 2-D float `Tensor` of embedding vectors for the
positive images. Embeddings should be l2 normalized.
margin: margin term in the loss definition.
Returns:
contrastive_loss: tf.float32 scalar.
"""
# Get per pair distances
distances = math_ops.sqrt(
math_ops.reduce_sum(
math_ops.square(embeddings_anchor - embeddings_positive), 1))
# Add contrastive loss for the siamese network.
# label here is {0,1} for neg, pos.
return math_ops.reduce_mean(
math_ops.to_float(labels) * math_ops.square(distances) +
(1. - math_ops.to_float(labels)) *
math_ops.square(math_ops.maximum(margin - distances, 0.)),
name='contrastive_loss')
def masked_maximum(data, mask, dim=1):
"""Computes the axis wise maximum over chosen elements.
Args:
data: 2-D float `Tensor` of size [n, m].
mask: 2-D Boolean `Tensor` of size [n, m].
dim: The dimension over which to compute the maximum.
Returns:
masked_maximums: N-D `Tensor`.
The maximized dimension is of size 1 after the operation.
"""
axis_minimums = math_ops.reduce_min(data, dim, keepdims=True)
masked_maximums = math_ops.reduce_max(
math_ops.multiply(data - axis_minimums, mask), dim,
keepdims=True) + axis_minimums
return masked_maximums
def masked_minimum(data, mask, dim=1):
"""Computes the axis wise minimum over chosen elements.
Args:
data: 2-D float `Tensor` of size [n, m].
mask: 2-D Boolean `Tensor` of size [n, m].
dim: The dimension over which to compute the minimum.
Returns:
masked_minimums: N-D `Tensor`.
The minimized dimension is of size 1 after the operation.
"""
axis_maximums = math_ops.reduce_max(data, dim, keepdims=True)
masked_minimums = math_ops.reduce_min(
math_ops.multiply(data - axis_maximums, mask), dim,
keepdims=True) + axis_maximums
return masked_minimums
def triplet_semihard_loss(labels, embeddings, margin=1.0):
"""Computes the triplet loss with semi-hard negative mining.
The loss encourages the positive distances (between a pair of embeddings with
the same labels) to be smaller than the minimum negative distance among
which are at least greater than the positive distance plus the margin constant
(called semi-hard negative) in the mini-batch. If no such negative exists,
uses the largest negative distance instead.
See: https://arxiv.org/abs/1503.03832.
Args:
labels: 1-D tf.int32 `Tensor` with shape [batch_size] of
multiclass integer labels.
embeddings: 2-D float `Tensor` of embedding vectors. Embeddings should
be l2 normalized.
margin: Float, margin term in the loss definition.
Returns:
triplet_loss: tf.float32 scalar.
"""
# Reshape [batch_size] label tensor to a [batch_size, 1] label tensor.
lshape = array_ops.shape(labels)
assert lshape.shape == 1
labels = array_ops.reshape(labels, [lshape[0], 1])
# Build pairwise squared distance matrix.
pdist_matrix = pairwise_distance(embeddings, squared=True)
# Build pairwise binary adjacency matrix.
adjacency = math_ops.equal(labels, array_ops.transpose(labels))
# Invert so we can select negatives only.
adjacency_not = math_ops.logical_not(adjacency)
batch_size = array_ops.size(labels)
# Compute the mask.
pdist_matrix_tile = array_ops.tile(pdist_matrix, [batch_size, 1])
mask = math_ops.logical_and(
array_ops.tile(adjacency_not, [batch_size, 1]),
math_ops.greater(
pdist_matrix_tile, array_ops.reshape(
array_ops.transpose(pdist_matrix), [-1, 1])))
mask_final = array_ops.reshape(
math_ops.greater(
math_ops.reduce_sum(
math_ops.cast(mask, dtype=dtypes.float32), 1, keepdims=True),
0.0), [batch_size, batch_size])
mask_final = array_ops.transpose(mask_final)
adjacency_not = math_ops.cast(adjacency_not, dtype=dtypes.float32)
mask = math_ops.cast(mask, dtype=dtypes.float32)
# negatives_outside: smallest D_an where D_an > D_ap.
negatives_outside = array_ops.reshape(
masked_minimum(pdist_matrix_tile, mask), [batch_size, batch_size])
negatives_outside = array_ops.transpose(negatives_outside)
# negatives_inside: largest D_an.
negatives_inside = array_ops.tile(
masked_maximum(pdist_matrix, adjacency_not), [1, batch_size])
semi_hard_negatives = array_ops.where(
mask_final, negatives_outside, negatives_inside)
loss_mat = math_ops.add(margin, pdist_matrix - semi_hard_negatives)
mask_positives = math_ops.cast(
adjacency, dtype=dtypes.float32) - array_ops.diag(
array_ops.ones([batch_size]))
# In lifted-struct, the authors multiply 0.5 for upper triangular
# in semihard, they take all positive pairs except the diagonal.
num_positives = math_ops.reduce_sum(mask_positives)
triplet_loss = math_ops.truediv(
math_ops.reduce_sum(
math_ops.maximum(
math_ops.multiply(loss_mat, mask_positives), 0.0)),
num_positives,
name='triplet_semihard_loss')
return triplet_loss
# pylint: disable=line-too-long
def npairs_loss(labels, embeddings_anchor, embeddings_positive,
reg_lambda=0.002, print_losses=False):
"""Computes the npairs loss.
Npairs loss expects paired data where a pair is composed of samples from the
same labels and each pairs in the minibatch have different labels. The loss
has two components. The first component is the L2 regularizer on the
embedding vectors. The second component is the sum of cross entropy loss
which takes each row of the pair-wise similarity matrix as logits and
the remapped one-hot labels as labels.
See: http://www.nec-labs.com/uploads/images/Department-Images/MediaAnalytics/papers/nips16_npairmetriclearning.pdf
Args:
labels: 1-D tf.int32 `Tensor` of shape [batch_size/2].
embeddings_anchor: 2-D Tensor of shape [batch_size/2, embedding_dim] for the
embedding vectors for the anchor images. Embeddings should not be
l2 normalized.
embeddings_positive: 2-D Tensor of shape [batch_size/2, embedding_dim] for the
embedding vectors for the positive images. Embeddings should not be
l2 normalized.
reg_lambda: Float. L2 regularization term on the embedding vectors.
print_losses: Boolean. Option to print the xent and l2loss.
Returns:
npairs_loss: tf.float32 scalar.
"""
# pylint: enable=line-too-long
# Add the regularizer on the embedding.
reg_anchor = math_ops.reduce_mean(
math_ops.reduce_sum(math_ops.square(embeddings_anchor), 1))
reg_positive = math_ops.reduce_mean(
math_ops.reduce_sum(math_ops.square(embeddings_positive), 1))
l2loss = math_ops.multiply(
0.25 * reg_lambda, reg_anchor + reg_positive, name='l2loss')
# Get per pair similarities.
similarity_matrix = math_ops.matmul(
embeddings_anchor, embeddings_positive, transpose_a=False,
transpose_b=True)
# Reshape [batch_size] label tensor to a [batch_size, 1] label tensor.
lshape = array_ops.shape(labels)
assert lshape.shape == 1
labels = array_ops.reshape(labels, [lshape[0], 1])
labels_remapped = math_ops.to_float(
math_ops.equal(labels, array_ops.transpose(labels)))
labels_remapped /= math_ops.reduce_sum(labels_remapped, 1, keepdims=True)
# Add the softmax loss.
xent_loss = nn.softmax_cross_entropy_with_logits(
logits=similarity_matrix, labels=labels_remapped)
xent_loss = math_ops.reduce_mean(xent_loss, name='xentropy')
if print_losses:
xent_loss = logging_ops.Print(
xent_loss, ['cross entropy:', xent_loss, 'l2loss:', l2loss])
return l2loss + xent_loss
def _build_multilabel_adjacency(sparse_labels):
"""Builds multilabel adjacency matrix.
As of March 14th, 2017, there's no op for the dot product between
two sparse tensors in TF. However, there is `sparse_minimum` op which is
equivalent to an AND op between two sparse boolean tensors.
This computes the dot product between two sparse boolean inputs.
Args:
sparse_labels: List of 1-D boolean sparse tensors.
Returns:
adjacency_matrix: 2-D dense `Tensor`.
"""
num_pairs = len(sparse_labels)
adjacency_matrix = array_ops.zeros([num_pairs, num_pairs])
for i in range(num_pairs):
for j in range(num_pairs):
sparse_dot_product = math_ops.to_float(
sparse_ops.sparse_reduce_sum(sparse_ops.sparse_minimum(
sparse_labels[i], sparse_labels[j])))
sparse_dot_product = array_ops.expand_dims(sparse_dot_product, 0)
sparse_dot_product = array_ops.expand_dims(sparse_dot_product, 1)
one_hot_matrix = array_ops.pad(sparse_dot_product,
[[i, num_pairs-i-1],
[j, num_pairs-j-1]], 'CONSTANT')
adjacency_matrix += one_hot_matrix
return adjacency_matrix
def npairs_loss_multilabel(sparse_labels, embeddings_anchor,
embeddings_positive, reg_lambda=0.002,
print_losses=False):
r"""Computes the npairs loss with multilabel data.
Npairs loss expects paired data where a pair is composed of samples from the
same labels and each pairs in the minibatch have different labels. The loss
has two components. The first component is the L2 regularizer on the
embedding vectors. The second component is the sum of cross entropy loss
which takes each row of the pair-wise similarity matrix as logits and
the remapped one-hot labels as labels. Here, the similarity is defined by the
dot product between two embedding vectors. S_{i,j} = f(x_i)^T f(x_j)
To deal with multilabel inputs, we use the count of label intersection
i.e. L_{i,j} = | set_of_labels_for(i) \cap set_of_labels_for(j) |
Then we normalize each rows of the count based label matrix so that each row
sums to one.
Args:
sparse_labels: List of 1-D Boolean `SparseTensor` of dense_shape
[batch_size/2, num_classes] labels for the anchor-pos pairs.
embeddings_anchor: 2-D `Tensor` of shape [batch_size/2, embedding_dim] for
the embedding vectors for the anchor images. Embeddings should not be
l2 normalized.
embeddings_positive: 2-D `Tensor` of shape [batch_size/2, embedding_dim] for
the embedding vectors for the positive images. Embeddings should not be
l2 normalized.
reg_lambda: Float. L2 regularization term on the embedding vectors.
print_losses: Boolean. Option to print the xent and l2loss.
Returns:
npairs_loss: tf.float32 scalar.
Raises:
TypeError: When the specified sparse_labels is not a `SparseTensor`.
"""
if False in [isinstance(
l, sparse_tensor.SparseTensor) for l in sparse_labels]:
raise TypeError(
'sparse_labels must be a list of SparseTensors, but got %s' % str(
sparse_labels))
with ops.name_scope('NpairsLossMultiLabel'):
# Add the regularizer on the embedding.
reg_anchor = math_ops.reduce_mean(
math_ops.reduce_sum(math_ops.square(embeddings_anchor), 1))
reg_positive = math_ops.reduce_mean(
math_ops.reduce_sum(math_ops.square(embeddings_positive), 1))
l2loss = math_ops.multiply(0.25 * reg_lambda,
reg_anchor + reg_positive, name='l2loss')
# Get per pair similarities.
similarity_matrix = math_ops.matmul(
embeddings_anchor, embeddings_positive, transpose_a=False,
transpose_b=True)
# TODO(coreylynch): need to check the sparse values
# TODO(coreylynch): are composed only of 0's and 1's.
multilabel_adjacency_matrix = _build_multilabel_adjacency(sparse_labels)
labels_remapped = math_ops.to_float(multilabel_adjacency_matrix)
labels_remapped /= math_ops.reduce_sum(labels_remapped, 1, keepdims=True)
# Add the softmax loss.
xent_loss = nn.softmax_cross_entropy_with_logits(
logits=similarity_matrix, labels=labels_remapped)
xent_loss = math_ops.reduce_mean(xent_loss, name='xentropy')
if print_losses:
xent_loss = logging_ops.Print(
xent_loss, ['cross entropy:', xent_loss, 'l2loss:', l2loss])
return l2loss + xent_loss
def lifted_struct_loss(labels, embeddings, margin=1.0):
"""Computes the lifted structured loss.
The loss encourages the positive distances (between a pair of embeddings
with the same labels) to be smaller than any negative distances (between a
pair of embeddings with different labels) in the mini-batch in a way
that is differentiable with respect to the embedding vectors.
See: https://arxiv.org/abs/1511.06452.
Args:
labels: 1-D tf.int32 `Tensor` with shape [batch_size] of
multiclass integer labels.
embeddings: 2-D float `Tensor` of embedding vectors. Embeddings should not
be l2 normalized.
margin: Float, margin term in the loss definition.
Returns:
lifted_loss: tf.float32 scalar.
"""
# Reshape [batch_size] label tensor to a [batch_size, 1] label tensor.
lshape = array_ops.shape(labels)
assert lshape.shape == 1
labels = array_ops.reshape(labels, [lshape[0], 1])
# Build pairwise squared distance matrix.
pairwise_distances = pairwise_distance(embeddings)
# Build pairwise binary adjacency matrix.
adjacency = math_ops.equal(labels, array_ops.transpose(labels))
# Invert so we can select negatives only.
adjacency_not = math_ops.logical_not(adjacency)
batch_size = array_ops.size(labels)
diff = margin - pairwise_distances
mask = math_ops.cast(adjacency_not, dtype=dtypes.float32)
# Safe maximum: Temporarily shift negative distances
# above zero before taking max.
# this is to take the max only among negatives.
row_minimums = math_ops.reduce_min(diff, 1, keepdims=True)
row_negative_maximums = math_ops.reduce_max(
math_ops.multiply(diff - row_minimums, mask), 1,
keepdims=True) + row_minimums
# Compute the loss.
# Keep track of matrix of maximums where M_ij = max(m_i, m_j)
# where m_i is the max of alpha - negative D_i's.
# This matches the Caffe loss layer implementation at:
# https://github.com/rksltnl/Caffe-Deep-Metric-Learning-CVPR16/blob/0efd7544a9846f58df923c8b992198ba5c355454/src/caffe/layers/lifted_struct_similarity_softmax_layer.cpp # pylint: disable=line-too-long
max_elements = math_ops.maximum(
row_negative_maximums, array_ops.transpose(row_negative_maximums))
diff_tiled = array_ops.tile(diff, [batch_size, 1])
mask_tiled = array_ops.tile(mask, [batch_size, 1])
max_elements_vect = array_ops.reshape(
array_ops.transpose(max_elements), [-1, 1])
loss_exp_left = array_ops.reshape(
math_ops.reduce_sum(
math_ops.multiply(
math_ops.exp(diff_tiled - max_elements_vect), mask_tiled),
1,
keepdims=True), [batch_size, batch_size])
loss_mat = max_elements + math_ops.log(
loss_exp_left + array_ops.transpose(loss_exp_left))
# Add the positive distance.
loss_mat += pairwise_distances
mask_positives = math_ops.cast(
adjacency, dtype=dtypes.float32) - array_ops.diag(
array_ops.ones([batch_size]))
# *0.5 for upper triangular, and another *0.5 for 1/2 factor for loss^2.
num_positives = math_ops.reduce_sum(mask_positives) / 2.0
lifted_loss = math_ops.truediv(
0.25 * math_ops.reduce_sum(
math_ops.square(
math_ops.maximum(
math_ops.multiply(loss_mat, mask_positives), 0.0))),
num_positives,
name='liftedstruct_loss')
return lifted_loss
def update_1d_tensor(y, index, value):
"""Updates 1d tensor y so that y[index] = value.
Args:
y: 1-D Tensor.
index: index of y to modify.
value: new value to write at y[index].
Returns:
y_mod: 1-D Tensor. Tensor y after the update.
"""
value = array_ops.squeeze(value)
# modify the 1D tensor x at index with value.
# ex) chosen_ids = update_1D_tensor(chosen_ids, cluster_idx, best_medoid)
y_before = array_ops.slice(y, [0], [index])
y_after = array_ops.slice(y, [index + 1], [-1])
y_mod = array_ops.concat([y_before, [value], y_after], 0)
return y_mod
def get_cluster_assignment(pairwise_distances, centroid_ids):
"""Assign data points to the neareset centroids.
Tensorflow has numerical instability and doesn't always choose
the data point with theoretically zero distance as it's nearest neighbor.
Thus, for each centroid in centroid_ids, explicitly assign
the centroid itself as the nearest centroid.
This is done through the mask tensor and the constraint_vect tensor.
Args:
pairwise_distances: 2-D Tensor of pairwise distances.
centroid_ids: 1-D Tensor of centroid indices.
Returns:
y_fixed: 1-D tensor of cluster assignment.
"""
predictions = math_ops.argmin(
array_ops.gather(pairwise_distances, centroid_ids), dimension=0)
batch_size = array_ops.shape(pairwise_distances)[0]
# Deal with numerical instability
mask = math_ops.reduce_any(array_ops.one_hot(
centroid_ids, batch_size, True, False, axis=-1, dtype=dtypes.bool),
axis=0)
constraint_one_hot = math_ops.multiply(
array_ops.one_hot(centroid_ids,
batch_size,
array_ops.constant(1, dtype=dtypes.int64),
array_ops.constant(0, dtype=dtypes.int64),
axis=0,
dtype=dtypes.int64),
math_ops.to_int64(math_ops.range(array_ops.shape(centroid_ids)[0])))
constraint_vect = math_ops.reduce_sum(
array_ops.transpose(constraint_one_hot), axis=0)
y_fixed = array_ops.where(mask, constraint_vect, predictions)
return y_fixed
def compute_facility_energy(pairwise_distances, centroid_ids):
"""Compute the average travel distance to the assigned centroid.
Args:
pairwise_distances: 2-D Tensor of pairwise distances.
centroid_ids: 1-D Tensor of indices.
Returns:
facility_energy: dtypes.float32 scalar.
"""
return -1.0 * math_ops.reduce_sum(
math_ops.reduce_min(
array_ops.gather(pairwise_distances, centroid_ids), axis=0))
def compute_clustering_score(labels, predictions, margin_type):
"""Computes the clustering score via sklearn.metrics functions.
There are various ways to compute the clustering score. Intuitively,
we want to measure the agreement of two clustering assignments (labels vs
predictions) ignoring the permutations and output a score from zero to one.
(where the values close to one indicate significant agreement).
This code supports following scoring functions:
nmi: normalized mutual information
ami: adjusted mutual information
ari: adjusted random index
vmeasure: v-measure
const: indicator checking whether the two clusterings are the same.
See http://scikit-learn.org/stable/modules/classes.html#clustering-metrics
for the detailed descriptions.
Args:
labels: 1-D Tensor. ground truth cluster assignment.
predictions: 1-D Tensor. predicted cluster assignment.
margin_type: Type of structured margin to use. Default is nmi.
Returns:
clustering_score: dtypes.float32 scalar.
The possible valid values are from zero to one.
Zero means the worst clustering and one means the perfect clustering.
Raises:
ValueError: margin_type is not recognized.
"""
margin_type_to_func = {
'nmi': _compute_nmi_score,
'ami': _compute_ami_score,
'ari': _compute_ari_score,
'vmeasure': _compute_vmeasure_score,
'const': _compute_zeroone_score
}
if margin_type not in margin_type_to_func:
raise ValueError('Unrecognized margin_type: %s' % margin_type)
clustering_score_fn = margin_type_to_func[margin_type]
return array_ops.squeeze(clustering_score_fn(labels, predictions))
def _compute_nmi_score(labels, predictions):
return math_ops.to_float(
script_ops.py_func(
metrics.normalized_mutual_info_score, [labels, predictions],
[dtypes.float64],
name='nmi'))
def _compute_ami_score(labels, predictions):
ami_score = math_ops.to_float(
script_ops.py_func(
metrics.adjusted_mutual_info_score, [labels, predictions],
[dtypes.float64],
name='ami'))
return math_ops.maximum(0.0, ami_score)
def _compute_ari_score(labels, predictions):
ari_score = math_ops.to_float(
script_ops.py_func(
metrics.adjusted_rand_score, [labels, predictions], [dtypes.float64],
name='ari'))
# ari score can go below 0
# http://scikit-learn.org/stable/modules/clustering.html#adjusted-rand-score
return math_ops.maximum(0.0, ari_score)
def _compute_vmeasure_score(labels, predictions):
vmeasure_score = math_ops.to_float(
script_ops.py_func(
metrics.v_measure_score, [labels, predictions], [dtypes.float64],
name='vmeasure'))
return math_ops.maximum(0.0, vmeasure_score)
def _compute_zeroone_score(labels, predictions):
zeroone_score = math_ops.to_float(
math_ops.equal(
math_ops.reduce_sum(
math_ops.to_int32(math_ops.equal(labels, predictions))),
array_ops.shape(labels)[0]))
return zeroone_score
def _find_loss_augmented_facility_idx(pairwise_distances, labels, chosen_ids,
candidate_ids, margin_multiplier,
margin_type):
"""Find the next centroid that maximizes the loss augmented inference.
This function is a subroutine called from compute_augmented_facility_locations
Args:
pairwise_distances: 2-D Tensor of pairwise distances.
labels: 1-D Tensor of ground truth cluster assignment.
chosen_ids: 1-D Tensor of current centroid indices.
candidate_ids: 1-D Tensor of candidate indices.
margin_multiplier: multiplication constant.
margin_type: Type of structured margin to use. Default is nmi.
Returns:
integer index.
"""
num_candidates = array_ops.shape(candidate_ids)[0]
pairwise_distances_chosen = array_ops.gather(pairwise_distances, chosen_ids)
pairwise_distances_candidate = array_ops.gather(
pairwise_distances, candidate_ids)
pairwise_distances_chosen_tile = array_ops.tile(
pairwise_distances_chosen, [1, num_candidates])
candidate_scores = -1.0 * math_ops.reduce_sum(
array_ops.reshape(
math_ops.reduce_min(
array_ops.concat([
pairwise_distances_chosen_tile,
array_ops.reshape(pairwise_distances_candidate, [1, -1])
], 0),
axis=0,
keepdims=True), [num_candidates, -1]),
axis=1)
nmi_scores = array_ops.zeros([num_candidates])
iteration = array_ops.constant(0)
def func_cond(iteration, nmi_scores):
del nmi_scores # Unused in func_cond()
return iteration < num_candidates
def func_body(iteration, nmi_scores):
predictions = get_cluster_assignment(
pairwise_distances,
array_ops.concat([chosen_ids, [candidate_ids[iteration]]], 0))
nmi_score_i = compute_clustering_score(labels, predictions, margin_type)
pad_before = array_ops.zeros([iteration])
pad_after = array_ops.zeros([num_candidates - 1 - iteration])
# return 1 - NMI score as the structured loss.
# because NMI is higher the better [0,1].
return iteration + 1, nmi_scores + array_ops.concat(
[pad_before, [1.0 - nmi_score_i], pad_after], 0)
_, nmi_scores = control_flow_ops.while_loop(
func_cond, func_body, [iteration, nmi_scores])
candidate_scores = math_ops.add(
candidate_scores, margin_multiplier * nmi_scores)
argmax_index = math_ops.to_int32(
math_ops.argmax(candidate_scores, axis=0))
return candidate_ids[argmax_index]
def compute_augmented_facility_locations(pairwise_distances, labels, all_ids,
margin_multiplier, margin_type):
"""Computes the centroid locations.
Args:
pairwise_distances: 2-D Tensor of pairwise distances.
labels: 1-D Tensor of ground truth cluster assignment.
all_ids: 1-D Tensor of all data indices.
margin_multiplier: multiplication constant.
margin_type: Type of structured margin to use. Default is nmi.
Returns:
chosen_ids: 1-D Tensor of chosen centroid indices.
"""
def func_cond_augmented(iteration, chosen_ids):
del chosen_ids # Unused argument in func_cond_augmented.
return iteration < num_classes
def func_body_augmented(iteration, chosen_ids):
# find a new facility location to add
# based on the clustering score and the NMI score
candidate_ids = array_ops.setdiff1d(all_ids, chosen_ids)[0]
new_chosen_idx = _find_loss_augmented_facility_idx(pairwise_distances,
labels, chosen_ids,
candidate_ids,
margin_multiplier,
margin_type)
chosen_ids = array_ops.concat([chosen_ids, [new_chosen_idx]], 0)
return iteration + 1, chosen_ids
num_classes = array_ops.size(array_ops.unique(labels)[0])
chosen_ids = array_ops.constant(0, dtype=dtypes.int32, shape=[0])
# num_classes get determined at run time based on the sampled batch.
iteration = array_ops.constant(0)
_, chosen_ids = control_flow_ops.while_loop(
func_cond_augmented,
func_body_augmented, [iteration, chosen_ids],
shape_invariants=[iteration.get_shape(), tensor_shape.TensorShape(
[None])])
return chosen_ids
def update_medoid_per_cluster(pairwise_distances, pairwise_distances_subset,
labels, chosen_ids, cluster_member_ids,
cluster_idx, margin_multiplier, margin_type):
"""Updates the cluster medoid per cluster.
Args:
pairwise_distances: 2-D Tensor of pairwise distances.
pairwise_distances_subset: 2-D Tensor of pairwise distances for one cluster.
labels: 1-D Tensor of ground truth cluster assignment.
chosen_ids: 1-D Tensor of cluster centroid indices.
cluster_member_ids: 1-D Tensor of cluster member indices for one cluster.
cluster_idx: Index of this one cluster.
margin_multiplier: multiplication constant.
margin_type: Type of structured margin to use. Default is nmi.
Returns:
chosen_ids: Updated 1-D Tensor of cluster centroid indices.
"""
def func_cond(iteration, scores_margin):
del scores_margin # Unused variable scores_margin.
return iteration < num_candidates
def func_body(iteration, scores_margin):
# swap the current medoid with the candidate cluster member
candidate_medoid = math_ops.to_int32(cluster_member_ids[iteration])
tmp_chosen_ids = update_1d_tensor(chosen_ids, cluster_idx, candidate_medoid)
predictions = get_cluster_assignment(pairwise_distances, tmp_chosen_ids)
metric_score = compute_clustering_score(labels, predictions, margin_type)
pad_before = array_ops.zeros([iteration])
pad_after = array_ops.zeros([num_candidates - 1 - iteration])
return iteration + 1, scores_margin + array_ops.concat(
[pad_before, [1.0 - metric_score], pad_after], 0)
# pairwise_distances_subset is of size [p, 1, 1, p],
# the intermediate dummy dimensions at
# [1, 2] makes this code work in the edge case where p=1.
# this happens if the cluster size is one.
scores_fac = -1.0 * math_ops.reduce_sum(
array_ops.squeeze(pairwise_distances_subset, [1, 2]), axis=0)
iteration = array_ops.constant(0)
num_candidates = array_ops.size(cluster_member_ids)
scores_margin = array_ops.zeros([num_candidates])
_, scores_margin = control_flow_ops.while_loop(func_cond, func_body,
[iteration, scores_margin])
candidate_scores = math_ops.add(scores_fac, margin_multiplier * scores_margin)
argmax_index = math_ops.to_int32(
math_ops.argmax(candidate_scores, axis=0))
best_medoid = math_ops.to_int32(cluster_member_ids[argmax_index])
chosen_ids = update_1d_tensor(chosen_ids, cluster_idx, best_medoid)
return chosen_ids
def update_all_medoids(pairwise_distances, predictions, labels, chosen_ids,
margin_multiplier, margin_type):
"""Updates all cluster medoids a cluster at a time.
Args:
pairwise_distances: 2-D Tensor of pairwise distances.
predictions: 1-D Tensor of predicted cluster assignment.
labels: 1-D Tensor of ground truth cluster assignment.
chosen_ids: 1-D Tensor of cluster centroid indices.
margin_multiplier: multiplication constant.
margin_type: Type of structured margin to use. Default is nmi.
Returns:
chosen_ids: Updated 1-D Tensor of cluster centroid indices.
"""
def func_cond_augmented_pam(iteration, chosen_ids):
del chosen_ids # Unused argument.
return iteration < num_classes
def func_body_augmented_pam(iteration, chosen_ids):
"""Call the update_medoid_per_cluster subroutine."""
mask = math_ops.equal(
math_ops.to_int64(predictions), math_ops.to_int64(iteration))
this_cluster_ids = array_ops.where(mask)
pairwise_distances_subset = array_ops.transpose(
array_ops.gather(
array_ops.transpose(
array_ops.gather(pairwise_distances, this_cluster_ids)),
this_cluster_ids))
chosen_ids = update_medoid_per_cluster(pairwise_distances,
pairwise_distances_subset, labels,
chosen_ids, this_cluster_ids,
iteration, margin_multiplier,
margin_type)
return iteration + 1, chosen_ids
unique_class_ids = array_ops.unique(labels)[0]
num_classes = array_ops.size(unique_class_ids)
iteration = array_ops.constant(0)
_, chosen_ids = control_flow_ops.while_loop(
func_cond_augmented_pam, func_body_augmented_pam, [iteration, chosen_ids])
return chosen_ids
def compute_augmented_facility_locations_pam(pairwise_distances,
labels,
margin_multiplier,
margin_type,
chosen_ids,
pam_max_iter=5):
"""Refine the cluster centroids with PAM local search.
For fixed iterations, alternate between updating the cluster assignment
and updating cluster medoids.
Args:
pairwise_distances: 2-D Tensor of pairwise distances.
labels: 1-D Tensor of ground truth cluster assignment.
margin_multiplier: multiplication constant.
margin_type: Type of structured margin to use. Default is nmi.
chosen_ids: 1-D Tensor of initial estimate of cluster centroids.
pam_max_iter: Number of refinement iterations.
Returns:
chosen_ids: Updated 1-D Tensor of cluster centroid indices.
"""
for _ in range(pam_max_iter):
# update the cluster assignment given the chosen_ids (S_pred)
predictions = get_cluster_assignment(pairwise_distances, chosen_ids)
# update the medoids per each cluster
chosen_ids = update_all_medoids(pairwise_distances, predictions, labels,
chosen_ids, margin_multiplier, margin_type)
return chosen_ids
def compute_gt_cluster_score(pairwise_distances, labels):
"""Compute ground truth facility location score.
Loop over each unique classes and compute average travel distances.
Args:
pairwise_distances: 2-D Tensor of pairwise distances.
labels: 1-D Tensor of ground truth cluster assignment.
Returns:
gt_cluster_score: dtypes.float32 score.
"""
unique_class_ids = array_ops.unique(labels)[0]
num_classes = array_ops.size(unique_class_ids)
iteration = array_ops.constant(0)
gt_cluster_score = array_ops.constant(0.0, dtype=dtypes.float32)
def func_cond(iteration, gt_cluster_score):
del gt_cluster_score # Unused argument.
return iteration < num_classes
def func_body(iteration, gt_cluster_score):
"""Per each cluster, compute the average travel distance."""
mask = math_ops.equal(labels, unique_class_ids[iteration])
this_cluster_ids = array_ops.where(mask)
pairwise_distances_subset = array_ops.transpose(
array_ops.gather(
array_ops.transpose(
array_ops.gather(pairwise_distances, this_cluster_ids)),
this_cluster_ids))
this_cluster_score = -1.0 * math_ops.reduce_min(
math_ops.reduce_sum(
pairwise_distances_subset, axis=0))
return iteration + 1, gt_cluster_score + this_cluster_score
_, gt_cluster_score = control_flow_ops.while_loop(
func_cond, func_body, [iteration, gt_cluster_score])
return gt_cluster_score
def cluster_loss(labels,
embeddings,
margin_multiplier,
enable_pam_finetuning=True,
margin_type='nmi',
print_losses=False):
"""Computes the clustering loss.
The following structured margins are supported:
nmi: normalized mutual information
ami: adjusted mutual information
ari: adjusted random index
vmeasure: v-measure
const: indicator checking whether the two clusterings are the same.
Args:
labels: 2-D Tensor of labels of shape [batch size, 1]
embeddings: 2-D Tensor of embeddings of shape
[batch size, embedding dimension]. Embeddings should be l2 normalized.
margin_multiplier: float32 scalar. multiplier on the structured margin term
See section 3.2 of paper for discussion.
enable_pam_finetuning: Boolean, Whether to run local pam refinement.
See section 3.4 of paper for discussion.
margin_type: Type of structured margin to use. See section 3.2 of
paper for discussion. Can be 'nmi', 'ami', 'ari', 'vmeasure', 'const'.
print_losses: Boolean. Option to print the loss.
Paper: https://arxiv.org/abs/1612.01213.
Returns:
clustering_loss: A float32 scalar `Tensor`.
Raises:
ImportError: If sklearn dependency is not installed.
"""
if not HAS_SKLEARN:
raise ImportError('Cluster loss depends on sklearn.')
pairwise_distances = pairwise_distance(embeddings)
labels = array_ops.squeeze(labels)
all_ids = math_ops.range(array_ops.shape(embeddings)[0])
# Compute the loss augmented inference and get the cluster centroids.
chosen_ids = compute_augmented_facility_locations(pairwise_distances, labels,
all_ids, margin_multiplier,
margin_type)
# Given the predicted centroids, compute the clustering score.
score_pred = compute_facility_energy(pairwise_distances, chosen_ids)
# Branch whether to use PAM finetuning.
if enable_pam_finetuning:
# Initialize with augmented facility solution.
chosen_ids = compute_augmented_facility_locations_pam(pairwise_distances,
labels,
margin_multiplier,
margin_type,
chosen_ids)
score_pred = compute_facility_energy(pairwise_distances, chosen_ids)
# Given the predicted centroids, compute the cluster assignments.
predictions = get_cluster_assignment(pairwise_distances, chosen_ids)
# Compute the clustering (i.e. NMI) score between the two assignments.
clustering_score_pred = compute_clustering_score(labels, predictions,
margin_type)
# Compute the clustering score from labels.
score_gt = compute_gt_cluster_score(pairwise_distances, labels)
# Compute the hinge loss.
clustering_loss = math_ops.maximum(
score_pred + margin_multiplier * (1.0 - clustering_score_pred) - score_gt,
0.0,
name='clustering_loss')
clustering_loss.set_shape([])
if print_losses:
clustering_loss = logging_ops.Print(
clustering_loss,
['clustering_loss: ', clustering_loss, array_ops.shape(
clustering_loss)])
# Clustering specific summary.
summary.scalar('losses/score_pred', score_pred)
summary.scalar('losses/' + margin_type, clustering_score_pred)
summary.scalar('losses/score_gt', score_gt)
return clustering_loss
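# Minimal usage sketch (not part of the original library file): exercising
# triplet_semihard_loss defined above on dummy data. It assumes a TF 1.x
# graph-mode setup; the random embeddings and integer labels stand in for a
# real model's output, and the embeddings are l2-normalized as the docstring
# requires.
def _triplet_semihard_loss_usage_sketch():
  import numpy as np
  import tensorflow as tf

  rng = np.random.RandomState(0)
  embeddings = tf.constant(rng.rand(8, 4).astype('float32'))
  embeddings = tf.nn.l2_normalize(embeddings, axis=1)
  labels = tf.constant([0, 0, 1, 1, 2, 2, 3, 3], dtype=tf.int32)
  loss = triplet_semihard_loss(labels, embeddings, margin=1.0)
  with tf.Session() as sess:
    print(sess.run(loss))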
| [
"[email protected]"
] | |
770d33e367fca4988fdf64c78ac4aef27d69ca8f | 9682dab1ce9e00e11708872fa26febc847d4d18c | /pycorrector/seq2seq/corpus_reader.py | 46822ab4f180f6abb8baed21724debbfb98f1cc9 | [
"Apache-2.0"
] | permissive | fireflycsq/pycorrector | 1ed2a6df9ec05bb9055b2052f92301212ebab235 | d1096e4cee99ba95bb4df945707bbd2b8972717a | refs/heads/master | 2020-03-21T15:43:51.228521 | 2018-06-12T07:34:44 | 2018-06-12T07:34:44 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,961 | py | # -*- coding: utf-8 -*-
# Author: XuMing <[email protected]>
# Brief: Corpus for model
import random
from pycorrector.seq2seq.reader import Reader, PAD_TOKEN, EOS_TOKEN, GO_TOKEN
class FCEReader(Reader):
"""
Read FCE data set
"""
UNKNOWN_TOKEN = 'UNK'
DROPOUT_TOKENS = {"a", "an", "the", "'ll", "'s", "'m", "'ve"}
REPLACEMENTS = {"there": "their", "their": "there", "then": "than", "than": "then"}
def __init__(self, config, train_path=None, token_2_id=None,
dropout_prob=0.25, replacement_prob=0.25, dataset_copies=2):
super(FCEReader, self).__init__(
config, train_path=train_path, token_2_id=token_2_id,
special_tokens=[PAD_TOKEN, GO_TOKEN, EOS_TOKEN, FCEReader.UNKNOWN_TOKEN],
dataset_copies=dataset_copies)
self.dropout_prob = dropout_prob
self.replacement_prob = replacement_prob
self.UNKNOWN_ID = self.token_2_id[FCEReader.UNKNOWN_TOKEN]
def read_samples_by_string(self, path):
with open(path, 'r', encoding='utf-8') as f:
while True:
line_src = f.readline()
line_dst = f.readline()
if not line_src or len(line_src) < 5:
break
source = line_src.lower()[5:].strip().split()
target = line_dst.lower()[5:].strip().split()
if self.config.enable_special_error:
new_source = []
for token in source:
# Random dropout words from the input
dropout_token = (token in FCEReader.DROPOUT_TOKENS and
random.random() < self.dropout_prob)
replace_token = (token in FCEReader.REPLACEMENTS and
random.random() < self.replacement_prob)
if replace_token:
                            new_source.append(FCEReader.REPLACEMENTS[token])
elif not dropout_token:
new_source.append(token)
source = new_source
yield source, target
def unknown_token(self):
return FCEReader.UNKNOWN_TOKEN
def read_tokens(self, path):
i = 0
with open(path, 'r', encoding='utf-8') as f:
for line in f:
# Input the correct text, which start with 0
if i % 2 == 1:
if line and len(line) > 5:
yield line.lower()[5:].strip().split()
i += 1
class CGEDReader(Reader):
"""
Read CGED data set
"""
UNKNOWN_TOKEN = 'UNK'
def __init__(self, config, train_path=None, token_2_id=None, dataset_copies=2):
super(CGEDReader, self).__init__(
config, train_path=train_path, token_2_id=token_2_id,
special_tokens=[PAD_TOKEN, GO_TOKEN, EOS_TOKEN, CGEDReader.UNKNOWN_TOKEN],
dataset_copies=dataset_copies)
self.UNKNOWN_ID = self.token_2_id[CGEDReader.UNKNOWN_TOKEN]
def read_samples_by_string(self, path):
with open(path, 'r', encoding='utf-8') as f:
while True:
line_src = f.readline()
line_dst = f.readline()
if not line_src or len(line_src) < 5:
break
source = line_src.lower()[5:].strip().split()
target = line_dst.lower()[5:].strip().split()
yield source, target
def unknown_token(self):
return CGEDReader.UNKNOWN_TOKEN
def read_tokens(self, path, is_infer=False):
i = 0
with open(path, 'r', encoding='utf-8') as f:
for line in f:
                # Only the corrected (target) line of each source/target pair is used
if i % 2 == 1:
if line and len(line) > 5:
yield line.lower()[5:].strip().split()
i += 1
| [
"[email protected]"
] | |
4a810f7029a6f0806c1dc6e4f8679c877af55d4b | 2bb90b620f86d0d49f19f01593e1a4cc3c2e7ba8 | /pardus/tags/2007.2/kernel/drivers/eciadsl/actions.py | 77a821cb1f94a9b37fd3943be25d766c74b060e7 | [] | no_license | aligulle1/kuller | bda0d59ce8400aa3c7ba9c7e19589f27313492f7 | 7f98de19be27d7a517fe19a37c814748f7e18ba6 | refs/heads/master | 2021-01-20T02:22:09.451356 | 2013-07-23T17:57:58 | 2013-07-23T17:57:58 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 665 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2005, 2006 TUBITAK/UEKAE
# Licensed under the GNU General Public License, version 2.
# See the file http://www.gnu.org/copyleft/gpl.txt.
from pisi.actionsapi import autotools
from pisi.actionsapi import pisitools
from pisi.actionsapi import get
WorkDir = "eciadsl-usermode-0.11"
def setup():
pisitools.dosed("eciadsl-config.tk", "set BIN_DIR \"/usr/local/bin\"", "set BIN_DIR \"/usr/bin\"")
autotools.configure()
def build():
autotools.make()
def install():
autotools.rawInstall("DESTDIR=%s" % get.installDIR())
pisitools.dodoc("README", "INSTALL", "BUGS", "TODO", "TROUBLESHOOTING")
| [
"[email protected]"
] | |
8c657e2a7939065498bf9e9e9abba7751a7b2714 | cc3ded8dc35ec9f52e7cee6a840fb6d3c92b3185 | /transformers_repo/tests/test_modeling_ctrl.py | 3d1a1cb2dc728952f1ef36f667b59ccf7af1a48b | [
"MIT",
"Apache-2.0"
] | permissive | see--/natural-question-answering | 04e3a38022dcb78a20f1632749cb82bb40a3d8aa | 9d31c8dee0ff799d190a2a351f4857224788a5ca | refs/heads/master | 2023-08-17T00:10:17.594705 | 2020-03-12T21:38:56 | 2020-03-12T21:38:56 | 234,064,034 | 90 | 24 | MIT | 2023-09-06T17:32:48 | 2020-01-15T11:19:50 | Python | UTF-8 | Python | false | false | 8,330 | py | # coding=utf-8
# Copyright 2018 Salesforce and HuggingFace Inc. team.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from transformers import is_torch_available
from .test_configuration_common import ConfigTester
from .test_modeling_common import ModelTesterMixin, ids_tensor
from .utils import CACHE_DIR, require_torch, slow, torch_device
if is_torch_available():
from transformers import CTRLConfig, CTRLModel, CTRL_PRETRAINED_MODEL_ARCHIVE_MAP, CTRLLMHeadModel
@require_torch
class CTRLModelTest(ModelTesterMixin, unittest.TestCase):
all_model_classes = (CTRLModel, CTRLLMHeadModel) if is_torch_available() else ()
test_pruning = False
test_torchscript = False
test_resize_embeddings = False
test_head_masking = False
class CTRLModelTester(object):
def __init__(
self,
parent,
batch_size=13,
seq_length=7,
is_training=True,
use_token_type_ids=True,
use_input_mask=True,
use_labels=True,
use_mc_token_ids=True,
vocab_size=99,
hidden_size=32,
num_hidden_layers=5,
num_attention_heads=4,
intermediate_size=37,
hidden_act="gelu",
hidden_dropout_prob=0.1,
attention_probs_dropout_prob=0.1,
max_position_embeddings=512,
type_vocab_size=16,
type_sequence_label_size=2,
initializer_range=0.02,
num_labels=3,
num_choices=4,
scope=None,
):
self.parent = parent
self.batch_size = batch_size
self.seq_length = seq_length
self.is_training = is_training
self.use_token_type_ids = use_token_type_ids
self.use_input_mask = use_input_mask
self.use_labels = use_labels
self.use_mc_token_ids = use_mc_token_ids
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.hidden_act = hidden_act
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.type_vocab_size = type_vocab_size
self.type_sequence_label_size = type_sequence_label_size
self.initializer_range = initializer_range
self.num_labels = num_labels
self.num_choices = num_choices
self.scope = scope
def prepare_config_and_inputs(self):
input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
input_mask = None
if self.use_input_mask:
input_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
token_type_ids = None
if self.use_token_type_ids:
token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
mc_token_ids = None
if self.use_mc_token_ids:
mc_token_ids = ids_tensor([self.batch_size, self.num_choices], self.seq_length)
sequence_labels = None
token_labels = None
choice_labels = None
if self.use_labels:
sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
choice_labels = ids_tensor([self.batch_size], self.num_choices)
config = CTRLConfig(
vocab_size=self.vocab_size,
n_embd=self.hidden_size,
n_layer=self.num_hidden_layers,
n_head=self.num_attention_heads,
# intermediate_size=self.intermediate_size,
# hidden_act=self.hidden_act,
# hidden_dropout_prob=self.hidden_dropout_prob,
# attention_probs_dropout_prob=self.attention_probs_dropout_prob,
n_positions=self.max_position_embeddings,
n_ctx=self.max_position_embeddings
# type_vocab_size=self.type_vocab_size,
# initializer_range=self.initializer_range
)
head_mask = ids_tensor([self.num_hidden_layers, self.num_attention_heads], 2)
return (
config,
input_ids,
input_mask,
head_mask,
token_type_ids,
mc_token_ids,
sequence_labels,
token_labels,
choice_labels,
)
def check_loss_output(self, result):
self.parent.assertListEqual(list(result["loss"].size()), [])
def create_and_check_ctrl_model(self, config, input_ids, input_mask, head_mask, token_type_ids, *args):
model = CTRLModel(config=config)
model.to(torch_device)
model.eval()
model(input_ids, token_type_ids=token_type_ids, head_mask=head_mask)
model(input_ids, token_type_ids=token_type_ids)
sequence_output, presents = model(input_ids)
result = {
"sequence_output": sequence_output,
"presents": presents,
}
self.parent.assertListEqual(
list(result["sequence_output"].size()), [self.batch_size, self.seq_length, self.hidden_size]
)
self.parent.assertEqual(len(result["presents"]), config.n_layer)
def create_and_check_lm_head_model(self, config, input_ids, input_mask, head_mask, token_type_ids, *args):
model = CTRLLMHeadModel(config)
model.to(torch_device)
model.eval()
loss, lm_logits, _ = model(input_ids, token_type_ids=token_type_ids, labels=input_ids)
result = {"loss": loss, "lm_logits": lm_logits}
self.parent.assertListEqual(list(result["loss"].size()), [])
self.parent.assertListEqual(
list(result["lm_logits"].size()), [self.batch_size, self.seq_length, self.vocab_size]
)
def prepare_config_and_inputs_for_common(self):
config_and_inputs = self.prepare_config_and_inputs()
(
config,
input_ids,
input_mask,
head_mask,
token_type_ids,
mc_token_ids,
sequence_labels,
token_labels,
choice_labels,
) = config_and_inputs
inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "head_mask": head_mask}
return config, inputs_dict
def setUp(self):
self.model_tester = CTRLModelTest.CTRLModelTester(self)
self.config_tester = ConfigTester(self, config_class=CTRLConfig, n_embd=37)
def test_config(self):
self.config_tester.run_common_tests()
def test_ctrl_model(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_ctrl_model(*config_and_inputs)
def test_ctrl_lm_head_model(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_lm_head_model(*config_and_inputs)
@slow
def test_model_from_pretrained(self):
for model_name in list(CTRL_PRETRAINED_MODEL_ARCHIVE_MAP.keys())[:1]:
model = CTRLModel.from_pretrained(model_name, cache_dir=CACHE_DIR)
self.assertIsNotNone(model)
| [
"[email protected]"
] | |
03bcad2f49a796e50d41e05a57f6c096d03928b1 | 642e8d6d8cd8d08a73bdcf82ae9689a09284025c | /celery/tests/test_pickle.py | bf2f4ccc4968b6c53b399fa9e10f0ffbaf43368d | [
"BSD-3-Clause"
] | permissive | abecciu/celery | 941f29c033b54b766166f17aa8c5e4be05df08b9 | f0c399e34d56c7a2a14cb42bfb2b6455c68ef0c0 | refs/heads/master | 2021-01-14T12:57:11.230199 | 2009-09-10T13:44:51 | 2009-09-10T13:44:51 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,631 | py | import unittest
from celery.serialization import pickle
class RegularException(Exception):
pass
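# An exception whose __init__ takes an extra argument; both values are forwarded to the
# base class so self.args is complete and the instance can be rebuilt when unpickling.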
class ArgOverrideException(Exception):
def __init__(self, message, status_code=10):
self.status_code = status_code
super(ArgOverrideException, self).__init__(message, status_code)
class TestPickle(unittest.TestCase):
# See: http://www.reddit.com/r/django/comments/8gdwi/
# celery_distributed_task_queue_for_django/c097hr1
def test_pickle_regular_exception(self):
e = None
try:
raise RegularException("RegularException raised")
except RegularException, e:
pass
pickled = pickle.dumps({"exception": e})
unpickled = pickle.loads(pickled)
exception = unpickled.get("exception")
self.assertTrue(exception)
self.assertTrue(isinstance(exception, RegularException))
self.assertEquals(exception.args, ("RegularException raised", ))
def test_pickle_arg_override_exception(self):
e = None
try:
raise ArgOverrideException("ArgOverrideException raised",
status_code=100)
except ArgOverrideException, e:
pass
pickled = pickle.dumps({"exception": e})
unpickled = pickle.loads(pickled)
exception = unpickled.get("exception")
self.assertTrue(exception)
self.assertTrue(isinstance(exception, ArgOverrideException))
self.assertEquals(exception.args, ("ArgOverrideException raised",
100))
self.assertEquals(exception.status_code, 100)
| [
"[email protected]"
] | |
ee8fcb6113dfdf4554705e7c1cf7fe5e5b3c6017 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03253/s028075824.py | 5a75ef3726225d5282ae0dd3a1b8e2323b4d2dfd | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,029 | py | import sys
import math
sys.setrecursionlimit(1000000) # 再帰上限を増やす
def calc_combi(n, m, mod=1000000007):
""" nCmの組み合わせ数を数える """
if n - m < m:
return calc_combi(n, n - m)
ans_mull, ans_div = 1, 1
for i in range(m):
ans_mull *= (n - i)
ans_div *= (i + 1)
ans_mull %= mod
ans_div %= mod
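    # mod is prime, so pow(ans_div, mod - 2, mod) is the modular inverse of ans_div
    # (Fermat's little theorem), giving nCm % mod without a true division.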
ans = ans_mull * pow(ans_div, mod - 2, mod) % mod
return ans
def main():
input = sys.stdin.readline # 文字列に対してinputした場合は、rstripするのを忘れずに!
N, M = map(int, input().rstrip().split())
mod = 1000000007
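    # Factorize M: a prime appearing `count` times can be split across the N factors
    # in C(count + N - 1, N - 1) ways (stars and bars); the per-prime counts multiply.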
ans = 1
    for i in range(2, math.ceil(math.sqrt(M)) + 1):
if M % i == 0:
count = 0
while M % i == 0:
                M //= i
count += 1
ans *= calc_combi(count + N - 1, N - 1, mod)
ans %= mod
if M != 1:
ans *= calc_combi(N, 1, mod)
ans %= mod
print(ans)
if __name__ == '__main__':
main() | [
"[email protected]"
] | |
d7d22bee196dd88cf02a813ed99e658a7a9134a1 | d0758e0ca004226cec8ad8b26c9565c98534a8b8 | /11-videogames/Julio/3 - Moscas/game.py | 91460a45758ba15d812fa0b32ac1bfb74b85c7e4 | [] | no_license | pythoncanarias/eoi | 334d64a96afc76ac1fa10282378f291b6d8c94b3 | 349367254f85e3e4273cede067ca950913a1332c | refs/heads/master | 2023-07-06T08:00:11.366345 | 2023-06-30T15:19:33 | 2023-06-30T15:19:33 | 222,742,870 | 26 | 19 | null | 2023-06-25T16:03:46 | 2019-11-19T16:41:25 | Jupyter Notebook | UTF-8 | Python | false | false | 1,274 | py | import pygame
import math
from settings import *
from sprites import Player
class Game:
def __init__(self):
pygame.init()
self.screen = pygame.display.set_mode([WIDTH, HEIGHT])
pygame.display.set_caption(TITLE)
self.clock = pygame.time.Clock()
self.all_sprites = pygame.sprite.Group()
for _ in range(32):
Player(self, 10, 10)
def run(self):
self.playing = True
while self.playing:
self.dt = self.clock.tick(FPS)
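            # tick(FPS) caps the frame rate and returns the milliseconds since the previous frame.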
self.events()
self.update()
self.draw()
def events(self):
for event in pygame.event.get():
if event.type == pygame.QUIT:
self.playing = False
def update(self):
self.all_sprites.update()
def draw(self):
self.screen.fill(BLACK)
for x in range(0, WIDTH, TILESIZE):
pygame.draw.line(self.screen, LIGHTGREY, (x, 0), (x, HEIGHT))
for y in range(0, HEIGHT, TILESIZE):
pygame.draw.line(self.screen, LIGHTGREY, (0, y), (WIDTH, y))
self.all_sprites.draw(self.screen)
# Nothing else to draw, let's show it!
pygame.display.flip()
game = Game()
game.run()
| [
"[email protected]"
] | |
531328a7203522f625b1e1d7157e9d0d3de657c3 | ab40571d5051ad53c0f205fa797ba36eac516d06 | /language/serene/wiki_index.py | d3f3722d348a60910e695ac044d8e3a711095a05 | [
"Apache-2.0",
"LicenseRef-scancode-generic-cla"
] | permissive | google-research/language | e941b1a92ab46d40d8d03bb0c314905cb6902ce2 | ac9447064195e06de48cc91ff642f7fffa28ffe8 | refs/heads/master | 2023-08-24T23:10:13.207294 | 2023-05-25T20:47:18 | 2023-05-25T22:29:27 | 153,201,352 | 1,567 | 371 | Apache-2.0 | 2023-07-06T23:03:15 | 2018-10-16T00:58:14 | Python | UTF-8 | Python | false | false | 14,914 | py | # coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=line-too-long
r"""Build index from wikipedia embeddings from model and save to disk.
"""
# pylint: enable=line-too-long
from concurrent import futures
import os
import time
from typing import Dict, Optional, Text
from absl import app
from absl import flags
from absl import logging
import dataclasses
from fever_scorer.scorer import fever_score
from language.serene import config
from language.serene import constants
from language.serene import util
import numpy as np
import tensorflow.compat.v2 as tf
import tqdm
FLAGS = flags.FLAGS
flags.DEFINE_string('wiki_embedding_dir', None,
'Directory for embedding files.')
flags.DEFINE_string('claim_id_path', None, 'File for claim ids')
flags.DEFINE_string('claim_embedding_path', None, 'File for claim embeddings')
flags.DEFINE_integer('n_shards', None, 'Number of shards in embedding_dir.')
flags.DEFINE_string('out_path', None, 'Output location for predictions.')
flags.DEFINE_bool('l2_norm', False, 'Whether to apply L2 Norm to embeddings')
flags.DEFINE_bool('copy_to_tmp', False,
'Whether to copy embeddings to tmp before reading them.')
flags.DEFINE_string('device', '/CPU:0', 'TF Device')
flags.DEFINE_integer('batch_size', 256, 'batch size for matrix ops')
@dataclasses.dataclass(frozen=True, eq=True)
class IndexKey:
wikipedia_url: Text
sentence_id: int
def copy_file_to_tmp(path_pair):
orig_path, tmp_path = path_pair
with util.log_time(f'Copy: {orig_path} To: {tmp_path}'):
util.safe_copy(orig_path, tmp_path)
class ShardedEmbPaths:
"""Utility class for managing sharded embedding paths."""
def __init__(self, *, embedding_dir, n_shards):
"""Initialize sharded paths.
Args:
embedding_dir: Directory of embeddings
n_shards: Number of shards in directory
"""
self._embedding_dir = embedding_dir
self._n_shards = n_shards
self._tmp_dir = os.path.join('/tmp',
util.random_string(prefix='embedding-tmp'))
def files(self, tmp = False):
"""Return the files for the sharded embeddings.
Args:
tmp: Whether to return paths in /tmp or not
Returns:
Paths of all sharded files.
"""
sharded_files = []
for shard in range(self._n_shards):
sharded_files.extend(self.shard_files(shard, tmp=tmp))
return sharded_files
def orig_tmp_file_pairs(self):
"""Creates a list of tuples of (original_path, tmp_path).
Returns:
A list of tuples where first path is original and second is tmp location
"""
pairs = []
for shard in range(self._n_shards):
orig_paths = self.shard_files(shard, tmp=False)
tmp_paths = self.shard_files(shard, tmp=True)
pairs.extend(list(zip(orig_paths, tmp_paths)))
return pairs
def shard_files(self, shard, tmp = False):
"""Return file paths that correspond to the given shard.
Args:
shard: Shard to make paths for
tmp: Whether to return tmp version or not
Returns:
A list of files for the shard
"""
return [
self.shard_emb(shard, tmp=tmp),
self.shard_urls(shard, tmp=tmp),
self.shard_sentence_ids(shard, tmp=tmp),
]
def _file_dir(self, tmp):
if tmp:
return self._tmp_dir
else:
return self._embedding_dir
def shard_emb(self, shard, tmp = False):
"""Get the path for the sharded embedding.
Args:
shard: Shard to get embedding path for
tmp: Whether to return /tmp version of path
Returns:
Path to embedding for given shard
"""
return os.path.join(
self._file_dir(tmp), f'embeddings_{shard}_{self._n_shards}.npy')
def shard_urls(self, shard, tmp = False):
"""Get the path for the sharded urls.
Args:
shard: Shard to get urls path for
tmp: Whether to return /tmp version of path
Returns:
Path to urls for given shard
"""
return os.path.join(
self._file_dir(tmp), f'urls_{shard}_{self._n_shards}.npy')
def shard_sentence_ids(self, shard, tmp = False):
"""Get the path for the sharded sentence_ids.
Args:
shard: Shard to get sentence_ids path for
tmp: Whether to return /tmp version of path
Returns:
Path to sentence_ids for given shard
"""
return os.path.join(
self._file_dir(tmp), f'sentence_ids_{shard}_{self._n_shards}.npy')
def to_tmp(self):
path_pairs = self.orig_tmp_file_pairs()
with futures.ThreadPoolExecutor(max_workers=len(path_pairs)) as executor:
list(tqdm.tqdm(executor.map(copy_file_to_tmp, path_pairs)))
def read_examples(
*, embedding_dir, n_shards,
copy_to_tmp):
"""Read and yield examples from embeddings in directory.
Args:
embedding_dir: The directory of .npy embedding files
n_shards: Number of shards used to create the data
copy_to_tmp: Whether to copy embeddings to /tmp before reading, this can
significantly improve throughput compared to remote filesystem reads
Yields:
Tuples of an integer identifier, wikipedia_url, sentence_id, and embedding
"""
idx = 0
sharded_paths = ShardedEmbPaths(
embedding_dir=embedding_dir, n_shards=n_shards)
logging.info('Copying files to tmp')
if copy_to_tmp:
sharded_paths.to_tmp()
logging.info('Starting example read')
for shard in tqdm.trange(n_shards):
emb_path = sharded_paths.shard_emb(shard, tmp=copy_to_tmp)
urls_path = sharded_paths.shard_urls(shard, tmp=copy_to_tmp)
sentence_ids_path = sharded_paths.shard_sentence_ids(shard, tmp=copy_to_tmp)
logging.info('Emb path: %s', emb_path)
logging.info('Urls path: %s', urls_path)
logging.info('Sent path: %s', sentence_ids_path)
with \
util.safe_open(emb_path, 'rb') as emb_f,\
util.safe_open(urls_path, 'rb') as url_f,\
util.safe_open(sentence_ids_path, 'rb') as sid_f:
load_start = time.time()
embeddings = np.load(emb_f)
wikipedia_urls = np.load(url_f)
sentence_ids = np.load(sid_f)
load_end = time.time()
logging.info('Reading shard %s, Seconds: %s', shard,
load_end - load_start)
for wiki_url, sid, emb in zip(wikipedia_urls, sentence_ids, embeddings):
yield idx, wiki_url, sid, emb
idx += 1
class Index:
"""Index that can be used for brute for neighbor search."""
def __init__(self,
*,
claim_embedding_path,
claim_id_path,
wiki_embedding_dir,
n_shards,
copy_to_tmp,
device = '/CPU:0',
batch_size = 256,
l2_norm=False):
"""Configure index.
Claim ids and embeddings are related through their position (eg, first
embedding corresponds to first id).
Args:
claim_embedding_path: Path to claim embeddings
claim_id_path: Path to claim ids
wiki_embedding_dir: Directory of .npy embedding files
n_shards: Number of shards to read
copy_to_tmp: Whether to copy embeddings to tmp (eg, maybe they are on slow
network file system)
device: Tensorflow device to use for batched matrix multiplies
batch_size: Batch size for batched matrix multiplies
l2_norm: Whether to impose the L2 norm on vectors
"""
self._claim_embedding_path = claim_embedding_path
self._claim_id_path = claim_id_path
self._wiki_embedding_dir = wiki_embedding_dir
self._n_shards = n_shards
self._batch_size = batch_size
self._l2_norm = l2_norm
self._copy_to_tmp = copy_to_tmp
self._device = device
self._wiki_embeddings: Optional[tf.Tensor] = None
self._key_to_idx: Optional[Dict[IndexKey, int]] = None
self._idx_to_key: Optional[Dict[int, IndexKey]] = None
self._claim_embeddings: Optional[tf.Tensor] = None
self._claim_ids: Optional[tf.Tensor] = None
self._claim_id_to_idx: Optional[Dict[int, int]] = None
def build(self, load_wiki=True, load_fever=True):
"""Build the index in memory.
Args:
load_wiki: Whether to load wiki embeddings
load_fever: Whether to load fever claim embeddings
"""
if load_wiki:
self._load_wiki_embeddings()
if load_fever:
self._load_fever_embeddings()
def _load_fever_embeddings(self):
"""Load fever claim embeddings and ids."""
with util.safe_open(self._claim_embedding_path, 'rb') as f:
claim_embeddings = np.load(f)
with util.safe_open(self._claim_id_path, 'rb') as f:
claim_ids = np.load(f)
self._claim_embeddings = tf.convert_to_tensor(claim_embeddings)
if self._l2_norm:
self._claim_embeddings = tf.math.l2_normalize(
self._claim_embeddings, axis=-1)
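      # With L2-normalized embeddings, the dot products taken in score_claim_to_wiki
      # are cosine similarities.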
self._claim_ids = claim_ids
self._claim_id_to_idx = {}
for idx, claim_id in enumerate(self._claim_ids):
self._claim_id_to_idx[claim_id] = idx
def _load_wiki_embeddings(self):
"""Build an index from embeddings."""
logging.info('Read: %s', self._wiki_embedding_dir)
logging.info('N Shards: %s', self._n_shards)
examples = read_examples(
embedding_dir=self._wiki_embedding_dir,
n_shards=self._n_shards,
copy_to_tmp=self._copy_to_tmp,
)
logging.info('Starting indexing')
embeddings = []
self._key_to_idx = {}
self._idx_to_key = {}
for idx, wiki_url, sid, emb in tqdm.tqdm(examples, mininterval=10):
embeddings.append(emb)
self._key_to_idx[IndexKey(wiki_url, sid)] = idx
self._idx_to_key[idx] = IndexKey(wiki_url, sid)
self._wiki_embeddings = tf.convert_to_tensor(np.vstack(embeddings))
if self._l2_norm:
self._wiki_embeddings = tf.math.l2_normalize(
self._wiki_embeddings, axis=-1)
logging.info('Embedding Shape: %s', self._wiki_embeddings.shape)
def score_claim_to_wiki(self, n = 5):
"""Score all claims to wikipedia and return the predictions.
Args:
n: Number of predictions to make per claim
Returns:
Top ranked wikipedia sentences per claim
"""
logging.info('TF Initializing')
with tf.device(self._device):
idx = 0
top_idx = []
top_scores = []
bar = tqdm.tqdm(total=self._claim_embeddings.shape[0])
# batch_size: over n_claims
while idx < self._claim_embeddings.shape[0]:
# (batch_size, emb_dim)
batch = self._claim_embeddings[idx:idx + self._batch_size, :]
# (n_wiki_embeddings, batch_size)
batch_scores = tf.linalg.matmul(
# wiki_embeddings: (n_wiki_embeddings, emb_dim)
self._wiki_embeddings,
batch,
transpose_b=True)
# <float>(batch_size, n_wiki_embeddings)
batch_scores = tf.transpose(batch_scores)
# <float>(batch_size, n), <long>(batch_size, n)
batch_top_scores, batch_top_idx = tf.nn.top_k(batch_scores, k=n)
top_idx.append(batch_top_idx.numpy())
top_scores.append(batch_top_scores.numpy())
idx += self._batch_size
bar.update(self._batch_size)
bar.close()
top_idx = np.vstack(top_idx)
top_scores = np.vstack(top_scores)
claim_id_to_scored_keys = {}
for claim_index in range(top_idx.shape[0]):
row = top_idx[claim_index]
wiki_keys = []
for idx in row:
wiki_keys.append(self._idx_to_key[idx])
claim_id = self._claim_ids[claim_index]
claim_id_to_scored_keys[claim_id] = {
'wiki_keys': wiki_keys,
'scores': top_scores[claim_index]
}
return claim_id_to_scored_keys
def main(_):
flags.mark_flag_as_required('out_path')
flags.mark_flag_as_required('wiki_embedding_dir')
flags.mark_flag_as_required('claim_id_path')
flags.mark_flag_as_required('claim_embedding_path')
flags.mark_flag_as_required('n_shards')
tf.enable_v2_behavior()
conf = config.Config()
logging.info('wiki_embedding_dir: %s', FLAGS.wiki_embedding_dir)
logging.info('n_shards: %s', FLAGS.n_shards)
logging.info('l2_norm: %s', FLAGS.l2_norm)
logging.info('claim_id_path: %s', FLAGS.claim_id_path)
logging.info('claim_embedding_path: %s', FLAGS.claim_embedding_path)
logging.info('copy_to_tmp: %s', FLAGS.copy_to_tmp)
logging.info('batch_size: %s', FLAGS.batch_size)
with util.log_time('Building index'):
index = Index(
wiki_embedding_dir=FLAGS.wiki_embedding_dir,
n_shards=FLAGS.n_shards,
l2_norm=FLAGS.l2_norm,
claim_id_path=FLAGS.claim_id_path,
claim_embedding_path=FLAGS.claim_embedding_path,
copy_to_tmp=FLAGS.copy_to_tmp,
batch_size=FLAGS.batch_size,
device=FLAGS.device,
)
index.build()
logging.info('Reading claims from: %s', conf.fever_dev)
dev = [
c for c in util.read_jsonlines(conf.fever_dev)
if c['label'] != constants.NOT_ENOUGH_INFO
]
logging.info('Making predictions')
claim_id_to_scored_keys = index.score_claim_to_wiki(n=5)
formatted_predictions = []
actual = []
for claim in tqdm.tqdm(dev):
claim_id = claim['id']
predicted_evidence = []
scored_keys = claim_id_to_scored_keys[claim_id]
for index_key in scored_keys['wiki_keys']:
# sentence_id is a numpy int, and fever scoring script only
# accepts python int.
predicted_evidence.append([
index_key.wikipedia_url, int(index_key.sentence_id)])
formatted_predictions.append({
'id': claim_id,
'predicted_label': constants.SUPPORTS,
'predicted_evidence': predicted_evidence,
})
actual.append({'evidence': claim['evidence'], 'label': claim['label']})
logging.info('FEVER Metrics')
strict_score, accuracy_score, precision, recall, f1 = fever_score(
formatted_predictions, actual)
logging.info('Strict Score: %s', strict_score)
logging.info('Accuracy Score: %s', accuracy_score)
logging.info('Precision: %s', precision)
logging.info('Recall: %s', recall)
logging.info('F1: %s', f1)
logging.info('Saving predictions and metrics to: %s', FLAGS.out_path)
util.write_json(
{
'predictions': formatted_predictions,
'metrics': {
'strict_score': strict_score,
'accuracy_score': accuracy_score,
'precision': precision,
'recall': recall,
'f1': f1,
}
}, FLAGS.out_path)
if __name__ == '__main__':
app.run(main)
| [
"[email protected]"
] | |
5b666edc2f564f9e62ca32f883d55c4dc29f7449 | 567eac942e94c653dda710d52c1eb6db0847a075 | /0x0B-python-input_output/100-append_after.py | c1d85581606a8e1587723e9fb8ea9451eb415a89 | [] | no_license | mecomontes/Higher-Level-Programming | beeb4a410ff99fa062a86bd0a7f4d87a39889283 | 3a78f6eeedc70e2f447c49ccaf0838f5878f651c | refs/heads/main | 2023-06-26T18:02:29.046302 | 2021-07-13T14:58:24 | 2021-07-13T14:58:24 | 385,641,861 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 751 | py | #!/usr/bin/python3
# -*- coding: utf-8 -*-
"""
Created on Tru Jun 3 15:13:37 2020
@author: Robinson Montes
"""
def append_after(filename="", search_string="", new_string=""):
"""
Inserts a line after each line containing a specific string
Arguments:
filename (str): The name of the file
        search_string (str): The string to match
new_string (str): The string to insert after matching
"""
    with open(filename, 'r', encoding='utf-8') as file:
        lines = file.readlines()
    new_lines = []
    for line in lines:
        new_lines.append(line)
        if search_string in line:
            new_lines.append(new_string)
    with open(filename, 'w', encoding='utf-8') as file:
        file.write("".join(new_lines))
| [
"[email protected]"
] | |
9e16773835ce51e64709f46cb6093b668958019f | 2bf6bf2eeb72b9eb4b8a41f36fb56585e140a611 | /pickle_to_tex.py | 82080a24d542ddb03dc447346c6cdf65cb6965bc | [
"MIT"
] | permissive | Daniel-Bu/exposure | 0fcff9177764cc85bb46298cfcdfaf5aeb29b6ec | 74549ac75822fba41125cb0a086905a51db49d7b | refs/heads/master | 2020-09-27T07:34:49.726377 | 2019-12-07T07:06:34 | 2019-12-07T07:06:34 | 226,464,955 | 0 | 0 | MIT | 2019-12-07T06:12:36 | 2019-12-07T06:12:35 | null | UTF-8 | Python | false | false | 5,295 | py | # This script converts output pickle to step-by-step latex figures
import numpy as np
import os
import pickle
import shutil
NUM_STEPS = 5
CURVE_STEPS = 8
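# NUM_STEPS: number of retouching steps exported per image.
# CURVE_STEPS: number of piecewise-linear segments used to draw the tone/color curves.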
files = []
filters = [
'Expo.',
'Gam.',
'W.B.',
'Satu.',
'Tone',
'Cst.',
'BW',
'Color',
]
def visualize_detail(name, param, pos):
def map_pos(x, y):
return '(%f,%f)' % (pos[0] + x * 0.8, pos[1] - 1.1 + y * 0.8)
if name == 'Expo.':
return '{Exposure $%+.2f$};' % param[0]
elif name == 'Gam.':
return '{Gamma $1/%.2f$};' % (1 / param[0])
elif name == 'Satu.':
return '{Saturation $+%.2f$};' % param[0]
elif name == 'Cst.':
return '{Contrast $%+.2f$};' % param[0]
elif name == 'BW':
return '{$%+.2f$};' % (param[0])
elif name == 'W.B.':
scaling = 1 / (1e-5 + 0.27 * param[0] + 0.67 * param[1] + 0.06 * param[2])
r, g, b = [int(255 * x * scaling) for x in param]
color = r'{\definecolor{tempcolor}{RGB}{%d,%d,%d}};' % (r, g, b)
return color + '\n' + r'\tikz \fill[tempcolor] (0,0) rectangle (4 ex, 2 ex);'
elif name == 'Tone':
s = '{Tone\quad\quad\quad\quad};\n'
s += r'\draw[<->] %s -- %s -- %s;' % (map_pos(0, 1.1), map_pos(0, 0),
map_pos(1.1, 0))
s += '\n'
for i in range(1):
values = np.array([0] + list(param[0][0][i]))
values /= sum(values) + 1e-30
scale = 1
values *= scale
for j in range(0, CURVE_STEPS):
values[j + 1] += values[j]
for j in range(CURVE_STEPS):
p1 = (1.0 / CURVE_STEPS * j, values[j])
p2 = (1.0 / CURVE_STEPS * (j + 1), values[j + 1])
s += r'\draw[-] %s -- %s;' % (map_pos(*p1), map_pos(*p2))
if j != CURVE_STEPS - 1:
s += '\n'
return s
elif name == 'Color':
s = '{Color\quad\quad\quad\quad};\n'
s += r'\draw[<->] %s -- %s -- %s;' % (map_pos(0, 1.1), map_pos(0, 0),
map_pos(1.1, 0))
s += '\n'
c = ['red', 'green', 'blue']
for i in range(3):
#print(param)
values = np.array([0] + list(param[0][0][i]))
values /= sum(values) + 1e-30
scale = 1
values *= scale
for j in range(0, CURVE_STEPS):
values[j + 1] += values[j]
for j in range(CURVE_STEPS):
p1 = (1.0 / CURVE_STEPS * j, values[j])
p2 = (1.0 / CURVE_STEPS * (j + 1), values[j + 1])
s += r'\draw[%s,-] %s -- %s;' % (c[i], map_pos(*p1), map_pos(*p2))
if j != CURVE_STEPS - 1:
s += '\n'
return s
else:
assert False
def visualize_step(debug_info, step_name, position):
pdf = debug_info['pdf']
filter_id = debug_info['selected_filter_id']
s = ''
s += r'\node[draw, rectangle, thick,minimum height=7em,minimum width=7em](%s) at (%f,%f) {};' % (
step_name, position[0], position[1])
s += '\n'
s += r'\node (%ss) at ([yshift=1.4em]%s.center) {' % (step_name, step_name)
s += '\n'
s += r' \scalebox{0.7}{'
s += '\n'
s += r' \begin{tabular}{|p{0.5cm}p{0.2cm}p{0.5cm}p{0.2cm}|}'
s += '\n'
s += r' \hline'
s += '\n'
def bar(i):
return '\pdfbarSelected' if i == filter_id else '\pdfbar'
for i in range(4):
f1 = filters[i]
b1 = r'%s{%.3f}' % (bar(i), pdf[i] * 3)
f2 = filters[i + 4]
b2 = r'%s{%.3f}' % (bar(i + 4), pdf[i + 4] * 3)
s += r' %s & %s & %s & %s \\' % (f1, b1, f2, b2)
s += '\n'
s += r' \hline'
s += '\n'
s += r' \end{tabular}'
s += '\n'
s += r' }'
s += '\n'
s += r'};'
s += '\n'
s += r'\node (%sd) at ([yshift=-2.0em]%s.center)' % (step_name, step_name)
s += '\n'
s += visualize_detail(
filters[filter_id],
debug_info['filter_debug_info'][filter_id]['filter_parameters'], position)
s += '\n'
return s
def process_dog():
f = 'dog04/a0694.tif_debug.pkl'
debug_info_list = pickle.load(open(f, 'r'))
for i in range(NUM_STEPS):
debug_info = debug_info_list[i]
print(visualize_step(debug_info, 'agent%d' % (i + 1), (4, i * -3)), end=' ')
def process(filename, id, src):
pkl_fn = os.path.join(src, filename)
debug_info_list = pickle.load(open(pkl_fn, 'rb'))
filename = filename[:-10]
target_dir = 'export/{}'.format(id)
os.makedirs(target_dir, exist_ok=True)
for i in range(NUM_STEPS - 1):
shutil.copy(os.path.join(src, filename + '.intermediate%02d.png' % i),
os.path.join(target_dir, 'step%d.png' % (i + 1)))
shutil.copy(os.path.join(src, filename + '.retouched.png'), os.path.join(target_dir, 'final.png'))
shutil.copy(os.path.join(src, filename + '.linear.png'), os.path.join(target_dir, 'input.png'))
with open(target_dir + '/steps.tex', 'w') as f:
for i in range(NUM_STEPS):
debug_info = debug_info_list[i]
print(
visualize_step(debug_info, 'agent%d' % (i + 1), (4, i * -3)),
end=' ',
file=f)
print('##########################################')
print('Note: Please make sure you have pdflatex.')
print('##########################################')
print()
for input_dir in ['outputs']:
for f in os.listdir(input_dir):
if not f.endswith('pkl'):
continue
id = f.split('.')[0]
print('Generating pdf operating sequences for image {}...'.format(id))
process(f, id, src=input_dir)
| [
"[email protected]"
] | |
e0c71dc714a97faaca6e6a3978e34959b43481c4 | 03b6e62060ae8db13bfe1ace9b2ee108d96f785b | /functions/box_utils.py | e7aa8a3a01eff392c2ff53b00e0b58ab115d65cb | [] | no_license | pyaf/ssd | d5321cf2bce62a24c37c8f23ba5a326c61ea4dff | 0e8cf8bbc6fdeb57acd7ceb4fffab647921d3018 | refs/heads/master | 2022-03-08T09:18:32.243214 | 2018-10-29T12:02:31 | 2018-10-29T12:02:31 | 151,805,775 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,420 | py | # -*- coding: utf-8 -*-
import torch
def point_form(boxes):
""" Convert prior_boxes to (xmin, ymin, xmax, ymax)
representation for comparison to point form ground truth data.
Args:
boxes: (tensor) center-size default boxes from priorbox layers.
Return:
boxes: (tensor) Converted xmin, ymin, xmax, ymax form of boxes.
"""
return torch.cat((boxes[:, :2] - boxes[:, 2:]/2, # xmin, ymin
boxes[:, :2] + boxes[:, 2:]/2), 1) # xmax, ymax
def center_size(boxes):
""" Convert prior_boxes to (cx, cy, w, h)
representation for comparison to center-size form ground truth data.
Args:
boxes: (tensor) point_form boxes
Return:
        boxes: (tensor) Converted cx, cy, w, h form of boxes.
"""
    return torch.cat(((boxes[:, 2:] + boxes[:, :2])/2,  # cx, cy
                      boxes[:, 2:] - boxes[:, :2]), 1)  # w, h
def intersect(box_a, box_b):
""" We resize both tensors to [A,B,2] without new malloc:
[A,2] -> [A,1,2] -> [A,B,2]
[B,2] -> [1,B,2] -> [A,B,2]
Then we compute the area of intersect between box_a and box_b.
Args:
box_a: (tensor) bounding boxes, Shape: [A,4].
box_b: (tensor) bounding boxes, Shape: [B,4].
Return:
(tensor) intersection area, Shape: [A,B].
"""
A = box_a.size(0)
B = box_b.size(0)
max_xy = torch.min(box_a[:, 2:].unsqueeze(1).expand(A, B, 2),
box_b[:, 2:].unsqueeze(0).expand(A, B, 2))
min_xy = torch.max(box_a[:, :2].unsqueeze(1).expand(A, B, 2),
box_b[:, :2].unsqueeze(0).expand(A, B, 2))
inter = torch.clamp((max_xy - min_xy), min=0)
return inter[:, :, 0] * inter[:, :, 1]
def jaccard(box_a, box_b):
"""Compute the jaccard overlap of two sets of boxes. The jaccard overlap
is simply the intersection over union of two boxes. Here we operate on
ground truth boxes and default boxes.
E.g.:
        A ∩ B / A ∪ B = A ∩ B / (area(A) + area(B) - A ∩ B)
Args:
box_a: (tensor) Ground truth bounding boxes, Shape: [num_objects,4]
box_b: (tensor) Prior boxes from priorbox layers, Shape: [num_priors,4]
Return:
jaccard overlap: (tensor) Shape: [box_a.size(0), box_b.size(0)]
"""
inter = intersect(box_a, box_b)
area_a = ((box_a[:, 2]-box_a[:, 0]) *
(box_a[:, 3]-box_a[:, 1])).unsqueeze(1).expand_as(inter) # [A,B]
area_b = ((box_b[:, 2]-box_b[:, 0]) *
(box_b[:, 3]-box_b[:, 1])).unsqueeze(0).expand_as(inter) # [A,B]
union = area_a + area_b - inter
return inter / union # [A,B]
def match(threshold, truths, priors, variances, labels, loc_t, conf_t):
"""Match each prior box with the ground truth box of the highest jaccard
overlap, encode the bounding boxes, then return the matched indices
corresponding to both confidence and location preds.
Args:
        threshold: (float) The overlap threshold used when matching boxes.
truths: (tensor) Ground truth boxes, Shape: [num_obj, num_priors].
priors: (tensor) Prior boxes from priorbox layers, Shape: [n_priors,4].
variances: (tensor) Variances corresponding to each prior coord,
Shape: [num_priors, 4].
labels: (tensor) All the class labels for the image, Shape: [num_obj].
loc_t: (tensor) Tensor to be filled w/ endcoded location targets.
conf_t: (tensor) Tensor to be filled w/ matched indices for conf preds.
Return:
The matched indices corresponding to 1)location and 2)confidence preds.
"""
# jaccard index
overlaps = jaccard(
truths,
point_form(priors)
)
# (Bipartite Matching)
# [1,num_objects] best prior for each ground truth
best_prior_overlap, best_prior_idx = overlaps.max(1, keepdim=True)
# [1,num_priors] best ground truth for each prior
best_truth_overlap, best_truth_idx = overlaps.max(0, keepdim=True)
best_truth_idx.squeeze_(0)
best_truth_overlap.squeeze_(0)
best_prior_idx.squeeze_(1)
best_prior_overlap.squeeze_(1)
best_truth_overlap.index_fill_(0, best_prior_idx, 2) # ensure best prior
# TODO refactor: index best_prior_idx with long tensor
# ensure every gt matches with its prior of max overlap
for j in range(best_prior_idx.size(0)):
best_truth_idx[best_prior_idx[j]] = j
matches = truths[best_truth_idx] # Shape: [num_priors,4]
conf = labels[best_truth_idx] + 1 # Shape: [num_priors]
conf[best_truth_overlap < threshold] = 0 # label as background
loc = encode(matches, priors, variances)
# loc_t[idx] = loc # [num_priors,4] encoded offsets to learn
# conf_t[idx] = conf # [num_priors] top class label for each prior
return loc, conf
def encode(matched, priors, variances):
"""Encode the variances from the priorbox layers into the ground truth boxes
we have matched (based on jaccard overlap) with the prior boxes.
Args:
matched: (tensor) Coords of ground truth for each prior in point-form
Shape: [num_priors, 4].
priors: (tensor) Prior boxes in center-offset form
Shape: [num_priors,4].
variances: (list[float]) Variances of priorboxes
Return:
encoded boxes (tensor), Shape: [num_priors, 4]
"""
# dist b/t match center and prior's center
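    # Full SSD parameterization:
    #   t_xy = (gt_center - prior_center) / (variance[0] * prior_wh)
    #   t_wh = log(gt_wh / prior_wh) / variance[1]
    # decode() inverts this transform.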
g_cxcy = (matched[:, :2] + matched[:, 2:])/2 - priors[:, :2]
# encode variance
g_cxcy /= (variances[0] * priors[:, 2:])
# match wh / prior wh
g_wh = (matched[:, 2:] - matched[:, :2]) / priors[:, 2:]
g_wh = torch.log(g_wh) / variances[1]
# return target for smooth_l1_loss
return torch.cat([g_cxcy, g_wh], 1) # [num_priors,4]
# Adapted from https://github.com/Hakuyume/chainer-ssd
def decode(loc, priors, variances):
"""Decode locations from predictions using priors to undo
the encoding we did for offset regression at train time.
Args:
loc (tensor): location predictions for loc layers,
Shape: [num_priors,4]
priors (tensor): Prior boxes in center-offset form.
Shape: [num_priors,4].
variances: (list[float]) Variances of priorboxes
Return:
decoded bounding box predictions
"""
boxes = torch.cat((
priors[:, :2] + loc[:, :2] * variances[0] * priors[:, 2:],
priors[:, 2:] * torch.exp(loc[:, 2:] * variances[1])), 1)
boxes[:, :2] -= boxes[:, 2:] / 2
boxes[:, 2:] += boxes[:, :2]
return boxes
def log_sum_exp(x):
"""Utility function for computing log_sum_exp while determining
This will be used to determine unaveraged confidence loss across
all examples in a batch.
Args:
x (Variable(tensor)): conf_preds from conf layers
"""
x_max = x.data.max()
return torch.log(torch.sum(torch.exp(x-x_max), dim=1, keepdim=True)) + x_max
# Original author: Francisco Massa:
# https://github.com/fmassa/object-detection.torch
# Ported to PyTorch by Max deGroot (02/01/2017)
def nms(boxes, scores, overlap=0.5, top_k=200):
"""Apply non-maximum suppression at test time to avoid detecting too many
overlapping bounding boxes for a given object.
Args:
boxes: (tensor) The location preds for the img, Shape: [num_priors,4].
        scores: (tensor) The class prediction scores for the img, Shape:[num_priors].
overlap: (float) The overlap thresh for suppressing unnecessary boxes.
top_k: (int) The Maximum number of box preds to consider.
Return:
The indices of the kept boxes with respect to num_priors.
"""
keep = scores.new(scores.size(0)).zero_().long()
if boxes.numel() == 0:
return keep
x1 = boxes[:, 0]
y1 = boxes[:, 1]
x2 = boxes[:, 2]
y2 = boxes[:, 3]
area = torch.mul(x2 - x1, y2 - y1)
v, idx = scores.sort(0) # sort in ascending order
# I = I[v >= 0.01]
idx = idx[-top_k:] # indices of the top-k largest vals
xx1 = boxes.new()
yy1 = boxes.new()
xx2 = boxes.new()
yy2 = boxes.new()
w = boxes.new()
h = boxes.new()
# keep = torch.Tensor()
count = 0
while idx.numel() > 0:
i = idx[-1] # index of current largest val
# keep.append(i)
keep[count] = i
count += 1
if idx.size(0) == 1:
break
idx = idx[:-1] # remove kept element from view
# load bboxes of next highest vals
torch.index_select(x1, 0, idx, out=xx1)
torch.index_select(y1, 0, idx, out=yy1)
torch.index_select(x2, 0, idx, out=xx2)
torch.index_select(y2, 0, idx, out=yy2)
# store element-wise max with next highest score
xx1 = torch.clamp(xx1, min=x1[i])
yy1 = torch.clamp(yy1, min=y1[i])
xx2 = torch.clamp(xx2, max=x2[i])
yy2 = torch.clamp(yy2, max=y2[i])
w.resize_as_(xx2)
h.resize_as_(yy2)
w = xx2 - xx1
h = yy2 - yy1
# check sizes of xx1 and xx2.. after each iteration
w = torch.clamp(w, min=0.0)
h = torch.clamp(h, min=0.0)
inter = w*h
# IoU = i / (area(a) + area(b) - i)
rem_areas = torch.index_select(area, 0, idx) # load remaining areas)
union = (rem_areas - inter) + area[i]
IoU = inter/union # store result in iou
# keep only elements with an IoU <= overlap
idx = idx[IoU.le(overlap)]
return keep, count
| [
"[email protected]"
] | |
c5b30f8b5cad9ac276560858501ff8ed4aa8a8b1 | 0a7b77367cde1a64d95d1aab53b6a4f344056a9c | /mcod/organizations/views.py | 685347e8b0ddc6c20caa3f4d05cff315812b5080 | [] | no_license | kaglowka/danyzespolapi | 529c2c7fc5d35c630498c8438e59dbcc3c00c437 | 0b3c07c68cf61faa81756822af9eec7c497bba2f | refs/heads/master | 2023-01-13T19:11:40.247512 | 2018-10-28T12:36:47 | 2018-10-28T12:36:47 | 154,993,674 | 0 | 0 | null | 2022-12-26T20:44:19 | 2018-10-27T17:57:34 | CSS | UTF-8 | Python | false | false | 2,487 | py | # -*- coding: utf-8 -*-
import falcon
from dal import autocomplete
from django.apps import apps
from elasticsearch_dsl import Q
from mcod.datasets.documents import DatasetsDoc
from mcod.datasets.schemas import DatasetsList
from mcod.datasets.serializers import DatasetSerializer, DatasetsMeta
from mcod.lib.handlers import SearchHandler, RetrieveOneHandler
from mcod.lib.triggers import LoginOptional
from mcod.lib.views import SearchView, RetrieveOneView
from mcod.organizations.documents import InstitutionDoc
from mcod.organizations.models import Organization
from mcod.organizations.schemas import InstitutionsList
from mcod.organizations.serializers import InstitutionsSerializer, InstitutionsMeta
class InstitutionsView(SearchView):
class GET(SearchHandler):
meta_serializer = InstitutionsMeta()
request_schema = InstitutionsList()
response_serializer = InstitutionsSerializer(many=True)
search_document = InstitutionDoc()
class InstitutionView(RetrieveOneView):
class GET(RetrieveOneHandler):
database_model = apps.get_model('organizations', 'Organization')
response_serializer = InstitutionsSerializer(many=False, include_data=('datasets',))
triggers = [LoginOptional(), ]
def resource_clean(self, request, id, *args, **kwargs):
model = self.database_model
try:
return model.objects.get(pk=id, status="published")
except model.DoesNotExist:
raise falcon.HTTPNotFound
class InstitutionDatasetsView(SearchView):
class GET(SearchHandler):
meta_serializer = DatasetsMeta()
request_schema = DatasetsList()
response_serializer = DatasetSerializer(many=True)
search_document = DatasetsDoc()
def _queryset(self, cleaned, *args, **kwargs):
qs = super()._queryset(cleaned, *args, **kwargs)
if 'id' in kwargs:
qs = qs.query("nested", path="institution",
query=Q("term", **{'institution.id': kwargs['id']}))
return qs
class OrganizationAutocomplete(autocomplete.Select2QuerySetView):
def get_queryset(self):
# Don't forget to filter out results depending on the visitor !
if not self.request.user.is_authenticated:
return Organization.objects.none()
qs = Organization.objects.all()
if self.q:
qs = qs.filter(title__icontains=self.q)
return qs
| [
"[email protected]"
] | |
1821ffa05b0f39dd18622808d4a83cb6c4da538e | c3132612a7ac311e501e432e1a4c7592bbd7a713 | /day16/code/08_爬虫.py | f5ee39d962d3e580dd54293573568a861feddcd3 | [] | no_license | everqiujuan/python | 7b8e169107012c3d7829d4ebd9860482fc0d8fec | b0a98de943217e24da60f79dec4fe8ebf4f1c713 | refs/heads/master | 2020-06-21T16:57:22.260311 | 2019-07-18T05:58:44 | 2019-07-18T05:58:44 | 184,990,635 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 990 | py | #
# Web crawler ("spider")
#
# requests
import requests
import re
# pip: third-party package manager
# pip install requests      install a package
# pip uninstall requests    uninstall a package
# pip freeze                list the packages you installed
# pip list                  list all installed packages
# pip -V                    show the pip version
# pip show requests         show details for a package
url = 'https://search.51job.com/list/040000,000000,0000,00,9,99,python,2,1.html?lang=c&stype=&postchannel=0000&workyear=99&cotype=99&degreefrom=99&jobterm=99&companysize=99&providesalary=99&lonlat=0%2C0&radius=-1&ord_field=0&confirmdate=9&fromType=&dibiaoid=0&address=&line=&specialarea=00&from=&welfare='
res = requests.get(url)
string = res.content.decode('gbk')
# print(string)
pattern = '<div class="rt">(.*?)</div>'
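# re.S (DOTALL) lets '.' match newlines too, so the non-greedy (.*?) group can span
# the multi-line block inside the div.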
res = re.findall(pattern, string, re.S)
# print(res)
string2 = res[0]
# print(string2)
# extract the numbers
# string2 = string2.strip()
# print(string2)
res2 = re.findall('(\d+)', string2)
print(res2)
print(res2[0])
| [
"[email protected]"
] | |
305d43af585283a2419fc1f458c295e373db6e69 | ebd5c4632bb5f85c9e3311fd70f6f1bf92fae53f | /Sourcem8/otp/uberdog/OtpAvatarManagerAI.py | fe784b18b21426dfb9dc945b89ef2a60b8896819 | [] | no_license | BrandonAlex/Pirates-Online-Retribution | 7f881a64ec74e595aaf62e78a39375d2d51f4d2e | 980b7448f798e255eecfb6bd2ebb67b299b27dd7 | refs/heads/master | 2020-04-02T14:22:28.626453 | 2018-10-24T15:33:17 | 2018-10-24T15:33:17 | 154,521,816 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,377 | py | from direct.directnotify import DirectNotifyGlobal
from direct.distributed.DistributedObjectAI import DistributedObjectAI
class OtpAvatarManagerAI(DistributedObjectAI):
notify = DirectNotifyGlobal.directNotify.newCategory("OtpAvatarManagerAI")
def online(self):
pass
def requestAvatarList(self, todo0):
pass
def rejectAvatarList(self, todo0):
pass
def avatarListResponse(self, todo0):
pass
def requestAvatarSlot(self, todo0, todo1, todo2):
pass
def rejectAvatarSlot(self, todo0, todo1, todo2):
pass
def avatarSlotResponse(self, todo0, todo1):
pass
def requestPlayAvatar(self, todo0, todo1, todo2):
pass
def rejectPlayAvatar(self, todo0, todo1):
pass
def playAvatarResponse(self, todo0, todo1, todo2, todo3):
pass
def rejectCreateAvatar(self, todo0):
pass
def createAvatarResponse(self, todo0, todo1, todo2, todo3):
pass
def requestRemoveAvatar(self, todo0, todo1, todo2, todo3):
pass
def rejectRemoveAvatar(self, todo0):
pass
def removeAvatarResponse(self, todo0, todo1):
pass
def requestShareAvatar(self, todo0, todo1, todo2, todo3):
pass
def rejectShareAvatar(self, todo0):
pass
def shareAvatarResponse(self, todo0, todo1, todo2):
pass
| [
"[email protected]"
] | |
71f5c1ce2364f53f0df7d090799750a7ae5ef7d2 | 6cc9adf9c0d840139d70a51f9e05731c4f26fe4c | /Programmers/8.py | 5bbe6b7fd4a2d2760dfc09ee32bdb38a7e843cb8 | [] | no_license | y2sec/Algorithm | 2e05447d401c8c8d0c183c73fb1ce94d26080382 | 712d572bedfbf36dc27e1cc1272143e2b3b73b7f | refs/heads/master | 2023-07-08T00:38:46.809836 | 2021-09-04T06:23:54 | 2021-09-04T06:23:54 | 317,202,541 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 550 | py | # 문자열 압축
def solution(s):
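    # Try every chunk length i; merge runs of identical consecutive chunks into
    # "<count><chunk>" and keep the length of the shortest compressed result.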
answer = len(s)
for i in range(1, len(s) // 2 + 1):
start = 0
cnt = 1
change = ''
while start < len(s):
if s[start:start + i] == s[start + i:start + i + i]:
cnt += 1
elif cnt >= 2:
change += str(cnt) + s[start:start + i]
cnt = 1
else:
change += s[start:start + i]
start += i
answer = min(answer, len(change))
return answer
print(solution('a'))
| [
"[email protected]"
] | |
d43a0031adc40a69d9b2814a1c978c8cd490e873 | 910463f16caddc5a4e06d6ca362d62af20910dba | /CH_14_multithreading_and_multiprocessing/T_15_thread_local.py | 5e4a0a8ab9363c44c745791a81890bf00047fd8a | [
"MIT"
] | permissive | AniketS-cpu/Mastering-Python-Second-Edition-test | 3621ac06fc2cff577992396cd924fe09a349d52e | 84de81c355d7ca21a1849eed04a15b722538f521 | refs/heads/master | 2023-06-09T12:42:53.963075 | 2021-07-04T14:54:18 | 2021-07-04T20:43:46 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 588 | py | import threading
import concurrent.futures
context = threading.local()
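# threading.local() gives each thread its own independent `counter` attribute, which is
# why every worker thread must run init_counter (passed as `initializer`) before use.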
def init_counter():
context.counter = 10
def increment(name):
current_value = context.counter
print(f'{name} value before increment: {current_value}')
context.counter = current_value + 1
print(f'{name} value after increment: {context.counter}')
init_counter()
print(f'Before thread start: {context.counter}')
with concurrent.futures.ThreadPoolExecutor(
initializer=init_counter) as executor:
executor.map(increment, range(5))
print(f'After thread finish: {context.counter}')
| [
"[email protected]"
] | |
a518889814eb5ff4f08fee145ec9852d16126a23 | 2f418a0f2fcca40f84ec0863b31ff974b574350c | /scripts/addons_extern/BrickTricks-master/uv_map_diamond_addin.py | 540c658a13709a5eda3aba03ac9e4a6bac45fe6e | [
"Zlib"
] | permissive | JT-a/blenderpython279 | 57a81b55564218f3b1417c2ffa97f5161897ec79 | 04846c82f794c22f87d677d9eb8cec1d05c48cda | refs/heads/master | 2021-06-25T06:58:07.670613 | 2017-09-11T11:14:36 | 2017-09-11T11:14:36 | 103,723,697 | 4 | 2 | null | 2017-09-16T04:09:31 | 2017-09-16T04:09:31 | null | UTF-8 | Python | false | false | 47,127 | py | import bpy
class ShaderNodeuv_map_diamond(bpy.types.NodeCustomGroup):
bl_name='ShaderNodeuv_map_diamond'
bl_label='uv_map_diamond'
bl_icon='NONE'
def init(self, context):
self.getNodetree(self.name + '_node_tree_v013')
self.inputs['Vector'].default_value=[0,0,0]
self.inputs['HAS_Vector'].default_value=0
self.inputs['HAS_Vector'].hide=True
self.inputs['scale'].default_value=1
self.inputs['diamond_size'].default_value=0.2
self.inputs['brick_width'].default_value=1
self.inputs['brick_height'].default_value=1
self.inputs['feather'].default_value=0.1
self.inputs['border'].default_value=0.050000001
def update(self):
if self.inputs['Vector'].is_linked:
self.inputs['HAS_Vector'].default_value=1
else:
self.inputs['HAS_Vector'].default_value=0
pass
def value_set(self, obj, path, value):
if '.' in path:
path_prop, path_attr = path.rsplit('.', 1)
prop = obj.path_resolve(path_prop)
else:
prop = obj
path_attr = path
setattr(prop, path_attr, value)
def createNodetree(self, name) :
self.node_tree = bpy.data.node_groups.new(name, 'ShaderNodeTree')
#Nodes
self.addNode('NodeGroupInput', { 'name':'GroupInput' })
self.addNode('NodeGroupOutput', { 'name':'GroupOutput' })
self.addNode('ShaderNodeTexCoord', { 'name':'Node0' })
self.addNode('ShaderNodeMixRGB', { 'name':'Node1' })
self.addNode('ShaderNodeCombineRGB', { 'name':'Node2' ,'inputs[0].default_value':0 ,'inputs[1].default_value':0 ,'inputs[2].default_value':0 })
self.addNode('ShaderNodeValue', { 'name':'Node5' ,'outputs[0].default_value':0 })
self.addNode('ShaderNodeValue', { 'name':'Node9' ,'outputs[0].default_value':2 })
self.addNode('ShaderNodeValue', { 'name':'Node10' ,'outputs[0].default_value':0 })
self.addNode('ShaderNodeValue', { 'name':'Node11' ,'outputs[0].default_value':1 })
self.addNode('ShaderNodeValue', { 'name':'Node13' ,'outputs[0].default_value':0.5 })
self.addNode('ShaderNodeValue', { 'name':'Node14' ,'outputs[0].default_value':0 })
self.addNode('ShaderNodeValue', { 'name':'Node15' ,'outputs[0].default_value':1.5 })
self.addNode('ShaderNodeValue', { 'name':'Node16' ,'outputs[0].default_value':-0.5 })
self.addNode('ShaderNodeValue', { 'name':'Node18' ,'outputs[0].default_value':1 })
self.addNode('ShaderNodeMath', { 'name':'Node19' ,'operation':'MINIMUM' })
self.addNode('ShaderNodeMath', { 'name':'Node20' ,'operation':'DIVIDE' })
self.addNode('ShaderNodeMath', { 'name':'Node21' ,'operation':'MINIMUM' })
self.addNode('ShaderNodeMath', { 'name':'Node22' ,'operation':'DIVIDE' })
self.addNode('ShaderNodeMath', { 'name':'Node23' ,'operation':'DIVIDE' })
self.addNode('ShaderNodeSeparateXYZ', { 'name':'Node24' })
self.addNode('ShaderNodeMath', { 'name':'Node25' ,'operation':'MULTIPLY' })
self.addNode('ShaderNodeMath', { 'name':'Node26' ,'operation':'DIVIDE' })
self.addNode('ShaderNodeMath', { 'name':'Node27' ,'inputs[1].default_value':0.5 ,'operation':'SUBTRACT' })
self.addNode('ShaderNodeMath', { 'name':'Node28' ,'operation':'ROUND' })
self.addNode('ShaderNodeSeparateXYZ', { 'name':'Node29' })
self.addNode('ShaderNodeMath', { 'name':'Node30' ,'operation':'MULTIPLY' })
self.addNode('ShaderNodeMath', { 'name':'Node31' ,'operation':'DIVIDE' })
self.addNode('ShaderNodeMath', { 'name':'Node32' ,'inputs[1].default_value':0.5 ,'operation':'SUBTRACT' })
self.addNode('ShaderNodeMath', { 'name':'Node33' ,'operation':'ROUND' })
self.addNode('ShaderNodeSeparateXYZ', { 'name':'Node34' })
self.addNode('ShaderNodeMath', { 'name':'Node35' ,'operation':'MULTIPLY' })
self.addNode('ShaderNodeMath', { 'name':'Node36' ,'operation':'MODULO' })
self.addNode('ShaderNodeMath', { 'name':'Node37' ,'operation':'LESS_THAN' })
self.addNode('ShaderNodeMath', { 'name':'Node38' ,'operation':'ADD' })
self.addNode('ShaderNodeMixRGB', { 'name':'Node39' })
self.addNode('ShaderNodeSeparateXYZ', { 'name':'Node40' })
self.addNode('ShaderNodeMath', { 'name':'Node41' ,'operation':'MULTIPLY' })
self.addNode('ShaderNodeMath', { 'name':'Node42' ,'operation':'MODULO' })
self.addNode('ShaderNodeMath', { 'name':'Node43' ,'operation':'LESS_THAN' })
self.addNode('ShaderNodeMath', { 'name':'Node44' ,'operation':'ADD' })
self.addNode('ShaderNodeMixRGB', { 'name':'Node45' })
self.addNode('ShaderNodeMath', { 'name':'Node46' ,'operation':'SUBTRACT' })
self.addNode('ShaderNodeMath', { 'name':'Node47' ,'operation':'MINIMUM' })
self.addNode('ShaderNodeMath', { 'name':'Node48' ,'operation':'SUBTRACT' })
self.addNode('ShaderNodeMath', { 'name':'Node49' ,'operation':'MINIMUM' })
self.addNode('ShaderNodeMath', { 'name':'Node50' ,'operation':'ADD' })
self.addNode('ShaderNodeMath', { 'name':'Node51' ,'operation':'LESS_THAN' })
self.addNode('ShaderNodeMath', { 'name':'Node52' ,'operation':'MULTIPLY' })
self.addNode('ShaderNodeMath', { 'name':'Node53' ,'operation':'GREATER_THAN' })
self.addNode('ShaderNodeMath', { 'name':'Node54' ,'operation':'SUBTRACT' })
self.addNode('ShaderNodeMath', { 'name':'Node55' ,'operation':'ABSOLUTE' })
self.addNode('ShaderNodeMath', { 'name':'Node56' ,'inputs[1].default_value':0.000001 ,'operation':'GREATER_THAN' })
self.addNode('ShaderNodeMath', { 'name':'Node57' ,'operation':'LESS_THAN' })
self.addNode('ShaderNodeMath', { 'name':'Node58' ,'operation':'SUBTRACT' })
self.addNode('ShaderNodeMath', { 'name':'Node59' ,'operation':'ABSOLUTE' })
self.addNode('ShaderNodeMath', { 'name':'Node60' ,'inputs[1].default_value':0.000001 ,'operation':'GREATER_THAN' })
self.addNode('ShaderNodeMixRGB', { 'name':'Node61' })
self.addNode('ShaderNodeMath', { 'name':'Node62' ,'operation':'SUBTRACT' })
self.addNode('ShaderNodeMath', { 'name':'Node63' ,'operation':'SUBTRACT' })
self.addNode('ShaderNodeMath', { 'name':'Node64' ,'operation':'SUBTRACT' })
self.addNode('ShaderNodeMath', { 'name':'Node65' ,'operation':'SUBTRACT' })
self.addNode('ShaderNodeCombineXYZ', { 'name':'Node66' })
self.addNode('ShaderNodeMixRGB', { 'name':'Node67' })
self.addNode('ShaderNodeMixRGB', { 'name':'Node68' })
self.addNode('ShaderNodeMixRGB', { 'name':'Node69' })
self.addNode('ShaderNodeMath', { 'name':'Node70' ,'operation':'GREATER_THAN' })
self.addNode('ShaderNodeMath', { 'name':'Node71' ,'operation':'SUBTRACT' })
self.addNode('ShaderNodeMath', { 'name':'Node72' ,'operation':'ABSOLUTE' })
self.addNode('ShaderNodeMath', { 'name':'Node73' ,'inputs[1].default_value':0.000001 ,'operation':'GREATER_THAN' })
self.addNode('ShaderNodeMath', { 'name':'Node74' ,'operation':'GREATER_THAN' })
self.addNode('ShaderNodeMath', { 'name':'Node75' ,'operation':'SUBTRACT' })
self.addNode('ShaderNodeMath', { 'name':'Node76' ,'operation':'ABSOLUTE' })
self.addNode('ShaderNodeMath', { 'name':'Node77' ,'inputs[1].default_value':0.000001 ,'operation':'GREATER_THAN' })
self.addNode('ShaderNodeMixRGB', { 'name':'Node78' })
self.addNode('ShaderNodeMath', { 'name':'Node79' ,'operation':'SUBTRACT' })
self.addNode('ShaderNodeMath', { 'name':'Node80' ,'operation':'SUBTRACT' })
self.addNode('ShaderNodeMath', { 'name':'Node81' ,'operation':'SUBTRACT' })
self.addNode('ShaderNodeMath', { 'name':'Node82' ,'operation':'SUBTRACT' })
self.addNode('ShaderNodeCombineXYZ', { 'name':'Node83' })
self.addNode('ShaderNodeMixRGB', { 'name':'Node84' })
self.addNode('ShaderNodeMixRGB', { 'name':'Node85' })
self.addNode('ShaderNodeMixRGB', { 'name':'Node86' })
self.addNode('ShaderNodeMath', { 'name':'Node87' ,'operation':'LESS_THAN' })
self.addNode('ShaderNodeMath', { 'name':'Node88' ,'operation':'SUBTRACT' })
self.addNode('ShaderNodeMath', { 'name':'Node89' ,'operation':'ABSOLUTE' })
self.addNode('ShaderNodeMath', { 'name':'Node90' ,'inputs[1].default_value':0.000001 ,'operation':'GREATER_THAN' })
self.addNode('ShaderNodeMath', { 'name':'Node91' ,'operation':'GREATER_THAN' })
self.addNode('ShaderNodeMath', { 'name':'Node92' ,'operation':'SUBTRACT' })
self.addNode('ShaderNodeMath', { 'name':'Node93' ,'operation':'ABSOLUTE' })
self.addNode('ShaderNodeMath', { 'name':'Node94' ,'inputs[1].default_value':0.000001 ,'operation':'GREATER_THAN' })
self.addNode('ShaderNodeMixRGB', { 'name':'Node95' })
self.addNode('ShaderNodeMath', { 'name':'Node96' ,'operation':'ADD' })
self.addNode('ShaderNodeMath', { 'name':'Node97' ,'operation':'ADD' })
self.addNode('ShaderNodeMath', { 'name':'Node98' ,'operation':'SUBTRACT' })
self.addNode('ShaderNodeMath', { 'name':'Node99' ,'operation':'SUBTRACT' })
self.addNode('ShaderNodeCombineXYZ', { 'name':'Node100' })
self.addNode('ShaderNodeMixRGB', { 'name':'Node101' })
self.addNode('ShaderNodeMixRGB', { 'name':'Node102' })
self.addNode('ShaderNodeMixRGB', { 'name':'Node103' })
self.addNode('ShaderNodeMath', { 'name':'Node104' ,'operation':'LESS_THAN' })
self.addNode('ShaderNodeMath', { 'name':'Node105' ,'operation':'SUBTRACT' })
self.addNode('ShaderNodeMath', { 'name':'Node106' ,'operation':'ABSOLUTE' })
self.addNode('ShaderNodeMath', { 'name':'Node107' ,'inputs[1].default_value':0.000001 ,'operation':'GREATER_THAN' })
self.addNode('ShaderNodeMath', { 'name':'Node108' ,'operation':'LESS_THAN' })
self.addNode('ShaderNodeMath', { 'name':'Node109' ,'operation':'SUBTRACT' })
self.addNode('ShaderNodeMath', { 'name':'Node110' ,'operation':'ABSOLUTE' })
self.addNode('ShaderNodeMath', { 'name':'Node111' ,'inputs[1].default_value':0.000001 ,'operation':'GREATER_THAN' })
self.addNode('ShaderNodeMixRGB', { 'name':'Node112' })
self.addNode('ShaderNodeMath', { 'name':'Node113' ,'operation':'ADD' })
self.addNode('ShaderNodeMath', { 'name':'Node114' ,'operation':'SUBTRACT' })
self.addNode('ShaderNodeMath', { 'name':'Node115' ,'operation':'ADD' })
self.addNode('ShaderNodeMath', { 'name':'Node116' ,'operation':'ADD' })
self.addNode('ShaderNodeCombineXYZ', { 'name':'Node117' })
self.addNode('ShaderNodeMixRGB', { 'name':'Node118' })
self.addNode('ShaderNodeMixRGB', { 'name':'Node119' })
self.addNode('ShaderNodeMixRGB', { 'name':'Node120' })
self.addNode('ShaderNodeMath', { 'name':'Node121' ,'operation':'ADD' })
self.addNode('ShaderNodeMath', { 'name':'Node122' ,'operation':'ADD' })
self.addNode('ShaderNodeCombineXYZ', { 'name':'Node123' })
self.addNode('ShaderNodeMath', { 'name':'Node124' ,'operation':'SUBTRACT' })
self.addNode('ShaderNodeMath', { 'name':'Node125' ,'operation':'SUBTRACT' })
self.addNode('ShaderNodeMath', { 'name':'Node126' ,'operation':'ADD' })
self.addNode('ShaderNodeMath', { 'name':'Node127' ,'operation':'SUBTRACT' })
self.addNode('ShaderNodeMath', { 'name':'Node128' ,'operation':'MINIMUM' })
self.addNode('ShaderNodeMath', { 'name':'Node129' ,'operation':'LESS_THAN' })
self.addNode('ShaderNodeMath', { 'name':'Node130' ,'operation':'SUBTRACT' })
self.addNode('ShaderNodeMath', { 'name':'Node131' ,'operation':'MULTIPLY' })
self.addNode('ShaderNodeMath', { 'name':'Node132' ,'operation':'ADD' })
self.addNode('ShaderNodeMath', { 'name':'Node133' ,'operation':'LESS_THAN' })
self.addNode('ShaderNodeMath', { 'name':'Node134' ,'operation':'SUBTRACT' })
self.addNode('ShaderNodeMath', { 'name':'Node135' ,'operation':'ABSOLUTE' })
self.addNode('ShaderNodeMath', { 'name':'Node136' ,'inputs[1].default_value':0.000001 ,'operation':'LESS_THAN' })
self.addNode('ShaderNodeMath', { 'name':'Node137' ,'operation':'GREATER_THAN' })
self.addNode('ShaderNodeMath', { 'name':'Node138' ,'operation':'ADD' })
self.addNode('ShaderNodeMath', { 'name':'Node139' ,'inputs[1].default_value':0.999 ,'operation':'GREATER_THAN' })
self.addNode('ShaderNodeMath', { 'name':'Node140' ,'operation':'SUBTRACT' })
self.addNode('ShaderNodeMath', { 'name':'Node141' ,'operation':'SUBTRACT' })
self.addNode('ShaderNodeMath', { 'name':'Node142' ,'operation':'DIVIDE' })
self.addNode('ShaderNodeMixRGB', { 'name':'Node143' })
self.addNode('ShaderNodeMixRGB', { 'name':'Node144' })
self.addNode('ShaderNodeCombineXYZ', { 'name':'Node145' })
self.addNode('ShaderNodeCombineXYZ', { 'name':'Node146' })
self.addNode('ShaderNodeMath', { 'name':'Node147' ,'operation':'LESS_THAN' })
self.addNode('ShaderNodeMath', { 'name':'Node148' ,'operation':'SUBTRACT' })
self.addNode('ShaderNodeMath', { 'name':'Node149' ,'operation':'SUBTRACT' })
self.addNode('ShaderNodeMath', { 'name':'Node150' ,'operation':'MINIMUM' })
self.addNode('ShaderNodeMath', { 'name':'Node151' ,'operation':'SUBTRACT' })
self.addNode('ShaderNodeMath', { 'name':'Node152' ,'operation':'GREATER_THAN' })
self.addNode('ShaderNodeMath', { 'name':'Node153' ,'operation':'SUBTRACT' })
self.addNode('ShaderNodeMath', { 'name':'Node154' ,'operation':'SUBTRACT' })
self.addNode('ShaderNodeMath', { 'name':'Node155' ,'operation':'MINIMUM' })
self.addNode('ShaderNodeMath', { 'name':'Node156' ,'operation':'MINIMUM' })
self.addNode('ShaderNodeMixRGB', { 'name':'Node157' })
self.addNode('ShaderNodeMixRGB', { 'name':'Node158' })
self.addNode('ShaderNodeMath', { 'name':'Node159' ,'operation':'LESS_THAN' })
self.addNode('ShaderNodeMath', { 'name':'Node160' ,'operation':'MINIMUM' })
self.addNode('ShaderNodeMath', { 'name':'Node161' ,'operation':'SUBTRACT' })
self.addNode('ShaderNodeMath', { 'name':'Node162' ,'operation':'MULTIPLY' })
self.addNode('ShaderNodeMath', { 'name':'Node163' ,'operation':'ADD' })
self.addNode('ShaderNodeMath', { 'name':'Node164' ,'operation':'LESS_THAN' })
self.addNode('ShaderNodeMath', { 'name':'Node165' ,'operation':'SUBTRACT' })
self.addNode('ShaderNodeMath', { 'name':'Node166' ,'operation':'ABSOLUTE' })
self.addNode('ShaderNodeMath', { 'name':'Node167' ,'inputs[1].default_value':0.000001 ,'operation':'LESS_THAN' })
self.addNode('ShaderNodeMath', { 'name':'Node168' ,'operation':'GREATER_THAN' })
self.addNode('ShaderNodeMath', { 'name':'Node169' ,'operation':'ADD' })
self.addNode('ShaderNodeMath', { 'name':'Node170' ,'inputs[1].default_value':0.999 ,'operation':'GREATER_THAN' })
self.addNode('ShaderNodeMath', { 'name':'Node171' ,'operation':'SUBTRACT' })
self.addNode('ShaderNodeMath', { 'name':'Node172' ,'operation':'SUBTRACT' })
self.addNode('ShaderNodeMath', { 'name':'Node173' ,'operation':'DIVIDE' })
self.addNode('ShaderNodeMixRGB', { 'name':'Node174' })
self.addNode('ShaderNodeMixRGB', { 'name':'Node175' })
self.addNode('ShaderNodeMixRGB', { 'name':'Node176' })
self.addNode('ShaderNodeMixRGB', { 'name':'Node177' })
self.addNode('ShaderNodeMixRGB', { 'name':'Node178' })
self.addNode('ShaderNodeMixRGB', { 'name':'Node179' })
self.addNode('ShaderNodeMixRGB', { 'name':'Node180' })
self.addNode('ShaderNodeMixRGB', { 'name':'Node181' })
self.addNode('ShaderNodeMixRGB', { 'name':'Node182' })
#Sockets
self.addSocket(False, 'NodeSocketVector', 'Vector')
self.addSocket(False, 'NodeSocketFloatFactor', 'HAS_Vector')
self.addSocket(False, 'NodeSocketFloat', 'scale')
self.addSocket(False, 'NodeSocketFloat', 'diamond_size')
self.addSocket(False, 'NodeSocketFloat', 'brick_width')
self.addSocket(False, 'NodeSocketFloat', 'brick_height')
self.addSocket(False, 'NodeSocketFloat', 'feather')
self.addSocket(False, 'NodeSocketFloat', 'border')
self.addSocket(True, 'NodeSocketVector', 'UV')
self.addSocket(True, 'NodeSocketVector', 'CellID')
self.addSocket(True, 'NodeSocketFloat', 'Height')
self.addSocket(True, 'NodeSocketFloat', 'TileMask')
self.addSocket(True, 'NodeSocketFloat', 'BorderMask')
self.addSocket(True, 'NodeSocketFloat', 'BrickWidth')
self.addSocket(True, 'NodeSocketFloat', 'BrickHeight')
#Links
self.innerLink('nodes["GroupInput"].outputs[1]', 'nodes["Node1"].inputs[0]')
self.innerLink('nodes["Node0"].outputs[3]', 'nodes["Node1"].inputs[1]')
self.innerLink('nodes["GroupInput"].outputs[0]', 'nodes["Node1"].inputs[2]')
self.innerLink('nodes["GroupInput"].outputs[4]', 'nodes["Node19"].inputs[0]')
self.innerLink('nodes["GroupInput"].outputs[5]', 'nodes["Node19"].inputs[1]')
self.innerLink('nodes["Node19"].outputs[0]', 'nodes["Node20"].inputs[0]')
self.innerLink('nodes["Node9"].outputs[0]', 'nodes["Node20"].inputs[1]')
self.innerLink('nodes["GroupInput"].outputs[3]', 'nodes["Node21"].inputs[0]')
self.innerLink('nodes["Node20"].outputs[0]', 'nodes["Node21"].inputs[1]')
self.innerLink('nodes["GroupInput"].outputs[7]', 'nodes["Node22"].inputs[0]')
self.innerLink('nodes["Node9"].outputs[0]', 'nodes["Node22"].inputs[1]')
self.innerLink('nodes["GroupInput"].outputs[6]', 'nodes["Node23"].inputs[0]')
self.innerLink('nodes["Node9"].outputs[0]', 'nodes["Node23"].inputs[1]')
self.innerLink('nodes["Node1"].outputs[0]', 'nodes["Node24"].inputs[0]')
self.innerLink('nodes["Node24"].outputs[0]', 'nodes["Node25"].inputs[0]')
self.innerLink('nodes["GroupInput"].outputs[2]', 'nodes["Node25"].inputs[1]')
self.innerLink('nodes["Node25"].outputs[0]', 'nodes["Node26"].inputs[0]')
self.innerLink('nodes["GroupInput"].outputs[4]', 'nodes["Node26"].inputs[1]')
self.innerLink('nodes["Node26"].outputs[0]', 'nodes["Node27"].inputs[0]')
self.innerLink('nodes["Node27"].outputs[0]', 'nodes["Node28"].inputs[0]')
self.innerLink('nodes["Node1"].outputs[0]', 'nodes["Node29"].inputs[0]')
self.innerLink('nodes["Node29"].outputs[1]', 'nodes["Node30"].inputs[0]')
self.innerLink('nodes["GroupInput"].outputs[2]', 'nodes["Node30"].inputs[1]')
self.innerLink('nodes["Node30"].outputs[0]', 'nodes["Node31"].inputs[0]')
self.innerLink('nodes["GroupInput"].outputs[5]', 'nodes["Node31"].inputs[1]')
self.innerLink('nodes["Node31"].outputs[0]', 'nodes["Node32"].inputs[0]')
self.innerLink('nodes["Node32"].outputs[0]', 'nodes["Node33"].inputs[0]')
self.innerLink('nodes["Node1"].outputs[0]', 'nodes["Node34"].inputs[0]')
self.innerLink('nodes["Node34"].outputs[0]', 'nodes["Node35"].inputs[0]')
self.innerLink('nodes["GroupInput"].outputs[2]', 'nodes["Node35"].inputs[1]')
self.innerLink('nodes["Node35"].outputs[0]', 'nodes["Node36"].inputs[0]')
self.innerLink('nodes["GroupInput"].outputs[4]', 'nodes["Node36"].inputs[1]')
self.innerLink('nodes["Node36"].outputs[0]', 'nodes["Node37"].inputs[0]')
self.innerLink('nodes["Node10"].outputs[0]', 'nodes["Node37"].inputs[1]')
self.innerLink('nodes["GroupInput"].outputs[4]', 'nodes["Node38"].inputs[0]')
self.innerLink('nodes["Node36"].outputs[0]', 'nodes["Node38"].inputs[1]')
self.innerLink('nodes["Node37"].outputs[0]', 'nodes["Node39"].inputs[0]')
self.innerLink('nodes["Node36"].outputs[0]', 'nodes["Node39"].inputs[1]')
self.innerLink('nodes["Node38"].outputs[0]', 'nodes["Node39"].inputs[2]')
self.innerLink('nodes["Node1"].outputs[0]', 'nodes["Node40"].inputs[0]')
self.innerLink('nodes["Node40"].outputs[1]', 'nodes["Node41"].inputs[0]')
self.innerLink('nodes["GroupInput"].outputs[2]', 'nodes["Node41"].inputs[1]')
self.innerLink('nodes["Node41"].outputs[0]', 'nodes["Node42"].inputs[0]')
self.innerLink('nodes["GroupInput"].outputs[5]', 'nodes["Node42"].inputs[1]')
self.innerLink('nodes["Node42"].outputs[0]', 'nodes["Node43"].inputs[0]')
self.innerLink('nodes["Node10"].outputs[0]', 'nodes["Node43"].inputs[1]')
self.innerLink('nodes["GroupInput"].outputs[5]', 'nodes["Node44"].inputs[0]')
self.innerLink('nodes["Node42"].outputs[0]', 'nodes["Node44"].inputs[1]')
self.innerLink('nodes["Node43"].outputs[0]', 'nodes["Node45"].inputs[0]')
self.innerLink('nodes["Node42"].outputs[0]', 'nodes["Node45"].inputs[1]')
self.innerLink('nodes["Node44"].outputs[0]', 'nodes["Node45"].inputs[2]')
self.innerLink('nodes["GroupInput"].outputs[4]', 'nodes["Node46"].inputs[0]')
self.innerLink('nodes["Node39"].outputs[0]', 'nodes["Node46"].inputs[1]')
self.innerLink('nodes["Node39"].outputs[0]', 'nodes["Node47"].inputs[0]')
self.innerLink('nodes["Node46"].outputs[0]', 'nodes["Node47"].inputs[1]')
self.innerLink('nodes["GroupInput"].outputs[5]', 'nodes["Node48"].inputs[0]')
self.innerLink('nodes["Node45"].outputs[0]', 'nodes["Node48"].inputs[1]')
self.innerLink('nodes["Node45"].outputs[0]', 'nodes["Node49"].inputs[0]')
self.innerLink('nodes["Node48"].outputs[0]', 'nodes["Node49"].inputs[1]')
self.innerLink('nodes["Node47"].outputs[0]', 'nodes["Node50"].inputs[0]')
self.innerLink('nodes["Node49"].outputs[0]', 'nodes["Node50"].inputs[1]')
self.innerLink('nodes["Node50"].outputs[0]', 'nodes["Node51"].inputs[0]')
self.innerLink('nodes["Node21"].outputs[0]', 'nodes["Node51"].inputs[1]')
self.innerLink('nodes["Node21"].outputs[0]', 'nodes["Node52"].inputs[0]')
self.innerLink('nodes["Node9"].outputs[0]', 'nodes["Node52"].inputs[1]')
self.innerLink('nodes["Node39"].outputs[0]', 'nodes["Node53"].inputs[0]')
self.innerLink('nodes["Node21"].outputs[0]', 'nodes["Node53"].inputs[1]')
self.innerLink('nodes["Node53"].outputs[0]', 'nodes["Node54"].inputs[0]')
self.innerLink('nodes["Node10"].outputs[0]', 'nodes["Node54"].inputs[1]')
self.innerLink('nodes["Node54"].outputs[0]', 'nodes["Node55"].inputs[0]')
self.innerLink('nodes["Node55"].outputs[0]', 'nodes["Node56"].inputs[0]')
self.innerLink('nodes["Node45"].outputs[0]', 'nodes["Node57"].inputs[0]')
self.innerLink('nodes["Node21"].outputs[0]', 'nodes["Node57"].inputs[1]')
self.innerLink('nodes["Node57"].outputs[0]', 'nodes["Node58"].inputs[0]')
self.innerLink('nodes["Node10"].outputs[0]', 'nodes["Node58"].inputs[1]')
self.innerLink('nodes["Node58"].outputs[0]', 'nodes["Node59"].inputs[0]')
self.innerLink('nodes["Node59"].outputs[0]', 'nodes["Node60"].inputs[0]')
self.innerLink('nodes["Node56"].outputs[0]', 'nodes["Node61"].inputs[0]')
self.innerLink('nodes["Node56"].outputs[0]', 'nodes["Node61"].inputs[1]')
self.innerLink('nodes["Node60"].outputs[0]', 'nodes["Node61"].inputs[2]')
self.innerLink('nodes["Node21"].outputs[0]', 'nodes["Node62"].inputs[0]')
self.innerLink('nodes["Node47"].outputs[0]', 'nodes["Node62"].inputs[1]')
self.innerLink('nodes["Node62"].outputs[0]', 'nodes["Node63"].inputs[0]')
self.innerLink('nodes["Node49"].outputs[0]', 'nodes["Node63"].inputs[1]')
self.innerLink('nodes["Node47"].outputs[0]', 'nodes["Node64"].inputs[0]')
self.innerLink('nodes["Node49"].outputs[0]', 'nodes["Node64"].inputs[1]')
self.innerLink('nodes["Node21"].outputs[0]', 'nodes["Node65"].inputs[0]')
self.innerLink('nodes["Node64"].outputs[0]', 'nodes["Node65"].inputs[1]')
self.innerLink('nodes["Node63"].outputs[0]', 'nodes["Node66"].inputs[0]')
self.innerLink('nodes["Node65"].outputs[0]', 'nodes["Node66"].inputs[1]')
self.innerLink('nodes["Node14"].outputs[0]', 'nodes["Node66"].inputs[2]')
self.innerLink('nodes["Node61"].outputs[0]', 'nodes["Node67"].inputs[0]')
self.innerLink('nodes["Node2"].outputs[0]', 'nodes["Node67"].inputs[1]')
self.innerLink('nodes["Node66"].outputs[0]', 'nodes["Node67"].inputs[2]')
self.innerLink('nodes["Node61"].outputs[0]', 'nodes["Node68"].inputs[0]')
self.innerLink('nodes["Node10"].outputs[0]', 'nodes["Node68"].inputs[1]')
self.innerLink('nodes["Node13"].outputs[0]', 'nodes["Node68"].inputs[2]')
self.innerLink('nodes["Node61"].outputs[0]', 'nodes["Node69"].inputs[0]')
self.innerLink('nodes["Node10"].outputs[0]', 'nodes["Node69"].inputs[1]')
self.innerLink('nodes["Node13"].outputs[0]', 'nodes["Node69"].inputs[2]')
self.innerLink('nodes["Node39"].outputs[0]', 'nodes["Node70"].inputs[0]')
self.innerLink('nodes["Node21"].outputs[0]', 'nodes["Node70"].inputs[1]')
self.innerLink('nodes["Node70"].outputs[0]', 'nodes["Node71"].inputs[0]')
self.innerLink('nodes["Node10"].outputs[0]', 'nodes["Node71"].inputs[1]')
self.innerLink('nodes["Node71"].outputs[0]', 'nodes["Node72"].inputs[0]')
self.innerLink('nodes["Node72"].outputs[0]', 'nodes["Node73"].inputs[0]')
self.innerLink('nodes["Node45"].outputs[0]', 'nodes["Node74"].inputs[0]')
self.innerLink('nodes["Node21"].outputs[0]', 'nodes["Node74"].inputs[1]')
self.innerLink('nodes["Node74"].outputs[0]', 'nodes["Node75"].inputs[0]')
self.innerLink('nodes["Node10"].outputs[0]', 'nodes["Node75"].inputs[1]')
self.innerLink('nodes["Node75"].outputs[0]', 'nodes["Node76"].inputs[0]')
self.innerLink('nodes["Node76"].outputs[0]', 'nodes["Node77"].inputs[0]')
self.innerLink('nodes["Node73"].outputs[0]', 'nodes["Node78"].inputs[0]')
self.innerLink('nodes["Node73"].outputs[0]', 'nodes["Node78"].inputs[1]')
self.innerLink('nodes["Node77"].outputs[0]', 'nodes["Node78"].inputs[2]')
self.innerLink('nodes["Node47"].outputs[0]', 'nodes["Node79"].inputs[0]')
self.innerLink('nodes["Node49"].outputs[0]', 'nodes["Node79"].inputs[1]')
self.innerLink('nodes["Node21"].outputs[0]', 'nodes["Node80"].inputs[0]')
self.innerLink('nodes["Node79"].outputs[0]', 'nodes["Node80"].inputs[1]')
self.innerLink('nodes["Node21"].outputs[0]', 'nodes["Node81"].inputs[0]')
self.innerLink('nodes["Node47"].outputs[0]', 'nodes["Node81"].inputs[1]')
self.innerLink('nodes["Node81"].outputs[0]', 'nodes["Node82"].inputs[0]')
self.innerLink('nodes["Node49"].outputs[0]', 'nodes["Node82"].inputs[1]')
self.innerLink('nodes["Node80"].outputs[0]', 'nodes["Node83"].inputs[0]')
self.innerLink('nodes["Node82"].outputs[0]', 'nodes["Node83"].inputs[1]')
self.innerLink('nodes["Node14"].outputs[0]', 'nodes["Node83"].inputs[2]')
self.innerLink('nodes["Node78"].outputs[0]', 'nodes["Node84"].inputs[0]')
self.innerLink('nodes["Node67"].outputs[0]', 'nodes["Node84"].inputs[1]')
self.innerLink('nodes["Node83"].outputs[0]', 'nodes["Node84"].inputs[2]')
self.innerLink('nodes["Node78"].outputs[0]', 'nodes["Node85"].inputs[0]')
self.innerLink('nodes["Node68"].outputs[0]', 'nodes["Node85"].inputs[1]')
self.innerLink('nodes["Node13"].outputs[0]', 'nodes["Node85"].inputs[2]')
self.innerLink('nodes["Node78"].outputs[0]', 'nodes["Node86"].inputs[0]')
self.innerLink('nodes["Node69"].outputs[0]', 'nodes["Node86"].inputs[1]')
self.innerLink('nodes["Node15"].outputs[0]', 'nodes["Node86"].inputs[2]')
self.innerLink('nodes["Node39"].outputs[0]', 'nodes["Node87"].inputs[0]')
self.innerLink('nodes["Node21"].outputs[0]', 'nodes["Node87"].inputs[1]')
self.innerLink('nodes["Node87"].outputs[0]', 'nodes["Node88"].inputs[0]')
self.innerLink('nodes["Node10"].outputs[0]', 'nodes["Node88"].inputs[1]')
self.innerLink('nodes["Node88"].outputs[0]', 'nodes["Node89"].inputs[0]')
self.innerLink('nodes["Node89"].outputs[0]', 'nodes["Node90"].inputs[0]')
self.innerLink('nodes["Node45"].outputs[0]', 'nodes["Node91"].inputs[0]')
self.innerLink('nodes["Node21"].outputs[0]', 'nodes["Node91"].inputs[1]')
self.innerLink('nodes["Node91"].outputs[0]', 'nodes["Node92"].inputs[0]')
self.innerLink('nodes["Node10"].outputs[0]', 'nodes["Node92"].inputs[1]')
self.innerLink('nodes["Node92"].outputs[0]', 'nodes["Node93"].inputs[0]')
self.innerLink('nodes["Node93"].outputs[0]', 'nodes["Node94"].inputs[0]')
self.innerLink('nodes["Node90"].outputs[0]', 'nodes["Node95"].inputs[0]')
self.innerLink('nodes["Node90"].outputs[0]', 'nodes["Node95"].inputs[1]')
self.innerLink('nodes["Node94"].outputs[0]', 'nodes["Node95"].inputs[2]')
self.innerLink('nodes["Node21"].outputs[0]', 'nodes["Node96"].inputs[0]')
self.innerLink('nodes["Node49"].outputs[0]', 'nodes["Node96"].inputs[1]')
self.innerLink('nodes["Node96"].outputs[0]', 'nodes["Node97"].inputs[0]')
self.innerLink('nodes["Node47"].outputs[0]', 'nodes["Node97"].inputs[1]')
self.innerLink('nodes["Node49"].outputs[0]', 'nodes["Node98"].inputs[0]')
self.innerLink('nodes["Node47"].outputs[0]', 'nodes["Node98"].inputs[1]')
self.innerLink('nodes["Node21"].outputs[0]', 'nodes["Node99"].inputs[0]')
self.innerLink('nodes["Node98"].outputs[0]', 'nodes["Node99"].inputs[1]')
self.innerLink('nodes["Node97"].outputs[0]', 'nodes["Node100"].inputs[0]')
self.innerLink('nodes["Node99"].outputs[0]', 'nodes["Node100"].inputs[1]')
self.innerLink('nodes["Node14"].outputs[0]', 'nodes["Node100"].inputs[2]')
self.innerLink('nodes["Node95"].outputs[0]', 'nodes["Node101"].inputs[0]')
self.innerLink('nodes["Node84"].outputs[0]', 'nodes["Node101"].inputs[1]')
self.innerLink('nodes["Node100"].outputs[0]', 'nodes["Node101"].inputs[2]')
self.innerLink('nodes["Node95"].outputs[0]', 'nodes["Node102"].inputs[0]')
self.innerLink('nodes["Node85"].outputs[0]', 'nodes["Node102"].inputs[1]')
self.innerLink('nodes["Node16"].outputs[0]', 'nodes["Node102"].inputs[2]')
self.innerLink('nodes["Node95"].outputs[0]', 'nodes["Node103"].inputs[0]')
self.innerLink('nodes["Node86"].outputs[0]', 'nodes["Node103"].inputs[1]')
self.innerLink('nodes["Node15"].outputs[0]', 'nodes["Node103"].inputs[2]')
self.innerLink('nodes["Node39"].outputs[0]', 'nodes["Node104"].inputs[0]')
self.innerLink('nodes["Node21"].outputs[0]', 'nodes["Node104"].inputs[1]')
self.innerLink('nodes["Node104"].outputs[0]', 'nodes["Node105"].inputs[0]')
self.innerLink('nodes["Node10"].outputs[0]', 'nodes["Node105"].inputs[1]')
self.innerLink('nodes["Node105"].outputs[0]', 'nodes["Node106"].inputs[0]')
self.innerLink('nodes["Node106"].outputs[0]', 'nodes["Node107"].inputs[0]')
self.innerLink('nodes["Node45"].outputs[0]', 'nodes["Node108"].inputs[0]')
self.innerLink('nodes["Node21"].outputs[0]', 'nodes["Node108"].inputs[1]')
self.innerLink('nodes["Node108"].outputs[0]', 'nodes["Node109"].inputs[0]')
self.innerLink('nodes["Node10"].outputs[0]', 'nodes["Node109"].inputs[1]')
self.innerLink('nodes["Node109"].outputs[0]', 'nodes["Node110"].inputs[0]')
self.innerLink('nodes["Node110"].outputs[0]', 'nodes["Node111"].inputs[0]')
self.innerLink('nodes["Node107"].outputs[0]', 'nodes["Node112"].inputs[0]')
self.innerLink('nodes["Node107"].outputs[0]', 'nodes["Node112"].inputs[1]')
self.innerLink('nodes["Node111"].outputs[0]', 'nodes["Node112"].inputs[2]')
self.innerLink('nodes["Node47"].outputs[0]', 'nodes["Node113"].inputs[0]')
self.innerLink('nodes["Node21"].outputs[0]', 'nodes["Node113"].inputs[1]')
self.innerLink('nodes["Node113"].outputs[0]', 'nodes["Node114"].inputs[0]')
self.innerLink('nodes["Node49"].outputs[0]', 'nodes["Node114"].inputs[1]')
self.innerLink('nodes["Node21"].outputs[0]', 'nodes["Node115"].inputs[0]')
self.innerLink('nodes["Node49"].outputs[0]', 'nodes["Node115"].inputs[1]')
self.innerLink('nodes["Node115"].outputs[0]', 'nodes["Node116"].inputs[0]')
self.innerLink('nodes["Node47"].outputs[0]', 'nodes["Node116"].inputs[1]')
self.innerLink('nodes["Node114"].outputs[0]', 'nodes["Node117"].inputs[0]')
self.innerLink('nodes["Node116"].outputs[0]', 'nodes["Node117"].inputs[1]')
self.innerLink('nodes["Node14"].outputs[0]', 'nodes["Node117"].inputs[2]')
self.innerLink('nodes["Node112"].outputs[0]', 'nodes["Node118"].inputs[0]')
self.innerLink('nodes["Node101"].outputs[0]', 'nodes["Node118"].inputs[1]')
self.innerLink('nodes["Node117"].outputs[0]', 'nodes["Node118"].inputs[2]')
self.innerLink('nodes["Node112"].outputs[0]', 'nodes["Node119"].inputs[0]')
self.innerLink('nodes["Node102"].outputs[0]', 'nodes["Node119"].inputs[1]')
self.innerLink('nodes["Node16"].outputs[0]', 'nodes["Node119"].inputs[2]')
self.innerLink('nodes["Node112"].outputs[0]', 'nodes["Node120"].inputs[0]')
self.innerLink('nodes["Node103"].outputs[0]', 'nodes["Node120"].inputs[1]')
self.innerLink('nodes["Node13"].outputs[0]', 'nodes["Node120"].inputs[2]')
self.innerLink('nodes["Node28"].outputs[0]', 'nodes["Node121"].inputs[0]')
self.innerLink('nodes["Node119"].outputs[0]', 'nodes["Node121"].inputs[1]')
self.innerLink('nodes["Node33"].outputs[0]', 'nodes["Node122"].inputs[0]')
self.innerLink('nodes["Node120"].outputs[0]', 'nodes["Node122"].inputs[1]')
self.innerLink('nodes["Node121"].outputs[0]', 'nodes["Node123"].inputs[0]')
self.innerLink('nodes["Node122"].outputs[0]', 'nodes["Node123"].inputs[1]')
self.innerLink('nodes["Node14"].outputs[0]', 'nodes["Node123"].inputs[2]')
self.innerLink('nodes["Node47"].outputs[0]', 'nodes["Node124"].inputs[0]')
self.innerLink('nodes["Node49"].outputs[0]', 'nodes["Node124"].inputs[1]')
self.innerLink('nodes["Node21"].outputs[0]', 'nodes["Node125"].inputs[0]')
self.innerLink('nodes["Node124"].outputs[0]', 'nodes["Node125"].inputs[1]')
self.innerLink('nodes["Node49"].outputs[0]', 'nodes["Node126"].inputs[0]')
self.innerLink('nodes["Node47"].outputs[0]', 'nodes["Node126"].inputs[1]')
self.innerLink('nodes["Node21"].outputs[0]', 'nodes["Node127"].inputs[0]')
self.innerLink('nodes["Node126"].outputs[0]', 'nodes["Node127"].inputs[1]')
self.innerLink('nodes["Node125"].outputs[0]', 'nodes["Node128"].inputs[0]')
self.innerLink('nodes["Node127"].outputs[0]', 'nodes["Node128"].inputs[1]')
self.innerLink('nodes["Node128"].outputs[0]', 'nodes["Node129"].inputs[0]')
self.innerLink('nodes["Node22"].outputs[0]', 'nodes["Node129"].inputs[1]')
self.innerLink('nodes["Node52"].outputs[0]', 'nodes["Node130"].inputs[0]')
self.innerLink('nodes["Node22"].outputs[0]', 'nodes["Node130"].inputs[1]')
self.innerLink('nodes["Node23"].outputs[0]', 'nodes["Node131"].inputs[0]')
self.innerLink('nodes["Node130"].outputs[0]', 'nodes["Node131"].inputs[1]')
self.innerLink('nodes["Node22"].outputs[0]', 'nodes["Node132"].inputs[0]')
self.innerLink('nodes["Node131"].outputs[0]', 'nodes["Node132"].inputs[1]')
self.innerLink('nodes["Node128"].outputs[0]', 'nodes["Node133"].inputs[0]')
self.innerLink('nodes["Node22"].outputs[0]', 'nodes["Node133"].inputs[1]')
self.innerLink('nodes["Node128"].outputs[0]', 'nodes["Node134"].inputs[0]')
self.innerLink('nodes["Node132"].outputs[0]', 'nodes["Node134"].inputs[1]')
self.innerLink('nodes["Node134"].outputs[0]', 'nodes["Node135"].inputs[0]')
self.innerLink('nodes["Node135"].outputs[0]', 'nodes["Node136"].inputs[0]')
self.innerLink('nodes["Node128"].outputs[0]', 'nodes["Node137"].inputs[0]')
self.innerLink('nodes["Node132"].outputs[0]', 'nodes["Node137"].inputs[1]')
self.innerLink('nodes["Node136"].outputs[0]', 'nodes["Node138"].inputs[0]')
self.innerLink('nodes["Node137"].outputs[0]', 'nodes["Node138"].inputs[1]')
self.innerLink('nodes["Node138"].outputs[0]', 'nodes["Node139"].inputs[0]')
self.innerLink('nodes["Node128"].outputs[0]', 'nodes["Node140"].inputs[0]')
self.innerLink('nodes["Node22"].outputs[0]', 'nodes["Node140"].inputs[1]')
self.innerLink('nodes["Node132"].outputs[0]', 'nodes["Node141"].inputs[0]')
self.innerLink('nodes["Node22"].outputs[0]', 'nodes["Node141"].inputs[1]')
self.innerLink('nodes["Node140"].outputs[0]', 'nodes["Node142"].inputs[0]')
self.innerLink('nodes["Node141"].outputs[0]', 'nodes["Node142"].inputs[1]')
self.innerLink('nodes["Node139"].outputs[0]', 'nodes["Node143"].inputs[0]')
self.innerLink('nodes["Node142"].outputs[0]', 'nodes["Node143"].inputs[1]')
self.innerLink('nodes["Node18"].outputs[0]', 'nodes["Node143"].inputs[2]')
self.innerLink('nodes["Node133"].outputs[0]', 'nodes["Node144"].inputs[0]')
self.innerLink('nodes["Node143"].outputs[0]', 'nodes["Node144"].inputs[1]')
self.innerLink('nodes["Node14"].outputs[0]', 'nodes["Node144"].inputs[2]')
self.innerLink('nodes["Node28"].outputs[0]', 'nodes["Node145"].inputs[0]')
self.innerLink('nodes["Node33"].outputs[0]', 'nodes["Node145"].inputs[1]')
self.innerLink('nodes["Node14"].outputs[0]', 'nodes["Node145"].inputs[2]')
self.innerLink('nodes["Node39"].outputs[0]', 'nodes["Node146"].inputs[0]')
self.innerLink('nodes["Node45"].outputs[0]', 'nodes["Node146"].inputs[1]')
self.innerLink('nodes["Node14"].outputs[0]', 'nodes["Node146"].inputs[2]')
self.innerLink('nodes["Node45"].outputs[0]', 'nodes["Node147"].inputs[0]')
self.innerLink('nodes["Node21"].outputs[0]', 'nodes["Node147"].inputs[1]')
self.innerLink('nodes["Node21"].outputs[0]', 'nodes["Node148"].inputs[0]')
self.innerLink('nodes["Node49"].outputs[0]', 'nodes["Node148"].inputs[1]')
self.innerLink('nodes["Node47"].outputs[0]', 'nodes["Node149"].inputs[0]')
self.innerLink('nodes["Node148"].outputs[0]', 'nodes["Node149"].inputs[1]')
self.innerLink('nodes["Node149"].outputs[0]', 'nodes["Node150"].inputs[0]')
self.innerLink('nodes["Node49"].outputs[0]', 'nodes["Node150"].inputs[1]')
self.innerLink('nodes["GroupInput"].outputs[5]', 'nodes["Node151"].inputs[0]')
self.innerLink('nodes["Node21"].outputs[0]', 'nodes["Node151"].inputs[1]')
self.innerLink('nodes["Node45"].outputs[0]', 'nodes["Node152"].inputs[0]')
self.innerLink('nodes["Node151"].outputs[0]', 'nodes["Node152"].inputs[1]')
self.innerLink('nodes["Node21"].outputs[0]', 'nodes["Node153"].inputs[0]')
self.innerLink('nodes["Node49"].outputs[0]', 'nodes["Node153"].inputs[1]')
self.innerLink('nodes["Node47"].outputs[0]', 'nodes["Node154"].inputs[0]')
self.innerLink('nodes["Node153"].outputs[0]', 'nodes["Node154"].inputs[1]')
self.innerLink('nodes["Node154"].outputs[0]', 'nodes["Node155"].inputs[0]')
self.innerLink('nodes["Node49"].outputs[0]', 'nodes["Node155"].inputs[1]')
self.innerLink('nodes["Node47"].outputs[0]', 'nodes["Node156"].inputs[0]')
self.innerLink('nodes["Node49"].outputs[0]', 'nodes["Node156"].inputs[1]')
self.innerLink('nodes["Node152"].outputs[0]', 'nodes["Node157"].inputs[0]')
self.innerLink('nodes["Node156"].outputs[0]', 'nodes["Node157"].inputs[1]')
self.innerLink('nodes["Node155"].outputs[0]', 'nodes["Node157"].inputs[2]')
self.innerLink('nodes["Node147"].outputs[0]', 'nodes["Node158"].inputs[0]')
self.innerLink('nodes["Node157"].outputs[0]', 'nodes["Node158"].inputs[1]')
self.innerLink('nodes["Node150"].outputs[0]', 'nodes["Node158"].inputs[2]')
self.innerLink('nodes["Node158"].outputs[0]', 'nodes["Node159"].inputs[0]')
self.innerLink('nodes["Node22"].outputs[0]', 'nodes["Node159"].inputs[1]')
self.innerLink('nodes["GroupInput"].outputs[4]', 'nodes["Node160"].inputs[0]')
self.innerLink('nodes["GroupInput"].outputs[5]', 'nodes["Node160"].inputs[1]')
self.innerLink('nodes["Node160"].outputs[0]', 'nodes["Node161"].inputs[0]')
self.innerLink('nodes["Node22"].outputs[0]', 'nodes["Node161"].inputs[1]')
self.innerLink('nodes["Node23"].outputs[0]', 'nodes["Node162"].inputs[0]')
self.innerLink('nodes["Node161"].outputs[0]', 'nodes["Node162"].inputs[1]')
self.innerLink('nodes["Node22"].outputs[0]', 'nodes["Node163"].inputs[0]')
self.innerLink('nodes["Node162"].outputs[0]', 'nodes["Node163"].inputs[1]')
self.innerLink('nodes["Node158"].outputs[0]', 'nodes["Node164"].inputs[0]')
self.innerLink('nodes["Node22"].outputs[0]', 'nodes["Node164"].inputs[1]')
self.innerLink('nodes["Node158"].outputs[0]', 'nodes["Node165"].inputs[0]')
self.innerLink('nodes["Node163"].outputs[0]', 'nodes["Node165"].inputs[1]')
self.innerLink('nodes["Node165"].outputs[0]', 'nodes["Node166"].inputs[0]')
self.innerLink('nodes["Node166"].outputs[0]', 'nodes["Node167"].inputs[0]')
self.innerLink('nodes["Node158"].outputs[0]', 'nodes["Node168"].inputs[0]')
self.innerLink('nodes["Node163"].outputs[0]', 'nodes["Node168"].inputs[1]')
self.innerLink('nodes["Node167"].outputs[0]', 'nodes["Node169"].inputs[0]')
self.innerLink('nodes["Node168"].outputs[0]', 'nodes["Node169"].inputs[1]')
self.innerLink('nodes["Node169"].outputs[0]', 'nodes["Node170"].inputs[0]')
self.innerLink('nodes["Node158"].outputs[0]', 'nodes["Node171"].inputs[0]')
self.innerLink('nodes["Node22"].outputs[0]', 'nodes["Node171"].inputs[1]')
self.innerLink('nodes["Node163"].outputs[0]', 'nodes["Node172"].inputs[0]')
self.innerLink('nodes["Node22"].outputs[0]', 'nodes["Node172"].inputs[1]')
self.innerLink('nodes["Node171"].outputs[0]', 'nodes["Node173"].inputs[0]')
self.innerLink('nodes["Node172"].outputs[0]', 'nodes["Node173"].inputs[1]')
self.innerLink('nodes["Node170"].outputs[0]', 'nodes["Node174"].inputs[0]')
self.innerLink('nodes["Node173"].outputs[0]', 'nodes["Node174"].inputs[1]')
self.innerLink('nodes["Node18"].outputs[0]', 'nodes["Node174"].inputs[2]')
self.innerLink('nodes["Node164"].outputs[0]', 'nodes["Node175"].inputs[0]')
self.innerLink('nodes["Node174"].outputs[0]', 'nodes["Node175"].inputs[1]')
self.innerLink('nodes["Node14"].outputs[0]', 'nodes["Node175"].inputs[2]')
self.innerLink('nodes["Node51"].outputs[0]', 'nodes["Node176"].inputs[0]')
self.innerLink('nodes["Node159"].outputs[0]', 'nodes["Node176"].inputs[1]')
self.innerLink('nodes["Node129"].outputs[0]', 'nodes["Node176"].inputs[2]')
self.innerLink('nodes["Node51"].outputs[0]', 'nodes["Node177"].inputs[0]')
self.innerLink('nodes["GroupInput"].outputs[5]', 'nodes["Node177"].inputs[1]')
self.innerLink('nodes["Node52"].outputs[0]', 'nodes["Node177"].inputs[2]')
self.innerLink('nodes["Node51"].outputs[0]', 'nodes["Node178"].inputs[0]')
self.innerLink('nodes["GroupInput"].outputs[4]', 'nodes["Node178"].inputs[1]')
self.innerLink('nodes["Node52"].outputs[0]', 'nodes["Node178"].inputs[2]')
self.innerLink('nodes["Node51"].outputs[0]', 'nodes["Node179"].inputs[0]')
self.innerLink('nodes["Node145"].outputs[0]', 'nodes["Node179"].inputs[1]')
self.innerLink('nodes["Node123"].outputs[0]', 'nodes["Node179"].inputs[2]')
self.innerLink('nodes["Node51"].outputs[0]', 'nodes["Node180"].inputs[0]')
self.innerLink('nodes["Node175"].outputs[0]', 'nodes["Node180"].inputs[1]')
self.innerLink('nodes["Node144"].outputs[0]', 'nodes["Node180"].inputs[2]')
self.innerLink('nodes["Node51"].outputs[0]', 'nodes["Node181"].inputs[0]')
self.innerLink('nodes["Node5"].outputs[0]', 'nodes["Node181"].inputs[1]')
self.innerLink('nodes["Node11"].outputs[0]', 'nodes["Node181"].inputs[2]')
self.innerLink('nodes["Node51"].outputs[0]', 'nodes["Node182"].inputs[0]')
self.innerLink('nodes["Node146"].outputs[0]', 'nodes["Node182"].inputs[1]')
self.innerLink('nodes["Node118"].outputs[0]', 'nodes["Node182"].inputs[2]')
self.innerLink('nodes["Node182"].outputs[0]', 'nodes["GroupOutput"].inputs[0]')
self.innerLink('nodes["Node179"].outputs[0]', 'nodes["GroupOutput"].inputs[1]')
self.innerLink('nodes["Node180"].outputs[0]', 'nodes["GroupOutput"].inputs[2]')
self.innerLink('nodes["Node181"].outputs[0]', 'nodes["GroupOutput"].inputs[3]')
self.innerLink('nodes["Node176"].outputs[0]', 'nodes["GroupOutput"].inputs[4]')
self.innerLink('nodes["Node178"].outputs[0]', 'nodes["GroupOutput"].inputs[5]')
self.innerLink('nodes["Node177"].outputs[0]', 'nodes["GroupOutput"].inputs[6]')
def getNodetree(self, name):
if bpy.data.node_groups.find(name)==-1:
self.createNodetree(name)
else:
self.node_tree=bpy.data.node_groups[name]
    def addSocket(self, is_output, sockettype, name):
        # For now duplicated socket names are not allowed. socket is initialised to
        # None so that asking for an existing name returns None instead of raising
        # UnboundLocalError at the return statement.
        socket = None
        if is_output:
            if self.node_tree.nodes['GroupOutput'].inputs.find(name) == -1:
                socket = self.node_tree.outputs.new(sockettype, name)
        else:
            if self.node_tree.nodes['GroupInput'].outputs.find(name) == -1:
                socket = self.node_tree.inputs.new(sockettype, name)
        return socket
def addNode(self, nodetype, attrs):
node=self.node_tree.nodes.new(nodetype)
for attr in attrs:
self.value_set(node, attr, attrs[attr])
return node
def getNode(self, nodename):
if self.node_tree.nodes.find(nodename)>-1:
return self.node_tree.nodes[nodename]
return None
def innerLink(self, socketin, socketout):
SI=self.node_tree.path_resolve(socketin)
SO=self.node_tree.path_resolve(socketout)
self.node_tree.links.new(SI, SO)
def free(self):
if self.node_tree.users==1:
bpy.data.node_groups.remove(self.node_tree, do_unlink=True)
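# Usage sketch (added for illustration; the names below are assumptions, not taken
# from this file): a builder class like the one above is normally driven from a
# material setup script, e.g.
#   builder = TilesNodeTree()                   # hypothetical subclass name
#   builder.getNodetree('tiles_group')          # creates the tree once, reuses it afterwards
#   group_node = material.node_tree.nodes.new('ShaderNodeGroup')
#   group_node.node_tree = builder.node_tree    # exposes the UV / CellID / Height / mask outputs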
| [
"[email protected]"
] | |
a613d147df8ad21434ac4cf9eead8e054bb3143b | b71c43b7f7785ca6718d74aff762557f5591758d | /Python/Week1/CodingDojo_Python/Django_Projects/email_validation/apps/emails/urls.py | 5f6c9ebfc8cb14cc19aa4e6250f72d24b019c58b | [] | no_license | jqchang/DojoAssignments | 4be9db6039763905eada2253873997ba5bfd1058 | 3c5a8b351879ccc380af9ce3b5267ca26ea62681 | refs/heads/master | 2021-01-13T15:28:45.189452 | 2017-04-15T23:31:58 | 2017-04-15T23:31:58 | 79,868,706 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 259 | py | from django.conf.urls import url
from . import views
# from django.contrib import admin
urlpatterns = [
url(r'^$', views.index),
url(r'^process$', views.process),
url(r'^success$', views.success),
url(r'^delete/(?P<id>\d+)$', views.delete)
]
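# The routes above assume apps/emails/views.py defines index, process, success and
# delete. The named group in the delete pattern is passed to the view as a keyword
# argument; a minimal sketch (not from the original project) would be:
#   def delete(request, id):
#       Email.objects.filter(id=id).delete()
#       return redirect('/success')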
| [
"[email protected]"
] | |
2412d1167b7c0b04015b6e4b11532237a2e2543d | 2b0eab74af8d23244ff11699830f9bb10fbd717a | /energies/migrations/0007_energyvector_data.py | e945aa4ab0b6a070aaa73901fa7bf9c5246097dd | [] | no_license | alexandrenorman/mixeur | c7e25cd20b03c78b361cb40e3e359a6dc5d9b06b | 95d21cd6036a99c5f399b700a5426e9e2e17e878 | refs/heads/main | 2023-03-13T23:50:11.800627 | 2021-03-07T15:49:15 | 2021-03-07T15:49:15 | 345,384,858 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,103 | py | # Generated by Django 2.2 on 2019-05-02 14:54
from decimal import Decimal
from django.db import migrations
def load_data(apps, schema_editor):
EnergyVector = apps.get_model("energies", "EnergyVector")
Energy = apps.get_model("energies", "Energy")
energies = {energy.identifier: energy for energy in Energy.objects.all()}
EnergyVector.objects.bulk_create([
EnergyVector(vector='oil_kg', buying_unit='kg de fioul', pci=Decimal("11.8"), unit="kWh / kg", energy=energies['oil'], order=10),
EnergyVector(vector='oil_l', buying_unit='L de fioul', pci=Decimal("10.384"), unit="kWh / L", energy=energies['oil'], order=20),
EnergyVector(vector='oil_kwh', buying_unit='kWh', pci=Decimal("1"), unit="kWh", energy=energies['oil'], order=30),
EnergyVector(vector='propane_kg', buying_unit='kg de propane', pci=Decimal("12.88"), unit="kWh / kg", energy=energies['gaz_b1'], order=40),
EnergyVector(vector='propane_m3', buying_unit='m3 de propane', pci=Decimal("7728"), unit="kWh / m³", energy=energies['gaz_b1'], order=50),
EnergyVector(vector='propane_bottles', buying_unit='Bouteilles de 13 kg de propane', pci=Decimal("167"), unit="kWh / bouteilles de 13 kg", energy=energies['propane'], order=60),
EnergyVector(vector='propane_kwh', buying_unit='kWh', pci=Decimal("1"), unit="kWh", energy=energies['propane'], order=70),
EnergyVector(vector='natural_gaz_m3', buying_unit='m3 de gaz naturel', pci=Decimal("10"), unit="kWh / m³", energy=energies['gaz_b1'], order=80),
EnergyVector(vector='natural_gaz_kwh', buying_unit='kWh', pci=Decimal("1"), unit="kWh", energy=energies['gaz_b1'], order=90),
EnergyVector(vector='electricity_kwh', buying_unit='kWh', pci=Decimal("1"), unit="kWh", energy=energies['electricity'], order=100),
EnergyVector(vector='rcu_kwh', buying_unit='kWh', pci=Decimal("1"), unit="kWh", energy=energies['network'], order=110),
EnergyVector(vector='wood_logs_stere', buying_unit='stères de bois', pci=Decimal("1700"), unit="kWh / stère", energy=energies['wood'], order=120),
EnergyVector(vector='granules_t', buying_unit='tonne', pci=Decimal("4700"), unit="kWh / t", energy=energies['bulk_granules'], order=130),
EnergyVector(vector='granules_bag', buying_unit='nombres de sacs de granulés', pci=Decimal("70.5"), unit="kWh/sac de granulés", energy=energies['bag_granules'], order=140),
EnergyVector(vector='shredded_wood_t', buying_unit='tonne', pci=Decimal("3500"), unit="kWh / t", energy=energies['shredded_wood'], order=150),
EnergyVector(vector='shredded_wood_map', buying_unit='MAP', pci=Decimal("875"), unit="kWh / MAP", energy=energies['shredded_wood'], order=160),
EnergyVector(vector='shredded_wood_kwh', buying_unit='kWh', pci=Decimal("1"), unit="kWh", energy=energies['shredded_wood'], order=170),
])
class Migration(migrations.Migration):
dependencies = [
('energies', '0006_energyvector'),
]
operations = [
migrations.RunPython(load_data, reverse_code=migrations.RunPython.noop),
]
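# Usage note (added for clarity): apply with `python manage.py migrate energies 0007_energyvector_data`.
# Because reverse_code is RunPython.noop, rolling back to 0006_energyvector unapplies the
# migration without deleting the EnergyVector rows created above.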
| [
"[email protected]"
] | |
b3b2a6a978e363f8f53a0106b14af35f54d5c484 | e23a4f57ce5474d468258e5e63b9e23fb6011188 | /125_algorithms/_examples/_algorithms_challenges/pybites/advanced/98/grid.py | 0b25292e5ce34d129230783fd6a21739685074b5 | [] | no_license | syurskyi/Python_Topics | 52851ecce000cb751a3b986408efe32f0b4c0835 | be331826b490b73f0a176e6abed86ef68ff2dd2b | refs/heads/master | 2023-06-08T19:29:16.214395 | 2023-05-29T17:09:11 | 2023-05-29T17:09:11 | 220,583,118 | 3 | 2 | null | 2023-02-16T03:08:10 | 2019-11-09T02:58:47 | Python | UTF-8 | Python | false | false | 2,104 | py | import re
DOWN, UP, LEFT, RIGHT = '⇓', '⇑', '⇐', '⇒'
START_VALUE = 1
def print_sequence_route(grid, start_coordinates=None):
"""Receive grid string, convert to 2D matrix of ints, find the
START_VALUE coordinates and move through the numbers in order printing
them. Each time you turn append the grid with its corresponding symbol
(DOWN / UP / LEFT / RIGHT). See the TESTS for more info."""
    matrix = []
    for i,line in enumerate(grid.splitlines()):
        # only every other line carries numbers; the rows in between hold the | connectors
        if i % 2 == 1:
            values = list(map(int,re.split(r'\D+',line)))
            if START_VALUE in values:
                start_row = len(matrix)
                start_col = values.index(START_VALUE)
            matrix.append(values)
length = len(matrix)
goal = length**2
current_row,current_col = start_row,start_col
current_value = START_VALUE
previous_direction = None
print(current_value,end=' ')
while current_value != goal:
directions = ((current_row + 1,current_col,DOWN),(current_row - 1,current_col,UP),(current_row,current_col + 1,RIGHT),(current_row,current_col -1,LEFT))
for neighbor_x,neighbor_y,direction in directions:
if 0 <= neighbor_x < length and 0 <= neighbor_y < length:
if matrix[neighbor_x][neighbor_y] == current_value + 1:
if previous_direction is not None and direction != previous_direction:
print(direction)
previous_direction = direction
elif previous_direction is None:
previous_direction = direction
print(current_value + 1,end=' ')
break
        # after the scan, neighbor_x/neighbor_y point at the cell holding current_value + 1;
        # stepping onto it belongs at while-loop level, outside the neighbor for-loop
        current_row,current_col = neighbor_x,neighbor_y
        current_value += 1
if __name__ == "__main__":
small_grid = """
21 - 22 - 23 - 24 - 25
|
20 7 - 8 - 9 - 10
| | |
19 6 1 - 2 11
| | | |
18 5 - 4 - 3 12
| |
17 - 16 - 15 - 14 - 13"""
print_sequence_route(small_grid)
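    # Worked example (added for clarity, not part of the original bite): with the grid
    # above the call should print something like
    #   1 2 ⇓
    #   3 ⇐
    #   4 5 ⇑
    #   6 7 ⇒
    #   8 9 10 ⇓
    #   11 12 13 ⇐
    #   14 15 16 17 ⇑
    #   18 19 20 21 ⇒
    #   22 23 24 25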
| [
"[email protected]"
] | |
1ce34e71483a21a7ca0f0f0bf8d2cc6bb9f71ef7 | 2d0bada349646b801a69c542407279cc7bc25013 | /src/vai_quantizer/vai_q_pytorch/nndct_shared/base/key_names.py | 1cf30c4715f21302ea8f0965e4a1eab9d78f66b6 | [
"Apache-2.0",
"BSD-2-Clause",
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"BSD-3-Clause-Open-MPI",
"LicenseRef-scancode-free-unknown",
"Libtool-exception",
"GCC-exception-3.1",
"LicenseRef-scancode-mit-old-style",
"OFL-1.1",
"JSON",
"LGPL-2.1-only",
"LGPL-2.0-or-later",
"ICU",
"LicenseRef-scancode-other-permissive",
"GPL-2.0-or-later",
"GPL-3.0-only",
"LicenseRef-scancode-issl-2018",
"MIT",
"LGPL-2.1-or-later",
"LicenseRef-scancode-unicode",
"LGPL-3.0-only",
"LicenseRef-scancode-warranty-disclaimer",
"GPL-3.0-or-later",
"Zlib",
"BSD-Source-Code",
"ClArtistic",
"LicenseRef-scancode-unknown-license-reference",
"ISC",
"NCSA",
"LicenseRef-scancode-proprietary-license",
"GPL-2.0-only",
"CC-BY-4.0",
"FSFULLR",
"Minpack",
"Unlicense",
"BSL-1.0",
"NAIST-2003",
"LicenseRef-scancode-protobuf",
"LicenseRef-scancode-public-domain",
"Libpng",
"Spencer-94",
"Intel",
"GPL-1.0-or-later",
"MPL-2.0"
] | permissive | Xilinx/Vitis-AI | 31e664f7adff0958bb7d149883ab9c231efb3541 | f74ddc6ed086ba949b791626638717e21505dba2 | refs/heads/master | 2023-08-31T02:44:51.029166 | 2023-07-27T06:50:28 | 2023-07-27T06:50:28 | 215,649,623 | 1,283 | 683 | Apache-2.0 | 2023-08-17T09:24:55 | 2019-10-16T21:41:54 | Python | UTF-8 | Python | false | false | 11,018 | py | # Copyright 2019 Xilinx Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
class GlobalMap(object):
globalmap = {}
def set_map(self, key, value):
self.globalmap[key] = value
def set(self, **keys):
try:
for key_, value_ in keys.items():
self.globalmap[key_] = str(value_)
print(key_ + ":" + str(value_))
except BaseException as msg:
print(msg)
raise msg
def del_map(self, key):
try:
del self.globalmap[key]
return self.globalmap
except KeyError:
pass
#print("key:'" + str(key) + "' not found!")
def get_ele(self, key):
if key in self.globalmap:
return self.globalmap[key]
return None
  def get(self, *args):
    try:
      dic = {}
      for key in args:
        if len(args) == 1 and args[0] == 'all':
          # must be tested before the plain single-key case, otherwise this
          # branch can never be reached
          dic = self.globalmap
        elif len(args) == 1:
          dic = self.globalmap[key]
          print(key + ":" + str(dic))
        else:
          dic[key] = self.globalmap[key]
      return dic
    except KeyError:
      print("key:'" + str(key) + "' not found!")
      return 'Null_'
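# Usage sketch (illustration only, not from the original sources): the registry is
# shared through the class-level dict, so every module sees the same values, e.g.
#   gm = GlobalMap()
#   gm.set_map(NNDCT_KEYS.QUANT_MODE, 'calib')
#   mode = gm.get_ele(NNDCT_KEYS.QUANT_MODE)   # returns None if the key was never set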
class NNDCT_KEYS(object):
#basic names
INFO_FLAG = "NNDCT_NOTE"
WARN_FLAG = "NNDCT_WARN"
DEBUG_FLAG = "NNDCT_DEBUG"
ERROR_FLAG = "NNDCT_ERROR"
VERBOSE_LEVEL = 'nndct_verbose_lvl'
LOG_LEVEL = 'nndct_log_lvl'
LOGGER = 'nndct_logger'
SUFFIX_CONNECT = 'SUFFIX'
#for debug
COMPILER = 'nndct_compiler'
OUTPUT_TO_NODE_MAP = 'output_to_node_map'
NODE_TO_OUTPUT_MAP = 'node_to_output_map'
#for Xgraph & Xnode
XMODEL_SUFFIX = '.xmodel'
XMODEL_IMAGE_SUFFIX = '.svg'
XPARAM_SUFFIX = '.xparams'
XPATTERN_SUFFIX = '.xpattern'
XBLOBS_SUFFIX = '.xblobs'
#for parsing/exporting
TORCH_REFLECT_OPS_MAP = 'torch_reflect_ops_map'
TORCH_PARSER_MAP = 'torch_parser_map'
TORCH_SUPPORT_OPS_MAP = 'torch_support_ops_map'
TORCH_PARAM_MAP = 'torch_parameters_name_map'
TORCH_IR_ATTRS_MAP = 'torch_ir_attrs_map'
TORCH_SCHEMA_OP_TABLE = 'torch_schema_op_table'
NODE_CALLER_MAP = 'node_caller_map'
CUSTOM_OP_ATTRS_MAP = 'custom_op_attrs_map'
CUSTOM_TO_XIR_LIST = 'custom_to_xir_list'
DEVICE = 'device'
TORCH_SCRIPT_MODEL = 'torch_script_model'
#for quantization module:
QUANT_MODE = "quant_mode"
QUANTIZER = "nndct_quantizer"
QUANT_SUFFIX = '_quant.json'
QUANT_DEVICE = "quant_device"
QUANT_CONFIG = "quant_config"
PARAM_SCAN_SCOPE = "ParamScan"
BLOB_SCAN_SCOPE = "BlobScan"
QUANT_PARAMSCAN_OPS_COLLECTION = "qaunt_paramscan_ops_collection"
BLOB_PREFFIX = "Blob"
MAX_SCAN_SUFFIX = SUFFIX_CONNECT + "maxscan"
MIN_SCAN_SUFFIX = SUFFIX_CONNECT + "minscan"
DIFFS_SCAN_SUFFIX = SUFFIX_CONNECT + "diffs"
QUANTTABLE_VAR_SUFFIX = SUFFIX_CONNECT + "QuantTableVar"
#for load module
NNDCT_LOADER = 'nndct_loader'
LOAD_FLAG = 'load_flag'
ORGVARS_SUFFIX = '_OrgVars.json'
ORGKERASMODEL_SUFFIX = '_OrgKerasModel.json'
#for modification process
MODIFIER = 'nndct_modifier'
TRANS_SCOPE = 'TransScp'
#for graph export
IR_GRAPH = 'nndct_ir_graph'
IR_NAME = 'nndct_ir_name'
IR_EXPORT_TYPE = 'ir_export_type'
#for training and controlling
NRS_COLLECTION = "non_restorable_collection"
NGTS_COLLECTION = "non_grad_tensor_collection"
DEBUG_COLLECTION = "nndct_debug_collection"
#for compile
PARAMETER_FILE = 'NndctParameter'
ISTRUCTION_FILE = 'NndctInstruction'
WORKSPACE_PATH = 'NndctWorkspace'
INPUT_FILE = 'NndctInput'
DEVOP_PREFFIX = 'fpga_op_'
FIX_OP_SUFFIX = '_fix'
PRE_FIX_OP_SUFFIX = '_pre_fix'
TRANSPOSE_OP_SUFFIX = '_t'
#deploy
DEPLOY_CHECK_DATA_FOLDER = 'deploy_check_data'
#dynamo
WEGO_DYNAMO_SCRIPTER = 'wego_dynamo_scripter'
GRAPH_COUNTER = 'graph_counter'
class NNDCT_OP(object):
ADAPTIVEAVGPOOL2D = 'nndct_adaptive_avg_pool2d'
ADD = 'nndct_elemwise_add'
ADDMM = 'nndct_addmm'
ANGLE = 'nndct_angle'
ARANGE = 'nndct_arange'
ARGMAX = 'nndct_argmax_no_dim'
ARGMAX_DIM = 'nndct_argmax_dim'
AVG_POOL = 'nndct_avgpool'
BASIC_GRU = 'nndct_basic_gru'
BASIC_LSTM = 'nndct_basic_lstm'
BATCH_NORM = 'nndct_batch_norm'
BATCH_TO_SPACE_ND = 'nndct_batch_to_space_nd'
BIAS_ADD = 'nndct_bias_add'
BIDIRECTIONAL_RNN = 'nndct_bidirectional_rnn'
BMM = 'nndct_bmm'
BUFFER_GET_NEXT = 'nndct_buffer_get_next'
BLOCK = 'nndct_block'
CAST = 'nndct_cast'
CALL_FUNCTION = "nndct_call_function"
CALL_MODULE = "nndct_call_module"
CALL_METHOD = "nndct_call_method"
CEIL = 'nndct_ceil'
CHANNEL_SCALE = 'nndct_channel_scale'
CORRELATION1D_ELEMWISE = 'nndct_correlation1d_elemwise'
CORRELATION2D_ELEMWISE = 'nndct_correlation2d_elemwise'
COST_VOLUME = 'nndct_cost_volume'
CHUNK = 'nndct_chunk'
CLAMP = 'nndct_clamp'
COMPLEX_ABS = 'nndct_complex_abs'
CONCAT = 'nndct_concat'
CONSTANT_WITH_RESHAPE = "constant_with_reshape"
CONST = 'nndct_const'
CONTIGUOUS = 'nndct_contiguous'
CONV1D = 'nndct_conv1d'
CONV2D = 'nndct_conv2d'
CONV3D = 'nndct_conv3d'
CONVTRANSPOSE2D = 'nndct_conv_transpose_2d'
CONVTRANSPOSE3D = 'nndct_conv_transpose_3d'
DENSE = 'nndct_dense'
DEPTHWISE_CONV1D = 'nndct_depthwise_conv1d'
DEPTHWISE_CONV2D = 'nndct_depthwise_conv2d'
DEPTHWISE_CONV3D = 'nndct_depthwise_conv3d'
DEPTHWISE_CONVTRANSPOSE2D = 'nndct_depthwise_conv_transpose_2d'
DEPTHWISE_CONVTRANSPOSE3D = 'nndct_depthwise_conv_transpose_3d'
DEQUANT_STUB = 'nndct_dequant_stub'
DERIVE_LOOP_INDEX = 'nndct_derive_loop_index'
DETACH = 'nndct_detach'
DEVICE = 'nndct_device'
DTYPE = 'nndct_dtype'
DIV = 'nndct_elemwise_div'
DROPOUT = 'nndct_dropout'
ELU = 'nndct_elu'
EMBEDDING = 'nndct_embedding'
EMBEDDING_BAG = 'nndct_embedding_bag'
EMPTY = 'nndct_empty'
EQUAL = 'nndct_equal'
EXP = 'nndct_elemwise_exp'
EXPAND = 'nndct_expand'
EXPAND_AS = 'nndct_expand_as'
EXPONENTIAL = 'nndct_exponential'
FLATTEN = 'nndct_flatten'
FLOOR = 'nndct_floor'
FLOOR_DIV = 'nndct_floor_divide'
FIX = 'nndct_fix'
FPGA_OP = 'nndct_fpga_op'
GATHER = 'nndct_gather'
GELU = 'nndct_GELU'
GENERIC = 'nndct_generic'
GRID_SAMPLE = 'nndct_grid_sample'
GROUP_NORM = 'nndct_group_norm'
GRU = 'nndct_gru'
HARDTANH = 'nndct_hardtanh'
HSIGMOID = 'nndct_hsigmoid'
HSWISH = 'nndct_hswish'
IDENTITY = 'nndct_identity'
IF = 'nndct_if'
INDEX = 'nndct_index'
INDEX_INPUT_INPLACE = 'nndct_index_put_inplace'
INPLACE_COPY = 'nndct_copy_'
INPUT = 'nndct_input'
INPUT_WITH_DEFAULT = 'nndct_input_with_default'
INSTANCE_NORM = 'nndct_instance_norm'
INT = 'nndct_int'
INTERPOLATE = 'nndct_interpolate'
IRFFT = 'nndct_irfft'
ITER_GET_NEXT = 'nndct_iter_get_next'
LAYER_NORM = 'nndct_layer_norm'
LEAKY_RELU = 'nndct_leaky_relu'
LENGTH = 'nndct_len'
LINEAR = 'nndct_linear'
LINEAR = 'nndct_linear'
LIST = 'nndct_list'
LIST_ADD = 'nndct_list_add'
LOG = 'nndct_log'
LOG_SOFTMAX = 'nndct_log_softmax'
LOOP = 'nndct_loop'
LSTM = 'nndct_lstm'
LSTM_CELL = 'nndct_lstm_cell'
MATMUL = 'nndct_matmul'
MAX = 'nndct_max'
MAX_POOL = 'nndct_maxpool'
MAX_POOL1D = 'nndct_maxpool1d'
MEAN = 'nndct_mean'
MERGE = 'nndct_merge'
MIN = 'nndct_min'
MISH = 'nndct_mish'
  # e.g. tf.math.multiply: x * y, where y can be a scalar or a tensor with the same shape as x
  MULTIPLY = 'nndct_elemwise_mul'
  # e.g. tf.keras.layers.multiply:
  # takes a list of tensors, all of the same shape, and returns a single tensor (same shape).
MULTIPLYLAYER = 'nndct_multiply_layer'
NANQUANTILE = 'nndct_nanquantile'
NEG = 'nndct_neg'
NOOP = 'nndct_noop'
NORM = 'nndct_normalize'
NOT_EQUAL = 'nndct_not_equal'
NON_TENSOR_SUB = 'nndct_non_tensor_sub'
ONE_HOT = 'nndct_one_hot'
PACK = 'nndct_pack'
PAD = 'nndct_pad'
PAD_ND = "nndct_pad_nd"
PERMUTE = 'nndct_permute'
PIXEL_SHUFFLE = 'nndct_pixel_shuffle'
PIXEL_UNSHUFFLE = 'nndct_pixel_unshuffle'
PLACEHOLDER = 'nndct_placeholder'
PRELU = 'nndct_prelu'
QUANT_NEURON = 'nndct_quant_neuron'
QUANT_STUB = 'nndct_quant_stub'
QUANTILE = 'nndct_quantile'
RANDOM_UNIFORM = 'nndct_random_uniform'
RANGE = 'nndct_range'
REALDIV = 'nndct_real_div'
RELU = 'nndct_relu'
RELU6 = 'nndct_relu6'
RELUK = 'nndct_reluk'
REORG = 'nndct_reorg'
REPEAT = 'nndct_repeat'
RESHAPE = 'nndct_reshape'
RESCALING = 'nndct_rescaling'
RESIZE = 'nndct_resize'
RESIZE_3D = 'nndct_resize_3d'
RESIZE_NEAREST_3D = 'nndct_resize_nearest_3d'
RETURN = 'nndct_return'
RFFT = 'nndct_rfft'
RNN = 'nndct_rnn'
RNN_LAYER = 'nndct_rnn_layer'
RSQRT = 'nndct_rsqrt'
RSUB = 'nndct_rsub'
REMAINDER = 'nndct_remainder'
SCALAR_ADD = 'nndct_add'
SCALAR_EQUAL = 'nndct_scalar_equal'
SCALAR_LESS_THAN = 'nndct_scalar_lt'
SCALAR_MUL = 'nndct_mul'
SCALAR_SUB = 'nndct_sub'
SCALAR_REMAINDER = 'nndct_scalar_remainder'
SELECT = 'nndct_select'
SELU = 'nndct_selu'
SEPARABLECONV2D = 'nndct_separableconv2D'
SHAPE = 'nndct_shape'
SHAPE_AS_TENSOR = 'nndct_shape_as_tensor'
SIGMOID = 'nndct_sigmoid'
SIMPLE_RNN = 'nndct_simple_rnn'
SLICE = 'nndct_slice'
SLICE_TENSOR_INPLACE_COPY = 'nndct_slice_tensor_inplace_copy'
SOFTMAX = 'nndct_softmax'
SOFTPLUS = 'nndct_softplus'
SOFTSIGN = 'nndct_softsign'
SPACE_TO_BATCH_ND = 'nndct_space_to_batch_nd'
SPARSE_SOFTMAX_CROSS_ENTROPY = 'nndct_sparse_softmax_cross_entropy_with_logits'
SPLIT = 'nndct_split'
SQRT = 'nndct_sqrt'
SQUARE = 'nndct_square'
SQUEEZE = 'nndct_squeeze'
STACK = 'nndct_stack'
STACKED_RNN_CELLS = 'nndct_stacked_rnn_cells'
STFT = 'nndct_stft'
STRIDED_SLICE = 'nndct_strided_slice'
STRIDED_SLICE_INPLACE_COPY = 'nndct_strided_slice_inplace_copy'
SUB = 'nndct_elementwise_sub'
SUM = 'nndct_sum'
SWISH = 'nndct_swish'
TANH = 'nndct_tanh'
TENSOR = 'nndct_tensor'
TENSOR_ARRAY_GATHER = 'nndct_tensor_array_gather'
TENSOR_TO_SCALAR = 'nndct_tensor_to_scalar'
THRESHOLD = 'nndct_threshold'
TILE = 'nndct_tile'
TRANSPOSE = 'nndct_transpose'
TUPLE = 'nndct_tuple'
TUPLE_INPUT = 'nndct_tuple_input'
TUPLE_INDEX = 'nndct_tuple_index'
TUPLE_UNPACK = 'nndct_tuple_unpack'
UNSQUEEZE = 'nndct_unsqueeze'
UP_SAMPLING = 'nndct_up_sampling'
ZEROS = 'nndct_zeros'
UNIQUE_DIM = 'nndct_unique_dim'
_UNIQUE2 = 'nndct_unique2'
_UNIQUE = 'nndct_unique'
class NNDCT_PARAM(object):
WEIGHT = 'weights'
BIAS = 'bias'
GAMMA = 'gamma'
BETA = 'beta'
VAR = 'var'
MEAN = 'mean'
class FrameworkType(object):
# Frontend types
TORCH = 'torch'
CAFFE = 'caffe'
TENSORFLOW = 'tensorflow'
TF_KERAS = 'tf_keras'
# NNDCT as a bridge
NNDCT = 'nndct'
class NNDCT_CONSTANT(object):
INT_MAX = 2 ** 31 - 1
| [
"[email protected]"
] | |
4d7a13431523e52f557cb9288ca2df2918ca4592 | 6e9d6a682f20054e13d3764e95b8bd3b7b64fabf | /dailychallenge794.py | 480c50e622ac834ef1ded759d169a5654425fa0f | [] | no_license | SeanyDcode/codechallenges | 30a271e04bc2b360bca923ae868be65a9533c8db | 947cf3034911b381afaf777794d22d2af06aa5ba | refs/heads/master | 2022-11-07T21:22:56.927863 | 2022-10-18T23:33:13 | 2022-10-18T23:33:13 | 154,498,776 | 1 | 0 | null | 2022-10-18T23:02:05 | 2018-10-24T12:38:45 | Python | UTF-8 | Python | false | false | 515 | py | # from dailycodingproblem.com
#
# Daily Challenge #794
# Implement a stack that has the following methods:
#
# push(val), which pushes an element onto the stack
# pop(), which pops off and returns the topmost element of the stack. If there are no elements in the stack, then
# it should throw an error or return null.
# max(), which returns the maximum value in the stack currently. If there are no elements in the stack, then it should
# throw an error or return null.
# Each method should run in constant time.
| [
"[email protected]"
] | |
a7bc599698af3f0f019d14651118a3707a859c85 | 4ef91a77e56428f8d9eed917270b165c150707b1 | /unstable_learning_rates/adam/cifar10-6/cifar10.py | 1d37fdb16c6513cf3fd1e981dc35eba1c8ab9280 | [
"MIT"
] | permissive | Jeffrey-Ede/ALRC | e761ffc65a9dad44edf301dd30b766e46b6715b3 | e5bf1d713b56af68c79413ae23e275d466121b90 | refs/heads/master | 2020-06-05T04:38:58.595922 | 2020-05-29T13:25:01 | 2020-05-29T13:25:01 | 192,315,914 | 3 | 2 | null | 2019-06-17T12:29:18 | 2019-06-17T09:25:07 | Python | UTF-8 | Python | false | false | 18,726 | py | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Builds the CIFAR-10 network.
Summary of available functions:
# Compute input images and labels for training. If you would like to run
# evaluations, use inputs() instead.
inputs, labels = distorted_inputs()
# Compute inference on the model inputs to make a prediction.
predictions = inference(inputs)
# Compute the total loss of the prediction with respect to the labels.
loss = loss(predictions, labels)
# Create a graph to run one step of training with respect to the loss.
train_op = train(loss, global_step)
"""
# pylint: disable=missing-docstring
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import re
import sys
import tarfile
import numpy as np
from six.moves import urllib
import tensorflow as tf
import cifar10_input
FLAGS = tf.app.flags.FLAGS
# Basic model parameters.
tf.app.flags.DEFINE_integer('batch_size', 128,
"""Number of images to process in a batch.""")
tf.app.flags.DEFINE_string('log_file', '//ads.warwick.ac.uk/shared/HCSS6/Shared305/Microscopy/Jeffrey-Ede/models/modified_losses/experiments2/tmp/cifar10_train/log.txt',
'File to record losses and other info.')
tf.app.flags.DEFINE_string('data_dir', '//ads.warwick.ac.uk/shared/HCSS6/Shared305/Microscopy/Jeffrey-Ede/models/modified_losses/experiments2/tmp/cifar10_data',
"""Path to the CIFAR-10 data directory.""")
tf.app.flags.DEFINE_boolean('use_fp16', False,
"""Train the model using fp16.""")
tf.app.flags.DEFINE_float('num_stddev', 3.,
"""Number of standard deviations error can be above mean
without being clipped.""")
tf.app.flags.DEFINE_float('initial_lr', 0.1/128, """Initial learning rate""")
# Global constants describing the CIFAR-10 data set.
IMAGE_SIZE = cifar10_input.IMAGE_SIZE
NUM_CLASSES = cifar10_input.NUM_CLASSES
NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN = cifar10_input.NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN
NUM_EXAMPLES_PER_EPOCH_FOR_EVAL = cifar10_input.NUM_EXAMPLES_PER_EPOCH_FOR_EVAL
# Constants describing the training process.
MOVING_AVERAGE_DECAY = 0.9999 # The decay to use for the moving average.
NUM_EPOCHS_PER_DECAY = 350.0 # Epochs after which learning rate decays.
LEARNING_RATE_DECAY_FACTOR = 0.1 # Learning rate decay factor.
INITIAL_LEARNING_RATE = FLAGS.initial_lr
# If a model is trained with multiple GPUs, prefix all Op names with tower_name
# to differentiate the operations. Note that this prefix is removed from the
# names of the summaries when visualizing a model.
TOWER_NAME = 'tower'
DATA_URL = 'https://www.cs.toronto.edu/~kriz/cifar-10-binary.tar.gz'
def _activation_summary(x):
"""Helper to create summaries for activations.
Creates a summary that provides a histogram of activations.
Creates a summary that measures the sparsity of activations.
Args:
x: Tensor
Returns:
nothing
"""
# Remove 'tower_[0-9]/' from the name in case this is a multi-GPU training
# session. This helps the clarity of presentation on tensorboard.
tensor_name = re.sub('%s_[0-9]*/' % TOWER_NAME, '', x.op.name)
tf.summary.histogram(tensor_name + '/activations', x)
tf.summary.scalar(tensor_name + '/sparsity',
tf.nn.zero_fraction(x))
def _variable_on_cpu(name, shape, initializer):
"""Helper to create a Variable stored on CPU memory.
Args:
name: name of the variable
shape: list of ints
initializer: initializer for Variable
Returns:
Variable Tensor
"""
with tf.device('/cpu:0'):
dtype = tf.float16 if FLAGS.use_fp16 else tf.float32
var = tf.get_variable(name, shape, initializer=initializer, dtype=dtype)
return var
def _variable_with_weight_decay(name, shape, stddev, wd):
"""Helper to create an initialized Variable with weight decay.
Note that the Variable is initialized with a truncated normal distribution.
A weight decay is added only if one is specified.
Args:
name: name of the variable
shape: list of ints
stddev: standard deviation of a truncated Gaussian
wd: add L2Loss weight decay multiplied by this float. If None, weight
decay is not added for this Variable.
Returns:
Variable Tensor
"""
dtype = tf.float16 if FLAGS.use_fp16 else tf.float32
var = _variable_on_cpu(
name,
shape,
tf.truncated_normal_initializer(stddev=stddev, dtype=dtype))
if wd is not None:
weight_decay = tf.multiply(tf.nn.l2_loss(var), wd, name='weight_loss')
tf.add_to_collection('losses', weight_decay)
return var
def distorted_inputs():
"""Construct distorted input for CIFAR training using the Reader ops.
Returns:
images: Images. 4D tensor of [batch_size, IMAGE_SIZE, IMAGE_SIZE, 3] size.
labels: Labels. 1D tensor of [batch_size] size.
Raises:
ValueError: If no data_dir
"""
if not FLAGS.data_dir:
raise ValueError('Please supply a data_dir')
data_dir = os.path.join(FLAGS.data_dir, 'cifar-10-batches-bin')
images, labels = cifar10_input.distorted_inputs(data_dir=data_dir,
batch_size=FLAGS.batch_size)
if FLAGS.use_fp16:
images = tf.cast(images, tf.float16)
labels = tf.cast(labels, tf.float16)
return images, labels
def inputs(eval_data):
"""Construct input for CIFAR evaluation using the Reader ops.
Args:
eval_data: bool, indicating if one should use the train or eval data set.
Returns:
images: Images. 4D tensor of [batch_size, IMAGE_SIZE, IMAGE_SIZE, 3] size.
labels: Labels. 1D tensor of [batch_size] size.
Raises:
ValueError: If no data_dir
"""
if not FLAGS.data_dir:
raise ValueError('Please supply a data_dir')
data_dir = os.path.join(FLAGS.data_dir, 'cifar-10-batches-bin')
images, labels = cifar10_input.inputs(eval_data=eval_data,
data_dir=data_dir,
batch_size=FLAGS.batch_size)
if FLAGS.use_fp16:
images = tf.cast(images, tf.float16)
labels = tf.cast(labels, tf.float16)
return images, labels
def redistribute_loss(beta_ramp, beta_max, loss):
"""Redistribute loss by using its moments to estimate its CDF and mapping
cumulative densities to cumulative densities on another distribution.
Args:
loss: Tensor to redistribute.
beta_ramp: Number of iterations to increase decay factor for running means
to its maximum value over.
beta_max: Maximum value of decay factor - the value to ramp to.
"""
#Moments
mu1 = tf.get_variable("mom1", initializer=tf.constant(0., dtype=tf.float32))
mu2 = tf.get_variable("mom2", initializer=tf.constant(0., dtype=tf.float32))
mu3 = tf.get_variable("mom3", initializer=tf.constant(0., dtype=tf.float32))
mu4 = tf.get_variable("mom4", initializer=tf.constant(0., dtype=tf.float32))
    #Use running means to update moments. Note: the original body referenced an
    #undefined `beta`; using `beta_max` as the decay factor and tracking powers
    #of the loss for the higher moments is an assumption based on the docstring.
    beta = beta_max
    with tf.control_dependencies([mu1.assign(beta*mu1+(1-beta)*loss),
                                  mu2.assign(beta*mu2+(1-beta)*loss**2),
                                  mu3.assign(beta*mu3+(1-beta)*loss**3),
                                  mu4.assign(beta*mu4+(1-beta)*loss**4)]):
return loss
def capper_fn(x):
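    # Adaptive loss clipping: running estimates of the mean (mu) and second
    # moment (mu2) give a running standard deviation; any loss above
    # mu + FLAGS.num_stddev * sigma is scaled down to that threshold, with
    # stop_gradient so the scaling factor is treated as a constant.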
mu = tf.get_variable(f"mu", initializer=tf.constant(10, dtype=tf.float32))
mu2 = tf.get_variable(f"mu2", initializer=tf.constant(20**2, dtype=tf.float32))
def cap(x):
sigma = tf.sqrt(mu2 - mu**2+0.001)
capped_x = tf.where(x < mu+FLAGS.num_stddev*sigma,
x,
x/tf.stop_gradient(x/(mu+FLAGS.num_stddev*sigma)))
return capped_x
capped_x = cap(x)
mean = tf.reduce_mean(capped_x)
mean2 = tf.reduce_mean(capped_x**2)
with tf.control_dependencies([mu.assign(0.997*mu+0.001*mean),
mu2.assign(0.997*mu2+0.001*mean2)]):
return tf.identity(capped_x)
def inference(images):
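    # Small image-reconstruction network: the input is blurred by resizing
    # down to 16x16 and back to 32x32, a residual CNN restores it, and the
    # scaled per-example mean squared error against the original image is
    # returned (large values are square-rooted before use as the loss).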
def conv2d(inputs, num_outputs, kernel_size=3, stride=1, actv_fn=tf.nn.relu, transpose=False):
if transpose:
return tf.contrib.layers.conv2d_transpose(
inputs,
num_outputs,
kernel_size,
stride,
activation_fn=actv_fn)
else:
return tf.contrib.layers.conv2d(
inputs,
num_outputs,
kernel_size,
stride,
data_format='NHWC',
activation_fn=actv_fn)
input = tf.image.resize_images(images, (16,16), align_corners=True)
input = tf.image.resize_images(input, (32,32), align_corners=True)
x = conv2d(input, num_outputs=64, kernel_size=5)
x = conv2d(x, num_outputs=128, kernel_size=3)
for _ in range(3):
x0 = x
x = conv2d(x, num_outputs=128, kernel_size=3)
x = conv2d(x, num_outputs=128, kernel_size=3)
x += x0
x = conv2d(x, num_outputs=64, kernel_size=3)
x = conv2d(x, num_outputs=3, kernel_size=5, actv_fn=None)
loss = 25 * tf.reduce_mean((images-x)**2, axis=[1,2,3])
tf.add_to_collection('unadjusted_loss', tf.reduce_mean(loss))
loss = tf.where(loss < 50, loss, tf.sqrt(loss))
return loss
def _inference(images):
"""Build the CIFAR-10 model.
Args:
images: Images returned from distorted_inputs() or inputs().
Returns:
Logits.
"""
# We instantiate all variables using tf.get_variable() instead of
# tf.Variable() in order to share variables across multiple GPU training runs.
# If we only ran this model on a single GPU, we could simplify this function
# by replacing all instances of tf.get_variable() with tf.Variable().
#
# conv1
with tf.variable_scope('conv1') as scope:
kernel = _variable_with_weight_decay('weights',
shape=[5, 5, 3, 64],
stddev=5e-2,
wd=None)
conv = tf.nn.conv2d(images, kernel, [1, 1, 1, 1], padding='SAME')
biases = _variable_on_cpu('biases', [64], tf.constant_initializer(0.0))
pre_activation = tf.nn.bias_add(conv, biases)
conv1 = tf.nn.relu(pre_activation, name=scope.name)
_activation_summary(conv1)
# pool1
pool1 = tf.nn.max_pool(conv1, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1],
padding='SAME', name='pool1')
# norm1
norm1 = tf.nn.lrn(pool1, 4, bias=1.0, alpha=0.001 / 9.0, beta=0.75,
name='norm1')
# conv2
with tf.variable_scope('conv2') as scope:
kernel = _variable_with_weight_decay('weights',
shape=[5, 5, 64, 64],
stddev=5e-2,
wd=None)
conv = tf.nn.conv2d(norm1, kernel, [1, 1, 1, 1], padding='SAME')
biases = _variable_on_cpu('biases', [64], tf.constant_initializer(0.1))
pre_activation = tf.nn.bias_add(conv, biases)
conv2 = tf.nn.relu(pre_activation, name=scope.name)
_activation_summary(conv2)
# norm2
norm2 = tf.nn.lrn(conv2, 4, bias=1.0, alpha=0.001 / 9.0, beta=0.75,
name='norm2')
# pool2
pool2 = tf.nn.max_pool(norm2, ksize=[1, 3, 3, 1],
strides=[1, 2, 2, 1], padding='SAME', name='pool2')
# local3
with tf.variable_scope('local3') as scope:
# Move everything into depth so we can perform a single matrix multiply.
reshape = tf.reshape(pool2, [images.get_shape().as_list()[0], -1])
dim = reshape.get_shape()[1].value
weights = _variable_with_weight_decay('weights', shape=[dim, 384],
stddev=0.04, wd=0.004)
biases = _variable_on_cpu('biases', [384], tf.constant_initializer(0.1))
local3 = tf.nn.relu(tf.matmul(reshape, weights) + biases, name=scope.name)
_activation_summary(local3)
# local4
with tf.variable_scope('local4') as scope:
weights = _variable_with_weight_decay('weights', shape=[384, 192],
stddev=0.04, wd=0.004)
biases = _variable_on_cpu('biases', [192], tf.constant_initializer(0.1))
local4 = tf.nn.relu(tf.matmul(local3, weights) + biases, name=scope.name)
_activation_summary(local4)
# linear layer(WX + b),
# We don't apply softmax here because
# tf.nn.sparse_softmax_cross_entropy_with_logits accepts the unscaled logits
# and performs the softmax internally for efficiency.
with tf.variable_scope('softmax_linear') as scope:
weights = _variable_with_weight_decay('weights', [192, NUM_CLASSES],
stddev=1/192.0, wd=None)
biases = _variable_on_cpu('biases', [NUM_CLASSES],
tf.constant_initializer(0.0))
softmax_linear = tf.add(tf.matmul(local4, weights), biases, name=scope.name)
_activation_summary(softmax_linear)
return softmax_linear
def loss(logits, labels):
"""Add L2Loss to all the trainable variables.
Add summary for "Loss" and "Loss/avg".
Args:
logits: Logits from inference().
labels: Labels from distorted_inputs or inputs(). 1-D tensor
of shape [batch_size]
Returns:
Loss tensor of type float.
"""
cross_entropy = logits
#Clip error spikes
if FLAGS.num_stddev:
cross_entropy = capper_fn(logits)
cross_entropy_mean = tf.reduce_mean(cross_entropy, name='cross_entropy')
tf.add_to_collection('losses', cross_entropy_mean)
# The total loss is defined as the cross entropy loss plus all of the weight
# decay terms (L2 loss).
return tf.add_n(tf.get_collection('losses'), name='total_loss')
def _add_loss_summaries(total_loss):
"""Add summaries for losses in CIFAR-10 model.
Generates moving average for all losses and associated summaries for
visualizing the performance of the network.
Args:
total_loss: Total loss from loss().
Returns:
loss_averages_op: op for generating moving averages of losses.
"""
# Compute the moving average of all individual losses and the total loss.
loss_averages = tf.train.ExponentialMovingAverage(0.9, name='avg')
losses = tf.get_collection('losses')
loss_averages_op = loss_averages.apply(losses + [total_loss])
# Attach a scalar summary to all individual losses and the total loss; do the
# same for the averaged version of the losses.
for l in losses + [total_loss]:
# Name each loss as '(raw)' and name the moving average version of the loss
# as the original loss name.
tf.summary.scalar(l.op.name + ' (raw)', l)
tf.summary.scalar(l.op.name, loss_averages.average(l))
return loss_averages_op
def train(total_loss, global_step):
"""Train CIFAR-10 model.
Create an optimizer and apply to all trainable variables. Add moving
average for all trainable variables.
Args:
total_loss: Total loss from loss().
global_step: Integer Variable counting the number of training steps
processed.
Returns:
train_op: op for training.
"""
# Variables that affect learning rate.
num_batches_per_epoch = NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN / FLAGS.batch_size
decay_steps = int(num_batches_per_epoch * NUM_EPOCHS_PER_DECAY)
# Decay the learning rate exponentially based on the number of steps.
lr = tf.train.exponential_decay(INITIAL_LEARNING_RATE,
global_step,
decay_steps,
LEARNING_RATE_DECAY_FACTOR,
staircase=True)
tf.summary.scalar('learning_rate', lr)
# Generate moving averages of all losses and associated summaries.
loss_averages_op = _add_loss_summaries(total_loss)
# Compute gradients.
with tf.control_dependencies([loss_averages_op]):
opt = tf.train.AdamOptimizer(lr)
grads = opt.compute_gradients(total_loss)
# Apply gradients.
apply_gradient_op = opt.apply_gradients(grads, global_step=global_step)
# Add histograms for trainable variables.
for var in tf.trainable_variables():
tf.summary.histogram(var.op.name, var)
# Add histograms for gradients.
for grad, var in grads:
if grad is not None:
tf.summary.histogram(var.op.name + '/gradients', grad)
# Track the moving averages of all trainable variables.
variable_averages = tf.train.ExponentialMovingAverage(
MOVING_AVERAGE_DECAY, global_step)
with tf.control_dependencies([apply_gradient_op]):
variables_averages_op = variable_averages.apply(tf.trainable_variables())
return variables_averages_op
def maybe_download_and_extract():
"""Download and extract the tarball from Alex's website."""
dest_directory = FLAGS.data_dir
if not os.path.exists(dest_directory):
os.makedirs(dest_directory)
filename = DATA_URL.split('/')[-1]
filepath = os.path.join(dest_directory, filename)
if not os.path.exists(filepath):
def _progress(count, block_size, total_size):
sys.stdout.write('\r>> Downloading %s %.1f%%' % (filename,
float(count * block_size) / float(total_size) * 100.0))
sys.stdout.flush()
filepath, _ = urllib.request.urlretrieve(DATA_URL, filepath, _progress)
print()
statinfo = os.stat(filepath)
print('Successfully downloaded', filename, statinfo.st_size, 'bytes.')
extracted_dir_path = os.path.join(dest_directory, 'cifar-10-batches-bin')
if not os.path.exists(extracted_dir_path):
tarfile.open(filepath, 'r:gz').extractall(dest_directory)
| [
"[email protected]"
] | |
edaac70f667d8974a20ff95bebcd399de5484509 | e62c135403c766497245dd4dc72e7431a023404d | /helloShikha.py | 096873a704da900e7e19b3dbbfcafacf142d731e | [
"Apache-2.0"
] | permissive | ctsit/J.O.B-Training-Repo-1 | 3c437589e5c49749bbd2f97ce906cf01d000bad1 | 3455666557a23837ebcfe7e40de395a267689a6a | refs/heads/master | 2023-03-15T20:45:43.991881 | 2023-03-06T15:11:15 | 2023-03-06T15:11:15 | 82,118,694 | 0 | 31 | Apache-2.0 | 2023-08-24T15:52:05 | 2017-02-15T23:48:22 | Python | UTF-8 | Python | false | false | 54 | py | #This is my hello world program
print 'Hello Shikha!'
| [
"[email protected]"
] | |
cac5589c71a79b0bc92bda216d163e76b9777908 | b4f0f536c721178a69128eba0afb39fde6f62ffb | /tests/e2e/redis_client.py | 1a61fcb32ee7863df6606f0f932eb42ff2f39bfc | [
"MIT"
] | permissive | jeantardelli/architecture-patterns-with-python | c049257febc369c7d213428019387fe19d38998a | d48c7d6d4a44073b815c7e6770e44cf2e231e35b | refs/heads/main | 2023-05-27T05:12:12.951919 | 2021-06-04T18:48:43 | 2021-06-04T18:48:43 | 355,638,599 | 1 | 0 | MIT | 2021-06-04T18:48:44 | 2021-04-07T18:03:08 | Python | UTF-8 | Python | false | false | 390 | py | import json
import redis
from allocation import config
r = redis.Redis(**config.get_redis_host_and_port())
def subscribe_to(channel):
pubsub = r.pubsub()
pubsub.subscribe(channel)
confirmation = pubsub.get_message(timeout=3)
assert confirmation["type"] == "subscribe"
return pubsub
def publish_message(channel, message):
r.publish(channel, json.dumps(message))
| [
"[email protected]"
] | |
c54a4eef064dc72dd2b30f222d245099b69f7005 | b1547d22520133f03c7529086f70d4ae83d24237 | /script/conntect.py | adb439f4a913590a349ab9d91767eb1d6ecc3e6f | [] | no_license | wrwahut/flask_demo | 913b6a3e4cd56fadb834895f559d25af742f1a7f | 699beaa1a6c9f0f413147ff237bb6256c651e953 | refs/heads/master | 2020-03-29T22:03:54.659686 | 2019-01-03T07:48:38 | 2019-01-03T07:48:38 | 150,401,389 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 917 | py | # -*- coding:utf-8 -*-
import requests
from flask import request
import json
req_session = requests.session()
class Caller(object):
def __init__(self, url_fix,args={}):
self.url = "http://localhost:10002/resource/user/" + url_fix
self.args = args
self.headers = {
"content-type": "application/json",
"accept": "application/json"
}
# self.cookie = {
# "token": request.cookies.get("token", "None"),
# "lang": request.cookies.get("lang", "zh-CN")
# }
def _res_data(self, response):
res_data = response.json()
if not res_data.get("data"):
res_data["data"] = {}
return res_data
def post_req(self):
payload = json.dumps(self.args, "utf-8")
response = req_session.post(self.url, data=payload, headers=self.headers)
return self._res_data(response) | [
"[email protected]"
] | |
00233f22e2e5ef66eb8018f58af6d447b2945cbb | dcbef06d5a00f07756339b9e62c684dec2fee425 | /nuitka/code_generation/LoopCodes.py | b553f9f6b95a7db4207f65448e6f7735be2edebb | [
"Apache-2.0",
"LicenseRef-scancode-warranty-disclaimer"
] | permissive | Nuitka/Nuitka | f9543d8d95bfa0b81d4e60af0dfad99fb72893a4 | d87faf2f7e1d6ed9bfe4cf8c1d648f34307e33f2 | refs/heads/develop | 2023-08-28T14:00:32.861328 | 2023-08-27T09:16:45 | 2023-08-27T09:16:45 | 9,626,741 | 8,573 | 599 | Apache-2.0 | 2023-09-13T02:49:41 | 2013-04-23T15:40:33 | Python | UTF-8 | Python | false | false | 3,141 | py | # Copyright 2023, Kay Hayen, mailto:[email protected]
#
# Part of "Nuitka", an optimizing Python compiler that is compatible and
# integrates with CPython, but also works on its own.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
""" Loop codes.
Code generation for loops, breaking them, or continuing them. In Nuitka, there
are no for-loops or while-loops at this point. They have been re-formulated in
a simpler loop without a condition, and statements therein that break under
certain conditions.
See Developer Manual for how the CPython loops are mapped to these nodes.
"""
from .CodeHelpers import generateStatementSequenceCode
from .ErrorCodes import getErrorExitBoolCode
from .ExceptionCodes import getExceptionUnpublishedReleaseCode
from .LabelCodes import getGotoCode, getLabelCode
def generateLoopBreakCode(statement, emit, context):
# Functions used for generation all accept statement, but this one does
# not use it. pylint: disable=unused-argument
getExceptionUnpublishedReleaseCode(emit, context)
break_target = context.getLoopBreakTarget()
getGotoCode(break_target, emit)
def generateLoopContinueCode(statement, emit, context):
# Functions used for generation all accept statement, but this one does
# not use it. pylint: disable=unused-argument
getExceptionUnpublishedReleaseCode(emit, context)
continue_target = context.getLoopContinueTarget()
getGotoCode(continue_target, emit)
def generateLoopCode(statement, emit, context):
loop_start_label = context.allocateLabel("loop_start")
if not statement.isStatementAborting():
loop_end_label = context.allocateLabel("loop_end")
else:
loop_end_label = None
getLabelCode(loop_start_label, emit)
old_loop_break = context.setLoopBreakTarget(loop_end_label)
old_loop_continue = context.setLoopContinueTarget(loop_start_label)
generateStatementSequenceCode(
statement_sequence=statement.subnode_loop_body,
allow_none=True,
emit=emit,
context=context,
)
context.setLoopBreakTarget(old_loop_break)
context.setLoopContinueTarget(old_loop_continue)
# Note: We are using the wrong line here, but it's an exception, it's unclear what line it would be anyway.
with context.withCurrentSourceCodeReference(statement.getSourceReference()):
getErrorExitBoolCode(
condition="CONSIDER_THREADING(tstate) == false", emit=emit, context=context
)
getGotoCode(loop_start_label, emit)
if loop_end_label is not None:
getLabelCode(loop_end_label, emit)
| [
"[email protected]"
] | |
2db81d67ed5865facf874ae717b23430c8c420f3 | 4a06e7a07df377e224477c1d24cef620a4f98cc4 | /httprunner/report.py | 19d1581aa9841adcca9bf26388a5392d69109ae6 | [
"Apache-2.0"
] | permissive | ovalike/HttpRunner | b1cbec8ac02a0835987429bf7d966e7e64e62e61 | 6b23edf44c9aa71eba2c6d5d2e6b3335243ac992 | refs/heads/master | 2021-08-02T15:18:01.568043 | 2021-04-26T15:28:23 | 2021-04-26T15:28:23 | 177,881,052 | 0 | 0 | Apache-2.0 | 2019-03-26T22:58:20 | 2019-03-26T22:58:19 | null | UTF-8 | Python | false | false | 11,724 | py | # encoding: utf-8
import io
import os
import platform
import time
import unittest
from base64 import b64encode
from collections import Iterable
from datetime import datetime
import requests
from httprunner import loader, logger
from httprunner.__about__ import __version__
from httprunner.compat import basestring, bytes, json, numeric_types
from jinja2 import Template, escape
def get_platform():
return {
"httprunner_version": __version__,
"python_version": "{} {}".format(
platform.python_implementation(),
platform.python_version()
),
"platform": platform.platform()
}
def get_summary(result):
""" get summary from test result
Args:
result (instance): HtmlTestResult() instance
Returns:
dict: summary extracted from result.
{
"success": True,
"stat": {},
"time": {},
"records": []
}
"""
summary = {
"success": result.wasSuccessful(),
"stat": {
'total': result.testsRun,
'failures': len(result.failures),
'errors': len(result.errors),
'skipped': len(result.skipped),
'expectedFailures': len(result.expectedFailures),
'unexpectedSuccesses': len(result.unexpectedSuccesses)
}
}
summary["stat"]["successes"] = summary["stat"]["total"] \
- summary["stat"]["failures"] \
- summary["stat"]["errors"] \
- summary["stat"]["skipped"] \
- summary["stat"]["expectedFailures"] \
- summary["stat"]["unexpectedSuccesses"]
summary["time"] = {
'start_at': result.start_at,
'duration': result.duration
}
summary["records"] = result.records
return summary
def aggregate_stat(origin_stat, new_stat):
""" aggregate new_stat to origin_stat.
Args:
origin_stat (dict): origin stat dict, will be updated with new_stat dict.
new_stat (dict): new stat dict.
"""
for key in new_stat:
if key not in origin_stat:
origin_stat[key] = new_stat[key]
elif key == "start_at":
# start datetime
origin_stat[key] = min(origin_stat[key], new_stat[key])
else:
origin_stat[key] += new_stat[key]
def stringify_summary(summary):
""" stringify summary, in order to dump json file and generate html report.
"""
for index, suite_summary in enumerate(summary["details"]):
if not suite_summary.get("name"):
suite_summary["name"] = "testcase {}".format(index)
for record in suite_summary.get("records"):
meta_datas = record['meta_datas']
__stringify_meta_datas(meta_datas)
meta_datas_expanded = []
__expand_meta_datas(meta_datas, meta_datas_expanded)
record["meta_datas_expanded"] = meta_datas_expanded
record["response_time"] = __get_total_response_time(meta_datas_expanded)
def __stringify_request(request_data):
""" stringfy HTTP request data
Args:
request_data (dict): HTTP request data in dict.
{
"url": "http://127.0.0.1:5000/api/get-token",
"method": "POST",
"headers": {
"User-Agent": "python-requests/2.20.0",
"Accept-Encoding": "gzip, deflate",
"Accept": "*/*",
"Connection": "keep-alive",
"user_agent": "iOS/10.3",
"device_sn": "TESTCASE_CREATE_XXX",
"os_platform": "ios",
"app_version": "2.8.6",
"Content-Type": "application/json",
"Content-Length": "52"
},
"json": {
"sign": "cb9d60acd09080ea66c8e63a1c78c6459ea00168"
},
"verify": false
}
"""
for key, value in request_data.items():
if isinstance(value, list):
value = json.dumps(value, indent=2, ensure_ascii=False)
elif isinstance(value, bytes):
try:
encoding = "utf-8"
value = escape(value.decode(encoding))
except UnicodeDecodeError:
pass
elif not isinstance(value, (basestring, numeric_types, Iterable)):
# class instance, e.g. MultipartEncoder()
value = repr(value)
elif isinstance(value, requests.cookies.RequestsCookieJar):
value = value.get_dict()
request_data[key] = value
def __stringify_response(response_data):
""" stringfy HTTP response data
Args:
response_data (dict):
{
"status_code": 404,
"headers": {
"Content-Type": "application/json",
"Content-Length": "30",
"Server": "Werkzeug/0.14.1 Python/3.7.0",
"Date": "Tue, 27 Nov 2018 06:19:27 GMT"
},
"encoding": "None",
"content_type": "application/json",
"ok": false,
"url": "http://127.0.0.1:5000/api/users/9001",
"reason": "NOT FOUND",
"cookies": {},
"json": {
"success": false,
"data": {}
}
}
"""
for key, value in response_data.items():
if isinstance(value, list):
value = json.dumps(value, indent=2, ensure_ascii=False)
elif isinstance(value, bytes):
try:
encoding = response_data.get("encoding")
if not encoding or encoding == "None":
encoding = "utf-8"
if key == "content" and "image" in response_data["content_type"]:
# display image
value = "data:{};base64,{}".format(
response_data["content_type"],
b64encode(value).decode(encoding)
)
else:
value = escape(value.decode(encoding))
except UnicodeDecodeError:
pass
elif not isinstance(value, (basestring, numeric_types, Iterable)):
# class instance, e.g. MultipartEncoder()
value = repr(value)
elif isinstance(value, requests.cookies.RequestsCookieJar):
value = value.get_dict()
response_data[key] = value
def __expand_meta_datas(meta_datas, meta_datas_expanded):
""" expand meta_datas to one level
Args:
meta_datas (dict/list): maybe in nested format
Returns:
list: expanded list in one level
Examples:
>>> meta_datas = [
[
dict1,
dict2
],
dict3
]
>>> meta_datas_expanded = []
>>> __expand_meta_datas(meta_datas, meta_datas_expanded)
>>> print(meta_datas_expanded)
[dict1, dict2, dict3]
"""
if isinstance(meta_datas, dict):
meta_datas_expanded.append(meta_datas)
elif isinstance(meta_datas, list):
for meta_data in meta_datas:
__expand_meta_datas(meta_data, meta_datas_expanded)
def __get_total_response_time(meta_datas_expanded):
""" caculate total response time of all meta_datas
"""
try:
response_time = 0
for meta_data in meta_datas_expanded:
response_time += meta_data["stat"]["response_time_ms"]
return "{:.2f}".format(response_time)
except TypeError:
# failure exists
return "N/A"
def __stringify_meta_datas(meta_datas):
if isinstance(meta_datas, list):
for _meta_data in meta_datas:
__stringify_meta_datas(_meta_data)
elif isinstance(meta_datas, dict):
data_list = meta_datas["data"]
for data in data_list:
__stringify_request(data["request"])
__stringify_response(data["response"])
def render_html_report(summary, report_template=None, report_dir=None):
""" render html report with specified report name and template
Args:
report_template (str): specify html report template path
report_dir (str): specify html report save directory
"""
if not report_template:
report_template = os.path.join(
os.path.abspath(os.path.dirname(__file__)),
"templates",
"report_template.html"
)
logger.log_debug("No html report template specified, use default.")
else:
logger.log_info("render with html report template: {}".format(report_template))
logger.log_info("Start to render Html report ...")
report_dir = report_dir or os.path.join(os.getcwd(), "reports")
if not os.path.isdir(report_dir):
os.makedirs(report_dir)
start_at_timestamp = int(summary["time"]["start_at"])
summary["time"]["start_datetime"] = datetime.fromtimestamp(start_at_timestamp).strftime('%Y-%m-%d %H:%M:%S')
report_path = os.path.join(report_dir, "{}.html".format(start_at_timestamp))
with io.open(report_template, "r", encoding='utf-8') as fp_r:
template_content = fp_r.read()
with io.open(report_path, 'w', encoding='utf-8') as fp_w:
rendered_content = Template(
template_content,
extensions=["jinja2.ext.loopcontrols"]
).render(summary)
fp_w.write(rendered_content)
logger.log_info("Generated Html report: {}".format(report_path))
return report_path
class HtmlTestResult(unittest.TextTestResult):
""" A html result class that can generate formatted html results.
Used by TextTestRunner.
"""
def __init__(self, stream, descriptions, verbosity):
super(HtmlTestResult, self).__init__(stream, descriptions, verbosity)
self.records = []
def _record_test(self, test, status, attachment=''):
data = {
'name': test.shortDescription(),
'status': status,
'attachment': attachment,
"meta_datas": test.meta_datas
}
self.records.append(data)
def startTestRun(self):
self.start_at = time.time()
def startTest(self, test):
""" add start test time """
super(HtmlTestResult, self).startTest(test)
logger.color_print(test.shortDescription(), "yellow")
def addSuccess(self, test):
super(HtmlTestResult, self).addSuccess(test)
self._record_test(test, 'success')
print("")
def addError(self, test, err):
super(HtmlTestResult, self).addError(test, err)
self._record_test(test, 'error', self._exc_info_to_string(err, test))
print("")
def addFailure(self, test, err):
super(HtmlTestResult, self).addFailure(test, err)
self._record_test(test, 'failure', self._exc_info_to_string(err, test))
print("")
def addSkip(self, test, reason):
super(HtmlTestResult, self).addSkip(test, reason)
self._record_test(test, 'skipped', reason)
print("")
def addExpectedFailure(self, test, err):
super(HtmlTestResult, self).addExpectedFailure(test, err)
self._record_test(test, 'ExpectedFailure', self._exc_info_to_string(err, test))
print("")
def addUnexpectedSuccess(self, test):
super(HtmlTestResult, self).addUnexpectedSuccess(test)
self._record_test(test, 'UnexpectedSuccess')
print("")
@property
def duration(self):
return time.time() - self.start_at
| [
"httprunner"
] | httprunner |
9770456866da2c0a4a7485ed4ccefca3170983b2 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02417/s513271588.py | dd2618394ee3a3fd9727f547170f17cf79edd3e4 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 285 | py | import string
from sys import stdin
chs = dict([(ch, 0) for ch in string.ascii_lowercase])
for line in stdin:
for ch in line:
c = ch.lower()
if c not in chs:
continue
chs[c] += 1
for ch in string.ascii_lowercase:
print(ch, ':', chs[ch]) | [
"[email protected]"
] | |
20c35fdc0ddd1e4481b51b2d8e0f88a9989398f7 | df770e5961aa7a0790314da663c426c8b3a93092 | /python/adult_sklearn.py | f344a98e3b820e08c867ac05917d704ba14a9690 | [] | no_license | dengl11/CS315B-Project | be996d461582110e70c3f9e621d13d80ed78a160 | 0da493eb5f6030562d2d2d7546ac7c107f9d2879 | refs/heads/master | 2021-08-28T18:34:41.655923 | 2017-12-13T00:06:29 | 2017-12-13T00:06:29 | 108,506,314 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,339 | py | ############################################################
# My Decision Tree Classification
############################################################
import numpy as np
from sklearn.metrics import accuracy_score
from sklearn.tree import DecisionTreeClassifier
from my_decision_tree import *
# param
max_depth = 3
# max_depth = 6
def get_data(input):
"""return (X, y) for input
Args:
input:
Return:
"""
mat = np.loadtxt(input, skiprows=2)
y = mat[:, 0].astype(int)
X = mat[:, 1:].astype(np.float)
return (X, y)
X_train, y_train = get_data("../data/adult/adult_train_tiny.tsv")
X_test, y_test = get_data("../data/adult/adult_test_tiny.tsv")
# X_train, y_train = get_data("../data/adult/adult_train.tsv")
# X_test, y_test = get_data("../data/adult/adult_test.tsv")
# construct estimator
estimator = DecisionTreeClassifier(max_depth=max_depth , random_state=0)
# train
estimator.fit(X_train, y_train)
# [pred_train, pred_test]
predictions = [estimator.predict(x) for x in (X_train, X_test)]
# [acc_train, acc_test]
accuracys = [accuracy_score(p, y) for (p, y) in zip(predictions, (y_train, y_test))]
print("------------------- Sklearn Decision Tree -------------------")
print("Train Accuracy: {:.2f}".format(accuracys[0]))
print("Test Accuracy : {:.2f}".format(accuracys[1]))
| [
"[email protected]"
] | |
03733d50c66db99aab90d764c67c8102c1927d32 | fdce456e2f0ea12f854e98583cfda95955b9a36b | /seekerbuilder/migrations/0016_auto_20210916_1303.py | ad0d13f013270eb5cd84ec0db637506231ce82d1 | [] | no_license | atifasr/jobportal | e5fdc8058759311e8d4ca2c0291066ad86059fb6 | 3fe211598daa66f2a76c2b3d4d26d73459ac7457 | refs/heads/master | 2023-08-05T02:01:00.870360 | 2021-09-29T11:59:29 | 2021-09-29T11:59:29 | 388,807,519 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,245 | py | # Generated by Django 3.2.5 on 2021-09-16 07:33
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('job_management', '0019_auto_20210913_2147'),
('seekerbuilder', '0015_auto_20210914_1503'),
]
operations = [
migrations.AlterModelOptions(
name='educationdetail',
options={'verbose_name_plural': "Seekers' education details"},
),
migrations.AlterModelOptions(
name='experiencedetail',
options={'verbose_name_plural': "Seekers' experience details"},
),
migrations.AlterModelOptions(
name='seekerprofile',
options={'verbose_name_plural': "Seekers' profile"},
),
migrations.AlterModelOptions(
name='seekerskillset',
options={'verbose_name': 'Seeker skill set', 'verbose_name_plural': "Seeker's skills"},
),
migrations.RemoveField(
model_name='seekerskillset',
name='skill_set',
),
migrations.AddField(
model_name='seekerskillset',
name='skill_set',
field=models.ManyToManyField(to='job_management.Skillset'),
),
]
| [
"[email protected]"
] | |
781493f32e1e6751e010c4bfc68e52c0d2761f13 | 006341ca12525aa0979d6101600e78c4bd9532ab | /CMS/Zope-3.2.1/Dependencies/twisted-Zope-3.2.1/twisted/web/woven/model.py | 30887f1e2ab52ec7730c05c3ce860bd0f79dc854 | [
"LicenseRef-scancode-unknown-license-reference",
"MIT",
"ZPL-2.1",
"Python-2.0",
"ICU",
"LicenseRef-scancode-public-domain",
"BSD-3-Clause",
"ZPL-2.0"
] | permissive | germanfriday/code-examples-sandbox | d0f29e20a3eed1f8430d06441ac2d33bac5e4253 | 4c538584703754c956ca66392fdcecf0a0ca2314 | refs/heads/main | 2023-05-30T22:21:57.918503 | 2021-06-15T15:06:47 | 2021-06-15T15:06:47 | 377,200,448 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 16,065 | py | # -*- test-case-name: twisted.web.test.test_woven -*-
# Copyright (c) 2001-2004 Twisted Matrix Laboratories.
# See LICENSE for details.
__version__ = "$Revision: 1.53 $"[11:-2]
import types
import weakref
import warnings
from zope.interface import implements
from twisted.python import components, reflect
from twisted.internet import defer
from twisted.web.woven import interfaces
class _Nothing: pass
def adaptToIModel(m, parent=None, submodel=None):
adapted = components.getAdapter(m, interfaces.IModel, None, components.getAdapterClassWithInheritance)
if adapted is None:
adapted = Wrapper(m)
adapted.parent = parent
adapted.name = submodel
return adapted
class Model:
"""
A Model which keeps track of views which are looking at it in order
to notify them when the model changes.
"""
implements(interfaces.IModel)
def __init__(self, *args, **kwargs):
if len(args):
self.original = args[0]
else:
self.original = self
self.name = ''
self.parent = None
self.views = []
self.subviews = {}
self.submodels = {}
self._getter = kwargs.get('getter')
self._setter = kwargs.get('setter')
self.cachedFor = None
self.initialize(*args, **kwargs)
def __getstate__(self):
self.views = []
self.subviews = {}
self.submodels = {}
return self.__dict__
def invalidateCache(self):
"""Invalidate the cache for this object, so the next time
getData is called, it's getter method is called again.
"""
self.cachedFor = None
def initialize(self, *args, **kwargs):
"""
Hook for subclasses to initialize themselves without having to
mess with the __init__ chain.
"""
pass
def addView(self, view):
"""
Add a view for the model to keep track of.
"""
if view not in [ref() for ref in self.views]:
self.views.append(weakref.ref(view))
def addSubview(self, name, subview):
subviewList = self.subviews.get(name, [])
subviewList.append(weakref.ref(subview))
self.subviews[name] = subviewList
def removeView(self, view):
"""
Remove a view that the model no longer should keep track of.
"""
# AM: loop on a _copy_ of the list, since we're changing it!!!
for weakref in list(self.views):
ref = weakref()
if ref is view or ref is None:
self.views.remove(weakref)
def setGetter(self, getter):
self._getter = getter
def setSetter(self, setter):
self._setter = setter
def notify(self, changed=None):
"""
Notify all views that something was changed on me.
Passing a dictionary of {'attribute': 'new value'} in changed
will pass this dictionary to the view for increased performance.
If you don't want to do this, don't, and just use the traditional
MVC paradigm of querying the model for things you're interested
in.
"""
self.cachedFor = None
if changed is None: changed = {}
retVal = []
# AM: loop on a _copy_ of the list, since we're changing it!!!
for view in list(self.views):
ref = view()
if ref is not None:
retVal.append((ref, ref.modelChanged(changed)))
else:
self.views.remove(view)
for key, value in self.subviews.items():
if value.wantsAllNotifications or changed.has_key(key):
for item in list(value):
ref = item()
if ref is not None:
retVal.append((ref, ref.modelChanged(changed)))
else:
value.remove(item)
return retVal
protected_names = ['initialize', 'addView', 'addSubview', 'removeView', 'notify', 'getSubmodel', 'setSubmodel', 'getData', 'setData']
allowed_names = []
def lookupSubmodel(self, request, submodelName):
"""
Look up a full submodel name. I will split on `/' and call
L{getSubmodel} on each element in the 'path'.
Override me if you don't want 'traversing'-style lookup, but
        would rather look up a model based on the entire model
name specified.
If you override me to return Deferreds, make sure I look up
values in a cache (created by L{setSubmodel}) before doing a
regular Deferred lookup.
XXX: Move bits of this docstring to interfaces.py
"""
if not submodelName:
return None
# Special case: If the first character is /
# Start at the bottom of the model stack
currentModel = self
if submodelName[0] == '/':
while currentModel.parent is not None:
currentModel = currentModel.parent
submodelName = submodelName[1:]
submodelList = submodelName.split('/') #[:-1]
# print "submodelList", submodelList
for element in submodelList:
if element == '.' or element == '':
continue
elif element == '..':
currentModel = currentModel.parent
else:
currentModel = currentModel.getSubmodel(request, element)
if currentModel is None:
return None
return currentModel
def submodelCheck(self, request, name):
"""Check if a submodel name is allowed. Subclass me to implement a
name security policy.
"""
if self.allowed_names:
return (name in self.allowed_names)
else:
return (name and name[0] != '_' and name not in self.protected_names)
def submodelFactory(self, request, name):
warnings.warn("Warning: default Model lookup strategy is changing:"
"use either AttributeModel or MethodModel for now.",
DeprecationWarning)
if hasattr(self, name):
return getattr(self, name)
else:
return None
def getSubmodel(self, request, name):
"""
Get the submodel `name' of this model. If I ever return a
Deferred, then I ought to check for cached values (created by
L{setSubmodel}) before doing a regular Deferred lookup.
"""
if self.submodels.has_key(name):
return self.submodels[name]
if not self.submodelCheck(request, name):
return None
m = self.submodelFactory(request, name)
if m is None:
return None
sm = adaptToIModel(m, self, name)
self.submodels[name] = sm
return sm
def setSubmodel(self, request=None, name=None, value=None):
"""
Set a submodel on this model. If getSubmodel or lookupSubmodel
ever return a Deferred, I ought to set this in a place that
lookupSubmodel/getSubmodel know about, so they can use it as a
cache.
"""
if self.submodelCheck(request, name):
if self.submodels.has_key(name):
del self.submodels[name]
setattr(self, name, value)
def dataWillChange(self):
pass
def getData(self, request):
if self.cachedFor != id(request) and self._getter is not None:
self.cachedFor = id(request)
self.dataWillChange()
self.orig = self.original = self._getter(request)
return self.original
def setData(self, request, data):
if self._setter is not None:
self.cachedFor = None
return self._setter(request, data)
else:
if hasattr(self, 'parent') and self.parent:
self.parent.setSubmodel(request, self.name, data)
self.orig = self.original = data
components.backwardsCompatImplements(Model)
class MethodModel(Model):
"""Look up submodels with wmfactory_* methods.
"""
def submodelCheck(self, request, name):
"""Allow any submodel for which I have a submodel.
"""
return hasattr(self, "wmfactory_"+name)
def submodelFactory(self, request, name):
"""Call a wmfactory_name method on this model.
"""
meth = getattr(self, "wmfactory_"+name)
return meth(request)
def getSubmodel(self, request=None, name=None):
if name is None:
warnings.warn("Warning! getSubmodel should now take the request as the first argument")
name = request
request = None
cached = self.submodels.has_key(name)
sm = Model.getSubmodel(self, request, name)
if sm is not None:
if not cached:
sm.cachedFor = id(request)
sm._getter = getattr(self, "wmfactory_"+name)
return sm
class AttributeModel(Model):
"""Look up submodels as attributes with hosts.allow/deny-style security.
"""
def submodelFactory(self, request, name):
if hasattr(self, name):
return getattr(self, name)
else:
return None
#backwards compatibility
WModel = Model
class Wrapper(Model):
"""
I'm a generic wrapper to provide limited interaction with the
Woven models and submodels.
"""
parent = None
name = None
def __init__(self, orig):
Model.__init__(self)
self.orig = self.original = orig
def dataWillChange(self):
pass
def __repr__(self):
myLongName = reflect.qual(self.__class__)
return "<%s instance at 0x%x: wrapped data: %s>" % (myLongName,
id(self), self.original)
class ListModel(Wrapper):
"""
I wrap a Python list and allow it to interact with the Woven
models and submodels.
"""
def dataWillChange(self):
self.submodels = {}
def getSubmodel(self, request=None, name=None):
if name is None and type(request) is type(""):
warnings.warn("Warning!")
name = request
request = None
if self.submodels.has_key(name):
return self.submodels[name]
orig = self.original
try:
i = int(name)
except:
return None
        if i >= len(orig):
return None
sm = adaptToIModel(orig[i], self, name)
self.submodels[name] = sm
return sm
def setSubmodel(self, request=None, name=None, value=None):
if value is None:
warnings.warn("Warning!")
value = name
name = request
request = None
self.original[int(name)] = value
def __len__(self):
return len(self.original)
def __getitem__(self, name):
return self.getSubmodel(None, str(name))
def __setitem__(self, name, value):
self.setSubmodel(None, str(name), value)
def __repr__(self):
myLongName = reflect.qual(self.__class__)
return "<%s instance at 0x%x: wrapped data: %s>" % (myLongName,
id(self), self.original)
class StringModel(ListModel):
""" I wrap a Python string and allow it to interact with the Woven models
and submodels. """
def setSubmodel(self, request=None, name=None, value=None):
raise ValueError("Strings are immutable.")
# pyPgSQL returns "PgResultSet" instances instead of lists, which look, act
# and breathe just like lists. pyPgSQL really shouldn't do this, but this works
try:
from pyPgSQL import PgSQL
components.registerAdapter(ListModel, PgSQL.PgResultSet, interfaces.IModel)
except:
pass
class DictionaryModel(Wrapper):
"""
I wrap a Python dictionary and allow it to interact with the Woven
models and submodels.
"""
def dataWillChange(self):
self.submodels = {}
def getSubmodel(self, request=None, name=None):
if name is None and type(request) is type(""):
warnings.warn("getSubmodel must get a request argument now")
name = request
request = None
if self.submodels.has_key(name):
return self.submodels[name]
orig = self.original
if name not in orig:
return None
sm = adaptToIModel(orig[name], self, name)
self.submodels[name] = sm
return sm
def setSubmodel(self, request=None, name=None, value=None):
if value is None:
warnings.warn("Warning!")
value = name
name = request
request = None
self.original[name] = value
class AttributeWrapper(Wrapper):
"""
I wrap an attribute named "name" of the given parent object.
"""
def __init__(self, parent, name):
self.original = None
parent = ObjectWrapper(parent)
Wrapper.__init__(self, parent.getSubmodel(None, name))
self.parent = parent
self.name = name
class ObjectWrapper(Wrapper):
"""
I may wrap an object and allow it to interact with the Woven models
and submodels. By default, I am not registered for use with anything.
"""
def getSubmodel(self, request=None, name=None):
if name is None and type(request) is type(""):
warnings.warn("Warning!")
name = request
request = None
if self.submodels.has_key(name):
return self.submodels[name]
sm = adaptToIModel(getattr(self.original, name), self, name)
self.submodels[name] = sm
return sm
def setSubmodel(self, request=None, name=None, value=None):
if value is None:
warnings.warn("Warning!")
value = name
name = request
request = None
setattr(self.original, name, value)
class UnsafeObjectWrapper(ObjectWrapper):
"""
I may wrap an object and allow it to interact with the Woven models
and submodels. By default, I am not registered for use with anything.
I am unsafe because I allow methods to be called. In fact, I am
dangerously unsafe. Be wary or I will kill your security model!
"""
def getSubmodel(self, request=None, name=None):
if name is None and type(request) is type(""):
warnings.warn("Warning!")
name = request
request = None
if self.submodels.has_key(name):
return self.submodels[name]
value = getattr(self.original, name)
if callable(value):
return value()
sm = adaptToIModel(value, self, name)
        self.submodels[name] = sm
return sm
class DeferredWrapper(Wrapper):
def setData(self, request=None, data=_Nothing):
if data is _Nothing:
warnings.warn("setData should be called with request as first arg")
data = request
request = None
if isinstance(data, defer.Deferred):
self.original = data
else:
views, subviews = self.views, self.subviews
new = adaptToIModel(data, self.parent, self.name)
self.__class__ = new.__class__
self.__dict__ = new.__dict__
self.views, self.subviews = views, subviews
class Link(AttributeModel):
def __init__(self, href, text):
AttributeModel.__init__(self)
self.href = href
self.text = text
try:
components.registerAdapter(StringModel, types.StringType, interfaces.IModel)
components.registerAdapter(ListModel, types.ListType, interfaces.IModel)
components.registerAdapter(ListModel, types.TupleType, interfaces.IModel)
components.registerAdapter(DictionaryModel, types.DictionaryType, interfaces.IModel)
components.registerAdapter(DeferredWrapper, defer.Deferred, interfaces.IModel)
components.registerAdapter(DeferredWrapper, defer.DeferredList, interfaces.IModel)
except ValueError:
# The adapters were already registered
pass
| [
"[email protected]"
] | |
c81c481c4164984cc48621ac820341764eda0f70 | eec9c673984da80f42d2a296ee2cb068639db169 | /tods/tests/feature_analysis/test_StastiticalStd.py | e0e968bb983bc7b56792b66f46ebdad5e1844590 | [
"Apache-2.0"
] | permissive | datamllab/tods | 0766f48e7736fd2dca1cbc59fef019447039fed8 | 314dd6efc6ed3f8d25e100b08de4115edc636e14 | refs/heads/master | 2023-09-03T22:44:31.929096 | 2023-08-24T17:21:27 | 2023-08-24T17:21:27 | 293,719,013 | 1,094 | 175 | Apache-2.0 | 2023-08-24T17:21:28 | 2020-09-08T06:18:12 | Python | UTF-8 | Python | false | false | 5,434 | py | import unittest
from d3m import container, utils
from d3m.metadata import base as metadata_base
from tods.feature_analysis import StatisticalStd
class StatisticalStdTestCase(unittest.TestCase):
def test_basic(self):
self.maxDiff=None
main = container.DataFrame({'timestamp': [1, 3, 2, 5], 'values': [1.0, 2.0, 3.0, 4.0], 'b': [1.0, 4.0, 5.0, 6.0]},
columns=['timestamp', 'values', 'b'],
generate_metadata=True)
self.assertEqual(utils.to_json_structure(main.metadata.to_internal_simple_structure()), [{
'selector': [],
'metadata': {
# 'top_level': 'main',
'schema': metadata_base.CONTAINER_SCHEMA_VERSION,
'structural_type': 'd3m.container.pandas.DataFrame',
'semantic_types': ['https://metadata.datadrivendiscovery.org/types/Table'],
'dimension': {
'name': 'rows',
'semantic_types': ['https://metadata.datadrivendiscovery.org/types/TabularRow'],
'length': 4,
},
},
}, {
'selector': ['__ALL_ELEMENTS__'],
'metadata': {
'dimension': {
'name': 'columns',
'semantic_types': ['https://metadata.datadrivendiscovery.org/types/TabularColumn'],
'length': 3,
},
},
}, {
'selector': ['__ALL_ELEMENTS__', 0],
'metadata': {'structural_type': 'numpy.int64', 'name': 'timestamp'},
}, {
'selector': ['__ALL_ELEMENTS__', 1],
'metadata': {'structural_type': 'numpy.float64', 'name': 'values'},
}, {
'selector': ['__ALL_ELEMENTS__', 2],
'metadata': {'structural_type': 'numpy.float64', 'name': 'b'},
}])
hyperparams_class = StatisticalStd.StatisticalStdPrimitive.metadata.get_hyperparams()
hp = hyperparams_class.defaults().replace({
'use_columns': [1,2],
'use_semantic_types' : True,
'window_size':2
})
primitive = StatisticalStd.StatisticalStdPrimitive(hyperparams=hp)
output_main = primitive._produce(inputs=main).value
print(output_main)
expected_output = container.DataFrame(
{'timestamp': [1, 3, 2, 5], 'values': [1.0, 2.0, 3.0, 4.0], 'b': [1.0, 4.0, 5.0, 6.0],
'values_std': [0.5, 0.5, 0.5, 0.5], 'b_std': [1.5, 1.5, 0.5, 0.5]},
columns=['timestamp', 'values', 'b', 'values_std', 'b_std'])
self.assertEqual(output_main[['timestamp', 'values', 'b', 'values_std',
'b_std']].values.tolist(), expected_output[
['timestamp', 'values', 'b', 'values_std', 'b_std'
]].values.tolist())
self.assertEqual(utils.to_json_structure(output_main.metadata.to_internal_simple_structure()),
[{'metadata': {'dimension': {'length': 4,
'name': 'rows',
'semantic_types': [
'https://metadata.datadrivendiscovery.org/types/TabularRow']},
'schema': 'https://metadata.datadrivendiscovery.org/schemas/v0/container.json',
'semantic_types': ['https://metadata.datadrivendiscovery.org/types/Table'],
'structural_type': 'd3m.container.pandas.DataFrame'},
'selector': []},
{'metadata': {'dimension': {'length': 5,
'name': 'columns',
'semantic_types': [
'https://metadata.datadrivendiscovery.org/types/TabularColumn']}},
'selector': ['__ALL_ELEMENTS__']},
{'metadata': {'name': 'timestamp', 'structural_type': 'numpy.int64'},
'selector': ['__ALL_ELEMENTS__', 0]},
{'metadata': {'name': 'values', 'structural_type': 'numpy.float64'},
'selector': ['__ALL_ELEMENTS__', 1]},
{'metadata': {'name': 'b', 'structural_type': 'numpy.float64'},
'selector': ['__ALL_ELEMENTS__', 2]},
{'metadata': {'name': 'values_std',
'semantic_types': ['https://metadata.datadrivendiscovery.org/types/Attribute'],
'structural_type': 'numpy.float64'},
'selector': ['__ALL_ELEMENTS__', 3]},
{'metadata': {'name': 'b_std',
'semantic_types': ['https://metadata.datadrivendiscovery.org/types/Attribute'],
'structural_type': 'numpy.float64'},
'selector': ['__ALL_ELEMENTS__', 4]},
])
params = primitive.get_params()
primitive.set_params(params=params)
if __name__ == '__main__':
unittest.main()
| [
"[email protected]"
] | |
a28a37b7bb5df3b8e2d9c62214b8cdbe474acac8 | 35c8d28a6324d2425929c5aa43cf9d828c8977db | /favsinger/favsingerapp/admin.py | 898788cbaeba2c06bb422217c3ef40ab79967ddf | [] | no_license | peanutyumyum/Likelion_8th | 3bc9bb0414e0b5527ad6fbb4b86b90690e2f6111 | c7d9510f42a5d12f6b6cd58b298abceaf6befa2e | refs/heads/master | 2022-12-16T14:37:28.066275 | 2020-09-24T17:22:04 | 2020-09-24T17:22:04 | 275,782,380 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 118 | py | from django.contrib import admin
from .models import singer
# Register your models here.
admin.site.register(singer) | [
"[email protected]"
] | |
110218098957e307e9f699349df924065f373b44 | f4d0c26d3aa27c77a7c27d9002a08465a0638cbb | /csv_schema/migrations/0008_auto_20171025_1410.py | 5a5a669d07a6928196bbb33dce2d54f89af67d33 | [] | no_license | uk-gov-mirror/nhsengland.NCDR-reference-library | 3afe0711f47dc1b5fa25646bc870a806b3512ce5 | cac30ee0787e81fb9868731576c242c7ea3dbde8 | refs/heads/master | 2023-04-03T15:10:19.320708 | 2017-11-03T15:03:27 | 2017-11-03T15:03:27 | 356,799,453 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 427 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.6 on 2017-10-25 14:10
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('csv_schema', '0007_auto_20171025_1119'),
]
operations = [
migrations.AlterUniqueTogether(
name='row',
unique_together=set([('table', 'data_item')]),
),
]
| [
"[email protected]"
] | |
6750c234e65cc1a9d0a5a6882b55fffe847f320d | 781e2692049e87a4256320c76e82a19be257a05d | /assignments/python/wc/src/884.py | a8d74bc04bec198c408a24561663646aefed2414 | [] | no_license | itsolutionscorp/AutoStyle-Clustering | 54bde86fe6dbad35b568b38cfcb14c5ffaab51b0 | be0e2f635a7558f56c61bc0b36c6146b01d1e6e6 | refs/heads/master | 2020-12-11T07:27:19.291038 | 2016-03-16T03:18:00 | 2016-03-16T03:18:42 | 59,454,921 | 4 | 0 | null | 2016-05-23T05:40:56 | 2016-05-23T05:40:56 | null | UTF-8 | Python | false | false | 169 | py | def word_count(phrase):
phrase = phrase.split()
occurences = {}
for word in set(phrase):
occurences[word] = phrase.count(word)
return occurences
| [
"[email protected]"
] | |
d313990cc102d01487a3785a466d81c1356d418e | d3dc1d50f683566c9d71722f218afc9340ed6ab5 | /mql.tolog/tests/test_tolog_converter.py | c083f199c167dc39587fb81161ecaf545da5e0ec | [] | no_license | heuer/mappa | b20ec8a61979a75802af19803c54ee339e65807c | fc89cf32560d2e9ea6b380127b77fb3587bbd06c | refs/heads/master | 2021-01-23T15:31:54.242393 | 2015-03-16T17:17:35 | 2015-03-16T17:17:35 | 32,339,134 | 5 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,721 | py | # -*- coding: utf-8 -*-
#
# Copyright (c) 2007 - 2014 -- Lars Heuer - Semagia <http://www.semagia.com/>.
# All rights reserved.
#
# BSD license.
#
"""\
Tests against the "back-to-tolog" stylesheet.
:author: Lars Heuer (heuer[at]semagia.com)
:organization: Semagia - <http://www.semagia.com/>
:license: BSD License
"""
import os
import io
import json
import glob
from nose.tools import eq_
from mql import tolog
from mql.tolog import xsl
_IGNORE = (
'fold-type-assoc.tl',
'tolog-tut-2-4_2.tl',
'topic-types.tl',
'topic-types2.tl',
'fold-scope-name.tl',
'fold-scope-occ.tl',
'create-dyn-occ3.tl',
)
def fail(msg): raise AssertionError(msg)
def test_tolog_plus():
base_dir = os.path.abspath('./xsltests/')
with open(os.path.join(base_dir, 'query2optimizers.json'), 'rb') as f:
query2optimizers = json.load(f)
tolog_dir = os.path.abspath(os.path.join(base_dir, './in/'))
found_files = set([os.path.basename(fn) for fn in glob.glob(tolog_dir + '/*.tl')])
baseline_dir = os.path.join(base_dir, './baseline/')
for fn in query2optimizers:
if fn in _IGNORE:
continue
found_files.remove(fn)
optimizers = ['query-c14n']
optimizers.extend(query2optimizers[fn])
filename = os.path.join(tolog_dir, fn)
f = open(filename, 'rb')
# 1. Apply optimizers and return tolog+
tl = tolog.convert_to_tolog_plus(f, optimizers=optimizers)
# 2. Parse created tolog+
try:
tree = tolog.parse_to_etree(tl, iri='http://www.example.org/mql-tolog/', tolog_plus=True)
except Exception, ex:
fail('Error: %r in %s' % (ex, tl))
# 3. Apply optimizers to the newly parsed query
res = xsl.apply_transformations(tree, optimizers)
out = io.BytesIO()
res.write_c14n(out)
expected = io.open(os.path.join(baseline_dir, fn + '.c14n'), encoding='utf-8').read()
yield eq_, expected, out.getvalue(), 't+: %s\n%s' % (fn, tl)
for fn in _IGNORE:
found_files.remove(fn)
if found_files:
raise Exception('Found more files in the directory: %r' % found_files)
def test_tolog():
base_dir = os.path.abspath('./xsltests/')
with open(os.path.join(base_dir, 'query2optimizers.json'), 'rb') as f:
query2optimizers = json.load(f)
tolog_dir = os.path.abspath(os.path.join(base_dir, './in/'))
found_files = set([os.path.basename(fn) for fn in glob.glob(tolog_dir + '/*.tl')])
baseline_dir = os.path.join(base_dir, './baseline/')
for fn in query2optimizers:
if fn in _IGNORE:
continue
found_files.remove(fn)
optimizers = ['query-c14n']
optimizers.extend(query2optimizers[fn])
filename = os.path.join(tolog_dir, fn)
f = open(filename, 'rb')
# 1. Apply optimizers and return tolog
tl = tolog.convert_to_tolog(f, optimizers=optimizers)
# 2. Parse created tolog+
try:
tree = tolog.parse_to_etree(tl, iri='http://www.example.org/mql-tolog/', tolog_plus=False)
        except Exception as ex:
fail('Error: %r in %s' % (ex, tl))
# 3. Apply optimizers to the newly parsed query
res = xsl.apply_transformations(tree, optimizers)
out = io.BytesIO()
res.write_c14n(out)
expected = io.open(os.path.join(baseline_dir, fn + '.c14n'), encoding='utf-8').read()
yield eq_, expected, out.getvalue(), 't: %s' % fn
for fn in _IGNORE:
found_files.remove(fn)
if found_files:
raise Exception('Found more files in the directory: %r' % found_files)
if __name__ == '__main__':
import nose
nose.core.runmodule()
| [
"Lars@localhost"
] | Lars@localhost |
87ea03bc8f13ab1d0b3226d73cc1dd151a73eb2f | e8f76b7162c9781a4457cd06f5405925a9a18593 | /vbb_backend/users/admin.py | df843087d8aa089373a17f4662347cf93c4abe73 | [
"MIT"
] | permissive | wasswarichard/backend-vbb-portal | bcb54d4cf63f91862704ef5f6e5953b76d8839e8 | 8e4deec8a6e71b17da3476b0a05dbfe73d547b55 | refs/heads/master | 2023-03-24T21:09:25.364768 | 2021-03-16T18:53:26 | 2021-03-16T18:53:26 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 438 | py | from django.contrib import admin
from vbb_backend.users.models import User, Mentor, Student, HeadMaster
from django.contrib.auth.admin import UserAdmin
class MyUserAdmin(UserAdmin):
model = User
fieldsets = UserAdmin.fieldsets + (
(None, {"fields": ("user_type", "external_id")}),
)
admin.site.register(User, MyUserAdmin)
admin.site.register(Mentor)
admin.site.register(Student)
admin.site.register(HeadMaster) | [
"[email protected]"
] | |
1fd18852d0cbcda793b74043c834504fd069e531 | e296f0f3d7db598aba5658de3ff8c767634e533e | /zoo/migrations/069_add_columns_is_visible_and_moderated_by_and_moderated_at_to_photos_photo.py | c04134e45dbcd4f0f9271346ff9617625c56833f | [] | no_license | devfort/wildlifenearyou | b2ac05070aa6face60156d6e7c85f98f00013c25 | 8e618aea90bbcedc45a4e30199e31880ea9e6dca | refs/heads/master | 2021-01-13T01:25:29.467549 | 2010-06-10T06:37:43 | 2010-06-10T06:37:43 | 7,874,317 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 481 | py | from django.conf import settings
if settings.DATABASE_ENGINE == 'mysql':
from dmigrations.mysql import migrations as m
elif settings.DATABASE_ENGINE == 'sqlite3':
from dmigrations.sqlite3 import migrations as m
import datetime
migration = m.Compound([
m.AddColumn('photos', 'photo', 'is_visible', 'bool NOT NULL'),
m.AddColumn('photos', 'photo', 'moderated_at', 'datetime NULL'),
m.AddColumn('photos', 'photo', 'moderated_by', 'integer NULL', 'auth_user'),
])
| [
"[email protected]"
] | |
4886128779480e9bd970b67106abee8174e3da54 | b0ef0b7b7752564b703b4438e2624a4645299006 | /usedcar/userinfo/models.py | 3e3221136a2010eb538098c2a6e27d5b90d3e2aa | [] | no_license | beantomemory/django | 66f1ff3f7fbc72df18ee01e394e733b1135fb01c | 1b588cf3888724a5f4d86df04d7ebc91d7f20000 | refs/heads/master | 2020-04-19T09:17:32.771039 | 2019-01-29T07:21:01 | 2019-01-29T07:21:01 | 168,105,733 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,989 | py | from django.db import models
from django.contrib.auth.models import AbstractUser
# Create your models here.
sex_choice = (
(0, "男"),
(1, "女"),
)
role_choice = (
(0, "买家"),
(1, "卖家"),
(2, "平台"),
)
bank_choice = (
(0, "中国工商银行"),
(1, "中国建设银行"),
(2, "中国农业银行"),
(3, "招商银行"),
(4, "北京银行"),
(5, "我家银行"),
)
class Userinfo(AbstractUser):
# username = models.CharField(verbose_name="用户名", max_length=30, null=False)
# password = models.CharField(verbose_name="密码", max_length=200, null = False)
realname = models.CharField(verbose_name="真实姓名", max_length=30, null=False)
iden = models.CharField(verbose_name="身份证号", max_length=18, null=False)
ads = models.CharField(verbose_name="地址", max_length=200, null=False)
uphone = models.CharField(verbose_name="手机号", max_length=20, null=False)
sex = models.IntegerField(verbose_name="性别", choices=sex_choice, default=0)
role = models.IntegerField(verbose_name="角色", choices=role_choice, default=0)
isactive = models.BooleanField(verbose_name="是否激活", default = False)
isban = models.BooleanField(verbose_name="是否禁用", default = False)
def __str__(self):
return self.username
class Meta:
db_table = "userinfo"
verbose_name = "用户信息"
verbose_name_plural = verbose_name
class Bank(models.Model):
cardno = models.CharField("卡号", max_length=30, null=False)
user = models.ForeignKey(Userinfo)
cpwd = models.CharField("交易密码", max_length=200, null=False)
bank = models.IntegerField("开户银行", choices=bank_choice, default=0)
isdelete = models.BooleanField("是否删除", default=False)
    def __str__(self):
        # "bank" is an IntegerField with choices; __str__ must return a str,
        # so use the generated display helper instead of the raw integer.
        return self.get_bank_display()
class Meta:
db_table = "bank"
verbose_name = "银行卡"
verbose_name_plural = verbose_name
| [
"[email protected]"
] | |
72a1e8fbf7d997f06f2b6a1c01637a81067d34d5 | e09b905a092bd324050e405e1caf1e8a5630cc1f | /pyscf/x2c/sfx2c1e_hess.py | 3f553e32752db64b4fc050c0732c6e6e071cc884 | [
"Apache-2.0"
] | permissive | yangcal/pyscf | 09a2776e8d88b2118f1d00a810a9c90e7e214281 | e833b9a4fd5fb24a061721e5807e92c44bb66d06 | refs/heads/master | 2021-06-18T11:45:57.808856 | 2021-03-18T04:58:53 | 2021-03-18T04:58:53 | 246,690,849 | 1 | 0 | Apache-2.0 | 2020-03-11T22:17:28 | 2020-03-11T22:17:28 | null | UTF-8 | Python | false | false | 16,868 | py | #!/usr/bin/env python
# Copyright 2014-2020 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''
Analytical nuclear hessian for 1-electron spin-free x2c method
Ref.
JCP 135, 244104 (2011); DOI:10.1063/1.3667202
JCTC 8, 2617 (2012); DOI:10.1021/ct300127e
'''
from functools import reduce
import numpy
import scipy.linalg
from pyscf import lib
from pyscf import gto
from pyscf.x2c import x2c
from pyscf.x2c import sfx2c1e_grad
def hcore_hess_generator(x2cobj, mol=None):
    '''nuclear hessian of the 1-component X2c hcore Hamiltonian (spin-free part only)
'''
if mol is None: mol = x2cobj.mol
xmol, contr_coeff = x2cobj.get_xmol(mol)
if x2cobj.basis is not None:
s22 = xmol.intor_symmetric('int1e_ovlp')
s21 = gto.intor_cross('int1e_ovlp', xmol, mol)
contr_coeff = lib.cho_solve(s22, s21)
get_h1_xmol = gen_sf_hfw(xmol, x2cobj.approx)
def hcore_deriv(ia, ja):
h1 = get_h1_xmol(ia, ja)
if contr_coeff is not None:
h1 = lib.einsum('pi,xypq,qj->xyij', contr_coeff, h1, contr_coeff)
return numpy.asarray(h1)
return hcore_deriv
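# Sketch of intended use (an assumption, mirroring the __main__ check at the bottom
# of this file): given an SFX2C-1e object `x2cobj` exposing .mol, .approx and .get_xmol,
#   hcore_deriv = hcore_hess_generator(x2cobj)
#   d2h = hcore_deriv(ia, ja)   # (3, 3, nao, nao) block of d^2 hcore / dR_ia dR_ja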
def gen_sf_hfw(mol, approx='1E'):
approx = approx.upper()
c = lib.param.LIGHT_SPEED
h0, s0 = sfx2c1e_grad._get_h0_s0(mol)
e0, c0 = scipy.linalg.eigh(h0, s0)
c0[:,c0[1]<0] *= -1
aoslices = mol.aoslice_by_atom()
nao = mol.nao_nr()
if 'ATOM' in approx:
x0 = numpy.zeros((nao,nao))
for ia in range(mol.natm):
ish0, ish1, p0, p1 = aoslices[ia]
shls_slice = (ish0, ish1, ish0, ish1)
t1 = mol.intor('int1e_kin', shls_slice=shls_slice)
s1 = mol.intor('int1e_ovlp', shls_slice=shls_slice)
with mol.with_rinv_at_nucleus(ia):
z = -mol.atom_charge(ia)
v1 = z * mol.intor('int1e_rinv', shls_slice=shls_slice)
w1 = z * mol.intor('int1e_prinvp', shls_slice=shls_slice)
x0[p0:p1,p0:p1] = x2c._x2c1e_xmatrix(t1, v1, w1, s1, c)
else:
cl0 = c0[:nao,nao:]
cs0 = c0[nao:,nao:]
x0 = scipy.linalg.solve(cl0.T, cs0.T).T
t0x0 = numpy.dot(s0[nao:,nao:], x0)
s_nesc0 = s0[:nao,:nao] + numpy.dot(x0.T, t0x0)
w_s, v_s = scipy.linalg.eigh(s0[:nao,:nao])
w_sqrt = numpy.sqrt(w_s)
s_nesc0_vbas = reduce(numpy.dot, (v_s.T, s_nesc0, v_s))
R0_mid = numpy.einsum('i,ij,j->ij', 1./w_sqrt, s_nesc0_vbas, 1./w_sqrt)
wr0, vr0 = scipy.linalg.eigh(R0_mid)
wr0_sqrt = numpy.sqrt(wr0)
# R0 in v_s basis
R0 = numpy.dot(vr0/wr0_sqrt, vr0.T)
R0 *= w_sqrt
R0 /= w_sqrt[:,None]
# Transform R0 back
R0 = reduce(numpy.dot, (v_s, R0, v_s.T))
R0 = x2c._get_r(s0[:nao,:nao], s_nesc0)
c_fw0 = numpy.vstack((R0, numpy.dot(x0, R0)))
h0_fw_half = numpy.dot(h0, c_fw0)
epq = e0[:,None] - e0
degen_mask = abs(epq) < 1e-7
epq[degen_mask] = 1e200
s2aa = mol.intor('int1e_ipipovlp', comp=9).reshape(3,3,nao,nao)
t2aa = mol.intor('int1e_ipipkin', comp=9).reshape(3,3,nao,nao)
v2aa = mol.intor('int1e_ipipnuc', comp=9).reshape(3,3,nao,nao)
w2aa = mol.intor('int1e_ipippnucp', comp=9).reshape(3,3,nao,nao)
s2ab = mol.intor('int1e_ipovlpip', comp=9).reshape(3,3,nao,nao)
t2ab = mol.intor('int1e_ipkinip', comp=9).reshape(3,3,nao,nao)
v2ab = mol.intor('int1e_ipnucip', comp=9).reshape(3,3,nao,nao)
w2ab = mol.intor('int1e_ippnucpip', comp=9).reshape(3,3,nao,nao)
n2 = nao * 2
h2ao = numpy.zeros((3,3,n2,n2), dtype=v2aa.dtype)
s2ao = numpy.zeros((3,3,n2,n2), dtype=v2aa.dtype)
get_h1_etc = sfx2c1e_grad._gen_first_order_quantities(mol, e0, c0, x0, approx)
def hcore_deriv(ia, ja):
ish0, ish1, i0, i1 = aoslices[ia]
jsh0, jsh1, j0, j1 = aoslices[ja]
s2cc = numpy.zeros_like(s2aa)
t2cc = numpy.zeros_like(s2aa)
v2cc = numpy.zeros_like(s2aa)
w2cc = numpy.zeros_like(s2aa)
if ia == ja:
with mol.with_rinv_origin(mol.atom_coord(ia)):
z = mol.atom_charge(ia)
rinv2aa = z*mol.intor('int1e_ipiprinv', comp=9).reshape(3,3,nao,nao)
rinv2ab = z*mol.intor('int1e_iprinvip', comp=9).reshape(3,3,nao,nao)
prinvp2aa = z*mol.intor('int1e_ipipprinvp', comp=9).reshape(3,3,nao,nao)
prinvp2ab = z*mol.intor('int1e_ipprinvpip', comp=9).reshape(3,3,nao,nao)
s2cc[:,:,i0:i1 ] = s2aa[:,:,i0:i1 ]
s2cc[:,:,i0:i1,j0:j1]+= s2ab[:,:,i0:i1,j0:j1]
t2cc[:,:,i0:i1 ] = t2aa[:,:,i0:i1 ]
t2cc[:,:,i0:i1,j0:j1]+= t2ab[:,:,i0:i1,j0:j1]
v2cc -= rinv2aa + rinv2ab
v2cc[:,:,i0:i1 ]+= v2aa[:,:,i0:i1 ]
v2cc[:,:,i0:i1,j0:j1]+= v2ab[:,:,i0:i1,j0:j1]
v2cc[:,:,i0:i1 ]+= rinv2aa[:,:,i0:i1]
v2cc[:,:,i0:i1 ]+= rinv2ab[:,:,i0:i1]
v2cc[:,:,: ,i0:i1]+= rinv2aa[:,:,i0:i1].transpose(0,1,3,2)
v2cc[:,:,: ,i0:i1]+= rinv2ab[:,:,:,i0:i1]
w2cc -= prinvp2aa + prinvp2ab
w2cc[:,:,i0:i1 ]+= w2aa[:,:,i0:i1 ]
w2cc[:,:,i0:i1,j0:j1]+= w2ab[:,:,i0:i1,j0:j1]
w2cc[:,:,i0:i1 ]+= prinvp2aa[:,:,i0:i1]
w2cc[:,:,i0:i1 ]+= prinvp2ab[:,:,i0:i1]
w2cc[:,:,: ,i0:i1]+= prinvp2aa[:,:,i0:i1].transpose(0,1,3,2)
w2cc[:,:,: ,i0:i1]+= prinvp2ab[:,:,:,i0:i1]
else:
s2cc[:,:,i0:i1,j0:j1] = s2ab[:,:,i0:i1,j0:j1]
t2cc[:,:,i0:i1,j0:j1] = t2ab[:,:,i0:i1,j0:j1]
v2cc[:,:,i0:i1,j0:j1] = v2ab[:,:,i0:i1,j0:j1]
w2cc[:,:,i0:i1,j0:j1] = w2ab[:,:,i0:i1,j0:j1]
zi = mol.atom_charge(ia)
zj = mol.atom_charge(ja)
with mol.with_rinv_at_nucleus(ia):
shls_slice = (jsh0, jsh1, 0, mol.nbas)
rinv2aa = mol.intor('int1e_ipiprinv', comp=9, shls_slice=shls_slice)
rinv2ab = mol.intor('int1e_iprinvip', comp=9, shls_slice=shls_slice)
prinvp2aa = mol.intor('int1e_ipipprinvp', comp=9, shls_slice=shls_slice)
prinvp2ab = mol.intor('int1e_ipprinvpip', comp=9, shls_slice=shls_slice)
rinv2aa = zi * rinv2aa.reshape(3,3,j1-j0,nao)
rinv2ab = zi * rinv2ab.reshape(3,3,j1-j0,nao)
prinvp2aa = zi * prinvp2aa.reshape(3,3,j1-j0,nao)
prinvp2ab = zi * prinvp2ab.reshape(3,3,j1-j0,nao)
v2cc[:,:,j0:j1] += rinv2aa
v2cc[:,:,j0:j1] += rinv2ab.transpose(1,0,2,3)
w2cc[:,:,j0:j1] += prinvp2aa
w2cc[:,:,j0:j1] += prinvp2ab.transpose(1,0,2,3)
with mol.with_rinv_at_nucleus(ja):
shls_slice = (ish0, ish1, 0, mol.nbas)
rinv2aa = mol.intor('int1e_ipiprinv', comp=9, shls_slice=shls_slice)
rinv2ab = mol.intor('int1e_iprinvip', comp=9, shls_slice=shls_slice)
prinvp2aa = mol.intor('int1e_ipipprinvp', comp=9, shls_slice=shls_slice)
prinvp2ab = mol.intor('int1e_ipprinvpip', comp=9, shls_slice=shls_slice)
rinv2aa = zj * rinv2aa.reshape(3,3,i1-i0,nao)
rinv2ab = zj * rinv2ab.reshape(3,3,i1-i0,nao)
prinvp2aa = zj * prinvp2aa.reshape(3,3,i1-i0,nao)
prinvp2ab = zj * prinvp2ab.reshape(3,3,i1-i0,nao)
v2cc[:,:,i0:i1] += rinv2aa
v2cc[:,:,i0:i1] += rinv2ab
w2cc[:,:,i0:i1] += prinvp2aa
w2cc[:,:,i0:i1] += prinvp2ab
s2cc = s2cc + s2cc.transpose(0,1,3,2)
t2cc = t2cc + t2cc.transpose(0,1,3,2)
v2cc = v2cc + v2cc.transpose(0,1,3,2)
w2cc = w2cc + w2cc.transpose(0,1,3,2)
h2ao[:,:,:nao,:nao] = v2cc
h2ao[:,:,:nao,nao:] = t2cc
h2ao[:,:,nao:,:nao] = t2cc
h2ao[:,:,nao:,nao:] = w2cc * (.25/c**2) - t2cc
s2ao[:,:,:nao,:nao] = s2cc
s2ao[:,:,nao:,nao:] = t2cc * (.5/c**2)
h1i, s1i, e1i, c1i, x1i, s_nesc1i, R1i, c_fw1i = get_h1_etc(ia)
h1j, s1j, e1j, c1j, x1j, s_nesc1j, R1j, c_fw1j = get_h1_etc(ja)
if 'ATOM' not in approx:
f2 = lib.einsum('xypq,qj->xypj', h2ao, c0[:,nao:])
f2+= lib.einsum('xpq,yqj->xypj', h1i, c1j)
f2+= lib.einsum('ypq,xqj->xypj', h1j, c1i)
sc2 = lib.einsum('xypq,qj->xypj', s2ao, c0[:,nao:])
sc2+= lib.einsum('xpq,yqj->xypj', s1i, c1j)
sc2+= lib.einsum('ypq,xqj->xypj', s1j, c1i)
f2-= sc2 * e0[nao:]
sc1i = lib.einsum('xpq,qj->xpj', s1i, c0[:,nao:])
sc1j = lib.einsum('xpq,qj->xpj', s1j, c0[:,nao:])
sc1i+= lib.einsum('pq,xqj->xpj', s0, c1i)
sc1j+= lib.einsum('pq,xqj->xpj', s0, c1j)
f2-= lib.einsum('xpq,yqj->xypj', sc1i, e1j)
f2-= lib.einsum('ypq,xqj->xypj', sc1j, e1i)
c2 = lib.einsum('pi,xypj->xyij', c0.conj(), f2) / -epq[:,nao:]
c2_ao = lib.einsum('pq,xyqi->xypi', c0, c2)
cl2 = c2_ao[:,:,:nao]
cs2 = c2_ao[:,:,nao:]
tmp = cs2 - lib.einsum('pq,xyqi->xypi', x0, cl2)
tmp-= lib.einsum('xpq,yqi->xypi', x1i, c1j[:,:nao])
tmp-= lib.einsum('ypq,xqi->xypi', x1j, c1i[:,:nao])
x2 = scipy.linalg.solve(cl0.T, tmp.reshape(-1,nao).T).T.reshape(3,3,nao,nao)
hfw2 = numpy.empty((3,3,nao,nao))
for i in range(3):
for j in range(3):
if 'ATOM' in approx:
s_nesc2 = reduce(numpy.dot, (x0.T, s2ao[i,j,nao:,nao:], x0))
s_nesc2 += s2ao[i,j,:nao,:nao]
R2 = _get_r2((w_sqrt,v_s), s_nesc0,
s1i[i,:nao,:nao], s_nesc1i[i],
s1j[j,:nao,:nao], s_nesc1j[j],
s2ao[i,j,:nao,:nao], s_nesc2, (wr0_sqrt,vr0))
c_fw2 = numpy.vstack((R2, numpy.dot(x0, R2)))
else:
s_nesc2 = numpy.dot(x2[i,j].T, t0x0)
s_nesc2 += reduce(numpy.dot, (x1i[i].T, s1j[j,nao:,nao:], x0))
s_nesc2 += reduce(numpy.dot, (x0.T, s1i[i,nao:,nao:], x1j[j]))
s_nesc2 += reduce(numpy.dot, (x1i[i].T, s0[nao:,nao:], x1j[j]))
s_nesc2 = s_nesc2 + s_nesc2.T
s_nesc2 += reduce(numpy.dot, (x0.T, s2ao[i,j,nao:,nao:], x0))
s_nesc2 += s2ao[i,j,:nao,:nao]
R2 = _get_r2((w_sqrt,v_s), s_nesc0,
s1i[i,:nao,:nao], s_nesc1i[i],
s1j[j,:nao,:nao], s_nesc1j[j],
s2ao[i,j,:nao,:nao], s_nesc2, (wr0_sqrt,vr0))
c_fw_s = (numpy.dot(x0, R2) + numpy.dot(x1i[i], R1j[j]) +
numpy.dot(x1j[j], R1i[i]) + numpy.dot(x2[i,j], R0))
c_fw2 = numpy.vstack((R2, c_fw_s))
tmp = numpy.dot(c_fw2.T, h0_fw_half)
tmp += reduce(numpy.dot, (c_fw1i[i].T, h1j[j], c_fw0))
tmp += reduce(numpy.dot, (c_fw0.T, h1i[i], c_fw1j[j]))
tmp += reduce(numpy.dot, (c_fw1i[i].T, h0, c_fw1j[j]))
hfw2[i,j] = tmp + tmp.T
hfw2[i,j]+= reduce(numpy.dot, (c_fw0.T, h2ao[i,j], c_fw0))
return hfw2
return hcore_deriv
def _get_r2(s0_roots, sa0, s1i, sa1i, s1j, sa1j, s2, sa2, r0_roots):
w_sqrt, v_s = s0_roots
w_invsqrt = 1. / w_sqrt
wr0_sqrt, vr0 = r0_roots
wr0_invsqrt = 1. / wr0_sqrt
sa0 = lib.einsum('pi,pq,qj->ij', v_s, sa0 , v_s)
s1i = lib.einsum('pi,pq,qj->ij', v_s, s1i , v_s)
s1j = lib.einsum('pi,pq,qj->ij', v_s, s1j , v_s)
s2 = lib.einsum('pi,pq,qj->ij', v_s, s2 , v_s)
sa1i = lib.einsum('pi,pq,qj->ij', v_s, sa1i, v_s)
sa1j = lib.einsum('pi,pq,qj->ij', v_s, sa1j, v_s)
sa2 = lib.einsum('pi,pq,qj->ij', v_s, sa2 , v_s)
s1i_sqrt = s1i / (w_sqrt[:,None] + w_sqrt)
s1i_invsqrt = (numpy.einsum('i,ij,j->ij', w_invsqrt**2, s1i, w_invsqrt**2)
/ -(w_invsqrt[:,None] + w_invsqrt))
s1j_sqrt = s1j / (w_sqrt[:,None] + w_sqrt)
s1j_invsqrt = (numpy.einsum('i,ij,j->ij', w_invsqrt**2, s1j, w_invsqrt**2)
/ -(w_invsqrt[:,None] + w_invsqrt))
tmp = numpy.dot(s1i_sqrt, s1j_sqrt)
s2_sqrt = (s2 - tmp - tmp.T) / (w_sqrt[:,None] + w_sqrt)
tmp = numpy.dot(s1i*w_invsqrt**2, s1j)
tmp = s2 - tmp - tmp.T
tmp = -numpy.einsum('i,ij,j->ij', w_invsqrt**2, tmp, w_invsqrt**2)
tmp1 = numpy.dot(s1i_invsqrt, s1j_invsqrt)
s2_invsqrt = (tmp - tmp1 - tmp1.T) / (w_invsqrt[:,None] + w_invsqrt)
R1i_mid = lib.einsum('ip,pj,j->ij', s1i_invsqrt, sa0, w_invsqrt)
R1i_mid = R1i_mid + R1i_mid.T
R1i_mid+= numpy.einsum('i,ij,j->ij', w_invsqrt, sa1i, w_invsqrt)
R1i_mid = tmpi = lib.einsum('pi,pq,qj->ij', vr0, R1i_mid, vr0)
R1i_mid = (numpy.einsum('i,ij,j->ij', wr0_invsqrt**2, R1i_mid, wr0_invsqrt**2)
/ -(wr0_invsqrt[:,None] + wr0_invsqrt))
R1j_mid = lib.einsum('ip,pj,j->ij', s1j_invsqrt, sa0, w_invsqrt)
R1j_mid = R1j_mid + R1j_mid.T
R1j_mid+= numpy.einsum('i,ij,j->ij', w_invsqrt, sa1j, w_invsqrt)
R1j_mid = tmpj = lib.einsum('pi,pq,qj->ij', vr0, R1j_mid, vr0)
R1j_mid = (numpy.einsum('i,ij,j->ij', wr0_invsqrt**2, R1j_mid, wr0_invsqrt**2)
/ -(wr0_invsqrt[:,None] + wr0_invsqrt))
# second derivative of (s_invsqrt * sa * s_invsqrt), 9 terms
R2_mid = lib.einsum('ip,pj,j->ij', s2_invsqrt , sa0 , w_invsqrt)
R2_mid+= lib.einsum('ip,pj,j->ij', s1i_invsqrt, sa1j, w_invsqrt)
R2_mid+= lib.einsum('i,ip,pj->ij', w_invsqrt , sa1i, s1j_invsqrt)
R2_mid+= lib.einsum('ip,pq,qj->ij', s1i_invsqrt, sa0 , s1j_invsqrt)
R2_mid = R2_mid + R2_mid.T
R2_mid+= numpy.einsum('i,ij,j->ij', w_invsqrt, sa2, w_invsqrt)
R2_mid = lib.einsum('pi,pq,qj->ij', vr0, R2_mid, vr0)
tmp = numpy.dot(tmpi*wr0_invsqrt**2, tmpj)
tmp = R2_mid - tmp - tmp.T
tmp = -numpy.einsum('i,ij,j->ij', wr0_invsqrt**2, tmp, wr0_invsqrt**2)
tmp1 = numpy.dot(R1i_mid, R1j_mid)
R2_mid = (tmp - tmp1 - tmp1.T) / (wr0_invsqrt[:,None] + wr0_invsqrt)
R0_mid = numpy.dot(vr0*wr0_invsqrt, vr0.T)
R1i_mid = reduce(numpy.dot, (vr0, R1i_mid, vr0.T))
R1j_mid = reduce(numpy.dot, (vr0, R1j_mid, vr0.T))
R2_mid = reduce(numpy.dot, (vr0, R2_mid, vr0.T))
R2 = lib.einsum('ip,pj,j->ij' , s2_invsqrt , R0_mid , w_sqrt)
R2 += lib.einsum('ip,pj,j->ij' , s1i_invsqrt, R1j_mid, w_sqrt)
R2 += lib.einsum('ip,pq,qj->ij', s1i_invsqrt, R0_mid , s1j_sqrt)
R2 += lib.einsum('ip,pj,j->ij' , s1j_invsqrt, R1i_mid, w_sqrt)
R2 += numpy.einsum('i,ij,j->ij', w_invsqrt , R2_mid , w_sqrt)
R2 += lib.einsum('i,iq,qj->ij' , w_invsqrt , R1i_mid, s1j_sqrt)
R2 += lib.einsum('ip,pq,qj->ij', s1j_invsqrt, R0_mid , s1i_sqrt)
R2 += lib.einsum('i,iq,qj->ij' , w_invsqrt , R1j_mid, s1i_sqrt)
R2 += lib.einsum('i,iq,qj->ij' , w_invsqrt , R0_mid , s2_sqrt)
R2 = reduce(numpy.dot, (v_s, R2, v_s.T))
return R2
if __name__ == '__main__':
bak = lib.param.LIGHT_SPEED
lib.param.LIGHT_SPEED = 10
mol = gto.M(
verbose = 0,
atom = [["O" , (0. , 0. , 0.0001)],
[1 , (0. , -0.757 , 0.587)],
[1 , (0. , 0.757 , 0.587)]],
basis = '3-21g',
)
h1_deriv_1 = sfx2c1e_grad.gen_sf_hfw(mol, approx='1E')
mol = gto.M(
verbose = 0,
atom = [["O" , (0. , 0. ,-0.0001)],
[1 , (0. , -0.757 , 0.587)],
[1 , (0. , 0.757 , 0.587)]],
basis = '3-21g',
)
h1_deriv_2 = sfx2c1e_grad.gen_sf_hfw(mol, approx='1E')
mol = gto.M(
verbose = 0,
atom = [["O" , (0. , 0. , 0. )],
[1 , (0. , -0.757 , 0.587)],
[1 , (0. , 0.757 , 0.587)]],
basis = '3-21g',
)
h2_deriv = gen_sf_hfw(mol)
h2 = h2_deriv(0,0)
h2_ref = (h1_deriv_1(0)[2] - h1_deriv_2(0)[2]) / 0.0002 * lib.param.BOHR
print(abs(h2[2,2]-h2_ref).max())
print(lib.finger(h2) - 33.71188112440316)
h2 = h2_deriv(1,0)
h2_ref = (h1_deriv_1(1)[2] - h1_deriv_2(1)[2]) / 0.0002 * lib.param.BOHR
print(abs(h2[2,2]-h2_ref).max())
print(lib.finger(h2) - -23.609411428378138)
lib.param.LIGHT_SPEED = bak
| [
"[email protected]"
] | |
7554d991424a83b97388e0f2edccaa713a4db8e9 | 7f114a1fb511b816c116d5b9e67cb998e3e23956 | /PyplayS31.py | c0da34dbd4a6b96011dfd4aadb0932634bbc90f4 | [] | no_license | Bharanij27/bharanirep | 90ac34eb28deaa7ec96d042de456de71b96866d7 | 982133a7939c889d433c178a601441fa087293d9 | refs/heads/master | 2021-08-07T20:22:36.244395 | 2020-06-05T04:58:10 | 2020-06-05T04:58:10 | 186,580,768 | 0 | 6 | null | null | null | null | UTF-8 | Python | false | false | 136 | py | n=list(input())
s=0
for ch in n:
    if ch=='(': s=s+1
    elif ch==')': s=s-1
    if s<0: break  # a ')' appeared before any matching '(' -- cannot be balanced
if s==0: print("yes")
else: print("no")
| [
"[email protected]"
] | |
704a4aee8b6f27bb5942d6c99ff9aad57ada94b8 | d2f50124ff3bec70b9b3139ecb063b06e526781d | /biable/migrations/0030_auto_20170104_1540.py | e5309f4b5538cf1b5c001ee7667bcac85e3fe29f | [] | no_license | odecsarrollo/odecopack-componentes | e8d993f089bf53bbf3c53d1265e70ac5c06b59b8 | b583a115fb30205d358d97644c38d66636b573ff | refs/heads/master | 2022-12-12T00:33:02.874268 | 2020-08-13T18:45:01 | 2020-08-13T18:45:01 | 189,262,705 | 0 | 0 | null | 2022-12-08T11:23:46 | 2019-05-29T16:37:21 | Python | UTF-8 | Python | false | false | 872 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.10.3 on 2017-01-04 20:40
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('biable', '0029_auto_20170104_1528'),
]
operations = [
migrations.AlterField(
model_name='vendedorbiable',
name='colaborador',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.PROTECT, related_name='mi_vendedor_biable', to='usuarios.UserExtended'),
),
migrations.AlterField(
model_name='vendedorbiableuser',
name='usuario',
field=models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, related_name='mis_vendedores', to='usuarios.UserExtended'),
),
]
| [
"[email protected]"
] | |
ef1d6fd47f5542551fbfe13739016b1565abc26b | 1bddfbc901946b6cdef47e5325626d26a9865a51 | /setup.py | c7120cb1461fa0651511e8b384ddc401e186d6a8 | [] | no_license | kagesenshi/dkiscm.importer | ce56eccf70ac776692a0e1015d3e5bc311680979 | c255bca2a755cd3681106c5d3ee2f917de359e2b | refs/heads/master | 2016-09-11T00:58:54.109283 | 2013-10-01T20:05:23 | 2013-10-01T20:05:23 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,667 | py | from setuptools import setup, find_packages
import os
version = '1.4.dev0'
setup(name='dkiscm.importer',
version=version,
description="",
long_description=open("README.rst").read() + "\n" +
open(os.path.join("docs", "HISTORY.rst")).read(),
# Get more strings from
# http://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=[
"Framework :: Plone",
"Programming Language :: Python",
"Topic :: Software Development :: Libraries :: Python Modules",
],
keywords='',
author='Inigo Consulting',
author_email='[email protected]',
url='http://github.com/inigoconsulting/',
license='gpl',
packages=find_packages(),
namespace_packages=['dkiscm'],
include_package_data=True,
zip_safe=False,
install_requires=[
'setuptools',
'plone.app.dexterity',
'plone.namedfile [blobs]',
'collective.grok',
'plone.app.referenceablebehavior',
'collective.dexteritytextindexer',
'plone.app.multilingual',
'plone.multilingualbehavior',
# -*- Extra requirements: -*-
],
extras_require={
'test': [
'plone.app.testing',
],
},
entry_points="""
# -*- Entry points: -*-
[z3c.autoinclude.plugin]
target = plone
""",
# The next two lines may be deleted after you no longer need
# addcontent support from paster and before you distribute
# your package.
setup_requires=["PasteScript"],
paster_plugins=["templer.localcommands"],
)
| [
"[email protected]"
] | |
56702c50ea0061115156969d39cfedd65bcc9d30 | 63c0a3e5599be2a038bb713abc69584db42a3aae | /system_status.py | baf403826f3a9dc1bef32657a89548c790de8370 | [] | no_license | dmitryduev/sserv-njs | 6ccc98512547ba399e62dd83a99eef6ffe92c075 | 17e53069a2804506aca2a2b984ab465c5b5ff718 | refs/heads/master | 2020-05-22T04:39:53.115636 | 2018-04-11T06:44:43 | 2018-04-11T06:44:43 | 49,021,310 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,005 | py | from __future__ import print_function
import time
import psutil
import datetime
import json
import traceback
import argparse
if __name__ == '__main__':
''' Create command line argument parser '''
parser = argparse.ArgumentParser(formatter_class=argparse.RawDescriptionHelpFormatter,
description='Manage data archive for Robo-AO')
parser.add_argument('config_file', metavar='config_file',
action='store', help='path to config file.', type=str)
args = parser.parse_args()
# read in config
with open(args.config_file) as cjson:
config = json.load(cjson)
# config must not be empty:
if len(config) == 0:
raise Exception('Failed to load config file')
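    # Assumed minimal shape of the JSON config this script expects (illustrative only):
    # {
    #   "status": {
    #     "Control machine status": {"data-file": "/path/to/status.dat"}
    #   }
    # }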
while 1:
# construct line with telemetry
try:
            # status line: UTC_time CPU_usage_% memory_usage_% disk_usage_% for /, /Data, /Data1../Data4
_utc_now = datetime.datetime.utcnow().strftime('%Y-%m-%d %H:%M:%S.%f')[:-3]
_cpu_usage = psutil.cpu_percent(interval=None)
_mem_usage = psutil.virtual_memory().percent
_root = psutil.disk_usage('/').percent
_data = psutil.disk_usage('/Data').percent
_data_1 = psutil.disk_usage('/Data1').percent
_data_2 = psutil.disk_usage('/Data2').percent
_data_3 = psutil.disk_usage('/Data3').percent
_data_4 = psutil.disk_usage('/Data4').percent
_t = '{:s} {:.1f} {:.1f} {:.1f} {:.1f} {:.1f} {:.1f} {:.1f} {:.1f}\n'.format(
_utc_now, _cpu_usage, _mem_usage,
_root, _data, _data_1, _data_2, _data_3, _data_4)
with open(config['status']['Control machine status']['data-file'], 'w') as _f:
_f.write(_t)
except Exception as _e:
print(_e)
traceback.print_exc()
# take a nap
time.sleep(0.95)
| [
"[email protected]"
] | |
c6de41952c29d959461ae7ba8c7269444ef04189 | 2c20cd2b84b44f6711d53e02f681b455100d5bdc | /setup.py | e3f064e118ecf33fe25b41078bef36e51b3bfdde | [
"MIT"
] | permissive | YAtOff/s3rsync | a28c69c21a1091ee3d53362af8a9d40357d0469a | ebb36e47602491ef7dcb073bed9527f6243e317b | refs/heads/master | 2023-05-12T04:05:19.950099 | 2020-01-10T10:57:28 | 2020-01-10T10:57:28 | 224,131,742 | 0 | 1 | MIT | 2023-05-01T21:17:12 | 2019-11-26T07:43:08 | Python | UTF-8 | Python | false | false | 184 | py | from setuptools import setup, find_packages
setup(
name="s3rsync",
version="1.0.0",
description="",
packages=find_packages(exclude=("tests",)),
zip_safe=False,
)
| [
"[email protected]"
] | |
35c2d0ab34307b215c50250f7de343a66666276a | 9b5c995b247803b64895223fc51f407e9da2df45 | /IQ/Files/create_file_and_write_data.py | 2cc9b05c3f86d4e7f3dbc1997bc0e34c16dac890 | [] | no_license | Shyam-Personal/python_repo | 3453156ed73efaa91fa5e01dd15a1a0e664d3d22 | 1809de5afbecc1fd17cd70ae80a1eb4b9282d554 | refs/heads/master | 2021-01-25T14:10:30.394842 | 2019-09-23T13:26:47 | 2019-09-23T13:26:47 | 123,660,813 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 418 | py | """
Create 10 files of 1MB size each.
"""
def main():
try:
#Solution 1
data = "A"*1024*1024
for i in range(10):
filename = "Test_{:03d}".format(i)
with open(filename, "w") as fh:
fh.write(data)
except Exception as e:
print("Exception while executing program. Error details: {}".format(str(e)))
if __name__ == "__main__":
main()
| [
"[email protected]"
] | |
272486de2b4247c949cc78cd8e0754d37480de58 | b2913030cf1646310b08efaa57c2199bb08e37c9 | /general/apero_updates/inprogress/is_night_obs.py | c5c6c4306f1de856f7212981e99d50db3911ae68 | [
"MIT"
] | permissive | njcuk9999/apero-utils | 6f5b5083537562a31573b5c4cc76908c5fe194b9 | 368d53182428ca8befcdd3e5c8ca054f61913711 | refs/heads/master | 2023-08-31T02:56:01.369406 | 2023-08-18T15:12:59 | 2023-08-18T15:12:59 | 238,777,509 | 3 | 5 | MIT | 2023-08-17T14:15:41 | 2020-02-06T20:24:49 | Python | UTF-8 | Python | false | false | 2,454 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
# CODE NAME HERE
# CODE DESCRIPTION HERE
Created on 2023-01-31 at 11:24
@author: cook
"""
import numpy as np
import matplotlib.pyplot as plt
from astropy.io import fits
from astropy.table import Table
from astropy import units as u
from tqdm import tqdm
import warnings
# =============================================================================
# Define variables
# =============================================================================
# -----------------------------------------------------------------------------
# =============================================================================
# Define functions
# =============================================================================
def function1():
return 0
# =============================================================================
# Start of code
# =============================================================================
# Main code here
if __name__ == "__main__":
# ----------------------------------------------------------------------
from astropy.io import fits
import astropy.coordinates as coord
import astropy.units as u
from astropy.coordinates import get_sun, AltAz
from astropy.time import Time
file = 'NIRPS_2023-01-20T08_49_12_510_pp_e2dsff_tcorr_A.fits'
# get the header
h = fits.getheader(file)
lat = h['BC_LAT'] # latitude
lon = h['BC_LONG'] # longitude
mjd = h['MJD-OBS'] # Modified Julian day
sun_time = Time(mjd, format='mjd') # UTC time
loc = coord.EarthLocation(lon=lon * u.deg,
lat=lat * u.deg)
altaz = AltAz(obstime=sun_time, location=loc)
sun_elevation = 90 - get_sun(sun_time).transform_to(altaz).zen.value
    # Legal definition of the twilight angles (Sun-elevation thresholds)
CIV_TWIL = sun_elevation < (-6) # suggestion for Civil twilight keyword
NAU_TWIL = sun_elevation < (-12) # suggestion for Nautical twilight keyword
AST_TWIL = sun_elevation < (-18) # suggestion for Astronomical twilight keyword
print('Civil twilight : {}\n'
'Nautical twilight : {}\n'
'Astron twilight : {}'.format(CIV_TWIL, NAU_TWIL, AST_TWIL))
print('Sun elevation : {:.1f} deg'.format(sun_elevation))
# =============================================================================
# End of code
# =============================================================================
| [
"[email protected]"
] | |
182247988bf376661723c18e69b7095523833c84 | 04eb5ed2afbd0b2a190e38a48f1c8b86f63b5497 | /client/tests/mocks.py | c7f5fe1971df6068e73fca1512cc9aca0a5d54af | [
"MIT"
] | permissive | Playfloor/pyre-check | 04d671c63ce882891f978c8d1f6540d236dd22ab | 2e8b86fe7ed9fd84a026c188d08877a77b142309 | refs/heads/main | 2023-08-21T11:08:30.229589 | 2021-10-06T03:20:01 | 2021-10-06T03:21:24 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,028 | py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# pyre-unsafe
from pathlib import Path
from typing import Optional
from unittest.mock import MagicMock
from .. import command_arguments, configuration as configuration_module
from ..analysis_directory import AnalysisDirectory
from ..commands.command import IncrementalStyle
from ..commands.incremental import Incremental
from ..configuration import SharedMemory
def mock_arguments(
debug: bool = False,
changed_files_path=None,
enable_profiling: bool = False,
enable_memory_profiling: bool = False,
features=None,
load_initial_state_from=None,
local_configuration=None,
log_identifier: str = "",
no_saved_state: bool = False,
output: str = command_arguments.TEXT,
save_initial_state_to=None,
saved_state_project=None,
sequential: bool = False,
source_directories=None,
targets=None,
dot_pyre_directory: Optional[Path] = None,
) -> command_arguments.CommandArguments:
return command_arguments.CommandArguments(
local_configuration=local_configuration,
version=False,
debug=debug,
sequential=sequential,
strict=False,
additional_checks=[],
show_error_traces=False,
output=output,
enable_profiling=enable_profiling,
enable_memory_profiling=enable_memory_profiling,
noninteractive=True,
logging_sections=None,
log_identifier=log_identifier,
logger=None,
targets=targets or [],
use_buck_builder=False,
use_buck_source_database=False,
source_directories=source_directories or [],
filter_directory=None,
buck_mode=None,
no_saved_state=no_saved_state,
search_path=["some_path"],
binary="/foo/binary.exe",
buck_builder_binary=None,
exclude=[],
typeshed="/typeshed",
save_initial_state_to=save_initial_state_to,
load_initial_state_from=load_initial_state_from,
changed_files_path=changed_files_path,
saved_state_project=saved_state_project,
dot_pyre_directory=dot_pyre_directory or Path(".pyre"),
features=features,
python_version="3.6.0",
shared_memory_heap_size=1024 * 1024 * 1024,
)
def mock_configuration(version_hash=None, file_hash=None) -> MagicMock:
configuration = MagicMock()
configuration.project_root = "/root"
configuration.local_root = None
configuration.strict = False
configuration.source_directories = ["."]
configuration.logger = None
configuration.get_number_of_workers = lambda: 5
configuration.search_path = []
configuration.taint_models_path = []
configuration.get_typeshed_respecting_override = lambda: "stub"
configuration.get_version_hash_respecting_override = lambda: version_hash
configuration.file_hash = file_hash
configuration.local_root = None
configuration.autocomplete = False
configuration.dot_pyre_directory = Path(".pyre")
configuration.relative_local_root = None
configuration.log_directory = ".pyre"
configuration.disabled = False
configuration.get_python_version = lambda: configuration_module.PythonVersion(
major=3, minor=6, micro=0
)
configuration.shared_memory = SharedMemory(heap_size=1024 * 1024 * 1024)
return configuration
def mock_incremental_command(cfg: configuration_module.Configuration) -> Incremental:
arguments = mock_arguments()
analysis_directory = AnalysisDirectory(
configuration_module.SimpleSearchPathElement(".")
)
return Incremental(
arguments,
original_directory="/original/directory",
configuration=cfg,
analysis_directory=analysis_directory,
nonblocking=False,
incremental_style=IncrementalStyle.FINE_GRAINED,
no_start_server=False,
no_watchman=False,
)
| [
"[email protected]"
] | |
7281a1d2b651d0b7b363e815d9c84fe19146565a | 116aadef9866be33782c6cbd06901703728295cc | /tests/conftest.py | 3b640c81e07bc3e0f36f219c27129fc10614ecc5 | [
"Apache-2.0"
] | permissive | dracos/datasette-tiles | 9c4cf6ca683a703f08e1f69cbc4def3694d7bcc3 | f7aa1a49df23584445cf154ad0e3e6d750965b15 | refs/heads/main | 2023-02-28T22:33:08.331682 | 2021-02-03T22:21:57 | 2021-02-03T22:21:57 | 335,932,265 | 0 | 0 | null | 2021-02-04T11:24:40 | 2021-02-04T11:24:39 | null | UTF-8 | Python | false | false | 2,211 | py | import asyncio
from datasette.app import Datasette
from datasette.database import Database
import pytest
CREATE_TILES_TABLE = "CREATE TABLE tiles (zoom_level integer, tile_column integer, tile_row integer, tile_data blob)"
CREATE_METADATA_TABLE = "CREATE TABLE metadata (name text, value text)"
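# The two statements above mirror the minimal MBTiles layout (a "tiles" table keyed by
# zoom/column/row plus a "metadata" name/value table); the fixtures below create and
# populate them in each in-memory test database.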
@pytest.fixture(scope="module")
async def ds():
datasette = Datasette([], memory=True)
await datasette.invoke_startup()
return datasette
# Needed because of https://stackoverflow.com/a/56238383
# to allow me to use scope="module" on the ds() fixture below
@pytest.fixture(scope="module")
def event_loop():
loop = asyncio.get_event_loop()
yield loop
loop.close()
@pytest.fixture(scope="module")
async def ds_tiles_stack():
return await ds_tiles()
@pytest.fixture(scope="module")
async def ds_tiles_stack_with_stack_order():
return await ds_tiles(
{
"plugins": {
"datasette-tiles": {"tiles-stack-order": ["world", "country", "city2"]}
}
}
)
async def ds_tiles(metadata=None):
datasette = Datasette([], metadata=metadata or {}, memory=True)
for db_name, tiles in (
("world", [[1, 1, 1]]),
("country", [[1, 1, 2], [1, 2, 2]]),
("city1", [[1, 2, 2]]),
("city2", [[1, 3, 3]]),
):
db = datasette.add_database(Database(datasette, memory_name=db_name))
# During test runs database tables may exist already
if await db.table_exists("tiles"):
continue
await db.execute_write(CREATE_TILES_TABLE, block=True)
await db.execute_write(CREATE_METADATA_TABLE, block=True)
for pair in (("name", db_name), ("format", "png")):
await db.execute_write(
"insert into metadata (name, value) values (?, ?)",
pair,
block=True,
)
for tile in tiles:
await db.execute_write(
"insert into tiles (zoom_level, tile_column, tile_row, tile_data) values (?, ?, ?, ?)",
tile + [db_name + ":" + "/".join(map(str, tile))],
block=True,
)
await datasette.invoke_startup()
return datasette
| [
"[email protected]"
] | |
292b77898a0865b25cff82a3cada848553b42769 | 1e37cc605d52a8264329ba23e2bc7a74408b0f22 | /chrome_from_url.py | 11afc3ec2f015d2f264d0fd1b2ff6d06e5837a79 | [] | no_license | maasano/download_file_from_web | 7df45639a26f9911bb0ae42727d7a5acfb396659 | fa6ce6d57f30c71e9ccd982a3e6f6e314187c8c6 | refs/heads/master | 2022-03-06T06:19:47.662728 | 2019-12-06T04:07:42 | 2019-12-06T04:07:42 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 558 | py | # URL でダウンロード
from selenium import webdriver
import os
import time
import csv
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
import urllib.request
driver = webdriver.Chrome()
url = "https://pythonchannel.com/media/codecamp/201908-/scrape-test.html"
driver.get(url)
file_url = driver.find_element_by_tag_name("a").get_attribute("href")
urllib.request.urlretrieve(file_url, "my_download.csv")
time.sleep(3)
#driver.close() | [
"[email protected]"
] | |
99613f69b3d7edc99a5dc7a75e483837ae852e7c | 0fa00ecf2dd671515dc001d4b14049ec6a0c1f1c | /custom_components/spook/ectoplasms/repairs/services/create.py | 6d46089ce9dab0e2aa31caa286d4f9116eebb2cd | [
"Unlicense"
] | permissive | bacco007/HomeAssistantConfig | d91a5368344f50abbea881bd1e6dfc57a0e456ca | 8548d9999ddd54f13d6a307e013abcb8c897a74e | refs/heads/master | 2023-08-30T07:07:33.571959 | 2023-08-29T20:00:00 | 2023-08-29T20:00:00 | 230,585,631 | 98 | 16 | Unlicense | 2023-09-09T08:28:39 | 2019-12-28T09:05:02 | Python | UTF-8 | Python | false | false | 1,658 | py | """Spook - Not your homie."""
from __future__ import annotations
from typing import TYPE_CHECKING
import voluptuous as vol
from homeassistant.components.repairs import DOMAIN as REPAIRS_DOMAIN
from homeassistant.helpers import config_validation as cv, issue_registry as ir
from homeassistant.util.ulid import ulid
from ....const import DOMAIN
from ....services import AbstractSpookService
if TYPE_CHECKING:
from homeassistant.core import ServiceCall
class SpookService(AbstractSpookService):
"""Home Assistant Repairs service to create your own issues."""
domain = REPAIRS_DOMAIN
service = "create"
schema = {
vol.Required("title"): cv.string,
vol.Required("description"): cv.string,
vol.Optional("issue_id", default=ulid): cv.string,
vol.Optional("domain", default=DOMAIN): cv.string,
vol.Optional("severity", default=ir.IssueSeverity.WARNING): vol.Coerce(
ir.IssueSeverity,
),
vol.Optional("persistent", default=False): cv.boolean,
}
async def async_handle_service(self, call: ServiceCall) -> None:
"""Handle the service call."""
ir.async_create_issue(
self.hass,
domain=DOMAIN,
is_fixable=True,
is_persistent=call.data["persistent"],
issue_domain=call.data["domain"],
issue_id=f"user_{call.data['issue_id']}",
severity=call.data["severity"],
translation_key="user_issue",
translation_placeholders={
"title": call.data["title"],
"description": call.data["description"],
},
)
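# Illustrative Home Assistant service call for this handler (assumed usage, not part
# of this module):
#
#   service: repairs.create
#   data:
#     title: "Example issue"
#     description: "Raised from an automation for testing"
#     severity: warning
#     persistent: false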
| [
"[email protected]"
] | |
4057875c237527412532aabab219ffd580c79c80 | 25dda94672497e3287a7403e283fb279ad171b79 | /boj/11286 절대값 힙.py | 339ce72781b6d758e8e55eb60f6416475b82351b | [] | no_license | woorud/Algorithm | c94b844e8c96a446c5fdee5c0abb159bfee384d7 | f5b8e3cf0aea7fc4400e6f5bb0c1531fad93e541 | refs/heads/master | 2023-02-23T13:53:28.645036 | 2021-01-29T12:24:23 | 2021-01-29T12:24:23 | 230,908,288 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 310 | py | import heapq, sys
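# BOJ 11286 "absolute-value heap": push (|x|, x) so heapq orders by absolute value
# and breaks ties toward the smaller (more negative) number; an input of 0 pops
# and prints the stored value, or 0 when the heap is empty.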
n = int(sys.stdin.readline())
absheap = []
heapq.heapify(absheap)
for i in range(n):
num = int(sys.stdin.readline())
if num != 0:
heapq.heappush(absheap, (abs(num), num))
else:
try:
print(heapq.heappop(absheap)[1])
        except IndexError:  # nothing to pop: the heap is empty
print(0) | [
"[email protected]"
] | |
cd8f4cd99721da1d6f5fef038c49ebdd3bbe34f1 | 1010e13cab98cb45a39f3db51d9ccb177fa8b87f | /tools/build_pytorch_libs.py | 1c4f16670c04d5f79b9dd767f2d7be42b567ac63 | [
"BSD-3-Clause",
"BSD-2-Clause",
"LicenseRef-scancode-generic-cla",
"Apache-2.0"
] | permissive | junjun315/pytorch | 2e7dbf60ae63da5e09bf5c8c9fea75f5fed4e940 | 35f1e28ba9b2f7b10198c2365c6437544de5acb8 | refs/heads/master | 2020-04-13T03:53:42.317924 | 2019-01-29T03:02:27 | 2019-01-29T03:02:27 | 162,945,027 | 0 | 0 | NOASSERTION | 2019-01-29T03:02:12 | 2018-12-24T03:18:09 | C++ | UTF-8 | Python | false | false | 11,686 | py | from .setup_helpers.env import (IS_ARM, IS_DARWIN, IS_LINUX, IS_PPC, IS_WINDOWS,
DEBUG, REL_WITH_DEB_INFO, USE_MKLDNN,
check_env_flag, check_negative_env_flag, hotpatch_build_env_vars)
import os
import sys
import distutils
import distutils.sysconfig
from distutils.file_util import copy_file
from distutils.dir_util import copy_tree
from subprocess import check_call, call, check_output
from distutils.version import LooseVersion
from .setup_helpers.cuda import USE_CUDA, CUDA_HOME
from .setup_helpers.dist_check import USE_DISTRIBUTED, USE_GLOO_IBVERBS
from .setup_helpers.nccl import USE_SYSTEM_NCCL, NCCL_INCLUDE_DIR, NCCL_ROOT_DIR, NCCL_SYSTEM_LIB
from .setup_helpers.rocm import ROCM_HOME, ROCM_VERSION, USE_ROCM
from .setup_helpers.nnpack import USE_NNPACK
from .setup_helpers.qnnpack import USE_QNNPACK
from .setup_helpers.cudnn import CUDNN_INCLUDE_DIR, CUDNN_LIB_DIR, CUDNN_LIBRARY, USE_CUDNN
from pprint import pprint
from glob import glob
import multiprocessing
import shutil
def which(thefile):
path = os.environ.get("PATH", os.defpath).split(os.pathsep)
for dir in path:
fname = os.path.join(dir, thefile)
fnames = [fname]
if IS_WINDOWS:
exts = os.environ.get('PATHEXT', '').split(os.sep)
fnames += [fname + ext for ext in exts]
for name in fnames:
if (os.path.exists(name) and os.access(name, os.F_OK | os.X_OK)
and not os.path.isdir(name)):
return name
return None
def cmake_version(cmd):
for line in check_output([cmd, '--version']).decode('utf-8').split('\n'):
if 'version' in line:
return LooseVersion(line.strip().split(' ')[2])
raise Exception('no version found')
def get_cmake_command():
cmake_command = 'cmake'
if IS_WINDOWS:
return cmake_command
cmake3 = which('cmake3')
if cmake3 is not None:
cmake = which('cmake')
if cmake is not None:
bare_version = cmake_version(cmake)
if bare_version < LooseVersion("3.5.0") and cmake_version(cmake3) > bare_version:
cmake_command = 'cmake3'
return cmake_command
def cmake_defines(lst, **kwargs):
for key in sorted(kwargs.keys()):
value = kwargs[key]
if value is not None:
lst.append('-D{}={}'.format(key, value))
# Ninja
try:
import ninja
USE_NINJA = True
except ImportError:
USE_NINJA = False
base_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
torch_lib_dir = base_dir + "/torch/lib"
install_dir = base_dir + "/torch/lib/tmp_install"
build_type = "Release"
if DEBUG:
build_type = "Debug"
elif REL_WITH_DEB_INFO:
build_type = "RelWithDebInfo"
def mkdir_p(dir):
try:
os.makedirs(dir)
except OSError:
pass
def run_cmake(version,
cmake_python_library,
build_python,
build_test,
build_dir):
cmake_args = [
get_cmake_command(),
base_dir
]
if USE_NINJA:
cmake_args.append('-GNinja')
try:
import numpy as np
NUMPY_INCLUDE_DIR = np.get_include()
USE_NUMPY = True
except ImportError:
USE_NUMPY = False
NUMPY_INCLUDE_DIR = None
cflags = os.getenv('CFLAGS') or ""
ldflags = os.getenv('LDFLAGS') or ""
if IS_DARWIN:
ldflags += " -Wl,-rpath,@loader_path"
elif USE_ROCM:
ldflags += " -Wl,-rpath,\\\\\\$ORIGIN"
elif IS_WINDOWS:
cflags += " /EHa"
else:
ldflags += " -Wl,-rpath,$ORIGIN"
# XXX - our cmake file sometimes looks at the system environment
# and not cmake flags!
# you should NEVER add something to this list. It is bad practice to
# have cmake read the environment
my_env = os.environ.copy()
my_env['PYTORCH_PYTHON'] = sys.executable
if USE_CUDNN:
my_env['CUDNN_LIBRARY'] = CUDNN_LIBRARY
my_env['CUDNN_INCLUDE_DIR'] = CUDNN_INCLUDE_DIR
if USE_CUDA:
my_env['CUDA_BIN_PATH'] = CUDA_HOME
mkdir_p(install_dir)
mkdir_p(build_dir)
cmake_defines(
cmake_args,
PYTHON_EXECUTABLE=escape_path(sys.executable),
PYTHON_LIBRARY=escape_path(cmake_python_library),
PYTHON_INCLUDE_DIR=escape_path(distutils.sysconfig.get_python_inc()),
BUILDING_WITH_TORCH_LIBS="ON",
TORCH_BUILD_VERSION=version,
CMAKE_BUILD_TYPE=build_type,
BUILD_TORCH="ON",
BUILD_PYTHON=build_python,
BUILD_SHARED_LIBS=os.getenv("BUILD_SHARED_LIBS") or "ON",
BUILD_BINARY=check_env_flag('BUILD_BINARY'),
BUILD_TEST=build_test,
INSTALL_TEST=build_test,
BUILD_CAFFE2_OPS=not check_negative_env_flag('BUILD_CAFFE2_OPS'),
ONNX_NAMESPACE=os.getenv("ONNX_NAMESPACE") or "onnx_torch",
USE_CUDA=USE_CUDA,
USE_DISTRIBUTED=USE_DISTRIBUTED,
USE_FBGEMM=not (check_env_flag('NO_FBGEMM') or check_negative_env_flag('USE_FBGEMM')),
USE_NUMPY=USE_NUMPY,
NUMPY_INCLUDE_DIR=escape_path(NUMPY_INCLUDE_DIR),
USE_SYSTEM_NCCL=USE_SYSTEM_NCCL,
NCCL_INCLUDE_DIR=NCCL_INCLUDE_DIR,
NCCL_ROOT_DIR=NCCL_ROOT_DIR,
NCCL_SYSTEM_LIB=NCCL_SYSTEM_LIB,
CAFFE2_STATIC_LINK_CUDA=check_env_flag('USE_CUDA_STATIC_LINK'),
USE_ROCM=USE_ROCM,
USE_NNPACK=USE_NNPACK,
USE_LEVELDB=check_env_flag('USE_LEVELDB'),
USE_LMDB=check_env_flag('USE_LMDB'),
USE_OPENCV=check_env_flag('USE_OPENCV'),
USE_QNNPACK=USE_QNNPACK,
USE_TENSORRT=check_env_flag('USE_TENSORRT'),
USE_FFMPEG=check_env_flag('USE_FFMPEG'),
USE_SYSTEM_EIGEN_INSTALL="OFF",
USE_MKLDNN=USE_MKLDNN,
NCCL_EXTERNAL=USE_CUDA,
CMAKE_INSTALL_PREFIX=install_dir,
CMAKE_C_FLAGS=cflags,
CMAKE_CXX_FLAGS=cflags,
CMAKE_EXE_LINKER_FLAGS=ldflags,
CMAKE_SHARED_LINKER_FLAGS=ldflags,
THD_SO_VERSION="1",
CMAKE_PREFIX_PATH=os.getenv('CMAKE_PREFIX_PATH') or distutils.sysconfig.get_python_lib(),
BLAS=os.getenv('BLAS'),
CUDA_NVCC_EXECUTABLE=escape_path(os.getenv('CUDA_NVCC_EXECUTABLE')),
USE_REDIS=os.getenv('USE_REDIS'),
USE_GLOG=os.getenv('USE_GLOG'),
USE_GFLAGS=os.getenv('USE_GFLAGS'))
if USE_GLOO_IBVERBS:
cmake_defines(cmake_args, USE_IBVERBS="1", USE_GLOO_IBVERBS="1")
expected_wrapper = '/usr/local/opt/ccache/libexec'
if IS_DARWIN and os.path.exists(expected_wrapper):
cmake_defines(cmake_args,
CMAKE_C_COMPILER="{}/gcc".format(expected_wrapper),
CMAKE_CXX_COMPILER="{}/g++".format(expected_wrapper))
pprint(cmake_args)
printenv()
check_call(cmake_args, cwd=build_dir, env=my_env)
def copy_files(build_test):
def copy_all(pattern, dst):
for file in glob(pattern):
if os.path.isdir(file):
copy_tree(file, dst, update=True)
else:
copy_file(file, dst, update=True)
shutil.rmtree(install_dir + '/lib/cmake', ignore_errors=True)
shutil.rmtree(install_dir + '/lib/python', ignore_errors=True)
copy_all(install_dir + '/lib/*', torch_lib_dir)
if os.path.exists(install_dir + '/lib64'):
copy_all(install_dir + '/lib64/*', torch_lib_dir)
copy_file(base_dir + '/aten/src/THNN/generic/THNN.h', torch_lib_dir, update=True)
copy_file(base_dir + '/aten/src/THCUNN/generic/THCUNN.h', torch_lib_dir, update=True)
copy_tree(install_dir + '/include', torch_lib_dir + '/include', update=True)
if os.path.exists(install_dir + '/bin/'):
copy_all(install_dir + '/bin/*', torch_lib_dir)
if build_test:
# Copy the test files to pytorch/caffe2 manually
# They were built in pytorch/torch/lib/tmp_install/test
# Why do we do this? So, setup.py has this section called 'package_data' which
# you need to specify to include non-default files (usually .py files).
# package_data takes a map from 'python package' to 'globs of files to
# include'. By 'python package', it means a folder with an __init__.py file
# that's not excluded in the find_packages call earlier in setup.py. So to
# include our cpp_test into the site-packages folder in
# site-packages/caffe2/cpp_test, we have to copy the cpp_test folder into the
# root caffe2 folder and then tell setup.py to include them. Having another
# folder like site-packages/caffe2_cpp_test would also be possible by adding a
# caffe2_cpp_test folder to pytorch with an __init__.py in it.
mkdir_p(base_dir + '/caffe2/cpp_test/')
copy_tree(install_dir + '/test', base_dir + '/caffe2/cpp_test', update=True)
def build_caffe2(version,
cmake_python_library,
build_python,
rerun_cmake,
build_dir):
build_test = not check_negative_env_flag('BUILD_TEST')
cmake_cache_file = 'build/CMakeCache.txt'
if rerun_cmake and os.path.isfile(cmake_cache_file):
os.remove(cmake_cache_file)
if not os.path.exists(cmake_cache_file) or (USE_NINJA and not os.path.exists('build/build.ninja')):
run_cmake(version,
cmake_python_library,
build_python,
build_test,
build_dir)
if IS_WINDOWS:
if USE_NINJA:
# sccache will fail if all cores are used for compiling
j = max(1, multiprocessing.cpu_count() - 1)
check_call(['cmake', '--build', '.', '--target', 'install', '--config', build_type, '--', '-j', str(j)],
cwd=build_dir)
else:
check_call(['msbuild', 'INSTALL.vcxproj', '/p:Configuration={}'.format(build_type)],
cwd=build_dir)
else:
if USE_NINJA:
check_call(['ninja', 'install'], cwd=build_dir)
else:
max_jobs = os.getenv('MAX_JOBS', str(multiprocessing.cpu_count()))
check_call(['make', '-j', str(max_jobs), 'install'], cwd=build_dir)
# in cmake, .cu compilation involves generating certain intermediates
# such as .cu.o and .cu.depend, and these intermediates finally get compiled
# into the final .so.
# Ninja updates build.ninja's timestamp after all dependent files have been built,
# and re-kicks cmake on incremental builds if any of the dependent files
# have a timestamp newer than build.ninja's timestamp.
# There is a cmake bug with the Ninja backend, where the .cu.depend files
# are still compiling by the time the build.ninja timestamp is updated,
# so the .cu.depend file's newer timestamp is screwing with ninja's incremental
# build detector.
# This line works around that bug by manually updating the build.ninja timestamp
# after the entire build is finished.
if os.path.exists('build/build.ninja'):
os.utime('build/build.ninja', None)
if build_python:
for proto_file in glob('build/caffe2/proto/*.py'):
if os.path.sep != '/':
proto_file = proto_file.replace(os.path.sep, '/')
if proto_file != 'build/caffe2/proto/__init__.py':
shutil.copyfile(proto_file, "caffe2/proto/" + os.path.basename(proto_file))
copy_files(build_test)
def printenv():
envs = map(lambda k, v: k + '=' + v + '\n', os.environ.keys(), os.environ.values())
print(''.join(envs))
def escape_path(path):
if os.path.sep != '/' and path is not None:
return path.replace(os.path.sep, '/')
return path
| [
"[email protected]"
] | |
30f9b23d261d536ea04bb46352a57de101437555 | 248793d43cd41e588fe343fe6f7dd82f667d2eb8 | /num4ch.py | 5a15eaf1bf1360318191d485c7cfb5cbe665d42b | [] | no_license | dongzeyuan/Algorithms | 85dfe22abc0ef539f7d9ff4efa205810e80d70cc | a57bfe5a2887947419f5d6deb6988ce94917c286 | refs/heads/master | 2020-03-09T01:03:15.737335 | 2018-04-07T06:53:47 | 2018-04-07T06:53:47 | 128,503,857 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,601 | py | # coding=UTF-8
import wx
class MyFrame(wx.Frame):
    '''A frame for testing how various functions and widgets are used.'''
def __init__(self):
wx.Frame.__init__(self, None, -1, 'TestFrame', size=(400, 300))
panel = wx.Panel(self, -1)
        # Create a text control widget
        # 4-04: this widget must be stored as self.<name> so the callback below can use it
self.num_txt = wx.TextCtrl(panel, -1, size=(150, 100),
pos=(5, 5), style=wx.TE_MULTILINE | wx.TE_READONLY)
        # Try out the SetValue method
        # Create a button widget
self.num_button = wx.Button(
            panel, -1, "OK", size=(150, 100), pos=(5, 110))
        # Bind the button event to the on_click callback
self.num_button.Bind(wx.EVT_BUTTON, self.on_click)
    # Define the on_click callback; note it needs two parameters, self and event, but my first attempt was wrong:
    # I wanted i to grow by 1 on every click to count clicks, but i was reset on each call, so it always printed 1
    # 4-04: solved the problem by using a global variable
    # 4-04: finally learned how to update the displayed number
def on_click(self, event):
        # Global counter i; it must be declared global so the increment persists between clicks
global i
i += 1
        # Clear the previous value; without Clear() every click would append a new line
self.num_txt.Clear()
self.num_txt.AppendText('%d\n' % i)
if __name__ == "__main__":
    # Initialize the global counter i
i = 0
app = wx.App()
MyFrame().Show(True)
app.MainLoop()
| [
"[email protected]"
] | |
418ab74777c51e9c75e706afd4e9ced9af142f16 | f669b07bf4e1047c7fa4f36983f5d8d9f607a42c | /.metadata/.plugins/org.eclipse.core.resources/.history/c0/20134fb0049000161aace3ce0a3d140b | 4ecac11caf1f115505b487e0511d733ae686192b | [] | no_license | tca85/python | d07b4453b38b5c5a024e2a6f8b8795dac6d928b1 | 436a23250539ef8055f817ada20cb21eab65cc42 | refs/heads/master | 2021-01-10T22:53:01.118480 | 2016-10-24T21:46:59 | 2016-10-24T21:46:59 | 70,340,585 | 1 | 0 | null | null | null | null | ISO-8859-1 | Python | false | false | 1,233 | #!/usr/bin/python
# -*- coding: latin1 -*-
'''
Created on Oct 5, 2016
@author: tca85
'''
# To import a class it is easiest to use the Eclipse shortcuts,
# but the import also follows this pattern:
# from <package>.<file name> import <ClassName>
from pacote.Teste import Teste
from pacote.Arquivo import Arquivo
from pacote.DateTime import DateTime
from pacote.Cadastro import Cadastro
teste = Teste()
lista = teste.multiplicar_range_por_dois(15)
teste.testar_resto_igual_zero(lista, 3)
teste.tamanho_palavra('lsdfjfkjsdfkjas')
print teste.media(lista)
print teste.nome_sistema_operacional()
teste.exemplo_interpolacao()
print teste.inverter_string('thiago')
print teste.exemplo_string_template()
teste.exemplo_argumentos('peso', 10, unidade='k')
#-----------------------------------------------------------------------------
arquivo = Arquivo()
arquivo.criarArquivoTextoeEscrever('teste')
arquivo.mostrarListaArquivoTamanho()
arquivo.criarArquivoTemporario()
arquivo.gravarTextoArquivoCompactado()
#-----------------------------------------------------------------------------
datetime = DateTime()
datetime.testaClasseDateTime()
#-----------------------------------------------------------------------------
| [
"[email protected]"
] | ||
be54f6e5681f2eb17e502fb6f64a2b06c1775e86 | 9b54e3d58447e917a238b85891020c392c4ac601 | /codeup/1013.py | 645162bf88e339a04cea6f30dd165cdc5e16443a | [
"MIT"
] | permissive | love-adela/algorithm-ps | ea0ebcd641a4c309348b389b8618daa83973f4b2 | c92d105d8ad344def001160367115ecf99d81c0d | refs/heads/master | 2023-05-11T03:37:11.750692 | 2023-04-30T17:31:30 | 2023-04-30T17:31:30 | 174,651,672 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 46 | py | a, b = map(int, input().split())
print(a, b)
| [
"[email protected]"
] | |
db3a5e796dccbcd86c59c07f265a266fba9cb209 | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_267/ch66_2020_06_21_19_51_02_942607.py | 0e1afeb82048df4970ad1ba78a24fc6673a561ca | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 166 | py | def lista_sufixos(string):
    lista_sufixos = []
    i = 0
    while i < len(string):
        lista_sufixos.append(string[i+1:])
        i += 1
    return lista_sufixos
| [
"[email protected]"
] | |
04fe48c86770c1775a8f336b73a22b73e71e8ae1 | a9adab93602c472d90a327a466d2cb250bd47788 | /angstromCTF2019/Paint/solve.py | ad3234a32bae12301d55be4a7c6ebefee124f02c | [] | no_license | minmin53/writeups | afc4586e6a9390d7185f2baf99e0aead47bc3ba4 | 4e05be5f0e5eb26a5ee398eb24245e1109ddc056 | refs/heads/master | 2022-05-20T06:58:36.780383 | 2020-04-27T05:06:36 | 2020-04-27T05:06:36 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,636 | py | from Crypto.Util.number import *
secret = 629921607003244034334739296597900783683872903809471621783318441724296155260647861566002145401774841786965516424821133148061140507283116747339148975177513485103967011207217568924993463569559551429141756952018711071204949930416859383037306197953684591391066287527469114753495090054370608519379326915615068308557735119497576999275516623932355604742058855833591651141407379343873413310424307672368844204423176033536465560324264458606570832918771689488513626547477988015235832957445514499444921298913651835294484177694907540420778298030233425343791552742606481998105977335541679798111463675261162481691943108104757462361
pallete = 32317006071311007300714876688669951960444102669715484032130345427524655138867890893197201411522913463688717960921898019494119559150490921095088152386448283120630877367300996091750197750389652106796057638384067568276792218642619756161838094338476170470581645852036305042887575891541065808607552399123930385521914333389668342420684974786564569494856176035326322058077805659331026192708460314150258592864177116725943603718461857357598351152301645904403697613233287231227125684710820209725157101726931323469678542580656697935045997268352998638215525166389437335543602135433229604645318478604952148193555853611059596230656
your_mix = 14317253516668543276504878316838097235650210449758621543536146016892160048656997634541093315774403078357942150970695487937570449270120625898199254439189104072891595263513437420116930684308702803055295267600790477195902538538739117809573391251939794413361184343367694928615752045687223262368136262534778688889202144260002584306527206705616186699377315031757095455954292951059462279988296369935635246644221722025457496936215039008069820514166063271894671978845634968761626636993374291118230179892722513818307254406450607168911057458141649111515924404215975886422961651958216688209696158879621701708955382424640000048217
painting = 17665922529512695488143524113273224470194093921285273353477875204196603230641896039854934719468650093602325707751566466034447988065494130102242572713515917910688574332104680867377750329904425039785453961697828887505197701127086732126907914324992806733394244034438537271953062873710421922341053639880387051921552573241651939698279628619278357238684137922164483956735128373164911380749908774512869223017256152942356111845682044048514917460601214157119487675633689081081818805777951203838578632029105960085810547586385599419736400861419214277678792284994133722491622512615732083564207280344459191773058670866354126043620
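# The variables above appear to follow a Diffie-Hellman-style exchange:
# `pallete` is the public modulus, `your_mix` the other party's public value
# and `secret` the private exponent, so pow(your_mix, secret, pallete)
# recovers the shared key, which is then XORed with `painting` to reveal the flag.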
shared_mix = pow(your_mix, secret, pallete)
print(long_to_bytes(shared_mix ^ painting))
| [
"[email protected]"
] | |
bd0a4edb0505e032600598e3d09c283a811f850c | be0f3dfbaa2fa3d8bbe59229aef3212d032e7dd1 | /Gauss_v45r10p1/Gen/DecFiles/options/51000001.py | 7e66d0a6b9ad05613ad85688a4de506f6035643c | [] | no_license | Sally27/backup_cmtuser_full | 34782102ed23c6335c48650a6eaa901137355d00 | 8924bebb935b96d438ce85b384cfc132d9af90f6 | refs/heads/master | 2020-05-21T09:27:04.370765 | 2018-12-12T14:41:07 | 2018-12-12T14:41:07 | 185,989,173 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 961 | py | # file /home/hep/ss4314/cmtuser/Gauss_v45r10p1/Gen/DecFiles/options/51000001.py generated: Wed, 25 Jan 2017 15:25:34
#
# Event Type: 51000000
#
# ASCII decay Descriptor: e- => ?
#
from Configurables import ParticleGun
from Configurables import MomentumRange
ParticleGun().addTool( MomentumRange )
from GaudiKernel import SystemOfUnits
ParticleGun().MomentumRange.MomentumMin = 1*SystemOfUnits.GeV
from GaudiKernel import SystemOfUnits
ParticleGun().MomentumRange.MomentumMax = 1*SystemOfUnits.GeV
ParticleGun().EventType = 51000001
ParticleGun().ParticleGunTool = "MomentumRange"
ParticleGun().NumberOfParticlesTool = "FlatNParticles"
ParticleGun().MomentumRange.PdgCodes = [ 11 ]
from Configurables import ToolSvc
from Configurables import EvtGenDecay
ToolSvc().addTool( EvtGenDecay )
ToolSvc().EvtGenDecay.UserDecayFile = "$DECFILESROOT/dkfiles/e-,fixP=CaloAcc.dec"
from Gaudi.Configuration import *
importOptions( "$DECFILESROOT/options/CaloAcceptance.py" )
| [
"[email protected]"
] | |
1402215de795362c3bf280285a432acd439ed1a3 | ad13583673551857615498b9605d9dcab63bb2c3 | /output/instances/nistData/atomic/byte/Schema+Instance/NISTXML-SV-IV-atomic-byte-minExclusive-4-3.py | 546cd4d622222e004eb7c096b36aa268f39fa7a8 | [
"MIT"
] | permissive | tefra/xsdata-w3c-tests | 397180205a735b06170aa188f1f39451d2089815 | 081d0908382a0e0b29c8ee9caca6f1c0e36dd6db | refs/heads/main | 2023-08-03T04:25:37.841917 | 2023-07-29T17:10:13 | 2023-07-30T12:11:13 | 239,622,251 | 2 | 0 | MIT | 2023-07-25T14:19:04 | 2020-02-10T21:59:47 | Python | UTF-8 | Python | false | false | 259 | py | from output.models.nist_data.atomic.byte.schema_instance.nistschema_sv_iv_atomic_byte_min_exclusive_4_xsd.nistschema_sv_iv_atomic_byte_min_exclusive_4 import NistschemaSvIvAtomicByteMinExclusive4
obj = NistschemaSvIvAtomicByteMinExclusive4(
value=109
)
| [
"[email protected]"
] | |
17e70d2e41bdd516debf650b514fe200ab485a25 | bcc199a7e71b97af6fbfd916d5a0e537369c04d9 | /leetcode/solved/43_Multiply_Strings/solution.py | 90b9fe407e659af20688b1f0a37aefc2a67638f3 | [] | no_license | sungminoh/algorithms | 9c647e82472905a2c4e505c810b622b734d9d20d | 1389a009a02e90e8700a7a00e0b7f797c129cdf4 | refs/heads/master | 2023-05-01T23:12:53.372060 | 2023-04-24T06:34:12 | 2023-04-24T06:34:12 | 87,406,513 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,935 | py | #! /usr/bin/env python
# -*- coding: utf-8 -*-
# vim:fenc=utf-8
#
# Copyright © 2020 sungminoh <[email protected]>
#
# Distributed under terms of the MIT license.
"""
Given two non-negative integers num1 and num2 represented as strings, return the product of num1 and num2, also represented as a string.
Note: You must not use any built-in BigInteger library or convert the inputs to integer directly.
Example 1:
Input: num1 = "2", num2 = "3"
Output: "6"
Example 2:
Input: num1 = "123", num2 = "456"
Output: "56088"
Constraints:
1 <= num1.length, num2.length <= 200
num1 and num2 consist of digits only.
Both num1 and num2 do not contain any leading zero, except the number 0 itself.
"""
import sys
import itertools
import pytest
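# Approach: grade-school long multiplication on digit strings -- multiply num1
# by each digit of num2 (least significant first), shift the partial product by
# appending zeros, and accumulate the partial products with string addition.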
class Solution:
def multiply(self, num1: str, num2: str) -> str:
if num1 == '0' or num2 == '0':
return '0'
def mul(num, digit):
d = int(digit)
ret = ''
c = 0
for a in reversed(num):
c, n = divmod(int(a)*d + c, 10)
ret += str(n)
if c:
ret += str(c)
return ret[::-1]
def plus(num1, num2):
ret = ''
c = 0
for a, b in itertools.zip_longest(reversed(num1), reversed(num2)):
a = a or '0'
b = b or '0'
c, n = divmod(int(a)+int(b)+c, 10)
ret += str(n)
if c:
ret += str(c)
return ret[::-1]
ret = '0'
for i, d in enumerate(reversed(num2)):
ret = plus(ret, mul(num1, d) + '0'*i)
return ret
@pytest.mark.parametrize('num1, num2, expected', [
("2", "3", "6"),
("123", "456", "56088"),
])
def test(num1, num2, expected):
assert expected == Solution().multiply(num1, num2)
if __name__ == '__main__':
sys.exit(pytest.main(["-s", "-v"] + sys.argv))
| [
"[email protected]"
] | |
e7b70b9f8478cbe00449d124f054ef12ee82c03b | e27333261b8e579564016c71d2061cc33972a8b8 | /.history/api/IR_engine_20210728234203.py | 14e0af8534a9c7489e013618793835e8ca8fd7ab | [] | no_license | Dustyik/NewsTweet_InformationRetrieval | 882e63dd20bc9101cbf48afa6c3302febf1989b1 | d9a6d92b51c288f5bcd21ea1cc54772910fa58f7 | refs/heads/master | 2023-07-01T09:12:53.215563 | 2021-08-12T08:28:33 | 2021-08-12T08:28:33 | 382,780,359 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,671 | py | import pandas as pd
import numpy as np
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.pairwise import cosine_similarity
from sklearn.metrics.pairwise import euclidean_distances
from nltk.stem import PorterStemmer
from nltk.tokenize import word_tokenize
from IPython.display import display
'''
Functions to write:
1. tf-idf with cosine sim/Euclidean distance
- represent terms in each document with its tf-idf weights,
2. VSM with cosine sim/Euclidean distance
3. BIM
4. BM25
5. BERT
'''
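# Illustrative sketch only (an addition, not part of the original plan above): a
# minimal, self-contained BM25 scorer for item 4, assuming documents and the query
# are already tokenized into lists of terms; k1=1.5 and b=0.75 are conventional defaults.
import math
from collections import Counter

def bm25_scores(query_terms, docs, k1=1.5, b=0.75):
    """Return one BM25 score per document in `docs` against `query_terms`."""
    N = len(docs)
    avgdl = sum(len(d) for d in docs) / max(N, 1)
    df = Counter(t for d in docs for t in set(d))
    scores = []
    for d in docs:
        tf = Counter(d)
        score = 0.0
        for t in query_terms:
            if t not in tf:
                continue
            idf = math.log(1 + (N - df[t] + 0.5) / (df[t] + 0.5))
            score += idf * tf[t] * (k1 + 1) / (tf[t] + k1 * (1 - b + b * len(d) / avgdl))
        scores.append(score)
    return scores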
titles_file_path = r"D:\Desktop\IR_term_8\IR-tweets---disaster-\article_titles_stemmed.csv"
tweets_file_path = r"D:\Desktop\IR_term_8\IR-tweets---disaster-\tweets_data_stemmed.csv"
SEARCH_MODELS = {
"tfcs": "Tf-idf w Cosine Sim",
"tfed": "Tf-idf w Euclidean Dist"
}
def returnTweetsBasedOnSearchModel(dataProcessor, articleId, articleText, searchModel):
    # accepts a search model, an article id and the article's title text;
    # returns the most relevant tweets under the chosen model
    if searchModel == SEARCH_MODELS["tfcs"]:
        return dataProcessor.cosineSimilarity.query(articleId, articleText)
class DataProcessor:
def __init__(self):
self.titles_data = pd.read_csv(titles_file_path)
self.tweets_data = pd.read_csv(tweets_file_path)
self.titles_data = self.titles_data.dropna()
self.tweets_data = self.tweets_data.dropna()
self.cosineSimilarity = CosineSimilarity(self.titles_data, self.tweets_data)
print ("Data Processor up and ready...")
class CosineSimilarity:
def __init__(self, titles, tweets, type='tfidf'):
self.titles = titles #contains titles data
self.tweets = tweets #contains tweets data
self.vectorizer = self.change_matrix_type(type)
def get_result(self, return_size):
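        # Row 0 of self.matrix is the query vector: rank every row by cosine
        # similarity to it and keep the `return_size` most similar rows,
        # skipping index 0 (the query itself).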
cos_sim = cosine_similarity(self.matrix, self.matrix)
top_ind = np.flip(np.argsort(cos_sim[0]))[1:return_size+1]
top_id = [list(self.matrix.index)[i] for i in top_ind]
self.result = []
for i in top_id:
filt = self.tweets[self.tweets.tweet==i]
for ind, r in filt.iterrows():
rel = r['relevance_score']
text = r['tweet']
related = r['article_id']
score = 0
# if related==self.query_id and rel>0:
# score = 1
# if related==self.query_id and rel==0:
# score = -1
self.result.append({'tweet_id':i, 'text': text, 'related_article':related})
#'score': score})
def query(self, query_id, query_text, return_size=30):
self.query_id = query_id
term_doc = self.vectorizer.fit_transform([query_text]+list(self.tweets.clean_text)) #returns document term matrix
ind = ["query"] + list(self.tweets.tweet)
        self.matrix = pd.DataFrame(term_doc.toarray(), columns=self.vectorizer.get_feature_names(), index=ind)  # rows are the query followed by the tweets, columns are the vocabulary
self.get_result(return_size)
return pd.DataFrame(self.result)
def change_matrix_type(self, type):
if type == 'tfidf':
return TfidfVectorizer()
elif type == 'dt':
return CountVectorizer() #transforms the entire word matrix into a set of vectors
else:
print('Type is invalid')
def get_matrix(self):
return self.matrix
class EuclideanDistance:
def __init__(self, data, type='tfidf'):
self.data = data
self.change_matrix_type(type)
self.matrix = None
def get_result(self, return_size):
euclidean = euclidean_distances(self.matrix.values[1:], [self.matrix.values[0]])
top_ind = np.argsort(euclidean.T[0])[:return_size]
top_id = [list(self.matrix.index)[i] for i in top_ind]
# print(sorted(euclidean[:20]),top_10_ind ,top_10_id)
self.result = []
for i in top_id:
filt = self.data[self.data.document==i]
for ind, r in filt.iterrows():
rel = r['rel']
text = r['text']
related = r['topic']
score = 0
if related==self.query_id and rel>0:
score = 1
if related==self.query_id and rel==0:
score = -1
self.result.append({'tweet_id':i, 'text': text, 'related_article':related,'score': score})
def query(self, query_id, query_text, return_size=10):
self.query_id = query_id
term_doc = self.vec.fit_transform([query_text]+list(self.data.clean_text))
ind = ['query'] + list(self.data.document)
self.matrix = pd.DataFrame(term_doc.toarray(), columns=self.vec.get_feature_names(), index=ind)
self.get_result(return_size)
return pd.DataFrame(self.result)
def change_matrix_type(self, type):
if type == 'tfidf':
self.vec = TfidfVectorizer()
elif type == 'dt':
self.vec = CountVectorizer()
else:
print('Type is invalid')
def get_matrix(self):
return self.matrix
def getArticleId(dataProcessor, articleTitle):
    for _, row in dataProcessor.titles_data.iterrows():
        if row["title"] == articleTitle:
            return row["id"]
'''
sample_query_id = "f7ca322d-c3e8-40d2-841f-9d7250ac72ca"
sample_query_text = "Worcester breakfast club for veterans gives hunger its marching orders"
cosine_similarity_obj = CosineSimilarity(titles = titles, tweets = tweets)
result = cosine_similarity_obj.query(sample_query_id, sample_query_text)
print (display(result.head()))
Test Titles:
f7ca322d-c3e8-40d2-841f-9d7250ac72ca Worcester breakfast club for veterans gives hunger its marching orders
609772bc-0672-4db5-8516-4c025cfd54ca Jumpshot Gives Marketers Renewed Visibility Into Paid and Organic Keywords With Launch of Jumpshot Elite
1aa9d1b0-e6ba-4a48-ad0c-66552d896aac The Return Of The Nike Air Max Sensation Has 80’s Babies Hyped!
719699f9-47be-4bc7-969b-b53a881c95ae This New Dating App Will Ruin Your Internet Game
'''
| [
"[email protected]"
] | |
9c09d0d031676f69bb99df3784b5c7cc3a91d70e | 00657ecc75e0529f5b77759112398bdb11e612bb | /Python3.6/264-Py3-M-Ugly Number II.py | 8930b07936863fb50c031ac463f4cd020151073e | [] | no_license | Hidenver2016/Leetcode | da949cd17f8e29d6007b492719bbc97418ae9cb7 | 1379a6dc2400751ecf79ccd6ed401a1fb0d78046 | refs/heads/master | 2021-07-18T14:46:00.986614 | 2020-05-08T05:02:20 | 2020-05-08T05:02:20 | 152,190,601 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,461 | py | # -*- coding: utf-8 -*-
"""
Created on Mon Mar 18 22:41:55 2019
@author: hjiang
"""
"""
Write a program to find the n-th ugly number.
Ugly numbers are positive numbers whose prime factors only include 2, 3, 5.
Example:
Input: n = 10
Output: 12
Explanation: 1, 2, 3, 4, 5, 6, 8, 9, 10, 12 is the sequence of the first 10 ugly numbers.
Note:
1 is typically treated as an ugly number.
n does not exceed 1690.
https://blog.csdn.net/fuxuemingzhu/article/details/49231615
Every ugly number is generated from 1 by repeatedly multiplying by 2, 3 or 5.
Sorting the generated numbers yields the sequence; a set (or min-heap) keeps them
ordered automatically, so the smallest element taken out each time is the next
ugly number, from which further ugly numbers are generated in turn.
The candidates can be split into the following three groups:
(1) 1×2, 2×2, 3×2, 4×2, 5×2, …
(2) 1×3, 2×3, 3×3, 4×3, 5×3, …
(3) 1×5, 2×5, 3×5, 4×5, 5×5, …
"""
class Solution(object):
def nthUglyNumber(self, n):
"""
:type n: int
:rtype: int
"""
if n < 0:
return 0
dp = [1] * n
index2, index3, index5 = 0, 0, 0
        for i in range(1, n):  # compute each value in increasing order, so dp stays sorted as it is built
dp[i] = min(2 * dp[index2], 3 * dp[index3], 5 * dp[index5])
if dp[i] == 2 * dp[index2]: index2 += 1
if dp[i] == 3 * dp[index3]: index3 += 1
if dp[i] == 5 * dp[index5]: index5 += 1
return dp[n - 1]
| [
"[email protected]"
] | |
8143ad2895c961ddd4a0d4b7b09c61ab15f705df | 799a8605e28118da863079f0924cd93974221c3c | /src/ralph/account/admin.py | 341a44f6d5b3666c748d8458f48daf0ba7e4f22c | [
"Apache-2.0"
] | permissive | damjanek/ralph | 31a2fae13e2608bcf9f13853199cfc00ba6db317 | 728e1c17ea8a70600928a59d5ec17a964063485d | refs/heads/master | 2021-01-24T05:06:34.308524 | 2013-02-20T10:26:47 | 2013-02-20T10:26:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,980 | py | # -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from django.contrib import admin
from django.contrib.auth.admin import UserAdmin, GroupAdmin
from django.contrib.auth.models import User, Group
from django.utils.translation import ugettext_lazy as _
from lck.django.activitylog.admin import IPInline, UserAgentInline
from lck.django.common.admin import ForeignKeyAutocompleteTabularInline
from lck.django.profile.admin import ProfileInlineFormSet
from tastypie.models import ApiKey
from ralph.account.models import BoundPerm, Profile
class ProfileInline(admin.StackedInline):
model = Profile
readonly_fields = ('last_active',)
max_num = 1
can_delete = False
class ProfileBoundPermInline(ForeignKeyAutocompleteTabularInline):
model = BoundPerm
exclude = ['created', 'modified', 'created_by', 'modified_by', 'role',
'group']
related_search_fields = {
'venture': ['^name'],
}
formset = ProfileInlineFormSet
def __init__(self, parent_model, admin_site):
self.fk_name = 'profile'
super(ProfileBoundPermInline, self).__init__(Profile, admin_site)
class ProfileIPInline(IPInline):
formset = ProfileInlineFormSet
def __init__(self, parent_model, admin_site):
self.fk_name = 'profile'
super(ProfileIPInline, self).__init__(Profile, admin_site)
class ProfileUserAgentInline(UserAgentInline):
formset = ProfileInlineFormSet
def __init__(self, parent_model, admin_site):
self.fk_name = 'profile'
super(ProfileUserAgentInline, self).__init__(Profile, admin_site)
class ApiKeyInline(admin.StackedInline):
model = ApiKey
readonly_fields = ('created',)
extra = 0
class ProfileAdmin(UserAdmin):
def groups_show(self):
return "<br> ".join([g.name for g in self.groups.order_by('name')])
groups_show.allow_tags = True
groups_show.short_description = _("groups")
inlines = [
ProfileInline, ProfileBoundPermInline, ApiKeyInline,
ProfileIPInline, ProfileUserAgentInline,
]
list_display = ('username', 'email', 'first_name', 'last_name',
groups_show, 'is_staff', 'is_active')
list_filter = ('is_staff', 'is_superuser', 'is_active', 'groups',)
save_on_top = True
search_fields = ('username', 'first_name', 'last_name',
'email', 'profile__nick')
admin.site.unregister(User)
admin.site.register(User, ProfileAdmin)
class GroupBoundPermInline(ForeignKeyAutocompleteTabularInline):
model = BoundPerm
exclude = ['created', 'modified', 'created_by', 'modified_by', 'role',
'profile']
related_search_fields = {
'venture': ['^name'],
}
class CustomGroupAdmin(GroupAdmin):
save_on_top = True
inlines = [GroupBoundPermInline]
admin.site.unregister(Group)
admin.site.register(Group, CustomGroupAdmin)
| [
"[email protected]"
] | |
e0a024431ad980f1924a8f5527c9a6124af8d894 | a9c3e212f86acdbc84ba57357194e8f11c844535 | /catalogue_management/migrations/0005_auto_20170805_1824.py | 7f5911b81aa8bf4c4485c57fcc7aa45c5711b848 | [] | no_license | bitapardaz/carwash | bde4635bda1f1fa51409c2454e27aca84c2bffa0 | 0a10954eae44df7341372b5f3def652e512538b0 | refs/heads/master | 2021-01-15T13:34:31.198300 | 2017-08-23T11:35:33 | 2017-08-23T11:35:33 | 99,678,532 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 414 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('catalogue_management', '0004_auto_20170727_0856'),
]
operations = [
migrations.AlterField(
model_name='service',
name='price',
field=models.IntegerField(default=0),
),
]
| [
"[email protected]"
] | |
9544cbbc6bb48a09e347d92844c17726d2e71c59 | 41e2cf24f0ff3a11a98bb00e03c598dde35452c4 | /reportview/migrations/0011_auto_20180727_1857.py | 1460d461da8f7a8e002282c032bb1794a19e8855 | [] | no_license | anushamokashi/mob | f5dbedc729073092f94323feca6d95dee24087a2 | 37bc0eb033bc23d37e9d4fb9bb8b2b456553ff7f | refs/heads/master | 2020-04-24T08:36:56.008212 | 2019-02-21T09:09:04 | 2019-02-21T09:09:04 | 171,810,613 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 475 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.13 on 2018-07-27 13:27
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('reportview', '0010_auto_20180725_1515'),
]
operations = [
migrations.RenameField(
model_name='reportprintformataction',
old_name='JasperFile',
new_name='htmlfile',
),
]
| [
"[email protected]"
] | |
9466b21de1d9b7a2824a0e619a7509c314f821cf | 9edaf93c833ba90ae9a903aa3c44c407a7e55198 | /bpmn/models/data_output_association.py | c0dd7b339bab9f4979a5cb5433bfb5f15dd78b02 | [] | no_license | tefra/xsdata-samples | c50aab4828b8c7c4448dbdab9c67d1ebc519e292 | ef027fe02e6a075d8ed676c86a80e9647d944571 | refs/heads/main | 2023-08-14T10:31:12.152696 | 2023-07-25T18:01:22 | 2023-07-25T18:01:22 | 222,543,692 | 6 | 1 | null | 2023-06-25T07:21:04 | 2019-11-18T21:00:37 | Python | UTF-8 | Python | false | false | 346 | py | from dataclasses import dataclass
from .t_data_output_association import TDataOutputAssociation
__NAMESPACE__ = "http://www.omg.org/spec/BPMN/20100524/MODEL"
@dataclass
class DataOutputAssociation(TDataOutputAssociation):
class Meta:
name = "dataOutputAssociation"
namespace = "http://www.omg.org/spec/BPMN/20100524/MODEL"
| [
"[email protected]"
] |