Dataset columns (one row per source file; ranges are min–max; ⌀ marks a nullable field):

  blob_id               string, 40 chars
  directory_id          string, 40 chars
  path                  string, 3–616 chars
  content_id            string, 40 chars
  detected_licenses     list, 0–112 entries
  license_type          string, 2 classes
  repo_name             string, 5–115 chars
  snapshot_id           string, 40 chars
  revision_id           string, 40 chars
  branch_name           string, 777 classes
  visit_date            timestamp[us], 2015-08-06 10:31:46 – 2023-09-06 10:44:38
  revision_date         timestamp[us], 1970-01-01 02:38:32 – 2037-05-03 13:00:00
  committer_date        timestamp[us], 1970-01-01 02:38:32 – 2023-09-06 01:08:06
  github_id             int64, 4.92k – 681M, ⌀
  star_events_count     int64, 0 – 209k
  fork_events_count     int64, 0 – 110k
  gha_license_id        string, 22 classes
  gha_event_created_at  timestamp[us], 2012-06-04 01:52:49 – 2023-09-14 21:59:50, ⌀
  gha_created_at        timestamp[us], 2008-05-22 07:58:19 – 2023-08-21 12:35:19, ⌀
  gha_language          string, 149 classes
  src_encoding          string, 26 classes
  language              string, 1 class
  is_vendor             bool, 2 classes
  is_generated          bool, 2 classes
  length_bytes          int64, 3 – 10.2M
  extension             string, 188 classes
  content               string, 3 – 10.2M chars
  authors               list, 1 entry
  author_id             string, 1–132 chars

The rows below list these fields in the same order, separated by "|", with the raw file text carried in the content field.
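A minimal sketch of how rows with this schema could be inspected, assuming they are exposed as a Hugging Face-style dataset (the dataset name below is a placeholder, not given on this page):

from datasets import load_dataset

# placeholder dataset name -- substitute the real one
ds = load_dataset("org/python-source-files", split="train", streaming=True)
for row in ds.take(3):
    # provenance fields plus the raw file text in the content column
    print(row["repo_name"], row["path"], row["license_type"], row["length_bytes"])
    print(row["content"][:200])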
6827cbf6518d397d25b1759ecbab6d3cf03cb7f1 | f2b4be9a933aa024a7934ab9758a0b29816e74d3 | /Interfaces/API/NewInterface/Tests/Test_DDSim.py | d81a22add8109fbab36d9483992e42f789235e2f | []
| no_license | hamzazafar/ILCDIRAC | 84c24a4b65e75d7df55f91c3601867cc45990ee6 | 6fa2b7b130b6248afeb7ae77d42502f2f72908aa | refs/heads/master | 2020-03-25T03:39:54.444975 | 2017-07-28T10:51:18 | 2017-11-23T14:02:08 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,345 | py | #!/usr/local/env python
"""
Test DDSim module
"""
import inspect
import unittest
from mock import create_autospec, patch, MagicMock as Mock
from DIRAC import gLogger, S_OK, S_ERROR
from ILCDIRAC.Interfaces.API.NewInterface.Applications import DDSim
from ILCDIRAC.Tests.Utilities.GeneralUtils import assertEqualsImproved, assertDiracFailsWith, \
assertDiracSucceeds
__RCSID__ = "$Id$"
MODULE_NAME = 'ILCDIRAC.Interfaces.API.NewInterface.Applications.DDSim'
gLogger.setLevel("DEBUG")
gLogger.showHeaders(True)
#pylint: disable=protected-access
class DDSimTestCase( unittest.TestCase ):
""" Base class for the DDSim test cases
"""
def setUp(self):
"""set up the objects"""
self.dds = DDSim( {} )
def test_setrandomseed( self ):
self.assertFalse( self.dds._errorDict )
self.dds.setRandomSeed( 89421 )
self.assertFalse( self.dds._errorDict )
assertEqualsImproved( self.dds.randomSeed, 89421, self )
def test_setrandomseed_fails( self ):
self.assertFalse( self.dds._errorDict )
self.dds.setRandomSeed( [ 'abc' ] )
self.assertIn( '_checkArgs', self.dds._errorDict )
def test_setstartfrom( self ):
self.assertFalse( self.dds._errorDict )
self.dds.setStartFrom( 89421 )
self.assertFalse( self.dds._errorDict )
assertEqualsImproved( self.dds.startFrom, 89421, self )
def test_setstartfrom_fails( self ):
self.assertFalse( self.dds._errorDict )
self.dds.setStartFrom( 'adgiuj' )
self.assertIn( '_checkArgs', self.dds._errorDict )
def test_resolvelinkedparams( self ):
step_mock = Mock()
input_mock = Mock()
input_mock.getType.return_value = { 'abc' : False }
self.dds._linkedidx = 3
self.dds._jobsteps = [ None, None, None, input_mock ]
assertDiracSucceeds( self.dds._resolveLinkedStepParameters( step_mock ), self )
step_mock.setLink.assert_called_once_with( 'InputFile', { 'abc' : False }, 'OutputFile' )
def test_resolvelinkedparams_noinputstep( self ):
self.dds._linkedidx = None
self.dds._inputappstep = []
assertDiracSucceeds( self.dds._resolveLinkedStepParameters( None ), self )
def test_checkworkflow_app_missing( self ):
self.dds._inputapp = [ 'some_depdency', 'unavailable_dependency_fail_on_this' ]
self.dds._jobapps = [ 'myjobapp_1', 'some_dependency' ]
assertDiracFailsWith( self.dds._checkWorkflowConsistency(), 'job order not correct', self )
def test_checkworkflow_empty( self ):
self.dds._inputapp = []
self.dds._jobapps = []
assertDiracSucceeds( self.dds._checkWorkflowConsistency(), self )
def test_checkworkflow_success( self ):
self.dds._inputapp = [ 'some_dependency', 'other_dependencies', 'many_more' ]
self.dds._jobapps = [ 'ignore_me', 'many_more', 'some_dependency', 'other_dependencies' ]
assertDiracSucceeds( self.dds._checkWorkflowConsistency(), self )
def test_userjobmodules( self ):
module_mock = Mock()
assertDiracSucceeds( self.dds._userjobmodules( module_mock ), self )
def test_prodjobmodules( self ):
module_mock = Mock()
assertDiracSucceeds( self.dds._prodjobmodules( module_mock ), self )
def test_userjobmodules_fails( self ):
with patch('%s._setUserJobFinalization' % MODULE_NAME, new=Mock(return_value=S_OK('something'))),\
patch('%s._setApplicationModuleAndParameters' % MODULE_NAME, new=Mock(return_value=S_ERROR('some_test_err'))):
assertDiracFailsWith( self.dds._userjobmodules( None ),
'userjobmodules failed', self )
def test_prodjobmodules_fails( self ):
with patch('%s._setApplicationModuleAndParameters' % MODULE_NAME, new=Mock(return_value=S_OK('something'))), \
patch('%s._setOutputComputeDataList' % MODULE_NAME, new=Mock(return_value=S_ERROR('some_other_test_err'))):
assertDiracFailsWith( self.dds._prodjobmodules( None ),
'prodjobmodules failed', self )
def test_checkconsistency( self ):
self.dds.version = '134'
self.dds.detectorModel = 'mymodel.det'
self.dds.outputFile = 'myoutput.file'
self.dds._jobtype = 'User'
assertDiracSucceeds( self.dds._checkConsistency( Mock() ), self )
self.assertNotIn( { 'outputFile' : '@{OutputFile}', 'outputPath' : '@{OutputPath}',
'outputDataSE' : '@{OutputSE}' }, self.dds._listofoutput )
self.assertNotIn( 'nbevts', self.dds.prodparameters )
self.assertNotIn( 'Process', self.dds.prodparameters )
def test_checkconsistency_nodetectormodel( self ):
self.dds.version = 123
self.dds.steeringFile = None
self.dds.detectorModel = None
assertDiracFailsWith( self.dds._checkConsistency( Mock() ), 'no detectormodel set', self )
def test_checkconsistency_noversion( self ):
self.dds.version = None
assertDiracFailsWith( self.dds._checkConsistency( Mock() ), 'no version found', self )
def test_checkconsistency_existsfails( self ):
self.dds.version = '134'
self.dds.steeringFile = 'mysteer.file'
with patch('os.path.exists', new=Mock(return_value=False)), \
patch.object(inspect.getmodule(DDSim), 'Exists', new=Mock(return_value=S_ERROR('testerr_exists_mock'))):
assertDiracFailsWith( self.dds._checkConsistency( Mock() ), 'testerr_exists_mock', self )
def test_checkconsistency_userjob( self ):
self.dds.version = '134'
self.dds.steeringFile = 'mysteer.file'
self.dds._jobtype = 'notUser'
self.dds.detectorModel = 'myDetectorv200'
with patch('os.path.exists', new=Mock(return_value=True)), \
patch.object(inspect.getmodule(DDSim), 'Exists', new=Mock(return_value=S_ERROR('testerr_exists_mock'))):
assertDiracSucceeds( self.dds._checkConsistency( Mock() ), self )
self.assertIn( { 'outputFile' : '@{OutputFile}', 'outputPath' : '@{OutputPath}',
'outputDataSE' : '@{OutputSE}' }, self.dds._listofoutput )
for keyword in [ 'detectorType', 'slic_detectormodel' ]:
self.assertIn( keyword, self.dds.prodparameters )
def test_checkconsistency_userjob_notdetmodel( self ):
self.dds.version = '134'
self.dds.steeringFile = 'mysteer.file'
self.dds._jobtype = 'notUser'
self.dds.detectorModel = True
self.dds.setStartFrom( 148 )
with patch('os.path.exists', new=Mock(return_value=False)), \
patch.object(inspect.getmodule(DDSim), 'Exists', new=Mock(return_value=S_OK())):
assertDiracSucceeds( self.dds._checkConsistency( Mock() ), self )
self.assertIn( { 'outputFile' : '@{OutputFile}', 'outputPath' : '@{OutputPath}',
'outputDataSE' : '@{OutputSE}' }, self.dds._listofoutput )
for keyword in [ 'detectorType', 'slic_detectormodel' ]:
self.assertIn( keyword, self.dds.prodparameters )
#pylint: disable=protected-access
class TestDDSim( unittest.TestCase ):
"""tests for the DDSim interface"""
def setUp( self ):
pass
def tearDown( self ):
"""cleanup any files"""
pass
@patch( "ILCDIRAC.Interfaces.API.NewInterface.Applications.DDSim.getKnownDetectorModels",
new = Mock(return_value=S_OK({'CLIC_o2_v03':"/some/path"})))
def test_setDetectorModel1( self ):
"""test DDSIm setDetectorModel part of software................................................."""
detModel = "CLIC_o2_v03"
ddsim = DDSim()
ddsim.setDetectorModel( detModel )
self.assertEqual( ddsim.detectorModel, detModel )
@patch( "ILCDIRAC.Interfaces.API.NewInterface.Applications.DDSim.getKnownDetectorModels",
new = Mock(return_value=S_ERROR("No known models")))
def test_setDetectorModel2( self ):
"""test DDSIm setDetectorModel part of software failure........................................."""
detModel = "CLIC_o2_v03"
ddsim = DDSim()
res = ddsim.setDetectorModel( detModel )
self.assertEqual( res['Message'], "No known models" )
@patch( "ILCDIRAC.Interfaces.API.NewInterface.Applications.DDSim.getKnownDetectorModels",
new = Mock(return_value=S_OK({'CLIC_o2_v04':"/some/path"})))
def test_setDetectorModel3( self ):
"""test DDSIm setDetectorModel is not known....................................................."""
detModel = "ATLAS"
ddsim = DDSim()
ret = ddsim.setDetectorModel( detModel )
self.assertEqual( ddsim.detectorModel, '' )
self.assertFalse( ret['OK'] )
self.assertIn( "Unknown detector model in ddsim: ATLAS", ret['Message'] )
@patch( "os.path.exists", new = Mock(return_value=True ) )
def test_setDetectorModel_TB_success( self ):
"""test DDSIm setDetectorModel tarBall success.................................................."""
detModel = "CLIC_o2_v03"
ext = ".tar.gz"
ddsim = DDSim()
ddsim.setDetectorModel( detModel+ext )
self.assertEqual( ddsim.detectorModel, detModel )
self.assertTrue( detModel+ext in ddsim.inputSB )
@patch( "os.path.exists", new = Mock(return_value=False))
def test_setDetectorModel_TB_notLocal( self ):
"""test DDSIm setDetectorModel tarBall notLocal................................................."""
detModel = "CLIC_o2_v03"
ext = ".tgz"
ddsim = DDSim()
ddsim.setDetectorModel( detModel+ext )
self.assertEqual( ddsim.inputSB, [] )
self.assertEqual( ddsim.detectorModel, detModel )
def test_setDetectorModel_LFN_succcess( self ):
"""test DDSIm setDetectorModel lfn success......................................................"""
detModel = "lfn:/ilc/user/s/sailer/CLIC_o2_v03.tar.gz"
ddsim = DDSim()
ddsim.setDetectorModel( detModel )
self.assertEqual( ddsim.detectorModel, "CLIC_o2_v03" )
self.assertTrue( detModel in ddsim.inputSB )
def test_setStartFrom1( self ):
"""test DDSIm setStartFrom 1...................................................................."""
ddsim = DDSim()
ddsim.setStartFrom( "Arg")
self.assertTrue( ddsim._errorDict )
def test_setStartFrom2( self ):
"""test DDSIm setStartFrom 2...................................................................."""
ddsim = DDSim()
ddsim.setStartFrom( 42 )
self.assertEqual( ddsim.startFrom, 42 )
def test_getKnownDetModels1( self ):
"""test getKnownDetectorModels failure no version..............................................."""
ddsim = DDSim()
ret = ddsim.getKnownDetectorModels()
self.assertFalse( ret['OK'] )
self.assertEqual( "No software version defined", ret['Message'] )
def test_getKnownDetModels2( self ):
"""test getKnownDetectorModels success.........................................................."""
ddsim = DDSim()
ddsim.version = "test"
import DIRAC
ddsim._ops = create_autospec(DIRAC.ConfigurationSystem.Client.Helpers.Operations.Operations, spec_set=True)
ddsim._ops.getOptionsDict.return_value = S_OK({"detModel1":"/path", "detModel2":"/path2"})
ret = ddsim.getKnownDetectorModels()
self.assertIn( "detModel1", ret['Value'] )
self.assertTrue( ret['OK'] )
def runTests():
"""Runs our tests"""
suite = unittest.defaultTestLoader.loadTestsFromTestCase( TestDDSim )
testResult = unittest.TextTestRunner( verbosity = 2 ).run( suite )
print testResult
suite = unittest.defaultTestLoader.loadTestsFromTestCase( DDSimTestCase )
testResult = unittest.TextTestRunner( verbosity = 2 ).run( suite )
print testResult
if __name__ == '__main__':
runTests()
| [
"[email protected]"
]
| |
4570a4f1135d69481df51ef52485f7fe43b9827d | 430bd23decf16dc572a587b7af9f5c8e7dea5e6b | /clients/python/swagger_client/models/funding.py | ff0870b285368906885d8379191500364f1d06c3 | [
"Apache-2.0"
]
| permissive | jltrade/api-connectors | 332d4df5e7e60bd27b6c5a43182df7d99a665972 | fa2cf561b414e18e9d2e1b5d68e94cc710d315e5 | refs/heads/master | 2020-06-19T10:20:46.022967 | 2016-09-24T13:12:17 | 2016-09-24T13:12:17 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,097 | py | # coding: utf-8
"""
BitMEX API
REST API for the BitMEX.com trading platform.<br><br><a href=\"/app/restAPI\">REST Documentation</a><br><a href=\"/app/wsAPI\">Websocket Documentation</a>
OpenAPI spec version: 1.2.0
Contact: [email protected]
Generated by: https://github.com/swagger-api/swagger-codegen.git
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from pprint import pformat
from six import iteritems
import re
class Funding(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self, timestamp=None, symbol=None, funding_interval=None, funding_rate=None, funding_rate_daily=None):
"""
Funding - a model defined in Swagger
:param dict swaggerTypes: The key is attribute name
and the value is attribute type.
:param dict attributeMap: The key is attribute name
and the value is json key in definition.
"""
self.swagger_types = {
'timestamp': 'date',
'symbol': 'str',
'funding_interval': 'date',
'funding_rate': 'float',
'funding_rate_daily': 'float'
}
self.attribute_map = {
'timestamp': 'timestamp',
'symbol': 'symbol',
'funding_interval': 'fundingInterval',
'funding_rate': 'fundingRate',
'funding_rate_daily': 'fundingRateDaily'
}
self._timestamp = timestamp
self._symbol = symbol
self._funding_interval = funding_interval
self._funding_rate = funding_rate
self._funding_rate_daily = funding_rate_daily
@property
def timestamp(self):
"""
Gets the timestamp of this Funding.
:return: The timestamp of this Funding.
:rtype: date
"""
return self._timestamp
@timestamp.setter
def timestamp(self, timestamp):
"""
Sets the timestamp of this Funding.
:param timestamp: The timestamp of this Funding.
:type: date
"""
self._timestamp = timestamp
@property
def symbol(self):
"""
Gets the symbol of this Funding.
:return: The symbol of this Funding.
:rtype: str
"""
return self._symbol
@symbol.setter
def symbol(self, symbol):
"""
Sets the symbol of this Funding.
:param symbol: The symbol of this Funding.
:type: str
"""
self._symbol = symbol
@property
def funding_interval(self):
"""
Gets the funding_interval of this Funding.
:return: The funding_interval of this Funding.
:rtype: date
"""
return self._funding_interval
@funding_interval.setter
def funding_interval(self, funding_interval):
"""
Sets the funding_interval of this Funding.
:param funding_interval: The funding_interval of this Funding.
:type: date
"""
self._funding_interval = funding_interval
@property
def funding_rate(self):
"""
Gets the funding_rate of this Funding.
:return: The funding_rate of this Funding.
:rtype: float
"""
return self._funding_rate
@funding_rate.setter
def funding_rate(self, funding_rate):
"""
Sets the funding_rate of this Funding.
:param funding_rate: The funding_rate of this Funding.
:type: float
"""
self._funding_rate = funding_rate
@property
def funding_rate_daily(self):
"""
Gets the funding_rate_daily of this Funding.
:return: The funding_rate_daily of this Funding.
:rtype: float
"""
return self._funding_rate_daily
@funding_rate_daily.setter
def funding_rate_daily(self, funding_rate_daily):
"""
Sets the funding_rate_daily of this Funding.
:param funding_rate_daily: The funding_rate_daily of this Funding.
:type: float
"""
self._funding_rate_daily = funding_rate_daily
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
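    # Hypothetical usage sketch (field values are illustrative only, not taken from the BitMEX API):
    #   f = Funding(symbol="XBTUSD", funding_rate=0.0001, funding_rate_daily=0.0003)
    #   f.to_dict()  # -> {'timestamp': None, 'symbol': 'XBTUSD', 'funding_interval': None, ...}
    #   repr(f)      # pretty-printed dict via pformat()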
| [
"[email protected]"
]
| |
cfbdabfc13d6e89a1cfcbc015ee849ffc5635eb5 | 148072ce210ca4754ea4a37d83057e2cf2fdc5a1 | /src/core/w3af/w3af/plugins/attack/db/sqlmap/plugins/generic/filesystem.py | ee9770612e90346479646bec319c7d2028574f13 | []
| no_license | ycc1746582381/webfuzzer | 8d42fceb55c8682d6c18416b8e7b23f5e430c45f | 0d9aa35c3218dc58f81c429cae0196e4c8b7d51b | refs/heads/master | 2021-06-14T18:46:59.470232 | 2017-03-14T08:49:27 | 2017-03-14T08:49:27 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,062 | py | #!/usr/bin/env python
"""
Copyright (c) 2006-2015 sqlmap developers (http://sqlmap.org/)
See the file 'doc/COPYING' for copying permission
"""
import os
from lib.core.agent import agent
from lib.core.common import dataToOutFile
from lib.core.common import Backend
from lib.core.common import checkFile
from lib.core.common import decloakToTemp
from lib.core.common import decodeHexValue
from lib.core.common import isNumPosStrValue
from lib.core.common import isListLike
from lib.core.common import isStackingAvailable
from lib.core.common import isTechniqueAvailable
from lib.core.common import readInput
from lib.core.data import conf
from lib.core.data import kb
from lib.core.data import logger
from lib.core.enums import DBMS
from lib.core.enums import CHARSET_TYPE
from lib.core.enums import EXPECTED
from lib.core.enums import PAYLOAD
from lib.core.exception import SqlmapUndefinedMethod
from lib.request import inject
class Filesystem:
"""
This class defines generic OS file system functionalities for plugins.
"""
def __init__(self):
self.fileTblName = "sqlmapfile"
self.tblField = "data"
def _checkFileLength(self, localFile, remoteFile, fileRead=False):
if Backend.isDbms(DBMS.MYSQL):
lengthQuery = "LENGTH(LOAD_FILE('%s'))" % remoteFile
elif Backend.isDbms(DBMS.PGSQL) and not fileRead:
lengthQuery = "SELECT LENGTH(data) FROM pg_largeobject WHERE loid=%d" % self.oid
elif Backend.isDbms(DBMS.MSSQL):
self.createSupportTbl(self.fileTblName, self.tblField, "VARBINARY(MAX)")
inject.goStacked("INSERT INTO %s(%s) SELECT %s FROM OPENROWSET(BULK '%s', SINGLE_BLOB) AS %s(%s)" % (
self.fileTblName, self.tblField, self.tblField, remoteFile, self.fileTblName, self.tblField));
lengthQuery = "SELECT DATALENGTH(%s) FROM %s" % (self.tblField, self.fileTblName)
localFileSize = os.path.getsize(localFile)
if fileRead and Backend.isDbms(DBMS.PGSQL):
logger.info("length of read file %s cannot be checked on PostgreSQL" % remoteFile)
sameFile = True
else:
logger.debug("checking the length of the remote file %s" % remoteFile)
remoteFileSize = inject.getValue(lengthQuery, resumeValue=False, expected=EXPECTED.INT,
charsetType=CHARSET_TYPE.DIGITS)
sameFile = None
if isNumPosStrValue(remoteFileSize):
remoteFileSize = long(remoteFileSize)
sameFile = False
if localFileSize == remoteFileSize:
sameFile = True
infoMsg = "the local file %s and the remote file " % localFile
infoMsg += "%s have the same size (%db)" % (remoteFile, localFileSize)
elif remoteFileSize > localFileSize:
infoMsg = "the remote file %s is larger (%db) than " % (remoteFile, remoteFileSize)
infoMsg += "the local file %s (%db)" % (localFile, localFileSize)
else:
infoMsg = "the remote file %s is smaller (%db) than " % (remoteFile, remoteFileSize)
infoMsg += "file %s (%db)" % (localFile, localFileSize)
logger.info(infoMsg)
else:
sameFile = False
warnMsg = "it looks like the file has not been written (usually "
warnMsg += "occurs if the DBMS process' user has no write "
warnMsg += "privileges in the destination path)"
logger.warn(warnMsg)
return sameFile
def fileToSqlQueries(self, fcEncodedList):
"""
Called by MySQL and PostgreSQL plugins to write a file on the
back-end DBMS underlying file system
"""
counter = 0
sqlQueries = []
for fcEncodedLine in fcEncodedList:
if counter == 0:
sqlQueries.append("INSERT INTO %s(%s) VALUES (%s)" % (self.fileTblName, self.tblField, fcEncodedLine))
else:
updatedField = agent.simpleConcatenate(self.tblField, fcEncodedLine)
sqlQueries.append("UPDATE %s SET %s=%s" % (self.fileTblName, self.tblField, updatedField))
counter += 1
return sqlQueries
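    # Illustrative result (hypothetical hex chunks, not real payloads): for
    # fcEncodedList = ["0xAB", "0xCD"] this returns
    #   ["INSERT INTO sqlmapfile(data) VALUES (0xAB)",
    #    "UPDATE sqlmapfile SET data=<concat(data, 0xCD)>"]
    # where the concatenation syntax comes from agent.simpleConcatenate() and is DBMS-specific.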
def fileEncode(self, fileName, encoding, single):
"""
Called by MySQL and PostgreSQL plugins to write a file on the
back-end DBMS underlying file system
"""
retVal = []
with open(fileName, "rb") as f:
content = f.read().encode(encoding).replace("\n", "")
if not single:
if len(content) > 256:
for i in xrange(0, len(content), 256):
_ = content[i:i + 256]
if encoding == "hex":
_ = "0x%s" % _
elif encoding == "base64":
_ = "'%s'" % _
retVal.append(_)
if not retVal:
if encoding == "hex":
content = "0x%s" % content
elif encoding == "base64":
content = "'%s'" % content
retVal = [content]
return retVal
def askCheckWrittenFile(self, localFile, remoteFile, forceCheck=False):
output = None
if forceCheck is not True:
message = "do you want confirmation that the local file '%s' " % localFile
message += "has been successfully written on the back-end DBMS "
message += "file system (%s)? [Y/n] " % remoteFile
output = readInput(message, default="Y")
if forceCheck or (output and output.lower() == "y"):
return self._checkFileLength(localFile, remoteFile)
return True
def askCheckReadFile(self, localFile, remoteFile):
message = "do you want confirmation that the remote file '%s' " % remoteFile
message += "has been successfully downloaded from the back-end "
message += "DBMS file system? [Y/n] "
output = readInput(message, default="Y")
if not output or output in ("y", "Y"):
return self._checkFileLength(localFile, remoteFile, True)
return None
def nonStackedReadFile(self, remoteFile):
errMsg = "'nonStackedReadFile' method must be defined "
errMsg += "into the specific DBMS plugin"
raise SqlmapUndefinedMethod(errMsg)
def stackedReadFile(self, remoteFile):
errMsg = "'stackedReadFile' method must be defined "
errMsg += "into the specific DBMS plugin"
raise SqlmapUndefinedMethod(errMsg)
def unionWriteFile(self, localFile, remoteFile, fileType, forceCheck=False):
errMsg = "'unionWriteFile' method must be defined "
errMsg += "into the specific DBMS plugin"
raise SqlmapUndefinedMethod(errMsg)
def stackedWriteFile(self, localFile, remoteFile, fileType, forceCheck=False):
errMsg = "'stackedWriteFile' method must be defined "
errMsg += "into the specific DBMS plugin"
raise SqlmapUndefinedMethod(errMsg)
def readFile(self, remoteFiles):
localFilePaths = []
self.checkDbmsOs()
for remoteFile in remoteFiles.split(","):
fileContent = None
kb.fileReadMode = True
if conf.direct or isStackingAvailable():
if isStackingAvailable():
debugMsg = "going to read the file with stacked query SQL "
debugMsg += "injection technique"
logger.debug(debugMsg)
fileContent = self.stackedReadFile(remoteFile)
elif Backend.isDbms(DBMS.MYSQL):
debugMsg = "going to read the file with a non-stacked query "
debugMsg += "SQL injection technique"
logger.debug(debugMsg)
fileContent = self.nonStackedReadFile(remoteFile)
else:
errMsg = "none of the SQL injection techniques detected can "
errMsg += "be used to read files from the underlying file "
errMsg += "system of the back-end %s server" % Backend.getDbms()
logger.error(errMsg)
fileContent = None
kb.fileReadMode = False
if fileContent in (None, "") and not Backend.isDbms(DBMS.PGSQL):
self.cleanup(onlyFileTbl=True)
elif isListLike(fileContent):
newFileContent = ""
for chunk in fileContent:
if isListLike(chunk):
if len(chunk) > 0:
chunk = chunk[0]
else:
chunk = ""
if chunk:
newFileContent += chunk
fileContent = newFileContent
if fileContent is not None:
fileContent = decodeHexValue(fileContent, True)
if fileContent:
localFilePath = dataToOutFile(remoteFile, fileContent)
if not Backend.isDbms(DBMS.PGSQL):
self.cleanup(onlyFileTbl=True)
sameFile = self.askCheckReadFile(localFilePath, remoteFile)
if sameFile is True:
localFilePath += " (same file)"
elif sameFile is False:
localFilePath += " (size differs from remote file)"
localFilePaths.append(localFilePath)
else:
errMsg = "no data retrieved"
logger.error(errMsg)
return localFilePaths
def writeFile(self, localFile, remoteFile, fileType=None, forceCheck=False):
written = False
checkFile(localFile)
self.checkDbmsOs()
if localFile.endswith('_'):
localFile = decloakToTemp(localFile)
if conf.direct or isStackingAvailable():
if isStackingAvailable():
debugMsg = "going to upload the %s file with " % fileType
debugMsg += "stacked query SQL injection technique"
logger.debug(debugMsg)
written = self.stackedWriteFile(localFile, remoteFile, fileType, forceCheck)
self.cleanup(onlyFileTbl=True)
elif isTechniqueAvailable(PAYLOAD.TECHNIQUE.UNION) and Backend.isDbms(DBMS.MYSQL):
debugMsg = "going to upload the %s file with " % fileType
debugMsg += "UNION query SQL injection technique"
logger.debug(debugMsg)
written = self.unionWriteFile(localFile, remoteFile, fileType, forceCheck)
else:
errMsg = "none of the SQL injection techniques detected can "
errMsg += "be used to write files to the underlying file "
errMsg += "system of the back-end %s server" % Backend.getDbms()
logger.error(errMsg)
return None
return written
| [
"[email protected]"
]
| |
55bd1e9c03901d04fa75221fbb2e004339501afe | c3379fb707daf434fc731006e173da817b68ca75 | /pydatview/fast/runner.py | f321a81d662cb9f3714995b306df20a08da376e1 | [
"MIT"
]
| permissive | dviewtest/pyDatView | 43cb6d2bb76a78670ecd1083495024f935bc9e9b | 3516ffaff601c122d62ffc94abd842958354ece8 | refs/heads/master | 2023-06-27T11:08:52.056689 | 2021-06-23T17:57:35 | 2021-06-23T17:57:35 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,304 | py | # --- For cmd.py
from __future__ import division, print_function
import os
import subprocess
import multiprocessing
import collections
import glob
import pandas as pd
import numpy as np
import shutil
import stat
import re
# --- Fast libraries
from weio.weio.fast_input_file import FASTInputFile
from weio.weio.fast_output_file import FASTOutputFile
# from pyFAST.input_output.fast_input_file import FASTInputFile
# from pyFAST.input_output.fast_output_file import FASTOutputFile
FAST_EXE='openfast'
# --------------------------------------------------------------------------------}
# --- Tools for executing FAST
# --------------------------------------------------------------------------------{
# --- START cmd.py
def run_cmds(inputfiles, exe, parallel=True, showOutputs=True, nCores=None, showCommand=True):
""" Run a set of simple commands of the form `exe input_file`
By default, the commands are run in "parallel" (though the method needs to be improved)
The stdout and stderr may be displayed on screen (`showOutputs`) or hidden.
A better handling is yet required.
"""
Failed=[]
def _report(p):
if p.returncode==0:
print('[ OK ] Input : ',p.input_file)
else:
Failed.append(p)
print('[FAIL] Input : ',p.input_file)
print(' Directory: '+os.getcwd())
print(' Command : '+p.cmd)
print(' Use `showOutputs=True` to debug, or run the command above.')
#out, err = p.communicate()
#print('StdOut:\n'+out)
#print('StdErr:\n'+err)
ps=[]
iProcess=0
if nCores is None:
nCores=multiprocessing.cpu_count()
if nCores<0:
nCores=len(inputfiles)+1
for i,f in enumerate(inputfiles):
#print('Process {}/{}: {}'.format(i+1,len(inputfiles),f))
ps.append(run_cmd(f, exe, wait=(not parallel), showOutputs=showOutputs, showCommand=showCommand))
iProcess += 1
# waiting once we've filled the number of cores
# TODO: smarter method with proper queue, here processes are run by chunks
if parallel:
if iProcess==nCores:
for p in ps:
p.wait()
for p in ps:
_report(p)
ps=[]
iProcess=0
    # Extra process if not multiple of nCores (TODO, smarter method)
for p in ps:
p.wait()
for p in ps:
_report(p)
# --- Giving a summary
if len(Failed)==0:
print('[ OK ] All simulations run successfully.')
return True
else:
print('[FAIL] {}/{} simulations failed:'.format(len(Failed),len(inputfiles)))
for p in Failed:
print(' ',p.input_file)
return False
def run_cmd(input_file_or_arglist, exe, wait=True, showOutputs=False, showCommand=True):
""" Run a simple command of the form `exe input_file` or `exe arg1 arg2` """
# TODO Better capture STDOUT
if isinstance(input_file_or_arglist, list):
args= [exe] + input_file_or_arglist
input_file = ' '.join(input_file_or_arglist)
input_file_abs = input_file
else:
input_file=input_file_or_arglist
if not os.path.isabs(input_file):
input_file_abs=os.path.abspath(input_file)
else:
input_file_abs=input_file
if not os.path.exists(exe):
raise Exception('Executable not found: {}'.format(exe))
args= [exe,input_file]
#args = 'cd '+workDir+' && '+ exe +' '+basename
shell=False
if showOutputs:
STDOut= None
else:
STDOut= open(os.devnull, 'w')
if showCommand:
print('Running: '+' '.join(args))
if wait:
class Dummy():
pass
p=Dummy()
p.returncode=subprocess.call(args , stdout=STDOut, stderr=subprocess.STDOUT, shell=shell)
else:
p=subprocess.Popen(args, stdout=STDOut, stderr=subprocess.STDOUT, shell=shell)
# Storing some info into the process
p.cmd = ' '.join(args)
p.args = args
p.input_file = input_file
p.input_file_abs = input_file_abs
p.exe = exe
return p
# --- END cmd.py
def run_fastfiles(fastfiles, fastExe=None, parallel=True, showOutputs=True, nCores=None, showCommand=True, reRun=True):
if fastExe is None:
fastExe=FAST_EXE
if not reRun:
# Figure out which files exist
newfiles=[]
for f in fastfiles:
base=os.path.splitext(f)[0]
if os.path.exists(base+'.outb') or os.path.exists(base+'.out'):
print('>>> Skipping existing simulation for: ',f)
pass
else:
newfiles.append(f)
fastfiles=newfiles
return run_cmds(fastfiles, fastExe, parallel=parallel, showOutputs=showOutputs, nCores=nCores, showCommand=showCommand)
def run_fast(input_file, fastExe=None, wait=True, showOutputs=False, showCommand=True):
if fastExe is None:
fastExe=FAST_EXE
return run_cmd(input_file, fastExe, wait=wait, showOutputs=showOutputs, showCommand=showCommand)
def writeBatch(batchfile, fastfiles, fastExe=None):
""" Write batch file, everything is written relative to the batch file"""
if fastExe is None:
fastExe=FAST_EXE
fastExe_abs = os.path.abspath(fastExe)
batchfile_abs = os.path.abspath(batchfile)
batchdir = os.path.dirname(batchfile_abs)
fastExe_rel = os.path.relpath(fastExe_abs, batchdir)
with open(batchfile,'w') as f:
for ff in fastfiles:
ff_abs = os.path.abspath(ff)
ff_rel = os.path.relpath(ff_abs, batchdir)
l = fastExe_rel + ' '+ ff_rel
f.write("%s\n" % l)
def removeFASTOuputs(workDir):
# Cleaning folder
for f in glob.glob(os.path.join(workDir,'*.out')):
os.remove(f)
for f in glob.glob(os.path.join(workDir,'*.outb')):
os.remove(f)
for f in glob.glob(os.path.join(workDir,'*.ech')):
os.remove(f)
for f in glob.glob(os.path.join(workDir,'*.sum')):
os.remove(f)
if __name__=='__main__':
run_cmds(['main1.fst','main2.fst'], './Openfast.exe', parallel=True, showOutputs=False, nCores=4, showCommand=True)
pass
# --- Test of templateReplace
| [
"[email protected]"
]
| |
27d32813b7fee47a8f3898e5b10327bb6f1e91ce | 25404f4cfb9be3e6f1b3fe31a1554459eb200813 | /1_todo/string_io_and_json_example.py | 5cb62ee749b5815bcf6dba5c20c390f1ac5608f1 | []
| no_license | nightimero/annal_report_test | 1c6eb4b71482f870c753f5084212afd071929f57 | 7bbc76ba703527ba8f4b84fbdb94fd57b37b9887 | refs/heads/master | 2021-09-06T21:18:59.534963 | 2018-02-11T15:31:21 | 2018-02-11T15:31:21 | 103,259,391 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,171 | py | # -*- coding: utf-8 -*-
from StringIO import StringIO
import json
io = StringIO()
json.dump(['streaming API'], io)
io.getvalue()
# '["streaming API"]'
# 2.use seperator, Compact encoding
import json
json.dumps([1, 2, 3, {'4': 5, '6': 7}], separators=(',', ':'))
'[1,2,3,{"4":5,"6":7}]'
# 3. Pretty printing: the indent parameter sets the indentation width
import json
print json.dumps({'4': 5, '6': 7}, sort_keys=True,
indent=4, separators=(',', ': '))
# {
# "4": 5,
# "6": 7
# }
# 4.Decoding JSON:
import json
json.loads('["foo", {"bar":["baz", null, 1.0, 2]}]')
[u'foo', {u'bar': [u'baz', None, 1.0, 2]}]
json.loads('"\\"foo\\bar"')
u'"foo\x08ar'
from StringIO import StringIO
io = StringIO('["streaming API"]')
json.load(io)
[u'streaming API']
# 5. Skipping invalid keys (skipkeys)
# Another useful dumps parameter is skipkeys, which defaults to False. When dumps serializes a dict,
# keys must be of type str; a key of any other type raises TypeError. With skipkeys=True such keys are silently skipped.
data = {'b': 789, 'c': 456, (1, 2): 123}
print json.dumps(data, skipkeys=True)
#
# {"c": 456, "b": 789}
| [
"[email protected]"
]
| |
3b2b4b72c827466af785eb8a9670fc7e4d2bff0d | 06ee5a5d83466896bbfd1653206da0151d6aa81a | /apps/business/serializers/file_serializer.py | ae6dac0452ba845b69a632709ac10c18ac7e31f3 | []
| no_license | fengjy96/rest_task | 201421a40ce42031223f61135d1d5e85809188e6 | db1d7c4eb2d5d229ab54c6d5775f96fc1843716e | refs/heads/master | 2020-07-22T19:48:19.940094 | 2019-09-02T13:40:11 | 2019-09-02T13:40:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 426 | py | from rest_framework import serializers
from business.models.files import Files
class FilesSerializer(serializers.ModelSerializer):
"""
文件:增删改查
"""
class Meta:
model = Files
fields = '__all__'
class FilesListSerializer(serializers.ModelSerializer):
"""
消息:增删改查
"""
class Meta:
model = Files
fields = '__all__'
depth = 1
| [
"[email protected]"
]
| |
e01b140eb36a9c67eba75192ebe27eb8b1a977f6 | 6f2a8a9d2f11d194fe41762e71ebd7270a22325b | /source/abstract/entities/electronic/controller/controller.py | 889ac5c8eca1c378a0464c9d0484d2aa82609ba9 | []
| no_license | rschum/game | 053da314a276445e03d682c6481a35aa888c5125 | 59ef0461c1ac60e690d39f6c180256f387999e44 | refs/heads/master | 2020-05-23T20:10:57.698939 | 2017-04-20T03:04:31 | 2017-04-20T03:04:31 | 84,785,024 | 0 | 0 | null | 2017-03-13T04:45:46 | 2017-03-13T04:45:46 | null | UTF-8 | Python | false | false | 193 | py | from source.abstract.entities.inanimate.controller import controller
class Controller(controller.Controller):
def __init__(self):
controller.Controller.__init__(self)
pass
| [
"[email protected]"
]
| |
11247c56107695e84821a8412a5d43b66542c9fc | a5d0a0499dd069c555080c8cefc2434304afead4 | /Programmers/pipe.py | bfa9ff3f16b5e878de473bd4fbe430f11b47ebcb | []
| no_license | devjinius/algorithm | 9bdf9afc021249b188d6930cf9d71f9147325d9f | 007fa6346a19868fbbc05eefd50848babb5f1cca | refs/heads/master | 2020-05-04T06:08:32.827207 | 2019-07-31T02:39:39 | 2019-07-31T02:39:39 | 178,999,456 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 478 | py | # 프로그래머스 쇠막대기
# https://programmers.co.kr/learn/courses/30/lessons/42585
def solution(arrangement):
stack = []
prevStr = ''
count = 0
for word in arrangement:
if(word == ")"):
if(prevStr == "("):
stack.pop()
count += len(stack)
else:
stack.pop()
count += 1
else:
stack.append(word)
prevStr = word
return count
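# Hand-checked example (not part of the original submission): one bar crossed by two lasers
# is cut into three pieces.
#   solution("(()())")  # -> 3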
| [
"[email protected]"
]
| |
b1ac9099c36ddeeab3548464dd1b5d5e9b1ee687 | 84d2040faf1acaabedce67e884b55767b6b98e57 | /source/watches/migrations/0003_auto_20210305_1130.py | e955040939fd33e381c347577ff1f00f4c1035ee | []
| no_license | UuljanAitnazarova/watches_shop | 3adae63141107c91ae6a489dddeb8f8fa9433666 | 6f54b11d468957cf05275c37b17f4c2e669e9fc2 | refs/heads/master | 2023-05-08T11:51:25.597190 | 2021-05-27T12:48:46 | 2021-05-27T12:48:46 | 344,481,584 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 508 | py | # Generated by Django 3.1.7 on 2021-03-05 11:30
import django.core.validators
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('watches', '0002_auto_20210304_1426'),
]
operations = [
migrations.AlterField(
model_name='product',
name='product_availability',
field=models.IntegerField(validators=[django.core.validators.MinValueValidator(0)], verbose_name='Остаток'),
),
]
| [
"[email protected]"
]
| |
346d811811941e402f2c375d0f49101f32158661 | 77311ad9622a7d8b88707d7cee3f44de7c8860cb | /res/scripts/client/gui/scaleform/daapi/view/lobby/customization/camouflageinterface.py | 6ebe0013b393fb58ca391ed6ffa5081abde07dd3 | []
| no_license | webiumsk/WOT-0.9.14-CT | 9b193191505a4560df4e872e022eebf59308057e | cfe0b03e511d02c36ce185f308eb48f13ecc05ca | refs/heads/master | 2021-01-10T02:14:10.830715 | 2016-02-14T11:59:59 | 2016-02-14T11:59:59 | 51,606,676 | 0 | 0 | null | null | null | null | WINDOWS-1250 | Python | false | false | 16,854 | py | # 2016.02.14 12:39:07 Střední Evropa (běžný čas)
# Embedded file name: scripts/client/gui/Scaleform/daapi/view/lobby/customization/CamouflageInterface.py
import BigWorld
import functools
from datetime import timedelta
from math import ceil
import time
from CurrentVehicle import g_currentVehicle
from constants import IGR_TYPE
from debug_utils import LOG_DEBUG
from gui import SystemMessages, g_tankActiveCamouflage
import gui
from gui.Scaleform.daapi.view.lobby.customization.BaseTimedCustomizationInterface import BaseTimedCustomizationInterface
from gui.Scaleform.daapi.view.lobby.customization.data_providers import CamouflageGroupsDataProvider, CamouflagesDataProvider, CamouflageRentalPackageDataProvider
from gui.Scaleform.daapi.view.lobby.customization import CustomizationHelper
from gui.Scaleform.genConsts.CUSTOMIZATION_ITEM_TYPE import CUSTOMIZATION_ITEM_TYPE
from gui.Scaleform.locale.MENU import MENU
from gui.Scaleform.locale.SYSTEM_MESSAGES import SYSTEM_MESSAGES
from gui.shared import g_itemsCache
from gui.shared.utils.HangarSpace import g_hangarSpace
from helpers import i18n, time_utils
from items import vehicles
from items.vehicles import CAMOUFLAGE_KINDS
class CamouflageInterface(BaseTimedCustomizationInterface):
def __init__(self, name, nationId, type, position):
super(CamouflageInterface, self).__init__(name, nationId, type, position)
self.currentItemsByKind = {}
self.indexToKind = {}
self.resetCurrentItems()
def resetCurrentItems(self):
for k, v in CAMOUFLAGE_KINDS.iteritems():
self.setCurrentItem(v, None, None, None, None)
self.indexToKind[v] = k
return
def setCurrentItem(self, kindIdx, ID, lifeCycle, newItemID, packageIdx):
self.currentItemsByKind[kindIdx] = {'id': ID,
'lifeCycle': lifeCycle,
'newItemID': newItemID,
'packageIdx': packageIdx}
def __del__(self):
LOG_DEBUG('CamouflageInterface deleted')
def getRentalPackagesDP(self):
dp = CamouflageRentalPackageDataProvider(self._nationID)
dp.setFlashObject(self.flashObject.camouflageRentalPackageDP)
return dp
def getGroupsDP(self):
dp = CamouflageGroupsDataProvider(self._nationID)
dp.setFlashObject(self.flashObject.camouflageGroupsDataProvider)
return dp
def getItemsDP(self):
dp = CamouflagesDataProvider(self._nationID)
dp.setFlashObject(self.flashObject.camouflageDP)
return dp
def getItemPriceFactor(self, vehType):
return g_itemsCache.items.shop.getVehCamouflagePriceFactor(vehType.compactDescr)
def isNewItemIGR(self):
for kind, item in self.currentItemsByKind.iteritems():
if item.get('newItemID') is not None:
return self._itemsDP.isIGRItem(item.get('newItemID'))
return False
def getItemDefaultPriceFactor(self, vehType):
return g_itemsCache.items.shop.defaults.getVehCamouflagePriceFactor(vehType.compactDescr)
def refreshViewData(self, vehType, refresh = False):
if vehType is not None:
self._groupsDP.buildList()
self._itemsDP.setVehicleTypeParams(self.getItemPriceFactor(vehType), self.getItemDefaultPriceFactor(vehType), self.currentItemsByKind.get(CAMOUFLAGE_KINDS.get(self._itemsDP.currentGroup, 0), {'id': None}).get('id'))
self._rentalPackageDP.refreshList()
return
def invalidateViewData(self, vehType, refresh = False):
if vehType is not None:
self._groupsDP.buildList()
self._itemsDP.setVehicleTypeParams(self.getItemPriceFactor(vehType), self.getItemDefaultPriceFactor(vehType), self.currentItemsByKind.get(0, {'id': None}).get('id'))
self._rentalPackageDP.getRentalPackages(refresh)
return
def isNewItemSelected(self):
return self.getSelectedItemsCount() > 0
def getNewItems(self):
newItems = None
for kind, item in self.currentItemsByKind.iteritems():
if item.get('newItemID') is not None:
if newItems is None:
newItems = []
newItems.append(self._itemsDP.makeItem(item.get('newItemID'), False, None, None, kind))
return newItems
def getSelectedItemCost(self):
newItemsCosts = [ self.getItemCost(item.get('newItemID'), item.get('packageIdx')) for kind, item in self.currentItemsByKind.iteritems() if item.get('newItemID') is not None ]
return newItemsCosts
def getSelectedItemsCount(self, *args):
if len(args):
newItems = []
for kind, item in self.currentItemsByKind.iteritems():
if item.get('newItemID') is not None:
cost = self.getItemCost(item.get('newItemID'), item.get('packageIdx'))
if cost.get('isGold') == args[0]:
newItems.append(item)
else:
newItems = [ item for kind, item in self.currentItemsByKind.iteritems() if item.get('newItemID') is not None ]
return len(newItems)
def isCurrentItemRemove(self):
currentItems = []
for kind, item in self.currentItemsByKind.iteritems():
if item.get('id') is not None and item.get('newItemID') is not None and item.get('lifeCycle', (0, 0))[1] > 0:
currentItems.append(item)
return len(currentItems) > 0
def getCurrentItemRemoveStr(self):
removeStr = None
for kind, item in self.currentItemsByKind.iteritems():
lifeCycle = item.get('lifeCycle')
if item.get('id') is not None and item.get('newItemID') and lifeCycle is not None:
if removeStr is None:
removeStr = []
if lifeCycle[1] > 0:
removeStr.append(gui.makeHtmlString('html_templates:lobby/customization', 'remove-camouflage-{0}'.format(kind)))
else:
removeStr.append(gui.makeHtmlString('html_templates:lobby/customization', 'store-camouflage-{0}'.format(kind)))
return removeStr
def getCurrentItem(self):
space = g_hangarSpace.space
if space is not None:
space.locateCameraToPreview()
items = []
for key, item in self.currentItemsByKind.iteritems():
items.append(self._itemsDP.makeItem(item.get('id'), True, item.get('lifeCycle'), self._makeTimeLeftString(item=item), key))
return items
def onSetID(self, itemID, kind, packageIdx):
item = self.currentItemsByKind.get(kind)
if itemID == -1:
item['newItemID'] = None
else:
if item.get('id') == itemID:
item['newItemID'] = None
else:
item['newItemID'] = itemID
item['packageIdx'] = packageIdx
self.updateVehicleCustomization(itemID)
return
def _onRentalPackagesDataInited(self, selectedPackage, refresh):
if selectedPackage:
self._itemsDP.setDefaultCost(selectedPackage.get('cost'), selectedPackage.get('defCost'), selectedPackage.get('isGold'), selectedPackage.get('isIGR'), selectedPackage.get('periodDays'))
if refresh:
for kind, item in self.currentItemsByKind.iteritems():
item['newItemID'] = None
self._rentalPackageDP.refresh()
self._itemsDP.refresh()
LOG_DEBUG('CamouflageInterface data inited', self._name)
self.onDataInited(self._name)
return
def _makeTimeLeftString(self, **kwargs):
result = ''
item = kwargs.get('item')
if item.get('lifeCycle') is not None:
startTime, days = item.get('lifeCycle')
if days > 0:
timeLeft = startTime + days * 86400 - time.time()
if timeLeft > 0:
delta = timedelta(0, timeLeft)
if delta.days > 0:
result = i18n.makeString(MENU.CUSTOMIZATION_LABELS_CAMOUFLAGE_TIMELEFT_DAYS, delta.days + 1 if delta.seconds > 0 else delta.days)
else:
result = i18n.makeString(MENU.CUSTOMIZATION_LABELS_CAMOUFLAGE_TIMELEFT_HOURS, ceil(delta.seconds / 3600.0))
else:
result = i18n.makeString(MENU.CUSTOMIZATION_LABELS_TIMELEFT_LASTMINUTE)
return result
def updateVehicleCustomization(self, itemID = None):
space = g_hangarSpace.space
if space is not None and g_currentVehicle.isInHangar():
space.updateVehicleCamouflage(camouflageID=itemID)
return
def fetchCurrentItem(self, vehDescr):
if vehDescr is not None:
camouflages = vehDescr.camouflages
if camouflages is not None:
for camouflage in camouflages:
itemId, startTime, days = camouflage
if itemId is not None:
lifeCycle = None if itemId is None else (time_utils.makeLocalServerTime(startTime), days)
camouflageObject = self._itemsDP.getCamouflageDescr(itemId)
self.setCurrentItem(camouflageObject.get('kind'), itemId, lifeCycle, None, self._rentalPackageDP.getIndexByDays(days, self._itemsDP.isIGRItem(itemId)))
return
def change(self, vehInvID, section, isAlreadyPurchased):
if self._rentalPackageDP.selectedPackage is None:
message = i18n.makeString(SYSTEM_MESSAGES.CUSTOMIZATION_CAMOUFLAGE_DAYS_NOT_SELECTED)
self.onCustomizationChangeFailed(message)
return
else:
isNewItemFound = False
for kind, item in self.currentItemsByKind.iteritems():
newItemID = item.get('newItemID', None)
currItemId = item.get('id', None)
if newItemID is None:
continue
elif not isNewItemFound:
isNewItemFound = True
price = self.getItemCost(newItemID, item.get('packageIdx'))
cost = price.get('cost', 0)
isGold = price.get('isGold', False)
if cost < 0:
message = i18n.makeString(SYSTEM_MESSAGES.CUSTOMIZATION_CAMOUFLAGE_COST_NOT_FOUND)
self.onCustomizationChangeFailed(message)
return
localKind = kind
if CustomizationHelper.isItemInHangar(CUSTOMIZATION_ITEM_TYPE.CAMOUFLAGE, newItemID, self._nationID):
hangarItem = CustomizationHelper.getItemFromHangar(CUSTOMIZATION_ITEM_TYPE.CAMOUFLAGE_TYPE, newItemID, self._nationID)
daysToWear = 0 if hangarItem.get('isPermanent') else 7
else:
daysToWear = self._rentalPackageDP.pyRequestItemAt(item.get('packageIdx')).get('periodDays')
newIdToSend = 0
isNewInDefaultSetup = False
isCurrIgr = self._itemsDP.isIGRItem(currItemId)
if isCurrIgr:
isNewInDefaultSetup = CustomizationHelper.isIdInDefaultSetup(CUSTOMIZATION_ITEM_TYPE.CAMOUFLAGE, newItemID)
if currItemId is None or not isCurrIgr or isCurrIgr and not isNewInDefaultSetup or isCurrIgr and isNewInDefaultSetup and daysToWear > 0:
newIdToSend = newItemID
BigWorld.player().inventory.changeVehicleCamouflage(vehInvID, localKind, newIdToSend, daysToWear, functools.partial(self.__onChangeVehicleCamouflage, (cost, isGold), localKind))
if not isNewItemFound:
message = i18n.makeString(SYSTEM_MESSAGES.CUSTOMIZATION_CAMOUFLAGE_NOT_SELECTED)
self.onCustomizationChangeFailed(message)
return
def drop(self, vehInvID, kind):
if self.currentItemsByKind.get(kind) is None:
message = i18n.makeString(SYSTEM_MESSAGES.CUSTOMIZATION_CAMOUFLAGE_NOT_FOUND_TO_DROP)
self.onCustomizationDropFailed(message)
return
else:
BigWorld.player().inventory.changeVehicleCamouflage(vehInvID, kind, 0, 0, lambda resultID: self.__onDropVehicleCamouflage(resultID, kind))
return
def update(self, vehicleDescr):
camouflages = vehicleDescr.camouflages
isUpdated = False
for index, camouflage in enumerate(camouflages):
camouflageID = camouflage[0] if camouflage is not None else None
item = self.currentItemsByKind[index]
if camouflageID != item.get('id'):
isUpdated = True
item['id'] = camouflageID
if camouflage is not None:
_, startTime, days = camouflage
startTime = time_utils.makeLocalServerTime(startTime)
item['lifeCycle'] = (startTime, days)
else:
item['lifeCycle'] = None
if CAMOUFLAGE_KINDS.get(self._itemsDP.currentGroup) == index:
self._itemsDP.currentItemID = item['id']
if isUpdated:
self.onCurrentItemChange(self._name)
return
def _populate(self):
super(CamouflageInterface, self)._populate()
def _dispose(self):
self.updateVehicleCustomization()
self.resetCurrentItems()
super(CamouflageInterface, self)._dispose()
def __onChangeVehicleCamouflage(self, price, kind, resultID):
if resultID < 0:
message = i18n.makeString(SYSTEM_MESSAGES.CUSTOMIZATION_CAMOUFLAGE_CHANGE_SERVER_ERROR)
self.onCustomizationChangeFailed(message)
return
else:
item = self.currentItemsByKind.get(kind)
g_tankActiveCamouflage[g_currentVehicle.item.intCD] = kind
item['id'] = item.get('newItemID')
item['lifeCycle'] = None
item['newItemID'] = None
if CAMOUFLAGE_KINDS.get(self._itemsDP.currentGroup) == kind:
self._itemsDP.currentItemID = item['id']
cost, isGold = price
if cost == 0:
key = SYSTEM_MESSAGES.CUSTOMIZATION_CAMOUFLAGE_CHANGE_SUCCESS_FREE
typeValue = SystemMessages.SM_TYPE.Information
str = i18n.makeString(key)
else:
if isGold:
key = SYSTEM_MESSAGES.CUSTOMIZATION_CAMOUFLAGE_CHANGE_SUCCESS_GOLD
fCost = BigWorld.wg_getGoldFormat(cost)
typeValue = SystemMessages.SM_TYPE.CustomizationForGold
else:
key = SYSTEM_MESSAGES.CUSTOMIZATION_CAMOUFLAGE_CHANGE_SUCCESS_CREDITS
fCost = BigWorld.wg_getIntegralFormat(cost)
typeValue = SystemMessages.SM_TYPE.CustomizationForCredits
str = i18n.makeString(key, fCost)
self.onCustomizationChangeSuccess(str, typeValue)
return
def __onDropVehicleCamouflage(self, resultID, kind):
if resultID < 0:
message = i18n.makeString(SYSTEM_MESSAGES.CUSTOMIZATION_CAMOUFLAGE_DROP_SERVER_ERROR)
self.onCustomizationDropFailed(message)
return
else:
item = self.currentItemsByKind.get(kind)
hangarItem = CustomizationHelper.getItemFromHangar(CUSTOMIZATION_ITEM_TYPE.CAMOUFLAGE_TYPE, item.get('id'), self._nationID)
if hangarItem:
intCD = g_currentVehicle.item.intCD
vehicle = vehicles.getVehicleType(int(intCD))
message = i18n.makeString(SYSTEM_MESSAGES.CUSTOMIZATION_CAMOUFLAGE_STORED_SUCCESS, vehicle=vehicle.userString)
else:
message = i18n.makeString(SYSTEM_MESSAGES.CUSTOMIZATION_CAMOUFLAGE_DROP_SUCCESS)
if g_tankActiveCamouflage.has_key(g_currentVehicle.item.intCD):
del g_tankActiveCamouflage[g_currentVehicle.item.intCD]
newID = None
newLifeCycle = None
if gui.game_control.g_instance.igr.getRoomType() != IGR_TYPE.NONE:
camouflages = g_currentVehicle.item.descriptor.camouflages
camo = camouflages[kind]
if camo[0] is not None:
newID = camo[0]
newLifeCycle = (camo[1], camo[2])
item['id'] = newID
item['lifeCycle'] = newLifeCycle
if CAMOUFLAGE_KINDS.get(self._itemsDP.currentGroup) == kind:
self._itemsDP.currentItemID = newID
self.onCustomizationDropSuccess(message)
return
# okay decompyling c:\Users\PC\wotsources\files\originals\res\scripts\client\gui\scaleform\daapi\view\lobby\customization\camouflageinterface.pyc
# decompiled 1 files: 1 okay, 0 failed, 0 verify failed
# 2016.02.14 12:39:08 Central Europe (standard time)
| [
"[email protected]"
]
| |
b73d1826be68e566cc4418a478ee654d378cc0a6 | 073d40d3ea58e37d8a130794910068005f3f259d | /processing/surface_based_analysis.py | 56afba929f609a17760fcae36ccf26cd024a0541 | [
"BSD-2-Clause"
]
| permissive | KamalakerDadi/public_analysis_code | bd925f442d32fbedc56e145ad0bc981d5ac3924c | b8770d485fd2697838b911120c41d91250671636 | refs/heads/master | 2020-03-20T21:10:33.759118 | 2018-07-30T18:27:10 | 2018-07-30T18:27:10 | 137,727,239 | 0 | 0 | null | 2018-06-18T08:27:58 | 2018-06-18T08:27:58 | null | UTF-8 | Python | false | false | 5,182 | py | """
This script does 2 things:
1. Freesurfer segmentation
2. project the coregistered fMRI images to the surface:
the surface is the grey-white matter interface of the subject
The purpose is to perform proper group analysis on the surface on fsaverage,
and use existing atlases on the surface.
Author: Bertrand Thirion, Isabelle Courcol, 2013 -- 2016
Note
----
First run: export SUBJECTS_DIR=''
"""
import os
import glob
import commands
from nipype.caching import Memory
from joblib import Parallel, delayed
from nipype.interfaces.freesurfer import ReconAll, BBRegister
work_dir = '/neurospin/ibc/derivatives'
subjects = ['sub-%02d' % i for i in [1, 4, 5, 6, 7, 8, 9, 11, 12, 13, 14, 15]]
subjects = ['sub-%02d' % i for i in [8, 9, 11, 12, 13, 14]]
mem = Memory(base_dir='/neurospin/tmp/ibc')
# Step 1: Perform recon-all
os.environ['SUBJECTS_DIR'] = ''
def recon_all(work_dir, subject, high_res=True):
# create directories in output_dir
if high_res:
# high-resolution T1
anat_img = glob.glob(os.path.join(
work_dir, subject, 'ses-*/anat/sub-*_ses-*_acq-highres_T1w.nii*'))[0]
print(anat_img)
t1_dir = os.path.dirname(anat_img)
os.system('recon-all -all -subjid %s -sd %s -hires -i %s -expert expert.opts' % (subject, t1_dir, anat_img))
else:
# low-resolution T1
subject_dir = os.path.join(work_dir, subject, 'ses-00')
t1_dir = os.path.join(subject_dir, 'anat')
anat_img = glob.glob(os.path.join(t1_dir, '%s_ses-00_T1w.nii*' % subject))[0]
# reconall = mem.cache(ReconAll)
#reconall(subject_id=subject,
# directive='all',
# subjects_dir=t1_dir,
# T1_files=anat_img)
os.system('recon-all -all -subjid %s -sd %s' % (subject, t1_dir))
#Parallel(n_jobs=1)(delayed(recon_all)(work_dir, subject, True)
# for subject in subjects)
# Step 2: Perform the projection
def project_volume(work_dir, subject, sessions, do_bbr=True):
t1_dir = os.path.join(work_dir, subject, 'ses-00', 'anat')
for session in sessions:
subject_dir = os.path.join(work_dir, subject, session)
if not os.path.exists(subject_dir):
continue
fmri_dir = os.path.join(subject_dir, 'func')
fs_dir = os.path.join(subject_dir, 'freesurfer')
fmri_images = glob.glob(os.path.join(fmri_dir, 'rdc*.nii.gz'))
# --------------------------------------------------------------------
# run the projection using freesurfer
os.environ['SUBJECTS_DIR'] = t1_dir
if not os.path.exists(fs_dir):
os.mkdir(fs_dir)
# take the fMRI series
print("fmri_images", fmri_images)
for fmri_session in fmri_images:
basename = os.path.basename(fmri_session).split('.')[0]
print (basename)
# output names
# the .gii files will be put in the same directory as the input fMRI
left_fmri_tex = os.path.join(fs_dir, basename + '_lh.gii')
right_fmri_tex = os.path.join(fs_dir, basename + '_rh.gii')
if do_bbr:
# use BBR registration to finesse the coregistration
bbreg = BBRegister(subject_id=subject, source_file=fmri_session,
init='header', contrast_type='t2')
bbreg.run()
# run freesrufer command for projection
regheader = os.path.join(fmri_dir, basename + '_bbreg_%s.dat' % subject)
print(commands.getoutput(
'$FREESURFER_HOME/bin/mri_vol2surf --src %s --o %s '\
'--out_type gii --srcreg %s --hemi lh --projfrac-avg 0 2 0.1'
% (fmri_session, left_fmri_tex, regheader)))
print(commands.getoutput(
'$FREESURFER_HOME/bin/mri_vol2surf --src %s --o %s '\
'--out_type gii --srcreg %s --hemi rh --projfrac-avg 0 2 0.1'
% (fmri_session, right_fmri_tex, regheader)))
# resample to fsaverage
left_fsaverage_fmri_tex = os.path.join(
fs_dir, basename + '_fsaverage_lh.gii')
right_fsaverage_fmri_tex = os.path.join(
fs_dir, basename + '_fsaverage_rh.gii')
print(commands.getoutput(
'$FREESURFER_HOME/bin/mri_surf2surf --srcsubject %s --srcsurfval '\
'%s --trgsurfval %s --trgsubject ico --trgicoorder 7 '\
'--hemi lh --nsmooth-out 5' %
(subject, left_fmri_tex, left_fsaverage_fmri_tex)))
print(commands.getoutput(
'$FREESURFER_HOME/bin/mri_surf2surf --srcsubject %s --srcsurfval '\
'%s --trgsubject ico --trgicoorder 7 --trgsurfval %s '\
'--hemi rh --nsmooth-out 5' %
(subject, right_fmri_tex, right_fsaverage_fmri_tex)))
from pipeline import get_subject_session
subject_sessions = sorted(get_subject_session('enumeration'))
Parallel(n_jobs=4)(
delayed(project_volume)(work_dir, subject_session[0], [subject_session[1]], do_bbr=True)
for subject_session in subject_sessions)
| [
"[email protected]"
]
| |
b33dc6ed7a11b4a2e3127592c66b9d813072b574 | e62a8943ea2cc45b660b17ab10e238e7cb4642dc | /CompareMatchingMethod_Codec_ORB.py | 0a47f5d8d3a1da1bab046fd0ef44a1cec40c6526 | []
| no_license | mkjubran/HMS | 6229fd36e7f01b93be6f572e59e26a42a1058257 | 2402380d4e68c9b924303a8e1efac6af434d3a57 | refs/heads/master | 2021-04-09T15:18:07.334487 | 2020-11-10T19:49:30 | 2020-11-10T19:49:30 | 125,491,202 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 21,171 | py | #Frame1: Type POC QPoffset QPOffsetModelOff QPOffsetModelScale CbQPoffset CrQPoffset QPfactor tcOffsetDiv2 betaOffsetDiv2 temporal_id #ref_pics_active #ref_pics reference pictures predict deltaRPS #ref_idcs reference idcs
#print >> fid, 'Frame1: P 1 5 -6.5 0.2590 0 0 1.0 0 0 0 1 1 -1 0');
from __future__ import division
from numpy import *
import numpy as np
import cv2, os, sys, subprocess, pdb
import argparse
import ConfigParser
import time, re, datetime
import math
import matplotlib.pyplot as plt
FRMPERWIN = 1 ; INF = 999
###--------------------------------------------------------------
## Parse configuration Parameters from the configuration file
def main(argv=None):
# Do argv default this way, as doing it in the functional
# declaration sets it at compile time.
if argv is None:
argv = sys.argv
# Parse any conf_file specification
# We make this parser with add_help=False so that
# it doesn't parse -h and print help.
conf_parser = argparse.ArgumentParser(
description=__doc__, # printed with -h/--help
# Don't mess with format of description
formatter_class=argparse.RawDescriptionHelpFormatter,
# Turn off help, so we print all options in response to -h
add_help=False
)
conf_parser.add_argument("-c", "--conf_file",
help="Specify config file", metavar="FILE")
args, remaining_argv = conf_parser.parse_known_args()
defaults = { "option":"default"}
if args.conf_file:
config = ConfigParser.SafeConfigParser()
config.read([args.conf_file])
defaults.update(dict(config.items("Parametters")))
#print(dict(config.items("Parametters")))
# Parse rest of arguments
# Don't suppress add_help here so it will handle -h
parser = argparse.ArgumentParser(
# Inherit options from config_parser
parents=[conf_parser]
)
parser.set_defaults(**defaults)
args = parser.parse_args(remaining_argv)
return(args)
###--------------------------------------------------------------
def call(cmd):
# proc = subprocess.Popen(["cat", "/etc/services"], stdout=subprocess.PIPE, shell=True)
#proc = subprocess.Popen(cmd, \
# stdout=subprocess.PIPE, shell=True)
proc = subprocess.Popen(cmd,stdout=subprocess.PIPE, shell=True)
(out, err) = proc.communicate()
return (out, err)
###--------------------------------------------------------------
def call_bg(cmd):
proc = subprocess.Popen(cmd,stdout=subprocess.PIPE, shell=True)
return proc
###--------------------------------------------------------------
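## prepare_video: dump every frame of the input video to PNG with ffmpeg, then rebuild a
## lossless (qp 0) MP4 and a raw YUV copy under ../CodecSIFT to be used as the encoder input.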
def prepare_video(fn):
osout = call('rm -rf ../CodecSIFT')
osout = call('mkdir ../CodecSIFT')
osout = call('mkdir ../CodecSIFT/pngall')
osout = call('ffmpeg -r 1 -i {} -r 1 -qp 0 ../CodecSIFT/pngall/%d.png'.format(fn))
osout = call('ffmpeg -start_number 0 -i ../CodecSIFT/pngall/%d.png -c:v libx264 -vf "fps=25,format=yuv420p" -qp 0 ../CodecSIFT/{}_CodecSIFT.mp4'.format(fnname))
osout = call('ffmpeg -y -i ../CodecSIFT/{}_CodecSIFT.mp4 -vcodec rawvideo -pix_fmt yuv420p -qp 0 ../CodecSIFT/{}_CodecSIFT.yuv'.format(fnname,fnname))
return
###--------------------------------------------------------------
## Building Configuration File
def Build_encoding_struct_stitch():
iFNums=map(int, range(GOP+1))
## get total number of frames
NumFrames=round(len(iFNums))
NumFrames=int(NumFrames)
##write config files header
fid = open('../CodecSIFT/encoder_HMS_GOP.cfg','w')
print >> fid, '#======== Coding Structure ============='
print >> fid, 'IntraPeriod : -1 # Period of I-Frame ( -1 = only first)'
  print >> fid, 'DecodingRefreshType : 2 # Random Access 0:none, 1:CRA, 2:IDR, 3:Recovery Point SEI'
print >> fid, 'GOPSize : '+str(GOP)+' # GOP Size (number of B slice = GOPSize-1)'
print >> fid, 'ReWriteParamSetsFlag : 1 # Write parameter sets with every IRAP'
  print >> fid, '# Type POC QPoffset QPOffsetModelOff QPOffsetModelScale CbQPoffset CrQPoffset QPfactor tcOffsetDiv2 betaOffsetDiv2 temporal_id #ref_pics_active #ref_pics reference pictures predict deltaRPS #ref_idcs reference idcs'
print >> fid,''
## Produce iFNums_array2 [StitchFrame; other frames ordered]
iFNums_array = np.array(iFNums)
#iFNums_array=iFNums_array.clip(0, 999999999)
#indexes = np.unique(iFNums_array, return_index=True)[1]
#iFNums_array=[iFNums_array[index] for index in sorted(indexes)]
#iFNums_array=np.array(iFNums_array)
ref_pics_Stitching_array=np.array([StitchFrame])
ref_pics_RemovedStitching_array=np.array(range(0,NumFrames))
index=np.where(np.isin(ref_pics_RemovedStitching_array,ref_pics_Stitching_array))
ref_pics_RemovedStitching_array=np.delete(ref_pics_RemovedStitching_array,index)
ref_pics_RemovedStitching_array.sort()
iFNums_array2=np.concatenate((ref_pics_Stitching_array,ref_pics_RemovedStitching_array), axis=0) #Stitching Frames + Ordered remaining Frames
#print(iFNums_array2)
ref_pics_active_Stitching=1
ref_pics_active_Max=1
  ## Building encoding structure for Stitching mode
ref_pics_stitch_to_use=[]
if 0 in ref_pics_Stitching_array:
if ref_pics_active_Stitching>0:
ref_pics_stitch_to_use=np.append(ref_pics_stitch_to_use,0)
ref_pics=np.array([StitchFrame])
GOPLine='Frame' + str(1) + ': I '+ str(StitchFrame) +' 0 -6.5 0.2590 0 0 1.0 0 0 0 '+ str(0) + ' ' + str(0)+' '+str(int(0))
print >> fid, GOPLine
cntin=1
for cnt in range(1,NumFrames):
if cnt != StitchFrame:
GOPLine='Frame' + str(cnt+cntin) + ': P '+ str(cnt) +' 0 -6.5 0.2590 0 0 1.0 0 0 0 '+ str(len(ref_pics)) + ' ' + str(len(ref_pics))
for cnt1 in range(len(ref_pics)):
GOPLine=GOPLine+' '+str(int(ref_pics[cnt1]-cnt))
GOPLine=GOPLine+' 2 0'
print >> fid, GOPLine
else:
cntin=0
###--------------------------------------------------------------
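## Encode_decode_video: run the HM reference encoder (TAppEncoderStatic) on the prepared YUV
## with the stitch-aware GOP config, then decode the bitstream back to a reconstructed YUV.
## Note: the RateControl branch multiplies GOP by `alpha`, which appears undefined in this script.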
def Encode_decode_video():
print('Encoding Video')
InputYUV='../CodecSIFT/{}_CodecSIFT.yuv'.format(fnname)
BitstreamFile='../CodecSIFT/{}_CodecSIFT.bin'.format(fnname)
ReconFile='../CodecSIFT/{}_CodecSIFT_Recon.yuv'.format(fnname)
osout = call('rm -rf {}'.format(BitstreamFile))
osout = call('cp -f ./encoder_HMS.cfg ../CodecSIFT/encoder_HMS.cfg')
if RateControl==0:
osout=call_bg('./HMS/bin/TAppEncoderStatic -c ../CodecSIFT/encoder_HMS.cfg -c ../CodecSIFT/encoder_HMS_GOP.cfg --InputFile={} --SourceWidth={} --SourceHeight={} --SAO=0 --QP={} --FrameRate={} --FramesToBeEncoded={} --MaxCUSize={} --MaxPartitionDepth={} --QuadtreeTULog2MaxSize=4 --BitstreamFile="{}" --RateControl={} --TargetBitrate={} '.format(InputYUV,Width,Height,QP,fps,GOP,MaxCUSize,MaxPartitionDepth,BitstreamFile,RateControl,rate))
else:
osout=call_bg('./HMS/bin/TAppEncoderStatic -c ../CodecSIFT/encoder_HMS.cfg -c ../CodecSIFT/encoder_HMS_GOP.cfg --InputFile={} --SourceWidth={} --SourceHeight={} --SAO=0 --QP={} --FrameRate={} --FramesToBeEncoded={} --MaxCUSize={} --MaxPartitionDepth={} --QuadtreeTULog2MaxSize=4 --BitstreamFile="{}" --RateControl={} --TargetBitrate={} &'.format(InputYUV,Width,Height,QP,fps,GOP*alpha,MaxCUSize,MaxPartitionDepth,BitstreamFile,RateControl,rate))
encoderlogfile='../CodecSIFT/encoderlog.dat'
fid = open(encoderlogfile,'w')
fid.write(osout.stdout.read())
fid.close
osout.stdout.read()
print('Decoding Video')
osout = call('rm -rf {}'.format(ReconFile))
osout=call_bg('./HMS/bin/TAppDecoderStatic --BitstreamFile="{}" --ReconFile="{}" &'.format(BitstreamFile,ReconFile))
decoderlogfile='../CodecSIFT/decoderlog.dat'
fid = open(decoderlogfile,'w')
fid.write(osout.stdout.read())
fid.close
return
###--------------------------------------------------------------
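## Measure_Rate_PSNR: run the external Quality/measure.py on the original vs. reconstructed
## YUV and append its output to the encoder log so it can be parsed later.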
def Measure_Rate_PSNR():
InputYUV='../CodecSIFT/{}_CodecSIFT.yuv'.format(fnname)
ReconFile='../CodecSIFT/{}_CodecSIFT_Recon.yuv'.format(fnname)
(osout,err)=call('python ./Quality/measure.py {} {} {} {} &'.format(InputYUV,ReconFile,Width,Height))
encoderlogfile='../CodecSIFT/encoderlog.dat'
fid = open(encoderlogfile,'a')
fid.write(osout)
fid.close
return
###--------------------------------------------------------------
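## Edit_encoder_log: parse the encoder log, collecting per-frame bits (POC lines) and PSNR
## (Frame lines), convert PSNR to MSE to keep a running video-level PSNR, fill the global
## Rate_PSNR array, and write both the edited summary log and a *All.dat copy.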
def Edit_encoder_log():
PIXEL_MAX = 255.0
mseY=0
mseU=0
mseV=0
mseYUV=0
NumFramesPSNR=0
NumFramesRate=0
TotalBits=0
CombinedLinesRateAll=[]
CombinedLinesPSNRAll=[]
CombinedLinesRate=[]
CombinedLinesPSNR=[]
encoderlogfile='../CodecSIFT/encoderlog.dat'
with open(encoderlogfile) as f:
Lines = f.readlines()
f.close()
cnt_col_Rate=0
cnt_col_PSNR=0
for cnt in range(len(Lines)):
templine=(Lines[cnt][:]).rstrip()
templine=templine.replace(" "," ")
templine=templine.replace(" "," ")
templine=templine.replace(" "," ")
if templine.split(' ')[0] == 'POC':
#print('{} ... {}'.format(cnt_row,cnt_col_Rate))
CombinedLinesRateAll.append(Lines[cnt][:])
CombinedLinesRate.append(Lines[cnt][:])
cnt_col_Rate=cnt_col_Rate+1
TotalBits=TotalBits+int(templine.split(' ')[11])
NumFramesRate=NumFramesRate+1
if (NumFramesRate>0):
AverageRate=(TotalBits/NumFramesRate)*fps
if (((re.split(' |:',templine)[0]) == 'Frame') and ((re.split(' |:',templine)[3]) == '[Y')):
CombinedLinesPSNRAll.append(Lines[cnt][:])
PSNRYFrame=re.split(' |:',templine)[4]
PSNRUFrame=re.split(' |:',templine)[6]
PSNRVFrame=re.split(' |:',templine)[8]
PSNRYUVFrame=re.split(' |:',templine)[10]
PSNRYFrame=float(PSNRYFrame[0:(len(PSNRYFrame)-2)])
PSNRUFrame=float(PSNRUFrame[0:(len(PSNRUFrame)-2)])
PSNRVFrame=float(PSNRVFrame[0:(len(PSNRVFrame)-2)])
PSNRYUVFrame=float(PSNRYUVFrame[0:(len(PSNRYUVFrame)-3)])
mseYFrame=((PIXEL_MAX)/(10**(PSNRYFrame/20)))**2
mseY=mseY+mseYFrame
mseUFrame=((PIXEL_MAX)/(10**(PSNRUFrame/20)))**2
mseU=mseU+mseUFrame
mseVFrame=((PIXEL_MAX)/(10**(PSNRVFrame/20)))**2
mseV=mseV+mseVFrame
mseYUVFrame=((PIXEL_MAX)/(10**(PSNRYUVFrame/20)))**2
mseYUV=mseYUV+mseYUVFrame
NumFramesPSNR=NumFramesPSNR+1
PSNRYVideo=20 * math.log10(PIXEL_MAX / (math.sqrt(mseY/NumFramesPSNR)))
PSNRUVideo=20 * math.log10(PIXEL_MAX / (math.sqrt(mseU/NumFramesPSNR)))
PSNRVVideo=20 * math.log10(PIXEL_MAX / (math.sqrt(mseV/NumFramesPSNR)))
PSNRYUVVideo=20 * math.log10(PIXEL_MAX / (math.sqrt(mseYUV/NumFramesPSNR)))
templineNew=('Frame {0:3d}: [Y {1:1.4f}dB U {2:1.4f}dB V {3:1.4f}dB YUV {4:1.4f}dB] ..... Video: [Y {5:1.4f}dB U {6:1.4f}dB V {7:1.4f}dB YUV {8:1.4f}dB]').format(NumFramesPSNR,PSNRYFrame,PSNRUFrame,PSNRVFrame,PSNRYUVFrame,PSNRYVideo,PSNRUVideo,PSNRVVideo,PSNRYUVVideo)
CombinedLinesPSNR.append(templineNew)
cnt_col_PSNR=cnt_col_PSNR+1
## write to edited log file
fid = open(Edited_encoder_log,'w')
fid.write('Input File (MP4) = {}\n'.format(vid))
fid.write('QP = {}\n'.format(QP))
fid.write('MaxCUSize = {}\n'.format(MaxCUSize))
fid.write('MaxPartitionDepth = {}\n'.format(MaxPartitionDepth))
fid.write('fps = {}\n'.format(fps))
fid.write('RateControl = {}\n'.format(RateControl))
fid.write('rate = {}\n'.format(rate))
## write PSNR
for cnt in range(len(CombinedLinesPSNR)):
templine=CombinedLinesPSNR[cnt][:].replace(" "," ")
templine=templine.replace(" "," ")
templine=templine.replace(" "," ")
templine=templine.replace(" "," ")
templine=templine.rstrip()
templine=templine.split(' ')
fid.write('Frame {} {} {} {} {} {} {} {} {} {} {} {} {} {} {} {} {} {} {}\n'.format(cnt,str(templine[2]),str(templine[3]),str(templine[4]),str(templine[5]),str(templine[6]),str(templine[7]),str(templine[8]),str(templine[9]),str(templine[10]),str(templine[11]),str(templine[12]),str(templine[13]),str(templine[14]),str(templine[15]),str(templine[16]),str(templine[17]),str(templine[18]),str(templine[19])))
PSNR_temp=str(templine[3])
Rate_PSNR[cnt,0]=cnt
Rate_PSNR[cnt,2]=float(PSNR_temp[0:(len(PSNR_temp)-2)])
## write Rate
fid.write('\n\n')
for cnt in range(len(CombinedLinesRate)):
templine=CombinedLinesRate[cnt][:].replace(" "," ")
templine=templine.replace(" "," ")
templine=templine.replace(" "," ")
templine=templine.replace(" "," ")
templine=templine.split(' ')
fid.write('POC {} {} {} {} {} {} {} {} {} {} {} {} {} {} {} {} {} {} {} {} {}\n'.format(cnt,str(templine[2]),str(templine[3]),str(templine[4]),str(templine[5]),str(templine[6]),str(templine[7]),str(templine[8]),str(templine[9]),str(templine[10]),str(templine[11]),str(templine[12]),str(templine[13]),str(templine[14]),str(templine[15]),str(templine[16]),str(templine[17]),str(templine[18]),str(templine[19]),str(templine[20]),str(templine[21]),str(templine[22])))
Rate_temp=str(templine[11])
Rate_PSNR[cnt,1]=float(Rate_temp)
fid.write('\nNumber of Frames = {}\n'.format(NumFramesRate))
fid.write('Written bites = {}\n'.format(TotalBits))
fid.write('Bit Rate = {} kbps\n'.format(AverageRate/1000))
fid.close
fid = open((Edited_encoder_log[0:(len(Edited_encoder_log)-4)]+'All.dat'),'w')
for cnt in range(len(CombinedLinesPSNRAll)):
templine=CombinedLinesPSNRAll[cnt][:].replace(" "," ")
templine=templine.replace(" "," ")
templine=templine.replace(" "," ")
templine=templine.replace(" "," ")
templine=templine.split(' ')
#print('Frame {}...{}'.format(cnt,templine[2:10]))
fid.write('Frame {} {} {} {} {} {} {} {} {}\n'.format(str(templine[1]),str(templine[2]),str(templine[3]),str(templine[4]),str(templine[5]),str(templine[6]),str(templine[7]),str(templine[8]),str(templine[9]),str(templine[10])))
fid.write('\n\n')
for cnt in range(len(CombinedLinesRateAll)):
templine=CombinedLinesRateAll[cnt][:].replace(" "," ")
templine=templine.replace(" "," ")
templine=templine.replace(" "," ")
templine=templine.replace(" "," ")
templine=templine.split(' ')
#print('POC {}...{}'.format(cnt,templine[2:22]))
fid.write('POC {} {} {} {} {} {} {} {} {} {} {} {} {} {} {} {} {} {} {} {} {}\n'.format(str(templine[1]),str(templine[2]),str(templine[3]),str(templine[4]),str(templine[5]),str(templine[6]),str(templine[7]),str(templine[8]),str(templine[9]),str(templine[10]),str(templine[11]),str(templine[12]),str(templine[13]),str(templine[14]),str(templine[15]),str(templine[16]),str(templine[17]),str(templine[18]),str(templine[19]),str(templine[20]),str(templine[21]),str(templine[22])))
fid.close
return
###################################################################3
## check similarity using SIFT
def call_err(cmd):
# proc = subprocess.Popen(["cat", "/etc/services"], stdout=subprocess.PIPE, shell=True)
proc = subprocess.Popen(cmd, \
stdout=subprocess.PIPE, shell=True)
(out, err) = proc.communicate()
return (out, err)
def get_frames_list(fn):
osout = call_err('ls -v ../CodecSIFT/pngall/*.png') ; lfrmall = osout[0]
lfrmall = lfrmall.split('\n')[0:-1]
return lfrmall
def make_windows(lfrm, numfrmwin):
numfrm = len(lfrm) ; numwin = numfrm/numfrmwin
lwin = []
for i in range(0, numfrm, numfrmwin ): lwin.append(lfrm[i:i+numfrmwin])
return lwin
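# comp_similarity: compare every window against the stitch window(s); frame indices are
# parsed out of the PNG file names and used to fill the lwinsim similarity matrix in place.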
def comp_similarity(lwin_,lwin_sc_,lwinsim):
for win in lwin_:
now = datetime.datetime.now()
#print('{} ... {}').format(win,now.strftime("%Y-%m-%d %H:%M:%S"))
for win_sc in lwin_sc_:
s=re.split('/',str(win))[-1]
iwin=int(s[0:(len(s)-6)])
s=re.split('/',win_sc)[-1]
iwin_sc=int(s[0:(len(s)-4)])
lwinsim[iwin-1][iwin_sc-1]=window_similarity(win, win_sc)
#print('{}..&..{}=..{}').format(win,win_sc,lwinsim[iwin-1][iwin_sc-1])
return lwinsim
def window_similarity(win_0, win_1):
lfrmsim = []
if (type(win_0) == str and type(win_1) == str):
lfrmsim.append(content_similarity(win_0, win_1))
elif (type(win_0) == str and type(win_1) <> str):
lfrmsim.append(content_similarity(win_0, win_1[0]))
elif (type(win_0) <> str and type(win_1) == str):
lfrmsim.append(content_similarity(win_0[0], win_1))
else:
lfrmsim.append(content_similarity(win_0[0], win_1[0]))
return np.mean(lfrmsim)
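# content_similarity: ORB keypoints + brute-force Hamming matching with cross-check; the
# score is the mean distance of matches closer than 100 (lower = more similar), or INF when
# descriptors are missing or no usable matches are found.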
def content_similarity(img_0, img_1):
img1 = cv2.imread(img_0, 0)
img2 = cv2.imread(img_1, 0)
# Initiate SIFT detector
orb = cv2.ORB_create(nfeatures=100000)
# find the keypoints and descriptors with SIFT
kp1, des1 = orb.detectAndCompute(img1,None)
kp2, des2 = orb.detectAndCompute(img2,None)
if (type(des1)==type(des2)):
# create BFMatcher object
bf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)
# Match descriptors.
matches = bf.match(des1,des2)
# Sort them in the order of their distance.
matches = sorted(matches, key = lambda x:x.distance)
distances = [ _.distance for _ in matches if _.distance < 100]
if not distances:
simind_1=INF
else:
simind_1 = np.mean(distances)
if math.isnan(simind_1):
simind_1=INF
simind_2=simind_1
simind = (simind_1 + simind_2)/float(2)
else:
simind=INF
return simind
##################################################################
## Main Body
if __name__ == "__main__":
np.set_printoptions(threshold='nan')
args=main()
##Inputs
StitchFrame=int(args.stitchframe);
vid=args.vid;
mode=args.mode;
fps=int(args.fps);
GOP=int(args.gop);
Width=int(args.w);
Height=int(args.h);
QP=int(args.qp);
MaxCUSize=int(args.maxcusize);
MaxPartitionDepth=int(args.maxpartitiondepth);
RateControl=int(args.ratecontrol);
rate=int(args.rate);
Edited_encoder_log=args.edited_encoder_log
fsr=fps
fnname=vid.split('/')[-1]
fnname=fnname[0:(len(fnname)-4)]
if GOP%2!=0:
GOP=int(GOP/2) * 2
prepare_video(vid)
Build_encoding_struct_stitch()
Encode_decode_video()
Measure_Rate_PSNR()
Rate_PSNR=np.full((GOP,3), INF,float)
Edit_encoder_log()
Rate_PSNR=np.array(Rate_PSNR)
#print(Rate_PSNR)
fname=fnname
lfrm = get_frames_list(vid);
lfrm=lfrm[0:GOP]
lwin = make_windows(lfrm, FRMPERWIN)
lwinsim=np.full((len(lwin),len(lwin)), INF)
lwin_stitch=lwin[StitchFrame-1]
lwinsim=comp_similarity(lwin,lwin_stitch,lwinsim)
Rate=Rate_PSNR[:,1]
Rate_Norm=Rate/np.max(Rate)
PSNR=Rate_PSNR[:,2]
PSNR_Norm=PSNR/np.max(PSNR)
s=re.split('/',str(lwin_stitch))[-1]
lwinsim=np.array(lwinsim)
SIFT_score=lwinsim[:,int(s[0:(len(s)-6)])-1]
SIFT_score=SIFT_score[0:GOP]
SIFT_score_Norm=SIFT_score/np.max(SIFT_score)
SIFT_score=SIFT_score.reshape(len(SIFT_score),1)
Rate_PSNR_SIFT=np.concatenate((Rate_PSNR, SIFT_score),axis=1)
np.save(('../savenpy/'+fnname+'_Rate_PSNR_ORB'),Rate_PSNR_SIFT)
#print(Rate_PSNR_SIFT)
fig1, ax1 =plt.subplots()
ax1.plot(range(len(SIFT_score_Norm)),SIFT_score_Norm,'-k')
ax1.plot(range(len(Rate)),Rate_Norm,'--b')
ax1.plot(range(len(PSNR)),PSNR_Norm,':r')
ax1.set_title('ORB Similarity Score & CODEC Rate PSNR')
ax1.set_xlabel('Frame Number')
#ax1.set_ylabel('Average SIFT Score')
ax1.legend(['ORB','Rate','PSNR'])
fig2, ax2 =plt.subplots()
ax2.plot(range(len(SIFT_score_Norm)),SIFT_score_Norm,'-k')
ax2.plot(range(len(Rate)),Rate_Norm,'--b')
ax2.set_title('ORB Similarity Score & CODEC Rate')
ax2.set_xlabel('Frame Number')
#ax2.set_ylabel('Average SIFT Score')
ax2.legend(['ORB','Rate'])
fig3, ax3 =plt.subplots()
ax3.plot(range(len(SIFT_score_Norm)),SIFT_score_Norm,'-k')
ax3.plot(range(len(PSNR)),PSNR_Norm,':r')
ax3.set_title('ORB Similarity Score & CODEC PSNR')
ax3.set_xlabel('Frame Number')
#ax3.set_ylabel('Average SIFT Score')
ax3.legend(['ORB','PSNR'])
fig4, ax4 =plt.subplots()
ax4.plot(range(np.shape(Rate_PSNR_SIFT)[0]),(Rate_PSNR_SIFT[:,3]/np.max(Rate_PSNR_SIFT[:,3])),'-k')
ax4.plot(range(np.shape(Rate_PSNR_SIFT)[0]),(Rate_PSNR_SIFT[:,2]/np.max(Rate_PSNR_SIFT[:,2])),':r')
ax4.plot(range(np.shape(Rate_PSNR_SIFT)[0]),(Rate_PSNR_SIFT[:,1]/np.max(Rate_PSNR_SIFT[:,1])),'--b')
 ax4.set_title('ORB Similarity Score & CODEC Rate PSNR')
 ax4.set_xlabel('Frame Number')
 #ax3.set_ylabel('Average SIFT Score')
 ax4.legend(['ORB','PSNR','Rate'])
plt.show()
| [
"[email protected]"
]
| |
cd15183227ca013ef8df4b0a9c35e52023611ad0 | 4d332c45578246847ef2cdcdeb827ca29ab06090 | /modules/Bio/Blast/Applications.py | 634372703b80657a8c7039c95ba7de9db2e186ef | [
"MIT"
]
| permissive | prateekgupta3991/justforlearn | 616cc297a2a6119fa959b9337a5e91c77a11ebf7 | 3984c64063b356cf89003e17a914272983b6cf48 | refs/heads/master | 2021-03-12T22:09:12.184638 | 2014-01-28T10:37:07 | 2014-01-28T10:37:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 45 | py | /usr/share/pyshared/Bio/Blast/Applications.py | [
"[email protected]"
]
| |
42e44d36df2d8995690e0ac00535e4955d8b3472 | 94d5467b1315791fa75165eb862fdd8fef300958 | /yunyan_baotou/src/business_ultra/init_data.py | 61600784148a3a71b91b0ae55c58a09ba84d4b62 | []
| no_license | scmsqhn/code | e31926174c247d49c1db8f121e3ec1b82f8a2d9d | b389d7dc5fafad8a4185a03cd6d5519ccf8f99df | refs/heads/master | 2022-12-09T05:37:07.065840 | 2019-05-14T01:55:07 | 2019-05-14T01:55:07 | 185,903,771 | 1 | 0 | null | 2022-12-08T05:05:51 | 2019-05-10T02:22:28 | Python | UTF-8 | Python | false | false | 5,621 | py | #!/usr/bin/env python3
import datetime
import pandas as pd
from datetime import datetime
import json
import os
import codecs
import numpy as np
import traceback
import sys
sys.path.append(os.environ['YUNYAN'])
sys.path.append(os.environ['ROOT'])
sys.path.append(os.environ['WORKBENCH'])
#import gensim
#from gensimplus.source.gensim_plus_config import FLAGS
#from gensimplus.source.model_save_load_helper import ModelSaveLoadHelper
#from gensim.models import LsiModel
#from gensim.models import LdaModel
#from gensim.models import TfidfModel
import myconfig
import src
from src import myjieba_posseg
from myjieba_posseg import posseg as posseg
import user_prob
from user_prob.test import new_cut
import re
import numpy as np
import pdb
import codecs
import function_ultra.trie_tree as trie_tree
import function_ultra.utils as utils
#DEBUG = False
DICT = False # True
DEBUG = True
JIEBACUT= True
global r_cnt
global w_cnt
r_cnt = 1
w_cnt = 0
standard_addr = {}
load_json = lambda x:json.load(open(x,'r',encoding='utf-8'))
standard_addr = load_json(myconfig.STDADD)
standard_dct = {}
ks = []
vs = []
for item in standard_addr['RECORDS']:
v = item['name']
k = item['type']
ks.append(k)
vs.append(v)
keys = list(set(ks))
values = list(set(vs))
level_keys = ["省","市","区","社区","村居委会","街路巷名","自然村组",\
"门牌号","小区名","建筑物名称","组团名称","栋号",\
"单元号","楼层","户室号","sent","rw"]
out_keys = ["省","市","区","社区","村居委会","街路巷名","自然村组","门牌号","小区名","组团名称","栋号","单元号","楼层","户室号"]
global global_cnt
def read_standard_data(self,docpath='standard_address.json'):
'''
read word from standard dict, return key words dict
'''
standard_kvs = {}
standard_num = {}
fl = open(docpath,'r',encoding='utf-8')
info = json.load(fl)
    return info # return the standard address library
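    # NOTE: the early return above makes the key/value indexing code below unreachable.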
kvs_lst = info.get('RECORDS','')
for item in kvs_lst:
k = item.get('name','')
v = len(standard_kvs)
standard_kvs[k] = v
for k in standard_kvs:
_k = standard_kvs[k]
_v = k
standard_num[_k] = _v
return standard_kvs, standard_num
def gen_word_tree(filename=myconfig.STDTXTPATH,sav_file=myconfig.MY_WORD):
    print('\n>gen_word_tree start')
my_tree = trie_tree.Trie()
df = open(filename,'r')
lines = df.readlines() #pd.read_csv(filename)
print(len(lines))
for sent in lines:
words = sent.split('/')
for word in words:
my_tree.insert(word)
utils.save_var(my_tree,sav_file)
    print('\n>my word tree save ok')
return my_tree
def gen_std_tree(filename=myconfig.STDTXTPATH,sav_file=myconfig.MY_TREE,delimeter='/'):
print('\n>gen_std_tree start')
my_tree = trie_tree.Trie()
df = open(filename,'r')
lines = df.readlines() #pd.read_csv(filename)
for sent in lines:
words = sent.split(delimeter)
my_tree.insert(words)
utils.save_var(my_tree,sav_file)
print('\n>my std tree save ok')
return my_tree
def remove_nan(item):
clritem = []
for node in item:
if 'nan' in node:
continue
clritem.append(node)
return clritem
def gen_std_tree_from_dataframe(data_src, sav_file=myconfig.MY_TREE):
    # build the standard address tree from a dataframe
print('\n>gen_std_tree_from_dataframe start')
my_tree = trie_tree.Trie()
for item in data_src:
clritem = remove_nan(item)
print(clritem)
        # pdb.set_trace()  # debug breakpoint disabled so the batch insert can run unattended
my_tree.part_insert(my_tree.root,clritem)
utils.save_var(my_tree,sav_file)
print('\n>gen_std_tree_from_dataframe ready and save finish')
return myconfig.SUCCESS
def gen_address_tree(filename=myconfig.STDTXTPATH,sav_file=myconfig.MY_TREE):
print('\n>gen_address_tree start')
my_tree = trie_tree.Trie()
df = open(filename,'r')
lines = df.readlines() #pd.read_csv(filename)
for sent in lines:
my_tree.insert(sent)
utils.save_var(my_tree,sav_file)
print('\n>my address tree save ok')
return my_tree
def gen_zhengzhou_tree(dirname=myconfig.ZZ_STD_ADD,sav_file=myconfig.zhengzhou_std_word,sav_file_2=myconfig.zhengzhou_std_tree):
addr_kv_rec = open("./addr_match.txt",'w+')
print('\n>gen_zhengzhou_tree start')
#pdb.set_trace()
my_tree = trie_tree.Trie()
my_word = trie_tree.Trie()
paths = os.walk(dirname)
sum_lines = []
cnt = 0
for _,_,fs in paths:
for f in fs:
pth = os.path.join(dirname,str(f))
lines = open(pth,'r').readlines()
np.random.shuffle(lines)
#lines = open(pth,'r').readlines()[:myconfig.TRAIN_DATA]
for line in lines:
if not ',' in line:
continue
_line = line.split(',')[1]
line = utils.pre_trans(_line)
addr_kv_rec.write('%s\t%s\n'%(str(line),str(_line)))
cnt+=1
if cnt%10000==1:
print(cnt)
my_tree.insert(line)
my_word.insert(_line)
utils.save_var(my_word,sav_file)
utils.save_var(my_tree,sav_file_2)
print('\n>my address tree save ok')
addr_kv_rec.close()
def load_address_tree(sav_file='./my_tree.pkl'):
my_tree = utils.read_var(sav_file)
return my_tree
#gen_address_tree()
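# NOTE: the guard below tests against "__time__", which never matches __name__, so the
# block underneath is effectively disabled (apparently kept around for manual runs).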
if __name__ == "__time__":
pass
print('')
gen_address_tree(filename='/home/distdev/src/iba/dmp/gongan/gy_addr_normal/pre_data/yyap_address_tree.csv',sav_file='./my_tree.pkl')
| [
"[email protected]"
]
| |
c6d6095b6aecf8907d6dbe353e20a0cf0c58d042 | cc36d7ba409dfc2c9b7252b3c4efa55ca829adb7 | /tests/test_split_and_export.py | 354a7a0d37f2c8667857f6c75d9617afb5048cbd | []
| no_license | shinglyu/MusicPupil-tf | e09b2615047e9b87caa797fd7108b8ae35b34cf5 | 5ae05dc23fef1f9daf9deecd378adee9353a9e66 | refs/heads/master | 2021-05-15T18:10:37.284122 | 2017-12-30T15:34:46 | 2017-12-30T15:34:46 | 107,603,291 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,370 | py | import os
import sys
sys.path.append(
os.path.join(
os.path.dirname(os.path.realpath(__file__)),
'..',
'feature_extractor'
)
)
from unittest.mock import patch, MagicMock
import music21
import csv
import split_and_export
def test_split_train_test():
samples = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h']
# Split in 2 as a group
splitted = split_and_export.split_train_test(samples, int(len(samples)/2))
assert len(splitted) > 1 # More then one way to split
assert len(splitted[0]['training']) > 0
assert len(splitted[0]['testing']) > 0
assert len(splitted[0]['training']) > len(splitted[0]['testing'])
for elem in splitted[0]['testing']:
assert elem not in splitted[0]['training']
def test_export_to_csv():
samples = [
{
"score_features": {
"foo": [1, 2, 3]
},
"perf_features": {
"bar": [7, 8, 9]
}
},
{
"score_features": {
"foo": [4, 5, 6]
},
"perf_features": {
"bar": [10, 11, 12]
}
}
]
split_and_export.export_to_csv(samples, "tests/test_export_training.csv")
with open('tests/test_export_training.csv', newline='') as csvfile:
reader = csv.reader(csvfile, delimiter=',', quotechar='"')
rows = list(reader)
assert rows[0] == ["foo", "bar"]
assert rows[1] == ["1", "7"]
def test_export_all_to_csv():
splits = [
{"training": "training_0", "testing": "testing_0"},
{"training": "training_1", "testing": "testing_1"},
{"training": "training_2", "testing": "testing_2"},
]
with patch("split_and_export.export_to_csv") as mock_export:
split_and_export.export_all_to_csv(splits, "tests/test_export")
mock_export.assert_any_call("testing_0", "tests/test_export_0_testing.csv")
mock_export.assert_any_call("training_0", "tests/test_export_0_training.csv")
mock_export.assert_any_call("testing_1", "tests/test_export_1_testing.csv")
mock_export.assert_any_call("training_1", "tests/test_export_1_training.csv")
mock_export.assert_any_call("testing_2", "tests/test_export_2_testing.csv")
mock_export.assert_any_call("training_2", "tests/test_export_2_training.csv")
| [
"[email protected]"
]
| |
cbf6bc2fa02f3077d4a2e66ac887debcce4bae36 | aba1d17ddc7d7ad9f49e2d6d87600e9e0387ba14 | /mi/dataset/driver/flort_kn/stc_imodem/flort_kn__stc_imodem_recovered_driver.py | 4fe4de3d18ce68d6534b32380e50fd98fe6bab2f | [
"BSD-2-Clause",
"LicenseRef-scancode-unknown-license-reference"
]
| permissive | oceanobservatories/mi-instrument | 3ad880c1366b1a8461fc9085768df0e9ddeb6ef5 | bdbf01f5614e7188ce19596704794466e5683b30 | refs/heads/master | 2023-07-23T07:28:36.091223 | 2023-07-14T15:54:49 | 2023-07-14T15:54:49 | 24,165,325 | 1 | 32 | BSD-2-Clause | 2023-07-13T01:39:22 | 2014-09-17T22:53:22 | Python | UTF-8 | Python | false | false | 877 | py | from mi.dataset.parser.flort_kn__stc_imodem import Flort_kn_stc_imodemParser,Flort_kn_stc_imodemParserDataParticleRecovered
from mi.dataset.dataset_driver import DataSetDriver
from mi.dataset.dataset_parser import DataSetDriverConfigKeys
from mi.core.versioning import version
@version("0.0.2")
def parse(unused, source_file_path, particle_data_handler):
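    # Wrap the source file in a recovered-data FLORT-KN parser and stream every parsed
    # particle into the supplied particle_data_handler.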
with open(source_file_path,"r") as fil :
parser = Flort_kn_stc_imodemParser({
DataSetDriverConfigKeys.PARTICLE_MODULE: "mi.dataset.parser.flort_kn__stc_imodem",
DataSetDriverConfigKeys.PARTICLE_CLASS: "Flort_kn_stc_imodemParserDataParticleRecovered"},
None,
fil,
lambda state, f: None,
lambda state: None)
driver = DataSetDriver(parser, particle_data_handler)
driver.processFileStream()
return particle_data_handler
| [
"[email protected]"
]
| |
60f501dd33bc408bb5b0ce9ae012cb0765548801 | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_144/ch20_2020_03_09_20_17_14_756367.py | 0730239956a1947237866393bd5dc6de5401f7cc | []
| no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 251 | py | distância=float(input("Digite a distância a percorrer:"))
multa = distancia - 200
excesso = multa * 0.45
if distância <= 200:
passagem = 0.5 * distância
else:
passagem = passagem + excesso
print("Preço da passagem: R$ %7.2f" % passagem)
| [
"[email protected]"
]
| |
ceacf929311c32f3db1575b140d2548b6ce06f9d | 2dd26e031162e75f37ecb1f7dd7f675eeb634c63 | /nemo/collections/asr/models/k2_sequence_models.py | 087e9e41b85dd8673ac6e2ff667bad355c5e747f | [
"Apache-2.0"
]
| permissive | NVIDIA/NeMo | 1b001fa2ae5d14defbfd02f3fe750c5a09e89dd1 | c20a16ea8aa2a9d8e31a98eb22178ddb9d5935e7 | refs/heads/main | 2023-08-21T15:28:04.447838 | 2023-08-21T00:49:36 | 2023-08-21T00:49:36 | 200,722,670 | 7,957 | 1,986 | Apache-2.0 | 2023-09-14T18:49:54 | 2019-08-05T20:16:42 | Python | UTF-8 | Python | false | false | 13,426 | py | # Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import List, Optional
from omegaconf import DictConfig
from pytorch_lightning import Trainer
from nemo.collections.asr.models.ctc_bpe_models import EncDecCTCModelBPE
from nemo.collections.asr.models.ctc_models import EncDecCTCModel
from nemo.collections.asr.models.rnnt_bpe_models import EncDecRNNTBPEModel
from nemo.collections.asr.models.rnnt_models import EncDecRNNTModel
from nemo.collections.asr.parts.k2.classes import ASRK2Mixin
from nemo.core.classes.common import PretrainedModelInfo, typecheck
from nemo.utils import logging
class EncDecK2SeqModel(EncDecCTCModel, ASRK2Mixin):
"""Encoder decoder models with various lattice losses."""
def __init__(self, cfg: DictConfig, trainer: Trainer = None):
loss_type = cfg.graph_module_cfg.get("loss_type", "ctc")
if loss_type != "ctc" and loss_type != "mmi":
raise ValueError(f"Class {self.__class__.__name__} does not support `loss_type`={loss_type}")
super().__init__(cfg=cfg, trainer=trainer)
self._init_k2()
@classmethod
def list_available_models(cls) -> Optional[List[PretrainedModelInfo]]:
"""
This method returns a list of pre-trained model which can be instantiated directly from NVIDIA's NGC cloud.
Returns:
List of available pre-trained models.
"""
pass
def change_vocabulary(self, new_vocabulary: List[str]):
"""
        Changes the vocabulary used during CTC decoding. Use this method when fine-tuning from a pre-trained model.
        This method changes only the decoder and leaves the encoder and pre-processing modules unchanged. For example, you
        would use it if you want to use a pretrained encoder when fine-tuning on data in another language, or when you need
        the model to learn capitalization, punctuation and/or special characters.
If new_vocabulary == self.decoder.vocabulary then nothing will be changed.
Args:
new_vocabulary: list with new vocabulary. Must contain at least 2 elements. Typically, \
this is target alphabet.
Returns: None
"""
super().change_vocabulary(new_vocabulary)
if self.use_graph_lm:
self.token_lm = None
logging.warning(
f"""With .change_vocabulary() call for a model with criterion_type=`{self.loss.criterion_type}`,
a new token_lm has to be set manually: call .update_k2_modules(new_cfg)
or update .graph_module_cfg.backend_cfg.token_lm before calling this method."""
)
self.update_k2_modules(self.graph_module_cfg)
@typecheck()
def forward(
self, input_signal=None, input_signal_length=None, processed_signal=None, processed_signal_length=None,
):
"""
Forward pass of the model.
Args:
input_signal: Tensor that represents a batch of raw audio signals,
of shape [B, T]. T here represents timesteps, with 1 second of audio represented as
`self.sample_rate` number of floating point values.
input_signal_length: Vector of length B, that contains the individual lengths of the audio
sequences.
processed_signal: Tensor that represents a batch of processed audio signals,
of shape (B, D, T) that has undergone processing via some DALI preprocessor.
processed_signal_length: Vector of length B, that contains the individual lengths of the
processed audio sequences.
Returns:
A tuple of 3 elements -
1) The log probabilities tensor of shape [B, T, D].
2) The lengths of the acoustic sequence after propagation through the encoder, of shape [B].
3) The greedy token predictions of the model of shape [B, T] (via argmax)
"""
log_probs, encoded_len, greedy_predictions = super().forward(
input_signal=input_signal,
input_signal_length=input_signal_length,
processed_signal=processed_signal,
processed_signal_length=processed_signal_length,
)
return self._forward_k2_post_processing(
log_probs=log_probs, encoded_length=encoded_len, greedy_predictions=greedy_predictions
)
class EncDecK2SeqModelBPE(EncDecCTCModelBPE, ASRK2Mixin):
"""Encoder decoder models with Byte Pair Encoding and various lattice losses."""
def __init__(self, cfg: DictConfig, trainer: Trainer = None):
loss_type = cfg.graph_module_cfg.get("loss_type", "ctc")
if loss_type != "ctc" and loss_type != "mmi":
raise ValueError(f"Class {self.__class__.__name__} does not support `loss_type`={loss_type}")
super().__init__(cfg=cfg, trainer=trainer)
self._init_k2()
@classmethod
def list_available_models(cls) -> Optional[List[PretrainedModelInfo]]:
"""
This method returns a list of pre-trained model which can be instantiated directly from NVIDIA's NGC cloud.
Returns:
List of available pre-trained models.
"""
pass
def change_vocabulary(self, new_tokenizer_dir: str, new_tokenizer_type: str):
"""
Changes vocabulary of the tokenizer used during CTC decoding process.
        Use this method when fine-tuning from a pre-trained model.
        This method changes only the decoder and leaves the encoder and pre-processing modules unchanged. For example, you
        would use it if you want to use a pretrained encoder when fine-tuning on data in another language, or when you need
        the model to learn capitalization, punctuation and/or special characters.
Args:
new_tokenizer_dir: Path to the new tokenizer directory.
new_tokenizer_type: Either `bpe` or `wpe`. `bpe` is used for SentencePiece tokenizers,
whereas `wpe` is used for `BertTokenizer`.
Returns: None
"""
super().change_vocabulary(new_tokenizer_dir, new_tokenizer_type)
if self.use_graph_lm:
self.token_lm = None
logging.warning(
f"""With .change_vocabulary() call for a model with criterion_type=`{self.loss.criterion_type}`,
a new token_lm has to be set manually: call .update_k2_modules(new_cfg)
or update .graph_module_cfg.backend_cfg.token_lm before calling this method."""
)
self.update_k2_modules(self.graph_module_cfg)
@typecheck()
def forward(
self, input_signal=None, input_signal_length=None, processed_signal=None, processed_signal_length=None,
):
"""
Forward pass of the model.
Args:
input_signal: Tensor that represents a batch of raw audio signals,
of shape [B, T]. T here represents timesteps, with 1 second of audio represented as
`self.sample_rate` number of floating point values.
input_signal_length: Vector of length B, that contains the individual lengths of the audio
sequences.
processed_signal: Tensor that represents a batch of processed audio signals,
of shape (B, D, T) that has undergone processing via some DALI preprocessor.
processed_signal_length: Vector of length B, that contains the individual lengths of the
processed audio sequences.
Returns:
A tuple of 3 elements -
1) The log probabilities tensor of shape [B, T, D].
2) The lengths of the acoustic sequence after propagation through the encoder, of shape [B].
3) The greedy token predictions of the model of shape [B, T] (via argmax)
"""
log_probs, encoded_len, greedy_predictions = super().forward(
input_signal=input_signal,
input_signal_length=input_signal_length,
processed_signal=processed_signal,
processed_signal_length=processed_signal_length,
)
return self._forward_k2_post_processing(
log_probs=log_probs, encoded_length=encoded_len, greedy_predictions=greedy_predictions
)
class EncDecK2RnntSeqModel(EncDecRNNTModel, ASRK2Mixin):
"""Encoder decoder models with various lattice losses."""
def __init__(self, cfg: DictConfig, trainer: Trainer = None):
loss_type = cfg.graph_module_cfg.get("loss_type", "rnnt")
criterion_type = cfg.graph_module_cfg.get("criterion_type", "ml")
if loss_type != "rnnt" or criterion_type != "ml":
raise ValueError(
f"""Class {self.__class__.__name__} does not support
`criterion_type`={criterion_type} with `loss_type`={loss_type}"""
)
super().__init__(cfg=cfg, trainer=trainer)
self._init_k2()
@classmethod
def list_available_models(cls) -> Optional[PretrainedModelInfo]:
"""
This method returns a list of pre-trained model which can be instantiated directly from NVIDIA's NGC cloud.
Returns:
List of available pre-trained models.
"""
pass
def change_vocabulary(self, new_vocabulary: List[str]):
"""
        Changes the vocabulary used during decoding. Use this method when fine-tuning from a pre-trained model.
        This method changes only the decoder and leaves the encoder and pre-processing modules unchanged. For example, you
        would use it if you want to use a pretrained encoder when fine-tuning on data in another language, or when you need
        the model to learn capitalization, punctuation and/or special characters.
If new_vocabulary == self.decoder.vocabulary then nothing will be changed.
Args:
new_vocabulary: list with new vocabulary. Must contain at least 2 elements. Typically, \
this is target alphabet.
Returns: None
"""
super().change_vocabulary(new_vocabulary)
if self.use_graph_lm:
self.token_lm = None
logging.warning(
f"""With .change_vocabulary() call for a model with criterion_type=`{self.loss.criterion_type}`,
a new token_lm has to be set manually: call .update_k2_modules(new_cfg)
or update .graph_module_cfg.backend_cfg.token_lm before calling this method."""
)
self.update_k2_modules(self.graph_module_cfg)
class EncDecK2RnntSeqModelBPE(EncDecRNNTBPEModel, ASRK2Mixin):
"""Encoder decoder models with Byte Pair Encoding and various lattice losses."""
def __init__(self, cfg: DictConfig, trainer: Trainer = None):
loss_type = cfg.graph_module_cfg.get("loss_type", "rnnt")
criterion_type = cfg.graph_module_cfg.get("criterion_type", "ml")
if loss_type != "rnnt" or criterion_type != "ml":
raise ValueError(
f"""Class {self.__class__.__name__} does not support
`criterion_type`={criterion_type} with `loss_type`={loss_type}"""
)
super().__init__(cfg=cfg, trainer=trainer)
self._init_k2()
@classmethod
def list_available_models(cls) -> Optional[PretrainedModelInfo]:
"""
This method returns a list of pre-trained model which can be instantiated directly from NVIDIA's NGC cloud.
Returns:
List of available pre-trained models.
"""
pass
def change_vocabulary(self, new_tokenizer_dir: str, new_tokenizer_type: str):
"""
Changes vocabulary of the tokenizer used during CTC decoding process.
        Use this method when fine-tuning from a pre-trained model.
        This method changes only the decoder and leaves the encoder and pre-processing modules unchanged. For example, you
        would use it if you want to use a pretrained encoder when fine-tuning on data in another language, or when you need
        the model to learn capitalization, punctuation and/or special characters.
Args:
new_tokenizer_dir: Path to the new tokenizer directory.
new_tokenizer_type: Either `bpe` or `wpe`. `bpe` is used for SentencePiece tokenizers,
whereas `wpe` is used for `BertTokenizer`.
Returns: None
"""
super().change_vocabulary(new_tokenizer_dir, new_tokenizer_type)
if self.use_graph_lm:
self.token_lm = None
logging.warning(
f"""With .change_vocabulary() call for a model with criterion_type=`{self.loss.criterion_type}`,
a new token_lm has to be set manually: call .update_k2_modules(new_cfg)
or update .graph_module_cfg.backend_cfg.token_lm before calling this method."""
)
self.update_k2_modules(self.graph_module_cfg)
| [
"[email protected]"
]
| |
aa82b974a22240b99dced283997bfed6a235f20a | a9063fd669162d4ce0e1d6cd2e35974274851547 | /swagger_client/models/inline_response20094_site.py | 60809a21527af5e7d917c54707fe326dad72bc22 | []
| no_license | rootalley/py-zoom-api | 9d29a8c750e110f7bd9b65ff7301af27e8518a3d | bfebf3aa7b714dcac78be7c0affb9050bbce8641 | refs/heads/master | 2022-11-07T14:09:59.134600 | 2020-06-20T18:13:50 | 2020-06-20T18:13:50 | 273,760,906 | 1 | 3 | null | null | null | null | UTF-8 | Python | false | false | 4,519 | py | # coding: utf-8
"""
Zoom API
The Zoom API allows developers to safely and securely access information from Zoom. You can use this API to build private services or public applications on the [Zoom App Marketplace](http://marketplace.zoom.us). To learn how to get your credentials and create private/public applications, read our [Authorization Guide](https://marketplace.zoom.us/docs/guides/authorization/credentials). All endpoints are available via `https` and are located at `api.zoom.us/v2/`. For instance you can list all users on an account via `https://api.zoom.us/v2/users/`. # noqa: E501
OpenAPI spec version: 2.0.0
Contact: [email protected]
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class InlineResponse20094Site(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'id': 'str',
'name': 'str'
}
attribute_map = {
'id': 'id',
'name': 'name'
}
def __init__(self, id=None, name=None): # noqa: E501
"""InlineResponse20094Site - a model defined in Swagger""" # noqa: E501
self._id = None
self._name = None
self.discriminator = None
if id is not None:
self.id = id
if name is not None:
self.name = name
@property
def id(self):
"""Gets the id of this InlineResponse20094Site. # noqa: E501
Unique Identifier of the [site](https://support.zoom.us/hc/en-us/articles/360020809672-Managing-Multiple-Sites). # noqa: E501
:return: The id of this InlineResponse20094Site. # noqa: E501
:rtype: str
"""
return self._id
@id.setter
def id(self, id):
"""Sets the id of this InlineResponse20094Site.
Unique Identifier of the [site](https://support.zoom.us/hc/en-us/articles/360020809672-Managing-Multiple-Sites). # noqa: E501
:param id: The id of this InlineResponse20094Site. # noqa: E501
:type: str
"""
self._id = id
@property
def name(self):
"""Gets the name of this InlineResponse20094Site. # noqa: E501
Name of the site. # noqa: E501
:return: The name of this InlineResponse20094Site. # noqa: E501
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""Sets the name of this InlineResponse20094Site.
Name of the site. # noqa: E501
:param name: The name of this InlineResponse20094Site. # noqa: E501
:type: str
"""
self._name = name
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(InlineResponse20094Site, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, InlineResponse20094Site):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| [
"[email protected]"
]
| |
134f1ee4050d20ac333a4b35da4222bf51a32243 | cce0f3939036f536a182d7541b636874cd8247b6 | /xicam/core/data/bluesky_utils.py | f93d436d1f70f7dd0bcf439a14fb2df9fa7ab9e4 | [
"BSD-3-Clause-LBNL"
]
| permissive | Xi-CAM/Xi-cam.core | b942ab7935935b4b514cd8593afcfba83ce7b042 | f993699391439402624934daafe329024165bb0b | refs/heads/master | 2023-08-25T16:16:19.231948 | 2020-05-01T17:28:29 | 2020-05-01T17:28:29 | 111,475,839 | 0 | 0 | NOASSERTION | 2020-04-28T22:51:49 | 2017-11-20T23:55:13 | Python | UTF-8 | Python | false | false | 907 | py | from databroker.core import BlueskyRun
def ndims_from_descriptor(descriptor: dict, field: str):
return len(descriptor['data_keys'][field]['shape']) # NOTE: this doesn't include event dim
def shape_from_descriptor(descriptor: dict, field: str):
return descriptor['data_keys'][field]['shape']
def fields_from_stream(run: BlueskyRun, stream: str):
return fields_from_descriptor(descriptors_from_stream(run, stream))
def descriptors_from_stream(run: BlueskyRun, stream: str):
return run[stream].metadata['descriptors']
def fields_from_descriptor(descriptor):
return list(descriptor['data_keys'].keys())
def streams_from_run(run: BlueskyRun):
return list(run)
def xarray_from_run(run: BlueskyRun, stream: str = None, field: str = None):
data = run.to_dask()
if stream:
data = data[stream]
if field:
data = data[field]
return data
| [
"[email protected]"
]
| |
1553d5d277a72ef2274a5f58479348835444fb15 | c1e31f49a59beb6089328d09040f6f48d2e12cde | /lib/python2.7/tests/test_plotants.py | 7a7cdc4a579d40018e4ad412b42fcc84faf8eb45 | [
"Python-2.0"
]
| permissive | kernsuite-debian/casalite | 3d81761e0d8ae497f97ea242e98d4357618a7591 | b620981f14f4ba5b77f347f649cd2c16d498db04 | refs/heads/master | 2021-06-22T16:22:51.765703 | 2021-02-25T13:28:05 | 2021-02-25T13:28:05 | 80,822,139 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,517 | py | import os
import string
import sys
import shutil
import unittest
from __main__ import default
from tasks import *
#from taskinit import *
from __casac__ import tableplot
'''
Unit tests for task plotants. It tests the following parameters:
vis: wrong and correct values
figfile: if output is created
'''
tp = tableplot.tableplot()
class plotants_test(unittest.TestCase):
# Input and output names
msfile = 'ic2233_1.ms'
res = None
fig = 'plotantstest.png'
#tp = tableplot.tableplot()
def setUp(self):
self.res = None
default(plotants)
# Switch off the displaying of the GUI
tp.setgui(gui=False)
# It is not necessary to copy it for all tests
if (not os.path.exists(self.msfile)):
datapath = os.environ.get('CASAPATH').split()[0] + '/data/regression/ic2233/'
shutil.copytree(datapath+self.msfile, self.msfile)
def tearDown(self):
if (os.path.exists(self.msfile)):
os.system('rm -rf ' + self.msfile)
os.system('rm -rf ' + self.fig)
# Switch GUI back on
tp.setgui(gui=True)
def test1(self):
'''Test 1: Default parameters'''
self.res = plotants()
self.assertFalse(self.res)
def test2(self):
'''Test 2: Bad input file'''
msfile = 'badfile'
self.res = plotants(vis=msfile)
self.assertFalse(self.res)
def test3(self):
'''Test 3: Good input file and output exists'''
self.res = plotants(vis=self.msfile, figfile=self.fig)
self.assertEqual(self.res,None)
self.assertTrue(os.path.exists(self.fig))
def test4(self):
'''Test 4: Label antenna IDs'''
self.res = plotants(vis=self.msfile, figfile=self.fig, antindex=True)
self.assertEqual(self.res,None)
self.assertTrue(os.path.exists(self.fig))
def test5(self):
'''Test 5: Logarithmic antenna positions'''
self.res = plotants(vis=self.msfile, figfile=self.fig, logpos=True)
self.assertEqual(self.res,None)
self.assertTrue(os.path.exists(self.fig))
def test6(self):
'''Test 6: Exclude antenna positions'''
self.res = plotants(vis=self.msfile, figfile=self.fig,
exclude='1,5,19,14,10,13')
self.assertEqual(self.res,None)
self.assertTrue(os.path.exists(self.fig))
def test7(self):
'''Test 7: checkbaselines'''
self.res = plotants(vis=self.msfile, figfile=self.fig,
checkbaselines=True)
self.assertEqual(self.res,None)
self.assertTrue(os.path.exists(self.fig))
def test8(self):
'''Test 8: exclude checkbaselines'''
# antenna (name) 11 is already excluded by checkbaselines
# (warning)
self.res = plotants(vis=self.msfile, figfile=self.fig,
exclude='11', checkbaselines=True)
self.assertEqual(self.res,None)
self.assertTrue(os.path.exists(self.fig))
def test9(self):
'''Test 9: Title'''
self.res = plotants(vis=self.msfile, figfile=self.fig,
title='IC2233')
self.assertEqual(self.res,None)
self.assertTrue(os.path.exists(self.fig))
def test10(self):
'''Test 10: All arguments'''
self.res = plotants(self.msfile, self.fig, True, True, '1,3,5,7,9',
True, "IC2233")
self.assertEqual(self.res,None)
self.assertTrue(os.path.exists(self.fig))
def suite():
return [plotants_test]
| [
"[email protected]"
]
| |
4feb8e94c1009ed1a7bd4e668bd531bd760e00c5 | 24e7e0dfaaeaca8f911b40fcc2937342a0f278fd | /venv/Lib/site-packages/plotly/graph_objs/parcoords/line/_colorbar.py | 257f038abfbb04d2dbc94a73796c799953fc8c52 | [
"MIT"
]
| permissive | BimiLevi/Covid19 | 90e234c639192d62bb87364ef96d6a46d8268fa0 | 5f07a9a4609383c02597373d76d6b6485d47936e | refs/heads/master | 2023-08-04T13:13:44.480700 | 2023-08-01T08:36:36 | 2023-08-01T08:36:36 | 288,455,446 | 1 | 0 | MIT | 2021-01-22T19:36:26 | 2020-08-18T12:53:43 | HTML | UTF-8 | Python | false | false | 70,590 | py | from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class ColorBar(_BaseTraceHierarchyType):
# class properties
# --------------------
_parent_path_str = "parcoords.line"
_path_str = "parcoords.line.colorbar"
_valid_props = {
"bgcolor",
"bordercolor",
"borderwidth",
"dtick",
"exponentformat",
"len",
"lenmode",
"minexponent",
"nticks",
"outlinecolor",
"outlinewidth",
"separatethousands",
"showexponent",
"showticklabels",
"showtickprefix",
"showticksuffix",
"thickness",
"thicknessmode",
"tick0",
"tickangle",
"tickcolor",
"tickfont",
"tickformat",
"tickformatstopdefaults",
"tickformatstops",
"ticklen",
"tickmode",
"tickprefix",
"ticks",
"ticksuffix",
"ticktext",
"ticktextsrc",
"tickvals",
"tickvalssrc",
"tickwidth",
"title",
"titlefont",
"titleside",
"x",
"xanchor",
"xpad",
"y",
"yanchor",
"ypad",
}
# bgcolor
# -------
@property
def bgcolor(self):
"""
Sets the color of padded area.
The 'bgcolor' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, rebeccapurple, saddlebrown, salmon,
sandybrown, seagreen, seashell, sienna, silver,
skyblue, slateblue, slategray, slategrey, snow,
springgreen, steelblue, tan, teal, thistle, tomato,
turquoise, violet, wheat, white, whitesmoke,
yellow, yellowgreen
Returns
-------
str
"""
return self["bgcolor"]
@bgcolor.setter
def bgcolor(self, val):
self["bgcolor"] = val
# bordercolor
# -----------
@property
def bordercolor(self):
"""
Sets the axis line color.
The 'bordercolor' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, rebeccapurple, saddlebrown, salmon,
sandybrown, seagreen, seashell, sienna, silver,
skyblue, slateblue, slategray, slategrey, snow,
springgreen, steelblue, tan, teal, thistle, tomato,
turquoise, violet, wheat, white, whitesmoke,
yellow, yellowgreen
Returns
-------
str
"""
return self["bordercolor"]
@bordercolor.setter
def bordercolor(self, val):
self["bordercolor"] = val
# borderwidth
# -----------
@property
def borderwidth(self):
"""
Sets the width (in px) or the border enclosing this color bar.
The 'borderwidth' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["borderwidth"]
@borderwidth.setter
def borderwidth(self, val):
self["borderwidth"] = val
# dtick
# -----
@property
def dtick(self):
"""
Sets the step in-between ticks on this axis. Use with `tick0`.
Must be a positive number, or special strings available to
"log" and "date" axes. If the axis `type` is "log", then ticks
are set every 10^(n*dtick) where n is the tick number. For
example, to set a tick mark at 1, 10, 100, 1000, ... set dtick
to 1. To set tick marks at 1, 100, 10000, ... set dtick to 2.
To set tick marks at 1, 5, 25, 125, 625, 3125, ... set dtick to
log_10(5), or 0.69897000433. "log" has several special values;
"L<f>", where `f` is a positive number, gives ticks linearly
spaced in value (but not position). For example `tick0` = 0.1,
`dtick` = "L0.5" will put ticks at 0.1, 0.6, 1.1, 1.6 etc. To
show powers of 10 plus small digits between, use "D1" (all
digits) or "D2" (only 2 and 5). `tick0` is ignored for "D1" and
"D2". If the axis `type` is "date", then you must convert the
time to milliseconds. For example, to set the interval between
ticks to one day, set `dtick` to 86400000.0. "date" also has
special values "M<n>" gives ticks spaced by a number of months.
`n` must be a positive integer. To set ticks on the 15th of
every third month, set `tick0` to "2000-01-15" and `dtick` to
"M3". To set ticks every 4 years, set `dtick` to "M48"
The 'dtick' property accepts values of any type
Returns
-------
Any
"""
return self["dtick"]
@dtick.setter
def dtick(self, val):
self["dtick"] = val
# exponentformat
# --------------
@property
def exponentformat(self):
"""
Determines a formatting rule for the tick exponents. For
example, consider the number 1,000,000,000. If "none", it
appears as 1,000,000,000. If "e", 1e+9. If "E", 1E+9. If
"power", 1x10^9 (with 9 in a super script). If "SI", 1G. If
"B", 1B.
The 'exponentformat' property is an enumeration that may be specified as:
- One of the following enumeration values:
['none', 'e', 'E', 'power', 'SI', 'B']
Returns
-------
Any
"""
return self["exponentformat"]
@exponentformat.setter
def exponentformat(self, val):
self["exponentformat"] = val
# len
# ---
@property
def len(self):
"""
Sets the length of the color bar This measure excludes the
padding of both ends. That is, the color bar length is this
length minus the padding on both ends.
The 'len' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["len"]
@len.setter
def len(self, val):
self["len"] = val
# lenmode
# -------
@property
def lenmode(self):
"""
Determines whether this color bar's length (i.e. the measure in
the color variation direction) is set in units of plot
"fraction" or in *pixels. Use `len` to set the value.
The 'lenmode' property is an enumeration that may be specified as:
- One of the following enumeration values:
['fraction', 'pixels']
Returns
-------
Any
"""
return self["lenmode"]
@lenmode.setter
def lenmode(self, val):
self["lenmode"] = val
# minexponent
# -----------
@property
def minexponent(self):
"""
Hide SI prefix for 10^n if |n| is below this number. This only
has an effect when `tickformat` is "SI" or "B".
The 'minexponent' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["minexponent"]
@minexponent.setter
def minexponent(self, val):
self["minexponent"] = val
# nticks
# ------
@property
def nticks(self):
"""
Specifies the maximum number of ticks for the particular axis.
The actual number of ticks will be chosen automatically to be
less than or equal to `nticks`. Has an effect only if
`tickmode` is set to "auto".
The 'nticks' property is a integer and may be specified as:
- An int (or float that will be cast to an int)
in the interval [0, 9223372036854775807]
Returns
-------
int
"""
return self["nticks"]
@nticks.setter
def nticks(self, val):
self["nticks"] = val
# outlinecolor
# ------------
@property
def outlinecolor(self):
"""
Sets the axis line color.
The 'outlinecolor' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, rebeccapurple, saddlebrown, salmon,
sandybrown, seagreen, seashell, sienna, silver,
skyblue, slateblue, slategray, slategrey, snow,
springgreen, steelblue, tan, teal, thistle, tomato,
turquoise, violet, wheat, white, whitesmoke,
yellow, yellowgreen
Returns
-------
str
"""
return self["outlinecolor"]
@outlinecolor.setter
def outlinecolor(self, val):
self["outlinecolor"] = val
# outlinewidth
# ------------
@property
def outlinewidth(self):
"""
Sets the width (in px) of the axis line.
The 'outlinewidth' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["outlinewidth"]
@outlinewidth.setter
def outlinewidth(self, val):
self["outlinewidth"] = val
# separatethousands
# -----------------
@property
def separatethousands(self):
"""
If "true", even 4-digit integers are separated
The 'separatethousands' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["separatethousands"]
@separatethousands.setter
def separatethousands(self, val):
self["separatethousands"] = val
# showexponent
# ------------
@property
def showexponent(self):
"""
If "all", all exponents are shown besides their significands.
If "first", only the exponent of the first tick is shown. If
"last", only the exponent of the last tick is shown. If "none",
no exponents appear.
The 'showexponent' property is an enumeration that may be specified as:
- One of the following enumeration values:
['all', 'first', 'last', 'none']
Returns
-------
Any
"""
return self["showexponent"]
@showexponent.setter
def showexponent(self, val):
self["showexponent"] = val
# showticklabels
# --------------
@property
def showticklabels(self):
"""
Determines whether or not the tick labels are drawn.
The 'showticklabels' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["showticklabels"]
@showticklabels.setter
def showticklabels(self, val):
self["showticklabels"] = val
# showtickprefix
# --------------
@property
def showtickprefix(self):
"""
If "all", all tick labels are displayed with a prefix. If
"first", only the first tick is displayed with a prefix. If
"last", only the last tick is displayed with a suffix. If
"none", tick prefixes are hidden.
The 'showtickprefix' property is an enumeration that may be specified as:
- One of the following enumeration values:
['all', 'first', 'last', 'none']
Returns
-------
Any
"""
return self["showtickprefix"]
@showtickprefix.setter
def showtickprefix(self, val):
self["showtickprefix"] = val
# showticksuffix
# --------------
@property
def showticksuffix(self):
"""
Same as `showtickprefix` but for tick suffixes.
The 'showticksuffix' property is an enumeration that may be specified as:
- One of the following enumeration values:
['all', 'first', 'last', 'none']
Returns
-------
Any
"""
return self["showticksuffix"]
@showticksuffix.setter
def showticksuffix(self, val):
self["showticksuffix"] = val
# thickness
# ---------
@property
def thickness(self):
"""
Sets the thickness of the color bar This measure excludes the
size of the padding, ticks and labels.
The 'thickness' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["thickness"]
@thickness.setter
def thickness(self, val):
self["thickness"] = val
# thicknessmode
# -------------
@property
def thicknessmode(self):
"""
Determines whether this color bar's thickness (i.e. the measure
in the constant color direction) is set in units of plot
"fraction" or in "pixels". Use `thickness` to set the value.
The 'thicknessmode' property is an enumeration that may be specified as:
- One of the following enumeration values:
['fraction', 'pixels']
Returns
-------
Any
"""
return self["thicknessmode"]
@thicknessmode.setter
def thicknessmode(self, val):
self["thicknessmode"] = val
# tick0
# -----
@property
def tick0(self):
"""
Sets the placement of the first tick on this axis. Use with
`dtick`. If the axis `type` is "log", then you must take the
log of your starting tick (e.g. to set the starting tick to
100, set the `tick0` to 2) except when `dtick`=*L<f>* (see
`dtick` for more info). If the axis `type` is "date", it should
be a date string, like date data. If the axis `type` is
"category", it should be a number, using the scale where each
category is assigned a serial number from zero in the order it
appears.
The 'tick0' property accepts values of any type
Returns
-------
Any
"""
return self["tick0"]
@tick0.setter
def tick0(self, val):
self["tick0"] = val
# tickangle
# ---------
@property
def tickangle(self):
"""
Sets the angle of the tick labels with respect to the
horizontal. For example, a `tickangle` of -90 draws the tick
labels vertically.
The 'tickangle' property is a angle (in degrees) that may be
specified as a number between -180 and 180. Numeric values outside this
range are converted to the equivalent value
(e.g. 270 is converted to -90).
Returns
-------
int|float
"""
return self["tickangle"]
@tickangle.setter
def tickangle(self, val):
self["tickangle"] = val
# tickcolor
# ---------
@property
def tickcolor(self):
"""
Sets the tick color.
The 'tickcolor' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, rebeccapurple, saddlebrown, salmon,
sandybrown, seagreen, seashell, sienna, silver,
skyblue, slateblue, slategray, slategrey, snow,
springgreen, steelblue, tan, teal, thistle, tomato,
turquoise, violet, wheat, white, whitesmoke,
yellow, yellowgreen
Returns
-------
str
"""
return self["tickcolor"]
@tickcolor.setter
def tickcolor(self, val):
self["tickcolor"] = val
# tickfont
# --------
@property
def tickfont(self):
"""
Sets the color bar's tick label font
The 'tickfont' property is an instance of Tickfont
that may be specified as:
- An instance of :class:`plotly.graph_objs.parcoords.line.colorbar.Tickfont`
- A dict of string/value properties that will be passed
to the Tickfont constructor
Supported dict properties:
color
family
HTML font family - the typeface that will be
applied by the web browser. The web browser
will only be able to apply a font if it is
available on the system which it operates.
Provide multiple font families, separated by
commas, to indicate the preference in which to
apply fonts if they aren't available on the
system. The Chart Studio Cloud (at
https://chart-studio.plotly.com or on-premise)
generates images on a server, where only a
select number of fonts are installed and
supported. These include "Arial", "Balto",
"Courier New", "Droid Sans",, "Droid Serif",
"Droid Sans Mono", "Gravitas One", "Old
Standard TT", "Open Sans", "Overpass", "PT Sans
Narrow", "Raleway", "Times New Roman".
size
Returns
-------
plotly.graph_objs.parcoords.line.colorbar.Tickfont
"""
return self["tickfont"]
@tickfont.setter
def tickfont(self, val):
self["tickfont"] = val
# tickformat
# ----------
@property
def tickformat(self):
"""
Sets the tick label formatting rule using d3 formatting mini-
languages which are very similar to those in Python. For
numbers, see: https://github.com/d3/d3-3.x-api-
reference/blob/master/Formatting.md#d3_format And for dates
see: https://github.com/d3/d3-time-format#locale_format We add
one item to d3's date formatter: "%{n}f" for fractional seconds
with n digits. For example, *2016-10-13 09:15:23.456* with
tickformat "%H~%M~%S.%2f" would display "09~15~23.46"
The 'tickformat' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["tickformat"]
@tickformat.setter
def tickformat(self, val):
self["tickformat"] = val
# tickformatstops
# ---------------
@property
def tickformatstops(self):
"""
The 'tickformatstops' property is a tuple of instances of
Tickformatstop that may be specified as:
- A list or tuple of instances of plotly.graph_objs.parcoords.line.colorbar.Tickformatstop
- A list or tuple of dicts of string/value properties that
will be passed to the Tickformatstop constructor
Supported dict properties:
dtickrange
range [*min*, *max*], where "min", "max" -
dtick values which describe some zoom level, it
is possible to omit "min" or "max" value by
passing "null"
enabled
Determines whether or not this stop is used. If
`false`, this stop is ignored even within its
`dtickrange`.
name
When used in a template, named items are
created in the output figure in addition to any
items the figure already has in this array. You
can modify these items in the output figure by
making your own item with `templateitemname`
matching this `name` alongside your
modifications (including `visible: false` or
`enabled: false` to hide it). Has no effect
outside of a template.
templateitemname
Used to refer to a named item in this array in
the template. Named items from the template
will be created even without a matching item in
the input figure, but you can modify one by
making an item with `templateitemname` matching
its `name`, alongside your modifications
(including `visible: false` or `enabled: false`
to hide it). If there is no template or no
matching item, this item will be hidden unless
you explicitly show it with `visible: true`.
value
string - dtickformat for described zoom level,
the same as "tickformat"
Returns
-------
tuple[plotly.graph_objs.parcoords.line.colorbar.Tickformatstop]
"""
return self["tickformatstops"]
@tickformatstops.setter
def tickformatstops(self, val):
self["tickformatstops"] = val
# tickformatstopdefaults
# ----------------------
@property
def tickformatstopdefaults(self):
"""
When used in a template (as layout.template.data.parcoords.line
.colorbar.tickformatstopdefaults), sets the default property
values to use for elements of
parcoords.line.colorbar.tickformatstops
The 'tickformatstopdefaults' property is an instance of Tickformatstop
that may be specified as:
- An instance of :class:`plotly.graph_objs.parcoords.line.colorbar.Tickformatstop`
- A dict of string/value properties that will be passed
to the Tickformatstop constructor
Supported dict properties:
Returns
-------
plotly.graph_objs.parcoords.line.colorbar.Tickformatstop
"""
return self["tickformatstopdefaults"]
@tickformatstopdefaults.setter
def tickformatstopdefaults(self, val):
self["tickformatstopdefaults"] = val
# ticklen
# -------
@property
def ticklen(self):
"""
Sets the tick length (in px).
The 'ticklen' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["ticklen"]
@ticklen.setter
def ticklen(self, val):
self["ticklen"] = val
# tickmode
# --------
@property
def tickmode(self):
"""
Sets the tick mode for this axis. If "auto", the number of
ticks is set via `nticks`. If "linear", the placement of the
ticks is determined by a starting position `tick0` and a tick
step `dtick` ("linear" is the default value if `tick0` and
`dtick` are provided). If "array", the placement of the ticks
is set via `tickvals` and the tick text is `ticktext`. ("array"
is the default value if `tickvals` is provided).
The 'tickmode' property is an enumeration that may be specified as:
- One of the following enumeration values:
['auto', 'linear', 'array']
Returns
-------
Any
"""
return self["tickmode"]
@tickmode.setter
def tickmode(self, val):
self["tickmode"] = val
# tickprefix
# ----------
@property
def tickprefix(self):
"""
Sets a tick label prefix.
The 'tickprefix' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["tickprefix"]
@tickprefix.setter
def tickprefix(self, val):
self["tickprefix"] = val
# ticks
# -----
@property
def ticks(self):
"""
Determines whether ticks are drawn or not. If "", this axis'
ticks are not drawn. If "outside" ("inside"), this axis' ticks are
drawn outside (inside) the axis lines.
The 'ticks' property is an enumeration that may be specified as:
- One of the following enumeration values:
['outside', 'inside', '']
Returns
-------
Any
"""
return self["ticks"]
@ticks.setter
def ticks(self, val):
self["ticks"] = val
# ticksuffix
# ----------
@property
def ticksuffix(self):
"""
Sets a tick label suffix.
The 'ticksuffix' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["ticksuffix"]
@ticksuffix.setter
def ticksuffix(self, val):
self["ticksuffix"] = val
# ticktext
# --------
@property
def ticktext(self):
"""
Sets the text displayed at the ticks position via `tickvals`.
Only has an effect if `tickmode` is set to "array". Used with
`tickvals`.
The 'ticktext' property is an array that may be specified as a tuple,
list, numpy array, or pandas Series
Returns
-------
numpy.ndarray
"""
return self["ticktext"]
@ticktext.setter
def ticktext(self, val):
self["ticktext"] = val
# ticktextsrc
# -----------
@property
def ticktextsrc(self):
"""
Sets the source reference on Chart Studio Cloud for ticktext .
The 'ticktextsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["ticktextsrc"]
@ticktextsrc.setter
def ticktextsrc(self, val):
self["ticktextsrc"] = val
# tickvals
# --------
@property
def tickvals(self):
"""
Sets the values at which ticks on this axis appear. Only has an
effect if `tickmode` is set to "array". Used with `ticktext`.
The 'tickvals' property is an array that may be specified as a tuple,
list, numpy array, or pandas Series
Returns
-------
numpy.ndarray
"""
return self["tickvals"]
@tickvals.setter
def tickvals(self, val):
self["tickvals"] = val
# tickvalssrc
# -----------
@property
def tickvalssrc(self):
"""
Sets the source reference on Chart Studio Cloud for tickvals .
The 'tickvalssrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["tickvalssrc"]
@tickvalssrc.setter
def tickvalssrc(self, val):
self["tickvalssrc"] = val
# tickwidth
# ---------
@property
def tickwidth(self):
"""
Sets the tick width (in px).
The 'tickwidth' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["tickwidth"]
@tickwidth.setter
def tickwidth(self, val):
self["tickwidth"] = val
# title
# -----
@property
def title(self):
"""
The 'title' property is an instance of Title
that may be specified as:
- An instance of :class:`plotly.graph_objs.parcoords.line.colorbar.Title`
- A dict of string/value properties that will be passed
to the Title constructor
Supported dict properties:
font
Sets this color bar's title font. Note that the
title's font used to be set by the now
deprecated `titlefont` attribute.
side
Determines the location of color bar's title
with respect to the color bar. Note that the
title's location used to be set by the now
deprecated `titleside` attribute.
text
Sets the title of the color bar. Note that
before the existence of `title.text`, the
title's contents used to be defined as the
`title` attribute itself. This behavior has
been deprecated.
Returns
-------
plotly.graph_objs.parcoords.line.colorbar.Title
"""
return self["title"]
@title.setter
def title(self, val):
self["title"] = val
# titlefont
# ---------
@property
def titlefont(self):
"""
Deprecated: Please use parcoords.line.colorbar.title.font
instead. Sets this color bar's title font. Note that the
title's font used to be set by the now deprecated `titlefont`
attribute.
The 'font' property is an instance of Font
that may be specified as:
- An instance of :class:`plotly.graph_objs.parcoords.line.colorbar.title.Font`
- A dict of string/value properties that will be passed
to the Font constructor
Supported dict properties:
color
family
HTML font family - the typeface that will be
applied by the web browser. The web browser
will only be able to apply a font if it is
available on the system which it operates.
Provide multiple font families, separated by
commas, to indicate the preference in which to
apply fonts if they aren't available on the
system. The Chart Studio Cloud (at
https://chart-studio.plotly.com or on-premise)
generates images on a server, where only a
select number of fonts are installed and
supported. These include "Arial", "Balto",
"Courier New", "Droid Sans",, "Droid Serif",
"Droid Sans Mono", "Gravitas One", "Old
Standard TT", "Open Sans", "Overpass", "PT Sans
Narrow", "Raleway", "Times New Roman".
size
Returns
-------
"""
return self["titlefont"]
@titlefont.setter
def titlefont(self, val):
self["titlefont"] = val
# titleside
# ---------
@property
def titleside(self):
"""
Deprecated: Please use parcoords.line.colorbar.title.side
instead. Determines the location of color bar's title with
respect to the color bar. Note that the title's location used
to be set by the now deprecated `titleside` attribute.
The 'side' property is an enumeration that may be specified as:
- One of the following enumeration values:
['right', 'top', 'bottom']
Returns
-------
"""
return self["titleside"]
@titleside.setter
def titleside(self, val):
self["titleside"] = val
# x
# -
@property
def x(self):
"""
Sets the x position of the color bar (in plot fraction).
The 'x' property is a number and may be specified as:
- An int or float in the interval [-2, 3]
Returns
-------
int|float
"""
return self["x"]
@x.setter
def x(self, val):
self["x"] = val
# xanchor
# -------
@property
def xanchor(self):
"""
Sets this color bar's horizontal position anchor. This anchor
binds the `x` position to the "left", "center" or "right" of
the color bar.
The 'xanchor' property is an enumeration that may be specified as:
- One of the following enumeration values:
['left', 'center', 'right']
Returns
-------
Any
"""
return self["xanchor"]
@xanchor.setter
def xanchor(self, val):
self["xanchor"] = val
# xpad
# ----
@property
def xpad(self):
"""
Sets the amount of padding (in px) along the x direction.
The 'xpad' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["xpad"]
@xpad.setter
def xpad(self, val):
self["xpad"] = val
# y
# -
@property
def y(self):
"""
Sets the y position of the color bar (in plot fraction).
The 'y' property is a number and may be specified as:
- An int or float in the interval [-2, 3]
Returns
-------
int|float
"""
return self["y"]
@y.setter
def y(self, val):
self["y"] = val
# yanchor
# -------
@property
def yanchor(self):
"""
Sets this color bar's vertical position anchor This anchor
binds the `y` position to the "top", "middle" or "bottom" of
the color bar.
The 'yanchor' property is an enumeration that may be specified as:
- One of the following enumeration values:
['top', 'middle', 'bottom']
Returns
-------
Any
"""
return self["yanchor"]
@yanchor.setter
def yanchor(self, val):
self["yanchor"] = val
# ypad
# ----
@property
def ypad(self):
"""
Sets the amount of padding (in px) along the y direction.
The 'ypad' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["ypad"]
@ypad.setter
def ypad(self, val):
self["ypad"] = val
# Self properties description
# ---------------------------
@property
def _prop_descriptions(self):
return """\
bgcolor
Sets the color of padded area.
bordercolor
Sets the axis line color.
borderwidth
Sets the width (in px) or the border enclosing this
color bar.
dtick
Sets the step in-between ticks on this axis. Use with
`tick0`. Must be a positive number, or special strings
available to "log" and "date" axes. If the axis `type`
is "log", then ticks are set every 10^(n*dtick) where n
is the tick number. For example, to set a tick mark at
1, 10, 100, 1000, ... set dtick to 1. To set tick marks
at 1, 100, 10000, ... set dtick to 2. To set tick marks
at 1, 5, 25, 125, 625, 3125, ... set dtick to
log_10(5), or 0.69897000433. "log" has several special
values; "L<f>", where `f` is a positive number, gives
ticks linearly spaced in value (but not position). For
example `tick0` = 0.1, `dtick` = "L0.5" will put ticks
at 0.1, 0.6, 1.1, 1.6 etc. To show powers of 10 plus
small digits between, use "D1" (all digits) or "D2"
(only 2 and 5). `tick0` is ignored for "D1" and "D2".
If the axis `type` is "date", then you must convert the
time to milliseconds. For example, to set the interval
between ticks to one day, set `dtick` to 86400000.0.
"date" also has special values "M<n>" gives ticks
spaced by a number of months. `n` must be a positive
integer. To set ticks on the 15th of every third month,
set `tick0` to "2000-01-15" and `dtick` to "M3". To set
ticks every 4 years, set `dtick` to "M48"
exponentformat
Determines a formatting rule for the tick exponents.
For example, consider the number 1,000,000,000. If
"none", it appears as 1,000,000,000. If "e", 1e+9. If
"E", 1E+9. If "power", 1x10^9 (with 9 in a super
script). If "SI", 1G. If "B", 1B.
len
Sets the length of the color bar This measure excludes
the padding of both ends. That is, the color bar length
is this length minus the padding on both ends.
lenmode
Determines whether this color bar's length (i.e. the
measure in the color variation direction) is set in
units of plot "fraction" or in *pixels. Use `len` to
set the value.
minexponent
Hide SI prefix for 10^n if |n| is below this number.
This only has an effect when `tickformat` is "SI" or
"B".
nticks
Specifies the maximum number of ticks for the
particular axis. The actual number of ticks will be
chosen automatically to be less than or equal to
`nticks`. Has an effect only if `tickmode` is set to
"auto".
outlinecolor
Sets the axis line color.
outlinewidth
Sets the width (in px) of the axis line.
separatethousands
If "true", even 4-digit integers are separated
showexponent
If "all", all exponents are shown besides their
significands. If "first", only the exponent of the
first tick is shown. If "last", only the exponent of
the last tick is shown. If "none", no exponents appear.
showticklabels
Determines whether or not the tick labels are drawn.
showtickprefix
If "all", all tick labels are displayed with a prefix.
If "first", only the first tick is displayed with a
prefix. If "last", only the last tick is displayed with
a suffix. If "none", tick prefixes are hidden.
showticksuffix
Same as `showtickprefix` but for tick suffixes.
thickness
Sets the thickness of the color bar This measure
excludes the size of the padding, ticks and labels.
thicknessmode
Determines whether this color bar's thickness (i.e. the
measure in the constant color direction) is set in
units of plot "fraction" or in "pixels". Use
`thickness` to set the value.
tick0
Sets the placement of the first tick on this axis. Use
with `dtick`. If the axis `type` is "log", then you
must take the log of your starting tick (e.g. to set
the starting tick to 100, set the `tick0` to 2) except
when `dtick`=*L<f>* (see `dtick` for more info). If the
axis `type` is "date", it should be a date string, like
date data. If the axis `type` is "category", it should
be a number, using the scale where each category is
assigned a serial number from zero in the order it
appears.
tickangle
Sets the angle of the tick labels with respect to the
horizontal. For example, a `tickangle` of -90 draws the
tick labels vertically.
tickcolor
Sets the tick color.
tickfont
Sets the color bar's tick label font
tickformat
Sets the tick label formatting rule using d3 formatting
mini-languages which are very similar to those in
Python. For numbers, see:
https://github.com/d3/d3-3.x-api-
reference/blob/master/Formatting.md#d3_format And for
dates see: https://github.com/d3/d3-time-
format#locale_format We add one item to d3's date
formatter: "%{n}f" for fractional seconds with n
digits. For example, *2016-10-13 09:15:23.456* with
tickformat "%H~%M~%S.%2f" would display "09~15~23.46"
tickformatstops
A tuple of :class:`plotly.graph_objects.parcoords.line.
colorbar.Tickformatstop` instances or dicts with
compatible properties
tickformatstopdefaults
When used in a template (as layout.template.data.parcoo
rds.line.colorbar.tickformatstopdefaults), sets the
default property values to use for elements of
parcoords.line.colorbar.tickformatstops
ticklen
Sets the tick length (in px).
tickmode
Sets the tick mode for this axis. If "auto", the number
of ticks is set via `nticks`. If "linear", the
placement of the ticks is determined by a starting
position `tick0` and a tick step `dtick` ("linear" is
the default value if `tick0` and `dtick` are provided).
If "array", the placement of the ticks is set via
`tickvals` and the tick text is `ticktext`. ("array" is
the default value if `tickvals` is provided).
tickprefix
Sets a tick label prefix.
ticks
Determines whether ticks are drawn or not. If "", this
axis' ticks are not drawn. If "outside" ("inside"),
this axis' ticks are drawn outside (inside) the axis lines.
ticksuffix
Sets a tick label suffix.
ticktext
Sets the text displayed at the ticks position via
`tickvals`. Only has an effect if `tickmode` is set to
"array". Used with `tickvals`.
ticktextsrc
Sets the source reference on Chart Studio Cloud for
ticktext .
tickvals
Sets the values at which ticks on this axis appear.
Only has an effect if `tickmode` is set to "array".
Used with `ticktext`.
tickvalssrc
Sets the source reference on Chart Studio Cloud for
tickvals .
tickwidth
Sets the tick width (in px).
title
:class:`plotly.graph_objects.parcoords.line.colorbar.Ti
tle` instance or dict with compatible properties
titlefont
Deprecated: Please use
parcoords.line.colorbar.title.font instead. Sets this
color bar's title font. Note that the title's font used
to be set by the now deprecated `titlefont` attribute.
titleside
Deprecated: Please use
parcoords.line.colorbar.title.side instead. Determines
the location of color bar's title with respect to the
color bar. Note that the title's location used to be
set by the now deprecated `titleside` attribute.
x
Sets the x position of the color bar (in plot
fraction).
xanchor
Sets this color bar's horizontal position anchor. This
anchor binds the `x` position to the "left", "center"
or "right" of the color bar.
xpad
Sets the amount of padding (in px) along the x
direction.
y
Sets the y position of the color bar (in plot
fraction).
yanchor
Sets this color bar's vertical position anchor This
anchor binds the `y` position to the "top", "middle" or
"bottom" of the color bar.
ypad
Sets the amount of padding (in px) along the y
direction.
"""
_mapped_properties = {
"titlefont": ("title", "font"),
"titleside": ("title", "side"),
}
def __init__(
self,
arg=None,
bgcolor=None,
bordercolor=None,
borderwidth=None,
dtick=None,
exponentformat=None,
len=None,
lenmode=None,
minexponent=None,
nticks=None,
outlinecolor=None,
outlinewidth=None,
separatethousands=None,
showexponent=None,
showticklabels=None,
showtickprefix=None,
showticksuffix=None,
thickness=None,
thicknessmode=None,
tick0=None,
tickangle=None,
tickcolor=None,
tickfont=None,
tickformat=None,
tickformatstops=None,
tickformatstopdefaults=None,
ticklen=None,
tickmode=None,
tickprefix=None,
ticks=None,
ticksuffix=None,
ticktext=None,
ticktextsrc=None,
tickvals=None,
tickvalssrc=None,
tickwidth=None,
title=None,
titlefont=None,
titleside=None,
x=None,
xanchor=None,
xpad=None,
y=None,
yanchor=None,
ypad=None,
**kwargs
):
"""
Construct a new ColorBar object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.parcoords.line.ColorBar`
bgcolor
Sets the color of padded area.
bordercolor
Sets the axis line color.
borderwidth
Sets the width (in px) or the border enclosing this
color bar.
dtick
Sets the step in-between ticks on this axis. Use with
`tick0`. Must be a positive number, or special strings
available to "log" and "date" axes. If the axis `type`
is "log", then ticks are set every 10^(n*dtick) where n
is the tick number. For example, to set a tick mark at
1, 10, 100, 1000, ... set dtick to 1. To set tick marks
at 1, 100, 10000, ... set dtick to 2. To set tick marks
at 1, 5, 25, 125, 625, 3125, ... set dtick to
log_10(5), or 0.69897000433. "log" has several special
values; "L<f>", where `f` is a positive number, gives
ticks linearly spaced in value (but not position). For
example `tick0` = 0.1, `dtick` = "L0.5" will put ticks
at 0.1, 0.6, 1.1, 1.6 etc. To show powers of 10 plus
small digits between, use "D1" (all digits) or "D2"
(only 2 and 5). `tick0` is ignored for "D1" and "D2".
If the axis `type` is "date", then you must convert the
time to milliseconds. For example, to set the interval
between ticks to one day, set `dtick` to 86400000.0.
"date" also has special values "M<n>" gives ticks
spaced by a number of months. `n` must be a positive
integer. To set ticks on the 15th of every third month,
set `tick0` to "2000-01-15" and `dtick` to "M3". To set
ticks every 4 years, set `dtick` to "M48"
exponentformat
Determines a formatting rule for the tick exponents.
For example, consider the number 1,000,000,000. If
"none", it appears as 1,000,000,000. If "e", 1e+9. If
"E", 1E+9. If "power", 1x10^9 (with 9 in a super
script). If "SI", 1G. If "B", 1B.
len
Sets the length of the color bar This measure excludes
the padding of both ends. That is, the color bar length
is this length minus the padding on both ends.
lenmode
Determines whether this color bar's length (i.e. the
measure in the color variation direction) is set in
units of plot "fraction" or in *pixels. Use `len` to
set the value.
minexponent
Hide SI prefix for 10^n if |n| is below this number.
This only has an effect when `tickformat` is "SI" or
"B".
nticks
Specifies the maximum number of ticks for the
particular axis. The actual number of ticks will be
chosen automatically to be less than or equal to
`nticks`. Has an effect only if `tickmode` is set to
"auto".
outlinecolor
Sets the axis line color.
outlinewidth
Sets the width (in px) of the axis line.
separatethousands
If "true", even 4-digit integers are separated
showexponent
If "all", all exponents are shown besides their
significands. If "first", only the exponent of the
first tick is shown. If "last", only the exponent of
the last tick is shown. If "none", no exponents appear.
showticklabels
Determines whether or not the tick labels are drawn.
showtickprefix
If "all", all tick labels are displayed with a prefix.
If "first", only the first tick is displayed with a
prefix. If "last", only the last tick is displayed with
a suffix. If "none", tick prefixes are hidden.
showticksuffix
Same as `showtickprefix` but for tick suffixes.
thickness
Sets the thickness of the color bar This measure
excludes the size of the padding, ticks and labels.
thicknessmode
Determines whether this color bar's thickness (i.e. the
measure in the constant color direction) is set in
units of plot "fraction" or in "pixels". Use
`thickness` to set the value.
tick0
Sets the placement of the first tick on this axis. Use
with `dtick`. If the axis `type` is "log", then you
must take the log of your starting tick (e.g. to set
the starting tick to 100, set the `tick0` to 2) except
when `dtick`=*L<f>* (see `dtick` for more info). If the
axis `type` is "date", it should be a date string, like
date data. If the axis `type` is "category", it should
be a number, using the scale where each category is
assigned a serial number from zero in the order it
appears.
tickangle
Sets the angle of the tick labels with respect to the
horizontal. For example, a `tickangle` of -90 draws the
tick labels vertically.
tickcolor
Sets the tick color.
tickfont
Sets the color bar's tick label font
tickformat
Sets the tick label formatting rule using d3 formatting
mini-languages which are very similar to those in
Python. For numbers, see:
https://github.com/d3/d3-3.x-api-
reference/blob/master/Formatting.md#d3_format And for
dates see: https://github.com/d3/d3-time-
format#locale_format We add one item to d3's date
formatter: "%{n}f" for fractional seconds with n
digits. For example, *2016-10-13 09:15:23.456* with
tickformat "%H~%M~%S.%2f" would display "09~15~23.46"
tickformatstops
A tuple of :class:`plotly.graph_objects.parcoords.line.
colorbar.Tickformatstop` instances or dicts with
compatible properties
tickformatstopdefaults
When used in a template (as layout.template.data.parcoo
rds.line.colorbar.tickformatstopdefaults), sets the
default property values to use for elements of
parcoords.line.colorbar.tickformatstops
ticklen
Sets the tick length (in px).
tickmode
Sets the tick mode for this axis. If "auto", the number
of ticks is set via `nticks`. If "linear", the
placement of the ticks is determined by a starting
position `tick0` and a tick step `dtick` ("linear" is
the default value if `tick0` and `dtick` are provided).
If "array", the placement of the ticks is set via
`tickvals` and the tick text is `ticktext`. ("array" is
the default value if `tickvals` is provided).
tickprefix
Sets a tick label prefix.
ticks
Determines whether ticks are drawn or not. If "", this
axis' ticks are not drawn. If "outside" ("inside"),
this axis' ticks are drawn outside (inside) the axis lines.
ticksuffix
Sets a tick label suffix.
ticktext
Sets the text displayed at the ticks position via
`tickvals`. Only has an effect if `tickmode` is set to
"array". Used with `tickvals`.
ticktextsrc
Sets the source reference on Chart Studio Cloud for
ticktext .
tickvals
Sets the values at which ticks on this axis appear.
Only has an effect if `tickmode` is set to "array".
Used with `ticktext`.
tickvalssrc
Sets the source reference on Chart Studio Cloud for
tickvals .
tickwidth
Sets the tick width (in px).
title
:class:`plotly.graph_objects.parcoords.line.colorbar.Ti
tle` instance or dict with compatible properties
titlefont
Deprecated: Please use
parcoords.line.colorbar.title.font instead. Sets this
color bar's title font. Note that the title's font used
to be set by the now deprecated `titlefont` attribute.
titleside
Deprecated: Please use
parcoords.line.colorbar.title.side instead. Determines
the location of color bar's title with respect to the
color bar. Note that the title's location used to be
set by the now deprecated `titleside` attribute.
x
Sets the x position of the color bar (in plot
fraction).
xanchor
Sets this color bar's horizontal position anchor. This
anchor binds the `x` position to the "left", "center"
or "right" of the color bar.
xpad
Sets the amount of padding (in px) along the x
direction.
y
Sets the y position of the color bar (in plot
fraction).
yanchor
Sets this color bar's vertical position anchor This
anchor binds the `y` position to the "top", "middle" or
"bottom" of the color bar.
ypad
Sets the amount of padding (in px) along the y
direction.
Returns
-------
ColorBar
"""
super(ColorBar, self).__init__("colorbar")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
# Validate arg
# ------------
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError(
"""\
The first argument to the plotly.graph_objs.parcoords.line.ColorBar
constructor must be a dict or
an instance of :class:`plotly.graph_objs.parcoords.line.ColorBar`"""
)
# Handle skip_invalid
# -------------------
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
# Populate data dict with properties
# ----------------------------------
_v = arg.pop("bgcolor", None)
_v = bgcolor if bgcolor is not None else _v
if _v is not None:
self["bgcolor"] = _v
_v = arg.pop("bordercolor", None)
_v = bordercolor if bordercolor is not None else _v
if _v is not None:
self["bordercolor"] = _v
_v = arg.pop("borderwidth", None)
_v = borderwidth if borderwidth is not None else _v
if _v is not None:
self["borderwidth"] = _v
_v = arg.pop("dtick", None)
_v = dtick if dtick is not None else _v
if _v is not None:
self["dtick"] = _v
_v = arg.pop("exponentformat", None)
_v = exponentformat if exponentformat is not None else _v
if _v is not None:
self["exponentformat"] = _v
_v = arg.pop("len", None)
_v = len if len is not None else _v
if _v is not None:
self["len"] = _v
_v = arg.pop("lenmode", None)
_v = lenmode if lenmode is not None else _v
if _v is not None:
self["lenmode"] = _v
_v = arg.pop("minexponent", None)
_v = minexponent if minexponent is not None else _v
if _v is not None:
self["minexponent"] = _v
_v = arg.pop("nticks", None)
_v = nticks if nticks is not None else _v
if _v is not None:
self["nticks"] = _v
_v = arg.pop("outlinecolor", None)
_v = outlinecolor if outlinecolor is not None else _v
if _v is not None:
self["outlinecolor"] = _v
_v = arg.pop("outlinewidth", None)
_v = outlinewidth if outlinewidth is not None else _v
if _v is not None:
self["outlinewidth"] = _v
_v = arg.pop("separatethousands", None)
_v = separatethousands if separatethousands is not None else _v
if _v is not None:
self["separatethousands"] = _v
_v = arg.pop("showexponent", None)
_v = showexponent if showexponent is not None else _v
if _v is not None:
self["showexponent"] = _v
_v = arg.pop("showticklabels", None)
_v = showticklabels if showticklabels is not None else _v
if _v is not None:
self["showticklabels"] = _v
_v = arg.pop("showtickprefix", None)
_v = showtickprefix if showtickprefix is not None else _v
if _v is not None:
self["showtickprefix"] = _v
_v = arg.pop("showticksuffix", None)
_v = showticksuffix if showticksuffix is not None else _v
if _v is not None:
self["showticksuffix"] = _v
_v = arg.pop("thickness", None)
_v = thickness if thickness is not None else _v
if _v is not None:
self["thickness"] = _v
_v = arg.pop("thicknessmode", None)
_v = thicknessmode if thicknessmode is not None else _v
if _v is not None:
self["thicknessmode"] = _v
_v = arg.pop("tick0", None)
_v = tick0 if tick0 is not None else _v
if _v is not None:
self["tick0"] = _v
_v = arg.pop("tickangle", None)
_v = tickangle if tickangle is not None else _v
if _v is not None:
self["tickangle"] = _v
_v = arg.pop("tickcolor", None)
_v = tickcolor if tickcolor is not None else _v
if _v is not None:
self["tickcolor"] = _v
_v = arg.pop("tickfont", None)
_v = tickfont if tickfont is not None else _v
if _v is not None:
self["tickfont"] = _v
_v = arg.pop("tickformat", None)
_v = tickformat if tickformat is not None else _v
if _v is not None:
self["tickformat"] = _v
_v = arg.pop("tickformatstops", None)
_v = tickformatstops if tickformatstops is not None else _v
if _v is not None:
self["tickformatstops"] = _v
_v = arg.pop("tickformatstopdefaults", None)
_v = tickformatstopdefaults if tickformatstopdefaults is not None else _v
if _v is not None:
self["tickformatstopdefaults"] = _v
_v = arg.pop("ticklen", None)
_v = ticklen if ticklen is not None else _v
if _v is not None:
self["ticklen"] = _v
_v = arg.pop("tickmode", None)
_v = tickmode if tickmode is not None else _v
if _v is not None:
self["tickmode"] = _v
_v = arg.pop("tickprefix", None)
_v = tickprefix if tickprefix is not None else _v
if _v is not None:
self["tickprefix"] = _v
_v = arg.pop("ticks", None)
_v = ticks if ticks is not None else _v
if _v is not None:
self["ticks"] = _v
_v = arg.pop("ticksuffix", None)
_v = ticksuffix if ticksuffix is not None else _v
if _v is not None:
self["ticksuffix"] = _v
_v = arg.pop("ticktext", None)
_v = ticktext if ticktext is not None else _v
if _v is not None:
self["ticktext"] = _v
_v = arg.pop("ticktextsrc", None)
_v = ticktextsrc if ticktextsrc is not None else _v
if _v is not None:
self["ticktextsrc"] = _v
_v = arg.pop("tickvals", None)
_v = tickvals if tickvals is not None else _v
if _v is not None:
self["tickvals"] = _v
_v = arg.pop("tickvalssrc", None)
_v = tickvalssrc if tickvalssrc is not None else _v
if _v is not None:
self["tickvalssrc"] = _v
_v = arg.pop("tickwidth", None)
_v = tickwidth if tickwidth is not None else _v
if _v is not None:
self["tickwidth"] = _v
_v = arg.pop("title", None)
_v = title if title is not None else _v
if _v is not None:
self["title"] = _v
_v = arg.pop("titlefont", None)
_v = titlefont if titlefont is not None else _v
if _v is not None:
self["titlefont"] = _v
_v = arg.pop("titleside", None)
_v = titleside if titleside is not None else _v
if _v is not None:
self["titleside"] = _v
_v = arg.pop("x", None)
_v = x if x is not None else _v
if _v is not None:
self["x"] = _v
_v = arg.pop("xanchor", None)
_v = xanchor if xanchor is not None else _v
if _v is not None:
self["xanchor"] = _v
_v = arg.pop("xpad", None)
_v = xpad if xpad is not None else _v
if _v is not None:
self["xpad"] = _v
_v = arg.pop("y", None)
_v = y if y is not None else _v
if _v is not None:
self["y"] = _v
_v = arg.pop("yanchor", None)
_v = yanchor if yanchor is not None else _v
if _v is not None:
self["yanchor"] = _v
_v = arg.pop("ypad", None)
_v = ypad if ypad is not None else _v
if _v is not None:
self["ypad"] = _v
# Process unknown kwargs
# ----------------------
self._process_kwargs(**dict(arg, **kwargs))
# Reset skip_invalid
# ------------------
self._skip_invalid = False
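# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the generated module; it assumes a
# standard plotly installation). Running this file directly draws a parallel-
# coordinates figure whose line color bar exercises several of the properties
# documented above; importing the module is unaffected by this guard.
if __name__ == "__main__":
    import plotly.graph_objects as go

    fig = go.Figure(go.Parcoords(
        dimensions=[
            dict(label="a", values=[1, 2, 3, 4]),
            dict(label="b", values=[10, 20, 30, 40]),
        ],
        line=dict(
            color=[0, 5, 10, 15],              # values mapped through the colorscale
            colorscale="Viridis",
            showscale=True,                    # draw the color bar
            colorbar=dict(
                title=dict(text="score", side="right"),
                tickmode="array",              # explicit tick positions and labels
                tickvals=[0, 5, 10, 15],
                ticktext=["none", "low", "mid", "high"],
                lenmode="fraction", len=0.75,
                thicknessmode="pixels", thickness=20,
                outlinewidth=1,
            ),
        ),
    ))
    fig.show()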
| [
"[email protected]"
]
| |
a03b0d31c5006e59062ef309a36e5e16b33b6c54 | 90419da201cd4948a27d3612f0b482c68026c96f | /sdk/python/pulumi_azure_nextgen/web/v20190801/web_app_private_endpoint_connection.py | 61880949796fac9550f145ecc6ea0878b1e34616 | [
"BSD-3-Clause",
"Apache-2.0"
]
| permissive | test-wiz-sec/pulumi-azure-nextgen | cd4bee5d70cb0d332c04f16bb54e17d016d2adaf | 20a695af0d020b34b0f1c336e1b69702755174cc | refs/heads/master | 2023-06-08T02:35:52.639773 | 2020-11-06T22:39:06 | 2020-11-06T22:39:06 | 312,993,761 | 0 | 0 | Apache-2.0 | 2023-06-02T06:47:28 | 2020-11-15T09:04:00 | null | UTF-8 | Python | false | false | 6,253 | py | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
from . import outputs
from ._inputs import *
__all__ = ['WebAppPrivateEndpointConnection']
class WebAppPrivateEndpointConnection(pulumi.CustomResource):
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
kind: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
private_endpoint_connection_name: Optional[pulumi.Input[str]] = None,
private_link_service_connection_state: Optional[pulumi.Input[pulumi.InputType['PrivateLinkConnectionStateArgs']]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
__props__=None,
__name__=None,
__opts__=None):
"""
Private Endpoint Connection ARM resource.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] kind: Kind of resource.
:param pulumi.Input[str] name: Name of the site.
:param pulumi.Input[pulumi.InputType['PrivateLinkConnectionStateArgs']] private_link_service_connection_state: The state of a private link connection
:param pulumi.Input[str] resource_group_name: Name of the resource group to which the resource belongs.
"""
if __name__ is not None:
warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
resource_name = __name__
if __opts__ is not None:
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = dict()
__props__['kind'] = kind
if name is None:
raise TypeError("Missing required property 'name'")
__props__['name'] = name
if private_endpoint_connection_name is None:
raise TypeError("Missing required property 'private_endpoint_connection_name'")
__props__['private_endpoint_connection_name'] = private_endpoint_connection_name
__props__['private_link_service_connection_state'] = private_link_service_connection_state
if resource_group_name is None:
raise TypeError("Missing required property 'resource_group_name'")
__props__['resource_group_name'] = resource_group_name
__props__['private_endpoint'] = None
__props__['provisioning_state'] = None
__props__['type'] = None
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:web/latest:WebAppPrivateEndpointConnection"), pulumi.Alias(type_="azure-nextgen:web/v20200601:WebAppPrivateEndpointConnection")])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(WebAppPrivateEndpointConnection, __self__).__init__(
'azure-nextgen:web/v20190801:WebAppPrivateEndpointConnection',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'WebAppPrivateEndpointConnection':
"""
Get an existing WebAppPrivateEndpointConnection resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = dict()
return WebAppPrivateEndpointConnection(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter
def kind(self) -> pulumi.Output[Optional[str]]:
"""
Kind of resource.
"""
return pulumi.get(self, "kind")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
Resource Name.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="privateEndpoint")
def private_endpoint(self) -> pulumi.Output[Optional['outputs.ArmIdWrapperResponse']]:
"""
PrivateEndpoint of a remote private endpoint connection
"""
return pulumi.get(self, "private_endpoint")
@property
@pulumi.getter(name="privateLinkServiceConnectionState")
def private_link_service_connection_state(self) -> pulumi.Output[Optional['outputs.PrivateLinkConnectionStateResponse']]:
"""
The state of a private link connection
"""
return pulumi.get(self, "private_link_service_connection_state")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> pulumi.Output[str]:
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter
def type(self) -> pulumi.Output[str]:
"""
Resource type.
"""
return pulumi.get(self, "type")
def translate_output_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
def translate_input_property(self, prop):
return _tables.SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
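# ---------------------------------------------------------------------------
# Illustrative sketch (assumption, not part of the generated SDK): declaring
# this resource in a Pulumi program typically looks like the commented snippet
# below. The resource group, site and connection names are placeholders, and
# the code only runs inside a Pulumi stack (`pulumi up`), so it is left as a
# comment here.
#
#   import pulumi_azure_nextgen.web.v20190801 as web
#
#   connection = web.WebAppPrivateEndpointConnection(
#       "exampleConnection",
#       resource_group_name="example-rg",
#       name="example-site",
#       private_endpoint_connection_name="example-pec",
#       private_link_service_connection_state={
#           "status": "Approved",
#           "description": "Approved in the example program",
#           "actions_required": "None",
#       })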
| [
"[email protected]"
]
| |
12bfd823bba8659e67c22af6c2bd0062937a4c5f | 362224f8a23387e8b369b02a6ff8690c200a2bce | /django/django_orm/courses/courses/settings.py | d11c36f0bb97c266c1f7db84060415fcde1a5412 | []
| no_license | Helenyixuanwang/python_stack | ac94c7c532655bf47592a8453738daac10f220ad | 97fbc77e3971b5df1fe3e79652b294facf8d6cee | refs/heads/main | 2023-06-11T02:17:27.277551 | 2021-06-21T17:01:09 | 2021-06-21T17:01:09 | 364,336,066 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,353 | py | """
Django settings for courses project.
Generated by 'django-admin startproject' using Django 2.2.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '_1&w+4p@b%g)to7vg0oi5+wjevbh58q0l1k3ieg9m7!lsjak@e'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'courses_app',
'django_extensions',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'debug_toolbar',  # newly added while following the Django extras material on the learning platform
]
MIDDLEWARE = [
'debug_toolbar.middleware.DebugToolbarMiddleware',  # newly added May 19, 2021
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'courses.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'courses.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
GRAPH_MODELS = {
'all_applications': True,
'group_models': True,
}
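# Illustrative additions (assumptions, not part of the original project):
# with django-extensions installed, the GRAPH_MODELS settings above are
# typically consumed by:  python manage.py graph_models -a -g -o models.png
# The Debug Toolbar listed in INSTALLED_APPS/MIDDLEWARE usually also needs the
# client addresses it may render for during development; a common local value is:
INTERNAL_IPS = ['127.0.0.1']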
| [
"[email protected]"
]
| |
6df3222d955efd0abe5781c7c48aced830dbed13 | 5dcaf0c31a8362d64134d0dcd9131fb8e827307a | /footmark/vpc/router.py | 9ca6687ef880578501b031e2b61357f50519bf50 | [
"Apache-2.0"
]
| permissive | lixue323/footmark | 10a94ef97cefdab2264088cda70c937c63b819ec | 30cbb2f4b61546d530d955079ccbb38f22fa3edb | refs/heads/master | 2020-08-11T21:57:07.782124 | 2019-10-15T16:15:17 | 2019-10-15T16:16:08 | 211,007,645 | 0 | 0 | Apache-2.0 | 2019-09-26T05:34:05 | 2019-09-26T05:34:05 | null | UTF-8 | Python | false | false | 1,532 | py | """
Represents VPC route tables and route entries
"""
from footmark.vpc.vpcobject import TaggedVPCObject
class RouteTable(TaggedVPCObject):
def __init__(self, connection=None, ):
super(RouteTable, self).__init__(connection)
self.tags = {}
def __repr__(self):
return 'RouteTable:%s' % self.id
def __getattr__(self, name):
if name == 'id':
return self.route_table_id
raise AttributeError
def __setattr__(self, name, value):
if name == 'id':
self.route_table_id = value
super(TaggedVPCObject, self).__setattr__(name, value)
class RouteEntry(TaggedVPCObject):
def __init__(self, connection=None, ):
super(RouteEntry, self).__init__(connection)
self.tags = {}
def __repr__(self):
return 'RouteEntry:%s' % self.destination_cidrblock
def __getattr__(self, name):
if name == 'destination_cidrblock':
return self.destination_cidr_block
if name == 'next_hop_id':
return self.instance_id
if name.startswith('nexthop_'):
return getattr(self, 'next_hop' + name[7:])
raise AttributeError
def __setattr__(self, name, value):
if name == 'destination_cidrblock':
self.destination_cidr_block = value
if name == 'next_hop_id':
self.instance_id = value
if name.startswith('nexthop_'):
setattr(self, 'next_hop' + name[7:], value)
super(TaggedVPCObject, self).__setattr__(name, value)
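# Minimal usage sketch (editor's illustration, not part of the original footmark module;
# it assumes TaggedVPCObject can be instantiated without a live connection, which the
# connection=None defaults above suggest). It exercises the alias attributes defined here:
# 'destination_cidrblock' maps to 'destination_cidr_block', 'next_hop_id' to 'instance_id'.
if __name__ == '__main__':
    entry = RouteEntry()
    entry.destination_cidrblock = '10.0.0.0/16'   # also stored as destination_cidr_block
    entry.next_hop_id = 'i-12345'                 # also stored as instance_id
    print(entry)                                  # -> RouteEntry:10.0.0.0/16
    print(entry.instance_id)                      # -> i-12345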
# ---- File: /Python-数据结构-青岛大学/线性表实现/数组(序列类型)/数组.py | repo: reallyz/Beta | license: no_license ----
# (Python data structures course, Qingdao University: arrays as sequence types)
# One-dimensional: a plain Python list
array=[]
# Two-dimensional: a list of lists
array=[[],]
import numpy as np
array = np.array([])  # np.array() requires an argument; start from an empty sequence
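# A short sketch (editor's addition) contrasting the three flavours above: a 1-D list,
# a 2-D list of lists, and a numpy array with homogeneous elements.
one_d = [1, 2, 3]
one_d.append(4)                          # lists grow dynamically
two_d = [[0] * 3 for _ in range(2)]      # 2 rows x 3 columns, rows are independent objects
two_d[1][2] = 9
np_arr = np.array([1, 2, 3])             # fixed dtype, supports vectorised operations
print(one_d[3], two_d[1][2], np_arr * 2)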
"[email protected]"
]
| |
# ---- File: /Past/Rest/script.py | repo: mohammedjasam/CleverNator | license: no_license ----
"""# Pull the existing Tensorflow Environment
docker run -it gcr.io/tensorflow/tensorflow:latest-devel
# Download the multilabel data from internet to a single folder
# Ex: Place Darth_vader pics folder + Darth_Maul Pics Folder in Star_Wars folder
# Move the multi-label image folder(star_wars) to docker
mv "c:../.../star_wars/" .
# link that folder in the container
docker run -it -v $HOME/data:/data/ gcr.io/tensorflow/tensorflow:latest-devel
docker run -it -v $HOME/dataa:/data/ ci:new
# Go to root
cd ..
# Pull latest tf image
cd tensorflow
git pull
# Train the model using the images
python35 tensorflow/examples/image_retraining/retrain.py \
--bottleneck_dir=/tf_files/bottlenecks \
--how_many_training_steps 500 \
--model_dir=/tf_files/inception \
--output_graph=/tf_files/retrained_graph.pb \
--output_labels=/tf_files/retrained_labels.txt \
--image_dir /tf_files/trainData"""
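# Note (editor's addition): the retrain step above writes the two artifacts that the
# classifier code further down consumes: /tf_files/retrained_graph.pb (the frozen,
# retrained Inception graph) and /tf_files/retrained_labels.txt (one class label per line).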
# Local (Windows) variant of the retrain step, run from C:\Users\Stark\Desktop\CleverNator\KerasBuild\ :
# python35 retrain.py --bottleneck_dir=C:\Users\Stark\Desktop\CleverNator\KerasBuild\tf_files\bottlenecks --how_many_training_steps 500 --model_dir=C:\Users\Stark\Desktop\CleverNator\KerasBuild\tf_files\inception --output_graph=C:\Users\Stark\Desktop\CleverNator\KerasBuild\tf_files\retrained_graph.pb --output_labels=C:\Users\Stark\Desktop\CleverNator\KerasBuild\tf_files\retrained_labels.txt --image_dir C:\Users\Stark\Desktop\CleverNator\KerasBuild\tf_files\trainData
# Go into tf_files and write the classifier script:
#   cat > classifier.py        (paste the code below, then press Ctrl+C to finish)
# Commit the container as an image and re-run it with the image folder mounted:
#   $ docker commit f6434fa9498e star_wars_classifier:initial
#   sha256:d0484f84fbf56d0271c0e35730c2d6ae1f13fb9a06910966380336864b5f2d30
#   Stark@LAPTOP-M7QFG7RS MINGW64 ~
#   $ docker run -it -v $HOME/star_wars:/star_wars/ star_wars_classifier:initial
#   $ docker commit 4f27d772af7b violent:initial
import tensorflow as tf
import sys
# change this as you see fit
image_path = sys.argv[1]
# Read in the image_data
image_data = tf.gfile.FastGFile(image_path, 'rb').read()
# Loads label file, strips off carriage return
label_lines = [line.rstrip() for line
in tf.gfile.GFile("/tf_files/retrained_labels.txt")]
# Unpersists graph from file
with tf.gfile.FastGFile("/tf_files/retrained_graph.pb", 'rb') as f:
graph_def = tf.GraphDef()
graph_def.ParseFromString(f.read())
_ = tf.import_graph_def(graph_def, name='')
with tf.Session() as sess:
# Feed the image_data as input to the graph and get first prediction
softmax_tensor = sess.graph.get_tensor_by_name('final_result:0')
predictions = sess.run(softmax_tensor, \
{'DecodeJpeg/contents:0': image_data})
# Sort to show labels of first prediction in order of confidence
top_k = predictions[0].argsort()[-len(predictions[0]):][::-1]
for node_id in top_k:
human_string = label_lines[node_id]
score = predictions[0][node_id]
print('%s (score = %.5f)' % (human_string, score))
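# Usage sketch (editor's addition; the image path is just an example):
#   python classifier.py /tf_files/trainData/darth_vader/some_image.jpg
# The script prints every label from retrained_labels.txt with its softmax score,
# highest-confidence first.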
# ---- File: /google/spanner/v1/spanner-v1-py/tests/unit/gapic/spanner_v1/test_spanner.py | repo: dizcology/googleapis-gen | license: Apache-2.0 ----
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import mock
import packaging.version
import grpc
from grpc.experimental import aio
import math
import pytest
from proto.marshal.rules.dates import DurationRule, TimestampRule
from google.api_core import client_options
from google.api_core import exceptions as core_exceptions
from google.api_core import gapic_v1
from google.api_core import grpc_helpers
from google.api_core import grpc_helpers_async
from google.auth import credentials as ga_credentials
from google.auth.exceptions import MutualTLSChannelError
from google.cloud.spanner_v1.services.spanner import SpannerAsyncClient
from google.cloud.spanner_v1.services.spanner import SpannerClient
from google.cloud.spanner_v1.services.spanner import pagers
from google.cloud.spanner_v1.services.spanner import transports
from google.cloud.spanner_v1.services.spanner.transports.base import _API_CORE_VERSION
from google.cloud.spanner_v1.services.spanner.transports.base import _GOOGLE_AUTH_VERSION
from google.cloud.spanner_v1.types import commit_response
from google.cloud.spanner_v1.types import keys
from google.cloud.spanner_v1.types import mutation
from google.cloud.spanner_v1.types import result_set
from google.cloud.spanner_v1.types import spanner
from google.cloud.spanner_v1.types import transaction
from google.cloud.spanner_v1.types import type as gs_type
from google.oauth2 import service_account
from google.protobuf import duration_pb2 # type: ignore
from google.protobuf import struct_pb2 # type: ignore
from google.protobuf import timestamp_pb2 # type: ignore
from google.rpc import status_pb2 # type: ignore
import google.auth
# TODO(busunkim): Once google-api-core >= 1.26.0 is required:
# - Delete all the api-core and auth "less than" test cases
# - Delete these pytest markers (Make the "greater than or equal to" tests the default).
requires_google_auth_lt_1_25_0 = pytest.mark.skipif(
packaging.version.parse(_GOOGLE_AUTH_VERSION) >= packaging.version.parse("1.25.0"),
reason="This test requires google-auth < 1.25.0",
)
requires_google_auth_gte_1_25_0 = pytest.mark.skipif(
packaging.version.parse(_GOOGLE_AUTH_VERSION) < packaging.version.parse("1.25.0"),
reason="This test requires google-auth >= 1.25.0",
)
requires_api_core_lt_1_26_0 = pytest.mark.skipif(
packaging.version.parse(_API_CORE_VERSION) >= packaging.version.parse("1.26.0"),
reason="This test requires google-api-core < 1.26.0",
)
requires_api_core_gte_1_26_0 = pytest.mark.skipif(
packaging.version.parse(_API_CORE_VERSION) < packaging.version.parse("1.26.0"),
reason="This test requires google-api-core >= 1.26.0",
)
def client_cert_source_callback():
return b"cert bytes", b"key bytes"
# If default endpoint is localhost, then default mtls endpoint will be the same.
# This method modifies the default endpoint so the client can produce a different
# mtls endpoint for endpoint testing purposes.
def modify_default_endpoint(client):
return "foo.googleapis.com" if ("localhost" in client.DEFAULT_ENDPOINT) else client.DEFAULT_ENDPOINT
def test__get_default_mtls_endpoint():
api_endpoint = "example.googleapis.com"
api_mtls_endpoint = "example.mtls.googleapis.com"
sandbox_endpoint = "example.sandbox.googleapis.com"
sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com"
non_googleapi = "api.example.com"
assert SpannerClient._get_default_mtls_endpoint(None) is None
assert SpannerClient._get_default_mtls_endpoint(api_endpoint) == api_mtls_endpoint
assert SpannerClient._get_default_mtls_endpoint(api_mtls_endpoint) == api_mtls_endpoint
assert SpannerClient._get_default_mtls_endpoint(sandbox_endpoint) == sandbox_mtls_endpoint
assert SpannerClient._get_default_mtls_endpoint(sandbox_mtls_endpoint) == sandbox_mtls_endpoint
assert SpannerClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi
@pytest.mark.parametrize("client_class", [
SpannerClient,
SpannerAsyncClient,
])
def test_spanner_client_from_service_account_info(client_class):
creds = ga_credentials.AnonymousCredentials()
with mock.patch.object(service_account.Credentials, 'from_service_account_info') as factory:
factory.return_value = creds
info = {"valid": True}
client = client_class.from_service_account_info(info)
assert client.transport._credentials == creds
assert isinstance(client, client_class)
assert client.transport._host == 'spanner.googleapis.com:443'
@pytest.mark.parametrize("client_class", [
SpannerClient,
SpannerAsyncClient,
])
def test_spanner_client_from_service_account_file(client_class):
creds = ga_credentials.AnonymousCredentials()
with mock.patch.object(service_account.Credentials, 'from_service_account_file') as factory:
factory.return_value = creds
client = client_class.from_service_account_file("dummy/file/path.json")
assert client.transport._credentials == creds
assert isinstance(client, client_class)
client = client_class.from_service_account_json("dummy/file/path.json")
assert client.transport._credentials == creds
assert isinstance(client, client_class)
assert client.transport._host == 'spanner.googleapis.com:443'
def test_spanner_client_get_transport_class():
transport = SpannerClient.get_transport_class()
available_transports = [
transports.SpannerGrpcTransport,
]
assert transport in available_transports
transport = SpannerClient.get_transport_class("grpc")
assert transport == transports.SpannerGrpcTransport
@pytest.mark.parametrize("client_class,transport_class,transport_name", [
(SpannerClient, transports.SpannerGrpcTransport, "grpc"),
(SpannerAsyncClient, transports.SpannerGrpcAsyncIOTransport, "grpc_asyncio"),
])
@mock.patch.object(SpannerClient, "DEFAULT_ENDPOINT", modify_default_endpoint(SpannerClient))
@mock.patch.object(SpannerAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(SpannerAsyncClient))
def test_spanner_client_client_options(client_class, transport_class, transport_name):
# Check that if channel is provided we won't create a new one.
with mock.patch.object(SpannerClient, 'get_transport_class') as gtc:
transport = transport_class(
credentials=ga_credentials.AnonymousCredentials()
)
client = client_class(transport=transport)
gtc.assert_not_called()
# Check that if channel is provided via str we will create a new one.
with mock.patch.object(SpannerClient, 'get_transport_class') as gtc:
client = client_class(transport=transport_name)
gtc.assert_called()
# Check the case api_endpoint is provided.
options = client_options.ClientOptions(api_endpoint="squid.clam.whelk")
with mock.patch.object(transport_class, '__init__') as patched:
patched.return_value = None
client = client_class(client_options=options)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host="squid.clam.whelk",
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
)
# Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is
# "never".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}):
with mock.patch.object(transport_class, '__init__') as patched:
patched.return_value = None
client = client_class()
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
)
# Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is
# "always".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}):
with mock.patch.object(transport_class, '__init__') as patched:
patched.return_value = None
client = client_class()
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_MTLS_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
)
# Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has
# unsupported value.
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}):
with pytest.raises(MutualTLSChannelError):
client = client_class()
# Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value.
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}):
with pytest.raises(ValueError):
client = client_class()
# Check the case quota_project_id is provided
options = client_options.ClientOptions(quota_project_id="octopus")
with mock.patch.object(transport_class, '__init__') as patched:
patched.return_value = None
client = client_class(client_options=options)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id="octopus",
client_info=transports.base.DEFAULT_CLIENT_INFO,
)
@pytest.mark.parametrize("client_class,transport_class,transport_name,use_client_cert_env", [
(SpannerClient, transports.SpannerGrpcTransport, "grpc", "true"),
(SpannerAsyncClient, transports.SpannerGrpcAsyncIOTransport, "grpc_asyncio", "true"),
(SpannerClient, transports.SpannerGrpcTransport, "grpc", "false"),
(SpannerAsyncClient, transports.SpannerGrpcAsyncIOTransport, "grpc_asyncio", "false"),
])
@mock.patch.object(SpannerClient, "DEFAULT_ENDPOINT", modify_default_endpoint(SpannerClient))
@mock.patch.object(SpannerAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(SpannerAsyncClient))
@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"})
def test_spanner_client_mtls_env_auto(client_class, transport_class, transport_name, use_client_cert_env):
# This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default
# mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists.
# Check the case client_cert_source is provided. Whether client cert is used depends on
# GOOGLE_API_USE_CLIENT_CERTIFICATE value.
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}):
options = client_options.ClientOptions(client_cert_source=client_cert_source_callback)
with mock.patch.object(transport_class, '__init__') as patched:
patched.return_value = None
client = client_class(client_options=options)
if use_client_cert_env == "false":
expected_client_cert_source = None
expected_host = client.DEFAULT_ENDPOINT
else:
expected_client_cert_source = client_cert_source_callback
expected_host = client.DEFAULT_MTLS_ENDPOINT
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=expected_host,
scopes=None,
client_cert_source_for_mtls=expected_client_cert_source,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
)
# Check the case ADC client cert is provided. Whether client cert is used depends on
# GOOGLE_API_USE_CLIENT_CERTIFICATE value.
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}):
with mock.patch.object(transport_class, '__init__') as patched:
with mock.patch('google.auth.transport.mtls.has_default_client_cert_source', return_value=True):
with mock.patch('google.auth.transport.mtls.default_client_cert_source', return_value=client_cert_source_callback):
if use_client_cert_env == "false":
expected_host = client.DEFAULT_ENDPOINT
expected_client_cert_source = None
else:
expected_host = client.DEFAULT_MTLS_ENDPOINT
expected_client_cert_source = client_cert_source_callback
patched.return_value = None
client = client_class()
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=expected_host,
scopes=None,
client_cert_source_for_mtls=expected_client_cert_source,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
)
# Check the case client_cert_source and ADC client cert are not provided.
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}):
with mock.patch.object(transport_class, '__init__') as patched:
with mock.patch("google.auth.transport.mtls.has_default_client_cert_source", return_value=False):
patched.return_value = None
client = client_class()
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
)
@pytest.mark.parametrize("client_class,transport_class,transport_name", [
(SpannerClient, transports.SpannerGrpcTransport, "grpc"),
(SpannerAsyncClient, transports.SpannerGrpcAsyncIOTransport, "grpc_asyncio"),
])
def test_spanner_client_client_options_scopes(client_class, transport_class, transport_name):
# Check the case scopes are provided.
options = client_options.ClientOptions(
scopes=["1", "2"],
)
with mock.patch.object(transport_class, '__init__') as patched:
patched.return_value = None
client = client_class(client_options=options)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=["1", "2"],
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
)
@pytest.mark.parametrize("client_class,transport_class,transport_name", [
(SpannerClient, transports.SpannerGrpcTransport, "grpc"),
(SpannerAsyncClient, transports.SpannerGrpcAsyncIOTransport, "grpc_asyncio"),
])
def test_spanner_client_client_options_credentials_file(client_class, transport_class, transport_name):
# Check the case credentials file is provided.
options = client_options.ClientOptions(
credentials_file="credentials.json"
)
with mock.patch.object(transport_class, '__init__') as patched:
patched.return_value = None
client = client_class(client_options=options)
patched.assert_called_once_with(
credentials=None,
credentials_file="credentials.json",
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
)
def test_spanner_client_client_options_from_dict():
with mock.patch('google.cloud.spanner_v1.services.spanner.transports.SpannerGrpcTransport.__init__') as grpc_transport:
grpc_transport.return_value = None
client = SpannerClient(
client_options={'api_endpoint': 'squid.clam.whelk'}
)
grpc_transport.assert_called_once_with(
credentials=None,
credentials_file=None,
host="squid.clam.whelk",
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
)
def test_create_session(transport: str = 'grpc', request_type=spanner.CreateSessionRequest):
client = SpannerClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_session),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = spanner.Session(
name='name_value',
)
response = client.create_session(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == spanner.CreateSessionRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, spanner.Session)
assert response.name == 'name_value'
def test_create_session_from_dict():
test_create_session(request_type=dict)
def test_create_session_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = SpannerClient(
credentials=ga_credentials.AnonymousCredentials(),
transport='grpc',
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_session),
'__call__') as call:
client.create_session()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == spanner.CreateSessionRequest()
@pytest.mark.asyncio
async def test_create_session_async(transport: str = 'grpc_asyncio', request_type=spanner.CreateSessionRequest):
client = SpannerAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_session),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(spanner.Session(
name='name_value',
))
response = await client.create_session(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == spanner.CreateSessionRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, spanner.Session)
assert response.name == 'name_value'
@pytest.mark.asyncio
async def test_create_session_async_from_dict():
await test_create_session_async(request_type=dict)
def test_create_session_field_headers():
client = SpannerClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = spanner.CreateSessionRequest()
request.database = 'database/value'
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_session),
'__call__') as call:
call.return_value = spanner.Session()
client.create_session(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
'x-goog-request-params',
'database=database/value',
) in kw['metadata']
@pytest.mark.asyncio
async def test_create_session_field_headers_async():
client = SpannerAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = spanner.CreateSessionRequest()
request.database = 'database/value'
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_session),
'__call__') as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(spanner.Session())
await client.create_session(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
'x-goog-request-params',
'database=database/value',
) in kw['metadata']
def test_create_session_flattened():
client = SpannerClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_session),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = spanner.Session()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.create_session(
database='database_value',
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0].database == 'database_value'
def test_create_session_flattened_error():
client = SpannerClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.create_session(
spanner.CreateSessionRequest(),
database='database_value',
)
@pytest.mark.asyncio
async def test_create_session_flattened_async():
client = SpannerAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_session),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = spanner.Session()
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(spanner.Session())
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.create_session(
database='database_value',
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0].database == 'database_value'
@pytest.mark.asyncio
async def test_create_session_flattened_error_async():
client = SpannerAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.create_session(
spanner.CreateSessionRequest(),
database='database_value',
)
def test_batch_create_sessions(transport: str = 'grpc', request_type=spanner.BatchCreateSessionsRequest):
client = SpannerClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.batch_create_sessions),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = spanner.BatchCreateSessionsResponse(
)
response = client.batch_create_sessions(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == spanner.BatchCreateSessionsRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, spanner.BatchCreateSessionsResponse)
def test_batch_create_sessions_from_dict():
test_batch_create_sessions(request_type=dict)
def test_batch_create_sessions_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = SpannerClient(
credentials=ga_credentials.AnonymousCredentials(),
transport='grpc',
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.batch_create_sessions),
'__call__') as call:
client.batch_create_sessions()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == spanner.BatchCreateSessionsRequest()
@pytest.mark.asyncio
async def test_batch_create_sessions_async(transport: str = 'grpc_asyncio', request_type=spanner.BatchCreateSessionsRequest):
client = SpannerAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.batch_create_sessions),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(spanner.BatchCreateSessionsResponse(
))
response = await client.batch_create_sessions(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == spanner.BatchCreateSessionsRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, spanner.BatchCreateSessionsResponse)
@pytest.mark.asyncio
async def test_batch_create_sessions_async_from_dict():
await test_batch_create_sessions_async(request_type=dict)
def test_batch_create_sessions_field_headers():
client = SpannerClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = spanner.BatchCreateSessionsRequest()
request.database = 'database/value'
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.batch_create_sessions),
'__call__') as call:
call.return_value = spanner.BatchCreateSessionsResponse()
client.batch_create_sessions(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
'x-goog-request-params',
'database=database/value',
) in kw['metadata']
@pytest.mark.asyncio
async def test_batch_create_sessions_field_headers_async():
client = SpannerAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = spanner.BatchCreateSessionsRequest()
request.database = 'database/value'
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.batch_create_sessions),
'__call__') as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(spanner.BatchCreateSessionsResponse())
await client.batch_create_sessions(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
'x-goog-request-params',
'database=database/value',
) in kw['metadata']
def test_batch_create_sessions_flattened():
client = SpannerClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.batch_create_sessions),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = spanner.BatchCreateSessionsResponse()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.batch_create_sessions(
database='database_value',
session_count=1420,
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0].database == 'database_value'
assert args[0].session_count == 1420
def test_batch_create_sessions_flattened_error():
client = SpannerClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.batch_create_sessions(
spanner.BatchCreateSessionsRequest(),
database='database_value',
session_count=1420,
)
@pytest.mark.asyncio
async def test_batch_create_sessions_flattened_async():
client = SpannerAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.batch_create_sessions),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = spanner.BatchCreateSessionsResponse()
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(spanner.BatchCreateSessionsResponse())
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.batch_create_sessions(
database='database_value',
session_count=1420,
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0].database == 'database_value'
assert args[0].session_count == 1420
@pytest.mark.asyncio
async def test_batch_create_sessions_flattened_error_async():
client = SpannerAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.batch_create_sessions(
spanner.BatchCreateSessionsRequest(),
database='database_value',
session_count=1420,
)
def test_get_session(transport: str = 'grpc', request_type=spanner.GetSessionRequest):
client = SpannerClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_session),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = spanner.Session(
name='name_value',
)
response = client.get_session(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == spanner.GetSessionRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, spanner.Session)
assert response.name == 'name_value'
def test_get_session_from_dict():
test_get_session(request_type=dict)
def test_get_session_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = SpannerClient(
credentials=ga_credentials.AnonymousCredentials(),
transport='grpc',
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_session),
'__call__') as call:
client.get_session()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == spanner.GetSessionRequest()
@pytest.mark.asyncio
async def test_get_session_async(transport: str = 'grpc_asyncio', request_type=spanner.GetSessionRequest):
client = SpannerAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_session),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(spanner.Session(
name='name_value',
))
response = await client.get_session(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == spanner.GetSessionRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, spanner.Session)
assert response.name == 'name_value'
@pytest.mark.asyncio
async def test_get_session_async_from_dict():
await test_get_session_async(request_type=dict)
def test_get_session_field_headers():
client = SpannerClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = spanner.GetSessionRequest()
request.name = 'name/value'
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_session),
'__call__') as call:
call.return_value = spanner.Session()
client.get_session(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
'x-goog-request-params',
'name=name/value',
) in kw['metadata']
@pytest.mark.asyncio
async def test_get_session_field_headers_async():
client = SpannerAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = spanner.GetSessionRequest()
request.name = 'name/value'
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_session),
'__call__') as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(spanner.Session())
await client.get_session(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
'x-goog-request-params',
'name=name/value',
) in kw['metadata']
def test_get_session_flattened():
client = SpannerClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_session),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = spanner.Session()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.get_session(
name='name_value',
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0].name == 'name_value'
def test_get_session_flattened_error():
client = SpannerClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.get_session(
spanner.GetSessionRequest(),
name='name_value',
)
@pytest.mark.asyncio
async def test_get_session_flattened_async():
client = SpannerAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_session),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = spanner.Session()
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(spanner.Session())
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.get_session(
name='name_value',
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0].name == 'name_value'
@pytest.mark.asyncio
async def test_get_session_flattened_error_async():
client = SpannerAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.get_session(
spanner.GetSessionRequest(),
name='name_value',
)
def test_list_sessions(transport: str = 'grpc', request_type=spanner.ListSessionsRequest):
client = SpannerClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_sessions),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = spanner.ListSessionsResponse(
next_page_token='next_page_token_value',
)
response = client.list_sessions(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == spanner.ListSessionsRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, pagers.ListSessionsPager)
assert response.next_page_token == 'next_page_token_value'
def test_list_sessions_from_dict():
test_list_sessions(request_type=dict)
def test_list_sessions_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = SpannerClient(
credentials=ga_credentials.AnonymousCredentials(),
transport='grpc',
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_sessions),
'__call__') as call:
client.list_sessions()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == spanner.ListSessionsRequest()
@pytest.mark.asyncio
async def test_list_sessions_async(transport: str = 'grpc_asyncio', request_type=spanner.ListSessionsRequest):
client = SpannerAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_sessions),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(spanner.ListSessionsResponse(
next_page_token='next_page_token_value',
))
response = await client.list_sessions(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == spanner.ListSessionsRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, pagers.ListSessionsAsyncPager)
assert response.next_page_token == 'next_page_token_value'
@pytest.mark.asyncio
async def test_list_sessions_async_from_dict():
await test_list_sessions_async(request_type=dict)
def test_list_sessions_field_headers():
client = SpannerClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = spanner.ListSessionsRequest()
request.database = 'database/value'
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_sessions),
'__call__') as call:
call.return_value = spanner.ListSessionsResponse()
client.list_sessions(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
'x-goog-request-params',
'database=database/value',
) in kw['metadata']
@pytest.mark.asyncio
async def test_list_sessions_field_headers_async():
client = SpannerAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = spanner.ListSessionsRequest()
request.database = 'database/value'
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_sessions),
'__call__') as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(spanner.ListSessionsResponse())
await client.list_sessions(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
'x-goog-request-params',
'database=database/value',
) in kw['metadata']
def test_list_sessions_flattened():
client = SpannerClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_sessions),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = spanner.ListSessionsResponse()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.list_sessions(
database='database_value',
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0].database == 'database_value'
def test_list_sessions_flattened_error():
client = SpannerClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.list_sessions(
spanner.ListSessionsRequest(),
database='database_value',
)
@pytest.mark.asyncio
async def test_list_sessions_flattened_async():
client = SpannerAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_sessions),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = spanner.ListSessionsResponse()
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(spanner.ListSessionsResponse())
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.list_sessions(
database='database_value',
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0].database == 'database_value'
@pytest.mark.asyncio
async def test_list_sessions_flattened_error_async():
client = SpannerAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.list_sessions(
spanner.ListSessionsRequest(),
database='database_value',
)
def test_list_sessions_pager():
client = SpannerClient(
credentials=ga_credentials.AnonymousCredentials,
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_sessions),
'__call__') as call:
# Set the response to a series of pages.
call.side_effect = (
spanner.ListSessionsResponse(
sessions=[
spanner.Session(),
spanner.Session(),
spanner.Session(),
],
next_page_token='abc',
),
spanner.ListSessionsResponse(
sessions=[],
next_page_token='def',
),
spanner.ListSessionsResponse(
sessions=[
spanner.Session(),
],
next_page_token='ghi',
),
spanner.ListSessionsResponse(
sessions=[
spanner.Session(),
spanner.Session(),
],
),
RuntimeError,
)
metadata = ()
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((
('database', ''),
)),
)
pager = client.list_sessions(request={})
assert pager._metadata == metadata
results = [i for i in pager]
assert len(results) == 6
assert all(isinstance(i, spanner.Session)
for i in results)
def test_list_sessions_pages():
client = SpannerClient(
credentials=ga_credentials.AnonymousCredentials,
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_sessions),
'__call__') as call:
# Set the response to a series of pages.
call.side_effect = (
spanner.ListSessionsResponse(
sessions=[
spanner.Session(),
spanner.Session(),
spanner.Session(),
],
next_page_token='abc',
),
spanner.ListSessionsResponse(
sessions=[],
next_page_token='def',
),
spanner.ListSessionsResponse(
sessions=[
spanner.Session(),
],
next_page_token='ghi',
),
spanner.ListSessionsResponse(
sessions=[
spanner.Session(),
spanner.Session(),
],
),
RuntimeError,
)
pages = list(client.list_sessions(request={}).pages)
for page_, token in zip(pages, ['abc','def','ghi', '']):
assert page_.raw_page.next_page_token == token
@pytest.mark.asyncio
async def test_list_sessions_async_pager():
client = SpannerAsyncClient(
credentials=ga_credentials.AnonymousCredentials,
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_sessions),
'__call__', new_callable=mock.AsyncMock) as call:
# Set the response to a series of pages.
call.side_effect = (
spanner.ListSessionsResponse(
sessions=[
spanner.Session(),
spanner.Session(),
spanner.Session(),
],
next_page_token='abc',
),
spanner.ListSessionsResponse(
sessions=[],
next_page_token='def',
),
spanner.ListSessionsResponse(
sessions=[
spanner.Session(),
],
next_page_token='ghi',
),
spanner.ListSessionsResponse(
sessions=[
spanner.Session(),
spanner.Session(),
],
),
RuntimeError,
)
async_pager = await client.list_sessions(request={},)
assert async_pager.next_page_token == 'abc'
responses = []
async for response in async_pager:
responses.append(response)
assert len(responses) == 6
assert all(isinstance(i, spanner.Session)
for i in responses)
@pytest.mark.asyncio
async def test_list_sessions_async_pages():
client = SpannerAsyncClient(
credentials=ga_credentials.AnonymousCredentials,
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_sessions),
'__call__', new_callable=mock.AsyncMock) as call:
# Set the response to a series of pages.
call.side_effect = (
spanner.ListSessionsResponse(
sessions=[
spanner.Session(),
spanner.Session(),
spanner.Session(),
],
next_page_token='abc',
),
spanner.ListSessionsResponse(
sessions=[],
next_page_token='def',
),
spanner.ListSessionsResponse(
sessions=[
spanner.Session(),
],
next_page_token='ghi',
),
spanner.ListSessionsResponse(
sessions=[
spanner.Session(),
spanner.Session(),
],
),
RuntimeError,
)
pages = []
async for page_ in (await client.list_sessions(request={})).pages:
pages.append(page_)
for page_, token in zip(pages, ['abc','def','ghi', '']):
assert page_.raw_page.next_page_token == token
def test_delete_session(transport: str = 'grpc', request_type=spanner.DeleteSessionRequest):
client = SpannerClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_session),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = None
response = client.delete_session(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == spanner.DeleteSessionRequest()
# Establish that the response is the type that we expect.
assert response is None
def test_delete_session_from_dict():
test_delete_session(request_type=dict)
def test_delete_session_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = SpannerClient(
credentials=ga_credentials.AnonymousCredentials(),
transport='grpc',
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_session),
'__call__') as call:
client.delete_session()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == spanner.DeleteSessionRequest()
@pytest.mark.asyncio
async def test_delete_session_async(transport: str = 'grpc_asyncio', request_type=spanner.DeleteSessionRequest):
client = SpannerAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_session),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
response = await client.delete_session(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == spanner.DeleteSessionRequest()
# Establish that the response is the type that we expect.
assert response is None
@pytest.mark.asyncio
async def test_delete_session_async_from_dict():
await test_delete_session_async(request_type=dict)
def test_delete_session_field_headers():
client = SpannerClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = spanner.DeleteSessionRequest()
request.name = 'name/value'
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_session),
'__call__') as call:
call.return_value = None
client.delete_session(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
'x-goog-request-params',
'name=name/value',
) in kw['metadata']
@pytest.mark.asyncio
async def test_delete_session_field_headers_async():
client = SpannerAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = spanner.DeleteSessionRequest()
request.name = 'name/value'
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_session),
'__call__') as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
await client.delete_session(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
'x-goog-request-params',
'name=name/value',
) in kw['metadata']
def test_delete_session_flattened():
client = SpannerClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_session),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = None
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.delete_session(
name='name_value',
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0].name == 'name_value'
def test_delete_session_flattened_error():
client = SpannerClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.delete_session(
spanner.DeleteSessionRequest(),
name='name_value',
)
@pytest.mark.asyncio
async def test_delete_session_flattened_async():
client = SpannerAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_session),
'__call__') as call:
# Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.delete_session(
name='name_value',
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0].name == 'name_value'
@pytest.mark.asyncio
async def test_delete_session_flattened_error_async():
client = SpannerAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.delete_session(
spanner.DeleteSessionRequest(),
name='name_value',
)
def test_execute_sql(transport: str = 'grpc', request_type=spanner.ExecuteSqlRequest):
client = SpannerClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.execute_sql),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = result_set.ResultSet(
)
response = client.execute_sql(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == spanner.ExecuteSqlRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, result_set.ResultSet)
def test_execute_sql_from_dict():
test_execute_sql(request_type=dict)
def test_execute_sql_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = SpannerClient(
credentials=ga_credentials.AnonymousCredentials(),
transport='grpc',
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.execute_sql),
'__call__') as call:
client.execute_sql()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == spanner.ExecuteSqlRequest()
@pytest.mark.asyncio
async def test_execute_sql_async(transport: str = 'grpc_asyncio', request_type=spanner.ExecuteSqlRequest):
client = SpannerAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.execute_sql),
'__call__') as call:
# Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(result_set.ResultSet(
))
response = await client.execute_sql(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == spanner.ExecuteSqlRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, result_set.ResultSet)
@pytest.mark.asyncio
async def test_execute_sql_async_from_dict():
await test_execute_sql_async(request_type=dict)
def test_execute_sql_field_headers():
client = SpannerClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = spanner.ExecuteSqlRequest()
request.session = 'session/value'
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.execute_sql),
'__call__') as call:
call.return_value = result_set.ResultSet()
client.execute_sql(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
'x-goog-request-params',
'session=session/value',
) in kw['metadata']
@pytest.mark.asyncio
async def test_execute_sql_field_headers_async():
client = SpannerAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = spanner.ExecuteSqlRequest()
request.session = 'session/value'
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.execute_sql),
'__call__') as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(result_set.ResultSet())
await client.execute_sql(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
'x-goog-request-params',
'session=session/value',
) in kw['metadata']
def test_execute_streaming_sql(transport: str = 'grpc', request_type=spanner.ExecuteSqlRequest):
client = SpannerClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.execute_streaming_sql),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = iter([result_set.PartialResultSet()])
response = client.execute_streaming_sql(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == spanner.ExecuteSqlRequest()
# Establish that the response is the type that we expect.
for message in response:
assert isinstance(message, result_set.PartialResultSet)
def test_execute_streaming_sql_from_dict():
test_execute_streaming_sql(request_type=dict)
def test_execute_streaming_sql_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = SpannerClient(
credentials=ga_credentials.AnonymousCredentials(),
transport='grpc',
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.execute_streaming_sql),
'__call__') as call:
client.execute_streaming_sql()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == spanner.ExecuteSqlRequest()
@pytest.mark.asyncio
async def test_execute_streaming_sql_async(transport: str = 'grpc_asyncio', request_type=spanner.ExecuteSqlRequest):
client = SpannerAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.execute_streaming_sql),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = mock.Mock(aio.UnaryStreamCall, autospec=True)
call.return_value.read = mock.AsyncMock(side_effect=[result_set.PartialResultSet()])
response = await client.execute_streaming_sql(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == spanner.ExecuteSqlRequest()
# Establish that the response is the type that we expect.
message = await response.read()
assert isinstance(message, result_set.PartialResultSet)
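# Note: for server-streaming RPCs the synchronous surface is exercised with a
# plain iterator of ``PartialResultSet`` messages, while the asyncio surface is
# mocked as an ``aio.UnaryStreamCall`` whose ``read()`` coroutine yields them;
# each test asserts against whichever shape matches the transport under test.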
@pytest.mark.asyncio
async def test_execute_streaming_sql_async_from_dict():
await test_execute_streaming_sql_async(request_type=dict)
def test_execute_streaming_sql_field_headers():
client = SpannerClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = spanner.ExecuteSqlRequest()
request.session = 'session/value'
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.execute_streaming_sql),
'__call__') as call:
call.return_value = iter([result_set.PartialResultSet()])
client.execute_streaming_sql(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
'x-goog-request-params',
'session=session/value',
) in kw['metadata']
@pytest.mark.asyncio
async def test_execute_streaming_sql_field_headers_async():
client = SpannerAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = spanner.ExecuteSqlRequest()
request.session = 'session/value'
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.execute_streaming_sql),
'__call__') as call:
call.return_value = mock.Mock(aio.UnaryStreamCall, autospec=True)
call.return_value.read = mock.AsyncMock(side_effect=[result_set.PartialResultSet()])
await client.execute_streaming_sql(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
'x-goog-request-params',
'session=session/value',
) in kw['metadata']
def test_execute_batch_dml(transport: str = 'grpc', request_type=spanner.ExecuteBatchDmlRequest):
client = SpannerClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.execute_batch_dml),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = spanner.ExecuteBatchDmlResponse(
)
response = client.execute_batch_dml(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == spanner.ExecuteBatchDmlRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, spanner.ExecuteBatchDmlResponse)
def test_execute_batch_dml_from_dict():
test_execute_batch_dml(request_type=dict)
def test_execute_batch_dml_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = SpannerClient(
credentials=ga_credentials.AnonymousCredentials(),
transport='grpc',
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.execute_batch_dml),
'__call__') as call:
client.execute_batch_dml()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == spanner.ExecuteBatchDmlRequest()
@pytest.mark.asyncio
async def test_execute_batch_dml_async(transport: str = 'grpc_asyncio', request_type=spanner.ExecuteBatchDmlRequest):
client = SpannerAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.execute_batch_dml),
'__call__') as call:
# Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(spanner.ExecuteBatchDmlResponse(
))
response = await client.execute_batch_dml(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == spanner.ExecuteBatchDmlRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, spanner.ExecuteBatchDmlResponse)
@pytest.mark.asyncio
async def test_execute_batch_dml_async_from_dict():
await test_execute_batch_dml_async(request_type=dict)
def test_execute_batch_dml_field_headers():
client = SpannerClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = spanner.ExecuteBatchDmlRequest()
request.session = 'session/value'
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.execute_batch_dml),
'__call__') as call:
call.return_value = spanner.ExecuteBatchDmlResponse()
client.execute_batch_dml(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
'x-goog-request-params',
'session=session/value',
) in kw['metadata']
@pytest.mark.asyncio
async def test_execute_batch_dml_field_headers_async():
client = SpannerAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = spanner.ExecuteBatchDmlRequest()
request.session = 'session/value'
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.execute_batch_dml),
'__call__') as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(spanner.ExecuteBatchDmlResponse())
await client.execute_batch_dml(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
'x-goog-request-params',
'session=session/value',
) in kw['metadata']
def test_read(transport: str = 'grpc', request_type=spanner.ReadRequest):
client = SpannerClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.read),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = result_set.ResultSet(
)
response = client.read(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == spanner.ReadRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, result_set.ResultSet)
def test_read_from_dict():
test_read(request_type=dict)
def test_read_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = SpannerClient(
credentials=ga_credentials.AnonymousCredentials(),
transport='grpc',
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.read),
'__call__') as call:
client.read()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == spanner.ReadRequest()
@pytest.mark.asyncio
async def test_read_async(transport: str = 'grpc_asyncio', request_type=spanner.ReadRequest):
client = SpannerAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.read),
'__call__') as call:
# Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(result_set.ResultSet(
))
response = await client.read(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == spanner.ReadRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, result_set.ResultSet)
@pytest.mark.asyncio
async def test_read_async_from_dict():
await test_read_async(request_type=dict)
def test_read_field_headers():
client = SpannerClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = spanner.ReadRequest()
request.session = 'session/value'
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.read),
'__call__') as call:
call.return_value = result_set.ResultSet()
client.read(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
'x-goog-request-params',
'session=session/value',
) in kw['metadata']
@pytest.mark.asyncio
async def test_read_field_headers_async():
client = SpannerAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = spanner.ReadRequest()
request.session = 'session/value'
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.read),
'__call__') as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(result_set.ResultSet())
await client.read(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
'x-goog-request-params',
'session=session/value',
) in kw['metadata']
def test_streaming_read(transport: str = 'grpc', request_type=spanner.ReadRequest):
client = SpannerClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.streaming_read),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = iter([result_set.PartialResultSet()])
response = client.streaming_read(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == spanner.ReadRequest()
# Establish that the response is the type that we expect.
for message in response:
assert isinstance(message, result_set.PartialResultSet)
def test_streaming_read_from_dict():
test_streaming_read(request_type=dict)
def test_streaming_read_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = SpannerClient(
credentials=ga_credentials.AnonymousCredentials(),
transport='grpc',
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.streaming_read),
'__call__') as call:
client.streaming_read()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == spanner.ReadRequest()
@pytest.mark.asyncio
async def test_streaming_read_async(transport: str = 'grpc_asyncio', request_type=spanner.ReadRequest):
client = SpannerAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.streaming_read),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = mock.Mock(aio.UnaryStreamCall, autospec=True)
call.return_value.read = mock.AsyncMock(side_effect=[result_set.PartialResultSet()])
response = await client.streaming_read(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == spanner.ReadRequest()
# Establish that the response is the type that we expect.
message = await response.read()
assert isinstance(message, result_set.PartialResultSet)
@pytest.mark.asyncio
async def test_streaming_read_async_from_dict():
await test_streaming_read_async(request_type=dict)
def test_streaming_read_field_headers():
client = SpannerClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = spanner.ReadRequest()
request.session = 'session/value'
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.streaming_read),
'__call__') as call:
call.return_value = iter([result_set.PartialResultSet()])
client.streaming_read(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
'x-goog-request-params',
'session=session/value',
) in kw['metadata']
@pytest.mark.asyncio
async def test_streaming_read_field_headers_async():
client = SpannerAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = spanner.ReadRequest()
request.session = 'session/value'
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.streaming_read),
'__call__') as call:
call.return_value = mock.Mock(aio.UnaryStreamCall, autospec=True)
call.return_value.read = mock.AsyncMock(side_effect=[result_set.PartialResultSet()])
await client.streaming_read(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
'x-goog-request-params',
'session=session/value',
) in kw['metadata']
def test_begin_transaction(transport: str = 'grpc', request_type=spanner.BeginTransactionRequest):
client = SpannerClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.begin_transaction),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = transaction.Transaction(
id=b'id_blob',
)
response = client.begin_transaction(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == spanner.BeginTransactionRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, transaction.Transaction)
assert response.id == b'id_blob'
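    # Note: ``Transaction.id`` is a ``bytes`` field on the wire, hence the
    # ``b'id_blob'`` sentinel used for both the mocked return value and the
    # assertion above.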
def test_begin_transaction_from_dict():
test_begin_transaction(request_type=dict)
def test_begin_transaction_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = SpannerClient(
credentials=ga_credentials.AnonymousCredentials(),
transport='grpc',
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.begin_transaction),
'__call__') as call:
client.begin_transaction()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == spanner.BeginTransactionRequest()
@pytest.mark.asyncio
async def test_begin_transaction_async(transport: str = 'grpc_asyncio', request_type=spanner.BeginTransactionRequest):
client = SpannerAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.begin_transaction),
'__call__') as call:
# Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(transaction.Transaction(
id=b'id_blob',
))
response = await client.begin_transaction(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == spanner.BeginTransactionRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, transaction.Transaction)
assert response.id == b'id_blob'
@pytest.mark.asyncio
async def test_begin_transaction_async_from_dict():
await test_begin_transaction_async(request_type=dict)
def test_begin_transaction_field_headers():
client = SpannerClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = spanner.BeginTransactionRequest()
request.session = 'session/value'
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.begin_transaction),
'__call__') as call:
call.return_value = transaction.Transaction()
client.begin_transaction(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
'x-goog-request-params',
'session=session/value',
) in kw['metadata']
@pytest.mark.asyncio
async def test_begin_transaction_field_headers_async():
client = SpannerAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = spanner.BeginTransactionRequest()
request.session = 'session/value'
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.begin_transaction),
'__call__') as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(transaction.Transaction())
await client.begin_transaction(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
'x-goog-request-params',
'session=session/value',
) in kw['metadata']
def test_begin_transaction_flattened():
client = SpannerClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.begin_transaction),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = transaction.Transaction()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.begin_transaction(
session='session_value',
options=transaction.TransactionOptions(read_write=None),
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0].session == 'session_value'
assert args[0].options == transaction.TransactionOptions(read_write=None)
def test_begin_transaction_flattened_error():
client = SpannerClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.begin_transaction(
spanner.BeginTransactionRequest(),
session='session_value',
options=transaction.TransactionOptions(read_write=None),
)
@pytest.mark.asyncio
async def test_begin_transaction_flattened_async():
client = SpannerAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.begin_transaction),
'__call__') as call:
# Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(transaction.Transaction())
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.begin_transaction(
session='session_value',
options=transaction.TransactionOptions(read_write=None),
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0].session == 'session_value'
assert args[0].options == transaction.TransactionOptions(read_write=None)
@pytest.mark.asyncio
async def test_begin_transaction_flattened_error_async():
client = SpannerAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.begin_transaction(
spanner.BeginTransactionRequest(),
session='session_value',
options=transaction.TransactionOptions(read_write=None),
)
def test_commit(transport: str = 'grpc', request_type=spanner.CommitRequest):
client = SpannerClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.commit),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = commit_response.CommitResponse(
)
response = client.commit(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == spanner.CommitRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, commit_response.CommitResponse)
def test_commit_from_dict():
test_commit(request_type=dict)
def test_commit_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = SpannerClient(
credentials=ga_credentials.AnonymousCredentials(),
transport='grpc',
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.commit),
'__call__') as call:
client.commit()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == spanner.CommitRequest()
@pytest.mark.asyncio
async def test_commit_async(transport: str = 'grpc_asyncio', request_type=spanner.CommitRequest):
client = SpannerAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.commit),
'__call__') as call:
# Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(commit_response.CommitResponse(
))
response = await client.commit(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == spanner.CommitRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, commit_response.CommitResponse)
@pytest.mark.asyncio
async def test_commit_async_from_dict():
await test_commit_async(request_type=dict)
def test_commit_field_headers():
client = SpannerClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = spanner.CommitRequest()
request.session = 'session/value'
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.commit),
'__call__') as call:
call.return_value = commit_response.CommitResponse()
client.commit(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
'x-goog-request-params',
'session=session/value',
) in kw['metadata']
@pytest.mark.asyncio
async def test_commit_field_headers_async():
client = SpannerAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = spanner.CommitRequest()
request.session = 'session/value'
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.commit),
'__call__') as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(commit_response.CommitResponse())
await client.commit(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
'x-goog-request-params',
'session=session/value',
) in kw['metadata']
def test_commit_flattened():
client = SpannerClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.commit),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = commit_response.CommitResponse()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.commit(
session='session_value',
transaction_id=b'transaction_id_blob',
mutations=[mutation.Mutation(insert=mutation.Mutation.Write(table='table_value'))],
single_use_transaction=transaction.TransactionOptions(read_write=None),
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0].session == 'session_value'
assert args[0].mutations == [mutation.Mutation(insert=mutation.Mutation.Write(table='table_value'))]
assert args[0].single_use_transaction == transaction.TransactionOptions(read_write=None)
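    # Note: ``transaction_id`` and ``single_use_transaction`` appear to populate
    # the same ``transaction`` oneof on ``CommitRequest``, so only the field set
    # last survives on the request; the assertions above therefore check
    # ``mutations`` and ``single_use_transaction`` but not ``transaction_id``.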
def test_commit_flattened_error():
client = SpannerClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.commit(
spanner.CommitRequest(),
session='session_value',
transaction_id=b'transaction_id_blob',
mutations=[mutation.Mutation(insert=mutation.Mutation.Write(table='table_value'))],
single_use_transaction=transaction.TransactionOptions(read_write=None),
)
@pytest.mark.asyncio
async def test_commit_flattened_async():
client = SpannerAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.commit),
'__call__') as call:
# Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(commit_response.CommitResponse())
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.commit(
session='session_value',
transaction_id=b'transaction_id_blob',
mutations=[mutation.Mutation(insert=mutation.Mutation.Write(table='table_value'))],
single_use_transaction=transaction.TransactionOptions(read_write=None),
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0].session == 'session_value'
assert args[0].mutations == [mutation.Mutation(insert=mutation.Mutation.Write(table='table_value'))]
assert args[0].single_use_transaction == transaction.TransactionOptions(read_write=None)
@pytest.mark.asyncio
async def test_commit_flattened_error_async():
client = SpannerAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.commit(
spanner.CommitRequest(),
session='session_value',
transaction_id=b'transaction_id_blob',
mutations=[mutation.Mutation(insert=mutation.Mutation.Write(table='table_value'))],
single_use_transaction=transaction.TransactionOptions(read_write=None),
)
def test_rollback(transport: str = 'grpc', request_type=spanner.RollbackRequest):
client = SpannerClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.rollback),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = None
response = client.rollback(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == spanner.RollbackRequest()
# Establish that the response is the type that we expect.
assert response is None
def test_rollback_from_dict():
test_rollback(request_type=dict)
def test_rollback_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = SpannerClient(
credentials=ga_credentials.AnonymousCredentials(),
transport='grpc',
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.rollback),
'__call__') as call:
client.rollback()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == spanner.RollbackRequest()
@pytest.mark.asyncio
async def test_rollback_async(transport: str = 'grpc_asyncio', request_type=spanner.RollbackRequest):
client = SpannerAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.rollback),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
response = await client.rollback(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == spanner.RollbackRequest()
# Establish that the response is the type that we expect.
assert response is None
@pytest.mark.asyncio
async def test_rollback_async_from_dict():
await test_rollback_async(request_type=dict)
def test_rollback_field_headers():
client = SpannerClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = spanner.RollbackRequest()
request.session = 'session/value'
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.rollback),
'__call__') as call:
call.return_value = None
client.rollback(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
'x-goog-request-params',
'session=session/value',
) in kw['metadata']
@pytest.mark.asyncio
async def test_rollback_field_headers_async():
client = SpannerAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = spanner.RollbackRequest()
request.session = 'session/value'
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.rollback),
'__call__') as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
await client.rollback(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
'x-goog-request-params',
'session=session/value',
) in kw['metadata']
def test_rollback_flattened():
client = SpannerClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.rollback),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = None
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.rollback(
session='session_value',
transaction_id=b'transaction_id_blob',
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0].session == 'session_value'
assert args[0].transaction_id == b'transaction_id_blob'
def test_rollback_flattened_error():
client = SpannerClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.rollback(
spanner.RollbackRequest(),
session='session_value',
transaction_id=b'transaction_id_blob',
)
@pytest.mark.asyncio
async def test_rollback_flattened_async():
client = SpannerAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.rollback),
'__call__') as call:
# Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.rollback(
session='session_value',
transaction_id=b'transaction_id_blob',
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0].session == 'session_value'
assert args[0].transaction_id == b'transaction_id_blob'
@pytest.mark.asyncio
async def test_rollback_flattened_error_async():
client = SpannerAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.rollback(
spanner.RollbackRequest(),
session='session_value',
transaction_id=b'transaction_id_blob',
)
def test_partition_query(transport: str = 'grpc', request_type=spanner.PartitionQueryRequest):
client = SpannerClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.partition_query),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = spanner.PartitionResponse(
)
response = client.partition_query(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == spanner.PartitionQueryRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, spanner.PartitionResponse)
def test_partition_query_from_dict():
test_partition_query(request_type=dict)
def test_partition_query_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = SpannerClient(
credentials=ga_credentials.AnonymousCredentials(),
transport='grpc',
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.partition_query),
'__call__') as call:
client.partition_query()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == spanner.PartitionQueryRequest()
@pytest.mark.asyncio
async def test_partition_query_async(transport: str = 'grpc_asyncio', request_type=spanner.PartitionQueryRequest):
client = SpannerAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.partition_query),
'__call__') as call:
# Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(spanner.PartitionResponse(
))
response = await client.partition_query(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == spanner.PartitionQueryRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, spanner.PartitionResponse)
@pytest.mark.asyncio
async def test_partition_query_async_from_dict():
await test_partition_query_async(request_type=dict)
def test_partition_query_field_headers():
client = SpannerClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = spanner.PartitionQueryRequest()
request.session = 'session/value'
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.partition_query),
'__call__') as call:
call.return_value = spanner.PartitionResponse()
client.partition_query(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
'x-goog-request-params',
'session=session/value',
) in kw['metadata']
@pytest.mark.asyncio
async def test_partition_query_field_headers_async():
client = SpannerAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = spanner.PartitionQueryRequest()
request.session = 'session/value'
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.partition_query),
'__call__') as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(spanner.PartitionResponse())
await client.partition_query(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
'x-goog-request-params',
'session=session/value',
) in kw['metadata']
def test_partition_read(transport: str = 'grpc', request_type=spanner.PartitionReadRequest):
client = SpannerClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.partition_read),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = spanner.PartitionResponse(
)
response = client.partition_read(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == spanner.PartitionReadRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, spanner.PartitionResponse)
def test_partition_read_from_dict():
test_partition_read(request_type=dict)
def test_partition_read_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = SpannerClient(
credentials=ga_credentials.AnonymousCredentials(),
transport='grpc',
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.partition_read),
'__call__') as call:
client.partition_read()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == spanner.PartitionReadRequest()
@pytest.mark.asyncio
async def test_partition_read_async(transport: str = 'grpc_asyncio', request_type=spanner.PartitionReadRequest):
client = SpannerAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.partition_read),
'__call__') as call:
# Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(spanner.PartitionResponse(
))
response = await client.partition_read(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == spanner.PartitionReadRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, spanner.PartitionResponse)
@pytest.mark.asyncio
async def test_partition_read_async_from_dict():
await test_partition_read_async(request_type=dict)
def test_partition_read_field_headers():
client = SpannerClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = spanner.PartitionReadRequest()
request.session = 'session/value'
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.partition_read),
'__call__') as call:
call.return_value = spanner.PartitionResponse()
client.partition_read(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
'x-goog-request-params',
'session=session/value',
) in kw['metadata']
@pytest.mark.asyncio
async def test_partition_read_field_headers_async():
client = SpannerAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = spanner.PartitionReadRequest()
request.session = 'session/value'
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.partition_read),
'__call__') as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(spanner.PartitionResponse())
await client.partition_read(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
'x-goog-request-params',
'session=session/value',
) in kw['metadata']
def test_credentials_transport_error():
# It is an error to provide credentials and a transport instance.
transport = transports.SpannerGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
with pytest.raises(ValueError):
client = SpannerClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
# It is an error to provide a credentials file and a transport instance.
transport = transports.SpannerGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
with pytest.raises(ValueError):
client = SpannerClient(
client_options={"credentials_file": "credentials.json"},
transport=transport,
)
# It is an error to provide scopes and a transport instance.
transport = transports.SpannerGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
with pytest.raises(ValueError):
client = SpannerClient(
client_options={"scopes": ["1", "2"]},
transport=transport,
)
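# Illustrative sketch (an assumption mirroring test_transport_instance below):
# when a pre-built transport is supplied, credentials and scopes belong on the
# transport itself, so the client is constructed with the transport alone;
# that is why each of the combinations above raises ``ValueError``.
def _example_client_with_explicit_transport():
    """Illustrative only: construct a client around a pre-built transport."""
    transport = transports.SpannerGrpcTransport(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    return SpannerClient(transport=transport)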
def test_transport_instance():
# A client may be instantiated with a custom transport instance.
transport = transports.SpannerGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
client = SpannerClient(transport=transport)
assert client.transport is transport
def test_transport_get_channel():
# A client may be instantiated with a custom transport instance.
transport = transports.SpannerGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
channel = transport.grpc_channel
assert channel
transport = transports.SpannerGrpcAsyncIOTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
channel = transport.grpc_channel
assert channel
@pytest.mark.parametrize("transport_class", [
transports.SpannerGrpcTransport,
transports.SpannerGrpcAsyncIOTransport,
])
def test_transport_adc(transport_class):
# Test default credentials are used if not provided.
with mock.patch.object(google.auth, 'default') as adc:
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
transport_class()
adc.assert_called_once()
def test_transport_grpc_default():
# A client should use the gRPC transport by default.
client = SpannerClient(
credentials=ga_credentials.AnonymousCredentials(),
)
assert isinstance(
client.transport,
transports.SpannerGrpcTransport,
)
def test_spanner_base_transport_error():
# Passing both a credentials object and credentials_file should raise an error
with pytest.raises(core_exceptions.DuplicateCredentialArgs):
transport = transports.SpannerTransport(
credentials=ga_credentials.AnonymousCredentials(),
credentials_file="credentials.json"
)
def test_spanner_base_transport():
# Instantiate the base transport.
with mock.patch('google.cloud.spanner_v1.services.spanner.transports.SpannerTransport.__init__') as Transport:
Transport.return_value = None
transport = transports.SpannerTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
# Every method on the transport should just blindly
# raise NotImplementedError.
methods = (
'create_session',
'batch_create_sessions',
'get_session',
'list_sessions',
'delete_session',
'execute_sql',
'execute_streaming_sql',
'execute_batch_dml',
'read',
'streaming_read',
'begin_transaction',
'commit',
'rollback',
'partition_query',
'partition_read',
)
for method in methods:
with pytest.raises(NotImplementedError):
getattr(transport, method)(request=object())
@requires_google_auth_gte_1_25_0
def test_spanner_base_transport_with_credentials_file():
# Instantiate the base transport with a credentials file
with mock.patch.object(google.auth, 'load_credentials_from_file', autospec=True) as load_creds, mock.patch('google.cloud.spanner_v1.services.spanner.transports.SpannerTransport._prep_wrapped_messages') as Transport:
Transport.return_value = None
load_creds.return_value = (ga_credentials.AnonymousCredentials(), None)
transport = transports.SpannerTransport(
credentials_file="credentials.json",
quota_project_id="octopus",
)
load_creds.assert_called_once_with("credentials.json",
scopes=None,
default_scopes=(
'https://www.googleapis.com/auth/cloud-platform',
'https://www.googleapis.com/auth/spanner.data',
),
quota_project_id="octopus",
)
@requires_google_auth_lt_1_25_0
def test_spanner_base_transport_with_credentials_file_old_google_auth():
# Instantiate the base transport with a credentials file
with mock.patch.object(google.auth, 'load_credentials_from_file', autospec=True) as load_creds, mock.patch('google.cloud.spanner_v1.services.spanner.transports.SpannerTransport._prep_wrapped_messages') as Transport:
Transport.return_value = None
load_creds.return_value = (ga_credentials.AnonymousCredentials(), None)
transport = transports.SpannerTransport(
credentials_file="credentials.json",
quota_project_id="octopus",
)
load_creds.assert_called_once_with("credentials.json", scopes=(
'https://www.googleapis.com/auth/cloud-platform',
'https://www.googleapis.com/auth/spanner.data',
),
quota_project_id="octopus",
)
def test_spanner_base_transport_with_adc():
# Test the default credentials are used if credentials and credentials_file are None.
with mock.patch.object(google.auth, 'default', autospec=True) as adc, mock.patch('google.cloud.spanner_v1.services.spanner.transports.SpannerTransport._prep_wrapped_messages') as Transport:
Transport.return_value = None
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
transport = transports.SpannerTransport()
adc.assert_called_once()
@requires_google_auth_gte_1_25_0
def test_spanner_auth_adc():
# If no credentials are provided, we should use ADC credentials.
with mock.patch.object(google.auth, 'default', autospec=True) as adc:
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
SpannerClient()
adc.assert_called_once_with(
scopes=None,
default_scopes=(
'https://www.googleapis.com/auth/cloud-platform',
'https://www.googleapis.com/auth/spanner.data',
),
quota_project_id=None,
)
@requires_google_auth_lt_1_25_0
def test_spanner_auth_adc_old_google_auth():
# If no credentials are provided, we should use ADC credentials.
with mock.patch.object(google.auth, 'default', autospec=True) as adc:
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
SpannerClient()
adc.assert_called_once_with(
scopes=( 'https://www.googleapis.com/auth/cloud-platform', 'https://www.googleapis.com/auth/spanner.data',),
quota_project_id=None,
)
@pytest.mark.parametrize(
"transport_class",
[
transports.SpannerGrpcTransport,
transports.SpannerGrpcAsyncIOTransport,
],
)
@requires_google_auth_gte_1_25_0
def test_spanner_transport_auth_adc(transport_class):
# If credentials and host are not provided, the transport class should use
# ADC credentials.
with mock.patch.object(google.auth, 'default', autospec=True) as adc:
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
transport_class(quota_project_id="octopus", scopes=["1", "2"])
adc.assert_called_once_with(
scopes=["1", "2"],
default_scopes=( 'https://www.googleapis.com/auth/cloud-platform', 'https://www.googleapis.com/auth/spanner.data',),
quota_project_id="octopus",
)
@pytest.mark.parametrize(
"transport_class",
[
transports.SpannerGrpcTransport,
transports.SpannerGrpcAsyncIOTransport,
],
)
@requires_google_auth_lt_1_25_0
def test_spanner_transport_auth_adc_old_google_auth(transport_class):
# If credentials and host are not provided, the transport class should use
# ADC credentials.
with mock.patch.object(google.auth, "default", autospec=True) as adc:
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
transport_class(quota_project_id="octopus")
adc.assert_called_once_with(scopes=(
'https://www.googleapis.com/auth/cloud-platform',
'https://www.googleapis.com/auth/spanner.data',
),
quota_project_id="octopus",
)
@pytest.mark.parametrize(
"transport_class,grpc_helpers",
[
(transports.SpannerGrpcTransport, grpc_helpers),
(transports.SpannerGrpcAsyncIOTransport, grpc_helpers_async)
],
)
@requires_api_core_gte_1_26_0
def test_spanner_transport_create_channel(transport_class, grpc_helpers):
# If credentials and host are not provided, the transport class should use
# ADC credentials.
with mock.patch.object(google.auth, "default", autospec=True) as adc, mock.patch.object(
grpc_helpers, "create_channel", autospec=True
) as create_channel:
creds = ga_credentials.AnonymousCredentials()
adc.return_value = (creds, None)
transport_class(
quota_project_id="octopus",
scopes=["1", "2"]
)
create_channel.assert_called_with(
"spanner.googleapis.com:443",
credentials=creds,
credentials_file=None,
quota_project_id="octopus",
default_scopes=(
'https://www.googleapis.com/auth/cloud-platform',
'https://www.googleapis.com/auth/spanner.data',
),
scopes=["1", "2"],
default_host="spanner.googleapis.com",
ssl_credentials=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
@pytest.mark.parametrize(
"transport_class,grpc_helpers",
[
(transports.SpannerGrpcTransport, grpc_helpers),
(transports.SpannerGrpcAsyncIOTransport, grpc_helpers_async)
],
)
@requires_api_core_lt_1_26_0
def test_spanner_transport_create_channel_old_api_core(transport_class, grpc_helpers):
# If credentials and host are not provided, the transport class should use
# ADC credentials.
with mock.patch.object(google.auth, "default", autospec=True) as adc, mock.patch.object(
grpc_helpers, "create_channel", autospec=True
) as create_channel:
creds = ga_credentials.AnonymousCredentials()
adc.return_value = (creds, None)
transport_class(quota_project_id="octopus")
create_channel.assert_called_with(
"spanner.googleapis.com:443",
credentials=creds,
credentials_file=None,
quota_project_id="octopus",
scopes=(
'https://www.googleapis.com/auth/cloud-platform',
'https://www.googleapis.com/auth/spanner.data',
),
ssl_credentials=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
@pytest.mark.parametrize(
"transport_class,grpc_helpers",
[
(transports.SpannerGrpcTransport, grpc_helpers),
(transports.SpannerGrpcAsyncIOTransport, grpc_helpers_async)
],
)
@requires_api_core_lt_1_26_0
def test_spanner_transport_create_channel_user_scopes(transport_class, grpc_helpers):
# If credentials and host are not provided, the transport class should use
# ADC credentials.
with mock.patch.object(google.auth, "default", autospec=True) as adc, mock.patch.object(
grpc_helpers, "create_channel", autospec=True
) as create_channel:
creds = ga_credentials.AnonymousCredentials()
adc.return_value = (creds, None)
transport_class(quota_project_id="octopus", scopes=["1", "2"])
create_channel.assert_called_with(
"spanner.googleapis.com:443",
credentials=creds,
credentials_file=None,
quota_project_id="octopus",
scopes=["1", "2"],
ssl_credentials=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
@pytest.mark.parametrize("transport_class", [transports.SpannerGrpcTransport, transports.SpannerGrpcAsyncIOTransport])
def test_spanner_grpc_transport_client_cert_source_for_mtls(
transport_class
):
cred = ga_credentials.AnonymousCredentials()
# Check ssl_channel_credentials is used if provided.
with mock.patch.object(transport_class, "create_channel") as mock_create_channel:
mock_ssl_channel_creds = mock.Mock()
transport_class(
host="squid.clam.whelk",
credentials=cred,
ssl_channel_credentials=mock_ssl_channel_creds
)
mock_create_channel.assert_called_once_with(
"squid.clam.whelk:443",
credentials=cred,
credentials_file=None,
scopes=(
'https://www.googleapis.com/auth/cloud-platform',
'https://www.googleapis.com/auth/spanner.data',
),
ssl_credentials=mock_ssl_channel_creds,
quota_project_id=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
# Check if ssl_channel_credentials is not provided, then client_cert_source_for_mtls
# is used.
with mock.patch.object(transport_class, "create_channel", return_value=mock.Mock()):
with mock.patch("grpc.ssl_channel_credentials") as mock_ssl_cred:
transport_class(
credentials=cred,
client_cert_source_for_mtls=client_cert_source_callback
)
expected_cert, expected_key = client_cert_source_callback()
mock_ssl_cred.assert_called_once_with(
certificate_chain=expected_cert,
private_key=expected_key
)
def test_spanner_host_no_port():
client = SpannerClient(
credentials=ga_credentials.AnonymousCredentials(),
client_options=client_options.ClientOptions(api_endpoint='spanner.googleapis.com'),
)
assert client.transport._host == 'spanner.googleapis.com:443'
def test_spanner_host_with_port():
client = SpannerClient(
credentials=ga_credentials.AnonymousCredentials(),
client_options=client_options.ClientOptions(api_endpoint='spanner.googleapis.com:8000'),
)
assert client.transport._host == 'spanner.googleapis.com:8000'
def test_spanner_grpc_transport_channel():
channel = grpc.secure_channel('http://localhost/', grpc.local_channel_credentials())
# Check that channel is used if provided.
transport = transports.SpannerGrpcTransport(
host="squid.clam.whelk",
channel=channel,
)
assert transport.grpc_channel == channel
assert transport._host == "squid.clam.whelk:443"
    assert transport._ssl_channel_credentials is None
def test_spanner_grpc_asyncio_transport_channel():
channel = aio.secure_channel('http://localhost/', grpc.local_channel_credentials())
# Check that channel is used if provided.
transport = transports.SpannerGrpcAsyncIOTransport(
host="squid.clam.whelk",
channel=channel,
)
assert transport.grpc_channel == channel
assert transport._host == "squid.clam.whelk:443"
    assert transport._ssl_channel_credentials is None
# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are
# removed from grpc/grpc_asyncio transport constructor.
@pytest.mark.parametrize("transport_class", [transports.SpannerGrpcTransport, transports.SpannerGrpcAsyncIOTransport])
def test_spanner_transport_channel_mtls_with_client_cert_source(
transport_class
):
with mock.patch("grpc.ssl_channel_credentials", autospec=True) as grpc_ssl_channel_cred:
with mock.patch.object(transport_class, "create_channel") as grpc_create_channel:
mock_ssl_cred = mock.Mock()
grpc_ssl_channel_cred.return_value = mock_ssl_cred
mock_grpc_channel = mock.Mock()
grpc_create_channel.return_value = mock_grpc_channel
cred = ga_credentials.AnonymousCredentials()
with pytest.warns(DeprecationWarning):
with mock.patch.object(google.auth, 'default') as adc:
adc.return_value = (cred, None)
transport = transport_class(
host="squid.clam.whelk",
api_mtls_endpoint="mtls.squid.clam.whelk",
client_cert_source=client_cert_source_callback,
)
adc.assert_called_once()
grpc_ssl_channel_cred.assert_called_once_with(
certificate_chain=b"cert bytes", private_key=b"key bytes"
)
grpc_create_channel.assert_called_once_with(
"mtls.squid.clam.whelk:443",
credentials=cred,
credentials_file=None,
scopes=(
'https://www.googleapis.com/auth/cloud-platform',
'https://www.googleapis.com/auth/spanner.data',
),
ssl_credentials=mock_ssl_cred,
quota_project_id=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
assert transport.grpc_channel == mock_grpc_channel
assert transport._ssl_channel_credentials == mock_ssl_cred
# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are
# removed from grpc/grpc_asyncio transport constructor.
@pytest.mark.parametrize("transport_class", [transports.SpannerGrpcTransport, transports.SpannerGrpcAsyncIOTransport])
def test_spanner_transport_channel_mtls_with_adc(
transport_class
):
mock_ssl_cred = mock.Mock()
with mock.patch.multiple(
"google.auth.transport.grpc.SslCredentials",
__init__=mock.Mock(return_value=None),
ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred),
):
with mock.patch.object(transport_class, "create_channel") as grpc_create_channel:
mock_grpc_channel = mock.Mock()
grpc_create_channel.return_value = mock_grpc_channel
mock_cred = mock.Mock()
with pytest.warns(DeprecationWarning):
transport = transport_class(
host="squid.clam.whelk",
credentials=mock_cred,
api_mtls_endpoint="mtls.squid.clam.whelk",
client_cert_source=None,
)
grpc_create_channel.assert_called_once_with(
"mtls.squid.clam.whelk:443",
credentials=mock_cred,
credentials_file=None,
scopes=(
'https://www.googleapis.com/auth/cloud-platform',
'https://www.googleapis.com/auth/spanner.data',
),
ssl_credentials=mock_ssl_cred,
quota_project_id=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
assert transport.grpc_channel == mock_grpc_channel
def test_database_path():
project = "squid"
instance = "clam"
database = "whelk"
expected = "projects/{project}/instances/{instance}/databases/{database}".format(project=project, instance=instance, database=database, )
actual = SpannerClient.database_path(project, instance, database)
assert expected == actual
def test_parse_database_path():
expected = {
"project": "octopus",
"instance": "oyster",
"database": "nudibranch",
}
path = SpannerClient.database_path(**expected)
# Check that the path construction is reversible.
actual = SpannerClient.parse_database_path(path)
assert expected == actual
def test_session_path():
project = "cuttlefish"
instance = "mussel"
database = "winkle"
session = "nautilus"
expected = "projects/{project}/instances/{instance}/databases/{database}/sessions/{session}".format(project=project, instance=instance, database=database, session=session, )
actual = SpannerClient.session_path(project, instance, database, session)
assert expected == actual
def test_parse_session_path():
expected = {
"project": "scallop",
"instance": "abalone",
"database": "squid",
"session": "clam",
}
path = SpannerClient.session_path(**expected)
# Check that the path construction is reversible.
actual = SpannerClient.parse_session_path(path)
assert expected == actual
def test_common_billing_account_path():
billing_account = "whelk"
expected = "billingAccounts/{billing_account}".format(billing_account=billing_account, )
actual = SpannerClient.common_billing_account_path(billing_account)
assert expected == actual
def test_parse_common_billing_account_path():
expected = {
"billing_account": "octopus",
}
path = SpannerClient.common_billing_account_path(**expected)
# Check that the path construction is reversible.
actual = SpannerClient.parse_common_billing_account_path(path)
assert expected == actual
def test_common_folder_path():
folder = "oyster"
expected = "folders/{folder}".format(folder=folder, )
actual = SpannerClient.common_folder_path(folder)
assert expected == actual
def test_parse_common_folder_path():
expected = {
"folder": "nudibranch",
}
path = SpannerClient.common_folder_path(**expected)
# Check that the path construction is reversible.
actual = SpannerClient.parse_common_folder_path(path)
assert expected == actual
def test_common_organization_path():
organization = "cuttlefish"
expected = "organizations/{organization}".format(organization=organization, )
actual = SpannerClient.common_organization_path(organization)
assert expected == actual
def test_parse_common_organization_path():
expected = {
"organization": "mussel",
}
path = SpannerClient.common_organization_path(**expected)
# Check that the path construction is reversible.
actual = SpannerClient.parse_common_organization_path(path)
assert expected == actual
def test_common_project_path():
project = "winkle"
expected = "projects/{project}".format(project=project, )
actual = SpannerClient.common_project_path(project)
assert expected == actual
def test_parse_common_project_path():
expected = {
"project": "nautilus",
}
path = SpannerClient.common_project_path(**expected)
# Check that the path construction is reversible.
actual = SpannerClient.parse_common_project_path(path)
assert expected == actual
def test_common_location_path():
project = "scallop"
location = "abalone"
expected = "projects/{project}/locations/{location}".format(project=project, location=location, )
actual = SpannerClient.common_location_path(project, location)
assert expected == actual
def test_parse_common_location_path():
expected = {
"project": "squid",
"location": "clam",
}
path = SpannerClient.common_location_path(**expected)
# Check that the path construction is reversible.
actual = SpannerClient.parse_common_location_path(path)
assert expected == actual
def test_client_withDEFAULT_CLIENT_INFO():
client_info = gapic_v1.client_info.ClientInfo()
with mock.patch.object(transports.SpannerTransport, '_prep_wrapped_messages') as prep:
client = SpannerClient(
credentials=ga_credentials.AnonymousCredentials(),
client_info=client_info,
)
prep.assert_called_once_with(client_info)
with mock.patch.object(transports.SpannerTransport, '_prep_wrapped_messages') as prep:
transport_class = SpannerClient.get_transport_class()
transport = transport_class(
credentials=ga_credentials.AnonymousCredentials(),
client_info=client_info,
)
prep.assert_called_once_with(client_info)
| [
"bazel-bot-development[bot]@users.noreply.github.com"
]
| bazel-bot-development[bot]@users.noreply.github.com |
cd2488e6fb072ad9466b9e8656c5c2ce6f99929e | c839961aeab22795200d9edef9ba043fe42eeb9c | /data/script1161.py | d205b142ad1e49664fe6d443dda932a4ef1e8727 | []
| no_license | StevenLOL/kaggleScape | ad2bb1e2ed31794f1ae3c4310713ead1482ffd52 | 18bede8420ab8d2e4e7c1eaf6f63280e20cccb97 | refs/heads/master | 2020-03-17T05:12:13.459603 | 2018-05-02T19:35:55 | 2018-05-02T19:35:55 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,556 | py |
# coding: utf-8
# # A Laconic Approach - EDA (Version 1)
# In this notebook, I will do exploratory data analysis on the given text normalization data in laconic fashion. The task in this competition is to convert written text into its spoken form, for example - 6ft becomes six feet, and $1.13 becomes one dollar and thirteen cents. I will first take a glimpse of the shared dataset and then move towards feature extraction, a rule-based approach, or an RNN for this task. **Lastly, I will explain my analysis in laconic fashion: fewer words, more information**
#
# - PS - My second public kernel; do upvote if you find this analysis useful.
#
#
# ## Loading packages
# In[ ]:
import pandas as pd #pandas for using dataframe and reading csv
import numpy as np #numpy for vector operations and basic maths
import re #for processing regular expressions
import datetime #for datetime operations
import calendar #for calendar for datetime operations
import time #to get the system time
import scipy #for other dependancies
from sklearn.cluster import KMeans # for doing K-means clustering
import math #for basic maths operations
import seaborn as sns#for making plots
import matplotlib.pyplot as plt # for plotting
import os # for operating system commands
import plotly.plotly as py # for Ploting
import plotly.graph_objs as go # for ploting
import plotly # for ploting
plotly.offline.init_notebook_mode() # for using plotly in offline mode
# ## Importing input data
# **Train** - The dataset provided has the following fields
# 1. sentence_id - the id of the sentence
# 2. token_id - the id of the word inside that particular sentence
# 3. class - the semiotic class of the token (explored below)
# 4. before/after - the token before and after normalization, i.e. how it gets changed
#
# **Test** - It won't have the "after" field
# In[ ]:
s = time.time()
train_df = pd.read_csv("../input/en_train.csv")
test_df = pd.read_csv("../input/en_test.csv")
end = time.time()
print("time taken by above cell is {}.".format(end -s))
train_df.head()
# In[ ]:
train_seq = train_df.copy() # storing an original copy for later use
# ## Sanity check -
# Let's check three things -
# 1. Number of rows in training and test dataets provided in this competition
# 2. Number of sentence in training and test dataets provided in this competition
# 3. Number of Nulls in training and test data and column wise nulls distribution
# In[ ]:
start = time.time()
print("Total number of rows in given training data is {}.".format(train_df.shape[0]))
print("Total number of sentence in given training data is {}".format(len(set(train_df.sentence_id))))
print("Total number of Nulls in given training data is \n{}.".format(train_df.isnull().sum()))
print("Total number of rows in given test data is {}.".format(test_df.shape[0]))
print("Total number of sentence in given test data is {}".format(len(set(test_df.sentence_id))))
print("Total number of Nulls in given test data is \n{}.".format(test_df.isnull().sum()))
end = time.time()
print("Time taken by above cell is {}.".format(end - start))
# ## Let's explore the given variables in the training data
# **1. Sentence_id and Token_id ** - Let's plot a histogram and check the number of words in a given sentence and their frequency
# In[ ]:
get_ipython().run_line_magic('matplotlib', 'inline')
start = time.time()
sns.set(style="white", palette="muted", color_codes=True)
f, axes = plt.subplots(1, 1, figsize=(11, 7), sharex=True)
sns.despine(left=True)
temp_tr = pd.DataFrame(train_df.groupby('sentence_id')['token_id'].count())
sns.distplot(temp_tr['token_id'], axlabel = 'Number of words in a sentence', label = 'Number of words in a sentence', color="r")
plt.setp(axes, yticks=[])
plt.tight_layout()
end = time.time()
print("Min and Max of word per sentence is {} and {}.".format(temp_tr.token_id.min(),temp_tr.token_id.max()))
del temp_tr
print("Time taken by above cell is {}.".format((end-start)))
plt.show()
# **Findings**
# - From the above plot, it is clear that most sentences have 30 or fewer tokens, and very few sentences have more than 30 tokens.
# - Minimum words per sentence is 2
# - Maximum words per sentence is 256
# **2. Class** - Let's plot the counts of each class and check the distribution of the class variable
# In[ ]:
start = time.time()
temp_tr = pd.DataFrame(train_df.groupby('class')['token_id'].count())
temp_tr = temp_tr.reset_index()
X = list(temp_tr['class'])
Y = list(temp_tr['token_id'])
data = [go.Bar(
x=X,
y=Y
)]
del temp_tr
plotly.offline.iplot(data, filename='basic-bar')
end = time.time()
print("Total number of different classes in training data is {}.".format(len(X)))
print("Time taken by above cell is {}.".format((end-start)))
# ** Findings - **
# - We can see that the most frequent classes are plain, punct, letters, verbatim, date and cardinal (6 in total)
# - The remaining 10 classes occur far less frequently
# - ** The class variable isn't present in the test data => we have to assign a class to each test token ourselves (a minimal way to do this is sketched below - you got it, right ? - cool ;) )**
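# A minimal starting point for that assignment (illustrative only, and slow but simple - not used in the rest of this kernel): reuse the most frequent class observed for each token in train, and fall back to the overall majority class for tokens never seen in train.
# In[ ]:
token_class = train_df.groupby('before')['class'].agg(lambda s: s.value_counts().idxmax())
test_df['class_guess'] = test_df['before'].map(token_class)
test_df['class_guess'] = test_df['class_guess'].fillna(train_df['class'].mode()[0])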
# **3. Let's see how before/after changes with class** -
# - Let's create a flag variable per token and check whether before and after are the same or not
# - Summarize over the class variable and see the effect of class type on normalization
# In[ ]:
# Lets first assign a variable change as 0 and if there is any change we will modify this change varaible to 1
start = time.time()
def isChange(row):
"""function to check if before after is getting changed or not"""
chan = 0
if row['before'] == row['after']:
chan = 0
else:
chan = 1
return chan
train_df['change'] = 0
train_df['change'] = train_df.apply(lambda row: isChange(row), axis = 1)
end = time.time()
print("Time taken by above cell is {}.".format((end-start)))
train_df.head()
# In[ ]:
start = time.time()
temp_chn = train_df.loc[train_df['change']==1]
temp_nchn = train_df.loc[train_df['change']==0]
temp_tr1 = pd.DataFrame(temp_chn.groupby('class')['token_id'].count())
temp_tr1 = temp_tr1.reset_index()
X1 = list(temp_tr1['class'])
Y1 = list(temp_tr1['token_id'])
temp_tr2 = pd.DataFrame(temp_nchn.groupby('class')['token_id'].count())
temp_tr2 = temp_tr2.reset_index()
X2 = list(temp_tr2['class'])
Y2 = list(temp_tr2['token_id'])
trace1 = go.Bar(
x=X1,
y=Y1,
name='Change'
)
trace2 = go.Bar(
x=X2,
y=Y2,
name='NO Change'
)
data = [trace1, trace2]
layout = go.Layout(
barmode='group'
)
fig = go.Figure(data=data, layout=layout)
plotly.offline.iplot(fig, filename='grouped-bar')
end = time.time()
print("Time taken by above cell is {}.".format((end-start)))
# ** Findings ** -
# - Most of the tokens that get changed belong to classes other than plain and punct, with only a few in verbatim
# - This implies that most of the data given to us in this competition does not change and is redundant
# - **Cardinal changes => cardinals are spelled out in English, 24 becomes twenty-four**
# - ** Date changes => a date like 2Jan or 2/01/2001 is spoken as second January two thousand one**
# - ** The same holds for letters, time and telephone**
# **4. Class vs changes of token in sentence** - Let's plot the number of changes per sentence grouped by class and look at the distribution using box plots from the seaborn package
# - the first plot shows the counts as they are
# - in the second plot a limit is set on y, for better visualization of the data
# In[ ]:
start = time.time()
temp_tr = pd.DataFrame(train_df.groupby(['class', 'sentence_id', 'change'])['token_id'].count())
temp_tr.reset_index(inplace = True)
sns.set(style="ticks")
sns.set_context("poster")
sns.boxplot(x="class", y="token_id", hue="change", data=temp_tr, palette="PRGn")
plt.ylim(0, 150)
sns.despine(offset=10, trim=True)
end = time.time()
print("Time taken by above cell is {}.".format((end-start)))
# In[ ]:
start = time.time()
temp_tr = pd.DataFrame(train_df.groupby(['class', 'sentence_id', 'change'])['token_id'].count())
temp_tr.reset_index(inplace = True)
sns.set(style="ticks")
sns.set_context("poster")
sns.boxplot(x="class", y="token_id", hue="change", data=temp_tr, palette="PRGn")
plt.ylim(0, 15)
sns.despine(offset=10, trim=True)
end = time.time()
print(temp_tr['class'].unique())
print("Time taken by above cell is {}.".format((end-start)))
# In[ ]:
start = time.time()
fig, ax = plt.subplots(nrows=2, sharex=True, sharey=True)
temp_tr1 = pd.DataFrame(temp_chn.groupby('sentence_id')['token_id'].count())
temp_tr2 = pd.DataFrame(temp_nchn.groupby('sentence_id')['token_id'].count())
sns.distplot(temp_tr1['token_id'], ax=ax[0], color='blue', label='With Change')
sns.distplot(temp_tr2['token_id'], ax=ax[1], color='green', label='Without Change')
ax[0].legend(loc=0)
ax[1].legend(loc=0)
plt.show()
end = time.time()
print("Time taken by above cell is {}.".format((end-start)))
# **Findings ** -
# - From the above plot, it is clear that the distribution of sentences containing a change is fairly similar to that of the complete data
# - The distribution for sentences with no change is quite different from that of the complete data
# In[ ]:
print("Fraction of token in complete data that are being changed are {}.".format(temp_tr1.shape[0]*100/train_df.shape[0]))
# **Findings ** -
# - The fraction of tokens that actually change is only around ~4%, and the plain class data is redundant anyway, **so be careful with the class variable**
# In[ ]:
# let's check the overlap between train and test
train_list = train_df['before'].tolist()
test_list = test_df['before'].tolist()
s1 = set(train_list)
s2 = set(test_list)
common = s1.intersection(s2)
print("Common tokens between train and test is {}".format(len(common)/len(s2)))
# In[ ]:
def Assign(test, train):
""" function to assign results"""
token_dict = {}
token_dict = dict(zip(train.before, train.after))
#test['after'] = ''
print("test shape {}".format(test.shape[0]))
train.sort_values('before', ascending = True, inplace = True)
train.drop_duplicates(subset='before', keep='first', inplace=True)
train_new = train[['before', 'after']]
print(train_new.head())
print(test.head())
test_new = pd.merge(test, train_new, how = 'left', on = 'before')
print(test_new.head())
#test_new['after'] = list(map(str, test_new['after']))
def isNaN(num):
return num != num
test_new.after = np.where(isNaN(test_new.after), test_new.before, test_new.after)
return(test_new)
start = time.time()
sub = Assign(test_df, train_df)
end = time.time()
sub.head(5)
#sub1.shape[0]
# In[ ]:
def submission(row):
a = str(row['sentence_id'])+ "_"+ str(row['token_id'])
return(a)
sub['id'] = sub.apply(lambda row: submission(row), axis =1)
sub[['id', 'after']].to_csv("mahesh_common_token.csv", index = False)
# ## Data preprocessing for Seq2Seq Modeling using RNN
# My plan now is to build an RNN for seq2seq modelling, as there can be contextual information, and to capture context you have to look at whole sequences rather than individual words. For sequence-to-sequence modelling the first task is to convert the data into the correct sequence format.
# In[ ]:
# I am defining the functions and will work on it later when I get time
print(train_seq.head(2))
def words_to_sequence(train_sub):
"""function takes the input dataframe and outputs a df which has sequence/sentences"""
seq_ids = list(train_sub.sentence_id.unique())
seq_df = pd.DataFrame(columns = ['sentence_id', 'before', 'after'])
for i in seq_ids:
temp = train_sub.loc[train_sub['sentence_id']==i]
before_ = list(temp.before)
#print(before_)
before_list = ' '.join(word for word in before_)
#print(before_list)
after_ = list(temp.after)
after_list = ' '.join(word for word in after_)
seq_dict = {}
seq_dict['sentence_id'] =i
seq_dict['before'] = before_list
seq_dict['after'] = after_list
seq_temp = pd.DataFrame([seq_dict], columns=seq_dict.keys())
seq_df = seq_df.append(seq_temp, ignore_index=True)
return(seq_df)
train_sub_seq = words_to_sequence(train_seq.loc[train_seq.sentence_id < 25].copy())
train_sub_seq.head(10)
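# The row-by-row DataFrame.append in words_to_sequence gets slow on the full dataset; a vectorised sketch of the same transformation (same column names assumed) using groupby:
# In[ ]:
def words_to_sequence_fast(train_sub):
    """Join the tokens of every sentence into one string in a single groupby pass."""
    grouped = train_sub.groupby('sentence_id')[['before', 'after']].agg(
        lambda col: ' '.join(map(str, col)))
    return grouped.reset_index()
train_sub_seq_fast = words_to_sequence_fast(train_seq.loc[train_seq.sentence_id < 25].copy())
train_sub_seq_fast.head(10)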
# In[ ]:
def seq_to_words(seq_df):
    """Convert a sequence dataframe back to the token-per-row input format (not implemented yet)."""
    raise NotImplementedError("Will finish this function later.")
# # To be continued ....
| [
"[email protected]"
]
| |
2e83051cab98c6e966a89981f641b396e0630240 | efde9197a0a0ea1e11113e79bce87c3ded80573e | /hackerRank/cyclic binary string.py | 00f79a18ec9e5abbffb5f41d0b5d339823f58e5e | []
| no_license | dkarthicks27/ML_Database | bb370366e7b4d2ad06d992778c02815304a30f2d | 9f3856b1ac2aead5df4e3ef05e1800b1152f777e | refs/heads/master | 2021-10-27T10:45:47.258344 | 2021-10-24T10:42:55 | 2021-10-24T10:42:55 | 238,627,713 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,092 | py | """Algorithm: 1011010
The string is rotated cyclically: at each step the current rotation is converted to decimal and we check whether its base-2 logarithm is an integer, i.e. whether that rotation is a power of two.
Take the input string and save it to a variable original.
1. Convert it to decimal and check whether it has an integer (non-negative) log to the base 2.
2. If it does, store the exponent and keep the maximum exponent seen so far.
3. Rotate the string; if the rotation differs from the original, repeat the process, otherwise stop.
"""
from math import log2
from copy import deepcopy
def leftShift(string):
new_string = string[-1] + string[:-1]
return new_string
def maximumPower(string):
originals = deepcopy(string)
print('string: ', string)
original = string
number = int(original, 2)
print('number:', number)
val = log2(number)
print('val: ', val)
maximumVal = 0
    if val.is_integer():
        maximumVal = int(val)
    # the two original branches ran the same rotation loop, so do it once for both cases
    string = leftShift(originals)
    while string != originals:
        print('\n')
        print('binary string:', string)
        number = int(string, 2)
        print('decimal value:', number)
        val = log2(number)
        print('val:', val)
        if val.is_integer():
            maximumVal = max(maximumVal, int(val))
            print('maximum_value: ', maximumVal)
        string = leftShift(string)
return maximumVal
print(maximumPower('0011'))
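# The loop above re-parses every rotation, which is O(n^2) in the string length.
# A shortcut based on the observation that a rotation is a power of two exactly
# when the string contains a single '1' (and the largest exponent is reached when
# that '1' leads the string); it keeps the original convention of returning 0 when
# no rotation is a power of two.
def maximumPowerFast(string):
    if string.count('1') == 1:
        return len(string) - 1
    return 0
print(maximumPowerFast('0011'))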
| [
"[email protected]"
]
| |
eb1e71aacc3892c3756d3e6efab1d5dbebcb4e7a | 4331279865c4b1262179068ba5ac85d8d75123b6 | /final/home/views/insurance.py | ae8d65e45714f7c174c4d5d0273a66627fdbf017 | []
| no_license | manankshastri/NYmed-Scripts | fb4633c19dadfdf982f127454a5dd643ba0f8a8b | 993af47223ca7cb38a2e9af88a2fc99baa7f3d88 | refs/heads/master | 2020-04-07T11:29:01.839909 | 2019-05-16T22:39:50 | 2019-05-16T22:39:50 | 158,328,115 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,010 | py | from django.contrib import messages
from django.contrib.auth import login
from django.contrib.auth.decorators import login_required
from django.db import transaction
from django.db.models import Count
from django.shortcuts import get_object_or_404, redirect, render
from django.urls import reverse_lazy, reverse
from django.utils.decorators import method_decorator
from django.views.generic import (CreateView, ListView, DeleteView, DetailView, UpdateView)
from django.contrib.messages.views import SuccessMessageMixin
from ..decorators import insurance_required
from ..forms import InsuranceSignUpForm
from ..models import Insurance, User, Prescription, Patient
class InsuranceSignUpView(CreateView):
model = User
form_class = InsuranceSignUpForm
template_name = 'registration/signup_form.html'
def get_context_data(self, **kwargs):
kwargs['user_type'] = 'insurance'
return super().get_context_data(**kwargs)
def form_valid(self, form):
user = form.save()
login(self.request, user)
return redirect('insurance:insurance_list')
@login_required
@insurance_required
def InsuranceDetailView(request, pk):
pat_all = Patient.objects.all()
template_name = 'home/insurance/insurance_detail.html'
return render(request, template_name, context = {'pat_all': pat_all},)
@method_decorator([login_required, insurance_required], name='dispatch')
class InsuranceListView(ListView):
model = Insurance
template_name = 'home/insurance/insurance_list.html'
@login_required
@insurance_required
def InsurancePatientBillsView(request, pk):
pat_all = Prescription.objects.all()
template_name = 'home/insurance/insurance_patient.html'
return render(request, template_name, context = {'pat_all': pat_all},)
@method_decorator([login_required, insurance_required], name='dispatch')
class InsuranceBillDetailView(DetailView):
model = Prescription
template_name = 'home/insurance/insurance_bills.html'
| [
"[email protected]"
]
| |
c18989b9fc9e25bf40b4ac083d12d27f4d5d3a0e | 96b2009e5a3bcaa4a0a6bb699015e5d2d62e3ccc | /卷积神经网络+keras/__init__.py | c9f26105c126796a86360adc6512fe8d3d8fda08 | []
| no_license | zlszhonglongshen/NLP | 612d9f73ca017d387c48b8b3ebae24510dad6732 | 8f373f737b309c7441b516c0d408e43aebacff61 | refs/heads/master | 2022-07-24T20:18:12.465840 | 2019-11-27T09:06:57 | 2019-11-27T09:06:57 | 125,830,945 | 0 | 0 | null | 2022-07-15T20:19:20 | 2018-03-19T09:11:40 | Jupyter Notebook | UTF-8 | Python | false | false | 7,572 | py | #coding:utf-8
import os
import numpy as np
np.random.seed(1337)
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
from keras.utils.np_utils import to_categorical
from keras.layers import Dense,Input,Flatten
from keras.layers import Conv1D,MaxPooling1D,Embedding
from keras.models import Model
from keras.optimizers import *
from keras.models import Sequential
from keras.layers import merge
import sys
BASE_DIR = 'E:/NLP/卷积神经网络+keras' # the current working directory
GLOVE_DIR = BASE_DIR + '/glove.6B/' # change according to the actual directory name
TEXT_DATA_DIR = BASE_DIR + '/news20/20_newsgroup/' # change according to the actual directory name
MAX_SEQUENCE_LENGTH = 1000 # maximum length kept for each text; can be set smaller for shorter texts
MAX_NB_WORDS = 20000 # size of the overall vocabulary; can be tuned slightly up or down
EMBEDDING_DIM = 50 # dimension of the word vectors; choose according to your setup
VALIDATION_SPLIT = 0.4 # used here as the test-set fraction (the variable name literally means validation set)
#first build index mapping words in the embedding set
#to their embedding vector, i.e. build an index from each word to its vector; e.g. "peking" may map to (0.1, 0.32, ..., 0.35, 0.5).
print('Indexing word vectors...')
embedding_index = {}
f = open(os.path.join(GLOVE_DIR, 'glove.6B.50d.txt'),encoding="utf-8") # read the 50-dimensional word vector file; 100-dim or others also work
for line in f:
values = line.split()
word = values[0]
coefs = np.asarray(values[1:],dtype='float32')
embedding_index[word] = coefs
f.close()
print('Found %s word vectors.'%len(embedding_index))
#second prepare text samples and their labels
print('Processing text dataset') # the code below mainly reads the training and test texts
texts = [] # list storing the training samples
labels_index = {} # dictionary mapping a class (newsgroup) name to its numeric id
labels = [] # class id of each training sample, e.g. article A belongs to class 1
for name in sorted(os.listdir(TEXT_DATA_DIR)):
path = os.path.join(TEXT_DATA_DIR,name)
if os.path.isdir(path):
label_id = len(labels_index)
labels_index[name] = label_id
for fname in sorted(os.listdir(path)):
if fname.isdigit():
fpath = os.path.join(path,fname)
if sys.version_info<(3,):
f = open(fpath)
else:
f = open(fpath,encoding='latin-1')
texts.append(f.read())
f.close()
labels.append(label_id)
print('Found %s texts.'%len(texts)) # print the number of training samples
tokenizer = Tokenizer(num_words=MAX_NB_WORDS)
# finally, vectorize the text samples into a 2D integer tensor. The code below turns each text into a token sequence (e.g. a sentence is split into words), maps it to integer ids such as [101, 231], and those ids are later expanded into word vectors, so every text becomes a 2-D matrix (see the chapter of the original article on combining CNNs with word vectors).
tokenizer.fit_on_texts(texts)
sequences = tokenizer.texts_to_sequences(texts)
word_index = tokenizer.word_index
print('Found %s unique tokens.'%len(word_index))
data = pad_sequences(sequences,maxlen=MAX_SEQUENCE_LENGTH)
labels = to_categorical(np.asarray(labels))
print('Shape of data tensor:', data.shape)
print('Shape of label tensor:', labels.shape)
# split the data into a training set and a validation set; the code below actually splits the dataset into a training and a test set (the English comment says validation set, but I slightly changed the code)
indices = np.arange(data.shape[0])
np.random.shuffle(indices)
data = data[indices]
labels = labels[indices]
nb_validation_samples = int(VALIDATION_SPLIT * data.shape[0])
x_train = data[:-nb_validation_samples] # training set
y_train = labels[:-nb_validation_samples]# training labels
x_val = data[-nb_validation_samples:] # test set (named "val" in the original English comment)
y_val = labels[-nb_validation_samples:] # test labels
print('Preparing embedding matrix.')
# prepare embedding matrix: build a matrix so that every word has its corresponding word vector
nb_words = min(MAX_NB_WORDS,len(word_index))
embedding_matrix = np.zeros((nb_words+1,EMBEDDING_DIM))
for word,i in word_index.items():
if i>MAX_NB_WORDS:
continue
embedding_vector = embedding_index.get(word)
if embedding_vector is not None:
#words not found in embedding index will be all_zeros
embedding_matrix[i] = embedding_vector
# load pre-trained word embeddings into an Embedding layer
# the first layer of the network is the embedding layer; pre-trained GloVe vectors are used here, and trainable can be set to False
embedding_layer = Embedding(nb_words+1,EMBEDDING_DIM,input_length=MAX_SEQUENCE_LENGTH,weights=[embedding_matrix],trainable=True)
# train a 1D convnet with global max pooling
# left model: the first branch of the network, convolution window 5*50 (50 is the embedding dimension)
model_left = Sequential()
# model.add(Input(shape=(MAX_SEQUENCE_LENGTH,), dtype='int32'))
model_left.add(embedding_layer)
model_left.add(Conv1D(128, 5, activation='tanh'))
model_left.add(MaxPooling1D(5))
model_left.add(Conv1D(128, 5, activation='tanh'))
model_left.add(MaxPooling1D(5))
model_left.add(Conv1D(128, 5, activation='tanh'))
model_left.add(MaxPooling1D(35))
model_left.add(Flatten())
# right model: the second branch of the network, convolution window 4*50
model_right = Sequential()
model_right.add(embedding_layer)
model_right.add(Conv1D(128, 4, activation='tanh'))
model_right.add(MaxPooling1D(4))
model_right.add(Conv1D(128, 4, activation='tanh'))
model_right.add(MaxPooling1D(4))
model_right.add(Conv1D(128, 4, activation='tanh'))
model_right.add(MaxPooling1D(28))
model_right.add(Flatten())
# third model: the third branch of the network, convolution window 6*50
model_3 = Sequential()
model_3.add(embedding_layer)
model_3.add(Conv1D(128, 6, activation='tanh'))
model_3.add(MaxPooling1D(3))
model_3.add(Conv1D(128, 6, activation='tanh'))
model_3.add(MaxPooling1D(3))
model_3.add(Conv1D(128, 6, activation='tanh'))
model_3.add(MaxPooling1D(30))
model_3.add(Flatten())
merged = merge([model_left, model_right, model_3],mode='concat') # concatenate the three branches with different convolution windows; using just one of the three models also gives decent results, but this follows the structure of the paper
model = Sequential()
model.add(merged) # add merge
model.add(Dense(128, activation='tanh')) # fully connected layer
model.add(Dense(len(labels_index), activation='softmax')) # softmax: outputs the probability of the text belonging to each of the 20 classes
# the optimizer used here is Adadelta; other optimizers work as well
model.compile(loss='categorical_crossentropy',
optimizer='Adadelta',
metrics=['accuracy'])
# start training; nb_epoch is the number of epochs: more epochs give better results but slower training
model.fit(x_train, y_train, nb_epoch=3)
score = model.evaluate(x_train, y_train, verbose=0) # evaluate on the training set, accuracy about 99%
print('train score:', score[0])
print('train accuracy:', score[1])
score = model.evaluate(x_val, y_val, verbose=0) # evaluate on the test set, accuracy about 97%; more epochs improve it further
print('Test score:', score[0])
print('Test accuracy:', score[1])
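# Note: the `merge([...], mode='concat')` helper used above is the old Keras 1 API and was
# removed in Keras 2. A rough functional-API equivalent (a sketch only: it is not called
# here, it assumes Keras 2 where `concatenate` exists, and it simplifies each branch to a
# single convolution + pooling step):
def build_merged_model_keras2():
    from keras.layers import concatenate
    seq_input = Input(shape=(MAX_SEQUENCE_LENGTH,), dtype='int32')
    embedded = embedding_layer(seq_input)
    branches = []
    for window in (5, 4, 6):
        x = Conv1D(128, window, activation='tanh')(embedded)
        x = MaxPooling1D(5)(x)
        x = Flatten()(x)
        branches.append(x)
    hidden = Dense(128, activation='tanh')(concatenate(branches))
    preds = Dense(len(labels_index), activation='softmax')(hidden)
    return Model(seq_input, preds)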
| [
"[email protected]"
]
| |
0e7c3b828694480c0b910383edc2dc5f6294ab81 | a728a685fa841388da0d27c8d596ce2178a60ad0 | /app/core/tests/test_models.py | 55151200fda78a0faad3fa52acf148c40fe6a526 | [
"MIT"
]
| permissive | aikinyi/recipe-app-api | bd3c037acf650a09cdae35497c8e62b4988ad454 | 419ab18f715f66d044af125680ce3417f7af61f4 | refs/heads/main | 2023-03-19T13:46:50.341555 | 2021-03-20T22:39:36 | 2021-03-20T22:39:36 | 321,140,635 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,138 | py | from django.test import TestCase
from django.contrib.auth import get_user_model
from core import models
# Helper functions
def sample_user(email='[email protected]', password='test123456'):
return get_user_model().objects.create_user(email, password)
class ModelTest(TestCase):
"""
Creating Model TDD
"""
def test_create_user(self):
"""
Creating test user TDD function
"""
email = '[email protected]'
password = '123456'
user = get_user_model().objects.create_user(
email=email,
password=password,
)
# Asserting the password and email
self.assertEqual(user.email, email)
self.assertTrue(user.check_password(password))
def test_normalize_email(self):
"""
TDD for normalizing email
"""
email = '[email protected]'
user = get_user_model().objects.create_user(
email, 'aikinyiltd',
)
# Assertion on email normalization
self.assertEqual(user.email, email.lower())
def test_validate_user_email(self):
"""
Validating user email
"""
with self.assertRaises(ValueError):
get_user_model().objects.create_user(None, 'email address here')
def test_create_superuser(self):
"""
        Creating a superuser
"""
user = get_user_model().objects.create_superuser(
'[email protected]',
'123abdcd'
)
self.assertTrue(user.is_superuser)
self.assertTrue(user.is_staff)
def test_tag_str(self):
"""
Creating TDD for testing tag MODEL
"""
tag = models.Tag.objects.create(
user=sample_user(),
name='Abdul'
)
self.assertEqual(str(tag), tag.name)
def test_ingredient_str(self):
"""
TDD for testing creation of new ingredient
"""
ingredient = models.Ingredient.objects.create(
user=sample_user(),
name='Cucumber'
)
self.assertEqual(str(ingredient), ingredient.name)
| [
"[email protected]"
]
| |
cd604accecbe1e3a174eb64d58aa50cb702a0acf | 26771494974942f4ab18d2cd8247506c344e1d14 | /895-maximumFrequencyStack.py | 4a40053b7ed6952b9019de75037801c0192ff639 | []
| no_license | wangyunpengbio/LeetCode | 9f4c6076e067c5e847d662679483f737d40e8ca5 | cec1fd11fe43177abb2d4236782c0f116e6e8bce | refs/heads/master | 2020-04-29T22:28:25.899420 | 2020-04-03T07:37:26 | 2020-04-03T07:37:26 | 176,448,957 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,062 | py | class FreqStack:
    # Times out (TLE): pop() rescans the whole stack on every call
def __init__(self):
from collections import defaultdict
self.stack = []
self.dic = defaultdict(int)
self.maxFrequency = 0
def push(self, x: int) -> None:
self.stack.append(x)
self.dic[x] = self.dic[x] + 1
self.calculateMaxFrequency()
def pop(self) -> int:
# print(self.stack,self.dic,self.maxFrequency)
for i in range(len(self.stack)-1,-1,-1):
# print(self.stack[i])
if self.dic[self.stack[i]] == self.maxFrequency:
self.dic[self.stack[i]] = self.dic[self.stack[i]] - 1
item = self.stack.pop(i)
break
self.calculateMaxFrequency()
return item
def calculateMaxFrequency(self):
self.maxFrequency = 0
for key,value in self.dic.items():
if value > self.maxFrequency:
self.maxFrequency = value
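# Since pop() above is linear in the stack size, a common O(1)-per-operation approach
# keeps one stack per frequency level (a sketch; the names freq, group and maxfreq are
# introduced here and are not part of the original submission):
from collections import defaultdict
class FreqStackFast:
    def __init__(self):
        self.freq = defaultdict(int)    # value -> how many copies are currently on the stack
        self.group = defaultdict(list)  # frequency level -> values pushed at that level, in order
        self.maxfreq = 0
    def push(self, x: int) -> None:
        self.freq[x] += 1
        self.maxfreq = max(self.maxfreq, self.freq[x])
        self.group[self.freq[x]].append(x)
    def pop(self) -> int:
        x = self.group[self.maxfreq].pop()
        self.freq[x] -= 1
        if not self.group[self.maxfreq]:
            self.maxfreq -= 1
        return x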
# Your FreqStack object will be instantiated and called as such:
# obj = FreqStack()
# obj.push(x)
# param_2 = obj.pop() | [
"[email protected]"
]
| |
bed170e3a61e169e68a386884050efbff4067342 | 52b5773617a1b972a905de4d692540d26ff74926 | /.history/labels_20200908183820.py | 429c534d5ee867e57cc47bc486a667f5c91d2405 | []
| no_license | MaryanneNjeri/pythonModules | 56f54bf098ae58ea069bf33f11ae94fa8eedcabc | f4e56b1e4dda2349267af634a46f6b9df6686020 | refs/heads/master | 2022-12-16T02:59:19.896129 | 2020-09-11T12:05:22 | 2020-09-11T12:05:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 135 | py | def labels(S):
if len(S) == 0:
return 0
output_arr = []
last_indices = []
for i in range(len(S)):
| [
"[email protected]"
]
| |
4c374d623b41f4b08ccaf0d7c3dc45adefcbee20 | 233928d206e13e068cf8cb5ff7888c9a2d84ad61 | /BOJ/BOJ_2920_음계.py | d99e9a4bb4060c1a3c802597873370a6c6437450 | []
| no_license | Jinwoongma/Algorithm | 7f6daa2d3c2c361059c09fb4fe287b1cce4863e2 | 78803f4572f1416451a9f4f31f53b7d653f74d4a | refs/heads/master | 2022-10-07T22:53:20.333329 | 2020-06-07T13:27:47 | 2020-06-07T13:27:47 | 237,114,107 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 182 | py | data = list(map(int, input().split()))
if data == list(range(1, 9)):
print('ascending')
elif data == list(range(8, 0, -1)):
print('descending')
else:
print('mixed') | [
"[email protected]"
]
| |
a16fd8e50b9c997067a44669d605721cbf30a699 | c82b0584f91a7a130718273ecf72039e2d5f9ab1 | /polyaxon_deploy/schemas/security_context.py | a6ce5946b5aed47c96e476bc8c5a116f43003948 | [
"MIT"
]
| permissive | todokku/polyaxon-deploy | 7af770dac9fb9797b86e3bf6b5f1da477a751ba0 | 77828e028670c43cc74704a4d7b9ec2e661e10a4 | refs/heads/master | 2021-02-15T16:02:13.468664 | 2020-03-04T09:37:06 | 2020-03-04T09:37:06 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,137 | py | # -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function
from marshmallow import ValidationError, fields, validates_schema
from polyaxon_deploy.schemas.base import BaseConfig, BaseSchema
def validate_security_context(user, group):
if any([user, group]) and not all([user, group]):
raise ValidationError(
"Security context requires both `user` and `group` or none.")
class SecurityContextSchema(BaseSchema):
enabled = fields.Bool(allow_none=True)
user = fields.Int(allow_none=True)
group = fields.Int(allow_none=True)
@staticmethod
def schema_config():
return SecurityContextConfig
@validates_schema
def validate_security_context(self, data):
validate_security_context(data.get('user'), data.get('group'))
class SecurityContextConfig(BaseConfig):
SCHEMA = SecurityContextSchema
REDUCED_ATTRIBUTES = ['enabled', 'user', 'group']
def __init__(self, enabled=None, user=None, group=None):
validate_security_context(user, group)
self.enabled = enabled
self.user = user
self.group = group
| [
"[email protected]"
]
| |
015f28cff9057185f32b9aa80589b0f4ae92b00a | b1a7fce60e8935592d07323222212d132eedb407 | /Raspi/Confirm.py | a4d5142e76c993a17e454a2068f3e4dc046cbad7 | []
| no_license | Namlitruong/Capstone-ModularRobot | d0922030a8ee0af7a06667ea5f333b19e1bbb070 | e23b07b260a7bfef9a0ef07bb74816cf64cc6a56 | refs/heads/master | 2022-12-17T23:07:07.952625 | 2020-08-17T00:41:11 | 2020-08-17T00:41:11 | 273,672,058 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,766 | py | import CANbus
import can
import csv
#############################--INTERRUPT--######################################
import time
import os, signal
import RPi.GPIO as GPIO
GPIO.setmode(GPIO.BCM)
GPIO.setup(13, GPIO.IN, pull_up_down=GPIO.PUD_DOWN)
def interrupt_handler(channel):
ID = os.getppid()
print(ID)
pid = os.popen("ps aux | grep 'python3 Confirm.py' | awk '{print $2}'").readlines()
print ("Length: ", len(pid))
for i in range (len(pid)):
print (pid[i])
os.system ('sudo kill -9 '+ pid[i])
print("####################################")
GPIO.add_event_detect(13, GPIO.RISING,
callback=interrupt_handler,
bouncetime=500)
###################################################################################
actuatorID = []
sensorID = []
def wriToFile (aID, sID):
f = open ('config.csv', 'w')
with f:
writer = csv.writer(f, delimiter = ";")
writer.writerow (aID)
writer.writerow (sID)
def classifier (msg):
subID = 0
mType = 0
if (msg.arbitration_id == 0x1A0):
print ("Module detected !!!")
subID = 0x1A0
mType = 'A'
elif (msg.arbitration_id == 0x1F0):
#print ("Sensor module detected !!!")
subID = 0x1F0
mType = 'S'
return subID, mType
def searchValidID (IDlist, tempModule):
for i in range (1, 16):
flag = False
tempModule.ID = tempModule.ID + 1
if (len(IDlist) == 0):
break
for j in range (len(IDlist)):
if (IDlist[j].ID == tempModule.ID):
flag = True
break
if (flag == False and j+1 == len(IDlist)):
break
IDlist.append (tempModule)
print ("Assign new ID: ", hex(tempModule.ID))
return tempModule.ID
def verifyID (IDlist):
activeList = []
for i in range (len(IDlist)):
while (True):
CANbus.send((IDlist[i].ID - 0x100), [0x00])
msg = CANbus.receiveNonBlocking(0.1)
if (IDlist[i].timeout == 5):
break
if (msg == None):
IDlist[i].timeout = IDlist[i].timeout + 1
else:
activeList.append (IDlist[i])
break
return activeList
def printAvailableID (msg, module):
IDlist =[]
print (msg)
for i in range (len(module)):
print (module[i].ID, " ", i)
IDlist.append (module[i].ID)
return IDlist
if __name__ == "__main__":
while (True):
while (True):
print ("Waiting for connecting modules")
msg = CANbus.receive()
tempID, mType = classifier (msg)
if (msg.arbitration_id == tempID):
break
tempModule = CANbus.module(msg.arbitration_id)
if (mType == 'A'):
tempID = searchValidID (actuatorID, tempModule)
CANbus.send (0x0A0, [(tempID - 0x1A0)])
elif (mType == 'S'):
tempID = searchValidID (sensorID, tempModule)
CANbus.send (0x0F0, [(tempID - 0x1F0)])
#CANbus.send (0x0A0, [(tempID - 0x1A0)])
print ("Sending Confirmation", tempID - 0x100)
while (True):
msg = CANbus.receive()
if (msg.arbitration_id == tempID):
break
print ("Confirmation Complete")
#Verify modules
print ("Verifying existing modules")
actuatorID = verifyID (actuatorID)
sensorID = verifyID (sensorID)
aID = printAvailableID ("Available Module: ", actuatorID)
#sID = printAvailableID ("Available Sensor: ", sensorID)
sID = printAvailableID (" ", sensorID)
wriToFile (aID, sID) | [
"pi@raspberrypi"
]
| pi@raspberrypi |
1ae71121fe67533c75e20874fc8ff41f033c1d67 | a9243f735f6bb113b18aa939898a97725c358a6d | /0.16/_downloads/plot_artifacts_detection.py | 86f915a1f8213e207c582dae54ccbc31f59c58bd | []
| permissive | massich/mne-tools.github.io | 9eaf5edccb4c35831400b03278bb8c2321774ef2 | 95650593ba0eca4ff8257ebcbdf05731038d8d4e | refs/heads/master | 2020-04-07T08:55:46.850530 | 2019-09-24T12:26:02 | 2019-09-24T12:26:02 | 158,233,630 | 0 | 0 | BSD-3-Clause | 2018-11-19T14:06:16 | 2018-11-19T14:06:16 | null | UTF-8 | Python | false | false | 5,773 | py | """
Introduction to artifacts and artifact detection
================================================
Since MNE supports the data of many different acquisition systems, the
particular artifacts in your data might behave very differently from the
artifacts you can observe in our tutorials and examples.
Therefore you should be aware of the different approaches and of
the variability of artifact rejection (automatic/manual) procedures described
onwards. In the end, always consider visually inspecting your data
after artifact rejection or correction.
Background: what is an artifact?
--------------------------------
Artifacts are signal interference that can be
endogenous (biological) and exogenous (environmental).
Typical biological artifacts are head movements, eye blinks
or eye movements, heart beats. The most common environmental
artifact is due to the power line, the so-called *line noise*.
How to handle artifacts?
------------------------
MNE deals with artifacts by first identifying them, and subsequently removing
them. Detection of artifacts can be done visually, or using automatic routines
(or a combination of both). After you know what the artifacts are, you need to
remove them. This can be done by:
- *ignoring* the piece of corrupted data
- *fixing* the corrupted data
For the artifact detection the functions MNE provides depend on whether
your data is continuous (Raw) or epoch-based (Epochs) and depending on
whether your data is stored on disk or already in memory.
Detecting the artifacts without reading the complete data into memory allows
you to work with datasets that are too large to fit in memory all at once.
Detecting the artifacts in continuous data allows you to apply filters
(e.g. a band-pass filter to zoom in on the muscle artifacts on the temporal
channels) without having to worry about edge effects due to the filter
(i.e. filter ringing). Having the data in memory after segmenting/epoching is
however a very efficient way of browsing through the data which helps
in visualizing. So to conclude, there is not a single most optimal manner
to detect the artifacts: it just depends on the data properties and your
own preferences.
In this tutorial we show how to detect artifacts visually and automatically.
For how to correct artifacts by rejection see
:ref:`sphx_glr_auto_tutorials_plot_artifacts_correction_rejection.py`.
To discover how to correct certain artifacts by filtering see
:ref:`sphx_glr_auto_tutorials_plot_artifacts_correction_filtering.py`
and to learn how to correct artifacts
with subspace methods like SSP and ICA see
:ref:`sphx_glr_auto_tutorials_plot_artifacts_correction_ssp.py`
and :ref:`sphx_glr_auto_tutorials_plot_artifacts_correction_ica.py`.
Artifacts Detection
-------------------
This tutorial discusses a couple of major artifacts that most analyses
have to deal with and demonstrates how to detect them.
"""
import numpy as np
import mne
from mne.datasets import sample
from mne.preprocessing import create_ecg_epochs, create_eog_epochs
# getting some data ready
data_path = sample.data_path()
raw_fname = data_path + '/MEG/sample/sample_audvis_raw.fif'
raw = mne.io.read_raw_fif(raw_fname, preload=True)
###############################################################################
# Low frequency drifts and line noise
(raw.copy().pick_types(meg='mag')
.del_proj(0)
.plot(duration=60, n_channels=100, remove_dc=False))
###############################################################################
# we see high amplitude undulations in low frequencies, spanning across tens of
# seconds
raw.plot_psd(tmax=np.inf, fmax=250)
###############################################################################
# On MEG sensors we see narrow frequency peaks at 60, 120, 180, 240 Hz,
# related to line noise.
# But also some high amplitude signals between 25 and 32 Hz, hinting at other
# biological artifacts such as ECG. These can be most easily detected in the
# time domain using MNE helper functions
#
# See :ref:`sphx_glr_auto_tutorials_plot_artifacts_correction_filtering.py`.
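#
# For instance (a quick sketch, applied to a copy so the rest of this tutorial
# still runs on the unfiltered data), notch filters at the power line frequency
# and its harmonics attenuate those narrow peaks:

meg_picks = mne.pick_types(raw.info, meg=True)
raw.copy().notch_filter(np.arange(60, 241, 60), picks=meg_picks)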
###############################################################################
# ECG
# ---
#
# finds ECG events, creates epochs, averages and plots
average_ecg = create_ecg_epochs(raw).average()
print('We found %i ECG events' % average_ecg.nave)
joint_kwargs = dict(ts_args=dict(time_unit='s'),
topomap_args=dict(time_unit='s'))
average_ecg.plot_joint(**joint_kwargs)
###############################################################################
# we can see typical time courses and non-dipolar topographies;
# note the order of magnitude of the average artifact-related signal and
# compare this to what you observe for brain signals
###############################################################################
# EOG
# ---
average_eog = create_eog_epochs(raw).average()
print('We found %i EOG events' % average_eog.nave)
average_eog.plot_joint(**joint_kwargs)
###############################################################################
# Knowing these artifact patterns is of paramount importance when
# judging the quality of artifact removal techniques such as SSP or ICA.
# As a rule of thumb you need artifact amplitudes orders of magnitude higher
# than your signal of interest, and you need a few such events in order
# to find decompositions that allow you to estimate and remove artifact-related
# patterns.
#
# Consider the following tutorials for correcting this class of artifacts:
# - :ref:`sphx_glr_auto_tutorials_plot_artifacts_correction_filtering.py`
# - :ref:`sphx_glr_auto_tutorials_plot_artifacts_correction_ica.py`
# - :ref:`sphx_glr_auto_tutorials_plot_artifacts_correction_ssp.py`
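###############################################################################
# As a quick added sanity check along those lines, it helps to confirm that a
# reasonable number of artifact events were actually detected before trusting
# SSP/ICA estimates (the exact counts depend on the recording):
print('Averaged %i ECG and %i EOG events' % (average_ecg.nave, average_eog.nave))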
| [
"[email protected]"
]
| |
64cbbf12cccecdd79098ee784933598a826d5869 | b9f7c7a87292c1a9c231ce89933ae9d4bc51f487 | /src/sst/elements/simpleElementExample/tests/basicStatistics0.py | 3ea5c138cfc1b3558768044804877e0a4e49d5e9 | [
"BSD-3-Clause"
]
| permissive | sstsimulator/sst-elements | 3a8db475a7a6cbd4c2a5d737c32718752da9797a | 68cdb3ac843750705805653b3fdcd4b015e84089 | refs/heads/master | 2023-08-17T03:30:24.145168 | 2023-08-16T13:58:07 | 2023-08-16T13:58:07 | 43,475,440 | 85 | 145 | NOASSERTION | 2023-09-12T13:59:11 | 2015-10-01T02:57:18 | C++ | UTF-8 | Python | false | false | 3,054 | py | # Import the SST module
import sst
# The basicStatisticsX.py scripts demonstrate user-side configuration of statistics.
# Each one focuses on a different aspect of user-side configuration
#
# This example demonstrates:
# 1. Default output behavior (reporting statistics at the end of simulation)
# 2. Various output formats for statistics
#
# This component has no links and SST will produce a warning because that is an unusual configuration
# that often points to a mis-configuration. For this simulation, the warning can be ignored.
#
# Relevant code:
# simpleElementExample/basicStatistics.h
# simpleElementExample/basicStatistics.cc
# simpleElementExample/basicEvent.h
#
# Output:
# simpleElementExample/tests/refFiles/basicStatistics0.out
# simpleElementExample/tests/refFiles/basicStatistics0.csv
#
### Create two components (to compare different components' output in the CSV file)
component0 = sst.Component("StatisticComponent0", "simpleElementExample.basicStatistics")
component1 = sst.Component("StatisticComponent1", "simpleElementExample.basicStatistics")
### Parameterize the components.
# Run 'sst-info simpleElementExample.basicStatistics' at the command line
# to see parameter documentation
params0 = {
"marsagliaZ" : 438, # Seed for Marsaglia RNG
"marsagliaW" : 9375794, # Seed for Marsaglia RNG
"mersenne" : 102485, # Seed for Mersenne RNG
"run_cycles" : 1000, # Number of cycles to run for
"subids" : 3 # Number of SUBID_statistic instances
}
component0.addParams(params0)
params1 = {
"marsagliaZ" : 957537, # Seed for Marsaglia RNG
"marsagliaW" : 5857, # Seed for Marsaglia RNG
"mersenne" : 860, # Seed for Mersenne RNG
"run_cycles" : 1200, # Number of cycles to run for
"subids" : 6 # Number of SUBID_statistic instances
}
component1.addParams(params1)
### Enable statistics
## Limit the verbosity of statistics to any with a load level from 0-4
# This component's statistics range from 1-4 (see sst-info)
sst.setStatisticLoadLevel(4)
## Determine where statistics should be sent. By default this script uses CSV, other options are
# commented out below. Output locations are case-insensitive (e.g., statOutputCSV = statoutputcsv).
# Default: Output to CSV. Filename and separator can be specified
sst.setStatisticOutput("sst.statOutputCSV", { "filepath" : "./basicStatistics0.csv", "separator" : "," } )
# Option: Output to the terminal
#sst.setStatisticOutput("sst.statoutputconsole")
# Option: Output to a text file
#sst.setStatisticOutput("sst.statOutputTXT", { "filepath" : "./basicStatistics0.txt" } )
# Option: Output to HDF5. Requires sst-core to be configured with HDF5 library.
#sst.setStatisticOutput("sst.statoutputhd5f")
# Option: Output to JSON
#sst.setStatisticOutput("sst.statOutputJSON", { "filepath" : "./basicStatistics0.json" } )
## Enable statistics on the components
sst.enableAllStatisticsForComponentType("simpleElementExample.basicStatistics")
| [
"[email protected]"
]
| |
2fd1b907e6eff215b937433a3f361834b3dd96ec | a355b16b9b4cebdd39beb69a6c5aa4e175ae52f6 | /phytosanitary/urls/links.py | 8d16c92f08f546895ad6e4779cd0a8695434b8ee | []
| no_license | hypertexthero/Phytosanitary | e2ba31116b432a8623b332e53a390ff31c24fc10 | 4f001436c90de7a64649e82089e577af6981b793 | refs/heads/master | 2016-09-05T09:47:01.448846 | 2012-11-28T16:34:03 | 2012-11-28T16:34:03 | 3,460,559 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 807 | py | from django.conf.urls.defaults import *
from phytosanitary.models import Link
link_info_dict = {
'queryset': Link.objects.all(),
'date_field': 'pub_date',
}
urlpatterns = patterns('django.views.generic.date_based',
(r'^$', 'archive_index', link_info_dict, 'phytosanitary_link_archive_index'),
(r'^(?P<year>\d{4})/$', 'archive_year', link_info_dict, 'phytosanitary_link_archive_year'),
(r'^(?P<year>\d{4})/(?P<month>\w{3})/$', 'archive_month', link_info_dict, 'phytosanitary_link_archive_month'),
(r'^(?P<year>\d{4})/(?P<month>\w{3})/(?P<day>\d{2})/$', 'archive_day', link_info_dict, 'phytosanitary_link_archive_day'),
(r'^(?P<year>\d{4})/(?P<month>\w{3})/(?P<day>\d{2})/(?P<slug>[-\w]+)/$', 'object_detail', link_info_dict, 'phytosanitary_link_detail'),
) | [
"[email protected]"
]
| |
3160ede5e603262448964d8dc9e3a89b58592466 | 60d5ea4f007d49768d250ef394003f554003e4d0 | /python/Depth-first Search/111.Minimum Depth of Binary Tree.py | 28976c05b41b56e4880a2b5192eea9b5868c08e4 | []
| no_license | EvanJamesMG/Leetcode | dd7771beb119ea1250dbb3b147a09053298cd63b | fa638c7fda3802e9f4e0751a2c4c084edf09a441 | refs/heads/master | 2021-01-10T17:11:10.896393 | 2017-12-01T16:04:44 | 2017-12-01T16:04:44 | 46,968,756 | 5 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,470 | py | # coding=utf-8
# Definition for singly-linked list.
class TreeNode(object):
def __init__(self, x):
self.val = x
self.left = None
self.right = None
class ListNode(object):
def __init__(self, x):
self.val = x
self.next = None
'''
Approach: recursion.
Consider the following cases:
1. If the tree is empty, the minimum depth is 0.
2. If the root has only a left subtree or only a right subtree, the answer is
   that subtree's minimum depth + 1.
3. If the root has both a left and a right subtree, the answer is the smaller
   of the two subtrees' minimum depths + 1.
'''
class Solution(object):
def minDepth(self, root):
"""
:type root: TreeNode
:rtype: int
"""
if root == None:
return 0
if root.left == None and root.right != None:
return self.minDepth( root.right ) + 1
if root.left != None and root.right == None:
return self.minDepth( root.left ) + 1
return min( self.minDepth( root.left ), self.minDepth( root.right ) ) + 1
if __name__ == "__main__":
    # Build a small test tree and print its minimum depth (expected: 2).
    #       3
    #      / \
    #     9  20
    #       /  \
    #      15   7
    root = TreeNode(3)
    root.left = TreeNode(9)
    root.right = TreeNode(20)
    root.right.left = TreeNode(15)
    root.right.right = TreeNode(7)
    print(Solution().minDepth(root))
| [
"[email protected]"
]
| |
9ef94e2e4d69efad94f09beea5a420f9acda3202 | c1654d09c1eccf17d105d31c62bbf4106feb89d8 | /resolution-mylar.py | 4d6222a94a7d894fdaa9fbff4e10052cca671b70 | []
| no_license | piti118/crystal-length-study-for-mu2e | 142be2f059299c9902706b50d375fda01e651ead | a0287d2676fef33c15298caf432b0d5b38443bd1 | refs/heads/master | 2016-09-11T09:12:07.118526 | 2012-05-14T05:26:27 | 2012-05-14T05:26:27 | 3,666,252 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,825 | py | # -*- coding: utf-8 -*-
# <nbformat>3</nbformat>
# <codecell>
from root_numpy import *
from dist_fit import *
from cithep import *
from h5py import *
sample='mylar'
# <codecell>
class Hitmap:
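    """Accumulate deposited energy E on a numrow x numcol grid, where (l, k)
    are offsets from the centre of the grid (see lk2ij)."""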
def __init__(self,numrow=21,numcol=21):
self.hmap = np.zeros([numrow,numcol])
self.numrow = numrow
self.numcol = numcol
def acc(self,l,k,E):
i,j = self.lk2ij(l,k)
self.hmap[i,j]+=E
def lk2ij(self, l,k):
return l+self.numcol/2,k+self.numrow/2
def sumE(self,cutoff=None):
if cutoff is not None:
return np.sum(np.sum(self.hmap[self.hmap>cutoff]))
else:
return np.sum(np.sum(self.hmap))
# <codecell>
hitmap = root2array('%s.root'%sample,'hitmap')
einfo = root2array('%s.root'%sample,'eventinfo')
# <codecell>
# <codecell>
laster = tuple()
thishit = None
result = np.array([],dtype=[('angle',np.double),('E',np.double)])
for hit in hitmap:
runno = hit['runno']
eventno = hit['eventno']
if (runno,eventno) != laster and laster != tuple():
result.resize(len(result)+1)
result[-1]['angle'] = laster[0]*5.
result[-1]['E'] = thishit.sumE()
thishit=None
laster = (runno,eventno)
if thishit is None:
thishit = Hitmap()
thishit.acc(hit['l'],hit['k'],hit['E'])
if thishit is not None:
result.resize(len(result)+1)
result[-1]['angle'] = laster[0]*5.
result[-1]['E'] = thishit.sumE()
thishit=None #take care of the last one
# <codecell>
f = File('%s.hdf5'%sample,'w')
f.create_dataset('result',data=result)
f.close()
# <codecell>
f = File('%s.hdf5'%sample,'r')
tmp = f['result']
result = np.array(tmp)
f.close()
# <codecell>
def my_gau(x,g_mu,g_sigma):
return gaussian(x,g_mu,g_sigma)
# <codecell>
def smear(E):
w = sqrt(1000.*E)#1000 photon per mev
ret = randn(len(E))
ret*=w/1000.
ret+=E
return ret
def doOneFit(E,range=(95.,110.),mean=104.,sigma=1.,n=20.,alpha=0.5,N=80000,
limit_N=(1000,100000),limit_n=(0.1,100.), limit_mean=(90,106), limit_sigma=(0.3,5.),limit_alpha=(0.,5.)):
#eg = Add2Pdf(my_gau,Normalize(crystalball,range))
#describe(eg)
#eg = Normalize(crystalball,range)
eg = Convolve(Normalize(crystalball,range),my_gau,(-2,2),nbins=40)
#eeg = eg
eeg = Extend(eg)
print describe(eeg)
#fit, m = fit_uml(eg,sm,mean=104.5,sigma=1.,n=20.,alpha=0.5, limit_n=(0.1,50.), limit_mean=(90,106), limit_sigma=(0.3,5.),limit_alpha=(0.,2.))
#try_uml(eg,sm,mean=104.,sigma=1.,n=50.,alpha=0.5)
fit,m = None,None
good = False
itry = 0
first = True
while not good and itry<5:
try:
if not first:
mean = 104.5+randn(1)*2.
alpha=0.5+randn(1)*0.2
first =False
fit,m = fit_binpoisson(eeg,E,maxcalls=2000000,bins=100,
mean=mean,sigma=sigma,n=n,alpha=alpha,N=N,g_mu=0.,g_sigma=0.3,
limit_N=limit_N,limit_n=limit_n, limit_mean=limit_mean, limit_sigma=limit_sigma,limit_alpha=limit_alpha,
limit_g_mu=(-1,1),limit_g_sigma=(0.001,0.5),
quiet=False,throw=False)
good = True
except Exception as e:
print e
#raise e
itry+=1
fit.draw(m)
l,h = fwhm_f(eeg,range,m.args)
print m.values
vertical_highlight(l,h)
return fit,m,h,l,eeg
# <codecell>
angles = np.linspace(0,90,19)[:-1]
myresult = {}
# <codecell>
arg = {
0 :{'range':(96.,105.5)},
1 :{'range':(96.,105.5)},
2 :{'range':(96.,105.5)},
3 :{'range':(96.,105.5)},
4 :{'range':(96.,105.5)},
5 :{'range':(96.,105.5)},
6 :{'range':(96.,105.5)},
7 :{'range':(96.,105.5)},
8 :{'range':(96.,105.5)},
9 :{'range':(96.,105.5)},
10:{'range':(96.,105.5)},
11:{'range':(96.,105.5)},
12:{'range':(90.,105.5)},
13:{'range':(90.,105.5)},
14:{'range':(90.,105.5)},
15:{'range':(90.,105.5)},
16:{'range':(80.,105.5)},
17:{'range':(80.,105.5)},
}
for i,angle in enumerate(angles):
if i < 14: continue
myE = result['E'][(result['angle']>(angle-0.1)) & (result['angle']<(angle+0.1))]
figure()
myE = smear(myE)
emin,emax = 101.,105.5
if i in arg:
emin,emax = arg[i]['range']
myE = myE[(myE>emin) & (myE<emax)]
myresult[i] = doOneFit(myE,range=(emin,emax),N=len(myE))
title(str(angle)+' '+str(i))
# <codecell>
#make and save the plot
def make_nice_plot(r):
fig,axs = subplots(3,3,figsize=(20,12))
for i in r:
ii = i%9
row = ii/3
col = ii%3
fit = myresult[i][0]
m = myresult[i][1]
fh,fl = myresult[i][2],myresult[i][3]
fwhm_res = (fh-fl)/2.35
ax=axs[row,col]
sca(ax)
fit.draw(m)
vertical_highlight(fl,fh)
title('%s %d deg'%(sample,5*i))
text(0.5,0.2,r'fwhm/2.35=%3.2f'%(fwhm_res),transform = ax.transAxes)
make_nice_plot(range(9))
savefig('%s_1.pdf'%sample,bbox_inches='tight')
make_nice_plot(range(9,18))
savefig('%s_2.pdf'%sample,bbox_inches='tight')
# <codecell>
fwhm = np.zeros(18)
for i in range(18): fwhm[i]=(myresult[i][2]-myresult[i][3])/2.35
np.save('fwhm_%s.npy'%sample,fwhm)
x = np.array(range(18))*5.
plot(x,fwhm,'xb')
# <codecell>
hist(result['E'],bins=100,range=(100,110),histtype='step');
# <codecell>
a = numpy.array([],dtype=[('a',np.double)])
a
a.resize(len(a)+1)
a.resize(len(a)+1)
a
# <codecell>
gdf = df.groupby(['runno','eventno'])
# <codecell>
for k,v in gdf:
h = Hitmap(10,10)
for i in xrange(len(v)):
h.acc(v.l[i],v.k[i],v.E[i])
print h.hmap
print h.sumE()
break
# <codecell>
h = Hitmap(10,10)
# <codecell>
# for x in hmap:  # unfinished scratch cell (hmap is never defined); left commented out so the file parses
# <codecell>
| [
"[email protected]"
]
| |
994488c0995c4cb3859a16fbd3481c780bdb7c61 | c50e7eb190802d7849c0d0cea02fb4d2f0021777 | /src/virtual-wan/azext_vwan/vendored_sdks/v2018_08_01/v2018_08_01/aio/operations/_load_balancer_load_balancing_rules_operations.py | 80034f67d188e49b5f19806c7376dfe4dd5c6385 | [
"LicenseRef-scancode-generic-cla",
"MIT"
]
| permissive | Azure/azure-cli-extensions | c1615b19930bba7166c282918f166cd40ff6609c | b8c2cf97e991adf0c0a207d810316b8f4686dc29 | refs/heads/main | 2023-08-24T12:40:15.528432 | 2023-08-24T09:17:25 | 2023-08-24T09:17:25 | 106,580,024 | 336 | 1,226 | MIT | 2023-09-14T10:48:57 | 2017-10-11T16:27:31 | Python | UTF-8 | Python | false | false | 8,796 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class LoadBalancerLoadBalancingRulesOperations:
"""LoadBalancerLoadBalancingRulesOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2018_08_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def list(
self,
resource_group_name: str,
load_balancer_name: str,
**kwargs
) -> AsyncIterable["_models.LoadBalancerLoadBalancingRuleListResult"]:
"""Gets all the load balancing rules in a load balancer.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param load_balancer_name: The name of the load balancer.
:type load_balancer_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either LoadBalancerLoadBalancingRuleListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2018_08_01.models.LoadBalancerLoadBalancingRuleListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.LoadBalancerLoadBalancingRuleListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-08-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'loadBalancerName': self._serialize.url("load_balancer_name", load_balancer_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('LoadBalancerLoadBalancingRuleListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/loadBalancers/{loadBalancerName}/loadBalancingRules'} # type: ignore
async def get(
self,
resource_group_name: str,
load_balancer_name: str,
load_balancing_rule_name: str,
**kwargs
) -> "_models.LoadBalancingRule":
"""Gets the specified load balancer load balancing rule.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param load_balancer_name: The name of the load balancer.
:type load_balancer_name: str
:param load_balancing_rule_name: The name of the load balancing rule.
:type load_balancing_rule_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: LoadBalancingRule, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2018_08_01.models.LoadBalancingRule
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.LoadBalancingRule"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-08-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'loadBalancerName': self._serialize.url("load_balancer_name", load_balancer_name, 'str'),
'loadBalancingRuleName': self._serialize.url("load_balancing_rule_name", load_balancing_rule_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('LoadBalancingRule', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/loadBalancers/{loadBalancerName}/loadBalancingRules/{loadBalancingRuleName}'} # type: ignore
| [
"[email protected]"
]
| |
cd6a459ece5a08bd23ac75e022e08a981b4e98c4 | 5d09e3b32b0f7dee1147139e5e57822f33dc0f32 | /lib/authorship_simulate_citations.py | dfe00d94e2db5ca746145205494cf1700d1da662 | []
| no_license | scone-snu/pyflib2 | cb797f625100d280f6bd3b757795040ca892b1ed | bb2ad7d9974903ac8c3b01ac48b4d6ab72d2ac80 | refs/heads/master | 2020-03-31T17:37:54.216805 | 2011-05-06T04:43:31 | 2011-05-06T04:43:31 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,186 | py | import os
import glob
import re
import networkx as nx
import itertools
import matplotlib.pyplot as plt
import pickle
from collections import defaultdict
from PlotFunctions import *
import random
from scipy.stats import gamma
import math
# Variables that can be modified
START_YEAR = 1997 # Year to start simulation from (i.e. start simulation from START_YEAR+1)
NEW_EDGES_PER_YEAR = 1370 # Number of new edges per year
T = 6 # Years to simulate
P = 0.4 # Probability of choosing a neighbor
Q = 0.4 # Probability of choosing at random or closing a triangle, etc.
PREFIX = "ca"
# # Simulate from the single-edge graph
# G = nx.Graph()
# G.add_edge("1","2", weight=1, years=[START_YEAR])
# Simulate from START_YEAR
G = nx.read_edgelist("../data/parsed/authorship_%d.edgelist" % START_YEAR, create_using=nx.Graph(), comments='#', delimiter='|', data=True, encoding='utf-8')
# Load year of first publication for each author
with open("../data/parsed/authorship.year", "r") as f:
first_paper = pickle.load(f)
# Load # of papers each author produces in his/her lifetime
with open("../data/parsed/authorship.count", "r") as f:
num_papers = pickle.load(f)
max_gam = max(gamma.pdf(range(1,12),3,scale=2))
def num_new_nodes(year, author):
# Constant Activity Level
if random.random() < 0.648:
return 1
else:
return 0
def num_papers_dist():
return 4
def num_citations_dist():
return 71
new_num_citations = {}
for t in range(START_YEAR+1,START_YEAR+1+T):
print "Simulating year %d..." % t
# Load # of citations
with open("../data/parsed/citations_%d.count" % t) as f:
num_citations = pickle.load(f)
num_citations.update(new_num_citations)
# Create new edges for existing nodes
print "\t for existing nodes"
for node in G.nodes_iter():
for i in range(0, num_new_nodes(t,node)):
# See if we want to form an edge and set target if we want to
rand = random.random()
target = None
if rand < P:
# Pick a node proportional to edge weight
bins = []
for nbr in G.neighbors(node):
#print node,nbr,G[node][nbr]
mult = max([num_citations[p] for p in G[node][nbr]['papers']])
#clist = [num_citations[p] for p in G[node][nbr]['papers']]
#mult = int(round(float(sum(clist)) / len(clist)))
bins += [nbr] * mult
if len(bins) == 0:
bins = G.neighbors(node)
target = random.choice(bins)
elif rand < P + Q:
# Degree-random
bins = []
for nbr in G.neighbors(node):
for nbr2 in G.neighbors(nbr):
bins += [nbr2]
target = random.choice(bins)
# Form an edge if target is set, don't form self-loops
if target:
#print "Adding edge from %s to %s" % (node,target)
new_paper = "N"+str(t)+"_"+node+"_"+target
num_citations[new_paper] = num_citations_dist()
if G.has_edge(node,target):
G[node][target]['weight'] += 1
G[node][target]['years'].append(t)
G[node][target]['papers'].append(new_paper)
elif node != target:
G.add_edge(node, target, weight=1, years=[t], papers=[new_paper])
# New node additions
print "\t for new nodes"
if len(G.nodes()) > 0:
# Generate bins for preferential attachment
bins = []
for node,degree in G.degree_iter():
bins += [node] * degree
# Add new nodes and connect them to existing nodes using preferential attachment
for i in range(0,NEW_EDGES_PER_YEAR):
new_node = "N"+str(t)+"_"+str(i)
new_paper = "N"+str(t)+"_"+new_node
new_num_citations[new_paper] = num_citations_dist()
first_paper[new_node] = t
num_papers[new_node] = num_papers_dist()
# Pick & connect to a random node
G.add_edge(random.choice(bins), new_node, weight=1, years=[t], papers=[new_paper])
nx.write_edgelist(G, "../data/simulations/%ssim_%d_%d_%f_%f.edgelist" % (PREFIX, START_YEAR, t, P, Q), comments='#', delimiter='|', data=True, encoding='utf-8')
#print G.edges()
# # Uncomment the below to visualize the graph. Might take extremely long to render!
# nx.draw_graphviz(G)
# plt.show() | [
"[email protected]"
]
| |
12519564ac2077f1120fb5cbb0e9bfaf0c9762c4 | 0bb991864bb1c68eb41c40229b2a78adcbbf69c9 | /python/model_features/statistics.py | 5f73b2e6b61173784966955ab4a9f0dc70ecff90 | []
| no_license | kristianeschenburg/Parcellating-connectivity | ab78a62a11e549f027a177f57c15924ef6eafb9e | 19edaba4d923b1d283b182f21dca4f46a0fbd2f6 | refs/heads/master | 2020-03-22T13:37:16.801653 | 2018-07-29T18:33:47 | 2018-07-29T18:33:47 | 140,120,191 | 0 | 0 | null | 2018-07-07T22:16:40 | 2018-07-07T22:16:39 | null | UTF-8 | Python | false | false | 1,568 | py | import numpy as np
import time
def UpdateStats(stats, t0, curr_lp, max_lp, K, z, c, steps, gt_z, map_z, verbose):
"""
Update diagnostic statistics.
Parameters:
- - - - -
t0 : initial start time
curr_lp : current log-probability of map
max_lp : max log-probability
K : number of clusters
z : current map
c : current parent links
steps : total number of steps taken
gt_z : ground truth map
map_z : maximum a-posterior map
verbose : flag to print status updates
"""
stats['lp'].append(curr_lp)
stats['max_lp'].append(max_lp)
stats['K'].append(K)
stats['z'] = np.row_stack([stats['z'],z])
stats['c'] = np.row_stack([stats['c'],c])
curr_time = time.clock() - t0
stats['times'].append(curr_time)
if verbose:
print('Step: ' + str(steps) + ' Time: ' + str(curr_time) +
' LP: ' + str(curr_lp) + ' K: ' + str(K) + ' MaxLP: ' + str(max_lp))
if np.any(gt_z):
stats['NMI'].append(NMI(gt_z, map_z))
return stats
def NMI(z1, z2):
"""
    Compute normalized mutual information between two maps.
Parameters:
- - - - -
z1, z2 : maps to compare
"""
N = len(z1)
assert N == len(z2)
p1 = np.bincount(z1)/N
p1[p1 == 0] = 1
H1 = (-p1*np.log(p1)).sum()
p2 = np.bincount(z2)/N
p2[p2 == 0] = 1
H2 = (-p2*np.log(p2)).sum()
joint = np.histogram2d(z1,z2,[range(0,z1.max()+2), range(0,z2.max()+2)],
normed=True)
joint_p = joint[0]
pdiv = joint_p/np.outer(p1,p2)
pdiv[joint_p == 0] = 1
MI = (joint_p*np.log(pdiv)).sum()
if MI == 0:
NMI = 0
else:
NMI = MI/np.sqrt(H1*H2)
return NMI | [
"[email protected]"
]
| |
801a2a01933e03fb0f56781ece4a79654cc8788c | b72d0900bec98fcee6c725cef035c02ca29bbf1b | /Python/100Excersises/1 to 25/25/25.py | 38dc3ba7dc12908e54d10b12f5a442b5a1ccd3cd | [
"MIT"
]
| permissive | sugamkarki/NAMI-Year-II-TERM-I-Group_Project | 68b8808c8607858a313e8b4d601d8d12c6edda2b | f0a9a5f219ccbec024eb5316361db3fca46e171c | refs/heads/master | 2023-06-28T19:07:19.330236 | 2021-07-24T03:05:42 | 2021-07-24T03:05:42 | 312,819,148 | 0 | 0 | MIT | 2021-07-24T12:45:06 | 2020-11-14T13:08:08 | Python | UTF-8 | Python | false | false | 163 | py | alphabet=[]
for letters in range(97,123):
alphabet.append(chr(letters))
d=dict(a=alphabet)
for item in d.values():
for alpha in item:
print(alpha)
| [
"[email protected]"
]
| |
75ed8c814760c96bc4cb333a81523c02f6fce8d5 | 52a4d282f6ecaf3e68d798798099d2286a9daa4f | /test_sa.py | 81104dd1d3c6c5b477f238e92d7d1b4e9c05347a | [
"MIT"
]
| permissive | bkovitz/FARGish | f0d1c05f5caf9901f520c8665d35780502b67dcc | 3dbf99d44a6e43ae4d9bba32272e0d618ee4aa21 | refs/heads/master | 2023-07-10T15:20:57.479172 | 2023-06-25T19:06:33 | 2023-06-25T19:06:33 | 124,162,924 | 5 | 1 | null | null | null | null | UTF-8 | Python | false | false | 6,332 | py | # test_sa.py -- Test of spreading activation
import unittest
from pprint import pprint as pp
import inspect
from time import process_time
from dataclasses import dataclass
import operator
from operator import itemgetter
from heapq import nlargest
from typing import Union, List, Tuple, Dict, Set, FrozenSet, Iterable, Any, \
NewType, Type, ClassVar, Sequence, Callable, Hashable
from itertools import chain
import networkx as nx
from Propagator import Propagator, Delta
NodeId = NewType('NodeId', int)
@dataclass
class MyProp(Propagator):
noise: float = 0.0
def make_deltas(self, g, old_d):
#print() #DEBUG
return chain.from_iterable(
self.deltas_from(g, old_d, nodeid)
for nodeid in old_d
)
def deltas_from(self, g, old_d, nodeid) \
-> List[Delta]:
'''Deltas from nodeid to its neighbors.'''
result: List[Delta] = []
nodeid_a = old_d.get(nodeid, 0.0)
for neighborid, edge_d in g.adj[nodeid].items():
weight = edge_d.get('weight', 1.0)
delta = Delta(
neighborid,
weight * nodeid_a,
nodeid
)
result.append(delta)
return result
def min_value(self, g, nodeid):
return 0.0
class Node:
nodeid: NodeId
@dataclass(frozen=True)
class Operator:
func: Callable
name: str
def call(self, *operands: int) -> int:
return self.func(*operands)
def __str__(self):
return self.name
plus = Operator(operator.add, '+')
times = Operator(operator.mul, 'x')
minus = Operator(operator.sub, '-')
@dataclass(frozen=True)
class Before:
'''A feature meaning that .obj was present before the action represented
by the slipnode occurred.'''
obj: Hashable
def __str__(self):
return f'Before({self.obj})'
@dataclass(frozen=True)
class After:
'''A feature meaning that .obj was present after the action represented
by the slipnode occurred.'''
obj: Hashable
def __str__(self):
return f'After({self.obj})'
@dataclass(frozen=True)
class Equation(Node):
operands: Tuple[int]
operator: Operator
result: int
def features(self) -> Iterable[Hashable]:
for operand in self.operands:
yield operand
yield Before(operand)
yield self.operator
yield self.result
yield After(self.result)
#return set(self.operands + (self.operator, self.result, Before
def __str__(self):
expr = f' {self.operator} '.join(str(n) for n in self.operands)
return f'{expr} = {self.result}'
class TestSA(unittest.TestCase):
def test_sa(self):
p = MyProp(positive_feedback_rate=0.0)
self.assertEqual(p.noise, 0.0)
g = nx.Graph() # undirected graph
g.add_edge(1, 2, weight=1.0)
g.add_edge(1, 3, weight=1.3)
g.add_node(4)
#print(g.edges[1, 2]['weight'])
#for neighbor in g.adj[1].items():
#print(neighbor)
# Let's give all nodes activation=1.0.
initial_a_dict = dict((nodeid, 1.0) for nodeid in g.nodes)
# Propagate
got: Dict[NodeId, float] = p.propagate(g, initial_a_dict)
self.assertEqual(got, {1: 1.026, 2: 1.0, 3: 1.006, 4: 0.98})
def test_eqns(self):
p = MyProp(positive_feedback_rate=0.0, sigmoid_p=1.5)
def query(g, features, k=10):
activations_in = dict((f, 1.0) for f in features)
activations_out = p.propagate(g, activations_in, num_iterations=10)
tups = [
(node, a)
for (node, a) in activations_out.items()
if isinstance(node, Equation)
]
return nlargest(k, tups, itemgetter(1))
def see(activations_d):
for node, a in sorted(activations_d.items(), key=itemgetter(1)):
print(f'{node!s:20s} {a:0.3f}')
g = nx.Graph()
# Make slipnet: a bipartite graph of Equations and features
for a in range(1, 11):
for b in range(1, 11):
if b >= a:
continue
for operator in [plus, minus, times]:
e = Equation((a, b), operator, operator.call(a, b))
g.add_node(e)
for f in e.features():
g.add_edge(f, e, weight=1.0)
tups = query(g, [4, 5, Before(4), Before(5)], k=3)
self.assertCountEqual(
['5 + 4 = 9', '5 x 4 = 20', '5 - 4 = 1'],
[str(eqn) for (eqn, a) in tups]
)
if __name__ == '__main__':
import matplotlib.pyplot as plt
plt.ion()
p = MyProp(positive_feedback_rate=0.0, sigmoid_p=1.5)
def query(g, features, k=4):
activations_in = dict((f, 1.0) for f in features)
activations_out = p.propagate(g, activations_in, num_iterations=10)
tups = [
(node, a)
for (node, a) in activations_out.items()
if isinstance(node, Equation)
]
return nlargest(k, tups, itemgetter(1))
def see(activations_d):
for node, a in sorted(activations_d.items(), key=itemgetter(1)):
print(f'{node!s:20s} {a:0.3f}')
g = nx.Graph()
for a in range(1, 11):
for b in range(1, 11):
if b >= a:
continue
for operator in [plus, minus, times]:
e = Equation((a, b), operator, operator.call(a, b))
g.add_node(e)
for f in e.features():
g.add_edge(f, e, weight=1.0)
#e1 = Equation((2, 3), plus, plus.call(2, 3))
#print(e1)
# g.add_node(e1)
# for f in e1.features():
# g.add_edge(f, e1, weight=1.0)
# a0 = dict((f, 1.0) for f in [4, 5, Before(4), Before(5)])
# #a0 = dict((f, 1.0) for f in [7, 6, Before(7), Before(6)])
# see(a0)
# print()
#
# start = process_time()
# a1 = p.propagate(g, a0, num_iterations=10)
# end = process_time()
# print(end - start)
# #see(a1)
# print(sum(a1.values()))
es = query(g, [4, 5, Before(4), Before(5)])
pp(es)
#nx.draw(g, with_labels=True, pos=nx.bipartite_layout(g, [n for n in g.nodes if isinstance(n, Equation)]))
#plt.show()
| [
"[email protected]"
]
| |
49d98b69895f2db5dd9fa22267d1e67e92e73d52 | 669196cb7444c699b9c477bd36d76082d534e08a | /tests/unit/test_user_email.py | c475eef807feb4dd45015fb7490c85ba2be6c329 | [
"MIT"
]
| permissive | tilgovi/pyramid_fullauth | d51ad9fabca0ef380f6981c0f62e5c36d8484cba | 3de2f784e89c2e82104dbe36acbb85597e4fff31 | refs/heads/master | 2021-01-24T15:15:28.691347 | 2014-11-02T18:45:05 | 2014-11-02T18:45:05 | 26,466,736 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 894 | py | """Test email related User methods."""
from pyramid.compat import text_type
from pyramid_fullauth.models import User
NEW_EMAIL = text_type('[email protected]')
def test_set_new_email():
"""
Test User.set_new_email method.
setting new email should result in setting new_email field,
and key used to activate the change.
"""
user = User()
assert user.email_change_key is None
assert user.new_email is None
user.set_new_email(NEW_EMAIL)
assert user.new_email == NEW_EMAIL
assert user.email_change_key
def test_change_email():
"""
Test User.change_email method.
Calling it should copy new email set by set_new_email method
into regular email field.
"""
user = User()
assert not user.email
user.set_new_email(NEW_EMAIL)
user.change_email()
assert not user.email_change_key
assert user.email == NEW_EMAIL
| [
"[email protected]"
]
| |
775bc8ad2440dec3fa0750bcca10332e6a975a4f | f07a42f652f46106dee4749277d41c302e2b7406 | /Data Set/bug-fixing-4/16a4c177de3f63055c5f0252c3f8ba202175fb41-<start_merge>-bug.py | 488cafe673b3ea8201fc11c222ab29d021e87ebf | []
| no_license | wsgan001/PyFPattern | e0fe06341cc5d51b3ad0fe29b84098d140ed54d1 | cc347e32745f99c0cd95e79a18ddacc4574d7faa | refs/heads/main | 2023-08-25T23:48:26.112133 | 2021-10-23T14:11:22 | 2021-10-23T14:11:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 421 | py | def start_merge(self, project_id, previous_group_ids, new_group_id):
if (not previous_group_ids):
return
state = {
'transaction_id': uuid4().hex,
'project_id': project_id,
'previous_group_ids': previous_group_ids,
'new_group_id': new_group_id,
'datetime': datetime.now(tz=pytz.utc),
}
self._send(project_id, 'merge', extra_data=(state,), asynchronous=False) | [
"[email protected]"
]
| |
c1e9f92e53090868a41830a7785c711adfab01bc | d9f63d87a9f7b19d5ee60c5f38e9007687df4078 | /面向对象-类和对象4.py | 6b8af3e544ed5021e3843f440b94064de10669be | []
| no_license | zhouf1234/untitled3 | 4b156046f0fea2c773785cba0486621625004786 | 238c5aaef121f3d716c96290e7e417a9a4a03b4e | refs/heads/master | 2020-05-05T02:36:07.396459 | 2019-04-05T08:27:31 | 2019-04-05T08:27:31 | 179,643,758 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 419 | py | class Person:
school='阳光幼儿园'
def __init__(self):
self.name='丹丹'
p1=Person()
# When accessing an attribute on an instance, Python first looks for it on the
# instance itself and falls back to the class attribute if it is not found there.
print(p1.school) # prints: 阳光幼儿园
# After changing the class attribute school on the Person class itself:
Person.school='夏天小学'
print(p1.school) # prints: 夏天小学
print()
p2=Person()
print(p2.school) #夏天小学 | [
"="
]
| = |
8440e8250bda5ae92abd0501c1219d37a8251790 | d713770971a0d9e4a77921fa85fd03daf339dd84 | /business_hardcode/build_project/build_project.py | b34832268d919212f956754af2974f20ed2d4dea | [
"Apache-2.0"
]
| permissive | laashub/laas-soa | cf9c0403cb25eedc74326752aaa776f501fac9d0 | 63a5e84b646bf1d857e97ddbbc7c1c487a9dc9e4 | refs/heads/master | 2023-01-07T17:44:24.431030 | 2020-11-12T13:35:31 | 2020-11-12T13:35:31 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,528 | py | """
Build a project.
This depends on several pieces of data:
    the build server
    the source repository information
    the project configuration
"""
import datetime
import json
import os
import time
import traceback
from rest.operate.executor import context
local_executor_root_path = os.path.join(os.getcwd(), "business_hardcode/build_project")
remote_executor_root_path = "/data/tristan/1"  # root directory of the remote executor
# Prepare the local directories
local_executor_data_data_path = os.path.join(local_executor_root_path, "data_data")
context.prepare_local_dirs([local_executor_data_data_path])
# File recording the version (update datetimes) of the locally cached data
local_update_datetime_record_path = local_executor_root_path + "/" + "local_update_datetime_record"
def build_project(executor_data_id, data_data_data):
"""
    Build the project.
:param executor_data_id:
:param data_data_data:
:return:
"""
    # Record global data for this run
context.global_data.executor_data_id = executor_data_id
startup_timestamp = int(time.time())
context.log("启动时间: " + str(datetime.datetime.now()))
try:
"""
{'id': 11, 'git_server': '1', 'project_name': '仓库系统', 'branches': 'master', 'tags': '',
'program_language': 'java', 'docker_registry_id': '1', 'update_datetime': {'$date': 1605035741000},
'create_datetime': {'$date': 1605035741000}, 'repo_path': 'http://git.wjh.com/wms/wms_service'}
"""
        # Look up the executor
        host_build = context.select_data_by_data_id__data_data_id(15, 1)[0]  # look up the build server connection info
        # Fetch the latest data, save it locally, and sync it to the executor directory
latest_update_datetime_record = ""
"""
data_data:
git_server.json
docker_registry.json
"""
        # Look up the git server
data_data_git_server = context.select_data_by_data_id__data_data_id('5', data_data_data['git_server'])[0]
latest_update_datetime_record += str(data_data_git_server["update_datetime"]) + ";"
        # Look up the docker registry
data_data_docker_registry = \
context.select_data_by_data_id__data_data_id('4', data_data_data['docker_registry_id'])[0]
latest_update_datetime_record += str(data_data_docker_registry["update_datetime"]) + ";"
        # Read the locally recorded data version
local_update_datetime_record = None
if os.path.exists(local_update_datetime_record_path):
with open(local_update_datetime_record_path) as f:
local_update_datetime_record = f.read()
if not local_update_datetime_record or local_update_datetime_record != latest_update_datetime_record:
            # ############### Sync the data files to the remote server
            # Prepare the remote directories
context.log(context.declare_remote_dirs(host_build, [remote_executor_root_path]))
context.write_data_data_2_file(data_data_git_server, local_executor_data_data_path + '/git_server.json')
context.write_data_data_2_file(data_data_docker_registry,
local_executor_data_data_path + '/docker_registry.json')
            # Fetch the latest business scripts, save them locally, and sync them to the executor
"""
business_hyper_fusion:
java:
do_build_project.sh
build_project.sh
clean_build_project.sh
startup.sh
Dockerfile
do_build_docker.sh
clean_build_docker.sh
"""
            # Sync the data and business-script directories to the server
context.sync_dirs_2_remote(host_build, local_executor_root_path, remote_executor_root_path,
["data_data", "business_hyper_fusion"])
            # Sync the startup file to the server
context.sync_files_2_remote(host_build, local_executor_root_path, remote_executor_root_path, ["startup.py"])
with open(local_update_datetime_record_path, 'w')as f:
f.write(latest_update_datetime_record)
        # ###### Each run needs its own execution directory; the startup data is written to data_data.json inside it
remote_executor_run_n_path = remote_executor_root_path + "/run/" + str(executor_data_id)
        # Create the run directory for this execution
context.declare_remote_dirs(host_build, [remote_executor_run_n_path])
        # Write the startup parameters
context.execute_remote_command(host_build, """
sudo cat >> %s<<EOF
%s
EOF
""" % (remote_executor_run_n_path + "/data_data.json", json.dumps(data_data_data, ensure_ascii=False)))
        # Should the shared files be copied into this run's own area?
        # Pro: everything would live under the run's own directory; con: the files would have to be copied.
command = "cd %s && python startup.py -ei %s" % (remote_executor_root_path, executor_data_id)
context.RemoteShell(host_build["ip"], host_build["port"], host_build["username"],
host_build["password"]).execute(command)
# context.ShellHandler(host_build["ip"], host_build["port"], host_build["username"],host_build["password"]).execute(command)
print("=" * 200)
except Exception as e:
traceback.print_exc()
context.log(str(e))
context.log("结束时间: " + str(datetime.datetime.now()))
context.log("总耗时: %s 秒钟" + str(int((int(time.time()) - startup_timestamp) / 1000)))
| [
"[email protected]"
]
| |
bf0840495fc063b35d948fe9b69befd937bd7de7 | d60acaac9e460c5693efe61449667b3c399c53c8 | /algebra/linear/fishercriterion.py | 1c1c14ab2e5666bf05a05221df9b5c7bd15195f6 | []
| no_license | HussainAther/mathematics | 53ea7fb2470c88d674faa924405786ba3b860705 | 6849cc891bbb9ac69cb20dfb13fe6bb5bd77d8c5 | refs/heads/master | 2021-07-22T00:07:53.940786 | 2020-05-07T03:11:17 | 2020-05-07T03:11:17 | 157,749,226 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 900 | py | import numpy as np
"""
We can use dimensionality reduction for linear classification models.
A D-dimensional input vector x is projected down to one dimension using
y = w^T x
We consider a two-class problem with N1 points of class C1 and N2 points of
class C2, so the mean vectors of the two classes are given by
m1 = (1/N1) * sum of x_n over class C1 and m2 = (1/N2) * sum of x_n over class C2.
The separation of the projected class means lets us choose w (the direction
onto which we project) to maximize
m2' - m1' = w^T (m2 - m1),
where mk' = w^T mk is the projected mean of class Ck.
The Fisher criterion is defined as the ratio of the between-class variance to
the within-class variance:
J(w) = (m2' - m1')^2 / (s1^2 + s2^2),
where sk^2 is the sum of (y_n - mk')^2 over class Ck in the projected
one-dimensional space y.
"""
def fisher_criterion(v1, v2):
return abs(np.mean(v1) - np.mean(v2)) / (np.var(v1) + np.var(v2))
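if __name__ == "__main__":
    # Added illustrative check (not part of the original module): two
    # well-separated 1-D classes should yield a much larger Fisher criterion
    # than two heavily overlapping ones.
    rng = np.random.RandomState(0)
    separated = fisher_criterion(rng.normal(0, 1, 500), rng.normal(5, 1, 500))
    overlapping = fisher_criterion(rng.normal(0, 1, 500), rng.normal(0.5, 1, 500))
    print(separated, overlapping)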
| [
"[email protected]"
]
| |
0d6f563bf487e50143491c9294e56c9e298e24ec | a7596165a29e5186bc6c4718e3b6e835939b105d | /apps/pig/src/pig/views.py | 47823c4bb576f890292573687f7d79887416ac0b | [
"Apache-2.0"
]
| permissive | lockhart39/HueQualityAndIngestionApp | f0c778665f0fbe699ec30e0df5e9f3ed8a9c3384 | c75e55a43a8bdeb7aa0f5bf2101ec72b01dcac1c | refs/heads/master | 2021-08-20T00:31:29.481333 | 2017-11-27T19:22:16 | 2017-11-27T19:22:16 | 112,237,923 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,542 | py | #!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import logging
from django.core.urlresolvers import reverse
from django.utils.translation import ugettext as _
from django.views.decorators.csrf import ensure_csrf_cookie
from desktop.lib.django_util import JsonResponse, render
from desktop.lib.exceptions_renderable import PopupException
from desktop.lib.rest.http_client import RestException
from desktop.models import Document
from oozie.views.dashboard import show_oozie_error, check_job_access_permission,\
check_job_edition_permission
from pig import api
from pig.management.commands import pig_setup
from pig.models import get_workflow_output, hdfs_link, PigScript,\
create_or_update_script, get_scripts
LOG = logging.getLogger(__name__)
@ensure_csrf_cookie
def app(request):
autocomplete_base_url = ''
try:
autocomplete_base_url = reverse('beeswax:api_autocomplete_databases', kwargs={}) + '/'
except:
LOG.exception('failed to find autocomplete base url')
return render('app.mako', request, {
'autocomplete_base_url': autocomplete_base_url,
})
def scripts(request):
return JsonResponse(get_scripts(request.user, is_design=True), safe=False)
@show_oozie_error
def dashboard(request):
pig_api = api.get(request.fs, request.jt, request.user)
jobs = pig_api.get_jobs()
hue_jobs = Document.objects.available(PigScript, request.user, with_history=True)
massaged_jobs = pig_api.massaged_jobs_for_json(request, jobs, hue_jobs)
return JsonResponse(massaged_jobs, safe=False)
def save(request):
if request.method != 'POST':
raise PopupException(_('POST request required.'))
attrs = {
'id': request.POST.get('id'),
'name': request.POST.get('name'),
'script': request.POST.get('script'),
'user': request.user,
'parameters': json.loads(request.POST.get('parameters')),
'resources': json.loads(request.POST.get('resources')),
'hadoopProperties': json.loads(request.POST.get('hadoopProperties')),
}
pig_script = create_or_update_script(**attrs)
pig_script.is_design = True
pig_script.save()
response = {
'id': pig_script.id,
'docId': pig_script.doc.get().id
}
return JsonResponse(response, content_type="text/plain")
@show_oozie_error
def stop(request):
if request.method != 'POST':
raise PopupException(_('POST request required.'))
pig_script = PigScript.objects.get(id=request.POST.get('id'))
job_id = pig_script.dict['job_id']
job = check_job_access_permission(request, job_id)
check_job_edition_permission(job, request.user)
try:
api.get(request.fs, request.jt, request.user).stop(job_id)
except RestException, e:
raise PopupException(_("Error stopping Pig script.") % e.message)
return watch(request, job_id)
@show_oozie_error
def run(request):
if request.method != 'POST':
raise PopupException(_('POST request required.'))
attrs = {
'id': request.POST.get('id'),
'name': request.POST.get('name'),
'script': request.POST.get('script'),
'user': request.user,
'parameters': json.loads(request.POST.get('parameters')),
'resources': json.loads(request.POST.get('resources')),
'hadoopProperties': json.loads(request.POST.get('hadoopProperties')),
'is_design': False
}
pig_script = create_or_update_script(**attrs)
params = request.POST.get('submissionVariables')
oozie_id = api.get(request.fs, request.jt, request.user).submit(pig_script, params)
pig_script.update_from_dict({'job_id': oozie_id})
pig_script.save()
response = {
'id': pig_script.id,
'watchUrl': reverse('pig:watch', kwargs={'job_id': oozie_id}) + '?format=python'
}
return JsonResponse(response, content_type="text/plain")
def copy(request):
if request.method != 'POST':
raise PopupException(_('POST request required.'))
pig_script = PigScript.objects.get(id=request.POST.get('id'))
doc = pig_script.doc.get()
try:
doc.can_read_or_exception(request.user)
except Exception, e:
raise PopupException(e)
existing_script_data = pig_script.dict
owner = request.user
name = existing_script_data["name"] + _(' (Copy)')
script = existing_script_data["script"]
parameters = existing_script_data["parameters"]
resources = existing_script_data["resources"]
hadoopProperties = existing_script_data["hadoopProperties"]
script_copy = PigScript.objects.create(owner=owner)
script_copy.update_from_dict({
'name': name,
'script': script,
'parameters': parameters,
'resources': resources,
'hadoopProperties': hadoopProperties
})
script_copy.save()
copy_doc = doc.copy(content_object=script_copy, name=name, owner=owner)
response = {
'id': script_copy.id,
'docId': copy_doc.id,
'name': name,
'script': script,
'parameters': parameters,
'resources': resources,
'hadoopProperties': hadoopProperties
}
return JsonResponse(response, content_type="text/plain")
def delete(request):
if request.method != 'POST':
raise PopupException(_('POST request required.'))
ids = request.POST.get('ids').split(",")
for script_id in ids:
try:
pig_script = PigScript.objects.get(id=script_id)
pig_script.can_edit_or_exception(request.user)
pig_script.doc.all().delete()
pig_script.delete()
except:
LOG.exception('failed to delete pig script')
None
response = {
'ids': ids,
}
return JsonResponse(response, content_type="text/plain")
@show_oozie_error
def watch(request, job_id):
oozie_workflow = check_job_access_permission(request, job_id)
logs, workflow_actions, is_really_done = api.get(request.fs, request.jt, request.user).get_log(request, oozie_workflow)
output = get_workflow_output(oozie_workflow, request.fs)
workflow = {
'job_id': oozie_workflow.id,
'status': oozie_workflow.status,
'progress': oozie_workflow.get_progress(),
'isRunning': oozie_workflow.is_running(),
'killUrl': reverse('oozie:manage_oozie_jobs', kwargs={'job_id': oozie_workflow.id, 'action': 'kill'}),
'rerunUrl': reverse('oozie:rerun_oozie_job', kwargs={'job_id': oozie_workflow.id, 'app_path': oozie_workflow.appPath}),
'actions': workflow_actions
}
response = {
'workflow': workflow,
'logs': logs,
'isReallyDone': is_really_done,
'output': hdfs_link(output)
}
return JsonResponse(response, content_type="text/plain")
def install_examples(request):
result = {'status': -1, 'message': ''}
if request.method != 'POST':
result['message'] = _('A POST request is required.')
else:
try:
pig_setup.Command().handle_noargs()
result['status'] = 0
except Exception, e:
LOG.exception(e)
result['message'] = str(e)
return JsonResponse(result)
| [
"[email protected]"
]
| |
8af064ef0d7490610f6c59dfd4002054ce1eda91 | 2f98aa7e5bfc2fc5ef25e4d5cfa1d7802e3a7fae | /python/python_27094.py | ca9e4b98345e5ed3db4156dcb2812fcc628ce499 | []
| no_license | AK-1121/code_extraction | cc812b6832b112e3ffcc2bb7eb4237fd85c88c01 | 5297a4a3aab3bb37efa24a89636935da04a1f8b6 | refs/heads/master | 2020-05-23T08:04:11.789141 | 2015-10-22T19:19:40 | 2015-10-22T19:19:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 57 | py | # Save full text of a tweet with tweepy
retweeted_status
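# Added illustrative sketch (hedged; the tweepy attributes are assumptions):
# the untruncated text of a retweet is typically reached through the
# retweeted_status attribute, with full_text available when the status was
# fetched using tweet_mode='extended'.
def get_full_tweet_text(status):
    # Prefer the original tweet's text when this status is a retweet.
    if hasattr(status, 'retweeted_status'):
        return status.retweeted_status.full_text
    return status.full_text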
| [
"[email protected]"
]
| |
187bd2a6ff0bfea7ed5629278eea007adedb4d97 | 54d3a1558a4bd38888d4d51f1ae2d2699965087c | /exa.py | 59f998f63b4e4f8e21e59e08b9035fd514853656 | []
| no_license | A8IK/Python-2 | a86843c6ccfe23d42faebb020307351a108075bd | 538aee64bac73110cd0a8ac74747c9d2fa485149 | refs/heads/main | 2023-01-21T12:42:51.226144 | 2020-12-04T18:14:32 | 2020-12-04T18:14:32 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 43 | py | def div(a,b):
print(a/b)
div(4,2)
| [
"[email protected]"
]
| |
c1b71ce4bf116be38058532866d68049bfa605b1 | 88ea6ae5a8f97e3771490583d8acecdbe2877fd8 | /zips/plugin.video.vistatv-ini-maker/main.py | 773a4185cc39459dd2f2a721e93b53361a46dfec | []
| no_license | staycanuca/PersonalDataVistaTV | 26497a29e6f8b86592609e7e950d6156aadf881c | 4844edbfd4ecfc1d48e31432c39b9ab1b3b1a222 | refs/heads/master | 2021-01-25T14:46:25.763952 | 2018-03-03T10:48:06 | 2018-03-03T10:48:06 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 20,546 | py | from xbmcswift2 import Plugin
from xbmcswift2 import actions
import xbmc,xbmcaddon,xbmcvfs,xbmcgui
import re
from rpc import RPC
import requests
import random
import sqlite3
from datetime import datetime,timedelta
import time
#import urllib
import HTMLParser
import xbmcplugin
#import xml.etree.ElementTree as ET
#import sqlite3
import os
#import shutil
#from rpc import RPC
from types import *
plugin = Plugin()
big_list_view = False
def log2(v):
xbmc.log(repr(v))
def log(v):
xbmc.log(re.sub(',',',\n',repr(v)))
def get_icon_path(icon_name):
addon_path = xbmcaddon.Addon().getAddonInfo("path")
return os.path.join(addon_path, 'resources', 'img', icon_name+".png")
def remove_formatting(label):
label = re.sub(r"\[/?[BI]\]",'',label)
label = re.sub(r"\[/?COLOR.*?\]",'',label)
return label
@plugin.route('/addon/<id>')
def addon(id):
addon = plugin.get_storage(id)
items = []
for name in sorted(addon):
url = addon[name]
items.append(
{
'label': name,
'path': url,
'thumbnail':get_icon_path('tv'),
'is_playable':True,
})
return items
@plugin.route('/player')
def player():
if not plugin.get_setting('addons.folder'):
dialog = xbmcgui.Dialog()
dialog.notification("Echo INI Creator", "Set Folder",xbmcgui.NOTIFICATION_ERROR )
xbmcaddon.Addon ('plugin.video.vistatv-ini-maker').openSettings()
addons = plugin.get_storage("addons")
for a in addons.keys():
add = plugin.get_storage(a)
add.clear()
addons.clear()
folder = plugin.get_setting("addons.folder")
file = plugin.get_setting("addons.file")
filename = os.path.join(folder,file)
f = xbmcvfs.File(filename,"rb")
lines = f.read().splitlines()
addon = None
for line in lines:
if line.startswith('['):
a = line.strip('[]')
addons[a] = a
addon = plugin.get_storage(a)
addon.clear()
elif "=" in line:
(name,url) = line.split('=',1)
if url and addon is not None:
addon[name] = url
items = []
for id in sorted(addons):
items.append(
{
'label': id,
'path': plugin.url_for('addon',id=id),
'thumbnail':get_icon_path('tv'),
})
return items
@plugin.route('/play/<url>')
def play(url):
xbmc.executebuiltin('PlayMedia(%s)' % url)
@plugin.route('/pvr_subscribe')
def pvr_subscribe():
plugin.set_setting("pvr.subscribe","true")
xbmc.executebuiltin('Container.Refresh')
@plugin.route('/pvr_unsubscribe')
def pvr_unsubscribe():
plugin.set_setting("pvr.subscribe","false")
xbmc.executebuiltin('Container.Refresh')
@plugin.route('/add_folder/<id>/<path>')
def add_folder(id,path):
folders = plugin.get_storage('folders')
#ids = plugin.get_storage('ids')
folders[path] = id
#ids[id] = id
xbmc.executebuiltin('Container.Refresh')
@plugin.route('/remove_folder/<id>/<path>')
def remove_folder(id,path):
folders = plugin.get_storage('folders')
del folders[path]
xbmc.executebuiltin('Container.Refresh')
@plugin.route('/clear')
def clear():
folders = plugin.get_storage('folders')
folders.clear()
@plugin.route('/folder/<id>/<path>')
def folder(id,path):
folders = plugin.get_storage('folders')
response = RPC.files.get_directory(media="files", directory=path, properties=["thumbnail"])
files = response["files"]
dirs = dict([[remove_formatting(f["label"]), f["file"]] for f in files if f["filetype"] == "directory"])
links = {}
thumbnails = {}
for f in files:
if f["filetype"] == "file":
label = remove_formatting(f["label"])
file = f["file"]
while (label in links):
label = "%s." % label
links[label] = file
thumbnails[label] = f["thumbnail"]
items = []
for label in sorted(dirs):
path = dirs[label]
context_items = []
if path in folders:
fancy_label = "[COLOR red][B]%s[/B][/COLOR] " % label
context_items.append(("[COLOR red][B]%s[/B][/COLOR] " % 'Unsubscribe', 'XBMC.RunPlugin(%s)' % (plugin.url_for(remove_folder, id=id, path=path))))
else:
fancy_label = "[B]%s[/B]" % label
context_items.append(("[COLOR red][B]%s[/B][/COLOR] " % 'Subscribe', 'XBMC.RunPlugin(%s)' % (plugin.url_for(add_folder, id=id, path=path))))
items.append(
{
'label': fancy_label,
'path': plugin.url_for('folder',id=id, path=path),
'thumbnail': get_icon_path('tv'),
'context_menu': context_items,
})
for label in sorted(links):
items.append(
{
'label': label,
'path': plugin.url_for('play',url=links[label]),
'thumbnail': thumbnails[label],
})
return items
@plugin.route('/pvr')
def pvr():
index = 0
urls = []
channels = {}
for group in ["radio","tv"]:
urls = urls + xbmcvfs.listdir("pvr://channels/%s/All channels/" % group)[1]
for group in ["radio","tv"]:
groupid = "all%s" % group
json_query = RPC.PVR.get_channels(channelgroupid=groupid, properties=[ "thumbnail", "channeltype", "hidden", "locked", "channel", "lastplayed", "broadcastnow" ] )
if "channels" in json_query:
for channel in json_query["channels"]:
channelname = channel["label"]
channelid = channel["channelid"]-1
channellogo = channel['thumbnail']
streamUrl = urls[index]
index = index + 1
url = "pvr://channels/%s/All channels/%s" % (group,streamUrl)
channels[url] = channelname
items = []
for url in sorted(channels, key=lambda x: channels[x]):
name = channels[url]
items.append(
{
'label': name,
'path': url,
'is_playable': True,
})
return items
@plugin.route('/subscribe')
def subscribe():
folders = plugin.get_storage('folders')
ids = {}
for folder in folders:
id = folders[folder]
ids[id] = id
all_addons = []
for type in ["xbmc.addon.video", "xbmc.addon.audio"]:
response = RPC.addons.get_addons(type=type,properties=["name", "thumbnail"])
if "addons" in response:
found_addons = response["addons"]
all_addons = all_addons + found_addons
seen = set()
addons = []
for addon in all_addons:
if addon['addonid'] not in seen:
addons.append(addon)
seen.add(addon['addonid'])
items = []
pvr = plugin.get_setting('pvr.subscribe')
context_items = []
label = "PVR"
if pvr == "true":
fancy_label = "[COLOR red][B]%s[/B][/COLOR] " % label
context_items.append(("[COLOR red][B]%s[/B][/COLOR] " % 'Unsubscribe', 'XBMC.RunPlugin(%s)' % (plugin.url_for(pvr_unsubscribe))))
else:
fancy_label = "[B]%s[/B]" % label
context_items.append(("[COLOR red][B]%s[/B][/COLOR] " % 'Subscribe', 'XBMC.RunPlugin(%s)' % (plugin.url_for(pvr_subscribe))))
items.append(
{
'label': fancy_label,
'path': plugin.url_for('pvr'),
'thumbnail':get_icon_path('tv'),
'context_menu': context_items,
})
addons = sorted(addons, key=lambda addon: remove_formatting(addon['name']).lower())
for addon in addons:
label = remove_formatting(addon['name'])
id = addon['addonid']
path = "plugin://%s" % id
context_items = []
if id in ids:
fancy_label = "[COLOR red][B]%s[/B][/COLOR] " % label
context_items.append(("[COLOR red][B]%s[/B][/COLOR] " % 'Unsubscribe', 'XBMC.RunPlugin(%s)' % (plugin.url_for(remove_folder, id=id, path=path))))
else:
fancy_label = "[B]%s[/B]" % label
context_items.append(("[COLOR red][B]%s[/B][/COLOR] " % 'Subscribe', 'XBMC.RunPlugin(%s)' % (plugin.url_for(add_folder, id=id, path=path))))
items.append(
{
'label': fancy_label,
'path': plugin.url_for('folder',id=id, path=path),
'thumbnail': get_icon_path('tv'),
'context_menu': context_items,
})
return items
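# Walk every subscribed folder (and optionally the PVR channels) and rebuild the
# INI file configured in the add-on settings; shows a notification when done.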
@plugin.route('/update')
def update():
if not plugin.get_setting('addons.folder'):
dialog = xbmcgui.Dialog()
dialog.notification("Echo INI Creator", "Set Folder",xbmcgui.NOTIFICATION_ERROR )
xbmcaddon.Addon ('plugin.video.vistatv-ini-maker').openSettings()
folders = plugin.get_storage('folders')
streams = {}
for folder in folders:
log("[plugin.video.vistatv-ini-maker] " + folder)
path = folder
id = folders[folder]
if not id in streams:
streams[id] = {}
response = RPC.files.get_directory(media="files", directory=path, properties=["thumbnail"])
if not 'error' in response:
files = response["files"]
links = {}
thumbnails = {}
for f in files:
if f["filetype"] == "file":
label = remove_formatting(f["label"])
file = f["file"]
while (label in links):
label = "%s." % label
links[label] = file
thumbnails[label] = f["thumbnail"]
streams[id][label] = file
if plugin.get_setting("pvr.subscribe") == "true":
streams["plugin.video.vistatv-ini-maker"] = {}
items = pvr()
for item in items:
name = item["label"]
url = item["path"]
streams["plugin.video.vistatv-ini-maker"][name] = url
folder = plugin.get_setting("addons.folder")
file = plugin.get_setting("addons.file")
filename = os.path.join(folder,file)
f = xbmcvfs.File(filename,"wb")
    # streams contains all the add-on ids you are generating entries for, e.g. plugin.video.sportie
for id in sorted(streams):
        # make a section header line for this add-on, written before all of its channels, e.g. [plugin.video.sportie]
line = "[%s]\n" % id
# write that line to the ini file.
f.write(line.encode("utf8"))
        # channels holds all the streams collected from this add-on.
channels = streams[id]
# for each channel in the addon. i.e bbc one
for channel in sorted(channels):
# Grab the URL to the channel from the list
url = channels[channel]
            # make a list called naughty that contains all the special characters, each quoted and separated by a comma.
naughty = [":","!",'"',"$","%","^","&","*","(",")","-","_","=","+","[","]","{","}","#","~","@",";",":","/","?",".",">",",","<","|",","]
# go through every item in the list. So in the first instance item would become :
for item in naughty:
# Check if that character exists in the channel name, if so replace it with a space.
channel = channel.replace(item,' ')
            # Strip all whitespace from the beginning of the channel name.
channel=channel.lstrip()
            # Strip all whitespace from the end of the channel name.
channel=channel.rstrip()
# Check if there are any double spaces in the channel name and replace them with a single space.
while " " in channel:
# Replace double spaces with single spaces.
channel = channel.replace(" "," ")
#Check if the length of the channel name is one or more characters.
if len(channel) >= 1:
                # If so, build the line containing the channel name and URL: channel before the '=', URL after.
line = "%s=%s\n" % (channel,url)
#write the line to the ini file.
f.write(line.encode("utf8"))
#Close the file.
f.close()
xbmcgui.Dialog().notification("Echo INI Creator", "Finished Update")
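# Illustrative example only (section and channel names depend on what is subscribed):
# the generated INI groups channels under one section per add-on id, for instance
#
#   [plugin.video.example]
#   Channel One=plugin://plugin.video.example/play/1
#   Channel Two=plugin://plugin.video.example/play/2
#
# search() below parses this same file back into per-add-on storages.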
@plugin.route('/search/<what>')
def search(what):
if not what:
return
addons = plugin.get_storage("addons")
folder = plugin.get_setting("addons.folder")
file = plugin.get_setting("addons.file")
filename = os.path.join(folder,file)
f = xbmcvfs.File(filename,"rb")
lines = f.read().splitlines()
addon = None
for line in lines:
if line.startswith('['):
a = line.strip('[]')
addons[a] = a
addon = plugin.get_storage(a)
addon.clear()
elif "=" in line:
(name,url) = line.split('=',1)
if url and addon is not None:
addon[name] = url
items = []
for a in addons.keys():
add = plugin.get_storage(a)
log2(add.keys())
exact = [x for x in add.keys() if x.lower() == what.lower()]
log2(exact)
partial = [x for x in add.keys() if what.lower() in x.lower()]
ignore_space = [x for x in add.keys() if re.sub(' ','',what).lower() in re.sub(' ','',x).lower()]
found = exact + partial
for f in sorted(set(exact)):
items.append({
"label": "[COLOR green]%s [%s][/COLOR]" % (f,a),
"path" : add[f],
"is_playable" : True,
})
for f in sorted(set(partial)-set(exact)):
items.append({
"label": "[COLOR orange]%s [%s][/COLOR]" % (f,a),
"path" : add[f],
"is_playable" : True,
})
for f in sorted(set(ignore_space)-set(partial)-set(exact)):
items.append({
"label": "[COLOR red]%s [%s][/COLOR]" % (f,a),
"path" : add[f],
"is_playable" : True,
})
return items
@plugin.route('/search_dialog')
def search_dialog():
dialog = xbmcgui.Dialog()
what = dialog.input("Search")
if what:
return search(what)
@plugin.route('/add_channel')
def add_channel():
channels = plugin.get_storage('channels')
d = xbmcgui.Dialog()
channel = d.input("Add Channel")
if channel:
channels[channel] = ""
xbmc.executebuiltin('Container.Refresh')
@plugin.route('/remove_channel')
def remove_channel():
channels = plugin.get_storage('channels')
channel_list = sorted(channels)
d = xbmcgui.Dialog()
which = d.select("Remove Channel",channel_list)
if which == -1:
return
channel = channel_list[which]
del channels[channel]
xbmc.executebuiltin('Container.Refresh')
@plugin.route('/remove_this_channel/<channel>')
def remove_this_channel(channel):
channels = plugin.get_storage('channels')
del channels[channel]
xbmc.executebuiltin('Container.Refresh')
@plugin.route('/clear_channels')
def clear_channels():
channels = plugin.get_storage('channels')
channels.clear()
xbmc.executebuiltin('Container.Refresh')
@plugin.route('/import_channels')
def import_channels():
channels = plugin.get_storage('channels')
d = xbmcgui.Dialog()
filename = d.browse(1, 'Import Channels', 'files', '', False, False, 'special://home/')
if not filename:
return
if filename.endswith('.ini'):
lines = xbmcvfs.File(filename,'rb').read().splitlines()
for line in lines:
if not line.startswith('[') and not line.startswith('#') and "=" in line:
channel_url = line.split('=',1)
if len(channel_url) == 2:
name = channel_url[0]
channels[name] = ""
xbmc.executebuiltin('Container.Refresh')
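# Present every INI entry whose name loosely matches the requested channel and
# resolve the chosen one, so a channel label can be mapped to a concrete stream.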
@plugin.route('/stream_search/<channel>')
def stream_search(channel):
#folders = plugin.get_storage('folders')
streams = {}
folder = plugin.get_setting("addons.folder")
file = plugin.get_setting("addons.file")
filename = os.path.join(folder,file)
f = xbmcvfs.File(filename,"rb")
lines = f.read().splitlines()
    # Track the current [section]; None until the first section header is seen.
    addon = None
    for line in lines:
if line.startswith('['):
addon = line.strip('[]')
if addon not in streams:
streams[addon] = {}
elif "=" in line:
(name,url) = line.split('=',1)
if url and addon is not None:
streams[addon][url] = name
channel_search = channel.lower().replace(' ','')
stream_list = []
for id in sorted(streams):
files = streams[id]
for f in sorted(files, key=lambda k: files[k]):
label = files[f]
label_search = label.lower().replace(' ','')
if label_search in channel_search or channel_search in label_search:
stream_list.append((id,f,label))
labels = ["[%s] %s" % (x[0],x[2]) for x in stream_list]
d = xbmcgui.Dialog()
which = d.select(channel, labels)
if which == -1:
return
stream_name = stream_list[which][2]
stream_link = stream_list[which][1]
plugin.set_resolved_url(stream_link)
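# Write the stored channel names to export.ini, pointing each one at this
# add-on's stream_search route so the pick-a-stream dialog opens on playback.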
@plugin.route('/export_channels')
def export_channels():
channels = plugin.get_storage('channels')
f = xbmcvfs.File('special://profile/addon_data/plugin.video.vistatv-ini-maker/export.ini','wb')
for channel in sorted(channels):
url = plugin.url_for('stream_search',channel=channel)
channel = channel.replace(':','')
s = "%s=%s\n" % (channel,url)
f.write(s)
f.close()
@plugin.route('/channel_player')
def channel_player():
channels = plugin.get_storage("channels")
items = []
for channel in sorted(channels):
context_items = []
context_items.append(("[COLOR red][B]%s[/B][/COLOR] " % 'Add Channel', 'XBMC.RunPlugin(%s)' % (plugin.url_for(add_channel))))
context_items.append(("[COLOR red][B]%s[/B][/COLOR] " % 'Remove Channel', 'XBMC.RunPlugin(%s)' % (plugin.url_for(remove_this_channel, channel=channel))))
context_items.append(("[COLOR red][B]%s[/B][/COLOR] " % 'Import Channels', 'XBMC.RunPlugin(%s)' % (plugin.url_for(import_channels))))
context_items.append(("[COLOR red][B]%s[/B][/COLOR] " % 'Export Channels', 'XBMC.RunPlugin(%s)' % (plugin.url_for(export_channels))))
context_items.append(("[COLOR red][B]%s[/B][/COLOR] " % 'Clear Channels', 'XBMC.RunPlugin(%s)' % (plugin.url_for(clear_channels))))
items.append(
{
'label': channel,
'path': plugin.url_for('stream_search',channel=channel),
'thumbnail':get_icon_path('tv'),
'is_playable': True,
'context_menu': context_items,
})
return items
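# Root menu of the add-on: Subscribe, Create (rebuild the INI), Play and Channels.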
@plugin.route('/')
def index():
items = []
context_items = []
context_items.append(("[COLOR red][B]%s[/B][/COLOR] " % 'Clear Subscriptions', 'XBMC.RunPlugin(%s)' % (plugin.url_for(clear))))
items.append(
{
'label': "[COLOR red]Subscribe[/COLOR]",
'path': plugin.url_for('subscribe'),
'thumbnail':get_icon_path('tv'),
'context_menu': context_items,
})
items.append(
{
'label': "[COLOR green]Create[/COLOR]",
'path': plugin.url_for('update'),
'thumbnail':get_icon_path('tv'),
})
items.append(
{
'label': "Play",
'path': plugin.url_for('player'),
'thumbnail':get_icon_path('tv'),
})
context_items = []
context_items.append(("[COLOR red][B]%s[/B][/COLOR] " % 'Add Channel', 'XBMC.RunPlugin(%s)' % (plugin.url_for(add_channel))))
context_items.append(("[COLOR red][B]%s[/B][/COLOR] " % 'Remove Channel', 'XBMC.RunPlugin(%s)' % (plugin.url_for(remove_channel))))
context_items.append(("[COLOR red][B]%s[/B][/COLOR] " % 'Import Channels', 'XBMC.RunPlugin(%s)' % (plugin.url_for(import_channels))))
context_items.append(("[COLOR red][B]%s[/B][/COLOR] " % 'Export Channels', 'XBMC.RunPlugin(%s)' % (plugin.url_for(export_channels))))
context_items.append(("[COLOR red][B]%s[/B][/COLOR] " % 'Clear Channels', 'XBMC.RunPlugin(%s)' % (plugin.url_for(clear_channels))))
items.append(
{
'label': "Channels",
'path': plugin.url_for('channel_player'),
'thumbnail':get_icon_path('tv'),
'context_menu': context_items,
})
return items
if __name__ == '__main__':
plugin.run()
if big_list_view == True:
view_mode = int(plugin.get_setting('view_mode'))
plugin.set_view_mode(view_mode) | [
"[email protected]"
]
| |
5df953e7136216e7adfa597079d091686b4fa538 | deb97b21457bc360563e09c7bbba235cdd915548 | /gitkit/commands/del_merged.py | de55050ed183a4ab19f91ae4bcc81325227a18e2 | [
"MIT"
]
| permissive | akx/git-kit | e381ae5516a6f36f39d72af00e93aa5d4f0e985f | 8084d99c6a113aad56764b0907d157c6957a3977 | refs/heads/master | 2023-07-19T20:16:27.358018 | 2023-07-18T07:49:41 | 2023-07-18T07:49:41 | 22,340,212 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 556 | py | import click
from gitkit.conf import sacred_branches
from gitkit.util.refs import get_main_branch
from gitkit.util.shell import get_lines, run
@click.command()
@click.argument("ref", required=False, default=None)
def del_merged(ref):
"""
Delete merged branches.
"""
if not ref:
ref = get_main_branch()
for branch in set(get_lines(["git", "branch", "-l", "--merged", ref])):
branch = branch.strip("* ")
if branch != ref and branch not in sacred_branches:
run(["git", "branch", "-v", "-d", branch])
| [
"[email protected]"
]
| |
c703a262839b247143130d0cf69dd4626cb5d5ff | a63590f247d914b6993f4e72a5c27a439344d12a | /env/lib/python3.7/io.py | 062f32ae1bab0a72f1d55ace8c1184b6d81bdb8e | []
| no_license | wgcv/Social-Media-Analyze-Election-Guayaquil | e6c65e68e6f54a11aadad9d1765568521df9a20e | 784e6e4c94552307fefdf85367bb6a793ae878c3 | refs/heads/master | 2020-05-09T20:33:58.585077 | 2019-05-11T16:46:43 | 2019-05-11T16:46:43 | 181,410,783 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 41 | py | /Users/wgcv/anaconda3/lib/python3.7/io.py | [
"[email protected]"
]
| |
e19d83d920cbf214a0559c2f0bb610c90b9d69ee | 15f321878face2af9317363c5f6de1e5ddd9b749 | /solutions_python/Problem_116/1914.py | 20d7b72d1b8a35128812032e9655e83a53e17756 | []
| no_license | dr-dos-ok/Code_Jam_Webscraper | c06fd59870842664cd79c41eb460a09553e1c80a | 26a35bf114a3aa30fc4c677ef069d95f41665cc0 | refs/heads/master | 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,493 | py | f = open("A-large.in","r")
t = int (f.readline())
ent = []
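# check() inspects rows, columns and both diagonals of the 4x4 board and returns
# 0 if X won, 1 if O won, 2 for a draw (board full) and 3 if the game is unfinished;
# a 'T' square counts for either player.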
def check(ent):
for i in range(0,4):
if ('.' not in ent[i])and ('O' not in ent[i]):
return 0
if ('.' not in ent[i])and ('X' not in ent[i]):
return 1
for i in range(0,4):
a = []
for j in range(0,4):
a.append(ent[j][i])
if ('.' not in a)and ('O' not in a):
return 0
if ('.' not in a)and ('X' not in a):
return 1
a = [ent[0][0],ent[1][1],ent[2][2],ent[3][3]]
if ('.' not in a)and ('O' not in a):
return 0
if ('.' not in a)and ('X' not in a):
return 1
a = [ent[0][3],ent[1][2],ent[2][1],ent[3][0]]
if ('.' not in a)and ('O' not in a):
return 0
if ('.' not in a)and ('X' not in a):
return 1
if ('.' not in ent[0]) and ('.' not in ent[1]) and ('.' not in ent[2]) and ('.' not in ent[3]):
return 2
return 3
s = open("output.out","w")
for i in range(1,t+1):
for j in range(0,4):
ent.append(f.readline())
x = check(ent)
if x == 0:
s.write("Case #%d: X won" % i)
if x == 1:
s.write("Case #%d: O won" % i)
if x == 2:
s.write("Case #%d: Draw" % i)
if x == 3:
s.write("Case #%d: Game has not completed" % i)
if i<t:
ent.append(f.readline())
s.write("\n")
ent = []
f.close()
s.close()
| [
"[email protected]"
]
| |
360fbd0df75ba142aadd5589508fdb2d95ba7602 | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_074/ch117_2020_04_01_19_24_01_200930.py | 446a96f7337eaf516aa30fe9c7ef40edbc6f0571 | []
| no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 225 | py | import math
def snell_descartes (n1,n2,c):
    # Rewritten so the function is runnable: we assume c is the angle of incidence
    # in degrees and return the refraction angle o1 in degrees, using the
    # Snell-Descartes law n1*sin(theta1) = n2*sin(theta2).
    theta1 = math.radians(c)
    s = n1 * math.sin(theta1) / n2
    if abs(s) > 1:
        # Total internal reflection: no refracted ray exists.
        o1 = None
    else:
        o1 = math.degrees(math.asin(s))
return(o1) | [
"[email protected]"
]
| |
b87d3f64e713ba53fb5b94de3507f74d8a97ea0b | 5c533e2cf1f2fa87e55253cdbfc6cc63fb2d1982 | /python/quantumhall/cyclotron.py | 108c267d7ee00673328a312228abdcb7f535d40f | []
| no_license | philzook58/python | 940c24088968f0d5c655e2344dfa084deaefe7c6 | 6d43db5165c9bcb17e8348a650710c5f603e6a96 | refs/heads/master | 2020-05-25T15:42:55.428149 | 2018-05-14T03:33:29 | 2018-05-14T03:33:29 | 69,040,196 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 993 | py |
#A suggesiton for the classical fractional hall effect
#Is a mean field organiztion of the cycltron phases, such that they synchronize.
#Leading to an effective time and angle dependant
# self consistantly dz/dt2 = i w dz/dt + P
# where E is a vortex configuration by conjecture. P = f(|z|)z^n
# and also has angular time dependance z/|z|
import numpy as np
from scipy.integrate import odeint
import matplotlib.pyplot as plt
omega =1.
g = -.5
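# odeint works on real-valued state vectors, so the complex position z and
# velocity zdot are packed as [Re z, Im z, Re zdot, Im zdot] and unpacked again
# inside the right-hand side.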
def pack(z,zdot):
return np.array([np.real(z),np.imag(z),np.real(zdot),np.imag(zdot)])
def unpack(x):
return x[0]+1.j * x[1], x[2]+1.j * x[3],
def accel(z,zdot):
return 1.j * omega * zdot + g * np.conj(z)**3
def diffeq(x,t):
z, zdot = unpack(x)
return pack(zdot, accel(z,zdot))
from scipy import signal
T = 1000.
N = 1000
initcond = pack(1. + 0.j ,0. + 1.j)
t = np.linspace(0,T, N)
sol = odeint(diffeq, initcond, t)
f , P = signal.periodogram(sol[:,1],N/T)
plt.plot(t,sol[:,1])
plt.figure()
plt.plot(f,P)
plt.show()
| [
"[email protected]"
]
| |
60d4e232d5fa663fa88d5d6da7e0953144542f33 | 9ef0f266173887eafd5c797d13a6538733b39002 | /trimesh/path/entities.py | de2166781a4699322e91ad3e70b13e8fccd4f1c4 | [
"MIT"
]
| permissive | MiaoLi/trimesh | a850e3a922e43ce6500085eeaf16df8404ad0f17 | 8f6e537151d914d23180a1c1152d849c41d2c1fa | refs/heads/master | 2021-01-14T12:36:02.831270 | 2015-10-17T01:36:33 | 2015-10-17T01:36:33 | 44,636,986 | 2 | 0 | null | 2015-10-20T21:52:11 | 2015-10-20T21:52:10 | null | UTF-8 | Python | false | false | 5,072 | py | '''
entities.py: basic geometric primitives
Design intent: only store references to vertex indices and pass the vertex
array back to functions that require it.
This keeps all vertices in one external list.
'''
import numpy as np
from .arc import discretize_arc, arc_center
from .curve import discretize_bezier, discretize_bspline
from ..points import unitize
from ..util import replace_references
_HASH_LENGTH = 5
class Entity(object):
def __init__(self,
points,
closed = False):
self.points = np.array(points)
self.closed = closed
@property
def _class_id(self):
'''
Return an integer that is unique to the class type.
Note that this implementation will fail if a class is defined
that starts with the same letter as an existing class.
Since this function is called a lot, it is a tradeoff between
speed and robustness where speed won.
'''
return ord(self.__class__.__name__[0])
@property
def hash(self):
'''
Returns a string unique to the entity.
If two identical entities exist, they can be removed
by comparing the string returned by this function.
'''
hash = np.zeros(_HASH_LENGTH, dtype=np.int)
hash[-2:] = self._class_id, int(self.closed)
points_count = np.min([3, len(self.points)])
hash[0:points_count] = np.sort(self.points)[-points_count:]
return hash
def to_dict(self):
'''
Returns a dictionary with all of the information about the entity.
'''
return {'type' : self.__class__.__name__,
'points': self.points.tolist(),
'closed': self.closed}
def rereference(self, replacement):
'''
Given a replacement dictionary, change points to reflect the dictionary.
eg, if replacement = {0:107}, self.points = [0,1902] becomes [107, 1902]
'''
self.points = replace_references(self.points, replacement)
@property
def nodes(self):
'''
Returns an (n,2) list of nodes, or vertices on the path.
Note that this generic class function assumes that all of the reference
points are on the path, which is true for lines and three point arcs.
If you were to define another class where that wasn't the case
(for example, the control points of a bezier curve),
you would need to implement an entity- specific version of this function.
The purpose of having a list of nodes is so that they can then be added
as edges to a graph, so we can use functions to check connectivity,
extract paths, etc.
The slicing on this function is essentially just tiling points
so the first and last vertices aren't repeated. Example:
self.points = [0,1,2]
returns: [[0,1], [1,2]]
'''
return np.column_stack((self.points,
self.points)).reshape(-1)[1:-1].reshape((-1,2))
@property
def end_points(self):
'''
Returns the first and last points. Also note that if you
define a new entity class where the first and last vertices
in self.points aren't the endpoints of the curve you need to
implement this function for your class.
self.points = [0,1,2]
returns: [0,2]
'''
return self.points[[0,-1]]
class Arc(Entity):
def discrete(self, vertices, scale=1.0):
return discretize_arc(vertices[self.points],
close = self.closed,
scale = scale)
def center(self, vertices):
return arc_center(vertices[self.points])
class Line(Entity):
def discrete(self, vertices, scale=1.0):
return vertices[self.points]
class Curve(Entity):
@property
def _class_id(self):
return sum([ord(i) for i in self.__class__.__name__])
@property
def nodes(self):
return [[self.points[0],
self.points[1]],
[self.points[1],
self.points[-1]]]
class Bezier(Curve):
def discrete(self, vertices, scale=1.0):
return discretize_bezier(vertices[self.points], scale=scale)
class BSpline(Curve):
def __init__(self, points, knots, closed=False):
self.points = points
self.knots = knots
self.closed = closed
def discrete(self, vertices, count=None, scale=1.0):
result = discretize_bspline(control = vertices[self.points],
knots = self.knots,
count = count,
scale = scale)
return result
| [
"[email protected]"
]
| |
611e6bea09e4fc1314eb651ee69043dad69aec8d | 0af76aee48453b64d2f09dfadeb79f4a4ac6fef3 | /solution/practice/data-structures/multiple-choice/how-well-do-you-know-trees/solution.py | fc9723e63948c87114dd0dc2b22b2b8d0c594e5f | [
"MIT"
]
| permissive | Abhishek2019/HackerRank | 99ee5d437eb3abe8f041a04bea3968848605a811 | d8a297e2707545957452d07ca564086e3e34a527 | refs/heads/master | 2021-05-03T12:06:18.488528 | 2019-10-30T17:19:59 | 2019-10-30T17:19:59 | 120,493,921 | 0 | 1 | MIT | 2019-10-30T17:20:05 | 2018-02-06T17:08:34 | Python | UTF-8 | Python | false | false | 13 | py | print("n-1")
| [
"[email protected]"
]
| |
76732c90be1e6c89d923ed2aabebc32359ae7817 | b73b77dbbd6b4b2c216c1c1e08e5d92c734e545c | /hotel/migrations/0102_auto_20200414_1402.py | 4c95c54d31333b48f288d476d6df915d58142931 | []
| no_license | aadarshachapagain/hotel_booking | 0cf248b78a03277a5208aecb1a72aa1282319ead | 58503c57d2fd6d07fdbe6b7eb113954a0282dc3d | refs/heads/main | 2023-08-27T01:53:21.176194 | 2021-10-01T03:13:42 | 2021-10-01T03:13:42 | 412,294,747 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 410 | py | # Generated by Django 2.1.5 on 2020-04-14 08:17
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('hotel', '0101_bedtype_status'),
]
operations = [
migrations.AlterField(
model_name='bedtype',
name='description',
field=models.TextField(blank=True, max_length=500, null=True),
),
]
| [
"[email protected]"
]
| |
acbeb910b65258b18b71182806b2cc75e84ffa03 | 3b1efdd0aacc98738f3b8b9ee09c6ff59cccc14e | /ietf/person/factories.py | e076b4ef72e4bec53e2bc6a55c5798054d06ced0 | [
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
]
| permissive | unofficial-mirror/ietfdb | 15beb6bf17b1d4abb257ee656ac6b7488339d331 | ce54adb30dc7299c6eb4d42b9aa9d2c2929c1a81 | refs/heads/master | 2020-08-06T17:24:13.966746 | 2019-10-04T20:54:05 | 2019-10-04T20:54:05 | 213,088,920 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,456 | py | # Copyright The IETF Trust 2015-2019, All Rights Reserved
# -*- coding: utf-8 -*-
from __future__ import absolute_import, print_function, unicode_literals
import factory
import faker
import faker.config
import os
import random
import shutil
from unidecode import unidecode
from django.conf import settings
from django.contrib.auth.models import User
from django.utils.text import slugify
from django.utils.encoding import force_text
import debug # pyflakes:ignore
from ietf.person.models import Person, Alias, Email
from ietf.person.name import normalize_name, unidecode_name
fake = faker.Factory.create()
def random_faker():
# The transliteration of some arabic and devanagari names introduces
# non-alphabetic characgters that don't work with the draft author
# extraction code, and also don't seem to match the way people with arabic
# names romanize arabic names. Exlude those locales from name generation
# in order to avoid test failures.
locales = set( [ l for l in faker.config.AVAILABLE_LOCALES if not (l.startswith('ar_') or l.startswith('sg_')) ] )
return faker.Faker(random.sample(locales, 1)[0])
class UserFactory(factory.DjangoModelFactory):
class Meta:
model = User
django_get_or_create = ('username',)
exclude = ['faker', ]
faker = factory.LazyFunction(random_faker)
first_name = factory.LazyAttribute(lambda o: o.faker.first_name())
last_name = factory.LazyAttribute(lambda o: o.faker.last_name())
email = factory.LazyAttributeSequence(lambda u, n: '%s.%s_%d@%s'%( slugify(unidecode(u.first_name)),
slugify(unidecode(u.last_name)), n, fake.domain_name()))
username = factory.LazyAttribute(lambda u: u.email)
@factory.post_generation
def set_password(obj, create, extracted, **kwargs): # pylint: disable=no-self-argument
obj.set_password( '%s+password' % obj.username ) # pylint: disable=no-value-for-parameter
class PersonFactory(factory.DjangoModelFactory):
class Meta:
model = Person
user = factory.SubFactory(UserFactory)
name = factory.LazyAttribute(lambda p: normalize_name('%s %s'%(p.user.first_name, p.user.last_name)))
ascii = factory.LazyAttribute(lambda p: force_text(unidecode_name(p.name)))
class Params:
with_bio = factory.Trait(biography = "\n\n".join(fake.paragraphs()))
@factory.post_generation
def default_aliases(obj, create, extracted, **kwargs): # pylint: disable=no-self-argument
make_alias = getattr(AliasFactory, 'create' if create else 'build')
make_alias(person=obj,name=obj.name)
make_alias(person=obj,name=obj.ascii)
if obj.name != obj.plain_name():
make_alias(person=obj,name=obj.plain_name())
if obj.ascii != obj.plain_ascii():
make_alias(person=obj,name=obj.plain_ascii())
@factory.post_generation
def default_emails(obj, create, extracted, **kwargs): # pylint: disable=no-self-argument
if extracted is None:
extracted = True
if create and extracted:
make_email = getattr(EmailFactory, 'create' if create else 'build')
make_email(person=obj, address=obj.user.email)
@factory.post_generation
def default_photo(obj, create, extracted, **kwargs): # pylint: disable=no-self-argument
import atexit
if obj.biography:
photo_name = obj.photo_name()
media_name = "%s/%s.jpg" % (settings.PHOTOS_DIRNAME, photo_name)
obj.photo = media_name
obj.photo_thumb = media_name
photosrc = os.path.join(settings.TEST_DATA_DIR, "profile-default.jpg")
photodst = os.path.join(settings.PHOTOS_DIR, photo_name + '.jpg')
if not os.path.exists(photodst):
shutil.copy(photosrc, photodst)
def delete_file(file):
os.unlink(file)
atexit.register(delete_file, photodst)
class AliasFactory(factory.DjangoModelFactory):
class Meta:
model = Alias
@classmethod
def _create(cls, model_class, *args, **kwargs):
person = kwargs['person']
name = kwargs['name']
existing_aliases = set(model_class.objects.filter(person=person).values_list('name', flat=True))
if not name in existing_aliases:
obj = model_class(*args, **kwargs)
obj.save()
return obj
name = factory.Faker('name')
def fake_email_address(n):
address_field = [ f for f in Email._meta.fields if f.name == 'address'][0]
count = 0
while True:
address = '%s.%s_%d@%s' % (
slugify(unidecode(fake.first_name())),
slugify(unidecode(fake.last_name())),
n, fake.domain_name()
)
count += 1
if len(address) <= address_field.max_length:
break
if count >= 10:
raise RuntimeError("Failed generating a fake email address to fit in Email.address(max_length=%s)"%address_field.max_lenth)
return address
class EmailFactory(factory.DjangoModelFactory):
class Meta:
model = Email
django_get_or_create = ('address',)
address = factory.Sequence(fake_email_address)
person = factory.SubFactory(PersonFactory)
active = True
primary = False
origin = factory.LazyAttribute(lambda obj: obj.person.user.username if obj.person.user else '')
| [
"[email protected]"
]
| |
c2329e1d0a37e88a0fcbfb5d6a743b80e8753c28 | df3853b41ed05d86f5bcd992fcc265f637c67784 | /big_deal/test2/14.py | d79e788612e926b9cf62a3a53eddc0a537b10ca5 | []
| no_license | KseniaMIPT/Adamasta | 6ab0121519581dbbbf6ae788d1da85f545f718d1 | e91c34c80834c3f4bf176bc4bf6bf790f9f72ca3 | refs/heads/master | 2021-01-10T16:48:31.141709 | 2016-11-23T21:02:25 | 2016-11-23T21:02:25 | 43,350,507 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,143 | py | def digraph_from_input():
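    # Reads N, then N-1 whitespace-separated pairs; each pair's second token is
    # mapped to the set of first tokens (assumed to be "child parent" edges).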
N = int(input())
digraph = {}
for i in range(N-1):
line = input().split()
if line[1] not in digraph:
digraph[line[1]] = {line[0]}
else:
digraph[line[1]].add(line[0])
if line[0] not in digraph:
digraph[line[0]] = set()
return digraph
digraph = digraph_from_input()
start_node = str(input())
def bfs_fire(g, start, fired=set(), tree =[]):
"""Функция выделяет остовое дерево методом обхода в ширину.
:param g: основной граф
:param start: начальная вершина
:param fired: множество уже имеющихся в графе вершин
:return tree: остовое дерево
"""
fired.add(start)
queue = [start]
while queue:
current = queue.pop(0)
for neighbour in g[current]:
if neighbour not in fired:
fired.add(neighbour)
queue.append(neighbour)
tree.append([current, neighbour])
return tree
tree = bfs_fire(digraph, start_node)
| [
"[email protected]"
]
| |
abf58fb31e51c78bb90abe08fcf94e44fc5f36c0 | 1985d1a7462d537e1f43055e3c75d91145407ff9 | /Next_Permutation.py | fcc699c978f678ede7468f2b601e8c68627e87c9 | []
| no_license | yeonnseok/algorithm_practice | d95425e59b7b579a70dbbd932e4fb691c57f4534 | c1468f23b2c077ecadac1fa843180674b6ea3295 | refs/heads/master | 2020-04-28T08:51:32.728010 | 2019-04-05T03:20:44 | 2019-04-05T03:20:44 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 498 | py | def swap(list, a, b):
temp = list[b]
list[b] = list[a]
list[a] = temp
def next_permutation(c_list, n):
i = n - 1
while c_list[i - 1] >= c_list[i]:
i -= 1
if i <= 0: return False
j = n - 1
while c_list[j] <= c_list[i - 1]:
j -= 1
swap(c_list, j, i - 1)
j = n - 1
while i < j:
swap(c_list, j, i)
i += 1
j -= 1
return c_list
c_list = [7, 2, 3, 6, 5, 4, 1]
n = len(c_list)
print(next_permutation(c_list, n))
| [
"[email protected]"
]
| |
efb691981ff05fe7bcb03faa225d88b4bee1bde0 | 084d1b9cb341a1b943f95e98ee3cf680df502ba9 | /Products/mediaPage/tests/base.py | b0e818b0d28196ee7fc5c4b6020c8236190fd002 | []
| no_license | intk/Products.mediaPage | 629aa7c8f98e308b536f997cafbab177ba6ae1a5 | a3f4b0c900565b438593888a3009f8e7e4867792 | refs/heads/master | 2016-09-06T13:57:17.209247 | 2014-09-18T08:56:37 | 2014-09-18T08:56:37 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,990 | py | """Test setup for integration and functional tests.
When we import PloneTestCase and then call setupPloneSite(), all of
Plone's products are loaded, and a Plone site will be created. This
happens at module level, which makes it faster to run each test, but
slows down test runner startup.
"""
from Products.Five import zcml
from Products.Five import fiveconfigure
from Testing import ZopeTestCase as ztc
from Products.PloneTestCase import PloneTestCase as ptc
from Products.PloneTestCase.layer import onsetup
# When ZopeTestCase configures Zope, it will *not* auto-load products
# in Products/. Instead, we have to use a statement such as:
# ztc.installProduct('SimpleAttachment')
# This does *not* apply to products in eggs and Python packages (i.e.
# not in the Products.*) namespace. For that, see below.
# All of Plone's products are already set up by PloneTestCase.
@onsetup
def setup_product():
"""Set up the package and its dependencies.
The @onsetup decorator causes the execution of this body to be
deferred until the setup of the Plone site testing layer. We could
have created our own layer, but this is the easiest way for Plone
integration tests.
"""
# Load the ZCML configuration for the example.tests package.
# This can of course use <include /> to include other packages.
fiveconfigure.debug_mode = True
import Products.mediaPage
zcml.load_config('configure.zcml', Products.mediaPage)
fiveconfigure.debug_mode = False
# We need to tell the testing framework that these products
# should be available. This can't happen until after we have loaded
# the ZCML. Thus, we do it here. Note the use of installPackage()
# instead of installProduct().
# This is *only* necessary for packages outside the Products.*
# namespace which are also declared as Zope 2 products, using
# <five:registerPackage /> in ZCML.
# We may also need to load dependencies, e.g.:
# ztc.installPackage('borg.localrole')
ztc.installPackage('Products.mediaPage')
# The order here is important: We first call the (deferred) function
# which installs the products we need for this product. Then, we let
# PloneTestCase set up this product on installation.
setup_product()
ptc.setupPloneSite(products=['Products.mediaPage'])
class TestCase(ptc.PloneTestCase):
"""We use this base class for all the tests in this package. If
necessary, we can put common utility or setup code in here. This
applies to unit test cases.
"""
class FunctionalTestCase(ptc.FunctionalTestCase):
"""We use this class for functional integration tests that use
doctest syntax. Again, we can put basic common utility or setup
code in here.
"""
def afterSetUp(self):
roles = ('Member', 'Contributor')
self.portal.portal_membership.addMember('contributor',
'secret',
roles, [])
| [
"[email protected]"
]
| |
8ab113cf60a3a4a75b3d8b50adeeef8e0c253799 | 22b78677bfe20f4c548a8c6cadfaeebcc635a22e | /venv/bin/pip2 | e606e424a07a9bdbdd662dc790e5b6d64708c181 | []
| no_license | mr-kaveh/flasksocialapp | 57778db7bab285d514502d4dd0ef43245a0f1d5c | d9fa096c53b3a202191d2d9e0373ff1b39663421 | refs/heads/master | 2020-04-19T02:33:34.151348 | 2019-02-01T05:12:25 | 2019-02-01T05:12:25 | 167,907,095 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 249 | #!/home/hossein/myScripts/socialApp/venv/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from pip._internal import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(main())
| [
"[email protected]"
]
| ||
a47988e12caea650f9b6dc78153c6e2a74602047 | 5aa0e5f32d529c3321c28d37b0a12a8cf69cfea8 | /client/local_objects/ClientPlayerManager.py | 8acf4ecba25471df1e138e3be612cc0741d8054f | []
| no_license | sheepsy90/survive | 26495f1ff2d8247fbb9470882f8be9f5272e7f2c | 0eddf637be0eacd34415761b78fc2c9d50bc1528 | refs/heads/master | 2021-01-09T05:55:16.546762 | 2017-02-03T20:15:28 | 2017-02-03T20:15:28 | 80,864,391 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 974 | py | # -*- coding:utf-8 -*-
from client.local_objects.PlayerModel import PlayerModel
class ClientPlayerManager(object):
def __init__(self):
self.players = {}
self.me = None
def add_new_player_position(self, player_id, player_name, position, is_moving, is_me):
if player_id not in self.players:
self.players[player_id] = PlayerModel(player_id, player_name, position, is_moving)
else:
self.players[player_id].update_position(position, is_moving)
if is_me:
self.me = self.players[player_id]
def has_me(self):
return self.me is not None
def get_players(self):
return self.players.values()
def remove_player(self, name):
print "REMOVE PLAYER FROM CLIENT"
del self.players[name]
def get_me(self):
return self.me
def set_my_character_condition(self, blurriness, redness):
self.me.set_character_condition(blurriness, redness) | [
"[email protected]"
]
| |
b9cd9b43fb64eb1805b8b9e3a30ddee088c9540c | 76f59c245744e468577a293a0b9b078f064acf07 | /3.longest-substring-without-repeating-characters.py | f5ddb791b9a978f2ed72a471cf53a960cb68a2a9 | []
| no_license | satoshun-algorithm-example/leetcode | c3774f07e653cf58640a6e7239705e58c5abde82 | 16b39e903755dea86f9a4f16df187bb8bbf835c5 | refs/heads/master | 2020-07-01T10:24:05.343283 | 2020-01-13T03:27:27 | 2020-01-13T03:27:27 | 201,144,558 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 562 | py | #
# @lc app=leetcode id=3 lang=python3
#
# [3] Longest Substring Without Repeating Characters
#
class Solution:
def lengthOfLongestSubstring(self, s: str) -> int:
if not s:
return 0
c = 0
for i, _ in enumerate(s):
characters = ''
for j in s[i:]:
if j in characters:
break
characters += j
if len(characters) > c:
c = len(characters)
if len(characters) > c:
c = len(characters)
return c
| [
"[email protected]"
]
| |
f7876ee7e8a2e78ce0603729c772cba69f9f259d | f61db5940e29773aba8fc342a21de00e91a5ab2e | /base/day15/note/demo2/testcases.py | d496be253d9081853b34930bf67e2d3b34b715c9 | []
| no_license | liyaozr/project | c17a9dcbcda38fe9a15ec4c41a01242a13695991 | 0b0fc10e267ceb19f6792b490fede177035459fe | refs/heads/master | 2020-11-29T18:38:03.297369 | 2020-03-10T01:11:00 | 2020-03-10T01:11:00 | 230,190,916 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,518 | py | """
============================
Author:柠檬班-木森
Time:2020/2/7 21:29
E-mail:[email protected]
Company:湖南零檬信息技术有限公司
============================
"""
import unittest
from py26_15day.demo2.register import register
from py26_15day.demo2.login import login_check
from py26_15day.demo2.readexcel import ReadExcel
class RegisterTestCase(unittest.TestCase):
excel = ReadExcel("cases.xlsx", "register")
def __init__(self, methodName, case_data):
self.case_data = case_data
# 调用父类的init的方法
super().__init__(methodName)
def test_register(self):
# 第一步:准备用例的数据
# 预期结果:
excepted = eval(self.case_data["expected"])
# 参数:data
data = eval(self.case_data["data"])
# 用例所在行
row = self.case_data["case_id"] + 1
# 第二步:调用被测试的功能函数,传入参数,获取实际结果:
res = register(*data)
# 第三步:断言(比对预期结果和实际结果)
try:
self.assertEqual(excepted, res)
except AssertionError as e:
# 在excel中写入用例未通过
self.excel.write_data(row=row, column=5, value="未通过")
raise e
else:
# 在excel中写入用例通过
self.excel.write_data(row=row, column=5, value="通过")
class LoginTestCase(unittest.TestCase):
excel = ReadExcel("cases.xlsx", "login")
def __init__(self, methodName, case_data):
self.case_data = case_data
# 调用父类的init的方法
super().__init__(methodName)
def test_login(self):
# 第一步:准备用例的数据
# 预期结果:
expected = eval(self.case_data["expected"])
# 参数:data
data = eval(self.case_data["data"])
# 用例所在行
row = self.case_data["case_id"] + 1
# 第二步:调用被测试的功能函数,传入参数,获取实际结果:
res = login_check(*data)
# 第三步:断言(比对预期结果和实际结果)
try:
self.assertEqual(expected, res)
except AssertionError as e:
# 在excel中写入用例未通过
self.excel.write_data(row=row, column=5, value="未通过")
raise e
else:
# 在excel中写入用例通过
self.excel.write_data(row=row, column=5, value="通过")
| [
"[email protected]"
]
| |
d23f0fdc9f79350dc59b7bbff909a0248f0ab93b | 4e59f5fbd1e777f2488eb2a46deca34acf813979 | /clients/admin.py | b19f3d0c3e98075355f6e14f4524c33f0aa4eac9 | []
| no_license | BoughezalaMohamedAimen/laser | f8c051be5c85be8f09b3ac4272065ce24af26555 | 1ac9c97b8ead4edcfcadeaafa0ee567f3f3d3d0d | refs/heads/master | 2020-08-09T19:26:21.516671 | 2019-10-10T10:30:54 | 2019-10-10T10:30:54 | 214,154,428 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 156 | py | from django.contrib import admin
# Register your models here.
from .models import *
admin.site.register(SeanceHistorique)
admin.site.register(Abonnement)
| [
"[email protected]"
]
| |
4e7b737ef7c0dfbd4334a02c47e6e82ee662b5e9 | bec623f2fab5bafc95eb5bd95e7527e06f6eeafe | /django-shared/treemenus/migrations/0003_menuitem_caption_pt.py | 7f07c34686f12f29e3581c5062d3499f2d994595 | []
| no_license | riyanhax/a-demo | d714735a8b59eceeb9cd59f788a008bfb4861790 | 302324dccc135f55d92fb705c58314c55fed22aa | refs/heads/master | 2022-01-21T07:24:56.468973 | 2017-10-12T13:48:55 | 2017-10-12T13:48:55 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 468 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('treemenus', '0002_menuitem_caption_pl'),
]
operations = [
migrations.AddField(
model_name='menuitem',
name='caption_pt',
field=models.CharField(max_length=150, null=True, verbose_name='Caption Portuguese', blank=True),
),
]
| [
"ibalyko@ubuntu-server-16-04"
]
| ibalyko@ubuntu-server-16-04 |
7666a5cf1d0b282967767d4acfacfbe8ebb452cc | 9a871ca18c94f080f51fab53de90ecec6bc4ca65 | /django_dzenlog/signals.py | 6ed5ebfd3059816ef7ef979ad003a885901b234d | []
| no_license | IIKovalenko/django-dzenlog | dbfc302ba70d39be28176b029f91d844faa83847 | 45025d20e6d56322fece40f81e0ab370beed2b9c | refs/heads/master | 2020-06-02T15:24:35.009989 | 2010-12-04T18:52:06 | 2010-12-04T18:52:06 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 86 | py | from django.dispatch import Signal
published = Signal(providing_args=['instance',])
| [
"[email protected]"
]
| |
929b7bcac75f34355aa13b3f1e3a5faab8b98760 | f3b233e5053e28fa95c549017bd75a30456eb50c | /mcl1_input/L29/29-40_wat_20Abox/set_4.py | e1cd145f4856de5364ea6cdd85c38159e2eee008 | []
| no_license | AnguseZhang/Input_TI | ddf2ed40ff1c0aa24eea3275b83d4d405b50b820 | 50ada0833890be9e261c967d00948f998313cb60 | refs/heads/master | 2021-05-25T15:02:38.858785 | 2020-02-18T16:57:04 | 2020-02-18T16:57:04 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 738 | py | import os
dir = '/mnt/scratch/songlin3/run/mcl1/L29/wat_20Abox/ti_one-step/29_40/'
filesdir = dir + 'files/'
temp_prodin = filesdir + 'temp_prod_4.in'
temp_pbs = filesdir + 'temp_4.pbs'
lambd = [ 0.00922, 0.04794, 0.11505, 0.20634, 0.31608, 0.43738, 0.56262, 0.68392, 0.79366, 0.88495, 0.95206, 0.99078]
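# For every lambda window, copy the templated production input and PBS script
# into that window's directory and substitute the XXX placeholder with the value.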
for j in lambd:
os.chdir("%6.5f" %(j))
workdir = dir + "%6.5f" %(j) + '/'
#prodin
prodin = workdir + "%6.5f_prod_4.in" %(j)
os.system("cp %s %s" %(temp_prodin, prodin))
os.system("sed -i 's/XXX/%6.5f/g' %s" %(j, prodin))
#PBS
pbs = workdir + "%6.5f_4.pbs" %(j)
os.system("cp %s %s" %(temp_pbs, pbs))
os.system("sed -i 's/XXX/%6.5f/g' %s" %(j, pbs))
#submit pbs
#os.system("qsub %s" %(pbs))
os.chdir(dir)
| [
"[email protected]"
]
| |
4b964397df7ef88fabea054402bb1db1ad59d9b4 | 7f43264f32a57599d87fe8be8e0d748d89abecab | /api_v0/ElasticsearchURL.py | 46da6ee9e31f2e2d8f574166965a86e3a980e86c | []
| no_license | chair300/rsss_api | e13215439be1bfaa536ea7be5bfe4cc657bb0663 | 03866b0f5052dc81b61cab3b1c2a451d8e2ec449 | refs/heads/master | 2023-03-19T02:38:09.963553 | 2018-01-17T00:41:18 | 2018-01-17T00:41:18 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,498 | py | from rest_framework.response import Response
from api_v0.serializers import ScoresRowSerializer
from django.conf import settings
import requests
import random
import json
#make all the stuff in views.py use this class.
#NOTE: if it turns out that making a query to check if a server is response is
#This is the way to do it.
#TOO overhead-intensive, use the following algorithm:
# Fully formulate the URL as it would be used (randomly shuffle the ES boxes)
# make the request as-is, and try/catch to detect timeout and/or connection errors.
# If there's a dropped request; then pop the next machine off of the shuffled list of
# available ES nodes; try that URL.
# Either end up returning the result set; or a 500 status Response with a descriptive
# message about Elasticsearch being down.
class ElasticsearchURL(object):
#if operation is None, id_to_get had better be there.
#if scroll duration is included, this is a scrolling download.
def __init__(self, data_type, operation="_search",
from_result=None, page_size=None, id_to_get=None,
scroll_info=None):
url_base = self.get_base_es_url()
name_of_index = None
if data_type == 'atsnp_output':
name_of_index = settings.ES_INDEX_NAMES['ATSNP_DATA']
elif data_type == 'gencode_gene_symbols':
name_of_index = settings.ES_INDEX_NAMES['GENE_NAMES']
elif data_type == 'sequence':
name_of_index = settings.ES_INDEX_NAMES['SNP_INFO']
elif data_type == 'motif_bits':
name_of_index = settings.ES_INDEX_NAMES['MOTIF_BITS']
#print "url_base : " + url_base
#print "name_of_index: " + name_of_index
#print "data_type: " + data_type
#print "operation: " + operation
url_parts = [url_base, name_of_index, data_type]
get_args = []
if id_to_get is not None:
#throw a nice exception if this is invalid?
url_parts.append(id_to_get)
else:
#this is a search.
url_parts.append(operation)
get_args.append(self.get_page_size(page_size))
if scroll_info is not None:
if 'duration' in scroll_info:
get_args.append('scroll=' + scroll_info['duration'])
else:
#Use a bare URL to continue a scroll
get_args = []
url_parts = [url_base, operation]
url_parts.append('scroll')
if from_result is not None:
get_args.append("from=" + str(from_result))
bare_url = "/".join(url_parts)
if len(get_args) > 0:
self.url = '?'.join([bare_url,'&'.join(get_args)])
else:
self.url = bare_url
#print "url created: " + self.url
def setup_scroll_args(self, scroll_info):
scroll_args = []
if 'duration' in scroll_info:
scroll_args.append('scroll=' + scroll_info['duration'])
return scroll_args
#for searches
def get_page_size(self, page_size):
if page_size is None:
page_size = settings.ELASTICSEARCH_PAGE_SIZE
return "size=" + str(page_size)
def get_base_es_url(self):
machines_to_try = settings.ELASTICSEARCH_URLS[:]
random.shuffle(machines_to_try)
return machines_to_try.pop()
def get_url(self):
return self.url
| [
"[email protected]"
]
| |
7058046baa3c952775c38a273ce86611b6ff8399 | 15f321878face2af9317363c5f6de1e5ddd9b749 | /solutions_python/Problem_117/516.py | 2f37f445e68945c2132f222f58ca3bd97747e8c4 | []
| no_license | dr-dos-ok/Code_Jam_Webscraper | c06fd59870842664cd79c41eb460a09553e1c80a | 26a35bf114a3aa30fc4c677ef069d95f41665cc0 | refs/heads/master | 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,241 | py | lines = open('data.txt').read()
output = open('output.txt', 'w')
lines = lines.splitlines()
cases_num = int(lines[0])
lines = lines[1:]
cur_index = 0
for i in range(cases_num):
case_num = i + 1
m, n = lines[cur_index].split()
n = int(n)
m = int(m)
cur_index += 1
matrix = []
for row_ind in range(m):
line = lines[row_ind + cur_index]
matrix.append([int(x) for x in line.split()])
rows = []
columns = []
for row in matrix:
rows.append(sorted(set(row)))
for column in zip(*matrix):
columns.append(sorted(set(column)))
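    # A mowing pattern exists iff every cell equals the maximum of its row or of
    # its column; rows/columns hold the sorted, de-duplicated values used below.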
def is_lawnable():
for i in range(m):
for j in range(n):
elem = matrix[i][j]
i_row = rows[i].index(elem)
j_column = columns[j].index(elem)
if len(rows[i]) > i_row + 1 and len(columns[j]) > j_column + 1:
return False
return True
is_good = is_lawnable()
cur_index += m
if is_good:
output.write('Case #{0}:'.format(case_num) + ' YES\n')
print 'Case #{0}:'.format(case_num), 'YES'
else:
output.write('Case #{0}:'.format(case_num) + ' NO\n')
print 'Case #{0}:'.format(case_num), 'NO'
| [
"[email protected]"
]
| |
9fee9927053a85fe0988554aa2c1cf7fc746326b | 8a7950440a4a8015523a1e1474a3bfc3aaa95782 | /email_smtplib/basic/email_send2.py | d3b046d99e4e22a27c9ecef541a328042c1dbfab | []
| no_license | SatishNitk/Python_Web_Scrapper | bddb320b86a8942b6b3c346eb09f09b933be5b37 | f257ad2e6d2053f0f86443905de87ccf81df0c62 | refs/heads/master | 2020-05-07T19:51:51.816353 | 2019-07-07T13:31:27 | 2019-07-07T13:31:27 | 180,826,120 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,037 | py | from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from smtplib import SMTP, SMTPException,SMTPAuthenticationError
host = "smtp.gmail.com"
port = 587
email = "[email protected]"
password = ""
from1 = "[email protected]"
to_list = ["[email protected]"]
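# Typical Gmail submission flow: EHLO, upgrade to TLS with STARTTLS, EHLO again,
# then log in; the message is built as multipart/alternative (plain text + HTML).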
try:
email_obj = SMTP(host, port)
email_obj.ehlo()
email_obj.starttls()
email_obj.ehlo()
email_obj.login(email,password)
plain_text = "just a simple text message"
html_txt = """
<html>
<body>
<h1>
This paragraph
contains a lot of lines
in the source code,
but the browser
ignores it.
</h1>
</body>
</html>
"""
the_msg = MIMEMultipart("alternative")
the_msg['Subject'] = "Hello there"
the_msg['From'] = from1
part1 = MIMEText(plain_text, "plain")
part2 = MIMEText(html_txt, "html")
the_msg.attach(part1)
the_msg.attach(part2)
print(the_msg.as_string())
email_obj.sendmail(from1,to_list,the_msg.as_string())
except SMTPException:
print("exception occured in sending rmail check once whole code")
| [
"[email protected]"
]
| |
50fb214882899ea973df69630262b57e20b57534 | 62e58c051128baef9452e7e0eb0b5a83367add26 | /edifact/D94B/CONQVAD94BUN.py | 8785753a062814ca0ea352440adb08d86535cd20 | []
| no_license | dougvanhorn/bots-grammars | 2eb6c0a6b5231c14a6faf194b932aa614809076c | 09db18d9d9bd9d92cefbf00f1c0de1c590fe3d0d | refs/heads/master | 2021-05-16T12:55:58.022904 | 2019-05-17T15:22:23 | 2019-05-17T15:22:23 | 105,274,633 | 0 | 0 | null | 2017-09-29T13:21:21 | 2017-09-29T13:21:21 | null | UTF-8 | Python | false | false | 1,437 | py | #Generated by bots open source edi translator from UN-docs.
from bots.botsconfig import *
from edifact import syntax
from recordsD94BUN import recorddefs
structure = [
{ID: 'UNH', MIN: 1, MAX: 1, LEVEL: [
{ID: 'BGM', MIN: 1, MAX: 1},
{ID: 'DTM', MIN: 1, MAX: 9},
{ID: 'AUT', MIN: 0, MAX: 2},
{ID: 'FTX', MIN: 0, MAX: 5},
{ID: 'RFF', MIN: 0, MAX: 9, LEVEL: [
{ID: 'DTM', MIN: 0, MAX: 5},
]},
{ID: 'NAD', MIN: 1, MAX: 9, LEVEL: [
{ID: 'LOC', MIN: 0, MAX: 25},
{ID: 'FII', MIN: 0, MAX: 5},
{ID: 'RFF', MIN: 0, MAX: 10, LEVEL: [
{ID: 'DTM', MIN: 0, MAX: 5},
]},
{ID: 'DOC', MIN: 0, MAX: 5, LEVEL: [
{ID: 'DTM', MIN: 0, MAX: 5},
]},
{ID: 'CTA', MIN: 0, MAX: 5, LEVEL: [
{ID: 'COM', MIN: 0, MAX: 5},
]},
]},
{ID: 'BII', MIN: 0, MAX: 100000, LEVEL: [
{ID: 'RCS', MIN: 0, MAX: 1},
{ID: 'QTY', MIN: 1, MAX: 6},
{ID: 'PRI', MIN: 0, MAX: 1},
{ID: 'LIN', MIN: 1, MAX: 100, LEVEL: [
{ID: 'IMD', MIN: 0, MAX: 1000, LEVEL: [
{ID: 'RFF', MIN: 0, MAX: 5},
{ID: 'GIS', MIN: 0, MAX: 5},
]},
]},
{ID: 'TAX', MIN: 0, MAX: 5, LEVEL: [
{ID: 'MOA', MIN: 0, MAX: 1},
{ID: 'LOC', MIN: 0, MAX: 5},
]},
]},
{ID: 'CNT', MIN: 0, MAX: 5},
{ID: 'UNT', MIN: 1, MAX: 1},
]},
]
| [
"[email protected]"
]
| |
d5e5e491086979335728a5ce09637227e79fbd84 | 551b75f52d28c0b5c8944d808a361470e2602654 | /huaweicloud-sdk-rabbitmq/huaweicloudsdkrabbitmq/v2/model/show_background_task_request.py | 15f666a3a90534bc5325dbb4cc52bae5849ca114 | [
"Apache-2.0"
]
| permissive | wuchen-huawei/huaweicloud-sdk-python-v3 | 9d6597ce8ab666a9a297b3d936aeb85c55cf5877 | 3683d703f4320edb2b8516f36f16d485cff08fc2 | refs/heads/master | 2023-05-08T21:32:31.920300 | 2021-05-26T08:54:18 | 2021-05-26T08:54:18 | 370,898,764 | 0 | 0 | NOASSERTION | 2021-05-26T03:50:07 | 2021-05-26T03:50:07 | null | UTF-8 | Python | false | false | 3,478 | py | # coding: utf-8
import pprint
import re
import six
class ShowBackgroundTaskRequest:
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'instance_id': 'str',
'task_id': 'str'
}
attribute_map = {
'instance_id': 'instance_id',
'task_id': 'task_id'
}
def __init__(self, instance_id=None, task_id=None):
"""ShowBackgroundTaskRequest - a model defined in huaweicloud sdk"""
self._instance_id = None
self._task_id = None
self.discriminator = None
self.instance_id = instance_id
self.task_id = task_id
@property
def instance_id(self):
"""Gets the instance_id of this ShowBackgroundTaskRequest.
实例ID。
:return: The instance_id of this ShowBackgroundTaskRequest.
:rtype: str
"""
return self._instance_id
@instance_id.setter
def instance_id(self, instance_id):
"""Sets the instance_id of this ShowBackgroundTaskRequest.
实例ID。
:param instance_id: The instance_id of this ShowBackgroundTaskRequest.
:type: str
"""
self._instance_id = instance_id
@property
def task_id(self):
"""Gets the task_id of this ShowBackgroundTaskRequest.
任务ID。
:return: The task_id of this ShowBackgroundTaskRequest.
:rtype: str
"""
return self._task_id
@task_id.setter
def task_id(self, task_id):
"""Sets the task_id of this ShowBackgroundTaskRequest.
任务ID。
:param task_id: The task_id of this ShowBackgroundTaskRequest.
:type: str
"""
self._task_id = task_id
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ShowBackgroundTaskRequest):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| [
"[email protected]"
]
| |
a7e3300d975a841171e8c857c965142b30239106 | 23631af0987b3f1d30b0bf8bfcea1bd63159eeba | /gate_api/api/__init__.py | 56f948e75c0f565a6647dec42431e9458c3446f2 | []
| no_license | xuvw/gateapi-python | 08c3c72ff0e2c4713bf3a2ffe0b15d05e57491ca | 1a3f3551cba4a756f76f17b070c3e0c5ff2e88ea | refs/heads/master | 2020-05-25T14:33:35.592775 | 2019-04-02T08:50:25 | 2019-04-02T08:50:25 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 223 | py | from __future__ import absolute_import
# flake8: noqa
# import apis into api package
from gate_api.api.futures_api import FuturesApi
from gate_api.api.margin_api import MarginApi
from gate_api.api.spot_api import SpotApi
| [
"[email protected]"
]
| |
665b261c26c914af9be8d0cc6ca2991861d06d4a | 1d164438ac1ba7c88aeabb7c9ea39b58680ba79c | /django_postgres_matviews/management/commands/drop_matviews.py | da51ebd3f07c9398a3e198e8445d967b5dc87d2b | [
"Unlicense"
]
| permissive | andrewp-as-is/django-postgres-matviews.py | fac3288f199f013a0421ae23f634ea7082020181 | ff7d76f885318e208b81be7f5dcaa71ff7fc4fb3 | refs/heads/master | 2023-01-20T18:38:58.232754 | 2020-12-03T20:46:40 | 2020-12-03T20:46:40 | 285,872,756 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 238 | py | from django.core.management.base import BaseCommand
from django.db import connection
from django_postgres_matviews.utils import drop_matviews
class Command(BaseCommand):
def handle(self, *args, **options):
drop_matviews()
| [
"[email protected]"
]
| |
5bfee7606764826ff036404a7b07620623e24a96 | 88745dafec989d39726ca2e4d7f6cfb20bb60f5d | /tests/unit_tests/modules/s3/s3gis/BingLayer.py | c25ecc6c14edf56ab2f86ebbe914dc43f8fc5a3b | [
"MIT",
"LicenseRef-scancode-unknown-license-reference"
]
| permissive | sungkomp/SAMBRO | f1ced7c9d198ccfe30aaa1bf883c2f8a7478fffb | 4618d785d03424d122206d88d9ebfb6971486e2c | refs/heads/master | 2020-05-30T08:41:26.855362 | 2019-10-15T02:48:47 | 2019-10-15T02:48:47 | 69,448,194 | 1 | 0 | NOASSERTION | 2019-10-15T04:25:13 | 2016-09-28T09:31:35 | Python | UTF-8 | Python | false | false | 784 | py |
s3gis_tests = load_module("tests.unit_tests.modules.s3.s3gis")
def test_BingLayer():
s3gis_tests.layer_test(
db,
db.gis_layer_bing,
dict(
name = "Test Bing Layer",
description = "Test Bing layer",
enabled = True,
created_on = datetime.datetime.now(),
modified_on = datetime.datetime.now(),
aerial_enabled = True,
road_enabled = True,
hybrid_enabled = True,
apikey = "FAKEAPIKEY",
),
"S3.gis.Bing",
{
"Aerial": u"Bing Satellite",
"ApiKey": u"FAKEAPIKEY",
"Hybrid": u"Bing Hybrid",
"Road": u"Bing Roads",
},
session = session,
request = request,
)
| [
"[email protected]"
]
| |
127e521fc174bcb018737f195d7d9d13e672b726 | 9b4fe9c2693abc6ecc614088665cbf855971deaf | /78.subsets.py | e02e0a2c09554ecf70645818837f819efcf53e44 | [
"MIT"
]
| permissive | windard/leeeeee | e795be2b9dcabfc9f32fe25794878e591a6fb2c8 | 0dd67edca4e0b0323cb5a7239f02ea46383cd15a | refs/heads/master | 2022-08-12T19:51:26.748317 | 2022-08-07T16:01:30 | 2022-08-07T16:01:30 | 222,122,359 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,215 | py | # coding=utf-8
#
# @lc app=leetcode id=78 lang=python
#
# [78] Subsets
#
# https://leetcode.com/problems/subsets/description/
#
# algorithms
# Medium (51.03%)
# Likes: 2192
# Dislikes: 54
# Total Accepted: 396.6K
# Total Submissions: 731K
# Testcase Example: '[1,2,3]'
#
# Given a set of distinct integers, nums, return all possible subsets (the
# power set).
#
# Note: The solution set must not contain duplicate subsets.
#
# Example:
#
#
# Input: nums = [1,2,3]
# Output:
# [
# [3],
# [1],
# [2],
# [1,2,3],
# [1,3],
# [2,3],
# [1,2],
# []
# ]
#
#
class Solution(object):
def _subsets(self, nums):
"""
:type nums: List[int]
:rtype: List[List[int]]
"""
# DFS
# 组合
# 结果正确,顺序不对
result = temp = [[]]
last = []
while temp:
temp = []
for num in nums:
if not last:
temp.append(last + [num])
else:
for l in last:
if num > max(l):
temp.append(l + [num])
last = temp
result.extend(last)
return result
def __subsets(self, nums):
"""
:type nums: List[int]
:rtype: List[List[int]]
"""
result = []
length = len(nums)
data = {value:2**key for key,value in enumerate(nums)}
for i in range(2**length):
temp = []
for key,value in data.items():
if value & i != 0:
temp.append(key)
result.append(temp)
return result
def ___subsets(self, nums):
"""
:type nums: List[int]
:rtype: List[List[int]]
"""
result = []
length = len(nums)
for i in range(1<<length):
temp = []
for key,value in enumerate(nums):
if 1<<key & i != 0:
temp.append(value)
result.append(temp)
return result
def ____subsets(self, nums):
"""
:type nums: List[int]
:rtype: List[List[int]]
"""
# Best of All
result = [[]]
for n in nums:
current = result[:]
for t in current:
result.append(t+[n])
return result
def _____subsets(self, nums):
"""
:type nums: List[int]
:rtype: List[List[int]]
"""
return self.helper(nums, 0, [[]])
def helper(self, nums, index, result):
if index >= len(nums):
return result
temp = result[:]
for t in temp:
result.append(t+[nums[index]])
return self.helper(nums, index+1, result)
def subsets(self, nums):
"""
:type nums: List[int]
:rtype: List[List[int]]
"""
start = 0
e = len(nums)
result = []
def backtrack(s, p):
result.append(p)
for i in range(s, e):
backtrack(i+1, p+[nums[i]])
backtrack(start, [])
return result
# if __name__ == "__main__":
# s = Solution()
# print s.subsets([1,2,3])
| [
"[email protected]"
]
| |
329444be3e093f08598e5a613a554721a6d6e9b1 | f99e42d181267f35ffaa7a47106d188bbe9f1094 | /codekata/91.py | 086d614fa3eba2704fdf10a7a701cdfee2d485c5 | []
| no_license | DEEPTHA26/guvi | 92d0992548a8ccbffc5e2ba8b702184cc2c42929 | 84816f470c08ede86a87b010cfd5ef5573d045f8 | refs/heads/master | 2020-06-15T20:29:44.864601 | 2019-06-30T15:18:17 | 2019-06-30T15:18:17 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 76 | py | z,x,c=map(int,input().split())
v=z*x*c
t=(2*z*x)+(2*x*c)+(2*c*z)
print(t,v)
| [
"[email protected]"
]
| |
246e17ff0c48c787a0a932071216fd5a5e87c321 | 770e3f4fcb3d2f96ea8cc36bfa47625778c40c71 | /unit_tests/test_provides.py | 154f566f2b1cbe63d5f075866c676d2654f56ed0 | []
| no_license | openstack-charmers/charm-interface-pacemaker-remote | 8d12a0594bc580f74c9a591b44429320912c8cbf | f1297f72a5c6f8dc4f89461850a7d8ebaa01cf04 | refs/heads/master | 2020-04-30T03:33:29.086571 | 2019-03-20T07:31:55 | 2019-03-20T07:31:55 | 176,589,246 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,454 | py | import unittest
import mock
with mock.patch('charmhelpers.core.hookenv.metadata') as _meta:
    _meta.return_value = 'ss'
import provides
_hook_args = {}
TO_PATCH = [
]
def mock_hook(*args, **kwargs):
def inner(f):
# remember what we were passed. Note that we can't actually determine
# the class we're attached to, as the decorator only gets the function.
_hook_args[f.__name__] = dict(args=args, kwargs=kwargs)
return f
return inner
class _unit_mock:
def __init__(self, unit_name, received=None):
self.unit_name = unit_name
self.received = received or {}
class _relation_mock:
def __init__(self, application_name=None, units=None):
self.to_publish_raw = {}
self.to_publish = {}
self.application_name = application_name
self.units = units
class TestPacemakerRemoteProvides(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls._patched_hook = mock.patch('charms.reactive.when', mock_hook)
cls._patched_hook_started = cls._patched_hook.start()
# force provides to rerun the mock_hook decorator:
# try except is Python2/Python3 compatibility as Python3 has moved
# reload to importlib.
try:
reload(provides)
except NameError:
import importlib
importlib.reload(provides)
@classmethod
def tearDownClass(cls):
cls._patched_hook.stop()
cls._patched_hook_started = None
cls._patched_hook = None
# and fix any breakage we did to the module
try:
reload(provides)
except NameError:
import importlib
importlib.reload(provides)
def patch(self, method):
_m = mock.patch.object(self.obj, method)
_mock = _m.start()
self.addCleanup(_m.stop)
return _mock
def setUp(self):
self.relation_obj = provides.PacemakerRemoteProvides(
'some-relation',
[])
self._patches = {}
self._patches_start = {}
self.obj = provides
for method in TO_PATCH:
setattr(self, method, self.patch(method))
def tearDown(self):
self.relation_obj = None
for k, v in self._patches.items():
v.stop()
setattr(self, k, None)
self._patches = None
self._patches_start = None
def patch_relation_obj(self, attr, return_value=None):
mocked = mock.patch.object(self.relation_obj, attr)
self._patches[attr] = mocked
started = mocked.start()
started.return_value = return_value
self._patches_start[attr] = started
setattr(self, attr, started)
def test_publish_info(self):
mock_rel = _relation_mock()
self.relation_obj._relations = [mock_rel]
self.relation_obj.publish_info(
'node1.az1.local',
stonith_hostname='node1.stonith',
enable_resources=True)
expect = {
'remote-hostname': 'node1.az1.local',
'stonith-hostname': 'node1.stonith',
'enable-resources': True}
self.assertEqual(
mock_rel.to_publish,
expect)
def test_get_pacemaker_key(self):
unit1 = _unit_mock(
'unit1',
received={'pacemaker-key': 'cG1ha2Vya2V5MQo='})
mock_rel = _relation_mock(units=[unit1])
self.relation_obj._relations = [mock_rel]
self.assertEqual(
self.relation_obj.get_pacemaker_key(),
b'pmakerkey1\n')
def test_get_pacemaker_key_inconsistent(self):
unit1 = _unit_mock(
'unit1',
received={'pacemaker-key': 'cG1ha2Vya2V5MQo='})
unit2 = _unit_mock(
'unit2',
received={'pacemaker-key': 'cG1ha2Vya2V5Mgo='})
mock_rel = _relation_mock(units=[unit1, unit2])
self.relation_obj._relations = [mock_rel]
with self.assertRaises(Exception):
self.relation_obj.get_pacemaker_key()
def test_get_pacemaker_key_missing(self):
unit1 = _unit_mock(
'unit1',
received={})
unit2 = _unit_mock(
'unit2',
received={})
mock_rel = _relation_mock(units=[unit1, unit2])
self.relation_obj._relations = [mock_rel]
self.assertEqual(
self.relation_obj.get_pacemaker_key(),
None)
| [
"[email protected]"
]
| |
894b732050372338c14fa012e7f9b16f6e1eadbf | 11812a0cc7b818292e601ecdd4aa4c4e03d131c5 | /02_多任务/3_协程/hm_15_使用协程完成多任务终极.py | ed3306cb3d774702e938b02a0d1ebc14291efd90 | []
| no_license | SunshineFaxixi/Python_Learning | f1e55adcfa898489cc9146ccfb220f0b48a31a22 | ab3ca44d013311b6de02124091acc4c36a83c4d9 | refs/heads/master | 2021-08-16T05:47:29.963118 | 2021-01-04T13:48:30 | 2021-01-04T13:48:30 | 238,857,341 | 1 | 0 | null | 2020-03-03T13:53:08 | 2020-02-07T06:21:46 | HTML | UTF-8 | Python | false | false | 409 | py | import gevent
import time
import random
from gevent import monkey
def coroutine_work(coroutine_name):
for i in range(10):
print(coroutine_name, i)
time.sleep(random.random())
def main():
    monkey.patch_all()  # apply gevent monkey patching
gevent.joinall([
gevent.spawn(coroutine_work, "work1"),
gevent.spawn(coroutine_work, "work2")
])
if __name__ == "__main__":
main() | [
"[email protected]"
]
| |
a9dd8620e61118abf0707b5fb0f71735b60984ba | b45d66c2c009d74b4925f07d0d9e779c99ffbf28 | /gp/business_logic/business_objects/monopoly.py | f40bcc4ebbf90d3908c5f5b1da8a279f7018e9f4 | []
| no_license | erezrubinstein/aa | d96c0e39762fe7aaeeadebbd51c80b5e58576565 | a3f59ba59519183257ed9a731e8a1516a4c54b48 | refs/heads/master | 2021-03-12T23:44:56.319721 | 2016-09-18T23:01:17 | 2016-09-18T23:01:17 | 22,665,501 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 826 | py | import datetime
__author__ = 'erezrubinstein'
class Monopoly(object):
def __init__(self, store_id, monopoly_type_id, trade_area_id, start_date, end_date):
self.store_id = store_id
self.monopoly_type_id = monopoly_type_id
self.trade_area_id = trade_area_id
self.start_date = start_date
self.end_date = end_date
def __eq__(self, other):
# sometimes mongo selects the start date slightly off. so this just makes sure they're within one seconds
return self.store_id == other.store_id and self.monopoly_type_id == other.monopoly_type_id and self.trade_area_id == other.trade_area_id and \
(other.start_date - self.start_date) < datetime.timedelta(seconds = 1) and \
(other.end_date - self.end_date) < datetime.timedelta(seconds = 1) | [
"[email protected]"
]
| |
db6dac8b0b6013de4ea57a1b57fa20f6b8d368f8 | 0ddcfcbfc3faa81c79e320c34c35a972dab86498 | /puzzles/add_and_search_word.py | 13c99ec33dbc7abde7199d0dc2552efa2636dc28 | []
| no_license | IvanWoo/coding-interview-questions | 3311da45895ac4f3c394b22530079c79a9215a1c | 1312305b199b65a11804a000432ebe28d1fba87e | refs/heads/master | 2023-08-09T19:46:28.278111 | 2023-06-21T01:47:07 | 2023-06-21T01:47:07 | 135,307,912 | 0 | 0 | null | 2023-07-20T12:14:38 | 2018-05-29T14:24:43 | Python | UTF-8 | Python | false | false | 2,277 | py | # https://leetcode.com/problems/add-and-search-word-data-structure-design/
"""
Design a data structure that supports the following two operations:
void addWord(word)
bool search(word)
search(word) can search a literal word or a regular expression string containing only letters a-z or .. A . means it can represent any one letter.
Example:
addWord("bad")
addWord("dad")
addWord("mad")
search("pad") -> false
search("bad") -> true
search(".ad") -> true
search("b..") -> true
Note:
You may assume that all words are consist of lowercase letters a-z.
"""
from dataclasses import dataclass, field
from typing import Any, Dict, Optional
@dataclass
class TrieNode:
children: Dict[str, "TrieNode"] = field(default_factory=dict)
# we don't need to save the val, simply using a isEnd flag is enough
value: Optional[Any] = None
class WordDictionary:
def __init__(self):
"""
Initialize your data structure here.
"""
self.root = TrieNode()
def addWord(self, word: str) -> None:
"""
Adds a word into the data structure.
"""
node = self.root
for char in word:
if char not in node.children:
node.children[char] = TrieNode()
node = node.children[char]
node.value = word
def searchHelper(self, word: str, index: int, node: TrieNode) -> bool:
if index == len(word):
return node.value != None
if word[index] == ".":
return any(
[
self.searchHelper(word, index + 1, node.children[child])
for child in node.children
]
)
if word[index] not in node.children:
return False
return self.searchHelper(word, index + 1, node.children[word[index]])
def search(self, word: str) -> bool:
"""
Returns if the word is in the data structure. A word could contain the dot character '.' to represent any one letter.
"""
return self.searchHelper(word, 0, self.root)
if __name__ == "__main__":
obj = WordDictionary()
for word in ["bad", "dad", "mad", "pad"]:
obj.addWord(word)
for word in ["bad", ".ad", "b.."]:
print(f"{obj.search(word)=}")
| [
"[email protected]"
]
| |
9bdd0b54603f4bced8f4c82edb28d3dca4e88841 | 4a191e5aecd53c4cea28482a0179539eeb6cd74b | /blogproject/settings.py | a7f99cc15f936dff53808f9385c2c2992e57abbc | []
| no_license | jiangjingwei/blogproject | 631a2e8e2f72420cce45ddaf152174852376d831 | daf14e88092dc030a3ab0c295ee06fb6b2164372 | refs/heads/master | 2020-03-14T23:29:08.052253 | 2018-05-10T11:35:59 | 2018-05-10T11:35:59 | 131,846,149 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,596 | py | """
Django settings for blogproject project.
Generated by 'django-admin startproject' using Django 1.10.6.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.10/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.10/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'm&=b!x8(eqh&ek!4e_)#h@=g$6sjfd1ulx*exs4$d1!h&tef@@'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ['127.0.0.1', 'localhost', '.jjwxy.com', '139.196.81.14']
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'gunicorn',
'blog',
'comments',
'haystack',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'blogproject.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'blogproject.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.10/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.10/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.10/topics/i18n/
LANGUAGE_CODE = 'zh-Hans'
TIME_ZONE = 'Asia/Shanghai'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.10/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, "static")
HAYSTACK_CONNECTIONS = {
'default': {
'ENGINE': 'blog.whoosh_cn_backend.WhooshEngine',
'PATH': os.path.join(BASE_DIR, 'whoosh_index'),
},
}
HAYSTACK_SEARCH_RESULTS_PER_PAGE = 10
HAYSTACK_SIGNAL_PROCESSOR = 'haystack.signals.RealtimeSignalProcessor' | [
"[email protected]"
]
| |
795985da57f6d924af7ddb13359a42bc964faca8 | 334d0a4652c44d0c313e11b6dcf8fb89829c6dbe | /checkov/terraform/checks/resource/aws/ImagebuilderImageRecipeEBSEncrypted.py | 754146fc760da36332b301b41159066dcef14f23 | [
"Apache-2.0"
]
| permissive | schosterbarak/checkov | 4131e03b88ae91d82b2fa211f17e370a6f881157 | ea6d697de4de2083c8f6a7aa9ceceffd6b621b58 | refs/heads/master | 2022-05-22T18:12:40.994315 | 2022-04-28T07:44:05 | 2022-04-28T07:59:17 | 233,451,426 | 0 | 0 | Apache-2.0 | 2020-03-23T12:12:23 | 2020-01-12T20:07:15 | Python | UTF-8 | Python | false | false | 1,199 | py | from typing import Dict, List, Any
from checkov.terraform.checks.resource.base_resource_check import BaseResourceCheck
from checkov.common.models.enums import CheckCategories, CheckResult
class ImagebuilderImageRecipeEBSEncrypted(BaseResourceCheck):
def __init__(self):
name = "Ensure that Image Recipe EBS Disk are encrypted with CMK"
id = "CKV_AWS_200"
supported_resources = ["aws_imagebuilder_image_recipe"]
categories = [CheckCategories.ENCRYPTION]
super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)
def scan_resource_conf(self, conf: Dict[str, List[Any]]) -> CheckResult:
if conf.get('block_device_mapping'):
mappings = conf.get('block_device_mapping')
for mapping in mappings:
if mapping.get("ebs"):
ebs = mapping["ebs"][0]
if not ebs.get("encrypted"):
return CheckResult.FAILED
if not ebs.get("kms_key_id"):
return CheckResult.FAILED
# pass thru
return CheckResult.PASSED
check = ImagebuilderImageRecipeEBSEncrypted() | [
"[email protected]"
]
| |
0a3714c4393419c790f0b83b5e274f57f3d9effd | c140ad38b1463024e289ceb0d5d6d44a45c91724 | /test/test_sed.py | de9b9a31a4d19a6bce8f59f8af9aff375038c1e6 | [
"Apache-2.0"
]
| permissive | NVIDIA/hpc-container-maker | 3a333526decbd18352ef8d1fb3bec0033be221e8 | 60fd2a51c171258a6b3f93c2523101cb7018ba1b | refs/heads/master | 2023-08-21T13:32:27.132476 | 2023-06-12T21:12:40 | 2023-06-12T21:12:40 | 126,385,168 | 419 | 88 | Apache-2.0 | 2023-09-11T18:33:26 | 2018-03-22T19:26:41 | Python | UTF-8 | Python | false | false | 1,626 | py | # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=invalid-name, too-few-public-methods
"""Test cases for the sed module"""
from __future__ import unicode_literals
from __future__ import print_function
import logging # pylint: disable=unused-import
import unittest
from hpccm.templates.sed import sed
class Test_sed(unittest.TestCase):
def setUp(self):
"""Disable logging output messages"""
logging.disable(logging.ERROR)
def test_basic(self):
"""Basic sed"""
s = sed()
self.assertEqual(s.sed_step(file='foo',
patterns=[r's/a/A/g',
r's/FOO = BAR/FOO = BAZ/g']),
r'''sed -i -e s/a/A/g \
-e 's/FOO = BAR/FOO = BAZ/g' foo''')
def test_nofile(self):
"""No file specified"""
s = sed()
self.assertEqual(s.sed_step(patterns=[r's/a/A/g']), '')
def test_nopatterns(self):
"""No patterns specified"""
s = sed()
self.assertEqual(s.sed_step(file='foo'), '')
| [
"[email protected]"
]
| |
da0e360ef04be5b4a9aef897331aa98e4b9ce97c | 4d93c6999f1c938f12b7ff6fb779557e1a77479f | /chapter11/names.py | 37cab2bbcddd9ca3a3f64613ed94eea1aa8473fc | []
| no_license | MadhuV99/pywork | 5efd1aac74f2c88413bb90bbc9e0d0c250057e7c | 81ea17d8bed89ba57cdd35d2ceb0560f68a21cc8 | refs/heads/main | 2023-01-20T06:50:03.004849 | 2020-11-29T16:01:06 | 2020-11-29T16:01:06 | 312,609,445 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 422 | py | # names.py
from name_function import get_formatted_name
print("Enter 'q' at any time to quit.")
while True:
first = input("\nPlease give me a first name: ")
if first.strip().lower() == 'q':
break
last = input("Please give me a last name: ")
if last.strip().lower() == 'q':
break
formatted_name = get_formatted_name(first, last)
print(f"\tNeatly formatted name: {formatted_name}.") | [
"[email protected]"
]
| |
4869a312afecf5587acf929abf9f9adcd24f3ff4 | 3a50c0712e0a31b88d0a5e80a0c01dbefc6a6e75 | /thrift/lib/python/any/test/serializer.py | e10f1866cd979f95c40dfcde5b071bca2dbe8ba4 | [
"Apache-2.0"
]
| permissive | facebook/fbthrift | 3b7b94a533666c965ce69cfd6054041218b1ea6f | 53cf6f138a7648efe5aef9a263aabed3d282df91 | refs/heads/main | 2023-08-24T12:51:32.367985 | 2023-08-24T08:28:35 | 2023-08-24T08:28:35 | 11,131,631 | 2,347 | 666 | Apache-2.0 | 2023-09-01T01:44:39 | 2013-07-02T18:15:51 | C++ | UTF-8 | Python | false | false | 7,122 | py | #!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations
import typing
import unittest
from apache.thrift.type.standard.thrift_types import TypeName, Void
from apache.thrift.type.type.thrift_types import Type
from folly.iobuf import IOBuf
from testing.thrift_types import Color
from thrift.python.any.serializer import (
deserialize_list,
deserialize_map,
deserialize_primitive,
deserialize_set,
serialize_list,
serialize_map,
serialize_primitive,
serialize_set,
)
from thrift.python.any.typestub import PrimitiveType, SerializableType, TKey, TValue
# @manual=//thrift/test/testset:testset-python-types
from thrift.test.testset import thrift_types
class SerializerTests(unittest.TestCase):
def _test_round_trip(
self, value: PrimitiveType, thrift_type: typing.Optional[Type] = None
) -> None:
iobuf = serialize_primitive(value, thrift_type=thrift_type)
decoded = deserialize_primitive(type(value), iobuf, thrift_type=thrift_type)
self.assertIs(type(value), type(decoded))
if isinstance(value, float):
assert isinstance(decoded, float)
self.assertAlmostEqual(float(value), float(decoded), places=3)
else:
self.assertEqual(value, decoded)
def test_bool_round_trip(self) -> None:
self._test_round_trip(True)
def test_int_round_trip(self) -> None:
self._test_round_trip(42)
def test_float_round_trip(self) -> None:
self._test_round_trip(123456.789)
def test_str_round_trip(self) -> None:
self._test_round_trip("thrift-python")
def test_bytes_round_trip(self) -> None:
self._test_round_trip(b"raw bytes")
def test_iobuf_round_trip(self) -> None:
self._test_round_trip(IOBuf(b"iobuf"))
def test_enum_round_trip(self) -> None:
self._test_round_trip(Color.green)
def _test_round_trip_with_type_names(
self, value: PrimitiveType, type_names: typing.Sequence[TypeName]
) -> None:
for type_name in type_names:
with self.subTest(type_name=type_name):
self._test_round_trip(value, thrift_type=Type(name=type_name))
def test_int_round_trip_with_type_name(self) -> None:
self._test_round_trip_with_type_names(
42,
[
TypeName(byteType=Void.Unused),
TypeName(i16Type=Void.Unused),
TypeName(i32Type=Void.Unused),
TypeName(i64Type=Void.Unused),
],
)
def test_float_round_trip_with_type_name(self) -> None:
self._test_round_trip_with_type_names(
123456.789,
[
TypeName(floatType=Void.Unused),
TypeName(doubleType=Void.Unused),
],
)
def _test_list_round_trip(
self,
value: typing.Sequence[SerializableType],
) -> None:
iobuf = serialize_list(value)
decoded = deserialize_list(
type(value[0]) if value else str,
iobuf,
)
self.assertEqual(value, decoded)
def test_empty_list_round_trip(self) -> None:
self._test_list_round_trip([])
def test_list_of_ints_round_trip(self) -> None:
self._test_list_round_trip([1, 1, 2, 3, 5, 8])
def test_list_of_structs_round_trip(self) -> None:
self._test_list_round_trip(
[
thrift_types.struct_map_string_i32(field_1={"one": 1}),
thrift_types.struct_map_string_i32(field_1={"two": 2}),
]
)
def test_list_of_unions_round_trip(self) -> None:
self._test_list_round_trip(
[
thrift_types.union_map_string_string(field_2={"foo": "bar"}),
thrift_types.union_map_string_string(field_2={"hello": "world"}),
]
)
def test_list_of_exceptions_round_trip(self) -> None:
self._test_list_round_trip(
[
thrift_types.exception_map_string_i64(field_1={"code": 400}),
thrift_types.exception_map_string_i64(field_1={"code": 404}),
]
)
def test_thrift_list_round_trip(self) -> None:
self._test_list_round_trip(
thrift_types.struct_list_i32(field_1=[1, 2, 3, 4]).field_1
)
def _test_set_round_trip(
self,
value: typing.AbstractSet[SerializableType],
) -> None:
iobuf = serialize_set(value)
decoded = deserialize_set(
type(next(iter(value))) if value else bytes, # doesn't matter for empty set
iobuf,
)
self.assertEqual(value, decoded)
def test_empty_set_round_trip(self) -> None:
self._test_set_round_trip(set())
def test_set_of_ints_round_trip(self) -> None:
self._test_set_round_trip({1, 1, 2, 3, 5, 8})
def test_set_of_structs_round_trip(self) -> None:
self._test_set_round_trip(
{
thrift_types.struct_map_string_i32(field_1={"one": 1}),
thrift_types.struct_map_string_i32(field_1={"two": 2}),
}
)
def test_thrift_set_round_trip(self) -> None:
self._test_set_round_trip(
thrift_types.struct_set_i64(field_1={1, 2, 3, 4}).field_1
)
def _test_map_round_trip(
self,
original: typing.Mapping[TKey, TValue],
) -> None:
iobuf = serialize_map(original)
if original:
k, v = next(iter(original.items()))
key_cls = type(k)
value_cls = type(v)
else:
key_cls = bool # doesn't matter for empty dict
value_cls = bool # doesn't matter for empty dict
decoded = deserialize_map(
key_cls,
value_cls,
iobuf,
)
self.assertEqual(original, decoded)
def test_empty_map_round_trip(self) -> None:
self._test_map_round_trip({})
def test_int_to_str_map_round_trip(self) -> None:
self._test_map_round_trip({1: "one", 2: "two"})
def test_str_to_struct_map_round_trip(self) -> None:
self._test_map_round_trip(
{
"one": thrift_types.struct_map_string_i32(field_1={"one": 1}),
"two": thrift_types.struct_map_string_i32(field_1={"two": 2}),
}
)
def test_thrift_map_round_trip(self) -> None:
self._test_map_round_trip(
thrift_types.struct_map_string_i32(field_1={"one": 1}).field_1
)
| [
"[email protected]"
]
| |
eb621fa706f1fb5e6a98134f911aa9907b0257da | e6c65e2e354336a4bea5b6a4ccbccd3682915fe2 | /out-bin/py/google/fhir/models/run_locally.runfiles/com_google_fhir/external/pypi__numpy_1_15_4/numpy/matrixlib/tests/__init__.py | aeeeb27fd221b666a211ced69a956b7500092e85 | [
"Apache-2.0"
]
| permissive | rasalt/fhir-datalab | c30ab773d84983dd04a37e9d0ddec8bf2824b8a4 | 3e329fc8b4226d3e3a4a7c23c306a86e7a9ea0de | refs/heads/master | 2021-10-09T05:51:04.593416 | 2018-12-21T18:11:03 | 2018-12-22T05:38:32 | 162,744,237 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 138 | py | /home/rkharwar/.cache/bazel/_bazel_rkharwar/0ddaa3627472ad9d1367a008236ce2f5/external/pypi__numpy_1_15_4/numpy/matrixlib/tests/__init__.py | [
"[email protected]"
]
| |
f0e30cd721e3980995d0f449df77418b9cfddd8a | 30e1dc84fe8c54d26ef4a1aff000a83af6f612be | /deps/src/libxml2-2.9.1/python/tests/reader5.py | 220a3e5bbafc048a0ea35a0277fe47bbaca38f99 | [
"MIT",
"BSD-3-Clause"
]
| permissive | Sitispeaks/turicreate | 0bda7c21ee97f5ae7dc09502f6a72abcb729536d | d42280b16cb466a608e7e723d8edfbe5977253b6 | refs/heads/main | 2023-05-19T17:55:21.938724 | 2021-06-14T17:53:17 | 2021-06-14T17:53:17 | 385,034,849 | 1 | 0 | BSD-3-Clause | 2021-07-11T19:23:21 | 2021-07-11T19:23:20 | null | UTF-8 | Python | false | false | 1,246 | py | #!/usr/bin/python -u
#
# this tests the Expand() API of the xmlTextReader interface
# this extract the Dragon bibliography entries from the XML specification
#
import libxml2
import sys
# Memory debug specific
libxml2.debugMemory(1)
expect="""<bibl id="Aho" key="Aho/Ullman">Aho, Alfred V.,
Ravi Sethi, and Jeffrey D. Ullman.
<emph>Compilers: Principles, Techniques, and Tools</emph>.
Reading: Addison-Wesley, 1986, rpt. corr. 1988.</bibl>"""
f = open('../../test/valid/REC-xml-19980210.xml', 'rb')
input = libxml2.inputBuffer(f)
reader = input.newTextReader("REC")
res=""
while reader.Read() > 0:
while reader.Name() == 'bibl':
node = reader.Expand() # expand the subtree
if node.xpathEval("@id = 'Aho'"): # use XPath on it
res = res + node.serialize()
if reader.Next() != 1: # skip the subtree
break;
if res != expect:
print("Error: didn't get the expected output")
print("got '%s'" % (res))
print("expected '%s'" % (expect))
#
# cleanup
#
del input
del reader
# Memory debug specific
libxml2.cleanupParser()
if libxml2.debugMemory(1) == 0:
print("OK")
else:
print("Memory leak %d bytes" % (libxml2.debugMemory(1)))
libxml2.dumpMemory()
| [
"[email protected]"
]
| |
8528817f2e818ab95c640dec2fbc42d988e68de4 | 8bd63bc56b39d26458ad54b7f18c4b149c1e3ce2 | /sphinx-files/rst-files/Data/code/2011/11/000032/binary_liquid_mixture_immiscibility_and_stability.py | d86c0be99462bbdd27b0749ff15621910c02ba82 | [
"CC0-1.0",
"LicenseRef-scancode-public-domain",
"LicenseRef-scancode-public-domain-disclaimer"
]
| permissive | isabella232/scipy-central-rescue | 43270c0e1850b989fbe9a5b1a06c3be11d16464a | 2b331610d52c189ae96bea4f4ce2ec343146b608 | refs/heads/master | 2021-09-06T09:17:30.627497 | 2018-02-04T19:41:11 | 2018-02-04T19:41:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,469 | py | # License: Creative Commons Zero (almost public domain) http://scpyce.org/cc0
#Determines regions of immiscibility and any limits of essential instability
#for a binary liquid mixture of components B and C. the excess Gibbs energy of
#mixing is given explicitly by an empirical equation:
#deltaGex/RT = xBxC[k1+k2(xB-xC)+k3(xB-xC)^2] where xB+xC=1
import numpy as np
from matplotlib.pylab import *
# These are the functions called by the bisection method
def f(x, id):
if id == 1:
return (-2 * (k1 + k2 * (6 * x - 3) + k3 * (24 * x**2 - 24 * x + 5))
+ 1 / (x - x**2))
elif id == 2:
return (-2 * k1 * x + k1 + k2 * (-6 * x**2 + 6 * x - 1) + k3 *
( -16 * x**3 + 24 * x**2 - 10 * x + 1) + log(x) - log(1 - x))
elif id == 3:
return (dummys - (-2 * k1 * x + k1 + k2 * (-6 * x**2 + 6 * x - 1) +
k3 * (-16 * x**3 + 24 * x**2 - 10 * x + 1) +
log(x) - log(1 - x)))
#This function is to calculate values for the y-axis on the figure
def g(x):
return (x * (1 - x) * (k1 + k2 * (x - (1 - x)) + k3 * (x - (1 - x))**2) +
x * log(x) + (1 - x) * log(1 - x))
#The incremental search method is used to start off the bisection method
def incremental(x0,xf,id):
dx = (xf - x0) / 998
for i in range(998):
y1 = f(x0,id)
y2 = f(x0 + (i + 1) * dx,id)
if y1 * y2 < 0:
for j in range(10):
y1 = f(x0 + i * dx,id)
y2 = f(x0 + i * dx + (j + 1) * dx/10,id)
if y1 * y2 < 0:
x1 = x0 + i * dx + j * dx / 10
x2 = x0 + i * dx + (j + 1) * dx / 10
y1 = f(x1,id)
y2 = f(x2,id)
return x1, x2, y1, y2
# Bisection method used to solve for non-linear equation
def bisec(x0,xf,id):
x1, x2, y1, y2 = incremental(x0,xf,id)
e = 1
while e > 1e-6:
x3 = (x1 + x2) / 2
y3 = f(x3,id)
if y1 * y3 < 0:
x2 = x3
y2 = y3
else:
x1 = x3
y1 = y3
e = abs(1 - (x1 / x2))
return x2
# Constants
k1 = 2.0
k2 = 0.2
k3 = -0.8
#Set up vectors of composition values
xB = np.linspace(0.001,0.999,101)
xC = 1 - xB
#This is deltaG/RT calculated from the excess Gibbs given at top
deltaGoverRT = (xB * xC * (k1 + k2 * (xB - xC) + k3 * (xB - xC)**2) +
xB * log(xB) + xC * log(xC))
#First and second derivative of deltaG/RT
derivative = (-2 * k1 * xB + k1 + k2 * (-6 * xB**2 + 6 * xB - 1) + k3 *
(-16 * xB**3 + 24 * xB**2 - 10 * xB + 1) + log(xB) - log(1 - xB))
derivative2 = (-2 * (k1 + k2 * (6 * xB - 3) + k3 * (24 * xB**2 - 24 * xB + 5))
+ 1 / (xB - xB**2))
#find spinodal points for instability region using bisection method
xspin1 = bisec(0.001, 0.999, 1)
xspin2 = bisec(xspin1, 0.999, 1)
#initial guess at binodal points at minima of function
xB1 = bisec(0.001, 0.999, 2)
xB2 = bisec(xB1, 0.999, 2)
xB3 = bisec(xB2, 0.999, 2)
xBa = xB1
xBb = xB3
#Solve for binodal points using bisection method
converged = False
while not converged:
dummys = (g(xBb) - g(xBa)) / (xBb - xBa) #dummy slope
e = abs(1 - (dummys / f(xBb, 2)))
if e < 1e-4:
converged = True
else:
xBa = bisec(0.001, 0.999, 3)
xBu = bisec(xBa, 0.999, 3)
xBb = bisec(xBu, 0.999, 3)
yint = g(xBa) - dummys * xBa
y = yint + dummys * xB
figure()
plot(xB, deltaGoverRT, '-')
plot(xB, y, '-')
plot(xB1, g(xB1), '.', color='blue', markersize=12)
plot(xB3, g(xB3), '.', color='blue', markersize=12)
plot(xBa, g(xBa), '.', color='red', markersize=12)
plot(xBb, g(xBb), '.', color='red', markersize=12)
plot(xspin1, g(xspin1), '.', color='orange', markersize=12)
plot(xspin2, g(xspin2), '.', color='orange', markersize=12)
grid('on')
xlabel(' xB ')
ylabel(' deltaG/RT ')
title('DeltaG/RT vs xB')
show()
print 'There is one-phase instability between xB = ', "%.2f" % xspin1, 'and xB = ', "%.2f" % xspin2
print '(Orange points on figure, "spinodal points")'
print 'The region of immiscibility is between xB = ', "%.2f" % xBa, 'and xB = ', "%.2f" % xBb
print '(Red points on figure, "binodal points")'
print 'Blue points on fig show minima, which do not equal to the binodal points' | [
"[email protected]"
]
| |
7a4ea8fb4fbee69b0722d5a4867d805e6c6c95da | 3d65a2d72e65083c752281368cf040ae977e4757 | /plot_scripts/plot_spagetti.py | b30fed38d6694fb61a31c8c4cb7aff780abb1d61 | []
| no_license | florisvb/OdorAnalysis | 6b4b2c32979b9139856aee20cc63c34cfe63819e | 18beae8d3c6be271f171b1c36c9fd932a8a404ba | refs/heads/master | 2020-06-03T14:48:34.962795 | 2012-10-23T22:28:21 | 2012-10-23T22:28:21 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,207 | py | #!/usr/bin/env python
import sys, os
from optparse import OptionParser
sys.path.append('../')
sys.path.append('../analysis_modules')
import flydra_analysis_tools as fat
import fly_plot_lib
fly_plot_lib.set_params.pdf()
import fly_plot_lib.plot as fpl
fad = fat.flydra_analysis_dataset
dac = fat.dataset_analysis_core
fap = fat.flydra_analysis_plot
tac = fat.trajectory_analysis_core
import odor_packet_analysis as opa
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.patches as patches
################# get trajectory keys #################
def get_keys(dataset):
keys = dataset.trajecs.keys()
return keys
################# plotting functions #######################
def plot_colored_cartesian_spagetti(config, dataset, axis='xy', xlim=(-0.2, .2), ylim=(-0.75, .25), zlim=(0, 0.3), keys=None, keys_to_highlight=[], show_saccades=False, colormap='jet', color_attribute='speed', norm=(0,0.5), artists=None, save_figure_path='', figname=None, show_start=False):
if keys is None:
keys = get_keys(dataset)
print 'plotting spagetti, axis: ', axis
print 'number of keys: ', len(keys)
if len(keys) < 1:
print 'No data'
return
fig = plt.figure()
ax = fig.add_subplot(111)
height = config.post_center[2]-config.ticks['z'][0]
print 'ARTISTS STARTING'
print artists
if axis=='xy': # xy plane
ax.set_ylim(ylim[0], ylim[1])
ax.set_xlim(xlim[0], xlim[1])
ax.set_autoscale_on(True)
ax.set_aspect('equal')
axes=[0,1]
fap.cartesian_spagetti(ax, dataset, keys=keys, nkeys=10, start_key=0, axes=axes, show_saccades=show_saccades, keys_to_highlight=[], colormap=colormap, color_attribute=color_attribute, norm=norm, show_start=show_start)
post = patches.Circle(config.post_center[0:2], config.post_radius, color='black')
if axis=='yz': # yz plane
ax.set_ylim(zlim[0], zlim[1])
ax.set_xlim(ylim[0], ylim[1])
ax.set_autoscale_on(True)
ax.set_aspect('equal')
axes=[1,2]
fap.cartesian_spagetti(ax, dataset, keys=keys, nkeys=10, start_key=0, axes=axes, show_saccades=show_saccades, keys_to_highlight=[], colormap=colormap, color_attribute=color_attribute, norm=norm, show_start=show_start)
post = patches.Rectangle([-1*config.post_radius, config.ticks['z'][0]], config.post_radius*2, height, color='black')
if axis=='xz': # xz plane
ax.set_ylim(zlim[0], zlim[1])
ax.set_xlim(xlim[0], xlim[1])
ax.set_autoscale_on(True)
ax.set_aspect('equal')
axes=[0,2]
fap.cartesian_spagetti(ax, dataset, keys=keys, nkeys=10, start_key=0, axes=axes, show_saccades=show_saccades, keys_to_highlight=[], colormap=colormap, color_attribute=color_attribute, norm=norm, show_start=show_start)
post = patches.Rectangle([-1*config.post_radius, config.ticks['z'][0]], config.post_radius*2, height, color='black')
if artists is None:
artists = []
artists.append(post)
if artists is not None:
for artist in artists:
ax.add_artist(artist)
#prep_cartesian_spagetti_for_saving(ax)
xticks = config.ticks['x']
yticks = config.ticks['y']
zticks = config.ticks['z']
if axis=='xy':
fpl.adjust_spines(ax, ['left', 'bottom'], xticks=xticks, yticks=yticks)
ax.set_xlabel('x axis, m')
ax.set_ylabel('y axis, m')
ax.set_title('xy plot, color=speed from 0-0.5 m/s')
if axis=='yz':
fpl.adjust_spines(ax, ['left', 'bottom'], xticks=yticks, yticks=zticks)
ax.set_xlabel('y axis, m')
ax.set_ylabel('z axis, m')
ax.set_title('yz plot, color=speed from 0-0.5 m/s')
if axis=='xz':
fpl.adjust_spines(ax, ['left', 'bottom'], xticks=xticks, yticks=zticks)
ax.set_xlabel('x axis, m')
ax.set_ylabel('z axis, m')
ax.set_title('xz plot, color=speed from 0-0.5 m/s')
fig.set_size_inches(8,8)
if figname is None:
figname = save_figure_path + 'spagetti_' + axis + '.pdf'
else:
figname = os.path.join(save_figure_path, figname)
fig.savefig(figname, format='pdf')
return ax
def main(config, culled_dataset, save_figure_path=''):
print
print 'Plotting spagetti'
if 1:
# in odor
print
print 'Odor: '
dataset_in_odor = fad.make_dataset_with_attribute_filter(culled_dataset, 'odor_stimulus', 'on')
if len(dataset_in_odor.trajecs.keys()) > 0:
plot_colored_cartesian_spagetti(config, dataset_in_odor, axis='xy', save_figure_path=save_figure_path, figname='spagetti_odor_xy.pdf')
plot_colored_cartesian_spagetti(config, dataset_in_odor, axis='yz', save_figure_path=save_figure_path, figname='spagetti_odor_yz.pdf')
plot_colored_cartesian_spagetti(config, dataset_in_odor, axis='xz', save_figure_path=save_figure_path, figname='spagetti_odor_xz.pdf')
# not in odor
print
print 'No odor: '
dataset_no_odor = fad.make_dataset_with_attribute_filter(culled_dataset, 'odor_stimulus', 'none')
if len(dataset_no_odor.trajecs.keys()) > 0:
plot_colored_cartesian_spagetti(config, dataset_no_odor, axis='xy', save_figure_path=save_figure_path, figname='spagetti_no_odor_xy.pdf')
plot_colored_cartesian_spagetti(config, dataset_no_odor, axis='yz', save_figure_path=save_figure_path, figname='spagetti_no_odor_yz.pdf')
plot_colored_cartesian_spagetti(config, dataset_no_odor, axis='xz', save_figure_path=save_figure_path, figname='spagetti_no_odor_xz.pdf')
# pulse odor
print
print 'Pulsing odor: '
dataset_pulsing_odor = fad.make_dataset_with_attribute_filter(culled_dataset, 'odor_stimulus', 'pulsing')
if len(dataset_pulsing_odor.trajecs.keys()) > 0:
plot_colored_cartesian_spagetti(config, dataset_pulsing_odor, axis='xy', save_figure_path=save_figure_path, figname='spagetti_pulsing_odor_xy.pdf')
plot_colored_cartesian_spagetti(config, dataset_pulsing_odor, axis='yz', save_figure_path=save_figure_path, figname='spagetti_pulsing_odor_yz.pdf')
plot_colored_cartesian_spagetti(config, dataset_pulsing_odor, axis='xz', save_figure_path=save_figure_path, figname='spagetti_pulsing_xz.pdf')
# odor plot
print
print 'Best odor trajectory: '
if 1:
keys = opa.get_trajectories_with_odor(culled_dataset, 50)
keys = keys[0]
plot_colored_cartesian_spagetti(config, culled_dataset, axis='xy', keys=keys, color_attribute='odor', norm=(0,200), save_figure_path=save_figure_path, figname='odor_trajectory_xy.pdf', show_start=True)
plot_colored_cartesian_spagetti(config, culled_dataset, axis='xz', keys=keys, color_attribute='odor', norm=(0,200), save_figure_path=save_figure_path, figname='odor_trajectory_xz.pdf', show_start=True)
if 0:
keys = opa.get_trajectories_with_odor(culled_dataset, 175)
plot_colored_cartesian_spagetti(config, culled_dataset, axis='xy', keys=keys, color_attribute='odor', norm=(0,100), save_figure_path=save_figure_path, figname='odor_trajectory_xy.pdf')
plot_colored_cartesian_spagetti(config, culled_dataset, axis='xz', keys=keys, color_attribute='odor', norm=(0,100), save_figure_path=save_figure_path, figname='odor_trajectory_xz.pdf')
if __name__ == '__main__':
parser = OptionParser()
parser.add_option("--path", type="str", dest="path", default='',
help="path to empty data folder, where you have a configuration file")
(options, args) = parser.parse_args()
path = options.path
sys.path.append(path)
import analysis_configuration
config = analysis_configuration.Config()
culled_dataset_name = os.path.join(path, config.culled_datasets_path, config.culled_dataset_name)
culled_dataset = fad.load(culled_dataset_name)
figure_path = os.path.join(path, config.figure_path)
main(config, culled_dataset, save_figure_path=os.path.join(figure_path, 'spagetti/') )
| [
"[email protected]"
]
|