blob_id (string, 40..40) | directory_id (string, 40..40) | path (string, 3..616) | content_id (string, 40..40) | detected_licenses (sequence, 0..112) | license_type (string, 2 classes) | repo_name (string, 5..115) | snapshot_id (string, 40..40) | revision_id (string, 40..40) | branch_name (string, 777 classes) | visit_date (timestamp[us], 2015-08-06 10:31:46 .. 2023-09-06 10:44:38) | revision_date (timestamp[us], 1970-01-01 02:38:32 .. 2037-05-03 13:00:00) | committer_date (timestamp[us], 1970-01-01 02:38:32 .. 2023-09-06 01:08:06) | github_id (int64, 4.92k..681M, nullable) | star_events_count (int64, 0..209k) | fork_events_count (int64, 0..110k) | gha_license_id (string, 22 classes) | gha_event_created_at (timestamp[us], 2012-06-04 01:52:49 .. 2023-09-14 21:59:50, nullable) | gha_created_at (timestamp[us], 2008-05-22 07:58:19 .. 2023-08-21 12:35:19, nullable) | gha_language (string, 149 classes) | src_encoding (string, 26 classes) | language (string, 1 class) | is_vendor (bool) | is_generated (bool) | length_bytes (int64, 3..10.2M) | extension (string, 188 classes) | content (string, 3..10.2M) | authors (sequence, 1..1) | author_id (string, 1..132)
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
be5e0b741dc6ad841c668031edaee115bfe5314e | 36059411cedfeec7478fd725c43f2120ab5ad38d | /boulder_ftp.py | a055a456e089b72f19569b633bd3059a4fd66cd2 | [] | no_license | tytechortz/Arctic-Ice | a8345746bdd2d73559941ea71efe06601212a7f1 | 83aac39c00027cca6bd85fd2709fcfe86cf3ef31 | refs/heads/master | 2022-07-09T13:38:41.735808 | 2020-01-02T16:22:52 | 2020-01-02T16:22:52 | 167,096,158 | 1 | 0 | null | 2022-06-21T23:10:16 | 2019-01-23T01:35:54 | Jupyter Notebook | UTF-8 | Python | false | false | 541 | py | from ftplib import FTP
import os
import pandas as pd
# Log into ftp site.
ftp = FTP('sidads.colorado.edu')
ftp.login(user='anonymous', passwd='ICE_PSWD')
# Download the data file, closing the local handle before it is read back below.
ftp.cwd('/pub/DATASETS/NOAA/G02135/north/daily/data/')
with open('N_seaice_extent_daily_v3.0.csv', 'wb') as f:
    ftp.retrbinary('RETR N_seaice_extent_daily_v3.0.csv', f.write)
ftp.quit()
# Read data.
df = pd.read_csv('N_seaice_extent_daily_v3.0.csv',skiprows=[i for i in range(1,2436)])
# df.columns = []
pd.options.display.float_format = '{:,}'.format
print(df.head()) | [
"[email protected]"
] | |
51834523099e3ca59c889ea40d94cfd03ffdbb05 | f31fda8014ecadf6af7d4e3392fb917c49e0352a | /HeavyIonsAnalysis/JetAnalysis/python/jets/akPu5CaloJetSequence_pp_data_cff.py | ee3d9695514ad371c28857cd6d3dce622b1ebe50 | [] | no_license | jniedzie/lightbylight | acea5051f053c49824a49a0b78bac3a2247ee75f | f5a4661fcf3fd3c0e9ccd8893a46a238e30c2aa8 | refs/heads/master | 2020-03-18T12:24:31.970468 | 2018-02-09T15:50:00 | 2018-02-09T15:50:00 | 134,724,759 | 0 | 1 | null | 2018-05-24T14:11:12 | 2018-05-24T14:11:12 | null | UTF-8 | Python | false | false | 14,202 | py |
import FWCore.ParameterSet.Config as cms
from HeavyIonsAnalysis.JetAnalysis.patHeavyIonSequences_cff import patJetGenJetMatch, patJetPartonMatch, patJetCorrFactors, patJets
from HeavyIonsAnalysis.JetAnalysis.inclusiveJetAnalyzer_cff import *
from HeavyIonsAnalysis.JetAnalysis.bTaggers_cff import *
from RecoJets.JetProducers.JetIDParams_cfi import *
from RecoJets.JetProducers.nJettinessAdder_cfi import Njettiness
akPu5Calomatch = patJetGenJetMatch.clone(
src = cms.InputTag("akPu5CaloJets"),
matched = cms.InputTag("ak5GenJets"),
resolveByMatchQuality = cms.bool(False),
maxDeltaR = 0.5
)
akPu5CalomatchGroomed = patJetGenJetMatch.clone(
src = cms.InputTag("ak5GenJets"),
matched = cms.InputTag("ak5GenJets"),
resolveByMatchQuality = cms.bool(False),
maxDeltaR = 0.5
)
akPu5Caloparton = patJetPartonMatch.clone(src = cms.InputTag("akPu5CaloJets")
)
akPu5Calocorr = patJetCorrFactors.clone(
useNPV = cms.bool(False),
useRho = cms.bool(False),
# primaryVertices = cms.InputTag("hiSelectedVertex"),
levels = cms.vstring('L2Relative','L3Absolute'),
src = cms.InputTag("akPu5CaloJets"),
payload = "AKPu5Calo_offline"
)
akPu5CaloJetID= cms.EDProducer('JetIDProducer', JetIDParams, src = cms.InputTag('akPu5CaloJets'))
#akPu5Caloclean = heavyIonCleanedGenJets.clone(src = cms.InputTag('ak5GenJets'))
akPu5CalobTagger = bTaggers("akPu5Calo",0.5)
#create objects locally since they dont load properly otherwise
#akPu5Calomatch = akPu5CalobTagger.match
akPu5Caloparton = patJetPartonMatch.clone(src = cms.InputTag("akPu5CaloJets"), matched = cms.InputTag("genParticles"))
akPu5CaloPatJetFlavourAssociationLegacy = akPu5CalobTagger.PatJetFlavourAssociationLegacy
akPu5CaloPatJetPartons = akPu5CalobTagger.PatJetPartons
akPu5CaloJetTracksAssociatorAtVertex = akPu5CalobTagger.JetTracksAssociatorAtVertex
akPu5CaloJetTracksAssociatorAtVertex.tracks = cms.InputTag("highPurityTracks")
akPu5CaloSimpleSecondaryVertexHighEffBJetTags = akPu5CalobTagger.SimpleSecondaryVertexHighEffBJetTags
akPu5CaloSimpleSecondaryVertexHighPurBJetTags = akPu5CalobTagger.SimpleSecondaryVertexHighPurBJetTags
akPu5CaloCombinedSecondaryVertexBJetTags = akPu5CalobTagger.CombinedSecondaryVertexBJetTags
akPu5CaloCombinedSecondaryVertexV2BJetTags = akPu5CalobTagger.CombinedSecondaryVertexV2BJetTags
akPu5CaloJetBProbabilityBJetTags = akPu5CalobTagger.JetBProbabilityBJetTags
akPu5CaloSoftPFMuonByPtBJetTags = akPu5CalobTagger.SoftPFMuonByPtBJetTags
akPu5CaloSoftPFMuonByIP3dBJetTags = akPu5CalobTagger.SoftPFMuonByIP3dBJetTags
akPu5CaloTrackCountingHighEffBJetTags = akPu5CalobTagger.TrackCountingHighEffBJetTags
akPu5CaloTrackCountingHighPurBJetTags = akPu5CalobTagger.TrackCountingHighPurBJetTags
akPu5CaloPatJetPartonAssociationLegacy = akPu5CalobTagger.PatJetPartonAssociationLegacy
akPu5CaloImpactParameterTagInfos = akPu5CalobTagger.ImpactParameterTagInfos
akPu5CaloImpactParameterTagInfos.primaryVertex = cms.InputTag("offlinePrimaryVertices")
akPu5CaloJetProbabilityBJetTags = akPu5CalobTagger.JetProbabilityBJetTags
akPu5CaloSecondaryVertexTagInfos = akPu5CalobTagger.SecondaryVertexTagInfos
akPu5CaloSimpleSecondaryVertexHighEffBJetTags = akPu5CalobTagger.SimpleSecondaryVertexHighEffBJetTags
akPu5CaloSimpleSecondaryVertexHighPurBJetTags = akPu5CalobTagger.SimpleSecondaryVertexHighPurBJetTags
akPu5CaloCombinedSecondaryVertexBJetTags = akPu5CalobTagger.CombinedSecondaryVertexBJetTags
akPu5CaloCombinedSecondaryVertexV2BJetTags = akPu5CalobTagger.CombinedSecondaryVertexV2BJetTags
akPu5CaloSecondaryVertexNegativeTagInfos = akPu5CalobTagger.SecondaryVertexNegativeTagInfos
akPu5CaloNegativeSimpleSecondaryVertexHighEffBJetTags = akPu5CalobTagger.NegativeSimpleSecondaryVertexHighEffBJetTags
akPu5CaloNegativeSimpleSecondaryVertexHighPurBJetTags = akPu5CalobTagger.NegativeSimpleSecondaryVertexHighPurBJetTags
akPu5CaloNegativeCombinedSecondaryVertexBJetTags = akPu5CalobTagger.NegativeCombinedSecondaryVertexBJetTags
akPu5CaloPositiveCombinedSecondaryVertexBJetTags = akPu5CalobTagger.PositiveCombinedSecondaryVertexBJetTags
akPu5CaloNegativeCombinedSecondaryVertexV2BJetTags = akPu5CalobTagger.NegativeCombinedSecondaryVertexV2BJetTags
akPu5CaloPositiveCombinedSecondaryVertexV2BJetTags = akPu5CalobTagger.PositiveCombinedSecondaryVertexV2BJetTags
akPu5CaloSoftPFMuonsTagInfos = akPu5CalobTagger.SoftPFMuonsTagInfos
akPu5CaloSoftPFMuonsTagInfos.primaryVertex = cms.InputTag("offlinePrimaryVertices")
akPu5CaloSoftPFMuonBJetTags = akPu5CalobTagger.SoftPFMuonBJetTags
akPu5CaloSoftPFMuonByIP3dBJetTags = akPu5CalobTagger.SoftPFMuonByIP3dBJetTags
akPu5CaloSoftPFMuonByPtBJetTags = akPu5CalobTagger.SoftPFMuonByPtBJetTags
akPu5CaloNegativeSoftPFMuonByPtBJetTags = akPu5CalobTagger.NegativeSoftPFMuonByPtBJetTags
akPu5CaloPositiveSoftPFMuonByPtBJetTags = akPu5CalobTagger.PositiveSoftPFMuonByPtBJetTags
akPu5CaloPatJetFlavourIdLegacy = cms.Sequence(akPu5CaloPatJetPartonAssociationLegacy*akPu5CaloPatJetFlavourAssociationLegacy)
#Not working with our PU sub, but keep it here for reference
#akPu5CaloPatJetFlavourAssociation = akPu5CalobTagger.PatJetFlavourAssociation
#akPu5CaloPatJetFlavourId = cms.Sequence(akPu5CaloPatJetPartons*akPu5CaloPatJetFlavourAssociation)
akPu5CaloJetBtaggingIP = cms.Sequence(akPu5CaloImpactParameterTagInfos *
(akPu5CaloTrackCountingHighEffBJetTags +
akPu5CaloTrackCountingHighPurBJetTags +
akPu5CaloJetProbabilityBJetTags +
akPu5CaloJetBProbabilityBJetTags
)
)
akPu5CaloJetBtaggingSV = cms.Sequence(akPu5CaloImpactParameterTagInfos
*
akPu5CaloSecondaryVertexTagInfos
* (akPu5CaloSimpleSecondaryVertexHighEffBJetTags+
akPu5CaloSimpleSecondaryVertexHighPurBJetTags+
akPu5CaloCombinedSecondaryVertexBJetTags+
akPu5CaloCombinedSecondaryVertexV2BJetTags
)
)
akPu5CaloJetBtaggingNegSV = cms.Sequence(akPu5CaloImpactParameterTagInfos
*
akPu5CaloSecondaryVertexNegativeTagInfos
* (akPu5CaloNegativeSimpleSecondaryVertexHighEffBJetTags+
akPu5CaloNegativeSimpleSecondaryVertexHighPurBJetTags+
akPu5CaloNegativeCombinedSecondaryVertexBJetTags+
akPu5CaloPositiveCombinedSecondaryVertexBJetTags+
akPu5CaloNegativeCombinedSecondaryVertexV2BJetTags+
akPu5CaloPositiveCombinedSecondaryVertexV2BJetTags
)
)
akPu5CaloJetBtaggingMu = cms.Sequence(akPu5CaloSoftPFMuonsTagInfos * (akPu5CaloSoftPFMuonBJetTags
+
akPu5CaloSoftPFMuonByIP3dBJetTags
+
akPu5CaloSoftPFMuonByPtBJetTags
+
akPu5CaloNegativeSoftPFMuonByPtBJetTags
+
akPu5CaloPositiveSoftPFMuonByPtBJetTags
)
)
akPu5CaloJetBtagging = cms.Sequence(akPu5CaloJetBtaggingIP
*akPu5CaloJetBtaggingSV
*akPu5CaloJetBtaggingNegSV
# *akPu5CaloJetBtaggingMu
)
akPu5CalopatJetsWithBtagging = patJets.clone(jetSource = cms.InputTag("akPu5CaloJets"),
genJetMatch = cms.InputTag("akPu5Calomatch"),
genPartonMatch = cms.InputTag("akPu5Caloparton"),
jetCorrFactorsSource = cms.VInputTag(cms.InputTag("akPu5Calocorr")),
JetPartonMapSource = cms.InputTag("akPu5CaloPatJetFlavourAssociationLegacy"),
JetFlavourInfoSource = cms.InputTag("akPu5CaloPatJetFlavourAssociation"),
trackAssociationSource = cms.InputTag("akPu5CaloJetTracksAssociatorAtVertex"),
useLegacyJetMCFlavour = True,
discriminatorSources = cms.VInputTag(cms.InputTag("akPu5CaloSimpleSecondaryVertexHighEffBJetTags"),
cms.InputTag("akPu5CaloSimpleSecondaryVertexHighPurBJetTags"),
cms.InputTag("akPu5CaloCombinedSecondaryVertexBJetTags"),
cms.InputTag("akPu5CaloCombinedSecondaryVertexV2BJetTags"),
cms.InputTag("akPu5CaloJetBProbabilityBJetTags"),
cms.InputTag("akPu5CaloJetProbabilityBJetTags"),
#cms.InputTag("akPu5CaloSoftPFMuonByPtBJetTags"),
#cms.InputTag("akPu5CaloSoftPFMuonByIP3dBJetTags"),
cms.InputTag("akPu5CaloTrackCountingHighEffBJetTags"),
cms.InputTag("akPu5CaloTrackCountingHighPurBJetTags"),
),
jetIDMap = cms.InputTag("akPu5CaloJetID"),
addBTagInfo = True,
addTagInfos = True,
addDiscriminators = True,
addAssociatedTracks = True,
addJetCharge = False,
addJetID = False,
getJetMCFlavour = False,
addGenPartonMatch = False,
addGenJetMatch = False,
embedGenJetMatch = False,
embedGenPartonMatch = False,
# embedCaloTowers = False,
# embedPFCandidates = True
)
akPu5CaloNjettiness = Njettiness.clone(
src = cms.InputTag("akPu5CaloJets"),
R0 = cms.double( 0.5)
)
akPu5CalopatJetsWithBtagging.userData.userFloats.src += ['akPu5CaloNjettiness:tau1','akPu5CaloNjettiness:tau2','akPu5CaloNjettiness:tau3']
akPu5CaloJetAnalyzer = inclusiveJetAnalyzer.clone(jetTag = cms.InputTag("akPu5CalopatJetsWithBtagging"),
genjetTag = 'ak5GenJets',
rParam = 0.5,
matchJets = cms.untracked.bool(False),
matchTag = 'patJetsWithBtagging',
pfCandidateLabel = cms.untracked.InputTag('particleFlow'),
trackTag = cms.InputTag("generalTracks"),
fillGenJets = False,
isMC = False,
doSubEvent = False,
useHepMC = cms.untracked.bool(False),
genParticles = cms.untracked.InputTag("genParticles"),
eventInfoTag = cms.InputTag("generator"),
doLifeTimeTagging = cms.untracked.bool(True),
doLifeTimeTaggingExtras = cms.untracked.bool(False),
bTagJetName = cms.untracked.string("akPu5Calo"),
jetName = cms.untracked.string("akPu5Calo"),
genPtMin = cms.untracked.double(5),
hltTrgResults = cms.untracked.string('TriggerResults::'+'HISIGNAL'),
doTower = cms.untracked.bool(False),
doSubJets = cms.untracked.bool(False),
doGenSubJets = cms.untracked.bool(False),
subjetGenTag = cms.untracked.InputTag("ak5GenJets"),
doGenTaus = False
)
akPu5CaloJetSequence_mc = cms.Sequence(
#akPu5Caloclean
#*
akPu5Calomatch
#*
#akPu5CalomatchGroomed
*
akPu5Caloparton
*
akPu5Calocorr
*
#akPu5CaloJetID
#*
akPu5CaloPatJetFlavourIdLegacy
#*
#akPu5CaloPatJetFlavourId # Use legacy algo till PU implemented
*
akPu5CaloJetTracksAssociatorAtVertex
*
akPu5CaloJetBtagging
*
akPu5CaloNjettiness
*
akPu5CalopatJetsWithBtagging
*
akPu5CaloJetAnalyzer
)
akPu5CaloJetSequence_data = cms.Sequence(akPu5Calocorr
*
#akPu5CaloJetID
#*
akPu5CaloJetTracksAssociatorAtVertex
*
akPu5CaloJetBtagging
*
akPu5CaloNjettiness
*
akPu5CalopatJetsWithBtagging
*
akPu5CaloJetAnalyzer
)
akPu5CaloJetSequence_jec = cms.Sequence(akPu5CaloJetSequence_mc)
akPu5CaloJetSequence_mb = cms.Sequence(akPu5CaloJetSequence_mc)
akPu5CaloJetSequence = cms.Sequence(akPu5CaloJetSequence_data)
| [
"[email protected]"
] | |
076bf3038294b33eac256cefdfcdc846835b8fba | 9905901a2beae3ff4885fbc29842b3c34546ffd7 | /nitro-python/build/lib/nssrc/com/citrix/netscaler/nitro/resource/config/filter/filterglobal_filterpolicy_binding.py | 5aac2b8dd0a9540023a40b6dd3d6be45a9da1f39 | [
"Python-2.0",
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | culbertm/NSttyPython | f354ebb3dbf445884dbddb474b34eb9246261c19 | ff9f6aedae3fb8495342cd0fc4247c819cf47397 | refs/heads/master | 2020-04-22T17:07:39.654614 | 2019-02-13T19:07:23 | 2019-02-13T19:07:23 | 170,530,223 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,576 | py | #
# Copyright (c) 2008-2016 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_resource
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_response
from nssrc.com.citrix.netscaler.nitro.service.options import options
from nssrc.com.citrix.netscaler.nitro.exception.nitro_exception import nitro_exception
from nssrc.com.citrix.netscaler.nitro.util.nitro_util import nitro_util
class filterglobal_filterpolicy_binding(base_resource) :
""" Binding class showing the filterpolicy that can be bound to filterglobal.
"""
def __init__(self) :
self._policyname = None
self._priority = None
self._state = None
self.___count = None
@property
def priority(self) :
r"""The priority of the policy.
"""
try :
return self._priority
except Exception as e:
raise e
@priority.setter
def priority(self, priority) :
r"""The priority of the policy.
"""
try :
self._priority = priority
except Exception as e:
raise e
@property
def state(self) :
r"""State of the binding.<br/>Possible values = ENABLED, DISABLED.
"""
try :
return self._state
except Exception as e:
raise e
@state.setter
def state(self, state) :
r"""State of the binding.<br/>Possible values = ENABLED, DISABLED
"""
try :
self._state = state
except Exception as e:
raise e
@property
def policyname(self) :
r"""The name of the filter policy.
"""
try :
return self._policyname
except Exception as e:
raise e
@policyname.setter
def policyname(self, policyname) :
r"""The name of the filter policy.
"""
try :
self._policyname = policyname
except Exception as e:
raise e
def _get_nitro_response(self, service, response) :
r""" converts nitro response into object and returns the object array in case of get request.
"""
try :
result = service.payload_formatter.string_to_resource(filterglobal_filterpolicy_binding_response, response, self.__class__.__name__)
if(result.errorcode != 0) :
if (result.errorcode == 444) :
service.clear_session(self)
if result.severity :
if (result.severity == "ERROR") :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
else :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
return result.filterglobal_filterpolicy_binding
except Exception as e :
raise e
def _get_object_name(self) :
r""" Returns the value of object identifier argument
"""
try :
return 0
except Exception as e :
raise e
@classmethod
def add(cls, client, resource) :
try :
if resource and type(resource) is not list :
updateresource = filterglobal_filterpolicy_binding()
updateresource.policyname = resource.policyname
updateresource.priority = resource.priority
updateresource.state = resource.state
return updateresource.update_resource(client)
else :
if resource and len(resource) > 0 :
updateresources = [filterglobal_filterpolicy_binding() for _ in range(len(resource))]
for i in range(len(resource)) :
updateresources[i].policyname = resource[i].policyname
updateresources[i].priority = resource[i].priority
updateresources[i].state = resource[i].state
return cls.update_bulk_request(client, updateresources)
except Exception as e :
raise e
@classmethod
def delete(cls, client, resource) :
try :
if resource and type(resource) is not list :
deleteresource = filterglobal_filterpolicy_binding()
deleteresource.policyname = resource.policyname
return deleteresource.delete_resource(client)
else :
if resource and len(resource) > 0 :
deleteresources = [filterglobal_filterpolicy_binding() for _ in range(len(resource))]
for i in range(len(resource)) :
deleteresources[i].policyname = resource[i].policyname
return cls.delete_bulk_request(client, deleteresources)
except Exception as e :
raise e
@classmethod
def get(cls, service) :
r""" Use this API to fetch a filterglobal_filterpolicy_binding resources.
"""
try :
obj = filterglobal_filterpolicy_binding()
response = obj.get_resources(service)
return response
except Exception as e:
raise e
@classmethod
def get_filtered(cls, service, filter_) :
r""" Use this API to fetch filtered set of filterglobal_filterpolicy_binding resources.
Filter string should be in JSON format.eg: "port:80,servicetype:HTTP".
"""
try :
obj = filterglobal_filterpolicy_binding()
option_ = options()
option_.filter = filter_
response = obj.getfiltered(service, option_)
return response
except Exception as e:
raise e
@classmethod
def count(cls, service) :
r""" Use this API to count filterglobal_filterpolicy_binding resources configued on NetScaler.
"""
try :
obj = filterglobal_filterpolicy_binding()
option_ = options()
option_.count = True
response = obj.get_resources(service, option_)
if response :
return response[0].__dict__['___count']
return 0
except Exception as e:
raise e
@classmethod
def count_filtered(cls, service, filter_) :
r""" Use this API to count the filtered set of filterglobal_filterpolicy_binding resources.
Filter string should be in JSON format.eg: "port:80,servicetype:HTTP".
"""
try :
obj = filterglobal_filterpolicy_binding()
option_ = options()
option_.count = True
option_.filter = filter_
response = obj.getfiltered(service, option_)
if response :
return response[0].__dict__['___count']
return 0
except Exception as e:
raise e
class State:
ENABLED = "ENABLED"
DISABLED = "DISABLED"
class filterglobal_filterpolicy_binding_response(base_response) :
def __init__(self, length=1) :
self.filterglobal_filterpolicy_binding = []
self.errorcode = 0
self.message = ""
self.severity = ""
self.sessionid = ""
self.filterglobal_filterpolicy_binding = [filterglobal_filterpolicy_binding() for _ in range(length)]
| [
"[email protected]"
] | |
f4291ee93deaf810818660a525dacb66b3a2eb7c | 6227d63532f2d657ef66d90709a3a1f484e9784b | /oviqpr/wsgi.py | 02a76e64db03ba8feffdd1981fb6c14b65e4ad1b | [] | no_license | vassily-la/oviq | 2edff4e61e5ac8cb94b462e2ed5c5bec2c5b014a | 65a86ca5cddd0180e8309a7659eaab6a35a5c785 | refs/heads/master | 2021-04-28T03:05:02.908010 | 2018-02-22T17:14:32 | 2018-02-22T17:14:32 | 122,131,126 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 390 | py | """
WSGI config for oviqpr project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "oviqpr.settings")
application = get_wsgi_application()
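# Illustrative deployment (an assumption, not part of the project files): a WSGI
# server such as gunicorn would be pointed at this module, e.g.
#   gunicorn oviqpr.wsgi:application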
| [
"[email protected]"
] | |
d5046c3c0b613b372fc250a121575cad4d03bc38 | 89d230ad44d17b18897da507725b0a10c32960d8 | /Gen2_0_PP/Contest/weaponsProblem.py | 4262d5dcd74a8bb81dd8ecc8b8b5afcbc6146ab4 | [] | no_license | KB-perByte/CodePedia | aeeae87b56cf0ff6e02200cfd6b34da42a007338 | 287e7a3ce981bbf594436cdc06dde23a02b53bb0 | refs/heads/master | 2021-06-19T07:32:53.849871 | 2021-01-23T16:17:27 | 2021-01-23T16:17:27 | 163,250,017 | 0 | 1 | null | 2020-03-21T14:39:36 | 2018-12-27T05:13:55 | JavaScript | UTF-8 | Python | false | false | 2,014 | py | '''
Daenerys has N types of weapons. There are Ai weapons of type i (1 <= i <= N). She wants to distribute these weapons among K soldiers. She wants to distribute them in such a way that:
All soldiers get an equal number of weapons.
All the weapons which a soldier gets must be of the same type.
As she wants to make all of them more powerful, she wants to give away as many weapons as possible. Help Daenerys find the maximum number of weapons which a soldier can get.
Input Format
The first line consists of two space-separated integers N and K.
The second line consists of N space-separated integers A1, A2, A3.... An, as described above.
Constraints
1 <= N <= 100000
1 <= Ai, K <= 1,000,000,000
Output Format
Output a single integer denoting the maximum number of weapons a soldier can get.
Sample Input 0
3 2
3 1 4
Sample Output 0
3
Explanation 0
She can give 3 weapons of type 1 to the first soldier and 3 weapons of type 3 to the second soldier.
'''
def binarySearch(array, l, r, toSearch): #not so needed
while l <= r:
mid = l + (r - l)//2
if array[mid] == toSearch:
return mid
elif array[mid] < toSearch:
l = mid + 1
else:
r = mid - 1
return -1
def checkDistribution(lst, mid, k):
    # True if handing out `mid` weapons per soldier can serve at least k soldiers
    s = 0
    for i in range(len(lst)):
        s += lst[i] // mid
    return s >= k
def makimumWeapons(lst, k):
    # Binary search on the answer (weapons given to each soldier). The lower bound
    # must be 1 rather than min(lst): with many soldiers the feasible amount can be
    # smaller than the smallest pile.
    l = 1
    h = max(lst)
    while h >= l:
        mid = l + (h - l) // 2
        if checkDistribution(lst, mid, k):
            if not checkDistribution(lst, mid + 1, k):
                return mid
            else:
                l = mid + 1
        else:
            h = mid - 1
    return 0
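# Illustrative check against the sample above: makimumWeapons([3, 1, 4], 2) == 3,
# since 3//3 + 1//3 + 4//3 = 2 soldiers can each receive 3 weapons, while 4 per
# soldier would serve only one.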
import sys
def get_ints(): return list(map(int, sys.stdin.readline().strip().split()))
input1 = list(map(int,input().split()))
#input2 = list(map(int,input().split()))
input2 = get_ints()
print(makimumWeapons(input2, input1[1]))
| [
"[email protected]"
] | |
bf5c6fee091e8426d7dd6d71ed755b2e3e1eaeed | 7e4425342a4d7e0f40978af17091f32d2712c79c | /Cb_DeepLearning_lec/Day_04_02_collection.py | 280b5a625f46315f5d0d9b50bfda8439d9fccca1 | [] | no_license | yunhui21/CB_Ai_NLP | eca3da00c6c9615c8737b50d2c5ebe8dd1e3ba8a | b66ecc24abfd988fc9e7f19fa1941826b1bf38a4 | refs/heads/master | 2023-01-07T14:21:26.758030 | 2020-11-16T05:57:30 | 2020-11-16T05:57:30 | 291,835,156 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,389 | py | # Day_04_02_collection.py
a = [1, 3, 7, 9]
for i in a:
print(i, end=' ')
print()
# Problem
# Print the list in reverse order
for i in reversed(a):
print(i, end=' ')
print()
# Tuple: a constant version of a list (read-only list)
b = (1, 3, 7, 9)
print(b)
print(b[0], b[1])
for i in b:
print(i, end=' ')
print()
# b[0] = -1 # error
# b.append(99) # error
c1 = (1, 4)
c2 = 1, 4 # packing
print(c1)
print(c2)
c3, c4 = 1, 4
c3, c4 = c2 # unpacking
# c3, c4, c5 = c2 # error
print(c3, c4)
def multi(d1, d2):
return d1 + d2, d1 * d2
e = multi(3, 5)
print(e, e[0], e[1])
e1, e2 = multi(3, 5)
print(e1, e2)
print('-' * 30)
# set
g = {1, 3, 5, 1, 3, 5, 1, 3, 5, } # unordered, duplicates removed
print(g)
h = [1, 3, 5, 1, 3, 5, 1, 3, 5, ] # order preserved
print(h)
print(set(h))
print(list(set(h)))
for i in g:
print(i)
print('-' * 30)
# Dictionary
# like an English-Korean dictionary: look up an English word, get a Korean description
# English word: key
# Korean description: value
info = {'age': 21, 'addr': 'ochang', 'hobby': 'minton', 12: 34}
print(type(info), type(set()), type((3,)))
print(info[12])
# <class 'dict'> <class 'set'> <class 'tuple'>
info = dict(age=21, addr='ochang', hobby='minton')
print(info)
print(info['age'], info['addr'], info['hobby'])
info.pop('hobby')
info.pop('addr')
info['blood'] = 'AB' # insert
info['blood'] = 'O' # update
# info.popitem() # removes the most recently added item
print('-' * 30)
print(info.keys())
print(info.values())
print(info.items())
for k in info.keys():
print(k, info[k])
# Problem
# Apply items() in a loop
p = list(info.items())
print(p)
for i in info.items():
print(i, i[0], i[1])
for k, v in info.items():
print(k, v)
# range, reversed, enumerate
a = ['A', 'B', 'C']
for i in a:
print(i)
# Problem
# Rewrite the code below in a more Pythonic way
for i in enumerate(a):
print(i)
for i, letter in enumerate(a):
print(i, letter)
# Problem
# Combine enumerate with items()
for i in enumerate(info.items()):
# print(i, i[0], i[1], i[1][0], i[1][1])
print(i[0], i[1][0], i[1][1])
for i, kv in enumerate(info.items()):
# print(i, kv, kv[0], kv[1])
print(i, kv[0], kv[1])
for i, (k, v) in enumerate(info.items()):
print(i, k, v)
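# (extra illustration, not in the original lesson) enumerate can start counting at 1
for i, (k, v) in enumerate(info.items(), 1):
    print(i, k, v)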
for k in info:
print(k, info[k])
print('\n\n\n') | [
"[email protected]"
] | |
ae9c23f5fdb98de82ae8cbf6a8e4ee62419a45d6 | 493a36f1f8606c7ddce8fc7fe49ce4409faf80be | /.history/B073040023/client_20210614185044.py | dc83e9c3495b85ade6ac751b06199f40df2ea143 | [] | no_license | ZhangRRz/computer_network | f7c3b82e62920bc0881dff923895da8ae60fa653 | 077848a2191fdfe2516798829644c32eaeded11e | refs/heads/main | 2023-05-28T02:18:09.902165 | 2021-06-15T06:28:59 | 2021-06-15T06:28:59 | 376,568,344 | 0 | 0 | null | 2021-06-13T14:48:36 | 2021-06-13T14:48:36 | null | UTF-8 | Python | false | false | 4,772 | py | import socket
import threading
import tcppacket
import struct
# socket.socket() will create a TCP socket (default)
# socket.socket(socket.AF_INET, socket.SOCK_STREAM) to explicitly define a TCP socket
sock = socket.socket(socket.AF_INET,socket.SOCK_DGRAM) # explicitly define a UDP socket
udp_host = '127.0.0.1' # Host IP
udp_port = 12345 # specified port to connect
def init_new_calc_req(i):
sock = socket.socket(socket.AF_INET,socket.SOCK_DGRAM)
oldmsg = msg = "calc 2 ^ 10"
    tcp = tcppacket.TCPPacket(data=msg.encode('utf-8'))
tcp.assemble_tcp_feilds()
sock.sendto(tcp.raw, (udp_host, udp_port))
# print("UDP target IP:", udp_host)
# print("UDP target Port:", udp_port) # Sending message to UDP server
while True:
data, address = sock.recvfrom(512*1024)
sock.connect(address)
s = struct.calcsize('!HHLLBBH')
unpackdata = struct.unpack('!HHLLBBH', data[:s])
msg = data[s:].decode('utf-8')
print(oldmsg,"is", msg)
if(unpackdata[5] % 2):
# fin_falg
fin_falg = 1
else:
fin_falg = 0
tcp = tcppacket.TCPPacket(
data="ACK".encode('utf-8'),
flags_ack=1,
flags_fin=fin_falg)
tcp.assemble_tcp_feilds()
print("ACK send to (IP,port):", address)
sock.sendto(tcp.raw, address)
if(fin_falg):
break
def init_new_videoreq_req(i):
sock = socket.socket(socket.AF_INET,socket.SOCK_DGRAM)
msg = "video 1".encode('utf-8')
# print("UDP target IP:", udp_host)
# print("UDP target Port:", udp_port)
tcp = tcppacket.TCPPacket(data=msg)
tcp.assemble_tcp_feilds()
sock.sendto(tcp.raw, (udp_host, udp_port)) # Sending message to UDP server
recvdata = b''
ack_seq = 0
seq = 0
counter = 0
while True:
data, address = sock.recvfrom(512*1024)
s = struct.calcsize('!HHLLBBHHH')
raw = struct.unpack('!HHLLBBHHH', data[:s])
print("receive packet from ", address,
"with header", raw)
if(raw[2] == ack_seq and raw[7] == 0):
recvdata += data[s:]
if(raw[5] % 2):
# fin_falg
fin_flag = 1
else:
fin_flag = 0
ack_seq += 1
counter += 1
else:
print("Receive ERROR packet from ", address)
fin_flag = 1
counter = 3
# --------------------------------------------
# send ACK
if(counter == 3):
tcp = tcppacket.TCPPacket(
data=str("ACK").encode('utf-8'),
seq=seq, ack_seq=ack_seq,
flags_ack=1,
flags_fin=fin_flag)
tcp.assemble_tcp_feilds()
print("ACK send to (IP,port):", address,
"with ack seq: ", ack_seq, " and seq: ", seq)
sock.sendto(tcp.raw, address)
if(not fin_flag):
counter = 0
seq += 1
# --------------------------------------------
print(fin_flag)
if(fin_flag):
break
savename = str(i+1)+"received.mp4"
f = open(savename, "wb")
f.write(recvdata)
f.close()
def init_new_dns_req(i):
# ---------------------
sock = socket.socket(socket.AF_INET,socket.SOCK_DGRAM)
oldmsg = msg = "dns google.com"
msg = msg.encode('utf-8')
tcp = tcppacket.TCPPacket(data=msg)
tcp.assemble_tcp_feilds()
sock.sendto(tcp.raw, (udp_host, udp_port))
# print("UDP target IP:", udp_host)
# print("UDP target Port:", udp_port)
while True:
data, address = sock.recvfrom(512*1024)
sock.connect(address)
s = struct.calcsize('!HHLLBBH')
unpackdata = struct.unpack('!HHLLBBH', data[:s])
msg = data[s:].decode('utf-8')
print(oldmsg,"is", msg)
if(unpackdata[5] % 2):
# fin_falg
fin_falg = 1
else:
fin_falg = 0
tcp = tcppacket.TCPPacket(
data="ACK".encode('utf-8'),
flags_ack=1,
flags_fin=fin_falg)
tcp.assemble_tcp_feilds()
print("ACK send to (IP,port):", address)
sock.sendto(tcp.raw, address)
if(fin_falg):
break
# ----------------------
# def init_new
threads = []
for i in range(1):
print("Demo calculation function")
threads.append(threading.Thread(target = init_new_calc_req, args = (i,)))
threads[-1].start()
for i in range(1):
threads.append(threading.Thread(target = init_new_dns_req, args = (i,)))
threads[-1].start()
for i in range(1):
threads.append(threading.Thread(target = init_new_videoreq_req, args = (i,)))
threads[-1].start() | [
"[email protected]"
] | |
21108a445d65be0ac0386514b2c345649b88fd66 | 32fb781cb6718ef90c4fdfba0469f9278380a256 | /AntShares/Core/Transaction.py | 83091faf4d380c5c037c959eb17dbfab2778feca | [
"MIT"
] | permissive | OTCGO/sync_antshares | 1dcd6f809518ff2f9f89047d7da5efedb14e8625 | 5724a5a820ec5f59e0c886a3c1646db2d07b4d78 | refs/heads/master | 2021-01-22T22:49:26.094742 | 2017-12-14T17:15:33 | 2017-12-14T17:15:33 | 85,582,206 | 10 | 3 | null | null | null | null | UTF-8 | Python | false | false | 2,899 | py | # -*- coding:utf-8 -*-
"""
Description:
Transaction Basic Class
Usage:
from AntShares.Core.Transaction import Transaction
"""
from AntShares.Core.AssetType import AssetType
from AntShares.Core.TransactionType import TransactionType
from AntShares.Helper import *
from AntShares.Fixed8 import Fixed8
from AntShares.Network.Inventory import Inventory
class Transaction(Inventory):
"""docstring for Transaction"""
def __init__(self, inputs, outputs, attributes):
super(Transaction, self).__init__()
self.inputs = inputs
self.outputs = outputs
self.attributes = attributes
self.scripts = []
self.TransactionType = TransactionType.ContractTransaction
self.InventoryType = 0x01 # InventoryType TX 0x01
self.systemFee = self.getSystemFee()
def getAllInputs(self):
return self.inputs
def getReference(self):
inputs = self.getAllInputs()
# TODO
# Blockchain.getTransaction
txs = [Blockchain.getTransaction(_input.prevHash) for _input in inputs]
if inputs == []:
raise Exception, 'No Inputs.'
else:
res = {}
for _input in inputs:
i = inputs.index(_input)
res.update({_input.toString(): txs[i].outputs[_input.prevIndex]})
return res
def getSystemFee(self):
return Fixed8(0)
def getScriptHashesForVerifying(self):
"""Get ScriptHash From SignatureContract"""
hashes = {}
result = self.getReference()
if result == None:
raise Exception, 'getReference None.'
for _input in self.inputs:
_hash = result.get(_input.toString()).scriptHash
hashes.update({_hash.toString(), _hash})
# TODO
# Blockchain.getTransaction
txs = [Blockchain.getTransaction(output.AssetId) for output in self.outputs]
for output in self.outputs:
tx = txs[self.outputs.index(output)]
if tx == None:
raise Exception, "Tx == None"
else:
if tx.AssetType & AssetType.DutyFlag:
hashes.update(output.ScriptHash.toString(), output.ScriptHash)
array = sorted(hashes.keys())
return array
def serialize(self, writer):
self.serializeUnsigned(writer)
writer.writeSerializableArray(self.scripts)
def serializeUnsigned(self, writer):
writer.writeByte(self.TransactionType)
writer.writeByte(0) #Version
self.serializeExclusiveData(writer)
writer.writeSerializableArray(self.attributes)
writer.writeSerializableArray(self.inputs)
writer.writeSerializableArray(self.outputs)
def serializeExclusiveData(self, writer):
# ReWrite in RegisterTransaction and IssueTransaction#
pass
| [
"[email protected]"
] | |
cc3e8e736099a578fdb1575de3070766cc9c13fb | e66770daf4d1679c735cfab1ac24dd1f5107bd83 | /Chapter06/Ch06_Code/GUI_multiple_threads_starting_a_thread.py | a33887dfb3fb91f64788072ca95a614df27e233a | [] | no_license | CodedQuen/Python-GUI-Programming-Cookbook | c038eb6cec4945ff4f2b09e1551f9db712dd2502 | f02b0f9916fb8272edc7ed4704eecce53ae0231c | refs/heads/master | 2022-05-27T19:35:35.004455 | 2020-05-05T01:00:51 | 2020-05-05T01:00:51 | 261,329,651 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,778 | py | '''
May 2017
@author: Burkhard A. Meier
'''
#======================
# imports
#======================
import tkinter as tk
from tkinter import ttk
from tkinter import scrolledtext
from tkinter import Menu
from tkinter import messagebox as msg
from tkinter import Spinbox
from time import sleep
import Ch04_Code.ToolTip as tt
from threading import Thread
GLOBAL_CONST = 42
#=====================================================
class OOP():
def __init__(self): # Initializer method
# Create instance
self.win = tk.Tk()
# Add a title
self.win.title("Python GUI")
self.create_widgets()
def method_in_a_thread(self):
print('Hi, how are you?')
for idx in range(10):
sleep(5)
self.scrol.insert(tk.INSERT, str(idx) + '\n')
# Running methods in Threads
def create_thread(self):
self.run_thread = Thread(target=self.method_in_a_thread)
self.run_thread.start() # start the thread
print(self.run_thread)
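    # Sketch of a more defensive variant (assumption, not part of the original
    # recipe): tkinter widgets are generally not thread-safe, so the worker could
    # put its output on a queue.Queue and the GUI thread could poll it:
    #
    #     from queue import Queue
    #     def create_thread(self):
    #         self.gui_queue = Queue()            # worker calls self.gui_queue.put(...)
    #         Thread(target=self.method_in_a_thread, daemon=True).start()
    #         self.win.after(200, self.poll_queue)
    #     def poll_queue(self):
    #         while not self.gui_queue.empty():
    #             self.scrol.insert(tk.INSERT, self.gui_queue.get())
    #         self.win.after(200, self.poll_queue)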
# Button callback
def click_me(self):
self.action.configure(text='Hello ' + self.name.get())
self.create_thread()
# Spinbox callback
def _spin(self):
value = self.spin.get()
self.scrol.insert(tk.INSERT, value + '\n')
# GUI Callback
def checkCallback(self, *ignored_args):
# only enable one checkbutton
if self.chVarUn.get(): self.check3.configure(state='disabled')
else: self.check3.configure(state='normal')
if self.chVarEn.get(): self.check2.configure(state='disabled')
else: self.check2.configure(state='normal')
# Radiobutton Callback
def radCall(self):
radSel = self.radVar.get()
if radSel == 0: self.mighty2.configure(text='Blue')
elif radSel == 1: self.mighty2.configure(text='Gold')
elif radSel == 2: self.mighty2.configure(text='Red')
# update progressbar in callback loop
def run_progressbar(self):
self.progress_bar["maximum"] = 100
for i in range(101):
sleep(0.05)
self.progress_bar["value"] = i # increment progressbar
self.progress_bar.update() # have to call update() in loop
self.progress_bar["value"] = 0 # reset/clear progressbar
def start_progressbar(self):
self.progress_bar.start()
def stop_progressbar(self):
self.progress_bar.stop()
def progressbar_stop_after(self, wait_ms=1000):
self.win.after(wait_ms, self.progress_bar.stop)
def usingGlobal(self):
global GLOBAL_CONST
GLOBAL_CONST = 777
# Exit GUI cleanly
def _quit(self):
self.win.quit()
self.win.destroy()
exit()
#####################################################################################
def create_widgets(self):
tabControl = ttk.Notebook(self.win) # Create Tab Control
tab1 = ttk.Frame(tabControl) # Create a tab
tabControl.add(tab1, text='Tab 1') # Add the tab
tab2 = ttk.Frame(tabControl) # Add a second tab
tabControl.add(tab2, text='Tab 2') # Make second tab visible
tabControl.pack(expand=1, fill="both") # Pack to make visible
# LabelFrame using tab1 as the parent
mighty = ttk.LabelFrame(tab1, text=' Mighty Python ')
mighty.grid(column=0, row=0, padx=8, pady=4)
# Modify adding a Label using mighty as the parent instead of win
a_label = ttk.Label(mighty, text="Enter a name:")
a_label.grid(column=0, row=0, sticky='W')
# Adding a Textbox Entry widget
self.name = tk.StringVar()
self.name_entered = ttk.Entry(mighty, width=24, textvariable=self.name)
self.name_entered.grid(column=0, row=1, sticky='W')
# Adding a Button
self.action = ttk.Button(mighty, text="Click Me!", command=self.click_me)
self.action.grid(column=2, row=1)
ttk.Label(mighty, text="Choose a number:").grid(column=1, row=0)
number = tk.StringVar()
self.number_chosen = ttk.Combobox(mighty, width=14, textvariable=number, state='readonly')
self.number_chosen['values'] = (1, 2, 4, 42, 100)
self.number_chosen.grid(column=1, row=1)
self.number_chosen.current(0)
# Adding a Spinbox widget
self.spin = Spinbox(mighty, values=(1, 2, 4, 42, 100), width=5, bd=9, command=self._spin) # using range
self.spin.grid(column=0, row=2, sticky='W') # align left
# Using a scrolled Text control
scrol_w = 40; scrol_h = 10 # increase sizes
self.scrol = scrolledtext.ScrolledText(mighty, width=scrol_w, height=scrol_h, wrap=tk.WORD)
self.scrol.grid(column=0, row=3, sticky='WE', columnspan=3)
for child in mighty.winfo_children(): # add spacing to align widgets within tabs
child.grid_configure(padx=4, pady=2)
#=====================================================================================
# Tab Control 2 ----------------------------------------------------------------------
self.mighty2 = ttk.LabelFrame(tab2, text=' The Snake ')
self.mighty2.grid(column=0, row=0, padx=8, pady=4)
# Creating three checkbuttons
chVarDis = tk.IntVar()
check1 = tk.Checkbutton(self.mighty2, text="Disabled", variable=chVarDis, state='disabled')
check1.select()
check1.grid(column=0, row=0, sticky=tk.W)
        self.chVarUn = tk.IntVar()
        self.check2 = tk.Checkbutton(self.mighty2, text="UnChecked", variable=self.chVarUn)
        self.check2.deselect()
        self.check2.grid(column=1, row=0, sticky=tk.W)
        self.chVarEn = tk.IntVar()
        self.check3 = tk.Checkbutton(self.mighty2, text="Enabled", variable=self.chVarEn)
        self.check3.deselect()
        self.check3.grid(column=2, row=0, sticky=tk.W)
        # trace the state of the two checkbuttons (stored on self so that
        # checkCallback() can enable/disable them)
        self.chVarUn.trace('w', lambda unused0, unused1, unused2 : self.checkCallback())
        self.chVarEn.trace('w', lambda unused0, unused1, unused2 : self.checkCallback())
# First, we change our Radiobutton global variables into a list
colors = ["Blue", "Gold", "Red"]
# create three Radiobuttons using one variable
self.radVar = tk.IntVar()
# Next we are selecting a non-existing index value for radVar
self.radVar.set(99)
# Now we are creating all three Radiobutton widgets within one loop
for col in range(3):
curRad = tk.Radiobutton(self.mighty2, text=colors[col], variable=self.radVar,
value=col, command=self.radCall)
curRad.grid(column=col, row=1, sticky=tk.W) # row=6
# And now adding tooltips
tt.create_ToolTip(curRad, 'This is a Radiobutton control')
# Add a Progressbar to Tab 2
self.progress_bar = ttk.Progressbar(tab2, orient='horizontal', length=286, mode='determinate')
self.progress_bar.grid(column=0, row=3, pady=2)
# Create a container to hold buttons
buttons_frame = ttk.LabelFrame(self.mighty2, text=' ProgressBar ')
buttons_frame.grid(column=0, row=2, sticky='W', columnspan=2)
# Add Buttons for Progressbar commands
ttk.Button(buttons_frame, text=" Run Progressbar ", command=self.run_progressbar).grid(column=0, row=0, sticky='W')
ttk.Button(buttons_frame, text=" Start Progressbar ", command=self.start_progressbar).grid(column=0, row=1, sticky='W')
ttk.Button(buttons_frame, text=" Stop immediately ", command=self.stop_progressbar).grid(column=0, row=2, sticky='W')
ttk.Button(buttons_frame, text=" Stop after second ", command=self.progressbar_stop_after).grid(column=0, row=3, sticky='W')
for child in buttons_frame.winfo_children():
child.grid_configure(padx=2, pady=2)
for child in self.mighty2.winfo_children():
child.grid_configure(padx=8, pady=2)
# Creating a Menu Bar
menu_bar = Menu(self.win)
self.win.config(menu=menu_bar)
# Add menu items
file_menu = Menu(menu_bar, tearoff=0)
file_menu.add_command(label="New")
file_menu.add_separator()
file_menu.add_command(label="Exit", command=self._quit)
menu_bar.add_cascade(label="File", menu=file_menu)
# Display a Message Box
def _msgBox():
msg.showinfo('Python Message Info Box', 'A Python GUI created using tkinter:\nThe year is 2017.')
# Add another Menu to the Menu Bar and an item
help_menu = Menu(menu_bar, tearoff=0)
help_menu.add_command(label="About", command=_msgBox) # display messagebox when clicked
menu_bar.add_cascade(label="Help", menu=help_menu)
# Change the main windows icon
self.win.iconbitmap('pyc.ico')
# It is not necessary to create a tk.StringVar()
# strData = tk.StringVar()
strData = self.spin.get()
# call function
self.usingGlobal()
self.name_entered.focus()
# Add Tooltips -----------------------------------------------------
# Add a Tooltip to the Spinbox
tt.create_ToolTip(self.spin, 'This is a Spinbox control')
# Add Tooltips to more widgets
tt.create_ToolTip(self.name_entered, 'This is an Entry control')
tt.create_ToolTip(self.action, 'This is a Button control')
tt.create_ToolTip(self.scrol, 'This is a ScrolledText control')
#======================
# Start GUI
#======================
oop = OOP()
oop.win.mainloop()
| [
"[email protected]"
] | |
22b06f917a2e60d9e5443d0a32cf7b4cb27e71c3 | 50f42e142c7b989afc9bc9d9fd53515923aceb56 | /ML_practice/test_field.py | 824433785cb92c1abe62e59a015e4140ff9a6c0c | [] | no_license | shincling/MyCommon | 7d02da4408f1ab0acf883845cbb8b8e54e364076 | ae362fdef8d51c808645f7827a86e43d07db6e0f | refs/heads/master | 2021-01-17T04:10:57.546936 | 2018-11-06T13:17:27 | 2018-11-06T13:17:27 | 45,384,609 | 2 | 2 | null | null | null | null | UTF-8 | Python | false | false | 265 | py | def f():
print "Before first yield"
yield 1
print "Before second yield"
yield 2
print "After second yield"
g = f()
# g.next()
# g.next()
print "Before first next"
g.next()
print "Before second next"
g.next()
print "Before third yield"
g.next() | [
"[email protected]"
] | |
b77f6cb4c37844887fe9769139d3b4cf953d6420 | 993ef8924418866f932396a58e3ad0c2a940ddd3 | /Production/python/PrivateSamples/EMJ_UL18_mMed-1000_mDark-20_ctau-500_unflavored-down_cff.py | a689762b309b5edaf406483255496d14a1d0b40b | [] | no_license | TreeMaker/TreeMaker | 48d81f6c95a17828dbb599d29c15137cd6ef009a | 15dd7fe9e9e6f97d9e52614c900c27d200a6c45f | refs/heads/Run2_UL | 2023-07-07T15:04:56.672709 | 2023-07-03T16:43:17 | 2023-07-03T16:43:17 | 29,192,343 | 16 | 92 | null | 2023-07-03T16:43:28 | 2015-01-13T13:59:30 | Python | UTF-8 | Python | false | false | 1,981 | py | import FWCore.ParameterSet.Config as cms
maxEvents = cms.untracked.PSet( input = cms.untracked.int32(-1) )
readFiles = cms.untracked.vstring()
secFiles = cms.untracked.vstring()
source = cms.Source ("PoolSource",fileNames = readFiles, secondaryFileNames = secFiles)
readFiles.extend( [
'root://cmseos.fnal.gov///store/group/lpcsusyhad/ExoEMJAnalysis2020/Signal.Oct.2021/UL18/step4_MINIAODv2_mMed-1000_mDark-20_ctau-500_unflavored-down_n-500_part-1.root',
'root://cmseos.fnal.gov///store/group/lpcsusyhad/ExoEMJAnalysis2020/Signal.Oct.2021/UL18/step4_MINIAODv2_mMed-1000_mDark-20_ctau-500_unflavored-down_n-500_part-10.root',
'root://cmseos.fnal.gov///store/group/lpcsusyhad/ExoEMJAnalysis2020/Signal.Oct.2021/UL18/step4_MINIAODv2_mMed-1000_mDark-20_ctau-500_unflavored-down_n-500_part-2.root',
'root://cmseos.fnal.gov///store/group/lpcsusyhad/ExoEMJAnalysis2020/Signal.Oct.2021/UL18/step4_MINIAODv2_mMed-1000_mDark-20_ctau-500_unflavored-down_n-500_part-3.root',
'root://cmseos.fnal.gov///store/group/lpcsusyhad/ExoEMJAnalysis2020/Signal.Oct.2021/UL18/step4_MINIAODv2_mMed-1000_mDark-20_ctau-500_unflavored-down_n-500_part-4.root',
'root://cmseos.fnal.gov///store/group/lpcsusyhad/ExoEMJAnalysis2020/Signal.Oct.2021/UL18/step4_MINIAODv2_mMed-1000_mDark-20_ctau-500_unflavored-down_n-500_part-5.root',
'root://cmseos.fnal.gov///store/group/lpcsusyhad/ExoEMJAnalysis2020/Signal.Oct.2021/UL18/step4_MINIAODv2_mMed-1000_mDark-20_ctau-500_unflavored-down_n-500_part-6.root',
'root://cmseos.fnal.gov///store/group/lpcsusyhad/ExoEMJAnalysis2020/Signal.Oct.2021/UL18/step4_MINIAODv2_mMed-1000_mDark-20_ctau-500_unflavored-down_n-500_part-7.root',
'root://cmseos.fnal.gov///store/group/lpcsusyhad/ExoEMJAnalysis2020/Signal.Oct.2021/UL18/step4_MINIAODv2_mMed-1000_mDark-20_ctau-500_unflavored-down_n-500_part-8.root',
'root://cmseos.fnal.gov///store/group/lpcsusyhad/ExoEMJAnalysis2020/Signal.Oct.2021/UL18/step4_MINIAODv2_mMed-1000_mDark-20_ctau-500_unflavored-down_n-500_part-9.root',
] )
| [
"[email protected]"
] | |
bb7711a1d9a0542bf0147818f036a11eb8eb630f | 5f65e12a62b59aea9263f35240c960b7e6009aa5 | /cb_scripts/game_on.py | 5cd86411560201702863ecffeab27460f20cfee6 | [
"MIT"
] | permissive | christopher-burke/python-scripts | 23e80b8e7f26a74ab68dc7d0ad1a8093d900cf8b | f5dceca0bdbe9de6197b26858600b792f6adff8a | refs/heads/main | 2022-05-20T01:36:04.668447 | 2022-04-25T20:31:33 | 2022-04-25T20:31:33 | 6,054,247 | 1 | 1 | MIT | 2022-03-16T02:24:45 | 2012-10-03T01:49:53 | Python | UTF-8 | Python | false | false | 1,233 | py | #!/usr/bin/env python3
"""Game on.
Games won tracker.
"""
from dataclasses import dataclass, asdict
import json
import sys
from datetime import date
@dataclass
class Player:
"""Player dataclass."""
name: str
@dataclass
class Match:
"""Match dataclass."""
game: str
date: date = date.today().__str__()
@dataclass
class Results:
"""Results dataclass."""
match: Match
player: Player
wins: int = 0
losses: int = 0
def load():
"""Load data from json file."""
with open('game_on.json') as json_file:
data = json.load(json_file)
return data
def write(data, *args, **kwargs):
"""Write data to the json file."""
with open('game_on.json', 'w') as json_file:
json.dump(data, json_file)
return True
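# Illustrative round trip (assumes game_on.json is writable in the working directory):
#   write({'result': []})
#   load()   # -> {'result': []}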
def main():
"""Game on main funtion."""
pass
if __name__ == "__main__":
    if len(sys.argv) < 1:
exit(0)
match = Match('Name') # -g "Name"
p1 = Player('Player 1') # -p1 "Name"
p2 = Player('Player 2') # -p1 "Name"
r1 = Results(match, p1, 2) # -r1 2
r2 = Results(match, p2, 12) # -r2 2
r1.losses = r2.wins
r2.losses = r1.wins
data = {}
data['result'] = [asdict(r1), asdict(r2)]
| [
"[email protected]"
] | |
9f1faec8e0731fbad823f5000c61ae7553ec1af1 | 9083d620ec89d3c85f4270fd724010c20799368e | /app/admin.py | a6361094fdf44cebc131a84ddfb668ce2f22b52a | [] | no_license | riyadhswe/CovidHelp | e122aa1fefacb985c862e758a3021af4af08712e | 5e004739ec3facebbccdf0e9e46f96d3c01b2bb6 | refs/heads/master | 2023-08-14T04:14:37.458150 | 2021-10-10T05:02:23 | 2021-10-10T05:02:23 | 370,762,838 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 203 | py | from django.contrib import admin
from app.models import *
# Register your models here.
admin.site.register(Division)
admin.site.register(City)
admin.site.register(Hospital)
admin.site.register(Service)
| [
"[email protected]"
] | |
f0da7aa51ef368c2762cf0033e027208273b4603 | 41188a72facc51c65d0d58efe127f5e8c8811f5e | /0046. Permutations/Solution.py | 76886c436d83c51c92b29bc0f627d71268d88c1c | [
"MIT"
] | permissive | furutuki/LeetCodeSolution | 74ccebc8335125bbc4cbf1a76eb8d4281802f5b9 | 089d27af04bf81149251787409d1866c7c4390fb | refs/heads/master | 2022-10-31T08:46:15.124759 | 2022-10-25T02:57:54 | 2022-10-25T02:57:54 | 168,449,346 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 425 | py | from typing import List
class Solution:
def permute(self, nums: List[int]) -> List[List[int]]:
def dfs(num: List[int], cur_res: List[int]):
if not num:
ret.append(cur_res)
return
else:
for i in range(len(num)):
dfs(num[:i] + num[i + 1:], cur_res + [num[i]])
ret = []
dfs(nums, [])
return ret
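# Illustrative check: Solution().permute([1, 2, 3]) yields all 3! = 6 orderings,
# [[1, 2, 3], [1, 3, 2], [2, 1, 3], [2, 3, 1], [3, 1, 2], [3, 2, 1]].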
| [
"[email protected]"
] | |
32d2b673b4421719313ac17c64560921dade7d60 | 2b8d4e22d10ca118fba0100cc87af04f3939448f | /ioud10/ioud_sale_order/__manifest__.py | 22982bed7d88a58ac835d123e58c4e47090afaf9 | [] | no_license | ahmed-amine-ellouze/personal | f10c0a161da709f689a3254ec20486411102a92d | 4fe19ca76523cf274a3a85c8bcad653100ff556f | refs/heads/master | 2023-03-28T23:17:05.402578 | 2021-03-25T13:33:18 | 2021-03-25T13:33:18 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,047 | py | # -*- coding: utf-8 -*-
{
'name': "ioud_sale_order",
'summary': """
This module is for customization of sale for iOud """,
'description': """
This module is for customization of sale for iOud
""",
'author': "SolutionFounder",
'website': "http://www.solutionfounder.com",
# for the full list
'category': 'sale',
'version': '10.4.18',
# any module necessary for this one to work correctly
'depends': ['base','sale','mail','ioud_email_alerts','account_reports','delivery'],
# always loaded
'data': [
'data/partner_sequnce.xml',
'security/user_groups.xml',
'security/ir.model.access.csv',
'views/branches.xml',
'views/account_invoice_view.xml',
'views/sale_order_view.xml',
'views/res_partner_view.xml',
'views/region_config_view.xml',
'views/config.xml',
'views/stcok.xml',
#Backend View Load - JS
'views/assets.xml'
],
# only loaded in demonstration mode
}
| [
"[email protected]"
] | |
193122adf0ef9170907c47e035ebe8434d378807 | e3910a25ca4456a35112d41f184fe2a919214ac0 | /reservation/migrations/0003_auto_20160310_2101.py | 4bd2beed2f08e5987ae67f1bc5dbe13adea43864 | [] | no_license | RobertPastor/studio_reservation | a498f1ae2077bb21199651d245f22cb59ef13370 | 63a47de856cc1d5aedbd4024d8696b39470d11f2 | refs/heads/master | 2021-01-10T16:13:32.935529 | 2018-01-28T14:19:28 | 2018-01-28T14:19:28 | 54,514,678 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 658 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.9.3 on 2016-03-10 20:01
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('reservation', '0002_reservation_made_when'),
]
operations = [
migrations.AlterField(
model_name='reservation',
name='made_by',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
),
migrations.DeleteModel(
name='Guest',
),
]
| [
"[email protected]"
] | |
fad28a7559308bee0c5acdfc8681f51b8076f9be | c101c4f7dfdb4492a380e7564beaf2892c9ae527 | /modules/s3/s3resource.py | f30e194ceb9f99545db7f694b1314d6241141f10 | [
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | somayjain/eden | 0137b4c89f04ae35b431881d27a175deb4b31ebb | d401e20a7512e7b7781f16a13503bbd984bf2dbb | refs/heads/master | 2021-01-18T11:56:50.477613 | 2014-11-16T11:34:27 | 2014-11-20T12:34:21 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 210,832 | py | # -*- coding: utf-8 -*-
""" S3 Resources
@copyright: 2009-2014 (c) Sahana Software Foundation
@license: MIT
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without
restriction, including without limitation the rights to use,
copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
@group Resource API: S3Resource,
@group Filter API: S3ResourceFilter
@group Helper Classes: S3RecordMerger
"""
__all__ = ("S3AxisFilter",
"S3Resource",
"S3ResourceFilter",
)
import datetime
import sys
from itertools import chain
try:
from cStringIO import StringIO # Faster, where available
except:
from StringIO import StringIO
try:
from lxml import etree
except ImportError:
print >> sys.stderr, "ERROR: lxml module needed for XML handling"
raise
try:
import json # try stdlib (Python 2.6)
except ImportError:
try:
import simplejson as json # try external module
except:
import gluon.contrib.simplejson as json # fallback to pure-Python module
from gluon import current
from gluon.html import A, TAG
from gluon.http import HTTP
from gluon.validators import IS_EMPTY_OR
try:
from gluon.dal import Field
from gluon.dal.objects import Row, Rows, Table, Expression
except ImportError:
# old web2py
from gluon.dal import Row, Rows, Table, Field, Expression
from gluon.storage import Storage
from gluon.tools import callback
from s3data import S3DataTable, S3DataList, S3PivotTable
from s3fields import S3Represent, s3_all_meta_field_names
from s3query import FS, S3ResourceField, S3ResourceQuery, S3Joins, S3URLQuery
from s3utils import s3_has_foreign_key, s3_get_foreign_key, s3_unicode, s3_get_last_record_id, s3_remove_last_record_id
from s3validators import IS_ONE_OF
from s3xml import S3XMLFormat
DEBUG = False
if DEBUG:
print >> sys.stderr, "S3Resource: DEBUG MODE"
def _debug(m):
print >> sys.stderr, m
else:
_debug = lambda m: None
osetattr = object.__setattr__
ogetattr = object.__getattribute__
MAXDEPTH = 10
# Compact JSON encoding
#SEPARATORS = (",", ":")
# =============================================================================
class S3Resource(object):
"""
API for resources.
A "resource" is a set of records in a database table including their
references in certain related resources (components). A resource can
be defined like:
resource = S3Resource(table)
A resource defined like this would include all records in the table.
Further parameters for the resource constructor as well as methods
of the resource instance can be used to filter for particular subsets.
This API provides extended standard methods to access and manipulate
data in resources while respecting current authorization and other
S3 framework rules.
"""
def __init__(self, tablename,
id=None,
prefix=None,
uid=None,
filter=None,
vars=None,
parent=None,
linked=None,
linktable=None,
alias=None,
components=None,
filter_component=None,
include_deleted=False,
approved=True,
unapproved=False,
context=False):
"""
Constructor
@param tablename: tablename, Table, or an S3Resource instance
@param prefix: prefix to use for the tablename
@param id: record ID (or list of record IDs)
@param uid: record UID (or list of record UIDs)
@param filter: filter query
@param vars: dictionary of URL query variables
@param components: list of component aliases
to load for this resource
@param filter_component: alias of the component the URL filters
apply for (filters for this component
must be handled separately)
@param alias: the alias for this resource (internal use only)
@param parent: the parent resource (internal use only)
@param linked: the linked resource (internal use only)
@param linktable: the link table (internal use only)
@param include_deleted: include deleted records (used for
synchronization)
@param approved: include approved records
@param unapproved: include unapproved records
@param context: apply context filters
"""
s3db = current.s3db
auth = current.auth
# Names ---------------------------------------------------------------
self.table = None
self._alias = None
if prefix is None:
if not isinstance(tablename, basestring):
if isinstance(tablename, Table):
self.table = tablename
self._alias = self.table._tablename
tablename = self._alias
elif isinstance(tablename, S3Resource):
self.table = tablename.table
self._alias = self.table._tablename
tablename = tablename.tablename
else:
error = "%s is not a valid type for a tablename" % tablename
raise SyntaxError(error)
if "_" in tablename:
prefix, name = tablename.split("_", 1)
else:
raise SyntaxError("invalid tablename: %s" % tablename)
else:
name = tablename
tablename = "%s_%s" % (prefix, name)
self.prefix = prefix
""" Module prefix of the tablename """
self.name = name
""" Tablename without module prefix """
self.tablename = tablename
""" Tablename """
self.alias = alias or name
"""
Alias of the resource, defaults to tablename
without module prefix
"""
# Table ---------------------------------------------------------------
if self.table is None:
self.table = s3db[tablename]
table = self.table
# Set default approver
auth.permission.set_default_approver(table)
if not self._alias:
self._alias = tablename
""" Table alias (the tablename used in joins/queries) """
if parent is not None:
if parent.tablename == self.tablename:
alias = "%s_%s_%s" % (prefix, self.alias, name)
pkey = table._id.name
table = table = table.with_alias(alias)
table._id = table[pkey]
self._alias = alias
self.table = table
self.fields = table.fields
self._id = table._id
# Hooks ---------------------------------------------------------------
# Authorization hooks
self.accessible_query = auth.s3_accessible_query
# Filter --------------------------------------------------------------
# Default query options
self.include_deleted = include_deleted
self._approved = approved
self._unapproved = unapproved
# Component Filter
self.filter = None
# Resource Filter
self.rfilter = None
# Rows ----------------------------------------------------------------
self._rows = None
self._rowindex = None
self.rfields = None
self.dfields = None
self._ids = []
self._uids = []
self._length = None
# Request attributes --------------------------------------------------
self.vars = None # set during build_query
self.lastid = None
self.files = Storage()
# Components ----------------------------------------------------------
# Initialize component properties (will be set during _attach)
self.link = None
self.linktable = None
self.actuate = None
self.lkey = None
self.rkey = None
self.pkey = None
self.fkey = None
self.multiple = True
self.parent = parent # the parent resource
self.linked = linked # the linked resource
self.components = Storage()
self.links = Storage()
if parent is None:
# This is the master resource - attach components
attach = self._attach
hooks = s3db.get_components(table, names=components)
[attach(alias, hooks[alias]) for alias in hooks]
# Build query
self.build_query(id=id,
uid=uid,
filter=filter,
vars=vars,
filter_component=filter_component)
if context:
self.add_filter(s3db.context)
# Component - attach link table
elif linktable is not None:
# This is link-table component - attach the link table
self.link = S3Resource(linktable,
parent=self.parent,
linked=self,
include_deleted=self.include_deleted,
approved=self._approved,
unapproved=self._unapproved)
# Export and Import ---------------------------------------------------
# Pending Imports
self.skip_import = False
self.job = None
self.mtime = None
self.error = None
self.error_tree = None
self.import_count = 0
self.import_created = []
self.import_updated = []
self.import_deleted = []
# Export meta data
self.muntil = None # latest mtime of the exported records
self.results = None # number of exported records
# Standard methods ----------------------------------------------------
# CRUD
from s3crud import S3CRUD
self.crud = S3CRUD()
self.crud.resource = self
# -------------------------------------------------------------------------
def _attach(self, alias, hook):
"""
Attach a component
@param alias: the alias
@param hook: the hook
"""
if alias is not None and hook.filterby is not None:
table_alias = "%s_%s_%s" % (hook.prefix,
hook.alias,
hook.name)
table = hook.table.with_alias(table_alias)
table._id = table[table._id.name]
hook.table = table
else:
table_alias = None
# Create as resource
component = S3Resource(hook.table,
parent=self,
alias=alias,
linktable=hook.linktable,
include_deleted=self.include_deleted,
approved=self._approved,
unapproved=self._unapproved)
if table_alias:
component.tablename = hook.tablename
component._alias = table_alias
# Update component properties
component.pkey = hook.pkey
component.fkey = hook.fkey
component.linktable = hook.linktable
component.lkey = hook.lkey
component.rkey = hook.rkey
component.actuate = hook.actuate
component.autodelete = hook.autodelete
component.autocomplete = hook.autocomplete
component.alias = alias
component.multiple = hook.multiple
component.values = hook.values
if hook.filterby is not None:
filterfor = hook.filterfor
is_list = isinstance(filterfor, (tuple, list))
if is_list and len(filterfor) == 1:
is_list = False
filterfor = filterfor[0]
if not is_list:
component.filter = (hook.table[hook.filterby] == filterfor)
elif filterfor:
component.filter = (hook.table[hook.filterby].belongs(filterfor))
else:
component.filter = None
else:
component.filter = None
# Copy properties to the link
if component.link is not None:
link = component.link
link.pkey = component.pkey
link.fkey = component.lkey
link.actuate = component.actuate
link.autodelete = component.autodelete
link.multiple = component.multiple
# @todo: possible ambiguity if the same link is used
# in multiple components (e.g. filtered or 3-way),
# need a better aliasing mechanism here
self.links[link.name] = link
self.components[alias] = component
return
# -------------------------------------------------------------------------
# Query handling
# -------------------------------------------------------------------------
def build_query(self,
id=None,
uid=None,
filter=None,
vars=None,
filter_component=None):
"""
Query builder
@param id: record ID or list of record IDs to include
@param uid: record UID or list of record UIDs to include
@param filter: filtering query (DAL only)
@param vars: dict of URL query variables
@param filter_component: the alias of the component the URL
filters apply for (filters for this
component must be handled separately)
"""
# Reset the rows counter
self._length = None
self.rfilter = S3ResourceFilter(self,
id=id,
uid=uid,
filter=filter,
vars=vars,
filter_component=filter_component)
return self.rfilter
# -------------------------------------------------------------------------
def add_filter(self, f=None, c=None):
"""
Extend the current resource filter
@param f: a Query or a S3ResourceQuery instance
@param c: alias of the component this filter concerns,
automatically adds the respective component join
(not needed for S3ResourceQuery instances)
"""
if f is None:
return
self.clear()
if self.rfilter is None:
self.rfilter = S3ResourceFilter(self)
self.rfilter.add_filter(f, component=c)
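    # A minimal sketch of extending the filter (the field name "name" is an
    # assumed example; FS is the field-selector helper already used elsewhere
    # in this module):
    #
    #   resource.add_filter(FS("name") == "Example")         # S3ResourceQuery
    #   resource.add_filter(resource.table.deleted != True)  # plain DAL Query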
# -------------------------------------------------------------------------
def add_component_filter(self, alias, f=None):
"""
            Extend the resource filter of a particular component; this does
            not affect the master resource filter (as opposed to add_filter)
@param alias: the alias of the component
@param f: a Query or a S3ResourceQuery instance
"""
if f is None:
return
if self.rfilter is None:
self.rfilter = S3ResourceFilter(self)
self.rfilter.add_filter(f, component=alias, master=False)
# -------------------------------------------------------------------------
def get_query(self):
""" Get the effective query """
if self.rfilter is None:
self.build_query()
return self.rfilter.get_query()
# -------------------------------------------------------------------------
def get_filter(self):
""" Get the effective virtual fields filter """
if self.rfilter is None:
self.build_query()
return self.rfilter.get_filter()
# -------------------------------------------------------------------------
def clear_query(self):
""" Removes the current query (does not remove the set!) """
self.rfilter = None
components = self.components
if components:
for c in components:
components[c].clear_query()
# -------------------------------------------------------------------------
# Data access (new API)
# -------------------------------------------------------------------------
def count(self, left=None, distinct=False):
"""
Get the total number of available records in this resource
@param left: left outer joins, if required
@param distinct: only count distinct rows
"""
if self.rfilter is None:
self.build_query()
if self._length is None:
self._length = self.rfilter.count(left=left,
distinct=distinct)
return self._length
# -------------------------------------------------------------------------
def select(self,
fields,
start=0,
limit=None,
left=None,
orderby=None,
groupby=None,
distinct=False,
virtual=True,
count=False,
getids=False,
as_rows=False,
represent=False,
show_links=True,
raw_data=False):
"""
Extract data from this resource
@param fields: the fields to extract (selector strings)
@param start: index of the first record
@param limit: maximum number of records
@param left: additional left joins required for filters
@param orderby: orderby-expression for DAL
@param groupby: fields to group by (overrides fields!)
@param distinct: select distinct rows
@param virtual: include mandatory virtual fields
@param count: include the total number of matching records
@param getids: include the IDs of all matching records
@param as_rows: return the rows (don't extract)
@param represent: render field value representations
@param raw_data: include raw data in the result
"""
data = S3ResourceData(self,
fields,
start=start,
limit=limit,
left=left,
orderby=orderby,
groupby=groupby,
distinct=distinct,
virtual=virtual,
count=count,
getids=getids,
as_rows=as_rows,
represent=represent,
show_links=show_links,
raw_data=raw_data)
if as_rows:
return data.rows
else:
return data
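    # Hedged example of extracting represented data (the field selectors are
    # assumed examples):
    #
    #   data = resource.select(["name", "modified_on"],
    #                          limit=25, represent=True, count=True)
    #   rows, numrows = data["rows"], data["numrows"]
    #
    # The same dict-style access ("rows", "numrows", "rfields", "ids") is
    # used by datatable() and datalist() below.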
# -------------------------------------------------------------------------
def insert(self, **fields):
"""
Insert a record into this resource
@param fields: dict of field/value pairs to insert
"""
# Check permission
authorised = current.auth.s3_has_permission("create", self.tablename)
if not authorised:
raise IOError("Operation not permitted: INSERT INTO %s" %
self.tablename)
# Insert new record
record_id = self.table.insert(**fields)
# Audit
if record_id:
record = Storage(fields).update(id=record_id)
current.audit("create", self.prefix, self.name, form=record)
return record_id
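    # Sketch (the field name is an assumed example):
    #
    #   record_id = resource.insert(name="Example")
    #
    # Raises IOError if the current user lacks "create" permission.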
# -------------------------------------------------------------------------
def update(self):
raise NotImplementedError
# -------------------------------------------------------------------------
def delete(self,
format=None,
cascade=False,
replaced_by=None):
"""
Delete all (deletable) records in this resource
@param format: the representation format of the request (optional)
@param cascade: this is a cascade delete (prevents rollbacks/commits)
@param replaced_by: used by record merger
@return: number of records deleted
"""
s3db = current.s3db
# Reset error
self.error = None
table = self.table
get_config = self.get_config
pkey = self._id.name
# Determine relevant fields
fields = [pkey]
add_field = fields.append
supertables = get_config("super_entity")
if supertables:
# Add super-keys (avoids reloading in delete_super)
if not isinstance(supertables, (list, tuple)):
supertables = [supertables]
for sname in supertables:
stable = s3db.table(sname) \
if isinstance(sname, str) else sname
if stable is None:
continue
key = stable._id.name
if key in table.fields:
add_field(key)
if "uuid" in table.fields:
add_field("uuid")
# Get all rows
rows = self.select(fields, as_rows=True)
if not rows:
# No rows? => that was it already :)
return 0
numrows = 0
db = current.db
has_permission = current.auth.s3_has_permission
audit = current.audit
prefix = self.prefix
name = self.name
define_resource = s3db.resource
delete_super = s3db.delete_super
DELETED = current.xml.DELETED
INTEGRITY_ERROR = current.ERROR.INTEGRITY_ERROR
tablename = self.tablename
if current.deployment_settings.get_security_archive_not_delete() and \
DELETED in table:
# Find all references
if not cascade:
# Must load all models to detect dependencies
s3db.load_all_models()
if db._lazy_tables:
# Must roll out all lazy tables to detect dependencies
for tn in db._LAZY_TABLES.keys():
db[tn]
references = table._referenced_by
try:
rfields = [f for f in references if f.ondelete == "RESTRICT"]
except AttributeError:
# older web2py
references = [db[tn][fn] for tn, fn in references]
rfields = [f for f in references if f.ondelete == "RESTRICT"]
# Determine deletable rows
deletable = set(row[pkey] for row in rows)
for rfield in rfields:
if deletable:
fn, tn = rfield.name, rfield.tablename
rtable = db[tn]
query = (rfield.belongs(deletable))
if tn == self.tablename:
query &= (rfield != rtable._id)
if DELETED in rtable:
query &= (rtable[DELETED] != True)
rrows = db(query).select(rfield)
for rrow in rrows:
deletable.discard(rrow[fn])
# Get custom ondelete-cascade
ondelete_cascade = get_config("ondelete_cascade")
for row in rows:
record_id = row[pkey]
# Check permission to delete this record
if not has_permission("delete", table, record_id=record_id):
continue
error = self.error
self.error = None
# Run custom ondelete_cascade first
if ondelete_cascade:
try:
callback(ondelete_cascade, row, tablename=tablename)
except:
# Custom RESTRICT or cascade failure: row not deletable
continue
if record_id not in deletable:
# Check deletability again
restricted = False
for rfield in rfields:
fn, tn = rfield.name, rfield.tablename
rtable = db[tn]
#rfield = rtable[fn]
query = (rfield == record_id)
if tn == self.tablename:
query &= (rfield != rtable._id)
if DELETED in rtable:
query &= (rtable[DELETED] != True)
rrow = db(query).select(rfield,
limitby=(0, 1)).first()
if rrow:
restricted = True
break
if not restricted:
deletable.add(record_id)
if record_id not in deletable:
# Row is not deletable
self.error = INTEGRITY_ERROR
continue
# Run automatic ondelete-cascade
for rfield in references:
fn, tn = rfield.name, rfield.tablename
rtable = db[tn]
query = (rfield == record_id)
if tn == self.tablename:
query &= (rfield != rtable._id)
if rfield.ondelete == "CASCADE":
rresource = define_resource(tn,
filter=query,
unapproved=True)
rresource.delete(cascade=True)
if rresource.error:
self.error = rresource.error
break
elif rfield.ondelete == "SET NULL":
try:
db(query).update(**{fn:None})
except:
self.error = INTEGRITY_ERROR
break
elif rfield.ondelete == "SET DEFAULT":
try:
db(query).update(**{fn:rfield.default})
except:
self.error = INTEGRITY_ERROR
break
# Unlink all super-records
if not self.error and not delete_super(table, row):
self.error = INTEGRITY_ERROR
if self.error:
# Error in deletion cascade: roll back + skip row
if not cascade:
db.rollback()
continue
else:
# Auto-delete linked records if this was the last link
linked = self.linked
if linked and self.autodelete and linked.autodelete:
rkey = linked.rkey
fkey = linked.fkey
if rkey in table:
query = (table._id == record_id)
this = db(query).select(table._id,
table[rkey],
limitby=(0, 1)).first()
query = (table._id != this[pkey]) & \
(table[rkey] == this[rkey])
if DELETED in table:
query &= (table[DELETED] != True)
remaining = db(query).select(table._id,
limitby=(0, 1)).first()
if not remaining:
linked_table = s3db.table(linked.tablename)
query = (linked_table[fkey] == this[rkey])
linked = define_resource(linked_table,
filter=query,
unapproved=True)
linked.delete(cascade=True)
# Pull back prior error status
self.error = error
error = None
# "Park" foreign keys to resolve constraints, "un-delete"
# would then restore any still-valid FKs from this field!
fields = dict(deleted=True)
if "deleted_fk" in table:
record = table[record_id]
fk = {}
for f in table.fields:
if record[f] is not None and \
s3_has_foreign_key(table[f]):
fk[f] = record[f]
fields[f] = None
else:
continue
if fk:
fields.update(deleted_fk=json.dumps(fk))
# Annotate the replacement record
idstr = str(record_id)
if replaced_by and idstr in replaced_by and \
"deleted_rb" in table.fields:
fields.update(deleted_rb=replaced_by[idstr])
# Update the row, finally
db(table._id == record_id).update(**fields)
numrows += 1
# Clear session
if s3_get_last_record_id(tablename) == record_id:
s3_remove_last_record_id(tablename)
# Audit
audit("delete", prefix, name,
record=record_id, representation=format)
# On-delete hook
ondelete = get_config("ondelete")
if ondelete:
callback(ondelete, row)
# Commit after each row to not have it rolled back by
# subsequent cascade errors
if not cascade:
db.commit()
else:
# Hard delete
for row in rows:
record_id = row[pkey]
# Check permission to delete this row
if not has_permission("delete", table, record_id=record_id):
continue
# @ToDo: ondelete_cascade?
# Delete super-entity
success = delete_super(table, row)
if not success:
self.error = INTEGRITY_ERROR
continue
# Delete the row
try:
del table[record_id]
except:
# Row is not deletable
self.error = INTEGRITY_ERROR
continue
else:
# Successfully deleted
numrows += 1
# Clear session
if s3_get_last_record_id(tablename) == record_id:
s3_remove_last_record_id(tablename)
# Audit
audit("delete", prefix, name,
record=row[pkey], representation=format)
# On-delete hook
ondelete = get_config("ondelete")
if ondelete:
callback(ondelete, row)
# Commit after each row to not have it rolled back by
# subsequent cascade errors
if not cascade:
db.commit()
if numrows == 0 and not deletable:
# No deletable rows found
self.error = INTEGRITY_ERROR
return numrows
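    # Typical use, assuming the resource was instantiated with a filter that
    # selects exactly the records to remove (tablename is an assumed example):
    #
    #   resource = current.s3db.resource("org_organisation", id=record_id)
    #   numrows = resource.delete()
    #   if resource.error:
    #       ...  # e.g. integrity error due to RESTRICT references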
# -------------------------------------------------------------------------
def approve(self, components=[], approve=True):
"""
Approve all records in this resource
@param components: list of component aliases to include, None
for no components, empty list for all components
@param approve: set to approved (False for reset to unapproved)
"""
db = current.db
auth = current.auth
if auth.s3_logged_in():
user_id = approve and auth.user.id or None
else:
return False
tablename = self.tablename
        table = self.table
records = self.select([self._id.name], limit=None)
for record in records["rows"]:
record_id = record[str(self._id)]
# Forget any cached permission for this record
auth.permission.forget(table, record_id)
if "approved_by" in table.fields:
dbset = db(table._id == record_id)
success = dbset.update(approved_by = user_id)
if not success:
current.db.rollback()
return False
else:
onapprove = self.get_config("onapprove", None)
if onapprove is not None:
row = dbset.select(limitby=(0, 1)).first()
if row:
callback(onapprove, row, tablename=tablename)
if components is None:
continue
for alias in self.components:
if components and alias not in components:
continue
component = self.components[alias]
success = component.approve(components=None, approve=approve)
if not success:
current.db.rollback()
return False
return True
# -------------------------------------------------------------------------
def reject(self, cascade=False):
""" Reject (delete) all records in this resource """
db = current.db
s3db = current.s3db
define_resource = s3db.resource
DELETED = current.xml.DELETED
INTEGRITY_ERROR = current.ERROR.INTEGRITY_ERROR
tablename = self.tablename
table = self.table
pkey = table._id.name
# Get hooks configuration
get_config = s3db.get_config
ondelete = get_config(tablename, "ondelete")
onreject = get_config(tablename, "onreject")
ondelete_cascade = get_config(tablename, "ondelete_cascade")
# Get all rows
if "uuid" in table.fields:
rows = self.select([table._id.name, "uuid"], as_rows=True)
else:
rows = self.select([table._id.name], as_rows=True)
if not rows:
return True
delete_super = s3db.delete_super
if DELETED in table:
references = table._referenced_by
for row in rows:
error = self.error
self.error = None
# On-delete-cascade
if ondelete_cascade:
callback(ondelete_cascade, row, tablename=tablename)
# Automatic cascade
for ref in references:
try:
tn, fn = ref.tablename, ref.name
except:
# old web2py < 2.0
tn, fn = ref
rtable = db[tn]
rfield = rtable[fn]
query = (rfield == row[pkey])
# Ignore RESTRICTs => reject anyway
if rfield.ondelete in ("CASCADE", "RESTRICT"):
rresource = define_resource(tn, filter=query, unapproved=True)
rresource.reject(cascade=True)
if rresource.error:
break
elif rfield.ondelete == "SET NULL":
try:
db(query).update(**{fn:None})
except:
self.error = INTEGRITY_ERROR
break
elif rfield.ondelete == "SET DEFAULT":
try:
db(query).update(**{fn:rfield.default})
except:
self.error = INTEGRITY_ERROR
break
if not self.error and not delete_super(table, row):
self.error = INTEGRITY_ERROR
if self.error:
db.rollback()
raise RuntimeError("Reject failed for %s.%s" %
(tablename, row[table._id]))
else:
# Pull back prior error status
self.error = error
error = None
# On-reject hook
if onreject:
callback(onreject, row, tablename=tablename)
# Park foreign keys
fields = dict(deleted=True)
if "deleted_fk" in table:
record = table[row[pkey]]
fk = {}
for f in table.fields:
if record[f] is not None and \
s3_has_foreign_key(table[f]):
fk[f] = record[f]
fields[f] = None
else:
continue
if fk:
fields.update(deleted_fk=json.dumps(fk))
# Update the row, finally
db(table._id == row[pkey]).update(**fields)
# Clear session
if s3_get_last_record_id(tablename) == row[pkey]:
s3_remove_last_record_id(tablename)
# On-delete hook
if ondelete:
callback(ondelete, row, tablename=tablename)
else:
# Hard delete
for row in rows:
# On-delete-cascade
if ondelete_cascade:
callback(ondelete_cascade, row, tablename=tablename)
# On-reject
if onreject:
callback(onreject, row, tablename=tablename)
try:
del table[row[pkey]]
except:
# Row is not deletable
self.error = INTEGRITY_ERROR
db.rollback()
raise
else:
# Clear session
if s3_get_last_record_id(tablename) == row[pkey]:
s3_remove_last_record_id(tablename)
# Delete super-entity
delete_super(table, row)
# On-delete
if ondelete:
callback(ondelete, row, tablename=tablename)
return True
# -------------------------------------------------------------------------
def merge(self,
original_id,
duplicate_id,
replace=None,
update=None,
main=True):
""" Merge two records, see also S3RecordMerger.merge """
from s3merge import S3RecordMerger
return S3RecordMerger(self).merge(original_id,
duplicate_id,
replace=replace,
update=update,
main=main)
# -------------------------------------------------------------------------
# Exports
# -------------------------------------------------------------------------
def datatable(self,
fields=None,
start=0,
limit=None,
left=None,
orderby=None,
distinct=False,
getids=False):
"""
Generate a data table of this resource
@param fields: list of fields to include (field selector strings)
@param start: index of the first record to include
@param limit: maximum number of records to include
@param left: additional left joins for DB query
@param orderby: orderby for DB query
@param distinct: distinct-flag for DB query
@param getids: return the record IDs of all records matching the
query (used in search to create a filter)
@return: tuple (S3DataTable, numrows, ids), where numrows represents
the total number of rows in the table that match the query;
ids is empty unless getids=True
"""
# Choose fields
if fields is None:
fields = [f.name for f in self.readable_fields()]
selectors = list(fields)
# Automatically include the record ID
table = self.table
if table._id.name not in selectors:
fields.insert(0, table._id.name)
selectors.insert(0, table._id.name)
# Skip representation of IDs in data tables
id_repr = table._id.represent
table._id.represent = None
# Extract the data
data = self.select(selectors,
start=start,
limit=limit,
orderby=orderby,
left=left,
distinct=distinct,
count=True,
getids=getids,
represent=True)
rows = data["rows"]
# Restore ID representation
table._id.represent = id_repr
# Empty table - or just no match?
empty = False
if not rows:
DELETED = current.xml.DELETED
if DELETED in table:
query = (table[DELETED] != True)
else:
query = (table._id > 0)
row = current.db(query).select(table._id, limitby=(0, 1)).first()
if not row:
empty = True
# Generate the data table
rfields = data["rfields"]
dt = S3DataTable(rfields, rows, orderby=orderby, empty=empty)
return dt, data["numrows"], data["ids"]
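    # Hedged example (field selectors are assumed):
    #
    #   dt, totalrows, ids = resource.datatable(fields=["id", "name"],
    #                                           start=0, limit=25,
    #                                           getids=True)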
# -------------------------------------------------------------------------
def datalist(self,
fields=None,
start=0,
limit=None,
left=None,
orderby=None,
distinct=False,
getids=False,
list_id=None,
layout=None):
"""
Generate a data list of this resource
@param fields: list of fields to include (field selector strings)
@param start: index of the first record to include
@param limit: maximum number of records to include
@param left: additional left joins for DB query
@param orderby: orderby for DB query
@param distinct: distinct-flag for DB query
@param getids: return the record IDs of all records matching the
query (used in search to create a filter)
@param list_id: the list identifier
@param layout: custom renderer function (see S3DataList.render)
@return: tuple (S3DataList, numrows, ids), where numrows represents
the total number of rows in the table that match the query;
ids is empty unless getids=True
"""
# Choose fields
if fields is None:
fields = [f.name for f in self.readable_fields()]
selectors = list(fields)
# Automatically include the record ID
table = self.table
if table._id.name not in selectors:
fields.insert(0, table._id.name)
selectors.insert(0, table._id.name)
# Extract the data
data = self.select(selectors,
start=start,
limit=limit,
orderby=orderby,
left=left,
distinct=distinct,
count=True,
getids=getids,
raw_data=True,
represent=True)
# Generate the data list
numrows = data["numrows"]
dl = S3DataList(self,
fields,
data["rows"],
list_id=list_id,
start=start,
limit=limit,
total=numrows,
layout=layout)
return dl, numrows, data["ids"]
# -------------------------------------------------------------------------
def pivottable(self, rows, cols, layers, strict=True):
"""
Generate a pivot table of this resource.
@param rows: field selector for the rows dimension
@param cols: field selector for the columns dimension
@param layers: list of tuples (field selector, method) for
the aggregation layers
@param strict: filter out dimension values which don't match
the resource filter
@return: an S3PivotTable instance
Supported methods: see S3PivotTable
"""
return S3PivotTable(self, rows, cols, layers, strict=strict)
# -------------------------------------------------------------------------
def json(self,
fields=None,
start=0,
limit=None,
left=None,
distinct=False,
orderby=None):
"""
Export a JSON representation of the resource.
@param fields: list of field selector strings
@param start: index of the first record
@param limit: maximum number of records
@param left: list of (additional) left joins
@param distinct: select only distinct rows
@param orderby: Orderby-expression for the query
@return: the JSON (as string), representing a list of
dicts with {"tablename.fieldname":"value"}
"""
data = self.select(fields=fields,
start=start,
limit=limit,
orderby=orderby,
left=left,
distinct=distinct)["rows"]
return json.dumps(data)
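    # Sketch, assuming "name" is a valid field selector:
    #
    #   s = resource.json(fields=["id", "name"], limit=10)
    #   # -> '[{"org_organisation.id": 1, "org_organisation.name": ...}, ...]'
    #
    # Keys follow the "tablename.fieldname" convention noted in the docstring.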
# -------------------------------------------------------------------------
# Data Object API
# -------------------------------------------------------------------------
def load(self,
fields=None,
skip=None,
start=None,
limit=None,
orderby=None,
virtual=True,
cacheable=False):
"""
Loads records from the resource, applying the current filters,
and stores them in the instance.
@param fields: list of field names to include
@param skip: list of field names to skip
@param start: the index of the first record to load
@param limit: the maximum number of records to load
@param orderby: orderby-expression for the query
@param virtual: whether to load virtual fields or not
@param cacheable: don't define Row actions like update_record
or delete_record (faster, and the record can
be cached)
@return: the records as list of Rows
"""
table = self.table
tablename = self.tablename
UID = current.xml.UID
load_uids = hasattr(table, UID)
if not skip:
skip = tuple()
if fields or skip:
s3 = current.response.s3
if "all_meta_fields" in s3:
meta_fields = s3.all_meta_fields
else:
meta_fields = s3.all_meta_fields = s3_all_meta_field_names()
s3db = current.s3db
# Field selection
qfields = ([table._id.name, UID])
append = qfields.append
for f in table.fields:
if tablename == "gis_location" and \
((f == "the_geom") or (f == "wkt" and current.auth.permission.format != "cap")):
# Filter out bulky Polygons
continue
elif f in ("wkt", "the_geom") and tablename.startswith("gis_layer_shapefile_"):
# Filter out bulky Polygons
continue
if fields or skip:
# Must include all meta-fields
if f in meta_fields:
append(f)
continue
# Must include all super-keys
ktablename = s3_get_foreign_key(table[f], m2m=False)[0]
if ktablename:
ktable = s3db.table(ktablename)
if ktable and hasattr(ktable, "instance_type"):
append(f)
continue
if f in skip:
continue
if not fields or f in fields:
qfields.append(f)
fields = list(set(filter(lambda f: hasattr(table, f), qfields)))
if self._rows is not None:
self.clear()
rfilter = self.rfilter
multiple = rfilter.multiple if rfilter is not None else True
if not multiple and self.parent and self.parent.count() == 1:
start = 0
limit = 1
rows = self.select(fields,
start=start,
limit=limit,
orderby=orderby,
virtual=virtual,
as_rows=True)
ids = self._ids = []
new_id = ids.append
self._uids = []
new_uid = self._uids.append
self._rows = []
new_row = self._rows.append
if rows:
pkey = table._id.name
for row in rows:
if hasattr(row, tablename):
_row = ogetattr(row, tablename)
if type(_row) is Row:
row = _row
record_id = ogetattr(row, pkey)
if record_id not in ids:
new_id(record_id)
new_row(row)
if load_uids:
new_uid(ogetattr(row, UID))
self._length = len(self._rows)
return self._rows
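    # Minimal sketch:
    #
    #   resource.load(limit=100)
    #   for row in resource:                  # __iter__ yields the loaded rows
    #       record_id = row[resource._id.name]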
# -------------------------------------------------------------------------
def clear(self):
""" Removes the records currently stored in this instance """
self._rows = None
self._rowindex = None
self._length = None
self._ids = None
self._uids = None
self.files = Storage()
if self.components:
for c in self.components:
self.components[c].clear()
# -------------------------------------------------------------------------
def records(self, fields=None):
"""
Get the current set as Rows instance
@param fields: the fields to include (list of Fields)
"""
if fields is None:
if self.tablename == "gis_location":
fields = [f for f in self.table
if f.name not in ("wkt", "the_geom")]
else:
fields = [f for f in self.table]
if self._rows is None:
return Rows(current.db)
else:
colnames = map(str, fields)
return Rows(current.db, self._rows, colnames=colnames)
# -------------------------------------------------------------------------
def __getitem__(self, key):
"""
Find a record currently stored in this instance by its record ID
@param key: the record ID
@return: a Row
@raises: IndexError if the record is not currently loaded
"""
index = self._rowindex
if index is None:
_id = self._id.name
rows = self._rows
if rows:
index = Storage([(str(row[_id]), row) for row in rows])
else:
index = Storage()
self._rowindex = index
key = str(key)
if key in index:
return index[key]
raise IndexError
# -------------------------------------------------------------------------
def __iter__(self):
"""
Iterate over the records currently stored in this instance
"""
if self._rows is None:
self.load()
rows = self._rows
for i in xrange(len(rows)):
yield rows[i]
return
# -------------------------------------------------------------------------
def get(self, key, component=None, link=None):
"""
Get component records for a record currently stored in this
instance.
@param key: the record ID
@param component: the name of the component
@param link: the name of the link table
@return: a Row (if component is None) or a list of rows
"""
if not key:
raise KeyError("Record not found")
if self._rows is None:
self.load()
try:
master = self[key]
except IndexError:
raise KeyError("Record not found")
if not component and not link:
return master
elif link:
if link in self.links:
c = self.links[link]
else:
raise AttributeError("Undefined link %s" % link)
else:
if component in self.components:
c = self.components[component]
else:
raise AttributeError("Undefined component %s" % component)
rows = c._rows
if rows is None:
rows = c.load()
if not rows:
return []
pkey, fkey = c.pkey, c.fkey
if pkey in master:
master_id = master[pkey]
if c.link:
lkey, rkey = c.lkey, c.rkey
lids = [r[rkey] for r in c.link if master_id == r[lkey]]
rows = [record for record in rows if record[fkey] in lids]
else:
try:
rows = [record for record in rows if master_id == record[fkey]]
except AttributeError:
                    # Most likely need to tweak static/formats/geojson/export.xsl
raise AttributeError("Component %s records are missing fkey %s" % (component, fkey))
else:
rows = []
return rows
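    # Hedged example of reading component records ("address" is an assumed
    # component alias):
    #
    #   master = resource.get(record_id)
    #   addresses = resource.get(record_id, component="address")
    #
    # Raises KeyError if the master record is not found among the loaded rows.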
# -------------------------------------------------------------------------
def get_id(self):
""" Get the IDs of all records currently stored in this instance """
if self._ids is None:
self.__load_ids()
if not self._ids:
return None
elif len(self._ids) == 1:
return self._ids[0]
else:
return self._ids
# -------------------------------------------------------------------------
def get_uid(self):
""" Get the UUIDs of all records currently stored in this instance """
if current.xml.UID not in self.table.fields:
return None
if self._ids is None:
self.__load_ids()
if not self._uids:
return None
elif len(self._uids) == 1:
return self._uids[0]
else:
return self._uids
# -------------------------------------------------------------------------
def __len__(self):
"""
The number of currently loaded rows
"""
if self._rows is not None:
return len(self._rows)
else:
return 0
# -------------------------------------------------------------------------
def __load_ids(self):
""" Loads the IDs/UIDs of all records matching the current filter """
table = self.table
UID = current.xml.UID
pkey = table._id.name
if UID in table.fields:
has_uid = True
fields = (pkey, UID)
else:
has_uid = False
fields = (pkey, )
rfilter = self.rfilter
multiple = rfilter.multiple if rfilter is not None else True
if not multiple and self.parent and self.parent.count() == 1:
start = 0
limit = 1
else:
start = limit = None
rows = self.select(fields,
start=start,
limit=limit)["rows"]
if rows:
ID = str(table._id)
self._ids = [row[ID] for row in rows]
if has_uid:
uid = str(table[UID])
self._uids = [row[uid] for row in rows]
else:
self._ids = []
return
# -------------------------------------------------------------------------
# Representation
# -------------------------------------------------------------------------
def __repr__(self):
"""
String representation of this resource
"""
pkey = self.table._id.name
if self._rows:
ids = [r[pkey] for r in self]
return "<S3Resource %s %s>" % (self.tablename, ids)
else:
return "<S3Resource %s>" % self.tablename
# -------------------------------------------------------------------------
def __contains__(self, item):
"""
Tests whether this resource contains a (real) field.
@param item: the field selector or Field instance
"""
fn = str(item)
if "." in fn:
tn, fn = fn.split(".", 1)
if tn == self.tablename:
item = fn
try:
rf = self.resolve_selector(str(item))
except (SyntaxError, AttributeError):
return 0
if rf.field is not None:
return 1
else:
return 0
# -------------------------------------------------------------------------
def __nonzero__(self):
"""
Boolean test of this resource
"""
return self is not None
# -------------------------------------------------------------------------
# XML Export
# -------------------------------------------------------------------------
def export_xml(self,
start=None,
limit=None,
msince=None,
fields=None,
dereference=True,
maxdepth=MAXDEPTH,
mcomponents=[],
rcomponents=None,
references=None,
stylesheet=None,
as_tree=False,
as_json=False,
maxbounds=False,
filters=None,
pretty_print=False,
location_data=None,
map_data=None,
**args):
"""
Export this resource as S3XML
@param start: index of the first record to export (slicing)
@param limit: maximum number of records to export (slicing)
@param msince: export only records which have been modified
after this datetime
@param fields: data fields to include (default: all)
@param dereference: include referenced resources
            @param maxdepth: maximum depth when dereferencing referenced
                             resources
@param mcomponents: components of the master resource to
include (list of tablenames), empty list
for all
@param rcomponents: components of referenced resources to
include (list of tablenames), empty list
for all
@param references: foreign keys to include (default: all)
@param stylesheet: path to the XSLT stylesheet (if required)
@param as_tree: return the ElementTree (do not convert into string)
@param as_json: represent the XML tree as JSON
@param maxbounds: include lat/lon boundaries in the top
level element (off by default)
@param filters: additional URL filters (Sync), as dict
{tablename: {url_var: string}}
@param pretty_print: insert newlines/indentation in the output
@param location_data: dictionary of location data which has been
looked-up in bulk ready for xml.gis_encode()
@param map_data: dictionary of options which can be read by the map
@param args: dict of arguments to pass to the XSLT stylesheet
"""
xml = current.xml
output = None
args = Storage(args)
xmlformat = S3XMLFormat(stylesheet) if stylesheet else None
# Export as element tree
#if DEBUG:
#_start = datetime.datetime.now()
#tablename = self.tablename
#_debug("export_tree of %s starting" % tablename)
tree = self.export_tree(start=start,
limit=limit,
msince=msince,
fields=fields,
dereference=dereference,
maxdepth=maxdepth,
mcomponents=mcomponents,
rcomponents=rcomponents,
references=references,
filters=filters,
maxbounds=maxbounds,
xmlformat=xmlformat,
location_data=location_data,
map_data=map_data)
#if DEBUG:
#end = datetime.datetime.now()
#duration = end - _start
#duration = '{:.2f}'.format(duration.total_seconds())
#_debug("export_tree of %s completed in %s seconds" % \
#(tablename, duration))
# XSLT transformation
if tree and xmlformat is not None:
#if DEBUG:
# _start = datetime.datetime.now()
import uuid
tfmt = xml.ISOFORMAT
args.update(domain=xml.domain,
base_url=current.response.s3.base_url,
prefix=self.prefix,
name=self.name,
utcnow=datetime.datetime.utcnow().strftime(tfmt),
msguid=uuid.uuid4().urn)
tree = xmlformat.transform(tree, **args)
#if DEBUG:
#end = datetime.datetime.now()
#duration = end - _start
#duration = '{:.2f}'.format(duration.total_seconds())
#_debug("transform of %s using %s completed in %s seconds" % \
#(tablename, stylesheet, duration))
# Convert into the requested format
# (Content Headers are set by the calling function)
if tree:
if as_tree:
output = tree
elif as_json:
#if DEBUG:
#_start = datetime.datetime.now()
output = xml.tree2json(tree, pretty_print=pretty_print)
#if DEBUG:
#end = datetime.datetime.now()
#duration = end - _start
#duration = '{:.2f}'.format(duration.total_seconds())
#_debug("tree2json of %s completed in %s seconds" % \
#(tablename, duration))
else:
output = xml.tostring(tree, pretty_print=pretty_print)
return output
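    # Sketch of a plain export (no stylesheet) as an XML string:
    #
    #   xmlstr = resource.export_xml(start=0, limit=100, pretty_print=True)
    #
    # With as_json=True the same tree is returned in the S3JSON notation
    # produced by xml.tree2json(); with as_tree=True, the ElementTree itself.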
# -------------------------------------------------------------------------
def export_tree(self,
start=0,
limit=None,
msince=None,
fields=None,
references=None,
dereference=True,
maxdepth=MAXDEPTH,
mcomponents=None,
rcomponents=None,
filters=None,
maxbounds=False,
xmlformat=None,
location_data=None,
map_data=None,
):
"""
Export the resource as element tree
@param start: index of the first record to export
@param limit: maximum number of records to export
@param msince: minimum modification date of the records
@param fields: data fields to include (default: all)
@param references: foreign keys to include (default: all)
@param dereference: also export referenced records
            @param maxdepth: maximum depth when dereferencing referenced
                             resources
@param mcomponents: components of the master resource to
include (list of tablenames), empty list
for all
@param rcomponents: components of referenced resources to
include (list of tablenames), empty list
for all
@param filters: additional URL filters (Sync), as dict
{tablename: {url_var: string}}
@param maxbounds: include lat/lon boundaries in the top
level element (off by default)
            @param xmlformat: the S3XMLFormat for the target format (used to
                              determine which fields to load/export)
@param location_data: dictionary of location data which has been
looked-up in bulk ready for xml.gis_encode()
@param map_data: dictionary of options which can be read by the map
"""
xml = current.xml
if xml.show_urls:
base_url = current.response.s3.base_url
else:
base_url = None
# Split reference/data fields
(rfields, dfields) = self.split_fields(data=fields,
references=references)
# Filter for MCI >= 0 (setting)
table = self.table
if xml.filter_mci and "mci" in table.fields:
mci_filter = (table.mci >= 0)
self.add_filter(mci_filter)
# Sync filters
tablename = self.tablename
if filters and tablename in filters:
queries = S3URLQuery.parse(self, filters[tablename])
[self.add_filter(q) for a in queries for q in queries[a]]
# Initialize export metadata
self.muntil = None
self.results = 0
# Load slice
if msince is not None and "modified_on" in table.fields:
orderby = "%s ASC" % table["modified_on"]
else:
orderby = None
# Fields to load
if xmlformat:
include, exclude = xmlformat.get_fields(self.tablename)
else:
include, exclude = None, None
self.load(fields=include,
skip=exclude,
start=start,
limit=limit,
orderby=orderby,
virtual=False,
cacheable=True)
# Total number of results
results = self.count()
if not location_data:
format = current.auth.permission.format
if format == "geojson":
if results > current.deployment_settings.get_gis_max_features():
headers = {"Content-Type": "application/json"}
message = "Too Many Records"
status = 509
raise HTTP(status,
body=xml.json_message(success=False,
statuscode=status,
message=message),
web2py_error=message,
**headers)
# Lookups per layer not per record
if tablename == "gis_layer_shapefile":
# GIS Shapefile Layer
location_data = current.gis.get_shapefile_geojson(self) or {}
elif tablename == "gis_theme_data":
# GIS Theme Layer
location_data = current.gis.get_theme_geojson(self) or {}
else:
# e.g. GIS Feature Layer
# e.g. Search results
location_data = current.gis.get_location_data(self) or {}
elif format in ("georss", "kml", "gpx"):
location_data = current.gis.get_location_data(self) or {}
else:
# @ToDo: Bulk lookup of LatLons for S3XML.latlon()
location_data = {}
# Build the tree
#if DEBUG:
# _start = datetime.datetime.now()
root = etree.Element(xml.TAG.root)
if map_data:
# Gets loaded before re-dumping, so no need to compact or avoid double-encoding
# NB Ensure we don't double-encode unicode!
#root.set("map", json.dumps(map_data, separators=SEPARATORS,
# ensure_ascii=False))
root.set("map", json.dumps(map_data))
export_map = Storage()
all_references = []
prefix = self.prefix
name = self.name
if base_url:
url = "%s/%s/%s" % (base_url, prefix, name)
else:
url = "/%s/%s" % (prefix, name)
# Use lazy representations
lazy = []
current.auth_user_represent = S3Represent(lookup="auth_user",
fields=["email"])
export_resource = self.__export_resource
# Collect all references from master records
reference_map = []
for record in self._rows:
element = export_resource(record,
rfields=rfields,
dfields=dfields,
parent=root,
base_url=url,
reference_map=reference_map,
export_map=export_map,
lazy=lazy,
components=mcomponents,
filters=filters,
msince=msince,
location_data=location_data,
xmlformat=xmlformat)
if element is None:
results -= 1
if reference_map:
all_references.extend(reference_map)
#if DEBUG:
# end = datetime.datetime.now()
# duration = end - _start
# duration = '{:.2f}'.format(duration.total_seconds())
# _debug("export_resource of primary resource and components completed in %s seconds" % \
# duration)
# Add referenced resources to the tree
#if DEBUG:
# _start = datetime.datetime.now()
define_resource = current.s3db.resource
# Iteratively resolve all references
depth = maxdepth if dereference else 0
while reference_map and depth:
depth -= 1
load_map = dict()
get_exported = export_map.get
for ref in reference_map:
if "table" in ref and "id" in ref:
# Get tablename and IDs
tname = ref["table"]
ids = ref["id"]
if not isinstance(ids, list):
ids = [ids]
# Exclude records which are already in the tree
exported = get_exported(tname, [])
ids = [x for x in ids if x not in exported]
if not ids:
continue
# Append the new ids to load_map[tname]
if tname in load_map:
ids = [x for x in ids if x not in load_map[tname]]
load_map[tname] += ids
else:
load_map[tname] = ids
# Collect all references from the referenced records
reference_map = []
REF = xml.ATTRIBUTE.ref
for tablename in load_map:
load_list = load_map[tablename]
# Sync filters
if filters:
filter_vars = filters.get(tablename, None)
else:
filter_vars = None
prefix, name = tablename.split("_", 1)
rresource = define_resource(tablename,
id=load_list,
components=[],
vars=filter_vars)
table = rresource.table
if base_url:
url = "%s/%s/%s" % (base_url, prefix, name)
else:
url = "/%s/%s" % (prefix, name)
rfields, dfields = rresource.split_fields(data=fields,
references=references)
# Fields to load
if xmlformat:
include, exclude = xmlformat.get_fields(rresource.tablename)
else:
include, exclude = None, None
rresource.load(fields=include,
skip=exclude,
limit=None,
virtual=False,
cacheable=True)
export_resource = rresource.__export_resource
for record in rresource:
element = export_resource(record,
rfields=rfields,
dfields=dfields,
parent=root,
base_url=url,
reference_map=reference_map,
export_map=export_map,
components=rcomponents,
lazy=lazy,
filters=filters,
master=False,
location_data=location_data,
xmlformat=xmlformat)
# Mark as referenced element (for XSLT)
if element is not None:
element.set(REF, "True")
if reference_map:
all_references.extend(reference_map)
#if DEBUG:
# end = datetime.datetime.now()
# duration = end - _start
# duration = '{:.2f}'.format(duration.total_seconds())
# _debug("export_resource of referenced resources and their components completed in %s seconds" % \
# duration)
# Render all pending lazy representations
if lazy:
for renderer, element, attr, f in lazy:
renderer.render_node(element, attr, f)
# Add Lat/Lon attributes to all location references
if all_references:
xml.latlon(all_references)
# Complete the tree
tree = xml.tree(None,
root=root,
domain=xml.domain,
url=base_url,
results=results,
start=start,
limit=limit,
maxbounds=maxbounds)
# Store number of results
self.results = results
return tree
# -------------------------------------------------------------------------
def __export_resource(self,
record,
rfields=[],
dfields=[],
parent=None,
base_url=None,
reference_map=None,
export_map=None,
lazy=None,
components=None,
filters=None,
msince=None,
master=True,
location_data=None,
xmlformat=None):
"""
Add a <resource> to the element tree
@param record: the record
@param rfields: list of reference fields to export
@param dfields: list of data fields to export
@param parent: the parent element
@param base_url: the base URL of the resource
@param reference_map: the reference map of the request
@param export_map: the export map of the request
            @param lazy: list collecting pending lazy representations
                         (rendered after the tree has been built)
@param components: list of components to include from referenced
resources (tablenames)
@param filters: additional URL filters (Sync), as dict
{tablename: {url_var: string}}
@param msince: the minimum update datetime for exported records
            @param master: True if this is the master resource
@param location_data: the location_data for GIS encoding
            @param xmlformat: the S3XMLFormat (field selection for component
                              loads)
"""
xml = current.xml
pkey = self.table._id
# Construct the record URL
if base_url:
record_url = "%s/%s" % (base_url, record[pkey])
else:
record_url = None
# Export the record
add = False
export = self._export_record
element, rmap = export(record,
rfields=rfields,
dfields=dfields,
parent=parent,
export_map=export_map,
lazy=lazy,
url=record_url,
msince=msince,
master=master,
location_data=location_data)
if element is not None:
add = True
# Export components
if components is not None:
resource_components = self.components.values()
unfiltered = [c for c in resource_components if c.filter is None]
for component in resource_components:
ctablename = component.tablename
# Shall this component be included?
if components and ctablename not in components:
continue
# We skip a filtered component if an unfiltered
# component of the same table is available:
if component.filter is not None and ctablename in unfiltered:
continue
cpkey = component.table._id
if component.link is not None:
c = component.link
calias = None
lalias = c.alias
else:
c = component
calias = c.alias
lalias = None
# Before loading the component: add filters
if c._rows is None:
# MCI filter
ctable = c.table
if xml.filter_mci and xml.MCI in ctable.fields:
mci_filter = FS(xml.MCI) >= 0
c.add_filter(mci_filter)
# Sync filters
ctablename = c.tablename
if filters and ctablename in filters:
queries = S3URLQuery.parse(self, filters[ctablename])
[c.add_filter(q) for a in queries for q in queries[a]]
# Fields to load
if xmlformat:
include, exclude = xmlformat.get_fields(c.tablename)
else:
include, exclude = None, None
# Load the records
c.load(fields=include,
skip=exclude,
limit=None,
virtual=False,
cacheable=True)
# Split fields
crfields, cdfields = c.split_fields(skip=[c.fkey])
# Construct the component base URL
if record_url:
component_url = "%s/%s" % (record_url, c.alias)
else:
component_url = None
# Find related records
crecords = self.get(record[pkey],
component = calias,
link = lalias,
)
# @todo: load() should limit this automatically:
if not c.multiple and len(crecords):
crecords = [crecords[0]]
# Export records
export = c._export_record
map_record = c.__map_record
for crecord in crecords:
# Construct the component record URL
if component_url:
crecord_url = "%s/%s" % (component_url, crecord[cpkey])
else:
crecord_url = None
# Export the component record
celement, crmap = export(crecord,
rfields=crfields,
dfields=cdfields,
parent=element,
export_map=export_map,
lazy=lazy,
url=crecord_url,
msince=msince,
master=False,
location_data=location_data)
if celement is not None:
add = True # keep the parent record
# Update "modified until" from component
if not self.muntil or \
c.muntil and c.muntil > self.muntil:
self.muntil = c.muntil
map_record(crecord, crmap,
reference_map, export_map)
# Update reference_map and export_map
if add:
self.__map_record(record, rmap, reference_map, export_map)
elif parent is not None and element is not None:
idx = parent.index(element)
if idx:
del parent[idx]
return None
return element
# -------------------------------------------------------------------------
def _export_record(self,
record,
rfields=[],
dfields=[],
parent=None,
export_map=None,
lazy=None,
url=None,
msince=None,
master=True,
location_data=None):
"""
Exports a single record to the element tree.
@param record: the record
@param rfields: list of foreign key fields to export
@param dfields: list of data fields to export
@param parent: the parent element
@param export_map: the export map of the current request
@param url: URL of the record
@param msince: minimum last update time
@param master: True if this is a record in the master resource
@param location_data: the location_data for GIS encoding
"""
xml = current.xml
tablename = self.tablename
table = self.table
# Replace user ID representation by lazy method
auth_user_represent = Storage()
if hasattr(current, "auth_user_represent"):
user_ids = ("created_by", "modified_by", "owned_by_user")
for fn in user_ids:
if hasattr(table, fn):
f = ogetattr(table, fn)
auth_user_represent[fn] = f.represent
f.represent = current.auth_user_represent
default = (None, None)
# Do not export the record if it already is in the export map
if tablename in export_map and record[table._id] in export_map[tablename]:
return default
# Do not export the record if it hasn't been modified since msince
# NB This can't be moved to tree level as we do want to export records
# which have modified components
MTIME = xml.MTIME
if MTIME in record:
if msince is not None and record[MTIME] <= msince:
return default
if not self.muntil or record[MTIME] > self.muntil:
self.muntil = record[MTIME]
# Audit read
current.audit("read", self.prefix, self.name,
record=record[table._id], representation="xml")
# Reference map for this record
rmap = xml.rmap(table, record, rfields)
# Use alias if distinct from resource name
linked = self.linked
if self.parent is not None and linked is not None:
alias = linked.alias
name = linked.name
else:
alias = self.alias
name = self.name
if alias == name:
alias = None
postprocess = self.get_config("xml_post_render")
# Generate the element
element = xml.resource(parent, table, record,
fields=dfields,
alias=alias,
lazy=lazy,
url=url,
postprocess=postprocess)
# Add the references
xml.add_references(element, rmap,
show_ids=current.xml.show_ids, lazy=lazy)
if master:
# GIS-encode the element
# @ToDo: Do this 1/tree not 1/record
xml.gis_encode(self, record, element, location_data=location_data)
# Restore normal user_id representations
for fn in auth_user_represent:
ogetattr(table, fn).represent = auth_user_represent[fn]
return (element, rmap)
# -------------------------------------------------------------------------
def __map_record(self, record, rmap, reference_map, export_map):
"""
Add the record to the export map, and update the
reference map with the record's references
@param record: the record
@param rmap: the reference map of the record
@param reference_map: the reference map of the request
@param export_map: the export map of the request
"""
tablename = self.tablename
record_id = record[self.table._id]
if rmap:
reference_map.extend(rmap)
if tablename in export_map:
export_map[tablename].append(record_id)
else:
export_map[tablename] = [record_id]
return
# -------------------------------------------------------------------------
# XML Import
# -------------------------------------------------------------------------
def import_xml(self, source,
files=None,
id=None,
format="xml",
stylesheet=None,
extra_data=None,
ignore_errors=False,
job_id=None,
commit_job=True,
delete_job=False,
strategy=None,
update_policy=None,
conflict_policy=None,
last_sync=None,
onconflict=None,
**args):
"""
XML Importer
@param source: the data source, accepts source=xxx, source=[xxx, yyy, zzz] or
source=[(resourcename1, xxx), (resourcename2, yyy)], where the
xxx has to be either an ElementTree or a file-like object
@param files: attached files (None to read in the HTTP request)
@param id: ID (or list of IDs) of the record(s) to update (performs only update)
@param format: type of source = "xml", "json" or "csv"
@param stylesheet: stylesheet to use for transformation
@param extra_data: for CSV imports, dict of extra cols to add to each row
@param ignore_errors: skip invalid records silently
@param job_id: resume from previous import job_id
@param commit_job: commit the job to the database
@param delete_job: delete the import job from the queue
@param strategy: tuple of allowed import methods (create/update/delete)
@param update_policy: policy for updates (sync)
@param conflict_policy: policy for conflict resolution (sync)
@param last_sync: last synchronization datetime (sync)
@param onconflict: callback hook for conflict resolution (sync)
@param args: parameters to pass to the transformation stylesheet
"""
# Check permission for the resource
has_permission = current.auth.s3_has_permission
authorised = has_permission("create", self.table) and \
has_permission("update", self.table)
if not authorised:
raise IOError("Insufficient permissions")
xml = current.xml
tree = None
self.job = None
if not job_id:
# Resource data
prefix = self.prefix
name = self.name
# Additional stylesheet parameters
tfmt = xml.ISOFORMAT
utcnow = datetime.datetime.utcnow().strftime(tfmt)
domain = xml.domain
base_url = current.response.s3.base_url
args.update(domain=domain,
base_url=base_url,
prefix=prefix,
name=name,
utcnow=utcnow)
# Build import tree
if not isinstance(source, (list, tuple)):
source = [source]
for item in source:
if isinstance(item, (list, tuple)):
resourcename, s = item[:2]
else:
resourcename, s = None, item
if isinstance(s, etree._ElementTree):
t = s
elif format == "json":
if isinstance(s, basestring):
source = StringIO(s)
                        t = xml.json2tree(source)
else:
t = xml.json2tree(s)
elif format == "csv":
t = xml.csv2tree(s,
resourcename=resourcename,
extra_data=extra_data)
elif format == "xls":
t = xml.xls2tree(s,
resourcename=resourcename,
extra_data=extra_data)
else:
t = xml.parse(s)
if not t:
if xml.error:
raise SyntaxError(xml.error)
else:
raise SyntaxError("Invalid source")
if stylesheet is not None:
t = xml.transform(t, stylesheet, **args)
_debug(t)
if not t:
raise SyntaxError(xml.error)
if not tree:
tree = t.getroot()
else:
tree.extend(list(t.getroot()))
if files is not None and isinstance(files, dict):
self.files = Storage(files)
else:
# job ID given
pass
response = current.response
# Flag to let onvalidation/onaccept know this is coming from a Bulk Import
response.s3.bulk = True
success = self.import_tree(id, tree,
ignore_errors=ignore_errors,
job_id=job_id,
commit_job=commit_job,
delete_job=delete_job,
strategy=strategy,
update_policy=update_policy,
conflict_policy=conflict_policy,
last_sync=last_sync,
onconflict=onconflict)
response.s3.bulk = False
self.files = Storage()
# Response message
if format == "json":
# Whilst all Responses are JSON, it's easier to debug by having the
# response appear in the browser than launching a text editor
response.headers["Content-Type"] = "application/json"
if self.error_tree is not None:
tree = xml.tree2json(self.error_tree)
else:
tree = None
import_info = {"records":self.import_count}
created = self.import_created
if created:
import_info["created"] = created
updated = self.import_updated
if updated:
import_info["updated"] = updated
deleted = self.import_deleted
if deleted:
import_info["deleted"] = deleted
if success is True:
return xml.json_message(message=self.error, tree=tree,
**import_info)
elif success and hasattr(success, "job_id"):
self.job = success
return xml.json_message(message=self.error, tree=tree,
**import_info)
else:
return xml.json_message(False, 400,
message=self.error, tree=tree)
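    # Hedged example of importing an S3XML source (the filename is an assumed
    # example):
    #
    #   with open("orgs.xml", "rb") as source:
    #       msg = resource.import_xml(source, ignore_errors=True)
    #
    # The return value is a JSON message string from xml.json_message().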
# -------------------------------------------------------------------------
def import_tree(self, id, tree,
job_id=None,
ignore_errors=False,
delete_job=False,
commit_job=True,
strategy=None,
update_policy=None,
conflict_policy=None,
last_sync=None,
onconflict=None):
"""
Import data from an S3XML element tree.
@param id: record ID or list of record IDs to update
@param tree: the element tree
@param ignore_errors: continue at errors (=skip invalid elements)
@param job_id: restore a job from the job table (ID or UID)
@param delete_job: delete the import job from the job table
@param commit_job: commit the job (default)
@todo: update for link table support
"""
from s3import import S3ImportJob
db = current.db
xml = current.xml
auth = current.auth
tablename = self.tablename
table = self.table
if job_id is not None:
# Restore a job from the job table
self.error = None
self.error_tree = None
try:
import_job = S3ImportJob(table,
job_id=job_id,
strategy=strategy,
update_policy=update_policy,
conflict_policy=conflict_policy,
last_sync=last_sync,
onconflict=onconflict)
except:
self.error = current.ERROR.BAD_SOURCE
return False
# Delete the job?
if delete_job:
import_job.delete()
return True
# Load all items
job_id = import_job.job_id
item_table = import_job.item_table
items = db(item_table.job_id == job_id).select()
load_item = import_job.load_item
for item in items:
success = load_item(item)
if not success:
self.error = import_job.error
self.error_tree = import_job.error_tree
import_job.restore_references()
# this is only relevant for commit_job=True
if commit_job:
if self.error and not ignore_errors:
return False
else:
return import_job
# Call the import pre-processor to prepare tables
# and cleanup the tree as necessary
import_prep = current.response.s3.import_prep
if import_prep:
tree = import_job.get_tree()
callback(import_prep,
# takes tuple (resource, tree) as argument
(self, tree),
tablename=tablename)
# Skip import?
if self.skip_import:
_debug("Skipping import to %s" % self.tablename)
self.skip_import = False
return True
else:
# Create a new job from an element tree
# Do not import into tables without "id" field
if "id" not in table.fields:
self.error = current.ERROR.BAD_RESOURCE
return False
# Reset error and error tree
self.error = None
self.error_tree = None
# Call the import pre-processor to prepare tables
# and cleanup the tree as necessary
import_prep = current.response.s3.import_prep
if import_prep:
if not isinstance(tree, etree._ElementTree):
tree = etree.ElementTree(tree)
callback(import_prep,
# takes tuple (resource, tree) as argument
(self, tree),
tablename=tablename)
# Skip import?
if self.skip_import:
_debug("Skipping import to %s" % self.tablename)
self.skip_import = False
return True
# Select the elements for this table
elements = xml.select_resources(tree, tablename)
if not elements:
# nothing to import => still ok
return True
# Find matching elements, if a target record ID is given
UID = xml.UID
if id and UID in table:
if not isinstance(id, (tuple, list)):
query = (table._id == id)
else:
query = (table._id.belongs(id))
originals = db(query).select(table[UID])
uids = [row[UID] for row in originals]
matches = []
import_uid = xml.import_uid
append = matches.append
for element in elements:
element_uid = import_uid(element.get(UID, None))
if not element_uid:
continue
if element_uid in uids:
append(element)
if not matches:
first = elements[0]
if len(elements) and not first.get(UID, None):
first.set(UID, uids[0])
matches = [first]
if not matches:
self.error = current.ERROR.NO_MATCH
return False
else:
elements = matches
# Import all matching elements
import_job = S3ImportJob(table,
tree=tree,
files=self.files,
strategy=strategy,
update_policy=update_policy,
conflict_policy=conflict_policy,
last_sync=last_sync,
onconflict=onconflict)
add_item = import_job.add_item
for element in elements:
success = add_item(element=element,
components=self.components)
if not success:
self.error = import_job.error
self.error_tree = import_job.error_tree
if self.error and not ignore_errors:
return False
# Commit the import job
auth.rollback = not commit_job
success = import_job.commit(ignore_errors=ignore_errors,
log_items = self.get_config("oncommit_import_item"))
auth.rollback = False
self.error = import_job.error
self.import_count += import_job.count
self.import_created += import_job.created
self.import_updated += import_job.updated
self.import_deleted += import_job.deleted
job_mtime = import_job.mtime
if self.mtime is None or \
job_mtime and job_mtime > self.mtime:
self.mtime = job_mtime
if self.error:
if ignore_errors:
self.error = "%s - invalid items ignored" % self.error
self.error_tree = import_job.error_tree
elif not success:
# Oops - how could this happen? We can have an error
# without failure, but not a failure without error!
# If we ever get here, then there's a bug without a
# chance to recover - hence let it crash:
raise RuntimeError("Import failed without error message")
if not success or not commit_job:
db.rollback()
if not commit_job:
import_job.store()
return import_job
else:
# Remove the job when committed
if job_id is not None:
import_job.delete()
return self.error is None or ignore_errors
# -------------------------------------------------------------------------
# XML introspection
# -------------------------------------------------------------------------
def export_options(self,
component=None,
fields=None,
only_last=False,
show_uids=False,
hierarchy=False,
as_json=False):
"""
Export field options of this resource as element tree
@param component: name of the component which the options are
requested of, None for the primary table
@param fields: list of names of fields for which the options
are requested, None for all fields (which have
options)
@param as_json: convert the output into JSON
@param only_last: obtain only the latest record
"""
if component is not None:
c = self.components.get(component)
if c:
tree = c.export_options(fields=fields,
only_last=only_last,
show_uids=show_uids,
hierarchy=hierarchy,
as_json=as_json)
return tree
else:
# If we get here, we've been called from the back-end,
# otherwise the request would have failed during parse.
# So it's safe to raise an exception:
raise AttributeError
else:
if as_json and only_last and len(fields) == 1:
# Identify the field
default = {"option":[]}
try:
field = self.table[fields[0]]
except AttributeError:
# Can't raise an exception here as this goes
# directly to the client
return json.dumps(default)
# Check that the validator has a lookup table
requires = field.requires
if not isinstance(requires, (list, tuple)):
requires = [requires]
requires = requires[0]
if isinstance(requires, IS_EMPTY_OR):
requires = requires.other
from s3validators import IS_LOCATION
if not isinstance(requires, (IS_ONE_OF, IS_LOCATION)):
# Can't raise an exception here as this goes
# directly to the client
return json.dumps(default)
# Identify the lookup table
db = current.db
lookuptable = requires.ktable
lookupfield = db[lookuptable][requires.kfield]
# Fields to extract
fields = [lookupfield]
h = None
if hierarchy:
from s3hierarchy import S3Hierarchy
h = S3Hierarchy(lookuptable)
if not h.config:
h = None
elif h.pkey.name != lookupfield.name:
# Also extract the node key for the hierarchy
fields.append(h.pkey)
# Get the latest record
# NB: this assumes that the lookupfield is auto-incremented
row = db().select(orderby=~lookupfield,
limitby=(0, 1),
*fields).first()
# Represent the value and generate the output JSON
if row:
value = row[lookupfield]
widget = field.widget
if hasattr(widget, "represent") and widget.represent:
# Prefer the widget's represent as options.json
# is usually called to Ajax-update the widget
represent = widget.represent(value)
elif field.represent:
represent = field.represent(value)
else:
represent = s3_unicode(value)
if isinstance(represent, A):
represent = represent.components[0]
item = {"@value": value, "$": represent}
if h:
parent = h.parent(row[h.pkey])
if parent:
item["@parent"] = str(parent)
result = [item]
else:
result = []
return json.dumps({'option': result})
xml = current.xml
tree = xml.get_options(self.table,
fields=fields,
show_uids=show_uids,
hierarchy=hierarchy)
if as_json:
return xml.tree2json(tree, pretty_print=False,
native=True)
else:
return xml.tostring(tree, pretty_print=False)
# -------------------------------------------------------------------------
def export_fields(self, component=None, as_json=False):
"""
Export a list of fields in the resource as element tree
@param component: name of the component to lookup the fields
(None for primary table)
@param as_json: convert the output XML into JSON
"""
if component is not None:
c = self.components.get(component, None)
if c:
tree = c.export_fields()
return tree
else:
raise AttributeError
else:
xml = current.xml
tree = xml.get_fields(self.prefix, self.name)
if as_json:
return xml.tree2json(tree, pretty_print=True)
else:
return xml.tostring(tree, pretty_print=True)
# -------------------------------------------------------------------------
def export_struct(self,
meta=False,
options=False,
references=False,
stylesheet=None,
as_json=False,
as_tree=False):
"""
Get the structure of the resource
@param options: include option lists in option fields
@param references: include option lists even for reference fields
@param stylesheet: the stylesheet to use for transformation
@param as_json: convert into JSON after transformation
"""
xml = current.xml
# Get the structure of the main resource
root = etree.Element(xml.TAG.root)
main = xml.get_struct(self.prefix, self.name,
alias=self.alias,
parent=root,
meta=meta,
options=options,
references=references)
# Include the selected components
for component in self.components.values():
prefix = component.prefix
name = component.name
xml.get_struct(prefix, name,
alias = component.alias,
parent = main,
meta = meta,
options = options,
references = references)
# Transformation
tree = etree.ElementTree(root)
if stylesheet is not None:
tfmt = xml.ISOFORMAT
args = dict(domain=xml.domain,
base_url=current.response.s3.base_url,
prefix=self.prefix,
name=self.name,
utcnow=datetime.datetime.utcnow().strftime(tfmt))
tree = xml.transform(tree, stylesheet, **args)
if tree is None:
return None
# Return tree if requested
if as_tree:
return tree
# Otherwise string-ify it
if as_json:
return xml.tree2json(tree, pretty_print=True)
else:
return xml.tostring(tree, pretty_print=True)
# -------------------------------------------------------------------------
# Data Model Helpers
# -------------------------------------------------------------------------
@classmethod
def original(cls, table, record, mandatory=None):
"""
Find the original record for a possible duplicate:
- if the record contains a UUID, then only that UUID is used
to match the record with an existing DB record
- otherwise, if the record contains some values for unique
fields, all of them must match the same existing DB record
@param table: the table
@param record: the record as dict or S3XML Element
"""
db = current.db
xml = current.xml
xml_decode = xml.xml_decode
VALUE = xml.ATTRIBUTE["value"]
UID = xml.UID
ATTRIBUTES_TO_FIELDS = xml.ATTRIBUTES_TO_FIELDS
# Get primary keys
pkeys = [f for f in table.fields if table[f].unique]
pvalues = Storage()
# Get the values from record
get = record.get
if type(record) is etree._Element: #isinstance(record, etree._Element):
xpath = record.xpath
xexpr = "%s[@%s='%%s']" % (xml.TAG["data"],
xml.ATTRIBUTE["field"])
for f in pkeys:
v = None
if f == UID or f in ATTRIBUTES_TO_FIELDS:
v = get(f, None)
else:
child = xpath(xexpr % f)
if child:
child = child[0]
v = child.get(VALUE, xml_decode(child.text))
if v:
pvalues[f] = v
elif isinstance(record, dict):
for f in pkeys:
v = get(f, None)
if v:
pvalues[f] = v
else:
raise TypeError
# Build match query
query = None
for f in pvalues:
if f == UID:
continue
_query = (table[f] == pvalues[f])
if query is not None:
query = query | _query
else:
query = _query
fields = cls.import_fields(table, pvalues, mandatory=mandatory)
# Try to find exactly one match by non-UID unique keys
if query is not None:
original = db(query).select(limitby=(0, 2), *fields)
if len(original) == 1:
return original.first()
# If no match, then try to find a UID-match
if UID in pvalues:
uid = xml.import_uid(pvalues[UID])
query = (table[UID] == uid)
original = db(query).select(limitby=(0, 1), *fields).first()
if original:
return original
# No match or multiple matches
return None
# -------------------------------------------------------------------------
@staticmethod
def import_fields(table, data, mandatory=None):
fnames = set(s3_all_meta_field_names())
fnames.add(table._id.name)
if mandatory:
fnames |= set(mandatory)
for fn in data:
fnames.add(fn)
return [table[fn] for fn in fnames if fn in table.fields]
# -------------------------------------------------------------------------
def readable_fields(self, subset=None):
"""
Get a list of all readable fields in the resource table
@param subset: list of fieldnames to limit the selection to
"""
fkey = None
table = self.table
if self.parent and self.linked is None:
component = self.parent.components.get(self.alias, None)
if component:
fkey = component.fkey
elif self.linked is not None:
component = self.linked
if component:
fkey = component.lkey
if subset:
return [ogetattr(table, f) for f in subset
if f in table.fields and \
ogetattr(table, f).readable and f != fkey]
else:
return [ogetattr(table, f) for f in table.fields
if ogetattr(table, f).readable and f != fkey]
# -------------------------------------------------------------------------
def resolve_selectors(self, selectors,
skip_components=False,
extra_fields=True,
show=True):
"""
Resolve a list of field selectors against this resource
@param selectors: the field selectors
@param skip_components: skip fields in components
@param extra_fields: automatically add extra_fields of all virtual
fields in this table
@param show: default for S3ResourceField.show
@return: tuple of (fields, joins, left, distinct)
"""
prefix = lambda s: "~.%s" % s \
if "." not in s.split("$", 1)[0] else s
# Store field selectors
display_fields = []
append = display_fields.append
for _s in selectors:
if isinstance(_s, tuple):
s = _s[-1]
else:
s = _s
if isinstance(s, S3ResourceField):
selector = s.selector
elif isinstance(s, FS):
selector = s.name
else:
selector = s
append(prefix(selector))
slist = list(selectors)
# Collect extra fields from virtual tables
if extra_fields:
append = slist.append
extra = self.get_config("extra_fields", [])
for selector in extra:
s = prefix(selector)
if s not in display_fields:
append(s)
joins = {}
left = {}
distinct = False
rfields = []
columns = []
append = rfields.append
for s in slist:
            # Allow the field label to be overridden
if isinstance(s, tuple):
label, selector = s
else:
label, selector = None, s
# Resolve the selector
if isinstance(selector, str):
selector = prefix(selector)
try:
rfield = S3ResourceField(self, selector, label=label)
except (AttributeError, SyntaxError):
continue
elif isinstance(selector, FS):
try:
rfield = selector.resolve(self)
except (AttributeError, SyntaxError):
continue
elif isinstance(selector, S3ResourceField):
rfield = selector
else:
continue
# Unresolvable selector?
if rfield.field is None and not rfield.virtual:
continue
# Replace default label
if label is not None:
rfield.label = label
# Skip components
if skip_components:
head = rfield.selector.split("$", 1)[0]
if "." in head and head.split(".")[0] not in ("~", self.alias):
continue
# De-duplicate columns
if rfield.colname in columns:
continue
else:
columns.append(rfield.colname)
# Resolve the joins
if rfield.distinct:
left.update(rfield._joins)
distinct = True
elif rfield.join:
joins.update(rfield._joins)
rfield.show = show and rfield.selector in display_fields
append(rfield)
return (rfields, joins, left, distinct)
# -------------------------------------------------------------------------
def resolve_selector(self, selector):
"""
Wrapper for S3ResourceField, retained for backward compatibility
"""
return S3ResourceField(self, selector)
# -------------------------------------------------------------------------
def split_fields(self, skip=[], data=None, references=None):
"""
Split the readable fields in the resource table into
reference and non-reference fields.
@param skip: list of field names to skip
@param data: data fields to include (None for all)
@param references: foreign key fields to include (None for all)
"""
rfields = self.rfields
dfields = self.dfields
if rfields is None or dfields is None:
if self.tablename == "gis_location":
if "wkt" not in skip and current.auth.permission.format != "cap":
# Skip bulky WKT fields
skip.append("wkt")
if current.deployment_settings.get_gis_spatialdb() and \
"the_geom" not in skip:
skip.append("the_geom")
xml = current.xml
UID = xml.UID
IGNORE_FIELDS = xml.IGNORE_FIELDS
FIELDS_TO_ATTRIBUTES = xml.FIELDS_TO_ATTRIBUTES
show_ids = current.xml.show_ids
rfields = []
dfields = []
table = self.table
pkey = table._id.name
for f in table.fields:
if f == UID or \
f in skip or \
f in IGNORE_FIELDS:
if f != pkey or not show_ids:
continue
if s3_has_foreign_key(table[f]) and \
f not in FIELDS_TO_ATTRIBUTES and \
(references is None or f in references):
rfields.append(f)
elif data is None or \
f in data or \
f in FIELDS_TO_ATTRIBUTES:
dfields.append(f)
self.rfields = rfields
self.dfields = dfields
return (rfields, dfields)
# -------------------------------------------------------------------------
# Utility functions
# -------------------------------------------------------------------------
def configure(self, **settings):
"""
Update configuration settings for this resource
@param settings: configuration settings for this resource
as keyword arguments
"""
current.s3db.configure(self.tablename, **settings)
# -------------------------------------------------------------------------
def get_config(self, key, default=None):
"""
Get a configuration setting for the current resource
@param key: the setting key
@param default: the default value to return if the setting
is not configured for this resource
"""
return current.s3db.get_config(self.tablename, key, default=default)
# -------------------------------------------------------------------------
def limitby(self, start=0, limit=0):
"""
Convert start+limit parameters into a limitby tuple
                - start None or negative => start = 0
                - limit None => no pagination (returns None)
                - limit 0 => limit = ROWSPERPAGE
                - limit negative => limit = 1
@param start: index of the first record to select
@param limit: maximum number of records to select
"""
if limit is None:
return None
if start is None:
start = 0
if limit == 0:
limit = current.response.s3.ROWSPERPAGE
if limit <= 0:
limit = 1
if start < 0:
start = 0
return (start, start + limit)
# -------------------------------------------------------------------------
def _join(self, implicit=False, reverse=False):
"""
Get a join for this component
@param implicit: return a subquery with an implicit join rather
than an explicit join
@param reverse: get the reverse join (joining master to component)
@return: a Query if implicit=True, otherwise a list of joins
"""
if self.parent is None:
# This isn't a component
return None
else:
ltable = self.parent.table
rtable = self.table
pkey = self.pkey
fkey = self.fkey
DELETED = current.xml.DELETED
if self.linked:
return self.linked._join(implicit=implicit, reverse=reverse)
elif self.linktable:
linktable = self.linktable
lkey = self.lkey
rkey = self.rkey
lquery = (ltable[pkey] == linktable[lkey])
if DELETED in linktable:
lquery &= (linktable[DELETED] != True)
if self.filter is not None and not reverse:
rquery = (linktable[rkey] == rtable[fkey]) & self.filter
else:
rquery = (linktable[rkey] == rtable[fkey])
if reverse:
join = [linktable.on(rquery), ltable.on(lquery)]
else:
join = [linktable.on(lquery), rtable.on(rquery)]
else:
lquery = (ltable[pkey] == rtable[fkey])
if DELETED in rtable and not reverse:
lquery &= (rtable[DELETED] != True)
if self.filter is not None:
lquery &= self.filter
if reverse:
join = [ltable.on(lquery)]
else:
join = [rtable.on(lquery)]
if implicit:
query = None
for expression in join:
if query is None:
query = expression.second
else:
query &= expression.second
return query
else:
return join
# -------------------------------------------------------------------------
def get_join(self):
""" Get join for this component """
return self._join(implicit=True)
# -------------------------------------------------------------------------
def get_left_join(self):
""" Get a left join for this component """
return self._join()
# -------------------------------------------------------------------------
def link_id(self, master_id, component_id):
"""
Helper method to find the link table entry ID for
a pair of linked records.
@param master_id: the ID of the master record
@param component_id: the ID of the component record
"""
if self.parent is None or self.linked is None:
return None
join = self.get_join()
ltable = self.table
mtable = self.parent.table
ctable = self.linked.table
query = join & \
(mtable._id == master_id) & \
(ctable._id == component_id)
row = current.db(query).select(ltable._id, limitby=(0, 1)).first()
if row:
return row[ltable._id.name]
else:
return None
# -------------------------------------------------------------------------
def component_id(self, master_id, link_id):
"""
Helper method to find the component record ID for
a particular link of a particular master record
@param master_id: the ID of the master record
@param link_id: the ID of the link table entry
"""
if self.parent is None or self.linked is None:
return None
join = self.get_join()
ltable = self.table
mtable = self.parent.table
ctable = self.linked.table
query = join & (ltable._id == link_id)
if master_id is not None:
# master ID is redundant, but can be used to check negatives
query &= (mtable._id == master_id)
row = current.db(query).select(ctable._id, limitby=(0, 1)).first()
if row:
return row[ctable._id.name]
else:
return None
# -------------------------------------------------------------------------
def update_link(self, master, record):
"""
Create a new link in a link table if it doesn't yet exist.
This function is meant to also update links in "embed"
actuation mode once this gets implemented, therefore the
method name "update_link".
@param master: the master record
@param record: the new component record to be linked
"""
if self.parent is None or self.linked is None:
return None
# Find the keys
resource = self.linked
pkey = resource.pkey
lkey = resource.lkey
rkey = resource.rkey
fkey = resource.fkey
if pkey not in master:
return None
_lkey = master[pkey]
if fkey not in record:
return None
_rkey = record[fkey]
if not _lkey or not _rkey:
return None
ltable = self.table
ltn = ltable._tablename
# Create the link if it does not already exist
query = ((ltable[lkey] == _lkey) &
(ltable[rkey] == _rkey))
row = current.db(query).select(ltable._id, limitby=(0, 1)).first()
if not row:
s3db = current.s3db
onaccept = s3db.get_config(ltn, "create_onaccept")
if onaccept is None:
onaccept = s3db.get_config(ltn, "onaccept")
data = {lkey:_lkey, rkey:_rkey}
link_id = ltable.insert(**data)
data[ltable._id.name] = link_id
s3db.update_super(ltable, data)
if link_id and onaccept:
callback(onaccept, Storage(vars=Storage(data)))
else:
link_id = row[ltable._id.name]
return link_id
# -------------------------------------------------------------------------
def datatable_filter(self, fields, get_vars):
"""
Parse datatable search/sort vars into a tuple of
query, orderby and left joins
@param fields: list of field selectors representing
the order of fields in the datatable (list_fields)
@param get_vars: the datatable GET vars
@return: tuple of (query, orderby, left joins)
"""
db = current.db
left_joins = S3Joins(self.tablename)
sSearch = "sSearch"
iColumns = "iColumns"
iSortingCols = "iSortingCols"
parent = self.parent
fkey = self.fkey
# Skip joins for linked tables
if self.linked is not None:
skip = self.linked.tablename
else:
skip = None
# Resolve the list fields
rfields = self.resolve_selectors(fields)[0]
# FILTER --------------------------------------------------------------
searchq = None
if sSearch in get_vars and iColumns in get_vars:
# Build filter
text = get_vars[sSearch]
            words = text.lower().split()
if words:
try:
numcols = int(get_vars[iColumns])
except ValueError:
numcols = 0
flist = []
for i in xrange(numcols):
try:
rfield = rfields[i]
field = rfield.field
except (KeyError, IndexError):
continue
if field is None:
continue
ftype = str(field.type)
# Add left joins
left_joins.extend(rfield.left)
if ftype[:9] == "reference" and \
hasattr(field, "sortby") and field.sortby:
# For foreign keys, we search through their sortby
# Get the lookup table
tn = ftype[10:]
if parent is not None and \
parent.tablename == tn and field.name != fkey:
alias = "%s_%s_%s" % (parent.prefix,
"linked",
parent.name)
ktable = db[tn].with_alias(alias)
ktable._id = ktable[ktable._id.name]
tn = alias
elif tn == field.tablename:
prefix, name = field.tablename.split("_", 1)
alias = "%s_%s_%s" % (prefix, field.name, name)
ktable = db[tn].with_alias(alias)
ktable._id = ktable[ktable._id.name]
tn = alias
else:
ktable = db[tn]
# Add left join for lookup table
if tn != skip:
left_joins.add(ktable.on(field == ktable._id))
if isinstance(field.sortby, (list, tuple)):
flist.extend([ktable[f] for f in field.sortby
if f in ktable.fields])
else:
if field.sortby in ktable.fields:
flist.append(ktable[field.sortby])
else:
# Otherwise, we search through the field itself
flist.append(field)
# Build search query
# @todo: migrate this to S3ResourceQuery?
opts = Storage()
queries = []
for w in words:
wqueries = []
for field in flist:
ftype = str(field.type)
options = None
fname = str(field)
if fname in opts:
options = opts[fname]
elif ftype[:7] in ("integer",
"list:in",
"list:st",
"referen",
"list:re",
"string"):
requires = field.requires
if not isinstance(requires, (list, tuple)):
requires = [requires]
if requires:
r = requires[0]
if isinstance(r, IS_EMPTY_OR):
r = r.other
if hasattr(r, "options"):
try:
options = r.options()
except:
options = []
if options is None and ftype in ("string", "text"):
wqueries.append(field.lower().like("%%%s%%" % w))
elif options is not None:
opts[fname] = options
vlist = [v for v, t in options
if s3_unicode(t).lower().find(s3_unicode(w)) != -1]
if vlist:
wqueries.append(field.belongs(vlist))
if len(wqueries):
queries.append(reduce(lambda x, y: x | y \
if x is not None else y,
wqueries))
if len(queries):
searchq = reduce(lambda x, y: x & y \
if x is not None else y, queries)
# ORDERBY -------------------------------------------------------------
orderby = []
if iSortingCols in get_vars:
# Sorting direction
def direction(i):
sort_dir = get_vars["sSortDir_%s" % str(i)]
return sort_dir and " %s" % sort_dir or ""
# Get the fields to order by
try:
numcols = int(get_vars[iSortingCols])
except:
numcols = 0
columns = []
pkey = str(self._id)
for i in xrange(numcols):
try:
iSortCol = int(get_vars["iSortCol_%s" % i])
except (AttributeError, KeyError):
# iSortCol_x not present in get_vars => ignore
columns.append(Storage(field=None))
continue
                # Map the sortable-column index to the real list_fields
                # index: subtract 1 for every non-id, non-sortable column
                # to the left of the sortable column
for j in xrange(iSortCol):
if get_vars.get("bSortable_%s" % j, "true") == "false":
try:
if rfields[j].colname != pkey:
iSortCol -= 1
                        except (KeyError, IndexError):
break
try:
rfield = rfields[iSortCol]
                except (KeyError, IndexError):
# iSortCol specifies a non-existent column, i.e.
# iSortCol_x>=numcols => ignore
columns.append(Storage(field=None))
else:
columns.append(rfield)
# Process the orderby-fields
for i in xrange(len(columns)):
rfield = columns[i]
field = rfield.field
if field is None:
continue
ftype = str(field.type)
represent = field.represent
if not hasattr(represent, "skip_dt_orderby") and \
hasattr(represent, "dt_orderby"):
# Custom orderby logic in field.represent
field.represent.dt_orderby(field,
direction(i),
orderby,
left_joins)
elif ftype[:9] == "reference" and \
hasattr(field, "sortby") and field.sortby:
# Foreign keys with sortby will be sorted by sortby
# Get the lookup table
tn = ftype[10:]
if parent is not None and \
parent.tablename == tn and field.name != fkey:
alias = "%s_%s_%s" % (parent.prefix, "linked", parent.name)
ktable = db[tn].with_alias(alias)
ktable._id = ktable[ktable._id.name]
tn = alias
elif tn == field.tablename:
prefix, name = field.tablename.split("_", 1)
alias = "%s_%s_%s" % (prefix, field.name, name)
ktable = db[tn].with_alias(alias)
ktable._id = ktable[ktable._id.name]
tn = alias
else:
ktable = db[tn]
# Add left joins for lookup table
if tn != skip:
left_joins.extend(rfield.left)
left_joins.add(ktable.on(field == ktable._id))
# Construct orderby from sortby
if not isinstance(field.sortby, (list, tuple)):
orderby.append("%s.%s%s" % (tn, field.sortby, direction(i)))
else:
orderby.append(", ".join(["%s.%s%s" %
(tn, fn, direction(i))
for fn in field.sortby]))
else:
# Otherwise, we sort by the field itself
orderby.append("%s%s" % (field, direction(i)))
if orderby:
orderby = ", ".join(orderby)
else:
orderby = None
left_joins = left_joins.as_list(tablenames=left_joins.joins.keys())
return (searchq, orderby, left_joins)
# -------------------------------------------------------------------------
def axisfilter(self, axes):
"""
Get all values for the given S3ResourceFields (axes) which
match the resource query, used in pivot tables to filter out
additional values where dimensions can have multiple values
per record
@param axes: the axis fields as list/tuple of S3ResourceFields
            @return: a dict with the allowed values per axis; contains only
                     those axes which are affected by the resource filter
"""
axisfilter = {}
qdict = self.get_query().as_dict(flat=True)
for rfield in axes:
field = rfield.field
if field is None:
# virtual field or unresolvable selector
continue
left_joins = S3Joins(self.tablename)
left_joins.extend(rfield.left)
tablenames = left_joins.joins.keys()
tablenames.append(self.tablename)
af = S3AxisFilter(qdict, tablenames)
if af.op is not None:
query = af.query()
left = left_joins.as_list()
# @todo: this does not work with virtual fields: need
# to retrieve all extra_fields for the dimension table
# and can't groupby (=must deduplicate afterwards)
rows = current.db(query).select(field,
left=left,
groupby=field)
colname = rfield.colname
if rfield.ftype[:5] == "list:":
values = []
vappend = values.append
for row in rows:
v = row[colname]
if v:
vappend(v)
values = set(chain.from_iterable(values))
include, exclude = af.values(rfield)
fdict = {}
if include:
for v in values:
vstr = s3_unicode(v)
if vstr in include and vstr not in exclude:
fdict[v] = None
else:
fdict = dict((v, None) for v in values)
axisfilter[colname] = fdict
else:
axisfilter[colname] = dict((row[colname], None)
for row in rows)
return axisfilter
# -------------------------------------------------------------------------
def prefix_selector(self, selector):
"""
Helper method to ensure consistent prefixing of field selectors
@param selector: the selector
"""
head = selector.split("$", 1)[0]
if "." in head:
prefix = head.split(".", 1)[0]
if prefix == self.alias:
return selector.replace("%s." % prefix, "~.")
else:
return selector
else:
return "~.%s" % selector
# -------------------------------------------------------------------------
def list_fields(self, key="list_fields", id_column=0):
"""
Get the list_fields for this resource
@param key: alternative key for the table configuration
@param id_column: True/False, whether to include the record ID
or not, or 0 to enforce the record ID to be
the first column
"""
list_fields = self.get_config(key, None)
if not list_fields and key != "list_fields":
list_fields = self.get_config("list_fields", None)
if not list_fields:
list_fields = [f.name for f in self.readable_fields()]
pkey = _pkey = self._id.name
fields = []
append = fields.append
selectors = set()
seen = selectors.add
for f in list_fields:
selector = f if type(f) is not tuple else f[1]
if selector == _pkey and not id_column:
pkey = f
elif selector not in selectors:
seen(selector)
append(f)
        # Note: "id_column is 0" (not "== 0") distinguishes the default 0
        # (enforce the record ID as first column) from False (omit the ID)
        if id_column is 0:
fields.insert(0, pkey)
return fields
# -------------------------------------------------------------------------
@property
def _table(self):
"""
Get the original Table object (without SQL Alias), this
is required for SQL update (DAL doesn't detect the alias
and uses the wrong tablename).
"""
if self.tablename != self._alias:
return current.s3db[self.tablename]
else:
return self.table
# =============================================================================
class S3AxisFilter(object):
"""
Experimental: helper class to extract filter values for pivot
table axis fields
"""
# -------------------------------------------------------------------------
def __init__(self, qdict, tablenames):
"""
Constructor, recursively introspect the query dict and extract
all relevant subqueries.
@param qdict: the query dict (from Query.as_dict(flat=True))
@param tablenames: the names of the relevant tables
"""
self.l = None
self.r = None
self.op = None
self.tablename = None
self.fieldname = None
if not qdict:
return
l = qdict["first"]
if "second" in qdict:
r = qdict["second"]
else:
r = None
op = qdict["op"]
if "tablename" in l:
if l["tablename"] in tablenames:
self.tablename = l["tablename"]
self.fieldname = l["fieldname"]
if isinstance(r, dict):
self.op = None
else:
self.op = op
self.r = r
elif op == "AND":
self.l = S3AxisFilter(l, tablenames)
self.r = S3AxisFilter(r, tablenames)
if self.l.op or self.r.op:
self.op = op
elif op == "OR":
self.l = S3AxisFilter(l, tablenames)
self.r = S3AxisFilter(r, tablenames)
if self.l.op and self.r.op:
self.op = op
elif op == "NOT":
self.l = S3AxisFilter(l, tablenames)
self.op = op
else:
self.l = S3AxisFilter(l, tablenames)
if self.l.op:
self.op = op
# -------------------------------------------------------------------------
def query(self):
""" Reconstruct the query from this filter """
op = self.op
if op is None:
return None
if self.tablename and self.fieldname:
l = current.s3db[self.tablename][self.fieldname]
elif self.l:
l = self.l.query()
else:
l = None
r = self.r
if op in ("AND", "OR", "NOT"):
r = r.query() if r else True
if op == "AND":
if l is not None and r is not None:
return l & r
elif r is not None:
return r
else:
return l
elif op == "OR":
if l is not None and r is not None:
return l | r
else:
return None
elif op == "NOT":
if l is not None:
return ~l
else:
return None
elif l is None:
return None
if isinstance(r, S3AxisFilter):
r = r.query()
if r is None:
return None
if op == "LOWER":
return l.lower()
elif op == "UPPER":
return l.upper()
elif op == "EQ":
return l == r
elif op == "NE":
return l != r
elif op == "LT":
return l < r
elif op == "LE":
return l <= r
elif op == "GE":
return l >= r
elif op == "GT":
return l > r
elif op == "BELONGS":
return l.belongs(r)
elif op == "CONTAINS":
return l.contains(r)
else:
return None
# -------------------------------------------------------------------------
def values(self, rfield):
"""
Helper method to filter list:type axis values
@param rfield: the axis field
@return: pair of value lists [include], [exclude]
"""
op = self.op
tablename = self.tablename
fieldname = self.fieldname
if tablename == rfield.tname and \
fieldname == rfield.fname:
value = self.r
if isinstance(value, (list, tuple)):
value = [s3_unicode(v) for v in value]
else:
value = [s3_unicode(value)]
if op == "CONTAINS":
return value, []
elif op == "EQ":
return value, []
elif op == "NE":
return [], value
elif op == "AND":
li, le = self.l.values(rfield)
ri, re = self.r.values(rfield)
return [v for v in li + ri if v not in le + re], []
elif op == "OR":
li, le = self.l.values(rfield)
ri, re = self.r.values(rfield)
return [v for v in li + ri], []
if op == "NOT":
li, le = self.l.values(rfield)
return [], li
return [], []
# =============================================================================
class S3ResourceFilter(object):
""" Class representing a resource filter """
def __init__(self,
resource,
id=None,
uid=None,
filter=None,
vars=None,
filter_component=None):
"""
Constructor
@param resource: the S3Resource
@param id: the record ID (or list of record IDs)
@param uid: the record UID (or list of record UIDs)
@param filter: a filter query (Query or S3ResourceQuery)
@param vars: the dict of GET vars (URL filters)
@param filter_component: the alias of the component the URL
filters apply for (filters for this
component must be handled separately)
"""
self.resource = resource
self.queries = []
self.filters = []
self.cqueries = {}
self.cfilters = {}
self.query = None
self.rfltr = None
self.vfltr = None
self.transformed = None
self.multiple = True
self.distinct = False
# Joins
self.ijoins = {}
self.ljoins = {}
table = resource.table
# Accessible/available query
if resource.accessible_query is not None:
method = []
if resource._approved:
method.append("read")
if resource._unapproved:
method.append("review")
mquery = resource.accessible_query(method, table)
else:
mquery = (table._id > 0)
# Deletion status
DELETED = current.xml.DELETED
if DELETED in table.fields and not resource.include_deleted:
remaining = (table[DELETED] != True)
mquery = remaining & mquery
# ID query
if id is not None:
if not isinstance(id, (list, tuple)):
self.multiple = False
mquery = mquery & (table._id == id)
else:
mquery = mquery & (table._id.belongs(id))
# UID query
UID = current.xml.UID
if uid is not None and UID in table:
if not isinstance(uid, (list, tuple)):
self.multiple = False
mquery = mquery & (table[UID] == uid)
else:
mquery = mquery & (table[UID].belongs(uid))
parent = resource.parent
if not parent:
# Standard master query
self.mquery = mquery
# URL queries
if vars:
resource.vars = Storage(vars)
# BBox
bbox, joins = self.parse_bbox_query(resource, vars)
if bbox is not None:
self.queries.append(bbox)
if joins:
self.ljoins.update(joins)
# Filters
add_filter = self.add_filter
# Current concept:
# Interpret all URL filters in the context of master
queries = S3URLQuery.parse(resource, vars)
# @todo: Alternative concept (inconsistent?):
# Interpret all URL filters in the context of filter_component:
#if filter_component and \
#filter_component in resource.components:
#context = resource.components[filter_component]
#else:
#context = resource
#queries = S3URLQuery.parse(context, vars)
for alias in queries:
if filter_component == alias:
for q in queries[alias]:
add_filter(q, component=alias, master=False)
else:
for q in queries[alias]:
add_filter(q)
self.cfilters = queries
else:
# Parent filter
pf = parent.rfilter
if not pf:
pf = parent.build_query()
# Extended master query
self.mquery = mquery & pf.get_query()
# Join the master
self.ijoins[parent._alias] = resource._join(reverse=True)
# Component/link-table specific filters
add_filter = self.add_filter
aliases = [resource.alias]
if resource.link is not None:
aliases.append(resource.link.alias)
elif resource.linked is not None:
aliases.append(resource.linked.alias)
for alias in aliases:
for filter_set in (pf.cqueries, pf.cfilters):
if alias in filter_set:
[add_filter(q) for q in filter_set[alias]]
# Additional filters
if filter is not None:
self.add_filter(filter)
# -------------------------------------------------------------------------
def add_filter(self, query, component=None, master=True):
"""
Extend this filter
@param query: a Query or S3ResourceQuery object
@param component: alias of the component the filter shall be
added to (None for master)
@param master: False to filter only component
"""
alias = None
if not master:
if not component:
return
if component != self.resource.alias:
alias = component
if isinstance(query, S3ResourceQuery):
self.transformed = None
filters = self.filters
cfilters = self.cfilters
self.distinct |= query._joins(self.resource)[1]
else:
# DAL Query
filters = self.queries
cfilters = self.cqueries
self.query = None
if alias:
if alias in self.cfilters:
cfilters[alias].append(query)
else:
cfilters[alias] = [query]
else:
filters.append(query)
return
# -------------------------------------------------------------------------
def get_query(self):
""" Get the effective DAL query """
if self.query is not None:
return self.query
resource = self.resource
query = reduce(lambda x, y: x & y, self.queries, self.mquery)
if self.filters:
if self.transformed is None:
# Combine all filters
filters = reduce(lambda x, y: x & y, self.filters)
# Transform with external search engine
transformed = filters.transform(resource)
self.transformed = transformed
# Split DAL and virtual filters
self.rfltr, self.vfltr = transformed.split(resource)
# Add to query
rfltr = self.rfltr
if rfltr is not None:
if isinstance(rfltr, S3ResourceQuery):
query &= rfltr.query(resource)
else:
# Combination of virtual field filter and web2py Query
query &= rfltr
self.query = query
return query
# -------------------------------------------------------------------------
def get_filter(self):
""" Get the effective virtual filter """
if self.query is None:
self.get_query()
return self.vfltr
# -------------------------------------------------------------------------
def get_joins(self, left=False, as_list=True):
"""
Get the joins required for this filter
@param left: get the left joins
@param as_list: return a flat list rather than a nested dict
"""
if self.query is None:
self.get_query()
joins = dict(self.ljoins if left else self.ijoins)
resource = self.resource
for q in self.filters:
subjoins = q._joins(resource, left=left)[0]
joins.update(subjoins)
# Cross-component left joins
parent = resource.parent
if parent:
pf = parent.rfilter
if pf is None:
pf = parent.build_query()
parent_left = pf.get_joins(left=True, as_list=False)
if parent_left:
tablename = resource._alias
if left:
for tn in parent_left:
if tn not in joins and tn != tablename:
joins[tn] = parent_left[tn]
joins[parent._alias] = resource._join(reverse=True)
else:
joins.pop(parent._alias, None)
if as_list:
return [j for tablename in joins for j in joins[tablename]]
else:
return joins
# -------------------------------------------------------------------------
def get_fields(self):
""" Get all field selectors in this filter """
if self.query is None:
self.get_query()
if self.vfltr:
return self.vfltr.fields()
else:
return []
# -------------------------------------------------------------------------
@staticmethod
def parse_bbox_query(resource, get_vars):
"""
Generate a Query from a URL boundary box query; supports multiple
bboxes, but optimised for the usual case of just 1
@param resource: the resource
@param get_vars: the URL GET vars
"""
tablenames = ("gis_location",
"gis_feature_query",
"gis_layer_shapefile")
POLYGON = "POLYGON((%s %s, %s %s, %s %s, %s %s, %s %s))"
query = None
joins = {}
if get_vars:
table = resource.table
tablename = resource.tablename
fields = table.fields
introspect = tablename not in tablenames
for k, v in get_vars.items():
if k[:4] == "bbox":
if type(v) is list:
v = v[-1]
try:
minLon, minLat, maxLon, maxLat = v.split(",")
except ValueError:
# Badly-formed bbox - ignore
continue
# Identify the location reference
field = None
rfield = None
alias = False
if k.find(".") != -1:
# Field specified in query
fname = k.split(".")[1]
if fname not in fields:
# Field not found - ignore
continue
field = table[fname]
if query is not None or "bbox" in get_vars:
# Need alias
alias = True
elif introspect:
# Location context?
context = resource.get_config("context")
if context and "location" in context:
try:
rfield = resource.resolve_selector("(location)$lat")
except (SyntaxError, AttributeError):
rfield = None
else:
if not rfield.field or rfield.tname != "gis_location":
# Invalid location context
rfield = None
# Fall back to location_id (or site_id as last resort)
if rfield is None:
fname = None
for f in fields:
ftype = str(table[f].type)
if ftype[:22] == "reference gis_location":
fname = f
break
elif not fname and \
ftype[:18] == "reference org_site":
fname = f
field = table[fname] if fname else None
if not rfield and not field:
# No location reference could be identified => skip
continue
# Construct the join to gis_location
gtable = current.s3db.gis_location
if rfield:
joins.update(rfield.left)
elif field:
fname = field.name
gtable = current.s3db.gis_location
if alias:
gtable = gtable.with_alias("gis_%s_location" % fname)
tname = str(gtable)
ftype = str(field.type)
if ftype == "reference gis_location":
joins[tname] = [gtable.on(gtable.id == field)]
elif ftype == "reference org_site":
stable = current.s3db.org_site
if alias:
stable = stable.with_alias("org_%s_site" % fname)
joins[tname] = [stable.on(stable.site_id == field),
gtable.on(gtable.id == stable.location_id)]
elif introspect:
# => not a location or site reference
continue
elif tablename in ("gis_location", "gis_feature_query"):
gtable = table
elif tablename == "gis_layer_shapefile":
# @todo: this needs a join too, no?
gtable = resource.components.items()[0][1].table
# Construct the bbox filter
bbox_filter = None
if current.deployment_settings.get_gis_spatialdb():
# Use the Spatial Database
minLon = float(minLon)
maxLon = float(maxLon)
minLat = float(minLat)
maxLat = float(maxLat)
bbox = POLYGON % (minLon, minLat,
minLon, maxLat,
maxLon, maxLat,
maxLon, minLat,
minLon, minLat)
try:
# Spatial DAL & Database
bbox_filter = gtable.the_geom \
.st_intersects(bbox)
except:
# Old DAL or non-spatial database
pass
if bbox_filter is None:
# Standard Query
bbox_filter = (gtable.lon > float(minLon)) & \
(gtable.lon < float(maxLon)) & \
(gtable.lat > float(minLat)) & \
(gtable.lat < float(maxLat))
# Add bbox filter to query
if query is None:
query = bbox_filter
else:
# Merge with the previous BBOX
query = query & bbox_filter
return query, joins
# -------------------------------------------------------------------------
def __call__(self, rows, start=None, limit=None):
"""
Filter a set of rows by the effective virtual filter
@param rows: a Rows object
@param start: index of the first matching record to select
@param limit: maximum number of records to select
"""
vfltr = self.get_filter()
if rows is None or vfltr is None:
return rows
resource = self.resource
if start is None:
start = 0
first = start
if limit is not None:
last = start + limit
if last < first:
first, last = last, first
if first < 0:
first = 0
if last < 0:
last = 0
else:
last = None
i = 0
result = []
append = result.append
for row in rows:
if last is not None and i >= last:
break
success = vfltr(resource, row, virtual=True)
if success or success is None:
if i >= first:
append(row)
i += 1
return Rows(rows.db, result,
colnames=rows.colnames, compact=False)
# -------------------------------------------------------------------------
def count(self, left=None, distinct=False):
"""
Get the total number of matching records
@param left: left outer joins
@param distinct: count only distinct rows
"""
distinct |= self.distinct
resource = self.resource
if resource is None:
return 0
table = resource.table
vfltr = self.get_filter()
if vfltr is None and not distinct:
tablename = table._tablename
ijoins = S3Joins(tablename, self.get_joins(left=False))
ljoins = S3Joins(tablename, self.get_joins(left=True))
ljoins.add(left)
join = ijoins.as_list(prefer=ljoins)
left = ljoins.as_list()
cnt = table._id.count()
row = current.db(self.query).select(cnt,
join=join,
left=left).first()
if row:
return row[cnt]
else:
return 0
else:
data = resource.select([table._id.name],
# We don't really want to retrieve
# any rows but just count, hence:
limit=1,
count=True)
return data["numrows"]
# -------------------------------------------------------------------------
def __repr__(self):
""" String representation of the instance """
resource = self.resource
left_joins = self.get_joins(left=True)
if left_joins:
left = S3Joins(resource.tablename, left_joins)
joins = ", ".join([str(j) for j in left.as_list()])
else:
left = None
joins = None
vfltr = self.get_filter()
if vfltr:
vfltr = vfltr.represent(resource)
else:
vfltr = None
represent = "<S3ResourceFilter %s, " \
"query=%s, " \
"left=[%s], " \
"distinct=%s, " \
"filter=%s>" % (
resource.tablename,
self.get_query(),
joins,
self.distinct,
vfltr
)
return represent
# -------------------------------------------------------------------------
def serialize_url(self):
"""
Serialize this filter as URL query
@return: a Storage of URL GET variables
"""
resource = self.resource
url_vars = Storage()
for f in self.filters:
sub = f.serialize_url(resource=resource)
url_vars.update(sub)
return url_vars
# =============================================================================
class S3ResourceData(object):
""" Class representing data in a resource """
def __init__(self,
resource,
fields,
start=0,
limit=None,
left=None,
orderby=None,
groupby=None,
distinct=False,
virtual=True,
count=False,
getids=False,
as_rows=False,
represent=False,
show_links=True,
raw_data=False):
"""
Constructor, extracts (and represents) data from a resource
@param resource: the resource
@param fields: the fields to extract (selector strings)
@param start: index of the first record
@param limit: maximum number of records
@param left: additional left joins required for custom filters
@param orderby: orderby-expression for DAL
@param groupby: fields to group by (overrides fields!)
@param distinct: select distinct rows
@param virtual: include mandatory virtual fields
@param count: include the total number of matching records
@param getids: include the IDs of all matching records
@param as_rows: return the rows (don't extract/represent)
@param represent: render field value representations
@param raw_data: include raw data in the result
@note: as_rows / groupby prevent automatic splitting of
large multi-table joins, so use with care!
@note: with groupby, only the groupby fields will be returned
(i.e. fields will be ignored), because aggregates are
not supported (yet)
"""
# The resource
self.resource = resource
self.table = table = resource.table
# Dict to collect accessible queries for differential
# field authorization (each joined table is authorized
# separately)
self.aqueries = aqueries = {}
# Joins (inner/left)
tablename = table._tablename
self.ijoins = ijoins = S3Joins(tablename)
self.ljoins = ljoins = S3Joins(tablename)
# The query
master_query = query = resource.get_query()
# Joins from filters
# @note: in components, rfilter is None until after get_query!
rfilter = resource.rfilter
filter_tables = set(ijoins.add(rfilter.get_joins(left=False)))
filter_tables.update(ljoins.add(rfilter.get_joins(left=True)))
# Left joins from caller
master_tables = set(ljoins.add(left))
filter_tables.update(master_tables)
resolve = resource.resolve_selectors
# Virtual fields and extra fields required by filter
virtual_fields = rfilter.get_fields()
vfields, vijoins, vljoins, d = resolve(virtual_fields, show=False)
extra_tables = set(ijoins.extend(vijoins))
extra_tables.update(ljoins.extend(vljoins))
distinct |= d
# Display fields (fields to include in the result)
if fields is None:
fields = [f.name for f in resource.readable_fields()]
dfields, dijoins, dljoins, d = resolve(fields, extra_fields=False)
ijoins.extend(dijoins)
ljoins.extend(dljoins)
distinct |= d
# Initialize field data and effort estimates
if not groupby or as_rows:
self.init_field_data(dfields)
else:
self.field_data = self.effort = None
# Resolve ORDERBY
orderby, orderby_aggr, orderby_fields, tables = self.resolve_orderby(orderby)
if tables:
filter_tables.update(tables)
# Virtual fields filter and limitby
vfltr = resource.get_filter()
if vfltr is None:
limitby = resource.limitby(start=start, limit=limit)
else:
# Skip start/limit in master query if we filter by virtual
# fields: we need to extract all matching rows first, then
# filter by virtual fields, then apply page limits
limitby = None
# Filter Query:
# If we need to determine the number and/or ids of all matching
# records, but not to extract all records, then we run a
# separate query here to extract just this information:
# Joins for filter query
filter_ijoins = ijoins.as_list(tablenames=filter_tables,
aqueries=aqueries,
prefer=ljoins)
filter_ljoins = ljoins.as_list(tablenames=filter_tables,
aqueries=aqueries)
ids = page = totalrows = None
if getids or count or ljoins or ijoins:
if not groupby and \
not vfltr and \
(count or limitby or extra_tables != filter_tables):
# Execute the filter query
totalrows, ids = self.filter_query(query,
join=filter_ijoins,
left=filter_ljoins,
getids=getids or ljoins or ijoins,
orderby=orderby_aggr)
if ids is not None:
if limitby:
page = ids[limitby[0]:limitby[1]]
else:
page = ids
# Once we have the ids, we don't need to apply the
# filter query (and the joins it requires) again,
# but can use a simplified master query:
master_query = table._id.belongs(page)
# Order and limits are also determined by the page
# (which is an ordered list of record IDs), so we
# do not need to retain them (and join orderby
# fields in subsequent queries) either.
orderby = None
limitby = None
# If we don't use a simplified master_query, we must include
# all necessary joins for filter and orderby (=filter_tables) in
# the master query
if ids is None and (filter_ijoins or filter_ljoins):
master_tables = filter_tables
# Determine fields in master query
if not groupby:
master_tables.update(extra_tables)
tables, qfields, mfields, groupby = self.master_fields(dfields,
vfields,
master_tables,
as_rows=as_rows,
groupby=groupby)
# Additional tables to join?
if tables:
master_tables.update(tables)
# ORDERBY settings
pkey = str(table._id)
if groupby:
distinct = False
orderby = orderby_aggr
has_id = pkey in qfields
else:
if distinct and orderby:
# With DISTINCT, ORDERBY-fields must appear in SELECT
# (required by postgresql?)
for orderby_field in orderby_fields:
fn = str(orderby_field)
if fn not in qfields:
qfields[fn] = orderby_field
# Make sure we have the primary key in SELECT
            if pkey not in qfields:
                qfields[pkey] = resource._id
            has_id = True
# Joins for master query
master_ijoins = ijoins.as_list(tablenames=master_tables,
aqueries=aqueries,
prefer=ljoins)
master_ljoins = ljoins.as_list(tablenames=master_tables,
aqueries=aqueries)
# Suspend (mandatory) virtual fields if so requested
if not virtual:
vf = table.virtualfields
osetattr(table, "virtualfields", [])
# Execute master query
db = current.db
rows = db(master_query).select(join=master_ijoins,
left=master_ljoins,
distinct=distinct,
groupby=groupby,
orderby=orderby,
limitby=limitby,
cacheable=not as_rows,
*qfields.values())
# Restore virtual fields
if not virtual:
osetattr(table, "virtualfields", vf)
# Apply virtual fields filter
if rows and vfltr is not None:
if count:
rows = rfilter(rows)
totalrows = len(rows)
if limit and start is None:
start = 0
if start is not None and limit is not None:
rows = Rows(db,
records=rows.records[start:start+limit],
colnames=rows.colnames,
compact=False)
elif start is not None:
rows = Rows(db,
records=rows.records[start:],
colnames=rows.colnames,
compact=False)
else:
rows = rfilter(rows, start=start, limit=limit)
if (getids or ljoins or ijoins) and has_id:
ids = self.getids(rows, pkey)
totalrows = len(ids)
# Build the result
self.rfields = dfields
self.numrows = 0 if totalrows is None else totalrows
self.ids = ids
if groupby or as_rows:
# Just store the rows, no further queries or extraction
self.rows = rows
elif not rows:
# No rows found => empty list
self.rows = []
else:
# Extract the data from the master rows
records = self.extract(rows,
pkey,
list(mfields),
join = hasattr(rows[0], tablename),
represent = represent)
# Extract the page record IDs if we don't have them yet
if page is None:
if ids is None:
self.ids = ids = self.getids(rows, pkey)
page = ids
# Execute any joined queries
joined_fields = self.joined_fields(dfields, qfields)
joined_query = table._id.belongs(page)
for jtablename, jfields in joined_fields.items():
records = self.joined_query(jtablename,
joined_query,
jfields,
records,
represent=represent)
# Re-combine and represent the records
results = {}
field_data = self.field_data
NONE = current.messages["NONE"]
render = self.render
for dfield in dfields:
if represent:
# results = {RecordID: {ColumnName: Representation}}
results = render(dfield,
results,
none=NONE,
raw_data=raw_data,
show_links=show_links)
else:
# results = {RecordID: {ColumnName: Value}}
colname = dfield.colname
fdata = field_data[colname]
frecords = fdata[1]
list_type = fdata[3]
for record_id in records:
if record_id not in results:
result = results[record_id] = Storage()
else:
result = results[record_id]
data = frecords[record_id].keys()
if len(data) == 1 and not list_type:
data = data[0]
result[colname] = data
self.rows = [results[record_id] for record_id in page]
# -------------------------------------------------------------------------
def init_field_data(self, rfields):
"""
Initialize field data and effort estimates for representation
Field data: allow representation per unique value (rather than
record by record), together with bulk-represent this
can reduce the total lookup effort per field to a
single query
Effort estimates: if no bulk-represent is available for a
list:reference, then a lookup per unique value
is only faster if the number of unique values
is significantly lower than the number of
extracted rows (and the number of values per
row), otherwise a per-row lookup is more
efficient.
E.g. 5 rows with 2 values each,
10 unique values in total
=> row-by-row lookup more efficient
(5 queries vs 10 queries)
but: 5 rows with 2 values each,
2 unique values in total
=> value-by-value lookup is faster
(5 queries vs 2 queries)
However: 15 rows with 15 values each,
20 unique values in total
=> value-by-value lookup faster
(15 queries á 15 values vs.
20 queries á 1 value)!
The required effort is estimated
during the data extraction, and then used to
determine the lookup strategy for the
representation.
@param rfields: the fields to extract ([S3ResourceField])
"""
table = self.resource.table
tablename = table._tablename
pkey = str(table._id)
field_data = {pkey: ({}, {}, False, False, False, False)}
effort = {pkey: 0}
for dfield in rfields:
colname = dfield.colname
effort[colname] = 0
ftype = dfield.ftype[:4]
field_data[colname] = ({}, {},
dfield.tname != tablename,
ftype == "list",
dfield.virtual,
ftype == "json",
)
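        # Each field_data tuple holds, in the order unpacked by extract()/render():
        # (unique-value map, per-record map, joined?, list-type?, virtual?, json-type?)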
self.field_data = field_data
self.effort = effort
return
# -------------------------------------------------------------------------
def resolve_orderby(self, orderby):
"""
Resolve the ORDERBY expression.
@param orderby: the orderby expression from the caller
@return: tuple (expr, aggr, fields, tables):
expr: the orderby expression (resolved into Fields)
aggr: the orderby expression with aggregations
fields: the fields in the orderby
tables: the tables required for the orderby
@note: for GROUPBY id (e.g. filter query), all ORDERBY fields
must appear in aggregation functions, otherwise ORDERBY
can be ambiguous => use aggr instead of expr
"""
table = self.resource.table
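        # Illustrative example (field name assumed): orderby="name desc" on the
        # master table resolves to expr=[~table.name] and, because "name" is not
        # the primary key, aggr=[~(table.name.max())] for use with GROUPBY id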
tablename = table._tablename
pkey = str(table._id)
ljoins = self.ljoins
ijoins = self.ijoins
tables = set()
if orderby:
db = current.db
items = self.resolve_expression(orderby)
expr = []
aggr = []
fields = []
for item in items:
expression = None
if type(item) is Expression:
f = item.first
op = item.op
if op == db._adapter.AGGREGATE:
# Already an aggregation
expression = item
elif isinstance(f, Field) and op == db._adapter.INVERT:
direction = "desc"
else:
# Other expression - not supported
continue
elif isinstance(item, Field):
direction = "asc"
f = item
elif isinstance(item, str):
fn, direction = (item.strip().split() + ["asc"])[:2]
tn, fn = ([tablename] + fn.split(".", 1))[-2:]
try:
f = db[tn][fn]
except (AttributeError, KeyError):
continue
else:
continue
fname = str(f)
tname = fname.split(".", 1)[0]
if tname != tablename:
if tname in ljoins or tname in ijoins:
tables.add(tname)
else:
# No join found for this field => skip
continue
fields.append(f)
if expression is None:
expression = f if direction == "asc" else ~f
expr.append(expression)
direction = direction.strip().lower()[:3]
if fname != pkey:
expression = f.min() if direction == "asc" else ~(f.max())
else:
expr.append(expression)
aggr.append(expression)
else:
expr = None
aggr = None
fields = None
return expr, aggr, fields, tables
# -------------------------------------------------------------------------
def filter_query(self, query,
join=None,
left=None,
getids=False,
orderby=None):
"""
Execute a query to determine the number/record IDs of all
matching rows
@param query: the query to execute
@param join: the inner joins for this query
@param left: the left joins for this query
            @param getids: also extract the IDs of all matching records
@param orderby: ORDERBY expression for this query
@return: tuple of (TotalNumberOfRecords, RecordIDs)
"""
db = current.db
table = self.table
if getids:
field = table._id
distinct = False
groupby = field
else:
field = table._id.count()
distinct = True
groupby = None
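        # With getids, grouping by the record ID collapses join-induced duplicates
        # while still returning one row per matching record; otherwise a single
        # count(DISTINCT id) row is sufficient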
# Temporarily deactivate virtual fields
vf = table.virtualfields
osetattr(table, "virtualfields", [])
# Extract the data
rows = db(query).select(field,
join=join,
left=left,
distinct=distinct,
orderby=orderby,
groupby=groupby,
cacheable=True)
# Restore the virtual fields
osetattr(table, "virtualfields", vf)
if getids:
pkey = str(table._id)
ids = [row[pkey] for row in rows]
totalrows = len(ids)
else:
ids = None
totalrows = rows.first()[field]
return totalrows, ids
# -------------------------------------------------------------------------
def master_fields(self,
dfields,
vfields,
joined_tables,
as_rows=False,
groupby=None):
"""
Find all tables and fields to retrieve in the master query
@param dfields: the requested fields (S3ResourceFields)
@param vfields: the virtual filter fields
@param joined_tables: the tables joined in the master query
@param as_rows: whether to produce web2py Rows
@param groupby: the GROUPBY expression from the caller
@return: tuple (tables, fields, extract, groupby):
tables: the tables required to join
fields: the fields to retrieve
extract: the fields to extract from the result
groupby: the GROUPBY expression (resolved into Fields)
"""
db = current.db
tablename = self.resource.table._tablename
# Names of additional tables to join
tables = set()
# Fields to retrieve in the master query, as dict {ColumnName: Field}
fields = {}
# Column names of fields to extract from the master rows
extract = set()
if groupby:
# Resolve the groupby into Fields
items = self.resolve_expression(groupby)
groupby = []
groupby_append = groupby.append
for item in items:
# Identify the field
tname = None
if isinstance(item, Field):
f = item
elif isinstance(item, str):
fn = item.strip()
tname, fn = ([tablename] + fn.split(".", 1))[-2:]
try:
f = db[tname][fn]
except (AttributeError, KeyError):
continue
else:
continue
groupby_append(f)
# Add to fields
fname = str(f)
if not tname:
tname = f.tablename
fields[fname] = f
# Do we need to join additional tables?
if tname == tablename:
# no join required
continue
else:
# Get joins from dfields
tnames = None
for dfield in dfields:
if dfield.colname == fname:
tnames = self.rfield_tables(dfield)
break
if tnames:
tables |= tnames
else:
# Join at least the table that holds the fields
tables.add(tname)
# Only extract GROUPBY fields (as we don't support aggregates)
extract = set(fields.keys())
else:
rfields = dfields + vfields
for rfield in rfields:
# Is the field in a joined table?
tname = rfield.tname
joined = tname == tablename or tname in joined_tables
if as_rows or joined:
colname = rfield.colname
if rfield.show:
# If show => add to extract
extract.add(colname)
if rfield.field:
# If real field => add to fields
fields[colname] = rfield.field
if not joined:
# Not joined yet? => add all required tables
tables |= self.rfield_tables(rfield)
return tables, fields, extract, groupby
# -------------------------------------------------------------------------
def joined_fields(self, all_fields, master_fields):
"""
Determine which fields in joined tables haven't been
retrieved in the master query
@param all_fields: all requested fields (list of S3ResourceFields)
@param master_fields: all fields in the master query, a dict
{ColumnName: Field}
@return: a nested dict {TableName: {ColumnName: Field}},
additionally required left joins are stored per
table in the inner dict as "_left"
"""
resource = self.resource
table = resource.table
tablename = table._tablename
fields = {}
for rfield in all_fields:
colname = rfield.colname
if colname in master_fields or rfield.tname == tablename:
continue
tname = rfield.tname
if tname not in fields:
sfields = fields[tname] = {}
left = rfield.left
joins = S3Joins(table)
if left:
[joins.add(left[tn]) for tn in left]
sfields["_left"] = joins
else:
sfields = fields[tname]
if colname not in sfields:
sfields[colname] = rfield.field
return fields
# -------------------------------------------------------------------------
def joined_query(self, tablename, query, fields, records, represent=False):
"""
Extract additional fields from a joined table: if there are
fields in joined tables which haven't been extracted in the
master query, then we perform a separate query for each joined
table (this is faster than building a multi-table-join)
@param tablename: name of the joined table
@param query: the Query
@param fields: the fields to extract
@param records: the output dict to update, structure:
{RecordID: {ColumnName: RawValues}}
@param represent: store extracted data (self.field_data) for
fast representation, and estimate lookup
efforts (self.effort)
@return: the output dict
"""
s3db = current.s3db
ljoins = self.ljoins
table = self.resource.table
pkey = str(table._id)
# Get the extra fields for subtable
sresource = s3db.resource(tablename)
efields, ejoins, l, d = sresource.resolve_selectors([])
# Get all left joins for subtable
tnames = ljoins.extend(l) + list(fields["_left"].tables)
sjoins = ljoins.as_list(tablenames=tnames,
aqueries=self.aqueries)
if not sjoins:
return records
del fields["_left"]
# Get all fields for subtable query
extract = fields.keys()
for efield in efields:
fields[efield.colname] = efield.field
sfields = [f for f in fields.values() if f]
if not sfields:
sfields.append(sresource._id)
sfields.insert(0, table._id)
# Retrieve the subtable rows
rows = current.db(query).select(left=sjoins,
distinct=True,
cacheable=True,
*sfields)
# Extract and merge the data
records = self.extract(rows,
pkey,
extract,
records=records,
join=True,
represent=represent)
return records
# -------------------------------------------------------------------------
def extract(self,
rows,
pkey,
columns,
join=True,
records=None,
represent=False):
"""
Extract the data from rows and store them in self.field_data
@param rows: the rows
@param pkey: the primary key
@param columns: the columns to extract
@param join: the rows are the result of a join query
@param records: the records dict to merge the data into
@param represent: collect unique values per field and estimate
representation efforts for list:types
"""
field_data = self.field_data
effort = self.effort
if records is None:
records = {}
def get(key):
t, f = key.split(".", 1)
if join:
return lambda row, t=t, f=f: ogetattr(ogetattr(row, t), f)
else:
return lambda row, f=f: ogetattr(row, f)
getkey = get(pkey)
getval = [get(c) for c in columns]
from itertools import groupby
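        # itertools.groupby only merges *adjacent* rows with equal keys, so this
        # step relies on the master rows arriving grouped (ordered) by the primary key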
for k, g in groupby(rows, key=getkey):
group = list(g)
record = records.get(k, {})
for idx, col in enumerate(columns):
fvalues, frecords, joined, list_type, virtual, json_type = field_data[col]
values = record.get(col, {})
lazy = False
for row in group:
try:
value = getval[idx](row)
except AttributeError:
_debug("Warning S3Resource.extract: column %s not in row" % col)
value = None
if lazy or callable(value):
# Lazy virtual field
value = value()
lazy = True
if virtual and not list_type and type(value) is list:
# Virtual field that returns a list
list_type = True
if list_type and value is not None:
if represent and value:
effort[col] += 30 + len(value)
for v in value:
if v not in values:
values[v] = None
if represent and v not in fvalues:
fvalues[v] = None
elif json_type:
# Returns unhashable types
value = json.dumps(value)
if value not in values:
values[value] = None
if represent and value not in fvalues:
fvalues[value] = None
else:
if value not in values:
values[value] = None
if represent and value not in fvalues:
fvalues[value] = None
record[col] = values
if k not in frecords:
frecords[k] = record[col]
records[k] = record
return records
# -------------------------------------------------------------------------
def render(self,
rfield,
results,
none="-",
raw_data=False,
show_links=True):
"""
Render the representations of the values for rfield in
all records in the result
@param rfield: the field (S3ResourceField)
@param results: the output dict to update with the representations,
structure: {RecordID: {ColumnName: Representation}},
the raw data will be a special item "_row" in the
inner dict holding a Storage of the raw field values
@param none: default representation of None
@param raw_data: retain the raw data in the output dict
@param show_links: allow representation functions to render
links as HTML
"""
colname = rfield.colname
field_data = self.field_data
fvalues, frecords, joined, list_type, virtual, json_type = field_data[colname]
# Get the renderer
renderer = rfield.represent
if not callable(renderer):
# @ToDo: Don't convert unformatted numbers to strings
renderer = lambda v: s3_unicode(v) if v is not None else none
# Deactivate linkto if so requested
if not show_links and hasattr(renderer, "show_link"):
show_link = renderer.show_link
renderer.show_link = False
else:
show_link = None
per_row_lookup = list_type and \
self.effort[colname] < len(fvalues) * 30
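        # Heuristic: extract() adds roughly 30 + len(value) "effort" units per list
        # value, so the per-row lookup is only chosen while the accumulated effort
        # stays below ~30 units per unique value (cf. the estimate in init_field_data)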
# Render all unique values
if hasattr(renderer, "bulk") and not list_type:
per_row_lookup = False
fvalues = renderer.bulk(fvalues.keys(), list_type=False)
elif not per_row_lookup:
for value in fvalues:
try:
text = renderer(value)
except:
text = s3_unicode(value)
fvalues[value] = text
# Write representations into result
for record_id in frecords:
if record_id not in results:
results[record_id] = Storage() \
if not raw_data \
else Storage(_row=Storage())
record = frecords[record_id]
result = results[record_id]
# List type with per-row lookup?
if per_row_lookup:
value = record.keys()
if None in value and len(value) > 1:
value = [v for v in value if v is not None]
try:
text = renderer(value)
except:
text = s3_unicode(value)
result[colname] = text
if raw_data:
result["_row"][colname] = value
# Single value (master record)
elif len(record) == 1 or \
not joined and not list_type:
value = record.keys()[0]
result[colname] = fvalues[value] \
if value in fvalues else none
if raw_data:
result["_row"][colname] = value
continue
# Multiple values (joined or list-type)
else:
vlist = []
for value in record:
if value is None and not list_type:
continue
value = fvalues[value] \
if value in fvalues else none
vlist.append(value)
# Concatenate multiple values
if any([hasattr(v, "xml") for v in vlist]):
data = TAG[""](
list(
chain.from_iterable(
[(v, ", ") for v in vlist])
)[:-1]
)
else:
data = ", ".join([s3_unicode(v) for v in vlist])
result[colname] = data
if raw_data:
result["_row"][colname] = record.keys()
# Restore linkto
if show_link is not None:
renderer.show_link = show_link
return results
# -------------------------------------------------------------------------
def __getitem__(self, key):
"""
Helper method to access the results as dict items, for
backwards-compatibility
@param key: the key
@todo: migrate use-cases to .<key> notation, then deprecate
"""
if key in ("rfields", "numrows", "ids", "rows"):
return getattr(self, key)
else:
raise AttributeError
# -------------------------------------------------------------------------
def getids(self, rows, pkey):
"""
Extract all unique record IDs from rows, preserving the
order by first match
@param rows: the Rows
@param pkey: the primary key
@return: list of unique record IDs
"""
x = set()
seen = x.add
result = []
append = result.append
for row in rows:
row_id = row[pkey]
if row_id not in x:
seen(row_id)
append(row_id)
return result
# -------------------------------------------------------------------------
@staticmethod
def rfield_tables(rfield):
"""
Get the names of all tables that need to be joined for a field
@param rfield: the field (S3ResourceField)
@return: a set of tablenames
"""
left = rfield.left
if left:
# => add all left joins required for that table
tablenames = set(j.first._tablename
for tn in left for j in left[tn])
else:
# => we don't know any further left joins,
# but as a minimum we need to add this table
tablenames = set([rfield.tname])
return tablenames
# -------------------------------------------------------------------------
@staticmethod
def resolve_expression(expr):
"""
Resolve an orderby or groupby expression into its items
@param expr: the orderby/groupby expression
"""
if isinstance(expr, str):
items = expr.split(",")
elif not isinstance(expr, (list, tuple)):
items = [expr]
else:
items = expr
return items
# END =========================================================================
| [
"[email protected]"
] | |
8b346eaacf62c7cde882fe6c60be97b4649c2519 | 1620e0af4a522db2bac16ef9c02ac5b5a4569d70 | /Ekeopara_Praise/Phase 2/DICTIONARY/Day48 Tasks/Task2.py | 81b89aa1ff7a364846dcb6ab7608ea8ed5a16508 | [
"MIT"
] | permissive | Ekeopara-Praise/python-challenge-solutions | cda07902c9ffc09ba770ae7776e5e01026406a05 | 068b67c05524b5c5a0d6084315eca3424c768421 | refs/heads/master | 2022-12-15T15:29:03.031583 | 2020-09-25T06:46:27 | 2020-09-25T06:46:27 | 263,758,530 | 2 | 0 | null | 2020-05-13T22:37:33 | 2020-05-13T22:37:32 | null | UTF-8 | Python | false | false | 211 | py | '''2. Write a Python script to add a key to a dictionary.
Sample Dictionary : {0: 10, 1: 20}
Expected Result : {0: 10, 1: 20, 2: 30} '''
original_dict = {0: 10, 1: 20}
original_dict[2] = 30
print(original_dict) | [
"[email protected]"
] | |
8753a921a15f6a43bf864b793500b8df7df5a232 | bc437dc74647765b51996f64b35fda3d047daf93 | /2_Intermediate/day18_The_Hirst_Painting_Project/main.py | 4c03978fb420c12c9f275227d28b734e5c0a907b | [] | no_license | macosta-42/100_days_of_code | e06720d57b6ed870a3dd4fa4e6d019296206a08f | 5b527dc18bae2ef556c26f653ef3c4badf94bb82 | refs/heads/main | 2023-05-22T03:26:02.422275 | 2021-06-10T10:31:26 | 2021-06-10T10:31:26 | 328,963,362 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,187 | py | # import colorgram
#
# Extract 30 colors from an image.
# colors = colorgram.extract('image.jpg', 30)
#
# rgb_colors = []
#
# for color in colors:
# r = color.rgb.r
# g = color.rgb.g
# b = color.rgb.b
# new_color = (r, g, b)
# rgb_colors.append(new_color)
#
#
# print(rgb_colors)
import turtle as t
import random
color_list = [
(8, 16, 67),
(63, 8, 28),
(192, 70, 22),
(144, 11, 35),
(248, 237, 242),
(13, 45, 142),
(30, 103, 175),
(123, 162, 201),
(249, 216, 64),
(170, 16, 5),
(204, 71, 124),
(62, 34, 12),
(224, 135, 86),
(12, 45, 32),
(200, 174, 38),
(143, 194, 173),
(213, 74, 55),
(174, 50, 76),
(59, 161, 118),
(252, 206, 0),
(215, 134, 145),
(78, 111, 80),
(82, 111, 199),
(12, 100, 4),
(177, 185, 218),
(231, 166, 180),
(237, 171, 160)
]
tim = t.Turtle()
tim.hideturtle()
tim.speed(0)
t.colormode(255)
tim.penup()
pos_x = -250
pos_y = -250
for pos in range(10):
tim.setpos(pos_x, pos_y)
for dot in range(10):
tim.dot(20, random.choice(color_list))
tim.forward(50)
pos_y += 50
screen = t.Screen()
screen.exitonclick()
| [
"[email protected]"
] | |
39449e677ee1bf94f14738a476fbaeffef554460 | 11e484590b27585facf758f0432eeebe66bf790a | /fal_default_discount/__openerp__.py | ebb37f69d114ff8b401e6a98002ffca961d71f5d | [] | no_license | jeanabreu/falinwa_branch | 51b38ee5a3373d42417b84a0431bad9f7295f373 | be96a209479259cd5b47dec73694938848a2db6c | refs/heads/master | 2021-01-18T10:25:49.866747 | 2015-08-25T10:05:05 | 2015-08-25T10:05:05 | 41,369,368 | 0 | 1 | null | 2015-08-25T14:51:50 | 2015-08-25T14:51:50 | null | UTF-8 | Python | false | false | 569 | py | # -*- coding: utf-8 -*-
{
"name": "GEN-39_Default Discount",
"version": "1.0",
'author': 'Falinwa Hans',
"description": """
Module to give default discount
""",
"depends" : ['base','account','sale','purchase'],
'init_xml': [],
'data': [
],
'update_xml': [
'res_partner_view.xml',
'sale_view.xml',
'account_view.xml',
],
'css': [],
'installable': True,
'active': False,
'application' : False,
'js': [],
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4: | [
"[email protected]"
] | |
63a3859655163da8d1b68d6100318174b51087b3 | fa69eadde7b449647ebd976214d2f99886b6db18 | /FireHydrant/common/enum/task/type.py | da3d9f0df70ac9947f6d9e55b12919b3b7ed67be | [] | no_license | shoogoome/FireHydrant | 0da1d6e06aa9e853837f6435a30ac4ef73118764 | 7467cd66e1fc91f0b3a264f8fc9b93f22f09fe7b | refs/heads/master | 2020-06-21T01:29:25.711595 | 2019-12-18T00:31:01 | 2019-12-18T00:31:01 | 197,309,304 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 265 | py | # -*- coding: utf-8 -*-
# coding:utf-8
from common.core.dao.enumBase import EnumBase
class TaskTypeEnum(EnumBase):
PERSONAL = 0
TEAM = 1
__default__ = PERSONAL
__desc__ = {
'PERSONAL': '个人任务',
'TEAM': '团队任务',
} | [
"[email protected]"
] | |
7db647d25a21499083092c001e5dbe7f68539f5a | f07a42f652f46106dee4749277d41c302e2b7406 | /Data Set/bug-fixing-5/506ae8f067379afa4417a57db5814487ea198a23-<_ldflags>-fix.py | e74dc5c62be559f47e3819254ac49089008a296f | [] | no_license | wsgan001/PyFPattern | e0fe06341cc5d51b3ad0fe29b84098d140ed54d1 | cc347e32745f99c0cd95e79a18ddacc4574d7faa | refs/heads/main | 2023-08-25T23:48:26.112133 | 2021-10-23T14:11:22 | 2021-10-23T14:11:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,311 | py | @utils.memoize
def _ldflags(ldflags_str, libs, flags, libs_dir, include_dir):
'Extract list of compilation flags from a string.\n\n Depending on the options, different type of flags will be kept.\n\n Parameters\n ----------\n ldflags_str : string\n The string to process. Typically, this will be the content of\n `theano.config.blas.ldflags`.\n libs : bool\n Extract flags starting with "-l".\n flags: bool\n Extract all the other flags.\n libs_dir: bool\n Extract flags starting with "-L".\n include_dir: bool\n Extract flags starting with "-I".\n\n Returns\n -------\n list of strings\n Extracted flags.\n\n '
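    # Illustrative call (not part of the original fix): with
    # ldflags_str='-L/usr/lib/atlas -lblas -llapack', libs=True and the other
    # switches False, the token loop below returns ['blas', 'lapack'].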
rval = []
if libs_dir:
found_dyn = False
dirs = [x[2:] for x in ldflags_str.split() if x.startswith('-L')]
l = _ldflags(ldflags_str=ldflags_str, libs=True, flags=False, libs_dir=False, include_dir=False)
for d in dirs:
for f in os.listdir(d.strip('"')):
if (f.endswith('.so') or f.endswith('.dylib') or f.endswith('.dll')):
if any([(f.find(ll) >= 0) for ll in l]):
found_dyn = True
if ((not found_dyn) and dirs):
_logger.warning('We did not found a dynamic library into the library_dir of the library we use for blas. If you use ATLAS, make sure to compile it with dynamics library.')
for t in ldflags_str.split():
if ((t.startswith("'") and t.endswith("'")) or (t.startswith('"') and t.endswith('"'))):
t = t[1:(- 1)]
try:
(t0, t1, t2) = t[0:3]
assert (t0 == '-')
except Exception:
raise ValueError(('invalid token "%s" in ldflags_str: "%s"' % (t, ldflags_str)))
if (libs_dir and (t1 == 'L')):
rval.append(t[2:])
elif (include_dir and (t1 == 'I')):
raise ValueError('Include dirs are not used for blas. We disable this as this can hide other headers and this is not wanted.', t)
rval.append(t[2:])
elif (libs and (t1 == 'l')):
rval.append(t[2:])
elif (flags and (t1 not in ['L', 'I', 'l'])):
rval.append(t)
elif (flags and (t1 == 'L')):
rval.append(('-Wl,-rpath,' + t[2:]))
return rval | [
"[email protected]"
] | |
3020613b94d8ab6d48331de09fbcc650efe92b54 | 1978a9455159b7c2f3286e0ad602652bc5277ffa | /exercises/15_module_re/task_15_2a.py | ff8cb7e603b04c43d8bed5f08c6262dda11c4009 | [] | no_license | fortredux/py_net_eng | 338fd7a80debbeda55b5915dbfba4f5577279ef0 | 61cf0b2a355d519c58bc9f2b59d7e5d224922890 | refs/heads/master | 2020-12-03T17:32:53.598813 | 2020-04-08T20:55:45 | 2020-04-08T20:55:45 | 231,409,656 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,102 | py | # -*- coding: utf-8 -*-
'''
Task 15.2a
Create a function convert_to_dict that expects two arguments:
* a list of field names
* a list of tuples with values
The function returns the result as a list of dictionaries, where the keys are taken from the first list
and the values are filled in from the second.
For example, if the function is passed the headers list and the list
[('FastEthernet0/0', 'up', 'up', '10.0.1.1'),
('FastEthernet0/1', 'up', 'up', '10.0.2.1')]
the function must return a list of dictionaries like this (the field order may differ):
[{'interface': 'FastEthernet0/0', 'status': 'up', 'protocol': 'up', 'address': '10.0.1.1'},
{'interface': 'FastEthernet0/1', 'status': 'up', 'protocol': 'up', 'address': '10.0.2.1'}]
Check the function by calling it with:
* first argument - the headers list
* second argument - the result returned by the parse_sh_ip_int_br function from task 15.2 when it is given sh_ip_int_br.txt as its argument.
The parse_sh_ip_int_br function does not need to be copied.
Restriction: All tasks must be solved using only the topics covered so far.
'''
import re
from task_15_2 import parse_sh_ip_int_br
parsed_sh_ip_int_br = parse_sh_ip_int_br('/home/vagrant/GitHub/pynet_rep/exercises/15_module_re/sh_ip_int_br.txt')
headers = ['interface', 'address', 'status', 'protocol']
def convert_to_dict(list_headers, list_values):
final_list = []
for tup in list_values:
final_list.append(dict(zip(list_headers, tup)))
return final_list
if __name__ == '__main__':
from pprint import pprint
pprint(convert_to_dict(headers, parsed_sh_ip_int_br)) | [
"[email protected]"
] | |
042afc513c24332f122836a2cec49692b2f77a28 | 7a63ce94e1806a959c9c445c2e0bae95afb760c8 | /tests/incident/test_resolve.py | 8ccf653a5dbc4b46fd96837ef309be097512d6e1 | [
"MIT"
] | permissive | pklauke/pycamunda | 20b54ceb4a40e836148e84912afd04d78d6ba0ec | 3faac4037212df139d415ee1a54a6594ae5e9ac5 | refs/heads/master | 2023-08-18T10:23:30.503737 | 2022-04-17T18:34:40 | 2022-04-17T18:34:40 | 240,333,835 | 40 | 16 | MIT | 2023-09-12T13:29:08 | 2020-02-13T18:37:25 | Python | UTF-8 | Python | false | false | 1,602 | py | # -*- coding: utf-8 -*-
import unittest.mock
import pytest
import pycamunda.incident
from tests.mock import raise_requests_exception_mock, not_ok_response_mock
def test_resolve_params(engine_url):
resolve_incident = pycamunda.incident.Resolve(url=engine_url, id_='anId')
assert resolve_incident.url == engine_url + '/incident/anId'
assert resolve_incident.query_parameters() == {}
assert resolve_incident.body_parameters() == {}
@unittest.mock.patch('requests.Session.request')
def test_resolve_calls_requests(mock, engine_url):
resolve_incident = pycamunda.incident.Resolve(url=engine_url, id_='anId')
resolve_incident()
assert mock.called
assert mock.call_args[1]['method'].upper() == 'DELETE'
@unittest.mock.patch('requests.Session.request', raise_requests_exception_mock)
def test_resolve_raises_pycamunda_exception(engine_url):
resolve_incident = pycamunda.incident.Resolve(url=engine_url, id_='anId')
with pytest.raises(pycamunda.PyCamundaException):
resolve_incident()
@unittest.mock.patch('requests.Session.request', not_ok_response_mock)
@unittest.mock.patch('pycamunda.base._raise_for_status')
def test_resolve_raises_for_status(mock, engine_url):
resolve_incident = pycamunda.incident.Resolve(url=engine_url, id_='anId')
resolve_incident()
assert mock.called
@unittest.mock.patch('requests.Session.request', unittest.mock.MagicMock())
def test_resolve_returns_none(engine_url):
resolve_incident = pycamunda.incident.Resolve(url=engine_url, id_='anId')
result = resolve_incident()
assert result is None
| [
"[email protected]"
] | |
b1363d2eeea65f67da9c4da23778667e39565849 | ee4152e9b5eafa7afafe05de04391a9a3606eea3 | /client/API/AddRecord.py | 431bc9058aefc1020df12034d650ed008e3998a5 | [] | no_license | adibl/password_saver | 3a06c8c04905d82f01fc14b41b646a6578af2b70 | 2ea73781db92ce750f91039251f2c06e929da7bb | refs/heads/master | 2020-04-09T23:51:34.804870 | 2019-06-16T10:13:42 | 2019-06-16T10:13:42 | 160,665,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,675 | py | """
name:
date:
description
"""
import base64
import json
import requests
import os
from .connection import Request
class Passwords(object):
FILE_NAME = 'token.txt'
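    # Illustrative usage (assumed names/URL): Passwords.handle('https://example.com', 'alice', 's3cret')
    # POSTs a credential record using the JWT stored in token.txt, while Passwords.GET()
    # returns the parsed JSON of saved records, or {'general': <status code>} on failure.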
@classmethod
def handle(cls, url, username, password):
return cls.POST(url, username, password)
@classmethod
def GET(cls):
auto = cls.read_jwt()
if auto is None:
return {'general': 401}
responce = conn = Request().get_conn().get(Request.URI + '/passwords', headers={'Authorization': 'Bearer {0}'.format(auto)})
if responce.status_code == 200:
return json.loads(responce.text)
else:
return {'general': responce.status_code}
@classmethod
def POST(cls, url, username, password):
auto = cls.read_jwt()
if auto is None:
return {'general': 401}
print base64.urlsafe_b64encode(url)
encode_url = base64.urlsafe_b64encode(url)
responce = conn = Request().get_conn().post(Request.URI + '/passwords', headers={'Authorization': 'Bearer {0}'.format(auto)}
, json={'username': username, 'password': password,
'program_id': encode_url})
if responce.status_code == 200:
return True
elif responce.status_code == 442:
return json.loads(responce.text)
else:
return {'general': 'general error'}
@classmethod
def read_jwt(cls):
if os.path.isfile(cls.FILE_NAME):
with open(cls.FILE_NAME, 'rb')as handel:
jwt = handel.read()
return jwt
else:
return None
| [
"[email protected]"
] | |
881b5c0fc9bea295c8d51dcae0942461610bb9c2 | 8c5f1e07333edfd14a58677ea90ea9a8ec24daa7 | /examples/simple_pendulum/custom_simple_pendulum.py | 423dcab619d69ba966d9a866ae2b925a8862fb9f | [
"MIT"
] | permissive | echoix/pyro | 52c37b3c14fb3b52977be510545fdc43922dd8f9 | 787920cb14e3669bc65c530fd8f91d4277a24279 | refs/heads/master | 2020-09-07T09:08:21.114064 | 2019-11-10T05:59:50 | 2019-11-10T05:59:50 | 220,733,155 | 0 | 0 | MIT | 2019-11-10T02:52:39 | 2019-11-10T02:52:38 | null | UTF-8 | Python | false | false | 1,412 | py | # -*- coding: utf-8 -*-
"""
Created on Wed Nov 7 12:19:01 2018
@author: nvidia
"""
###############################################################################
import numpy as np
###############################################################################
from pyro.dynamic import pendulum
###############################################################################
###############################################################################
class MyCustomPendulum( pendulum.SinglePendulum ):
"""
"""
###########################################################################
# Only overload functions that are different from base version
###########################################################################
def setparams(self):
""" Set model parameters here """
# kinematic
self.l1 = 3
self.lc1 = 2
# dynamic
self.m1 = 10
self.I1 = 10
self.gravity = 9.81
self.d1 = 50
'''
#################################################################
################## Main ########
#################################################################
'''
if __name__ == "__main__":
""" MAIN TEST """
sys = MyCustomPendulum()
x0 = np.array([0.8,0])
sys.plot_animation( x0 ) | [
"[email protected]"
] | |
2b8edfa347b5b9d6a6b2c2d912242611e9907980 | 7b102f9c8f2e3f9240090d1d67af50333a2ba98d | /nonfatal_code/hospital/Formatting/001_pre_format_UK_UTLA_fit_models.py | a401509726a0ff362b8b717c593c63c90020b098 | [] | no_license | Nermin-Ghith/ihme-modeling | 9c8ec56b249cb0c417361102724fef1e6e0bcebd | 746ea5fb76a9c049c37a8c15aa089c041a90a6d5 | refs/heads/main | 2023-04-13T00:26:55.363986 | 2020-10-28T19:51:51 | 2020-10-28T19:51:51 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,688 | py | # -*- coding: utf-8 -*-
"""
formatting UK UTLA data
"""
import pandas as pd
import numpy as np
import platform
import sys
import statsmodels.formula.api as smf
import statsmodels.api as sm
import time
sys.path.append("FILEPATH")
from hosp_prep import *
# Environment:
if platform.system() == "Linux":
root = "FILEPATH"
else:
root = "FILEPATH"
print("need to incorporate injuries data which are stored in separate files")
################################################
# Use data prepped on the cluster
###############################################
# was too big to merge locally so merged on the cluster and written to FILEPATH
# just read in the merged data from drive
both = pd.read_csv("FILEPATH", compression='gzip')
# both = pd.read_csv("FILEPATH", compression='gzip')
#both = pd.read_csv("FILEPATH", compression='gzip')
# back = both.copy()
# the regional level data needs to be split to include age start 90
# it's breaking the models so I'm gonna subset that age group out
# both = both[both.age_start < 80]
# also drop 2011, 2012
# both = both[both.fiscal_year < 2011]
# drop the rows that don't match (only 2 rows before 2011)
both = both[~both.log_rate.isnull()]
##################################
# FIT THE LINEAR MODELS
###################################
causes = both.cause_code.unique()
# both = both[both.cause_code.isin(causes)]
both['preds'] = np.nan # initialize pred col
# loop over causes and sexes
start = time.time()
counter = 0
counter_denom = causes.size
for cause in causes:
for s in [1, 2]:
# create the mask
mask = (both['cause_code'] == cause) & (both['sex_id'] == s)
if both[mask].log_rate.isnull().sum() == both[mask].shape[0]:
print("there's no data")
continue
# our formula for predictions
formula = "log_rate ~ C(age_start) + C(location_id)"
# fit the model
fit = smf.ols(formula, data=both[mask]).fit()
# exponentiate the predicted values
both.loc[mask, 'preds'] = np.exp(fit.predict(both[mask]))
if s == 1:
counter += 1
if counter % 125 == 0:
print(round((counter / counter_denom) * 100, 1), "% Done")
print("Run time: ", (time.time()-start)/60, " minutes")
print("Done in ", (time.time()-start) / 60, " minutes")
# both.to_csv("FILEPATH")
###################################################
# both = back.copy()
# subtract off the existing cases that we have at utla level
# use a groupby transform to leave the data in same format but create sums of
# known values at the regional level
reg_groups = ['cause_code', 'location_parent_id', 'age_start', 'age_end',
'sex_id', 'fiscal_year']
# fill missing utla level data with zeroes instead of NA so rows will be
# included in groupby
both['value'].fillna(value=0, inplace=True)
# sum the existing utla values up to the regional level
both['utla_val_to_reg'] = both.groupby(reg_groups)['value'].transform('sum')
# split the data
# subset the data to get only rows where utla value was suppressed
pred_df = both[both.utla_log_rate.isnull()].copy()
# drop the rows where utla value was suppressed
both = both[both.utla_log_rate.notnull()]
# subtract the known utla values from the regional values to get
# residual (unknown) values
pred_df['reg_resid_value'] = pred_df['reg_value'] - pred_df['utla_val_to_reg']
# new method
# get into count space
pred_df['pred_counts'] = pred_df['preds'] * pred_df['utla_population']
# sum utla predicted counts to region level
pred_df['utla_pred_to_reg'] = pred_df.groupby(reg_groups)['pred_counts'].\
transform('sum')
# make the weights
pred_df['weight'] = pred_df['reg_resid_value'] / pred_df['utla_pred_to_reg']
# apply weights to predicted values
pred_df['weighted_counts'] = pred_df['pred_counts'] * pred_df['weight']
# now test
reg_compare = pred_df.copy()
# get the sum of values at the regional level
reg_compare = reg_compare[['cause_code', 'location_parent_id', 'age_start',
'age_end', 'sex_id', 'fiscal_year',
'reg_resid_value']]
reg_compare.drop_duplicates(inplace=True)
reg_sum = reg_compare.reg_resid_value.sum()
# get the sum of desuppressed values
pred_df_sum = pred_df.weighted_counts.sum()
# pretty dang close to zero
assert round(reg_sum - pred_df_sum, 5) == 0
# assert residual vals are smaller than regional vals
assert (pred_df.reg_value >= pred_df.reg_resid_value).all()
# concat de-suppressed and un-suppressed data back together
both = pd.concat([both, pred_df])
# merge data that needed to be de-suppressed and data that didn't into same col
# fill value with desuppressed val where value = 0 and desuppressed isn't null
condition = (both['value'] == 0) & (both['weighted_counts'].notnull())
both.loc[condition, 'value'] = both.loc[condition, 'weighted_counts']
# write to a csv for use with a Shiny app
both['rates'] = both['value'] / both['utla_population']
both[['location_id', 'location_parent_id', 'age_start', 'age_end', 'sex_id',
'fiscal_year', 'cause_code', 'utla_log_rate', 'value', 'preds',
'reg_value', 'reg_resid_value',
'weight', 'rates', 'utla_population']].\
to_csv("FILEPATH", index=False)
# write to FILEPATH intermediate data
both[['location_id', 'location_parent_id', 'age_start', 'age_end', 'sex_id',
'fiscal_year', 'cause_code', 'utla_log_rate', 'value', 'preds',
'reg_value', 'reg_resid_value', 'weight']].\
to_csv("FILEPATH", index=False)
| [
"[email protected]"
] | |
ae3c07417196b04210dbed26d9b1fba5aac5f9ec | 07ec5a0b3ba5e70a9e0fb65172ea6b13ef4115b8 | /lib/python3.6/site-packages/numpy/core/tests/test_regression.py | 39a92211635a6dcc5cd242241cf5f18f0e08b70e | [] | no_license | cronos91/ML-exercise | 39c5cd7f94bb90c57450f9a85d40c2f014900ea4 | 3b7afeeb6a7c87384049a9b87cac1fe4c294e415 | refs/heads/master | 2021-05-09T22:02:55.131977 | 2017-12-14T13:50:44 | 2017-12-14T13:50:44 | 118,736,043 | 0 | 0 | null | 2018-01-24T08:30:23 | 2018-01-24T08:30:22 | null | UTF-8 | Python | false | false | 130 | py | version https://git-lfs.github.com/spec/v1
oid sha256:2d5a65e7c1da1e87651cabd3481c0012ad15f784275aad1259a1312faf19cfc2
size 81211
| [
"[email protected]"
] | |
bce22db2adda5234a705ff0d1fb719565b3bddd8 | 9692a20a1e7a224a72785e4495f31421639b9f3b | /frex/pipeline_stages/filters/candidate_filterer.py | 2d79e3b31e1ec3776b5978e1f52488af2826dfdb | [] | no_license | solashirai/FREx | 6b0cb040930761a0e269f4591d7dde36e3f636d1 | 36ad09a0cb0020661ee990c7800bafd110e2ec04 | refs/heads/master | 2023-08-14T08:49:49.270281 | 2021-09-29T14:58:23 | 2021-09-29T14:58:23 | 291,760,109 | 0 | 0 | null | 2021-09-24T22:41:19 | 2020-08-31T15:57:47 | Python | UTF-8 | Python | false | false | 2,535 | py | from abc import abstractmethod
from typing import Generator, Optional, Any
from frex.models import Explanation, Candidate
from frex.pipeline_stages import PipelineStage
class CandidateFilterer(PipelineStage):
"""
CandidateFilterer is a PipelineStage that determines whether input candidates should be removed from consideration
or continue on through the FREx Pipeline.
A new CandidateFilterer class can be minimally defined by creating a new subclass of CandidateFilterer and
defining the filter() function.
"""
def __init__(
self, *, filter_explanation: Explanation, filter_score: float = 0, **kwargs
):
"""
:param filter_explanation: The explanation to add to the Candidate if it passes the filter function.
:param filter_score: The score to apply to the candidate if it passes the filter. This is 0 by default.
"""
self.filter_explanation = filter_explanation
self.filter_score = filter_score
@abstractmethod
def filter(self, *, candidate: Candidate) -> bool:
"""
A filter to determine whether or not the current candidate is suitable to move on through the Pipeline.
This function should return True when the candidate should be removed and False when it should continue on.
:param candidate: A domain-specific candidate to filter
:return: True if the candidate should be removed, False if it should be kept and passed on to later stages.
"""
pass
def __call__(
self, *, candidates: Generator[Candidate, None, None], context: Any
) -> Generator[Candidate, None, None]:
"""
For each of candidate being yielded by the Generator, apply a filtering function to decide whether or not
to yield the candidate forward to the next PipelineStage.
:param candidates: A Generator yielding candidates. In the setup of a FREx Pipeline, this is typically another
PipelineStage that is yielding candidates into the next stage.
:param context: The current context being used to execute the Pipeline.
:return: A Generator, yielding updated Candidate objects that have not been caught by this stage's
filtering function.
"""
for candidate in candidates:
if not self.filter(candidate=candidate):
candidate.applied_explanations.append(self.filter_explanation)
candidate.applied_scores.append(self.filter_score)
yield candidate
| [
"[email protected]"
] | |
113af3e207e4b01797c11ec0d406ac5a136b56c2 | 801418efbd049078c8aad4cd17297f3ece571412 | /temp/toy/python/238. Product of Array Except Self.py | d7da2b067439b8c2b107a462617c0fb4b8eac579 | [] | no_license | xixihaha1995/CS61B_SP19_SP20 | 2b654f0c864a80a0462fdd4b1561bdc697a8c1e2 | 7d6599596f7f49b38f1c256ece006b94555c1900 | refs/heads/master | 2023-01-01T18:41:48.027058 | 2020-10-29T04:50:01 | 2020-10-29T04:50:01 | 240,976,072 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 311 | py | class Solution:
def productExceptSelf(self, nums: List[int]) -> List[int]:
res, p, q = [1], 1, 1
for i in range(len(nums)-1):
p *= nums[i]
res.append(p)
for i in range(len(nums)-1, 0, -1):
q *= nums[i]
res[i-1] *= q
return res
| [
"[email protected]"
] | |
75d2f93063a4feaf6b869a50b0e5a88d40500e00 | 2bcf18252fa9144ece3e824834ac0e117ad0bdf3 | /httpy/tags/0.7/tests/TestCaseHttpy.py | 08a1fc6dd3fb6eb41284fefc3f7dc8c1602cb96c | [] | no_license | chadwhitacre/public | 32f65ba8e35d38c69ed4d0edd333283a239c5e1d | 0c67fd7ec8bce1d8c56c7ff3506f31a99362b502 | refs/heads/master | 2021-05-10T14:32:03.016683 | 2010-05-13T18:24:20 | 2010-05-13T18:24:20 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,199 | py | """The idea and code for running a test._server in another thread are from the
standard library's test/test_socke._server.py.
TODO: This is out of date now that we are using asyncore (via httpy._zope._server).
"""
import asyncore
import os
import select
import socket
import threading
import time
import unittest
from httpy._zope.server.taskthreads import ThreadedTaskDispatcher
from httpy._zope.server.tests.asyncerror import AsyncoreErrorHook
from httpy.Config import Config
from httpy.Request import Request, ZopeRequest
from httpy.Server import Server
td = ThreadedTaskDispatcher()
opts = [ '--mode', 'development'
, '--sockfam', 'AF_INET'
, '--root', 'root'
, '--address', ':65370'
, '--verbosity', '99'
#, '--apps', '/' discover automatically
]
class TestCaseHttpy(unittest.TestCase, AsyncoreErrorHook):
# unittest.TestCase hooks
# =======================
want_config = False
def setUp(self):
self.scrubenv()
# [re]build a temporary website tree in ./root
self.removeTestSite()
self.buildTestSite()
if self.server:
self.startServer()
if self.want_config:
self.config = Config()
def tearDown(self):
if self.server:
self.stopServer()
self.removeTestSite()
self.restoreenv()
# server support
# ==============
server = False # Override to True if your subclass needs a server
def startServer(self):
if len(asyncore.socket_map) != 1:
# Let sockets die off.
# TODO tests should be more careful to clear the socket map.
asyncore.poll(0.1)
self.orig_map_size = len(asyncore.socket_map)
#self.hook_asyncore_error()
config = Config(opts)
self._server = Server(config, threads=4)
self._server.accept_connections()
self.port = self._server.socket.getsockname()[1]
self.run_loop = 1
self.counter = 0
self.thread_started = threading.Event()
self.thread = threading.Thread(target=self.loop)
self.thread.setDaemon(True)
self.thread.start()
self.thread_started.wait(10.0)
self.assert_(self.thread_started.isSet())
def stopServer(self):
self.run_loop = 0
self.thread.join()
td.shutdown()
self._server.close()
# Make sure all sockets get closed by asyncore normally.
timeout = time.time() + 5
while 1:
if len(asyncore.socket_map) == self.orig_map_size:
# Clean!
break
if time.time() >= timeout:
self.fail('Leaked a socket: %s' % `asyncore.socket_map`)
asyncore.poll(0.1)
#self.unhook_asyncore_error()
def loop(self):
self.thread_started.set()
while self.run_loop:
self.counter = self.counter + 1
asyncore.poll(0.1)
# environment
# ===========
def scrubenv(self):
save = {}
for opt in Config.options:
envvar = 'HTTPY_%s' % opt.upper()
if os.environ.has_key(envvar):
save[envvar] = os.environ[envvar]
del os.environ[envvar]
self.env = save
def restoreenv(self):
for k, v in self.env.items():
os.environ[k] = v
self.env = {}
# test site
# =========
# testsite is a list of strings and tuples. If a string, it is interpreted
# as a path to a directory that should be created. If a tuple, the first
# element is a path to a file, the second is the contents of the file.
# We do it this way to ease cross-platform testing.
#
# siteroot is the filesystem path under which to create the test site.
siteroot = 'root'
testsite = []
def buildTestSite(self):
"""Build the site described in self.testsite
"""
os.mkdir(self.siteroot)
for item in self.testsite:
if isinstance(item, basestring):
path = self.convert_path(item.lstrip('/'))
path = os.sep.join([self.siteroot, path])
os.mkdir(path)
elif isinstance(item, tuple):
filepath, contents = item
path = self.convert_path(filepath.lstrip('/'))
path = os.sep.join([self.siteroot, path])
file(path, 'w').write(contents)
def removeTestSite(self):
if os.path.isfile('httpy.conf'):
os.remove('httpy.conf')
if not os.path.isdir(self.siteroot):
return
for root, dirs, files in os.walk(self.siteroot, topdown=False):
for name in dirs:
os.rmdir(os.path.join(root, name))
for name in files:
os.remove(os.path.join(root, name))
os.rmdir(self.siteroot)
def convert_path(self, path):
"""Given a Unix path, convert it for the current platform.
"""
return os.sep.join(path.split('/'))
def convert_paths(self, paths):
"""Given a tuple of Unix paths, convert them for the current platform.
"""
return tuple([self.convert_path(p) for p in paths])
# utils
# =====
@staticmethod
def neuter_traceback(tb):
"""Given a traceback, return just the system-independent lines.
"""
tb_list = tb.split(os.linesep)
if not tb_list[-1]:
tb_list = tb_list[:-1]
neutered = []
for i in range(0,len(tb_list),2):
neutered.append(tb_list[i])
neutered.append(tb_list[-1])
return os.linesep.join(neutered)
@staticmethod
def dict2tuple(d):
return tuple(sorted(d.iteritems()))
@staticmethod
def make_request(uri, headers=None, Zope=False):
if headers is None:
headers = {}
request = ZopeRequest()
request.received("GET %s HTTP/1.1\r\n" % uri)
for header in headers.items():
request.received("%s: %s\r\n" % header)
request.received('\r\n')
if Zope:
return request
else:
return Request(request)
| [
"[email protected]"
] | |
e827ef9de12fa0211e6677aa82084594cd16d444 | 6b76819d395bb76591fc12e9de83161b37d61672 | /woot/apps/expt/management/commands/step02_zmod.py | f30ef4f4d650e4b9e4688253eed2cfb7feb067a9 | [] | no_license | NicholasPiano/img | 8426530512ee80a4ed746874c4219b1de56acbfd | 3a91c65c3c9680ba7ed7c94308a721dd0cff9ad5 | refs/heads/master | 2020-05-18T15:48:50.566974 | 2015-07-16T00:01:17 | 2015-07-16T00:01:17 | 38,632,176 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,707 | py | # expt.command: step03_zmod
# django
from django.core.management.base import BaseCommand, CommandError
# local
from apps.img.models import Composite
from apps.expt.util import *
# util
from optparse import make_option
spacer = ' ' * 20
### Command
class Command(BaseCommand):
option_list = BaseCommand.option_list + (
make_option('--expt', # option that will appear in cmd
action='store', # no idea
dest='expt', # refer to this in options variable
default='050714-test', # some default
help='Name of the experiment to import' # who cares
),
make_option('--series', # option that will appear in cmd
action='store', # no idea
dest='series', # refer to this in options variable
default='13', # some default
help='Name of the series' # who cares
),
)
args = ''
help = ''
def handle(self, *args, **options):
'''
1. What does this script do?
> Make images that can be recognized by CellProfiler by multiplying smoothed GFP with the flattened Brightfield
2. What data structures are input?
> Channel
3. What data structures are output?
> Channel
4. Is this stage repeated/one-time?
> One-time
Steps:
1. Select composite
2. Call pmod mod on composite
3. Run
'''
# 1. select composite
composite = Composite.objects.get(experiment__name=options['expt'], series__name=options['series'])
# 2. Call pmod mod
mod = composite.mods.create(id_token=generate_id_token('img', 'Mod'), algorithm='mod_zmod')
# 3. Run mod
print('step02 | processing mod_zmod...', end='\r')
mod.run()
print('step02 | processing mod_zmod... done.{}'.format(spacer))
| [
"[email protected]"
] | |
16bf0ef9ec53acb6b4376b1146bb236b50565626 | fddad101c7be2fcbc05131081e708f31948c002f | /329. Longest Increasing Path in a Matrix/answer_bfs.py | a9141a61f5be8c4c3d3ff273a059e79b03652077 | [] | no_license | LennyDuan/AlgorithmPython | a10c9278c676829ab5a284a618f6352414888061 | 523c11e8a5728168c4978c5a332e7e9bc4533ef7 | refs/heads/master | 2021-07-16T12:31:08.284846 | 2021-03-28T20:31:28 | 2021-03-28T20:31:28 | 244,040,362 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 935 | py | def longestIncreasingPath(self, matrix) -> int:
if not matrix:
return 0
res = 0
visited = set()
rows, cols = len(matrix), len(matrix[0])
directions = ((0, 1), (0, -1), (1, 0), (-1, 0))
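    # Note: despite the "bfs" file name this is a plain depth-first recursion; the
    # `visited` set is never added to, so it does not prune anything, and without
    # memoization cells can be revisited many times.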
def traverse(i, j, visited):
if (i, j) in visited:
return 0
res = 1
for direction in directions:
next_i, next_j = i + direction[0], j + direction[1]
direction_count = 0
if 0 <= next_i < rows and 0 <= next_j < cols:
if matrix[next_i][next_j] > matrix[i][j]:
direction_count = 1 + traverse(next_i, next_j, visited)
res = max(res, direction_count)
return res
for row in range(rows):
for col in range(cols):
res = max(traverse(row, col, visited), res)
return res
nums = [
[3, 4, 5],
[3, 2, 6],
[2, 2, 1]
]
print(longestIncreasingPath(None, nums))
| [
"[email protected]"
] | |
8679eb15e7abddc2ffc51114e648c08423ab7ebd | 2aec9c5e8c72b731d3abf22f2a407fe09c1cde09 | /QDS_Test/case/dbwytest.py | 22710e1c97b825043ebe5514995dd8e8038a0300 | [] | no_license | jiangyg/ZWFproject | 8b24cc34970ae0a9c2a2b0039dc527c83a5862b5 | aa35bc59566d92721f23d2dd00b0febd268ac2dd | refs/heads/master | 2020-09-26T17:01:00.229380 | 2019-11-15T13:16:21 | 2019-11-15T13:16:21 | 226,297,631 | 0 | 1 | null | 2019-12-06T09:55:37 | 2019-12-06T09:55:36 | null | UTF-8 | Python | false | false | 3,860 | py | # coding=utf-8
import time
import logging
from selenium.webdriver import ActionChains
from utils.mytestcase import MyTestCase
from utils.logincookie import DengLuPage
from utils.random import unicode
from utils.screenshort import get_screenshort
class DbWyTest(MyTestCase):
    """Danbao Wuyou ("worry-free guarantee") test suite"""
    def test_dbwy(self):
        """Danbao Wuyou ("worry-free guarantee") test"""
# logging.basicConfig(filename='../LOG/' + __name__ + '.log',
# format='[%(asctime)s-%(filename)s-%(levelname)s: %(message)s]', level=logging.DEBUG,
# filemode='a', datefmt='%Y-%m-%d%I:%M:%S %p')
dl = DengLuPage(self.driver)
        # The official recommendation is find_element(By.*("")) instead of find_element_by_*("")
# self.driver.find_element_by_id()
# self.driver.find_element()
dl.login()
time.sleep(2)
ActionChains(self.driver).move_to_element(self.driver.find_element_by_css_selector(
"body > div.section-banner > div.public-navbar > div > div > h3 > span")).perform()
time.sleep(2)
ActionChains(self.driver).move_to_element(self.driver.find_element_by_css_selector(
"body > div.section-banner > div.public-navbar > div > div > div > ul:nth-child(1) > li:nth-child(1) > h3 > a")).perform()
ActionChains(self.driver).release()
self.driver.find_element_by_css_selector(
"body > div.section-banner > div.public-navbar > div > div > div > ul:nth-child(1) > li:nth-child(1) > div > dl:nth-child(3) > dd > a:nth-child(2)").click()
        # get the handles of all currently open windows
        windows = self.driver.window_handles
        # switch to the most recently opened window
self.driver.switch_to.window(windows[-1])
time.sleep(2)
self.driver.set_window_size(1920, 1080)
time.sleep(3)
self.assertIn("商标担保注册|商标注册费用|商标申请流程-权大师", self.driver.title)
print(self.driver.title)
        # dbwy registration step
self.driver.find_element_by_css_selector(
"body > div.section-product.width1200 > dl > dd > div.cont-serviceItems > table > tbody > tr > td.td-cont > ul > li:nth-child(2)").click()
for a in self.driver.find_elements_by_css_selector("#total-price"):
            print("Total cost: "+a.text)
aa=a.text
self.driver.find_element_by_css_selector(
"body > div.section-product.width1200 > dl > dd > div.cont-btnBuy > a.btn.btn-next.buynow").click()
self.driver.find_element_by_name("ownerContactPerson").send_keys("{}".format(unicode()))
self.driver.find_element_by_name("ownerContactPhone").send_keys("15624992498")
self.driver.find_element_by_name("contactMail").send_keys("[email protected]")
self.driver.find_element_by_css_selector("#remark").send_keys(time.strftime("%Y-%m-%d_%H-%M-%S") + "测试订单")
get_screenshort(self.driver, "test_dbwy.png")
for i in self.driver.find_elements_by_css_selector("body > div.myOrder-wrap > div.section-myorder.orderinfo-wrap.width1200 > div:nth-child(6) > div.last-pay.personal-last-pay > ul > li.row-sense > em > i"):
            print("Total price: "+i.text)
ii=i.text
        self.assertIn(aa,ii)
        print("Prices match")
self.driver.find_element_by_css_selector(
"body > div.myOrder-wrap > div.section-myorder.orderinfo-wrap.width1200 > div:nth-child(6) > div.btns > a.btn-next.submitOrder").click()
for o in self.driver.find_elements_by_class_name("payable"):
            print("Order submitted; amount payable: "+o.text)
oo=o.text
        self.assertIn(oo,ii)
        print("Test passed")
self.driver.find_element_by_css_selector("#alisubmit").click() | [
"[email protected]"
] | |
2ac108f270cf5ffa0bfbca7755b958d446b3a030 | facb8b9155a569b09ba66aefc22564a5bf9cd319 | /wp2/merra_scripts/01_netCDF_extraction/merra902Combine/21-tideGauge.py | 784ddb0d0f655471f76357e1f1df6c7540900599 | [] | no_license | moinabyssinia/modeling-global-storm-surges | 13e69faa8f45a1244a964c5de4e2a5a6c95b2128 | 6e385b2a5f0867df8ceabd155e17ba876779c1bd | refs/heads/master | 2023-06-09T00:40:39.319465 | 2021-06-25T21:00:44 | 2021-06-25T21:00:44 | 229,080,191 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,374 | py | # -*- coding: utf-8 -*-
"""
Created on Tue Jun 17 11:28:00 2020
--------------------------------------------
Load predictors for each TG and combine them
--------------------------------------------
@author: Michael Tadesse
"""
import os
import pandas as pd
#define directories
# dir_name = 'F:\\01_erainterim\\01_eraint_predictors\\eraint_D3'
dir_in = "/lustre/fs0/home/mtadesse/merraLocalized"
dir_out = "/lustre/fs0/home/mtadesse/merraAllCombined"
def combine():
os.chdir(dir_in)
#get names
tg_list_name = os.listdir()
x = 21
y = 22
for tg in range(x, y):
os.chdir(dir_in)
tg_name = tg_list_name[tg]
print(tg_name, '\n')
#looping through each TG folder
os.chdir(tg_name)
#check for empty folders
if len(os.listdir()) == 0:
continue
#defining the path for each predictor
where = os.getcwd()
csv_path = {'slp' : os.path.join(where, 'slp.csv'),\
"wnd_u": os.path.join(where, 'wnd_u.csv'),\
'wnd_v' : os.path.join(where, 'wnd_v.csv')}
first = True
for pr in csv_path.keys():
print(tg_name, ' ', pr)
#read predictor
pred = pd.read_csv(csv_path[pr])
#remove unwanted columns
pred.drop(['Unnamed: 0'], axis = 1, inplace=True)
#sort based on date as merra files are scrambled
pred.sort_values(by = 'date', inplace=True)
#give predictor columns a name
pred_col = list(pred.columns)
for pp in range(len(pred_col)):
if pred_col[pp] == 'date':
continue
pred_col[pp] = pr + str(pred_col[pp])
pred.columns = pred_col
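            # e.g. a column named "3" in slp.csv becomes "slp3", so the three
            # predictor frames can later be merged on "date" without name clashes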
#merge all predictors
if first:
pred_combined = pred
first = False
else:
pred_combined = pd.merge(pred_combined, pred, on = 'date')
#saving pred_combined
os.chdir(dir_out)
tg_name = str(tg)+"_"+tg_name;
pred_combined.to_csv('.'.join([tg_name, 'csv']))
os.chdir(dir_in)
print('\n')
#run script
combine()
| [
"[email protected]"
] | |
72228f507a4ac8d98397a992ca802e652f3d5c8f | 2207cf4fb992b0cb106e2daf5fc912f23d538d0d | /src/catalog/serializers.py | 1e85a0316ce6f1e7fa4b866254126cb6dd9a095a | [
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] | permissive | litedesk/litedesk-webserver-provision | 95bc75f61532c5f1c7cb21fb5372ff288999689e | 1576b9d3e5e2e64d1136d276767c2710cfb1938f | refs/heads/master | 2021-05-15T01:35:31.984067 | 2020-08-18T10:55:20 | 2020-08-18T10:55:20 | 25,595,412 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,021 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2014, Deutsche Telekom AG - Laboratories (T-Labs)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from rest_framework import serializers
import models
class OfferSerializer(serializers.ModelSerializer):
url = serializers.HyperlinkedIdentityField(view_name='offer-detail')
class Meta:
model = models.Offer
fields = ('url', 'name', 'currency', 'price', 'setup_price', 'status')
read_only_fields = ('name', 'asset', 'currency', )
| [
"[email protected]"
] | |
424a153fb67403733012e88be8f95f8f6783f4bc | 4872375eeb0b2a45c0d3046bbfb5cd2d202b2295 | /quiz.py | e89862b18df06fdbf7fe361b76ea3a36a1613f18 | [
"MIT"
] | permissive | ash018/discordQuizBot | ee3aae7171220f39bd9a0bb057c2fa5eab017dd5 | b00441553bbbeeab2c4da0264eeed8480a33c3a1 | refs/heads/master | 2020-04-13T09:00:18.410702 | 2018-12-25T17:05:28 | 2018-12-25T17:05:28 | 163,098,967 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 11,308 | py | # -*- coding: utf-8 -*-
"""
Quiz / Question classes for quizbot.
@author: drkatnz
"""
import asyncio
import random
import re
import os
#todo: probably need to remove punctuation from answers
class Quiz:
def __init__(self, client, win_limit=10, hint_time=30):
#initialises the quiz
self.__running = False
self.current_question = None
self._win_limit = win_limit
self._hint_time = hint_time
self._questions = []
self._asked = []
self.scores = {}
self._client = client
        self._channel = None
self._cancel_callback = True
#load in some questions
datafiles = os.listdir('quizdata')
for df in datafiles:
filepath = 'quizdata' + os.path.sep + df
self._load_questions(filepath)
print('Loaded: ' + filepath)
print('Quiz data loading complete.\n')
def _load_questions(self, question_file):
# loads in the questions for the quiz
with open(question_file, encoding='utf-8',errors='replace') as qfile:
lines = qfile.readlines()
question = None
category = None
answer = None
regex = None
position = 0
while position < len(lines):
if lines[position].strip().startswith('#'):
#skip
position += 1
continue
if lines[position].strip() == '': #blank line
#add question
if question is not None and answer is not None:
q = Question(question=question, answer=answer,
category=category, regex=regex)
self._questions.append(q)
#reset everything
question = None
category = None
answer = None
regex = None
position += 1
continue
if lines[position].strip().lower().startswith('category'):
category = lines[position].strip()[lines[position].find(':') + 1:].strip()
elif lines[position].strip().lower().startswith('question'):
question = lines[position].strip()[lines[position].find(':') + 1:].strip()
elif lines[position].strip().lower().startswith('answer'):
answer = lines[position].strip()[lines[position].find(':') + 1:].strip()
elif lines[position].strip().lower().startswith('regexp'):
regex = lines[position].strip()[lines[position].find(':') + 1:].strip()
#else ignore
position += 1
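        # Illustrative quizdata file format implied by this parser: one block per
        # question, blank-line separated, with an optional Regexp line. The sample
        # questions below are invented; only the field names come from the code.
        #
        #   Category: General
        #   Question: What colour is a clear daytime sky?
        #   Answer: blue
        #
        #   Category: Maths
        #   Question: 2 + 2 = ?
        #   Answer: 4
        #   Regexp: 4|four
        #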
def started(self):
#finds out whether a quiz is running
return self.__running
def question_in_progress(self):
#finds out whether a question is currently in progress
        return self.current_question is not None
async def _hint(self, hint_question, hint_number):
#offers a hint to the user
if self.__running and self.current_question is not None:
await asyncio.sleep(self._hint_time)
if (self.current_question == hint_question
and self._cancel_callback == False):
if (hint_number >= 5):
                    await self.next_question(self._channel)
                    return
hint = self.current_question.get_hint(hint_number)
await self._client.send_message(self._channel, 'Hint {}: {}'.format(hint_number, hint), tts=True)
if hint_number < 5:
await self._hint(hint_question, hint_number + 1)
async def start(self, channel):
#starts the quiz in the given channel.
if self.__running:
#don't start again
await self._client.send_message(channel,
'Quiz already started in channel {}, you can stop it with !stop or !halt'.format(self._channel.name), tts=True)
else:
await self.reset()
self._channel = channel
await self._client.send_message(self._channel, '@here Quiz starting in 10 seconds...', tts=True)
await asyncio.sleep(10)
self.__running = True
await self.ask_question()
async def reset(self):
if self.__running:
#stop
await self.stop()
#reset the scores
self.current_question = None
self._cancel_callback = True
self.__running = False
        self._questions.extend(self._asked)
self._asked = []
self.scores = {}
async def stop(self):
#stops the quiz from running
if self.__running:
#print results
#stop quiz
await self._client.send_message(self._channel, 'Quiz stopping.', tts=True)
if(self.current_question is not None):
await self._client.send_message(self._channel,
'The answer to the current question is: {}'.format(self.current_question.get_answer()), tts=True)
await self.print_scores()
self.current_question = None
self._cancel_callback = True
self.__running = False
else:
await self._client.send_message(self._channel, 'No quiz running, start one with !ask or !quiz', tts=True)
async def ask_question(self):
#asks a question in the quiz
if self.__running:
#grab a random question
qpos = random.randint(0,len(self._questions) - 1)
self.current_question = self._questions[qpos]
self._questions.remove(self.current_question)
self._asked.append(self.current_question)
await self._client.send_message(self._channel,
'Question {}: {}'.format(len(self._asked), self.current_question.ask_question()), tts=True)
self._cancel_callback = False
await self._hint(self.current_question, 1)
async def next_question(self, channel):
#moves to the next question
if self.__running:
if channel == self._channel:
await self._client.send_message(self._channel,
'Moving onto next question. The answer I was looking for was: {}'.format(self.current_question.get_answer()), tts=True)
self.current_question = None
self._cancel_callback = True
await self.ask_question()
async def answer_question(self, message):
#checks the answer to a question
if self.__running and self.current_question is not None:
if message.channel != self._channel:
                return
if self.current_question.answer_correct(message.content):
#record success
self._cancel_callback = True
if message.author.name in self.scores:
self.scores[message.author.name] += 1
else:
self.scores[message.author.name] = 1
await self._client.send_message(self._channel,
'Well done, {}, the correct answer was: {}'.format(message.author.name, self.current_question.get_answer()), tts=True)
self.current_question = None
#check win
if self.scores[message.author.name] == self._win_limit:
await self.print_scores()
await self._client.send_message(self._channel, '{} has won! Congratulations.'.format(message.author.name), tts=True)
                    self._questions.extend(self._asked)
self._asked = []
self.__running = False
#print totals?
elif len(self._asked) % 5 == 0:
await self.print_scores()
await self.ask_question()
async def print_scores(self):
#prints out a table of scores.
if self.__running:
await self._client.send_message(self._channel,'Current quiz results:', tts=True)
else:
await self._client.send_message(self._channel,'Most recent quiz results:', tts=True)
highest = 0
for name in self.scores:
await self._client.send_message(self._channel,'{}:\t{}'.format(name,self.scores[name]), tts=True)
if self.scores[name] > highest:
highest = self.scores[name]
if len(self.scores) == 0:
await self._client.send_message(self._channel,'No results to display.', tts=True)
leaders = []
for name in self.scores:
if self.scores[name] == highest:
leaders.append(name)
if len(leaders) > 0:
if len(leaders) == 1:
await self._client.send_message(self._channel,'Current leader: {}'.format(leaders[0]), tts=True)
else:
                await self._client.send_message(self._channel,'Current leaders: {}'.format(leaders), tts=True)
class Question:
# A question in a quiz
def __init__(self, question, answer, category=None, author=None, regex=None):
self.question = question
self.answer = answer
self.author = author
self.regex = regex
self.category = category
self._hints = 0
def ask_question(self):
# gets a pretty formatted version of the question.
question_text = ''
if self.category is not None:
question_text+='({}) '.format(self.category)
else:
question_text+='(General) '
if self.author is not None:
question_text+='Posed by {}. '.format(self.author)
question_text += self.question
return question_text
def answer_correct(self, answer):
#checks if an answer is correct or not.
#should check regex
if self.regex is not None:
match = re.fullmatch(self.regex.strip(),answer.strip())
return match is not None
#else just string match
return answer.lower().strip() == self.answer.lower().strip()
def get_hint(self, hint_number):
# gets a formatted hint for the question
hint = []
for i in range(len(self.answer)):
if i % 5 < hint_number:
hint = hint + list(self.answer[i])
else:
if self.answer[i] == ' ':
hint += ' '
else:
hint += '-'
return ''.join(hint)
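        # Worked example (derived from this method): for the answer "KAYAK", only
        # positions with i % 5 < hint_number are revealed, so successive hints are
        #   hint 1 -> "K----", hint 2 -> "KA---", hint 3 -> "KAY--",
        #   hint 4 -> "KAYA-", hint 5 -> "KAYAK"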
def get_answer(self):
# gets the expected answer
return self.answer
| [
"[email protected]"
] | |
5edaa1b154eb40102fe6ec6a4a37b893c4eab07f | f0d713996eb095bcdc701f3fab0a8110b8541cbb | /hv572GaPtbqwhJpTb_2.py | 8e0bb6a39e996aa650ed4adf5f67abcc31d4539a | [] | no_license | daniel-reich/turbo-robot | feda6c0523bb83ab8954b6d06302bfec5b16ebdf | a7a25c63097674c0a81675eed7e6b763785f1c41 | refs/heads/main | 2023-03-26T01:55:14.210264 | 2021-03-23T16:08:01 | 2021-03-23T16:08:01 | 350,773,815 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,831 | py | """
In this challenge, you must think about words as elastics. What happens when
you stretch an elastic by applying a constant traction force at both ends? Every
part (or letter, in this case) of the elastic will expand, with the minimum
expansion at the ends and the maximum expansion in the center.
If the word has an odd length, the effective central character of the word
will be the pivot that splits the word into two halves.
"ABC" -> Left = "A" | Center = "B" | Right = "C"
If the word has an even length, you will consider two parts of equal length,
with the last character of the left half and the first character of the right
half being the center.
"ABCD" -> Left = "AB" | Right = "CD"
You will represent the expansion of a letter repeating it as many times as its
numeric position (so counting the indexes from/to 1, and not from 0 as usual)
in its half, with a crescent order in the left half and a decrescent order in
the right half.
Word = "ANNA"
Left = "AN"
Right = "NA"
Left = "A" * 1 + "N" * 2 = "ANN"
Right = "N" * 2 + "A" * 1 = "NNA"
Word = Left + Right = "ANNNNA"
If the word has an odd length, the pivot (the central character) will be the
peak (as to say, the highest value) that delimits the two halves of the word.
Word = "KAYAK"
Left = "K" * 1 + "A" * 2 = "KAA"
Pivot = "Y" * 3 = "YYY"
Right = "A" * 2 + "K" * 1 = "AAK"
Word = Left + Pivot + Right = "KAAYYYAAK"
Given a `word`, implement a function that returns the elasticized version of
the word as a string.
### Examples
elasticize("ANNA") ➞ "ANNNNA"
elasticize("KAYAK") ➞ "KAAYYYAAK"
elasticize("X") ➞ "X"
### Notes
* For words with less than three characters, the function must return the same word (no traction appliable).
* Remember: in the left part, characters are counted from 1 upward to the end; in the right part, they are counted in reverse order back down to 1.
"""
def elasticize(word):
def is_even(n):
return n%2==0
def first_half(word, n):
l8rs = {}
for num in range(n):
l8rs[num] = word[num] * (num+1)
return l8rs
def last_half(word, n):
l8rs = {}
y = 1
while len(word) - y > n-1:
l8rs[y] = word[len(word)-y]*y
y += 1
return l8rs
def combine(fh, lh):
lst = []
for key in sorted(list(fh.keys())):
lst.append(fh[key])
for key in reversed(sorted(list(lh.keys()))):
lst.append(lh[key])
return lst
if len(word) < 3:
return word
if is_even(len(word)) == False:
x = 0
y = 1
while x != len(word) - y:
x += 1
y += 1
middle = x
else:
middle = int(len(word)/2)
fh = first_half(word, middle)
lh = last_half(word, middle)
combined = combine(fh, lh)
return ''.join(combined)
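# Expected behaviour, restating the examples from the docstring above:
#   elasticize("ANNA")  -> "ANNNNA"
#   elasticize("KAYAK") -> "KAAYYYAAK"
#   elasticize("X")     -> "X"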
| [
"[email protected]"
] | |
327169a1cb6be4099ccb7f13fab70dfa92f4742e | 7deda84f7a280f5a0ee69b98c6a6e7a2225dab24 | /Receptionist/migrations/0027_package_manage_reception.py | 45248c462110a952feffbb09a7008787a2c97129 | [] | no_license | Cornex-Inc/Coffee | 476e30f29412373fb847b2d518331e6c6b9fdbbf | fcd86f20152e2b0905f223ff0e40b1881db634cf | refs/heads/master | 2023-01-13T01:56:52.755527 | 2020-06-08T02:59:18 | 2020-06-08T02:59:18 | 240,187,025 | 0 | 0 | null | 2023-01-05T23:58:52 | 2020-02-13T05:47:41 | Python | UTF-8 | Python | false | false | 549 | py | # Generated by Django 2.1.15 on 2020-05-19 15:34
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('Receptionist', '0026_package_manage_grouping'),
]
operations = [
migrations.AddField(
model_name='package_manage',
name='reception',
field=models.ForeignKey(default=0, on_delete=django.db.models.deletion.DO_NOTHING, to='Receptionist.Reception'),
preserve_default=False,
),
]
| [
"[email protected]"
] | |
845f77bc8d39737647f4a55d183df4f8f7afdbf3 | 43aeee48c1f6fc468a43f9bb0d4edae8ee0dbee1 | /LPTW-SRC/例3_21.py | 8430bd36f542e524ac1f1798a936dc9eba351ed6 | [] | no_license | wiky2/mytestproject | f694cf71dd3031e4597086f3bc90d246c4b26298 | e7b79df6304476d76e87f9e8a262f304b30ca312 | refs/heads/master | 2021-09-07T20:54:19.569970 | 2018-02-28T23:39:00 | 2018-02-28T23:39:00 | 100,296,844 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,333 | py | #这个循环用来保证必须输入大于2的整数作为评委人数
while True:
    try:
        n = int(input('Enter the number of judges: '))
        if n <= 2:
            print('Too few judges; there must be more than 2.')
        else:
            #if an integer greater than 2 was entered, end the loop
            break
    except:
        pass
#used to store every judge's score
scores = []
for i in range(n):
    #this while loop ensures the user enters a number between 0 and 100
    while True:
        try:
            score = input('Enter the score from judge {0}: '.format(i+1))
            #convert the string to a float
            score = float(score)
            #make sure the entered number is between 0 and 100
            assert 0 <= score <= 100
            scores.append(score)
            #if the value is valid, break out of the while loop and move on to the next judge's score
            break
        except:
            print('Invalid score')
#compute and remove the highest and lowest scores
highest = max(scores)
lowest = min(scores)
scores.remove(highest)
scores.remove(lowest)
#compute the average score, keeping 2 decimal places
finalScore = round(sum(scores)/len(scores), 2)
formatter = 'Dropped the highest score {0}\nDropped the lowest score {1}\nFinal score {2}'
print(formatter.format(highest, lowest, finalScore))
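# Worked example: with judges' scores [90, 80, 70, 85], the highest (90) and the
# lowest (70) are dropped, leaving [80, 85], so the final score is
# round((80 + 85) / 2, 2) = 82.5.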
| [
"[email protected]"
] | |
3d0f58b74138d3d783dd0a71510afd2354a9ac4e | 243eddaee6dff4551da9c10f725d8828e13840ac | /get_premium/apps.py | d24dcc77204cd467ed804330c4f12a0a7f693080 | [
"MIT"
] | permissive | BarunBlog/Link_People | 46b0c2c141ae042b481893aee869977755790dc8 | 1ffd07bc5b31a715133c99efbbb478efe18d632b | refs/heads/master | 2023-01-24T04:54:13.545951 | 2020-12-03T05:56:33 | 2020-12-03T05:56:33 | 304,888,418 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 96 | py | from django.apps import AppConfig
class GetPremiumConfig(AppConfig):
name = 'get_premium'
| [
"[email protected]"
] | |
afa9a1d0944e4af29df98932dd9113870175e138 | 3ac0a169aa2a123e164f7434281bc9dd6373d341 | /singleNumber.py | 4a7b92101b0350685936c92368994f2cf80679bc | [] | no_license | sfeng77/myleetcode | 02a028b5ca5a0354e99b8fb758883902a768f410 | a2841fdb624548fdc6ef430e23ca46f3300e0558 | refs/heads/master | 2021-01-23T02:06:37.569936 | 2017-04-21T20:31:06 | 2017-04-21T20:31:06 | 85,967,955 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 208 | py | class Solution(object):
def singleNumber(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
s = 0
for v in nums:
s = s ^ v
return s
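        # Worked example: for nums = [2, 1, 2] the accumulator evolves as
        #   0 ^ 2 = 2, then 2 ^ 1 = 3, then 3 ^ 2 = 1,
        # leaving the unpaired element, because x ^ x = 0 and x ^ 0 = x.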
| [
"[email protected]"
] | |
d8756586064d46abf0b01f2f255a4408170c98ca | 9e988c0dfbea15cd23a3de860cb0c88c3dcdbd97 | /sdBs/AllRun/galex_j19485-4225/sdB_GALEX_J19485-4225_lc.py | ad5e79f01dd4bec1f067eebd2a8c3dee9507a2f5 | [] | no_license | tboudreaux/SummerSTScICode | 73b2e5839b10c0bf733808f4316d34be91c5a3bd | 4dd1ffbb09e0a599257d21872f9d62b5420028b0 | refs/heads/master | 2021-01-20T18:07:44.723496 | 2016-08-08T16:49:53 | 2016-08-08T16:49:53 | 65,221,159 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 359 | py | from gPhoton.gAperture import gAperture
def main():
    gAperture(band="NUV", skypos=[297.137792,-42.429325], stepsz=30., csvfile="/data2/fleming/GPHOTON_OUTPU/LIGHTCURVES/sdBs/sdB_GALEX_J19485-4225/sdB_GALEX_J19485-4225_lc.csv", maxgap=1000., overwrite=True, radius=0.00555556, annulus=[0.005972227,0.0103888972], verbose=3)
if __name__ == "__main__":
main()
| [
"[email protected]"
] | |
be016283897b8b97fcd923c3c66271b85639e383 | 10d98fecb882d4c84595364f715f4e8b8309a66f | /rl_metrics_aaai2021/utils.py | fdb1f66a5371b5960ba1746220fe5dec986ad621 | [
"CC-BY-4.0",
"Apache-2.0"
] | permissive | afcarl/google-research | 51c7b70d176c0d70a5ee31ea1d87590f3d6c6f42 | 320a49f768cea27200044c0d12f394aa6c795feb | refs/heads/master | 2021-12-02T18:36:03.760434 | 2021-09-30T20:59:01 | 2021-09-30T21:07:02 | 156,725,548 | 1 | 0 | Apache-2.0 | 2018-11-08T15:13:53 | 2018-11-08T15:13:52 | null | UTF-8 | Python | false | false | 7,577 | py | # coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Common utility functions."""
import collections
from absl import logging
import numpy as np
from rl_metrics_aaai2021 import bisimulation
from rl_metrics_aaai2021 import d_delta
from rl_metrics_aaai2021 import d_delta_star
from rl_metrics_aaai2021 import discrete_bisimulation
from rl_metrics_aaai2021 import discrete_lax_bisimulation
from rl_metrics_aaai2021 import lax_bisimulation
MetricData = collections.namedtuple('metric_data', ['constructor', 'label'])
MDPStats = collections.namedtuple(
'MDPStats', ['time', 'num_iterations', 'min_gap', 'avg_gap', 'max_gap'])
# Dictionary mapping metric name to constructor and LaTeX label.
METRICS = {
'bisimulation':
MetricData(bisimulation.Bisimulation, r'$d^{\sim}$'),
'discrete_bisimulation':
MetricData(discrete_bisimulation.DiscreteBisimulation, r'$e^{\sim}$'),
'lax_bisimulation':
MetricData(lax_bisimulation.LaxBisimulation, r'$d^{\sim_{lax}}$'),
'discrete_lax_bisimulation':
MetricData(discrete_lax_bisimulation.DiscreteLaxBisimulation,
r'$e^{\sim_{lax}}$'),
'd_delta_1':
MetricData(d_delta.DDelta1, r'$d_{\Delta1}$'),
'd_delta_5':
MetricData(d_delta.DDelta5, r'$d_{\Delta5}$'),
'd_delta_10':
MetricData(d_delta.DDelta10, r'$d_{\Delta10}$'),
'd_delta_15':
MetricData(d_delta.DDelta15, r'$d_{\Delta15}$'),
'd_delta_20':
MetricData(d_delta.DDelta20, r'$d_{\Delta20}$'),
'd_delta_50':
MetricData(d_delta.DDelta50, r'$d_{\Delta50}$'),
'd_delta_100':
MetricData(d_delta.DDelta100, r'$d_{\Delta100}$'),
'd_delta_500':
MetricData(d_delta.DDelta500, r'$d_{\Delta500}$'),
'd_delta_1000':
MetricData(d_delta.DDelta1000, r'$d_{\Delta1000}$'),
'd_delta_5000':
MetricData(d_delta.DDelta5000, r'$d_{\Delta5000}$'),
'd_Delta_star':
MetricData(d_delta_star.DDeltaStar, r'$d_{\Delta^*}$'),
}
def value_iteration(env, tolerance, verbose=False):
"""Run value iteration on env.
Args:
env: a MiniGrid environment, including the MDPWrapper.
tolerance: float, error tolerance used to exit loop.
verbose: bool, whether to print verbose messages.
Returns:
Numpy array with V* and Q*.
"""
values = np.zeros(env.num_states)
q_values = np.zeros((env.num_states, env.num_actions))
error = tolerance * 2
i = 0
while error > tolerance:
new_values = np.copy(values)
for s in range(env.num_states):
for a in range(env.num_actions):
q_values[s, a] = (
env.rewards[s, a] +
env.gamma * np.matmul(env.transition_probs[s, a, :], values))
new_values[s] = np.max(q_values[s, :])
error = np.max(abs(new_values - values))
values = new_values
i += 1
if i % 1000 == 0 and verbose:
logging.info('Error after %d iterations: %f', i, error)
if verbose:
logging.info('Found V* in %d iterations', i)
logging.info(values)
return values, q_values
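# Illustrative usage (hypothetical -- `env` must be a MiniGrid MDPWrapper exposing
# num_states, num_actions, rewards, transition_probs and gamma as assumed above):
#
#   values, q_values = value_iteration(env, tolerance=1e-6)
#   greedy_policy = np.argmax(q_values, axis=1)
#
# The greedy-policy line is an extra illustration, not something the module itself does.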
def q_value_iteration(env, tolerance):
"""Run q value iteration on env.
Args:
env: a MiniGrid environment, including the MDPWrapper.
tolerance: float, error tolerance used to exit loop.
Returns:
    Numpy array with Q*.
"""
q_values = np.zeros((env.num_states, env.num_actions))
error = tolerance * 2
i = 0
while error > tolerance:
for s in range(env.num_states):
for a in range(env.num_actions):
old_q_values = np.copy(q_values[s, a])
q_values[s, a] = (
env.rewards[s, a] + env.gamma *
np.matmul(env.transition_probs[s, a, :], np.max(q_values, axis=1)))
error = np.max(abs(old_q_values - q_values[s, a]))
i += 1
return q_values
def policy_iteration(env, tolerance, verbose=False):
"""Run policy iteration on env.
Args:
env: a MiniGrid environment, including the MDPWrapper.
tolerance: float, evaluation stops when the value function change is less
than the tolerance.
verbose: bool, whether to print verbose messages.
Returns:
Numpy array with V*
"""
values = np.zeros(env.num_states)
# Random policy
policy = np.ones((env.num_states, env.num_actions)) / env.num_actions
policy_stable = False
i = 0
while not policy_stable:
# Policy evaluation
while True:
delta = 0.
for s in range(env.num_states):
v = np.sum(env.rewards[s, :] * policy[s, :] + env.gamma * policy[s, :] *
np.matmul(env.transition_probs[s, :, :], values))
delta = max(delta, abs(v - values[s]))
values[s] = v
if delta < tolerance:
break
# Policy improvement
policy_stable = True
for s in range(env.num_states):
old = policy[s].copy()
g = np.zeros(env.num_actions, dtype=float)
for a in range(env.num_actions):
g[a] = (
env.rewards[s, a] +
env.gamma * np.matmul(env.transition_probs[s, a, :], values))
greed_actions = np.argwhere(g == np.amax(g))
for a in range(env.num_actions):
if a in greed_actions:
policy[s, a] = 1 / len(greed_actions)
else:
policy[s, a] = 0
if not np.array_equal(policy[s], old):
policy_stable = False
i += 1
if i % 1000 == 0 and verbose:
logging.info('Error after %d iterations: %f', i, delta)
if verbose:
logging.info('Found V* in %d iterations', i)
logging.info(values)
return values
def q_policy_iteration(env, tolerance, verbose=False):
"""Run policy iteration on env.
Args:
env: a MiniGrid environment, including the MDPWrapper.
tolerance: float, evaluation stops when the value function change is less
than the tolerance.
verbose: bool, whether to print verbose messages.
Returns:
    Numpy array with Q*.
"""
q_values = np.zeros((env.num_states, env.num_actions))
# Random policy
policy = np.ones((env.num_states, env.num_actions)) / env.num_actions
policy_stable = False
i = 0
while not policy_stable:
# Policy evaluation
while True:
delta = 0.
for s in range(env.num_states):
v = env.rewards[s, :] + env.gamma * np.matmul(
env.transition_probs[s, :, :], np.sum(q_values * policy, axis=1))
        delta = max(delta, np.max(abs(v - q_values[s])))
q_values[s] = v
if delta < tolerance:
break
# Policy improvement
policy_stable = True
for s in range(env.num_states):
old = policy[s].copy()
greedy_actions = np.argwhere(q_values[s] == np.amax(q_values[s]))
for a in range(env.num_actions):
if a in greedy_actions:
policy[s, a] = 1 / len(greedy_actions)
else:
policy[s, a] = 0
if not np.array_equal(policy[s], old):
policy_stable = False
i += 1
if i % 1000 == 0 and verbose:
logging.info('Error after %d iterations: %f', i, delta)
if verbose:
logging.info('Found V* in %d iterations', i)
logging.info(q_values)
return q_values
| [
"[email protected]"
] | |
7a3484ca24eee71aa63e4e1eb0f4a392f1f4784a | 41b4702e359e3352116eeecf2bdf59cb13c71cf2 | /contextual_bcq/rand_param_envs/mujoco_py/mjlib.py | 8f2cf8a780c82d64a893cfd22c85aaf7d6219ce8 | [] | no_license | CaralHsi/Multi-Task-Batch-RL | b0aad53291c1713fd2d89fa4fff4a85c98427d4d | 69d29164ab7d82ec5e06a929ed3b96462db21853 | refs/heads/master | 2022-12-22T19:23:45.341092 | 2020-10-01T00:05:36 | 2020-10-01T00:05:36 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 23,301 | py | from ctypes import *
import os
from .util import *
from .mjtypes import *
from rand_param_envs.mujoco_py import config
path_prefix = config.mjpro_path
if sys.platform.startswith("darwin"):
libfile = os.path.join(path_prefix, "bin/libmujoco131.dylib")
elif sys.platform.startswith("linux"):
libfile = os.path.join(path_prefix, "bin/libmujoco131.so")
elif sys.platform.startswith("win"):
libfile = os.path.join(path_prefix, "bin/mujoco131.lib")
else:
raise RuntimeError("Unrecognized platform %s" % sys.platform)
if not os.path.exists(libfile):
raise RuntimeError("Missing path: %s. (HINT: you should have unzipped the mjpro131.zip bundle without modification.)" % libfile)
mjlib = cdll.LoadLibrary(os.path.abspath(libfile))
mjlib.mj_loadXML.argtypes = [String, String, c_char_p, c_int]
mjlib.mj_loadXML.restype = POINTER(MJMODEL)
mjlib.mj_saveXML.argtypes = [String, POINTER(MJMODEL), String]
mjlib.mj_saveXML.restype = c_int
#mjlib.mj_printSchema.argtypes = [String, String, c_int, c_int, c_int]
#mjlib.mj_printSchema.restype = c_int
mjlib.mj_activate.argtypes = [String]
mjlib.mj_activate.restype = c_int
mjlib.mj_step.argtypes = [POINTER(MJMODEL), POINTER(MJDATA)]
mjlib.mj_step.restype = None
mjlib.mj_step1.argtypes = [POINTER(MJMODEL), POINTER(MJDATA)]
mjlib.mj_step1.restype = None
mjlib.mj_step2.argtypes = [POINTER(MJMODEL), POINTER(MJDATA)]
mjlib.mj_step2.restype = None
mjlib.mj_forward.argtypes = [POINTER(MJMODEL), POINTER(MJDATA)]
mjlib.mj_forward.restype = None
#mjlib.mj_inverse.argtypes = [POINTER(MJMODEL), POINTER(MJDATA)]
#mjlib.mj_inverse.restype = None
#mjlib.mj_forwardSkip.argtypes = [POINTER(MJMODEL), POINTER(MJDATA), c_int]
#mjlib.mj_forwardSkip.restype = None
#mjlib.mj_inverseSkip.argtypes = [POINTER(MJMODEL), POINTER(MJDATA), c_int]
#mjlib.mj_inverseSkip.restype = None
#mjlib.mj_sensor.argtypes = [POINTER(MJMODEL), POINTER(MJDATA)]
#mjlib.mj_sensor.restype = None
#mjlib.mj_energy.argtypes = [POINTER(MJMODEL), POINTER(MJDATA)]
#mjlib.mj_energy.restype = None
#mjlib.mj_defaultSolRefImp.argtypes = [POINTER(c_double), POINTER(c_double)]
#mjlib.mj_defaultSolRefImp.restype = None
#mjlib.mj_defaultOption.argtypes = [POINTER(mjOption)]
#mjlib.mj_defaultOption.restype = None
#mjlib.mj_defaultVisual.argtypes = [POINTER(mjVisual)]
#mjlib.mj_defaultVisual.restype = None
#mjlib.mj_copyModel.argtypes = [POINTER(MJMODEL), POINTER(MJMODEL)]
#mjlib.mj_copyModel.restype = POINTER(MJMODEL)
#mjlib.mj_saveModel.argtypes = [POINTER(MJMODEL), String, c_int, POINTER(None)]
#mjlib.mj_saveModel.restype = None
#mjlib.mj_loadModel.argtypes = [String, c_int, POINTER(None)]
#mjlib.mj_loadModel.restype = POINTER(MJMODEL)
mjlib.mj_deleteModel.argtypes = [POINTER(MJMODEL)]
mjlib.mj_deleteModel.restype = None
#mjlib.mj_sizeModel.argtypes = [POINTER(MJMODEL)]
#mjlib.mj_sizeModel.restype = c_int
mjlib.mj_makeData.argtypes = [POINTER(MJMODEL)]
mjlib.mj_makeData.restype = POINTER(MJDATA)
#mjlib.mj_copyData.argtypes = [POINTER(MJDATA), POINTER(MJMODEL), POINTER(MJDATA)]
#mjlib.mj_copyData.restype = POINTER(MJDATA)
mjlib.mj_resetData.argtypes = [POINTER(MJMODEL), POINTER(MJDATA)]
mjlib.mj_resetData.restype = None
#mjlib.mj_stackAlloc.argtypes = [POINTER(MJDATA), c_int]
#mjlib.mj_stackAlloc.restype = POINTER(c_double)
mjlib.mj_deleteData.argtypes = [POINTER(MJDATA)]
mjlib.mj_deleteData.restype = None
#mjlib.mj_resetCallbacks.argtypes = []
#mjlib.mj_resetCallbacks.restype = None
#mjlib.mj_setConst.argtypes = [POINTER(MJMODEL), POINTER(MJDATA), c_int]
#mjlib.mj_setConst.restype = None
#mjlib.mj_printModel.argtypes = [POINTER(MJMODEL), String]
#mjlib.mj_printModel.restype = None
#mjlib.mj_printData.argtypes = [POINTER(MJMODEL), POINTER(MJDATA), String]
#mjlib.mj_printData.restype = None
#mjlib.mju_printMat.argtypes = [POINTER(c_double), c_int, c_int]
#mjlib.mju_printMat.restype = None
#mjlib.mj_fwdPosition.argtypes = [POINTER(MJMODEL), POINTER(MJDATA)]
#mjlib.mj_fwdPosition.restype = None
#mjlib.mj_fwdVelocity.argtypes = [POINTER(MJMODEL), POINTER(MJDATA)]
#mjlib.mj_fwdVelocity.restype = None
#mjlib.mj_fwdActuation.argtypes = [POINTER(MJMODEL), POINTER(MJDATA)]
#mjlib.mj_fwdActuation.restype = None
#mjlib.mj_fwdAcceleration.argtypes = [POINTER(MJMODEL), POINTER(MJDATA)]
#mjlib.mj_fwdAcceleration.restype = None
#mjlib.mj_fwdConstraint.argtypes = [POINTER(MJMODEL), POINTER(MJDATA)]
#mjlib.mj_fwdConstraint.restype = None
#mjlib.mj_Euler.argtypes = [POINTER(MJMODEL), POINTER(MJDATA)]
#mjlib.mj_Euler.restype = None
#mjlib.mj_RungeKutta.argtypes = [POINTER(MJMODEL), POINTER(MJDATA), c_int]
#mjlib.mj_RungeKutta.restype = None
#mjlib.mj_invPosition.argtypes = [POINTER(MJMODEL), POINTER(MJDATA)]
#mjlib.mj_invPosition.restype = None
#mjlib.mj_invVelocity.argtypes = [POINTER(MJMODEL), POINTER(MJDATA)]
#mjlib.mj_invVelocity.restype = None
#mjlib.mj_invConstraint.argtypes = [POINTER(MJMODEL), POINTER(MJDATA)]
#mjlib.mj_invConstraint.restype = None
#mjlib.mj_compareFwdInv.argtypes = [POINTER(MJMODEL), POINTER(MJDATA)]
#mjlib.mj_compareFwdInv.restype = None
#mjlib.mj_checkPos.argtypes = [POINTER(MJMODEL), POINTER(MJDATA)]
#mjlib.mj_checkPos.restype = None
#mjlib.mj_checkVel.argtypes = [POINTER(MJMODEL), POINTER(MJDATA)]
#mjlib.mj_checkVel.restype = None
#mjlib.mj_checkAcc.argtypes = [POINTER(MJMODEL), POINTER(MJDATA)]
#mjlib.mj_checkAcc.restype = None
#mjlib.mj_kinematics.argtypes = [POINTER(MJMODEL), POINTER(MJDATA)]
#mjlib.mj_kinematics.restype = None
#mjlib.mj_comPos.argtypes = [POINTER(MJMODEL), POINTER(MJDATA)]
#mjlib.mj_comPos.restype = None
#mjlib.mj_tendon.argtypes = [POINTER(MJMODEL), POINTER(MJDATA)]
#mjlib.mj_tendon.restype = None
#mjlib.mj_transmission.argtypes = [POINTER(MJMODEL), POINTER(MJDATA)]
#mjlib.mj_transmission.restype = None
#mjlib.mj_crb.argtypes = [POINTER(MJMODEL), POINTER(MJDATA)]
#mjlib.mj_crb.restype = None
#mjlib.mj_factorM.argtypes = [POINTER(MJMODEL), POINTER(MJDATA)]
#mjlib.mj_factorM.restype = None
#mjlib.mj_backsubM.argtypes = [POINTER(MJMODEL), POINTER(MJDATA), POINTER(c_double), POINTER(c_double), c_int]
#mjlib.mj_backsubM.restype = None
#mjlib.mj_backsubM2.argtypes = [POINTER(MJMODEL), POINTER(MJDATA), POINTER(c_double), POINTER(c_double), c_int]
#mjlib.mj_backsubM2.restype = None
#mjlib.mj_comVel.argtypes = [POINTER(MJMODEL), POINTER(MJDATA)]
#mjlib.mj_comVel.restype = None
#mjlib.mj_passive.argtypes = [POINTER(MJMODEL), POINTER(MJDATA)]
#mjlib.mj_passive.restype = None
#mjlib.mj_rne.argtypes = [POINTER(MJMODEL), POINTER(MJDATA), c_int, POINTER(c_double)]
#mjlib.mj_rne.restype = None
#mjlib.mj_rnePostConstraint.argtypes = [POINTER(MJMODEL), POINTER(MJDATA)]
#mjlib.mj_rnePostConstraint.restype = None
#mjlib.mj_collision.argtypes = [POINTER(MJMODEL), POINTER(MJDATA)]
#mjlib.mj_collision.restype = None
#mjlib.mj_makeConstraint.argtypes = [POINTER(MJMODEL), POINTER(MJDATA)]
#mjlib.mj_makeConstraint.restype = None
#mjlib.mj_projectConstraint.argtypes = [POINTER(MJMODEL), POINTER(MJDATA)]
#mjlib.mj_projectConstraint.restype = None
#mjlib.mj_referenceConstraint.argtypes = [POINTER(MJMODEL), POINTER(MJDATA)]
#mjlib.mj_referenceConstraint.restype = None
#mjlib.mj_isPyramid.argtypes = [POINTER(MJMODEL)]
#mjlib.mj_isPyramid.restype = c_int
#mjlib.mj_isSparse.argtypes = [POINTER(MJMODEL)]
#mjlib.mj_isSparse.restype = c_int
#mjlib.mj_mulJacVec.argtypes = [POINTER(MJMODEL), POINTER(MJDATA), POINTER(c_double), POINTER(c_double)]
#mjlib.mj_mulJacVec.restype = None
#mjlib.mj_mulJacTVec.argtypes = [POINTER(MJMODEL), POINTER(MJDATA), POINTER(c_double), POINTER(c_double)]
#mjlib.mj_mulJacTVec.restype = None
#mjlib.mj_jac.argtypes = [POINTER(MJMODEL), POINTER(MJDATA), POINTER(c_double), POINTER(c_double), POINTER(c_double), c_int]
#mjlib.mj_jac.restype = None
#mjlib.mj_jacBody.argtypes = [POINTER(MJMODEL), POINTER(MJDATA), POINTER(c_double), POINTER(c_double), c_int]
#mjlib.mj_jacBody.restype = None
#mjlib.mj_jacBodyCom.argtypes = [POINTER(MJMODEL), POINTER(MJDATA), POINTER(c_double), POINTER(c_double), c_int]
#mjlib.mj_jacBodyCom.restype = None
#mjlib.mj_jacGeom.argtypes = [POINTER(MJMODEL), POINTER(MJDATA), POINTER(c_double), POINTER(c_double), c_int]
#mjlib.mj_jacGeom.restype = None
#mjlib.mj_jacSite.argtypes = [POINTER(MJMODEL), POINTER(MJDATA), POINTER(c_double), POINTER(c_double), c_int]
#mjlib.mj_jacSite.restype = None
#mjlib.mj_jacPointAxis.argtypes = [POINTER(MJMODEL), POINTER(MJDATA), POINTER(c_double), POINTER(c_double), POINTER(c_double), POINTER(c_double), c_int]
#mjlib.mj_jacPointAxis.restype = None
mjlib.mj_name2id.argtypes = [POINTER(MJMODEL), c_int, String] # The middle term is a mjtObj (an enum) in C.
mjlib.mj_name2id.restype = c_int
#mjlib.mj_id2name.argtypes = [POINTER(MJMODEL), mjtObj, c_int]
#mjlib. mj_id2name.restype = ReturnString
#mjlib.else:
#mjlib. mj_id2name.restype = String
#mjlib. mj_id2name.errcheck = ReturnString
#mjlib.mj_fullM.argtypes = [POINTER(MJMODEL), POINTER(c_double), POINTER(c_double)]
#mjlib.mj_fullM.restype = None
#mjlib.mj_mulM.argtypes = [POINTER(MJMODEL), POINTER(MJDATA), POINTER(c_double), POINTER(c_double)]
#mjlib.mj_mulM.restype = None
#mjlib.mj_applyFT.argtypes = [POINTER(MJMODEL), POINTER(MJDATA), POINTER(c_double), POINTER(c_double), POINTER(c_double), c_int, POINTER(c_double)]
#mjlib.mj_applyFT.restype = None
#mjlib.mj_objectVelocity.argtypes = [POINTER(MJMODEL), POINTER(MJDATA), c_int, c_int, POINTER(c_double), mjtByte]
#mjlib.mj_objectVelocity.restype = None
#mjlib.mj_objectAcceleration.argtypes = [POINTER(MJMODEL), POINTER(MJDATA), c_int, c_int, POINTER(c_double), mjtByte]
#mjlib.mj_objectAcceleration.restype = None
#mjlib.mj_contactForce.argtypes = [POINTER(MJMODEL), POINTER(MJDATA), c_int, POINTER(c_double)]
#mjlib.mj_contactForce.restype = None
#mjlib.mj_integratePos.argtypes = [POINTER(MJMODEL), POINTER(c_double), POINTER(c_double), c_double]
#mjlib.mj_integratePos.restype = None
#mjlib.mj_normalizeQuat.argtypes = [POINTER(MJMODEL), POINTER(c_double)]
#mjlib.mj_normalizeQuat.restype = None
#mjlib.mj_local2Global.argtypes = [POINTER(MJDATA), POINTER(c_double), POINTER(c_double), POINTER(c_double), POINTER(c_double), c_int]
#mjlib.mj_local2Global.restype = None
#mjlib.mj_getTotalmass.argtypes = [POINTER(MJMODEL)]
#mjlib.mj_getTotalmass.restype = c_double
#mjlib.mj_setTotalmass.argtypes = [POINTER(MJMODEL), c_double]
#mjlib.mj_setTotalmass.restype = None
#mjlib.mj_version.argtypes = []
#mjlib.mj_version.restype = c_double
mjlib.mjv_makeObjects.argtypes = [POINTER(MJVOBJECTS), c_int]
mjlib.mjv_makeObjects.restype = None
mjlib.mjv_freeObjects.argtypes = [POINTER(MJVOBJECTS)]
mjlib.mjv_freeObjects.restype = None
mjlib.mjv_defaultOption.argtypes = [POINTER(MJVOPTION)]
mjlib.mjv_defaultOption.restype = None
#mjlib.mjv_defaultCameraPose.argtypes = [POINTER(MJVCAMERAPOSE)]
#mjlib.mjv_defaultCameraPose.restype = None
mjlib.mjv_defaultCamera.argtypes = [POINTER(MJVCAMERA)]
mjlib.mjv_defaultCamera.restype = None
mjlib.mjv_setCamera.argtypes = [POINTER(MJMODEL), POINTER(MJDATA), POINTER(MJVCAMERA)]
mjlib.mjv_setCamera.restype = None
mjlib.mjv_updateCameraPose.argtypes = [POINTER(MJVCAMERA), c_double]
mjlib.mjv_updateCameraPose.restype = None
#mjlib.mjv_convert3D.argtypes = [POINTER(c_double), POINTER(c_double), c_double, POINTER(MJVCAMERAPOSE)]
#mjlib.mjv_convert3D.restype = None
#mjlib.mjv_convert2D.argtypes = [POINTER(c_double), mjtMouse, c_double, c_double, c_double, POINTER(MJVCAMERAPOSE)]
#mjlib.mjv_convert2D.restype = None
mjlib.mjv_moveCamera.argtypes = [c_int, c_float, c_float, POINTER(MJVCAMERA), c_float, c_float]
mjlib.mjv_moveCamera.restype = None
#mjlib.mjv_moveObject.argtypes = [mjtMouse, c_float, c_float, POINTER(MJVCAMERAPOSE), c_float, c_float, POINTER(c_double), POINTER(c_double)]
#mjlib.mjv_moveObject.restype = None
mjlib.mjv_mousePerturb.argtypes = [POINTER(MJMODEL), POINTER(MJDATA), c_int, c_int, POINTER(c_double), POINTER(c_double), POINTER(c_double)]
mjlib.mjv_mousePerturb.restype = None
#mjlib.mjv_mouseEdit.argtypes = [POINTER(MJMODEL), POINTER(MJDATA), c_int, c_int, POINTER(c_double), POINTER(c_double)]
#mjlib.mjv_mouseEdit.restype = None
mjlib.mjv_makeGeoms.argtypes = [POINTER(MJMODEL), POINTER(MJDATA), POINTER(MJVOBJECTS), POINTER(MJVOPTION), c_int, c_int, POINTER(c_double), POINTER(c_double), POINTER(c_double)]
mjlib.mjv_makeGeoms.restype = None
mjlib.mjv_makeLights.argtypes = [POINTER(MJMODEL), POINTER(MJDATA), POINTER(MJVOBJECTS)]
mjlib.mjv_makeLights.restype = None
mjlib.mjr_overlay.argtypes = [MJRRECT, c_int, c_int, String, String, POINTER(MJRCONTEXT)]
mjlib.mjr_overlay.restype = None
#mjlib.mjr_rectangle.argtypes = [c_int, MJRRECT, c_double, c_double, c_double, c_double, c_double, c_double, c_double, c_double]
#mjlib.mjr_rectangle.restype = None
#mjlib.mjr_finish.argtypes = []
#mjlib.mjr_finish.restype = None
#mjlib.mjr_text.argtypes = [String, POINTER(MJRCONTEXT), c_int, c_float, c_float, c_float, c_float, c_float, c_float]
#mjlib.mjr_text.restype = None
#mjlib.mjr_textback.argtypes = [String, POINTER(MJRCONTEXT), c_float, c_float, c_float, c_float, c_float, c_float]
#mjlib.mjr_textback.restype = None
#mjlib.mjr_textWidth.argtypes = [String, POINTER(MJRCONTEXT), c_int]
#mjlib.mjr_textWidth.restype = c_int
mjlib.mjr_defaultOption.argtypes = [POINTER(MJROPTION)]
mjlib.mjr_defaultOption.restype = None
mjlib.mjr_defaultContext.argtypes = [POINTER(MJRCONTEXT)]
mjlib.mjr_defaultContext.restype = None
#mjlib.mjr_uploadTexture.argtypes = [POINTER(MJMODEL), POINTER(MJRCONTEXT), c_int]
#mjlib.mjr_uploadTexture.restype = None
mjlib.mjr_makeContext.argtypes = [POINTER(MJMODEL), POINTER(MJRCONTEXT), c_int]
mjlib.mjr_makeContext.restype = None
mjlib.mjr_freeContext.argtypes = [POINTER(MJRCONTEXT)]
mjlib.mjr_freeContext.restype = None
mjlib.mjr_render.argtypes = [c_int, MJRRECT, POINTER(MJVOBJECTS), POINTER(MJROPTION), POINTER(MJVCAMERAPOSE), POINTER(MJRCONTEXT)]
mjlib.mjr_render.restype = None
#mjlib.mjr_select.argtypes = [MJRRECT, POINTER(MJVOBJECTS), c_int, c_int, POINTER(c_double), POINTER(c_double), POINTER(MJROPTION), POINTER(MJVCAMERAPOSE), POINTER(MJRCONTEXT)]
#mjlib.mjr_select.restype = c_int
#mjlib.mjr_showOffscreen.argtypes = [c_int, c_int, POINTER(MJRCONTEXT)]
#mjlib.mjr_showOffscreen.restype = None
#mjlib.mjr_showBuffer.argtypes = [POINTER(c_ubyte), c_int, c_int, c_int, c_int, POINTER(MJRCONTEXT)]
#mjlib.mjr_showBuffer.restype = None
#mjlib.mjr_getOffscreen.argtypes = [POINTER(c_ubyte), POINTER(c_float), MJRRECT, POINTER(MJRCONTEXT)]
#mjlib.mjr_getOffscreen.restype = None
#mjlib.mjr_getBackbuffer.argtypes = [POINTER(c_ubyte), POINTER(c_float), MJRRECT, POINTER(MJRCONTEXT)]
#mjlib.mjr_getBackbuffer.restype = None
#mjlib.
#mjlib.
#mjlib.mju_error.argtypes = [String]
#mjlib.mju_error.restype = None
#mjlib.mju_error_i.argtypes = [String, c_int]
#mjlib.mju_error_i.restype = None
#mjlib.mju_error_s.argtypes = [String, String]
#mjlib.mju_error_s.restype = None
#mjlib.mju_warning.argtypes = [String]
#mjlib.mju_warning.restype = None
#mjlib.mju_warning_i.argtypes = [String, c_int]
#mjlib.mju_warning_i.restype = None
#mjlib.mju_warning_s.argtypes = [String, String]
#mjlib.mju_warning_s.restype = None
#mjlib.mju_clearHandlers.argtypes = []
#mjlib.mju_clearHandlers.restype = None
#mjlib.mju_malloc.argtypes = [c_size_t]
#mjlib.mju_malloc.restype = POINTER(None)
#mjlib.mju_free.argtypes = [POINTER(None)]
#mjlib.mju_free.restype = None
#mjlib.mj_warning.argtypes = [POINTER(MJDATA), c_int]
#mjlib.mj_warning.restype = None
#mjlib.mju_zero3.argtypes = [POINTER(c_double)]
#mjlib.mju_zero3.restype = None
#mjlib.mju_copy3.argtypes = [POINTER(c_double), POINTER(c_double)]
#mjlib.mju_copy3.restype = None
#mjlib.mju_scl3.argtypes = [POINTER(c_double), POINTER(c_double), c_double]
#mjlib.mju_scl3.restype = None
#mjlib.mju_add3.argtypes = [POINTER(c_double), POINTER(c_double), POINTER(c_double)]
#mjlib.mju_add3.restype = None
#mjlib.mju_sub3.argtypes = [POINTER(c_double), POINTER(c_double), POINTER(c_double)]
#mjlib.mju_sub3.restype = None
#mjlib.mju_addTo3.argtypes = [POINTER(c_double), POINTER(c_double)]
#mjlib.mju_addTo3.restype = None
#mjlib.mju_addToScl3.argtypes = [POINTER(c_double), POINTER(c_double), c_double]
#mjlib.mju_addToScl3.restype = None
#mjlib.mju_addScl3.argtypes = [POINTER(c_double), POINTER(c_double), POINTER(c_double), c_double]
#mjlib.mju_addScl3.restype = None
#mjlib.mju_normalize3.argtypes = [POINTER(c_double)]
#mjlib.mju_normalize3.restype = c_double
#mjlib.mju_norm3.argtypes = [POINTER(c_double)]
#mjlib.mju_norm3.restype = c_double
#mjlib.mju_dot3.argtypes = [POINTER(c_double), POINTER(c_double)]
#mjlib.mju_dot3.restype = c_double
#mjlib.mju_dist3.argtypes = [POINTER(c_double), POINTER(c_double)]
#mjlib.mju_dist3.restype = c_double
#mjlib.mju_rotVecMat.argtypes = [POINTER(c_double), POINTER(c_double), POINTER(c_double)]
#mjlib.mju_rotVecMat.restype = None
#mjlib.mju_rotVecMatT.argtypes = [POINTER(c_double), POINTER(c_double), POINTER(c_double)]
#mjlib.mju_rotVecMatT.restype = None
#mjlib.mju_cross.argtypes = [POINTER(c_double), POINTER(c_double), POINTER(c_double)]
#mjlib.mju_cross.restype = None
#mjlib.mju_zero.argtypes = [POINTER(c_double), c_int]
#mjlib.mju_zero.restype = None
#mjlib.mju_copy.argtypes = [POINTER(c_double), POINTER(c_double), c_int]
#mjlib.mju_copy.restype = None
#mjlib.mju_scl.argtypes = [POINTER(c_double), POINTER(c_double), c_double, c_int]
#mjlib.mju_scl.restype = None
#mjlib.mju_add.argtypes = [POINTER(c_double), POINTER(c_double), POINTER(c_double), c_int]
#mjlib.mju_add.restype = None
#mjlib.mju_sub.argtypes = [POINTER(c_double), POINTER(c_double), POINTER(c_double), c_int]
#mjlib.mju_sub.restype = None
#mjlib.mju_addTo.argtypes = [POINTER(c_double), POINTER(c_double), c_int]
#mjlib.mju_addTo.restype = None
#mjlib.mju_addToScl.argtypes = [POINTER(c_double), POINTER(c_double), c_double, c_int]
#mjlib.mju_addToScl.restype = None
#mjlib.mju_addScl.argtypes = [POINTER(c_double), POINTER(c_double), POINTER(c_double), c_double, c_int]
#mjlib.mju_addScl.restype = None
#mjlib.mju_normalize.argtypes = [POINTER(c_double), c_int]
#mjlib.mju_normalize.restype = c_double
#mjlib.mju_norm.argtypes = [POINTER(c_double), c_int]
#mjlib.mju_norm.restype = c_double
#mjlib.mju_dot.argtypes = [POINTER(c_double), POINTER(c_double), c_int]
#mjlib.mju_dot.restype = c_double
#mjlib.mju_mulMatVec.argtypes = [POINTER(c_double), POINTER(c_double), POINTER(c_double), c_int, c_int]
#mjlib.mju_mulMatVec.restype = None
#mjlib.mju_mulMatTVec.argtypes = [POINTER(c_double), POINTER(c_double), POINTER(c_double), c_int, c_int]
#mjlib.mju_mulMatTVec.restype = None
#mjlib.mju_transpose.argtypes = [POINTER(c_double), POINTER(c_double), c_int, c_int]
#mjlib.mju_transpose.restype = None
#mjlib.mju_mulMatMat.argtypes = [POINTER(c_double), POINTER(c_double), POINTER(c_double), c_int, c_int, c_int]
#mjlib.mju_mulMatMat.restype = None
#mjlib.mju_mulMatMatT.argtypes = [POINTER(c_double), POINTER(c_double), POINTER(c_double), c_int, c_int, c_int]
#mjlib.mju_mulMatMatT.restype = None
#mjlib.mju_sqrMat.argtypes = [POINTER(c_double), POINTER(c_double), c_int, c_int, POINTER(c_double), c_int]
#mjlib.mju_sqrMat.restype = None
#mjlib.mju_mulMatTMat.argtypes = [POINTER(c_double), POINTER(c_double), POINTER(c_double), c_int, c_int, c_int]
#mjlib.mju_mulMatTMat.restype = None
#mjlib.mju_transformSpatial.argtypes = [POINTER(c_double), POINTER(c_double), mjtByte, POINTER(c_double), POINTER(c_double), POINTER(c_double)]
#mjlib.mju_transformSpatial.restype = None
#mjlib.mju_rotVecQuat.argtypes = [POINTER(c_double), POINTER(c_double), POINTER(c_double)]
#mjlib.mju_rotVecQuat.restype = None
#mjlib.mju_negQuat.argtypes = [POINTER(c_double), POINTER(c_double)]
#mjlib.mju_negQuat.restype = None
#mjlib.mju_mulQuat.argtypes = [POINTER(c_double), POINTER(c_double), POINTER(c_double)]
#mjlib.mju_mulQuat.restype = None
#mjlib.mju_mulQuatAxis.argtypes = [POINTER(c_double), POINTER(c_double), POINTER(c_double)]
#mjlib.mju_mulQuatAxis.restype = None
#mjlib.mju_axisAngle2Quat.argtypes = [POINTER(c_double), POINTER(c_double), c_double]
#mjlib.mju_axisAngle2Quat.restype = None
#mjlib.mju_quat2Vel.argtypes = [POINTER(c_double), POINTER(c_double), c_double]
#mjlib.mju_quat2Vel.restype = None
#mjlib.mju_quat2Mat.argtypes = [POINTER(c_double), POINTER(c_double)]
#mjlib.mju_quat2Mat.restype = None
#mjlib.mju_mat2Quat.argtypes = [POINTER(c_double), POINTER(c_double)]
#mjlib.mju_mat2Quat.restype = None
#mjlib.mju_derivQuat.argtypes = [POINTER(c_double), POINTER(c_double), POINTER(c_double)]
#mjlib.mju_derivQuat.restype = None
#mjlib.mju_quatIntegrate.argtypes = [POINTER(c_double), POINTER(c_double), c_double]
#mjlib.mju_quatIntegrate.restype = None
#mjlib.mju_quatZ2Vec.argtypes = [POINTER(c_double), POINTER(c_double)]
#mjlib.mju_quatZ2Vec.restype = None
#mjlib.mju_cholFactor.argtypes = [POINTER(c_double), POINTER(c_double), c_int, c_double, c_double, POINTER(c_double)]
#mjlib.mju_cholFactor.restype = c_int
#mjlib.mju_cholBacksub.argtypes = [POINTER(c_double), POINTER(c_double), POINTER(c_double), c_int, c_int, c_int]
#mjlib.mju_cholBacksub.restype = None
#mjlib.mju_eig3.argtypes = [POINTER(c_double), POINTER(c_double), POINTER(c_double), POINTER(c_double)]
#mjlib.mju_eig3.restype = c_int
#mjlib.mju_muscleFVL.argtypes = [c_double, c_double, c_double, c_double, POINTER(c_double)]
#mjlib.mju_muscleFVL.restype = c_double
#mjlib.mju_musclePassive.argtypes = [c_double, c_double, c_double, POINTER(c_double)]
#mjlib.mju_musclePassive.restype = c_double
#mjlib.mju_pneumatic.argtypes = [c_double, c_double, c_double, POINTER(c_double), c_double, c_double, c_double, POINTER(c_double)]
#mjlib.mju_pneumatic.restype = c_double
#mjlib.mju_encodePyramid.argtypes = [POINTER(c_double), POINTER(c_double), POINTER(c_double), c_int]
#mjlib.mju_encodePyramid.restype = None
#mjlib.mju_decodePyramid.argtypes = [POINTER(c_double), POINTER(c_double), POINTER(c_double), c_int]
#mjlib.mju_decodePyramid.restype = None
#mjlib.mju_springDamper.argtypes = [c_double, c_double, c_double, c_double, c_double]
#mjlib.mju_springDamper.restype = c_double
#mjlib.mju_min.argtypes = [c_double, c_double]
#mjlib.mju_min.restype = c_double
#mjlib.mju_max.argtypes = [c_double, c_double]
#mjlib.mju_max.restype = c_double
#mjlib.mju_sign.argtypes = [c_double]
#mjlib.mju_sign.restype = c_double
#mjlib.mju_round.argtypes = [c_double]
#mjlib.mju_round.restype = c_int
#mjlib.mju_type2Str.argtypes = [c_int]
#mjlib. mju_type2Str.restype = ReturnString
#mjlib.else:
#mjlib. mju_type2Str.restype = String
#mjlib. mju_type2Str.errcheck = ReturnString
#mjlib.mju_str2Type.argtypes = [String]
#mjlib.mju_str2Type.restype = mjtObj
#mjlib.mju_warningText.argtypes = [c_int]
#mjlib. mju_warningText.restype = ReturnString
#mjlib.else:
#mjlib. mju_warningText.restype = String
#mjlib. mju_warningText.errcheck = ReturnString
#mjlib.mju_isBad.argtypes = [c_double]
#mjlib.mju_isBad.restype = c_int
#mjlib.mju_isZero.argtypes = [POINTER(c_double), c_int]
#mjlib.mju_isZero.restype = c_int
| [
"[email protected]"
] | |
6f87b92696de2420ba9b14956ac1d08db4e16a86 | bc6c0cda914c23e80921793eb0ce71c45202ada4 | /src/endoexport/export.py | 66f3970d48311c18dc3f984c553dd2e423f77298 | [
"MIT"
] | permissive | karlicoss/endoexport | a2221799113a12b400e298dea8d95559926de138 | 98c8805cbcc00187822737ef32c2e0434c4f450e | refs/heads/master | 2023-04-04T09:56:57.716411 | 2023-03-15T02:19:15 | 2023-03-15T02:22:45 | 230,617,833 | 3 | 0 | MIT | 2023-03-15T02:22:46 | 2019-12-28T14:05:23 | Python | UTF-8 | Python | false | false | 1,512 | py | #!/usr/bin/env python3
import argparse
import json
from .exporthelpers.export_helper import Json
import endoapi
def get_json(**params) -> Json:
endomondo = endoapi.endomondo.Endomondo(**params)
maximum_workouts = None # None means all
workouts = endomondo.get_workouts_raw(maximum_workouts)
return workouts
Token = str
def login(email: str) -> Token:
print(f"Logging in as {email}")
password = input('Your password: ')
endomondo = endoapi.endomondo.Endomondo(email=email, password=password)
token = endomondo.token
print('Your token:')
print(token)
return token
def make_parser():
from .exporthelpers.export_helper import setup_parser, Parser
parser = Parser("Tool to export your personal Endomondo data")
setup_parser(parser=parser, params=['email', 'token']) # TODO exports -- need help for each param?
parser.add_argument('--login', action='store_true', help='''
This will log you in and give you the token (you'll need your password).
You only need to do it once, after that just store the token and use it.
''')
return parser
def main() -> None:
# TODO add logger configuration to export_helper?
# TODO autodetect logzero?
args = make_parser().parse_args()
params = args.params
dumper = args.dumper
if args.login:
login(email=params['email'])
return
j = get_json(**params)
js = json.dumps(j, indent=1, ensure_ascii=False)
dumper(js)
if __name__ == '__main__':
main()
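# Illustrative invocations (the module path and flag handling come partly from the
# unseen exporthelpers module, so treat these as assumptions):
#   python -m endoexport.export --login    # prompts for the password and prints a token
#   python -m endoexport.export ...        # with email/token supplied via the generated options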
| [
"[email protected]"
] | |
2acbc2e004d4d067218de078794ec2dd281455fd | 9df2fb0bc59ab44f026b0a2f5ef50c72b2fb2ceb | /sdk/cosmos/azure-mgmt-cosmosdb/generated_samples/cosmos_db_sql_container_create_update.py | 4eb9b7c581d3ad5045f9f14afe3e0ab5a7f5f6c1 | [
"MIT",
"LGPL-2.1-or-later",
"LicenseRef-scancode-generic-cla"
] | permissive | openapi-env-test/azure-sdk-for-python | b334a2b65eeabcf9b7673879a621abb9be43b0f6 | f61090e96094cfd4f43650be1a53425736bd8985 | refs/heads/main | 2023-08-30T14:22:14.300080 | 2023-06-08T02:53:04 | 2023-06-08T02:53:04 | 222,384,897 | 1 | 0 | MIT | 2023-09-08T08:38:48 | 2019-11-18T07:09:24 | Python | UTF-8 | Python | false | false | 3,434 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from azure.identity import DefaultAzureCredential
from azure.mgmt.cosmosdb import CosmosDBManagementClient
"""
# PREREQUISITES
pip install azure-identity
pip install azure-mgmt-cosmosdb
# USAGE
python cosmos_db_sql_container_create_update.py
Before run the sample, please set the values of the client ID, tenant ID and client secret
of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID,
AZURE_CLIENT_SECRET. For more info about how to get the value, please see:
https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal
"""
def main():
client = CosmosDBManagementClient(
credential=DefaultAzureCredential(),
subscription_id="subid",
)
response = client.sql_resources.begin_create_update_sql_container(
resource_group_name="rg1",
account_name="ddb1",
database_name="databaseName",
container_name="containerName",
create_update_sql_container_parameters={
"location": "West US",
"properties": {
"options": {},
"resource": {
"clientEncryptionPolicy": {
"includedPaths": [
{
"clientEncryptionKeyId": "keyId",
"encryptionAlgorithm": "AEAD_AES_256_CBC_HMAC_SHA256",
"encryptionType": "Deterministic",
"path": "/path",
}
],
"policyFormatVersion": 2,
},
"conflictResolutionPolicy": {"conflictResolutionPath": "/path", "mode": "LastWriterWins"},
"defaultTtl": 100,
"id": "containerName",
"indexingPolicy": {
"automatic": True,
"excludedPaths": [],
"includedPaths": [
{
"indexes": [
{"dataType": "String", "kind": "Range", "precision": -1},
{"dataType": "Number", "kind": "Range", "precision": -1},
],
"path": "/*",
}
],
"indexingMode": "consistent",
},
"partitionKey": {"kind": "Hash", "paths": ["/AccountNumber"]},
"uniqueKeyPolicy": {"uniqueKeys": [{"paths": ["/testPath"]}]},
},
},
"tags": {},
},
).result()
print(response)
# x-ms-original-file: specification/cosmos-db/resource-manager/Microsoft.DocumentDB/stable/2023-04-15/examples/CosmosDBSqlContainerCreateUpdate.json
if __name__ == "__main__":
main()
| [
"[email protected]"
] | |
d10c3fb59eb602e7a438fe8b8b7ccca52fcc45d2 | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/nouns/_syphon.py | 1ef3547d3d666728720ba4bfc26206b8a9d76bc4 | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 295 | py |
#calss header
class _SYPHON():
def __init__(self,):
self.name = "SYPHON"
self.definitions = [u'a siphon noun ']
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.specie = 'nouns'
def run(self, obj1 = [], obj2 = []):
return self.jsondata
| [
"[email protected]"
] | |
99f477ff8ee5eee19b30adddfcaa704802c97c42 | 9b9a02657812ea0cb47db0ae411196f0e81c5152 | /repoData/Floobits-flootty/allPythonContent.py | 233f4eda6ac3b66566c18b3214288161442dcb88 | [] | no_license | aCoffeeYin/pyreco | cb42db94a3a5fc134356c9a2a738a063d0898572 | 0ac6653219c2701c13c508c5c4fc9bc3437eea06 | refs/heads/master | 2020-12-14T14:10:05.763693 | 2016-06-27T05:15:15 | 2016-06-27T05:15:15 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 38,459 | py | __FILENAME__ = api
import sys
import base64
import json
try:
from urllib.request import Request, urlopen
assert Request and urlopen
except ImportError:
from urllib2 import Request, urlopen
try:
from . import shared as G, utils
assert G and utils
except (ImportError, ValueError):
import shared as G
import utils
def get_basic_auth():
# TODO: use api_key if it exists
basic_auth = ('%s:%s' % (G.USERNAME, G.SECRET)).encode('utf-8')
basic_auth = base64.encodestring(basic_auth)
return basic_auth.decode('ascii').replace('\n', '')
def api_request(url, data=None):
if data:
data = json.dumps(data).encode('utf-8')
r = Request(url, data=data)
r.add_header('Authorization', 'Basic %s' % get_basic_auth())
r.add_header('Accept', 'application/json')
r.add_header('Content-type', 'application/json')
r.add_header('User-Agent', 'Flootty py-%s.%s' % (sys.version_info[0], sys.version_info[1]))
return urlopen(r, timeout=5)
def create_workspace(post_data):
url = 'https://%s/api/workspace/' % G.DEFAULT_HOST
return api_request(url, post_data)
def get_workspace_by_url(url):
result = utils.parse_url(url)
api_url = 'https://%s/api/workspace/%s/%s/' % (result['host'], result['owner'], result['workspace'])
return api_request(api_url)
def get_workspace(owner, workspace):
api_url = 'https://%s/api/workspace/%s/%s/' % (G.DEFAULT_HOST, owner, workspace)
return api_request(api_url)
def get_workspaces():
api_url = 'https://%s/api/workspace/can/view/' % (G.DEFAULT_HOST)
return api_request(api_url)
def get_now_editing_workspaces():
api_url = 'https://%s/api/workspaces/now_editing/' % (G.DEFAULT_HOST)
return api_request(api_url)
def get_orgs():
api_url = 'https://%s/api/orgs/' % (G.DEFAULT_HOST)
return api_request(api_url)
def get_orgs_can_admin():
api_url = 'https://%s/api/orgs/can/admin/' % (G.DEFAULT_HOST)
return api_request(api_url)
def send_error(data):
try:
api_url = 'https://%s/api/error/' % (G.DEFAULT_HOST)
return api_request(api_url, data)
except Exception as e:
print(e)
return None
########NEW FILE########
__FILENAME__ = cert
CA_CERT = '''-----BEGIN CERTIFICATE-----
MIIHyTCCBbGgAwIBAgIBATANBgkqhkiG9w0BAQUFADB9MQswCQYDVQQGEwJJTDEW
MBQGA1UEChMNU3RhcnRDb20gTHRkLjErMCkGA1UECxMiU2VjdXJlIERpZ2l0YWwg
Q2VydGlmaWNhdGUgU2lnbmluZzEpMCcGA1UEAxMgU3RhcnRDb20gQ2VydGlmaWNh
dGlvbiBBdXRob3JpdHkwHhcNMDYwOTE3MTk0NjM2WhcNMzYwOTE3MTk0NjM2WjB9
MQswCQYDVQQGEwJJTDEWMBQGA1UEChMNU3RhcnRDb20gTHRkLjErMCkGA1UECxMi
U2VjdXJlIERpZ2l0YWwgQ2VydGlmaWNhdGUgU2lnbmluZzEpMCcGA1UEAxMgU3Rh
cnRDb20gQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwggIiMA0GCSqGSIb3DQEBAQUA
A4ICDwAwggIKAoICAQDBiNsJvGxGfHiflXu1M5DycmLWwTYgIiRezul38kMKogZk
pMyONvg45iPwbm2xPN1yo4UcodM9tDMr0y+v/uqwQVlntsQGfQqedIXWeUyAN3rf
OQVSWff0G0ZDpNKFhdLDcfN1YjS6LIp/Ho/u7TTQEceWzVI9ujPW3U3eCztKS5/C
Ji/6tRYccjV3yjxd5srhJosaNnZcAdt0FCX+7bWgiA/deMotHweXMAEtcnn6RtYT
Kqi5pquDSR3l8u/d5AGOGAqPY1MWhWKpDhk6zLVmpsJrdAfkK+F2PrRt2PZE4XNi
HzvEvqBTViVsUQn3qqvKv3b9bZvzndu/PWa8DFaqr5hIlTpL36dYUNk4dalb6kMM
Av+Z6+hsTXBbKWWc3apdzK8BMewM69KN6Oqce+Zu9ydmDBpI125C4z/eIT574Q1w
+2OqqGwaVLRcJXrJosmLFqa7LH4XXgVNWG4SHQHuEhANxjJ/GP/89PrNbpHoNkm+
Gkhpi8KWTRoSsmkXwQqQ1vp5Iki/untp+HDH+no32NgN0nZPV/+Qt+OR0t3vwmC3
Zzrd/qqc8NSLf3Iizsafl7b4r4qgEKjZ+xjGtrVcUjyJthkqcwEKDwOzEmDyei+B
26Nu/yYwl/WL3YlXtq09s68rxbd2AvCl1iuahhQqcvbjM4xdCUsT37uMdBNSSwID
AQABo4ICUjCCAk4wDAYDVR0TBAUwAwEB/zALBgNVHQ8EBAMCAa4wHQYDVR0OBBYE
FE4L7xqkQFulF2mHMMo0aEPQQa7yMGQGA1UdHwRdMFswLKAqoCiGJmh0dHA6Ly9j
ZXJ0LnN0YXJ0Y29tLm9yZy9zZnNjYS1jcmwuY3JsMCugKaAnhiVodHRwOi8vY3Js
LnN0YXJ0Y29tLm9yZy9zZnNjYS1jcmwuY3JsMIIBXQYDVR0gBIIBVDCCAVAwggFM
BgsrBgEEAYG1NwEBATCCATswLwYIKwYBBQUHAgEWI2h0dHA6Ly9jZXJ0LnN0YXJ0
Y29tLm9yZy9wb2xpY3kucGRmMDUGCCsGAQUFBwIBFilodHRwOi8vY2VydC5zdGFy
dGNvbS5vcmcvaW50ZXJtZWRpYXRlLnBkZjCB0AYIKwYBBQUHAgIwgcMwJxYgU3Rh
cnQgQ29tbWVyY2lhbCAoU3RhcnRDb20pIEx0ZC4wAwIBARqBl0xpbWl0ZWQgTGlh
YmlsaXR5LCByZWFkIHRoZSBzZWN0aW9uICpMZWdhbCBMaW1pdGF0aW9ucyogb2Yg
dGhlIFN0YXJ0Q29tIENlcnRpZmljYXRpb24gQXV0aG9yaXR5IFBvbGljeSBhdmFp
bGFibGUgYXQgaHR0cDovL2NlcnQuc3RhcnRjb20ub3JnL3BvbGljeS5wZGYwEQYJ
YIZIAYb4QgEBBAQDAgAHMDgGCWCGSAGG+EIBDQQrFilTdGFydENvbSBGcmVlIFNT
TCBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTANBgkqhkiG9w0BAQUFAAOCAgEAFmyZ
9GYMNPXQhV59CuzaEE44HF7fpiUFS5Eyweg78T3dRAlbB0mKKctmArexmvclmAk8
jhvh3TaHK0u7aNM5Zj2gJsfyOZEdUauCe37Vzlrk4gNXcGmXCPleWKYK34wGmkUW
FjgKXlf2Ysd6AgXmvB618p70qSmD+LIU424oh0TDkBreOKk8rENNZEXO3SipXPJz
ewT4F+irsfMuXGRuczE6Eri8sxHkfY+BUZo7jYn0TZNmezwD7dOaHZrzZVD1oNB1
ny+v8OqCQ5j4aZyJecRDjkZy42Q2Eq/3JR44iZB3fsNrarnDy0RLrHiQi+fHLB5L
EUTINFInzQpdn4XBidUaePKVEFMy3YCEZnXZtWgo+2EuvoSoOMCZEoalHmdkrQYu
L6lwhceWD3yJZfWOQ1QOq92lgDmUYMA0yZZwLKMS9R9Ie70cfmu3nZD0Ijuu+Pwq
yvqCUqDvr0tVk+vBtfAii6w0TiYiBKGHLHVKt+V9E9e4DGTANtLJL4YSjCMJwRuC
O3NJo2pXh5Tl1njFmUNj403gdy3hZZlyaQQaRwnmDwFWJPsfvw55qVguucQJAX6V
um0ABj6y6koQOdjQK/W/7HW/lwLFCRsI3FU34oH7N4RDYiDK51ZLZer+bMEkkySh
NOsF/5oirpt9P/FlUQqmMGqz9IgcgA38corog14=
-----END CERTIFICATE-----'''
########NEW FILE########
__FILENAME__ = flootty
#!/usr/bin/env python
# coding: utf-8
try:
unicode()
except NameError:
unicode = str
# Heavily influenced by the work of Joshua D. Bartlett
# see: http://sqizit.bartletts.id.au/2011/02/14/pseudo-terminals-in-python/
# original copyright
# Copyright (c) 2011 Joshua D. Bartlett
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
# boilerplate to allow running as script directly
if __name__ == "__main__" and __package__ is None:
import sys
import os
# The following assumes the script is in the top level of the package
# directory. We use dirname() to help get the parent directory to add to
# sys.path, so that we can import the current package. This is necessary
# since when invoked directly, the 'current' package is not automatically
# imported.
parent_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.insert(0, parent_dir)
import flootty
assert flootty
__package__ = str("flootty")
del sys, os
import atexit
import fcntl
import json
import optparse
import array
import os
import pty
import select
import socket
import ssl
import sys
import tempfile
import termios
import tty
import signal
import time
import base64
import collections
import errno
PY2 = sys.version_info < (3, 0)
try:
import __builtin__
input = getattr(__builtin__, 'raw_input')
except (ImportError, AttributeError):
pass
try:
from . import api, cert, shared as G, utils, version
assert api and cert and G and utils
except (ImportError, ValueError):
import api
import cert
import shared as G
import utils
import version
PROTO_VERSION = '0.11'
CLIENT = 'flootty %s' % version.FLOOTTY_VERSION
INITIAL_RECONNECT_DELAY = 1000
FD_READ_BYTES = 65536
# Seconds
SELECT_TIMEOUT = 0.1
NET_TIMEOUT = 10
MAX_BYTES_TO_BUFFER = 65536
DEFAULT_HOST = "floobits.com"
DEFAULT_PORT = 3448
def read_floorc():
settings = {}
p = os.path.expanduser('~/.floorc')
try:
fd = open(p, 'rb')
except IOError as e:
if e.errno == 2:
return settings
raise
data = fd.read().decode('utf-8')
fd.close()
for line in data.split('\n'):
position = line.find(' ')
if position < 0:
continue
settings[line[:position]] = line[position + 1:]
return settings
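# Example ~/.floorc contents (illustrative only -- each line is "key value",
# split at the first space):
#
#   username your_username
#   secret your_api_secret
#
# read_floorc() would then return {'username': 'your_username', 'secret': 'your_api_secret'}.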
def write(fd, b):
if (not PY2) and isinstance(b, str):
b = b.encode('utf-8')
elif PY2 and isinstance(b, unicode):
b = b.encode('utf-8')
while len(b):
try:
n = os.write(fd, b)
b = b[n:]
except (IOError, OSError):
pass
def read(fd):
buf = b''
while True:
try:
d = os.read(fd, FD_READ_BYTES)
if not d or d == '':
break
buf += d
except (IOError, OSError):
break
return buf
def out(*args):
buf = "%s\r\n" % " ".join(args)
write(pty.STDOUT_FILENO, buf)
def err(*args):
buf = "%s\r\n" % " ".join(args)
write(pty.STDERR_FILENO, buf)
def die(*args):
err(*args)
sys.exit(1)
usage = '''usage: %prog [options] [terminal_name]\n
For more help, see https://github.com/Floobits/flootty'''
def main():
settings = read_floorc()
parser = optparse.OptionParser(usage=usage)
parser.add_option("-u", "--username",
dest="username",
default=settings.get('username'),
help="Your Floobits username")
parser.add_option("-s", "--secret",
dest="secret",
default=settings.get('secret'),
help="Your Floobits secret (api key)")
parser.add_option("-c", "--create",
dest="create",
default=False,
action="store_true",
help="The terminal name to create")
parser.add_option("--host",
dest="host",
default=DEFAULT_HOST,
help="The host to connect to. Deprecated. Use --url instead.")
parser.add_option("-p", "--port",
dest="port",
default=DEFAULT_PORT,
help="The port to connect to. Deprecated. Use --url instead.")
parser.add_option("-w", "--workspace",
dest="workspace",
help="The workspace name. --owner is required with this option. Deprecated. Use --url instead.")
parser.add_option("-o", "--owner",
dest="owner",
help="The workspace owner. --workspace is required with this option. Deprecated. Use --url instead.")
parser.add_option("-l", "--list",
dest="list",
default=False,
action="store_true",
help="List all terminals in the workspace")
parser.add_option("--unsafe",
dest="safe",
default=True,
action="store_false",
help="Less safe terminal. This allows other users to send enter in your terminal.")
parser.add_option("--no-ssl",
dest="use_ssl",
default=True,
action="store_false",
help="Do not use this option unless you know what you are doing!")
parser.add_option("--url",
dest="workspace_url",
default=None,
help="The URL of the workspace to connect to.")
parser.add_option("--resize",
dest="resize",
default=False,
action="store_true",
help="Resize your terminal to the host terminal size.")
parser.add_option("-P", "--preserve-ps1",
dest="set_prompt",
default=True,
action="store_false",
help="Don't change $PS1 (bash/zsh prompt)")
parser.add_option("-v", "--version",
dest="version",
default=False,
action="store_true",
help="Print version")
options, args = parser.parse_args()
if options.version:
print(CLIENT)
return
G.USERNAME = options.username
G.SECRET = options.secret
default_term_name = ""
if options.create:
default_term_name = "ftty"
term_name = args and args[0] or default_term_name
if options.workspace and options.owner and options.workspace_url:
# TODO: confusing
parser.error("You can either specify --workspace and --owner, or --url, but not both.")
if bool(options.workspace) != bool(options.owner):
parser.error("You must specify a workspace and owner or neither.")
for opt in ['owner', 'workspace']:
if getattr(options, opt):
print('%s is deprecated. Please use --url instead.' % opt)
if not options.workspace or not options.owner:
floo = {}
if options.workspace_url:
floo = utils.parse_url(options.workspace_url)
else:
for floo_path in walk_up(os.path.realpath('.')):
try:
floo = json.loads(open(os.path.join(floo_path, '.floo'), 'rb').read().decode('utf-8'))
floo = utils.parse_url(floo['url'])
except Exception:
pass
else:
break
options.host = floo.get('host')
options.workspace = floo.get('workspace')
options.owner = floo.get('owner')
options.use_ssl = floo.get('secure')
if not options.port:
options.port = floo.get('port')
if not options.host:
options.host = floo.get('host')
if not options.workspace or not options.owner:
now_editing = api.get_now_editing_workspaces()
now_editing = json.loads(now_editing.read().decode('utf-8'))
if len(now_editing) == 1:
options.workspace = now_editing[0]['name']
options.owner = now_editing[0]['owner']
# TODO: list possible workspaces to join if > 1 is active
if options.list:
if len(term_name) != 0:
die("I don't understand why you gave me a positional argument.")
for opt in ['workspace', 'owner', 'username', 'secret']:
if not getattr(options, opt):
parser.error('%s not given' % opt)
color_reset = '\033[0m'
if options.safe:
green = '\033[92m'
print('%sTerminal is safe. Other users will not be able to send [enter]%s' % (green, color_reset))
else:
yellorange = '\033[93m'
print('%sTerminal is unsafe. Other users will be able to send [enter]. Be wary!%s' % (yellorange, color_reset))
f = Flootty(options, term_name)
atexit.register(f.cleanup)
f.connect_to_internet()
f.select()
def walk_up(path):
step_up = lambda x: os.path.realpath(os.path.join(x, '..'))
parent = step_up(path)
while parent != path:
yield path
path = parent
parent = step_up(path)
yield path
class FD(object):
def __init__(self, fileno, reader=None, writer=None, errer=None, name=None):
self.fileno = fileno
self.reader = reader
self.writer = writer
self.errer = errer
self.name = name
def __getitem__(self, key):
return getattr(self, key, None)
def __str__(self):
return str(self.name)
class Flootty(object):
'''Mostly OK at sharing a shell'''
def __init__(self, options, term_name):
self.master_fd = None
self.original_wincher = None
self.fds = {}
self.readers = set()
self.writers = set()
self.errers = set()
self.empty_selects = 0
self.reconnect_timeout = None
self.buf_out = collections.deque()
self.buf_in = b''
self.host = options.host
self.port = int(options.port)
self.workspace = options.workspace
self.owner = options.owner
self.options = options
self.term_name = term_name
self.authed = False
self.term_id = None
self.orig_stdin_atts = None
self.orig_stdout_atts = None
self.last_stdin = 0
self.reconnect_delay = INITIAL_RECONNECT_DELAY
def add_fd(self, fileno, **kwargs):
try:
fileno = fileno.fileno()
except:
fileno = fileno
fd = FD(fileno, **kwargs)
self.fds[fileno] = fd
if fd.reader:
self.readers.add(fileno)
if fd.writer:
self.writers.add(fileno)
if fd.errer:
self.errers.add(fileno)
def remove_fd(self, fileno):
self.readers.discard(fileno)
self.writers.discard(fileno)
self.errers.discard(fileno)
try:
del self.fds[fileno]
except KeyError:
pass
def transport(self, name, data):
data['name'] = name
self.buf_out.append(data)
def select(self):
        '''
        Main event loop: run pending timeouts, select() over all registered fds and
        dispatch each ready fd to its reader/writer/errer handler.
        '''
attrs = ('errer', 'reader', 'writer')
while True:
utils.call_timeouts()
if len(self.buf_out) == 0 and self.sock:
self.writers.remove(self.sock.fileno())
try:
# NOTE: you will never have to write anything without reading first from a different one
_in, _out, _except = select.select(self.readers, self.writers, self.errers, SELECT_TIMEOUT)
except (IOError, OSError) as e:
continue
            except (select.error, socket.error, Exception) as e:
                # Interrupted system call.
                if e.args and e.args[0] == errno.EINTR:
                    continue
self.reconnect()
continue
finally:
if self.sock:
self.writers.add(self.sock.fileno())
for position, fds in enumerate([_except, _in, _out]):
attr = attrs[position]
for fd in fds:
# the handler can remove itself from self.fds...
handler = self.fds.get(fd)
if handler is None:
continue
handler = handler[attr]
if handler:
handler(fd)
else:
raise Exception('no handler for fd: %s %s' % (fd, attr))
def cloud_read(self, fd):
buf = b''
try:
while True:
d = self.sock.recv(FD_READ_BYTES)
if not d:
break
buf += d
except (socket.error, TypeError):
pass
if buf:
self.empty_selects = 0
self.handle(buf)
else:
self.empty_selects += 1
if (int(self.empty_selects * SELECT_TIMEOUT)) > NET_TIMEOUT:
err('No data from sock.recv() {0} times.'.format(self.empty_selects))
return self.reconnect()
def cloud_write(self, fd):
new_buf_out = collections.deque()
try:
while True:
item = self.buf_out.popleft()
data = json.dumps(item) + '\n'
if self.authed or item['name'] == 'auth':
if not PY2:
data = data.encode('utf-8')
self.sock.sendall(data)
else:
new_buf_out.append(item)
except socket.error:
self.buf_out.appendleft(item)
self.reconnect()
except IndexError:
pass
        # extendleft() reverses its argument, so re-reverse to keep the original order
        self.buf_out.extendleft(reversed(new_buf_out))
def cloud_err(self, err):
out('reconnecting because of %s' % err)
self.reconnect()
def handle(self, req):
self.buf_in += req
while True:
before, sep, after = self.buf_in.partition(b'\n')
if not sep:
break
            data = json.loads(before.decode('utf-8'))
self.handle_event(data)
self.buf_in = after
def handle_event(self, data):
name = data.get('name')
if not name:
return out('no name in data?!?')
func = getattr(self, "on_%s" % (name), None)
if not func:
return
func(data)
def on_room_info(self, ri):
self.authed = True
self.ri = ri
def list_terms(terms):
term_name = ""
for term_id, term in terms.items():
owner = str(term['owner'])
term_name = term['term_name']
out('terminal %s created by %s' % (term['term_name'], ri['users'][owner]['username']))
return term_name
if self.options.create:
buf = self._get_pty_size()
term_name = self.term_name
i = 0
term_names = [term['term_name'] for term_id, term in ri['terms'].items()]
while term_name in term_names:
i += 1
term_name = self.term_name + str(i)
self.term_name = term_name
return self.transport('create_term', {'term_name': self.term_name, 'size': [buf[1], buf[0]]})
elif self.options.list:
out('Terminals in %s::%s' % (self.owner, self.workspace))
list_terms(ri['terms'])
return die()
elif not self.term_name:
if len(ri['terms']) == 0:
out('There is no active terminal in this workspace. Do you want to share your terminal? (y/n)')
choice = input().lower()
self.term_name = "_"
if choice == 'y':
self.options.create = True
buf = self._get_pty_size()
return self.transport('create_term', {'term_name': self.term_name, 'size': [buf[1], buf[0]]})
else:
die('If you ever change your mind, you can share your terminal using the --create [super_awesome_name] flag.')
elif len(ri['terms']) == 1:
term_id, term = list(ri['terms'].items())[0]
self.term_id = int(term_id)
self.term_name = term['term_name']
else:
out('More than one active term exists in this workspace.')
example_name = list_terms(ri['terms'])
die('Please pick a workspace like so: flootty %s' % example_name)
else:
for term_id, term in ri['terms'].items():
if term['term_name'] == self.term_name:
self.term_id = int(term_id)
break
if self.term_id is None:
die('No terminal with name %s' % self.term_name)
return self.join_term()
def on_ping(self, data):
self.transport('pong', {})
def on_disconnect(self, data):
reason = data.get('reason')
out('Disconnected by server!')
if reason:
# TODO: don't kill terminal until current process is done or something
die('Reason: %s' % reason)
self.reconnect()
def on_error(self, data):
if self.term_id is None:
die(data.get('msg'))
else:
out('Error from server: %s' % data.get('msg'))
def on_create_term(self, data):
if data.get('term_name') != self.term_name:
return
self.term_id = data.get('id')
self.create_term()
def on_delete_term(self, data):
if data.get('id') != self.term_id:
return
die('User %s killed the terminal. Exiting.' % (data.get('username')))
def on_update_term(self, data):
if data.get('id') != self.term_id:
return
self._set_pty_size()
def on_term_stdin(self, data):
if data.get('id') != self.term_id:
return
if not self.options.create:
return
self.handle_stdio(base64.b64decode(data['data']), data.get('user_id'))
def on_term_stdout(self, data):
if data.get('id') != self.term_id:
return
self.handle_stdio(data['data'])
def reconnect(self):
if self.reconnect_timeout:
return
new_buf_out = collections.deque()
total_len = 0
while True:
try:
item = self.buf_out.popleft()
except IndexError:
break
if item['name'] == 'term_stdout':
total_len += len(item['data'])
if total_len > MAX_BYTES_TO_BUFFER:
continue
new_buf_out.appendleft(item)
self.buf_out = new_buf_out
if self.sock:
self.remove_fd(self.sock.fileno())
try:
self.sock.shutdown(2)
except Exception:
pass
try:
self.sock.close()
except Exception:
pass
self.sock = None
self.authed = False
self.reconnect_delay *= 1.5
if self.reconnect_delay > 10000:
self.reconnect_delay = 10000
self.reconnect_timeout = utils.set_timeout(self.connect_to_internet, self.reconnect_delay)
def send_auth(self):
self.buf_out.appendleft({
'name': 'auth',
'username': self.options.username,
'secret': self.options.secret,
'room': self.workspace,
'room_owner': self.owner,
'client': CLIENT,
'platform': sys.platform,
'version': PROTO_VERSION
})
def connect_to_internet(self):
self.empty_selects = 0
self.reconnect_timeout = None
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
if self.options.use_ssl:
self.cert_fd = tempfile.NamedTemporaryFile()
self.cert_fd.write(cert.CA_CERT.encode('utf-8'))
self.cert_fd.flush()
self.sock = ssl.wrap_socket(self.sock, ca_certs=self.cert_fd.name, cert_reqs=ssl.CERT_REQUIRED)
elif self.port == 3448:
self.port = 3148
out('Connecting to %s' % self.workspace_url())
try:
self.sock.connect((self.host, self.port))
if self.options.use_ssl:
self.sock.do_handshake()
except socket.error as e:
out('Error connecting: %s.' % e)
return self.reconnect()
self.sock.setblocking(0)
out('Connected!')
self.send_auth()
self.add_fd(self.sock, reader=self.cloud_read, writer=self.cloud_write, errer=self.cloud_err, name='net')
self.reconnect_delay = INITIAL_RECONNECT_DELAY
def workspace_url(self):
proto = {True: "https", False: "http"}
proto_str = proto[self.options.use_ssl]
port_str = ''
if self.options.use_ssl:
if self.port != 3448:
port_str = ':%s' % self.port
else:
if self.port != 3148:
port_str = ':%s' % self.port
return '%s://%s%s/%s/%s' % (proto_str, self.host, port_str, self.owner, self.workspace)
def join_term(self):
out('Successfully joined %s' % (self.workspace_url()))
self.orig_stdout_atts = tty.tcgetattr(sys.stdout)
stdout = sys.stdout.fileno()
tty.setraw(stdout)
fl = fcntl.fcntl(stdout, fcntl.F_GETFL)
fcntl.fcntl(stdout, fcntl.F_SETFL, fl | os.O_NONBLOCK)
self.orig_stdin_atts = tty.tcgetattr(sys.stdin)
stdin = sys.stdin.fileno()
tty.setraw(stdin)
fl = fcntl.fcntl(stdin, fcntl.F_GETFL)
fcntl.fcntl(stdin, fcntl.F_SETFL, fl | os.O_NONBLOCK)
def ship_stdin(fd):
data = read(fd)
if data:
self.transport("term_stdin", {'data': base64.b64encode(data).decode('utf8'), 'id': self.term_id})
if 'term_stdin' in self.ri['perms']:
out('You have permission to write to this terminal. Remember: With great power comes great responsibility.')
self.add_fd(stdin, reader=ship_stdin, name='join_term_stdin')
else:
out('You do not have permission to write to this terminal.')
def stdout_write(buf):
write(stdout, base64.b64decode(buf))
self.handle_stdio = stdout_write
self._set_pty_size(self.ri['terms'][str(self.term_id)]['size'])
def create_term(self):
'''
Create a spawned process.
Based on the code for pty.spawn().
'''
if self.master_fd:
# reconnected. don't spawn a new shell
out('Reconnected to %s' % (self.workspace_url()))
return
shell = os.environ['SHELL']
out('Successfully joined %s' % (self.workspace_url()))
self.child_pid, self.master_fd = pty.fork()
if self.child_pid == pty.CHILD:
os.execlpe(shell, shell, '--login', os.environ)
self.orig_stdin_atts = tty.tcgetattr(sys.stdin.fileno())
tty.setraw(pty.STDIN_FILENO)
self.original_wincher = signal.signal(signal.SIGWINCH, self._signal_winch)
self._set_pty_size()
def slave_death(fd):
die('Exiting flootty because child exited.')
self.extra_data = b''
def stdout_write(fd):
'''
Called when there is data to be sent from the child process back to the user.
'''
try:
data = self.extra_data + os.read(fd, FD_READ_BYTES)
except:
data = None
if not data:
return die("Time to go!")
self.transport("term_stdout", {'data': base64.b64encode(data).decode('utf8'), 'id': self.term_id})
write(pty.STDOUT_FILENO, data)
self.add_fd(self.master_fd, reader=stdout_write, errer=slave_death, name='create_term_stdout_write')
def stdin_write(fd):
data = os.read(fd, FD_READ_BYTES)
if data:
write(self.master_fd, data)
now = time.time()
# Only send stdin event if it's been > 2 seconds. This prevents people from figuring out password lengths
if now - self.last_stdin > 2:
self.transport("term_stdin", {'data': ' ', 'id': self.term_id})
self.last_stdin = now
self.add_fd(pty.STDIN_FILENO, reader=stdin_write, name='create_term_stdin_write')
        def net_stdin_write(buf, user_id=None):
            # buf arrives as raw bytes (base64-decoded upstream), so use byte literals
            # for the newline checks/replacements to stay correct on Python 2 and 3.
            if self.options.safe:
                if buf.find(b'\n') != -1 or buf.find(b'\r') != -1:
                    to = user_id or []
                    self.transport('datamsg', {
                        'to': to,
                        'data': {
                            'name': 'safe_term',
                            'term_id': self.term_id,
                            'msg': 'Terminal %s is in safe mode. Other users are not allowed to press enter.' % self.term_name,
                        }})
                    self.transport('term_stdout', {
                        'id': self.term_id,
                        'data': base64.b64encode(b'\a').decode('utf8'),
                    })
                    buf = buf.replace(b'\n', b'')
                    buf = buf.replace(b'\r', b'')
            if not buf:
                return
            write(self.master_fd, buf)
self.handle_stdio = net_stdin_write
color_green = '\\[\\e[32m\\]'
color_reset = '\\[\\033[0m\\]'
color_yellorange = '\\[\\e[93m\\]'
# TODO: other shells probably use weird color escapes
if 'zsh' in shell:
color_green = "%{%F{green}%}"
color_reset = "%{%f%}"
color_yellorange = "%{%F{yellow}%}"
if self.options.set_prompt:
term_color = color_yellorange
if self.options.safe:
term_color = color_green
# Not confusing at all </sarcasm>
cmd = 'PS1="%s%s::%s::%s%s%s%s $PS1"\n' % (color_green, self.owner, self.workspace, color_reset, term_color, self.term_name, color_reset)
write(self.master_fd, cmd)
def _signal_winch(self, signum, frame):
'''
Signal handler for SIGWINCH - window size has changed.
'''
self._set_pty_size()
def _get_pty_size(self):
buf = array.array('h', [0, 0, 0, 0])
fcntl.ioctl(pty.STDOUT_FILENO, termios.TIOCGWINSZ, buf, True)
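        # buf now holds (rows, cols, xpixel, ypixel) as reported by TIOCGWINSZ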
return buf
def _set_pty_size(self, size=None):
'''
Sets the window size of the child pty based on the window size of our own controlling terminal.
'''
# Get the terminal size of the real terminal, set it on the pseudoterminal.
buf = self._get_pty_size()
if size:
buf[0] = size[1]
buf[1] = size[0]
if self.options.create:
assert self.master_fd is not None
fcntl.ioctl(self.master_fd, termios.TIOCSWINSZ, buf)
if self.term_id:
self.transport('update_term', {'id': self.term_id, 'size': [buf[1], buf[0]]})
else:
# XXXX: this resizes the window :/
if self.options.resize:
                write(pty.STDOUT_FILENO, "\x1b[8;{rows};{cols}t".format(rows=buf[0], cols=buf[1]))
fcntl.ioctl(pty.STDOUT_FILENO, termios.TIOCSWINSZ, buf)
def cleanup(self):
if self.orig_stdout_atts:
self.orig_stdout_atts[3] = self.orig_stdout_atts[3] | termios.ECHO
tty.tcsetattr(sys.stdout, tty.TCSAFLUSH, self.orig_stdout_atts)
if self.orig_stdin_atts:
self.orig_stdin_atts[3] = self.orig_stdin_atts[3] | termios.ECHO
tty.tcsetattr(sys.stdin, tty.TCSAFLUSH, self.orig_stdin_atts)
if self.original_wincher:
signal.signal(signal.SIGWINCH, self.original_wincher)
try:
self.cert_fd.close()
except Exception:
pass
print('ciao.')
if __name__ == '__main__':
main()
########NEW FILE########
__FILENAME__ = shared
import os
__VERSION__ = ''
__PLUGIN_VERSION__ = ''
# Config settings
USERNAME = ''
SECRET = ''
API_KEY = ''
DEBUG = False
SOCK_DEBUG = False
ALERT_ON_MSG = True
LOG_TO_CONSOLE = False
BASE_DIR = os.path.expanduser(os.path.join('~', 'floobits'))
# Shared globals
DEFAULT_HOST = 'floobits.com'
DEFAULT_PORT = 3448
SECURE = True
SHARE_DIR = None
COLAB_DIR = ''
PROJECT_PATH = ''
JOINED_WORKSPACE = False
PERMS = []
STALKER_MODE = False
AUTO_GENERATED_ACCOUNT = False
PLUGIN_PATH = None
WORKSPACE_WINDOW = None
CHAT_VIEW = None
CHAT_VIEW_PATH = None
TICK_TIME = 100
AGENT = None
IGNORE_MODIFIED_EVENTS = False
VIEW_TO_HASH = {}
FLOORC_PATH = os.path.expanduser(os.path.join('~', '.floorc'))
########NEW FILE########
__FILENAME__ = utils
import re
import time
from collections import defaultdict
try:
from urllib.parse import urlparse
assert urlparse
except ImportError:
from urlparse import urlparse
try:
from . import shared as G
assert G
except (ImportError, ValueError):
import shared as G
top_timeout_id = 0
cancelled_timeouts = set()
timeout_ids = set()
timeouts = defaultdict(list)
def set_timeout(func, timeout, *args, **kwargs):
global top_timeout_id
timeout_id = top_timeout_id
top_timeout_id += 1
if top_timeout_id > 100000:
top_timeout_id = 0
def timeout_func():
timeout_ids.discard(timeout_id)
if timeout_id in cancelled_timeouts:
cancelled_timeouts.remove(timeout_id)
return
func(*args, **kwargs)
then = time.time() + (timeout / 1000.0)
timeouts[then].append(timeout_func)
timeout_ids.add(timeout_id)
return timeout_id
def cancel_timeout(timeout_id):
if timeout_id in timeout_ids:
cancelled_timeouts.add(timeout_id)
def call_timeouts():
now = time.time()
to_remove = []
for t, tos in timeouts.copy().items():
if now >= t:
for timeout in tos:
timeout()
to_remove.append(t)
for k in to_remove:
del timeouts[k]
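# Minimal usage sketch (illustrative; on_tick is any callable you supply):
#
#   tid = set_timeout(on_tick, 500)   # schedule on_tick ~500ms from now
#   cancel_timeout(tid)               # or cancel it before it fires
#   call_timeouts()                   # the owner's loop calls this to run due timeouts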
def parse_url(workspace_url):
secure = G.SECURE
owner = None
workspace_name = None
parsed_url = urlparse(workspace_url)
port = parsed_url.port
if not port:
port = G.DEFAULT_PORT
if parsed_url.scheme == 'http':
if not port:
port = 3148
secure = False
result = re.match('^/([-\@\+\.\w]+)/([-\@\+\.\w]+)/?$', parsed_url.path)
if not result:
result = re.match('^/r/([-\@\+\.\w]+)/([-\@\+\.\w]+)/?$', parsed_url.path)
if result:
(owner, workspace_name) = result.groups()
else:
raise ValueError('%s is not a valid Floobits URL' % workspace_url)
return {
'host': parsed_url.hostname,
'owner': owner,
'port': port,
'workspace': workspace_name,
'secure': secure,
}
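# Example (illustrative): parse_url('https://floobits.com/owner/workspace') returns
#   {'host': 'floobits.com', 'owner': 'owner', 'port': 3448,
#    'workspace': 'workspace', 'secure': True}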
########NEW FILE########
__FILENAME__ = version
FLOOTTY_VERSION = '2.1.4'
########NEW FILE########
| [
"[email protected]"
] | |
ee27313bde085575df70e1d42550c376748fe931 | 08a9dc04e6defa9dc9378bfbfbe0b6185af6a86a | /manager/views.py | 78b92fee93ead9c43d6d958d58f90642c7277c7f | [] | no_license | Felicity-jt/50.008-Project-1 | 8ecc63d2302b2eaa4060f4c900d7fed2e958927c | 960b5e57a39bfda1c31653798c23ddc051a2ff19 | refs/heads/master | 2021-08-24T00:40:27.886634 | 2017-12-07T09:14:12 | 2017-12-07T09:14:12 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,191 | py | from json import loads
from django.http import Http404
from django.shortcuts import render
from django.core.exceptions import PermissionDenied
from django.views.decorators.http import require_POST
from common.db import sql, page
from common.utils import pagination
from common.messages import NOT_STAFF
from common.decorators import json_response
@require_POST
@json_response
def new(request):
"""Add item or entity into inventory."""
if not request.user.is_staff:
raise PermissionDenied(NOT_STAFF)
s = """INSERT INTO item (id, name)
VALUES (DEFAULT, %s)"""
try:
rq = loads(request.body)
# sanitize before inserting
values = (rq['name'],)
except (ValueError, KeyError):
return None
sql(s, *values)
return {}
@json_response
def stock(request, item_id):
"""Get or update current stock."""
if not request.user.is_staff:
raise PermissionDenied(NOT_STAFF)
q = 'SELECT id, price, quantity FROM item WHERE id = %s'
if request.method == 'POST':
        # update quantity from post data
s = """UPDATE item SET
quantity = %s
WHERE id = %s"""
try:
rq = loads(request.body)
# sanitize before inserting
values = (int(rq['quantity']),)
except (ValueError, KeyError):
return None
sql(s, *values, item_id)
try:
r = sql(q, item_id)[0]
except IndexError:
raise Http404
return {
'id': r[0],
'price': r[1],
'quantity': r[2],
}
@json_response
def stats(request, entity, year, month):
"""Get stats for entity."""
if not request.user.is_staff:
raise PermissionDenied(NOT_STAFF)
if entity not in ('item', 'company', 'creator'):
raise Http404
q = """SELECT item_id, SUM(quantity) AS total FROM purchase_item
INNER JOIN purchase p ON p.id = purchase_item.purchase_id
WHERE YEAR(p.made_on) = %s AND MONTH(p.made_on) = %s
GROUP BY item_id"""
pg = pagination(request)
pg['sort'].append('-total')
return sql(q + page(**pg), year, month)
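# Example (assumed URL routing): GET .../stats/item/2017/5 as a staff user returns rows of
# (item_id, total quantity purchased) for May 2017, sorted by the '-total' key.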
| [
"[email protected]"
] | |
39ce07857213f8a281675528cad52ce7943c5bf1 | 2bcf18252fa9144ece3e824834ac0e117ad0bdf3 | /zpt/trunk/site-packages/zpt/_pytz/zoneinfo/US/Indiana_minus_Starke.py | f06a4f85e29494d5c49f070ed6153788987fe72d | [
"MIT",
"ZPL-2.1"
] | permissive | chadwhitacre/public | 32f65ba8e35d38c69ed4d0edd333283a239c5e1d | 0c67fd7ec8bce1d8c56c7ff3506f31a99362b502 | refs/heads/master | 2021-05-10T14:32:03.016683 | 2010-05-13T18:24:20 | 2010-05-13T18:24:20 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,743 | py | '''tzinfo timezone information for US/Indiana_minus_Starke.'''
from zpt._pytz.tzinfo import DstTzInfo
from zpt._pytz.tzinfo import memorized_datetime as d
from zpt._pytz.tzinfo import memorized_ttinfo as i
class Indiana_minus_Starke(DstTzInfo):
'''US/Indiana_minus_Starke timezone definition. See datetime.tzinfo for details'''
zone = 'US/Indiana_minus_Starke'
_utc_transition_times = [
d(1,1,1,0,0,0),
d(1918,3,31,8,0,0),
d(1918,10,27,7,0,0),
d(1919,3,30,8,0,0),
d(1919,10,26,7,0,0),
d(1942,2,9,8,0,0),
d(1945,8,14,23,0,0),
d(1945,9,30,7,0,0),
d(1947,4,27,8,0,0),
d(1947,9,28,7,0,0),
d(1948,4,25,8,0,0),
d(1948,9,26,7,0,0),
d(1949,4,24,8,0,0),
d(1949,9,25,7,0,0),
d(1950,4,30,8,0,0),
d(1950,9,24,7,0,0),
d(1951,4,29,8,0,0),
d(1951,9,30,7,0,0),
d(1952,4,27,8,0,0),
d(1952,9,28,7,0,0),
d(1953,4,26,8,0,0),
d(1953,9,27,7,0,0),
d(1954,4,25,8,0,0),
d(1954,9,26,7,0,0),
d(1955,4,24,8,0,0),
d(1955,10,30,7,0,0),
d(1956,4,29,8,0,0),
d(1956,10,28,7,0,0),
d(1957,4,28,8,0,0),
d(1957,9,29,7,0,0),
d(1958,4,27,8,0,0),
d(1958,9,28,7,0,0),
d(1959,4,26,8,0,0),
d(1959,10,25,7,0,0),
d(1960,4,24,8,0,0),
d(1960,10,30,7,0,0),
d(1961,4,30,8,0,0),
d(1961,10,29,7,0,0),
d(1962,4,29,8,0,0),
d(1963,10,27,7,0,0),
d(1967,4,30,8,0,0),
d(1967,10,29,7,0,0),
d(1968,4,28,8,0,0),
d(1968,10,27,7,0,0),
d(1969,4,27,8,0,0),
d(1969,10,26,7,0,0),
d(1970,4,26,8,0,0),
d(1970,10,25,7,0,0),
d(1971,4,25,8,0,0),
d(1971,10,31,7,0,0),
d(1972,4,30,8,0,0),
d(1972,10,29,7,0,0),
d(1973,4,29,8,0,0),
d(1973,10,28,7,0,0),
d(1974,1,6,8,0,0),
d(1974,10,27,7,0,0),
d(1975,2,23,8,0,0),
d(1975,10,26,7,0,0),
d(1976,4,25,8,0,0),
d(1976,10,31,7,0,0),
d(1977,4,24,8,0,0),
d(1977,10,30,7,0,0),
d(1978,4,30,8,0,0),
d(1978,10,29,7,0,0),
d(1979,4,29,8,0,0),
d(1979,10,28,7,0,0),
d(1980,4,27,8,0,0),
d(1980,10,26,7,0,0),
d(1981,4,26,8,0,0),
d(1981,10,25,7,0,0),
d(1982,4,25,8,0,0),
d(1982,10,31,7,0,0),
d(1983,4,24,8,0,0),
d(1983,10,30,7,0,0),
d(1984,4,29,8,0,0),
d(1984,10,28,7,0,0),
d(1985,4,28,8,0,0),
d(1985,10,27,7,0,0),
d(1986,4,27,8,0,0),
d(1986,10,26,7,0,0),
d(1987,4,5,8,0,0),
d(1987,10,25,7,0,0),
d(1988,4,3,8,0,0),
d(1988,10,30,7,0,0),
d(1989,4,2,8,0,0),
d(1989,10,29,7,0,0),
d(1990,4,1,8,0,0),
d(1990,10,28,7,0,0),
d(1991,4,7,8,0,0),
d(1991,10,27,7,0,0),
d(2006,4,2,7,0,0),
d(2006,10,29,6,0,0),
d(2007,3,11,7,0,0),
d(2007,11,4,6,0,0),
d(2008,3,9,7,0,0),
d(2008,11,2,6,0,0),
d(2009,3,8,7,0,0),
d(2009,11,1,6,0,0),
d(2010,3,14,7,0,0),
d(2010,11,7,6,0,0),
d(2011,3,13,7,0,0),
d(2011,11,6,6,0,0),
d(2012,3,11,7,0,0),
d(2012,11,4,6,0,0),
d(2013,3,10,7,0,0),
d(2013,11,3,6,0,0),
d(2014,3,9,7,0,0),
d(2014,11,2,6,0,0),
d(2015,3,8,7,0,0),
d(2015,11,1,6,0,0),
d(2016,3,13,7,0,0),
d(2016,11,6,6,0,0),
d(2017,3,12,7,0,0),
d(2017,11,5,6,0,0),
d(2018,3,11,7,0,0),
d(2018,11,4,6,0,0),
d(2019,3,10,7,0,0),
d(2019,11,3,6,0,0),
d(2020,3,8,7,0,0),
d(2020,11,1,6,0,0),
d(2021,3,14,7,0,0),
d(2021,11,7,6,0,0),
d(2022,3,13,7,0,0),
d(2022,11,6,6,0,0),
d(2023,3,12,7,0,0),
d(2023,11,5,6,0,0),
d(2024,3,10,7,0,0),
d(2024,11,3,6,0,0),
d(2025,3,9,7,0,0),
d(2025,11,2,6,0,0),
d(2026,3,8,7,0,0),
d(2026,11,1,6,0,0),
d(2027,3,14,7,0,0),
d(2027,11,7,6,0,0),
d(2028,3,12,7,0,0),
d(2028,11,5,6,0,0),
d(2029,3,11,7,0,0),
d(2029,11,4,6,0,0),
d(2030,3,10,7,0,0),
d(2030,11,3,6,0,0),
d(2031,3,9,7,0,0),
d(2031,11,2,6,0,0),
d(2032,3,14,7,0,0),
d(2032,11,7,6,0,0),
d(2033,3,13,7,0,0),
d(2033,11,6,6,0,0),
d(2034,3,12,7,0,0),
d(2034,11,5,6,0,0),
d(2035,3,11,7,0,0),
d(2035,11,4,6,0,0),
d(2036,3,9,7,0,0),
d(2036,11,2,6,0,0),
d(2037,3,8,7,0,0),
d(2037,11,1,6,0,0),
]
_transition_info = [
i(-21600,0,'CST'),
i(-18000,3600,'CDT'),
i(-21600,0,'CST'),
i(-18000,3600,'CDT'),
i(-21600,0,'CST'),
i(-18000,3600,'CWT'),
i(-18000,3600,'CPT'),
i(-21600,0,'CST'),
i(-18000,3600,'CDT'),
i(-21600,0,'CST'),
i(-18000,3600,'CDT'),
i(-21600,0,'CST'),
i(-18000,3600,'CDT'),
i(-21600,0,'CST'),
i(-18000,3600,'CDT'),
i(-21600,0,'CST'),
i(-18000,3600,'CDT'),
i(-21600,0,'CST'),
i(-18000,3600,'CDT'),
i(-21600,0,'CST'),
i(-18000,3600,'CDT'),
i(-21600,0,'CST'),
i(-18000,3600,'CDT'),
i(-21600,0,'CST'),
i(-18000,3600,'CDT'),
i(-21600,0,'CST'),
i(-18000,3600,'CDT'),
i(-21600,0,'CST'),
i(-18000,3600,'CDT'),
i(-21600,0,'CST'),
i(-18000,3600,'CDT'),
i(-21600,0,'CST'),
i(-18000,3600,'CDT'),
i(-21600,0,'CST'),
i(-18000,3600,'CDT'),
i(-21600,0,'CST'),
i(-18000,3600,'CDT'),
i(-21600,0,'CST'),
i(-18000,0,'EST'),
i(-21600,0,'CST'),
i(-18000,3600,'CDT'),
i(-21600,0,'CST'),
i(-18000,3600,'CDT'),
i(-21600,0,'CST'),
i(-18000,3600,'CDT'),
i(-21600,0,'CST'),
i(-18000,3600,'CDT'),
i(-21600,0,'CST'),
i(-18000,3600,'CDT'),
i(-21600,0,'CST'),
i(-18000,3600,'CDT'),
i(-21600,0,'CST'),
i(-18000,3600,'CDT'),
i(-21600,0,'CST'),
i(-18000,3600,'CDT'),
i(-21600,0,'CST'),
i(-18000,3600,'CDT'),
i(-21600,0,'CST'),
i(-18000,3600,'CDT'),
i(-21600,0,'CST'),
i(-18000,3600,'CDT'),
i(-21600,0,'CST'),
i(-18000,3600,'CDT'),
i(-21600,0,'CST'),
i(-18000,3600,'CDT'),
i(-21600,0,'CST'),
i(-18000,3600,'CDT'),
i(-21600,0,'CST'),
i(-18000,3600,'CDT'),
i(-21600,0,'CST'),
i(-18000,3600,'CDT'),
i(-21600,0,'CST'),
i(-18000,3600,'CDT'),
i(-21600,0,'CST'),
i(-18000,3600,'CDT'),
i(-21600,0,'CST'),
i(-18000,3600,'CDT'),
i(-21600,0,'CST'),
i(-18000,3600,'CDT'),
i(-21600,0,'CST'),
i(-18000,3600,'CDT'),
i(-21600,0,'CST'),
i(-18000,3600,'CDT'),
i(-21600,0,'CST'),
i(-18000,3600,'CDT'),
i(-21600,0,'CST'),
i(-18000,3600,'CDT'),
i(-21600,0,'CST'),
i(-18000,3600,'CDT'),
i(-18000,0,'EST'),
i(-14400,3600,'EDT'),
i(-18000,0,'EST'),
i(-14400,3600,'EDT'),
i(-18000,0,'EST'),
i(-14400,3600,'EDT'),
i(-18000,0,'EST'),
i(-14400,3600,'EDT'),
i(-18000,0,'EST'),
i(-14400,3600,'EDT'),
i(-18000,0,'EST'),
i(-14400,3600,'EDT'),
i(-18000,0,'EST'),
i(-14400,3600,'EDT'),
i(-18000,0,'EST'),
i(-14400,3600,'EDT'),
i(-18000,0,'EST'),
i(-14400,3600,'EDT'),
i(-18000,0,'EST'),
i(-14400,3600,'EDT'),
i(-18000,0,'EST'),
i(-14400,3600,'EDT'),
i(-18000,0,'EST'),
i(-14400,3600,'EDT'),
i(-18000,0,'EST'),
i(-14400,3600,'EDT'),
i(-18000,0,'EST'),
i(-14400,3600,'EDT'),
i(-18000,0,'EST'),
i(-14400,3600,'EDT'),
i(-18000,0,'EST'),
i(-14400,3600,'EDT'),
i(-18000,0,'EST'),
i(-14400,3600,'EDT'),
i(-18000,0,'EST'),
i(-14400,3600,'EDT'),
i(-18000,0,'EST'),
i(-14400,3600,'EDT'),
i(-18000,0,'EST'),
i(-14400,3600,'EDT'),
i(-18000,0,'EST'),
i(-14400,3600,'EDT'),
i(-18000,0,'EST'),
i(-14400,3600,'EDT'),
i(-18000,0,'EST'),
i(-14400,3600,'EDT'),
i(-18000,0,'EST'),
i(-14400,3600,'EDT'),
i(-18000,0,'EST'),
i(-14400,3600,'EDT'),
i(-18000,0,'EST'),
i(-14400,3600,'EDT'),
i(-18000,0,'EST'),
i(-14400,3600,'EDT'),
i(-18000,0,'EST'),
i(-14400,3600,'EDT'),
i(-18000,0,'EST'),
i(-14400,3600,'EDT'),
i(-18000,0,'EST'),
i(-14400,3600,'EDT'),
i(-18000,0,'EST'),
i(-14400,3600,'EDT'),
i(-18000,0,'EST'),
i(-14400,3600,'EDT'),
i(-18000,0,'EST'),
]
Indiana_minus_Starke = Indiana_minus_Starke()
| [
"[email protected]"
] | |
511fe8b79650e5129209a33e6c7d768af423c6e6 | 2a1f4c4900693c093b2fcf4f84efa60650ef1424 | /py/dome/backend/apps.py | fc8e9e1db58cfc9dbc955eb7df36461f862fe2b5 | [
"BSD-3-Clause"
] | permissive | bridder/factory | b925f494303728fa95017d1ba3ff40ac5cf6a2fd | a1b0fccd68987d8cd9c89710adc3c04b868347ec | refs/heads/master | 2023-08-10T18:51:08.988858 | 2021-09-21T03:25:28 | 2021-09-21T03:25:28 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 294 | py | # Copyright 2016 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from __future__ import unicode_literals
from django.apps import AppConfig
class BackendConfig(AppConfig):
name = 'backend'
| [
"[email protected]"
] | |
bfa4051b7daa99e35be4c69d94d185b37ba84f1b | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_035/ch37_2020_03_25_14_04_04_120072.py | a165e2f3c23563f7b30d6684819d8aca366bc2cd | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 174 | py | Senha = True
resposta = input("Qual é a senha")
while Senha:
if resposta=="desisto":
Senha = False
else:
Senha = True
return resposta
print("Você acertou a senha!") | [
"[email protected]"
] | |
e56f0bd33da3d74267fd6ab2971ead15aa9263b8 | 1c488f486d14c19e19af1a46474af224498be193 | /experimental/serengeti/blankIBCC.py | 649a35a733279dc7605d90eb8296b4e245101794 | [
"Apache-2.0"
] | permissive | JiaminXuan/aggregation | fc2117494372428adeed85a9a413e2ff47244664 | 9a7ecbc2d4b143a73e48b1826b3727b6976fa770 | refs/heads/master | 2020-12-11T01:49:42.977664 | 2015-05-22T16:21:15 | 2015-05-22T16:21:15 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,087 | py | #!/usr/bin/env python
__author__ = 'greghines'
import numpy as np
import matplotlib.pyplot as plt
import csv
import sys
import os
import pymongo
import matplotlib.cbook as cbook
sys.path.append("/home/greg/github/pyIBCC/python")
import ibcc
client = pymongo.MongoClient()
db = client['serengeti_2014-07-28']
collection = db["serengeti_classifications"]
collection2 = db["serengeti_subjects"]
subjects = []
users = []
classifications = []
class_count = {}
blank_count = {}
retiredBlanks = {}
with open("/home/greg/Databases/serengeti_ibcc.py","wb") as f:
f.write("import numpy as np\n")
f.write("scores = np.array([0,1])\n")
f.write("nScores = len(scores)\n")
f.write("nClasses = 2\n")
f.write("inputFile = \"/home/greg/Databases/serengeti_ibcc.csv\"\n")
f.write("outputFile = \"/home/greg/Databases/serengeti_ibcc.out\"\n")
f.write("confMatFile = \"/home/greg/Databases/serengeti_ibcc.mat\"\n")
f.write("nu0 = np.array([30,70])\n")
f.write("alpha0 = np.array([[3, 1], [1,3]])\n")
with open("/home/greg/Databases/serengeti_ibcc.csv","wb") as f:
f.write("a,b,c\n")
import datetime
def update(individual_classifications):
#start by removing all temp files
try:
os.remove("/home/greg/Databases/serengeti_ibcc.out")
except OSError:
pass
try:
os.remove("/home/greg/Databases/serengeti_ibcc.mat")
except OSError:
pass
try:
os.remove("/home/greg/Databases/serengeti_ibcc.csv.dat")
except OSError:
pass
with open("/home/greg/Databases/serengeti_ibcc.csv","a") as f:
for u, s, b in individual_classifications:
f.write(str(u)+","+str(s)+","+str(b)+"\n")
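    # each appended row is "user_index,subject_index,is_blank", e.g. "12,345,1"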
print datetime.datetime.time(datetime.datetime.now())
ibcc.runIbcc("/home/greg/Databases/serengeti_ibcc.py")
print datetime.datetime.time(datetime.datetime.now())
def analyze():
with open("/home/greg/Databases/serengeti_ibcc.out","rb") as f:
reader = csv.reader(f,delimiter=" ")
for subject_index,p0,p1 in reader:
subject_index = int(float(subject_index))
subject_id = subjects[subject_index]
c = class_count[subject_id]
if (float(p1) >= 0.995) and (c>= 2):
if not(subject_id in retiredBlanks):
retiredBlanks[subject_id] = c
#print str(c) + " :: " + str(p1)
i = 0
unknownUsers = []
for r in collection.find({"tutorial": {"$ne": True}}):
try:
user_name = r["user_name"]
except KeyError:
unknownUsers.append(r["user_ip"])
continue
zooniverse_id = r["subjects"][0]["zooniverse_id"]
if zooniverse_id in retiredBlanks:
continue
if ((i%10000) == 0) and (i > 0):
print i
update(classifications)
classifications = []
analyze()
if not(user_name in users):
users.append(user_name)
if not(zooniverse_id in subjects):
subjects.append(zooniverse_id)
class_count[zooniverse_id] = 0
blank_count[zooniverse_id] = 0
i += 1
user_index = users.index(user_name)
subject_index = subjects.index(zooniverse_id)
class_count[zooniverse_id] += 1
a = r["annotations"]
if not("nothing" in a[-1]):
assert('species' in a[0])
blank = 0
else:
blank = 1
blank_count[zooniverse_id] += 1
classifications.append((user_index,subject_index,blank))
if i >= 300000:
break
#print len(unknownUsers)
#print len(list(set(unknownUsers)))
tBlank = 0
fBlank = 0
speciesList = ['blank','elephant','zebra','warthog','impala','buffalo','wildebeest','gazelleThomsons','dikDik','giraffe','gazelleGrants','lionFemale','baboon','hippopotamus','ostrich','human','otherBird','hartebeest','secretaryBird','hyenaSpotted','mongoose','reedbuck','topi','guineaFowl','eland','aardvark','lionMale','porcupine','koriBustard','bushbuck','hyenaStriped','jackal','cheetah','waterbuck','leopard','reptiles','serval','aardwolf','vervetMonkey','rodents','honeyBadger','batEaredFox','rhinoceros','civet','genet','zorilla','hare','caracal','wildcat']
errors = {s.lower():0 for s in speciesList}
for zooniverse_id in retiredBlanks:
r = collection2.find_one({"zooniverse_id" : zooniverse_id})
retire_reason = r["metadata"]["retire_reason"]
if retire_reason in ["blank", "blank_consensus"]:
tBlank += 1
else:
fBlank += 1
print zooniverse_id + " :: " + str(r["location"]["standard"][0])
f = max(r["metadata"]["counters"].items(), key = lambda x:x[1])
print f
try:
errors[f[0].lower()] += 1
print str(blank_count[zooniverse_id]) + "/" + str(class_count[zooniverse_id])
except KeyError:
print "---***"
#print str(r["metadata"]["counters"].values())
print "==---"
print tBlank
print fBlank
print np.mean(retiredBlanks.values())
print np.median(retiredBlanks.values())
print "===---"
for s in speciesList:
if errors[s.lower()] != 0:
print s + " - " + str(errors[s.lower()]) | [
"[email protected]"
] | |
52b11a09076f3904dc2f45e1e998edf62a885d87 | aae0432eede626a0ac39ff6d81234e82f8d678c2 | /leetcode/algorithm/4.median-of-two-sorted-arrays.py | 63670a63bf49ee10613895df33ff3b9ae3388fc8 | [] | no_license | KIDJourney/algorithm | 81c00186a6dfdc278df513d25fad75c78eb1bf68 | e1cf8e12050b9f1419a734ff93f9c626fc10bfe0 | refs/heads/master | 2022-11-24T09:30:16.692316 | 2022-11-06T09:33:51 | 2022-11-06T09:33:51 | 40,428,125 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,035 | py | #
# @lc app=leetcode id=4 lang=python3
#
# [4] Median of Two Sorted Arrays
#
# @lc code=start
class Solution:
def findMedianSortedArrays(self, nums1, nums2) -> float:
return self.easy(nums1, nums2)
def easy(self, nums1, nums2):
result = []
idx1, idx2 = 0, 0
while True:
if idx1 == len(nums1) and idx2 == (len(nums2)):
break
if idx1 == len(nums1):
result.append(nums2[idx2])
idx2 += 1
continue
if idx2 == len(nums2):
result.append(nums1[idx1])
idx1 += 1
continue
if nums1[idx1] > nums2[idx2]:
result.append(nums2[idx2])
idx2 += 1
else:
result.append(nums1[idx1])
idx1 += 1
mid = len(result) // 2
if len(result) % 2 == 0:
return (result[mid] + result[mid-1]) / 2.0
else:
return (result[mid])
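# Examples:
#   Solution().findMedianSortedArrays([1, 3], [2])    -> 2
#   Solution().findMedianSortedArrays([1, 2], [3, 4]) -> 2.5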
# @lc code=end
| [
"[email protected]"
] | |
3fd8971af0057cfe6f9120d8654640df8c971099 | 99e76e9e4c8031418c4c50217b48adf1d880cf2f | /setup.py | 6974fdc5b21fd1b544eac798d4363569ad4198d7 | [
"MIT"
] | permissive | grow/grow-ext-responsive-styles | d75a5abb070613641e3da9f3f4cf7dc07e88c51f | bb3d8f68edc1f3e1bdf508bb5df8d5b296574e9b | refs/heads/master | 2021-01-03T14:04:15.882718 | 2020-05-20T20:38:09 | 2020-05-20T20:38:09 | 240,096,948 | 0 | 0 | MIT | 2020-05-20T20:34:58 | 2020-02-12T19:27:42 | HTML | UTF-8 | Python | false | false | 349 | py | from setuptools import setup
setup(
name='grow-ext-responsive-styles',
version='1.0.0',
zip_safe=False,
license='MIT',
author='Grow Authors',
author_email='[email protected]',
include_package_data=True,
packages=[
'responsive_styles',
],
package_data={
'responsive_styles': ['*.html'],
},
)
| [
"[email protected]"
] | |
4f21bdabf36e65773d6c9289dad471ce6aa16e31 | 178ae62be7de20a50f96361e80bdcff5a5493ae2 | /koica/templatetags/koica.py | 36b3a706fcb6f684e4f9896f13b5cc8b25353d75 | [
"MIT"
] | permissive | synw/django-koica | a043800c15fad69f2024557e62fcf0ac4808ffae | d8b1c9fa70c428f0aa0db0c523524e9d2ef27377 | refs/heads/master | 2021-01-10T03:15:24.570691 | 2015-12-09T14:55:29 | 2015-12-09T14:55:29 | 46,188,691 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 200 | py | from django import template
from koica.utils import sanitize_html
register = template.Library()
@register.filter(is_safe=True)
def remove_pre(value):
return sanitize_html(value, remove_pre=True) | [
"[email protected]"
] | |
8e86bc7463a15ee8ba540cebbdc6dbebe01e0474 | 461d7bf019b9c7a90d15b3de05891291539933c9 | /bip_utils/bip39/bip39_entropy_generator.py | 47c75cf8f3c76ff3b2cb1f678605ec4780e1d6e9 | [
"MIT"
] | permissive | renauddahou/bip_utils | 5c21503c82644b57ddf56735841a21b6306a95fc | b04f9ef493a5b57983412c0ce460a9ca05ee1f50 | refs/heads/master | 2023-07-16T05:08:45.042084 | 2021-08-19T09:33:03 | 2021-08-19T09:33:03 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,419 | py | # Copyright (c) 2021 Emanuele Bellocchia
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
# Imports
import os
from enum import IntEnum, unique
from typing import List, Union
@unique
class Bip39EntropyBitLen(IntEnum):
""" Enumerative for BIP-0039 entropy bit lengths. """
BIT_LEN_128 = 128,
BIT_LEN_160 = 160,
BIT_LEN_192 = 192,
BIT_LEN_224 = 224,
BIT_LEN_256 = 256,
class Bip39EntropyGeneratorConst:
""" Class container for BIP39 entropy generator constants. """
# Accepted entropy lengths in bit
ENTROPY_BIT_LEN: List[Bip39EntropyBitLen] = [
Bip39EntropyBitLen.BIT_LEN_128,
Bip39EntropyBitLen.BIT_LEN_160,
Bip39EntropyBitLen.BIT_LEN_192,
Bip39EntropyBitLen.BIT_LEN_224,
Bip39EntropyBitLen.BIT_LEN_256,
]
class Bip39EntropyGenerator:
""" Entropy generator class. It generates random entropy bytes with the specified length. """
def __init__(self,
bits_len: Union[int, Bip39EntropyBitLen]) -> None:
""" Construct class by specifying the bits length.
Args:
bits_len (int or Bip39EntropyBitLen): Entropy length in bits
Raises:
ValueError: If the bit length is not valid
"""
if not self.IsValidEntropyBitLen(bits_len):
raise ValueError("Entropy bit length is not valid (%d)" % bits_len)
self.m_bits_len = bits_len
def Generate(self) -> bytes:
""" Generate random entropy bytes with the length specified during construction.
Returns:
bytes: Generated entropy bytes
"""
return os.urandom(self.m_bits_len // 8)
@staticmethod
def IsValidEntropyBitLen(bits_len: Union[int, Bip39EntropyBitLen]) -> bool:
""" Get if the specified entropy bit length is valid.
Args:
bits_len (int or Bip39EntropyBitLen): Entropy length in bits
Returns:
bool: True if valid, false otherwise
"""
return bits_len in Bip39EntropyGeneratorConst.ENTROPY_BIT_LEN
@staticmethod
def IsValidEntropyByteLen(bytes_len: int) -> bool:
""" Get if the specified entropy byte length is valid.
Args:
bytes_len (int): Entropy length in bytes
Returns:
bool: True if valid, false otherwise
"""
return Bip39EntropyGenerator.IsValidEntropyBitLen(bytes_len * 8)
| [
"[email protected]"
] | |
70b411ba66521bde662ff464e6ab782442fa0581 | 1508f7da93705839660e4fdfb87df7a9664bf087 | /a10API/a10API/flask/bin/migrate | bff34539b04e8d820b8b866d8ef3ee3bbc9995fb | [] | no_license | Younglu125/A10_Networks | 1a1ecebb28dd225f6a1f901a7c28350300df356d | 78a177ae4c8638d58dc873e4b1c589a1d5aaa717 | refs/heads/master | 2020-06-17T00:35:30.325740 | 2016-03-21T18:17:30 | 2016-03-21T18:17:30 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 354 | #!/home/echou/a10API/flask/bin/python
# EASY-INSTALL-ENTRY-SCRIPT: 'sqlalchemy-migrate==0.7.2','console_scripts','migrate'
__requires__ = 'sqlalchemy-migrate==0.7.2'
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.exit(
load_entry_point('sqlalchemy-migrate==0.7.2', 'console_scripts', 'migrate')()
)
| [
"[email protected]"
] | ||
3ad99e3d7e9841da8f65b2003210f661dc96df4a | 0296bc69a0d9608ed826ad7a719395f019df098f | /Tools/Compare_images.py | f4ba586d2dfa3fcae52e277676f2b4a82ffdf59a | [] | no_license | jcn16/Blender_HDRmap_render | c0486a77e04c5b41a6f75f123dbdb3d10c682367 | 50e6cdb79fef83081de9830e7105dd425a235a9e | refs/heads/main | 2023-07-19T22:22:53.622052 | 2021-08-20T06:29:10 | 2021-08-20T06:29:10 | 377,757,283 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,771 | py | import cv2
import numpy as np
def tianchong(img):
m = img.shape[0]
n = img.shape[1]
append = int(np.ceil(abs(m - n) / 2))
if m > n:
constant = cv2.copyMakeBorder(img, 0, 0, append, append, cv2.BORDER_CONSTANT, value=(0, 0, 0))
else:
constant = cv2.copyMakeBorder(img, append, append, 0, 0, cv2.BORDER_CONSTANT, value=(0, 0, 0))
constant = cv2.resize(constant, (512, 512))
return constant
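# Example (illustrative): an input with height 300 and width 400 gets 50px of black
# padding on top and bottom (becoming 400x400), then is resized to 512x512.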
def compare():
image_1=cv2.imread('/media/jcn/新加卷/JCN/JCN_test_datset/RayTracing/Train_HDR_512/126111539900259-h/0_-32_1_dikhololo_sunset_8k_324/raytracing.png')
mask_1=cv2.imread('/media/jcn/新加卷/JCN/JCN_test_datset/RayTracing/Train_HDR_512/126111539900259-h/0_-32_1_dikhololo_sunset_8k_324/alpha.png')
image_1=tianchong(image_1)
mask_1=tianchong(mask_1)
image_2=cv2.imread('/media/jcn/新加卷/JCN/JCN_test_datset/RayTracing/Train_HDR_512/126111539900259-h/0_-32_1_dikhololo_sunset_8k_324/shading.png')
image_1=image_1/255.0*mask_1/255.0
image_2=image_2/255.0*mask_1/255.0
cv2.imshow('image_1',np.asarray(image_1*255,dtype=np.uint8))
cv2.imshow('image_2',np.asarray(image_2*255,dtype=np.uint8))
res=np.asarray(np.clip((image_1-image_2)*255,0,255),dtype=np.uint8)
cv2.imshow('res',res)
cv2.waitKey(0)
def composite():
shading=cv2.imread('/media/jcn/新加卷/JCN/RelightHDR/TEST/images_high_res/10/raytracing.png')
albedo=cv2.imread('/home/jcn/桌面/Oppo/Results_albedo/10/p_albedo.png')
mask=cv2.imread('/home/jcn/桌面/Oppo/Results_albedo/10/gt_mask.png')
relight=albedo/255.0*shading/255.0*mask/255.0
relight=np.asarray(relight*255,dtype=np.uint8)
cv2.imshow('relight',relight)
cv2.waitKey(0)
if __name__=='__main__':
compare()
| [
"[email protected]"
] | |
f7675475bf4180ae4b05a6af1aebe4521077a136 | e131e752d826ae698e12e7bc0583362741f9d942 | /AWS.py | c886890f56cf208b48066e6c151d54611fc0b574 | [] | no_license | abalberchak/TouchFace | ba30565be91b848126524aa47377789253370e04 | d093ece8890b68c72e0855a024d908105df99b94 | refs/heads/master | 2021-01-11T01:43:35.067808 | 2016-09-29T03:41:13 | 2016-09-29T03:41:13 | 69,530,129 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,318 | py | #----------------------------------------- Intent Schema Below:------------------------------
{
"intents": [
{
"intent": "AMAZON.ResumeIntent"
},
{
"intent": "AMAZON.PauseIntent"
},
{
"intent": "DojoInfoIntent"
},
{
"intent": "AMAZON.HelpIntent"
},
{
"intent": "AMAZON.StopIntent"
},
{
"intent": "TextBrendenIntent"
},
{
"intent": "GetTouchFaceIntent"
},
{
"intent": "DojoBrendenIntent"
},
{
"intent": "AskBrendan"
},
{
"intent": "twilioIntent"
},
{
"intent": "GroupTextIntent",
"slots": [
{
"name": "Name",
"type": "MEMBERS"
}
]
}
]
}
#----------------------------------------- Utterances Below:------------------------------
DojoInfoIntent what is the coding dojo
DojoInfoIntent tell me about the coding dojo
TextBrendenIntent Text Brendan
GetTouchFaceIntent Tell what does Brenden say
DojoBrendenIntent who is brenden
AskBrendan what is touchface
twilioIntent hi annet
GroupTextIntent text {Name}
| [
"[email protected]"
] | |
af935ba661ffbdb6c3921e41c3c65c2ba9235ccd | 843d9f17acea5cfdcc5882cf8b46da82160c251c | /adafruit_stepper.py | 8e9319c17ea13b32312acbe50d018791ab2ea40a | [] | no_license | gunny26/raspberry | 7c1da63785c86412af9fa467ea231b19a97f4384 | e4eb0d2f537b319d41b6c50b59e69fb297c62d25 | refs/heads/master | 2016-09-06T14:02:30.122102 | 2014-01-29T16:31:08 | 2014-01-29T16:31:08 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,335 | py | import RPi.GPIO as GPIO
import time
GPIO.cleanup()
GPIO.setmode(GPIO.BCM)
enable_pin = 18
coil_A_1_pin = 4
coil_A_2_pin = 17
coil_B_1_pin = 23
coil_B_2_pin = 24
GPIO.setup(enable_pin, GPIO.OUT)
GPIO.setup(coil_A_1_pin, GPIO.OUT)
GPIO.setup(coil_A_2_pin, GPIO.OUT)
GPIO.setup(coil_B_1_pin, GPIO.OUT)
GPIO.setup(coil_B_2_pin, GPIO.OUT)
GPIO.output(enable_pin, 1)
def forward(delay, steps):
for i in range(0, steps):
setStep(1, 0, 1, 0)
time.sleep(delay)
setStep(0, 1, 1, 0)
time.sleep(delay)
setStep(0, 1, 0, 1)
time.sleep(delay)
setStep(1, 0, 0, 1)
time.sleep(delay)
def backwards(delay, steps):
for i in range(0, steps):
setStep(1, 0, 0, 1)
time.sleep(delay)
setStep(0, 1, 0, 1)
time.sleep(delay)
setStep(0, 1, 1, 0)
time.sleep(delay)
setStep(1, 0, 1, 0)
time.sleep(delay)
def setStep(w1, w2, w3, w4):
GPIO.output(coil_A_1_pin, w1)
GPIO.output(coil_A_2_pin, w2)
GPIO.output(coil_B_1_pin, w3)
GPIO.output(coil_B_2_pin, w4)
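# The four setStep() patterns in forward()/backwards() energize the two coils in the
# classic full-step sequence (A+B+, A-B+, A-B-, A+B-); stepping through them in
# reverse order turns the motor the other way.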
while True:
try:
delay = raw_input("Delay between steps (milliseconds)?")
steps = raw_input("How many steps forward? ")
forward(int(delay) / 1000.0, int(steps))
steps = raw_input("How many steps backwards? ")
backwards(int(delay) / 1000.0, int(steps))
except KeyboardInterrupt:
GPIO.cleanup()
| [
"[email protected]"
] | |
a7072cf5db1b5527272336c6191bab4e1770b928 | c840f190b3540bf212de2c70563e57da278fa9cb | /hyacinth.py | 055e735da50162825883a5c29dfd69fcd0f7242d | [] | no_license | edelooff/hyacinth | b768a871d476dd120f7d2d1acb039a6a9ebf2e19 | 0a6dd15fa1b1357afa566f924ad27b744582464b | refs/heads/master | 2022-04-16T13:24:18.986246 | 2020-04-01T08:15:36 | 2020-04-01T08:15:36 | 251,756,604 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,538 | py | from collections import (
Counter,
defaultdict)
import random
import re
import sys
DESIGN = re.compile(r'''
(?P<design>[A-Z])
(?P<size>[SL])
(?P<flowers>(:?\d+[a-z])*) # The specification is fuzzy on 1+ or 0+
(?P<total>\d+)''', re.VERBOSE)
DESIGN_FLOWER = re.compile(r'''
(?P<count>\d+)
(?P<species>[a-z])''', re.VERBOSE)
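# Example (illustrative): "AL10a15b5c30" parses to design 'A', size 'L',
# required flowers {'a': 10, 'b': 15, 'c': 5} and a bouquet total of 30 flowers.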
class Pool:
def __init__(self):
self.common_species = set()
self.designers = []
self.flowers = Counter()
def add_designer(self, designer):
"""Adds a BouquetDesigner for the pool size.
It also updates the set of known required species, allowing better
picking of 'filler' flowers for requested bouquets.
"""
self.designers.append(designer)
self.common_species |= designer.required_flowers.keys()
def add_flower(self, species):
"""Adds a flower of given species to the pool of available flowers."""
self.flowers[species] += 1
for designer in self.designers:
if designer.add(species):
print(self.create_bouquet(designer))
def create_bouquet(self, designer):
"""Creates a bouquet according to the given designers design.
After creating the bouquet, other designers are informed of the
removal of flower species from the shared pool.
"""
bouquet = designer.create(self.flowers, self.common_species)
bouquet_string = designer.stringify_bouquet(bouquet)
for bundle in bouquet.items():
for designer in self.designers:
designer.remove(*bundle)
return bouquet_string
class BouquetDesigner:
def __init__(self, design, flower_size, required_flowers, bouquet_size):
self.design = design
self.flower_size = flower_size
self.bouquet_size = bouquet_size
self.required_flowers = required_flowers
self.filler_quantity = bouquet_size - sum(required_flowers.values())
self.available_filler = 0
self.available_flowers = Counter()
def add(self, species):
"""Adds a species of flower to the local availability cache.
In addition. this will check whether a bouquet can be created based on
the recently seen flowers. If one can be created, this returns True.
"""
if species in self.required_flowers:
self.available_flowers[species] += 1
else:
self.available_filler += 1
return self.can_create()
def can_create(self):
"""Checks whether there are enough flowers to create a bouquet.
This will check if there is enough quantity of the required flowers and
if so, will check if there is enough filler to create a full bouquet.
"""
for flower, quantity in self.required_flowers.items():
if self.available_flowers[flower] < quantity:
return False
available = sum(self.available_flowers.values(), self.available_filler)
if available >= self.bouquet_size:
return True
return False
def create(self, pool, common_species):
"""Returns a bouquet (species listing) assembled from the given pool.
After picking the required flowers, if additional flowers are needed
as filler, this method selects a sample of flowers from the rest of
the pool in two steps:
1. Species of flowers used by other BouquetDesigners are avoided so
that selection for this bouquet causes the least conflict.
2. A random sample of flowers is picked, to avoid consistently stealing
from the same other designers. Randomly selecting also hopefully
generates nice and pleasing outcomes for the recipient, though this
hypothesis has not been tested in the least ;-)
In all cases we bias to picking filler flowers that we have a surplus
of. In an ideal world we would have a function that determines the
correct bias to introduce here.
"""
bouquet = Counter()
for species, quantity in self.required_flowers.items():
pool[species] -= quantity
bouquet[species] += quantity
# Pick the remaining flowers
if self.filler_quantity:
remaining = self.filler_quantity
for do_not_pick in (common_species, set()):
population = []
for species in pool.keys() ^ do_not_pick:
population.extend([species] * pool[species])
sample_size = min(len(population), remaining)
for species in random.sample(population, sample_size):
pool[species] -= 1
bouquet[species] += 1
remaining -= sample_size
if not remaining:
break
return bouquet
def remove(self, species, quantity):
"""Proceses removal of flowers from the flower pool.
This will update either the cache for available required flowers, or
if it's a species not -required- for this design, the filler count.
"""
if species in self.required_flowers:
self.available_flowers[species] -= quantity
else:
self.available_filler -= quantity
def stringify_bouquet(self, bouquet):
"""Returns the formatted bouquet string for this designer."""
flowers = sorted(bouquet.items())
flowerstring = (f'{count}{species}' for species, count in flowers)
return f'{self.design}{self.flower_size}{"".join(flowerstring)}'
@classmethod
def from_specification(cls, design):
"""Creates a BouquetDesigner instance from a string specification."""
spec = DESIGN.match(design).groupdict()
spec_flowers = DESIGN_FLOWER.findall(spec['flowers'])
flowers = {species: int(count) for count, species in spec_flowers}
return cls(spec['design'], spec['size'], flowers, int(spec['total']))
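    # Illustrative parse (follows the DESIGN regex above; the spec string is
    # hypothetical): "AL2a3b5" yields design "A", flower size "L",
    # required_flowers {"a": 2, "b": 3} and a bouquet size of 5, which in this
    # case leaves no room for filler flowers.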
def read_until_empty(fp):
"""Yields lines from the given filepointer until an empty line is hit."""
while (line := fp.readline().strip()):
yield line
def main():
pools = defaultdict(Pool)
for design in read_until_empty(sys.stdin):
designer = BouquetDesigner.from_specification(design)
pools[designer.flower_size].add_designer(designer)
for species, size in read_until_empty(sys.stdin):
pools[size].add_flower(species)
if __name__ == '__main__':
main()
| [
"[email protected]"
] | |
db365ccaef28c337a5d9c69e8c10f082020063ee | c940bcb25e1ed315263b25cbdac49cc4bf92cac1 | /env/vkviewer/python/georef/georeferenceutils.py | 92de981594a95d6365cfb3fdb3f7e7f015ad83b1 | [] | no_license | kwaltr/vkviewer | 281a3f1b5b08a18a89f232ecd096cea44faca58b | 01d64df0a9266c65e0c3fb223e073ef384281bdc | refs/heads/master | 2021-01-16T22:09:41.821531 | 2014-02-07T17:19:04 | 2014-02-07T17:19:04 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,844 | py | '''
Created on Oct 15, 2013
@author: mendt
'''
import subprocess
""" function: parseYSize
@param - imageFile {String} - path to a image file
@return - {Integer} - value which represents the y size of the file
This function parse the x,y size of a given image file """
def parseXYSize(imageFile):
# run gdalinfo command on imageFile and catch the response via Popen
response = subprocess.Popen("gdalinfo %s"%imageFile, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
# read the console output line by line
for line in response.stdout:
if 'Size is ' in line:
x,y = line[8:].split(', ')
#print "X: %s, Y: %s"%(x,y)
return float(x),float(y)
""" Functions for getting the gcps. """
def getGCPsAsString(unorderedPixels, verzeichnispfad, georefCoords):
pure_gcps = getGCPs(unorderedPixels, verzeichnispfad, georefCoords)
str_gcps = []
for tuple in pure_gcps:
string = " ".join(str(i) for i in tuple[0])+", "+" ".join(str(i) for i in tuple[1])
str_gcps.append(string)
return str_gcps
def getGCPs(unorderedPixels, verzeichnispfad, georefCoords):
# transformed the pixel coordinates to the georef coordinates by recalculating the y values,
# because of a different coordinate origin
transformedUnorderedPixels = []
xSize, ySize = parseXYSize(verzeichnispfad)
for tuple in unorderedPixels:
transformedUnorderedPixels.append((tuple[0],ySize-tuple[1]))
# now order the pixel coords so that there sorting represents the order llc, ulc, urc, lrc
transformedOrderedPixels = orderPixels(transformedUnorderedPixels)
# now create the gcp list
try:
gcpPoints = []
for i in range(0,len(transformedOrderedPixels)):
pixelPoints = (transformedOrderedPixels[i][0],transformedOrderedPixels[i][1])
georefPoints = (georefCoords[i][0],georefCoords[i][1])
gcpPoints.append((pixelPoints,georefPoints))
return gcpPoints
except:
raise
def orderPixels(unorderdPixels):
"""
    Function brings a list of tuples which represent the clipping parameters from the client
    into the order llc ulc urc lrc and gives them back as a list. Only valid for pixel coords.
    @param clippingParameterList: list which comprises 4 tuples of x,y coordinates
"""
xList = []
yList = []
for tuple in unorderdPixels:
xList.append(tuple[0])
yList.append(tuple[1])
orderedList = [0, 0, 0, 0]
xList.sort()
yList.sort()
for tuple in unorderdPixels:
if (tuple[0] == xList[0] or tuple[0] == xList[1]) and \
(tuple[1] == yList[2] or tuple[1] == yList[3]):
orderedList[0] = tuple
elif (tuple[0] == xList[0] or tuple[0] == xList[1]) and \
(tuple[1] == yList[0] or tuple[1] == yList[1]):
orderedList[1] = tuple
elif (tuple[0] == xList[2] or tuple[0] == xList[3]) and \
(tuple[1] == yList[0] or tuple[1] == yList[1]):
orderedList[2] = tuple
elif (tuple[0] == xList[2] or tuple[0] == xList[3]) and \
(tuple[1] == yList[2] or tuple[1] == yList[3]):
orderedList[3] = tuple
return orderedList
""" Functions for creating the commands for command line """
""" function: addGCPToTiff
@param - gcPoints {list of gcp} - list of ground control points
    @param - srid {Integer} - epsg code of coordinate system
@param - srcPath {String}
@param - destPath {String}
@return - command {String}
Add the ground control points via gdal_translate to the src tiff file """
def addGCPToTiff(gcPoints,srs,srcPath,destPath):
def addGCPToCommandStr(command,gcPoints):
for string in gcPoints:
command = command+"-gcp "+str(string)+" "
return command
command = "gdal_translate --config GDAL_CACHEMAX 500 -a_srs epsg:%s "%srs
command = addGCPToCommandStr(command,gcPoints)
command = command+str(srcPath)+" "+str(destPath)
return command
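# Illustrative result (hypothetical single GCP ((10, 20), (13.5, 51.0)) and srs=4314):
#   gdal_translate --config GDAL_CACHEMAX 500 -a_srs epsg:4314 -gcp 10 20, 13.5 51.0 in.tif out.tif
# i.e. one "-gcp <pixel x> <pixel y>, <geo x> <geo y>" group is appended per control point.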
""" function: georeferenceTiff
@param - shapefilePath {String}
    @param - srid {Integer} - epsg code of coordinate system
@param - srcPath {String}
@param - destPath {String}
    @param - type {String} - if 'fast' there is less compression
@return - command {String}
Georeferencing via gdalwarp """
def georeferenceTiff(shapefilePath, srid, srcPath, destPath, type=None):
if type == 'fast':
command = "gdalwarp --config GDAL_CACHEMAX 500 -wm 500 -overwrite -co TILED=YES -cutline %s \
-crop_to_cutline -t_srs epsg:%s %s %s"%(shapefilePath,srid,srcPath,destPath)
return command
| [
"[email protected]"
] | |
49831033a0db7eb9b44e22f82a18daf733b0ede5 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03698/s076680456.py | f88b228e0ad2e567dcb9e176f989690214f846c7 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 100 | py | S=input()
for c in S:
if S.count(c)>1:
print("no")
break
else:
print("yes")
| [
"[email protected]"
] | |
f0b7898e2cc53710b09420d379c41c3e2ac4a97a | cbf70750d6c265e4043fd9d1d3bd835662cd680f | /customer/apps.py | 845451d50116021235e04c440ee3b6c448bca321 | [
"Apache-2.0"
] | permissive | xxcfun/DJANGO_CRM | c54e249a9a3da9edaeb5d9b49e852d351c7e359a | 1f8d2d7a025f9dc54b5bf498e7a577469f74c612 | refs/heads/master | 2023-01-14T05:21:54.995601 | 2020-11-27T03:23:40 | 2020-11-27T03:23:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 131 | py | from django.apps import AppConfig
class CustomerConfig(AppConfig):
name = 'customer'
verbose_name = '客户管理'
| [
"[email protected]"
] | |
57c8c4f7a53557e403719802170a2e4a7bd660c6 | 9ecd7568b6e4f0f55af7fc865451ac40038be3c4 | /tianlikai/hubei/enshi_zhongbiao.py | aa1eb42ebd5cbeb6d019ac1072c18bf552fa29cc | [] | no_license | jasonTLK/scrapy | f5ac6e575e902c077a07dc0eb9d228506f1a173f | 2de8245fbc8731cfd868bbd91168e26271045300 | refs/heads/master | 2021-01-20T04:22:23.080864 | 2017-04-28T07:46:29 | 2017-04-28T07:46:29 | 89,681,374 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,967 | py | # -*- coding: utf-8 -*-
import scrapy
from scrapy import Request
from scrapy.selector import Selector
try:
from scrapy.spiders import Spider
except:
from scrapy.spiders import BaseSpider as Spider
import datetime
from items.biding import biding_gov
from utils.toDB import *
# Hubei Enshi public bidding and tendering website
# winning-bid (award) announcements
class hz_gov_Spider(scrapy.Spider):
name = "enshi_zhongbiao.py"
allowed_domains = ["eszggzy.cn"]
custom_settings = {
"DOWNLOADER_MIDDLEWARES": {
'scrapy.downloadermiddlewares.useragent.UserAgentMiddleware': None,
'middlewares.useragent_middleware.RandomUserAgent': 400,
# 'scrapy.downloadermiddlewares.httpproxy.HttpProxyMiddleware': None,
# 'middlewares.proxy_middleware.ProxyMiddleware': 250,
# 'scrapy.downloadermiddlewares.retry.RetryMiddleware': None,
# 'middlewares.retry_middleware.RetryWithProxyMiddleware': 300,
# 'middlewares.timestamp_middleware.TimestampMiddleware': 120
}
}
def start_requests(self):
urls = [
"http://www.eszggzy.cn/TPFront/jyxx/070001/070001003/?Paging=",
"http://www.eszggzy.cn/TPFront/jyxx/070002/070002003/?Paging=",
]
pages = [21, 20]
for i in range(len(urls)):
num=1
while num<=pages[i]:
url =urls[i]+str(num)
num+=1
# print url
yield Request(url=url,callback=self.parse)
# start_urls = [
# "http://www.eszggzy.cn/TPFront/jyxx/070001/070001003/?Paging=1"
# ]
def parse(self, response):
selector = Selector(response)
names = selector.xpath("//td[@align='left']//a/@title").extract()
urls = selector.xpath("//td[@align='left']//a/@href").extract()
print len(names),len(urls)
for i in range(len(names)):
url = "http://www.eszggzy.cn" + "".join(urls[i+4])
str = "".join(names[i]) + "," + url
print str
yield Request(url=url, callback=self.parse2, meta={"info": str})
def parse2(self, response):
infos = response.meta["info"]
items = biding_gov()
items["url"] = response.url
items["name"] = "".join(infos).split(",")[0]
items["info"] = ""
items["create_time"] = datetime.datetime.now()
items["update_time"] = datetime.datetime.now()
page_info = "".join(response.body)
items["info"] = "".join(page_info).decode('gbk')
db = MongodbHandle("172.20.3.10 ", 27017, "spiderBiding")
db.get_insert(
"bid_hubei_EnShi",
{
"url": items["url"],
"name": items["name"],
"info": items["info"],
"create_time": items["create_time"],
"update_time": items["update_time"]
}
)
print items["url"]
print items["name"] | [
"[email protected]"
] | |
de8358d209f0dbfcb2af469c09f0adecc9434180 | 8aa1203e1a1c350da16921787133014831097391 | /luminardjangopgm/PythonCollection/ListDemi/listworkout2.py | 3c265d38f3861a8f435cfbcd15806d5de7e7f4df | [] | no_license | ashilz/luminarpython | 98fa4a87c60529d0c819e13bc5145e6f7d4ef01f | 9eb834448012bd60952cbc539409768cabd66325 | refs/heads/master | 2022-12-03T03:36:14.229723 | 2020-08-25T05:07:26 | 2020-08-25T05:07:26 | 290,109,486 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 105 | py | lst=[10,12,13,14,15]
cnt=len(lst)
p=1
for i in range(0,cnt):
res=lst[i]**p
p+=1
print(res)
| [
"[email protected]"
] | |
8bf02c256d73472a61e065933f71d8e075957de5 | a3d1e8a67ed43e1bea59180cc51c49f25a961a49 | /scripts/dg2dotty | 1aee7a8c68572dcdabdf99da9567433445ae7d8b | [
"BSD-3-Clause",
"BSD-2-Clause"
] | permissive | WladimirSidorenko/TextNormalization | 38b076d88a2de40dae72dc8b4096e354b774f2f4 | ac645fb41260b86491b17fbc50e5ea3300dc28b7 | refs/heads/master | 2020-04-14T16:48:42.541883 | 2019-09-29T23:38:28 | 2019-09-29T23:38:28 | 163,962,092 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,209 | #!/usr/bin/env python2.7
# -*- coding: utf-8; -*-
"""
Utility for representing DG trees in DOTTY format.
Read a DG tree in CONLL-2009 format and output the read tree in GRAPHVIZ
format.
Input format (meaning of columns):
ID FORM LEMMA PLEMMA POS PPOS FEAT PFEAT HEAD PHEAD DEPREL PDEPREL FILLPRED PRED APREDs
0 1 2 3 4 5 6 7 8 9 10 11 12 13 14
Output format:
GRAPHVIZ (dot) source for the dependency tree: one node per token, one FEAT
box per feature bundle, and one labelled edge per dependency relation.
"""
##################################################################
# Importing Libraries
import os
import re
import sys
from alt_argparse import argparser
from alt_fio import AltFileInput, AltFileOutput
##################################################################
# Variables and Constants
FIELDSEP = re.compile('\t')
fields = []
FEATURESEP = re.compile('\|')
features = []
QUOTE_RE = re.compile('(")')
NODE_STYLE = 'color="gray",fillcolor="palegreen",style="filled"'
FEAT_LABEL = ' [label="FEAT"];'
FEAT_STYLE = 'shape=box,fillcolor="lightblue",style="filled,rounded",'
w_id = 0
form = ''
lemma = ''
pos = ''
p_id = 0
rel = ''
edges = []
f_id = -1
##################################################################
# Methods
def escape_quote(iline):
"""Prepend all double quotes with a backslash."""
return QUOTE_RE.sub(r"\\\1", iline)
##################################################################
# Processing Arguments
argparser.description="""Utility for determining sentence boundaries."""
argparser.add_argument("-c", "--esc-char", help = """escape character which should
precede lines with meta-information""", nargs = 1, type = str, \
default = os.environ.get("SOCMEDIA_ESC_CHAR", ""))
args = argparser.parse_args()
##################################################################
# Main Body
foutput = AltFileOutput(encoding = args.encoding, \
flush = args.flush)
finput = AltFileInput(*args.files, \
skip_line = args.skip_line, \
print_func = foutput.fprint, \
errors = "replace")
# print graph header
foutput.fprint("""
graph dg {{
forcelabels=true
size="14";
node [{:s}];
0 [label="Root"];
""".format(NODE_STYLE))
for line in finput:
if line and line[0] == args.esc_char:
continue
# interpret fields
fields = line.split()
if not len(fields):
continue
w_id, form, lemma = fields[0], fields[1], fields[3]
pos, p_id, rel = fields[5], fields[9], fields[11]
features = FEATURESEP.split(fields[7])
# add node to the graph
foutput.fprint(w_id, ' [label="' + escape_quote(lemma) + \
"\\n(" + escape_quote(form) + ')"];')
# output features as additional node which will be connected to the current
# one
if features:
foutput.fprint(f_id, ' [{:s} label="'.format(FEAT_STYLE) + \
escape_quote(";\\n".join(features)) + ';"];')
edges.append(w_id + " -- " + str(f_id) + FEAT_LABEL)
f_id -= 1
# remember edge
edges.append(p_id + " -- " + w_id + ' [label="' + rel + '"];')
# output edges
foutput.fprint('\n'.join(edges), "\n}")
| [
"[email protected]"
] | ||
e61d248ab9d60f7194933ccc8cf31c297f485cc2 | 98f1a0bfa5b20a0b81e9e555d76e706c62d949c9 | /examples/pytorch/dimenet/modules/envelope.py | b9d89620f674a562a255f52694e36235733374cc | [
"Apache-2.0"
] | permissive | dmlc/dgl | 3a8fbca3a7f0e9adf6e69679ad62948df48dfc42 | bbc8ff6261f2e0d2b5982e992b6fbe545e2a4aa1 | refs/heads/master | 2023-08-31T16:33:21.139163 | 2023-08-31T07:49:22 | 2023-08-31T07:49:22 | 130,375,797 | 12,631 | 3,482 | Apache-2.0 | 2023-09-14T15:48:24 | 2018-04-20T14:49:09 | Python | UTF-8 | Python | false | false | 610 | py | import torch.nn as nn
class Envelope(nn.Module):
"""
Envelope function that ensures a smooth cutoff
"""
def __init__(self, exponent):
super(Envelope, self).__init__()
self.p = exponent + 1
self.a = -(self.p + 1) * (self.p + 2) / 2
self.b = self.p * (self.p + 2)
self.c = -self.p * (self.p + 1) / 2
def forward(self, x):
# Envelope function divided by r
x_p_0 = x.pow(self.p - 1)
x_p_1 = x_p_0 * x
x_p_2 = x_p_1 * x
env_val = 1 / x + self.a * x_p_0 + self.b * x_p_1 + self.c * x_p_2
return env_val
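        # i.e. env(x) = 1/x + a*x^(p-1) + b*x^p + c*x^(p+1); multiplied by x this
        # is the smooth DimeNet polynomial cutoff, which reaches zero at x = 1
        # (inputs are expected to be distances already scaled by the cutoff radius).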
| [
"[email protected]"
] | |
c85c091a3229318315dafe45d892f4fe27ad63c5 | c8efab9c9f5cc7d6a16d319f839e14b6e5d40c34 | /source/All_Solutions/0480.滑动窗口中位数/0480-滑动窗口中位数.py | b6a27a3906d116af6ae8695a4eafea53559a93c4 | [
"MIT"
] | permissive | zhangwang0537/LeetCode-Notebook | 73e4a4f2c90738dea4a8b77883b6f2c59e02e9c1 | 1dbd18114ed688ddeaa3ee83181d373dcc1429e5 | refs/heads/master | 2022-11-13T21:08:20.343562 | 2020-04-09T03:11:51 | 2020-04-09T03:11:51 | 277,572,643 | 0 | 0 | MIT | 2020-07-06T14:59:57 | 2020-07-06T14:59:56 | null | UTF-8 | Python | false | false | 940 | py | import bisect
from typing import List
class Solution:
def medianSlidingWindow(self, nums: List[int], k: int) -> List[float]:
"""
My solution, using sorted list
Time: O(nlog(k))
Space: O(n+k)
"""
res = []
if not nums or not k:
return res
def append_median():
median = sorted_list[k//2] if k%2==1 else (sorted_list[k//2] + sorted_list[k//2-1])/2
res.append(median)
n = len(nums)
p1, p2 = 0, k
sorted_list = sorted(nums[p1:p2])
append_median()
while p2 != n:
bisect.insort(sorted_list, nums[p2])
del_index = bisect.bisect(sorted_list, nums[p1])
# remember that the index of bisect and list are not same!
del sorted_list[del_index - 1]
append_median()
p1 += 1
p2 += 1
return res
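        # Illustrative check (LeetCode 480 sample): nums = [1,3,-1,-3,5,3,6,7], k = 3
        # produces the window medians [1, -1, -1, 3, 5, 6].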
| [
"[email protected]"
] | |
ecc631a48f59fcc28412207e3d56e26f26d614f1 | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/128/usersdata/222/33411/submittedfiles/al6.py | a4e5c49916c0a47643dc35834d5f8c7cd5aca7c0 | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 303 | py | # -*- coding: utf-8 -*-
a=int(input('Digite a:'))
contador=0
for i in range(2,a,1):
    if a%i==0:
contador=contador+1
print(i)
for i in range(2,a,1):
    if a%i==0:
contador=contador+1
print(i)
if contador==0:
print('Primo')
else:
print('Não primo')
| [
"[email protected]"
] | |
ef232dab5bc20bf3a6e6d2877ede262ab60bd9c8 | 99249dad36df26a712ae8d900041d53acf3901ea | /settings/configurations/LCLS_settings.py | 0f4d71a15a5f657650c92536b3cfb5a54b7d163c | [
"MIT"
] | permissive | bopopescu/Lauecollect | f1f79c2cc5ff106df0dedbd6939ec92630d2b305 | 60ae2b05ea8596ba0decf426e37aeaca0bc8b6be | refs/heads/master | 2022-11-29T00:40:28.384831 | 2019-06-05T01:21:36 | 2019-06-05T01:21:36 | 280,989,300 | 0 | 0 | MIT | 2020-07-20T02:03:22 | 2020-07-20T02:03:22 | null | UTF-8 | Python | false | false | 1,034 | py | MicroscopeCamera.ImageWindow.Center = (679.0, 512.0)
MicroscopeCamera.Mirror = False
MicroscopeCamera.NominalPixelSize = 0.000517
MicroscopeCamera.Orientation = -90
MicroscopeCamera.camera.IP_addr = '172.21.46.202'
MicroscopeCamera.x_scale = -1.0
MicroscopeCamera.y_scale = 1.0
MicroscopeCamera.z_scale = -1.0
WideFieldCamera.ImageWindow.Center = (738.0, 486.0)
WideFieldCamera.Mirror = False
WideFieldCamera.NominalPixelSize = 0.002445
WideFieldCamera.Orientation = -90
WideFieldCamera.camera.IP_addr = '172.21.46.70'
WideFieldCamera.x_scale = -1.0
WideFieldCamera.y_scale = 1.0
WideFieldCamera.z_scale = -1.0
laser_scope.ip_address = 'femto10.niddk.nih.gov:2000'
rayonix_detector.ip_address = '172.21.46.133:2222'
sample.phi_motor_name = 'SamplePhi'
sample.rotation_center = (-0.7938775, -0.31677586081529113)
sample.x_motor_name = 'SampleX'
sample.xy_rotating = False
sample.y_motor_name = 'SampleY'
sample.z_motor_name = 'SampleZ'
timing_system.ip_address = '172.21.46.207:2000'
xray_scope.ip_address = 'pico21.niddk.nih.gov:2000' | [
"[email protected]"
] | |
a4f2c36e4c3b0cede51f060454ace8927faf42d4 | 1fd180ffcaf78a8ef5029a753e8b7ebd6aa7cdc6 | /todolistapp/wsgi.py | a0714e3a7cfcb7f303b675f6ec51b5eec97c91a5 | [] | no_license | Ayush900/todo-list-app | 05033615e7c521c16b4f840bd5401eb4c8bb7fd7 | 1f9c30dedab0ef1da9d08361a097bf31eec5c3f8 | refs/heads/master | 2022-12-25T19:53:06.353732 | 2020-10-01T07:04:22 | 2020-10-01T07:04:22 | 269,395,956 | 0 | 2 | null | 2020-10-01T07:04:23 | 2020-06-04T15:26:49 | JavaScript | UTF-8 | Python | false | false | 399 | py | """
WSGI config for todolistapp project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'todolistapp.settings')
application = get_wsgi_application()
| [
"[email protected]"
] | |
09f7ff38257927f817ca76e38b02d8f4f94da9fd | 730707fdefc2934929e1309cfbb0484d62b4bc34 | /backend/home/migrations/0001_load_initial_data.py | bc0ac08ee26bc6af244f1c1862878b762c7d3a2e | [] | no_license | crowdbotics-apps/tpl-account-securty-27301 | 885f78b6256e3da6733c534cb85b89f797476e5f | 44a580b64f14f7598b9e0c7a513976795992b15d | refs/heads/master | 2023-04-26T15:38:35.791087 | 2021-05-23T22:55:42 | 2021-05-23T22:55:42 | 370,173,419 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 561 | py | from django.db import migrations
def create_site(apps, schema_editor):
Site = apps.get_model("sites", "Site")
custom_domain = "tpl-account-securty-27301.botics.co"
site_params = {
"name": "tpl account securty page",
}
if custom_domain:
site_params["domain"] = custom_domain
Site.objects.update_or_create(defaults=site_params, id=1)
class Migration(migrations.Migration):
dependencies = [
("sites", "0002_alter_domain_unique"),
]
operations = [
migrations.RunPython(create_site),
]
| [
"[email protected]"
] | |
557dc77ea9e99dbf933860debf7334305d13e6aa | eff5f0a2470c7023f16f6962cfea35518ec0b89c | /Storage_Xs and Os Champion.py | 7d81e185c2aae6377e67314d2e8577330d0932e8 | [] | no_license | olegJF/Checkio | 94ea70b9ee8547e3b3991d17c4f75aed2c2bab2f | fc51a7244e16d8d0a97d3bb01218778db1d946aa | refs/heads/master | 2021-01-11T00:46:42.564688 | 2020-03-02T13:36:02 | 2020-03-02T13:36:02 | 70,490,008 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,829 | py | # -*- coding: utf-8 -*-
def x_and_o(grid, mark):
X_vs_O = {'X':'O', 'O':'X'}
def winner(grid, mark):
WINNER_WAYS = ((0, 1, 2), (3, 4, 5),
(6, 7, 8), (0, 3, 6),
(1, 4, 7), (2, 5, 8),
(0, 4, 8), (2, 4, 6)
)
for row in WINNER_WAYS:
line = grid[row[0]]+grid[row[1]]+grid[row[2]]
if line.count('.') == 1:
if line.count(mark) == 2 or line.count(X_vs_O[mark]) == 2:
return row[line.find('.')]
return False
BEST_MOVES = [4, 0, 2, 6, 8, 1, 3, 5, 7]
FIELD = {0:(0, 0), 1:(0, 1), 2:(0, 2),
3:(1, 0), 4:(1, 1), 5:(1, 2),
6:(2, 0), 7:(2, 1), 8:(2, 2)
}
grid = ''.join(grid)
dot_cnt = grid.count('.')
is_first_move = True if dot_cnt == 9 else False
if is_first_move: return FIELD[4]
is_second_move = True if dot_cnt == 8 else False
is_center_free = True if grid[4] =='.' else False
if is_second_move and is_center_free:
return FIELD[4]
elif is_second_move:
for i in BEST_MOVES:
if grid[i] == '.': return FIELD[i]
cnt_my_mark = grid.count(mark)
cnt_enemy_mark = grid.count(X_vs_O[mark])
was_my_first_move = True if cnt_my_mark == cnt_enemy_mark else False
legal_moves = [ i for i in range(9) if grid[i] =='.']
if was_my_first_move:
if dot_cnt == 7:
for i in (0, 2, 8, 6):
if grid[i] == '.': return FIELD[i]
is_winner = winner(grid, mark)
if is_winner is not False: return FIELD[is_winner]
if dot_cnt == 5:
lines = ((0, 1, 2), (6, 7, 8),
(0, 3, 6), (2, 5, 8))
for x, y in ([0, 8], [2, 6]):
if x in legal_moves and y in legal_moves:
for corner in (x,y):
for line in lines:
if corner in line:
row = grid[line[0]]+grid[line[1]]+grid[line[2]]
cnt_mark = row.count(mark)
cnt_dot = row.count('.')
if cnt_mark ==1 and cnt_dot ==2:
return FIELD[corner]
for move in BEST_MOVES:
if move in legal_moves: return FIELD[move]
else:
is_winner = winner(grid, mark)
if is_winner is not False: return FIELD[is_winner]
if dot_cnt == 6 and grid[4] == mark:
for i in (1, 3, 5, 7):
if i in legal_moves: return FIELD[i]
for move in BEST_MOVES:
if move in legal_moves: return FIELD[move]
print(x_and_o(( "XO.", ".X.", "..O"), "X"))
#print(winner("XO..X....", 'X'))
| [
"[email protected]"
] | |
a49c16b1780e0f525fcaef9f2316c830deb44dd2 | 4cabdcc3cdf929fa7cf761a42cd3012d01494336 | /pipeline/mongodb/connector.py | 02c440be290a00f350da0205b88def477f43851c | [] | no_license | pjt3591oo/python-boilerplate | 660e3234aa45f18ed553f499674c54e3226bfaf4 | 1ea8d84cfc06c84bab934f779ead309e8e4e7c14 | refs/heads/master | 2021-01-01T06:53:18.184728 | 2017-07-19T01:06:54 | 2017-07-19T01:06:54 | 97,542,368 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 63 | py | from config.DB import CONNECTOR_INFO
CONNECTOR_INFO['mongodb'] | [
"[email protected]"
] | |
bc38069aef7b32c7c351685d0b2122f0d604529e | 2f5d221d5cd423f07da50ed8be9668d811e550b4 | /airtest/core/ios/fake_minitouch.py | b5eb7986af962fc31648c001e4259ad16c8af15a | [
"Apache-2.0"
] | permissive | Pactortester/Airtest | d1db25498591992dee525b2ceeb45de9239b319f | 18e57ae2bbde3f2b95c32f09e214fdf4aec41330 | refs/heads/master | 2022-06-03T22:52:54.939200 | 2020-06-29T01:01:30 | 2020-06-29T01:01:30 | 275,080,743 | 1 | 0 | Apache-2.0 | 2020-06-26T05:28:02 | 2020-06-26T05:28:02 | null | UTF-8 | Python | false | false | 1,979 | py | # coding=utf-8
import subprocess
import os
import re
import struct
import logging
from airtest.utils.logger import get_logger
from airtest.utils.nbsp import NonBlockingStreamReader
from airtest.utils.safesocket import SafeSocket
LOGGING = get_logger(__name__)
class fakeMiniTouch(object):
lastDown = {'x': None, 'y': None}
recentPoint = {'x': None, 'y': None}
def __init__(self, dev):
self.dev = dev
self.swipe_threshold = 10
def setup(self):
pass
def operate(self, operate_arg):
        # TODO FIX IPHONE TOUCH
# start down
if operate_arg['type'] == 'down':
self.lastDown['x'] = operate_arg['x']
self.lastDown['y'] = operate_arg['y']
# mouse up
if operate_arg['type'] == 'up':
# in case they may be None
if self.lastDown['x'] is None or self.lastDown['y'] is None:
return
# has recent point
if self.recentPoint['x'] and self.recentPoint['y']:
# swipe need to move longer
                # TODO: tune the threshold that separates a swipe from a tap; currently 10
if abs(self.recentPoint['x'] - self.lastDown['x']) > self.swipe_threshold \
or abs(self.recentPoint['y'] - self.lastDown['y']) > self.swipe_threshold:
self.dev.swipe((self.lastDown['x'], self.lastDown['y']),
(self.recentPoint['x'], self.recentPoint['y']))
else:
self.dev.touch((self.lastDown['x'], self.lastDown['y']))
else:
self.dev.touch((self.lastDown['x'], self.lastDown['y']))
# clear infos
self.lastDown = {'x': None, 'y': None}
self.recentPoint = {'x': None, 'y': None}
if operate_arg['type'] == 'move':
self.recentPoint['x'] = operate_arg['x']
self.recentPoint['y'] = operate_arg['y']
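    # Illustrative event sequence (argument dicts as consumed by the handlers above):
    #   operate({'type': 'down', 'x': 100, 'y': 200})
    #   operate({'type': 'move', 'x': 160, 'y': 200})
    #   operate({'type': 'up', 'x': 160, 'y': 200})
    # ends in dev.swipe((100, 200), (160, 200)) because the move exceeds
    # swipe_threshold; without the 'move' event it would call dev.touch((100, 200)).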
if __name__ == '__main__':
pass
| [
"[email protected]"
] | |
7066f6fd5882ec68a145a9b5116e7c5eff2d33f2 | a854f81f3ca0d6e6d6cf60662d05bc301465e28c | /backend/booking/migrations/0001_initial.py | 4d479654287d6f6f7b495a5050811e171d37cb04 | [] | no_license | crowdbotics-apps/lavadoras-19637 | 7f99e2046a6a92cdcfaec052eb9eadfd807193fd | 577d0da2626867a8a1b27d2df386c8598e4adc6d | refs/heads/master | 2022-12-02T21:15:17.103593 | 2020-08-18T08:28:47 | 2020-08-18T08:28:47 | 288,397,803 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,317 | py | # Generated by Django 2.2.15 on 2020-08-18 08:26
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('location', '0001_initial'),
('taxi_profile', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='BookingTransaction',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('distance', models.FloatField()),
('price', models.FloatField()),
('status', models.CharField(max_length=10)),
('timestamp_created', models.DateTimeField(auto_now_add=True)),
('timestamp_depart', models.DateTimeField()),
('timestamp_arrive', models.DateTimeField()),
('tip', models.FloatField(blank=True, null=True)),
('driver', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='bookingtransaction_driver', to='taxi_profile.DriverProfile')),
('dropoff', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='bookingtransaction_dropoff', to='location.MapLocation')),
('pickup', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='bookingtransaction_pickup', to='location.MapLocation')),
('user', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='bookingtransaction_user', to='taxi_profile.UserProfile')),
],
),
migrations.CreateModel(
name='Rating',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('rating', models.FloatField()),
('timestamp_created', models.DateTimeField(auto_now_add=True)),
('review', models.TextField(blank=True, null=True)),
('driver', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='rating_driver', to='taxi_profile.DriverProfile')),
('user', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='rating_user', to='taxi_profile.UserProfile')),
],
),
migrations.CreateModel(
name='Message',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('message', models.TextField()),
('timestamp_created', models.DateTimeField(auto_now_add=True)),
('booking', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='message_booking', to='booking.BookingTransaction')),
('driver', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='message_driver', to='taxi_profile.DriverProfile')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='message_user', to='taxi_profile.UserProfile')),
],
),
]
| [
"[email protected]"
] | |
75d146601fcfb74873d0571bc7d1e05b92491d12 | 8f0b0ec0a0a2db00e2134b62a1515f0777d69060 | /scripts/study_case/ID_32/0504_softmax_regression.py | 5d1daab24d438285e89be0a81cd2092dde31f122 | [
"Apache-2.0"
] | permissive | Liang813/GRIST | 2add5b4620c3d4207e7661eba20a79cfcb0022b5 | 544e843c5430abdd58138cdf1c79dcf240168a5f | refs/heads/main | 2023-06-09T19:07:03.995094 | 2021-06-30T05:12:19 | 2021-06-30T05:12:19 | 429,016,034 | 0 | 0 | Apache-2.0 | 2021-11-17T11:19:48 | 2021-11-17T11:19:47 | null | UTF-8 | Python | false | false | 1,389 | py | import myutil as mu
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import sys
sys.path.append("/data")
torch.manual_seed(1)
x_train = [[1, 2, 1, 1],
[2, 1, 3, 2],
[3, 1, 3, 4],
[4, 1, 5, 5],
[1, 7, 5, 5],
[1, 2, 5, 6],
[1, 6, 6, 6],
[1, 7, 7, 7]]
y_train = [2, 2, 2, 1, 1, 1, 0, 0]
x_train = torch.FloatTensor(x_train)
y_train = torch.LongTensor(y_train)
mu.log("x_train", x_train)
mu.log("y_train", y_train)
y_one_hot = torch.zeros(8, 3)
y_one_hot.scatter_(dim=1, index=y_train.unsqueeze(dim=1), value=1)
mu.log("y_one_hot", y_one_hot)
W = torch.zeros((4, 3), requires_grad=True)
b = torch.zeros(1, requires_grad=True)
optimizer = optim.SGD([W, b], lr=0.1)
nb_epoches = 2000
mu.plt_init()
'''inserted code'''
import sys
sys.path.append("/data")
from scripts.utils.torch_utils import TorchScheduler
scheduler = TorchScheduler(name="PyTorchDeepLearningStart.0504_softmax_regression")
'''inserted code'''
while True:
hypothesis = F.softmax(x_train.matmul(W) + b, dim=1)
cost = (y_one_hot * -torch.log(hypothesis)).sum().mean()
optimizer.zero_grad()
cost.backward()
optimizer.step()
'''inserted code'''
scheduler.loss_checker(cost)
scheduler.check_time()
'''inserted code'''
mu.plt_show()
mu.log("W", W)
mu.log("b", b)
| [
"[email protected]"
] | |
f3342ae253a6c3ea4cdf0a8b6733c66468df32a0 | b47a907e824b52a6ee02dfb6387d24fa4d7fe88f | /config/settings.py | 711faa6f8b40f97ba26f9110ae9b2a5e620c989a | [] | no_license | hiroshi-higashiyama/DJANGO-KAKEIBO | 413a883fdef2571cacbd6c8679e63a6aecab7ae9 | 564c6047fcc6f6bb4a45b2eec121df619d158952 | refs/heads/master | 2022-12-29T19:53:15.186934 | 2020-09-21T01:04:10 | 2020-09-21T01:04:10 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,180 | py | """
Django settings for config project.
Generated by 'django-admin startproject' using Django 3.0.5.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.0/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '3!7$0+ew+1s-)tt%ex9gwqtf_(oq==%7celkb+i7g01_ehy&im'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'kakeibo',
'bootstrapform',
'django.contrib.humanize',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'config.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'config.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.0/topics/i18n/
LANGUAGE_CODE = 'ja'
TIME_ZONE = 'Asia/Tokyo'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.0/howto/static-files/
STATIC_URL = '/static/'
NUMBER_GROUPING = 3
| [
"[email protected]"
] | |
6e5b22d94e41c54bed477e9c213add68291fd728 | d85cc746428e787254455c80b66a7309aa715e24 | /demo_odoo_tutorial/models/models.py | fc9e8beafbb712f17fd48d60021152bfda775a67 | [
"MIT"
] | permissive | AllenHuang101/odoo-demo-addons-tutorial | 2ef7d47432a2530f1e704f86cba78e3e975ca0f3 | e719594bc42e3a9b273f5b37980ac61773702ab9 | refs/heads/master | 2023-03-28T03:37:46.338483 | 2021-03-29T08:44:22 | 2021-03-29T08:44:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,829 | py | from odoo import models, fields, api
from odoo.exceptions import UserError, ValidationError
class DemoOdooTutorial(models.Model):
_name = 'demo.odoo.tutorial'
_description = 'Demo Odoo Tutorial'
_inherit = ['mail.thread', 'mail.activity.mixin'] # track_visibility
name = fields.Char('Description', required=True)
    # track_visibility='always' and track_visibility='onchange'
is_done_track_onchange = fields.Boolean(
string='Is Done?', default=False, track_visibility='onchange')
name_track_always = fields.Char(string="track_name", track_visibility='always')
start_datetime = fields.Datetime('Start DateTime', default=fields.Datetime.now())
stop_datetime = fields.Datetime('End Datetime', default=fields.Datetime.now())
field_onchange_demo = fields.Char('onchange_demo')
field_onchange_demo_set = fields.Char('onchange_demo_set', readonly=True)
# float digits
# field tutorial
input_number = fields.Float(string='input number', digits=(10,3))
field_compute_demo = fields.Integer(compute="_get_field_compute") # readonly
_sql_constraints = [
('name_uniq', 'unique(name)', 'Description must be unique'),
]
@api.constrains('start_datetime', 'stop_datetime')
def _check_date(self):
for data in self:
if data.start_datetime > data.stop_datetime:
raise ValidationError(
"data.stop_datetime > data.start_datetime"
)
@api.depends('input_number')
def _get_field_compute(self):
for data in self:
data.field_compute_demo = data.input_number * 1000
@api.onchange('field_onchange_demo')
def onchange_demo(self):
if self.field_onchange_demo:
self.field_onchange_demo_set = 'set {}'.format(self.field_onchange_demo) | [
"[email protected]"
] | |
271e0a82482eb25eaca4b7f12e7efeb08508fb7a | 9206e405e9be5f80a08e78b59d1cb79c519ae515 | /algorithms/codeforces/the_number_of_even_pairs/main.py | 7b7aac218751e1de472854d40e92a53218a4c619 | [] | no_license | mfbx9da4/mfbx9da4.github.io | ac4e34f0e269fb285e4fc4e727b8564b5db1ce3b | 0ea1a0d56a649de3ca7fde2d81b626aee0595b2c | refs/heads/master | 2023-04-13T22:15:19.426967 | 2023-04-12T12:14:40 | 2023-04-12T12:14:40 | 16,823,428 | 2 | 0 | null | 2022-12-12T04:36:08 | 2014-02-14T01:30:20 | SCSS | UTF-8 | Python | false | false | 738 | py | """
"""
from math import factorial
def int_as_array(num): return list(map(int, [y for y in str(num)]))
def array_as_int(arr): return int(''.join(map(str, arr)))
def read_int(): return int(input())
def read_array(): return list(map(int, input().split(' ')))
def array_to_string(arr, sep=' '): return sep.join(map(str, arr))
def matrix_to_string(arr, sep=' '): return '[\n' + '\n'.join(
[sep.join(map(str, row)) for row in arr]) + '\n]'
def combine(n, r):
try:
        return factorial(n) / (factorial(n - r) * factorial(r))
except:
return 0
def solve(N, M):
choose_evens = combine(N, 2)
choose_odds = combine(M, 2)
return int(choose_evens + choose_odds)
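# Illustrative check: with N = 2 even and M = 3 odd numbers the answer is
# C(2, 2) + C(3, 2) = 1 + 3 = 4 pairs whose sum is even.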
N, M = read_array()
print(solve(N, M))
| [
"[email protected]"
] | |
fb483adff09210c3a8dea90d203b5b070f3768fb | 84379e15e54ba79b7e63c1fceecf712b46f22977 | /apps/decks/migrations/0016_auto_20181011_1715.py | 2ac96bd86b326bc8447c68610a43fbba4554b4f0 | [] | no_license | CoderEnko007/HearthStoneStationBackend | a1d74c324233ebd617ad01df13bc609d1f1aa2f6 | 6cc92cb806f19f2a2a0596645028cfe2fa5895d6 | refs/heads/master | 2022-12-11T23:20:24.335737 | 2022-09-18T07:04:08 | 2022-09-18T07:04:08 | 144,392,864 | 0 | 0 | null | 2022-12-08T02:22:42 | 2018-08-11T14:40:48 | JavaScript | UTF-8 | Python | false | false | 831 | py | # Generated by Django 2.0.4 on 2018-10-11 17:15
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('decks', '0015_auto_20180928_1019'),
]
operations = [
migrations.AddField(
model_name='decks',
name='real_game_count',
field=models.IntegerField(blank=True, null=True, verbose_name='实际对局数'),
),
migrations.AddField(
model_name='trending',
name='real_game_count',
field=models.IntegerField(blank=True, null=True, verbose_name='实际对局数'),
),
migrations.AlterField(
model_name='decks',
name='game_count',
field=models.IntegerField(blank=True, null=True, verbose_name='对局数'),
),
]
| [
"[email protected]"
] | |
47693d0710e9c072cad944e857787701b982ce3d | 0ea12ae71b3863a8279fd7200e61f5c40dc3dcb6 | /image_bosch_detect_ssd_mobile.py | 92fd277b6022c6d929dd37d5dae50ebf4863411d | [
"MIT"
] | permissive | scrambleegg7/Traffic-Light-Classification | 7dafb32f43bf1c73d62c645105cdc414ebb0cf44 | 2a9f6b8272866f289963905b162c35058ce6a234 | refs/heads/master | 2020-04-03T02:58:44.729521 | 2018-10-28T14:57:44 | 2018-10-28T14:57:44 | 154,973,271 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,201 | py | import tensorflow as tf
import numpy as np
import datetime
import time
import os, sys
import cv2
from PIL import Image
import yaml
from glob import glob
try:
import matplotlib
matplotlib.use('TkAgg')
finally:
from matplotlib import pyplot as plt
from object_detection.utils import visualization_utils as vis_util
from object_detection.utils import label_map_util
class TrafficLightClassifier(object):
def __init__(self, frozen_model_file):
PATH_TO_MODEL = frozen_model_file
self.detection_graph = tf.Graph()
with self.detection_graph.as_default():
od_graph_def = tf.GraphDef()
# Works up to here.
with tf.gfile.GFile(PATH_TO_MODEL, 'rb') as fid:
serialized_graph = fid.read()
od_graph_def.ParseFromString(serialized_graph)
tf.import_graph_def(od_graph_def, name='')
self.image_tensor = self.detection_graph.get_tensor_by_name('image_tensor:0')
self.d_boxes = self.detection_graph.get_tensor_by_name('detection_boxes:0')
self.d_scores = self.detection_graph.get_tensor_by_name('detection_scores:0')
self.d_classes = self.detection_graph.get_tensor_by_name('detection_classes:0')
self.num_d = self.detection_graph.get_tensor_by_name('num_detections:0')
self.sess = tf.Session(graph=self.detection_graph)
def get_classification(self, img):
# Bounding Box Detection.
with self.detection_graph.as_default():
# Expand dimension since the model expects image to have shape [1, None, None, 3].
img_expanded = np.expand_dims(img, axis=0)
(boxes, scores, classes, num) = self.sess.run(
[self.d_boxes, self.d_scores, self.d_classes, self.num_d],
feed_dict={self.image_tensor: img_expanded})
return boxes, scores, classes, num
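    # Illustrative use (output conventions of the TF Object Detection API):
    #   boxes, scores, classes, num = tfc.get_classification(image_np)
    # boxes are normalized [ymin, xmin, ymax, xmax]; classes index the label map
    # (bosch_label_map.pbtxt) and scores are detection confidences in [0, 1].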
def load_image_into_numpy_array(image):
(im_width, im_height) = image.size
return np.array(image.getdata()).reshape((im_height, im_width, 3)).astype(np.uint8)
def get_all_labels(input_yaml, riib=False):
""" Gets all labels within label file
Note that RGB images are 1280x720 and RIIB images are 1280x736.
:param input_yaml: Path to yaml file
:param riib: If True, change path to labeled pictures
:return: images: Labels for traffic lights
"""
images = yaml.load(open(input_yaml, 'rb').read())
for i in range(len(images)):
images[i]['path'] = os.path.abspath(os.path.join(os.path.dirname(input_yaml), images[i]['path']))
if riib:
images[i]['path'] = images[i]['path'].replace('.png', '.pgm')
images[i]['path'] = images[i]['path'].replace('rgb/train', 'riib/train')
images[i]['path'] = images[i]['path'].replace('rgb/test', 'riib/test')
for box in images[i]['boxes']:
box['y_max'] = box['y_max'] + 8
box['y_min'] = box['y_min'] + 8
return images
def detect_label_images(input_yaml, output_folder=None):
"""
Shows and draws pictures with labeled traffic lights.
Can save pictures.
:param input_yaml: Path to yaml file
:param output_folder: If None, do not save picture. Else enter path to folder
"""
PATH_TO_LABELS = r'data/bosch_label_map.pbtxt'
NUM_CLASSES = 14
frozen_model_file = "./models/bosch_freeze_tf1.3/frozen_inference_graph.pb"
label_map = label_map_util.load_labelmap(PATH_TO_LABELS)
categories = label_map_util.convert_label_map_to_categories(label_map, max_num_classes=NUM_CLASSES, use_display_name=True)
category_index = label_map_util.create_category_index(categories)
print(category_index)
# loading models
tfc = TrafficLightClassifier(frozen_model_file)
images = get_all_labels(input_yaml)
if output_folder is not None:
if not os.path.exists(output_folder):
os.makedirs(output_folder)
for idx, image_dict in enumerate(images[:10]):
image_path = image_dict['path']
image_np = cv2.imread( image_path )
if idx == 0:
print(image_path)
timestr = time.strftime("%Y%m%d-%H%M%S")
boxes, scores, classes, num = tfc.get_classification(image_np)
vis_util.visualize_boxes_and_labels_on_image_array(
image_np,
np.squeeze(boxes),
np.squeeze(classes).astype(np.int32),
np.squeeze(scores),
category_index,
use_normalized_coordinates=True,
max_boxes_to_draw=5,
min_score_thresh=0.3,
line_thickness=8)
if idx % 10 == 0 and idx > 0:
print("%d images processed. %s" % ( (idx + 1), image_path ) )
image_file = image_path.split("/")[-1]
cv2.imwrite( os.path.join( output_folder, image_file ) , image_np )
if __name__ == '__main__':
if len(sys.argv) < 2:
print(__doc__)
sys.exit(-1)
label_file = sys.argv[1]
output_folder = None if len(sys.argv) < 3 else sys.argv[2]
detect_label_images(label_file, output_folder) | [
"[email protected]"
] | |
66ee42bf083364ea3975225cfe14efbc76c1c287 | 8760f182049d4caf554c02b935684f56f6a0b39a | /boar/facebook_connect/migrations/0002_profile_onetoone_to_user.py | ed79636574a8ae85a20dfee1a85138d28e7f7b15 | [
"BSD-3-Clause"
] | permissive | boar/boar | c674bc65623ee361af31c7569dd16c6eb8da3b03 | 6772ad31ee5bb910e56e650cc201a476adf216bc | refs/heads/master | 2020-06-09T06:59:31.658154 | 2012-02-28T19:28:58 | 2012-02-28T19:28:58 | 1,734,103 | 1 | 2 | null | null | null | null | UTF-8 | Python | false | false | 4,352 | py |
from south.db import db
from django.db import models
from boar.facebook_connect.models import *
class Migration:
def forwards(self, orm):
# Changing field 'FacebookProfile.user'
# (to signature: django.db.models.fields.related.OneToOneField(to=orm['auth.User'], unique=True))
db.alter_column('facebook_connect_facebookprofile', 'user_id', orm['facebook_connect.facebookprofile:user'])
# Creating unique_together for [user] on FacebookProfile.
db.create_unique('facebook_connect_facebookprofile', ['user_id'])
def backwards(self, orm):
# Deleting unique_together for [user] on FacebookProfile.
db.delete_unique('facebook_connect_facebookprofile', ['user_id'])
# Changing field 'FacebookProfile.user'
# (to signature: django.db.models.fields.related.ForeignKey(to=orm['auth.User']))
db.alter_column('facebook_connect_facebookprofile', 'user_id', orm['facebook_connect.facebookprofile:user'])
models = {
'auth.group': {
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '80', 'unique': 'True'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'blank': 'True'})
},
'auth.permission': {
'Meta': {'unique_together': "(('content_type', 'codename'),)"},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'max_length': '30', 'unique': 'True'})
},
'contenttypes.contenttype': {
'Meta': {'unique_together': "(('app_label', 'model'),)", 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'facebook_connect.facebookprofile': {
'Meta': {'unique_together': "(('user', 'uid'),)"},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'uid': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['auth.User']", 'unique': 'True'})
}
}
complete_apps = ['facebook_connect']
| [
"[email protected]"
] | |
12f6cd8c0d13ddb5246553b8acd29a2595a7a282 | 82ca64c6a819f3e2cb41057f2df9f758cedee28a | /BlockChain/venv/bin/python-config | ae068f06249b1f99eb784109dbf07bbc241050d5 | [] | no_license | seanxxxx/coinx | 619a18f9b2d7f83076083055bfccf0c5e404f665 | eb1a7ed430c546cf02ddcc79f436200b218d5244 | refs/heads/master | 2023-01-28T03:09:10.358463 | 2018-09-07T07:49:19 | 2018-09-07T07:49:19 | 146,564,986 | 0 | 1 | null | 2022-12-20T14:20:06 | 2018-08-29T07:52:37 | Python | UTF-8 | Python | false | false | 2,363 | #!/Users/xuanxu/PycharmProjects/BlockChain/venv/bin/python
import sys
import getopt
import sysconfig
valid_opts = ['prefix', 'exec-prefix', 'includes', 'libs', 'cflags',
'ldflags', 'help']
if sys.version_info >= (3, 2):
valid_opts.insert(-1, 'extension-suffix')
valid_opts.append('abiflags')
if sys.version_info >= (3, 3):
valid_opts.append('configdir')
def exit_with_usage(code=1):
sys.stderr.write("Usage: {0} [{1}]\n".format(
sys.argv[0], '|'.join('--'+opt for opt in valid_opts)))
sys.exit(code)
try:
opts, args = getopt.getopt(sys.argv[1:], '', valid_opts)
except getopt.error:
exit_with_usage()
if not opts:
exit_with_usage()
pyver = sysconfig.get_config_var('VERSION')
getvar = sysconfig.get_config_var
opt_flags = [flag for (flag, val) in opts]
if '--help' in opt_flags:
exit_with_usage(code=0)
for opt in opt_flags:
if opt == '--prefix':
print(sysconfig.get_config_var('prefix'))
elif opt == '--exec-prefix':
print(sysconfig.get_config_var('exec_prefix'))
elif opt in ('--includes', '--cflags'):
flags = ['-I' + sysconfig.get_path('include'),
'-I' + sysconfig.get_path('platinclude')]
if opt == '--cflags':
flags.extend(getvar('CFLAGS').split())
print(' '.join(flags))
elif opt in ('--libs', '--ldflags'):
abiflags = getattr(sys, 'abiflags', '')
libs = ['-lpython' + pyver + abiflags]
libs += getvar('LIBS').split()
libs += getvar('SYSLIBS').split()
# add the prefix/lib/pythonX.Y/config dir, but only if there is no
# shared library in prefix/lib/.
if opt == '--ldflags':
if not getvar('Py_ENABLE_SHARED'):
libs.insert(0, '-L' + getvar('LIBPL'))
if not getvar('PYTHONFRAMEWORK'):
libs.extend(getvar('LINKFORSHARED').split())
print(' '.join(libs))
elif opt == '--extension-suffix':
ext_suffix = sysconfig.get_config_var('EXT_SUFFIX')
if ext_suffix is None:
ext_suffix = sysconfig.get_config_var('SO')
print(ext_suffix)
elif opt == '--abiflags':
if not getattr(sys, 'abiflags', None):
exit_with_usage()
print(sys.abiflags)
elif opt == '--configdir':
print(sysconfig.get_config_var('LIBPL'))
| [
"[email protected]"
] | ||
cd50fc8b715db9544fca346be9d2f59be5483792 | 52b5773617a1b972a905de4d692540d26ff74926 | /.history/FrogRiver_20200723134656.py | b53537eb14ce4472bd411f219e101697e4edb59b | [] | no_license | MaryanneNjeri/pythonModules | 56f54bf098ae58ea069bf33f11ae94fa8eedcabc | f4e56b1e4dda2349267af634a46f6b9df6686020 | refs/heads/master | 2022-12-16T02:59:19.896129 | 2020-09-11T12:05:22 | 2020-09-11T12:05:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 225 | py | def Frog(X,A):
# given x where the frog wants to go
# find earliest time
# once you get the second that has that position
# return the second
    pos = set()
    # assumes every A[t] is a position in 1..X (Codility "FrogRiverOne"-style input)
    for second, position in enumerate(A):
        pos.add(position)
        if len(pos) == X:
            return second
    return -1
print(Frog(5,[1,3,1,4,2,3,5,4]))
| [
"[email protected]"
] | |
b37888fa6385baeb41115a66b55bec5886b14fbc | 387ad3775fad21d2d8ffa3c84683d9205b6e697d | /testsuite/trunk/el/el_test_036.py | cfab23e5ff03600c188c22c0c83bb31985905443 | [] | no_license | kodiyalashetty/test_iot | 916088ceecffc17d2b6a78d49f7ea0bbd0a6d0b7 | 0ae3c2ea6081778e1005c40a9a3f6d4404a08797 | refs/heads/master | 2020-03-22T11:53:21.204497 | 2018-03-09T01:43:41 | 2018-03-09T01:43:41 | 140,002,491 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,025 | py | #!/usr/bin/env python
"""
(C) Copyright IBM Corp. 2008
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. This
file and program are licensed under a BSD style license. See
the Copying file included with the OpenHPI distribution for
full licensing terms.
Authors:
Jayashree Padmanabhan <[email protected]>
"""
import unittest
from openhpi import *
class TestSequence(unittest.TestCase):
"""
runTest : EL test
*
* This test verifies the failure of oh_el_map_from_file when el == None
*
* Return value: 0 on success, 1 on failure
"""
def runTest(self):
el = oh_el()
retc = None
# test failure of oh_el_map_from_file with el==None
el = None
retc = oh_el_map_from_file(el, "./elTest.data")
self.assertEqual (retc == SA_OK,False)
if __name__=='__main__':
unittest.main()
| [
"suntrupth@a44bbd40-eb13-0410-a9b2-f80f2f72fa26"
] | suntrupth@a44bbd40-eb13-0410-a9b2-f80f2f72fa26 |
6f49e68819abe8b1d485500c72678faf77327817 | 146012dda21ab72badad6daa8f98e6b26fedb128 | /08day/04-练习求和.py | c539322bdecb93e196c838806f2fc360f0cb12e3 | [] | no_license | fengshuai1/1805 | 41786c3561beca580ba82d9e9d4347571e38e198 | 8dc3e6605cc1d6f91685ae45bfebfc062f0aa489 | refs/heads/master | 2020-03-19T07:41:40.608389 | 2018-06-28T01:45:43 | 2018-06-28T01:45:43 | 136,140,329 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 96 | py | c = 0
m = 0
while c <= 100:
print("当前数字:%d"%c)
    m = m + c
    c+=1
print("求和是%d"%m)
| [
"[email protected]"
] | |
4b5be1fb84187f4d83d1e07885657d02b7a120f5 | 30d1b89b67d48efdacce5bceeee2c734bee2b509 | /manual_translation/devel/lib/python2.7/dist-packages/mavros_msgs/msg/_Mavlink.py | 2d4e562e868c5dec2e71bd13bbbde54c744bcc04 | [] | no_license | ParthGaneriwala/uppaal2ros | db4a6b20c78e423511e565477a2461942c2adceb | f88b2b860b0b970b61110a323d0397352785c9e2 | refs/heads/main | 2023-02-20T19:36:22.406515 | 2021-01-28T18:58:44 | 2021-01-28T18:58:44 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 100 | py | /home/adi/ardu_ws/devel/.private/mavros_msgs/lib/python2.7/dist-packages/mavros_msgs/msg/_Mavlink.py | [
"[email protected]"
] | |
f6c327232f55a5253a539568cc9c8d10d656384d | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02686/s642611525.py | 914bb9607791cee5d353d156d9afb343faf395b3 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 981 | py | def main():
N = int(input())
up_lines = []
down_lines = []
for i in range(N):
s = input()
height = 0
bottom = 0
for c in s:
if c == "(":
height += 1
else:
height -= 1
bottom = min(bottom, height)
if height > 0:
up_lines.append((bottom, height))
else:
down_lines.append((bottom-height, -height))
up_lines.sort(reverse=True, key=lambda line: line[0])
down_lines.sort(reverse=True, key=lambda line: line[0])
left = 0
for bottom, height in up_lines:
if left + bottom < 0:
print("No")
return
left += height
right = 0
for bottom, height in down_lines:
if right + bottom < 0:
print("No")
return
right += height
if left == right:
print("Yes")
else:
print("No")
if __name__ == "__main__":
main()
| [
"[email protected]"
] | |
c19012af2e5fe52651cc00b9775abc1d3e4e6ea1 | a71d5838e292e2c0c7371f7fc7870c7018820ae1 | /day03/03_pie.py | 71c8ec39a03c52234f30d2660394d2f3d37a995f | [] | no_license | skywalkerqwer/DataScience | be91541c3da383d15ee52d0101d2dbb0289c2fde | 4cfd42f3a9795e295393cdb045852d46e99b6e59 | refs/heads/master | 2020-06-17T11:41:40.113864 | 2019-07-15T09:49:40 | 2019-07-15T09:49:40 | 195,913,553 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 449 | py | """
Draw a pie chart
"""
import numpy as np
import matplotlib.pyplot as mp
labels = ['Python', 'JavaScript', 'C++', 'Java', 'PHP']
values = [26, 17, 21, 29, 11]
spaces = [0.05, 0.01, 0.01, 0.01, 0.01]
colors = ['dodgerblue', 'orangered', 'limegreen', 'violet', 'gold']
mp.figure('Pie Chart', facecolor='lightgray')
mp.title('Languages PR')
mp.pie(values, spaces, labels, colors, '%.1f%%', shadow=True, startangle=0, radius=1)
mp.legend()
mp.show()
| [
"[email protected]"
] | |
0da39b2b6595f0a25f70e3735197ce8c382da45b | c7522a46908dfa0556ed6e2fe584fd7124ee5cdc | /ApplicationUsers/views.py | 80dd9c405729e423ad243becbd6d5c57ca1b5930 | [] | no_license | stheartsachu/Eventlee | 461cf35961a7f294229d6c611e58a09d9f4e1eb5 | 6b67dfc873203f1322c16664923ffe5a760d50ed | refs/heads/master | 2022-11-13T14:48:39.097718 | 2020-06-30T04:54:45 | 2020-06-30T04:54:45 | 276,000,638 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,806 | py | from django.shortcuts import render,HttpResponse,redirect,HttpResponseRedirect
from ApplicationUsers.form import ApplicationuserForm
# Create your views here.
from ApplicationUsers.models import users
def home(request):
return render(request,"index.html")
def Contact(request):
return render(request,"contact.html")
def gallery(request):
return render(request,"gallery.html")
def signup(request):
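    # Minimal sign-up flow: bind the posted data, copy the submitted fields onto the
    # user record, require the two password fields to match, then report the result.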
if request.method == 'POST':
        form = ApplicationuserForm(request.POST)
        # ModelForm.save() raises ValueError if the bound data never validated,
        # so check is_valid() first and fall back to the registration page.
        if not form.is_valid():
            return render(request, 'registration.html')
        f = form.save(commit=False)
f.first_name = request.POST['fn']
f.last_name = request.POST['ln']
f.email = request.POST['email']
if request.POST['p1'] == request.POST['p2']:
f.password = request.POST['p2']
else:
return HttpResponse("<h1> Password and Confirm password is not same</h1>")
f.status = True
f.save()
return HttpResponse("User is created sucessfully now, can login to website")
return render(request, 'registration.html')
def login(request):
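    # Looks the user up by e-mail and compares the stored (plain-text) password;
    # on success the e-mail and an Authentication flag are kept in the session.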
if request.method == "POST":
un = request.POST["email"]
up = request.POST["password"]
try:
data = users.objects.get(email=un)
        except users.DoesNotExist:
return render(request, "login.html", {'emailerror': True})
dp = data.password
active = data.status
if (active == False):
return render(request, "login.html", {'activeerror': True})
else:
if (dp == up):
request.session['emailid'] = un
request.session['Authentication'] = True
return HttpResponse("You are sucessfullly login")
else:
return render(request, "login.html", {'passworderror': True})
return render(request, "login.html") | [
"[email protected]"
] | |
7bed90a14fc2ce416d14e56c5bf265e8b646487f | 7d3b096f803d1a47ad71a5c8aab30ba3aa67828c | /chibi_file/__init__.py | fe22184683cfdc5c75ca908282fad7a086a9d2bc | [] | no_license | dem4ply/chibi_file | 462244dac712d88915f2b931c5f0822f6d1fa937 | d27cef794512014b1602486edd0235052b38087a | refs/heads/master | 2020-12-03T05:09:15.825690 | 2017-08-23T09:36:57 | 2017-08-23T09:36:57 | 95,737,905 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,835 | py | import mmap
import os
import shutil
def current_dir():
return os.getcwd()
def inflate_dir( src ):
if '~' in src:
return os.path.expanduser( src )
else:
return os.path.abspath( src )
def is_dir( src ):
return os.path.isdir( src )
def is_file( src ):
return os.path.isfile( src )
def ls( src=None ):
if src is None:
src = current_dir()
return ( name for name in os.listdir( src ) )
def ls_only_dir( src=None ):
return ( name for name in ls( src ) if is_dir( name ) )
def join( *patch ):
return os.path.join( *patch )
def exists( file_name ):
return os.path.exists( file_name )
def copy( source, dest ):
shutil.copy( source, dest )
class Chibi_file:
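    # Thin wrapper around a file path: it keeps an mmap of the current contents,
    # so substring checks ("x" in f) and find() do not re-read the file from disk.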
def __init__( self, file_name ):
self._file_name = file_name
if not self.exists:
self.touch()
self.reread()
@property
def file_name( self ):
return self._file_name
def __del__( self ):
self._file_content.close()
def find( self, string_to_find ):
if isinstance( string_to_find, str ):
string_to_find = string_to_find.encode()
return self._file_content.find( string_to_find )
def reread( self ):
with open( self._file_name, 'r' ) as f:
self._file_content = mmap.mmap( f.fileno(), 0,
prot=mmap.PROT_READ )
def __contains__( self, string ):
return self.find( string ) >= 0
def append( self, string ):
with open( self._file_name, 'a' ) as f:
f.write( string )
self.reread()
@property
def exists( self ):
return exists( self.file_name )
def touch( self ):
open( self.file_name, 'a' ).close()
def copy( self, dest ):
copy( self.file_name, dest )
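# Example usage (illustrative):
#   notes = Chibi_file('notes.txt')   # touches the file if it does not exist yet
#   notes.append('hello world\n')
#   'hello' in notes                  # substring search through the mmap'd contents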
| [
"[email protected]"
] | |
b7e89b7513c6151d39dc8adad4fee33e8afcf8f1 | 09cc8367edb92c2f02a0cc1c95a8290ff0f52646 | /ipypublish_plugins/example_new_plugin.py | 2fe177802ec9fd3259ca9ac9ac002ef160f3c1f2 | [
"BSD-3-Clause"
] | permissive | annefou/ipypublish | 7e80153316ab572a348afe26d309c2a9ee0fb52b | 917c7f2e84be006605de1cf8851ec13d1a163b24 | refs/heads/master | 2020-04-13T16:08:59.845707 | 2018-07-30T18:26:12 | 2018-07-30T18:26:12 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,612 | py | """html in standard nbconvert format, but with
- a table of contents
- toggle buttons for showing/hiding code & output cells
- converts or removes (if no converter) latex tags (like \cite{abc}, \ref{})
"""
from ipypublish.filters.replace_string import replace_string
from ipypublish.html.create_tpl import create_tpl
from ipypublish.html.ipypublish import latex_doc
# from ipypublish.html.standard import inout_prompt
from ipypublish.html.ipypublish import toc_sidebar
from ipypublish.html.ipypublish import toggle_buttons
from ipypublish.html.standard import content
from ipypublish.html.standard import content_tagging
from ipypublish.html.standard import document
from ipypublish.html.standard import mathjax
from ipypublish.html.standard import widgets
from ipypublish.preprocessors.latex_doc_captions import LatexCaptions
from ipypublish.preprocessors.latex_doc_html import LatexDocHTML
from ipypublish.preprocessors.latex_doc_links import LatexDocLinks
from ipypublish.preprocessors.latextags_to_html import LatexTagsToHTML
from ipypublish.preprocessors.split_outputs import SplitOutputs
oformat = 'HTML'
config = {'TemplateExporter.filters': {'replace_string': replace_string},
'Exporter.filters': {'replace_string': replace_string},
'Exporter.preprocessors': [SplitOutputs, LatexDocLinks, LatexDocHTML, LatexTagsToHTML, LatexCaptions]}
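# The preprocessors listed above run in order on the notebook before the template below is applied.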
template = create_tpl([
document.tpl_dict,
content.tpl_dict, content_tagging.tpl_dict,
mathjax.tpl_dict, widgets.tpl_dict,
# inout_prompt.tpl_dict,
toggle_buttons.tpl_dict, toc_sidebar.tpl_dict,
latex_doc.tpl_dict
])
| [
"[email protected]"
] | |
6846461a15b491de3c42e18d6aa4d646d87bad7a | 4bd5e9b67d98bfcc9611bd8b774c9ab9f4f4d446 | /Python基础笔记/13/代码/3.多继承.py | 1693fc8f7b66401a95f44f287cfcb7d4c149f841 | [] | no_license | zhenguo96/test1 | fe21510aea7feb674e52fd7a86d4177666f841c5 | 0d8de7e73e7e635d26462a0bc53c773d999498be | refs/heads/master | 2020-05-03T13:09:53.592103 | 2019-04-06T07:08:47 | 2019-04-06T07:08:47 | 178,646,627 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 623 | py | # # 多继承
# class Base1:
# def __init__(self,name):
# self.name = name
# def t1(self):
# print("Base1")
#
# class Base2:
# def __init__(self,name):
# self.name = name
# def t2(self):
# print("Base2")
#
# class Base3:
# def __init__(self, name):
# self.name = name
# def t3(self):
# print("Base3")
#
# # A subclass that inherits from several bases
# class Child(Base1,Base2,Base3):
# pass
# child = Child('tom')
# print(child.__dict__)
# child.t1()
# child.t2()
# # Inheritance order (method resolution order)
# print(Child.mro())
# print(Child.__mro__)
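# # With this hierarchy the MRO is: Child, Base1, Base2, Base3, object.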
#
| [
"[email protected]"
] | |
c02698bcbb5677d5aa1cdf687d66869a34eea59c | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02742/s024664971.py | 37251941a04a71608f69d756b2f8eb6bf24e8a52 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 259 | py | H, W = [int(_) for _ in input().split()]
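# Checkerboard counting: for H, W >= 2 the answer is ceil(H*W/2);
# a board that is only one square wide or tall yields 1.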
if H == 1 or W == 1:
ans = 1
else:
ans = (H // 2) * (W // 2) * 2
if H % 2 == 1:
ans += W // 2
if W % 2 == 1:
ans += H // 2
if H % 2 == 1 and W % 2 == 1:
ans += 1
print(ans)
| [
"[email protected]"
] | |
d6e8faee78b555a964bcdabf9d7b434fba09a3c0 | b96f1bad8a74d31d8ff79bc955813bfcd17d7b26 | /24. Swap Nodes in Pairs.py | 75e6d9a0451fd14aadd62f665ddbd922cfa44910 | [] | no_license | brianhu0716/LeetCode-Solution | e7177af15e84e833ce8ab05027683ed4ac489643 | 158a4359c90b723545b22c4898047274cc1b80a6 | refs/heads/main | 2023-07-11T05:29:56.783795 | 2021-08-28T12:53:14 | 2021-08-28T12:53:14 | 374,991,658 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 444 | py | # -*- coding: utf-8 -*-
"""
Created on Sun Apr 11 15:06:14 2021
@author: Brian
"""
'''
Just swap the values of each adjacent pair of nodes.
'''
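# Example: 1 -> 2 -> 3 -> 4 becomes 2 -> 1 -> 4 -> 3; the links are untouched,
# only the stored values are exchanged pair by pair. (ListNode is supplied by the judge.)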
class Solution:
def swapPairs(self, head: ListNode) -> ListNode:
        if not head or not head.next: return head
ptr = head
while ptr and ptr.next:
temp = ptr.val
ptr.val = ptr.next.val
ptr.next.val = temp
ptr = ptr.next.next
return head | [
"[email protected]"
] | |
7374ce7e683ccf1d4913b6f64fb04fb50b016df7 | 6c686d118e6d3072b3694c02c684a6619d4dd03e | /rsdns/tests/test_client.py | cb34bcfaef1aa74df689f00debfbff8959f697df | [
"Apache-2.0"
] | permissive | masthalter/reddwarf | 02e7b78e1e61178647fe8d98ab53eadfabe66e7f | 72cf41d573cd7c35a222d9b7a8bfaad937f17754 | HEAD | 2016-11-08T16:12:16.783829 | 2012-04-26T22:26:56 | 2012-04-26T22:26:56 | 2,387,563 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,033 | py | import httplib2
import mox
import unittest
from eventlet import pools
from novaclient.client import HTTPClient
from novaclient import exceptions
from rsdns.client.dns_client import DNSaasClient
ACCOUNT_ID = 1155
USERNAME = "test_user"
API_KEY="key"
AUTH_URL="urly"
MANAGEMENT_BASE_URL="mgmter"
class FakeResponse(object):
def __init__(self, status):
self.status = status
class WhenDNSaasClientConnectsSuccessfully(unittest.TestCase):
def setUp(self):
self.mox = mox.Mox()
def tearDown(self):
self.mox.VerifyAll()
def fake_auth(self, *args, **kwargs):
self.auth_called = True
def create_mock_client(self, fake_request_method):
"""
Creates a mocked DNSaasClient object, which calls "fake_request_method"
instead of httplib2.request.
"""
class FakeHttpLib2(object):
pass
FakeHttpLib2.request = fake_request_method
mock_client = self.mox.CreateMock(DNSaasClient)
mock_client.http_pool = pools.Pool()
mock_client.http_pool.create = FakeHttpLib2
mock_client.auth_token = 'token'
return mock_client
def test_make_request(self):
kwargs = {
'headers': {},
'body': "{}"
}
def fake_request(self, *args, **kwargs):
return FakeResponse(200), '{"hi":"hello"}'
mock_client = self.create_mock_client(fake_request)
resp, body = DNSaasClient.request(mock_client, **kwargs)
self.assertEqual(200, resp.status)
self.assertEqual({"hi":"hello"}, body)
def test_make_request_with_old_token(self):
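        # Every attempt returns 401, so after its recorded re-authentication
        # attempts are used up the client should give up with Unauthorized.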
kwargs = {
'headers': {},
'body': '{"message":"Invalid authentication token. Please renew."}'
}
def fake_request(self, *args, **kwargs):
return FakeResponse(401), \
'{"message":"Invalid authentication token. Please renew."}'
mock_client = self.create_mock_client(fake_request)
mock_client.authenticate()
mock_client.authenticate()
mock_client.authenticate()
self.mox.ReplayAll()
self.assertRaises(exceptions.Unauthorized, DNSaasClient.request,
mock_client, **kwargs)
def test_make_request_with_old_token_2(self):
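        # The first call returns 401 (stale token); the client is expected to
        # re-authenticate once and then succeed on the retried request.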
kwargs = {
'headers': {},
'body': "{}"
}
self.count = 0
def fake_request(_self, *args, **kwargs):
self.count += 1
if self.count > 1:
return FakeResponse(200), '{"hi":"hello"}'
else:
return FakeResponse(401), \
'{"message":"Invalid authentication token. ' \
'Please renew."}'
mock_client = self.create_mock_client(fake_request)
mock_client.authenticate()
self.mox.ReplayAll()
resp, body = DNSaasClient.request(mock_client, **kwargs)
self.assertEqual(200, resp.status)
self.assertEqual({"hi":"hello"}, body)
| [
"[email protected]"
] |