| Column | dtype | Range / classes |
|---|---|---|
| blob_id | string | lengths 40–40 |
| directory_id | string | lengths 40–40 |
| path | string | lengths 3–616 |
| content_id | string | lengths 40–40 |
| detected_licenses | list | lengths 0–112 |
| license_type | string | 2 classes |
| repo_name | string | lengths 5–115 |
| snapshot_id | string | lengths 40–40 |
| revision_id | string | lengths 40–40 |
| branch_name | string | 777 classes |
| visit_date | timestamp[us] | 2015-08-06 10:31:46 to 2023-09-06 10:44:38 |
| revision_date | timestamp[us] | 1970-01-01 02:38:32 to 2037-05-03 13:00:00 |
| committer_date | timestamp[us] | 1970-01-01 02:38:32 to 2023-09-06 01:08:06 |
| github_id | int64 | 4.92k to 681M (⌀ = nullable) |
| star_events_count | int64 | 0 to 209k |
| fork_events_count | int64 | 0 to 110k |
| gha_license_id | string | 22 classes |
| gha_event_created_at | timestamp[us] | 2012-06-04 01:52:49 to 2023-09-14 21:59:50 (⌀ = nullable) |
| gha_created_at | timestamp[us] | 2008-05-22 07:58:19 to 2023-08-21 12:35:19 (⌀ = nullable) |
| gha_language | string | 149 classes |
| src_encoding | string | 26 classes |
| language | string | 1 class |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 3 to 10.2M |
| extension | string | 188 classes |
| content | string | lengths 3–10.2M |
| authors | list | lengths 1–1 |
| author_id | string | lengths 1–132 |
cc693a128105938fd7647af5527a511855e80f4c | abcfd07772ce75f34e51592189c29cf84d1a3611 | /flask/lib/python3.6/site-packages/whoosh/util/times.py | aded78f78b30e1d657ebea53c144a87c93bb6d1c | []
| no_license | yuhaihui3435/p_mc | 66d89bcccf214e53729b26a0f80ddee8797e9e3e | 3039a5c691b649fc88e941a2553b1a7e0aac2a0a | refs/heads/master | 2021-06-28T18:52:00.111385 | 2017-09-15T00:26:02 | 2017-09-15T00:26:58 | 103,524,396 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 16,948 | py | # Copyright 2010 Matt Chaput. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY MATT CHAPUT ``AS IS'' AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
# EVENT SHALL MATT CHAPUT OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
# OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# The views and conclusions contained in the software and documentation are
# those of the authors and should not be interpreted as representing official
# policies, either expressed or implied, of Matt Chaput.
import calendar
import copy
from datetime import date, datetime, timedelta
from whoosh.compat import iteritems
class TimeError(Exception):
pass
def relative_days(current_wday, wday, dir):
"""Returns the number of days (positive or negative) to the "next" or
"last" of a certain weekday. ``current_wday`` and ``wday`` are numbers,
i.e. 0 = monday, 1 = tuesday, 2 = wednesday, etc.
>>> # Get the number of days to the next tuesday, if today is Sunday
>>> relative_days(6, 1, 1)
2
:param current_wday: the number of the current weekday.
:param wday: the target weekday.
:param dir: -1 for the "last" (past) weekday, 1 for the "next" (future)
weekday.
"""
if current_wday == wday:
return 7 * dir
if dir == 1:
return (wday + 7 - current_wday) % 7
else:
return (current_wday + 7 - wday) % 7 * -1
def timedelta_to_usecs(td):
total = td.days * 86400000000 # Microseconds in a day
total += td.seconds * 1000000 # Microseconds in a second
total += td.microseconds
return total
def datetime_to_long(dt):
"""Converts a datetime object to a long integer representing the number
of microseconds since ``datetime.min``.
"""
return timedelta_to_usecs(dt.replace(tzinfo=None) - dt.min)
def long_to_datetime(x):
"""Converts a long integer representing the number of microseconds since
``datetime.min`` to a datetime object.
"""
days = x // 86400000000 # Microseconds in a day
x -= days * 86400000000
seconds = x // 1000000 # Microseconds in a second
x -= seconds * 1000000
return datetime.min + timedelta(days=days, seconds=seconds, microseconds=x)
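# Illustrative round trip (an added example, not part of the original
# module): the two conversions above are lossless inverses.
# >>> dt = datetime(2010, 2, 14, 12, 30, 45, 500)
# >>> long_to_datetime(datetime_to_long(dt)) == dt
# True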
# Ambiguous datetime object
class adatetime(object):
"""An "ambiguous" datetime object. This object acts like a
``datetime.datetime`` object but can have any of its attributes set to
None, meaning unspecified.
"""
units = frozenset(("year", "month", "day", "hour", "minute", "second",
"microsecond"))
def __init__(self, year=None, month=None, day=None, hour=None, minute=None,
second=None, microsecond=None):
if isinstance(year, datetime):
dt = year
self.year, self.month, self.day = dt.year, dt.month, dt.day
self.hour, self.minute, self.second = dt.hour, dt.minute, dt.second
self.microsecond = dt.microsecond
else:
if month is not None and (month < 1 or month > 12):
raise TimeError("month must be in 1..12")
if day is not None and day < 1:
raise TimeError("day must be at least 1")
if (year is not None and month is not None and day is not None
and day > calendar.monthrange(year, month)[1]):
raise TimeError("day is out of range for month")
if hour is not None and (hour < 0 or hour > 23):
raise TimeError("hour must be in 0..23")
if minute is not None and (minute < 0 or minute > 59):
raise TimeError("minute must be in 0..59")
if second is not None and (second < 0 or second > 59):
raise TimeError("second must be in 0..59")
if microsecond is not None and (microsecond < 0
or microsecond > 999999):
raise TimeError("microsecond must be in 0..999999")
self.year, self.month, self.day = year, month, day
self.hour, self.minute, self.second = hour, minute, second
self.microsecond = microsecond
def __eq__(self, other):
if not other.__class__ is self.__class__:
if not is_ambiguous(self) and isinstance(other, datetime):
return fix(self) == other
else:
return False
return all(getattr(self, unit) == getattr(other, unit)
for unit in self.units)
def __repr__(self):
return "%s%r" % (self.__class__.__name__, self.tuple())
def tuple(self):
"""Returns the attributes of the ``adatetime`` object as a tuple of
``(year, month, day, hour, minute, second, microsecond)``.
"""
return (self.year, self.month, self.day, self.hour, self.minute,
self.second, self.microsecond)
def date(self):
return date(self.year, self.month, self.day)
def copy(self):
return adatetime(year=self.year, month=self.month, day=self.day,
hour=self.hour, minute=self.minute, second=self.second,
microsecond=self.microsecond)
def replace(self, **kwargs):
"""Returns a copy of this object with the attributes given as keyword
arguments replaced.
>>> adt = adatetime(year=2009, month=10, day=31)
>>> adt.replace(year=2010)
(2010, 10, 31, None, None, None, None)
"""
newadatetime = self.copy()
for key, value in iteritems(kwargs):
if key in self.units:
setattr(newadatetime, key, value)
else:
raise KeyError("Unknown argument %r" % key)
return newadatetime
def floor(self):
"""Returns a ``datetime`` version of this object with all unspecified
(None) attributes replaced by their lowest values.
This method raises an error if the ``adatetime`` object has no year.
>>> adt = adatetime(year=2009, month=5)
>>> adt.floor()
datetime.datetime(2009, 5, 1, 0, 0, 0, 0)
"""
y, m, d, h, mn, s, ms = (self.year, self.month, self.day, self.hour,
self.minute, self.second, self.microsecond)
if y is None:
raise ValueError("Date has no year")
if m is None:
m = 1
if d is None:
d = 1
if h is None:
h = 0
if mn is None:
mn = 0
if s is None:
s = 0
if ms is None:
ms = 0
return datetime(y, m, d, h, mn, s, ms)
def ceil(self):
"""Returns a ``datetime`` version of this object with all unspecified
(None) attributes replaced by their highest values.
This method raises an error if the ``adatetime`` object has no year.
>>> adt = adatetime(year=2009, month=5)
>>> adt.ceil()
datetime.datetime(2009, 5, 31, 23, 59, 59, 999999)
"""
y, m, d, h, mn, s, ms = (self.year, self.month, self.day, self.hour,
self.minute, self.second, self.microsecond)
if y is None:
raise ValueError("Date has no year")
if m is None:
m = 12
if d is None:
d = calendar.monthrange(y, m)[1]
if h is None:
h = 23
if mn is None:
mn = 59
if s is None:
s = 59
if ms is None:
ms = 999999
return datetime(y, m, d, h, mn, s, ms)
def disambiguated(self, basedate):
"""Returns either a ``datetime`` or unambiguous ``timespan`` version
of this object.
Unless this ``adatetime`` object is fully specified down to the
microsecond, this method will return a timespan built from the "floor"
and "ceil" of this object.
This method raises an error if the ``adatetime`` object has no year.
>>> adt = adatetime(year=2009, month=10, day=31)
>>> adt.disambiguated(datetime.now())
timespan(datetime(2009, 10, 31, 0, 0, 0, 0), datetime(2009, 10, 31, 23, 59, 59, 999999))
"""
dt = self
if not is_ambiguous(dt):
return fix(dt)
return timespan(dt, dt).disambiguated(basedate)
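# Illustrative usage of adatetime (an added example): a value specified
# only down to the month resolves to a whole-month range.
# >>> adt = adatetime(year=2009, month=5)
# >>> adt.floor()
# datetime.datetime(2009, 5, 1, 0, 0)
# >>> adt.ceil()
# datetime.datetime(2009, 5, 31, 23, 59, 59, 999999)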
# Time span class
class timespan(object):
"""A span of time between two ``datetime`` or ``adatetime`` objects.
"""
def __init__(self, start, end):
"""
:param start: a ``datetime`` or ``adatetime`` object representing the
start of the time span.
:param end: a ``datetime`` or ``adatetime`` object representing the
end of the time span.
"""
if not isinstance(start, (datetime, adatetime)):
raise TimeError("%r is not a datetime object" % start)
if not isinstance(end, (datetime, adatetime)):
raise TimeError("%r is not a datetime object" % end)
self.start = copy.copy(start)
self.end = copy.copy(end)
def __eq__(self, other):
if not other.__class__ is self.__class__:
return False
return self.start == other.start and self.end == other.end
def __repr__(self):
return "%s(%r, %r)" % (self.__class__.__name__, self.start, self.end)
def disambiguated(self, basedate, debug=0):
"""Returns an unambiguous version of this object.
>>> start = adatetime(year=2009, month=2)
>>> end = adatetime(year=2009, month=10)
>>> ts = timespan(start, end)
>>> ts
timespan(adatetime(2009, 2, None, None, None, None, None), adatetime(2009, 10, None, None, None, None, None))
>>> ts.disambiguated(datetime.now())
timespan(datetime(2009, 2, 1, 0, 0, 0, 0), datetime(2009, 10, 31, 23, 59, 59, 999999))
"""
# - If year is in start but not end, use basedate.year for end
# -- If year is in start but not end, but startdate is > basedate,
# use "next <monthname>" to get end month/year
# - If year is in end but not start, copy year from end to start
# - Support "next february", "last april", etc.
start, end = copy.copy(self.start), copy.copy(self.end)
start_year_was_amb = start.year is None
end_year_was_amb = end.year is None
if has_no_date(start) and has_no_date(end):
# The start and end points are just times, so use the basedate
# for the date information.
by, bm, bd = basedate.year, basedate.month, basedate.day
start = start.replace(year=by, month=bm, day=bd)
end = end.replace(year=by, month=bm, day=bd)
else:
# If one side has a year and the other doesn't, the decision
# of what year to assign to the ambiguous side is kind of
# arbitrary. I've used a heuristic here based on how the range
# "reads", but it may only be reasonable in English. And maybe
# even just to me.
if start.year is None and end.year is None:
# No year on either side, use the basedate
start.year = end.year = basedate.year
elif start.year is None:
# No year in the start, use the year from the end
start.year = end.year
elif end.year is None:
end.year = max(start.year, basedate.year)
if start.year == end.year:
# Once again, if one side has a month and day but the other side
# doesn't, the disambiguation is arbitrary. Does "3 am to 5 am
# tomorrow" mean 3 AM today to 5 AM tomorrow, or 3am tomorrow to
# 5 am tomorrow? What I picked is similar to the year: if the
# end has a month+day and the start doesn't, copy the month+day
# from the end to the start UNLESS that would make the end come
# before the start on that day, in which case use the basedate
# instead. If the start has a month+day and the end doesn't, use
# the basedate.
start_dm = not (start.month is None and start.day is None)
end_dm = not (end.month is None and end.day is None)
if end_dm and not start_dm:
if start.floor().time() > end.ceil().time():
start.month = basedate.month
start.day = basedate.day
else:
start.month = end.month
start.day = end.day
elif start_dm and not end_dm:
end.month = basedate.month
end.day = basedate.day
if floor(start).date() > ceil(end).date():
# If the disambiguated dates are out of order:
# - If no start year was given, reduce the start year to put the
# start before the end
# - If no end year was given, increase the end year to put the end
# after the start
# - If a year was specified for both, just swap the start and end
if start_year_was_amb:
start.year = end.year - 1
elif end_year_was_amb:
end.year = start.year + 1
else:
start, end = end, start
start = floor(start)
end = ceil(end)
if start.date() == end.date() and start.time() > end.time():
# If the start and end are on the same day, but the start time
# is after the end time, move the end time to the next day
end += timedelta(days=1)
return timespan(start, end)
# Functions for working with datetime/adatetime objects
def floor(at):
if isinstance(at, datetime):
return at
return at.floor()
def ceil(at):
if isinstance(at, datetime):
return at
return at.ceil()
def fill_in(at, basedate, units=adatetime.units):
"""Returns a copy of ``at`` with any unspecified (None) units filled in
with values from ``basedate``.
"""
if isinstance(at, datetime):
return at
args = {}
for unit in units:
v = getattr(at, unit)
if v is None:
v = getattr(basedate, unit)
args[unit] = v
return fix(adatetime(**args))
def has_no_date(at):
"""Returns True if the given object is an ``adatetime`` where ``year``,
``month``, and ``day`` are all None.
"""
if isinstance(at, datetime):
return False
return at.year is None and at.month is None and at.day is None
def has_no_time(at):
"""Returns True if the given object is an ``adatetime`` where ``hour``,
``minute``, ``second`` and ``microsecond`` are all None.
"""
if isinstance(at, datetime):
return False
return (at.hour is None and at.minute is None and at.second is None
and at.microsecond is None)
def is_ambiguous(at):
"""Returns True if the given object is an ``adatetime`` with any of its
attributes equal to None.
"""
if isinstance(at, datetime):
return False
return any((getattr(at, attr) is None) for attr in adatetime.units)
def is_void(at):
"""Returns True if the given object is an ``adatetime`` with all of its
attributes equal to None.
"""
if isinstance(at, datetime):
return False
return all((getattr(at, attr) is None) for attr in adatetime.units)
def fix(at):
"""If the given object is an ``adatetime`` that is unambiguous (because
all its attributes are specified, that is, not equal to None), returns a
``datetime`` version of it. Otherwise returns the ``adatetime`` object
unchanged.
"""
if is_ambiguous(at) or isinstance(at, datetime):
return at
return datetime(year=at.year, month=at.month, day=at.day, hour=at.hour,
minute=at.minute, second=at.second,
microsecond=at.microsecond)
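# Illustrative (an added example): fill_in() copies unspecified units from
# a base date, and fix() then upgrades the unambiguous result to a datetime.
# >>> fill_in(adatetime(month=5, day=1), datetime(2009, 1, 15, 12, 0))
# datetime.datetime(2009, 5, 1, 12, 0)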
| [
"[email protected]"
]
| |
69e9bebc4513c00a473c70457e1a049832307ad5 | 8ebb138562884f01cae3d3ffaad9501a91e35611 | /dbCruiseKeywords/insertKeywordsAMT09.py | df5628bef408fcbba07deedb761444ed58a7b142 | []
| no_license | simonscmap/DBIngest | 7b92214034e90f8de88b06c17b48f83c769d8d35 | 9ae035cbf7453df375f0af5e920df3880a419107 | refs/heads/master | 2021-07-16T07:12:31.749027 | 2020-08-13T16:28:24 | 2020-08-13T16:28:24 | 200,295,674 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,128 | py | import sys
import pycmap
sys.path.append('../')
import insertFunctions as iF
import config_vault as cfgv
import pandas as pd
sys.path.append('../dbCatalog/')
import catalogFunctions as cF
"""-----------------------------"""
""" AMT09 CRUISE KEYWORDS"""
"""-----------------------------"""
cruise_name = 'AMT09'
server = 'Rainier'
rawFilePath = cfgv.rep_cruise_keywords_raw
rawFileName = 'AMT09.xlsx'
keyword_col = 'cruise_keywords'
############################
""" Reads in the keyword excel file"""
df = pd.read_excel(rawFilePath + rawFileName)
ID = cF.getCruiseID(cruise_name)
prov_df = cF.getLonghurstProv(cruise_name)
ocean_df = cF.getOceanName(cruise_name)
seasons_df = cF.getCruiseSeasons(cruise_name)
months_df = cF.getCruiseMonths(cruise_name)
years_df = cF.getCruiseYear(cruise_name)
details_df = cF.getCruiseDetails(cruise_name)
short_name_df = cF.getCruiseAssosiatedShortName(cruise_name)
# long_name_df = cF.getCruiseAssosiatedLongName(cruise_name)
short_name_syn_df = cF.getShortNameSynonyms(cruise_name)
dataset_name_df = cF.getCruiseAssosiatedDataset_Name(cruise_name)
df = cF.addDFtoKeywordDF(df, dataset_name_df)
df = cF.addDFtoKeywordDF(df, short_name_syn_df)
df = cF.addDFtoKeywordDF(df, prov_df)
df = cF.addDFtoKeywordDF(df, ocean_df)
df = cF.addDFtoKeywordDF(df, seasons_df)
df = cF.addDFtoKeywordDF(df, months_df)
df = cF.addDFtoKeywordDF(df, years_df)
df = cF.addDFtoKeywordDF(df, details_df)
df = cF.addDFtoKeywordDF(df, short_name_df)
# df = cF.addDFtoKeywordDF(df, long_name_df)
df = cF.removeDuplicates(df)
df = cF.stripWhitespace(df,keyword_col)
df = cF.removeAnyRedundantWord(df)
""" INSERTS INTO tblCruise_Keywords"""
cF.insertCruiseKeywords(ID,df,server)
| [
"[email protected]"
]
| |
00fb0b2202d07d72ab8075b038f6426190d4d82e | de01cb554c2292b0fbb79b4d5413a2f6414ea472 | /algorithms/Hard/1449.form-largest-integer-with-digits-that-add-up-to-target.py | fde6df1309dddc7154ccfbf41d760c6ba9bd1dbe | []
| no_license | h4hany/yeet-the-leet | 98292017eadd3dde98a079aafcd7648aa98701b4 | 563d779467ef5a7cc85cbe954eeaf3c1f5463313 | refs/heads/master | 2022-12-10T08:35:39.830260 | 2020-09-02T23:12:15 | 2020-09-02T23:12:15 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,868 | py | #
# @lc app=leetcode id=1449 lang=python3
#
# [1449] Form Largest Integer With Digits That Add up to Target
#
# https://leetcode.com/problems/form-largest-integer-with-digits-that-add-up-to-target/description/
#
# algorithms
# Hard (42.08%)
# Total Accepted: 6.5K
# Total Submissions: 15.5K
# Testcase Example: '[4,3,2,5,6,7,2,5,5]\n9'
#
# Given an array of integers cost and an integer target. Return the maximum
# integer you can paint under the following rules:
#
#
# The cost of painting a digit (i+1) is given by cost[i] (0 indexed).
# The total cost used must be equal to target.
# Integer does not have digits 0.
#
#
# Since the answer may be too large, return it as string.
#
# If there is no way to paint any integer given the condition, return "0".
#
#
# Example 1:
#
#
# Input: cost = [4,3,2,5,6,7,2,5,5], target = 9
# Output: "7772"
# Explanation: The cost to paint the digit '7' is 2, and the digit '2' is 3.
# Then cost("7772") = 2*3+ 3*1 = 9. You could also paint "977", but "7772" is
# the largest number.
# Digit cost
# 1 -> 4
# 2 -> 3
# 3 -> 2
# 4 -> 5
# 5 -> 6
# 6 -> 7
# 7 -> 2
# 8 -> 5
# 9 -> 5
#
#
# Example 2:
#
#
# Input: cost = [7,6,5,5,5,6,8,7,8], target = 12
# Output: "85"
# Explanation: The cost to paint the digit '8' is 7, and the digit '5' is 5.
# Then cost("85") = 7 + 5 = 12.
#
#
# Example 3:
#
#
# Input: cost = [2,4,6,2,4,6,4,4,4], target = 5
# Output: "0"
# Explanation: It's not possible to paint any integer with total cost equal to
# target.
#
#
# Example 4:
#
#
# Input: cost = [6,10,15,40,40,40,40,40,40], target = 47
# Output: "32211"
#
#
#
# Constraints:
#
#
# cost.length == 9
# 1 <= cost[i] <= 5000
# 1 <= target <= 5000
#
#
#
class Solution:
def largestNumber(self, cost: List[int], target: int) -> str:
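        # The original file ends at the empty stub above. Below is a sketch
        # of one standard approach (an assumption, not the author's code):
        # unbounded knapsack for the maximum digit count, then a greedy
        # rebuild from the largest digit down. Assumes LeetCode's implicit
        # `from typing import List`.
        dp = [0] + [-1] * target          # dp[t] = max digits for exact cost t
        for t in range(1, target + 1):
            for c in cost:
                if t >= c and dp[t - c] >= 0:
                    dp[t] = max(dp[t], dp[t - c] + 1)
        if dp[target] < 0:
            return "0"
        digits = []
        t = target
        for d in range(9, 0, -1):         # larger digits first keeps the number maximal
            c = cost[d - 1]
            while t >= c and dp[t - c] == dp[t] - 1:
                digits.append(str(d))
                t -= c
        return "".join(digits)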
| [
"[email protected]"
]
| |
42d77cdb15f7031c1d699412730a8035bd7e471a | 367d2670c75d385d122bca60b9f550ca5b3888c1 | /gem5/env/lib/python3.6/site-packages/kombu/asynchronous/http/__init__.py | e776977dd40d3fa99f91d5b31d93c25a7d36b580 | [
"BSD-3-Clause",
"LicenseRef-scancode-proprietary-license",
"LGPL-2.0-or-later",
"MIT"
]
| permissive | Anish-Saxena/aqua_rowhammer_mitigation | 4f060037d50fb17707338a6edcaa0ac33c39d559 | 3fef5b6aa80c006a4bd6ed4bedd726016142a81c | refs/heads/main | 2023-04-13T05:35:20.872581 | 2023-01-05T21:10:39 | 2023-01-05T21:10:39 | 519,395,072 | 4 | 3 | Unlicense | 2023-01-05T21:10:40 | 2022-07-30T02:03:02 | C++ | UTF-8 | Python | false | false | 591 | py | from kombu.asynchronous import get_event_loop
from .base import Request, Headers, Response
__all__ = ('Client', 'Headers', 'Response', 'Request')
def Client(hub=None, **kwargs):
"""Create new HTTP client."""
from .curl import CurlClient
return CurlClient(hub, **kwargs)
def get_client(hub=None, **kwargs):
"""Get or create HTTP client bound to the current event loop."""
hub = hub or get_event_loop()
try:
return hub._current_http_client
except AttributeError:
client = hub._current_http_client = Client(hub, **kwargs)
return client
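# Illustrative usage (an added example that relies only on the two helpers
# defined above):
#
# client = get_client()       # lazily created for the current event loop
# client is get_client()      # True -- cached on hub._current_http_client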
| [
"[email protected]"
]
| |
95a65891632e1c526dfe49cd5b082b05a23fb3a0 | d80173b86be50c7b8c8dec265bfe4e9b66575f7c | /objects.py | 80305cbd3829557b6a79312bc8f6e6372c6c0d8e | []
| no_license | Pk13055/bomberman | 11450bb673ab1ffbb827d9dddeac3583742ce7e5 | 5e4d4413f9572e520de5604174123393f4463e86 | refs/heads/master | 2021-01-19T06:02:39.774474 | 2017-10-20T14:08:16 | 2017-10-20T14:08:16 | 100,589,676 | 6 | 4 | null | 2017-10-20T14:08:17 | 2017-08-17T10:03:18 | Python | UTF-8 | Python | false | false | 3,723 | py | '''
contains the structure of each object
'''
import config
from config import x_fac, y_fac
import numpy as np
class Object:
'''# bombs, walls, bricks all will be of this type'''
def __init__(self, x, y, ch=config._empty):
'''# the x and y coords wrt top left of board'''
self._x = x
self._y = y
self.width = 4
self.height = 2
self.is_killable = False
self._ch = ch
self.structure = np.chararray((self.height, self.width))
self.structure[:, :] = self._ch
self._type = config.types[self._ch]
def get_type(self):
'''# returns whether "Bomber", "Enemy", etc'''
return self._type
def get_size(self):
'''# returns (height, width)'''
return self.structure.shape
def get_coords(self):
'''# returns (x, y)'''
return (self._x, self._y)
def update_location(self, board, new_x, new_y, init=False):
'''# update the location of the person'''
if board.draw_obj(type(self)(new_x, new_y)):
# if initial update, will not clear original
if not init:
board.clear_obj(self)
self._x, self._y = new_x, new_y
return True
return False
class Wall(Object):
'''# this is the repr of the wall object
it implements no methods and some data about each wall element'''
def __init__(self, n, m):
'''# preferred size = 2 x 4'''
super(Wall, self).__init__(n, m, config._wall)
self.height = int(m)
self.width = int(n)
def __repr__(self):
''' repr '''
for r in range(self.height):
print("\n")
for c in range(self.width):
try:
print(self.structure[r, c].decode(), end="")
except UnicodeDecodeError:
print(self.structure[r, c], end="")
return ""
class Bomb(Object):
'''# this class implements the bomb object'''
def __init__(self, x, y):
''' init '''
super(Bomb, self).__init__(x, y, config._bomb)
self.timer = 0
self.active = False
self.is_killable = True
self.structure[:, :] = np.matrix([['[', self._ch, self._ch, ']'],
['[', self._ch, self._ch, ']']])
self.blast_radius = [(x + 1 * x_fac, y), (x + 2 * x_fac, y),
(x - 1 * x_fac, y), (x - 2 * x_fac, y), (x,
y + 1 * y_fac), (x, y + 2 * y_fac),
(x, y - 1 * y_fac), (x, y - 2 * y_fac)]
self.owner = None
def detonate(self, time):
'''# begin detonating the bomb (happens one frame after)'''
self.active = True
self.timer = time
def countdown(self):
''' countdown the bomb when active '''
if self.active:
self.timer -= 1
self.structure[:, 1:3] = str(self.timer)
return True
if not self.timer:
self.structure[:, :] = config._expl
def __repr__(self):
''' repr '''
return "<Bomb (%d, %d) | Active : %s | %d frames left>" % \
(self._x, self._y, self.active, self.timer)
class Bricks(Object):
'''# this class implements the bricks Object'''
def __init__(self, x, y):
''' init '''
super(Bricks, self).__init__(x, y, config._bricks)
self.is_killable = True
self.structure[:, :] = self._ch
def __repr__(self):
''' repr '''
return "<Bricks (%d, %d)>" % (self._x, self._y)
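# Illustrative usage of the classes above (an added sketch, not original
# code; it assumes the game's real `config` module is importable so that
# x_fac, y_fac and the tile characters resolve):
#
# bomb = Bomb(8, 4)
# bomb.detonate(10)       # arm with a 10-frame fuse
# bomb.countdown()        # tick once per frame; the tile face shows the timer
# brick = Bricks(12, 4)   # a killable tile that a blast can clear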
| [
"[email protected]"
]
| |
07ddacbb56954526dfc54b49ec898b630b576e55 | 2d4b7280fac70fd922dc203f07d89241f3c21535 | /src/cloudify/aria_extension_cloudify/classic_modeling/policies.py | 238124b82d67385d4d632d119bc37bfe4460d753 | [
"Apache-2.0"
]
| permissive | tliron/aria-ng | 602f0cad18df0332e25be03cc834e7a42cb7c674 | 55cf7af3b0a8fe62d422dd687dd7da3849824524 | refs/heads/master | 2020-08-02T18:10:30.735677 | 2016-11-12T14:19:32 | 2016-11-14T18:23:57 | 73,556,794 | 0 | 2 | null | 2016-11-12T14:17:45 | 2016-11-12T14:17:45 | null | UTF-8 | Python | false | false | 676 | py | #
# Copyright (c) 2016 GigaSpaces Technologies Ltd. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
SCALING_POLICY_NAME = 'cloudify.policies.scaling'
| [
"[email protected]"
]
| |
80f796be803c6cbe9307785b3beaf103fdaf5177 | 52266a44e2aca241707984e3b138775681b3e95f | /一本册子/字符串.py | 739ef5576c270d031768b4e1d83d68f15064ac44 | []
| no_license | Mr-hongji/pythonNote | 91b1252711ce0b919fc365932276b89d85d4c16b | ff5eda0c8f63345de4d98cff8f0f7ab5254c77a6 | refs/heads/master | 2020-04-11T14:49:39.637983 | 2019-05-26T09:21:09 | 2019-05-26T09:21:09 | 161,869,676 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 641 | py | print 'hello'
print "I'm Shihongji"
'''
\被称作转译字符,除了用来表示引号,还有比如用
\\表示字符串中的\
\n表示字符串中的换行
'''
print 'I\'m a \"good\" people'
print 'I\'m a "good" people'
print '我是良民\\'
print '我是良民\n吗'
'''
作业
输出以下文字
1、He said, "I'm yours!"
2、\\_v_//
3、Stay hungry,
stay foolish.
---Steve Jobs
4、 *
***
****
***
*
'''
print 'He said, "I\'m yours!\"'
print "\\\\_v_//"
print "Stay hunngry,\nstay foolish.\n -- Steve Jobs"
print '*\n***\n****\n***\n*'
| [
"[email protected]"
]
| |
26f644c66a8b92892987b70efed6d22aee3270b8 | 6160586aa239eada16e735d40d57970dedbe1dfc | /modules/app_additional/app_custom/app_position_update_info.py | b2724f015a7037a4d90534964a519bb0702c5061 | []
| no_license | showgea/AIOT | 7f9ffcd49da54836714b3342232cdba330d11e6c | fe8275aba1c4b5402c7c2c2987509c0ecf49f330 | refs/heads/master | 2020-07-23T10:19:37.478456 | 2019-09-23T12:25:59 | 2019-09-23T12:25:59 | 207,525,184 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 945 | py | import requests
from config import readcfg
header_Gary = readcfg.header_Gary
header_Jenny = readcfg.header_Jenny
url = readcfg.url
def app_position_update_info(positionId, positionName=None, isDefault=None):
url_ = url + "/app/v1.0/lumi/app/position/update/info"
json_ = {
"positionId": positionId,
"positionName": positionName,
"isDefault": isDefault
}
list_ = ["positionId", "positionName", "isDefault"]
num = 0
for i in (positionId, positionName, isDefault):
if i is None:
json_.pop(list_[num])
num += 1
proxies = {'http': 'http://127.0.0.1:8888', 'https': 'http://127.0.0.1:8888'}
print("请求数据:%s" % json_)
r = requests.post(url=url_, json=json_, headers=header_Gary, proxies=proxies, verify=False)
return r
if __name__ == '__main__':
result_main = app_position_update_info("real2.615945282455937024")
print(result_main.text)
| [
"[email protected]"
]
| |
16e1c2a3227a5d0baee604734564e9d99490428f | bc441bb06b8948288f110af63feda4e798f30225 | /monitor_sdk/model/notify/operation_log_with_meta_pb2.pyi | 4eb4b81a00e7c4fbc85a45c47c2b95b1eeda8653 | [
"Apache-2.0"
]
| permissive | easyopsapis/easyops-api-python | 23204f8846a332c30f5f3ff627bf220940137b6b | adf6e3bad33fa6266b5fa0a449dd4ac42f8447d0 | refs/heads/master | 2020-06-26T23:38:27.308803 | 2020-06-16T07:25:41 | 2020-06-16T07:25:41 | 199,773,131 | 5 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,093 | pyi | # @generated by generate_proto_mypy_stubs.py. Do not edit!
import sys
from google.protobuf.descriptor import (
Descriptor as google___protobuf___descriptor___Descriptor,
)
from google.protobuf.message import (
Message as google___protobuf___message___Message,
)
from monitor_sdk.model.notify.operation_log_pb2 import (
OperationLog as monitor_sdk___model___notify___operation_log_pb2___OperationLog,
)
from typing import (
Optional as typing___Optional,
Text as typing___Text,
Union as typing___Union,
)
from typing_extensions import (
Literal as typing_extensions___Literal,
)
builtin___bool = bool
builtin___bytes = bytes
builtin___float = float
builtin___int = int
if sys.version_info < (3,):
builtin___buffer = buffer
builtin___unicode = unicode
class OperationLogWithMeta(google___protobuf___message___Message):
DESCRIPTOR: google___protobuf___descriptor___Descriptor = ...
system = ... # type: typing___Text
topic = ... # type: typing___Text
@property
def data(self) -> monitor_sdk___model___notify___operation_log_pb2___OperationLog: ...
def __init__(self,
*,
system : typing___Optional[typing___Text] = None,
topic : typing___Optional[typing___Text] = None,
data : typing___Optional[monitor_sdk___model___notify___operation_log_pb2___OperationLog] = None,
) -> None: ...
if sys.version_info >= (3,):
@classmethod
def FromString(cls, s: builtin___bytes) -> OperationLogWithMeta: ...
else:
@classmethod
def FromString(cls, s: typing___Union[builtin___bytes, builtin___buffer, builtin___unicode]) -> OperationLogWithMeta: ...
def MergeFrom(self, other_msg: google___protobuf___message___Message) -> None: ...
def CopyFrom(self, other_msg: google___protobuf___message___Message) -> None: ...
def HasField(self, field_name: typing_extensions___Literal[u"data",b"data"]) -> builtin___bool: ...
def ClearField(self, field_name: typing_extensions___Literal[u"data",b"data",u"system",b"system",u"topic",b"topic"]) -> None: ...
| [
"[email protected]"
]
| |
202431c6183a6dcff01d28a468d59da31fa8c7b1 | cb9f5db2cdaa5c85a4c5950e34fa22d931da445e | /seed.py | d94c6e63d50668962053785917432aba4eb825c1 | []
| no_license | rmmistry/movie-ratings- | 248fdb36a7392cebc8cfc9686cae61a3b0c516c4 | 89050e4da2dc998ab99fca8537d8df75a650e845 | refs/heads/master | 2021-01-10T05:13:17.863638 | 2015-10-23T00:58:23 | 2015-10-23T00:58:23 | 44,561,233 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,133 | py | """Utility file to seed ratings database from MovieLens data in seed_data/"""
from model import User, Movie, Rating
# from model import Rating
# from model import Movie
from model import connect_to_db, db
from server import app
from datetime import datetime
def load_users():
"""Load users from u.user into database."""
print "Users"
# Delete all rows in table, so if we need to run this a second time,
# we won't be trying to add duplicate users
User.query.delete()
# Read u.user file and insert data
for row in open("seed_data/u.user"):
row = row.rstrip()
user_id, age, gender, occupation, zipcode = row.split("|")
user = User(user_id=user_id,
age=age,
zipcode=zipcode)
# We need to add to the session or it won't ever be stored
db.session.add(user)
# Once we're done, we should commit our work
db.session.commit()
def load_movies():
"""Load movies from u.item into database."""
print "Movies"
# Delete all rows in table, so if we need to run this a second time,
# we won't be trying to add duplicate users
Movie.query.delete()
# Read u.user file and insert data
for row in open("seed_data/u.item"):
row = row.rstrip()
row_splitted = row.split("|")
##throwing out rows with no release date or title is unknown
movie_id = row_splitted[0]
title = row_splitted[1]
released_at = row_splitted[2]
imdb_url = row_splitted[4]
## FIX LATER: optionally, rstrip('(') - why didn't it work?
title = title[:-7]
print title
if released_at != (''):
released_at_ob = datetime.strptime(released_at, '%d-%b-%Y')
else:
# no release date given: store NULL rather than silently reusing the
# previous row's parsed date (or raising NameError on the first empty value)
released_at_ob = None
movie = Movie(movie_id=movie_id,
title=title,
released_at=released_at_ob,
imdb_url=imdb_url)
# We need to add to the session or it won't ever be stored
db.session.add(movie)
# Once we're done, we should commit our work
db.session.commit()
def load_ratings():
"""Load ratings from u.data into database."""
print "Ratings"
# Delete all rows in table, so if we need to run this a second time,
# we won't be trying to add duplicate users
Rating.query.delete()
# Read u.user file and insert data
for row in open("seed_data/u.data"):
row = row.rstrip()
row_splitted=row.split()
user_id = row_splitted[0]
movie_id = row_splitted[1]
score = row_splitted[2]
rating = Rating(movie_id=movie_id,
user_id=user_id,
score=score)
# We need to add to the session or it won't ever be stored
db.session.add(rating)
# Once we're done, we should commit our work
db.session.commit()
if __name__ == "__main__":
connect_to_db(app)
# In case tables haven't been created, create them
db.create_all()
# Import different types of data
load_users()
load_movies()
load_ratings()
| [
"[email protected]"
]
| |
1ad194458a4f64f614b9ac861a9e7623c7eaa041 | 29345337bf86edc938f3b5652702d551bfc3f11a | /python/src/main/python/pyalink/alink/tests/examples/from_docs/test_alsusersperitemrecommbatchop.py | 21104be85c65e675c3b2d8099853b1de16f0fc5b | [
"Apache-2.0"
]
| permissive | vacaly/Alink | 32b71ac4572ae3509d343e3d1ff31a4da2321b6d | edb543ee05260a1dd314b11384d918fa1622d9c1 | refs/heads/master | 2023-07-21T03:29:07.612507 | 2023-07-12T12:41:31 | 2023-07-12T12:41:31 | 283,079,072 | 0 | 0 | Apache-2.0 | 2020-07-28T02:46:14 | 2020-07-28T02:46:13 | null | UTF-8 | Python | false | false | 920 | py | import unittest
from pyalink.alink import *
import numpy as np
import pandas as pd
class TestAlsUsersPerItemRecommBatchOp(unittest.TestCase):
def test_alsusersperitemrecommbatchop(self):
df_data = pd.DataFrame([
[1, 1, 0.6],
[2, 2, 0.8],
[2, 3, 0.6],
[4, 1, 0.6],
[4, 2, 0.3],
[4, 3, 0.4],
])
data = BatchOperator.fromDataframe(df_data, schemaStr='user bigint, item bigint, rating double')
als = AlsTrainBatchOp().setUserCol("user").setItemCol("item").setRateCol("rating") \
.setNumIter(10).setRank(10).setLambda(0.01)
model = als.linkFrom(data)
predictor = AlsUsersPerItemRecommBatchOp() \
.setItemCol("item").setRecommCol("rec").setK(1).setReservedCols(["item"])
predictor.linkFrom(model, data).print();
pass | [
"[email protected]"
]
| |
81a39a0d1720fe639ac2b59e7861b623c6118af5 | 2324dea2cb3003c8ab7e8fd80588d44973eb8c77 | /Euler_1_17a.py | 9a350a6d333f32263cf6731390cfab23de618e79 | []
| no_license | MikeOcc/MyProjectEulerFiles | 5f51bc516cb6584732dc67bb2f9c7fd9e6d51e56 | 4d066d52380aade215636953589bf56d6b88f745 | refs/heads/master | 2021-01-16T18:45:44.133229 | 2015-05-27T18:28:43 | 2015-05-27T18:28:43 | 5,876,116 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,176 | py | def p17():
def lowest_digit(n):
return n/10, n%10
def words(n):
if n > 999:
raise ValueError, "Number too big."
digits = [None, 'one', 'two', 'three', 'four', 'five', 'six', 'seven',
'eight', 'nine']
teens = ['ten', 'eleven', 'twelve', 'thirteen', 'fourteen', 'fifteen',
'sixteen', 'seventeen', 'eighteen', 'nineteen']
tens = [None, None, 'twenty', 'thirty', 'forty', 'fifty', 'sixty',
'seventy', 'eighty', 'ninety']
n, o = lowest_digit(n)
n, t = lowest_digit(n)
n, h = lowest_digit(n)
result = []
if t == 1:
result.append(teens[o])
else:
if o:
result.append(digits[o])
if t:
result.append(tens[t])
if h:
if t or o:
result.append('and')
result.append('hundred')
result.append(digits[h])
#return ''.join(reversed(result))
return ''.join(result)
c = 0
for i in range(1,1000):
c += len(words(i))
c+=len('onethousand')
print c
p17()
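# Added note: words() appends the pieces lowest-digit-first and the
# reversed() join above is commented out, so 342 comes back as
# 'twofortyandhundredthree' rather than 'threehundredandfortytwo'. The
# letter count is the same either way, which is all Project Euler 17
# needs (the well-known total for 1..1000 is 21124).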
| [
"[email protected]"
]
| |
77a600b8a161271244c70a072a2ad68e0c19c0f9 | 3712a929d1124f514ea7af1ac0d4a1de03bb6773 | /开班笔记/个人项目/weather/venv/Scripts/pip3-script.py | a6ac6cc88412f3e6968662a23c89959c23f69bbe | []
| no_license | jiyabing/learning | abd82aa3fd37310b4a98b11ea802c5b0e37b7ad9 | 6059006b0f86aee9a74cfc116d2284eb44173f41 | refs/heads/master | 2020-04-02T20:47:33.025331 | 2018-10-26T05:46:10 | 2018-10-26T05:46:10 | 154,779,387 | 0 | 0 | null | null | null | null | GB18030 | Python | false | false | 446 | py | #!E:\学习文件\python学习资料\开班笔记\个人项目\weather\venv\Scripts\python.exe -x
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==10.0.1','console_scripts','pip3'
__requires__ = 'pip==10.0.1'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('pip==10.0.1', 'console_scripts', 'pip3')()
)
| [
"[email protected]"
]
| |
c82b626196c32cb53a26ce7409d33e52aeb8817f | d82efe8ea61a9d391e1444af55bb35c1b95ae7b0 | /mainapp/__init__.py | f0be2b2fac1640b58f67d8e2a5d515d8f769813c | []
| no_license | xulongyuan203/leargit | ecbdb46b54d95d6c569ce5e3edb234bff1125e89 | 40b70ee4d2512d1e5827a9558483bc8c6b4ea761 | refs/heads/main | 2023-09-04T07:31:06.858491 | 2021-10-17T03:57:35 | 2021-10-17T03:57:35 | 346,919,815 | 0 | 0 | null | 2021-10-17T03:56:18 | 2021-03-12T03:12:12 | Python | UTF-8 | Python | false | false | 100 | py | from flask import Flask
import settings
app = Flask(__name__)
app.config.from_object(settings.Dev) | [
"[email protected]"
]
| |
410d7498c362b982e00c1371ea8e80ffedc787f5 | 2ecfe0e10d10513917e4f2770e0a56075404c5d8 | /oldnumba/tests/test_exceptions.py | 80cbe4e56325c6d8248dd39bfb2723c2511aeeb1 | [
"BSD-2-Clause"
]
| permissive | laserson/numba | 84ab7615ea0177b496a63e2a86319f0b12992cd2 | 35546517b27764a9120f6dfcd82eba7f4dd858cb | refs/heads/master | 2020-05-20T23:13:23.011971 | 2014-12-08T20:16:20 | 2014-12-08T20:16:20 | 16,754,385 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 632 | py | """
>>> boom()
Traceback (most recent call last):
...
ValueError: invalid literal for int() with base 10: 'boom'
>>> boom2()
Traceback (most recent call last):
...
TypeError: 'object' object is not callable
>>> boom3()
Traceback (most recent call last):
...
TypeError: 'object' object is not callable
"""
import sys
import ctypes
from numba import *
import numpy as np
@autojit(backend='ast')
def boom():
return int('boom')
@jit(int_())
def boom2():
return object()('boom')
@jit(complex128())
def boom3():
return object()('boom')
if __name__ == "__main__":
import numba
numba.testing.testmod()
| [
"[email protected]"
]
| |
6013abaf1ceca0fccaeea59d0bb1e9949aee08e7 | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_337/ch8_2020_03_02_19_15_37_237019.py | 7d27944df20781e9fb1001e6829337fdf582639c | []
| no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 105 | py | def calcula_posicao(tempo, posicao_inicial, velocidade):
    # uniform motion: position = initial position + velocity * time
    # (posicao_inicial and velocidade are assumed to be passed in;
    #  e.g. calcula_posicao(2, 10, 5) -> 20)
    posicao = posicao_inicial + velocidade * tempo
    return posicao
| [
"[email protected]"
]
| |
597d15f9483743209611d978a2f889f859a6aa82 | 2a8a6327fb9a7ce8696aa15b197d5170661fb94f | /test/test_put_order_action_trigger_dates_request_type_order_actions.py | f00aa4180e526b79d4133bf03d89dcfe4c1837b8 | []
| no_license | moderndatainc/zuora-client | 8b88e05132ddf7e8c411a6d7dad8c0baabaa6dad | d50da49ce1b8465c76723496c2561a3b8ebdf07d | refs/heads/master | 2021-09-21T19:17:34.752404 | 2018-08-29T23:24:07 | 2018-08-29T23:24:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 38,976 | py | # coding: utf-8
"""
Zuora API Reference
# Introduction Welcome to the reference for the Zuora REST API! <a href=\"http://en.wikipedia.org/wiki/REST_API\" target=\"_blank\">REST</a> is a web-service protocol that lends itself to rapid development by using everyday HTTP and JSON technology. The Zuora REST API provides a broad set of operations and resources that: * Enable Web Storefront integration from your website. * Support self-service subscriber sign-ups and account management. * Process revenue schedules through custom revenue rule models. * Enable manipulation of most objects in the Zuora Object Model. Want to share your opinion on how our API works for you? <a href=\"https://community.zuora.com/t5/Developers/API-Feedback-Form/gpm-p/21399\" target=\"_blank\">Tell us how you feel </a>about using our API and what we can do to make it better. ## Access to the API If you have a Zuora tenant, you can access the Zuora REST API via one of the following endpoints: | Tenant | Base URL for REST Endpoints | |-------------------------|-------------------------| |US Production | https://rest.zuora.com | |US API Sandbox | https://rest.apisandbox.zuora.com| |US Performance Test | https://rest.pt1.zuora.com | |EU Production | https://rest.eu.zuora.com | |EU Sandbox | https://rest.sandbox.eu.zuora.com | The Production endpoint provides access to your live user data. API Sandbox tenants are a good place to test code without affecting real-world data. If you would like Zuora to provision an API Sandbox tenant for you, contact your Zuora representative for assistance. **Note:** If you have a tenant in the Production Copy Environment, submit a request at <a href=\"http://support.zuora.com/\" target=\"_blank\">Zuora Global Support</a> to enable the Zuora REST API in your tenant and obtain the base URL for REST endpoints. If you do not have a Zuora tenant, go to <a href=\"https://www.zuora.com/resource/zuora-test-drive\" target=\"_blank\">https://www.zuora.com/resource/zuora-test-drive</a> and sign up for a Production Test Drive tenant. The tenant comes with seed data, including a sample product catalog. # API Changelog You can find the <a href=\"https://community.zuora.com/t5/Developers/API-Changelog/gpm-p/18092\" target=\"_blank\">Changelog</a> of the API Reference in the Zuora Community. # Authentication ## OAuth v2.0 Zuora recommends that you use OAuth v2.0 to authenticate to the Zuora REST API. Currently, OAuth is not available in every environment. See [Zuora Testing Environments](https://knowledgecenter.zuora.com/BB_Introducing_Z_Business/D_Zuora_Environments) for more information. Zuora recommends you to create a dedicated API user with API write access on a tenant when authenticating via OAuth, and then create an OAuth client for this user. See <a href=\"https://knowledgecenter.zuora.com/CF_Users_and_Administrators/A_Administrator_Settings/Manage_Users/Create_an_API_User\" target=\"_blank\">Create an API User</a> for how to do this. By creating a dedicated API user, you can control permissions of the API user without affecting other non-API users. If a user is deactivated, all of the user's OAuth clients will be automatically deactivated. Authenticating via OAuth requires the following steps: 1. Create a Client 2. Generate a Token 3. Make Authenticated Requests ### Create a Client You must first [create an OAuth client](https://knowledgecenter.zuora.com/CF_Users_and_Administrators/A_Administrator_Settings/Manage_Users#Create_an_OAuth_Client_for_a_User) in the Zuora UI. To do this, you must be an administrator of your Zuora tenant. 
This is a one-time operation. You will be provided with a Client ID and a Client Secret. Please note this information down, as it will be required for the next step. **Note:** The OAuth client will be owned by a Zuora user account. If you want to perform PUT, POST, or DELETE operations using the OAuth client, the owner of the OAuth client must have a Platform role that includes the \"API Write Access\" permission. ### Generate a Token After creating a client, you must make a call to obtain a bearer token using the [Generate an OAuth token](https://www.zuora.com/developer/api-reference/#operation/createToken) operation. This operation requires the following parameters: - `client_id` - the Client ID displayed when you created the OAuth client in the previous step - `client_secret` - the Client Secret displayed when you created the OAuth client in the previous step - `grant_type` - must be set to `client_credentials` **Note**: The Client ID and Client Secret mentioned above were displayed when you created the OAuth Client in the prior step. The [Generate an OAuth token](https://www.zuora.com/developer/api-reference/#operation/createToken) response specifies how long the bearer token is valid for. Call [Generate an OAuth token](https://www.zuora.com/developer/api-reference/#operation/createToken) again to generate a new bearer token. ### Make Authenticated Requests To authenticate subsequent API requests, you must provide a valid bearer token in an HTTP header: `Authorization: Bearer {bearer_token}` If you have [Zuora Multi-entity](https://www.zuora.com/developer/api-reference/#tag/Entities) enabled, you need to set an additional header to specify the ID of the entity that you want to access. You can use the `scope` field in the [Generate an OAuth token](https://www.zuora.com/developer/api-reference/#operation/createToken) response to determine whether you need to specify an entity ID. If the `scope` field contains more than one entity ID, you must specify the ID of the entity that you want to access. For example, if the `scope` field contains `entity.1a2b7a37-3e7d-4cb3-b0e2-883de9e766cc` and `entity.c92ed977-510c-4c48-9b51-8d5e848671e9`, specify one of the following headers: - `Zuora-Entity-Ids: 1a2b7a37-3e7d-4cb3-b0e2-883de9e766cc` - `Zuora-Entity-Ids: c92ed977-510c-4c48-9b51-8d5e848671e9` **Note**: For a limited period of time, Zuora will accept the `entityId` header as an alternative to the `Zuora-Entity-Ids` header. If you choose to set the `entityId` header, you must remove all \"-\" characters from the entity ID in the `scope` field. If the `scope` field contains a single entity ID, you do not need to specify an entity ID. ## Other Supported Authentication Schemes Zuora continues to support the following additional legacy means of authentication: * Use username and password. Include authentication with each request in the header: * `apiAccessKeyId` * `apiSecretAccessKey` Zuora recommends that you create an API user specifically for making API calls. See <a href=\"https://knowledgecenter.zuora.com/CF_Users_and_Administrators/A_Administrator_Settings/Manage_Users/Create_an_API_User\" target=\"_blank\">Create an API User</a> for more information. * Use an authorization cookie. The cookie authorizes the user to make calls to the REST API for the duration specified in **Administration > Security Policies > Session timeout**. The cookie expiration time is reset with this duration after every call to the REST API. 
To obtain a cookie, call the [Connections](https://www.zuora.com/developer/api-reference/#tag/Connections) resource with the following API user information: * ID * Password * For CORS-enabled APIs only: Include a 'single-use' token in the request header, which re-authenticates the user with each request. See below for more details. ### Entity Id and Entity Name The `entityId` and `entityName` parameters are only used for [Zuora Multi-entity](https://knowledgecenter.zuora.com/BB_Introducing_Z_Business/Multi-entity \"Zuora Multi-entity\"). These are the legacy parameters that Zuora will only continue to support for a period of time. Zuora recommends you to use the `Zuora-Entity-Ids` parameter instead. The `entityId` and `entityName` parameters specify the Id and the [name of the entity](https://knowledgecenter.zuora.com/BB_Introducing_Z_Business/Multi-entity/B_Introduction_to_Entity_and_Entity_Hierarchy#Name_and_Display_Name \"Introduction to Entity and Entity Hierarchy\") that you want to access, respectively. Note that you must have permission to access the entity. You can specify either the `entityId` or `entityName` parameter in the authentication to access and view an entity. * If both `entityId` and `entityName` are specified in the authentication, an error occurs. * If neither `entityId` nor `entityName` is specified in the authentication, you will log in to the entity in which your user account is created. To get the entity Id and entity name, you can use the GET Entities REST call. For more information, see [API User Authentication](https://knowledgecenter.zuora.com/BB_Introducing_Z_Business/Multi-entity/A_Overview_of_Multi-entity#API_User_Authentication \"API User Authentication\"). ### Token Authentication for CORS-Enabled APIs The CORS mechanism enables REST API calls to Zuora to be made directly from your customer's browser, with all credit card and security information transmitted directly to Zuora. This minimizes your PCI compliance burden, allows you to implement advanced validation on your payment forms, and makes your payment forms look just like any other part of your website. For security reasons, instead of using cookies, an API request via CORS uses **tokens** for authentication. The token method of authentication is only designed for use with requests that must originate from your customer's browser; **it should not be considered a replacement to the existing cookie authentication** mechanism. See [Zuora CORS REST](https://knowledgecenter.zuora.com/DC_Developers/REST_API/A_REST_basics/G_CORS_REST \"Zuora CORS REST\") for details on how CORS works and how you can begin to implement customer calls to the Zuora REST APIs. See [HMAC Signatures](https://www.zuora.com/developer/api-reference/#operation/POSTHMACSignature \"HMAC Signatures\") for details on the HMAC method that returns the authentication token. # Requests and Responses ## Request IDs As a general rule, when asked to supply a \"key\" for an account or subscription (accountKey, account-key, subscriptionKey, subscription-key), you can provide either the actual ID or the number of the entity. ## HTTP Request Body Most of the parameters and data accompanying your requests will be contained in the body of the HTTP request. The Zuora REST API accepts JSON in the HTTP request body. No other data format (e.g., XML) is supported. 
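The following Python sketch (an added illustration, not part of the original reference) strings the OAuth steps together; it assumes the `requests` library, the US Production endpoint, and the `/oauth/token` path behind the createToken operation linked above. `{account-key}` is a placeholder, not a real account.

    import requests

    # 1. Exchange client credentials for a bearer token (createToken)
    token = requests.post(
        'https://rest.zuora.com/oauth/token',
        data={'client_id': 'YOUR_CLIENT_ID',
              'client_secret': 'YOUR_CLIENT_SECRET',
              'grant_type': 'client_credentials'},
    ).json()['access_token']

    # 2. Present the token on every subsequent JSON request
    account = requests.get(
        'https://rest.zuora.com/v1/accounts/{account-key}',
        headers={'Authorization': 'Bearer %s' % token},
    )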
### Data Type ([Actions](https://www.zuora.com/developer/api-reference/#tag/Actions) and CRUD operations only) We recommend that you do not specify the decimal values with quotation marks, commas, and spaces. Use characters of `+-0-9.eE`, for example, `5`, `1.9`, `-8.469`, and `7.7e2`. Also, Zuora does not convert currencies for decimal values. ## Testing a Request Use a third party client, such as [curl](https://curl.haxx.se \"curl\"), [Postman](https://www.getpostman.com \"Postman\"), or [Advanced REST Client](https://advancedrestclient.com \"Advanced REST Client\"), to test the Zuora REST API. You can test the Zuora REST API from the Zuora API Sandbox or Production tenants. If connecting to Production, bear in mind that you are working with your live production data, not sample data or test data. ## Testing with Credit Cards Sooner or later it will probably be necessary to test some transactions that involve credit cards. For suggestions on how to handle this, see [Going Live With Your Payment Gateway](https://knowledgecenter.zuora.com/CB_Billing/M_Payment_Gateways/C_Managing_Payment_Gateways/B_Going_Live_Payment_Gateways#Testing_with_Credit_Cards \"C_Zuora_User_Guides/A_Billing_and_Payments/M_Payment_Gateways/C_Managing_Payment_Gateways/B_Going_Live_Payment_Gateways#Testing_with_Credit_Cards\" ). ## Concurrent Request Limits Zuora enforces tenant-level concurrent request limits. See <a href=\"https://knowledgecenter.zuora.com/BB_Introducing_Z_Business/Policies/Concurrent_Request_Limits\" target=\"_blank\">Concurrent Request Limits</a> for more information. ## Timeout Limit If a request does not complete within 120 seconds, the request times out and Zuora returns a Gateway Timeout error. ## Error Handling Responses and error codes are detailed in [Responses and errors](https://knowledgecenter.zuora.com/DC_Developers/REST_API/A_REST_basics/3_Responses_and_errors \"Responses and errors\"). # Pagination When retrieving information (using GET methods), the optional `pageSize` query parameter sets the maximum number of rows to return in a response. The maximum is `40`; larger values are treated as `40`. If this value is empty or invalid, `pageSize` typically defaults to `10`. The default value for the maximum number of rows retrieved can be overridden at the method level. If more rows are available, the response will include a `nextPage` element, which contains a URL for requesting the next page. If this value is not provided, no more rows are available. No \"previous page\" element is explicitly provided; to support backward paging, use the previous call. ## Array Size For data items that are not paginated, the REST API supports arrays of up to 300 rows. Thus, for instance, repeated pagination can retrieve thousands of customer accounts, but within any account an array of no more than 300 rate plans is returned. # API Versions The Zuora REST API are version controlled. Versioning ensures that Zuora REST API changes are backward compatible. Zuora uses a major and minor version nomenclature to manage changes. By specifying a version in a REST request, you can get expected responses regardless of future changes to the API. ## Major Version The major version number of the REST API appears in the REST URL. Currently, Zuora only supports the **v1** major version. For example, `POST https://rest.zuora.com/v1/subscriptions`. ## Minor Version Zuora uses minor versions for the REST API to control small changes. For example, a field in a REST method is deprecated and a new field is used to replace it. 
Some fields in the REST methods are supported as of minor versions. If a field is not noted with a minor version, this field is available for all minor versions. If a field is noted with a minor version, this field is in version control. You must specify the supported minor version in the request header to process without an error. If a field is in version control, it is either with a minimum minor version or a maximum minor version, or both of them. You can only use this field with the minor version between the minimum and the maximum minor versions. For example, the `invoiceCollect` field in the POST Subscription method is in version control and its maximum minor version is 189.0. You can only use this field with the minor version 189.0 or earlier. If you specify a version number in the request header that is not supported, Zuora will use the minimum minor version of the REST API. In our REST API documentation, if a field or feature requires a minor version number, we note that in the field description. You only need to specify the version number when you use the fields require a minor version. To specify the minor version, set the `zuora-version` parameter to the minor version number in the request header for the request call. For example, the `collect` field is in 196.0 minor version. If you want to use this field for the POST Subscription method, set the `zuora-version` parameter to `196.0` in the request header. The `zuora-version` parameter is case sensitive. For all the REST API fields, by default, if the minor version is not specified in the request header, Zuora will use the minimum minor version of the REST API to avoid breaking your integration. ### Minor Version History The supported minor versions are not serial. This section documents the changes made to each Zuora REST API minor version. The following table lists the supported versions and the fields that have a Zuora REST API minor version. | Fields | Minor Version | REST Methods | Description | |:--------|:--------|:--------|:--------| | invoiceCollect | 189.0 and earlier | [Create Subscription](https://www.zuora.com/developer/api-reference/#operation/POST_Subscription \"Create Subscription\"); [Update Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_Subscription \"Update Subscription\"); [Renew Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_RenewSubscription \"Renew Subscription\"); [Cancel Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_CancelSubscription \"Cancel Subscription\"); [Suspend Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_SuspendSubscription \"Suspend Subscription\"); [Resume Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_ResumeSubscription \"Resume Subscription\"); [Create Account](https://www.zuora.com/developer/api-reference/#operation/POST_Account \"Create Account\")|Generates an invoice and collects a payment for a subscription. 
| | collect | 196.0 and later | [Create Subscription](https://www.zuora.com/developer/api-reference/#operation/POST_Subscription \"Create Subscription\"); [Update Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_Subscription \"Update Subscription\"); [Renew Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_RenewSubscription \"Renew Subscription\"); [Cancel Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_CancelSubscription \"Cancel Subscription\"); [Suspend Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_SuspendSubscription \"Suspend Subscription\"); [Resume Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_ResumeSubscription \"Resume Subscription\"); [Create Account](https://www.zuora.com/developer/api-reference/#operation/POST_Account \"Create Account\")|Collects an automatic payment for a subscription. | | invoice | 196.0 and 207.0| [Create Subscription](https://www.zuora.com/developer/api-reference/#operation/POST_Subscription \"Create Subscription\"); [Update Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_Subscription \"Update Subscription\"); [Renew Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_RenewSubscription \"Renew Subscription\"); [Cancel Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_CancelSubscription \"Cancel Subscription\"); [Suspend Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_SuspendSubscription \"Suspend Subscription\"); [Resume Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_ResumeSubscription \"Resume Subscription\"); [Create Account](https://www.zuora.com/developer/api-reference/#operation/POST_Account \"Create Account\")|Generates an invoice for a subscription. | | invoiceTargetDate | 196.0 and earlier | [Preview Subscription](https://www.zuora.com/developer/api-reference/#operation/POST_SubscriptionPreview \"Preview Subscription\") |Date through which charges are calculated on the invoice, as `yyyy-mm-dd`. | | invoiceTargetDate | 207.0 and earlier | [Create Subscription](https://www.zuora.com/developer/api-reference/#operation/POST_Subscription \"Create Subscription\"); [Update Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_Subscription \"Update Subscription\"); [Renew Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_RenewSubscription \"Renew Subscription\"); [Cancel Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_CancelSubscription \"Cancel Subscription\"); [Suspend Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_SuspendSubscription \"Suspend Subscription\"); [Resume Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_ResumeSubscription \"Resume Subscription\"); [Create Account](https://www.zuora.com/developer/api-reference/#operation/POST_Account \"Create Account\")|Date through which charges are calculated on the invoice, as `yyyy-mm-dd`. | | targetDate | 207.0 and later | [Preview Subscription](https://www.zuora.com/developer/api-reference/#operation/POST_SubscriptionPreview \"Preview Subscription\") |Date through which charges are calculated on the invoice, as `yyyy-mm-dd`. 
| | targetDate | 211.0 and later | [Create Subscription](https://www.zuora.com/developer/api-reference/#operation/POST_Subscription \"Create Subscription\"); [Update Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_Subscription \"Update Subscription\"); [Renew Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_RenewSubscription \"Renew Subscription\"); [Cancel Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_CancelSubscription \"Cancel Subscription\"); [Suspend Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_SuspendSubscription \"Suspend Subscription\"); [Resume Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_ResumeSubscription \"Resume Subscription\"); [Create Account](https://www.zuora.com/developer/api-reference/#operation/POST_Account \"Create Account\")|Date through which charges are calculated on the invoice, as `yyyy-mm-dd`. | | includeExistingDraftInvoiceItems | 196.0 and earlier| [Preview Subscription](https://www.zuora.com/developer/api-reference/#operation/POST_SubscriptionPreview \"Preview Subscription\"); [Update Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_Subscription \"Update Subscription\") | Specifies whether to include draft invoice items in subscription previews. Specify it to be `true` (default) to include draft invoice items in the preview result. Specify it to be `false` to exclude draft invoice items from the preview result. | | includeExistingDraftDocItems | 207.0 and later | [Preview Subscription](https://www.zuora.com/developer/api-reference/#operation/POST_SubscriptionPreview \"Preview Subscription\"); [Update Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_Subscription \"Update Subscription\") | Specifies whether to include draft invoice items in subscription previews. Specify it to be `true` (default) to include draft invoice items in the preview result. Specify it to be `false` to exclude draft invoice items from the preview result. | | previewType | 196.0 and earlier| [Preview Subscription](https://www.zuora.com/developer/api-reference/#operation/POST_SubscriptionPreview \"Preview Subscription\"); [Update Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_Subscription \"Update Subscription\") | The type of preview you will receive. The possible values are `InvoiceItem`(default), `ChargeMetrics`, and `InvoiceItemChargeMetrics`. | | previewType | 207.0 and later | [Preview Subscription](https://www.zuora.com/developer/api-reference/#operation/POST_SubscriptionPreview \"Preview Subscription\"); [Update Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_Subscription \"Update Subscription\") | The type of preview you will receive. The possible values are `LegalDoc`(default), `ChargeMetrics`, and `LegalDocChargeMetrics`. 
| | runBilling | 211.0 and later | [Create Subscription](https://www.zuora.com/developer/api-reference/#operation/POST_Subscription \"Create Subscription\"); [Update Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_Subscription \"Update Subscription\"); [Renew Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_RenewSubscription \"Renew Subscription\"); [Cancel Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_CancelSubscription \"Cancel Subscription\"); [Suspend Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_SuspendSubscription \"Suspend Subscription\"); [Resume Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_ResumeSubscription \"Resume Subscription\"); [Create Account](https://www.zuora.com/developer/api-reference/#operation/POST_Account \"Create Account\")|Generates an invoice or credit memo for a subscription. **Note:** Credit memos are only available if you have the Invoice Settlement feature enabled. | | invoiceDate | 214.0 and earlier | [Invoice and Collect](https://www.zuora.com/developer/api-reference/#operation/POST_TransactionInvoicePayment \"Invoice and Collect\") |Date that should appear on the invoice being generated, as `yyyy-mm-dd`. | | invoiceTargetDate | 214.0 and earlier | [Invoice and Collect](https://www.zuora.com/developer/api-reference/#operation/POST_TransactionInvoicePayment \"Invoice and Collect\") |Date through which to calculate charges on this account if an invoice is generated, as `yyyy-mm-dd`. | | documentDate | 215.0 and later | [Invoice and Collect](https://www.zuora.com/developer/api-reference/#operation/POST_TransactionInvoicePayment \"Invoice and Collect\") |Date that should appear on the invoice and credit memo being generated, as `yyyy-mm-dd`. | | targetDate | 215.0 and later | [Invoice and Collect](https://www.zuora.com/developer/api-reference/#operation/POST_TransactionInvoicePayment \"Invoice and Collect\") |Date through which to calculate charges on this account if an invoice or a credit memo is generated, as `yyyy-mm-dd`. | | memoItemAmount | 223.0 and earlier | [Create credit memo from charge](https://www.zuora.com/developer/api-reference/#operation/POST_CreditMemoFromPrpc \"Create credit memo from charge\"); [Create debit memo from charge](https://www.zuora.com/developer/api-reference/#operation/POST_DebitMemoFromPrpc \"Create debit memo from charge\") | Amount of the memo item. | | amount | 224.0 and later | [Create credit memo from charge](https://www.zuora.com/developer/api-reference/#operation/POST_CreditMemoFromPrpc \"Create credit memo from charge\"); [Create debit memo from charge](https://www.zuora.com/developer/api-reference/#operation/POST_DebitMemoFromPrpc \"Create debit memo from charge\") | Amount of the memo item. | | subscriptionNumbers | 222.4 and earlier | [Create order](https://www.zuora.com/developer/api-reference/#operation/POST_Order \"Create order\") | Container for the subscription numbers of the subscriptions in an order. | | subscriptions | 223.0 and later | [Create order](https://www.zuora.com/developer/api-reference/#operation/POST_Order \"Create order\") | Container for the subscription numbers and statuses in an order. 
| #### Version 207.0 and Later The response structure of the [Preview Subscription](https://www.zuora.com/developer/api-reference/#operation/POST_SubscriptionPreview \"Preview Subscription\") and [Update Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_Subscription \"Update Subscription\") methods is changed. The following invoice-related response fields are moved to the invoice container: * amount * amountWithoutTax * taxAmount * invoiceItems * targetDate * chargeMetrics # Zuora Object Model The following diagram presents a high-level view of the key Zuora objects. Click the image to open it in a new tab to resize it. <a href=\"https://www.zuora.com/wp-content/uploads/2017/01/ZuoraERD.jpeg\" target=\"_blank\"><img src=\"https://www.zuora.com/wp-content/uploads/2017/01/ZuoraERD.jpeg\" alt=\"Zuora Object Model Diagram\"></a> See the following articles for information about other parts of the Zuora business object model: * <a href=\"https://knowledgecenter.zuora.com/CB_Billing/Invoice_Settlement/D_Invoice_Settlement_Object_Model\" target=\"_blank\">Invoice Settlement Object Model</a> * <a href=\"https://knowledgecenter.zuora.com/BC_Subscription_Management/Orders/BA_Orders_Object_Model\" target=\"_blank\">Orders Object Model</a> You can use the [Describe object](https://www.zuora.com/developer/api-reference/#operation/GET_Describe) operation to list the fields of each Zuora object that is available in your tenant. When you call the operation, you must specify the API name of the Zuora object. The following table provides the API name of each Zuora object: | Object | API Name | |-----------------------------------------------|--------------------------------------------| | Account | `Account` | | Accounting Code | `AccountingCode` | | Accounting Period | `AccountingPeriod` | | Amendment | `Amendment` | | Application Group | `ApplicationGroup` | | Billing Run | <p>`BillingRun`</p><p>**Note:** The API name of this object is `BillingRun` in the [Describe object](https://www.zuora.com/developer/api-reference/#operation/GET_Describe) operation and Export ZOQL queries only. 
Otherwise, the API name of this object is `BillRun`.</p> | | Contact | `Contact` | | Contact Snapshot | `ContactSnapshot` | | Credit Balance Adjustment | `CreditBalanceAdjustment` | | Credit Memo | `CreditMemo` | | Credit Memo Application | `CreditMemoApplication` | | Credit Memo Application Item | `CreditMemoApplicationItem` | | Credit Memo Item | `CreditMemoItem` | | Credit Memo Part | `CreditMemoPart` | | Credit Memo Part Item | `CreditMemoPartItem` | | Credit Taxation Item | `CreditTaxationItem` | | Custom Exchange Rate | `FXCustomRate` | | Debit Memo | `DebitMemo` | | Debit Memo Item | `DebitMemoItem` | | Debit Taxation Item | `DebitTaxationItem` | | Discount Applied Metrics | `DiscountAppliedMetrics` | | Entity | `Tenant` | | Gateway Reconciliation Event | `PaymentGatewayReconciliationEventLog` | | Gateway Reconciliation Job | `PaymentReconciliationJob` | | Gateway Reconciliation Log | `PaymentReconciliationLog` | | Invoice | `Invoice` | | Invoice Adjustment | `InvoiceAdjustment` | | Invoice Item | `InvoiceItem` | | Invoice Item Adjustment | `InvoiceItemAdjustment` | | Invoice Payment | `InvoicePayment` | | Journal Entry | `JournalEntry` | | Journal Entry Item | `JournalEntryItem` | | Journal Run | `JournalRun` | | Order | `Order` | | Order Action | `OrderAction` | | Order ELP | `OrderElp` | | Order Item | `OrderItem` | | Order MRR | `OrderMrr` | | Order Quantity | `OrderQuantity` | | Order TCB | `OrderTcb` | | Order TCV | `OrderTcv` | | Payment | `Payment` | | Payment Application | `PaymentApplication` | | Payment Application Item | `PaymentApplicationItem` | | Payment Method | `PaymentMethod` | | Payment Method Snapshot | `PaymentMethodSnapshot` | | Payment Method Transaction Log | `PaymentMethodTransactionLog` | | Payment Method Update | `UpdaterDetail` | | Payment Part | `PaymentPart` | | Payment Part Item | `PaymentPartItem` | | Payment Run | `PaymentRun` | | Payment Transaction Log | `PaymentTransactionLog` | | Processed Usage | `ProcessedUsage` | | Product | `Product` | | Product Rate Plan | `ProductRatePlan` | | Product Rate Plan Charge | `ProductRatePlanCharge` | | Product Rate Plan Charge Tier | `ProductRatePlanChargeTier` | | Rate Plan | `RatePlan` | | Rate Plan Charge | `RatePlanCharge` | | Rate Plan Charge Tier | `RatePlanChargeTier` | | Refund | `Refund` | | Refund Application | `RefundApplication` | | Refund Application Item | `RefundApplicationItem` | | Refund Invoice Payment | `RefundInvoicePayment` | | Refund Part | `RefundPart` | | Refund Part Item | `RefundPartItem` | | Refund Transaction Log | `RefundTransactionLog` | | Revenue Charge Summary | `RevenueChargeSummary` | | Revenue Charge Summary Item | `RevenueChargeSummaryItem` | | Revenue Event | `RevenueEvent` | | Revenue Event Credit Memo Item | `RevenueEventCreditMemoItem` | | Revenue Event Debit Memo Item | `RevenueEventDebitMemoItem` | | Revenue Event Invoice Item | `RevenueEventInvoiceItem` | | Revenue Event Invoice Item Adjustment | `RevenueEventInvoiceItemAdjustment` | | Revenue Event Item | `RevenueEventItem` | | Revenue Event Item Credit Memo Item | `RevenueEventItemCreditMemoItem` | | Revenue Event Item Debit Memo Item | `RevenueEventItemDebitMemoItem` | | Revenue Event Item Invoice Item | `RevenueEventItemInvoiceItem` | | Revenue Event Item Invoice Item Adjustment | `RevenueEventItemInvoiceItemAdjustment` | | Revenue Event Type | `RevenueEventType` | | Revenue Schedule | `RevenueSchedule` | | Revenue Schedule Credit Memo Item | `RevenueScheduleCreditMemoItem` | | Revenue Schedule Debit Memo Item | 
`RevenueScheduleDebitMemoItem` | | Revenue Schedule Invoice Item | `RevenueScheduleInvoiceItem` | | Revenue Schedule Invoice Item Adjustment | `RevenueScheduleInvoiceItemAdjustment` | | Revenue Schedule Item | `RevenueScheduleItem` | | Revenue Schedule Item Credit Memo Item | `RevenueScheduleItemCreditMemoItem` | | Revenue Schedule Item Debit Memo Item | `RevenueScheduleItemDebitMemoItem` | | Revenue Schedule Item Invoice Item | `RevenueScheduleItemInvoiceItem` | | Revenue Schedule Item Invoice Item Adjustment | `RevenueScheduleItemInvoiceItemAdjustment` | | Subscription | `Subscription` | | Taxable Item Snapshot | `TaxableItemSnapshot` | | Taxation Item | `TaxationItem` | | Updater Batch | `UpdaterBatch` | | Usage | `Usage` | # noqa: E501
OpenAPI spec version: 2018-08-23
Contact: [email protected]
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import zuora_client
from zuora_client.models.put_order_action_trigger_dates_request_type_order_actions import PUTOrderActionTriggerDatesRequestTypeOrderActions # noqa: E501
from zuora_client.rest import ApiException
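# For orientation: the `zuora-version` header and `nextPage` pagination that
# the module docstring describes, as a minimal sketch. The URL and token are
# placeholders, and the third-party `requests` library is assumed -- this is
# illustrative only, not part of the generated test stubs.
def _paginate_sketch(url, token, minor_version='196.0'):
    import requests  # assumed dependency, unused by the stubs below
    headers = {
        'Authorization': 'Bearer %s' % token,  # placeholder credential
        'zuora-version': minor_version,  # opt in to a specific minor version
    }
    while url:
        payload = requests.get(url, headers=headers).json()
        yield payload
        url = payload.get('nextPage')  # absent on the last page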
class TestPUTOrderActionTriggerDatesRequestTypeOrderActions(unittest.TestCase):
"""PUTOrderActionTriggerDatesRequestTypeOrderActions unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testPUTOrderActionTriggerDatesRequestTypeOrderActions(self):
"""Test PUTOrderActionTriggerDatesRequestTypeOrderActions"""
# FIXME: construct object with mandatory attributes with example values
# model = zuora_client.models.put_order_action_trigger_dates_request_type_order_actions.PUTOrderActionTriggerDatesRequestTypeOrderActions() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
| [
"[email protected]"
]
| |
d315787bb6b8a33384f02df4fd9358fc7f3ae68e | f359c953ef823cc44f7d87a3736c3e4fb1817c0b | /EDBRCommon/python/simulation/RunIIDR74X50ns/TTbar/TTaw.py | 71536ff1fd213b3a0b0ae79234018df0b109d56f | []
| no_license | jruizvar/ExoDiBosonResonancesRun2 | aa613200725cf6cd825d7bcbde60d2e39ba84e39 | b407ab36504d0e04e6bddba4e57856f9f8c0ec66 | refs/heads/Analysis76X | 2021-01-18T20:00:57.358494 | 2016-05-30T21:30:19 | 2016-05-30T21:30:19 | 23,619,682 | 1 | 1 | null | 2016-04-22T18:38:45 | 2014-09-03T12:41:07 | Python | UTF-8 | Python | false | false | 1,426 | py | import FWCore.ParameterSet.Config as cms
maxEvents = cms.untracked.PSet( input = cms.untracked.int32(-1) )
readFiles = cms.untracked.vstring()
secFiles = cms.untracked.vstring()
source = cms.Source ("PoolSource",fileNames = readFiles, secondaryFileNames = secFiles)
readFiles.extend( [
'/store/mc/RunIISpring15DR74/TT_TuneCUETP8M1_13TeV-powheg-pythia8/MINIAODSIM/Asympt50ns_MCRUN2_74_V9A-v4/60000/FAB076ED-590F-E511-B784-0CC47A4DEEBA.root',
'/store/mc/RunIISpring15DR74/TT_TuneCUETP8M1_13TeV-powheg-pythia8/MINIAODSIM/Asympt50ns_MCRUN2_74_V9A-v4/60000/FC007331-5E0F-E511-8D0C-0025904B1424.root',
'/store/mc/RunIISpring15DR74/TT_TuneCUETP8M1_13TeV-powheg-pythia8/MINIAODSIM/Asympt50ns_MCRUN2_74_V9A-v4/60000/FC9BEF1E-540F-E511-8740-002590E39F36.root',
'/store/mc/RunIISpring15DR74/TT_TuneCUETP8M1_13TeV-powheg-pythia8/MINIAODSIM/Asympt50ns_MCRUN2_74_V9A-v4/60000/FCD4075D-6A0F-E511-AA8B-00259073E410.root',
'/store/mc/RunIISpring15DR74/TT_TuneCUETP8M1_13TeV-powheg-pythia8/MINIAODSIM/Asympt50ns_MCRUN2_74_V9A-v4/60000/FEC4769D-6E0F-E511-8A65-0025907277E8.root',
'/store/mc/RunIISpring15DR74/TT_TuneCUETP8M1_13TeV-powheg-pythia8/MINIAODSIM/Asympt50ns_MCRUN2_74_V9A-v4/60000/FECA6F36-360F-E511-8BA1-0CC47A13D09C.root',
'/store/mc/RunIISpring15DR74/TT_TuneCUETP8M1_13TeV-powheg-pythia8/MINIAODSIM/Asympt50ns_MCRUN2_74_V9A-v4/60000/FED5EE4E-C910-E511-91E8-AC853D9DAC41.root' ] );
| [
"[email protected]"
]
| |
c135d62f920dc56b65ff40f4fbe07eac168328ba | 5b6f2b0ff8828d247885204522a7fe4ad7136f7a | /test_arc4.py | fb3574f9ebf651e152ea1554a8cf92cf764e7598 | [
"MIT"
]
| permissive | manicmaniac/arc4 | 5fdc292e3ac172a2e2817ff14b2d052604964cd5 | 6f0706a6f68cb84e419e8652d4196745268c9b3b | refs/heads/master | 2023-08-16T04:05:42.398404 | 2023-04-22T03:58:58 | 2023-04-22T03:58:58 | 149,815,580 | 28 | 5 | MIT | 2023-09-12T09:24:09 | 2018-09-21T20:40:18 | Python | UTF-8 | Python | false | false | 8,405 | py | try:
from setuptools.distutils.version import StrictVersion
except ImportError:
from distutils.version import StrictVersion
import doctest
import functools
import multiprocessing
import platform
import textwrap
import timeit
import unittest
import arc4
import setup
KEY = b'PYTHON3'
LOREM = b"""Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do \
eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim \
veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea \
commodo consequat. Duis aute irure dolor in reprehenderit in voluptate velit \
esse cillum dolore eu fugiat nulla pariatur. Excepteur sint occaecat \
cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est \
laborum."""
LOREM_ARC4 = b"""\xf0\xa8\x59\xec\xdf\x9d\xbd\x95\x52\x91\x66\x72\x50\x01\x0d\
\x3a\xac\x62\x10\xdc\x58\x0f\x49\x02\xd9\x45\x2a\xad\x3a\x2b\x79\xd5\x2b\x29\
\xe7\x16\xf1\x9c\x93\x58\xcd\xa9\x32\x87\xfc\x9f\x6e\x29\x14\x0a\x59\x12\x21\
\x89\x51\x49\xc7\x3f\x59\x78\x0b\x16\xb6\xb2\xc4\xc3\xc0\x61\xc4\xcd\xcf\x9e\
\xff\x34\x2c\xf2\x28\x14\xf8\xc9\x08\xf0\x1f\x2d\xfa\xe8\xbf\x77\xe0\xeb\xee\
\xa1\x51\xd4\xf3\x86\x66\x60\x1c\xb1\x3a\x14\x86\xf2\x6c\xe5\x47\xf8\xb5\x50\
\xad\xbc\x1c\x64\xeb\xbc\x52\x33\x60\x41\x58\x33\x6f\x58\x8c\xfd\x41\x1b\xb0\
\x05\xb3\xbc\x46\x37\xf3\xa4\x5e\x3e\x1f\x20\xe9\x00\x02\xcc\x31\x07\xe8\x65\
\xbb\x12\x97\x05\xcb\xfd\xba\x50\x9c\x59\x14\x49\xb4\x3c\x12\x2b\x47\x27\x5f\
\x30\x52\x57\xf4\xa2\x70\xc5\x7d\x4a\xf2\x92\x01\x5d\x02\x69\x1d\x74\xff\x43\
\xb1\x73\xb9\x28\xfe\x73\x62\x7f\xbd\xcd\xa1\x53\xa2\x1e\x28\x37\x19\xc4\x59\
\xbc\x81\x93\x79\x05\x13\x07\xc2\x43\xb3\xd1\x2a\x9d\xf7\x3c\xe7\x1e\x63\x4b\
\x70\xc7\xc2\xa6\x80\x31\xc7\xc5\x07\x64\x49\x40\x08\x7a\x4f\x4f\x90\x63\x88\
\x4d\x35\x8b\xd2\x48\xe1\xc2\xfc\xa2\xb5\x47\xca\xaf\x75\x36\x31\x22\xa8\x45\
\x5d\x0f\x03\xb7\xd5\x3b\xff\x47\xbc\x6f\xe0\xa3\x49\xfb\x63\xbe\xfc\xa7\x60\
\x59\x43\x50\x8e\x95\x76\x68\xda\xfa\xdb\x9b\x96\x9d\x1b\x6d\xac\x14\x2c\x12\
\x29\xfd\xf0\xaf\xc4\xba\x12\xdf\x83\xd9\xae\xcc\x19\x80\xfd\xc2\x36\x32\xf4\
\x01\x0b\x6d\xeb\x9e\xff\x74\x2e\xfe\x58\xc7\x91\xa9\x75\xf5\xa0\xc0\x5d\xb7\
\x5e\x6a\x71\x5a\x9c\xd3\x98\xca\x6c\xae\x80\xd6\x0d\xb9\x84\x63\x7f\xdf\x31\
\x1b\x5c\x4f\x07\x4c\x9b\x23\x24\x43\xce\x9e\x4d\x29\x5f\xb9\x3a\x57\x0f\x18\
\xf5\xa0\x5a\x94\x88\xfa\x55\x64\xca\x4f\x74\x9f\x71\x33\xa5\x6d\xd4\xd8\x5a\
\xdd\x51\x66\xad\xf5\x37\xad\x44\xe9\x20\xf2\x31\xd3\x9a\xef\x3e\x47\xd1\x20\
\x88\x2c\x21\x74\xed\xa3\x5c\x7c\xa7\x03\x42\x4d\x21\x50\xe2\x9b\x2b\x99\x88\
\x1e\xd4\x53\xda\x1c\xa2\xc7\x5b\xb5\x94\x5d\xc0"""
def raises_deprecation_warning(f):
@functools.wraps(f)
def decorated(self, *args, **kwargs):
with self.assertWarns(DeprecationWarning):
return f(self, *args, **kwargs)
return decorated
def raises_deprecation_warning_if(condition):
if condition:
return raises_deprecation_warning
return lambda x: x
def expected_failure_if(condition):
if condition:
return unittest.expectedFailure
return lambda x: x
class TestARC4(unittest.TestCase):
def test_arc4_module_has_doc(self):
self.assertIsNotNone(arc4.__doc__)
def test_arc4_version_is_strict_version(self):
try:
StrictVersion(arc4.__version__)
except (AttributeError, ValueError) as e:
self.fail(e)
def test_arc4_version_is_equal_to_setup_version(self):
self.assertEqual(arc4.__version__, setup.VERSION)
def test_arc4_class_has_doc(self):
self.assertIsNotNone(arc4.ARC4.__doc__)
def test_init_with_zero_length_key_raises_error(self):
with self.assertRaisesRegex(ValueError, r'^invalid key length: 0$'):
arc4.ARC4(b'')
def test_init_with_bytes_returns_instance(self):
self.assertIsInstance(arc4.ARC4(b'spam'), arc4.ARC4)
@raises_deprecation_warning
def test_init_with_unicode_returns_instance(self):
self.assertIsInstance(arc4.ARC4(u'スパム'), arc4.ARC4)
@raises_deprecation_warning_if(platform.python_implementation() == 'PyPy')
def test_init_with_bytearray_raises_type_error(self):
with self.assertRaisesRegex(
TypeError,
r'argument 1 must be .*, not bytearray'):
arc4.ARC4(bytearray([0x66, 0x6f, 0x6f]))
@raises_deprecation_warning_if(platform.python_implementation() == 'PyPy')
def test_init_with_memoryview_raises_type_error(self):
pattern = r'^argument 1 must be .*, not memoryview$'
with self.assertRaisesRegex(TypeError, pattern):
arc4.ARC4(memoryview(b'spam'))
@expected_failure_if(platform.python_implementation() == 'PyPy')
def test_encrypt_has_doc(self):
self.assertIsNotNone(arc4.ARC4.encrypt.__doc__)
def test_encrypt_with_long_bytes_returns_encrypted_bytes(self):
cipher = arc4.ARC4(KEY)
self.assertEqual(LOREM_ARC4, cipher.encrypt(LOREM))
def test_encrypt_multiple_times_returns_encrypted_bytes(self):
cipher = arc4.ARC4(KEY)
encrypted = b''
for c in LOREM:
if isinstance(c, int):
c = chr(c).encode('utf-8')
encrypted += cipher.encrypt(c)
self.assertEqual(LOREM_ARC4, encrypted)
@raises_deprecation_warning
def test_encrypt_with_unicode_returns_encrypted_bytes(self):
cipher = arc4.ARC4(b'spam')
self.assertEqual(b'Q\xcd\xb1!\xecg', cipher.encrypt(u'ハム'))
def test_encrypt_with_bytearray_raises_type_error(self):
cipher = arc4.ARC4(b'spam')
with self.assertRaisesRegex(
TypeError,
r'^crypt\(\) argument 1 must be .*, not bytearray$'):
cipher.encrypt(bytearray(b'ham'))
def test_encrypt_with_memoryview_raises_type_error(self):
cipher = arc4.ARC4(b'spam')
with self.assertRaisesRegex(
TypeError,
r'^crypt\(\) argument 1 must be .*, not memoryview$'):
cipher.encrypt(memoryview(b'ham'))
def test_encrypt_with_list_raises_type_error(self):
cipher = arc4.ARC4(b'spam')
message = (r'^crypt\(\) argument 1 must be read-only bytes-like ' +
r'object, not list')
with self.assertRaisesRegex(TypeError, message):
cipher.encrypt([0x68, 0x61, 0x6d])
@unittest.skip('takes long time and a bit flaky depends on environment')
@unittest.skipIf(multiprocessing.cpu_count() <= 1, 'needs multiple cores')
def test_encrypt_thread_performance(self):
large_text = 'a' * 10 * 1024 * 1024
number = 100
cpu_count = multiprocessing.cpu_count()
setup = textwrap.dedent("""\
from arc4 import ARC4
from threading import Thread
def target():
ARC4({key!r}).encrypt({text!r})
""".format(key=KEY, text=large_text))
# Create unused threads to make the similar conditions
# between single and multiple threads.
code = textwrap.dedent("""\
threads = []
for i in range({}):
thread = Thread(target=target)
threads.append(thread)
for thread in threads:
pass
target()
""".format(cpu_count))
single_thread_elapsed_time = timeit.timeit(code, setup, number=number)
code = textwrap.dedent("""\
threads = []
for i in range({}):
thread = Thread(target=target)
threads.append(thread)
thread.start()
for thread in threads:
thread.join()
""".format(cpu_count))
multi_thread_elapsed_time = timeit.timeit(code, setup,
number=number // cpu_count)
self.assertLess(multi_thread_elapsed_time, single_thread_elapsed_time)
@expected_failure_if(platform.python_implementation() == 'PyPy')
def test_decrypt_has_doc(self):
self.assertIsNotNone(arc4.ARC4.decrypt.__doc__)
def test_decrypt_with_long_bytes_returns_decrypted_bytes(self):
cipher = arc4.ARC4(KEY)
self.assertEqual(LOREM, cipher.decrypt(LOREM_ARC4))
def load_tests(loader, tests, ignore):
tests.addTests(doctest.DocTestSuite(arc4))
tests.addTests(doctest.DocFileSuite('README.rst'))
return tests
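# For orientation, a minimal usage sketch of the module under test; the
# behaviour mirrors what the test methods above assert:
def _usage_sketch():
    cipher = arc4.ARC4(b'PYTHON3')  # key must be non-empty bytes
    encrypted = cipher.encrypt(b'spam')  # advances the internal keystream
    # RC4 is symmetric but stateful: decrypt with a *fresh* cipher object
    # initialized with the same key.
    assert arc4.ARC4(b'PYTHON3').decrypt(encrypted) == b'spam'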
| [
"[email protected]"
]
| |
e06d790514e028de8404d51db547b5b990b4f864 | 4a5d9f129d5129b34c55171c99f83f0893ae5c11 | /archives/migrations/0006_categorie_lien.py | 1d61623e1a6f57d121b4c3b2cf399d28cc058f6f | [
"MIT"
]
| permissive | fromdanut/syndicat-riviere | ec097cf9bf9aec8829069a2a93d4750a36d87a39 | 0fd099524a2a79d0932dbf8b87f8232d470308ad | refs/heads/master | 2018-09-04T19:14:40.490656 | 2018-06-04T10:52:21 | 2018-06-04T10:52:21 | 103,665,673 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 526 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2016-07-15 06:59
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('archives', '0005_remove_categorie_lien'),
]
operations = [
migrations.AddField(
model_name='categorie',
name='lien',
field=models.CharField(default='default_link', max_length=30, unique=True),
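            # 'default_link' only backfills existing rows; preserve_default=False
            # keeps the default out of the final model state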
preserve_default=False,
),
]
| [
"[email protected]"
]
| |
e174afa38ec2ea5f548eadf2273ad23fbf7cb7e9 | 15f321878face2af9317363c5f6de1e5ddd9b749 | /solutions_python/Problem_105/324.py | 9ddec5819097ba9f1a61905d441b8271fd8d44f7 | []
| no_license | dr-dos-ok/Code_Jam_Webscraper | c06fd59870842664cd79c41eb460a09553e1c80a | 26a35bf114a3aa30fc4c677ef069d95f41665cc0 | refs/heads/master | 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,472 | py |
class Item(object):
def __init__(self, index=0):
self.index = index
self.parents = []
self.childs = []
def is_source(self):
return len(self.parents) > 1
def is_dest(self):
return len(self.childs) > 1
def get_dests(self):
if len(self.parents):
dests = []
for parent in self.parents:
dests.extend(parent.get_dests())
return dests
else:
return [self]
if __name__ == '__main__':
T = int(raw_input())
for test_index in xrange(1, T+1):
N = int(raw_input())
items = [Item(_) for _ in xrange(N+1)]
for index in xrange(1, N+1):
nums = map(int, raw_input().split())
Mi,Ii = nums[0], nums[1:]
for ii in Ii:
items[index].parents.append(items[ii])
items[ii].childs.append(items[index])
src_items = filter(lambda item: item.is_source(), items)
dst_items = filter(lambda item: item.is_dest(), items)
        def check_item(item):
            # An item forms a "diamond" if two distinct ancestor paths reach
            # the same root ingredient, i.e. some leaf appears more than once
            # among its reachable sources.
            dests = item.get_dests()
            for dest in set(dests):
                if dests.count(dest) > 1:
                    return True
            return False
result = False
for src_item in src_items:
if check_item(src_item):
result = True
break
print 'Case #%d: %s' % (test_index, 'Yes' if result else 'No')
| [
"[email protected]"
]
| |
687a25694621f2e864b8c7dc5e552473ecff1887 | 241724e83f5c12ed9d7dd3b825dfe4e2b1b0f777 | /pde/pdes/base.py | 9cf1ac5b1fae6171c9a2c5dc4a00496021f9c523 | [
"MIT"
]
| permissive | xuanxu/py-pde | d8be358ab76d4060b14afc74bc7d836591c6188e | de33d938aea8680eff872ae1b64569895662a248 | refs/heads/master | 2021-03-09T21:37:13.920717 | 2020-03-10T12:18:03 | 2020-03-10T12:18:03 | 246,382,909 | 0 | 0 | MIT | 2020-03-10T18:54:22 | 2020-03-10T18:54:22 | null | UTF-8 | Python | false | false | 11,150 | py | """
Base classes
.. codeauthor:: David Zwicker <[email protected]>
"""
from abc import ABCMeta, abstractmethod
import logging
from typing import Callable, Dict, Optional, TYPE_CHECKING # @UnusedImport
import numpy as np
from ..fields.base import FieldBase
from ..trackers.base import TrackerCollectionDataType
from ..tools.numba import jit
if TYPE_CHECKING:
from ..solvers.controller import TRangeType # @UnusedImport
class PDEBase(metaclass=ABCMeta):
""" base class for solving partial differential equations """
explicit_time_dependence: Optional[bool] = None
def __init__(self, noise: float = 0):
"""
Args:
noise (float):
Magnitude of the additive Gaussian white noise that is supported
by default. If set to zero, a determinitics partial differential
equation will be solved. If another noise structure is required
the respective methods need to be overwritten.
"""
self._logger = logging.getLogger(self.__class__.__name__)
self.noise = noise
@property
def is_sde(self) -> bool:
""" flag indicating whether this is a stochastic differential equation
        The :class:`PDEBase` class supports additive Gaussian white noise, whose
magnitude is controlled by the `noise` property. In this case, `is_sde`
is `True` if `self.noise != 0`.
"""
# check for self.noise, in case __init__ is not called in a subclass
return hasattr(self, 'noise') and self.noise != 0
@abstractmethod
    def evolution_rate(self, field: FieldBase, t: float = 0) -> FieldBase:
        pass
def _make_pde_rhs_numba(self, state: FieldBase) -> Callable:
""" create a compiled function for evaluating the right hand side """
raise NotImplementedError
def make_pde_rhs(self, state: FieldBase, backend: str = 'auto') -> Callable:
""" return a function for evaluating the right hand side of the PDE
Args:
state (:class:`~pde.fields.FieldBase`):
An example for the state from which the grid and other
information can be extracted
backend (str): Determines how the function is created. Accepted
                values are 'numpy' and 'numba'. Alternatively, 'auto' lets the
code decide for the most optimal backend.
Returns:
Function determining the right hand side of the PDE
"""
if backend == 'auto':
try:
result = self._make_pde_rhs_numba(state)
except NotImplementedError:
backend = 'numpy'
else:
result._backend = 'numba' # type: ignore
return result
if backend == 'numba':
result = self._make_pde_rhs_numba(state)
result._backend = 'numba' # type: ignore
elif backend == 'numpy':
state = state.copy()
def evolution_rate_numpy(state_data, t: float):
""" evaluate the rhs given only a state without the grid """
state.data = state_data
return self.evolution_rate(state, t).data
result = evolution_rate_numpy
result._backend = 'numpy' # type: ignore
else:
raise ValueError(f'Unknown backend `{backend}`')
return result
def noise_realization(self, state: FieldBase, t: float = 0) -> FieldBase:
""" returns a realization for the noise
Args:
state (:class:`~pde.fields.ScalarField`):
The scalar field describing the concentration distribution
t (float): The current time point
Returns:
:class:`~pde.fields.ScalarField`:
Scalar field describing the evolution rate of the PDE
"""
if self.noise:
data = np.random.normal(scale=self.noise, size=state.data.shape)
return state.copy(data=data, label='Noise realization')
else:
return state.copy(data=0, label='Noise realization')
def _make_noise_realization_numba(self, state: FieldBase) -> Callable:
""" return a function for evaluating the noise term of the PDE
Args:
state (:class:`~pde.fields.FieldBase`):
An example for the state from which the grid and other
information can be extracted
Returns:
Function determining the right hand side of the PDE
"""
if self.noise:
noise_strength = float(self.noise)
data_shape = state.data.shape
@jit
def noise_realization(state_data: np.ndarray, t: float):
""" compiled helper function returning a noise realization """
return noise_strength * np.random.randn(*data_shape)
else:
@jit
def noise_realization(state_data: np.ndarray, t: float):
""" compiled helper function returning a noise realization """
return None
return noise_realization # type: ignore
def _make_sde_rhs_numba(self, state: FieldBase) -> Callable:
""" return a function for evaluating the noise term of the PDE
Args:
state (:class:`~pde.fields.FieldBase`):
An example for the state from which the grid and other
information can be extracted
Returns:
Function determining the right hand side of the PDE
"""
evolution_rate = self._make_pde_rhs_numba(state)
noise_realization = self._make_noise_realization_numba(state)
@jit
def sde_rhs(state_data: np.ndarray, t: float):
""" compiled helper function returning a noise realization """
return (evolution_rate(state_data, t),
noise_realization(state_data, t))
return sde_rhs # type: ignore
def make_sde_rhs(self, state: FieldBase, backend: str = 'auto') \
-> Callable:
""" return a function for evaluating the right hand side of the SDE
Args:
state (:class:`~pde.fields.FieldBase`):
An example for the state from which the grid and other
information can be extracted
backend (str): Determines how the function is created. Accepted
                values are 'numpy' and 'numba'. Alternatively, 'auto' lets the
code decide for the most optimal backend.
Returns:
Function determining the deterministic part of the right hand side
of the PDE together with a noise realization.
"""
if backend == 'auto':
try:
sde_rhs = self._make_sde_rhs_numba(state)
except NotImplementedError:
backend = 'numpy'
else:
sde_rhs._backend = 'numba' # type: ignore
return sde_rhs
if backend == 'numba':
sde_rhs = self._make_sde_rhs_numba(state)
sde_rhs._backend = 'numba' # type: ignore
elif backend == 'numpy':
state = state.copy()
def sde_rhs(state_data, t: float):
""" evaluate the rhs given only a state without the grid """
state.data = state_data
return (self.evolution_rate(state, t).data,
self.noise_realization(state, t).data)
sde_rhs._backend = 'numpy' # type: ignore
else:
raise ValueError(f'Unknown backend `{backend}`')
return sde_rhs
def solve(self, state: FieldBase,
t_range: "TRangeType",
dt: float = None,
tracker: TrackerCollectionDataType = ['progress', 'consistency'],
method: str = 'auto',
**kwargs):
""" convenience method for solving the partial differential equation
The method constructs a suitable solver
(:class:`~pde.solvers.base.SolverBase`) and controller
(:class:`~pde.controller.Controller`) to advance the state over the
temporal range specified by `t_range`. To obtain full flexibility, it is
advisable to construct these classes explicitly.
Args:
state (:class:`~pde.fields.base.FieldBase`):
The initial state (which also defines the grid)
t_range (float or tuple):
Sets the time range for which the PDE is solved. If only a
single value `t_end` is given, the time range is assumed to be
`[0, t_end]`.
dt (float):
Time step of the chosen stepping scheme. If `None`, a default
value based on the stepper will be chosen.
tracker:
Defines a tracker that process the state of the simulation at
fixed time intervals. Multiple trackers can be specified as a
list. The default value is ['progress', 'consistency'], which
displays a progress bar and checks the state for consistency,
aborting the simulation when not-a-number values appear.
method (:class:`~pde.solvers.base.SolverBase` or str):
Specifies a method for solving the differential equation. This
can either be an instance of
:class:`~pde.solvers.base.SolverBase` or a descriptive name
like 'explicit' or 'scipy'. The valid names are given by
:meth:`pde.solvers.base.SolverBase.registered_solvers`.
**kwargs:
Additional keyword arguments are forwarded to the solver class
Returns:
:class:`~pde.fields.base.FieldBase`:
The state at the final time point.
"""
from ..solvers.base import SolverBase
if method == 'auto':
method = 'scipy' if dt is None else 'explicit'
# create solver
if callable(method):
solver = method(pde=self, **kwargs)
if not isinstance(solver, SolverBase):
                self._logger.warning('Solver is not an instance of '
                                     '`SolverBase`. Specified wrong method?')
else:
solver = SolverBase.from_name(method, pde=self, **kwargs)
# create controller
from ..solvers import Controller
controller = Controller(solver, t_range=t_range, tracker=tracker)
# run the simulation
return controller.run(state, dt)
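# As a concrete illustration of the interface above, a minimal subclass
# implementing plain diffusion. This is a sketch only: the boundary-condition
# name and the `laplace` field method follow common py-pde conventions and may
# need adjusting for a given version.
class _DiffusionSketchPDE(PDEBase):
    """ minimal diffusion equation, d_t c = D * laplace(c) """

    explicit_time_dependence = False

    def __init__(self, diffusivity: float = 1, noise: float = 0):
        super().__init__(noise=noise)
        self.diffusivity = diffusivity

    def evolution_rate(self, state: FieldBase, t: float = 0) -> FieldBase:
        # the discretized Laplacian comes from the grid attached to the field
        return self.diffusivity * state.laplace(bc='natural')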
| [
"[email protected]"
]
| |
109a875760f5fc39260fd4abcf0b9b11c346051b | 7950c4faf15ec1dc217391d839ddc21efd174ede | /explore/2020/september/Evaluate_Division.1.py | 65d4246ab245ebe5ad135c0ae57a97572fd70b22 | []
| no_license | lixiang2017/leetcode | f462ecd269c7157aa4f5854f8c1da97ca5375e39 | f93380721b8383817fe2b0d728deca1321c9ef45 | refs/heads/master | 2023-08-25T02:56:58.918792 | 2023-08-22T16:43:36 | 2023-08-22T16:43:36 | 153,090,613 | 5 | 0 | null | null | null | null | UTF-8 | Python | false | false | 675 | py | '''
Floyd
You are here!
Your runtime beats 27.33 % of python submissions.
'''
class Solution(object):
def calcEquation(self, edges, weights, pairs):
graph = collections.defaultdict(lambda: collections.defaultdict(lambda: float('inf')))
for (i, j), weight in itertools.izip(edges, weights):
graph[i][i], graph[i][j], graph[j][i], graph[j][j] = 1., weight, 1. / weight, 1.
for mid in graph:
for i in graph[mid]:
for j in graph[mid]:
graph[i][j] = min(graph[i][j], graph[i][mid] * graph[mid][j])
return [graph[i][j] if graph[i][j] < float('inf') else -1. for i, j in pairs]
| [
"[email protected]"
]
| |
91bb39e87b153c78a084acbdc38998fcc5de7e04 | 5a01774b1815a3d9a5b02b26ca4d6ba9ecf41662 | /Module 2/Chapter03/django-myproject-03/quotes/models.py | 1659b30889e4e5de96390dfb7a8897a216d15bfe | [
"MIT"
]
| permissive | PacktPublishing/Django-Web-Development-with-Python | bf08075ff0a85df41980cb5e272877e01177fd07 | 9f619f56553b5f0bca9b5ee2ae32953e142df1b2 | refs/heads/master | 2023-04-27T22:36:07.610076 | 2023-01-30T08:35:11 | 2023-01-30T08:35:11 | 66,646,080 | 39 | 41 | MIT | 2023-04-17T10:45:45 | 2016-08-26T12:30:45 | Python | UTF-8 | Python | false | false | 3,578 | py | # -*- coding: UTF-8 -*-
from __future__ import unicode_literals
import os
from PIL import Image
from django.db import models
from django.utils.timezone import now as timezone_now
from django.utils.translation import ugettext_lazy as _
from django.utils.encoding import python_2_unicode_compatible
from django.conf import settings
from django.core.urlresolvers import reverse
from django.core.urlresolvers import NoReverseMatch
from django.core.files.storage import default_storage as storage
from utils.models import UrlMixin
THUMBNAIL_SIZE = getattr(settings, "QUOTES_THUMBNAIL_SIZE", (50, 50))
def upload_to(instance, filename):
now = timezone_now()
filename_base, filename_ext = os.path.splitext(filename)
return "quotes/%s%s" % (
now.strftime("%Y/%m/%Y%m%d%H%M%S"),
filename_ext.lower(),
)
@python_2_unicode_compatible
class InspirationalQuote(UrlMixin):
author = models.CharField(_("Author"), max_length=200)
quote = models.TextField(_("Quote"))
picture = models.ImageField(_("Picture"), upload_to=upload_to, blank=True, null=True)
language = models.CharField(_("Language"), max_length=2, blank=True, choices=settings.LANGUAGES)
class Meta:
verbose_name = _("Inspirational Quote")
verbose_name_plural = _("Inspirational Quotes")
def __str__(self):
return self.quote
def get_url_path(self):
try:
return reverse("quote_detail", kwargs={"id": self.pk})
except NoReverseMatch:
return ""
def save(self, *args, **kwargs):
super(InspirationalQuote, self).save(*args, **kwargs)
# generate thumbnail picture version
self.create_thumbnail()
def create_thumbnail(self):
if not self.picture:
return ""
file_path = self.picture.name
filename_base, filename_ext = os.path.splitext(file_path)
thumbnail_file_path = "%s_thumbnail.jpg" % filename_base
if storage.exists(thumbnail_file_path):
            # if the thumbnail version already exists, skip regeneration
return "exists"
try:
            # crop the original image to a square, resize it, and save the thumbnail
            f = storage.open(file_path, 'rb')
image = Image.open(f)
width, height = image.size
if width > height:
delta = width - height
left = int(delta/2)
upper = 0
right = height + left
lower = height
else:
delta = height - width
left = 0
upper = int(delta/2)
right = width
lower = width + upper
image = image.crop((left, upper, right, lower))
image = image.resize(THUMBNAIL_SIZE, Image.ANTIALIAS)
            f_mob = storage.open(thumbnail_file_path, "wb")
image.save(f_mob, "JPEG")
f_mob.close()
return "success"
        except Exception:
return "error"
def get_thumbnail_picture_url(self):
if not self.picture:
return ""
file_path = self.picture.name
filename_base, filename_ext = os.path.splitext(file_path)
thumbnail_file_path = "%s_thumbnail.jpg" % filename_base
if storage.exists(thumbnail_file_path):
# if thumbnail version exists, return its url path
return storage.url(thumbnail_file_path)
# return original as a fallback
return self.picture.url
def title(self):
        return self.quote
| [
"[email protected]"
]
| |
2b062e03f669e6aaead91edb14be24e5af00d892 | 0d76013f6e1ee69713690d6d6e65ce05a3c94de1 | /account/urls.py | e37b608b6be1c0e5f060818a1a26f890b42c089d | []
| no_license | rafiulgits/law | 8f8576980a47dc27ef744a9c32447e69630d3eca | 42e6e6ac79229b648e023b3ae9c3252919045453 | refs/heads/master | 2023-03-05T22:05:25.854131 | 2021-02-20T04:02:52 | 2021-02-20T04:02:52 | 177,262,688 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 733 | py | from account.views import auth, manage
from django.urls import path
from django.contrib.auth import views as resetviews
from rest_framework_simplejwt.views import TokenRefreshView
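# These routes are typically mounted from the project-level URLconf, e.g.
# (the 'account/' prefix is illustrative):
#
#   from django.urls import include, path
#   urlpatterns = [path('account/', include('account.urls'))]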
urlpatterns = [
path('signup/', auth.SignUp.as_view()),
path('signin/', auth.SignIn.as_view()),
path('access-renew/', TokenRefreshView.as_view()),
path('profile/', manage.Profile.as_view()),
path('update/', auth.AccountUpdate.as_view()),
path('password-change/', auth.PasswordChange.as_view()),
path('verify/', auth.VerifyEmail.as_view()),
path('password-reset/request/', auth.PasswordResetRequest.as_view()),
path('password-reset/verify/', auth.VerifyPasswordRequest.as_view()),
path('password-reset/', auth.PasswordResetView.as_view()),
]
| [
"[email protected]"
]
| |
46d66199b07078ad113d2244608aa0f3dcff80bb | ed8cdcce521b8cab33c66f716c0886e17f035d21 | /.history/script/get_cpu_mem_info_20191222122843.py | 8d68937cd4d25850ae1c036ceb08d000a04b8098 | []
| no_license | deancsdfy/AndroidPerformanceTool_windows | 8ac35729bc651c3af551f090d6788b6ee3f17eb5 | c4906aa9347e8e5eca68dbb7cf2d66a327c70d1f | refs/heads/master | 2020-11-27T20:38:55.014228 | 2020-01-09T15:55:52 | 2020-01-09T15:55:52 | 229,593,460 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,591 | py | #! python3
#coding=utf-8
import sys,os,re
print(sys.path)
sys.path.append('.')
from public import publicfunction as util
PATH = lambda p: os.path.abspath(p)
# Get the package name of the current foreground app
package_name = util.get_current_packagename()
# print('App under test: %s' % package_name)
# Get memory and CPU usage of the target process
def top():
print('Starting get mem cpu information...')
pid=get_pid()
print(pid)
top_info = util.shell("top -n 1 | grep %d" %(int(pid))).stdout.readlines()
for x in top_info:
temp_list = x.split()
#print(temp_list[8])
cpu=float(temp_list[8])
#cpu.append(float(temp_list[8]))
#print(temp_list[9])
mem=float(temp_list[9])
#mem.append(float(temp_list[9]))
print(cpu)
print(mem)
return (cpu,mem)
def getCpuNums():
num_info = util.shell('cat /proc/cpuinfo|grep processor').stdout.readlines()
# print("cpu nums is %d" %(len(num_info)))
return len(num_info)
def getCpuInfo():
# print('Starting get mem cpu information...')
pid = get_pid()
# print(pid)
cpunums=getCpuNums()
top_info = util.shell('top -n 1 | grep %d' % (int(pid))).stdout.readlines()
if(len(top_info)!=0):
for x in top_info:
temp_list = x.split()
# print(temp_list[8])
if getSDKVersion() == '23':
cpu = round(float(str(temp_list[2])[2:-2])/cpunums,2)
print(cpu)
elif (temp_list[8]!=" "):
print(float(temp_list[8]))
cpu = round(float(temp_list[8])/cpunums,2)
# print(cpu)
else:
cpu = 0.0
return cpu
else:
return 0.0
def getMemInfo():
# print('start get mem information....')
pid=get_pid()
# print(pid)
if getSDKVersion() == '23':
temp_list = util.shell('top -n 1 | grep %d' % (int(pid))).stdout.readlines()
print(temp_list[6])
mem=round(float(temp_list[6])/1024,1)
else:
mem_info = util.shell('dumpsys meminfo %d |grep TOTAL:' %(int(pid))).stdout.readlines()
for x in mem_info:
temp_list = x.split()
mem=round(float(temp_list[1])/1024,1)
print(mem)
return mem
# Get the device model name
def getDevicesName():
devicesName = str(util.shell('getprop ro.product.model').stdout.read())
return devicesName
# Get the Android SDK version
def getSDKVersion():
SDKVersion = str(util.shell('getprop ro.build.version.sdk').stdout.read())[2:-7]
return SDKVersion
# Get the PID of the foreground app
def get_pid():
    # Use a regex to extract the pid from the package/activity dump
pattern = re.compile(r"[a-zA-Z0-9\.]+=.[0-9\.]+")
package = util.shell('dumpsys activity top| grep ACTIVITY').stdout.read()
pid = pattern.findall(package.decode())[-1].split('=')[1]
# pid_info = util.shell('ps| grep %s' %(package_name)).stdout.readlines()
# print(pid_info)
# pid = pid_info[0].split()[1]
    # print('pid: %s' % pid)
return pid
# Get the UID of the app process
def get_uid():
cmd = 'cat /proc/'+ get_pid() + '/status'
uid_info = util.shell(cmd).stdout.readlines()
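    # index 6 assumes the "Uid:" line of /proc/<pid>/status (kernel-dependent);
    # the second field on that line is the real uid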
uid = uid_info[6].split()[1]
    print('uid: %s' % uid)
return str(uid)
# Upstream (TX) traffic; currently unusable -- need to find another way to read upstream traffic
def get_flow_send():
cmd = '"cat proc/net/xt_qtaguid/stats|grep '+'%s"'%get_uid()
print(cmd)
flow = util.shell(cmd).stdout.readlines()
print(flow)
if __name__ == "__main__":
print("Starting get top information...")
#get_flow_send()
#top()
getSDKVersion()
getCpuInfo()
getMemInfo() | [
"[email protected]"
]
| |
da1c27f4df3f3d42ec1025d9f87a1ffc36a10f25 | d61d05748a59a1a73bbf3c39dd2c1a52d649d6e3 | /chromium/content/test/gpu/gpu_tests/gpu_integration_test_unittest.py | 7abe56fc3e5829005d6262afc304c84092b965a5 | [
"BSD-3-Clause"
]
| permissive | Csineneo/Vivaldi | 4eaad20fc0ff306ca60b400cd5fad930a9082087 | d92465f71fb8e4345e27bd889532339204b26f1e | refs/heads/master | 2022-11-23T17:11:50.714160 | 2019-05-25T11:45:11 | 2019-05-25T11:45:11 | 144,489,531 | 5 | 4 | BSD-3-Clause | 2022-11-04T05:55:33 | 2018-08-12T18:04:37 | null | UTF-8 | Python | false | false | 6,491 | py | # Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import json
import os
import shutil
import tempfile
import unittest
import mock
from telemetry.testing import browser_test_runner
from gpu_tests import path_util
from gpu_tests import gpu_integration_test
path_util.AddDirToPathIfNeeded(path_util.GetChromiumSrcDir(), 'tools', 'perf')
from chrome_telemetry_build import chromium_config
class GpuIntegrationTestUnittest(unittest.TestCase):
def setUp(self):
self._test_state = {}
def testSimpleIntegrationTest(self):
self._RunIntegrationTest(
'simple_integration_unittest',
['unittest_data.integration_tests.SimpleTest.unexpected_error',
'unittest_data.integration_tests.SimpleTest.unexpected_failure'],
['unittest_data.integration_tests.SimpleTest.expected_flaky',
'unittest_data.integration_tests.SimpleTest.expected_failure'],
['unittest_data.integration_tests.SimpleTest.expected_skip'],
[])
# It might be nice to be more precise about the order of operations
# with these browser restarts, but this is at least a start.
self.assertEquals(self._test_state['num_browser_starts'], 6)
  def testIntegrationTestWithBrowserFailure(self):
self._RunIntegrationTest(
'browser_start_failure_integration_unittest', [],
['unittest_data.integration_tests.BrowserStartFailureTest.restart'],
[], [])
self.assertEquals(self._test_state['num_browser_crashes'], 2)
self.assertEquals(self._test_state['num_browser_starts'], 3)
def testIntegrationTestWithBrowserCrashUponStart(self):
self._RunIntegrationTest(
'browser_crash_after_start_integration_unittest', [],
[('unittest_data.integration_tests.BrowserCrashAfterStartTest.restart')],
[], [])
self.assertEquals(self._test_state['num_browser_crashes'], 2)
self.assertEquals(self._test_state['num_browser_starts'], 3)
def testRetryLimit(self):
self._RunIntegrationTest(
'test_retry_limit',
['unittest_data.integration_tests.TestRetryLimit.unexpected_failure'],
[],
[],
['--retry-limit=2'])
# The number of attempted runs is 1 + the retry limit.
self.assertEquals(self._test_state['num_test_runs'], 3)
def testRepeat(self):
self._RunIntegrationTest(
'test_repeat',
[],
['unittest_data.integration_tests.TestRepeat.success'],
[],
['--repeat=3'])
self.assertEquals(self._test_state['num_test_runs'], 3)
def testAlsoRunDisabledTests(self):
self._RunIntegrationTest(
'test_also_run_disabled_tests',
['unittest_data.integration_tests.TestAlsoRunDisabledTests.skip',
'unittest_data.integration_tests.TestAlsoRunDisabledTests.flaky'],
# Tests that are expected to fail and do fail are treated as test passes
[('unittest_data.integration_tests.'
'TestAlsoRunDisabledTests.expected_failure')],
[],
['--also-run-disabled-tests'])
self.assertEquals(self._test_state['num_flaky_test_runs'], 4)
self.assertEquals(self._test_state['num_test_runs'], 6)
def testStartBrowser_Retries(self):
class TestException(Exception):
pass
def SetBrowserAndRaiseTestException():
gpu_integration_test.GpuIntegrationTest.browser = (
mock.MagicMock())
raise TestException
gpu_integration_test.GpuIntegrationTest.browser = None
gpu_integration_test.GpuIntegrationTest.platform = None
with mock.patch.object(
gpu_integration_test.serially_executed_browser_test_case.\
SeriallyExecutedBrowserTestCase,
'StartBrowser',
side_effect=SetBrowserAndRaiseTestException) as mock_start_browser:
with mock.patch.object(
gpu_integration_test.GpuIntegrationTest,
'StopBrowser') as mock_stop_browser:
with self.assertRaises(TestException):
gpu_integration_test.GpuIntegrationTest.StartBrowser()
self.assertEqual(mock_start_browser.call_count,
gpu_integration_test._START_BROWSER_RETRIES)
self.assertEqual(mock_stop_browser.call_count,
gpu_integration_test._START_BROWSER_RETRIES)
def _RunIntegrationTest(self, test_name, failures, successes, skips,
additional_args):
config = chromium_config.ChromiumConfig(
top_level_dir=path_util.GetGpuTestDir(),
benchmark_dirs=[
os.path.join(path_util.GetGpuTestDir(), 'unittest_data')])
temp_dir = tempfile.mkdtemp()
test_results_path = os.path.join(temp_dir, 'test_results.json')
test_state_path = os.path.join(temp_dir, 'test_state.json')
try:
browser_test_runner.Run(
config,
[test_name,
'--write-full-results-to=%s' % test_results_path,
'--test-state-json-path=%s' % test_state_path] + additional_args)
with open(test_results_path) as f:
test_result = json.load(f)
with open(test_state_path) as f:
self._test_state = json.load(f)
actual_successes, actual_failures, actual_skips = (
self._ExtractTestResults(test_result))
self.assertEquals(set(actual_failures), set(failures))
self.assertEquals(set(actual_successes), set(successes))
self.assertEquals(set(actual_skips), set(skips))
finally:
shutil.rmtree(temp_dir)
def _ExtractTestResults(self, test_result):
delimiter = test_result['path_delimiter']
failures = []
successes = []
skips = []
def _IsLeafNode(node):
test_dict = node[1]
return ('expected' in test_dict and
isinstance(test_dict['expected'], basestring))
node_queues = []
for t in test_result['tests']:
node_queues.append((t, test_result['tests'][t]))
while node_queues:
node = node_queues.pop()
full_test_name, test_dict = node
if _IsLeafNode(node):
if all(res not in test_dict['expected'].split() for res in
test_dict['actual'].split()):
failures.append(full_test_name)
elif test_dict['expected'] == test_dict['actual'] == 'SKIP':
skips.append(full_test_name)
else:
successes.append(full_test_name)
else:
for k in test_dict:
node_queues.append(
('%s%s%s' % (full_test_name, delimiter, k),
test_dict[k]))
return successes, failures, skips
| [
"[email protected]"
]
| |
96ebd867811570532d8fc6a0934d0475f42f77e1 | db903a5e99712d1f45e1d45c4d77537f811ae569 | /src/python/pants/option/global_options_test.py | ede2086b69991da2a0ecc2330dd8015392456304 | [
"Apache-2.0"
]
| permissive | Hirni-Meshram2/pants | 777db8ea67c1fc66de46f0ab374ba4fff8597357 | e802d62cc68176aa66947a939c771b01f47d5425 | refs/heads/main | 2023-05-01T09:23:10.973766 | 2021-05-19T08:24:50 | 2021-05-19T08:24:50 | 366,021,656 | 0 | 2 | Apache-2.0 | 2021-05-10T11:38:07 | 2021-05-10T11:38:06 | null | UTF-8 | Python | false | false | 6,764 | py | # Copyright 2020 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import annotations
import sys
from pathlib import Path
from textwrap import dedent
import pytest
from pants.base.build_environment import get_buildroot
from pants.engine.environment import CompleteEnvironment
from pants.engine.internals.scheduler import ExecutionError
from pants.init.options_initializer import OptionsInitializer
from pants.option.global_options import (
DynamicRemoteExecutionOptions,
ExecutionOptions,
GlobalOptions,
)
from pants.option.options_bootstrapper import OptionsBootstrapper
from pants.testutil.option_util import create_options_bootstrapper
from pants.util.contextutil import temporary_dir
def create_dynamic_execution_options(
*,
initial_headers: dict[str, str],
token_path: str | None = None,
plugin: str | None = None,
local_only: bool = False,
) -> DynamicRemoteExecutionOptions:
args = [
"--remote-cache-read",
"--remote-execution-address=grpc://fake.url:10",
"--remote-store-address=grpc://fake.url:10",
f"--remote-store-headers={initial_headers}",
f"--remote-execution-headers={initial_headers}",
"--remote-instance-name=main",
]
if token_path:
args.append(f"--remote-oauth-bearer-token-path={token_path}")
if plugin:
args.append(f"--remote-auth-plugin={plugin}")
ob = create_options_bootstrapper(args)
env = CompleteEnvironment({})
_build_config, options = OptionsInitializer(ob).build_config_and_options(ob, env, raise_=False)
return DynamicRemoteExecutionOptions.from_options(options, env, local_only=local_only)
def test_dynamic_execution_options_remote_oauth_bearer_token_path() -> None:
with temporary_dir() as tempdir:
token_path = Path(tempdir, "token.txt")
token_path.touch()
token_path.write_text("my-token")
exec_options = create_dynamic_execution_options(
initial_headers={"foo": "bar"}, token_path=str(token_path)
)
assert exec_options.remote_store_headers == {"authorization": "Bearer my-token", "foo": "bar"}
assert exec_options.remote_execution_headers == {
"authorization": "Bearer my-token",
"foo": "bar",
}
def test_dynamic_execution_options_local_only() -> None:
# Test that local_only properly disables remote execution.
assert (
create_dynamic_execution_options(initial_headers={}, local_only=True)
== DynamicRemoteExecutionOptions.disabled()
)
def test_dynamic_execution_options_auth_plugin() -> None:
def compute_exec_options(state: str) -> DynamicRemoteExecutionOptions:
with temporary_dir() as tempdir:
# NB: For an unknown reason, if we use the same file name for multiple runs, the plugin
# result gets memoized. So, we use a distinct file name.
plugin_path = Path(tempdir, f"auth_plugin_{state}.py")
plugin_path.touch()
plugin_path.write_text(
dedent(
f"""\
from pants.option.global_options import AuthPluginState, AuthPluginResult
def auth_func(initial_execution_headers, initial_store_headers, options, **kwargs):
return AuthPluginResult(
state=AuthPluginState.{state},
execution_headers={{
**{{k: "baz" for k in initial_execution_headers}},
"exec": "xyz",
}},
store_headers={{
**{{k: "baz" for k in initial_store_headers}},
"store": "abc",
"store_url": options.for_global_scope().remote_store_address,
}},
instance_name="custom_instance",
)
"""
)
)
sys.path.append(tempdir)
result = create_dynamic_execution_options(
initial_headers={"foo": "bar"}, plugin=f"auth_plugin_{state}:auth_func"
)
sys.path.pop()
return result
exec_options = compute_exec_options("OK")
assert exec_options.remote_store_headers == {
"store": "abc",
"foo": "baz",
"store_url": "grpc://fake.url:10",
}
assert exec_options.remote_execution_headers == {"exec": "xyz", "foo": "baz"}
assert exec_options.remote_cache_read is True
assert exec_options.remote_instance_name == "custom_instance"
exec_options = compute_exec_options("UNAVAILABLE")
assert exec_options.remote_cache_read is False
assert exec_options.remote_instance_name == "main"
def test_execution_options_remote_addresses() -> None:
# Test that we properly validate and normalize the scheme.
def create_exec_options(
remote_store_address: str, remote_execution_address: str
) -> ExecutionOptions:
ob = create_options_bootstrapper(
[
f"--remote-store-address={remote_store_address}",
f"--remote-execution-address={remote_execution_address}",
]
)
_build_config, options = OptionsInitializer(ob).build_config_and_options(
ob, CompleteEnvironment({}), raise_=False
)
return ExecutionOptions.from_options(
options.for_global_scope(), DynamicRemoteExecutionOptions.disabled()
)
host = "fake-with-http-in-url.com:10"
exec_options = create_exec_options(f"grpc://{host}", f"grpc://{host}")
assert exec_options.remote_execution_address == f"http://{host}"
assert exec_options.remote_store_address == f"http://{host}"
exec_options = create_exec_options(f"grpcs://{host}", f"grpcs://{host}")
assert exec_options.remote_execution_address == f"https://{host}"
assert exec_options.remote_store_address == f"https://{host}"
with pytest.raises(ExecutionError):
create_exec_options(f"http://{host}", f"grpc://{host}")
with pytest.raises(ExecutionError):
create_exec_options(f"grpc://{host}", f"https:://{host}")
def test_invalidation_globs() -> None:
# Confirm that an un-normalized relative path in the pythonpath is filtered out.
suffix = "something-ridiculous"
ob = OptionsBootstrapper.create(env={}, args=[f"--pythonpath=../{suffix}"], allow_pantsrc=False)
globs = GlobalOptions.compute_pantsd_invalidation_globs(
get_buildroot(), ob.bootstrap_options.for_global_scope()
)
for glob in globs:
assert suffix not in glob
| [
"[email protected]"
]
| |
f35a1255a58d91c36ed5bb36d3db683c8d4278c1 | c14d8d4e648fc6433ddb4cbef790e93e23c8bc8d | /BankApp/urls.py | 8168201860000704a2b372459e4c12045fb387e7 | []
| no_license | dilshamony/BankAppProject | f9b1f77713d6aaf2b3814886e775df5c45aabb52 | bd6b93b00aefe4440b6718cbd0134fd90b5c35dd | refs/heads/master | 2023-04-27T00:56:05.333750 | 2021-05-21T14:13:03 | 2021-05-21T14:13:03 | 369,554,104 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 801 | py | """BankApp URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path,include
urlpatterns = [
path('admin/', admin.site.urls),
path("bankapp/",include("mybank.urls"))
]
| [
"[email protected]"
]
| |
eed94a047c8ceace0d5f1642db2ffe1c7eb3bf0e | f8ad6963bfc851657ea50c6a036cfad29cdd7f60 | /Study/Keras/Chapter_03_Catching_Layer_Concept/sub_03_image_augmentation.py | 5af584ec17aef04328d39886bb785271c2918441 | []
| no_license | foru120/PythonRepository | e1ab0265c0f50ef2e9acdf7447237c913560692b | db6b6be0f9fb91b0a81a3b6a2ec5631daab10f98 | refs/heads/master | 2021-01-01T06:53:11.728109 | 2019-04-25T13:52:50 | 2019-04-25T13:52:50 | 97,541,222 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 802 | py | import numpy as np
# Fix the random seed for reproducibility
np.random.seed(5)
from keras.preprocessing.image import ImageDataGenerator, array_to_img, img_to_array, load_img
# Set up the data augmentation generator
data_aug_gen = ImageDataGenerator(
rescale=1./255,
rotation_range=15,
width_shift_range=0.1,
height_shift_range=0.1,
shear_range=0.5,
zoom_range=[0.8, 2.0],
horizontal_flip=True,
vertical_flip=True,
fill_mode='nearest'
)
img = load_img(path='./dataset/handwriting_shape/train/triangle/triangle001.png')
x = img_to_array(img)
x = x.reshape((1,) + x.shape)
i = 0
for batch in data_aug_gen.flow(x, batch_size=1, save_to_dir='./dataset/handwriting_shape/preview', save_prefix='tri',
save_format='png'):
i += 1
if i > 30:
break | [
"[email protected]"
]
| |
63aece5376d78fe1adf90813932e843283448f09 | 2b28f749fef34e566b685d520be7ed50f28b7bff | /bondhon_docx/convert_bangla.py | eec2df4268d0d8e6e6bd40811d001112db6fa54b | [
"MIT"
]
| permissive | banglakit/bondhon-docx | cc58fea46fd9a50b4559ed26ba2142a5d708423e | a8f6a58995392f420d48f5fc8ec7a25dadeca30a | refs/heads/master | 2020-04-28T12:00:15.608727 | 2019-03-12T18:23:15 | 2019-03-12T18:23:15 | 175,262,079 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 673 | py | #!/usr/bin/env python
import argparse
import os
from docx import Document
from bondhon_docx import conversion
def main():
parser = argparse.ArgumentParser(description='Convert Bengali Documents between encodings.')
parser.add_argument('from_enc', help='Original Encoding of File')
parser.add_argument('to', help='The Encoding you want to convert to')
parser.add_argument('path', help='The path of the file')
args = parser.parse_args()
document = Document(args.path)
conversion.convert_document(args.from_enc, args.to, document)
path_without_ext, _ = os.path.splitext(args.path)
    document.save(path_without_ext + '.converted.docx')


if __name__ == '__main__':
    main()
| [
"[email protected]"
]
| |
4013fcc598254b2c31c9a6d62683192317037477 | e6dab5aa1754ff13755a1f74a28a201681ab7e1c | /.parts/lib/django-1.2/tests/regressiontests/test_utils/models.py | 514cf46f7b562720f2894129b24d62b046c1f3c1 | []
| no_license | ronkagan/Euler_1 | 67679203a9510147320f7c6513eefd391630703e | 022633cc298475c4f3fd0c6e2bde4f4728713995 | refs/heads/master | 2021-01-06T20:45:52.901025 | 2014-09-06T22:34:16 | 2014-09-06T22:34:16 | 23,744,842 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 108 | py | /home/action/.parts/packages/googleappengine/1.9.4/lib/django-1.2/tests/regressiontests/test_utils/models.py | [
"[email protected]"
]
| |
995c7fb086f0b3ce3be2766dfa862208c3486b28 | d52f71cac1c10a8641a18b2b30e789744f3b3ef7 | /Experiments/Yellow_submarine/2019_01_30_ml_approach/src/qmlt/numerical/__init__.py | a1f226b035bbee8776fb49ed53650e6768d1eceb | []
| no_license | BOHRTECHNOLOGY/public_research | 89c67e583b2283f6c67ab33c7303c23bf18467df | d9209f20073d075ae7150250cb1a369f8cb215b7 | refs/heads/master | 2022-12-10T16:47:54.319350 | 2020-01-09T12:51:04 | 2020-01-09T12:51:04 | 143,842,978 | 17 | 5 | null | 2022-12-08T01:40:31 | 2018-08-07T08:26:05 | Python | UTF-8 | Python | false | false | 2,833 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright 2018 Xanadu Quantum Technologies Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Numerical Quantum Circuit Learner
========================================================
**Module name:** :mod:`qmlt.numerical`
.. currentmodule:: qmlt.numerical
.. codeauthor:: Maria Schuld <[email protected]>
This module contains a class to train models for machine learning and optimization based on variational quantum circuits.
The optimization is executed by scipy's numerical optimisation library. The user defines a function that computes
the outputs of the variational circuit, as well as the training objective, and specifies the model and training
hyperparameters.
There are three basic functionalities. The circuit can be trained, run with the current parameters, and scored.
The numerical learner module has been designed for the training of continuous-variable circuits written in StrawberryFields or
BlackBird (using any backend), but is in principle able to train any user-provided model coded in python.
.. note::
Numerical differentiation is not robust, which means that some models fail to be trained. For example, the approximations
of gradients for gradient-based methods are not precise enough to find the steepest descent in plateaus of the
optimization landscape. This can sometimes be rectified by choosing good hyperparameters, but ultimately poses a limit
to training quantum circuits with numerical methods.
CircuitLearner class
---------------------
.. currentmodule:: qmlt.numerical.CircuitLearner
.. autosummary::
train_circuit
run_circuit
score_circuit
get_circuit_parameters
Helper methods
--------------
.. currentmodule:: qmlt.numerical
.. autosummary::
check
check_X
check_Y
check_steps
check_batch_size
check_logs
Code details
------------
"""
from .learner import (CircuitLearner,
_check as check,
_check_X as check_X,
_check_Y as check_Y,
_check_steps as check_steps,
_check_batch_size as check_batch_size,
_check_logs as check_logs)
__all__ = ['CircuitLearner', 'check', 'check_X', 'check_Y', 'check_steps', 'check_batch_size', 'check_logs']
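

# Hedged usage sketch: the hyperparameter keys, the circuit signature and the
# train_circuit() arguments below are assumptions inferred from the docstring
# above, not verified against learner.py.
def _usage_sketch():
    def circuit(params):
        # Stand-in for a function that builds and evaluates the variational
        # circuit and returns its output.
        return params[0] ** 2

    learner = CircuitLearner(hyperparams={'circuit': circuit,
                                          'init_circuit_params': [0.1],
                                          'task': 'optimization'})
    learner.train_circuit(steps=10)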
| [
"[email protected]"
]
| |
bdff99867244f35c2cca367095e00638f4182ed5 | a2d36e471988e0fae32e9a9d559204ebb065ab7f | /huaweicloud-sdk-vod/huaweicloudsdkvod/v1/model/publish_assets_response.py | 981baa1a542fb9aab9b16d4419a291edee3eff10 | [
"Apache-2.0"
]
| permissive | zhouxy666/huaweicloud-sdk-python-v3 | 4d878a90b8e003875fc803a61414788e5e4c2c34 | cc6f10a53205be4cb111d3ecfef8135ea804fa15 | refs/heads/master | 2023-09-02T07:41:12.605394 | 2021-11-12T03:20:11 | 2021-11-12T03:20:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,341 | py | # coding: utf-8
import re
import six
from huaweicloudsdkcore.sdk_response import SdkResponse
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class PublishAssetsResponse(SdkResponse):
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'asset_info_array': 'list[AssetInfo]'
}
attribute_map = {
'asset_info_array': 'asset_info_array'
}
def __init__(self, asset_info_array=None):
"""PublishAssetsResponse - a model defined in huaweicloud sdk"""
super(PublishAssetsResponse, self).__init__()
self._asset_info_array = None
self.discriminator = None
if asset_info_array is not None:
self.asset_info_array = asset_info_array
@property
def asset_info_array(self):
"""Gets the asset_info_array of this PublishAssetsResponse.
        Published media asset information.
:return: The asset_info_array of this PublishAssetsResponse.
:rtype: list[AssetInfo]
"""
return self._asset_info_array
@asset_info_array.setter
def asset_info_array(self, asset_info_array):
"""Sets the asset_info_array of this PublishAssetsResponse.
        Published media asset information.
:param asset_info_array: The asset_info_array of this PublishAssetsResponse.
:type: list[AssetInfo]
"""
self._asset_info_array = asset_info_array
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
import simplejson as json
if six.PY2:
import sys
reload(sys)
sys.setdefaultencoding("utf-8")
return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)
def __repr__(self):
"""For `print`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, PublishAssetsResponse):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| [
"[email protected]"
]
| |
786e15a926f9ea9ba51dff0e7cfd6b90ea532743 | cf14b6ee602bff94d3fc2d7e712b06458540eed7 | /gs24/enroll/urls.py | c58191feb75d1b077f6411cb53f93548cd76ff79 | []
| no_license | ManishShah120/Learning-Django | 8b0d7bfe7e7c13dcb71bb3d0dcdf3ebe7c36db27 | 8fe70723d18884e103359c745fb0de5498b8d594 | refs/heads/master | 2023-03-29T09:49:47.694123 | 2021-03-28T16:04:34 | 2021-03-28T16:04:34 | 328,925,596 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 129 | py | from django.urls import path
from .views import studentinfo
urlpatterns = [
path('stu/', studentinfo, name='studetails'),
]
| [
"[email protected]"
]
| |
e3af4d6ab9808460198837d63b6e0f5553b57bbb | 04b1803adb6653ecb7cb827c4f4aa616afacf629 | /content/browser/frame_host/DEPS | 3da57e57e31e41791a995a6a1205c9dbf9929048 | [
"BSD-3-Clause"
]
| permissive | Samsung/Castanets | 240d9338e097b75b3f669604315b06f7cf129d64 | 4896f732fc747dfdcfcbac3d442f2d2d42df264a | refs/heads/castanets_76_dev | 2023-08-31T09:01:04.744346 | 2021-07-30T04:56:25 | 2021-08-11T05:45:21 | 125,484,161 | 58 | 49 | BSD-3-Clause | 2022-10-16T19:31:26 | 2018-03-16T08:07:37 | null | UTF-8 | Python | false | false | 902 | include_rules = [
# The frame_host files should only call upwards in the layering via the
# delegate interfaces.
"-content/browser/web_contents",
"-content/public/browser/web_contents.h",
"-content/public/browser/web_contents_delegate.h",
"-content/public/browser/web_contents_view.h",
]
specific_include_rules = {
".*_(unit|browser)test\.cc": [
"+content/browser/web_contents",
"+content/public/browser/web_contents.h",
"+content/public/browser/web_contents_delegate.h",
],
".*interstitial_page_impl\.cc": [
# TODO(nasko): This should be removed once we remove
# WebContentsObserver as the method of telling interstitial pages to
# clean themselves up.
"+content/browser/web_contents",
"+content/public/browser/web_contents_delegate.h",
],
"popup_menu_helper_mac.mm": [
"+content/app_shim_remote_cocoa/render_widget_host_view_cocoa.h",
]
}
| [
"[email protected]"
]
| ||
8c677a448294359eddc72929c681abd438b90e80 | 385ed58325dd0cc75bdb9fd3e61c5e005f7a4f28 | /source/difang/src/difang/majiang2/table_state/state_xueliu.py | 8aa6d33479d6ef5c2163185c743230768621fe2e | []
| no_license | csirui/hall37 | 17dfa4e4f1f8bf719d0c11ac7738fa4c14fd06db | 5c4eb4b2bf57bbbee4731470c830d8d81915d603 | refs/heads/master | 2021-09-04T03:55:12.460035 | 2018-01-15T15:12:30 | 2018-01-15T15:12:30 | 117,560,615 | 0 | 3 | null | null | null | null | UTF-8 | Python | false | false | 672 | py | # -*- coding=utf-8
'''
Created on September 23, 2016
@author: zhaol
'''
from difang.majiang2.table_state.state import MTableState
class MTableStateXueliu(MTableState):
def __init__(self):
super(MTableStateXueliu, self).__init__()
        # Xuezhan ("bloodbath") rules: discard
        self.setState(MTableState.TABLE_STATE_DROP)
        # Pong (peng)
        self.setState(MTableState.TABLE_STATE_PENG)
        # Kong (gang)
        self.setState(MTableState.TABLE_STATE_GANG)
        # Declare a void suit (dingque)
        self.setState(MTableState.TABLE_STATE_ABSENCE)
        # Win (hu)
        self.setState(MTableState.TABLE_STATE_HU)
        # Keep playing after a win (xueliu chenghe, "blood flows like a river")
        self.setState(MTableState.TABLE_STATE_XUELIU)
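

# Minimal usage sketch: constructing the state object registers every flag
# set in __init__ above (drop, peng, gang, absence, hu, xueliu).
if __name__ == '__main__':
    _state = MTableStateXueliu()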
| [
"[email protected]"
]
| |
292ffd198700cdc76c0bcbe232ae0cb3ca792a13 | 07b751896b5e8c029a1808f5587a9bb30090b0b4 | /tensorflow/python/data/experimental/kernel_tests/restructured_dataset_test.py | 3b0d23d6e11ee17a3fe6ac5cf9cce767232c559a | [
"Apache-2.0"
]
| permissive | danfischetti/tensorflow | c5326578bac35c6f9a47444d8f91e03097fc2506 | f3d4bf4345a442f605a45b1fbf74ea9656fa72ed | refs/heads/master | 2020-04-11T10:07:21.324395 | 2018-12-13T22:46:13 | 2018-12-13T22:46:13 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,105 | py | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the private `_RestructuredDataset` transformation."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.data.experimental.ops import batching
from tensorflow.python.data.kernel_tests import test_base
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.data.util import nest
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import test
# TODO(b/119837791): Add eager coverage
class RestructuredDatasetTest(test_base.DatasetTestBase):
@test_util.run_deprecated_v1
def testRestructureDataset(self):
components = (array_ops.placeholder(dtypes.int32),
(array_ops.placeholder(dtypes.int32, shape=[None]),
array_ops.placeholder(dtypes.int32, shape=[20, 30])))
dataset = dataset_ops.Dataset.from_tensors(components)
i32 = dtypes.int32
test_cases = [((i32, i32, i32), None),
(((i32, i32), i32), None),
((i32, i32, i32), (None, None, None)),
((i32, i32, i32), ([17], [17], [20, 30]))]
for new_types, new_shape_lists in test_cases:
# pylint: disable=protected-access
new = batching._RestructuredDataset(dataset, new_types, new_shape_lists)
# pylint: enable=protected-access
self.assertEqual(new_types, new.output_types)
if new_shape_lists is not None:
for expected_shape_list, shape in zip(
nest.flatten(new_shape_lists), nest.flatten(new.output_shapes)):
if expected_shape_list is None:
self.assertIs(None, shape.ndims)
else:
self.assertEqual(expected_shape_list, shape.as_list())
fail_cases = [((i32, dtypes.int64, i32), None),
((i32, i32, i32, i32), None),
((i32, i32, i32), ((None, None), None)),
((i32, i32, i32), (None, None, None, None)),
((i32, i32, i32), (None, [None], [21, 30]))]
for new_types, new_shape_lists in fail_cases:
with self.assertRaises(ValueError):
# pylint: disable=protected-access
new = batching._RestructuredDataset(dataset, new_types, new_shape_lists)
# pylint: enable=protected-access
if __name__ == "__main__":
test.main()
| [
"[email protected]"
]
| |
38fd11aa7c506efa49b6de2a5c4c9d8db6977752 | 0667af1539008f9c6c0dcde2d3f50e8bbccf97f3 | /source/rttov_test/profile-datasets-py/div83/070.py | 2f10763fdc3f1b7acb4813b3e935a503943ac821 | [
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
]
| permissive | bucricket/projectMAScorrection | bc6b90f07c34bf3e922225b2c7bd680955f901ed | 89489026c8e247ec7c364e537798e766331fe569 | refs/heads/master | 2021-01-22T03:54:21.557485 | 2019-03-10T01:47:32 | 2019-03-10T01:47:32 | 81,468,938 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,446 | py | """
Profile ../profile-datasets-py/div83/070.py
file automaticaly created by prof_gen.py script
"""
self["ID"] = "../profile-datasets-py/div83/070.py"
self["Q"] = numpy.array([ 2.96115100e+00, 3.39443800e+00, 4.23145200e+00,
5.32973200e+00, 5.94539500e+00, 5.61806800e+00,
5.33932100e+00, 5.83540600e+00, 6.51451800e+00,
6.65043600e+00, 6.55529700e+00, 6.44838800e+00,
6.31400000e+00, 6.12939200e+00, 5.89224500e+00,
5.61127900e+00, 5.25048200e+00, 4.95066500e+00,
4.67371800e+00, 4.41607000e+00, 4.19023200e+00,
4.03347400e+00, 3.90907500e+00, 3.80890500e+00,
3.72809600e+00, 3.66258700e+00, 3.59588700e+00,
3.52695800e+00, 3.46347800e+00, 3.40734800e+00,
3.35437900e+00, 3.29386900e+00, 3.23485000e+00,
3.14360000e+00, 3.02604100e+00, 2.91984100e+00,
2.84811200e+00, 2.80580200e+00, 2.77955200e+00,
2.76484200e+00, 2.75856200e+00, 2.76287200e+00,
2.77901200e+00, 2.78894200e+00, 2.78647200e+00,
2.77288200e+00, 2.76076200e+00, 2.76365200e+00,
2.74351200e+00, 2.72718300e+00, 2.80359200e+00,
3.31174900e+00, 4.34185100e+00, 6.46307800e+00,
1.06424900e+01, 1.60878400e+01, 1.63576300e+01,
1.71113100e+01, 1.90418400e+01, 2.31223700e+01,
3.08604500e+01, 4.50171700e+01, 6.65996600e+01,
8.22580300e+01, 9.53938000e+01, 1.13173200e+02,
1.44493100e+02, 1.71137700e+02, 1.90849600e+02,
2.00276900e+02, 2.16941900e+02, 2.48781100e+02,
3.30323900e+02, 4.11388700e+02, 4.60154200e+02,
3.54358400e+02, 3.09988900e+02, 3.09470200e+02,
3.72736000e+02, 4.40020300e+02, 5.15807800e+02,
6.12176000e+02, 6.69989800e+02, 6.82229200e+02,
6.88011300e+02, 8.48419600e+02, 1.07039300e+03,
9.75860800e+02, 6.97745800e+02, 6.90916300e+02,
1.42792800e+03, 2.93211700e+03, 4.33631400e+03,
4.80827900e+03, 5.10875700e+03, 5.57655800e+03,
7.39840600e+03, 7.53264900e+03, 8.02216400e+03,
7.80660700e+03, 7.59956400e+03])
self["P"] = numpy.array([ 5.00000000e-03, 1.61000000e-02, 3.84000000e-02,
7.69000000e-02, 1.37000000e-01, 2.24400000e-01,
3.45400000e-01, 5.06400000e-01, 7.14000000e-01,
9.75300000e-01, 1.29720000e+00, 1.68720000e+00,
2.15260000e+00, 2.70090000e+00, 3.33980000e+00,
4.07700000e+00, 4.92040000e+00, 5.87760000e+00,
6.95670000e+00, 8.16550000e+00, 9.51190000e+00,
1.10038000e+01, 1.26492000e+01, 1.44559000e+01,
1.64318000e+01, 1.85847000e+01, 2.09224000e+01,
2.34526000e+01, 2.61829000e+01, 2.91210000e+01,
3.22744000e+01, 3.56505000e+01, 3.92566000e+01,
4.31001000e+01, 4.71882000e+01, 5.15278000e+01,
5.61260000e+01, 6.09895000e+01, 6.61253000e+01,
7.15398000e+01, 7.72396000e+01, 8.32310000e+01,
8.95204000e+01, 9.61138000e+01, 1.03017000e+02,
1.10237000e+02, 1.17778000e+02, 1.25646000e+02,
1.33846000e+02, 1.42385000e+02, 1.51266000e+02,
1.60496000e+02, 1.70078000e+02, 1.80018000e+02,
1.90320000e+02, 2.00989000e+02, 2.12028000e+02,
2.23442000e+02, 2.35234000e+02, 2.47408000e+02,
2.59969000e+02, 2.72919000e+02, 2.86262000e+02,
3.00000000e+02, 3.14137000e+02, 3.28675000e+02,
3.43618000e+02, 3.58966000e+02, 3.74724000e+02,
3.90893000e+02, 4.07474000e+02, 4.24470000e+02,
4.41882000e+02, 4.59712000e+02, 4.77961000e+02,
4.96630000e+02, 5.15720000e+02, 5.35232000e+02,
5.55167000e+02, 5.75525000e+02, 5.96306000e+02,
6.17511000e+02, 6.39140000e+02, 6.61192000e+02,
6.83667000e+02, 7.06565000e+02, 7.29886000e+02,
7.53628000e+02, 7.77790000e+02, 8.02371000e+02,
8.27371000e+02, 8.52788000e+02, 8.78620000e+02,
9.04866000e+02, 9.31524000e+02, 9.58591000e+02,
9.86067000e+02, 1.01395000e+03, 1.04223000e+03,
1.07092000e+03, 1.10000000e+03])
self["CO2"] = numpy.array([ 377.2399, 377.2387, 377.2354, 377.231 , 377.2238, 377.2129,
377.196 , 377.1698, 377.1305, 377.0945, 377.0805, 377.1046,
377.1886, 377.4377, 377.7968, 378.1419, 378.454 , 378.7141,
378.9302, 379.1213, 379.3134, 379.4245, 379.4775, 379.4286,
379.3536, 379.2596, 379.1476, 379.0247, 378.8627, 378.6927,
378.5567, 378.4108, 378.4178, 378.4328, 378.5689, 378.7689,
378.9979, 379.2789, 379.5759, 379.743 , 379.903 , 380.013 ,
380.0599, 380.1129, 380.3039, 380.5039, 380.6779, 380.8329,
380.985 , 381.068 , 381.1539, 381.3877, 381.6943, 381.9965,
382.2619, 382.5358, 382.7957, 383.0594, 383.2827, 383.4611,
383.6232, 383.6757, 383.7264, 383.7334, 383.7334, 383.7236,
383.7005, 383.6753, 383.6428, 383.6142, 383.5838, 383.5456,
383.5003, 383.4562, 383.4345, 383.4761, 383.5031, 383.5173,
383.509 , 383.5022, 383.4781, 383.4401, 383.395 , 383.3623,
383.3181, 383.2226, 383.1175, 382.9749, 382.9216, 382.8663,
382.5689, 381.9757, 381.4218, 381.2271, 381.0961, 380.902 ,
380.1902, 380.1289, 379.9335, 380.0121, 380.0894])
self["CO"] = numpy.array([ 0.02209863, 0.02343462, 0.02636759, 0.03232103, 0.04441274,
0.0705057 , 0.1377673 , 0.3534269 , 0.461836 , 0.448828 ,
0.30839 , 0.1443591 , 0.0477044 , 0.02142597, 0.02094568,
0.02113658, 0.02141849, 0.0209528 , 0.02028611, 0.01988771,
0.01996482, 0.01988972, 0.01966442, 0.01913733, 0.01849003,
0.01775643, 0.01725304, 0.01676274, 0.01639724, 0.01602755,
0.01581625, 0.01559305, 0.01559965, 0.01561665, 0.01591045,
0.01635915, 0.01689455, 0.01758455, 0.01834365, 0.01884835,
0.01935865, 0.01995024, 0.02063974, 0.02140204, 0.02298004,
0.02475493, 0.02631743, 0.02770002, 0.02908402, 0.02962222,
0.03019272, 0.0316051 , 0.03353225, 0.03579607, 0.03871269,
0.04198372, 0.04633544, 0.05141782, 0.05625363, 0.0606512 ,
0.06506769, 0.06728707, 0.06965246, 0.07065419, 0.07140839,
0.07176868, 0.07178553, 0.07174982, 0.07157794, 0.0714007 ,
0.07118445, 0.07096244, 0.07084199, 0.07073449, 0.07069745,
0.07069484, 0.07069948, 0.07070691, 0.07073942, 0.07078744,
0.07078517, 0.07076295, 0.07065773, 0.07053265, 0.07032628,
0.07011017, 0.06989271, 0.06894036, 0.06801451, 0.06735533,
0.06722317, 0.0670592 , 0.06686429, 0.06673387, 0.06665921,
0.06658212, 0.06643731, 0.06641224, 0.06656013, 0.06681143,
0.06706642])
self["T"] = numpy.array([ 196.811, 204.304, 217.854, 233.389, 248.33 , 260.214,
266.226, 265.259, 257.043, 242.866, 228.387, 223.547,
221.781, 221.163, 220.489, 219.198, 217.572, 214.833,
211.843, 209.215, 207.518, 207.486, 208.183, 209.263,
210.647, 212.484, 214.098, 215.184, 215.87 , 216.185,
216.149, 215.884, 215.792, 215.469, 214.984, 214.746,
215.066, 215.903, 216.949, 217.9 , 218.537, 218.612,
218.158, 217.61 , 217.538, 218.073, 218.871, 219.311,
219.09 , 218.297, 217.175, 216.108, 214.708, 212.93 ,
210.93 , 209.05 , 208.321, 209.076, 210.395, 212.004,
213.744, 215.541, 217.436, 219.439, 221.535, 223.702,
225.918, 228.187, 230.5 , 232.779, 235.027, 237.244,
239.332, 241.432, 243.59 , 245.912, 248.205, 250.491,
252.742, 254.943, 257.096, 259.143, 261.125, 263.048,
264.839, 266.281, 267.58 , 269.085, 270.573, 271.785,
272.39 , 272.482, 272.773, 273.871, 275.725, 277.517,
279.019, 281.268, 282.488, 282.488, 282.488])
self["N2O"] = numpy.array([ 0.01098997, 0.00728997, 0.00453998, 0.00265999, 0.00247999,
0.00200999, 0.00114999, 0.00069 , 0.00117999, 0.00162999,
0.00288998, 0.00513997, 0.00804995, 0.01392991, 0.02119988,
0.03283982, 0.04253978, 0.04902976, 0.05510974, 0.06037973,
0.06539973, 0.07965968, 0.09801962, 0.1156096 , 0.1445195 ,
0.1738694 , 0.2021193 , 0.2240692 , 0.2434892 , 0.2622491 ,
0.2783291 , 0.2858791 , 0.2931891 , 0.3002791 , 0.3046291 ,
0.3074991 , 0.3102091 , 0.3113491 , 0.3123391 , 0.3132791 ,
0.3141391 , 0.3149891 , 0.3158391 , 0.3166791 , 0.3175091 ,
0.3183191 , 0.3190891 , 0.3198291 , 0.3205191 , 0.3211491 ,
0.3217191 , 0.3222089 , 0.3224086 , 0.3225979 , 0.3227566 ,
0.3228948 , 0.3230147 , 0.3231045 , 0.3231538 , 0.3231725 ,
0.32317 , 0.3231655 , 0.3231585 , 0.3231534 , 0.3231492 ,
0.3231434 , 0.3231333 , 0.3231247 , 0.3231183 , 0.3231153 ,
0.3231099 , 0.3230996 , 0.3230732 , 0.323047 , 0.3230313 ,
0.3230655 , 0.3230798 , 0.32308 , 0.3230595 , 0.3230378 ,
0.3230133 , 0.3229822 , 0.3229635 , 0.3229595 , 0.3229576 ,
0.3229058 , 0.3228341 , 0.3228646 , 0.3229545 , 0.3229567 ,
0.3227185 , 0.3222324 , 0.3217786 , 0.3216261 , 0.321529 ,
0.3213778 , 0.320789 , 0.3207456 , 0.3205874 , 0.3206571 ,
0.320724 ])
self["O3"] = numpy.array([ 0.1602895 , 0.1751264 , 0.2174751 , 0.3374902 , 0.5335188 ,
0.7633317 , 1.037814 , 1.460131 , 2.162996 , 3.224179 ,
4.53031 , 5.466185 , 6.217261 , 6.769869 , 7.127218 ,
7.306899 , 7.309212 , 7.264534 , 7.173686 , 7.048029 ,
6.909961 , 6.743313 , 6.517205 , 6.247726 , 5.956838 ,
5.674419 , 5.417091 , 5.175832 , 4.917243 , 4.617564 ,
4.272526 , 3.918947 , 3.711748 , 3.457549 , 3.109261 ,
2.729652 , 2.387403 , 2.102434 , 1.848435 , 1.578356 ,
1.283646 , 1.063227 , 0.9494254 , 0.8682006 , 0.7912998 ,
0.729591 , 0.71388 , 0.6975461 , 0.726947 , 0.7552989 ,
0.6827211 , 0.5344572 , 0.4157442 , 0.3371368 , 0.2526923 ,
0.1435567 , 0.08720537, 0.06677076, 0.05481476, 0.04785379,
0.04413724, 0.04291547, 0.04383858, 0.04436565, 0.04414069,
0.04283575, 0.04163488, 0.04011853, 0.03831519, 0.03676863,
0.03569176, 0.03506188, 0.03532053, 0.03546251, 0.03550216,
0.03530918, 0.03537293, 0.03567126, 0.03650509, 0.0372565 ,
0.0377927 , 0.03774168, 0.0365436 , 0.03488379, 0.03320204,
0.03254936, 0.03240398, 0.03257758, 0.03319672, 0.03371569,
0.03283395, 0.03113155, 0.03098734, 0.03117398, 0.03105016,
0.03142975, 0.03257728, 0.03247998, 0.02796762, 0.0279737 ,
0.02797954])
self["CH4"] = numpy.array([ 0.01317666, 0.01317666, 0.08705683, 0.1460802 , 0.1961588 ,
0.2331317 , 0.2815615 , 0.3160522 , 0.3272379 , 0.3720405 ,
0.451968 , 0.5704313 , 0.7142055 , 0.8724697 , 1.009724 ,
1.109014 , 1.179674 , 1.198244 , 1.214824 , 1.223625 ,
1.232015 , 1.266925 , 1.313765 , 1.358645 , 1.425535 ,
1.492905 , 1.557734 , 1.603984 , 1.643204 , 1.652804 ,
1.663104 , 1.674134 , 1.685915 , 1.687485 , 1.688995 ,
1.690425 , 1.691735 , 1.692905 , 1.696045 , 1.699355 ,
1.702845 , 1.706505 , 1.710355 , 1.726075 , 1.739925 ,
1.754405 , 1.766195 , 1.775945 , 1.784715 , 1.784065 ,
1.783385 , 1.783484 , 1.783962 , 1.784378 , 1.784571 ,
1.784771 , 1.784851 , 1.784919 , 1.784596 , 1.783859 ,
1.782935 , 1.78108 , 1.779142 , 1.777374 , 1.775601 ,
1.774139 , 1.772954 , 1.771877 , 1.771152 , 1.770435 ,
1.770026 , 1.76958 , 1.769015 , 1.768432 , 1.767866 ,
1.767543 , 1.767622 , 1.767913 , 1.768471 , 1.769251 ,
1.769707 , 1.769996 , 1.769724 , 1.769372 , 1.768402 ,
1.76658 , 1.76361 , 1.759861 , 1.756264 , 1.752808 ,
1.749678 , 1.746125 , 1.743467 , 1.74281 , 1.742632 ,
1.741991 , 1.738949 , 1.738823 , 1.738054 , 1.738482 ,
1.738864 ])
self["CTP"] = 500.0
self["CFRACTION"] = 0.0
self["IDG"] = 0
self["ISH"] = 0
self["ELEVATION"] = 0.0
self["S2M"]["T"] = 282.488
self["S2M"]["Q"] = 7599.56436003
self["S2M"]["O"] = 0.0279795394023
self["S2M"]["P"] = 1029.69702
self["S2M"]["U"] = 0.0
self["S2M"]["V"] = 0.0
self["S2M"]["WFETC"] = 100000.0
self["SKIN"]["SURFTYPE"] = 1
self["SKIN"]["WATERTYPE"] = 1
self["SKIN"]["T"] = 282.488
self["SKIN"]["SALINITY"] = 35.0
self["SKIN"]["FOAM_FRACTION"] = 0.0
self["SKIN"]["FASTEM"] = numpy.array([ 3. , 5. , 15. , 0.1, 0.3])
self["ZENANGLE"] = 0.0
self["AZANGLE"] = 0.0
self["SUNZENANGLE"] = 0.0
self["SUNAZANGLE"] = 0.0
self["LATITUDE"] = -45.309
self["GAS_UNITS"] = 2
self["BE"] = 0.0
self["COSBK"] = 0.0
self["DATE"] = numpy.array([2006, 7, 10])
self["TIME"] = numpy.array([0, 0, 0])
| [
"[email protected]"
]
| |
07d557b67c5f57d0bc58e144628ef21653545f9f | ff8db86ce558e57f7b24f8f6d890a3154f6d948f | /neutron_plugin_contrail/plugins/opencontrail/loadbalancer/v2/loadbalancer_member.py | bcc4781fbd29f19c81389f17ff651e751bc75193 | [
"Apache-2.0"
]
| permissive | lungdear/tf-neutron-plugin | 143740d1cafb93f4cbe672e53a609c4771be6833 | d19e758673e1e28bf8b270b8e934857014a46cdf | refs/heads/master | 2022-12-04T21:18:39.869684 | 2020-08-08T13:32:59 | 2020-08-11T20:06:18 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,424 | py | #
# Copyright (c) 2014 Juniper Networks, Inc. All rights reserved.
#
import uuid
from neutron_lbaas.extensions import loadbalancerv2
try:
from neutron.openstack.common import uuidutils
except ImportError:
from oslo_utils import uuidutils
try:
from neutron.common.exceptions import NotAuthorized
except ImportError:
from neutron_lib.exceptions import NotAuthorized
from vnc_api.vnc_api import IdPermsType, NoIdError
from vnc_api.vnc_api import LoadbalancerMember, LoadbalancerMemberType
from .. resource_manager import ResourceManager
class LoadbalancerMemberManager(ResourceManager):
_loadbalancer_member_type_mapping = {
'admin_state': 'admin_state_up',
'status': 'status',
'protocol_port': 'protocol_port',
'weight': 'weight',
'address': 'address',
'subnet_id': 'subnet_id',
}
@property
def property_type_mapping(self):
return self._loadbalancer_member_type_mapping
def make_properties(self, member):
props = LoadbalancerMemberType()
for key, mapping in self._loadbalancer_member_type_mapping.iteritems():
if mapping in member:
setattr(props, key, member[mapping])
return props
def _get_member_pool_id(self, member):
pool_uuid = member.parent_uuid
return pool_uuid
def make_dict(self, member, fields=None):
res = {'id': member.uuid,
'name': member.name,
'pool_id': member.parent_uuid,
'status': self._get_object_status(member)}
try:
pool = self._api.loadbalancer_pool_read(id=member.parent_uuid)
res['tenant_id'] = pool.parent_uuid.replace('-', '')
except NoIdError:
pass
props = member.get_loadbalancer_member_properties()
for key, mapping in self._loadbalancer_member_type_mapping.iteritems():
value = getattr(props, key, None)
if value is not None:
res[mapping] = value
return self._fields(res, fields)
def resource_read(self, id):
return self._api.loadbalancer_member_read(id=id)
def resource_list(self, tenant_id=None):
""" In order to retrive all the members for a specific tenant
the code iterates through all the pools.
"""
if tenant_id is None:
return self._api.loadbalancer_members_list()
pool_list = self._api.loadbalancer_pools_list(tenant_id)
if 'loadbalancer-pools' not in pool_list:
return {}
member_list = []
for pool in pool_list['loadbalancer-pools']:
pool_members = self._api.loadbalancer_members_list(
parent_id=pool['uuid'])
if 'loadbalancer-members' in pool_members:
member_list.extend(pool_members['loadbalancer-members'])
response = {'loadbalancer-members': member_list}
return response
def get_resource(self, context, id, pool_id, fields=None):
res = super(LoadbalancerMemberManager, self).get_resource(context, id)
if res and res['pool_id'] != pool_id:
raise loadbalancerv2.MemberNotFoundForPool(member_id=res['id'],
pool_id=res['pool_id'])
return self._fields(res, fields)
def get_collection(self, context, pool_id, filters=None, fields=None):
""" Optimize the query for members in a pool.
"""
member_list = []
pool_members = self._api.loadbalancer_members_list(
parent_id=pool_id)
if 'loadbalancer-members' in pool_members:
member_list.extend(pool_members['loadbalancer-members'])
response = []
for m in member_list:
res = self._get_resource_dict(m['uuid'], filters, fields)
if res is not None and self._is_authorized(context, res):
response.append(res)
return response
def resource_update(self, obj):
return self._api.loadbalancer_member_update(obj)
def resource_delete(self, id):
return self._api.loadbalancer_member_delete(id=id)
def get_exception_notfound(self, id=None):
return loadbalancerv2.EntityNotFound(name=self.neutron_name, id=id)
def get_exception_inuse(self, id=None):
pass
@property
def neutron_name(self):
return "member"
@property
def resource_name_plural(self):
return "loadbalancer-members"
def create(self, context, pool_id, member):
"""
Create a loadbalancer_member object.
"""
m = member['member']
try:
pool = self._api.loadbalancer_pool_read(id=pool_id)
except NoIdError:
raise loadbalancerv2.EntityNotFound(name='Pool', id=pool_id)
tenant_id = self._get_tenant_id_for_create(context, m)
if str(uuid.UUID(tenant_id)) != pool.parent_uuid:
raise NotAuthorized()
obj_uuid = uuidutils.generate_uuid()
props = self.make_properties(m)
id_perms = IdPermsType(enable=True)
member_db = LoadbalancerMember(
obj_uuid, pool, loadbalancer_member_properties=props,
id_perms=id_perms)
member_db.uuid = obj_uuid
self._api.loadbalancer_member_create(member_db)
return self.make_dict(member_db)
def update_properties(self, member_db, id, m):
props = member_db.get_loadbalancer_member_properties()
if self.update_properties_subr(props, m):
member_db.set_loadbalancer_member_properties(props)
return True
return False
def delete(self, context, id, pool_id):
try:
_ = self._api.loadbalancer_member_read(id=id)
except NoIdError:
raise loadbalancerv2.EntityNotFound(name=self.neutron_name, id=id)
try:
pool = self._api.loadbalancer_pool_read(id=pool_id)
except NoIdError:
raise loadbalancerv2.EntityNotFound(name='Pool',
id=pool_id)
if id not in [member['uuid'] for member in
pool.get_loadbalancer_members() or []]:
raise loadbalancerv2.MemberNotFoundForPool(member_id=id,
pool_id=pool_id)
super(LoadbalancerMemberManager, self).delete(context, id)
def update_object(self, member_db, id, m):
pool_id = member_db.parent_uuid
try:
pool = self._api.loadbalancer_pool_read(id=pool_id)
except NoIdError:
raise loadbalancerv2.EntityNotFound(name='Pool',
id=pool_id)
db_props = member_db.get_loadbalancer_member_properties()
members = pool.get_loadbalancer_members()
for member in members or []:
if id == member['uuid']:
continue
member_obj = self._api.loadbalancer_member_read(id=member['uuid'])
props = member_obj.get_loadbalancer_member_properties()
if (props.get_address() == db_props.get_address() and
props.get_protocol_port() == db_props.get_protocol_port()):
raise loadbalancerv2.MemberExists(
address=props.get_address(),
port=props.get_protocol_port(),
pool=pool_id)
return True
| [
"[email protected]"
]
| |
33aac62c06dca320ef84cbca693af39b9e8b6757 | ee6caf788762d7e297aed4c291b20012ed681410 | /92. Codeforces/R73-C.py | 25419f28edf76623870b575c939e5b06d5e7ad59 | []
| no_license | dmlimgo/Problem-Solving | 61ea51f1737f572714bc5030470a73a6e0339336 | c265ccac046b3e87c34d014876fde11f33a15ed9 | refs/heads/master | 2020-08-30T15:45:08.895947 | 2020-02-02T14:45:28 | 2020-02-02T14:45:28 | 218,424,679 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 428 | py | Q = int(input())
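# "Perfect Team"-style counting: a team needs at least one coder (c), one
# mathematician (m) and exactly three members (x are unspecialized), so the
# answer per query is min(c, m, (c + m + x) // 3); the branches below
# compute exactly that value.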
for q in range(Q):
c, m, x = map(int, input().split())
if c == 0 or m == 0:
print(0)
continue
if min(c,m,x) == x:
c -= x
m -= x
b = max(c, m)
s = min(c, m)
if (s+b)//3 > s:
print(s+x)
else:
print((s+b)//3+x)
continue
if min(c,m,x) == c or min(c,m,x) == m:
print(min(c,m,x))
continue
| [
"[email protected]"
]
| |
6b75e66b7182ecc217fcf6cf12e24451b43ad307 | aa9647e01ace505d9c70e5247af0bce6749bdc45 | /src/db.py | b34ffc4a33947da398cf2efb32ceeecdd3a2e601 | [
"MIT"
]
| permissive | cgDeepLearn/pyserver | 83853875dc33173eb3ae72b2e70c7db2c9ba3404 | 5a5e23ccafcc203b2d70eef289ec618ff9da0481 | refs/heads/main | 2023-01-29T05:45:52.110262 | 2020-12-09T09:03:33 | 2020-12-09T09:03:33 | 311,908,364 | 0 | 0 | MIT | 2020-11-16T08:39:29 | 2020-11-11T08:29:55 | Python | UTF-8 | Python | false | false | 5,075 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @File : db.py
# @Author : cgDeepLearn
# @Create Date : 2020/11/16 3:30 PM
import redis
from conf import config
import pymysql
from DBUtils.PooledDB import PooledDB
from utils.log import logger
class RedisOps(object):
FIELD_EXIST = 0
NEW_FIELD = 1
def __init__(self, host, port, password, db):
rd = redis.ConnectionPool(host=host, port=port, password=password, db=db)
self.rd = redis.Redis(connection_pool=rd)
class MysqlOps(object):
def __init__(self, host, port, user, passwd, db):
self.pool = PooledDB(
pymysql,
mincached=10,
maxcached=30,
maxconnections=0,
host=host,
user=user,
passwd=passwd,
db=db,
port=port,
charset='utf8')
self.user_apply = 'user_apply'
self.user_base = 'user_base'
self.flows = 'flows'
self.table_list = list()
def _execute(self, sql, values):
'''
        Use a fresh connection from the pool for every execution.
'''
conn = self.pool.connection()
cur = conn.cursor()
cur.execute(sql, values)
conn.commit()
conn.close()
return cur
def _check_parameter(self, sql, values):
count = sql.count('%s')
if count > 0:
for elem in values:
if not elem:
return False
return True
def _get_table_list(self):
if len(self.table_list) == 0:
sql = '''SELECT COUNT(id) FROM data_split_info'''
table_num = list(self.select(sql))[0][0]
self.table_list = [num for num in range(0, table_num)]
def _replace(self, sql, table, num):
if num == 0:
if table in sql:
string = ' AND %s.deleted_at is null' % table
sql = sql + string
else:
pattern = '%s' % table
string = '%s_%d' % (table, num)
sql = sql.replace(pattern, string)
return sql
def _mulselect(self, apply_id, sql, values):
self._get_table_list()
mulcur = list()
for num in self.table_list:
temp_c = 0
sql_tmp = sql
sql_tmp = self._replace(sql_tmp, self.user_apply, num)
sql_tmp = self._replace(sql_tmp, self.user_base, num)
sql_tmp = self._replace(sql_tmp, self.flows, num)
cur = self._execute(sql_tmp, values)
for row in cur:
temp_c = temp_c + 1
mulcur.append(row)
logger.info('apply_id:%d _mulselect sql:%s, values:%s, result:%s',
apply_id, sql_tmp, values, temp_c)
return mulcur
def mulselect(self, sql, values=[], apply_id=0, check=False, log=True):
'''
        Sharded multi-table query interface.
        1. Supports basic MySQL queries only; aggregate functions, grouping
           and ordering are not supported.
'''
sql = sql.replace('\n', '')
if check and not self._check_parameter(sql, values):
return
if log:
logger.info('apply_id:%d mulselect sql:%s, values:%s', apply_id,
sql, values)
cur = self._mulselect(apply_id, sql, values)
for row in cur:
yield row
def sinselect(self, sql, values=[], apply_id=0, check=False, log=True):
sql = sql.replace('\n', '')
if check and not self._check_parameter(sql, values):
return
        # Transition period: also add the deleted_at check
sql = self._replace(sql, self.user_apply, num=0)
sql = self._replace(sql, self.user_base, num=0)
sql = self._replace(sql, self.flows, num=0)
if log:
logger.info('apply_id:%d sinselect sql:%s, values:%s', apply_id,
sql, values)
cur = self._execute(sql, values)
for row in cur:
yield row
def select(self, sql, values=[], apply_id=0, check=False, log=True):
sql = sql.replace('\n', '')
if check and not self._check_parameter(sql, values):
return
if log:
logger.info('apply_id:%d select sql:%s, values:%s', apply_id, sql,
values)
cur = self._execute(sql, values)
for row in cur:
yield row
def execute(self, sql, values=[], apply_id=0, check=False, log=True):
sql = sql.replace('\n', '')
if check and not self._check_parameter(sql, values):
return
if log:
logger.info('apply_id:%d execute sql:%s, values:%s', apply_id, sql,
values)
cur = self._execute(sql, values)
redis_op = RedisOps(
host=config.redis_host, port=config.redis_port, password=config.redis_pwd, db=config.redis_db)
mysql_op = MysqlOps(
host=config.mysql_host,
port=config.mysql_port,
user=config.mysql_user,
passwd=config.mysql_pwd,
db=config.mysql_db)
if __name__ == '__main__':
print(dir(redis_op))
print(dir(mysql_op)) | [
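    # Usage sketch: select() returns a generator, so iterate to fetch rows;
    # 'SELECT 1' keeps the example schema-independent.
    for _row in mysql_op.select('SELECT 1', log=False):
        print(_row)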
"[email protected]"
]
| |
4f66898e78968d145cadffd50f0fbaa0bc24e6f1 | 3b1daac7c1f72b985da899770d98e5f0e8fb835c | /Configurations/VBS/2017CR_v7/plot.py | 98e0a0b236687fec6d81492a000ee0a41787e122 | []
| no_license | freejiebao/PlotsConfigurations | 7e10aa45aa3bf742f30d1e21dc565d59d2a025d8 | cdfd3aff38d1ece9599a699997753bc8ba01b9b1 | refs/heads/master | 2020-06-18T19:22:00.561542 | 2019-09-02T12:52:28 | 2019-09-02T12:52:28 | 186,931,874 | 0 | 0 | null | 2019-05-16T01:58:07 | 2019-05-16T01:58:07 | null | UTF-8 | Python | false | false | 1,009 | py | # plot configuration
# groupPlot = {}
#
# Groups of samples to improve the plots (merge different sample during plot).
# If not defined, normal plots is used
#
Red=632; Violet=880; Green=416; Orange=800; Yellow=400; Azure=860
groupPlot['non-prompt'] = {
'nameHR' : 'non-Prompt',
'isSignal' : 0,
'color': Yellow, # kYellow
'samples' : ['Fake_lep']
}
## Fake and prompt subtraction
plot['Fake_lep'] = {
'color': Yellow, # kYellow
'isSignal' : 0,
'isData' : 0,
'scale' : 1.0
}
##Data
plot['DATA'] = {
'nameHR' : 'Data',
'color': 1 ,
'isSignal' : 0,
'isData' : 1 ,
'isBlind' : 1 ,
'scale' : 1.0
}
# additional options
legend['lumi'] = 'L = 41.5/fb'
legend['sqrt'] = '#sqrt{s} = 13 TeV'
| [
"[email protected]"
]
| |
d01e1db1a3d1d0bce24766f0e241c2a7a9923a0f | 665b89f2472f5cf7eb441609eb112109b7381884 | /weblatex/migrations/0003_song_attribution.py | 98e7a851e1a47ea4155fcbc38063165cc4d344cb | []
| no_license | Mortal/weblatex | 5807bf25ea0d6a371e9fc6f0094f7e7375645b6c | 9c841f9ec226e99f38b6e0c4f12e03535d2c06de | refs/heads/master | 2020-05-14T11:53:08.299274 | 2016-12-23T12:03:50 | 2016-12-23T13:43:52 | 24,682,829 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 499 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2015-12-31 09:16
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('weblatex', '0002_auto_20151227_1835'),
]
operations = [
migrations.AddField(
model_name='song',
name='attribution',
field=models.CharField(default='', max_length=200),
preserve_default=False,
),
]
| [
"[email protected]"
]
| |
77d2fa83d35599a5b053874fa4654b5d4fae6602 | 7e72c17745625a1dd4d04f1787c1d2b7bd90642f | /htmlgen/attribute.pyi | 7d17093d5f6cc7d37287a665c5b87a2b0710bba8 | [
"MIT"
]
| permissive | ra2003/python-htmlgen | 27de75b94ad3b635caf11d26fa64f4a19e543668 | cbe74d89acd655b78ffe12773b16ef2036502514 | refs/heads/master | 2022-04-08T10:37:36.265349 | 2020-03-11T13:46:53 | 2020-03-11T13:46:53 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,239 | pyi | import datetime
from typing import Optional, List, Iterable
from htmlgen.element import Element
class html_attribute(object):
def __init__(
self, attribute_name: str, default: Optional[str] = ...
) -> None: ...
def __get__(
self, obj: Element, type: Optional[type] = ...
) -> Optional[str]: ...
def __set__(self, obj: Element, value: Optional[str]) -> None: ...
class boolean_html_attribute(object):
def __init__(self, attribute_name: str) -> None: ...
def __get__(self, obj: Element, type_: Optional[type] = ...) -> bool: ...
def __set__(self, obj: Element, value: bool) -> None: ...
class int_html_attribute(object):
def __init__(
self, attribute_name: str, default: Optional[int] = ...
) -> None: ...
def __get__(
self, obj: Element, type_: Optional[type] = ...
) -> Optional[int]: ...
def __set__(self, obj: Element, value: Optional[int]) -> None: ...
class float_html_attribute(object):
def __init__(
self, attribute_name: str, default: Optional[float] = ...
) -> None: ...
def __get__(
self, obj: Element, type_: Optional[type] = ...
) -> Optional[float]: ...
def __set__(self, obj: Element, value: Optional[float]) -> None: ...
class time_html_attribute(object):
def __init__(
self, attribute_name: str, default: Optional[datetime.time] = None
) -> None: ...
def __get__(
self, obj: Element, type_: Optional[type] = ...
) -> Optional[datetime.time]: ...
def __set__(
self, obj: Element, value: Optional[datetime.time]
) -> None: ...
class list_html_attribute(object):
def __init__(self, attribute_name: str) -> None: ...
def __get__(
self, obj: Element, type_: Optional[type] = ...
) -> List[str]: ...
def __set__(self, obj: Element, value: Iterable[str]) -> None: ...
class data_attribute(html_attribute):
def __init__(
self, data_name: str, default: Optional[str] = None
) -> None: ...
class css_class_attribute(object):
def __init__(self, css_class: str) -> None: ...
def __get__(self, obj: Element, type_: Optional[type] = ...) -> bool: ...
def __set__(self, obj: Element, value: bool) -> None: ...
| [
"[email protected]"
]
| |
b31a19f61f75d84e9c43cae789ca4a9fafb8dfc3 | 3cae667175b2d6aac6d7f3d8189e9a02c38ea1cf | /AOJ/ITP1/python/ITP1_3_B_Print_Test_Cases.py | 01ada1baf19ee14e9ca3f502aaf3c19915bc6f52 | []
| no_license | kokorinosoba/contests | 3ee14acf729eda872ebec9ec7fe3431f50ae23c2 | 6e0dcd7c8ee086650d89fc65616981361b9b20b9 | refs/heads/master | 2022-08-04T13:45:29.722075 | 2022-07-24T08:50:11 | 2022-07-24T08:50:11 | 149,092,111 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 264 | py | for i,e in enumerate(list(open(0))[:-1],1):print(f'Case {i}:',e,end='')
"""
i=1
while 1:
n=input()
if n=="0": break
print(f"Case {i}: {n}")
i+=1
"""
"""
import sys
for i,x in enumerate(sys.stdin,1):
if x=="0\n":break
print(f"Case {i}: {x}",end="")
"""
| [
"[email protected]"
]
| |
e894dd2c0042e872525cb05a134c54ed4c900387 | 62e58c051128baef9452e7e0eb0b5a83367add26 | /x12/5010/317005010.py | bae484c5693cbe1c4f44c01024c8ae9c43673514 | []
| no_license | dougvanhorn/bots-grammars | 2eb6c0a6b5231c14a6faf194b932aa614809076c | 09db18d9d9bd9d92cefbf00f1c0de1c590fe3d0d | refs/heads/master | 2021-05-16T12:55:58.022904 | 2019-05-17T15:22:23 | 2019-05-17T15:22:23 | 105,274,633 | 0 | 0 | null | 2017-09-29T13:21:21 | 2017-09-29T13:21:21 | null | UTF-8 | Python | false | false | 756 | py | from bots.botsconfig import *
from records005010 import recorddefs
syntax = {
'version' : '00403', #version of ISA to send
'functionalgroup' : 'SO',
}
structure = [
{ID: 'ST', MIN: 1, MAX: 1, LEVEL: [
{ID: 'N1', MIN: 1, MAX: 10, LEVEL: [
{ID: 'N2', MIN: 0, MAX: 1},
{ID: 'N3', MIN: 0, MAX: 2},
{ID: 'N4', MIN: 0, MAX: 1},
{ID: 'G61', MIN: 0, MAX: 1},
{ID: 'N9', MIN: 0, MAX: 9},
]},
{ID: 'G62', MIN: 1, MAX: 1},
{ID: 'N9', MIN: 1, MAX: 9},
{ID: 'TD5', MIN: 1, MAX: 1},
{ID: 'L0', MIN: 1, MAX: 9999, LEVEL: [
{ID: 'L5', MIN: 0, MAX: 999},
{ID: 'H1', MIN: 0, MAX: 1},
]},
{ID: 'SE', MIN: 1, MAX: 1},
]}
]
| [
"[email protected]"
]
| |
de44f671db344112f3455fc9a68fd630b9fa685c | a16feb303b7599afac19a89945fc2a9603ae2477 | /Simple_Python/standard/exception/exception_3.py | c238bdfaf994db7ca61ad080adc0958a24b2cca5 | []
| no_license | yafeile/Simple_Study | d75874745ce388b3d0f9acfa9ebc5606a5745d78 | c3c554f14b378b487c632e11f22e5e3118be940c | refs/heads/master | 2021-01-10T22:08:34.636123 | 2015-06-10T11:58:59 | 2015-06-10T11:58:59 | 24,746,770 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 178 | py | #! /usr/bin/env/python
# -*- coding:utf-8 -*-
class MyClass(object):
__slots__ = ('attribute',)
o = MyClass()
o.attribute = 'known attribute'
o.not_a_slot = 'new attribute'  # raises AttributeError: 'MyClass' object has no attribute 'not_a_slot'
"[email protected]"
]
| |
1c738ef73bdc0768137d85581d244067c1e3ef73 | f9d7036649ff5d64660c33bc295ddf97e316d082 | /blog/settings.py | b9c6c73355a9da6b8c57b7e16e0b4b08e72fe807 | []
| no_license | jocsakesley/blog-jocsa-kesley | 1ebd6c11ad45c98a6b396ddfe58675da5cd113ec | d106a0870636542c08ee7791d971d77a948b3e0a | refs/heads/main | 2023-03-16T00:08:23.688040 | 2021-03-12T15:36:57 | 2021-03-12T15:36:57 | 322,912,766 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,687 | py | """
Django settings for blog project.
Generated by 'django-admin startproject' using Django 3.1.4.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.1/ref/settings/
"""
import os
from pathlib import Path
from decouple import config, Csv
from dj_database_url import parse as dburl
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = config("SECRET_KEY")
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = config("DEBUG", default=False, cast=bool)
ALLOWED_HOSTS = config("ALLOWED_HOSTS", default=[], cast=Csv())
# Application definition
INSTALLED_APPS = [
'blog.posts',
'blog.comentarios',
'blog.categorias',
'blog.sobre',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.humanize',
'widget_tweaks',
'django_extensions',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'blog.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'blog.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
default_dburl = 'sqlite:///' + os.path.join(BASE_DIR, 'db.sqlite3')
DATABASES = {
'default': config('DATABASE_URL', default=default_dburl, cast=dburl)
}
# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
LANGUAGE_CODE = 'pt-BR'
TIME_ZONE = 'America/Recife'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'staticfiles')
STATICFILES_DIRS = (os.path.join(BASE_DIR, 'templates/static'),)
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
MEDIA_URL = '/media/'
INSTALLED_APPS += ('django_summernote', )
X_FRAME_OPTIONS = 'SAMEORIGIN'
| [
"[email protected]"
]
| |
0e0b02856e4b9275bbad24a7461c2c793b231d87 | 15f321878face2af9317363c5f6de1e5ddd9b749 | /solutions_python/Problem_208/81.py | 5eb844629edbf0f9bad243963bf552da90da0e7c | []
| no_license | dr-dos-ok/Code_Jam_Webscraper | c06fd59870842664cd79c41eb460a09553e1c80a | 26a35bf114a3aa30fc4c677ef069d95f41665cc0 | refs/heads/master | 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 549 | py | for t in range(int(input())):
n, q = (int(i) for i in input().split())
hs = [[int(i) for i in input().split()] for j in range(n)]
ds = [[int(i) for i in input().split()][j + 1] for j in range(n - 1)]
input()
input()
tc = [0] * n
tc[n - 1] = 0
for i in range(n - 2, -1, -1):
min = -1
sd = 0
for j in range(1, n - i):
sd += ds[i + j - 1]
if sd > hs[i][0]:
break
if tc[i + j] == -1:
continue
tm = tc[i + j] + sd / hs[i][1]
if min == -1 or tm < min:
min = tm
tc[i] = min
print("Case #%d: %f" % (t + 1, tc[0])) | [
"[email protected]"
]
| |
b247c4def0f45dc6866abfd6944a0a96789d5be0 | 162e0e4791188bd44f6ce5225ff3b1f0b1aa0b0d | /examples/plot_kernel_approximation.py | 735ed9238223d5243a53a245a95a7e1b780db417 | []
| no_license | testsleeekGithub/trex | 2af21fa95f9372f153dbe91941a93937480f4e2f | 9d27a9b44d814ede3996a37365d63814214260ae | refs/heads/master | 2020-08-01T11:47:43.926750 | 2019-11-06T06:47:19 | 2019-11-06T06:47:19 | 210,987,245 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,687 | py | """
==================================================
Explicit feature map approximation for RBF kernels
==================================================
An example illustrating the approximation of the feature map
of an RBF kernel.
.. currentmodule:: mrex.kernel_approximation
It shows how to use :class:`RBFSampler` and :class:`Nystroem` to
approximate the feature map of an RBF kernel for classification with an SVM on
the digits dataset. Results using a linear SVM in the original space, a linear
SVM using the approximate mappings and using a kernelized SVM are compared.
Timings and accuracy for varying amounts of Monte Carlo samplings (in the case
of :class:`RBFSampler`, which uses random Fourier features) and different sized
subsets of the training set (for :class:`Nystroem`) for the approximate mapping
are shown.
Please note that the dataset here is not large enough to show the benefits
of kernel approximation, as the exact SVM is still reasonably fast.
Sampling more dimensions clearly leads to better classification results, but
comes at a greater cost. This means there is a tradeoff between runtime and
accuracy, given by the parameter n_components. Note that solving the Linear
SVM and also the approximate kernel SVM could be greatly accelerated by using
stochastic gradient descent via :class:`mrex.linear_model.SGDClassifier`.
This is not easily possible for the case of the kernelized SVM.
"""
###########################################################################
# Python package and dataset imports, load dataset
# ---------------------------------------------------
# Author: Gael Varoquaux <gael dot varoquaux at normalesup dot org>
# Andreas Mueller <[email protected]>
# License: BSD 3 clause
print(__doc__)
# Standard scientific Python imports
import matplotlib.pyplot as plt
import numpy as np
from time import time
# Import datasets, classifiers and performance metrics
from mrex import datasets, svm, pipeline
from mrex.kernel_approximation import (RBFSampler,
Nystroem)
from mrex.decomposition import PCA
# The digits dataset
digits = datasets.load_digits(n_class=9)
##################################################################
# Timing and accuracy plots
# --------------------------------------------------
# To apply a classifier to this data, we need to flatten the images, to
# turn the data into a (samples, features) matrix:
n_samples = len(digits.data)
data = digits.data / 16.
data -= data.mean(axis=0)
# We learn the digits on the first half of the digits
data_train, targets_train = (data[:n_samples // 2],
digits.target[:n_samples // 2])
# Now predict the value of the digit on the second half:
data_test, targets_test = (data[n_samples // 2:],
digits.target[n_samples // 2:])
# data_test = scaler.transform(data_test)
# Create a classifier: a support vector classifier
kernel_svm = svm.SVC(gamma=.2)
linear_svm = svm.LinearSVC()
# create pipeline from kernel approximation
# and linear svm
feature_map_fourier = RBFSampler(gamma=.2, random_state=1)
feature_map_nystroem = Nystroem(gamma=.2, random_state=1)
fourier_approx_svm = pipeline.Pipeline([("feature_map", feature_map_fourier),
("svm", svm.LinearSVC())])
nystroem_approx_svm = pipeline.Pipeline([("feature_map", feature_map_nystroem),
("svm", svm.LinearSVC())])
# fit and predict using linear and kernel svm:
kernel_svm_time = time()
kernel_svm.fit(data_train, targets_train)
kernel_svm_score = kernel_svm.score(data_test, targets_test)
kernel_svm_time = time() - kernel_svm_time
linear_svm_time = time()
linear_svm.fit(data_train, targets_train)
linear_svm_score = linear_svm.score(data_test, targets_test)
linear_svm_time = time() - linear_svm_time
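# Illustrative aside (added; not part of the original benchmark): the module
# docstring notes that SGD can greatly accelerate the linear and
# approximate-kernel fits. Assuming mrex mirrors scikit-learn's API, the
# Fourier-approximated model trained with SGD looks like this (its score is
# printed but deliberately kept out of the timing plots below):
from mrex.linear_model import SGDClassifier
sgd_fourier_svm = pipeline.Pipeline(
    [("feature_map", RBFSampler(gamma=.2, random_state=1)),
     ("sgd", SGDClassifier(max_iter=1000, tol=1e-3))])
sgd_fourier_svm.fit(data_train, targets_train)
print("SGD + Fourier features accuracy:",
      sgd_fourier_svm.score(data_test, targets_test))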
sample_sizes = 30 * np.arange(1, 10)
fourier_scores = []
nystroem_scores = []
fourier_times = []
nystroem_times = []
for D in sample_sizes:
fourier_approx_svm.set_params(feature_map__n_components=D)
nystroem_approx_svm.set_params(feature_map__n_components=D)
start = time()
nystroem_approx_svm.fit(data_train, targets_train)
nystroem_times.append(time() - start)
start = time()
fourier_approx_svm.fit(data_train, targets_train)
fourier_times.append(time() - start)
fourier_score = fourier_approx_svm.score(data_test, targets_test)
nystroem_score = nystroem_approx_svm.score(data_test, targets_test)
nystroem_scores.append(nystroem_score)
fourier_scores.append(fourier_score)
# plot the results:
plt.figure(figsize=(16, 4))
accuracy = plt.subplot(121)
# second y axis for timings
timescale = plt.subplot(122)
accuracy.plot(sample_sizes, nystroem_scores, label="Nystroem approx. kernel")
timescale.plot(sample_sizes, nystroem_times, '--',
label='Nystroem approx. kernel')
accuracy.plot(sample_sizes, fourier_scores, label="Fourier approx. kernel")
timescale.plot(sample_sizes, fourier_times, '--',
label='Fourier approx. kernel')
# horizontal lines for exact rbf and linear kernels:
accuracy.plot([sample_sizes[0], sample_sizes[-1]],
[linear_svm_score, linear_svm_score], label="linear svm")
timescale.plot([sample_sizes[0], sample_sizes[-1]],
[linear_svm_time, linear_svm_time], '--', label='linear svm')
accuracy.plot([sample_sizes[0], sample_sizes[-1]],
[kernel_svm_score, kernel_svm_score], label="rbf svm")
timescale.plot([sample_sizes[0], sample_sizes[-1]],
[kernel_svm_time, kernel_svm_time], '--', label='rbf svm')
# vertical line for dataset dimensionality = 64
accuracy.plot([64, 64], [0.7, 1], label="n_features")
# legends and labels
accuracy.set_title("Classification accuracy")
timescale.set_title("Training times")
accuracy.set_xlim(sample_sizes[0], sample_sizes[-1])
accuracy.set_xticks(())
accuracy.set_ylim(np.min(fourier_scores), 1)
timescale.set_xlabel("Sampling steps = transformed feature dimension")
accuracy.set_ylabel("Classification accuracy")
timescale.set_ylabel("Training time in seconds")
accuracy.legend(loc='best')
timescale.legend(loc='best')
plt.tight_layout()
plt.show()
############################################################################
# Decision Surfaces of RBF Kernel SVM and Linear SVM
# --------------------------------------------------------
# The second plot visualizes the decision surfaces of the RBF kernel SVM and
# the linear SVM with approximate kernel maps.
# The plot shows decision surfaces of the classifiers projected onto
# the first two principal components of the data. This visualization should
# be taken with a grain of salt since it is just an interesting slice through
# the decision surface in 64 dimensions. In particular, note that
# a datapoint (represented as a dot) is not necessarily classified
# into the region it appears to lie in, since it will not lie exactly on
# the plane that the first two principal components span.
# The usage of :class:`RBFSampler` and :class:`Nystroem` is described in detail
# in :ref:`kernel_approximation`.
# visualize the decision surface, projected down to the first
# two principal components of the dataset
pca = PCA(n_components=8).fit(data_train)
X = pca.transform(data_train)
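# Added for illustration: quantify how much variance the 2-D projection
# below actually captures, supporting the "grain of salt" caveat above
# (assumes scikit-learn's fitted-PCA attribute name).
print("Variance explained by the first two components: {:.1%}".format(
    pca.explained_variance_ratio_[:2].sum()))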
# Generate grid along first two principal components
multiples = np.arange(-2, 2, 0.1)
# steps along first component
first = multiples[:, np.newaxis] * pca.components_[0, :]
# steps along second component
second = multiples[:, np.newaxis] * pca.components_[1, :]
# combine
grid = first[np.newaxis, :, :] + second[:, np.newaxis, :]
flat_grid = grid.reshape(-1, data.shape[1])
# title for the plots
titles = ['SVC with rbf kernel',
'SVC (linear kernel)\n with Fourier rbf feature map\n'
'n_components=270',
'SVC (linear kernel)\n with Nystroem rbf feature map\n'
'n_components=270']
plt.figure(figsize=(18, 7.5))
plt.rcParams.update({'font.size': 14})
# predict and plot
for i, clf in enumerate((kernel_svm, fourier_approx_svm,
nystroem_approx_svm)):
# Plot the decision boundary. For that, we will assign a color to each
# point in the mesh [x_min, x_max]x[y_min, y_max].
plt.subplot(1, 3, i + 1)
Z = clf.predict(flat_grid)
# Put the result into a color plot
Z = Z.reshape(grid.shape[:-1])
plt.contourf(multiples, multiples, Z, cmap=plt.cm.Paired)
plt.axis('off')
# Plot also the training points
plt.scatter(X[:, 0], X[:, 1], c=targets_train, cmap=plt.cm.Paired,
edgecolors=(0, 0, 0))
plt.title(titles[i])
plt.tight_layout()
plt.show()
| [
"[email protected]"
]
| |
c536b9fd5c1e73cc295090ed7b3acb50d109db16 | fec863b67ec1ae65da7111bd8c77d0ab2ef1f6ce | /movie recommendation system/.history/moviemodel_20210503171215.py | 065bab6744cb5a59f9f2bcad99cc217a20cecea4 | []
| no_license | kannan768/movie-recommendation-system | e6cf71620e25a0185fed3b37896137f1f39b0801 | 7460d440d44e77390e459ab10c535b6971c9c3ab | refs/heads/main | 2023-05-14T02:21:50.930672 | 2021-06-09T05:02:30 | 2021-06-09T05:02:30 | 375,225,316 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,004 | py | import pandas as pd
import numpy as np
from zipfile import ZipFile
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
from pathlib import Path
import sys
import matplotlib.pyplot as plt
"""##Dataset"""
df = pd.read_csv('ratings.csv', sep=',', encoding='latin-1', usecols=['userId','movieId','rating','timestamp'])
movie_df=df
user_ids = df["userId"].unique().tolist()
user2user_encoded = {x: i for i, x in enumerate(user_ids)}
userencoded2user = {i: x for i, x in enumerate(user_ids)}
movie_ids = df["movieId"].unique().tolist()
movie2movie_encoded = {x: i for i, x in enumerate(movie_ids)}
movie_encoded2movie = {i: x for i, x in enumerate(movie_ids)}
df["user"] = df["userId"].map(user2user_encoded)
df["movie"] = df["movieId"].map(movie2movie_encoded)
num_users = len(user2user_encoded)
num_movies = len(movie_encoded2movie)
df["rating"] = df["rating"].values.astype(np.float32)
min_rating = min(df["rating"])
max_rating = max(df["rating"])
# print(
# "Number of users: {}, Number of Movies: {}, Min rating: {}, Max rating: {}".format(
# num_users, num_movies, min_rating, max_rating
# )
# )
df = df.sample(frac=1, random_state=42)
x = df[["user", "movie"]].values
y = df["rating"].apply(lambda x: (x - min_rating) / (max_rating - min_rating)).values
train_indices = int(0.9 * df.shape[0])
x_train, x_val, y_train, y_val = (
x[:train_indices],
x[train_indices:],
y[:train_indices],
y[train_indices:],
)
EMBEDDING_SIZE = 50
class RecommenderNet(keras.Model):
def __init__(self, num_users, num_movies, embedding_size, **kwargs):
super(RecommenderNet, self).__init__(**kwargs)
self.num_users = num_users
self.num_movies = num_movies
self.embedding_size = embedding_size
self.user_embedding = layers.Embedding(
num_users,
embedding_size,
embeddings_initializer="he_normal",
embeddings_regularizer=keras.regularizers.l2(1e-6),
)
self.user_bias = layers.Embedding(num_users, 1)
self.movie_embedding = layers.Embedding(
num_movies,
embedding_size,
embeddings_initializer="he_normal",
embeddings_regularizer=keras.regularizers.l2(1e-6),
)
self.movie_bias = layers.Embedding(num_movies, 1)
def call(self, inputs):
user_vector = self.user_embedding(inputs[:, 0])
user_bias = self.user_bias(inputs[:, 0])
movie_vector = self.movie_embedding(inputs[:, 1])
movie_bias = self.movie_bias(inputs[:, 1])
dot_user_movie = tf.tensordot(user_vector, movie_vector, 2)
# Add all the components (including bias)
x = dot_user_movie + user_bias + movie_bias
return tf.nn.sigmoid(x)
model = RecommenderNet(num_users, num_movies, EMBEDDING_SIZE)
model.compile(
loss=tf.keras.losses.BinaryCrossentropy(), optimizer=keras.optimizers.Adam(lr=0.001)
)
history = model.fit(
x=x_train,
y=y_train,
batch_size=64,
epochs=5,
verbose=1,
validation_data=(x_val, y_val),
)
# plt.plot(history.history["loss"])
# plt.plot(history.history["val_loss"])
# plt.title("model loss")
# plt.ylabel("loss")
# plt.xlabel("epoch")
# plt.legend(["train", "test"], loc="upper left")
# plt.show()
movie_df = pd.read_csv('movies.csv', sep=',', encoding='latin-1', usecols=['movieId','title','genres'])
def Display(User_id):
# Use the requested user id; the original line sampled a random user here,
# ignoring the User_id argument passed in at the bottom of the script.
user_id = User_id
movies_watched_by_user = df[df.userId == user_id]
movies_not_watched = movie_df[~movie_df["movieId"].isin(movies_watched_by_user.movieId.values)]["movieId"]
movies_not_watched = list(
set(movies_not_watched).intersection(set(movie2movie_encoded.keys())))
movies_not_watched = [[movie2movie_encoded.get(x)] for x in movies_not_watched]
user_encoder = user2user_encoded.get(user_id)
user_movie_array = np.hstack(([[user_encoder]] * len(movies_not_watched), movies_not_watched))
ratings = model.predict(user_movie_array).flatten()
top_ratings_indices = ratings.argsort()[-10:][::-1]
recommended_movie_ids = [ movie_encoded2movie.get(movies_not_watched[x][0]) for x in top_ratings_indices]
# print("Showing recommendations for user: {}".format(user_id))
# print("====" * 9)
# print("Movies with high ratings from user")
# print("----" * 8)
top_movies_user = (movies_watched_by_user.sort_values(by="rating", ascending=False)
.head(5)
.movieId.values
)
movie_df_rows = movie_df[movie_df["movieId"].isin(top_movies_user)]
# for row in movie_df_rows.itertuples():
# print(row.title, ":", row.genres)
print("----" * 8)
print("Top 10 movie recommendations")
print("----" * 8)
recommended_movies = movie_df[movie_df["movieId"].isin(recommended_movie_ids)]
# for row in recommended_movies.itertuples():
# print(row.title, ":", row.genres)
# user_id=input("Please Enter User id")
user_id=int(sys.argv[1])
Display(user_id) | [
"[email protected]"
]
| |
4c592d51f61cf481cc775b42cd08c2ac8509d63a | d2f50124ff3bec70b9b3139ecb063b06e526781d | /biable/migrations/0063_auto_20170209_1210.py | b1f585b6e2133d7294f9972748a301e53108e589 | []
| no_license | odecsarrollo/odecopack-componentes | e8d993f089bf53bbf3c53d1265e70ac5c06b59b8 | b583a115fb30205d358d97644c38d66636b573ff | refs/heads/master | 2022-12-12T00:33:02.874268 | 2020-08-13T18:45:01 | 2020-08-13T18:45:01 | 189,262,705 | 0 | 0 | null | 2022-12-08T11:23:46 | 2019-05-29T16:37:21 | Python | UTF-8 | Python | false | false | 470 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-02-09 17:10
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('biable', '0062_auto_20170209_1032'),
]
operations = [
migrations.AlterField(
model_name='grupocliente',
name='nombre',
field=models.CharField(max_length=120, unique=True),
),
]
| [
"[email protected]"
]
| |
f562bc0096ec80473c16957f03b4c070b782bab7 | 99280ee4672420b43bdcedb9c6f5c93a5fe182f0 | /API/backend_3/todo_project/todo_project/settings.py | 297321c3ae1b7a167c333d4af61b2cc4b333d714 | []
| no_license | kamral/test_1 | f8674a075d51fc94630df7d6a5cf55b11d086db0 | a10ce3337463d1cb9b56876d0566798740c0b42f | refs/heads/master | 2023-08-06T23:50:45.519935 | 2020-06-07T09:27:43 | 2020-06-07T09:27:43 | 265,688,683 | 0 | 0 | null | 2021-09-22T19:23:15 | 2020-05-20T21:21:21 | Python | UTF-8 | Python | false | false | 3,293 | py | """
Django settings for todo_project project.
Generated by 'django-admin startproject' using Django 3.0.5.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.0/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '=_jwj$8oi08uu8m)5170xe#@o_aqjjpyhy(5d-fq=^k-^!f9ui'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
#3d party
'rest_framework',
#local
'todos.apps.TodosConfig',
]
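# Note: DRF is configured wide open below; AllowAny means every endpoint
# accepts unauthenticated requests, which is only acceptable for a demo
# project like this one.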
REST_FRAMEWORK={
'DEFAULT_PERMISSION_CLASSES':[
'rest_framework.permissions.AllowAny',
]
}
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'todo_project.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'todo_project.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.0/howto/static-files/
STATIC_URL = '/static/'
| [
"[email protected]"
]
| |
e7d10f8320db3c2f560b7875b1bb254593aca879 | 5ffa05429f1278455cd02e759cc64f376813ce20 | /html_form_builder/__openerp__.py | 1e8592471c11867f3ba1a29645d05d25c8cae4e7 | []
| no_license | tonihr/Odoo9 | 217f483993c4a49d5c14ad93ec2594e0a46bef5d | 93e0d3de55714e34229cb5273400a6ebc1f6e3e0 | refs/heads/9.0 | 2021-01-19T04:02:57.407271 | 2017-03-08T05:35:47 | 2017-03-08T05:35:47 | 84,426,868 | 0 | 0 | null | 2017-03-09T10:02:45 | 2017-03-09T10:02:45 | null | UTF-8 | Python | false | false | 681 | py | {
'name': "HTML Form Builder",
'version': "1.8.9",
'author': "Sythil Tech",
'category': "Tools",
'support': "[email protected]",
'summary': "Manage both internal and external forms",
'description': "Manage both internal and external forms",
'license':'LGPL-3',
'data': [
'views/html_form.xml',
'views/html_form_builder_templates.xml',
'data/html.form.captcha.csv',
'data/html.form.field.type.csv',
'data/html.form.action.type.csv',
'security/ir.model.access.csv',
],
'demo': [],
'images':[
'static/description/1.jpg',
],
'depends': [],
'installable': True,
} | [
"[email protected]"
]
| |
9abfdc5a2c0729518fddf65bbefeae6317b8b9a0 | 24d8cf871b092b2d60fc85d5320e1bc761a7cbe2 | /eXe/rev2283-2366/right-branch-2366/twisted/internet/tksupport.py | 19dcf48b56a21fe81e5d2e00d290099a36bdac51 | [
"MIT",
"LicenseRef-scancode-unknown-license-reference"
]
| permissive | joliebig/featurehouse_fstmerge_examples | af1b963537839d13e834f829cf51f8ad5e6ffe76 | 1a99c1788f0eb9f1e5d8c2ced3892d00cd9449ad | refs/heads/master | 2016-09-05T10:24:50.974902 | 2013-03-28T16:28:47 | 2013-03-28T16:28:47 | 9,080,611 | 3 | 2 | null | null | null | null | UTF-8 | Python | false | false | 1,636 | py | """This module integrates Tkinter with twisted.internet's mainloop.
API Stability: semi-stable
Maintainer: U{Itamar Shtull-Trauring<mailto:[email protected]>}
To use, do::
| tksupport.install(rootWidget)
and then run your reactor as usual - do *not* call Tk's mainloop(),
use Twisted's regular mechanism for running the event loop.
Likewise, to stop your program you will need to stop Twisted's
event loop. For example, if you want closing your root widget to
stop Twisted::
| root.protocol('WM_DELETE_WINDOW', reactor.stop)
"""
import Tkinter, tkSimpleDialog, tkMessageBox
from twisted.python import log
from twisted.internet import task
_task = None
def install(widget, ms=10, reactor=None):
"""Install a Tkinter.Tk() object into the reactor."""
installTkFunctions()
global _task
_task = task.LoopingCall(widget.update)
_task.start(ms / 1000.0, False)
def uninstall():
"""Remove the root Tk widget from the reactor.
Call this before destroy()ing the root widget.
"""
global _task
_task.stop()
_task = None
def installTkFunctions():
import twisted.python.util
twisted.python.util.getPassword = getPassword
def getPassword(prompt = '', confirm = 0):
while 1:
try1 = tkSimpleDialog.askstring('Password Dialog', prompt, show='*')
if not confirm:
return try1
try2 = tkSimpleDialog.askstring('Password Dialog', 'Confirm Password', show='*')
if try1 == try2:
return try1
else:
tkMessageBox.showerror('Password Mismatch', 'Passwords did not match, starting over')
__all__ = ["install", "uninstall"]
| [
"[email protected]"
]
| |
cd14d101a34e2fb93abf67a6f5d7818b15d89544 | e3565e1ce607f60745f2a045aae8026661a6b99b | /resources/Onyx-1.0.511/sandbox/malach/filefix.py | b32a126b489cba53a0958df1d590cbbd7e027b16 | [
"Apache-2.0"
]
| permissive | eternity668/speechAD | 4c08d953b2ed06b3357b1c39d8709dd088a2471c | f270a1be86372b7044615e4fd82032029e123bc1 | refs/heads/master | 2021-01-12T22:10:33.358500 | 2014-02-03T16:03:28 | 2014-02-03T16:03:28 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,982 | py | ###########################################################################
#
# File: filefix.py
# Date: Tue 28 Apr 2009 14:51
# Author: Ken Basye
# Description: Some general tools for fixing files
#
# This file is part of Onyx http://onyxtools.sourceforge.net
#
# Copyright 2009 The Johns Hopkins University
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied. See the License for the specific language governing
# permissions and limitations under the License.
#
###########################################################################
from __future__ import with_statement
import os.path
import re
"""
>>> problem_line_re = re.compile("^(.*)<(Trans|Episode|Speaker|Speakers|Turn|Who|Sync)(.*)>(.*)$")
"""
def apply_line_transform_to_dir(transform, dirname, newdirname, glob_str='*'):
"""
Apply the callable *transform* to each line of each file in *dirname* that
matches *glob_str* (default '*'), creating new files with the same basename
in *newdirname*.
"""
import glob
import os
fnames = glob.glob(dirname + os.sep + glob_str)
print("Reading %d files in %s" % (len(fnames), dirname))
for fname in fnames:
dir,base = os.path.split(fname)
newfname = os.path.join(newdirname, base)
with open(fname) as f:
with open(newfname, 'w') as newf:
for lineno,line in enumerate(f):
newf.write(transform(line, lineno))
def fix_all_malach_files(transform, stage):
"""
Process the given transform on all Malach transcription files. *stage*
should be a positive integer; its value will be used to determine both the
source and target directory names.
"""
dirname = "./transcriptions%d" % (stage,)
newdirname = "./transcriptions%d" % (stage + 1,)
os.mkdir(newdirname)
apply_line_transform_to_dir(transform, dirname, newdirname, glob_str='*.trs')
dirname = "./transcriptions%d/additional" % (stage,)
newdirname = "./transcriptions%d/additional" % (stage + 1,)
os.mkdir(newdirname)
apply_line_transform_to_dir(transform, dirname, newdirname, glob_str='*.trs')
def fix_encoding_in_header(line, lineno):
"""
This line transform fixes a problem in the first line of the file where the
encoding attribute had been formatted incorrectly.
"""
_CORRECT = """<?xml version="1.0" encoding="ISO-8859-1"?>\n"""
if lineno == 0 and line.find("encoding") == -1:
return _CORRECT
else:
return line
def fix_version_date(line, lineno):
"""
This line transform fixes a problem in the third line of the file where the
version_date attribute had been misspelt.
"""
if lineno == 2 and line.find("version_data") != -1:
return line.replace("version_data", "version_date")
else:
return line
def fix_common_bad_tags(line, lineno):
"""
This line transform fixes a problem in several files where <> was used to
indicate certain transcription tokens, e.g. '<pause>'. Since <> is the XML
tag syntax, this causes XML parsing to fail in many places. This transform
identifies the problem regions and replaces <XXX> with &lt;XXX&gt;, which
will then be parsed correctly. This transform is limited to replacing a few
common bad tags just to reduce the remaining problems to a manageable size.
"""
bases = ("noise", "pause", "um", "UH", "breath", "inhale", "uh", "cough", "laugh", "HM", "emotion", "UH-UH", "UM",
"unintelligible", "mouth", "silence", "lead_silence", "hm", "uh_hum", "sniff", "exhale", "UH-UH-UH", "uh-uh",
"cross_talk_begin", "cross_talk_end", "cross_talk_begins", "cross_talk_ends",
"bkgrd_noise", "cross_talk", "long_pause", "UH_HUH", "uh_huh", "UH_HUM", "UH-HUH", "uh-huh", "UH-HUM", "EH",
"laugh-laugh", "noise-noise", "cough-cough", "ap-", "uf-", "spk#1", "spk#2")
pairs = [("<%s>" % (token,), "<%s>" % (token,)) for token in bases]
for problem, fix in pairs:
if line.find(problem) != -1:
line = line.replace(problem, fix)
return line
def fix_bad_tags1(line, lineno):
"""
This line transform fixes a problem in several files where <> was used to
indicate certain transcription tokens, e.g. '<pause>'. Since <> is the XML
tag syntax, this causes XML parsing to fail in many places. This transform
identifies the problem regions and replaces <XXX> with &lt;XXX&gt;, which
will then be parsed correctly. This transform is limited to replacing
tokens in <>s containing only lower-case letters and underscores, and will
only replace one such instance in a line. This covers many error cases, and
later transforms can do more work on fewer instances.
"""
import re
problem_line_re = re.compile("^(.*)<([a-z_]*)>(.*)$")
match = problem_line_re.match(line)
if match is None:
return line
else:
groups = match.groups()
assert len(groups) == 3
newline = groups[0] + '&lt;' + groups[1] + '&gt;' + groups[2] + '\n'
return newline
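# Example (added for illustration) of the transform above, given the
# entity-escaping fix:
#     fix_bad_tags1("he said <pause> loudly\n", 7)
#     -> 'he said &lt;pause&gt; loudly\n'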
def fix_bad_tags2(line, lineno):
"""
This line transform fixes a problem in several files where <> was used to
indicate certain transcription tokens, e.g. '<pause>'. Since <> is the XML
tag syntax, this causes XML parsing to fail in many places. This transform
identifies the problem regions and replaces <XXX> with &lt;XXX&gt;, which
will then be parsed correctly. Limited to any <> with an a-z character
immediately after the <.
"""
import re
problem_line_re = re.compile("^(.*)<([a-z].*)>(.*)$")
match = problem_line_re.match(line)
if match is None:
return line
else:
groups = match.groups()
assert len(groups) == 3
newline = groups[0] + '&lt;' + groups[1] + '&gt;' + groups[2] + '\n'
return newline
def fix_bad_tags3(line, lineno):
"""
This line transform fixes a problem in several files where <> was used to
indicate certain transcription tokens, e.g. '<pause>'. Since <> is the XML
tag syntax, this causes XML parsing to fail in many places. This transform
identifies the problem regions and replaces <XXX> with &lt;XXX&gt;, which
will then be parsed correctly. This transform deals with tokens in <>s
which consist only of capital letters, underscores, and hyphens.
"""
import re
problem_line_re = re.compile("^(.*)<([A-Z_/-]*)>(.*)$")
match = problem_line_re.match(line)
if match is None:
return line
else:
groups = match.groups()
assert len(groups) == 3
newline = groups[0] + '&lt;' + groups[1] + '&gt;' + groups[2] + '\n'
return newline
def fix_bad_tags4(line, lineno):
"""
This line transform fixes remaining bad tags, which is anything in <>s that
doesn't start with a tag we know about. It prints the line it is fixing,
and is meant to be used when almost everything has been fixed.
"""
import re
ok_line_re = re.compile(r"^(.*)</?(Trans|Episode|Speaker|Speakers|Turn|Who|Sync|Section|\?xml|!DOCTYPE)(.*)>(.*)$")
ok_match = ok_line_re.match(line)
problem_line_re = re.compile("^(.*)<(.*)>(.*)$")
problem_match = problem_line_re.match(line)
if ok_match is not None:
return line
if problem_match is None:
return line
else:
groups = problem_match.groups()
assert len(groups) == 3
newline = groups[0] + '&lt;' + groups[1] + '&gt;' + groups[2] + '\n'
print line
return newline
def check_for_bad_tags0(line, lineno):
"""
This line transform just checks for bad tags, which is anything in <>s that
doesn't start with a tag we know about. It prints any line which has more than one < in it.
"""
import re
ok_line_re = re.compile(r"^(.*)</?(Trans|Episode|Speaker|Speakers|Turn|Who|Sync|Section|\?xml|!DOCTYPE)(.*)>(.*)$")
ok_match = ok_line_re.match(line)
problem_line_re = re.compile("^(.*)<(.*)>(.*)$")
problem_match = problem_line_re.match(line)
if ok_match is not None:
return line
if problem_match is None:
return line
else:
groups = problem_match.groups()
if line.count('<') > 1:
print line
return line
def check_for_bad_tags(line, lineno):
"""
This line transform just checks for bad tags, which is anything in <>s that
doesn't start with a tag we know about.
"""
import re
ok_line_re = re.compile(r"^(.*)</?(Trans|Episode|Speaker|Speakers|Turn|Who|Sync|Section|\?xml|!DOCTYPE)(.*)>(.*)$")
ok_match = ok_line_re.match(line)
problem_line_re = re.compile("^(.*)<(.*)>(.*)$")
problem_match = problem_line_re.match(line)
if ok_match is not None:
return line
if problem_match is None:
return line
else:
groups = problem_match.groups()
print line
return line
if __name__ == '__main__':
fix_all_malach_files(fix_encoding_in_header, 1)
fix_all_malach_files(fix_version_date, 2)
fix_all_malach_files(fix_common_bad_tags, 3)
# We do two rounds of the next fix since there are several begin/end pairs
# and each round will only clean up one tag
fix_all_malach_files(fix_bad_tags1, 4)
fix_all_malach_files(fix_bad_tags1, 5)
fix_all_malach_files(fix_bad_tags2, 6)
fix_all_malach_files(fix_bad_tags3, 7)
fix_all_malach_files(check_for_bad_tags0, 8)
fix_all_malach_files(fix_bad_tags4, 9)
fix_all_malach_files(check_for_bad_tags, 10)
| [
"[email protected]"
]
| |
d1ca2a52b83d8def8c1aa10f303e6cad817df346 | 41a20700b5bb351d20562ac23ec4db06bc96f0d7 | /src/fg/tv_metrics.py | f38e38ae3fce02c994c9be7c9605523073f0d3f0 | []
| no_license | kedz/noiseylg | ee0c54634767e8d3789b4ffb93727988c29c6979 | 17266e1a41e33aecb95dc1c3aca68f6bccee86d5 | refs/heads/master | 2020-07-30T11:22:08.351759 | 2019-10-30T21:33:11 | 2019-10-30T21:33:11 | 210,212,253 | 4 | 2 | null | null | null | null | UTF-8 | Python | false | false | 2,987 | py | from plum.types import register, PlumModule, HP, props
from subprocess import check_output
from queue import Queue
from threading import Thread
from pathlib import Path
from tempfile import NamedTemporaryFile
import json
import d2t.preprocessing.tvs as preproc
@register("metrics.tv_metrics")
class TVMetrics(PlumModule):
path = HP(type=props.EXISTING_PATH)
search_fields = HP()
references_fields = HP()
def __pluminit__(self):
self._cache = None
self._queue = Queue(maxsize=0)
self._thread = None
self._thread = Thread(target=self._process_result)
self._thread.setDaemon(True)
self._thread.start()
self._hyp_fp = NamedTemporaryFile("w")
self._ref_fp = NamedTemporaryFile("w")
def postprocess(self, tokens, mr):
# TODO right now this is specific to the e2e dataset. Need to
# generalize how to do post processing.
tokens = [t for t in tokens if t[0] != "<" and t[-1] != ">"]
text = " ".join(tokens)
return preproc.lexicalize(text, mr)
def _process_result(self):
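# Daemon worker loop: drain (hypothesis, references, mr) triples off the
# queue and stream them into the temporary hypothesis/reference files that
# the external scoring script reads.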
while True:
hyp, refs, mr = self._queue.get()
print(self.postprocess(hyp, mr), file=self._hyp_fp)
#print(" ".join(hyp), file=self._hyp_fp)
if isinstance(refs, (list, tuple)):
refs = "\n".join(refs)
print(refs, file=self._ref_fp, end="\n\n")
self._queue.task_done()
def reset(self):
self._cache = None
while not self._queue.empty():
self._queue.get()
self._queue.task_done()
self._hyp_fp = NamedTemporaryFile("w")
self._ref_fp = NamedTemporaryFile("w")
def apply_fields(self, fields, obj):
if not isinstance(fields, (list, tuple)):
fields = [fields]
for field in fields:
if hasattr(field, "__call__"):
obj = field(obj)
else:
obj = obj[field]
return obj
def forward(self, forward_state, batch):
search = self.apply_fields(self.search_fields, forward_state)
hypotheses = search.output()
reference_sets = self.apply_fields(self.references_fields, batch)
for i, (hyp, refs) in enumerate(zip(hypotheses, reference_sets)):
self._queue.put([hyp, refs, batch["mr"][i]])
def run_script(self):
self._queue.join()
self._ref_fp.flush()
self._hyp_fp.flush()
script_path = Path(self.path).resolve()
result_bytes = check_output(
[str(script_path), self._hyp_fp.name, self._ref_fp.name])
result = json.loads(result_bytes.decode("utf8"))
self._cache = result
self._ref_fp = None
self._hyp_fp = None
def compute(self):
if self._cache is None:
self.run_script()
return self._cache
def pretty_result(self):
return str(self.compute())
| [
"[email protected]"
]
| |
b48cbc34229e604e32f551d252f74916fe277a3e | b789bf78ffe684782da7eed9df9d88a62d13ad82 | /pyannote/database/protocol/__init__.py | d9f270782593bbe36e7e6fabe7d6039e4a1d5979 | [
"MIT"
]
| permissive | yinruiqing/pyannote-database | 8d77678efec06ffb797716e28b4673f1d5ec6453 | 731593b57082e675e0f661f6211f2dd261807561 | refs/heads/master | 2020-12-02T06:45:29.029202 | 2017-06-28T13:12:26 | 2017-06-28T13:12:26 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,320 | py | #!/usr/bin/env python
# encoding: utf-8
# The MIT License (MIT)
# Copyright (c) 2016 CNRS
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# AUTHORS
# Hervé BREDIN - http://herve.niderb.fr
from .speaker_diarization import SpeakerDiarizationProtocol
from .speaker_recognition import SpeakerRecognitionProtocol
| [
"[email protected]"
]
| |
0a101df3b11fa31f2f9270e4eb622a88f96554f3 | 41c605bf3a002a757cb2344cff526d7a7ae56ea9 | /plotly/validators/scattercarpet/selected/marker/__init__.py | 67542f2ea7f75af48003f76f0d057af6429e1e4c | [
"MIT"
]
| permissive | Jonathan-MW/plotly.py | 9674b90b5de11fd9089e6afefd04b57bc4587829 | 7528c00772f44dee24c0df7e15d70a4852f171a8 | refs/heads/master | 2020-05-30T06:04:13.621478 | 2019-05-31T10:34:15 | 2019-05-31T10:34:15 | 189,571,988 | 2 | 0 | MIT | 2019-05-31T09:59:53 | 2019-05-31T09:59:53 | null | UTF-8 | Python | false | false | 1,616 | py |
import _plotly_utils.basevalidators
class SizeValidator(_plotly_utils.basevalidators.NumberValidator):
def __init__(
self,
plotly_name='size',
parent_name='scattercarpet.selected.marker',
**kwargs
):
super(SizeValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop('edit_type', 'style'),
min=kwargs.pop('min', 0),
role=kwargs.pop('role', 'style'),
**kwargs
)
import _plotly_utils.basevalidators
class OpacityValidator(_plotly_utils.basevalidators.NumberValidator):
def __init__(
self,
plotly_name='opacity',
parent_name='scattercarpet.selected.marker',
**kwargs
):
super(OpacityValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop('edit_type', 'style'),
max=kwargs.pop('max', 1),
min=kwargs.pop('min', 0),
role=kwargs.pop('role', 'style'),
**kwargs
)
import _plotly_utils.basevalidators
class ColorValidator(_plotly_utils.basevalidators.ColorValidator):
def __init__(
self,
plotly_name='color',
parent_name='scattercarpet.selected.marker',
**kwargs
):
super(ColorValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop('edit_type', 'style'),
role=kwargs.pop('role', 'style'),
**kwargs
)
| [
"[email protected]"
]
| |
9d7d16c4a73674e00426099c87f36ac5e20d778f | 60a4f0fa5c8239edbb4cd4390e3b4a7d70c919de | /user/migrations/0006_auto_20190805_2145.py | 54fac07db74e7b1545406f8ec51ded054071913c | []
| no_license | DuncanMoyo/Developer-Portfolio | cca6cbe29e13bddbf56584e400cbd169a515c047 | 9aa8dcef123b3144d9bf2c34a19f4c65c193ac98 | refs/heads/master | 2022-12-09T17:14:42.865413 | 2019-08-09T03:55:21 | 2019-08-09T03:55:21 | 200,691,837 | 0 | 0 | null | 2022-12-08T05:59:41 | 2019-08-05T16:31:39 | CSS | UTF-8 | Python | false | false | 497 | py | # Generated by Django 2.2.4 on 2019-08-05 19:45
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('user', '0005_auto_20190805_2144'),
]
operations = [
migrations.RemoveField(
model_name='userprofile',
name='skill_level',
),
migrations.AddField(
model_name='skill',
name='skill_level',
field=models.IntegerField(default=0),
),
]
| [
"[email protected]"
]
| |
35ff7c7b0b2608a161283aad1158714f840e4261 | bf21cd0ef7a94fa106ccd9f91a4bbfdcda7f94ed | /python-basic/chapter06/ex02_1.py | b89200405d271e475c79d5066eb693b18a584a1a | []
| no_license | juneglee/Deep_Learning | fdf8cae1b962aaa0ce557cb53f78a22b6d5ae1e8 | 17a448cf6a7c5b61b967dd78af3d328d63378205 | refs/heads/master | 2023-07-15T03:02:55.739619 | 2021-08-19T14:04:55 | 2021-08-19T14:04:55 | 273,253,872 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,324 | py | # Advanced exception handling
# Exception objects
# try:
#     code that may raise an exception
# except ExceptionType as variable_name_for_the_exception_object:
#     code to run when the exception occurs
# Exception object
try:
number_input_a = int(input("Enter an integer> "))
print("Radius of the circle:", number_input_a)
print("Circumference of the circle:", 2 * 3.14 * number_input_a)
print("Area of the circle:", 3.14 * number_input_a * number_input_a)
except Exception as exception:
print("type(exception):", type(exception))
print("exception:", exception) # Exception is the parent class
# Distinguishing exceptions
# Code that can raise several different exceptions
# Error 1: a value that cannot be converted to an integer, e.g. "yes!!"
# Error 2: an index beyond the length of the list, e.g. 100
list_number = [52, 273, 32, 72, 100]
try:
number_input = int(input("Enter an integer> "))
print("Element {}: {}".format(number_input, list_number[number_input]))
except Exception as exception:
print("type(exception):", type(exception))
print("exception:", exception)
# Distinguishing exceptions
# try:
#     code that may raise an exception
# except ExceptionTypeA:
#     code to run when exception A occurs
# except ExceptionTypeB:
#     code to run when exception B occurs
# except ExceptionTypeC:
#     code to run when exception C occurs
list_number = [52, 273, 32, 72, 100]
try:
number_input = int(input("Enter an integer> "))
print("Element {}: {}".format(number_input, list_number[number_input]))
except ValueError:
# when a ValueError occurs
print("Please enter an integer!")
except IndexError:
# when an IndexError occurs
print("That index is outside the list!")
# Exception-distinguishing syntax together with exception objects
# added using the as keyword
list_number = [52, 273, 32, 72, 100]
try:
number_input = int(input("Enter an integer> "))
print("Element {}: {}".format(number_input, list_number[number_input]))
except ValueError as exception:
print("Please enter an integer!")
print("exception:", exception)
except IndexError as exception:
print("That index is outside the list!")
print("exception:", exception) | [
"[email protected]"
]
| |
6c4260cb4c1cd8605e45e07848249e782def76d3 | e3d6f803beece2ecc2cde8de795fdd20291213ff | /nova/tests/unit/scheduler/test_utils.py | e92dd111cd4e2d319bac244ad5f43e733c2f47ab | [
"Apache-2.0"
]
| permissive | panguan737/nova | 437c1adb81f3e9ef82c28ad957144623db13ba52 | 0d177185a439baa228b42c948cab4e934d6ac7b8 | refs/heads/main | 2023-01-07T00:08:44.069599 | 2020-11-01T14:00:42 | 2020-11-01T14:00:42 | 309,332,719 | 0 | 0 | Apache-2.0 | 2020-11-02T10:17:13 | 2020-11-02T10:17:13 | null | UTF-8 | Python | false | false | 27,014 | py | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import ddt
import mock
from nova.api.openstack.placement import lib as plib
from nova import context as nova_context
from nova import exception
from nova import objects
from nova.scheduler.client import report
from nova.scheduler import utils
from nova import test
from nova.tests.unit import fake_instance
from nova.tests import uuidsentinel as uuids
@ddt.ddt
class TestUtils(test.NoDBTestCase):
def setUp(self):
super(TestUtils, self).setUp()
self.context = nova_context.get_admin_context()
def assertResourceRequestsEqual(self, expected, observed):
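# Compare the two ResourceRequests group-by-group (the attribute dicts
# must match exactly) and then compare the overall limit.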
ex_by_id = expected._rg_by_id
ob_by_id = observed._rg_by_id
self.assertEqual(set(ex_by_id), set(ob_by_id))
for ident in ex_by_id:
self.assertEqual(vars(ex_by_id[ident]), vars(ob_by_id[ident]))
self.assertEqual(expected._limit, observed._limit)
def _test_resources_from_request_spec(self, flavor, expected):
fake_spec = objects.RequestSpec(flavor=flavor)
resources = utils.resources_from_request_spec(fake_spec)
self.assertResourceRequestsEqual(expected, resources)
def test_resources_from_request_spec(self):
flavor = objects.Flavor(vcpus=1,
memory_mb=1024,
root_gb=10,
ephemeral_gb=5,
swap=0)
expected_resources = utils.ResourceRequest()
expected_resources._rg_by_id[None] = plib.RequestGroup(
use_same_provider=False,
resources={
'VCPU': 1,
'MEMORY_MB': 1024,
'DISK_GB': 15,
}
)
self._test_resources_from_request_spec(flavor, expected_resources)
def test_resources_from_request_spec_with_no_disk(self):
flavor = objects.Flavor(vcpus=1,
memory_mb=1024,
root_gb=0,
ephemeral_gb=0,
swap=0)
expected_resources = utils.ResourceRequest()
expected_resources._rg_by_id[None] = plib.RequestGroup(
use_same_provider=False,
resources={
'VCPU': 1,
'MEMORY_MB': 1024,
}
)
self._test_resources_from_request_spec(flavor, expected_resources)
def test_get_resources_from_request_spec_custom_resource_class(self):
flavor = objects.Flavor(vcpus=1,
memory_mb=1024,
root_gb=10,
ephemeral_gb=5,
swap=0,
extra_specs={"resources:CUSTOM_TEST_CLASS": 1})
expected_resources = utils.ResourceRequest()
expected_resources._rg_by_id[None] = plib.RequestGroup(
use_same_provider=False,
resources={
"VCPU": 1,
"MEMORY_MB": 1024,
"DISK_GB": 15,
"CUSTOM_TEST_CLASS": 1,
}
)
self._test_resources_from_request_spec(flavor, expected_resources)
def test_get_resources_from_request_spec_override_flavor_amounts(self):
flavor = objects.Flavor(vcpus=1,
memory_mb=1024,
root_gb=10,
ephemeral_gb=5,
swap=0,
extra_specs={
"resources:VCPU": 99,
"resources:MEMORY_MB": 99,
"resources:DISK_GB": 99})
expected_resources = utils.ResourceRequest()
expected_resources._rg_by_id[None] = plib.RequestGroup(
use_same_provider=False,
resources={
"VCPU": 99,
"MEMORY_MB": 99,
"DISK_GB": 99,
}
)
self._test_resources_from_request_spec(flavor, expected_resources)
def test_get_resources_from_request_spec_remove_flavor_amounts(self):
flavor = objects.Flavor(vcpus=1,
memory_mb=1024,
root_gb=10,
ephemeral_gb=5,
swap=0,
extra_specs={
"resources:VCPU": 0,
"resources:DISK_GB": 0})
expected_resources = utils.ResourceRequest()
expected_resources._rg_by_id[None] = plib.RequestGroup(
use_same_provider=False,
resources={
"MEMORY_MB": 1024,
}
)
self._test_resources_from_request_spec(flavor, expected_resources)
def test_get_resources_from_request_spec_vgpu(self):
flavor = objects.Flavor(vcpus=1,
memory_mb=1024,
root_gb=10,
ephemeral_gb=0,
swap=0,
extra_specs={
"resources:VGPU": 1,
"resources:VGPU_DISPLAY_HEAD": 1})
expected_resources = utils.ResourceRequest()
expected_resources._rg_by_id[None] = plib.RequestGroup(
use_same_provider=False,
resources={
"VCPU": 1,
"MEMORY_MB": 1024,
"DISK_GB": 10,
"VGPU": 1,
"VGPU_DISPLAY_HEAD": 1,
}
)
self._test_resources_from_request_spec(flavor, expected_resources)
def test_get_resources_from_request_spec_bad_std_resource_class(self):
flavor = objects.Flavor(vcpus=1,
memory_mb=1024,
root_gb=10,
ephemeral_gb=5,
swap=0,
extra_specs={
"resources:DOESNT_EXIST": 0})
fake_spec = objects.RequestSpec(flavor=flavor)
with mock.patch("nova.scheduler.utils.LOG.warning") as mock_log:
utils.resources_from_request_spec(fake_spec)
mock_log.assert_called_once()
args = mock_log.call_args[0]
self.assertEqual(args[0], "Received an invalid ResourceClass "
"'%(key)s' in extra_specs.")
self.assertEqual(args[1], {"key": "DOESNT_EXIST"})
def test_get_resources_from_request_spec_granular(self):
flavor = objects.Flavor(
vcpus=1, memory_mb=1024, root_gb=10, ephemeral_gb=0, swap=0,
extra_specs={'resources1:VGPU': '1',
'resources1:VGPU_DISPLAY_HEAD': '2',
# Replace
'resources3:VCPU': '2',
# Stay separate (don't sum)
'resources42:SRIOV_NET_VF': '1',
'resources24:SRIOV_NET_VF': '2',
# Ignore
'some:bogus': 'value',
# Custom in the unnumbered group (merge with DISK_GB)
'resources:CUSTOM_THING': '123',
# Traits make it through
'trait3:CUSTOM_SILVER': 'required',
'trait3:CUSTOM_GOLD': 'required',
# Delete standard
'resources86:MEMORY_MB': '0',
# Standard and custom zeroes don't make it through
'resources:IPV4_ADDRESS': '0',
'resources:CUSTOM_FOO': '0',
# Bogus values don't make it through
'resources1:MEMORY_MB': 'bogus'})
expected_resources = utils.ResourceRequest()
expected_resources._rg_by_id[None] = plib.RequestGroup(
use_same_provider=False,
resources={
'DISK_GB': 10,
'CUSTOM_THING': 123,
}
)
expected_resources._rg_by_id['1'] = plib.RequestGroup(
resources={
'VGPU': 1,
'VGPU_DISPLAY_HEAD': 2,
}
)
expected_resources._rg_by_id['3'] = plib.RequestGroup(
resources={
'VCPU': 2,
},
required_traits={
'CUSTOM_GOLD',
'CUSTOM_SILVER',
}
)
expected_resources._rg_by_id['24'] = plib.RequestGroup(
resources={
'SRIOV_NET_VF': 2,
},
)
expected_resources._rg_by_id['42'] = plib.RequestGroup(
resources={
'SRIOV_NET_VF': 1,
}
)
self._test_resources_from_request_spec(flavor, expected_resources)
@mock.patch("nova.scheduler.utils.ResourceRequest.from_extra_specs")
def test_process_extra_specs_granular_called(self, mock_proc):
flavor = objects.Flavor(vcpus=1,
memory_mb=1024,
root_gb=10,
ephemeral_gb=5,
swap=0,
extra_specs={"resources:CUSTOM_TEST_CLASS": 1})
fake_spec = objects.RequestSpec(flavor=flavor)
utils.resources_from_request_spec(fake_spec)
mock_proc.assert_called_once()
@mock.patch("nova.scheduler.utils.ResourceRequest.from_extra_specs")
def test_process_extra_specs_granular_not_called(self, mock_proc):
flavor = objects.Flavor(vcpus=1,
memory_mb=1024,
root_gb=10,
ephemeral_gb=5,
swap=0)
fake_spec = objects.RequestSpec(flavor=flavor)
utils.resources_from_request_spec(fake_spec)
mock_proc.assert_not_called()
def test_process_missing_extra_specs_value(self):
flavor = objects.Flavor(
vcpus=1,
memory_mb=1024,
root_gb=10,
ephemeral_gb=5,
swap=0,
extra_specs={"resources:CUSTOM_TEST_CLASS": ""})
fake_spec = objects.RequestSpec(flavor=flavor)
utils.resources_from_request_spec(fake_spec)
def test_process_no_force_hosts_or_force_nodes(self):
flavor = objects.Flavor(vcpus=1,
memory_mb=1024,
root_gb=15,
ephemeral_gb=0,
swap=0)
fake_spec = objects.RequestSpec(flavor=flavor)
expected = utils.ResourceRequest()
resources = utils.resources_from_request_spec(fake_spec)
self.assertEqual(expected._limit, resources._limit)
def test_process_use_force_nodes(self):
flavor = objects.Flavor(vcpus=1,
memory_mb=1024,
root_gb=15,
ephemeral_gb=0,
swap=0)
fake_spec = objects.RequestSpec(flavor=flavor, force_nodes=['test'])
expected = utils.ResourceRequest()
expected._limit = None
resources = utils.resources_from_request_spec(fake_spec)
self.assertEqual(expected._limit, resources._limit)
def test_process_use_force_hosts(self):
flavor = objects.Flavor(vcpus=1,
memory_mb=1024,
root_gb=15,
ephemeral_gb=0,
swap=0)
fake_spec = objects.RequestSpec(flavor=flavor, force_hosts=['test'])
expected = utils.ResourceRequest()
expected._limit = None
resources = utils.resources_from_request_spec(fake_spec)
self.assertEqual(expected._limit, resources._limit)
@ddt.data(
# Test single hint that we are checking for.
{'group': [uuids.fake]},
# Test hint we care about and some other random hint.
{'same_host': [uuids.fake], 'fake-hint': ['fake-value']},
# Test multiple hints we are checking for.
{'same_host': [uuids.server1], 'different_host': [uuids.server2]})
def test_resources_from_request_spec_no_limit_based_on_hint(self, hints):
"""Tests that there is no limit applied to the
GET /allocation_candidates query string if a given scheduler hint
is in the request spec.
"""
flavor = objects.Flavor(vcpus=1,
memory_mb=1024,
root_gb=15,
ephemeral_gb=0,
swap=0)
fake_spec = objects.RequestSpec(
flavor=flavor, scheduler_hints=hints)
expected = utils.ResourceRequest()
expected._rg_by_id[None] = plib.RequestGroup(
use_same_provider=False,
resources={
'VCPU': 1,
'MEMORY_MB': 1024,
'DISK_GB': 15,
},
)
expected._limit = None
resources = utils.resources_from_request_spec(fake_spec)
self.assertResourceRequestsEqual(expected, resources)
@mock.patch('nova.compute.utils.is_volume_backed_instance',
return_value=False)
def test_resources_from_flavor_no_bfv(self, mock_is_bfv):
flavor = objects.Flavor(vcpus=1, memory_mb=1024, root_gb=10,
ephemeral_gb=5, swap=1024,
extra_specs={})
instance = objects.Instance()
expected = {
'VCPU': 1,
'MEMORY_MB': 1024,
'DISK_GB': 16,
}
actual = utils.resources_from_flavor(instance, flavor)
self.assertEqual(expected, actual)
@mock.patch('nova.compute.utils.is_volume_backed_instance',
return_value=True)
def test_resources_from_flavor_bfv(self, mock_is_bfv):
flavor = objects.Flavor(vcpus=1, memory_mb=1024, root_gb=10,
ephemeral_gb=5, swap=1024,
extra_specs={})
instance = objects.Instance()
expected = {
'VCPU': 1,
'MEMORY_MB': 1024,
'DISK_GB': 6, # No root disk...
}
actual = utils.resources_from_flavor(instance, flavor)
self.assertEqual(expected, actual)
@mock.patch('nova.compute.utils.is_volume_backed_instance',
return_value=False)
def test_resources_from_flavor_with_override(self, mock_is_bfv):
flavor = objects.Flavor(
vcpus=1, memory_mb=1024, root_gb=10, ephemeral_gb=5, swap=1024,
extra_specs={
# Replace
'resources:VCPU': '2',
# Sum up
'resources42:SRIOV_NET_VF': '1',
'resources24:SRIOV_NET_VF': '2',
# Ignore
'some:bogus': 'value',
# Custom
'resources:CUSTOM_THING': '123',
# Ignore
'trait:CUSTOM_GOLD': 'required',
# Delete standard
'resources86:MEMORY_MB': 0,
# Standard and custom zeroes don't make it through
'resources:IPV4_ADDRESS': 0,
'resources:CUSTOM_FOO': 0})
instance = objects.Instance()
expected = {
'VCPU': 2,
'DISK_GB': 16,
'CUSTOM_THING': 123,
'SRIOV_NET_VF': 3,
}
actual = utils.resources_from_flavor(instance, flavor)
self.assertEqual(expected, actual)
def test_resource_request_from_extra_specs(self):
extra_specs = {
'resources:VCPU': '2',
'resources:MEMORY_MB': '2048',
'trait:HW_CPU_X86_AVX': 'required',
# Key skipped because no colons
'nocolons': '42',
'trait:CUSTOM_MAGIC': 'required',
# Resource skipped because invalid resource class name
'resources86:CUTSOM_MISSPELLED': '86',
'resources1:SRIOV_NET_VF': '1',
# Resource skipped because non-int-able value
'resources86:CUSTOM_FOO': 'seven',
# Resource skipped because negative value
'resources86:CUSTOM_NEGATIVE': '-7',
'resources1:IPV4_ADDRESS': '1',
# Trait skipped because unsupported value
'trait86:CUSTOM_GOLD': 'preferred',
'trait1:CUSTOM_PHYSNET_NET1': 'required',
'resources2:SRIOV_NET_VF': '1',
'resources2:IPV4_ADDRESS': '2',
'trait2:CUSTOM_PHYSNET_NET2': 'required',
'trait2:HW_NIC_ACCEL_SSL': 'required',
# Groupings that don't quite match the patterns are ignored
'resources_5:SRIOV_NET_VF': '7',
'traitFoo:HW_NIC_ACCEL_SSL': 'required',
# Solo resource, no corresponding traits
'resources3:DISK_GB': '5',
}
# Build up a ResourceRequest from the inside to compare against.
expected = utils.ResourceRequest()
expected._rg_by_id[None] = plib.RequestGroup(
use_same_provider=False,
resources={
'VCPU': 2,
'MEMORY_MB': 2048,
},
required_traits={
'HW_CPU_X86_AVX',
'CUSTOM_MAGIC',
}
)
expected._rg_by_id['1'] = plib.RequestGroup(
resources={
'SRIOV_NET_VF': 1,
'IPV4_ADDRESS': 1,
},
required_traits={
'CUSTOM_PHYSNET_NET1',
}
)
expected._rg_by_id['2'] = plib.RequestGroup(
resources={
'SRIOV_NET_VF': 1,
'IPV4_ADDRESS': 2,
},
required_traits={
'CUSTOM_PHYSNET_NET2',
'HW_NIC_ACCEL_SSL',
}
)
expected._rg_by_id['3'] = plib.RequestGroup(
resources={
'DISK_GB': 5,
}
)
self.assertResourceRequestsEqual(
expected, utils.ResourceRequest.from_extra_specs(extra_specs))
def test_merge_resources(self):
resources = {
'VCPU': 1, 'MEMORY_MB': 1024,
}
new_resources = {
'VCPU': 2, 'MEMORY_MB': 2048, 'CUSTOM_FOO': 1,
}
doubled = {
'VCPU': 3, 'MEMORY_MB': 3072, 'CUSTOM_FOO': 1,
}
saved_orig = dict(resources)
utils.merge_resources(resources, new_resources)
# Check to see that we've doubled our resources
self.assertEqual(doubled, resources)
# and then removed those doubled resources
utils.merge_resources(resources, saved_orig, -1)
self.assertEqual(new_resources, resources)
def test_merge_resources_zero(self):
"""Test 0 value resources are ignored."""
resources = {
'VCPU': 1, 'MEMORY_MB': 1024,
}
new_resources = {
'VCPU': 2, 'MEMORY_MB': 2048, 'DISK_GB': 0,
}
# The result should not include the zero valued resource.
doubled = {
'VCPU': 3, 'MEMORY_MB': 3072,
}
utils.merge_resources(resources, new_resources)
self.assertEqual(doubled, resources)
def test_merge_resources_original_zeroes(self):
"""Confirm that merging that result in a zero in the original
excludes the zeroed resource class.
"""
resources = {
'VCPU': 3, 'MEMORY_MB': 1023, 'DISK_GB': 1,
}
new_resources = {
'VCPU': 1, 'MEMORY_MB': 512, 'DISK_GB': 1,
}
merged = {
'VCPU': 2, 'MEMORY_MB': 511,
}
utils.merge_resources(resources, new_resources, -1)
self.assertEqual(merged, resources)
def test_claim_resources_on_destination_no_source_allocations(self):
"""Tests the negative scenario where the instance does not have
allocations in Placement on the source compute node so no claim is
attempted on the destination compute node.
"""
reportclient = report.SchedulerReportClient()
instance = fake_instance.fake_instance_obj(self.context)
source_node = objects.ComputeNode(
uuid=uuids.source_node, host=instance.host)
dest_node = objects.ComputeNode(uuid=uuids.dest_node, host='dest-host')
@mock.patch.object(reportclient,
'get_allocations_for_consumer_by_provider',
return_value={})
@mock.patch.object(reportclient,
'claim_resources',
new_callable=mock.NonCallableMock)
def test(mock_claim, mock_get_allocs):
utils.claim_resources_on_destination(
self.context, reportclient, instance, source_node, dest_node)
mock_get_allocs.assert_called_once_with(
self.context, uuids.source_node, instance.uuid)
test()
def test_claim_resources_on_destination_claim_fails(self):
"""Tests the negative scenario where the resource allocation claim
on the destination compute node fails, resulting in an error.
"""
reportclient = report.SchedulerReportClient()
instance = fake_instance.fake_instance_obj(self.context)
source_node = objects.ComputeNode(
uuid=uuids.source_node, host=instance.host)
dest_node = objects.ComputeNode(uuid=uuids.dest_node, host='dest-host')
source_res_allocs = {
'VCPU': instance.vcpus,
'MEMORY_MB': instance.memory_mb,
# This would really include ephemeral and swap too but we're lazy.
'DISK_GB': instance.root_gb
}
dest_alloc_request = {
'allocations': {
uuids.dest_node: {
'resources': source_res_allocs
}
}
}
@mock.patch.object(reportclient,
'get_allocations_for_consumer_by_provider',
return_value=source_res_allocs)
@mock.patch.object(reportclient,
'claim_resources', return_value=False)
def test(mock_claim, mock_get_allocs):
# NOTE(danms): Don't pass source_node_allocations here to test
# that they are fetched if needed.
self.assertRaises(exception.NoValidHost,
utils.claim_resources_on_destination,
self.context, reportclient, instance,
source_node, dest_node)
mock_get_allocs.assert_called_once_with(
self.context, uuids.source_node, instance.uuid)
mock_claim.assert_called_once_with(
self.context, instance.uuid, dest_alloc_request,
instance.project_id, instance.user_id,
allocation_request_version='1.12')
test()
def test_claim_resources_on_destination(self):
"""Happy path test where everything is successful."""
reportclient = report.SchedulerReportClient()
instance = fake_instance.fake_instance_obj(self.context)
source_node = objects.ComputeNode(
uuid=uuids.source_node, host=instance.host)
dest_node = objects.ComputeNode(uuid=uuids.dest_node, host='dest-host')
source_res_allocs = {
'VCPU': instance.vcpus,
'MEMORY_MB': instance.memory_mb,
# This would really include ephemeral and swap too but we're lazy.
'DISK_GB': instance.root_gb
}
dest_alloc_request = {
'allocations': {
uuids.dest_node: {
'resources': source_res_allocs
}
}
}
@mock.patch.object(reportclient,
'get_allocations_for_consumer_by_provider')
@mock.patch.object(reportclient,
'claim_resources', return_value=True)
def test(mock_claim, mock_get_allocs):
utils.claim_resources_on_destination(
self.context, reportclient, instance, source_node, dest_node,
source_res_allocs)
self.assertFalse(mock_get_allocs.called)
mock_claim.assert_called_once_with(
self.context, instance.uuid, dest_alloc_request,
instance.project_id, instance.user_id,
allocation_request_version='1.12')
test()
@mock.patch('nova.scheduler.client.report.SchedulerReportClient')
@mock.patch('nova.scheduler.utils.request_is_rebuild')
def test_claim_resources(self, mock_is_rebuild, mock_client):
"""Tests that when claim_resources() is called, that we appropriately
call the placement client to claim resources for the instance.
"""
mock_is_rebuild.return_value = False
ctx = mock.Mock(user_id=uuids.user_id)
spec_obj = mock.Mock(project_id=uuids.project_id)
instance_uuid = uuids.instance
alloc_req = mock.sentinel.alloc_req
mock_client.claim_resources.return_value = True
res = utils.claim_resources(ctx, mock_client, spec_obj, instance_uuid,
alloc_req)
mock_client.claim_resources.assert_called_once_with(
ctx, uuids.instance, mock.sentinel.alloc_req, uuids.project_id,
uuids.user_id, allocation_request_version=None)
self.assertTrue(res)
@mock.patch('nova.scheduler.client.report.SchedulerReportClient')
@mock.patch('nova.scheduler.utils.request_is_rebuild')
    def test_claim_resources_for_policy_check(self, mock_is_rebuild,
                                              mock_client):
mock_is_rebuild.return_value = True
ctx = mock.Mock(user_id=uuids.user_id)
res = utils.claim_resources(ctx, None, mock.sentinel.spec_obj,
mock.sentinel.instance_uuid, [])
self.assertTrue(res)
mock_is_rebuild.assert_called_once_with(mock.sentinel.spec_obj)
self.assertFalse(mock_client.claim_resources.called)
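    # Reader's note (sketch, not from the module under test): the flow these
    # tests exercise is that claim_resources_on_destination() reuses the
    # supplied source allocations or fetches them from placement, rekeys them
    # under the destination node's resource-provider UUID, and calls
    # claim_resources(); a False return surfaces as exception.NoValidHost.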
| [
"[email protected]"
]
| |
a8d1c3855133be357e3ac72d35616e8b7fc0d18b | ce07ccf78739a768971f393222fdca4a56315241 | /employee_management/employee_management/doctype/ord/ord.py | 5cfedfcf1c3f5f11200b1042214ecfbf25a91a73 | [
"MIT"
]
| permissive | Gdinesh03/Frappe | 563e0ddbe925be536f65f925787ed321a6098c0d | efd2d1568b6f5b8a4e0ff31e06a415c717a3d32a | refs/heads/master | 2023-08-27T19:24:12.024442 | 2021-09-14T07:04:27 | 2021-09-14T07:04:27 | 406,260,373 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 805 | py | # -*- coding: utf-8 -*-
# Copyright (c) 2021, Gopi and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
# import frappe
from frappe.model.document import Document
class Ord(Document):
def validate(self):
total = 0
for d in self.get('product_details'):
total += int(d.product_price)
self.total_amount = total
# self.total = mow
# @frappe.whitelist()
# def get_pro(orderb):
# source = frappe.db.sql(''' select * from `tabOrderb` where name = %s''',orderb,as_dict=1)
# for i in source:
# # frappe.log_error(i,"kk")
# sam = frappe.db.sql(''' select product_total from `tabProductdetb` where parent = %s''',i.name,as_dict=1)
# for d in sam:
# mow = sum(float(d.product_total) for d in sam)
# return mow
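# Reader's note (sketch): validate() runs on every save, so total_amount is
# recomputed from the product_details child rows each time an Ord is saved.
# The commented-out get_pro() above attempts the same total via raw SQL on
# the `tabProductdetb` child table (doctype names assumed from that snippet).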
| [
"[email protected]"
]
| |
9e94751b6f70c73ed790cef4cef4bfb8083f9ffd | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/otherforms/_traipsed.py | f59c7ae5d2434f5d2f1133296a72f7b2307b4aa4 | [
"MIT"
]
| permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 226 | py |
#calss header
class _TRAIPSED():
def __init__(self,):
self.name = "TRAIPSED"
		self.definitions = ['traipse']  # quoted: the original bare name `traipse` was undefined (NameError on import)
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.basic = ['traipse']
| [
"[email protected]"
]
| |
e8cff7405331705ecde8b0a9722786a9a9e6d615 | 11ff14c118240e87c4804d0373e4656d0683d479 | /RatToolAgent/test/firefox_test.py | 63c7ccf8fd97890cb406cd2616cc6efaffa93c1d | []
| no_license | wxmmavis/OS3.1 | e3028d9c79d5a1a17449fea6380fcdda902bdec7 | 26d954344207a82d2298821c3c4f01302393dc7e | refs/heads/master | 2020-03-25T20:07:11.225493 | 2018-08-13T03:20:57 | 2018-08-13T03:20:57 | 144,115,963 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 725 | py | import sys
sys.path += ['../../RatToolAgent']
import RatToolAgent as rta
id = rta.init_and_start_browser()
conf = {
'validation_url': "http://172.16.10.252/authenticated/",
'download_loc': r"//a[@id='logo']",
'file_name': "logo.zip",
'page_title': "Ruckus Automation Test",
}
try:
rta.download_file_on_web_server(id, conf.pop('validation_url'),
conf.pop('download_loc'),
conf.pop('file_name'),
**conf
)
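    # Each conf.pop() removes the key it consumes, so only the leftover
    # entries (here just page_title) are forwarded through **conf as extra
    # keyword arguments.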
except Exception, e:
print '........................................'
print 'Raise:' + e.message
rta.close_browser(id)
| [
"[email protected]"
]
| |
8eb0ddd533b6242fa21b29701e10215b497fcd90 | d93901e7ff019c7c929594c17b9ed0c575dd1165 | /NumPyNet/box.py | 506948ebbb806413bf3c0380425a8914f0f69669 | [
"MIT"
]
| permissive | Nico-Curti/NumPyNet | 0e673ad3da4120cd761a5b1f4c1f0c429cfd20a9 | c5e217751e28f0812282333b83964b7fee217cfb | refs/heads/master | 2022-05-04T04:51:50.076629 | 2022-03-28T10:02:15 | 2022-03-28T10:02:15 | 199,490,280 | 57 | 10 | null | null | null | null | UTF-8 | Python | false | false | 7,109 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import division
from __future__ import print_function
import operator
from functools import wraps
__author__ = ['Mattia Ceccarelli', 'Nico Curti']
__email__ = ['[email protected]', '[email protected]']
class Box (object):
'''
Detection box class
Parameters
----------
coords : tuple (default=None)
Box Coordinates as (x, y, w, h)
Example
-------
>>> import pylab as plt
>>> from matplotlib.patches import Rectangle
>>>
>>> b1 = Box((.5, .3, .2, .1))
>>> x_1, y_1, w_1, h_1 = b1.box
>>> left_1, top_1, right_1, bottom_1 = b1.coords
>>>
>>> print('Box1: {}'.format(b1))
>>>
>>> b2 = Box((.4, .5, .2, .5))
>>> x_2, y_2, w_2, h_2 = b2.box
>>> left_2, top_2, right_2, bottom_2 = b2.coords
>>>
>>> print('Box2: {}'.format(b2))
>>>
>>> print('Intersection: {:.3f}'.format(b1.intersection(b2)))
>>> print('Union: {:.3f}'.format(b1.union(b2)))
>>> print('IOU: {:.3f}'.format(b1.iou(b2)))
>>> print('rmse: {:.3f}'.format(b1.rmse(b2)))
>>>
>>> plt.figure()
>>> axis = plt.gca()
>>> axis.add_patch(Rectangle(xy=(left_1, top_1),
>>> width=w_1, height=h_1,
>>> alpha=.5, linewidth=2, color='blue'))
>>> axis.add_patch(Rectangle(xy=(left_2, top_2),
>>> width=w_2, height=h_2,
>>> alpha=.5, linewidth=2, color='red'))
'''
def __init__ (self, coords=None):
if coords is not None:
try:
self.x, self.y, self.w, self.h = coords
except ValueError:
class_name = self.__class__.__name__
raise ValueError('{0}: inconsistent input shape. Expected a 4D (x, y, w, h) shapes and given {1}'.format(class_name, coords))
else:
self.x, self.y, self.w, self.h = (None, None, None, None)
def _is_box (func):
'''
Decorator function to check if the input variable is a Box object
'''
@wraps(func)
def _ (self, b):
if isinstance(b, self.__class__):
return func(self, b)
else:
raise ValueError('Box functions can be applied only on other Box objects')
return _
@property
def box(self):
'''
Get the box coordinates
Returns
-------
coords : tuple
Box coordinates as (x, y, w, h)
'''
return (self.x, self.y, self.w, self.h)
def __iter__ (self):
'''
Iter over coordinates as (x, y, w, h)
'''
yield self.x
yield self.y
yield self.w
yield self.h
def __eq__ (self, other):
'''
Check if the box coordinates are equal
'''
return isinstance(other, Box) and tuple(self) == tuple(other)
def __ne__ (self, other):
'''
Check if the box coordinates are NOT equal
'''
return not (self == other)
def __repr__ (self):
'''
Object representation
'''
return type(self).__name__ + repr(tuple(self))
def _overlap (self, x1, w1, x2, w2):
'''
    Compute the 1D overlap of two segments, given their center coordinates
    and widths; it is applied once per axis (x/w, then y/h)
    Parameters
    ----------
    x1 : float
      Center coordinate of the first segment
    w1 : float
      Width of the first segment
    x2 : float
      Center coordinate of the second segment
    w2 : float
      Width of the second segment
    Returns
    -------
    overlap : float
      The overlap length along the given axis (negative if the segments are disjoint)
'''
half_w1, half_w2 = w1 * .5, w2 * .5
l1, l2 = x1 - half_w1, x2 - half_w2
r1, r2 = x1 + half_w1, x2 + half_w2
return min(r1, r2) - max(l1, l2)
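  # Worked example for _overlap (hypothetical numbers): segments centered at
  # x1=0.5, w1=0.2 and x2=0.4, w2=0.2 span [0.4, 0.6] and [0.3, 0.5], so the
  # overlap is min(0.6, 0.5) - max(0.4, 0.3) = 0.1.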
@_is_box
def intersection (self, other):
'''
Common area between boxes
Parameters
----------
other : Box
2nd term of the evaluation
Returns
-------
intersection : float
Intersection area of two boxes
'''
w = self._overlap(self.x, self.w, other.x, other.w)
h = self._overlap(self.y, self.h, other.y, other.h)
w = w if w > 0. else 0.
h = h if h > 0. else 0.
return w * h
__and__ = intersection
@_is_box
def union (self, other):
'''
Full area without intersection
Parameters
----------
other : Box
2nd term of the evaluation
Returns
-------
union : float
Union area of the two boxes
'''
return self.area + other.area - self.intersection(other)
__add__ = union
@_is_box
def iou (self, other):
'''
Intersection over union
Parameters
----------
other : Box
2nd term of the evaluation
Returns
-------
iou : float
Intersection over union between boxes
'''
union = self.union(other)
return self.intersection(other) / union if union != 0. else float('nan')
__sub__ = iou
@_is_box
def rmse (self, other):
'''
Root mean square error of the boxes
Parameters
----------
other : Box
2nd term of the evaluation
Returns
-------
rmse : float
Root mean square error of the boxes
'''
diffs = tuple(map(operator.sub, self, other))
dot = sum(map(operator.mul, diffs, diffs))
return dot**(.5)
@property
def center(self):
'''
In the current storage the x,y are the center of the box
Returns
-------
center : tuple
Center of the current box.
'''
    x, y, _, _ = self.box  # fixed: the coordinates live on self (there is no self._object)
return (x, y)
@property
def dimensions(self):
'''
In the current storage the w,h are the dimensions of the rectangular box
Returns
-------
dims : tuple
Dimensions of the current box as (width, height).
'''
    _, _, w, h = self.box  # fixed: the coordinates live on self (there is no self._object)
return (w, h)
@property
def area(self):
'''
Compute the are of the box
Returns
-------
area : float
Area of the current box.
'''
return self.w * self.h
@property
def coords(self):
'''
Return box coordinates in clock order (left, top, right, bottom)
Returns
-------
coords : tuple
Coordinates as (left, top, right, bottom)
'''
x, y, w, h = self.box
half_w, half_h = w * .5, h * .5
return (x - half_w, y - half_h, x + half_w, y + half_h)
def __str__(self):
'''
Printer
'''
    fmt = '(left={0:.3f}, top={1:.3f}, right={2:.3f}, bottom={3:.3f})'.format(*self.coords)
return fmt
if __name__ == '__main__':
import pylab as plt
from matplotlib.patches import Rectangle
b1 = Box((.5, .3, .2, .1))
x_1, y_1, w_1, h_1 = b1.box
left_1, top_1, right_1, bottom_1 = b1.coords
print('Box1: {}'.format(b1))
b2 = Box((.4, .5, .2, .5))
x_2, y_2, w_2, h_2 = b2.box
left_2, top_2, right_2, bottom_2 = b2.coords
print('Box2: {}'.format(b2))
print('Intersection: {:.3f}'.format(b1.intersection(b2)))
print('Union: {:.3f}'.format(b1.union(b2)))
print('IOU: {:.3f}'.format(b1.iou(b2)))
print('rmse: {:.3f}'.format(b1.rmse(b2)))
plt.figure()
axis = plt.gca()
axis.add_patch(Rectangle(xy=(left_1, top_1), width=w_1, height=h_1, alpha=.5, linewidth=2, color='blue'))
axis.add_patch(Rectangle(xy=(left_2, top_2), width=w_2, height=h_2, alpha=.5, linewidth=2, color='red'))
plt.show()
| [
"[email protected]"
]
| |
4bad0a9d74fdc33c1b08594b16c3ae6ae2d4ad36 | 26b6a35e2415d94fbc1c9fc43814309a5d6f443b | /tests/test_openapi_basic.py | f18074c73970570a97135bc4faab94c39ee95a93 | [
"BSD-3-Clause",
"MIT"
]
| permissive | BigRLab/apiflask | 57e0c036aa5d284da5340dcecd49108eea651bcd | d6dd5595009be5de6a7741a5a887276c3ac011bf | refs/heads/main | 2023-05-30T21:30:17.930046 | 2021-07-11T04:07:15 | 2021-07-11T04:07:15 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,700 | py | import json
import pytest
from openapi_spec_validator import validate_spec
from .schemas import BarSchema
from .schemas import BazSchema
from .schemas import FooSchema
from apiflask import doc
from apiflask import input
from apiflask import output
from apiflask import Schema as BaseSchema
from apiflask.fields import Integer
def test_spec(app):
assert app.spec
assert 'openapi' in app.spec
def test_spec_processor(app, client):
@app.spec_processor
def edit_spec(spec):
assert spec['openapi'] == '3.0.3'
spec['openapi'] = '3.0.2'
assert app.title == 'APIFlask'
assert spec['info']['title'] == 'APIFlask'
spec['info']['title'] = 'Foo'
return spec
rv = client.get('/openapi.json')
assert rv.status_code == 200
validate_spec(rv.json)
assert rv.json['openapi'] == '3.0.2'
assert rv.json['info']['title'] == 'Foo'
@pytest.mark.parametrize('spec_format', ['json', 'yaml', 'yml'])
def test_get_spec(app, spec_format):
spec = app._get_spec(spec_format)
if spec_format == 'json':
assert isinstance(spec, dict)
else:
assert 'title: APIFlask' in spec
def test_get_spec_force_update(app):
app._get_spec()
@app.route('/foo')
@output(FooSchema)
def foo():
pass
spec = app._get_spec()
assert '/foo' not in spec['paths']
new_spec = app._get_spec(force_update=True)
assert '/foo' in new_spec['paths']
def test_spec_attribute(app):
spec = app._get_spec()
@app.route('/foo')
@output(FooSchema)
def foo():
pass
assert '/foo' not in spec['paths']
assert '/foo' in app.spec['paths']
def test_spec_schemas(app):
@app.route('/foo')
@output(FooSchema(partial=True))
def foo():
pass
@app.route('/bar')
@output(BarSchema(many=True))
def bar():
pass
@app.route('/baz')
@output(BazSchema)
def baz():
pass
class Spam(BaseSchema):
id = Integer()
@app.route('/spam')
@output(Spam)
def spam():
pass
class Schema(BaseSchema):
id = Integer()
@app.route('/schema')
@output(Schema)
def schema():
pass
with app.app_context():
spec = app.spec
assert len(spec['components']['schemas']) == 5
assert 'FooUpdate' in spec['components']['schemas']
assert 'Bar' in spec['components']['schemas']
assert 'Baz' in spec['components']['schemas']
assert 'Spam' in spec['components']['schemas']
assert 'Schema' in spec['components']['schemas']
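        # Naming pattern asserted above: the "Schema" suffix is stripped from
        # component names, and a schema instantiated with partial=True is
        # registered under an "Update"-suffixed name (FooSchema -> FooUpdate).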
def test_servers_and_externaldocs(app):
assert app.external_docs is None
assert app.servers is None
app.external_docs = {
'description': 'Find more info here',
'url': 'https://docs.example.com/'
}
app.servers = [
{
'url': 'http://localhost:5000/',
'description': 'Development server'
},
{
'url': 'https://api.example.com/',
'description': 'Production server'
}
]
rv = app.test_client().get('/openapi.json')
assert rv.status_code == 200
validate_spec(rv.json)
assert rv.json['externalDocs'] == {
'description': 'Find more info here',
'url': 'https://docs.example.com/'
}
assert rv.json['servers'] == [
{
'url': 'http://localhost:5000/',
'description': 'Development server'
},
{
'url': 'https://api.example.com/',
'description': 'Production server'
}
]
def test_auto_200_response(app, client):
@app.get('/foo')
def bare():
pass
@app.get('/bar')
@input(FooSchema)
def only_input():
pass
@app.get('/baz')
@doc(summary='some summary')
def only_doc():
pass
@app.get('/eggs')
@output(FooSchema, 204)
def output_204():
pass
@app.get('/spam')
@doc(responses={204: 'empty'})
def doc_responses():
pass
rv = client.get('/openapi.json')
assert rv.status_code == 200
validate_spec(rv.json)
assert '200' in rv.json['paths']['/foo']['get']['responses']
assert '200' in rv.json['paths']['/bar']['get']['responses']
assert '200' in rv.json['paths']['/baz']['get']['responses']
assert '200' not in rv.json['paths']['/eggs']['get']['responses']
assert '200' not in rv.json['paths']['/spam']['get']['responses']
assert rv.json['paths']['/spam']['get']['responses'][
'204']['description'] == 'empty'
def test_sync_local_json_spec(app, client, tmp_path):
local_spec_path = tmp_path / 'openapi.json'
app.config['SYNC_LOCAL_SPEC'] = True
app.config['LOCAL_SPEC_PATH'] = local_spec_path
app.config['SPEC_FORMAT'] = 'json'
rv = client.get('/openapi.json')
assert rv.status_code == 200
validate_spec(rv.json)
with open(local_spec_path) as f:
spec_content = f.read()
assert json.loads(spec_content) == app.spec
assert '{\n "info": {' in spec_content
assert '"title": "APIFlask",' in spec_content
def test_sync_local_yaml_spec(app, client, tmp_path):
local_spec_path = tmp_path / 'openapi.json'
app.config['SYNC_LOCAL_SPEC'] = True
app.config['LOCAL_SPEC_PATH'] = local_spec_path
app.config['SPEC_FORMAT'] = 'yaml'
rv = client.get('/openapi.json')
assert rv.status_code == 200
with open(local_spec_path) as f:
spec_content = f.read()
assert spec_content == str(app.spec)
assert 'title: APIFlask' in spec_content
def test_sync_local_spec_no_path(app):
app.config['SYNC_LOCAL_SPEC'] = True
with pytest.raises(TypeError):
app.spec
| [
"[email protected]"
]
| |
abcd9cf3a6a72e23d78bf410cfbdac852343d238 | eb40dce4039d528b9cd06dbeda75da09d09d7fc5 | /need_install/Django-1.8.17/tests/basic/models.py | 0ebe3e0b4af812d92177a78a86fa007380fb0e16 | [
"BSD-3-Clause",
"Apache-2.0"
]
| permissive | MulticsYin/MulticsSH | 39b62189446787c7f0f037b1640c9c780bd1dddd | 5837a0bff0e7da0e8535e4e0b31ef6baf24274b4 | refs/heads/master | 2021-08-28T07:53:51.759679 | 2017-12-11T15:31:03 | 2017-12-11T15:31:03 | 82,428,902 | 4 | 2 | null | null | null | null | UTF-8 | Python | false | false | 1,044 | py | # -*- coding: utf-8 -*-
"""
Bare-bones model
This is a basic model with only two non-primary-key fields.
"""
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
@python_2_unicode_compatible
class Article(models.Model):
headline = models.CharField(max_length=100, default='Default headline')
pub_date = models.DateTimeField()
class Meta:
ordering = ('pub_date', 'headline')
def __str__(self):
return self.headline
class ArticleSelectOnSave(Article):
class Meta:
proxy = True
select_on_save = True
@python_2_unicode_compatible
class SelfRef(models.Model):
selfref = models.ForeignKey('self', null=True, blank=True,
related_name='+')
article = models.ForeignKey(Article, on_delete=models.SET_NULL, null=True, blank=True)
def __str__(self):
# This method intentionally doesn't work for all cases - part
# of the test for ticket #20278
return SelfRef.objects.get(selfref=self).pk
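# Usage sketch (not part of the original test models):
# a = Article.objects.create(headline='Hello', pub_date=datetime.now())
# p = ArticleSelectOnSave.objects.get(pk=a.pk)
# p.headline = 'Updated'
# p.save()  # select_on_save=True makes Django SELECT first, then UPDATE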
| [
"[email protected]"
]
| |
0ecb406dc4b005795c6d37aaa895fd106844ac7f | b1e7481f8b5bf40c2547c95b1863e25b11b8ef78 | /Kai/crab/NANOv7_NoveCampaign/2017/crab_script_2017_Mu_C.py | a8cd9f368837fbf5bec45d00d8e189ee53cc12fe | [
"Apache-2.0"
]
| permissive | NJManganelli/FourTopNAOD | 3df39fd62c0546cdbb1886b23e35ebdc1d3598ad | c86181ae02b1933be59d563c94e76d39b83e0c52 | refs/heads/master | 2022-12-22T22:33:58.697162 | 2022-12-17T01:19:36 | 2022-12-17T01:19:36 | 143,607,743 | 1 | 1 | Apache-2.0 | 2022-06-04T23:11:42 | 2018-08-05T11:40:42 | Python | UTF-8 | Python | false | false | 6,794 | py | #!/usr/bin/env python
import os, time, collections, copy, json, multiprocessing
from PhysicsTools.NanoAODTools.postprocessing.framework.postprocessor import *
from PhysicsTools.NanoAODTools.postprocessing.framework.crabhelper import inputFiles,runsAndLumis
from PhysicsTools.NanoAODTools.postprocessing.modules.common.puWeightProducer import *
from PhysicsTools.NanoAODTools.postprocessing.modules.btv.btagSFProducer import *
from PhysicsTools.NanoAODTools.postprocessing.modules.jme.jetmetHelperRun2 import *
from FourTopNAOD.Kai.modules.LeptonSkimmer import *
from FourTopNAOD.Kai.modules.JetMETSkimmer import *
isData = True
isUltraLegacy = False
era = "2017"
subera = "C"
thePreselection = None
crossSection = None
equivLumi = 41.53
nEventsPositive = None
nEventsNegative = None
sumWeights = None
TriggerChannel = "Mu"
JESUnc = "Merged" # options: "All", "Merged", "Total"
theFiles = inputFiles()
GoldenJSON = {"2016": {"non-UL": "Cert_271036-284044_13TeV_ReReco_07Aug2017_Collisions16_JSON.txt",
"UL": "Cert_271036-284044_13TeV_Legacy2016_Collisions16_JSON.txt"
},
"2017": {"non-UL": "Cert_294927-306462_13TeV_EOY2017ReReco_Collisions17_JSON_v1.txt",
"UL": "Cert_294927-306462_13TeV_UL2017_Collisions17_GoldenJSON.txt"
},
"2018": {"non-UL": "Cert_314472-325175_13TeV_17SeptEarlyReReco2018ABC_PromptEraD_Collisions18_JSON.txt",
"UL": "Cert_314472-325175_13TeV_Legacy2018_Collisions18_JSON.txt"
}
}
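# e.g. GoldenJSON["2017"]["non-UL"] resolves to the 2017 ReReco certified-lumi
# file that the lookup below loads when isData is True and isUltraLegacy is
# False.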
if isData:
theLumis = os.path.join(os.environ["CMSSW_BASE"], "python/FourTopNAOD/Kai/jsons", GoldenJSON.get(era).get("UL" if isUltraLegacy else "non-UL"))
print("Loading Golden Json: {}".format(theLumis))
if not os.path.isfile(theLumis):
theLumis = os.path.join(os.environ["CMSSW_BASE"], "src/FourTopNAOD/Kai/python/jsons", GoldenJSON.get(era).get("UL" if isUltraLegacy else "non-UL"))
if not os.path.isfile(theLumis):
raise RuntimeError("Valid GoldenJSON file not found, if running on CRAB try a new scram build before resubmitting")
else:
theLumis = None
moduleCache = []
if not isData:
if era == "2016":
moduleCache.append(puWeight_2016())
elif era == "2017":
moduleCache.append(puWeight_2017())
elif era == "2018":
moduleCache.append(puWeight_2018())
else:
raise RuntimeError("Unexpected era identifier {}".format(era))
if JESUnc in ["All", "Merged"]: #btag POG provides all JEC unc sources, except for RelativeSample
btagjes_sources = ['jes', 'jesAbsoluteMPFBias', 'jesAbsoluteScale', 'jesAbsoluteStat', 'jesFlavorQCD', 'jesFragmentation', 'jesPileUpDataMC', 'jesPileUpPtBB', 'jesPileUpPtEC1', 'jesPileUpPtEC2', 'jesPileUpPtHF', 'jesPileUpPtRef', 'jesRelativeBal', 'jesRelativeFSR', 'jesRelativeJEREC1', 'jesRelativeJEREC2', 'jesRelativeJERHF', 'jesRelativePtBB', 'jesRelativePtEC1', 'jesRelativePtEC2', 'jesRelativePtHF', 'jesRelativeStatEC', 'jesRelativeStatFSR', 'jesRelativeStatHF', 'jesSinglePionECAL', 'jesSinglePionHCAL', 'jesTimePtEta']
# if JESUnc == "Merged": #no btag shape unc for regrouped JEC available, so use the total one ("jes") and the remaining single ones that are not grouped (see also: https://docs.google.com/spreadsheets/d/1Feuj1n0MdotcPq19Mht7SUIgvkXkA4hiB0BxEuBShLw/edit#gid=1345121349)
# btagjes_sources = ['jes', 'jesFlavorQCD','jesPileUpPtEC2', 'jesRelativeBal']
else:
btagjes_sources = ['jes']
moduleCache.append(btagSFProducer(era,
algo="deepjet",
selectedWPs=['M', 'shape_corr'],
sfFileName=None, #Automatically deduced
verbose=0,
jesSystsForShape=btagjes_sources
)
)
moduleCache.append(btagSFProducer(era,
algo="deepcsv",
selectedWPs=['M', 'shape_corr'],
sfFileName=None, #Automatically deduced
verbose=0,
jesSystsForShape=btagjes_sources
)
)
#Need to make it into a function, so extra () pair...
jmeModule = createJMECorrector(isMC=(not isData),
dataYear=int(era),
runPeriod=subera if isData else None,
jesUncert=JESUnc,
jetType="AK4PFchs",
noGroom=False,
metBranchName="METFixEE2017" if era == "2017" else "MET",
applySmearing=True,
isFastSim=False,
applyHEMfix=True if era == "2018" and isUltraLegacy else False,
splitJER=False,
saveMETUncs=['T1', 'T1Smear']
)
moduleCache.append(jmeModule())
moduleCache.append(TriggerAndLeptonSkimmer('baseline',
era=era,
subera=subera,
isData=isData,
TriggerChannel=TriggerChannel,
fillHists=False,
mode="Flag",
)
)
moduleCache.append(JetMETSkimmer(jetMinPt=20.0,
jetMaxEta=2.4 if era == "2016" else 2.5,
jetMinID=0b010,
jetMinCount=4,
minPseudoHT=350,
fillHists=False
)
)
p=PostProcessor(".",
theFiles,
modules=moduleCache,
cut=thePreselection,
provenance=True,
fwkJobReport=True,
jsonInput=theLumis,
histFileName="hist.root",
histDirName="plots",
branchsel=None,
outputbranchsel=None,
compression="LZMA:9",
friend=False,
postfix=None,
noOut=False,
justcount=False,
haddFileName="tree.root",
maxEntries=None,
firstEntry=0,
prefetch=True,
longTermCache=False
)
p.run()
| [
"[email protected]"
]
| |
0cbc26a7c531c9e66e72aff03e1ef1e05d090406 | 163bbb4e0920dedd5941e3edfb2d8706ba75627d | /Code/CodeRecords/2542/60761/235001.py | 0f6cce935b31eb1a6dc6d3e0854022eb80c48159 | []
| no_license | AdamZhouSE/pythonHomework | a25c120b03a158d60aaa9fdc5fb203b1bb377a19 | ffc5606817a666aa6241cfab27364326f5c066ff | refs/heads/master | 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 301 | py | import math
arr=input("")
arr=arr.replace("[","")
arr=arr.replace("]","")
arr=list(map(int,arr.split(",")))
arr.sort()
i=1
maxlen=1
templen=1
while(i<len(arr)):
    if(arr[i]==arr[i-1]+1):
        templen=templen+1
    elif(arr[i]==arr[i-1]):
        pass  # a duplicate neither extends nor breaks the current run
    else:
        maxlen=max(templen,maxlen)
        templen=1
    i=i+1
maxlen=max(templen,maxlen)  # count a run that reaches the end of the array
print(maxlen)
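# Worked example: the input "[100,4,200,1,3,2]" sorts to [1, 2, 3, 4, 100, 200];
# the longest consecutive run is 1,2,3,4, so the program prints 4.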
"[email protected]"
]
| |
fc362768e4ec1bd2b2882b5a20af0d37ee5f822a | fbbe424559f64e9a94116a07eaaa555a01b0a7bb | /Keras_tensorflow_nightly/source2.7/tensorflow/contrib/model_pruning/python/layers/core_layers.py | 764ab620bc2227ff5e8e3f473d689e0e133e83d4 | [
"MIT"
]
| permissive | ryfeus/lambda-packs | 6544adb4dec19b8e71d75c24d8ed789b785b0369 | cabf6e4f1970dc14302f87414f170de19944bac2 | refs/heads/master | 2022-12-07T16:18:52.475504 | 2022-11-29T13:35:35 | 2022-11-29T13:35:35 | 71,386,735 | 1,283 | 263 | MIT | 2022-11-26T05:02:14 | 2016-10-19T18:22:39 | Python | UTF-8 | Python | false | false | 19,691 | py | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contains the core layer classes for model pruning and its functional aliases.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.layers import base
from tensorflow.python.layers import utils
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import standard_ops
MASK_COLLECTION = 'masks'
THRESHOLD_COLLECTION = 'thresholds'
MASKED_WEIGHT_COLLECTION = 'masked_weights'
WEIGHT_COLLECTION = 'kernel'
# The 'weights' part of the name is needed for the quantization library
# to recognize that the kernel should be quantized.
MASKED_WEIGHT_NAME = 'weights/masked_weight'
class _MaskedConv(base.Layer):
"""Abstract nD convolution layer (private, used as implementation base).
This layer creates a convolution kernel that is convolved
(actually cross-correlated) with the layer input to produce a tensor of
outputs. The weight tensor of this layer is masked.
If `use_bias` is True (and a `bias_initializer` is provided),
a bias vector is created and added to the outputs. Finally, if
`activation` is not `None`, it is applied to the outputs as well.
Arguments:
rank: An integer, the rank of the convolution, e.g. "2" for 2D convolution.
filters: Integer, the dimensionality of the output space (i.e. the number
of filters in the convolution).
kernel_size: An integer or tuple/list of n integers, specifying the
length of the convolution window.
strides: An integer or tuple/list of n integers,
specifying the stride length of the convolution.
Specifying any stride value != 1 is incompatible with specifying
any `dilation_rate` value != 1.
padding: One of `"valid"` or `"same"` (case-insensitive).
data_format: A string, one of `channels_last` (default) or `channels_first`.
The ordering of the dimensions in the inputs.
`channels_last` corresponds to inputs with shape
`(batch, ..., channels)` while `channels_first` corresponds to
inputs with shape `(batch, channels, ...)`.
dilation_rate: An integer or tuple/list of n integers, specifying
the dilation rate to use for dilated convolution.
Currently, specifying any `dilation_rate` value != 1 is
incompatible with specifying any `strides` value != 1.
activation: Activation function. Set it to None to maintain a
linear activation.
use_bias: Boolean, whether the layer uses a bias.
kernel_initializer: An initializer for the convolution kernel.
bias_initializer: An initializer for the bias vector. If None, the default
initializer will be used.
kernel_regularizer: Optional regularizer for the convolution kernel.
bias_regularizer: Optional regularizer for the bias vector.
activity_regularizer: Regularizer function for the output.
trainable: Boolean, if `True` also add variables to the graph collection
`GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`).
name: A string, the name of the layer.
"""
def __init__(self,
rank,
filters,
kernel_size,
strides=1,
padding='valid',
data_format='channels_last',
dilation_rate=1,
activation=None,
use_bias=True,
kernel_initializer=None,
bias_initializer=init_ops.zeros_initializer(),
kernel_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
trainable=True,
name=None,
**kwargs):
super(_MaskedConv, self).__init__(
trainable=trainable,
name=name,
activity_regularizer=activity_regularizer,
**kwargs)
self.rank = rank
self.filters = filters
self.kernel_size = utils.normalize_tuple(kernel_size, rank, 'kernel_size')
self.strides = utils.normalize_tuple(strides, rank, 'strides')
self.padding = utils.normalize_padding(padding)
self.data_format = utils.normalize_data_format(data_format)
self.dilation_rate = utils.normalize_tuple(dilation_rate, rank,
'dilation_rate')
self.activation = activation
self.use_bias = use_bias
self.kernel_initializer = kernel_initializer
self.bias_initializer = bias_initializer
self.kernel_regularizer = kernel_regularizer
self.bias_regularizer = bias_regularizer
self.input_spec = base.InputSpec(ndim=self.rank + 2)
def build(self, input_shape):
input_shape = tensor_shape.TensorShape(input_shape)
channel_axis = 1 if self.data_format == 'channels_first' else -1
if input_shape[channel_axis].value is None:
raise ValueError('The channel dimension of the inputs '
'should be defined. Found `None`.')
input_dim = input_shape[channel_axis].value
kernel_shape = self.kernel_size + (input_dim, self.filters)
self.mask = self.add_variable(
name='mask',
shape=kernel_shape,
initializer=init_ops.ones_initializer(),
trainable=False,
dtype=self.dtype)
self.kernel = self.add_variable(
name='kernel',
shape=kernel_shape,
initializer=self.kernel_initializer,
regularizer=self.kernel_regularizer,
trainable=True,
dtype=self.dtype)
self.threshold = self.add_variable(
name='threshold',
shape=[],
initializer=init_ops.zeros_initializer(),
trainable=False,
dtype=self.dtype)
# Add masked_weights in the weights namescope so as to make it easier
# for the quantization library to add quant ops.
self.masked_kernel = math_ops.multiply(self.mask, self.kernel,
MASKED_WEIGHT_NAME)
ops.add_to_collection(MASK_COLLECTION, self.mask)
ops.add_to_collection(MASKED_WEIGHT_COLLECTION, self.masked_kernel)
ops.add_to_collection(THRESHOLD_COLLECTION, self.threshold)
ops.add_to_collection(WEIGHT_COLLECTION, self.kernel)
if self.use_bias:
self.bias = self.add_variable(
name='bias',
shape=(self.filters,),
initializer=self.bias_initializer,
regularizer=self.bias_regularizer,
trainable=True,
dtype=self.dtype)
else:
self.bias = None
self.input_spec = base.InputSpec(
ndim=self.rank + 2, axes={channel_axis: input_dim})
self.built = True
def call(self, inputs):
outputs = nn.convolution(
input=inputs,
filter=self.masked_kernel,
dilation_rate=self.dilation_rate,
strides=self.strides,
padding=self.padding.upper(),
data_format=utils.convert_data_format(self.data_format, self.rank + 2))
if self.bias is not None:
if self.data_format == 'channels_first':
if self.rank == 1:
# nn.bias_add does not accept a 1D input tensor.
bias = array_ops.reshape(self.bias, (1, self.filters, 1))
outputs += bias
if self.rank == 2:
outputs = nn.bias_add(outputs, self.bias, data_format='NCHW')
if self.rank == 3:
# As of Mar 2017, direct addition is significantly slower than
# bias_add when computing gradients. To use bias_add, we collapse Z
# and Y into a single dimension to obtain a 4D input tensor.
outputs_shape = outputs.shape.as_list()
outputs_4d = array_ops.reshape(outputs, [
outputs_shape[0], outputs_shape[1],
outputs_shape[2] * outputs_shape[3], outputs_shape[4]
])
outputs_4d = nn.bias_add(outputs_4d, self.bias, data_format='NCHW')
outputs = array_ops.reshape(outputs_4d, outputs_shape)
else:
outputs = nn.bias_add(outputs, self.bias, data_format='NHWC')
if self.activation is not None:
return self.activation(outputs)
return outputs
def compute_output_shape(self, input_shape):
input_shape = tensor_shape.TensorShape(input_shape).as_list()
if self.data_format == 'channels_last':
space = input_shape[1:-1]
new_space = []
for i in range(len(space)):
new_dim = utils.conv_output_length(
space[i],
self.kernel_size[i],
padding=self.padding,
stride=self.strides[i],
dilation=self.dilation_rate[i])
new_space.append(new_dim)
return tensor_shape.TensorShape([input_shape[0]] + new_space +
[self.filters])
else:
space = input_shape[2:]
new_space = []
for i in range(len(space)):
new_dim = utils.conv_output_length(
space[i],
self.kernel_size[i],
padding=self.padding,
stride=self.strides[i],
dilation=self.dilation_rate[i])
new_space.append(new_dim)
return tensor_shape.TensorShape([input_shape[0], self.filters] +
new_space)
class MaskedConv2D(_MaskedConv):
"""2D convolution layer (e.g. spatial convolution over images).
This layer creates a convolution kernel that is convolved
(actually cross-correlated) with the layer input to produce a tensor of
outputs. If `use_bias` is True (and a `bias_initializer` is provided),
a bias vector is created and added to the outputs. Finally, if
`activation` is not `None`, it is applied to the outputs as well.
Arguments:
filters: Integer, the dimensionality of the output space (i.e. the number
of filters in the convolution).
kernel_size: An integer or tuple/list of 2 integers, specifying the
height and width of the 2D convolution window.
Can be a single integer to specify the same value for
all spatial dimensions.
strides: An integer or tuple/list of 2 integers,
specifying the strides of the convolution along the height and width.
Can be a single integer to specify the same value for
all spatial dimensions.
Specifying any stride value != 1 is incompatible with specifying
any `dilation_rate` value != 1.
padding: One of `"valid"` or `"same"` (case-insensitive).
data_format: A string, one of `channels_last` (default) or `channels_first`.
The ordering of the dimensions in the inputs.
`channels_last` corresponds to inputs with shape
`(batch, height, width, channels)` while `channels_first` corresponds to
inputs with shape `(batch, channels, height, width)`.
dilation_rate: An integer or tuple/list of 2 integers, specifying
the dilation rate to use for dilated convolution.
Can be a single integer to specify the same value for
all spatial dimensions.
Currently, specifying any `dilation_rate` value != 1 is
incompatible with specifying any stride value != 1.
activation: Activation function. Set it to None to maintain a
linear activation.
use_bias: Boolean, whether the layer uses a bias.
kernel_initializer: An initializer for the convolution kernel.
bias_initializer: An initializer for the bias vector. If None, the default
initializer will be used.
kernel_regularizer: Optional regularizer for the convolution kernel.
bias_regularizer: Optional regularizer for the bias vector.
activity_regularizer: Regularizer function for the output.
trainable: Boolean, if `True` also add variables to the graph collection
`GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`).
name: A string, the name of the layer.
"""
def __init__(self,
filters,
kernel_size,
strides=(1, 1),
padding='valid',
data_format='channels_last',
dilation_rate=(1, 1),
activation=None,
use_bias=True,
kernel_initializer=None,
bias_initializer=init_ops.zeros_initializer(),
kernel_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
trainable=True,
name=None,
**kwargs):
super(MaskedConv2D, self).__init__(
rank=2,
filters=filters,
kernel_size=kernel_size,
strides=strides,
padding=padding,
data_format=data_format,
dilation_rate=dilation_rate,
activation=activation,
use_bias=use_bias,
kernel_initializer=kernel_initializer,
bias_initializer=bias_initializer,
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
activity_regularizer=activity_regularizer,
trainable=trainable,
name=name,
**kwargs)
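# Minimal usage sketch (shapes assumed; not part of the original module):
#   layer = MaskedConv2D(filters=32, kernel_size=3, padding='same')
#   y = layer.apply(x)  # x: a float32 tensor of shape [batch, H, W, C]
# Pruning then zeroes entries of `layer.mask` for weights below
# `layer.threshold`, via the tensors gathered in MASK_COLLECTION,
# MASKED_WEIGHT_COLLECTION and THRESHOLD_COLLECTION.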
class MaskedFullyConnected(base.Layer):
"""Fully-connected layer class with masked weights.
This layer implements the operation:
  `outputs = activation(inputs * kernel + bias)`
Where `activation` is the activation function passed as the `activation`
argument (if not `None`), `kernel` is a weights matrix created by the layer,
and `bias` is a bias vector created by the layer
(only if `use_bias` is `True`).
Note: if the input to the layer has a rank greater than 2, then it is
flattened prior to the initial matrix multiply by `kernel`.
Arguments:
units: Integer or Long, dimensionality of the output space.
activation: Activation function (callable). Set it to None to maintain a
linear activation.
use_bias: Boolean, whether the layer uses a bias.
kernel_initializer: Initializer function for the weight matrix.
bias_initializer: Initializer function for the bias.
kernel_regularizer: Regularizer function for the weight matrix.
bias_regularizer: Regularizer function for the bias.
activity_regularizer: Regularizer function for the output.
trainable: Boolean, if `True` also add variables to the graph collection
`GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`).
name: String, the name of the layer. Layers with the same name will
share weights, but to avoid mistakes we require reuse=True in such cases.
reuse: Boolean, whether to reuse the weights of a previous layer
by the same name.
Properties:
units: Python integer, dimensionality of the output space.
activation: Activation function (callable).
use_bias: Boolean, whether the layer uses a bias.
kernel_initializer: Initializer instance (or name) for the weight matrix.
bias_initializer: Initializer instance (or name) for the bias.
kernel_regularizer: Regularizer instance for the weight matrix (callable)
bias_regularizer: Regularizer instance for the bias (callable).
activity_regularizer: Regularizer instance for the output (callable)
kernel: Weight matrix (TensorFlow variable or tensor).
bias: Bias vector, if applicable (TensorFlow variable or tensor).
"""
def __init__(self,
units,
activation=None,
use_bias=True,
kernel_initializer=None,
bias_initializer=init_ops.zeros_initializer(),
kernel_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
trainable=True,
name=None,
**kwargs):
super(MaskedFullyConnected, self).__init__(
trainable=trainable,
name=name,
activity_regularizer=activity_regularizer,
**kwargs)
self.units = units
self.activation = activation
self.use_bias = use_bias
self.kernel_initializer = kernel_initializer
self.bias_initializer = bias_initializer
self.kernel_regularizer = kernel_regularizer
self.bias_regularizer = bias_regularizer
self.input_spec = base.InputSpec(min_ndim=2)
def build(self, input_shape):
input_shape = tensor_shape.TensorShape(input_shape)
if input_shape[-1].value is None:
raise ValueError('The last dimension of the inputs to `Dense` '
'should be defined. Found `None`.')
self.input_spec = base.InputSpec(
min_ndim=2, axes={-1: input_shape[-1].value})
self.kernel = self.add_variable(
'kernel',
shape=[input_shape[-1].value, self.units],
initializer=self.kernel_initializer,
regularizer=self.kernel_regularizer,
dtype=self.dtype,
trainable=True)
self.mask = self.add_variable(
name='mask',
shape=[input_shape[-1].value, self.units],
initializer=init_ops.ones_initializer(),
trainable=False,
dtype=self.dtype)
self.threshold = self.add_variable(
name='threshold',
shape=[],
initializer=init_ops.zeros_initializer(),
trainable=False,
dtype=self.dtype)
# Add masked_weights in the weights namescope so as to make it easier
# for the quantization library to add quant ops.
self.masked_kernel = math_ops.multiply(self.mask, self.kernel,
MASKED_WEIGHT_NAME)
ops.add_to_collection(MASK_COLLECTION, self.mask)
ops.add_to_collection(MASKED_WEIGHT_COLLECTION, self.masked_kernel)
ops.add_to_collection(THRESHOLD_COLLECTION, self.threshold)
ops.add_to_collection(WEIGHT_COLLECTION, self.kernel)
if self.use_bias:
self.bias = self.add_variable(
'bias',
shape=[
self.units,
],
initializer=self.bias_initializer,
regularizer=self.bias_regularizer,
dtype=self.dtype,
trainable=True)
else:
self.bias = None
self.built = True
def call(self, inputs):
inputs = ops.convert_to_tensor(inputs, dtype=self.dtype)
shape = inputs.get_shape().as_list()
output_shape = shape[:-1] + [self.units]
if len(output_shape) > 2:
# Broadcasting is required for the inputs.
outputs = standard_ops.tensordot(inputs, self.masked_kernel,
[[len(shape) - 1], [0]])
# Reshape the output back to the original ndim of the input.
outputs.set_shape(output_shape)
else:
outputs = standard_ops.matmul(inputs, self.masked_kernel)
if self.use_bias:
outputs = nn.bias_add(outputs, self.bias)
if self.activation is not None:
return self.activation(outputs) # pylint: disable=not-callable
return outputs
def compute_output_shape(self, input_shape):
input_shape = tensor_shape.TensorShape(input_shape)
input_shape = input_shape.with_rank_at_least(2)
if input_shape[-1].value is None:
raise ValueError(
'The innermost dimension of input_shape must be defined, but saw: %s'
% input_shape)
return input_shape[:-1].concatenate(self.units)
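# Minimal usage sketch (shapes assumed; not part of the original module):
#   fc = MaskedFullyConnected(units=10, activation=nn.relu)
#   logits = fc.apply(x)  # x: a float32 tensor of shape [batch, features]
# The effective weight is mask * kernel, so zeroed mask entries prune
# individual connections without modifying the stored kernel values.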
| [
"[email protected]"
]
| |
7c658b02af1216d35936435030ac30caedbcf48f | e79888cd68177e7ec5125270cdc52f888e211e78 | /hirao/chapter01/knock04.py | de4c0c4219da8267d76dd51e2e4cbcf9b31ea0fd | []
| no_license | cafenoctua/100knock2019 | ec259bee27936bdacfe0097d42f23cc7500f0a07 | 88717a78c4290101a021fbe8b4f054f76c9d3fa6 | refs/heads/master | 2022-06-22T04:42:03.939373 | 2019-09-03T11:05:19 | 2019-09-03T11:05:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 427 | py | s = "Hi He Lied Because Boron Could Not Oxidize Fluorine. New Nations Might Also Sign Peace Security Clause. Arthur King Can."
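# NLP 100 knocks #04: take the first character of the words at the positions
# in display_list and the first two characters of every other word, mapping
# each prefix to its 1-based position -- the element-symbol mnemonic
# (H, He, Li, Be, B, C, N, O, F, Ne, ...; "Might" yields 'Mi' rather than Mg).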
drop = ",."
print(s)
for c in list(drop):
s = s.replace(c, "")
s = s.split()
display_list = [1, 5, 6, 7, 8, 9, 15, 16, 19]
ans_dict = {}
for i, word in enumerate(s):
if i + 1 in display_list:
ans = word[0]
else:
ans = word[:2]
ans_dict[ans] = i + 1
print(ans_dict)
| [
"[email protected]"
]
| |
13e7dfb79f5a9e988593ddae9d68927018ac1463 | f070c3acba7da2254adc2c12f80e54b830396d40 | /test/venv/bin/futurize | 65e98b939532e827e94109ba696ca6402ce2bfc3 | []
| no_license | liruidesysu/cloudCluster | 241a6ac472ecce9c6b4c966a44304128d258fc9b | fc558b464c3052f59cb1e6326aa22bade556b0c8 | refs/heads/master | 2022-11-06T03:51:31.954607 | 2019-08-22T12:47:53 | 2019-08-22T12:47:53 | 200,144,454 | 0 | 1 | null | 2022-03-29T21:56:02 | 2019-08-02T01:42:17 | Python | UTF-8 | Python | false | false | 252 | #!/home/liruide/Desktop/cloudCluster/venv/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from libfuturize.main import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(main())
| [
"[email protected]"
]
| ||
3c22bf817ee148fbc70da528dfb8cff5991cedb0 | f12fac0dd5c9c9eeedff16377d1f57a3cd02ef32 | /Python游戏编程入门/02.初识Pygame:Pie游戏/绘制弧形.py | 8031255f9f3580e0e721331544bdda1f67ae9357 | []
| no_license | SesameMing/PythonPygame | 61fe09a38d1729963b86f348b349572760676195 | ca0554427cd30838d56630e8b1e04aa0e26834a1 | refs/heads/master | 2020-12-07T21:23:56.271193 | 2016-11-25T06:38:06 | 2016-11-25T06:38:06 | 66,639,140 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 685 | py | #!/usr/bin/env python3
# -*-coding:utf-8-*-
# Author:SesameMing <blog.v-api.cn>
# Email:[email protected]
# Time:2016-11-25 12:51
import sys
import math
import pygame
from pygame.locals import *
pygame.init()
screen = pygame.display.set_mode((600, 500))
pygame.display.set_caption("Drawing Arcs")
while True:
for event in pygame.event.get():
if event.type in (QUIT, KEYDOWN):
sys.exit()
screen.fill((0, 0, 200))
color = 255, 0, 255
position = 200, 150, 200, 200
start_angle = math.radians(0)
end_angle = math.radians(180)
width = 8
pygame.draw.arc(screen, color, position, start_angle, end_angle, width)
pygame.display.update() | [
"[email protected]"
]
| |
04e3a1cfd126c0710557c5f5944b73240af4deec | e82b761f53d6a3ae023ee65a219eea38e66946a0 | /All_In_One/addons/vb25/plugins/TexSwirl.py | 9ca7e67f86475efdb3be99c3fa816a582b516141 | []
| no_license | 2434325680/Learnbgame | f3a050c28df588cbb3b14e1067a58221252e2e40 | 7b796d30dfd22b7706a93e4419ed913d18d29a44 | refs/heads/master | 2023-08-22T23:59:55.711050 | 2021-10-17T07:26:07 | 2021-10-17T07:26:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,575 | py | #
# V-Ray/Blender
#
# http://vray.cgdo.ru
#
# Author: Andrey M. Izrantsev (aka bdancer)
# E-Mail: [email protected]
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# All Rights Reserved. V-Ray(R) is a registered trademark of Chaos Software.
#
# Blender module
import bpy
from bpy.props import *
# V-Ray/Blender modules
from vb25.utils import *
from vb25.ui import ui
from vb25.plugins import *
from vb25.texture import *
from vb25.uvwgen import *
TYPE = 'TEXTURE'
ID = 'TexSwirl'
PLUG = 'TexSwirl'
NAME = 'Swirl'
DESC = "TexSwirl"
PID = 15
PARAMS = (
'uvwgen',
'color1',
'color2',
'swirl_intensity',
'color_contrast',
'swirl_amount',
'constant_detail',
'center_x',
'center_y',
'random_seed',
'twist',
)
def add_properties(rna_pointer):
class TexSwirl(bpy.types.PropertyGroup):
pass
bpy.utils.register_class(TexSwirl)
rna_pointer.TexSwirl= PointerProperty(
name= "TexSwirl",
type= TexSwirl,
description= "V-Ray TexSwirl settings"
)
TexSwirl.color1= FloatVectorProperty(
name= "Color 1",
description= "First color",
subtype= 'COLOR',
min= 0.0,
max= 1.0,
soft_min= 0.0,
soft_max= 1.0,
default= (1,1,1)
)
# color2
TexSwirl.color2= FloatVectorProperty(
name= "Color 2",
description= "Second color",
subtype= 'COLOR',
min= 0.0,
max= 1.0,
soft_min= 0.0,
soft_max= 1.0,
default= (0,0,0)
)
# swirl_intensity
TexSwirl.swirl_intensity= FloatProperty(
name= "Swirl Intensity",
description= "Swirl Intensity",
min= 0.0,
max= 100.0,
soft_min= 0.0,
soft_max= 10.0,
precision= 3,
default= 2
)
# color_contrast
TexSwirl.color_contrast= FloatProperty(
name= "Color Contrast",
description= "Color Contrast",
min= 0.0,
max= 100.0,
soft_min= 0.0,
soft_max= 10.0,
precision= 3,
default= 0.4
)
# swirl_amount
TexSwirl.swirl_amount= FloatProperty(
name= "Swirl Amount",
description= "Swirl Amount",
min= 0.0,
max= 100.0,
soft_min= 0.0,
soft_max= 10.0,
precision= 3,
default= 1
)
# constant_detail
TexSwirl.constant_detail= IntProperty(
name= "Constant Detail",
description= "Constant Detail",
min= 0,
max= 100,
soft_min= 0,
soft_max= 10,
default= 4
)
# center_x
TexSwirl.center_x= FloatProperty(
name= "Center X",
description= "Center Position X",
	min= -100.0,  # widened from 0.0 so the -0.5 default below is not clamped
	max= 100.0,
	soft_min= -10.0,
	soft_max= 10.0,
precision= 3,
default= -0.5
)
# center_y
TexSwirl.center_y= FloatProperty(
name= "Center Y",
description= "Center Position Y",
	min= -100.0,  # widened from 0.0 so the -0.5 default below is not clamped
	max= 100.0,
	soft_min= -10.0,
	soft_max= 10.0,
precision= 3,
default= -0.5
)
# random_seed
TexSwirl.random_seed= FloatProperty(
name= "Random Seed",
description= "Random Seed",
min= 0.0,
max= 100.0,
soft_min= 0.0,
soft_max= 10.0,
precision= 3,
default= 0
)
# twist
TexSwirl.twist= FloatProperty(
name= "Twist",
description= "Twist",
min= 0.0,
max= 100.0,
soft_min= 0.0,
soft_max= 10.0,
precision= 3,
default= 1
)
'''
OUTPUT
'''
def write(bus):
scene= bus['scene']
ofile= bus['files']['textures']
slot= bus['mtex']['slot']
texture= bus['mtex']['texture']
tex_name= bus['mtex']['name']
uvwgen= write_uvwgen(bus)
TexSwirl= getattr(texture.vray, PLUG)
ofile.write("\n%s %s {"%(PLUG, tex_name))
PLUGINS['TEXTURE']['TexCommon'].write(bus)
for param in PARAMS:
if param == 'uvwgen':
value= uvwgen
else:
value= getattr(TexSwirl, param)
ofile.write("\n\t%s= %s;"%(param, a(scene, value)))
ofile.write("\n}\n")
return tex_name
'''
GUI
'''
class VRAY_TP_TexSwirl(ui.VRayTexturePanel, bpy.types.Panel):
bl_label = NAME
COMPAT_ENGINES = {'VRAY_RENDER','VRAY_RENDER_PREVIEW'}
@classmethod
def poll(cls, context):
tex = context.texture
return tex and tex.type == 'VRAY' and tex.vray.type == ID and ui.engine_poll(cls, context)
def draw(self, context):
wide_ui = context.region.width > ui.narrowui
layout = self.layout
tex= context.texture
TexSwirl= getattr(tex.vray, PLUG)
split= layout.split()
col= split.column()
col.prop(TexSwirl, 'color1', text="")
if wide_ui:
col= split.column()
col.prop(TexSwirl, 'color2', text="")
split= layout.split()
col= split.column(align=True)
col.prop(TexSwirl, 'swirl_amount', text="Amount")
col.prop(TexSwirl, 'swirl_intensity', text="Intensity")
col.prop(TexSwirl, 'color_contrast', text="Color Contrast")
if not wide_ui:
split= layout.split()
col= split.column(align=True)
col.prop(TexSwirl, 'twist')
col.prop(TexSwirl, 'constant_detail')
split= layout.split()
row= split.row(align=True)
row.prop(TexSwirl, 'center_x')
row.prop(TexSwirl, 'center_y')
split= layout.split()
col= split.column()
col.prop(TexSwirl, 'random_seed', text="Seed")
def GetRegClasses():
return (
VRAY_TP_TexSwirl,
)
def register():
for regClass in GetRegClasses():
bpy.utils.register_class(regClass)
def unregister():
for regClass in GetRegClasses():
bpy.utils.unregister_class(regClass)
| [
"[email protected]"
]
| |
498848a1ce67711fa364584705c0f90477f76fb5 | 90e049109be38889523b265d2683a4f29a57da30 | /flink-python/pyflink/table/tests/test_table_environment_api.py | 64080f1e53b36dd5df4f0c09993ae8772e33988c | [
"BSD-3-Clause",
"MIT",
"OFL-1.1",
"ISC",
"Apache-2.0",
"GPL-2.0-only",
"LicenseRef-scancode-public-domain",
"BSD-2-Clause",
"CC-BY-3.0",
"LGPL-2.1-only",
"LicenseRef-scancode-unknown-license-reference",
"LicenseRef-scancode-other-permissive",
"LicenseRef-scancode-proprietary-license",
"LicenseRef-scancode-jdom",
"GCC-exception-3.1",
"EPL-1.0",
"CC-BY-2.5",
"MPL-2.0-no-copyleft-exception",
"LicenseRef-scancode-free-unknown",
"Classpath-exception-2.0",
"CDDL-1.0",
"AGPL-3.0-only",
"CC0-1.0",
"BSD-2-Clause-Views",
"MPL-2.0",
"CC-PDDC",
"MIT-0",
"CDDL-1.1"
]
| permissive | Jasonpengrui/flink | bc7cf1baced87a72a75e2bd0e326a137ed0ab529 | 81a5212cb99b860de9c7384fa14caaa3f5af1c1f | refs/heads/master | 2020-06-10T16:44:23.895203 | 2019-12-09T06:35:08 | 2019-12-09T06:35:08 | 193,673,904 | 0 | 0 | Apache-2.0 | 2019-06-25T09:09:15 | 2019-06-25T09:09:14 | null | UTF-8 | Python | false | false | 10,360 | py | ################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
import datetime
import os
from py4j.compat import unicode
from pyflink.dataset import ExecutionEnvironment
from pyflink.datastream import StreamExecutionEnvironment
from pyflink.table.table_environment import BatchTableEnvironment, StreamTableEnvironment
from pyflink.table.table_config import TableConfig
from pyflink.table.types import DataTypes, RowType
from pyflink.testing import source_sink_utils
from pyflink.testing.test_case_utils import PyFlinkStreamTableTestCase, PyFlinkBatchTableTestCase
class StreamTableEnvironmentTests(PyFlinkStreamTableTestCase):
def test_register_table_source_scan(self):
t_env = self.t_env
field_names = ["a", "b", "c"]
field_types = [DataTypes.BIGINT(), DataTypes.STRING(), DataTypes.STRING()]
source_path = os.path.join(self.tempdir + '/streaming.csv')
csv_source = self.prepare_csv_source(source_path, [], field_types, field_names)
t_env.register_table_source("Source", csv_source)
result = t_env.scan("Source")
self.assertEqual(
'CatalogTable: (path: [default_catalog, default_database, Source], fields: [a, b, c])',
result._j_table.getQueryOperation().asSummaryString())
def test_register_table_sink(self):
t_env = self.t_env
field_names = ["a", "b", "c"]
field_types = [DataTypes.BIGINT(), DataTypes.STRING(), DataTypes.STRING()]
t_env.register_table_sink(
"Sinks",
source_sink_utils.TestAppendSink(field_names, field_types))
t_env.from_elements([(1, "Hi", "Hello")], ["a", "b", "c"]).insert_into("Sinks")
t_env.exec_env().execute()
actual = source_sink_utils.results()
expected = ['1,Hi,Hello']
self.assert_equals(actual, expected)
def test_from_table_source(self):
field_names = ["a", "b", "c"]
field_types = [DataTypes.BIGINT(), DataTypes.STRING(), DataTypes.STRING()]
source_path = os.path.join(self.tempdir + '/streaming.csv')
csv_source = self.prepare_csv_source(source_path, [], field_types, field_names)
result = self.t_env.from_table_source(csv_source)
self.assertEqual(
'TableSource: (fields: [a, b, c])',
result._j_table.getQueryOperation().asSummaryString())
def test_list_tables(self):
source_path = os.path.join(self.tempdir + '/streaming.csv')
field_names = ["a", "b", "c"]
field_types = [DataTypes.INT(), DataTypes.STRING(), DataTypes.STRING()]
data = []
csv_source = self.prepare_csv_source(source_path, data, field_types, field_names)
t_env = self.t_env
t_env.register_table_source("Orders", csv_source)
t_env.register_table_sink(
"Sinks",
source_sink_utils.TestAppendSink(field_names, field_types))
t_env.register_table_sink(
"Results",
source_sink_utils.TestAppendSink(field_names, field_types))
actual = t_env.list_tables()
expected = ['Orders', 'Results', 'Sinks']
self.assert_equals(actual, expected)
def test_explain(self):
schema = RowType()\
.add('a', DataTypes.INT())\
.add('b', DataTypes.STRING())\
.add('c', DataTypes.STRING())
t_env = self.t_env
t = t_env.from_elements([], schema)
result = t.select("1 + a, b, c")
actual = t_env.explain(result)
assert isinstance(actual, str) or isinstance(actual, unicode)
def test_sql_query(self):
t_env = self.t_env
source = t_env.from_elements([(1, "Hi", "Hello"), (2, "Hello", "Hello")], ["a", "b", "c"])
field_names = ["a", "b", "c"]
field_types = [DataTypes.BIGINT(), DataTypes.STRING(), DataTypes.STRING()]
t_env.register_table_sink(
"sinks",
source_sink_utils.TestAppendSink(field_names, field_types))
result = t_env.sql_query("select a + 1, b, c from %s" % source)
result.insert_into("sinks")
t_env.exec_env().execute()
actual = source_sink_utils.results()
expected = ['2,Hi,Hello', '3,Hello,Hello']
self.assert_equals(actual, expected)
def test_sql_update(self):
t_env = self.t_env
source = t_env.from_elements([(1, "Hi", "Hello"), (2, "Hello", "Hello")], ["a", "b", "c"])
field_names = ["a", "b", "c"]
field_types = [DataTypes.BIGINT(), DataTypes.STRING(), DataTypes.STRING()]
t_env.register_table_sink(
"sinks",
source_sink_utils.TestAppendSink(field_names, field_types))
t_env.sql_update("insert into sinks select * from %s" % source)
t_env.exec_env().execute("test_sql_job")
actual = source_sink_utils.results()
expected = ['1,Hi,Hello', '2,Hello,Hello']
self.assert_equals(actual, expected)
def test_sql_update_with_query_config(self):
t_env = self.t_env
source = t_env.from_elements([(1, "Hi", "Hello"), (2, "Hello", "Hello")], ["a", "b", "c"])
field_names = ["a", "b", "c"]
field_types = [DataTypes.BIGINT(), DataTypes.STRING(), DataTypes.STRING()]
t_env.register_table_sink(
"sinks",
source_sink_utils.TestAppendSink(field_names, field_types))
query_config = t_env.query_config()
query_config.with_idle_state_retention_time(
datetime.timedelta(days=1), datetime.timedelta(days=2))
t_env.sql_update("insert into sinks select * from %s" % source, query_config)
t_env.exec_env().execute("test_sql_job")
actual = source_sink_utils.results()
expected = ['1,Hi,Hello', '2,Hello,Hello']
self.assert_equals(actual, expected)
def test_query_config(self):
query_config = self.t_env.query_config()
query_config.with_idle_state_retention_time(
datetime.timedelta(days=1), datetime.timedelta(days=2))
assert query_config.get_max_idle_state_retention_time() == 2 * 24 * 3600 * 1000
assert query_config.get_min_idle_state_retention_time() == 24 * 3600 * 1000
def test_create_table_environment(self):
table_config = TableConfig()
table_config.set_max_generated_code_length(32000)
table_config.set_null_check(False)
table_config.set_timezone("Asia/Shanghai")
table_config.set_built_in_catalog_name("test_catalog")
table_config.set_built_in_database_name("test_database")
env = StreamExecutionEnvironment.get_execution_environment()
t_env = StreamTableEnvironment.create(env, table_config)
        read_table_config = t_env.get_config()
        self.assertFalse(read_table_config.get_null_check())
        self.assertEqual(read_table_config.get_max_generated_code_length(), 32000)
        self.assertEqual(read_table_config.get_timezone(), "Asia/Shanghai")
        self.assertEqual(read_table_config.get_built_in_catalog_name(), "test_catalog")
        self.assertEqual(read_table_config.get_built_in_database_name(), "test_database")
class BatchTableEnvironmentTests(PyFlinkBatchTableTestCase):
def test_explain(self):
        source_path = os.path.join(self.tempdir, 'streaming.csv')
field_names = ["a", "b", "c"]
field_types = [DataTypes.INT(), DataTypes.STRING(), DataTypes.STRING()]
data = []
csv_source = self.prepare_csv_source(source_path, data, field_types, field_names)
t_env = self.t_env
t_env.register_table_source("Source", csv_source)
source = t_env.scan("Source")
result = source.alias("a, b, c").select("1 + a, b, c")
actual = t_env.explain(result)
self.assertIsInstance(actual, (str, unicode))
def test_table_config(self):
table_config = TableConfig()
table_config.set_timezone("Asia/Shanghai")
table_config.set_max_generated_code_length(64000)
table_config.set_null_check(True)
table_config.set_built_in_catalog_name("test_catalog")
table_config.set_built_in_database_name("test_database")
self.assertTrue(table_config.get_null_check())
self.assertEqual(table_config.get_max_generated_code_length(), 64000)
self.assertEqual(table_config.get_timezone(), "Asia/Shanghai")
self.assertEqual(table_config.get_built_in_catalog_name(), "test_catalog")
self.assertEqual(table_config.get_built_in_database_name(), "test_database")
def test_create_table_environment(self):
table_config = TableConfig()
table_config.set_max_generated_code_length(32000)
table_config.set_null_check(False)
table_config.set_timezone("Asia/Shanghai")
table_config.set_built_in_catalog_name("test_catalog")
table_config.set_built_in_database_name("test_database")
env = ExecutionEnvironment.get_execution_environment()
t_env = BatchTableEnvironment.create(env, table_config)
        read_table_config = t_env.get_config()
        self.assertFalse(read_table_config.get_null_check())
        self.assertEqual(read_table_config.get_max_generated_code_length(), 32000)
        self.assertEqual(read_table_config.get_timezone(), "Asia/Shanghai")
        self.assertEqual(read_table_config.get_built_in_catalog_name(), "test_catalog")
        self.assertEqual(read_table_config.get_built_in_database_name(), "test_database")
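# --- Hedged sketch (added for illustration; not part of the original test
# file). It restates the minimal sql_update pipeline the tests above exercise,
# using the same pyflink-1.9-era API already imported here; "demo_sink" and
# "demo_job" are made-up names.
#
# t_env = StreamTableEnvironment.create(
#     StreamExecutionEnvironment.get_execution_environment())
# source = t_env.from_elements([(1, "Hi")], ["a", "b"])
# t_env.register_table_sink(
#     "demo_sink",
#     source_sink_utils.TestAppendSink(
#         ["a", "b"], [DataTypes.BIGINT(), DataTypes.STRING()]))
# t_env.sql_update("insert into demo_sink select * from %s" % source)
# t_env.exec_env().execute("demo_job")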
| [
"[email protected]"
]
| |
c04479133e596d0015f9df6569bf7d2c2283e6d1 | b23c6c02d9b54c987bca2e36c3506cf80fa28239 | /python databse connectivity progs/bind variable.py | a9bf8a8d9dcc71bd722251121197416765b6ba4e | []
| no_license | nishikaverma/Python_progs | 21190c88460a79f5ce20bb25d1b35f732fadd642 | 78f0cadde80b85356b4cb7ba518313094715aaa5 | refs/heads/master | 2022-06-12T14:54:03.442837 | 2020-05-08T10:28:58 | 2020-05-08T10:28:58 | 262,293,571 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 953 | py | import cx_Oracle
conn = None
cur = None
try:
conn=cx_Oracle.connect("system/oracle123@localhost/orcl")
print("connection established")
cur=conn.cursor()
print("cursor created!")
print("***********************")
cur.execute("Select Book_name,Book_price from Books")
for x in cur:
print(x)
print("***********************")
name=input("enter book name : ")
    price=int(input("enter book price : "))
cur.execute("Insert into Books (Book_name,Book_price)values(:1,:2)",(name,price))
n=cur.rowcount
print(n,'rows inserted')
conn.commit()
cur.execute("Select Book_name,Book_price from Books")
for x in cur:
print(x)
print("************************")
except cx_Oracle.DatabaseError as e:
    print("Error in connection: ", e)
finally:
    if cur is not None:
        cur.close()
        print("cursor closed!")
    if conn is not None:
        conn.close()
        print("connection closed!")
| [
"[email protected]"
]
| |
b48a2e29d81c5d7ddbf5cc76cd714fe6c1483872 | 9e27f91194541eb36da07420efa53c5c417e8999 | /twilio/twiml/messaging_response.py | abb58ff2c6d33ad1d66998d8f9520dd3786f329a | []
| no_license | iosmichael/flask-admin-dashboard | 0eeab96add99430828306b691e012ac9beb957ea | 396d687fd9144d3b0ac04d8047ecf726f7c18fbd | refs/heads/master | 2020-03-24T05:55:42.200377 | 2018-09-17T20:33:42 | 2018-09-17T20:33:42 | 142,508,888 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,875 | py | # coding=utf-8
"""
This code was generated by
\ / _ _ _| _ _
| (_)\/(_)(_|\/| |(/_ v1.0.0
/ /
"""
import json
from admin.twilio.twiml import (
TwiML,
format_language,
)
class MessagingResponse(TwiML):
""" <Response> TwiML for Messages """
def __init__(self, **kwargs):
super(MessagingResponse, self).__init__(**kwargs)
self.name = 'Response'
def message(self, body=None, to=None, from_=None, action=None, method=None,
status_callback=None, **kwargs):
"""
Create a <Message> element
:param body: Message Body
:param to: Phone Number to send Message to
:param from: Phone Number to send Message from
:param action: Action URL
:param method: Action URL Method
:param status_callback: Status callback URL. Deprecated in favor of action.
:param kwargs: additional attributes
:returns: <Message> element
"""
return self.nest(Message(
body=body,
to=to,
from_=from_,
action=action,
method=method,
status_callback=status_callback,
**kwargs
))
def redirect(self, url, method=None, **kwargs):
"""
Create a <Redirect> element
:param url: Redirect URL
:param method: Redirect URL method
:param kwargs: additional attributes
:returns: <Redirect> element
"""
return self.nest(Redirect(url, method=method, **kwargs))
class Redirect(TwiML):
""" <Redirect> TwiML Verb """
def __init__(self, url, **kwargs):
super(Redirect, self).__init__(**kwargs)
self.name = 'Redirect'
self.value = url
class Message(TwiML):
""" <Message> TwiML Verb """
def __init__(self, body=None, **kwargs):
super(Message, self).__init__(**kwargs)
self.name = 'Message'
if body:
self.value = body
def body(self, message, **kwargs):
"""
Create a <Body> element
:param message: Message Body
:param kwargs: additional attributes
:returns: <Body> element
"""
return self.nest(Body(message, **kwargs))
def media(self, url, **kwargs):
"""
Create a <Media> element
:param url: Media URL
:param kwargs: additional attributes
:returns: <Media> element
"""
return self.nest(Media(url, **kwargs))
class Media(TwiML):
""" <Media> TwiML Noun """
def __init__(self, url, **kwargs):
super(Media, self).__init__(**kwargs)
self.name = 'Media'
self.value = url
class Body(TwiML):
""" <Body> TwiML Noun """
def __init__(self, message, **kwargs):
super(Body, self).__init__(**kwargs)
self.name = 'Body'
self.value = message
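# --- Hedged usage sketch (added for illustration; follows the public
# twilio-python MessagingResponse API that this vendored module mirrors;
# the media URL is a made-up placeholder) ---
# resp = MessagingResponse()
# msg = resp.message("Hello from TwiML")        # nests a <Message> verb
# msg.media("https://example.com/pic.png")      # nests a <Media> noun
# str(resp)  # serializes to '<Response><Message>...</Message></Response>' XML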
| [
"[email protected]"
]
| |
cbd142b626698fe1debd6ecef0822cc0d7b13f7f | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_150/ch50_2020_04_13_03_25_44_929209.py | a262c1522f55ac719f56e8c2e06b6e69fde73ed5 | []
| no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 230 | py | def junta_nome_sobrenome(nome, sobrenome):
nome_e_sobrenome = []
i = 0
while i < len(nome) and i < len(sobrenome):
nome_e_sobrenome.append(nome[i] + ' ' +sobrenome[i])
i += 1
return nome_e_sobrenome | [
"[email protected]"
]
| |
45f343096530fa01c5f2708f14403031fa6baa1f | 5332fef91e044555e605bb37cbef7c4afeaaadb0 | /hy-data-analysis-with-python-2020/part06-e07_binding_sites/src/binding_sites.py | 6baad43f425d059dd9d258e457e1d88a1b708b0e | []
| no_license | nopomi/hy-data-analysis-python-2019 | f3baa96bbe9b6ee7f0b3e6f6b8b0f3adfc3b6cc8 | 464685cb377cfdeee890a008fbfbd9ed6e3bcfd0 | refs/heads/master | 2021-07-10T16:16:56.592448 | 2020-08-16T18:27:38 | 2020-08-16T18:27:38 | 185,044,621 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,379 | py | #!/usr/bin/env python3
import pandas as pd
import numpy as np
import scipy.stats  # scipy.stats.mode is used in find_permutation below
from sklearn.cluster import AgglomerativeClustering
from sklearn.metrics import accuracy_score
from sklearn.metrics import pairwise_distances
from matplotlib import pyplot as plt
import seaborn as sns
sns.set(color_codes=True)
import scipy.spatial as sp
import scipy.cluster.hierarchy as hc
def find_permutation(n_clusters, real_labels, labels):
permutation=[]
for i in range(n_clusters):
idx = labels == i
# Choose the most common label among data points in the cluster
new_label=scipy.stats.mode(real_labels[idx])[0][0]
permutation.append(new_label)
return permutation
def toint(x):
return 'ACGT'.find(x)
def get_features_and_labels(filename):
df = pd.read_csv(filename, sep="\t")
X = [[toint(c) for c in s] for s in df["X"]]
return (np.array(X), np.array(df["y"]))
def plot(distances, method='average', affinity='euclidean'):
mylinkage = hc.linkage(sp.distance.squareform(distances), method=method)
g=sns.clustermap(distances, row_linkage=mylinkage, col_linkage=mylinkage )
g.fig.suptitle(f"Hierarchical clustering using {method} linkage and {affinity} affinity")
plt.show()
def cluster_euclidean(filename):
X, y = get_features_and_labels(filename)
model = AgglomerativeClustering(linkage="average", affinity="euclidean")
model.fit(X)
permutation = find_permutation(2, y, model.labels_)
new_labels = [permutation[label] for label in model.labels_]
score = accuracy_score(y, new_labels)
distances=pairwise_distances(X, metric="euclidean")
#plot(distances)
return score
def cluster_hamming(filename):
X, y = get_features_and_labels(filename)
distances = pairwise_distances(X, metric="hamming")
model = AgglomerativeClustering(affinity="precomputed", linkage="average")
model.fit_predict(distances)
permutation = find_permutation(2, y, model.labels_)
new_labels = [permutation[label] for label in model.labels_]
score = accuracy_score(y, new_labels)
#plot(distances, method="average", affinity="hamming")
return score
def main():
print("Accuracy score with Euclidean affinity is", cluster_euclidean("src/data.seq"))
print("Accuracy score with Hamming affinity is", cluster_hamming("src/data.seq"))
if __name__ == "__main__":
main()
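# --- Hedged sketch (added for illustration; toy arrays, not values derived
# from data.seq). Shows how find_permutation remaps arbitrary cluster ids
# onto the majority true labels before scoring. Not called anywhere.
def _demo_find_permutation():
    real_labels = np.array([1, 1, 0, 0])
    labels = np.array([0, 0, 1, 1])  # cluster ids from a clustering model
    permutation = find_permutation(2, real_labels, labels)  # -> [1, 0]
    new_labels = [permutation[label] for label in labels]   # -> [1, 1, 0, 0]
    return accuracy_score(real_labels, new_labels)          # -> 1.0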
| [
"[email protected]"
]
| |
2ee2ccec5dbf7843302c65bae409bb7fdcc29b2a | 09e57dd1374713f06b70d7b37a580130d9bbab0d | /benchmark/startQiskit_noisy3325.py | ba3c7f7f745af20e6283d8398fd4aeb577461651 | [
"BSD-3-Clause"
]
| permissive | UCLA-SEAL/QDiff | ad53650034897abb5941e74539e3aee8edb600ab | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | refs/heads/main | 2023-08-05T04:52:24.961998 | 2021-09-19T02:56:16 | 2021-09-19T02:56:16 | 405,159,939 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,323 | py | # qubit number=4
# total number=44
import cirq
import qiskit
from qiskit.providers.aer import QasmSimulator
from qiskit.test.mock import FakeVigo
from qiskit import QuantumCircuit, QuantumRegister, ClassicalRegister
from qiskit import BasicAer, execute, transpile
from pprint import pprint
from qiskit.test.mock import FakeVigo
from math import log2
import numpy as np
import networkx as nx
def bitwise_xor(s: str, t: str) -> str:
length = len(s)
res = []
for i in range(length):
res.append(str(int(s[i]) ^ int(t[i])))
return ''.join(res[::-1])
def bitwise_dot(s: str, t: str) -> str:
length = len(s)
res = 0
for i in range(length):
res += int(s[i]) * int(t[i])
return str(res % 2)
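# Worked examples for the helpers above (added comments, not original code):
# bitwise_xor("101", "011"): per-index xor gives ['1', '1', '0'], and the
#   join reverses the list, so the result is "011".
# bitwise_dot("101", "011"): (1*0 + 0*1 + 1*1) % 2 == 1, returned as "1".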
def build_oracle(n: int, f) -> QuantumCircuit:
# implement the oracle O_f
# NOTE: use multi_control_toffoli_gate ('noancilla' mode)
# https://qiskit.org/documentation/_modules/qiskit/aqua/circuits/gates/multi_control_toffoli_gate.html
# https://quantumcomputing.stackexchange.com/questions/3943/how-do-you-implement-the-toffoli-gate-using-only-single-qubit-and-cnot-gates
# https://quantumcomputing.stackexchange.com/questions/2177/how-can-i-implement-an-n-bit-toffoli-gate
controls = QuantumRegister(n, "ofc")
target = QuantumRegister(1, "oft")
oracle = QuantumCircuit(controls, target, name="Of")
for i in range(2 ** n):
rep = np.binary_repr(i, n)
if f(rep) == "1":
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
oracle.mct(controls, target[0], None, mode='noancilla')
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
# oracle.barrier()
return oracle
def make_circuit(n:int,f) -> QuantumCircuit:
# circuit begin
input_qubit = QuantumRegister(n,"qc")
classical = ClassicalRegister(n, "qm")
prog = QuantumCircuit(input_qubit, classical)
prog.cx(input_qubit[0],input_qubit[3]) # number=13
prog.h(input_qubit[3]) # number=23
prog.cz(input_qubit[0],input_qubit[3]) # number=24
prog.y(input_qubit[1]) # number=37
prog.h(input_qubit[3]) # number=25
prog.x(input_qubit[3]) # number=18
prog.cx(input_qubit[3],input_qubit[1]) # number=40
prog.cx(input_qubit[0],input_qubit[3]) # number=19
prog.cx(input_qubit[0],input_qubit[3]) # number=15
prog.h(input_qubit[1]) # number=2
prog.h(input_qubit[2]) # number=3
prog.h(input_qubit[3]) # number=4
prog.y(input_qubit[3]) # number=12
prog.h(input_qubit[0]) # number=5
oracle = build_oracle(n-1, f)
prog.append(oracle.to_gate(),[input_qubit[i] for i in range(n-1)]+[input_qubit[n-1]])
prog.h(input_qubit[1]) # number=6
prog.h(input_qubit[2]) # number=7
prog.h(input_qubit[3]) # number=32
prog.h(input_qubit[0]) # number=41
prog.cz(input_qubit[3],input_qubit[0]) # number=42
prog.h(input_qubit[0]) # number=43
prog.cx(input_qubit[3],input_qubit[0]) # number=26
prog.z(input_qubit[3]) # number=27
prog.h(input_qubit[0]) # number=29
prog.cz(input_qubit[3],input_qubit[0]) # number=30
prog.h(input_qubit[0]) # number=31
prog.h(input_qubit[0]) # number=33
prog.cz(input_qubit[3],input_qubit[0]) # number=34
prog.h(input_qubit[0]) # number=35
prog.h(input_qubit[2]) # number=36
prog.h(input_qubit[3]) # number=8
prog.h(input_qubit[0]) # number=9
prog.y(input_qubit[2]) # number=10
prog.y(input_qubit[2]) # number=11
prog.y(input_qubit[2]) # number=38
prog.y(input_qubit[2]) # number=39
# circuit end
for i in range(n):
prog.measure(input_qubit[i], classical[i])
return prog
if __name__ == '__main__':
a = "111"
b = "0"
f = lambda rep: bitwise_xor(bitwise_dot(a, rep), b)
prog = make_circuit(4,f)
backend = FakeVigo()
    sample_shot = 8000
info = execute(prog, backend=backend, shots=sample_shot).result().get_counts()
backend = FakeVigo()
circuit1 = transpile(prog,backend,optimization_level=2)
writefile = open("../data/startQiskit_noisy3325.csv","w")
print(info,file=writefile)
print("results end", file=writefile)
print(circuit1.__len__(),file=writefile)
print(circuit1,file=writefile)
writefile.close()
| [
"[email protected]"
]
| |
deaa0857f040e4558c3a3aa27b0b1ff32bf995cc | 487ce91881032c1de16e35ed8bc187d6034205f7 | /codes/CodeJamCrawler/CJ_16_1/16_1_3_ka_ya_c.py | 7735ad455887347c1c5a1e4c3582e3531bafa93a | []
| no_license | DaHuO/Supergraph | 9cd26d8c5a081803015d93cf5f2674009e92ef7e | c88059dc66297af577ad2b8afa4e0ac0ad622915 | refs/heads/master | 2021-06-14T16:07:52.405091 | 2016-08-21T13:39:13 | 2016-08-21T13:39:13 | 49,829,508 | 2 | 0 | null | 2021-03-19T21:55:46 | 2016-01-17T18:23:00 | Python | UTF-8 | Python | false | false | 1,141 | py | def solve(n, fs):
fs = [f-1 for f in fs]
lp = [None for p in xrange(n)]
for i in xrange(n):
chk = [False for p in xrange(n)]
p = i
cnt = 0
while not chk[p] and not lp[p]:
chk[p] = True
p = fs[p]
cnt += 1
if p == i:
while not lp[p]:
lp[p] = (cnt, 0)
p = fs[p]
for i in xrange(n):
p = i
cnt = 0
while not lp[p]:
p = fs[p]
cnt += 1
l, b = lp[p]
if cnt > b:
lp[p] = (l, cnt)
res = 0
tmp = 0
for i in xrange(n):
if lp[i]:
l, b = lp[i]
if l == 2:
j = fs[i]
_, bj = lp[j]
tmp += l + b + bj
else:
if l > res:
res = l
if tmp / 2 > res:
res = tmp / 2
return res
T = input()
for i in xrange(1, T+1):
N = input()
Fs = map(int, raw_input().split())
print 'Case #{}: {}'.format(i, solve(N, Fs))
| [
"[[email protected]]"
]
| |
9646ac4cc55d9a5e30e41d7546f3ca1df7b888f9 | f0d9ba8456cdad2b2fa711fa8975b41da7af1784 | /worms/tests/__init__.py | 2b9503765bab2d60bb03f655ddf70c5209239ab5 | [
"Apache-2.0"
]
| permissive | willsheffler/worms | f1d893d4f06b421abdd4d1e526b43c2e132e19a2 | 27993e33a43474d647ecd8277b210d4206858f0b | refs/heads/master | 2023-04-08T01:18:33.656774 | 2022-06-09T20:04:55 | 2022-06-09T20:04:55 | 118,678,808 | 6 | 5 | NOASSERTION | 2021-10-05T22:28:24 | 2018-01-23T22:30:45 | Python | UTF-8 | Python | false | false | 670 | py | # -*- coding: utf-8 -*-
"""Unit test package for worms."""
import os
import pytest
try:
import pyrosetta
HAVE_PYROSETTA = True
only_if_pyrosetta = lambda x: x
try:
import pyrosetta.distributed
HAVE_PYROSETTA_DISTRIBUTED = True
only_if_pyrosetta_distributed = lambda x: x
except ImportError:
HAVE_PYROSETTA_DISTRIBUTED = False
only_if_pyrosetta_distributed = pytest.mark.skip
except ImportError:
HAVE_PYROSETTA = HAVE_PYROSETTA_DISTRIBUTED = False
only_if_pyrosetta = only_if_pyrosetta_distributed = pytest.mark.skip
only_if_jit = lambda x: x
if "NUMBA_DISABLE_JIT" in os.environ:
only_if_jit = pytest.mark.skip
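# --- Hedged usage sketch (added for illustration): the markers above are
# meant to decorate tests so they are skipped when an optional dependency
# (pyrosetta, pyrosetta.distributed, or numba JIT) is unavailable.
#
# @only_if_pyrosetta
# def test_something_with_rosetta():
#     ...
#
# @only_if_jit
# def test_something_jit_compiled():
#     ...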
| [
"[email protected]"
]
| |
e25fd776db4cf8dfcdb7f6e854d3db92deb6dbc6 | 00da73f35308b860ef9a3c6eb6cdaf8c89608f57 | /deps/requests/adapters.py | cdaabdbee6f16c829f051891b4fe6ff7b718df96 | [
"MIT"
]
| permissive | kylebebak/Requester | 32abf8a56ba0e9e42fdd25b13ce48d40a87f20e0 | 7f177bc417c45fd1792c6020543a4c6909e3ea21 | refs/heads/master | 2022-07-17T11:09:30.238568 | 2022-05-05T17:31:48 | 2022-05-05T17:38:56 | 89,746,594 | 333 | 16 | MIT | 2021-02-23T14:43:12 | 2017-04-28T21:37:08 | Python | UTF-8 | Python | false | false | 20,880 | py | # -*- coding: utf-8 -*-
"""
requests.adapters
~~~~~~~~~~~~~~~~~
This module contains the transport adapters that Requests uses to define
and maintain connections.
"""
import os.path
import socket
from urllib3.poolmanager import PoolManager, proxy_from_url
from urllib3.response import HTTPResponse
from urllib3.util import Timeout as TimeoutSauce
from urllib3.util.retry import Retry
from urllib3.exceptions import ClosedPoolError
from urllib3.exceptions import ConnectTimeoutError
from urllib3.exceptions import HTTPError as _HTTPError
from urllib3.exceptions import MaxRetryError
from urllib3.exceptions import NewConnectionError
from urllib3.exceptions import ProxyError as _ProxyError
from urllib3.exceptions import ProtocolError
from urllib3.exceptions import ReadTimeoutError
from urllib3.exceptions import SSLError as _SSLError
from urllib3.exceptions import ResponseError
from .models import Response
from .compat import urlparse, basestring
from .utils import (DEFAULT_CA_BUNDLE_PATH, extract_zipped_paths,
get_encoding_from_headers, prepend_scheme_if_needed,
get_auth_from_url, urldefragauth, select_proxy)
from .structures import CaseInsensitiveDict
from .cookies import extract_cookies_to_jar
from .exceptions import (ConnectionError, ConnectTimeout, ReadTimeout, SSLError,
ProxyError, RetryError, InvalidSchema)
from .auth import _basic_auth_str
try:
from urllib3.contrib.socks import SOCKSProxyManager
except ImportError:
def SOCKSProxyManager(*args, **kwargs):
raise InvalidSchema("Missing dependencies for SOCKS support.")
DEFAULT_POOLBLOCK = False
DEFAULT_POOLSIZE = 10
DEFAULT_RETRIES = 0
DEFAULT_POOL_TIMEOUT = None
class BaseAdapter(object):
"""The Base Transport Adapter"""
def __init__(self):
super(BaseAdapter, self).__init__()
def send(self, request, stream=False, timeout=None, verify=True,
cert=None, proxies=None):
"""Sends PreparedRequest object. Returns Response object.
:param request: The :class:`PreparedRequest <PreparedRequest>` being sent.
:param stream: (optional) Whether to stream the request content.
:param timeout: (optional) How long to wait for the server to send
data before giving up, as a float, or a :ref:`(connect timeout,
read timeout) <timeouts>` tuple.
:type timeout: float or tuple
:param verify: (optional) Either a boolean, in which case it controls whether we verify
the server's TLS certificate, or a string, in which case it must be a path
to a CA bundle to use
:param cert: (optional) Any user-provided SSL certificate to be trusted.
:param proxies: (optional) The proxies dictionary to apply to the request.
"""
raise NotImplementedError
def close(self):
"""Cleans up adapter specific items."""
raise NotImplementedError
class HTTPAdapter(BaseAdapter):
"""The built-in HTTP Adapter for urllib3.
Provides a general-case interface for Requests sessions to contact HTTP and
HTTPS urls by implementing the Transport Adapter interface. This class will
usually be created by the :class:`Session <Session>` class under the
covers.
:param pool_connections: The number of urllib3 connection pools to cache.
:param pool_maxsize: The maximum number of connections to save in the pool.
:param max_retries: The maximum number of retries each connection
should attempt. Note, this applies only to failed DNS lookups, socket
connections and connection timeouts, never to requests where data has
made it to the server. By default, Requests does not retry failed
connections. If you need granular control over the conditions under
which we retry a request, import urllib3's ``Retry`` class and pass
that instead.
:param pool_block: Whether the connection pool should block for connections.
Usage::
>>> import requests
>>> s = requests.Session()
>>> a = requests.adapters.HTTPAdapter(max_retries=3)
>>> s.mount('http://', a)
"""
__attrs__ = ['max_retries', 'config', '_pool_connections', '_pool_maxsize',
'_pool_block']
def __init__(self, pool_connections=DEFAULT_POOLSIZE,
pool_maxsize=DEFAULT_POOLSIZE, max_retries=DEFAULT_RETRIES,
pool_block=DEFAULT_POOLBLOCK):
if max_retries == DEFAULT_RETRIES:
self.max_retries = Retry(0, read=False)
else:
self.max_retries = Retry.from_int(max_retries)
self.config = {}
self.proxy_manager = {}
super(HTTPAdapter, self).__init__()
self._pool_connections = pool_connections
self._pool_maxsize = pool_maxsize
self._pool_block = pool_block
self.init_poolmanager(pool_connections, pool_maxsize, block=pool_block)
def __getstate__(self):
return dict((attr, getattr(self, attr, None)) for attr in
self.__attrs__)
def __setstate__(self, state):
# Can't handle by adding 'proxy_manager' to self.__attrs__ because
# self.poolmanager uses a lambda function, which isn't pickleable.
self.proxy_manager = {}
self.config = {}
for attr, value in state.items():
setattr(self, attr, value)
self.init_poolmanager(self._pool_connections, self._pool_maxsize,
block=self._pool_block)
def init_poolmanager(self, connections, maxsize, block=DEFAULT_POOLBLOCK, **pool_kwargs):
"""Initializes a urllib3 PoolManager.
This method should not be called from user code, and is only
exposed for use when subclassing the
:class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.
:param connections: The number of urllib3 connection pools to cache.
:param maxsize: The maximum number of connections to save in the pool.
:param block: Block when no free connections are available.
:param pool_kwargs: Extra keyword arguments used to initialize the Pool Manager.
"""
# save these values for pickling
self._pool_connections = connections
self._pool_maxsize = maxsize
self._pool_block = block
self.poolmanager = PoolManager(num_pools=connections, maxsize=maxsize,
block=block, strict=True, **pool_kwargs)
def proxy_manager_for(self, proxy, **proxy_kwargs):
"""Return urllib3 ProxyManager for the given proxy.
This method should not be called from user code, and is only
exposed for use when subclassing the
:class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.
:param proxy: The proxy to return a urllib3 ProxyManager for.
:param proxy_kwargs: Extra keyword arguments used to configure the Proxy Manager.
:returns: ProxyManager
:rtype: urllib3.ProxyManager
"""
if proxy in self.proxy_manager:
manager = self.proxy_manager[proxy]
elif proxy.lower().startswith('socks'):
username, password = get_auth_from_url(proxy)
manager = self.proxy_manager[proxy] = SOCKSProxyManager(
proxy,
username=username,
password=password,
num_pools=self._pool_connections,
maxsize=self._pool_maxsize,
block=self._pool_block,
**proxy_kwargs
)
else:
proxy_headers = self.proxy_headers(proxy)
manager = self.proxy_manager[proxy] = proxy_from_url(
proxy,
proxy_headers=proxy_headers,
num_pools=self._pool_connections,
maxsize=self._pool_maxsize,
block=self._pool_block,
**proxy_kwargs)
return manager
def cert_verify(self, conn, url, verify, cert):
"""Verify a SSL certificate. This method should not be called from user
code, and is only exposed for use when subclassing the
:class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.
:param conn: The urllib3 connection object associated with the cert.
:param url: The requested URL.
:param verify: Either a boolean, in which case it controls whether we verify
the server's TLS certificate, or a string, in which case it must be a path
to a CA bundle to use
:param cert: The SSL certificate to verify.
"""
if url.lower().startswith('https') and verify:
cert_loc = None
# Allow self-specified cert location.
if verify is not True:
cert_loc = verify
if not cert_loc:
cert_loc = extract_zipped_paths(DEFAULT_CA_BUNDLE_PATH)
if not cert_loc or not os.path.exists(cert_loc):
raise IOError("Could not find a suitable TLS CA certificate bundle, "
"invalid path: {0}".format(cert_loc))
conn.cert_reqs = 'CERT_REQUIRED'
if not os.path.isdir(cert_loc):
conn.ca_certs = cert_loc
else:
conn.ca_cert_dir = cert_loc
else:
conn.cert_reqs = 'CERT_NONE'
conn.ca_certs = None
conn.ca_cert_dir = None
if cert:
if not isinstance(cert, basestring):
conn.cert_file = cert[0]
conn.key_file = cert[1]
else:
conn.cert_file = cert
conn.key_file = None
if conn.cert_file and not os.path.exists(conn.cert_file):
raise IOError("Could not find the TLS certificate file, "
"invalid path: {0}".format(conn.cert_file))
if conn.key_file and not os.path.exists(conn.key_file):
raise IOError("Could not find the TLS key file, "
"invalid path: {0}".format(conn.key_file))
def build_response(self, req, resp):
"""Builds a :class:`Response <requests.Response>` object from a urllib3
response. This should not be called from user code, and is only exposed
for use when subclassing the
:class:`HTTPAdapter <requests.adapters.HTTPAdapter>`
:param req: The :class:`PreparedRequest <PreparedRequest>` used to generate the response.
:param resp: The urllib3 response object.
:rtype: requests.Response
"""
response = Response()
# Fallback to None if there's no status_code, for whatever reason.
response.status_code = getattr(resp, 'status', None)
# Make headers case-insensitive.
response.headers = CaseInsensitiveDict(getattr(resp, 'headers', {}))
# Set encoding.
response.encoding = get_encoding_from_headers(response.headers)
response.raw = resp
response.reason = response.raw.reason
if isinstance(req.url, bytes):
response.url = req.url.decode('utf-8')
else:
response.url = req.url
# Add new cookies from the server.
extract_cookies_to_jar(response.cookies, req, resp)
# Give the Response some context.
response.request = req
response.connection = self
return response
def get_connection(self, url, proxies=None):
"""Returns a urllib3 connection for the given URL. This should not be
called from user code, and is only exposed for use when subclassing the
:class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.
:param url: The URL to connect to.
:param proxies: (optional) A Requests-style dictionary of proxies used on this request.
:rtype: urllib3.ConnectionPool
"""
proxy = select_proxy(url, proxies)
if proxy:
proxy = prepend_scheme_if_needed(proxy, 'http')
proxy_manager = self.proxy_manager_for(proxy)
conn = proxy_manager.connection_from_url(url)
else:
# Only scheme should be lower case
parsed = urlparse(url)
url = parsed.geturl()
conn = self.poolmanager.connection_from_url(url)
return conn
def close(self):
"""Disposes of any internal state.
Currently, this closes the PoolManager and any active ProxyManager,
which closes any pooled connections.
"""
self.poolmanager.clear()
for proxy in self.proxy_manager.values():
proxy.clear()
def request_url(self, request, proxies):
"""Obtain the url to use when making the final request.
If the message is being sent through a HTTP proxy, the full URL has to
be used. Otherwise, we should only use the path portion of the URL.
This should not be called from user code, and is only exposed for use
when subclassing the
:class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.
:param request: The :class:`PreparedRequest <PreparedRequest>` being sent.
:param proxies: A dictionary of schemes or schemes and hosts to proxy URLs.
:rtype: str
"""
proxy = select_proxy(request.url, proxies)
scheme = urlparse(request.url).scheme
is_proxied_http_request = (proxy and scheme != 'https')
using_socks_proxy = False
if proxy:
proxy_scheme = urlparse(proxy).scheme.lower()
using_socks_proxy = proxy_scheme.startswith('socks')
url = request.path_url
if is_proxied_http_request and not using_socks_proxy:
url = urldefragauth(request.url)
return url
def add_headers(self, request, **kwargs):
"""Add any headers needed by the connection. As of v2.0 this does
nothing by default, but is left for overriding by users that subclass
the :class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.
This should not be called from user code, and is only exposed for use
when subclassing the
:class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.
:param request: The :class:`PreparedRequest <PreparedRequest>` to add headers to.
:param kwargs: The keyword arguments from the call to send().
"""
pass
def proxy_headers(self, proxy):
"""Returns a dictionary of the headers to add to any request sent
through a proxy. This works with urllib3 magic to ensure that they are
correctly sent to the proxy, rather than in a tunnelled request if
CONNECT is being used.
This should not be called from user code, and is only exposed for use
when subclassing the
:class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.
        :param proxy: The url of the proxy being used for this request.
:rtype: dict
"""
headers = {}
username, password = get_auth_from_url(proxy)
if username:
headers['Proxy-Authorization'] = _basic_auth_str(username,
password)
return headers
def send(self, request, stream=False, timeout=None, verify=True, cert=None, proxies=None):
"""Sends PreparedRequest object. Returns Response object.
:param request: The :class:`PreparedRequest <PreparedRequest>` being sent.
:param stream: (optional) Whether to stream the request content.
:param timeout: (optional) How long to wait for the server to send
data before giving up, as a float, or a :ref:`(connect timeout,
read timeout) <timeouts>` tuple.
:type timeout: float or tuple or urllib3 Timeout object
:param verify: (optional) Either a boolean, in which case it controls whether
we verify the server's TLS certificate, or a string, in which case it
must be a path to a CA bundle to use
:param cert: (optional) Any user-provided SSL certificate to be trusted.
:param proxies: (optional) The proxies dictionary to apply to the request.
:rtype: requests.Response
"""
conn = self.get_connection(request.url, proxies)
self.cert_verify(conn, request.url, verify, cert)
url = self.request_url(request, proxies)
self.add_headers(request)
chunked = not (request.body is None or 'Content-Length' in request.headers)
if isinstance(timeout, tuple):
try:
connect, read = timeout
timeout = TimeoutSauce(connect=connect, read=read)
except ValueError as e:
# this may raise a string formatting error.
err = ("Invalid timeout {0}. Pass a (connect, read) "
"timeout tuple, or a single float to set "
"both timeouts to the same value".format(timeout))
raise ValueError(err)
elif isinstance(timeout, TimeoutSauce):
pass
else:
timeout = TimeoutSauce(connect=timeout, read=timeout)
try:
if not chunked:
resp = conn.urlopen(
method=request.method,
url=url,
body=request.body,
headers=request.headers,
redirect=False,
assert_same_host=False,
preload_content=False,
decode_content=False,
retries=self.max_retries,
timeout=timeout
)
# Send the request.
else:
if hasattr(conn, 'proxy_pool'):
conn = conn.proxy_pool
low_conn = conn._get_conn(timeout=DEFAULT_POOL_TIMEOUT)
try:
low_conn.putrequest(request.method,
url,
skip_accept_encoding=True)
for header, value in request.headers.items():
low_conn.putheader(header, value)
low_conn.endheaders()
for i in request.body:
low_conn.send(hex(len(i))[2:].encode('utf-8'))
low_conn.send(b'\r\n')
low_conn.send(i)
low_conn.send(b'\r\n')
low_conn.send(b'0\r\n\r\n')
# Receive the response from the server
try:
# For Python 2.7+ versions, use buffering of HTTP
# responses
r = low_conn.getresponse(buffering=True)
except TypeError:
# For compatibility with Python 2.6 versions and back
r = low_conn.getresponse()
resp = HTTPResponse.from_httplib(
r,
pool=conn,
connection=low_conn,
preload_content=False,
decode_content=False
)
except:
# If we hit any problems here, clean up the connection.
# Then, reraise so that we can handle the actual exception.
low_conn.close()
raise
except (ProtocolError, socket.error) as err:
raise ConnectionError(err, request=request)
except MaxRetryError as e:
if isinstance(e.reason, ConnectTimeoutError):
# TODO: Remove this in 3.0.0: see #2811
if not isinstance(e.reason, NewConnectionError):
raise ConnectTimeout(e, request=request)
if isinstance(e.reason, ResponseError):
raise RetryError(e, request=request)
if isinstance(e.reason, _ProxyError):
raise ProxyError(e, request=request)
if isinstance(e.reason, _SSLError):
# This branch is for urllib3 v1.22 and later.
raise SSLError(e, request=request)
raise ConnectionError(e, request=request)
except ClosedPoolError as e:
raise ConnectionError(e, request=request)
except _ProxyError as e:
raise ProxyError(e)
except (_SSLError, _HTTPError) as e:
if isinstance(e, _SSLError):
# This branch is for urllib3 versions earlier than v1.22
raise SSLError(e, request=request)
elif isinstance(e, ReadTimeoutError):
raise ReadTimeout(e, request=request)
else:
raise
return self.build_response(request, resp)
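# --- Hedged usage sketch (added for illustration; a common pattern built on
# the Transport Adapter interface documented above, not part of this module).
# A per-session default timeout via subclassing, with the 5-second value
# chosen arbitrarily:
#
# class TimeoutAdapter(HTTPAdapter):
#     def send(self, request, **kwargs):
#         if kwargs.get('timeout') is None:
#             kwargs['timeout'] = 5
#         return super(TimeoutAdapter, self).send(request, **kwargs)
#
# import requests
# session = requests.Session()
# session.mount('https://', TimeoutAdapter(max_retries=3))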
| [
"[email protected]"
]
| |
297467e64e5b45612d4fe55253b3388b8442f79f | 770d4df866b9e66a333f3ffeacdd659b8553923a | /results/0193/config.py | fbbe800c6116da5429a209d219fc7846de53d1e2 | []
| no_license | leojo/ResultsOverview | b2062244cbd81bc06b99963ae9b1695fa9718f90 | a396abc7a5b4ab257150c0d37c40b646ebb13fcf | refs/heads/master | 2020-03-20T19:52:37.217926 | 2018-08-05T12:50:27 | 2018-08-05T12:50:27 | 137,656,327 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,322 | py | import os
import numpy as np
import waveUtils
class config(object):
def __init__(self):
self.prepare_data()
# Bsub arguments
bsub_mainfile = "main.py"
bsub_processors = 4
bsub_timeout = "4:00"
bsub_memory = 8000
# Epoch and batch config
batch_size = 128
latent_dim = 100
epochs = 100
epoch_updates = 100
# Network structure
input_s = 16000
n_ae = 5
n_conv_layers = 3
n_deconv_layers = 3
first_size = input_s // (2 ** n_deconv_layers)
final_decoder_filter_size = 3
# Model
load_model = False
model_path = os.path.join("models", "0103", "model") # only used if load_model=True
# Miscellaneous constants
sample_rate = 8000
reconstruction_mult = 1
learning_rate_min = 1e-3
learning_rate_max = 1e-3
    learning_rate_scaling_factor = 0  # controls the shape of the scaling curve from max to min learning rate
learning_rate = 1e-3 # legacy
kl_loss_mult = 1e-7
kl_extra_mult = 2
kl_extra_exponent = 2
keep_prob = 1
use_square = False
data_sources = ["sax-baritone","violin"]
data = None
# Functions
def prepare_data(self):
self.load_data()
def load_and_prepare_audio(self, source):
duration = self.input_s / float(self.sample_rate)
data_dir = os.path.join("wav_files", source)
waves, original_sample_rate = waveUtils.loadAudioFiles(data_dir)
cut_data = waveUtils.extractHighestMeanIntensities(waves, sample_rate=original_sample_rate, duration=duration)
del waves
data = waveUtils.reduceQuality(cut_data, self.sample_rate, duration)
del cut_data
return data
def load_data(self):
if self.data is None:
self.data = [self.load_and_prepare_audio(source) for source in self.data_sources]
def get_training_batch(self):
samples = []
originals = []
num_sources = len(self.data_sources)
sample_shape = self.data[0][0].shape
for _ in range(self.batch_size):
waves = []
sample = np.zeros(sample_shape)
for s in range(num_sources):
i = np.random.randint(len(self.data[s]))
wave = self.data[s][i]
waves.append(wave)
sample += wave
sample = sample/num_sources
samples.append(sample)
originals.append(waves)
samples = np.asarray(samples)
originals = np.asarray(originals)
return samples, originals
def normalize_batch(self, batch):
x = batch.astype(np.float32)
return x / np.max(np.abs(x))
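# --- Hedged worked example (added for illustration; synthetic numbers, not
# audio from wav_files). normalize_batch rescales by the global max |value|:
# cfg.normalize_batch(np.array([[0., -2., 4.]])) -> [[0., -0.5, 1.]]
# and get_training_batch mixes one clip per source by plain averaging:
# sample = (sax_wave + violin_wave) / 2 for the two configured data_sources.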
| [
"[email protected]"
]
| |
661cac8acf0eadfcb8a1d63605e97bdbdb2e9740 | 2652fd6261631794535589427a384693365a585e | /trunk/workspace/Squish/src/TestScript/UI/suite_UI_62/tst_UI_62_Cellular_design/test.py | 4b116d08c137cfe84f4e37aea4edc7de3cf116e4 | []
| no_license | ptqatester1/ptqa | 88c652380167f64a953bfd7a65041e7d8ac48c90 | 5b5997ea459e9aac17db8da2041e2af331927104 | refs/heads/master | 2021-01-21T19:06:49.275364 | 2017-06-19T03:15:00 | 2017-06-19T03:15:00 | 92,115,462 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,323 | py | ######################
#Author: Alex Leung ##
######################
from API.Utility import UtilConst
from API.Utility.Util import Util
from API.ComponentBox import ComponentBoxConst
from API.Device.EndDevice.PC.PC import PC
from API.Device.CellTower.CellTower import CellTower
from API.Device.COServer.COServer import COServer
from API.Toolbar.GoldenPhysicalToolbar.GoldenPhysicalToolbarConst import GoldenPhysicalToolbarConst
from API.Toolbar.GoldenPhysicalToolbar.GoldenPhysicalToolbar import GoldenPhysicalToolbar
from API.SimulationPanel.EventList.EventList import EventList
from API.SimulationPanel.PlayControls.PlayControls import PlayControls
from API.functions import check
from API.Workspace.Physical import Physical
from API.Device.DeviceBase.ServicesBase.ServicesBaseConst import ServicesConst
#function initialization
util = Util()
pda0 = PC(ComponentBoxConst.DeviceModel.PDA, 200, 100, "Pda0")
pda1 = PC(ComponentBoxConst.DeviceModel.PDA, 200, 200, "Pda1")
ct = CellTower(ComponentBoxConst.DeviceModel.CELL_TOWER, 100, 100, "Cell Tower0")
cos = COServer(ComponentBoxConst.DeviceModel.CO_SERVER, 100, 200, "Central OfficeServer0")
gpt = GoldenPhysicalToolbar()
gptc = GoldenPhysicalToolbarConst()
def main():
util.init()
maketop()
checksettings()
movephysical()
def maketop():
pda0.create()
pda1.create()
ct.create()
cos.create()
ct.connect(cos, ComponentBoxConst.Connection.CONN_COAXIAL, "Coaxial0", "Coaxial0/0")
util.speedUpConvergence()
def checksettings():
ct.select()
ct.clickConfigTab()
ct.close()
cos.select()
cos.clickConfigTab()
cos.config.selectInterface('Cell Tower')
cos.config.interface.cellTower.check.ip("172.16.1.1")
cos.config.interface.cellTower.check.subnet('255.255.255.0')
cos.config.interface.cellTower.check.ipv6("2001::1")
cos.config.interface.cellTower.check.subnetv6("64")
cos.config.interface.cellTower.check.linkLocal("FE80::[A-F\d]{1,4}:[A-F\d]{1,4}:[A-F\d]{1,4}:[A-F\d]{1,4}")
cos.clickServicesTab()
cos.services.selectInterface('DHCP')
cos.services.dhcp.check.ip("172.16.1.1")
cos.services.dhcp.check.subnet("255.255.255.0")
cos.services.dhcp.check.startIp1("172")
cos.services.dhcp.check.startIp2('16')
cos.services.dhcp.check.startIp3('1')
cos.services.dhcp.check.startIp4('100')
cos.services.dhcp.check.maxUsers('50')
cos.services.selectInterface('DHCPv6')
#cos.services.dhcpv6.on()
cos.services.dhcpv6.check.on(True)
test.compare(findObject(cos.squishName + ServicesConst.dhcpv6.PREFIX_TABLE).rowCount, 1)
test.compare(findObject(cos.squishName + ServicesConst.dhcpv6.LOCAL_TABLE).rowCount, 1)
cos.services.selectInterface("CELL TOWER")
test.compare(findObject(cos.squishName + ServicesConst.cellTower.CELL_TOWER_LIST).rowCount, 1)
cos.services.cellTower.refreshButton()
test.compare(findObject(cos.squishName + ServicesConst.cellTower.CELL_TOWER_LIST).rowCount, 1)
cos.services.cellTower.clickItem("0/0")
test.compare(findObject(cos.squishName + ServicesConst.cellTower.CELL_DEVICE_LIST).rowCount, 2)
cos.services.selectInterface("PAP/CHAP")
cos.close()
def movephysical():
util.clickOnPhysical()
gpt.clickButton(gptc.NAVIGATION)
gpt.clickItem(gptc.NAVIGATION_LIST, "Intercity_1.Home City.Corporate Office.Smartphone0")
gpt.clickButton(gptc.JUMP_TO_SELECTED_LOCATION)
# gpt.scrollTo(gptc.RACK_VIEW_V_SCROLL_BAR, 409)
# gpt.scrollTo(gptc.RACK_VIEW_V_SCROLL_BAR, 818)
gpt.clickButton(gptc.MOVE_OBJECT)
util.clickOnPhysicalWorkspace(172, 215)
#mouseClick(waitForObject(gptc.TABLE1_DEVICE1), 39, 848, 0, Qt.LeftButton)
#sendEvent("QMouseEvent", waitForObject(gptc.TABLE1_DEVICE1), QEvent.MouseButtonRelease, 38, 95, Qt.LeftButton, 0, 0)
activateItem(waitForObjectItem(gptc.MOVE_DROPDOWN, "Move to Intercity"))
snooze(5)
#gpt.clickButton(gptc.NAVIGATION)
gpt.clickItem(gptc.NAVIGATION_LIST, "Intercity_1")
gpt.clickButton(gptc.JUMP_TO_SELECTED_LOCATION)
smartphone = Physical().getObject('Smartphone0')
util.dragAndDrop(smartphone, 10, 10, UtilConst.PHYSICAL_WORKSPACE, 500, 300)
util.clickOnLogical()
pda0.select()
pda0.clickDesktopTab()
pda0.desktop.applications.commandPrompt()
pda0.desktop.commandPrompt.setText("ping 172.16.1.1")
util.fastForwardTime()
pda0.desktop.commandPrompt.textCheckPoint("Received = 0", 1)
#checkpoint phone outside range
#checkpoint phone not getting reception
pda0.close()
util.clickOnPhysical()
smartphone = Physical().getObject('Smartphone0')
util.dragAndDrop(smartphone, 10, 10, UtilConst.PHYSICAL_WORKSPACE, 200, 200)
util.clickOnLogical()
util.clickOnSimulation()
pda0.select()
pda0.clickTab('Desktop')
pda0.desktop.applications.commandPrompt()
pda0.desktop.commandPrompt.setText('ping 172.16.255.255')
PlayControls().captureForward(10)
foundEvent = []
foundEvent.append(EventList().findEventAt('Smartphone0', 'Cell Tower0', 'ICMP'))
foundEvent.append(EventList().findEventAt('Smartphone1', 'Cell Tower0', 'ICMP'))
foundEvent.append(EventList().findEventAt('Central Office Server0', 'Cell Tower0', 'ICMP'))
check(not False in foundEvent)
| [
"[email protected]"
]
| |
a270947c1b4f962a0d9e5be8ec990bbefd2b4a32 | 3a39ddc4a8600ffc5110453867370c1d8e2da121 | /x11-libs/libXcomposite/libXcomposite-0.4.3.py | 8ce4b041dc0124e9f86b8c9c3514052f3dd809a7 | []
| no_license | seqizz/hadron64 | f2276133786c62f490bdc0cbb6801491c788520f | ca6ef5df3972b925f38e3666ccdc20f2d0bfe87e | refs/heads/master | 2021-01-18T04:53:09.597388 | 2013-02-25T21:25:32 | 2013-02-25T21:25:32 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 468 | py | metadata = """
summary @ X11 Composite extension library
homepage @ http://xorg.freedesktop.org/
license @ MIT
src_url @ http://xorg.freedesktop.org/releases/individual/lib/libXcomposite-$version.tar.bz2
arch @ ~x86
"""
depends = """
runtime @ x11-libs/libXfixes x11-proto/compositeproto
"""
#srcdir = "libXcomposite-%s" % version
def configure():
conf(
"--disable-static")
def install():
raw_install("DESTDIR=%s" % install_dir)
insdoc("COPYING")
| [
"[email protected]"
]
| |
59534247ee1449496330021da54fc527d05a14e3 | 34a043e6961639657e36e7ac9fd459ad5b1f6de1 | /openpathsampling/experimental/storage/test_mdtraj_json.py | f3c57c4ad31a103b69866649884b52ccf8542b6a | [
"MIT"
]
| permissive | dwhswenson/openpathsampling | edaddc91e443e7ffc518e3a06c99fc920ad9d053 | 3d02df4ccdeb6d62030a28e371a6b4ea9aaee5fe | refs/heads/master | 2023-02-04T12:31:17.381582 | 2023-01-30T21:17:01 | 2023-01-30T21:17:01 | 23,991,437 | 3 | 1 | MIT | 2022-08-12T17:48:04 | 2014-09-13T10:15:43 | Python | UTF-8 | Python | false | false | 2,273 | py | from .mdtraj_json import *
import pytest
import numpy as np
import numpy.testing as npt
from ..simstore.custom_json import bytes_codec, numpy_codec, custom_json_factory
from ..simstore.test_custom_json import CustomJSONCodingTest
from openpathsampling.tests.test_helpers import data_filename
class MDTrajCodingTest(CustomJSONCodingTest):
def setup(self):
if not HAS_MDTRAJ:
pytest.skip()
self.filename = data_filename('ala_small_traj.pdb')
def test_default(self):
# custom for handling numpy
for (obj, dct) in zip(self.objs, self.dcts):
default = self.codec.default(obj)
numpy_attrs = [attr for attr, val in dct.items()
if isinstance(val, np.ndarray)]
other_attrs = [attr for attr, val in dct.items()
if not isinstance(val, np.ndarray)]
for attr in numpy_attrs:
npt.assert_array_equal(default[attr], dct[attr])
for attr in other_attrs:
assert default[attr] == dct[attr]
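    # Added note: '==' on numpy arrays is elementwise, so the override above
    # splits the expected dict into array-valued attrs (checked with
    # npt.assert_array_equal) and everything else (checked with plain '==').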
def test_round_trip(self):
codecs = [numpy_codec, bytes_codec] + mdtraj_codecs
encoder, decoder = custom_json_factory(codecs)
self._test_round_trip(encoder, decoder)
class TestTopologyCoding(MDTrajCodingTest):
def setup(self):
super(TestTopologyCoding, self).setup()
self.codec = top_codec
top = md.load(self.filename).topology
dataframe, bonds = top.to_dataframe()
self.objs = [top]
self.dcts = [{
'__class__': 'Topology',
'__module__': 'mdtraj.core.topology',
'atoms': dataframe.to_json(),
'bonds': bonds
}]
class TestTrajectoryCoding(MDTrajCodingTest):
def setup(self):
super(TestTrajectoryCoding, self).setup()
self.codec = traj_codec
traj = md.load(self.filename)
self.objs = [traj]
self.dcts = [{
'__class__': 'Trajectory',
'__module__': 'mdtraj.core.trajectory',
'xyz': traj.xyz,
'topology': traj.topology,
'time': traj.time,
'unitcell_lengths': traj.unitcell_lengths,
'unitcell_angles': traj.unitcell_angles
}]
| [
"[email protected]"
]
| |
f63a1432724c3cac911ccad6422806edc4c92da0 | 0369761e54c2766ff2ce13ed249d462a12320c0f | /bubble-search/bubble-search-practice/exercise-09.py | de843c707b960f927b8aa8ee8b57bf0057cd539f | []
| no_license | JasoSalgado/algorithms | e54c739005cc47ee8a401912a77cc70865d28c87 | 8db7d2bedfe468c70e5191bc7873e4dd86e7f95a | refs/heads/master | 2023-04-25T23:41:10.655874 | 2021-06-11T17:35:49 | 2021-06-11T17:35:49 | 333,979,204 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 622 | py | """
Bubble search exercise 09
"""
nums = [6514, 2352, 3984, 3596, 2445, 5535, 6332, 5346, 617, 3976, 1242, 2573, 7772, 9324, 4655, 3144, 6233, 2287, 6109, 4139, 2030, 6734, 1495, 9466, 6893, 9336, 963, 4412, 5347, 2565, 7590, 5932, 6747, 7566, 2456, 9982, 8880, 6816, 9415, 2426, 5892, 5074, 1501, 9445, 6921, 545, 4415, 9516, 6426, 7369]
print(f"List: {nums}")
for i in range(len(nums)):
    for x in range(len(nums) - 1):
        if nums[x] > nums[x + 1]:
            aux = nums[x]
            nums[x] = nums[x + 1]
            nums[x + 1] = aux
print(nums)
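# Worked micro-example (added for illustration): on [3, 1, 2] the first pass
# compares and swaps pairwise, 3>1 -> [1, 3, 2], then 3>2 -> [1, 2, 3];
# the remaining passes find nothing out of order and leave the list unchanged.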
| [
"[email protected]"
]
| |
f8b8ecc8c9afc0614b9a66d3e6d49402720bd1bf | 11cd362cdd78c2fc48042ed203614b201ac94aa6 | /desktop/core/ext-py3/boto-2.49.0/boto/sts/connection.py | 8c0cf4b269ba1ac3926620ffdf9f697f9a4c88a2 | [
"CC-BY-3.0",
"LicenseRef-scancode-other-copyleft",
"LicenseRef-scancode-unknown-license-reference",
"ZPL-2.0",
"Unlicense",
"LGPL-3.0-only",
"CC0-1.0",
"LicenseRef-scancode-other-permissive",
"CNRI-Python",
"LicenseRef-scancode-warranty-disclaimer",
"GPL-2.0-or-later",
"Python-2.0",
"GPL-3.0-only",
"CC-BY-4.0",
"LicenseRef-scancode-jpython-1.1",
"AFL-2.1",
"JSON",
"WTFPL",
"MIT",
"LicenseRef-scancode-generic-exception",
"LicenseRef-scancode-jython",
"GPL-3.0-or-later",
"LicenseRef-scancode-python-cwi",
"BSD-3-Clause",
"LGPL-3.0-or-later",
"Zlib",
"LicenseRef-scancode-free-unknown",
"Classpath-exception-2.0",
"LicenseRef-scancode-proprietary-license",
"GPL-1.0-or-later",
"LGPL-2.0-or-later",
"MPL-2.0",
"ISC",
"GPL-2.0-only",
"ZPL-2.1",
"BSL-1.0",
"Apache-2.0",
"LGPL-2.0-only",
"LicenseRef-scancode-public-domain",
"Xnet",
"BSD-2-Clause"
]
| permissive | cloudera/hue | b42343d0e03d2936b5a9a32f8ddb3e9c5c80c908 | dccb9467675c67b9c3399fc76c5de6d31bfb8255 | refs/heads/master | 2023-08-31T06:49:25.724501 | 2023-08-28T20:45:00 | 2023-08-28T20:45:00 | 732,593 | 5,655 | 2,244 | Apache-2.0 | 2023-09-14T03:05:41 | 2010-06-21T19:46:51 | JavaScript | UTF-8 | Python | false | false | 32,142 | py | # Copyright (c) 2011 Mitch Garnaat http://garnaat.org/
# Copyright (c) 2011, Eucalyptus Systems, Inc.
# Copyright (c) 2013 Amazon.com, Inc. or its affiliates. All Rights Reserved
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
from boto.connection import AWSQueryConnection
from boto.provider import Provider, NO_CREDENTIALS_PROVIDED
from boto.regioninfo import RegionInfo
from boto.sts.credentials import Credentials, FederationToken, AssumedRole
from boto.sts.credentials import DecodeAuthorizationMessage
import boto
import boto.utils
import datetime
import threading
_session_token_cache = {}
class STSConnection(AWSQueryConnection):
"""
AWS Security Token Service
The AWS Security Token Service is a web service that enables you
to request temporary, limited-privilege credentials for AWS
Identity and Access Management (IAM) users or for users that you
authenticate (federated users). This guide provides descriptions
of the AWS Security Token Service API.
For more detailed information about using this service, go to
`Using Temporary Security Credentials`_.
For information about setting up signatures and authorization
through the API, go to `Signing AWS API Requests`_ in the AWS
General Reference . For general information about the Query API,
go to `Making Query Requests`_ in Using IAM . For information
about using security tokens with other AWS products, go to `Using
Temporary Security Credentials to Access AWS`_ in Using Temporary
Security Credentials .
If you're new to AWS and need additional technical information
about a specific AWS product, you can find the product's technical
documentation at `http://aws.amazon.com/documentation/`_.
We will refer to Amazon Identity and Access Management using the
abbreviated form IAM. All copyrights and legal protections still
apply.
"""
DefaultRegionName = 'us-east-1'
DefaultRegionEndpoint = 'sts.amazonaws.com'
APIVersion = '2011-06-15'
def __init__(self, aws_access_key_id=None, aws_secret_access_key=None,
is_secure=True, port=None, proxy=None, proxy_port=None,
proxy_user=None, proxy_pass=None, debug=0,
https_connection_factory=None, region=None, path='/',
converter=None, validate_certs=True, anon=False,
security_token=None, profile_name=None):
"""
:type anon: boolean
:param anon: If this parameter is True, the ``STSConnection`` object
will make anonymous requests, and it will not use AWS
Credentials or even search for AWS Credentials to make these
requests.
"""
if not region:
region = RegionInfo(self, self.DefaultRegionName,
self.DefaultRegionEndpoint,
connection_cls=STSConnection)
self.region = region
self.anon = anon
self._mutex = threading.Semaphore()
provider = 'aws'
# If an anonymous request is sent, do not try to look for credentials.
# So we pass in dummy values for the access key id, secret access
# key, and session token. It does not matter that they are
# not actual values because the request is anonymous.
if self.anon:
provider = Provider('aws', NO_CREDENTIALS_PROVIDED,
NO_CREDENTIALS_PROVIDED,
NO_CREDENTIALS_PROVIDED)
super(STSConnection, self).__init__(aws_access_key_id,
aws_secret_access_key,
is_secure, port, proxy, proxy_port,
proxy_user, proxy_pass,
self.region.endpoint, debug,
https_connection_factory, path,
validate_certs=validate_certs,
security_token=security_token,
profile_name=profile_name,
provider=provider)
def _required_auth_capability(self):
if self.anon:
return ['sts-anon']
else:
return ['hmac-v4']
def _check_token_cache(self, token_key, duration=None, window_seconds=60):
token = _session_token_cache.get(token_key, None)
if token:
now = datetime.datetime.utcnow()
expires = boto.utils.parse_ts(token.expiration)
delta = expires - now
if delta < datetime.timedelta(seconds=window_seconds):
msg = 'Cached session token %s is expired' % token_key
boto.log.debug(msg)
token = None
return token
def _get_session_token(self, duration=None,
mfa_serial_number=None, mfa_token=None):
params = {}
if duration:
params['DurationSeconds'] = duration
if mfa_serial_number:
params['SerialNumber'] = mfa_serial_number
if mfa_token:
params['TokenCode'] = mfa_token
return self.get_object('GetSessionToken', params,
Credentials, verb='POST')
def get_session_token(self, duration=None, force_new=False,
mfa_serial_number=None, mfa_token=None):
"""
Return a valid session token. Because retrieving new tokens
from the Secure Token Service is a fairly heavyweight operation
this module caches previously retrieved tokens and returns
them when appropriate. Each token is cached with a key
consisting of the region name of the STS endpoint
concatenated with the requesting user's access id. If there
is a token in the cache meeting with this key, the session
expiration is checked to make sure it is still valid and if
so, the cached token is returned. Otherwise, a new session
token is requested from STS and it is placed into the cache
and returned.
:type duration: int
:param duration: The number of seconds the credentials should
remain valid.
:type force_new: bool
:param force_new: If this parameter is True, a new session token
will be retrieved from the Secure Token Service regardless
of whether there is a valid cached token or not.
:type mfa_serial_number: str
:param mfa_serial_number: The serial number of an MFA device.
If this is provided and if the mfa_passcode provided is
valid, the temporary session token will be authorized with
to perform operations requiring the MFA device authentication.
:type mfa_token: str
:param mfa_token: The 6 digit token associated with the
MFA device.
"""
token_key = '%s:%s' % (self.region.name, self.provider.access_key)
token = self._check_token_cache(token_key, duration)
if force_new or not token:
boto.log.debug('fetching a new token for %s' % token_key)
try:
self._mutex.acquire()
token = self._get_session_token(duration,
mfa_serial_number,
mfa_token)
_session_token_cache[token_key] = token
finally:
self._mutex.release()
return token
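    # Added note: the cache key built above is "<region name>:<access key>",
    # e.g. "us-east-1:AKIDEXAMPLE" (made-up key), so tokens are shared per
    # endpoint/credential pair; force_new=True bypasses the cache, and
    # _check_token_cache treats tokens within 60 seconds of expiry as stale.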
def get_federation_token(self, name, duration=None, policy=None):
"""
Returns a set of temporary security credentials (consisting of
an access key ID, a secret access key, and a security token)
for a federated user. A typical use is in a proxy application
that is getting temporary security credentials on behalf of
distributed applications inside a corporate network. Because
you must call the `GetFederationToken` action using the long-
term security credentials of an IAM user, this call is
appropriate in contexts where those credentials can be safely
stored, usually in a server-based application.
**Note:** Do not use this call in mobile applications or
client-based web applications that directly get temporary
security credentials. For those types of applications, use
`AssumeRoleWithWebIdentity`.
The `GetFederationToken` action must be called by using the
long-term AWS security credentials of the AWS account or an
IAM user. Credentials that are created by IAM users are valid
for the specified duration, between 900 seconds (15 minutes)
and 129600 seconds (36 hours); credentials that are created by
using account credentials have a maximum duration of 3600
seconds (1 hour).
The permissions that are granted to the federated user are the
intersection of the policy that is passed with the
`GetFederationToken` request and policies that are associated
with of the entity making the `GetFederationToken` call.
For more information about how permissions work, see
`Controlling Permissions in Temporary Credentials`_ in Using
Temporary Security Credentials . For information about using
`GetFederationToken` to create temporary security credentials,
see `Creating Temporary Credentials to Enable Access for
Federated Users`_ in Using Temporary Security Credentials .
:type name: string
:param name: The name of the federated user. The name is used as an
identifier for the temporary security credentials (such as `Bob`).
For example, you can reference the federated user name in a
resource-based policy, such as in an Amazon S3 bucket policy.
:type policy: string
:param policy: A policy that specifies the permissions that are granted
to the federated user. By default, federated users have no
permissions; they do not inherit any from the IAM user. When you
specify a policy, the federated user's permissions are intersection
of the specified policy and the IAM user's policy. If you don't
specify a policy, federated users can only access AWS resources
that explicitly allow those federated users in a resource policy,
such as in an Amazon S3 bucket policy.
:type duration: integer
:param duration: The duration, in seconds, that the session
should last. Acceptable durations for federation sessions range
from 900 seconds (15 minutes) to 129600 seconds (36 hours), with
43200 seconds (12 hours) as the default. Sessions for AWS account
owners are restricted to a maximum of 3600 seconds (one hour). If
the duration is longer than one hour, the session for AWS account
owners defaults to one hour.
"""
params = {'Name': name}
if duration:
params['DurationSeconds'] = duration
if policy:
params['Policy'] = policy
return self.get_object('GetFederationToken', params,
FederationToken, verb='POST')
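    # A hedged usage sketch: the inline policy document is an assumed
    # example, not part of this module. The FederationToken result exposes
    # the keys through its `credentials` attribute:
    #
    #     policy = ('{"Statement": [{"Effect": "Allow", '
    #               '"Action": "s3:GetObject", "Resource": "*"}]}')
    #     token = sts.get_federation_token('Bob', duration=3600, policy=policy)
    #     print(token.credentials.access_key)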
def assume_role(self, role_arn, role_session_name, policy=None,
duration_seconds=None, external_id=None,
mfa_serial_number=None,
mfa_token=None):
"""
Returns a set of temporary security credentials (consisting of
an access key ID, a secret access key, and a security token)
that you can use to access AWS resources that you might not
normally have access to. Typically, you use `AssumeRole` for
cross-account access or federation.
For cross-account access, imagine that you own multiple
accounts and need to access resources in each account. You
could create long-term credentials in each account to access
those resources. However, managing all those credentials and
remembering which one can access which account can be time
consuming. Instead, you can create one set of long-term
credentials in one account and then use temporary security
credentials to access all the other accounts by assuming roles
in those accounts. For more information about roles, see
`Roles`_ in Using IAM .
For federation, you can, for example, grant single sign-on
access to the AWS Management Console. If you already have an
identity and authentication system in your corporate network,
you don't have to recreate user identities in AWS in order to
grant those user identities access to AWS. Instead, after a
user has been authenticated, you call `AssumeRole` (and
specify the role with the appropriate permissions) to get
temporary security credentials for that user. With those
temporary security credentials, you construct a sign-in URL
that users can use to access the console. For more
information, see `Scenarios for Granting Temporary Access`_ in
AWS Security Token Service .
The temporary security credentials are valid for the duration
that you specified when calling `AssumeRole`, which can be
from 900 seconds (15 minutes) to 3600 seconds (1 hour). The
default is 1 hour.
        The temporary security credentials that are returned from the
        `AssumeRole` response have the permissions that
are associated with the access policy of the role being
assumed and any policies that are associated with the AWS
resource being accessed. You can further restrict the
permissions of the temporary security credentials by passing a
policy in the request. The resulting permissions are an
intersection of the role's access policy and the policy that
you passed. These policies and any applicable resource-based
policies are evaluated when calls to AWS service APIs are made
using the temporary security credentials.
To assume a role, your AWS account must be trusted by the
role. The trust relationship is defined in the role's trust
policy when the IAM role is created. You must also have a
policy that allows you to call `sts:AssumeRole`.
        **Important:** You cannot call `AssumeRole` by using AWS
account credentials; access will be denied. You must use IAM
user credentials to call `AssumeRole`.
:type role_arn: string
:param role_arn: The Amazon Resource Name (ARN) of the role that the
caller is assuming.
:type role_session_name: string
:param role_session_name: An identifier for the assumed role session.
The session name is included as part of the `AssumedRoleUser`.
:type policy: string
:param policy: A supplemental policy that is associated with the
temporary security credentials from the `AssumeRole` call. The
resulting permissions of the temporary security credentials are an
intersection of this policy and the access policy that is
associated with the role. Use this policy to further restrict the
permissions of the temporary security credentials.
:type duration_seconds: integer
:param duration_seconds: The duration, in seconds, of the role session.
The value can range from 900 seconds (15 minutes) to 3600 seconds
(1 hour). By default, the value is set to 3600 seconds.
:type external_id: string
:param external_id: A unique identifier that is used by third parties
to assume a role in their customers' accounts. For each role that
the third party can assume, they should instruct their customers to
create a role with the external ID that the third party generated.
Each time the third party assumes the role, they must pass the
customer's external ID. The external ID is useful in order to help
third parties bind a role to the customer who created it. For more
information about the external ID, see `About the External ID`_ in
Using Temporary Security Credentials .
:type mfa_serial_number: string
:param mfa_serial_number: The identification number of the MFA device that
is associated with the user who is making the AssumeRole call.
Specify this value if the trust policy of the role being assumed
includes a condition that requires MFA authentication. The value is
either the serial number for a hardware device (such as
GAHT12345678) or an Amazon Resource Name (ARN) for a virtual device
(such as arn:aws:iam::123456789012:mfa/user). Minimum length of 9.
Maximum length of 256.
:type mfa_token: string
:param mfa_token: The value provided by the MFA device, if the trust
policy of the role being assumed requires MFA (that is, if the
policy includes a condition that tests for MFA). If the role being
assumed requires MFA and if the TokenCode value is missing or
            expired, the AssumeRole call returns an "access denied" error.
Minimum length of 6. Maximum length of 6.
"""
params = {
'RoleArn': role_arn,
'RoleSessionName': role_session_name
}
if policy is not None:
params['Policy'] = policy
if duration_seconds is not None:
params['DurationSeconds'] = duration_seconds
if external_id is not None:
params['ExternalId'] = external_id
if mfa_serial_number is not None:
params['SerialNumber'] = mfa_serial_number
if mfa_token is not None:
params['TokenCode'] = mfa_token
return self.get_object('AssumeRole', params, AssumedRole, verb='POST')
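    # A hedged usage sketch (the ARN, session name and duration are
    # placeholders):
    #
    #     role = sts.assume_role(
    #         role_arn='arn:aws:iam::123456789012:role/demo',
    #         role_session_name='demo-session',
    #         duration_seconds=900)
    #     print(role.credentials.access_key, role.user.arn)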
def assume_role_with_saml(self, role_arn, principal_arn, saml_assertion,
policy=None, duration_seconds=None):
"""
Returns a set of temporary security credentials for users who
have been authenticated via a SAML authentication response.
This operation provides a mechanism for tying an enterprise
identity store or directory to role-based AWS access without
user-specific credentials or configuration.
The temporary security credentials returned by this operation
consist of an access key ID, a secret access key, and a
security token. Applications can use these temporary security
credentials to sign calls to AWS services. The credentials are
valid for the duration that you specified when calling
`AssumeRoleWithSAML`, which can be up to 3600 seconds (1 hour)
or until the time specified in the SAML authentication
response's `NotOnOrAfter` value, whichever is shorter.
The maximum duration for a session is 1 hour, and the minimum
duration is 15 minutes, even if values outside this range are
specified.
Optionally, you can pass an AWS IAM access policy to this
operation. The temporary security credentials that are
returned by the operation have the permissions that are
associated with the access policy of the role being assumed,
except for any permissions explicitly denied by the policy you
pass. This gives you a way to further restrict the permissions
for the federated user. These policies and any applicable
resource-based policies are evaluated when calls to AWS are
made using the temporary security credentials.
Before your application can call `AssumeRoleWithSAML`, you
must configure your SAML identity provider (IdP) to issue the
claims required by AWS. Additionally, you must use AWS
Identity and Access Management (AWS IAM) to create a SAML
provider entity in your AWS account that represents your
identity provider, and create an AWS IAM role that specifies
this SAML provider in its trust policy.
Calling `AssumeRoleWithSAML` does not require the use of AWS
security credentials. The identity of the caller is validated
by using keys in the metadata document that is uploaded for
the SAML provider entity for your identity provider.
For more information, see the following resources:
+ `Creating Temporary Security Credentials for SAML
Federation`_ in the Using Temporary Security Credentials
guide.
+ `SAML Providers`_ in the Using IAM guide.
        + `Configuring a Relying Party and Claims`_ in the Using IAM
          guide.
+ `Creating a Role for SAML-Based Federation`_ in the Using
IAM guide.
:type role_arn: string
:param role_arn: The Amazon Resource Name (ARN) of the role that the
caller is assuming.
:type principal_arn: string
:param principal_arn: The Amazon Resource Name (ARN) of the SAML
provider in AWS IAM that describes the IdP.
:type saml_assertion: string
:param saml_assertion: The base-64 encoded SAML authentication response
provided by the IdP.
For more information, see `Configuring a Relying Party and Adding
Claims`_ in the Using IAM guide.
:type policy: string
:param policy:
An AWS IAM policy in JSON format.
The temporary security credentials that are returned by this operation
have the permissions that are associated with the access policy of
the role being assumed, except for any permissions explicitly
denied by the policy you pass. These policies and any applicable
resource-based policies are evaluated when calls to AWS are made
using the temporary security credentials.
The policy must be 2048 bytes or shorter, and its packed size must be
less than 450 bytes.
:type duration_seconds: integer
:param duration_seconds:
The duration, in seconds, of the role session. The value can range from
900 seconds (15 minutes) to 3600 seconds (1 hour). By default, the
value is set to 3600 seconds. An expiration can also be specified
in the SAML authentication response's `NotOnOrAfter` value. The
actual expiration time is whichever value is shorter.
The maximum duration for a session is 1 hour, and the minimum duration
is 15 minutes, even if values outside this range are specified.
"""
params = {
'RoleArn': role_arn,
'PrincipalArn': principal_arn,
'SAMLAssertion': saml_assertion,
}
if policy is not None:
params['Policy'] = policy
if duration_seconds is not None:
params['DurationSeconds'] = duration_seconds
return self.get_object('AssumeRoleWithSAML', params, AssumedRole,
verb='POST')
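    # A hedged usage sketch: `saml_response` stands for the base64-encoded
    # assertion returned by your IdP, and both ARNs are placeholders:
    #
    #     role = sts.assume_role_with_saml(
    #         role_arn='arn:aws:iam::123456789012:role/saml-demo',
    #         principal_arn='arn:aws:iam::123456789012:saml-provider/MyIdP',
    #         saml_assertion=saml_response)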
def assume_role_with_web_identity(self, role_arn, role_session_name,
web_identity_token, provider_id=None,
policy=None, duration_seconds=None):
"""
Returns a set of temporary security credentials for users who
have been authenticated in a mobile or web application with a
web identity provider, such as Login with Amazon, Facebook, or
Google. `AssumeRoleWithWebIdentity` is an API call that does
not require the use of AWS security credentials. Therefore,
you can distribute an application (for example, on mobile
devices) that requests temporary security credentials without
including long-term AWS credentials in the application or by
deploying server-based proxy services that use long-term AWS
credentials. For more information, see `Creating a Mobile
Application with Third-Party Sign-In`_ in AWS Security Token
Service .
The temporary security credentials consist of an access key
ID, a secret access key, and a security token. Applications
can use these temporary security credentials to sign calls to
AWS service APIs. The credentials are valid for the duration
that you specified when calling `AssumeRoleWithWebIdentity`,
which can be from 900 seconds (15 minutes) to 3600 seconds (1
hour). By default, the temporary security credentials are
valid for 1 hour.
The temporary security credentials that are returned from the
`AssumeRoleWithWebIdentity` response have the permissions that
are associated with the access policy of the role being
assumed. You can further restrict the permissions of the
temporary security credentials by passing a policy in the
request. The resulting permissions are an intersection of the
role's access policy and the policy that you passed. These
policies and any applicable resource-based policies are
evaluated when calls to AWS service APIs are made using the
temporary security credentials.
Before your application can call `AssumeRoleWithWebIdentity`,
you must have an identity token from a supported identity
provider and create a role that the application can assume.
The role that your application assumes must trust the identity
provider that is associated with the identity token. In other
words, the identity provider must be specified in the role's
trust policy. For more information, see ` Creating Temporary
Security Credentials for Mobile Apps Using Third-Party
Identity Providers`_.
:type role_arn: string
:param role_arn: The Amazon Resource Name (ARN) of the role that the
caller is assuming.
:type role_session_name: string
:param role_session_name: An identifier for the assumed role session.
Typically, you pass the name or identifier that is associated with
the user who is using your application. That way, the temporary
security credentials that your application will use are associated
with that user. This session name is included as part of the ARN
and assumed role ID in the `AssumedRoleUser` response element.
:type web_identity_token: string
:param web_identity_token: The OAuth 2.0 access token or OpenID Connect
ID token that is provided by the identity provider. Your
application must get this token by authenticating the user who is
using your application with a web identity provider before the
application makes an `AssumeRoleWithWebIdentity` call.
:type provider_id: string
:param provider_id: Specify this value only for OAuth access tokens. Do
not specify this value for OpenID Connect ID tokens, such as
`accounts.google.com`. This is the fully-qualified host component
of the domain name of the identity provider. Do not include URL
schemes and port numbers. Currently, `www.amazon.com` and
`graph.facebook.com` are supported.
:type policy: string
:param policy: A supplemental policy that is associated with the
temporary security credentials from the `AssumeRoleWithWebIdentity`
call. The resulting permissions of the temporary security
credentials are an intersection of this policy and the access
policy that is associated with the role. Use this policy to further
restrict the permissions of the temporary security credentials.
:type duration_seconds: integer
:param duration_seconds: The duration, in seconds, of the role session.
The value can range from 900 seconds (15 minutes) to 3600 seconds
(1 hour). By default, the value is set to 3600 seconds.
"""
params = {
'RoleArn': role_arn,
'RoleSessionName': role_session_name,
'WebIdentityToken': web_identity_token,
}
if provider_id is not None:
params['ProviderId'] = provider_id
if policy is not None:
params['Policy'] = policy
if duration_seconds is not None:
params['DurationSeconds'] = duration_seconds
return self.get_object(
'AssumeRoleWithWebIdentity',
params,
AssumedRole,
verb='POST'
)
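    # A hedged usage sketch: because this call needs no AWS credentials, an
    # anonymous connection is assumed here (`anon=True` and the token
    # variable are assumptions; check your boto version):
    #
    #     anon_sts = boto.sts.STSConnection(anon=True)
    #     role = anon_sts.assume_role_with_web_identity(
    #         role_arn='arn:aws:iam::123456789012:role/web-demo',
    #         role_session_name='app-user-1',
    #         web_identity_token=id_token_from_provider)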
def decode_authorization_message(self, encoded_message):
"""
Decodes additional information about the authorization status
of a request from an encoded message returned in response to
an AWS request.
For example, if a user is not authorized to perform an action
that he or she has requested, the request returns a
`Client.UnauthorizedOperation` response (an HTTP 403
response). Some AWS actions additionally return an encoded
message that can provide details about this authorization
failure.
Only certain AWS actions return an encoded authorization
message. The documentation for an individual action indicates
whether that action returns an encoded message in addition to
returning an HTTP code.
The message is encoded because the details of the
authorization status can constitute privileged information
that the user who requested the action should not see. To
decode an authorization status message, a user must be granted
permissions via an IAM policy to request the
`DecodeAuthorizationMessage` (
`sts:DecodeAuthorizationMessage`) action.
        The decoded message includes the following types of
        information:
+ Whether the request was denied due to an explicit deny or
due to the absence of an explicit allow. For more information,
see `Determining Whether a Request is Allowed or Denied`_ in
Using IAM .
+ The principal who made the request.
+ The requested action.
+ The requested resource.
+ The values of condition keys in the context of the user's
request.
:type encoded_message: string
:param encoded_message: The encoded message that was returned with the
response.
"""
params = {
'EncodedMessage': encoded_message,
}
return self.get_object(
'DecodeAuthorizationMessage',
params,
DecodeAuthorizationMessage,
verb='POST'
)
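    # A hedged usage sketch: `encoded` stands for the encoded message taken
    # from a Client.UnauthorizedOperation (HTTP 403) response, and the
    # `decoded_message` attribute name is assumed from the result class:
    #
    #     msg = sts.decode_authorization_message(encoded)
    #     print(msg.decoded_message)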
f59837294f8f44c5babd41a112e886e751a61e97 | 31401549d7a342b3fcb0f276f20e18f130730c69 | /utils/loadweight.py | 05c9d7ff211cd6d9235020fb2c41f2ffb3f1af14 | []
| no_license | takeitea/Attention-Echino | e79f207010ad9c57b31d39ba8681d2cb0e59643f | e157c99e5784c8dc2470b0d3f3ffa61b7921ce09 | refs/heads/master | 2020-05-21T00:01:06.170506 | 2019-03-06T13:27:52 | 2019-03-06T13:27:52 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,897 | py |
"""
load part of the pre-trained parameters
"""
import os
import torch
import torch.utils.model_zoo as model_zoo
model_urls = {
'vgg11': 'https://download.pytorch.org/models/vgg11-bbd30ac9.pth',
'vgg13': 'https://download.pytorch.org/models/vgg13-c768596a.pth',
'vgg16': 'https://download.pytorch.org/models/vgg16-397923af.pth',
'vgg19': 'https://download.pytorch.org/models/vgg19-dcbb9e9d.pth',
'vgg11_bn': 'https://download.pytorch.org/models/vgg11_bn-6002323d.pth',
'vgg13_bn': 'https://download.pytorch.org/models/vgg13_bn-abd245e5.pth',
'vgg16_bn': 'https://download.pytorch.org/models/vgg16_bn-6c64b313.pth',
'vgg19_bn': 'https://download.pytorch.org/models/vgg19_bn-c79401a0.pth',
}
def loadcheckpoint(model, optimizer, args):
if args.resume:
        if os.path.isfile(args.resume):
print("load checkpoint '{}'".format(args.resume))
checkpoint = torch.load(args.resume)
args.start_epoch = checkpoint['epoch']
best_prec1 = checkpoint['best_prec1']
model.load_state_dict(checkpoint['state_dict'])
optimizer.load_state_dict(checkpoint['optimizer'])
print(" loaded checkpoint '{}'({}) best_prec: {}".format(args.resume, checkpoint['epoch'], best_prec1))
else:
print("no checkpoint found at {}".format(args.resume))
def loadpartweight(model):
    # Copy the pretrained VGG16-BN parameters into `model` wherever the
    # tensor shapes line up, leaving the remaining layers untouched.
    old_dict = model.state_dict()
    new_dict = model_zoo.load_url(model_urls['vgg16_bn'])
    old_keys = list(old_dict.keys())
    new_items = list(new_dict.items())
    # Only the convolutional 'features' parameters are transferred.
    count_feat = sum(1 for key in new_dict if 'features' in key)
    count_fetch = 0
    skip = 0
    for i in range(count_feat):
        new_value = new_items[i][1]
        for k in range(i, len(old_keys)):
            # 'num_batches_tracked' buffers have no pretrained counterpart.
            while k + skip < len(old_keys) and 'num_batches_tracked' in old_keys[k + skip]:
                skip += 1
            if k + skip >= len(old_keys):
                break
            if new_value.size() == old_dict[old_keys[k + skip]].size():
                old_dict[old_keys[k + skip]] = new_value
                count_fetch += 1
                break
    model.load_state_dict(old_dict)
    return model
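# A hedged usage sketch for the helpers above; `MyVGGVariant`, `optimizer`
# and `args` are hypothetical stand-ins for the caller's objects:
#
#     model = MyVGGVariant(num_classes=10)
#     model = loadpartweight(model)            # borrow VGG16-BN features
#     loadcheckpoint(model, optimizer, args)   # optionally resume training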
2fe653f3c427c1407ff776b05974647bae83e94b | e5504d8c4880993b82d5583a11c5cc4623e0eac2 | /Arrays/twoSum2.py | dacf7a07e9511280bc0929061c05928bfd38bb93 | []
| no_license | noorulameenkm/DataStructuresAlgorithms | e5f87f426fc444d18f830e48569d2a7a50f5d7e0 | 7c3bb89326d2898f9e98590ceb8ee5fd7b3196f0 | refs/heads/master | 2023-06-08T19:29:42.507761 | 2023-05-28T16:20:19 | 2023-05-28T16:20:19 | 219,270,731 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 766 | py |
def pair_with_targetsum(arr, target_sum):
result = []
start, end = 0, len(arr) - 1
while start < end:
sum_ = arr[start] + arr[end]
# sum == target
if sum_ == target_sum:
result.append(start)
result.append(end)
break
# sum > target
elif sum_ > target_sum:
end -= 1
else:
start += 1
return result
def two_sum_pair(arr, target_sum):
nums = {}
for i, num in enumerate(arr):
if target_sum - num in nums:
return [nums[target_sum - num], i]
else:
nums[num] = i
return [-1, -1]
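# Both helpers run in O(n) time: pair_with_targetsum assumes a sorted input
# and uses two pointers with O(1) extra space, while two_sum_pair handles
# unsorted input by spending O(n) space on the index dictionary.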
print(pair_with_targetsum([1, 2, 3, 4, 6], 6))
print(pair_with_targetsum([2, 5, 9, 11], 11))
print(two_sum_pair([1, 2, 3, 4, 6], 6))
print(two_sum_pair([2, 5, 9, 11], 11))
1e6d34cd5428851cdf59a0a8cbcabbedc98ffb63 | edd8ad3dcb6ee9b019c999b712f8ee0c468e2b81 | /Python 300/04. List/052.py | b71c6b6b072e84b96609243c216c08fb45331666 | []
| no_license | narinn-star/Python | 575cba200de35b9edf3832c4e41ccce657075751 | 14eba211cd3a9e9708a30073ba5b31d21d39eeef | refs/heads/master | 2023-05-25T22:57:26.079294 | 2021-06-07T15:29:39 | 2021-06-07T15:29:39 | 331,647,462 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 152 | py |
# Appending an element to a list _ append()
movie_rank = ["닥터 스트레인지", "스플릿", "럭키"]
movie_rank.append("배트맨")
print(movie_rank)
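# Expected output: ['닥터 스트레인지', '스플릿', '럭키', '배트맨']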
ae953f626dcd7a8cc3573ca343fdeac058daa21f | df0c4875b45e68c106dd1e2ba397f71a10794327 | /src/pifetcher/utilities/sys_utils.py | d389d2340abd6f3e65f41dbd8999e6aed152bff2 | [
"MIT"
]
| permissive | gavinz0228/pifetcher | c28b407cf4965852af67ffe619a55ee90fa49a72 | c8419ae153eefed04e0e8b239cf1a9226fa91c29 | refs/heads/master | 2021-07-04T20:26:41.973408 | 2020-11-22T16:57:38 | 2020-11-22T16:57:38 | 203,682,327 | 1 | 0 | null | 2019-08-24T17:04:59 | 2019-08-22T00:06:58 | Python | UTF-8 | Python | false | false | 507 | py |
from os import path, chmod
from sys import platform
import stat
class SysUtils:
@staticmethod
def ensure_path(file_path):
if not path.exists(file_path):
raise Exception(f'file path {file_path} does not exist.')
else:
return file_path
@staticmethod
def set_executable_permission(file_path):
if platform in ['linux', 'linux2', 'darwin']:
            # NOTE: stat.S_IRWXO grants read/write/execute to *others* only;
            # use stat.S_IRWXU if owner permissions are the intent.
            chmod(file_path, stat.S_IRWXO)
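# A hedged usage sketch (both paths are placeholders):
#
#     SysUtils.ensure_path('/opt/pifetcher/drivers')
#     SysUtils.set_executable_permission('/opt/pifetcher/drivers/webdriver')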
8c8b11b281a3a8c90dc800644e35e30ea14afc61 | 3a7ef35a51aabaf762dca13f2197548380121ad8 | /beer-song/beer_song_test.py | 761f09f04a2044f0a3c224faaa93715505645455 | [
"Unlicense"
]
| permissive | ikostan/Exercism_Python_Track | ff0be0386cf3fb1b62db54f72b8db15161928af7 | a6d52ad74e36db1d2bf82ed15362c1e4341d741d | refs/heads/master | 2023-08-09T16:56:17.615800 | 2020-10-16T00:46:55 | 2020-10-16T00:46:55 | 191,260,562 | 0 | 0 | Unlicense | 2023-09-04T01:17:29 | 2019-06-10T23:41:50 | Python | UTF-8 | Python | false | false | 19,079 | py |
import unittest
from beer_song import recite
# Tests adapted from `problem-specifications//canonical-data.json` @ v2.1.0
class BeerSongTest(unittest.TestCase):
def test_first_generic_verse(self):
expected = [
"99 bottles of beer on the wall, 99 bottles of beer.",
"Take one down and pass it around, 98 bottles of beer on the wall.",
]
self.assertEqual(recite(start=99), expected)
def test_last_generic_verse(self):
expected = [
"3 bottles of beer on the wall, 3 bottles of beer.",
"Take one down and pass it around, 2 bottles of beer on the wall.",
]
self.assertEqual(recite(start=3), expected)
def test_verse_with_2_bottles(self):
expected = [
"2 bottles of beer on the wall, 2 bottles of beer.",
"Take one down and pass it around, 1 bottle of beer on the wall.",
]
self.assertEqual(recite(start=2), expected)
def test_verse_with_1_bottle(self):
expected = [
"1 bottle of beer on the wall, 1 bottle of beer.",
"Take it down and pass it around, no more bottles of beer on the wall.",
]
self.assertEqual(recite(start=1), expected)
def test_verse_with_0_bottles(self):
expected = [
"No more bottles of beer on the wall, no more bottles of beer.",
"Go to the store and buy some more, 99 bottles of beer on the wall.",
]
self.assertEqual(recite(start=0), expected)
def test_first_two_verses(self):
expected = [
"99 bottles of beer on the wall, 99 bottles of beer.",
"Take one down and pass it around, 98 bottles of beer on the wall.",
"",
"98 bottles of beer on the wall, 98 bottles of beer.",
"Take one down and pass it around, 97 bottles of beer on the wall.",
]
self.assertEqual(recite(start=99, take=2), expected)
def test_last_three_verses(self):
expected = [
"2 bottles of beer on the wall, 2 bottles of beer.",
"Take one down and pass it around, 1 bottle of beer on the wall.",
"",
"1 bottle of beer on the wall, 1 bottle of beer.",
"Take it down and pass it around, no more bottles of beer on the wall.",
"",
"No more bottles of beer on the wall, no more bottles of beer.",
"Go to the store and buy some more, 99 bottles of beer on the wall.",
]
self.assertEqual(recite(start=2, take=3), expected)
def test_all_verses(self):
expected = [
"99 bottles of beer on the wall, 99 bottles of beer.",
"Take one down and pass it around, 98 bottles of beer on the wall.",
"",
"98 bottles of beer on the wall, 98 bottles of beer.",
"Take one down and pass it around, 97 bottles of beer on the wall.",
"",
"97 bottles of beer on the wall, 97 bottles of beer.",
"Take one down and pass it around, 96 bottles of beer on the wall.",
"",
"96 bottles of beer on the wall, 96 bottles of beer.",
"Take one down and pass it around, 95 bottles of beer on the wall.",
"",
"95 bottles of beer on the wall, 95 bottles of beer.",
"Take one down and pass it around, 94 bottles of beer on the wall.",
"",
"94 bottles of beer on the wall, 94 bottles of beer.",
"Take one down and pass it around, 93 bottles of beer on the wall.",
"",
"93 bottles of beer on the wall, 93 bottles of beer.",
"Take one down and pass it around, 92 bottles of beer on the wall.",
"",
"92 bottles of beer on the wall, 92 bottles of beer.",
"Take one down and pass it around, 91 bottles of beer on the wall.",
"",
"91 bottles of beer on the wall, 91 bottles of beer.",
"Take one down and pass it around, 90 bottles of beer on the wall.",
"",
"90 bottles of beer on the wall, 90 bottles of beer.",
"Take one down and pass it around, 89 bottles of beer on the wall.",
"",
"89 bottles of beer on the wall, 89 bottles of beer.",
"Take one down and pass it around, 88 bottles of beer on the wall.",
"",
"88 bottles of beer on the wall, 88 bottles of beer.",
"Take one down and pass it around, 87 bottles of beer on the wall.",
"",
"87 bottles of beer on the wall, 87 bottles of beer.",
"Take one down and pass it around, 86 bottles of beer on the wall.",
"",
"86 bottles of beer on the wall, 86 bottles of beer.",
"Take one down and pass it around, 85 bottles of beer on the wall.",
"",
"85 bottles of beer on the wall, 85 bottles of beer.",
"Take one down and pass it around, 84 bottles of beer on the wall.",
"",
"84 bottles of beer on the wall, 84 bottles of beer.",
"Take one down and pass it around, 83 bottles of beer on the wall.",
"",
"83 bottles of beer on the wall, 83 bottles of beer.",
"Take one down and pass it around, 82 bottles of beer on the wall.",
"",
"82 bottles of beer on the wall, 82 bottles of beer.",
"Take one down and pass it around, 81 bottles of beer on the wall.",
"",
"81 bottles of beer on the wall, 81 bottles of beer.",
"Take one down and pass it around, 80 bottles of beer on the wall.",
"",
"80 bottles of beer on the wall, 80 bottles of beer.",
"Take one down and pass it around, 79 bottles of beer on the wall.",
"",
"79 bottles of beer on the wall, 79 bottles of beer.",
"Take one down and pass it around, 78 bottles of beer on the wall.",
"",
"78 bottles of beer on the wall, 78 bottles of beer.",
"Take one down and pass it around, 77 bottles of beer on the wall.",
"",
"77 bottles of beer on the wall, 77 bottles of beer.",
"Take one down and pass it around, 76 bottles of beer on the wall.",
"",
"76 bottles of beer on the wall, 76 bottles of beer.",
"Take one down and pass it around, 75 bottles of beer on the wall.",
"",
"75 bottles of beer on the wall, 75 bottles of beer.",
"Take one down and pass it around, 74 bottles of beer on the wall.",
"",
"74 bottles of beer on the wall, 74 bottles of beer.",
"Take one down and pass it around, 73 bottles of beer on the wall.",
"",
"73 bottles of beer on the wall, 73 bottles of beer.",
"Take one down and pass it around, 72 bottles of beer on the wall.",
"",
"72 bottles of beer on the wall, 72 bottles of beer.",
"Take one down and pass it around, 71 bottles of beer on the wall.",
"",
"71 bottles of beer on the wall, 71 bottles of beer.",
"Take one down and pass it around, 70 bottles of beer on the wall.",
"",
"70 bottles of beer on the wall, 70 bottles of beer.",
"Take one down and pass it around, 69 bottles of beer on the wall.",
"",
"69 bottles of beer on the wall, 69 bottles of beer.",
"Take one down and pass it around, 68 bottles of beer on the wall.",
"",
"68 bottles of beer on the wall, 68 bottles of beer.",
"Take one down and pass it around, 67 bottles of beer on the wall.",
"",
"67 bottles of beer on the wall, 67 bottles of beer.",
"Take one down and pass it around, 66 bottles of beer on the wall.",
"",
"66 bottles of beer on the wall, 66 bottles of beer.",
"Take one down and pass it around, 65 bottles of beer on the wall.",
"",
"65 bottles of beer on the wall, 65 bottles of beer.",
"Take one down and pass it around, 64 bottles of beer on the wall.",
"",
"64 bottles of beer on the wall, 64 bottles of beer.",
"Take one down and pass it around, 63 bottles of beer on the wall.",
"",
"63 bottles of beer on the wall, 63 bottles of beer.",
"Take one down and pass it around, 62 bottles of beer on the wall.",
"",
"62 bottles of beer on the wall, 62 bottles of beer.",
"Take one down and pass it around, 61 bottles of beer on the wall.",
"",
"61 bottles of beer on the wall, 61 bottles of beer.",
"Take one down and pass it around, 60 bottles of beer on the wall.",
"",
"60 bottles of beer on the wall, 60 bottles of beer.",
"Take one down and pass it around, 59 bottles of beer on the wall.",
"",
"59 bottles of beer on the wall, 59 bottles of beer.",
"Take one down and pass it around, 58 bottles of beer on the wall.",
"",
"58 bottles of beer on the wall, 58 bottles of beer.",
"Take one down and pass it around, 57 bottles of beer on the wall.",
"",
"57 bottles of beer on the wall, 57 bottles of beer.",
"Take one down and pass it around, 56 bottles of beer on the wall.",
"",
"56 bottles of beer on the wall, 56 bottles of beer.",
"Take one down and pass it around, 55 bottles of beer on the wall.",
"",
"55 bottles of beer on the wall, 55 bottles of beer.",
"Take one down and pass it around, 54 bottles of beer on the wall.",
"",
"54 bottles of beer on the wall, 54 bottles of beer.",
"Take one down and pass it around, 53 bottles of beer on the wall.",
"",
"53 bottles of beer on the wall, 53 bottles of beer.",
"Take one down and pass it around, 52 bottles of beer on the wall.",
"",
"52 bottles of beer on the wall, 52 bottles of beer.",
"Take one down and pass it around, 51 bottles of beer on the wall.",
"",
"51 bottles of beer on the wall, 51 bottles of beer.",
"Take one down and pass it around, 50 bottles of beer on the wall.",
"",
"50 bottles of beer on the wall, 50 bottles of beer.",
"Take one down and pass it around, 49 bottles of beer on the wall.",
"",
"49 bottles of beer on the wall, 49 bottles of beer.",
"Take one down and pass it around, 48 bottles of beer on the wall.",
"",
"48 bottles of beer on the wall, 48 bottles of beer.",
"Take one down and pass it around, 47 bottles of beer on the wall.",
"",
"47 bottles of beer on the wall, 47 bottles of beer.",
"Take one down and pass it around, 46 bottles of beer on the wall.",
"",
"46 bottles of beer on the wall, 46 bottles of beer.",
"Take one down and pass it around, 45 bottles of beer on the wall.",
"",
"45 bottles of beer on the wall, 45 bottles of beer.",
"Take one down and pass it around, 44 bottles of beer on the wall.",
"",
"44 bottles of beer on the wall, 44 bottles of beer.",
"Take one down and pass it around, 43 bottles of beer on the wall.",
"",
"43 bottles of beer on the wall, 43 bottles of beer.",
"Take one down and pass it around, 42 bottles of beer on the wall.",
"",
"42 bottles of beer on the wall, 42 bottles of beer.",
"Take one down and pass it around, 41 bottles of beer on the wall.",
"",
"41 bottles of beer on the wall, 41 bottles of beer.",
"Take one down and pass it around, 40 bottles of beer on the wall.",
"",
"40 bottles of beer on the wall, 40 bottles of beer.",
"Take one down and pass it around, 39 bottles of beer on the wall.",
"",
"39 bottles of beer on the wall, 39 bottles of beer.",
"Take one down and pass it around, 38 bottles of beer on the wall.",
"",
"38 bottles of beer on the wall, 38 bottles of beer.",
"Take one down and pass it around, 37 bottles of beer on the wall.",
"",
"37 bottles of beer on the wall, 37 bottles of beer.",
"Take one down and pass it around, 36 bottles of beer on the wall.",
"",
"36 bottles of beer on the wall, 36 bottles of beer.",
"Take one down and pass it around, 35 bottles of beer on the wall.",
"",
"35 bottles of beer on the wall, 35 bottles of beer.",
"Take one down and pass it around, 34 bottles of beer on the wall.",
"",
"34 bottles of beer on the wall, 34 bottles of beer.",
"Take one down and pass it around, 33 bottles of beer on the wall.",
"",
"33 bottles of beer on the wall, 33 bottles of beer.",
"Take one down and pass it around, 32 bottles of beer on the wall.",
"",
"32 bottles of beer on the wall, 32 bottles of beer.",
"Take one down and pass it around, 31 bottles of beer on the wall.",
"",
"31 bottles of beer on the wall, 31 bottles of beer.",
"Take one down and pass it around, 30 bottles of beer on the wall.",
"",
"30 bottles of beer on the wall, 30 bottles of beer.",
"Take one down and pass it around, 29 bottles of beer on the wall.",
"",
"29 bottles of beer on the wall, 29 bottles of beer.",
"Take one down and pass it around, 28 bottles of beer on the wall.",
"",
"28 bottles of beer on the wall, 28 bottles of beer.",
"Take one down and pass it around, 27 bottles of beer on the wall.",
"",
"27 bottles of beer on the wall, 27 bottles of beer.",
"Take one down and pass it around, 26 bottles of beer on the wall.",
"",
"26 bottles of beer on the wall, 26 bottles of beer.",
"Take one down and pass it around, 25 bottles of beer on the wall.",
"",
"25 bottles of beer on the wall, 25 bottles of beer.",
"Take one down and pass it around, 24 bottles of beer on the wall.",
"",
"24 bottles of beer on the wall, 24 bottles of beer.",
"Take one down and pass it around, 23 bottles of beer on the wall.",
"",
"23 bottles of beer on the wall, 23 bottles of beer.",
"Take one down and pass it around, 22 bottles of beer on the wall.",
"",
"22 bottles of beer on the wall, 22 bottles of beer.",
"Take one down and pass it around, 21 bottles of beer on the wall.",
"",
"21 bottles of beer on the wall, 21 bottles of beer.",
"Take one down and pass it around, 20 bottles of beer on the wall.",
"",
"20 bottles of beer on the wall, 20 bottles of beer.",
"Take one down and pass it around, 19 bottles of beer on the wall.",
"",
"19 bottles of beer on the wall, 19 bottles of beer.",
"Take one down and pass it around, 18 bottles of beer on the wall.",
"",
"18 bottles of beer on the wall, 18 bottles of beer.",
"Take one down and pass it around, 17 bottles of beer on the wall.",
"",
"17 bottles of beer on the wall, 17 bottles of beer.",
"Take one down and pass it around, 16 bottles of beer on the wall.",
"",
"16 bottles of beer on the wall, 16 bottles of beer.",
"Take one down and pass it around, 15 bottles of beer on the wall.",
"",
"15 bottles of beer on the wall, 15 bottles of beer.",
"Take one down and pass it around, 14 bottles of beer on the wall.",
"",
"14 bottles of beer on the wall, 14 bottles of beer.",
"Take one down and pass it around, 13 bottles of beer on the wall.",
"",
"13 bottles of beer on the wall, 13 bottles of beer.",
"Take one down and pass it around, 12 bottles of beer on the wall.",
"",
"12 bottles of beer on the wall, 12 bottles of beer.",
"Take one down and pass it around, 11 bottles of beer on the wall.",
"",
"11 bottles of beer on the wall, 11 bottles of beer.",
"Take one down and pass it around, 10 bottles of beer on the wall.",
"",
"10 bottles of beer on the wall, 10 bottles of beer.",
"Take one down and pass it around, 9 bottles of beer on the wall.",
"",
"9 bottles of beer on the wall, 9 bottles of beer.",
"Take one down and pass it around, 8 bottles of beer on the wall.",
"",
"8 bottles of beer on the wall, 8 bottles of beer.",
"Take one down and pass it around, 7 bottles of beer on the wall.",
"",
"7 bottles of beer on the wall, 7 bottles of beer.",
"Take one down and pass it around, 6 bottles of beer on the wall.",
"",
"6 bottles of beer on the wall, 6 bottles of beer.",
"Take one down and pass it around, 5 bottles of beer on the wall.",
"",
"5 bottles of beer on the wall, 5 bottles of beer.",
"Take one down and pass it around, 4 bottles of beer on the wall.",
"",
"4 bottles of beer on the wall, 4 bottles of beer.",
"Take one down and pass it around, 3 bottles of beer on the wall.",
"",
"3 bottles of beer on the wall, 3 bottles of beer.",
"Take one down and pass it around, 2 bottles of beer on the wall.",
"",
"2 bottles of beer on the wall, 2 bottles of beer.",
"Take one down and pass it around, 1 bottle of beer on the wall.",
"",
"1 bottle of beer on the wall, 1 bottle of beer.",
"Take it down and pass it around, no more bottles of beer on the wall.",
"",
"No more bottles of beer on the wall, no more bottles of beer.",
"Go to the store and buy some more, 99 bottles of beer on the wall.",
]
self.assertEqual(recite(start=99, take=100), expected)
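# The `recite` under test lives in beer_song.py, which is not part of this
# file; a minimal sketch that satisfies the assertions above (an assumed
# solution, not the exercise author's):
#
#     def _bottles(n):
#         if n == 0:
#             return "no more bottles"
#         return "1 bottle" if n == 1 else "%d bottles" % n
#
#     def recite(start, take=1):
#         lines = []
#         for n in range(start, start - take, -1):
#             if lines:
#                 lines.append("")
#             if n == 0:
#                 lines.append("No more bottles of beer on the wall, "
#                              "no more bottles of beer.")
#                 lines.append("Go to the store and buy some more, "
#                              "99 bottles of beer on the wall.")
#             else:
#                 lines.append("%s of beer on the wall, %s of beer."
#                              % (_bottles(n), _bottles(n)))
#                 lines.append("Take %s down and pass it around, "
#                              "%s of beer on the wall."
#                              % ("it" if n == 1 else "one", _bottles(n - 1)))
#         return lines


if __name__ == '__main__':
    unittest.main()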
d4c07aa542fd2df9f7066b893a929bbebdacca97 | 0eb3cb7493b6cc604a1aea9afc7af02e89b38602 | /Chapter10. Files/file.py | 16b8206030bce27b3aa6d69377aa5c469ab2a262 | []
| no_license | ec4sug4/i | 8b7c2d21ff3e7c763464f3a77ea009683eb17d51 | 1dbd58bb12729749c220b9f1f92f63389e7a886c | refs/heads/master | 2023-05-10T17:08:57.966542 | 2020-07-02T09:33:01 | 2020-07-02T09:33:01 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 115 | py | fileref = open("olympics.txt","r")
line = fileref.readlines()
for i in line[:4]:
print(i)
fileref.close() | [
"[email protected]"
]
| |
91bfa4b69dc8175e14f2c85dffe644cc6f7a0d71 | fe9e6580e954ed62c4e8fd6b860000bb553150a6 | /ecommerce/forms.py | bffb01b5ed4507bffcb530dd54713c62b71512fe | []
| no_license | Brucehaha/ecommerce | 037fb25608e848f5c0fd4ed78f42028d21872e39 | bea5e5a13ad1e958912b0ac99cfc556a593f91f3 | refs/heads/workplace | 2023-01-03T19:35:13.894572 | 2018-06-20T07:22:19 | 2018-06-20T07:22:19 | 124,492,135 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 688 | py | from django import forms
class ContactForm(forms.Form):
fullname = forms.CharField(
widget=forms.TextInput(
attrs={
"class": "form-control",
"placeholder": "Your fullname"
}
)
)
email = forms.EmailField(
widget=forms.EmailInput(
attrs={
"class": "form-control",
"placeholder": "Your Email"
}
)
)
content = forms.CharField(
widget=forms.Textarea(
attrs={
"class": "form-control",
"placeholder": "Year message"
}
)
)
def clean_email(self):
email = self.cleaned_data.get("email")
if not "gmail.com" in email:
raise forms.ValidationError("Email has to be gmail.com")
return email
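# A hedged usage sketch of validating the form in a view; the data values
# are placeholders and `send_contact_email` is a hypothetical helper:
#
#     form = ContactForm(data={"fullname": "Jane Doe",
#                              "email": "[email protected]",
#                              "content": "Hello!"})
#     if form.is_valid():
#         send_contact_email(**form.cleaned_data)
#     else:
#         print(form.errors)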
5ce2a703f5302283b074de6d2a1fb30fb8b91aa4 | bc0938b96b86d1396cb6b403742a9f8dbdb28e4c | /aliyun-python-sdk-nas/aliyunsdknas/request/v20170626/DescribeTagsRequest.py | d76b528b9d21f049ae887b42b56847b5cd568288 | [
"Apache-2.0"
]
| permissive | jia-jerry/aliyun-openapi-python-sdk | fb14d825eb0770b874bc123746c2e45efaf64a6d | e90f3683a250cfec5b681b5f1d73a68f0dc9970d | refs/heads/master | 2022-11-16T05:20:03.515145 | 2020-07-10T08:45:41 | 2020-07-10T09:06:32 | 278,590,780 | 0 | 0 | NOASSERTION | 2020-07-10T09:15:19 | 2020-07-10T09:15:19 | null | UTF-8 | Python | false | false | 2,120 | py |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdknas.endpoint import endpoint_data
class DescribeTagsRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'NAS', '2017-06-26', 'DescribeTags','nas')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_PageNumber(self):
return self.get_query_params().get('PageNumber')
def set_PageNumber(self,PageNumber):
self.add_query_param('PageNumber',PageNumber)
def get_PageSize(self):
return self.get_query_params().get('PageSize')
def set_PageSize(self,PageSize):
self.add_query_param('PageSize',PageSize)
def get_Tags(self):
return self.get_query_params().get('Tags')
def set_Tags(self,Tags):
for i in range(len(Tags)):
if Tags[i].get('Value') is not None:
self.add_query_param('Tag.' + str(i + 1) + '.Value' , Tags[i].get('Value'))
if Tags[i].get('Key') is not None:
self.add_query_param('Tag.' + str(i + 1) + '.Key' , Tags[i].get('Key'))
def get_FileSystemId(self):
return self.get_query_params().get('FileSystemId')
def set_FileSystemId(self,FileSystemId):
		self.add_query_param('FileSystemId',FileSystemId)
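# A hedged usage sketch with the core SDK client; the credentials, region,
# file system ID and tag values are placeholders:
#
#     from aliyunsdkcore.client import AcsClient
#     client = AcsClient('<access-key-id>', '<access-key-secret>', 'cn-hangzhou')
#     request = DescribeTagsRequest()
#     request.set_FileSystemId('0123456789')
#     request.set_Tags([{'Key': 'env', 'Value': 'prod'}])
#     print(client.do_action_with_exception(request))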
1f1a15327737df474e4091401068d90bf7b7a2d8 | df856d5cb0bd4a4a75a54be48f5b91a62903ee6e | /jishaku/__init__.py | be18c93d969f66dcdc330dc9e0ffd89dc6bb8cc2 | [
"MIT",
"Apache-2.0"
]
| permissive | mortalsky/jishaku | 4c89bd69f6e1efcc45fcfdcc81427c71e10dc1de | 9cbbf64dd83697559a50c64653350253b876165a | refs/heads/master | 2023-07-20T04:55:19.144528 | 2021-01-22T08:18:12 | 2021-01-22T08:18:12 | 299,701,523 | 0 | 0 | MIT | 2020-09-29T18:16:24 | 2020-09-29T18:16:23 | null | UTF-8 | Python | false | false | 452 | py |
# -*- coding: utf-8 -*-
"""
jishaku
~~~~~~~
A discord.py extension including useful tools for bot development and debugging.
:copyright: (c) 2021 Devon (Gorialis) R
:license: MIT, see LICENSE for more details.
"""
# pylint: disable=wildcard-import
from jishaku.cog import * # noqa: F401
from jishaku.features.baseclass import Feature # noqa: F401
from jishaku.meta import * # noqa: F401
__all__ = (
'Jishaku',
'Feature',
'setup'
)
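# Typical installation into a discord.py bot (a sketch; the prefix and bot
# setup are assumptions):
#
#     from discord.ext import commands
#     bot = commands.Bot(command_prefix='?')
#     bot.load_extension('jishaku')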