| Column | Type | Range / distinct values |
|---|---|---|
| blob_id | string | length 40–40 |
| directory_id | string | length 40–40 |
| path | string | length 3–616 |
| content_id | string | length 40–40 |
| detected_licenses | sequence | length 0–112 |
| license_type | string | 2 classes |
| repo_name | string | length 5–115 |
| snapshot_id | string | length 40–40 |
| revision_id | string | length 40–40 |
| branch_name | string | 777 classes |
| visit_date | timestamp[us] | 2015-08-06 10:31:46 – 2023-09-06 10:44:38 |
| revision_date | timestamp[us] | 1970-01-01 02:38:32 – 2037-05-03 13:00:00 |
| committer_date | timestamp[us] | 1970-01-01 02:38:32 – 2023-09-06 01:08:06 |
| github_id | int64 | 4.92k–681M (nullable) |
| star_events_count | int64 | 0–209k |
| fork_events_count | int64 | 0–110k |
| gha_license_id | string | 22 classes |
| gha_event_created_at | timestamp[us] | 2012-06-04 01:52:49 – 2023-09-14 21:59:50 (nullable) |
| gha_created_at | timestamp[us] | 2008-05-22 07:58:19 – 2023-08-21 12:35:19 (nullable) |
| gha_language | string | 149 classes |
| src_encoding | string | 26 classes |
| language | string | 1 class |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 3–10.2M |
| extension | string | 188 classes |
| content | string | length 3–10.2M |
| authors | sequence | length 1–1 |
| author_id | string | length 1–132 |

Each `|`-separated row below follows this column order; the `content` field holds the full source file.
2cf9a70811ec72e263f4b6c70a8cc1bbd65c0e75 | 830acb926cc5cf5a12f2045c8497d6f4aa1c2ef2 | /HyperNews Portal/Problems/Limitations/task.py | 4a672123fc6df90e8780fdc4bcd6bac7ef3634d1 | [] | no_license | BuyankinM/JetBrainsAcademyProjects | ca2223875ea4aab3ee7fceedc8e293bdb6e1fdcf | d5f9fcde4298af714960b2755f762141de796694 | refs/heads/main | 2023-02-26T05:47:26.070972 | 2021-02-03T22:10:53 | 2021-02-03T22:10:53 | 335,762,702 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 77 | py | import math
real_number = float(input())
check = math.isfinite(real_number) | [
"[email protected]"
] | |
ebebad6c7731a9504ee607513be35017d718188d | 8a5ab3d33e3b653c4c64305d81a85f6a4582d7ac | /PySide/QtCore/QPointF.py | 2c350c353ea530cc837989441679f71325e61e69 | [
"Apache-2.0"
] | permissive | sonictk/python-skeletons | be09526bf490856bb644fed6bf4e801194089f0d | 49bc3fa51aacbc2c7f0c7ab86dfb61eefe02781d | refs/heads/master | 2020-04-06T04:38:01.918589 | 2016-06-09T20:37:43 | 2016-06-09T20:37:43 | 56,334,503 | 0 | 0 | null | 2016-04-15T16:30:42 | 2016-04-15T16:30:42 | null | UTF-8 | Python | false | false | 3,951 | py | # encoding: utf-8
# module PySide.QtCore
# from /corp.blizzard.net/BFD/Deploy/Packages/Published/ThirdParty/Qt4.8.4/2015-05-15.163857/prebuilt/linux_x64_gcc41_python2.7_ucs4/PySide/QtCore.so
# by generator 1.138
# no doc
# no imports
from _Object import _Object
class QPointF(_Object):
# no doc
def isNull(self, *args, **kwargs): # real signature unknown
pass
def manhattanLength(self, *args, **kwargs): # real signature unknown
pass
def setX(self, *args, **kwargs): # real signature unknown
pass
def setY(self, *args, **kwargs): # real signature unknown
pass
def toPoint(self, *args, **kwargs): # real signature unknown
pass
def toTuple(self, *args, **kwargs): # real signature unknown
pass
def x(self, *args, **kwargs): # real signature unknown
pass
def y(self, *args, **kwargs): # real signature unknown
pass
def __add__(self, y): # real signature unknown; restored from __doc__
""" x.__add__(y) <==> x+y """
pass
def __copy__(self, *args, **kwargs): # real signature unknown
pass
def __div__(self, y): # real signature unknown; restored from __doc__
""" x.__div__(y) <==> x/y """
pass
def __eq__(self, y): # real signature unknown; restored from __doc__
""" x.__eq__(y) <==> x==y """
pass
def __ge__(self, y): # real signature unknown; restored from __doc__
""" x.__ge__(y) <==> x>=y """
pass
def __gt__(self, y): # real signature unknown; restored from __doc__
""" x.__gt__(y) <==> x>y """
pass
def __iadd__(self, y): # real signature unknown; restored from __doc__
""" x.__iadd__(y) <==> x+=y """
pass
def __init__(self, *more): # real signature unknown; restored from __doc__
""" x.__init__(...) initializes x; see help(type(x)) for signature """
pass
def __isub__(self, y): # real signature unknown; restored from __doc__
""" x.__isub__(y) <==> x-=y """
pass
def __le__(self, y): # real signature unknown; restored from __doc__
""" x.__le__(y) <==> x<=y """
pass
def __lt__(self, y): # real signature unknown; restored from __doc__
""" x.__lt__(y) <==> x<y """
pass
def __mul__(self, y): # real signature unknown; restored from __doc__
""" x.__mul__(y) <==> x*y """
pass
def __neg__(self): # real signature unknown; restored from __doc__
""" x.__neg__() <==> -x """
pass
def __ne__(self, y): # real signature unknown; restored from __doc__
""" x.__ne__(y) <==> x!=y """
pass
def __nonzero__(self): # real signature unknown; restored from __doc__
""" x.__nonzero__() <==> x != 0 """
pass
def __radd__(self, y): # real signature unknown; restored from __doc__
""" x.__radd__(y) <==> y+x """
pass
def __rdiv__(self, y): # real signature unknown; restored from __doc__
""" x.__rdiv__(y) <==> y/x """
pass
def __reduce__(self, *args, **kwargs): # real signature unknown
pass
def __repr__(self): # real signature unknown; restored from __doc__
""" x.__repr__() <==> repr(x) """
pass
def __rmul__(self, y): # real signature unknown; restored from __doc__
""" x.__rmul__(y) <==> y*x """
pass
def __rsub__(self, y): # real signature unknown; restored from __doc__
""" x.__rsub__(y) <==> y-x """
pass
def __rtruediv__(self, y): # real signature unknown; restored from __doc__
""" x.__rtruediv__(y) <==> y/x """
pass
def __sub__(self, y): # real signature unknown; restored from __doc__
""" x.__sub__(y) <==> x-y """
pass
def __truediv__(self, y): # real signature unknown; restored from __doc__
""" x.__truediv__(y) <==> x/y """
pass
__new__ = None
| [
"[email protected]"
] | |
0b13a187ec32ce7aa897761988d4c15a6c652734 | ab3d5ea4bf0e48914ed14fcf16e5b1d752f199ba | /pcg_libraries/src/pcg_gazebo/parsers/sdf/pose.py | c59190b53c6490da0a3566d7aeb02a72a6f5997d | [
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference",
"CC0-1.0",
"MIT",
"BSD-3-Clause",
"LicenseRef-scancode-public-domain"
] | permissive | boschresearch/pcg_gazebo_pkgs | 5f1004d0de874d4d1abc4eb695777013027158b2 | 1c112d01847ca4f8da61ce9b273e13d13bc7eb73 | refs/heads/master | 2020-06-11T06:28:36.228431 | 2020-02-07T13:05:28 | 2020-02-07T13:05:28 | 193,876,180 | 44 | 3 | NOASSERTION | 2020-02-07T12:00:55 | 2019-06-26T09:45:05 | Python | UTF-8 | Python | false | false | 2,348 | py | # Copyright (c) 2019 - The Procedural Generation for Gazebo authors
# For information on the respective copyright owner see the NOTICE file
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..types import XMLVector
class Pose(XMLVector):
_NAME = 'pose'
_TYPE = 'sdf'
_ATTRIBUTES = dict(
frame=''
)
def __init__(self):
XMLVector.__init__(self, 6)
self.reset()
@property
def frame(self):
return self.attributes['frame']
@frame.setter
def frame(self, value):
assert isinstance(value, str)
self.attributes['frame'] = value
@property
def pose(self):
return self.value
@pose.setter
def pose(self, value):
XMLVector._set_value(self, value)
@property
def x(self):
return self.value[0]
@x.setter
def x(self, value):
assert self._is_scalar(value)
self.value[0] = float(value)
@property
def y(self):
return self.value[1]
@y.setter
def y(self, value):
assert self._is_scalar(value)
self.value[1] = float(value)
@property
def z(self):
return self.value[2]
@z.setter
def z(self, value):
assert self._is_scalar(value)
self.value[2] = float(value)
@property
def roll(self):
return self.value[3]
@roll.setter
def roll(self, value):
assert self._is_scalar(value)
self.value[3] = float(value)
@property
def pitch(self):
return self.value[4]
@pitch.setter
def pitch(self, value):
assert self._is_scalar(value)
self.value[4] = float(value)
@property
def yaw(self):
return self.value[5]
@yaw.setter
def yaw(self, value):
assert self._is_scalar(value)
self.value[5] = float(value)
| [
"[email protected]"
] | |
c623f87b22b649226f52dc2e56f8651ae57fca85 | 484da6ff9bda06183c3d3bbda70c6d11e1ad6b67 | /.history/main_20191007162714.py | 3c0a5d2227f2184e32bd46b4e6485a71ab54093b | [] | no_license | Shynar88/TSP | 009a88bbddb29214921de4d0cf1761dea61b7b75 | 889751ab7d6a91469e86c6583f3c91b85857edd9 | refs/heads/master | 2020-08-06T22:40:49.217474 | 2020-01-14T13:41:44 | 2020-01-14T13:41:44 | 213,185,830 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,330 | py | import argparse
import math
import random
import operator
class City():
def __init__(self, index, x_coord, y_coord):
self.index = index
self.x_coord = x_coord
self.y_coord = y_coord
def __repr__(self):
return "[" + str(self.x_coord) + ", " + str(self.y_coord) + "]"
def get_distance_to(self, other):
dx = (other.x_coord - self.x_coord) ** 2
dy = (other.y_coord - self.y_coord) ** 2
return math.sqrt(dx + dy)
class Instance():
def __init__(self, route):
self.route = route
self.route_distance = self.get_route_distance()
self.fitness = self.get_fitness()
def get_route_distance(self):
distance = 0
for i in range(len(self.route)):
src = self.route[i]
dest = self.route[i + 1] if i + 1 < len(self.route) else self.route[0]
distance += src.get_distance_to(dest)
return distance
def get_fitness(self):
return 1 / self.route_distance
class GeneticAlgorithm():
def __init__(self, population_size, mat_pool_size, tournament_size, elite_size, max_generations, crossover_rate, mutation_rate, cities_list):
self.population_size = population_size
self.mat_pool_size = mat_pool_size
self.tournament_size = tournament_size
self.elite_size = elite_size
self.max_generations = max_generations
self.crossover_rate = crossover_rate
self.mutation_rate = mutation_rate
self.cities_list = cities_list
def generate_instance(self):
route = random.sample(self.cities_list, len(self.cities_list))
instance = Instance(route)
return instance
def create_initial_population(self):
initial_population = []
for _ in range(self.population_size):
initial_population.append(self.generate_instance())
return initial_population
def crossover(self, p1, p2): # proposing good crossover method https://www.hindawi.com/journals/cin/2017/7430125/
#implement simple crossover then try to enhance it
#ordered crossover
li = 0
hi = 0
while hi <= li:
li = int(random.random() * len(p1.route))
hi = int(random.random() * len(p1.route))
chunk = p1.route[li:hi]
child_route = []
not_used_el_in_p2 = [el for el in p2.route if el not in chunk]
pointer = 0
for _ in range(li):
child_route.append(not_used_el_in_p2[pointer])
pointer += 1
child_route += chunk
for _ in range(hi, len(p1.route)):
child_route.append(not_used_el_in_p2[pointer])
pointer += 1
child = Instance(child_route)
return child
def mutate(self, instance):
instance = instance.route
if random.random() < self.mutation_rate:
i1, i2 = random.sample(range(len(self.cities_list)), 2)
instance[i1], instance[i2] = instance[i2], instance[i1]
def selection(self, population):
#experiment on selection way
#implement the simple one
#Tournament selection P/2 size might be better
mating_pool = []
while len(mating_pool) < self.mat_pool_size:
participants = random.sample(population, self.tournament_size)
fittest = max(participants, key=operator.attrgetter('fitness'))
mating_pool.append(fittest)
return mating_pool
def generate_path(self):
# Step 1. Create an initial population of P chromosomes.
population = self.create_initial_population()
# Step 2. Evaluate the fitness of each chromosome. done in create population
for generation in range(self.max_generations):
print(f"generation number: {generation}")
# Step 3. Choose P/2 parents from the current population via proportional selection.
mating_pool = self.selection(population)
population_sorted = sorted(population, key=lambda instance: instance.fitness, reverse=True)
old_elite = population_sorted[:self.elite_size]
new_population = old_elite
while len(new_population) < self.population_size:
# Step 4. Randomly select two parents to create offspring using crossover operator.
parents = random.sample(mating_pool, 2)
child = self.crossover(parents[0], parents[1])
# Step 5. Apply mutation operators for minor changes in the results.
self.mutate(child)
new_population.append(child)
# Step 6. Repeat Steps 4 and 5 until all parents are selected and mated.
# Step 7. Replace old population of chromosomes with new one.
population = new_population
# Step 8. Evaluate the fitness of each chromosome in the new population. Already done in crossover when creating the child
# Step 9. Terminate if the number of generations meets some upper bound; otherwise go to Step 3.
return 0
# parses command line arguments
#is mating pool size also a hyperparameter???????
def parse_arguments():
parser = argparse.ArgumentParser()
parser.add_argument('-p', type=str, default="a280.tsp", help="path to the input file")
parser.add_argument('-s', type=int, default=50, help="population size")
parser.add_argument('-ms', type=int, default=25, help="mating pool size")
parser.add_argument('-ts', type=int, default=5, help="tournament size")
parser.add_argument('-e', type=int, default=20, help="elite_size")
parser.add_argument('-mg', type=int, default=50, help="max generations")
parser.add_argument('-cr', type=float, default=0.3, help="crossover rate")
parser.add_argument('-mr', type=float, default=0.1, help="mutation rate")
args = parser.parse_args()
return args.p, args.s, args.ms, args.ts, args.e, args.mg, args.cr, args.mr
# parses file, returns list of city coordinates(ex: [(x1, y1), ...])
def parse(file_path): #coordinates start from the 7th line, end with EOF
cities = []
f = open(file_path, "r")
for _ in range(6):
f.readline()
for line in f:
line_contents = line.split()
if len(line_contents) == 3:
cities.append((line_contents[0], line_contents[1], line_contents[2]))
f.close()
return cities
def create_cities(coordinates_list):
cities_list = []
for coordinates in coordinates_list:
cities_list.append(City(coordinates[0], coordinates[1], coordinates[2]))
return cities_list
def main():
path, population_size, mat_pool_size, tournament_size, elite_size, max_generations, crossover_rate, mutation_rate = parse_arguments()
#delete prints
print(path)
print(population_size)
print(mat_pool_size)
print(tournament_size)
print(elite_size)
print(max_generations)
print(crossover_rate)
print(mutation_rate)
#####
coordinates_list = parse(path)
cities_list = create_cities(coordinates_list)
gen_algo = GeneticAlgorithm(population_size, mat_pool_size, tournament_size, elite_size, max_generations, crossover_rate, mutation_rate, cities_list)
# distance = gen_algo.generate_path()
# print(distance)
if __name__ == "__main__":
main() | [
"[email protected]"
] | |
ce640e99ab9b4f9311a737f6a8f10585751a2bcf | 3ce592352627591346ea33ea0c2665ad879414e2 | /References/web-scraping/101scrapetest.py | 64aa6f9bfe35ee27d2cbd8684578f2c2e1fafc06 | [
"MIT"
] | permissive | royqh1979/python_libs_usage | 113df732ef106f4a5faae1343493756fd703c8c0 | 57546d5648d8a6b7aca7d7ff9481aa7cd4d8f511 | refs/heads/master | 2021-04-16T18:14:43.835482 | 2021-01-11T03:55:25 | 2021-01-11T03:55:25 | 249,374,754 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 290 | py | from urllib.request import urlopen
from bs4 import BeautifulSoup
html = urlopen('http://www.pythonscraping.com/pages/warandpeace.html')
bs = BeautifulSoup(html.read(),'html5lib')
print(bs.h1)
namelist = bs.findAll('span',{'class':'green'})
for name in namelist:
print(name.get_text()) | [
"[email protected]"
] | |
04483072d03f682fa80d3014d1f0fa2fc7e36601 | 1a1b857c67768f20de0df42a7edb87edd57d9a33 | /Quick_Sort/Quick_Sort_Practice_November_2.py | 1b6901fe97a57dc5b190a20e25f2f70f0af9dfb8 | [] | no_license | LilySu/Python_Practice | 7c7eb30c549239f27680f410d365289b67813c5e | 26767e64742149813ecbc91815454836ffce8b6e | refs/heads/master | 2023-07-29T01:14:19.751490 | 2021-08-15T01:09:41 | 2021-08-15T01:09:41 | 279,446,861 | 1 | 2 | null | 2020-10-09T04:10:40 | 2020-07-14T01:05:55 | Jupyter Notebook | UTF-8 | Python | false | false | 760 | py | def swap(a, b, arr):
arr[a], arr[b] = arr[b], arr[a]
def partition(elements, start, end):
pivot_index = start
pivot = elements[pivot_index]
while start < end:
while start < len(elements) and elements[start] <= pivot:
start += 1
while elements[end] > pivot:
end -= 1
if start < end:
swap(start, end, elements)
swap(pivot_index, end, elements)
return end
def quick_sort(elements, start, end):
if start < end:
pi = partition(elements, start, end)
quick_sort(elements, start, pi - 1)
quick_sort(elements, pi + 1, end)
if __name__ == "__main__":
elements = [8, 24, 92, 14, 3, 47]
quick_sort(elements, 0, len(elements) - 1)
print(elements) | [
"[email protected]"
] | |
fd9b2b5f03d3dc5bc61de7795ca876950a6683e0 | 7e0393251012e91213dddfd9c93f6b6b73ca2bfe | /tests/unit/test_drizzle_error.py | e7d05e37e25a68aede0c2312672d6c495e350d23 | [
"MIT"
] | permissive | josephhardinee/cloudnetpy | ff4cc0303d7f2ae40f2d3466298257659ff3ccde | c37760db3cdfe62ae769f8090ba621803ec9a92c | refs/heads/master | 2021-03-06T15:37:51.529776 | 2020-02-13T09:05:29 | 2020-02-13T09:05:29 | 246,207,849 | 0 | 0 | MIT | 2020-03-10T04:29:48 | 2020-03-10T04:26:16 | null | UTF-8 | Python | false | false | 3,347 | py | import numpy as np
import numpy.testing as testing
import pytest
from cloudnetpy.products import drizzle_error as de
DRIZZLE_PARAMETERS = {'Do': np.array([[0.0001, 0.01, 0.000001],
[0.001, 0.000001, 0.0001]])}
DRIZZLE_INDICES = {'drizzle': np.array([[1, 1, 1], [1, 1, 1]], dtype=bool),
'small': np.array([[1, 0, 0], [0, 0, 1]], dtype=bool),
'tiny': np.array([[0, 0, 1], [0, 1, 0]], dtype=bool)}
ERROR_INPUT = (np.array([[0.01, 0.34, 0.5],
[0.2, 0.3, 0.56]]), 0.14)
BIAS_INPUT = (0.01, 0.57)
@pytest.mark.parametrize('key, value', [
('drizzle', [False, True, True, True]),
('small', [False, True, False, False]),
('tiny', [False, False, False, True])])
def test_get_drizzle_indices(key, value):
dia = np.array([-1, 2 * 1e-5, 1, 1e-6])
d = de._get_drizzle_indices(dia)
testing.assert_array_equal(d[key], value)
@pytest.mark.parametrize('key', [
'Do_error', 'drizzle_lwc_error', 'drizzle_lwf_error', 'S_error'])
def test_calc_parameter_errors(key):
x = de._calc_parameter_errors(DRIZZLE_INDICES, ERROR_INPUT)
assert key in x.keys()
@pytest.mark.parametrize('key', [
'Do_bias', 'drizzle_lwc_bias', 'drizzle_lwf_bias'])
def test_calc_parameter_biases(key):
x = de._calc_parameter_biases(BIAS_INPUT)
assert key in x.keys()
@pytest.fixture
def results():
errors = de._calc_parameter_errors(DRIZZLE_INDICES, ERROR_INPUT)
biases = de._calc_parameter_biases(BIAS_INPUT)
return {**errors, **biases}
@pytest.mark.parametrize('key', [
'drizzle_N_error', 'v_drizzle_error', 'mu_error'])
def test_add_supplementary_errors(results, key):
x = de._add_supplementary_errors(results, DRIZZLE_INDICES, ERROR_INPUT)
assert key in x.keys()
def test_calc_v_error(results):
results['Do_error'] = np.array([[2, 2, 2], [2, 2, 2]])
x = de._add_supplementary_errors(results, DRIZZLE_INDICES, ERROR_INPUT)
testing.assert_almost_equal(x['v_drizzle_error'][DRIZZLE_INDICES['tiny']], 4)
@pytest.mark.parametrize('key', [
'drizzle_N_bias', 'v_drizzle_bias'])
def test_add_supplementary_biases(results, key):
x = de._add_supplementary_biases(results, BIAS_INPUT)
assert key in x.keys()
def test_calc_error():
from cloudnetpy.utils import l2norm_weighted
compare = l2norm_weighted(ERROR_INPUT, 1, 1)
testing.assert_almost_equal(de._calc_error(1, 1, ERROR_INPUT), compare)
def test_stack_errors():
DRIZZLE_INDICES['drizzle'] = np.array([[0, 1, 1], [1, 1, 0]], dtype=bool)
compare = np.ma.array(ERROR_INPUT[0], mask=[[1, 0, 0], [0, 0, 1]])
x = de._stack_errors(ERROR_INPUT[0], DRIZZLE_INDICES)
testing.assert_array_almost_equal(x, compare)
@pytest.mark.parametrize("x, result", [
(-1000, -1),
(-100, -0.99999),
(-10, -0.9),
(-1, np.exp(-1 / 10 * np.log(10)) - 1)])
def test_db2lin(x, result):
testing.assert_array_almost_equal(de.db2lin(x), result, decimal=5)
def test_db2lin_raise():
with pytest.raises(ValueError):
de.db2lin(150)
@pytest.mark.parametrize("x, result", [
(1e6, 60),
(1e5, 50),
(1e4, 40)])
def test_lin2db(x, result):
testing.assert_array_almost_equal(de.lin2db(x), result, decimal=3)
def test_lin2db_raise():
with pytest.raises(ValueError):
de.lin2db(-1)
| [
"[email protected]"
] | |
1abc67418dafabbb3f468f4ff08fea5c925b3bde | d86c5aa92a9763510b539776510ad9795d33ae89 | /September 2020/03-Multidimensional-Lists/03-Primary-Diagonal.py | cb353fab20268a1b0cc58deae94420f1b386b6f6 | [
"MIT"
] | permissive | eclipse-ib/Software-University-Professional-Advanced-Module | 42e3bd50ac5f0df8082add29f4113cffb87889e1 | 636385f9e5521840f680644824d725d074b93c9a | refs/heads/main | 2023-02-13T06:02:53.246980 | 2021-01-06T21:12:14 | 2021-01-06T21:12:14 | 306,282,871 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 767 | py | #Съкратен Вариант с комрехеншън:
size = int(input())
matrix = [
[int(el) for el in input().split()]
for i in range(size)
]
current_sum = sum([matrix[i][i] for i in range(size)])
print(current_sum)
# Variant using range:
# size = int(input())
#
# matrix = [
# [int(el) for el in input().split()]
# for i in range(size)
# ]
#
# current_sum = 0
# for i in range(size):
# current_sum += matrix[i][i]
#
# print(current_sum)
# Variant traversing the matrix:
# size = int(input())
#
# matrix = [
# [int(el) for el in input().split()]
# for i in range(size)
# ]
#
# index = 0
# current_sum = 0
# for j in matrix:
# current_sum += j[index]
# index += 1
# print(current_sum)
| [
"[email protected]"
] | |
db89b4926bf8f251c68f068747c97003c1c04fbc | cfac0f4f862180baae078bd7656ac41c8f946006 | /Day22/full.py | 53638388312ad69de0807e67cf6732d90355eefc | [] | no_license | RaspiKidd/AoC2017 | bcf4a8c161b48b2b8f89745d6ff5b741f023b5b7 | 2be828462cd5d56e2f8a8f636525359bb4de045e | refs/heads/master | 2021-09-01T20:07:34.228665 | 2017-12-28T14:25:08 | 2017-12-28T14:25:08 | 112,738,515 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,634 | py | def read():
with open("data.txt") as f:
inpt = f.read().splitlines()
offset = len(inpt) // 2
infected = set()
for r, line in enumerate(inpt):
for c, ch in enumerate(line):
if ch == '#':
infected.add((r - offset, c - offset))
return infected
infected = read()
dirs = [(-1, 0), (0, -1), (1, 0), (0, 1)]
d = 0
virusAt = (0, 0)
def burst():
global infected, d, virusAt
infectionCaused = False
if virusAt in infected:
d = (d - 1) % 4
infected.remove(virusAt)
else:
d = (d + 1) % 4
infected.add(virusAt)
infectionCaused = True
virusAt = (virusAt[0] + dirs[d][0], virusAt[1] + dirs[d][1])
return infectionCaused
numInfections = 0
for i in range(10000):
if burst():
numInfections += 1
# Part 1 answer
print(numInfections)
clean = 0
infected = 1
weak = 2
flagged = 3
state = {k: infected for k in read()}
virusAt = (0, 0)
def burst2():
global state, d, virusAt
infectionCaused = False
currentState = state.get(virusAt, 0)
if currentState == clean:
d = (d + 1) % 4
state[virusAt] = weak
elif currentState == weak:
state[virusAt] = infected
infectionCaused = True
elif currentState == infected:
d = (d - 1) % 4
state[virusAt] = flagged
else: # FLAGGED
d = (d + 2) % 4
del state[virusAt]
virusAt = (virusAt[0] + dirs[d][0], virusAt[1] + dirs[d][1])
return infectionCaused
numInfections = 0
for i in range(10000000):
if burst2():
numInfections += 1
# part 2 answer
print (numInfections)
| [
"[email protected]"
] | |
286590a9fe52b4359057b9360cd7b7a404aa8d70 | fe18994a1880f347d8004383434842286b9dccd3 | /python_stack/flask/flask_fundamentals/Dojo_Survey/server.py | 335aa9e1c425addeaea0b58be5359da8056a3f95 | [] | no_license | Anbousi/Python | 682d5b00555ab3183d06afddb4c5f6e1d5739f6c | 4f05dd8ec62e80a28ca607feae976d9220a62227 | refs/heads/master | 2023-05-06T03:37:28.878915 | 2021-05-30T19:11:28 | 2021-05-30T19:11:28 | 364,501,098 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 695 | py | from flask import Flask , render_template , request , redirect
app = Flask(__name__)
@app.route('/')
def main():
return render_template('index.html')
@app.route('/result' , methods=['POST'])
def result():
name_form = request.form['name']
location_form = request.form['location']
language_form = request.form['language']
comment_form = request.form['comment']
radio_form = request.form['radio']
check_form = request.form['check']
print(check_form)
return render_template('result.html' , name_form = name_form , location_form = location_form , language_form = language_form , comment_form = comment_form )
if __name__ == '__main__':
app.run(debug = True) | [
"[email protected]"
] | |
2ab9f2f34b31a7152edd0e7524c21dddd1269df8 | 247389d0b916f972297fe3c38d262502a6cfa084 | /morse | ef7bed09c034b15fde12125c089eb665e1695bcd | [] | no_license | reteps/raspi | 2e69fee4eb96e4a43059f3125c79cf577e2b5bb6 | 96771f0525b3ad71c9b13a36de49b599c5769310 | refs/heads/master | 2021-09-28T05:22:32.999241 | 2017-07-26T13:24:51 | 2017-07-26T13:24:51 | 98,200,765 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,205 | #!/usr/bin/env python3
import RPi.GPIO as GPIO
import pilights, time
def to_morse_code(message):
morseAlphabet ={
"A" : ".-",
"B" : "-...",
"C" : "-.-.",
"D" : "-..",
"E" : ".",
"F" : "..-.",
"G" : "--.",
"H" : "....",
"I" : "..",
"J" : ".---",
"K" : "-.-",
"L" : ".-..",
"M" : "--",
"N" : "-.",
"O" : "---",
"P" : ".--.",
"Q" : "--.-",
"R" : ".-.",
"S" : "...",
"T" : "-",
"U" : "..-",
"V" : "...-",
"W" : ".--",
"X" : "-..-",
"Y" : "-.--",
"Z" : "--..",
" " : "/"
}
output = ""
for letter in message.upper():
output += morseAlphabet[letter]
return output
pin = 17
lights = pilights.Lights(pin)
raw_message = input("Message > ")
message = to_morse_code(raw_message)
shorttime = 0.1
longtime = 0.4
split = 0.2
word = 0.6
print(message)
for character in message:
if character == ".":
lights.onoff(pin,shorttime)
elif character == "/":
time.sleep(word)
elif character == "-":
lights.onoff(pin,longtime)
time.sleep(split)
| [
"[email protected]"
] | ||
69354a0bd822307b273f6b1b5fdfdcb3a5c10b88 | 16a5c9c9f0d7519a6808efc61b592b4b614102cf | /Python/16.py | 3672e6b7b0f7551a670966ce8b46ac170ca86da6 | [] | no_license | kevin851066/Leetcode | c1d86b2e028526231b80c6d4fb6d0be7ae8d39e5 | 885a9af8a7bee3c228c7ae4e295dca810bd91d01 | refs/heads/main | 2023-08-10T16:50:12.426440 | 2021-09-28T15:23:26 | 2021-09-28T15:23:26 | 336,277,469 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,012 | py | class Solution:
def threeSumClosest(self, nums, target):
'''
:type: nums: List[int]
:type: target: int
:rtype: int
'''
nums.sort()
dis = float('inf')
res = 0
for i in range(len(nums)-2):
if i == 0 or nums[i] != nums[i-1]:
l, r = i + 1, len(nums) - 1
while r > l:
s = nums[i] + nums[r] + nums[l]
diff = abs(target - s)
if diff < dis:
dis = diff
res = s
if target > s:
while r > l and nums[l] == nums[l+1]:
l += 1
l += 1
elif target < s:
while r > l and nums[r] == nums[r-1]:
r -= 1
r -= 1
else:
return res
return res
| [
"[email protected]"
] | |
89bc553261509785779919691202bc8ff9d94257 | 5c69e63f3bb1286a79cb81ca70c969bccd65d740 | /bocadillo/exceptions.py | e8cfd93892c17f615e36715a36c0bba654f5a71f | [
"MIT"
] | permissive | stjordanis/bocadillo | 85dc5895966d3e2031df365db55e4def156e92aa | 658cce55b196d60489530aaefde80b066cb8054b | refs/heads/master | 2020-04-14T09:36:47.245246 | 2019-01-01T19:27:37 | 2019-01-01T19:27:37 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,196 | py | from http import HTTPStatus
from typing import Union, Any, List
from starlette.websockets import WebSocketDisconnect as _WebSocketDisconnect
WebSocketDisconnect = _WebSocketDisconnect
class HTTPError(Exception):
"""Raised when an HTTP error occurs.
You can raise this within a view or an error handler to interrupt
request processing.
# Parameters
status (int or HTTPStatus):
the status code of the error.
detail (any):
extra detail information about the error. The exact rendering is
determined by the configured error handler for `HTTPError`.
# See Also
- [HTTP response status codes (MDN web docs)](https://developer.mozilla.org/en-US/docs/Web/HTTP/Status)
"""
def __init__(self, status: Union[int, HTTPStatus], detail: Any = ""):
if isinstance(status, int):
status = HTTPStatus(status)
else:
assert isinstance(
status, HTTPStatus
), f"Expected int or HTTPStatus, got {type(status)}"
self._status = status
self.detail = detail
@property
def status_code(self) -> int:
"""Return the HTTP error's status code, e.g. `404`."""
return self._status.value
@property
def status_phrase(self) -> str:
"""Return the HTTP error's status phrase, e.g. `"Not Found"`."""
return self._status.phrase
@property
def title(self) -> str:
"""Return the HTTP error's title, e.g. `"404 Not Found"`."""
return f"{self.status_code} {self.status_phrase}"
def __str__(self):
return self.title
class UnsupportedMediaType(Exception):
"""Raised when trying to use an unsupported media type.
# Parameters
media_type (str):
the unsupported media type.
available (list of str):
a list of supported media types.
"""
def __init__(self, media_type: str, available: List[str]):
self._media_type = media_type
self._available = available
def __str__(self):
return f'{self._media_type} (available: {", ".join(self._available)})'
class RouteDeclarationError(Exception):
"""Raised when a route is ill-declared."""
| [
"[email protected]"
] | |
44c36be3d14151335716e257311f97e0760b11f5 | 3712a929d1124f514ea7af1ac0d4a1de03bb6773 | /开班笔记/个人项目/weather/venv/Scripts/pip3.6-script.py | d64c53761d2ebf358b440b0154196e579055f766 | [] | no_license | jiyabing/learning | abd82aa3fd37310b4a98b11ea802c5b0e37b7ad9 | 6059006b0f86aee9a74cfc116d2284eb44173f41 | refs/heads/master | 2020-04-02T20:47:33.025331 | 2018-10-26T05:46:10 | 2018-10-26T05:46:10 | 154,779,387 | 0 | 0 | null | null | null | null | GB18030 | Python | false | false | 450 | py | #!E:\学习文件\python学习资料\开班笔记\个人项目\weather\venv\Scripts\python.exe -x
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==10.0.1','console_scripts','pip3.6'
__requires__ = 'pip==10.0.1'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('pip==10.0.1', 'console_scripts', 'pip3.6')()
)
| [
"[email protected]"
] | |
c6870f0f392d973959a4e8236a2aadf2507635ad | eebb210f13d452822d46432643215a4c8b656906 | /bsl_21716/settings.py | f2b659a27d63e57f748807a78ce2ab48d20a787c | [] | no_license | crowdbotics-apps/bsl-21716 | 5e817b94f0a692469a986f3955cbaf1e813102c9 | 3e4cb348cdd4f84c5b78c35bf5c4f4f37d58fdbb | refs/heads/master | 2023-01-03T18:12:09.360592 | 2020-10-19T19:11:41 | 2020-10-19T19:11:41 | 305,488,134 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,823 | py | """
Django settings for bsl_21716 project.
Generated by 'django-admin startproject' using Django 2.2.2.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
import environ
import logging
env = environ.Env()
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = env.bool("DEBUG", default=False)
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = env.str("SECRET_KEY")
ALLOWED_HOSTS = env.list("HOST", default=["*"])
SITE_ID = 1
SECURE_PROXY_SSL_HEADER = ("HTTP_X_FORWARDED_PROTO", "https")
SECURE_SSL_REDIRECT = env.bool("SECURE_REDIRECT", default=False)
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.sites'
]
LOCAL_APPS = [
'home',
'users.apps.UsersConfig',
]
THIRD_PARTY_APPS = [
'rest_framework',
'rest_framework.authtoken',
'rest_auth',
'rest_auth.registration',
'bootstrap4',
'allauth',
'allauth.account',
'allauth.socialaccount',
'allauth.socialaccount.providers.google',
'django_extensions',
'drf_yasg',
]
INSTALLED_APPS += LOCAL_APPS + THIRD_PARTY_APPS
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'bsl_21716.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'bsl_21716.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
if env.str("DATABASE_URL", default=None):
DATABASES = {
'default': env.db()
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
MIDDLEWARE += ['whitenoise.middleware.WhiteNoiseMiddleware']
AUTHENTICATION_BACKENDS = (
'django.contrib.auth.backends.ModelBackend',
'allauth.account.auth_backends.AuthenticationBackend'
)
STATIC_ROOT = os.path.join(BASE_DIR, "staticfiles")
STATICFILES_DIRS = [
os.path.join(BASE_DIR, 'static')
]
STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'
# allauth / users
ACCOUNT_EMAIL_REQUIRED = True
ACCOUNT_AUTHENTICATION_METHOD = 'email'
ACCOUNT_USERNAME_REQUIRED = False
ACCOUNT_EMAIL_VERIFICATION = "optional"
ACCOUNT_CONFIRM_EMAIL_ON_GET = True
ACCOUNT_LOGIN_ON_EMAIL_CONFIRMATION = True
ACCOUNT_UNIQUE_EMAIL = True
LOGIN_REDIRECT_URL = "users:redirect"
ACCOUNT_ADAPTER = "users.adapters.AccountAdapter"
SOCIALACCOUNT_ADAPTER = "users.adapters.SocialAccountAdapter"
ACCOUNT_ALLOW_REGISTRATION = env.bool("ACCOUNT_ALLOW_REGISTRATION", True)
SOCIALACCOUNT_ALLOW_REGISTRATION = env.bool("SOCIALACCOUNT_ALLOW_REGISTRATION", True)
REST_AUTH_SERIALIZERS = {
# Replace password reset serializer to fix 500 error
"PASSWORD_RESET_SERIALIZER": "home.api.v1.serializers.PasswordSerializer",
}
REST_AUTH_REGISTER_SERIALIZERS = {
# Use custom serializer that has no username and matches web signup
"REGISTER_SERIALIZER": "home.api.v1.serializers.SignupSerializer",
}
# Custom user model
AUTH_USER_MODEL = "users.User"
EMAIL_HOST = env.str("EMAIL_HOST", "smtp.sendgrid.net")
EMAIL_HOST_USER = env.str("SENDGRID_USERNAME", "")
EMAIL_HOST_PASSWORD = env.str("SENDGRID_PASSWORD", "")
EMAIL_PORT = 587
EMAIL_USE_TLS = True
# Swagger settings for api docs
SWAGGER_SETTINGS = {
"DEFAULT_INFO": f"{ROOT_URLCONF}.api_info",
}
if DEBUG or not (EMAIL_HOST_USER and EMAIL_HOST_PASSWORD):
# output email to console instead of sending
if not DEBUG:
logging.warning("You should setup `SENDGRID_USERNAME` and `SENDGRID_PASSWORD` env vars to send emails.")
EMAIL_BACKEND = "django.core.mail.backends.console.EmailBackend"
| [
"[email protected]"
] | |
928f58c34772c2b4138712fdfcb79215f149fc96 | 7b4cbaa1e7bab897e34acba06f73ac17760d394a | /sdks/python/client/argo_workflows/model/persistent_volume_claim.py | ed5b44656127c9955f253ef33fcc1405c9aaf6b6 | [
"Apache-2.0"
] | permissive | nHurD/argo | 0fab7f56179c848ad8a77a9f8981cb62b4a71d09 | f4a65b11a184f7429d0615a6fa65bc2cea4cc425 | refs/heads/master | 2023-01-13T04:39:54.793473 | 2022-12-18T04:48:37 | 2022-12-18T04:48:37 | 227,931,854 | 0 | 2 | Apache-2.0 | 2019-12-13T22:24:19 | 2019-12-13T22:24:18 | null | UTF-8 | Python | false | false | 13,755 | py | """
Argo Workflows API
Argo Workflows is an open source container-native workflow engine for orchestrating parallel jobs on Kubernetes. For more information, please see https://argoproj.github.io/argo-workflows/ # noqa: E501
The version of the OpenAPI document: VERSION
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
from argo_workflows.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
OpenApiModel
)
from argo_workflows.exceptions import ApiAttributeError
def lazy_import():
from argo_workflows.model.object_meta import ObjectMeta
from argo_workflows.model.persistent_volume_claim_spec import PersistentVolumeClaimSpec
from argo_workflows.model.persistent_volume_claim_status import PersistentVolumeClaimStatus
globals()['ObjectMeta'] = ObjectMeta
globals()['PersistentVolumeClaimSpec'] = PersistentVolumeClaimSpec
globals()['PersistentVolumeClaimStatus'] = PersistentVolumeClaimStatus
class PersistentVolumeClaim(ModelNormal):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {
}
validations = {
}
@cached_property
def additional_properties_type():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
"""
lazy_import()
return (bool, date, datetime, dict, float, int, list, str, none_type,) # noqa: E501
_nullable = False
@cached_property
def openapi_types():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
lazy_import()
return {
'api_version': (str,), # noqa: E501
'kind': (str,), # noqa: E501
'metadata': (ObjectMeta,), # noqa: E501
'spec': (PersistentVolumeClaimSpec,), # noqa: E501
'status': (PersistentVolumeClaimStatus,), # noqa: E501
}
@cached_property
def discriminator():
return None
attribute_map = {
'api_version': 'apiVersion', # noqa: E501
'kind': 'kind', # noqa: E501
'metadata': 'metadata', # noqa: E501
'spec': 'spec', # noqa: E501
'status': 'status', # noqa: E501
}
read_only_vars = {
}
_composed_schemas = {}
@classmethod
@convert_js_args_to_python_args
def _from_openapi_data(cls, *args, **kwargs): # noqa: E501
"""PersistentVolumeClaim - a model defined in OpenAPI
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
api_version (str): APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources. [optional] # noqa: E501
kind (str): Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds. [optional] # noqa: E501
metadata (ObjectMeta): [optional] # noqa: E501
spec (PersistentVolumeClaimSpec): [optional] # noqa: E501
status (PersistentVolumeClaimStatus): [optional] # noqa: E501
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
self = super(OpenApiModel, cls).__new__(cls)
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
return self
required_properties = set([
'_data_store',
'_check_type',
'_spec_property_naming',
'_path_to_item',
'_configuration',
'_visited_composed_classes',
])
@convert_js_args_to_python_args
def __init__(self, *args, **kwargs): # noqa: E501
"""PersistentVolumeClaim - a model defined in OpenAPI
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
api_version (str): APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources. [optional] # noqa: E501
kind (str): Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds. [optional] # noqa: E501
metadata (ObjectMeta): [optional] # noqa: E501
spec (PersistentVolumeClaimSpec): [optional] # noqa: E501
status (PersistentVolumeClaimStatus): [optional] # noqa: E501
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
if var_name in self.read_only_vars:
raise ApiAttributeError(f"`{var_name}` is a read-only attribute. Use `from_openapi_data` to instantiate "
f"class with read only attributes.")
| [
"[email protected]"
] | |
92e2ffb6e01ccf60d49d3427184289857918536d | 20f951bd927e4e5cde8ef7781813fcf0d51cc3ea | /fossir/web/forms/util.py | 1f03bb0a3347fb4b7c709e2715f01c827d7c8c71 | [] | no_license | HodardCodeclub/SoftwareDevelopment | 60a0fbab045cb1802925d4dd5012d5b030c272e0 | 6300f2fae830c0c2c73fe0afd9c684383bce63e5 | refs/heads/master | 2021-01-20T00:30:02.800383 | 2018-04-27T09:28:25 | 2018-04-27T09:28:25 | 101,277,325 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 2,463 | py |
from __future__ import unicode_literals
from collections import OrderedDict
from copy import deepcopy
from wtforms.fields.core import UnboundField
def get_form_field_names(form_class):
"""Returns the list of field names of a WTForm
:param form_class: A `Form` subclass
"""
unbound_fields = form_class._unbound_fields
if unbound_fields:
return [f[0] for f in unbound_fields]
field_names = []
# the following logic has been taken from FormMeta.__call__
for name in dir(form_class):
if not name.startswith('_'):
unbound_field = getattr(form_class, name)
if hasattr(unbound_field, '_formfield'):
field_names.append(name)
return field_names
def inject_validators(form, field_name, validators, early=False):
"""Add extra validators to a form field.
This function may be called from the ``__init__`` method of a
form before the ``super().__init__()`` call or on a form class.
When using a Form class note that this will modify the class, so
all new instances of it will be affected!
:param form: the `Form` instance or a `Form` subclass
:param field_name: the name of the field to change
:param validators: a list of validators to add
:param early: whether to inject the validator before any existing
validators. this is needed if a field has a validator
that stops validation such as DataRequired and the
injected one is e.g. HiddenUnless which needs to run
even if the field is invalid
"""
unbound = deepcopy(getattr(form, field_name))
assert isinstance(unbound, UnboundField)
if 'validators' in unbound.kwargs:
if early:
unbound.kwargs['validators'] = validators + unbound.kwargs['validators']
else:
unbound.kwargs['validators'] += validators
elif len(unbound.args) > 1:
if early:
validators_arg = validators + unbound.args[1]
else:
validators_arg = unbound.args[1] + validators
unbound.args = unbound.args[:1] + (validators_arg,) + unbound.args[2:]
else:
unbound.kwargs['validators'] = validators
setattr(form, field_name, unbound)
if form._unbound_fields is not None:
unbound_fields = OrderedDict(form._unbound_fields)
unbound_fields[field_name] = unbound
form._unbound_fields = unbound_fields.items()
| [
"[email protected]"
] | |
bf25c491d026c56c2680ee54c6c6da0ef243d622 | 1d96db84225301d972f07cad95c2a13f4fbafa84 | /python/my_PyFeyn/testing/pyfeyn-test2.py | 82ff7bcb08a3d9741dea8c1a1909d75c7af37668 | [] | no_license | mattbellis/matts-work-environment | 9eb9b25040dd8fb4a444819b01a80c2d5342b150 | 41988f3c310f497223445f16e2537e8d1a3f71bc | refs/heads/master | 2023-08-23T09:02:37.193619 | 2023-08-09T05:36:32 | 2023-08-09T05:36:32 | 32,194,439 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 826 | py | #! /usr/bin/env python
from pyfeyn.user import *
fd = FeynDiagram()
p1 = Point(2, -2)
p2 = Point(-2, 2)
p3 = Vertex(1.25, 1.25, mark=CIRCLE)
p4 = p1.midpoint(p2)
p5 = p4.midpoint(p1)
p6 = p4.midpoint(p2)
c1 = Circle(center=p1, radius=0.5, fill=[RED], points=[p1])
c2 = Circle(center=p2, radius=0.3, fill=[GREEN], points=[p2])
e1 = Ellipse(center=p4, xradius=0.5, yradius=1.0,
fill=[MIDNIGHTBLUE], points=[p4])
l0a = Fermion(p1, p4)
l0b = Fermion(p2, p4)
l1 = NamedLine["gluon"](p2, p1).arcThru(x=3, y=0)
l2 = NamedLine["photon"](p1, p2).arcThru(x=0, y=-3)
l3 = Gluon(p2, p3)
l4 = Photon(p1, p3)
l5 = Gluon(p5, p6).bend(-p5.distance(p6)/2.0)
loop1 = Line(p3, p3).arcThru(x=1.75, y=1.75).addArrow(0.55)
l1.addLabel(r"\Pgluon")
l2.addLabel(r"\Pphoton")
l5.addLabel(r"$\Pgluon_1$")
fd.draw("pyfeyn-test2.pdf")
| [
"[email protected]"
] | |
c96e7a60b206a9a0ed2292d388e43a340c284cc5 | 5c2f520dde0cf8077facc0fcd9a92bc1a96d168b | /from_cpython/Lib/types.py | b8166edf94fcba78305f53822b17e69b61cb466e | [
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference",
"Python-2.0"
] | permissive | nagyist/pyston | b613337a030ef21a3f03708febebe76cedf34c61 | 14ba2e6e6fb5c7316f66ccca86e6c6a836d96cab | refs/heads/master | 2022-12-24T03:56:12.885732 | 2015-02-25T11:11:08 | 2015-02-25T11:28:13 | 31,314,596 | 0 | 0 | NOASSERTION | 2022-12-17T08:15:11 | 2015-02-25T13:24:41 | Python | UTF-8 | Python | false | false | 2,342 | py | """Define names for all type symbols known in the standard interpreter.
Types that are part of optional modules (e.g. array) are not listed.
"""
import sys
# Iterators in Python aren't a matter of type but of protocol. A large
# and changing number of builtin types implement *some* flavor of
# iterator. Don't check the type! Use hasattr to check for both
# "__iter__" and "next" attributes instead.
NoneType = type(None)
TypeType = type
ObjectType = object
IntType = int
LongType = long
FloatType = float
BooleanType = bool
try:
ComplexType = complex
except NameError:
pass
StringType = str
# StringTypes is already outdated. Instead of writing "type(x) in
# types.StringTypes", you should use "isinstance(x, basestring)". But
# we keep around for compatibility with Python 2.2.
try:
UnicodeType = unicode
StringTypes = (StringType, UnicodeType)
except NameError:
StringTypes = (StringType,)
# Pyston change: 'buffer' is not implemented yet
# BufferType = buffer
TupleType = tuple
ListType = list
DictType = DictionaryType = dict
def _f(): pass
FunctionType = type(_f)
LambdaType = type(lambda: None) # Same as FunctionType
# Pyston change: there is no concept of a "code object" yet:
# CodeType = type(_f.func_code)
def _g():
yield 1
GeneratorType = type(_g())
class _C:
def _m(self): pass
ClassType = type(_C)
UnboundMethodType = type(_C._m) # Same as MethodType
_x = _C()
InstanceType = type(_x)
MethodType = type(_x._m)
BuiltinFunctionType = type(len)
BuiltinMethodType = type([].append) # Same as BuiltinFunctionType
ModuleType = type(sys)
FileType = file
XRangeType = xrange
# Pyston change: we don't support sys.exc_info yet
"""
try:
raise TypeError
except TypeError:
tb = sys.exc_info()[2]
TracebackType = type(tb)
FrameType = type(tb.tb_frame)
del tb
"""
SliceType = slice
# Pyston change: don't support this yet
# EllipsisType = type(Ellipsis)
# Pyston change: don't support this yet
# DictProxyType = type(TypeType.__dict__)
NotImplementedType = type(NotImplemented)
# For Jython, the following two types are identical
# Pyston change: don't support these yet
# GetSetDescriptorType = type(FunctionType.func_code)
# MemberDescriptorType = type(FunctionType.func_globals)
del sys, _f, _g, _C, _x # Not for export
| [
"[email protected]"
] | |
39c7a34748c1b3e7fb4a8f0b57485acabb6c4b65 | e7efae2b83216d9621bd93390959d652de779c3d | /kyototycoon/tests/test_kyototycoon.py | b7e964c19cda690b27c0b54a0d45f7ae777a268f | [
"BSD-3-Clause",
"MIT",
"BSD-3-Clause-Modification",
"Unlicense",
"Apache-2.0",
"LGPL-3.0-only",
"LicenseRef-scancode-public-domain",
"BSD-2-Clause",
"CC0-1.0"
] | permissive | DataDog/integrations-core | ee1886cc7655972b2791e6ab8a1c62ab35afdb47 | 406072e4294edff5b46b513f0cdf7c2c00fac9d2 | refs/heads/master | 2023-08-31T04:08:06.243593 | 2023-08-30T18:22:10 | 2023-08-30T18:22:10 | 47,203,045 | 852 | 1,548 | BSD-3-Clause | 2023-09-14T16:39:54 | 2015-12-01T16:41:45 | Python | UTF-8 | Python | false | false | 1,657 | py | # (C) Datadog, Inc. 2018-present
# All rights reserved
# Licensed under a 3-clause BSD style license (see LICENSE)
from copy import deepcopy
import pytest
from datadog_checks.kyototycoon import KyotoTycoonCheck
from .common import DEFAULT_INSTANCE, TAGS
GAUGES = list(KyotoTycoonCheck.GAUGES.values())
DB_GAUGES = list(KyotoTycoonCheck.DB_GAUGES.values())
TOTALS = list(KyotoTycoonCheck.TOTALS.values())
RATES = list(KyotoTycoonCheck.RATES.values())
# all the RATE type metrics
ALL_RATES = TOTALS + RATES
def test_check(aggregator, dd_environment):
kt = KyotoTycoonCheck('kyototycoon', {}, {})
kt.check(deepcopy(DEFAULT_INSTANCE))
kt.check(deepcopy(DEFAULT_INSTANCE))
_assert_check(aggregator)
@pytest.mark.e2e
def test_e2e(dd_agent_check):
aggregator = dd_agent_check(DEFAULT_INSTANCE, rate=True)
_assert_check(aggregator, rate_metric_count=1)
def _assert_check(aggregator, rate_metric_count=2):
# prefix every metric with check name (kyototycoon.)
# no replications, so ignore kyototycoon.replication.delay
for mname in GAUGES:
if mname != 'replication.delay':
aggregator.assert_metric('kyototycoon.{}'.format(mname), tags=TAGS, count=2)
for mname in DB_GAUGES:
aggregator.assert_metric('kyototycoon.{}'.format(mname), tags=TAGS + ['db:0'], count=2)
for mname in ALL_RATES:
aggregator.assert_metric('kyototycoon.{}_per_s'.format(mname), tags=TAGS, count=rate_metric_count)
# service check
aggregator.assert_service_check(KyotoTycoonCheck.SERVICE_CHECK_NAME, status=KyotoTycoonCheck.OK, tags=TAGS, count=2)
aggregator.assert_all_metrics_covered()
| [
"[email protected]"
] | |
af984f8b92fa6d9f2f3f4f2529a36de9c3b048da | 3ac9cc9f54b1d6c6d5e05317bb0b977f4c1b363d | /profab/main.py | 811a78a9ea7a173bb25ab8d19309d19a26ddd8c8 | [
"Apache-2.0",
"BSL-1.0"
] | permissive | sittisak/profab | 5f5a92d8da7a07af80727eee337993929931ba2a | ff3967397b31986c9396f70a44a565d85178e6a6 | refs/heads/master | 2020-04-05T14:05:47.613997 | 2016-11-22T02:50:12 | 2016-11-22T02:50:12 | 94,763,557 | 0 | 1 | null | 2017-08-21T09:09:46 | 2017-06-19T10:09:29 | Python | UTF-8 | Python | false | false | 541 | py | """Helper functions for the entry point scripts.
"""
def process_arguments(*args):
"""Do the initial argument parse phase. This produces tuples of role
instructions
"""
args = list(args) # Convert tuple to list
args.reverse() # We really wanted head() here, but no matter...
instructions = []
while len(args):
head = args.pop()
if head.startswith('--'):
instructions.append((head[2:], args.pop()))
else:
instructions.append((head, None))
return instructions
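# Editor's illustration (not in the original file): a sketch of how
# process_arguments() pairs "--option value" arguments with bare role names,
# assuming a typical invocation such as: server --size large web
#
#   >>> process_arguments('server', '--size', 'large', 'web')
#   [('server', None), ('size', 'large'), ('web', None)]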
| [
"[email protected]"
] | |
7892120a6d6a7156cc8d1cd0c3b45f581a21e5bc | b23d294fdffabe72c336644f119860f5ce704eef | /python_1000phone/语言基础-老师代码/day12-生成器和模块/day12-生成器和模块/game/image.py | fd2e2576d1f4c32bae8d829d16fda3308ee0debd | [] | no_license | ikaros274556330/my_code | 65232758fd20820e9f4fa8cb5a6c91a1969862a2 | 92db21c4abcbd88b7bd77e78d9f660b4534b5071 | refs/heads/master | 2020-11-26T09:43:58.200990 | 2019-12-23T02:08:39 | 2019-12-23T02:08:39 | 229,032,315 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 48 | py | """__author__=余婷"""
print('image被执行') | [
"[email protected]"
] | |
156cee855fb4337b19f958444b0a101e766d89a5 | cee0df2a184f3f99306193b9f34aba16889cc57c | /pvextractor/utils/wcs_utils.py | 88cd249959d5af27eeb395dc415eeb0160e41646 | [] | no_license | teuben/pvextractor | 169f3317eb2d53013eb981fca18f69d17fa3a8b3 | 889c108a964d8130b1a17066890c7325b57daf4c | refs/heads/master | 2021-01-14T13:16:48.485846 | 2014-04-18T23:29:17 | 2014-04-18T23:29:17 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,278 | py | import numpy as np
from astropy import units as u
from astropy import wcs
def get_pixel_scales(mywcs, assert_square=True):
# borrowed from aplpy
mywcs = mywcs.sub([wcs.WCSSUB_CELESTIAL])
cdelt = np.matrix(mywcs.wcs.get_cdelt())
pc = np.matrix(mywcs.wcs.get_pc())
scale = np.array(cdelt * pc)
if (assert_square and
(abs(cdelt[0,0]) != abs(cdelt[0,1]) or
abs(pc[0,0]) != abs(pc[1,1]) or
abs(scale[0,0]) != abs(scale[0,1]))):
raise ValueError("Non-square pixels. Please resample data.")
return abs(scale[0,0])
def sanitize_wcs(mywcs):
pc = np.matrix(mywcs.wcs.get_pc())
if (pc[:,2].sum() != pc[2,2] or pc[2,:].sum() != pc[2,2]):
raise ValueError("Non-independent 3rd axis.")
axtypes = mywcs.get_axis_types()
if ((axtypes[0]['coordinate_type'] != 'celestial' or
axtypes[1]['coordinate_type'] != 'celestial' or
axtypes[2]['coordinate_type'] != 'spectral')):
cunit3 = mywcs.wcs.cunit[2]
ctype3 = mywcs.wcs.ctype[2]
if cunit3 != '':
cunit3 = u.Unit(cunit3)
if cunit3.is_equivalent(u.m/u.s):
mywcs.wcs.ctype[2] = 'VELO'
elif cunit3.is_equivalent(u.Hz):
mywcs.wcs.ctype[2] = 'FREQ'
elif cunit3.is_equivalent(u.m):
mywcs.wcs.ctype[2] = 'WAVE'
else:
raise ValueError("Could not determine type of 3rd axis.")
elif ctype3 != '':
if 'VELO' in ctype3:
mywcs.wcs.ctype[2] = 'VELO'
elif 'FELO' in ctype3:
mywcs.wcs.ctype[2] = 'VELO-F2V'
elif 'FREQ' in ctype3:
mywcs.wcs.ctype[2] = 'FREQ'
elif 'WAVE' in ctype3:
mywcs.wcs.ctype[2] = 'WAVE'
else:
raise ValueError("Could not determine type of 3rd axis.")
else:
raise ValueError("Cube axes not in expected orientation: PPV")
return mywcs
def wcs_spacing(mywcs, spacing):
"""
Return spacing in pixels
Parameters
----------
wcs : `~astropy.wcs.WCS`
spacing : `~astropy.units.Quantity` or float
"""
if spacing is not None:
if hasattr(spacing,'unit'):
if not spacing.unit.is_equivalent(u.arcsec):
raise TypeError("Spacing is not in angular units.")
else:
platescale = get_pixel_scales(mywcs)
newspacing = spacing.to(u.deg).value / platescale
else:
# if no units, assume pixels already
newspacing = spacing
else:
# if no spacing, return pixscale
newspacing = 1
return newspacing
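# Editor's sketch (not part of the original module): assuming `mywcs` is a
# celestial WCS whose plate scale is 1 arcsec/pixel, wcs_spacing() above
# behaves roughly like this:
#
#   >>> from astropy import units as u
#   >>> wcs_spacing(mywcs, 2 * u.arcsec)   # angular spacing -> 2.0 (pixels)
#   >>> wcs_spacing(mywcs, 3)              # bare number is taken as pixels -> 3
#   >>> wcs_spacing(mywcs, None)           # no spacing given -> defaults to 1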
def pixel_to_wcs_spacing(mywcs, pspacing):
"""
Return spacing in degrees
Parameters
----------
wcs : `~astropy.wcs.WCS`
spacing : float
"""
platescale = get_pixel_scales(mywcs)
wspacing = platescale * pspacing * u.deg
return wspacing
def get_wcs_system_name(mywcs):
"""TODO: move to astropy.wcs.utils"""
ct = mywcs.sub([wcs.WCSSUB_CELESTIAL]).wcs.ctype
if 'GLON' in ct[0]:
return 'galactic'
elif 'RA' in ct[0]:
return 'icrs'
else:
raise ValueError("Unrecognized coordinate system")
| [
"[email protected]"
] | |
9b61853924eb18e6ee43387888b12daaf7b0dea5 | 0b0ca6853f351530384fcb9f3f9c91d4c034512b | /website/opensource/views.py | ba63fbd77e3145251be0ac3cead121e00526bdd4 | [] | no_license | thanhleviet/syrusakbary.com | d767129c6b00c092816e3cb58f063d1b052f0df0 | ca04f55462db72bb603bfc0453b9404b04ee6687 | refs/heads/master | 2021-01-18T09:30:31.278757 | 2012-07-22T20:32:12 | 2012-07-22T20:32:12 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 930 | py | # Create your views here.
#from django.http import HttpResponse
#from coffin.template import add_to_builtins
#add_to_builtins('jinja2-mediasync.media')
#from coffin.shortcuts import render_to_response
#from django.shortcuts import render_to_response
#from django.template import add_to_builtins
#add_to_builtins('mediasync.templatetags.media')
#from django.template import RequestContext
from django.views.generic import ListView, DetailView
from .models import Project
class OpenSourceDetailView(DetailView):
template_name='opensource/opensource_detail.jade'
#queryset = Project.objects.all()
queryset = []
class OpenSourceListView(ListView):
template_name='opensource/opensource_list.jade'
queryset = Project.objects.all()
context_object_name = "project_list"
#queryset = []
#def index(request):
# return render_to_response('projects/index.html',context_instance=RequestContext(request))
| [
"[email protected]"
] | |
2ca98c5399ca4e051f6ba3b6370573ba00678d56 | ee3e0a69093e82deff1bddf607f6ce0dde372c48 | /ndb769/개념/linked_list.py | 96c6f60d40eb889dc16b88a18505851cfabdcac7 | [] | no_license | cndqjacndqja/algorithm_python | 202f9990ea367629aecdd14304201eb6fa2aa37e | 843269cdf8fb9d4c215c92a97fc2d007a8f96699 | refs/heads/master | 2023-06-24T08:12:29.639424 | 2021-07-24T05:08:46 | 2021-07-24T05:08:46 | 255,552,956 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,117 | py | class Node:
def __init__(self, data):
self.data = data
self.next = None
class LinkedList:
def __init__(self, data):
self.head = Node(data)
def append(self, data):
cur = self.head
while cur.next is not None:
cur = cur.next
cur.next = Node(data)
def get_node(self, index):
cnt = 0
node = self.head
while cnt < index:
cnt += 1
node = node.next
return node
def add_node(self, index, value):
new_node = Node(value)
if index == 0:
new_node.next = self.head
self.head = new_node
return
node = self.get_node(index - 1)
new_node.next = node.next
node.next = new_node
def delete_node(self, index):
if index == 0:
self.head = self.head.next
return
node = self.get_node(index-1)
node.next = node.next.next
if __name__ == "__main__":
data = list(input())
list = LinkedList(data[0])
for i in range(1, len(data)):
list.append(data[i])
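    # Editor's illustration (assumes the classes defined above): the
    # index-based helpers that this input-driven demo does not exercise.
    #   list.add_node(0, 'X')     # insert 'X' at the head
    #   list.get_node(0).data     # -> 'X'
    #   list.delete_node(1)       # unlink the node now at index 1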
| [
"[email protected]"
] | |
eff87412bf6ca4715fe05272080cc15cbc15f11a | a9aa0bce4e45b8712ce77045d0ec52eb4014692f | /manage.py | b001c976f32b96704d291a9e4d06d4bf5c399154 | [] | no_license | Aleleonel/BionicoBusiness | 93431e94a750f86c86d99925952cfc7b7c8cdb6d | 85326614f64235d8348aebac8e8b42a9e0764e18 | refs/heads/master | 2023-08-17T00:41:53.660157 | 2020-06-01T22:04:06 | 2020-06-01T22:04:06 | 268,640,058 | 0 | 0 | null | 2021-06-10T19:24:12 | 2020-06-01T21:49:19 | Python | UTF-8 | Python | false | false | 627 | py | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'bionico.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
| [
"[email protected]"
] | |
7b095e3e066626392f53c9d6e431e87be22263e4 | 2a7fe1988b9a9aaf5e301637883319c43d38bcb9 | /users/serializers.py | e18ccb36912540ec79d98c4ac36785882da3fc0c | [] | no_license | kenassash/django_rest_notes | 65d799b32f520faef2dbd02fae2e05efa8535797 | eab022e6e57aaa06918ee5ab80586c8a1a8894c3 | refs/heads/master | 2023-09-03T19:36:28.304504 | 2021-10-27T08:19:14 | 2021-10-27T08:19:14 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 747 | py | from rest_framework.serializers import HyperlinkedModelSerializer
# from .models import NoteUser
from .models import User
class UserModelSerializer(HyperlinkedModelSerializer):
class Meta:
# model = NoteUser
# fields = ('username', 'firstname', 'lastname', 'email')
# fields = '__all__'
model = User
fields = ('id', 'url', 'username', 'first_name', 'last_name', 'email', 'is_superuser', 'is_staff')
# fields = ('username', 'first_name', 'last_name', 'email')
# fields = '__all__'
class UserModelSerializerV2(HyperlinkedModelSerializer):
class Meta:
model = User
# fields = '__all__'
fields = ('id', 'url', 'username', 'first_name', 'last_name', 'email')
| [
"[email protected]"
] | |
6da64ddbe8b389fea336607a103d15766e44bd65 | 3dd58a087b59bdba102f2e6e95b3e200a9530c4c | /django_demo/mysite/polls/tests.py | 7d7c15443ccc50f0a7b07c5f29af512cd9f3697e | [] | no_license | natepill/Server-Side-Architecture | b17926cf467083182e96257589dfdc7c3d5ea40e | 4765136c5fe9d0eedc6b50a2bbbb0c9458170694 | refs/heads/master | 2022-12-11T03:41:46.296869 | 2019-06-24T19:53:15 | 2019-06-24T19:53:15 | 189,689,598 | 0 | 0 | null | 2022-12-08T01:22:18 | 2019-06-01T04:19:09 | Python | UTF-8 | Python | false | false | 5,492 | py | import datetime
from django.test import TestCase
from django.utils import timezone
from django.urls import reverse
from .models import Question, Choice
def create_question(question_text, days):
"""
Create a question with the given `question_text` and published the
given number of `days` offset to now (negative for questions published
in the past, positive for questions that have yet to be published).
"""
time = timezone.now() + datetime.timedelta(days=days)
return Question.objects.create(question_text=question_text, pub_date=time)
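# Editor's note (illustration only, mirroring the docstring above):
#   create_question("Past?", days=-30)   -> pub_date 30 days in the past
#   create_question("Future?", days=30)  -> pub_date 30 days ahead, i.e. a
#                                           question that is not yet published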
def add_vote_to_choice(choice):
choice.votes += 1
class QuestionModelTests(TestCase):
def test_was_published_recently_with_future_question(self):
"""
was_published_recently() returns False for questions whose pub_date
is in the future.
"""
time = timezone.now() + datetime.timedelta(days=30)
future_question = Question(pub_date=time)
self.assertIs(future_question.was_published_recently(), False)
def test_was_published_recently_with_old_question(self):
"""
was_published_recently() returns False for questions whose pub_date
is older than 1 day.
"""
time = timezone.now() - datetime.timedelta(days=1, seconds=1)
old_question = Question(pub_date=time)
self.assertIs(old_question.was_published_recently(), False)
def test_was_published_recently_with_recent_question(self):
"""
was_published_recently() returns True for questions whose pub_date
is within the last day.
"""
time = timezone.now() - datetime.timedelta(hours=23, minutes=59, seconds=59)
recent_question = Question(pub_date=time)
self.assertIs(recent_question.was_published_recently(), True)
class QuestionIndexViewTests(TestCase):
def test_no_questions(self):
"""
If no questions exist, an appropriate message is displayed.
"""
response = self.client.get(reverse('polls:index'))
self.assertEqual(response.status_code, 200)
self.assertContains(response, "No polls are available.")
self.assertQuerysetEqual(response.context['latest_question_list'], [])
def test_past_question(self):
"""
Questions with a pub_date in the past are displayed on the
index page.
"""
create_question(question_text="Past question.", days=-30)
response = self.client.get(reverse('polls:index'))
self.assertQuerysetEqual(
response.context['latest_question_list'],
['<Question: Past question.>']
)
def test_future_question(self):
"""
Questions with a pub_date in the future aren't displayed on
the index page.
"""
create_question(question_text="Future question.", days=30)
response = self.client.get(reverse('polls:index'))
self.assertContains(response, "No polls are available.")
self.assertQuerysetEqual(response.context['latest_question_list'], [])
def test_future_question_and_past_question(self):
"""
Even if both past and future questions exist, only past questions
are displayed.
"""
create_question(question_text="Past question.", days=-30)
create_question(question_text="Future question.", days=30)
response = self.client.get(reverse('polls:index'))
self.assertQuerysetEqual(
response.context['latest_question_list'],
['<Question: Past question.>']
)
def test_two_past_questions(self):
"""
The questions index page may display multiple questions.
"""
create_question(question_text="Past question 1.", days=-30)
create_question(question_text="Past question 2.", days=-5)
response = self.client.get(reverse('polls:index'))
self.assertQuerysetEqual(
response.context['latest_question_list'],
['<Question: Past question 2.>', '<Question: Past question 1.>']
)
class QuestionDetailViewTests(TestCase):
def test_future_question(self):
"""
The detail view of a question with a pub_date in the future
returns a 404 not found.
"""
future_question = create_question(question_text='Future question.', days=5)
url = reverse('polls:detail', args=(future_question.id,))
response = self.client.get(url)
self.assertEqual(response.status_code, 404)
def test_past_question(self):
"""
The detail view of a question with a pub_date in the past
displays the question's text.
"""
past_question = create_question(question_text='Past Question.', days=-5)
url = reverse('polls:detail', args=(past_question.id,))
response = self.client.get(url)
self.assertContains(response, past_question.question_text)
class QuestionResultViewTests(TestCase):
def test_votes_for_choice(self):
        # Completed minimally by the editor so the test runs; assumes the
        # tutorial-style Choice model (ForeignKey to Question, choice_text,
        # integer votes).
        new_question = create_question(
            question_text='Testing that voting for choices work?', days=-1)
        choice = Choice.objects.create(
            question=new_question, choice_text='A choice', votes=0)
        add_vote_to_choice(choice)
        self.assertEqual(choice.votes, 1)
| [
"[email protected]"
] | |
de826dfae0fa88b16b9f28a90bfb267c18c844c7 | ff2b4b972865ab82464d550934329117500d3195 | /bfg9000/tools/mkdir_p.py | f12325a7350d79273dadc255603b865d4c34c859 | [
"BSD-3-Clause"
] | permissive | juntalis/bfg9000 | cd38a9194e6c08a4fbcf3be29f37c00bfa532588 | 594eb2aa7c259855e7658d69fe84acb6dad890fa | refs/heads/master | 2021-08-08T21:35:33.896506 | 2017-11-11T08:47:57 | 2017-11-11T08:47:57 | 110,331,128 | 0 | 0 | null | 2017-11-11T08:46:05 | 2017-11-11T08:46:04 | null | UTF-8 | Python | false | false | 396 | py | from . import tool
from .common import SimpleCommand
@tool('mkdir_p')
class MkdirP(SimpleCommand):
def __init__(self, env):
default = 'doppel -p' if env.platform.name == 'windows' else 'mkdir -p'
SimpleCommand.__init__(self, env, name='mkdir_p', env_var='MKDIR_P',
default=default)
def _call(self, cmd, path):
return cmd + [path]
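# Editor's sketch (assumption, not from the original source): on a
# non-Windows platform the wrapped tool resolves roughly as
#   MkdirP(env)._call(['mkdir', '-p'], 'build/out')
#   -> ['mkdir', '-p', 'build/out']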
| [
"[email protected]"
] | |
3cea7cfc0e0c7f0ce28a2297adca573ec9a9e999 | eb9c3dac0dca0ecd184df14b1fda62e61cc8c7d7 | /google/devtools/cloudbuild/v1/devtools-cloudbuild-v1-py/google/devtools/cloudbuild_v1/services/cloud_build/transports/base.py | 36c52c54ceb1c9b4ddd4224c7b0ac3a69084ee44 | [
"Apache-2.0"
] | permissive | Tryweirder/googleapis-gen | 2e5daf46574c3af3d448f1177eaebe809100c346 | 45d8e9377379f9d1d4e166e80415a8c1737f284d | refs/heads/master | 2023-04-05T06:30:04.726589 | 2021-04-13T23:35:20 | 2021-04-13T23:35:20 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 15,757 | py | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import abc
import typing
import pkg_resources
from google import auth # type: ignore
from google.api_core import exceptions # type: ignore
from google.api_core import gapic_v1 # type: ignore
from google.api_core import retry as retries # type: ignore
from google.api_core import operations_v1 # type: ignore
from google.auth import credentials # type: ignore
from google.devtools.cloudbuild_v1.types import cloudbuild
from google.longrunning import operations_pb2 as operations # type: ignore
from google.protobuf import empty_pb2 as empty # type: ignore
try:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
gapic_version=pkg_resources.get_distribution(
'google-devtools-cloudbuild',
).version,
)
except pkg_resources.DistributionNotFound:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()
class CloudBuildTransport(abc.ABC):
"""Abstract transport class for CloudBuild."""
AUTH_SCOPES = (
'https://www.googleapis.com/auth/cloud-platform',
)
def __init__(
self, *,
host: str = 'cloudbuild.googleapis.com',
credentials: credentials.Credentials = None,
credentials_file: typing.Optional[str] = None,
scopes: typing.Optional[typing.Sequence[str]] = AUTH_SCOPES,
quota_project_id: typing.Optional[str] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
**kwargs,
) -> None:
"""Instantiate the transport.
Args:
host (Optional[str]): The hostname to connect to.
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is mutually exclusive with credentials.
scope (Optional[Sequence[str]]): A list of scopes.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
"""
# Save the hostname. Default to port 443 (HTTPS) if none is specified.
if ':' not in host:
host += ':443'
self._host = host
# Save the scopes.
self._scopes = scopes or self.AUTH_SCOPES
# If no credentials are provided, then determine the appropriate
# defaults.
if credentials and credentials_file:
raise exceptions.DuplicateCredentialArgs("'credentials_file' and 'credentials' are mutually exclusive")
if credentials_file is not None:
credentials, _ = auth.load_credentials_from_file(
credentials_file,
scopes=self._scopes,
quota_project_id=quota_project_id
)
elif credentials is None:
credentials, _ = auth.default(scopes=self._scopes, quota_project_id=quota_project_id)
# Save the credentials.
self._credentials = credentials
def _prep_wrapped_messages(self, client_info):
# Precompute the wrapped methods.
self._wrapped_methods = {
self.create_build: gapic_v1.method.wrap_method(
self.create_build,
default_timeout=600.0,
client_info=client_info,
),
self.get_build: gapic_v1.method.wrap_method(
self.get_build,
default_retry=retries.Retry(
initial=0.1,
maximum=60.0,
multiplier=1.3,
predicate=retries.if_exception_type(
exceptions.DeadlineExceeded,
exceptions.ServiceUnavailable,
),
deadline=600.0,
),
default_timeout=600.0,
client_info=client_info,
),
self.list_builds: gapic_v1.method.wrap_method(
self.list_builds,
default_retry=retries.Retry(
initial=0.1,
maximum=60.0,
multiplier=1.3,
predicate=retries.if_exception_type(
exceptions.DeadlineExceeded,
exceptions.ServiceUnavailable,
),
deadline=600.0,
),
default_timeout=600.0,
client_info=client_info,
),
self.cancel_build: gapic_v1.method.wrap_method(
self.cancel_build,
default_timeout=600.0,
client_info=client_info,
),
self.retry_build: gapic_v1.method.wrap_method(
self.retry_build,
default_timeout=600.0,
client_info=client_info,
),
self.create_build_trigger: gapic_v1.method.wrap_method(
self.create_build_trigger,
default_timeout=600.0,
client_info=client_info,
),
self.get_build_trigger: gapic_v1.method.wrap_method(
self.get_build_trigger,
default_retry=retries.Retry(
initial=0.1,
maximum=60.0,
multiplier=1.3,
predicate=retries.if_exception_type(
exceptions.DeadlineExceeded,
exceptions.ServiceUnavailable,
),
deadline=600.0,
),
default_timeout=600.0,
client_info=client_info,
),
self.list_build_triggers: gapic_v1.method.wrap_method(
self.list_build_triggers,
default_retry=retries.Retry(
initial=0.1,
maximum=60.0,
multiplier=1.3,
predicate=retries.if_exception_type(
exceptions.DeadlineExceeded,
exceptions.ServiceUnavailable,
),
deadline=600.0,
),
default_timeout=600.0,
client_info=client_info,
),
self.delete_build_trigger: gapic_v1.method.wrap_method(
self.delete_build_trigger,
default_retry=retries.Retry(
initial=0.1,
maximum=60.0,
multiplier=1.3,
predicate=retries.if_exception_type(
exceptions.DeadlineExceeded,
exceptions.ServiceUnavailable,
),
deadline=600.0,
),
default_timeout=600.0,
client_info=client_info,
),
self.update_build_trigger: gapic_v1.method.wrap_method(
self.update_build_trigger,
default_timeout=600.0,
client_info=client_info,
),
self.run_build_trigger: gapic_v1.method.wrap_method(
self.run_build_trigger,
default_timeout=600.0,
client_info=client_info,
),
self.receive_trigger_webhook: gapic_v1.method.wrap_method(
self.receive_trigger_webhook,
default_timeout=None,
client_info=client_info,
),
self.create_worker_pool: gapic_v1.method.wrap_method(
self.create_worker_pool,
default_timeout=600.0,
client_info=client_info,
),
self.get_worker_pool: gapic_v1.method.wrap_method(
self.get_worker_pool,
default_retry=retries.Retry(
initial=0.1,
maximum=60.0,
multiplier=1.3,
predicate=retries.if_exception_type(
exceptions.DeadlineExceeded,
exceptions.ServiceUnavailable,
),
deadline=600.0,
),
default_timeout=600.0,
client_info=client_info,
),
self.delete_worker_pool: gapic_v1.method.wrap_method(
self.delete_worker_pool,
default_timeout=600.0,
client_info=client_info,
),
self.update_worker_pool: gapic_v1.method.wrap_method(
self.update_worker_pool,
default_timeout=600.0,
client_info=client_info,
),
self.list_worker_pools: gapic_v1.method.wrap_method(
self.list_worker_pools,
default_retry=retries.Retry(
initial=0.1,
maximum=60.0,
multiplier=1.3,
predicate=retries.if_exception_type(
exceptions.DeadlineExceeded,
exceptions.ServiceUnavailable,
),
deadline=600.0,
),
default_timeout=600.0,
client_info=client_info,
),
}
@property
def operations_client(self) -> operations_v1.OperationsClient:
"""Return the client designed to process long-running operations."""
raise NotImplementedError()
@property
def create_build(self) -> typing.Callable[
[cloudbuild.CreateBuildRequest],
typing.Union[
operations.Operation,
typing.Awaitable[operations.Operation]
]]:
raise NotImplementedError()
@property
def get_build(self) -> typing.Callable[
[cloudbuild.GetBuildRequest],
typing.Union[
cloudbuild.Build,
typing.Awaitable[cloudbuild.Build]
]]:
raise NotImplementedError()
@property
def list_builds(self) -> typing.Callable[
[cloudbuild.ListBuildsRequest],
typing.Union[
cloudbuild.ListBuildsResponse,
typing.Awaitable[cloudbuild.ListBuildsResponse]
]]:
raise NotImplementedError()
@property
def cancel_build(self) -> typing.Callable[
[cloudbuild.CancelBuildRequest],
typing.Union[
cloudbuild.Build,
typing.Awaitable[cloudbuild.Build]
]]:
raise NotImplementedError()
@property
def retry_build(self) -> typing.Callable[
[cloudbuild.RetryBuildRequest],
typing.Union[
operations.Operation,
typing.Awaitable[operations.Operation]
]]:
raise NotImplementedError()
@property
def create_build_trigger(self) -> typing.Callable[
[cloudbuild.CreateBuildTriggerRequest],
typing.Union[
cloudbuild.BuildTrigger,
typing.Awaitable[cloudbuild.BuildTrigger]
]]:
raise NotImplementedError()
@property
def get_build_trigger(self) -> typing.Callable[
[cloudbuild.GetBuildTriggerRequest],
typing.Union[
cloudbuild.BuildTrigger,
typing.Awaitable[cloudbuild.BuildTrigger]
]]:
raise NotImplementedError()
@property
def list_build_triggers(self) -> typing.Callable[
[cloudbuild.ListBuildTriggersRequest],
typing.Union[
cloudbuild.ListBuildTriggersResponse,
typing.Awaitable[cloudbuild.ListBuildTriggersResponse]
]]:
raise NotImplementedError()
@property
def delete_build_trigger(self) -> typing.Callable[
[cloudbuild.DeleteBuildTriggerRequest],
typing.Union[
empty.Empty,
typing.Awaitable[empty.Empty]
]]:
raise NotImplementedError()
@property
def update_build_trigger(self) -> typing.Callable[
[cloudbuild.UpdateBuildTriggerRequest],
typing.Union[
cloudbuild.BuildTrigger,
typing.Awaitable[cloudbuild.BuildTrigger]
]]:
raise NotImplementedError()
@property
def run_build_trigger(self) -> typing.Callable[
[cloudbuild.RunBuildTriggerRequest],
typing.Union[
operations.Operation,
typing.Awaitable[operations.Operation]
]]:
raise NotImplementedError()
@property
def receive_trigger_webhook(self) -> typing.Callable[
[cloudbuild.ReceiveTriggerWebhookRequest],
typing.Union[
cloudbuild.ReceiveTriggerWebhookResponse,
typing.Awaitable[cloudbuild.ReceiveTriggerWebhookResponse]
]]:
raise NotImplementedError()
@property
def create_worker_pool(self) -> typing.Callable[
[cloudbuild.CreateWorkerPoolRequest],
typing.Union[
cloudbuild.WorkerPool,
typing.Awaitable[cloudbuild.WorkerPool]
]]:
raise NotImplementedError()
@property
def get_worker_pool(self) -> typing.Callable[
[cloudbuild.GetWorkerPoolRequest],
typing.Union[
cloudbuild.WorkerPool,
typing.Awaitable[cloudbuild.WorkerPool]
]]:
raise NotImplementedError()
@property
def delete_worker_pool(self) -> typing.Callable[
[cloudbuild.DeleteWorkerPoolRequest],
typing.Union[
empty.Empty,
typing.Awaitable[empty.Empty]
]]:
raise NotImplementedError()
@property
def update_worker_pool(self) -> typing.Callable[
[cloudbuild.UpdateWorkerPoolRequest],
typing.Union[
cloudbuild.WorkerPool,
typing.Awaitable[cloudbuild.WorkerPool]
]]:
raise NotImplementedError()
@property
def list_worker_pools(self) -> typing.Callable[
[cloudbuild.ListWorkerPoolsRequest],
typing.Union[
cloudbuild.ListWorkerPoolsResponse,
typing.Awaitable[cloudbuild.ListWorkerPoolsResponse]
]]:
raise NotImplementedError()
__all__ = (
'CloudBuildTransport',
)
| [
"bazel-bot-development[bot]@users.noreply.github.com"
] | bazel-bot-development[bot]@users.noreply.github.com |
0dc860e73fdb9073bea7b31d75831fe246b55cd2 | 35c3999aa3f6a9e31ae6f9170ac0235da4fe7e11 | /irekua_rest_api/serializers/devices/physical_devices.py | c9b5828b1b4bdec7cd965b6ca25e906f17db16f2 | [
"BSD-2-Clause"
] | permissive | CONABIO-audio/irekua-rest-api | 28cf9806330c8926437542ae9152b8a7da57714f | 35cf5153ed7f54d12ebad2ac07d472585f04e3e7 | refs/heads/master | 2022-12-12T09:24:18.217032 | 2020-08-15T21:01:20 | 2020-08-15T21:01:20 | 219,046,247 | 0 | 4 | BSD-4-Clause | 2022-12-08T10:54:47 | 2019-11-01T19:03:10 | Python | UTF-8 | Python | false | false | 2,098 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from rest_framework import serializers
from irekua_database.models import PhysicalDevice
from irekua_rest_api.serializers.base import IrekuaModelSerializer
from irekua_rest_api.serializers.base import IrekuaHyperlinkedModelSerializer
from irekua_rest_api.serializers.users import users
from . import devices
class SelectSerializer(IrekuaModelSerializer):
class Meta:
model = PhysicalDevice
fields = (
'url',
'id',
)
class ListSerializer(IrekuaModelSerializer):
type = serializers.CharField(
read_only=True,
source='device.device_type.name')
brand = serializers.CharField(
read_only=True,
source='device.brand.name')
model = serializers.CharField(
read_only=True,
source='device.model')
class Meta:
model = PhysicalDevice
fields = (
'url',
'id',
'serial_number',
'type',
'brand',
'model',
)
class DetailSerializer(IrekuaHyperlinkedModelSerializer):
device = devices.SelectSerializer(many=False, read_only=True)
owner = users.SelectSerializer(many=False, read_only=True)
class Meta:
model = PhysicalDevice
fields = (
'url',
'serial_number',
'owner',
'metadata',
'bundle',
'device',
'created_on',
'modified_on',
)
class CreateSerializer(IrekuaModelSerializer):
class Meta:
model = PhysicalDevice
fields = (
'serial_number',
'device',
'metadata',
'bundle',
)
def create(self, validated_data):
user = self.context['request'].user
validated_data['owner'] = user
return super().create(validated_data)
class UpdateSerializer(IrekuaModelSerializer):
class Meta:
model = PhysicalDevice
fields = (
'serial_number',
'metadata',
)
| [
"[email protected]"
] | |
dc09c3c13f4ca2119ef4419cf567c1bbe2bf7f42 | 81bdc1dccfb95877e5f376527c23cb5c72a13922 | /pyl2extra/gui/debugger/remote_window.py | 05041c0221dd8c677c8b0d68e7c8136c0ee9f4e5 | [
"BSD-3-Clause"
] | permissive | TNick/pyl2extra | 1fb5be10448bc09018e2b0ac294b2e03fb146a57 | 323e1ecefeedc7d196de6d7ac6d8eceecb756333 | refs/heads/master | 2021-01-22T07:04:10.082374 | 2015-08-11T09:57:17 | 2015-08-11T09:57:17 | 34,400,301 | 0 | 1 | null | 2015-04-22T17:19:50 | 2015-04-22T15:58:21 | Python | UTF-8 | Python | false | false | 2,954 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@author: Nicu Tofan <[email protected]>
"""
from PyQt4 import QtGui, QtCore
from pyl2extra.gui.guihelpers import center
class RemoteDialog(QtGui.QDialog):
"""
Allows selecting remote in order to debug on that remote.
"""
def __init__(self, mw):
"""
Constructor
"""
super(RemoteDialog, self).__init__()
self.mw = mw
self.init_ui()
def init_ui(self):
"""
Prepares the GUI.
"""
self.resize(300, 200)
self.setWindowTitle('Connect to remote')
center(self)
self.button_box = QtGui.QDialogButtonBox(self)
self.button_box.setGeometry(QtCore.QRect(150, 250, 341, 32))
self.button_box.setOrientation(QtCore.Qt.Horizontal)
self.button_box.setStandardButtons(QtGui.QDialogButtonBox.Cancel|QtGui.QDialogButtonBox.Ok)
self.button_box.setObjectName("button_box")
lbl_address = QtGui.QLabel('Address')
lbl_post_rcv = QtGui.QLabel('Control port')
lbl_port_sub = QtGui.QLabel('Broadcast port')
le_address = QtGui.QLineEdit()
le_address.setPlaceholderText('The address of the remote machine')
le_address.setToolTip('This may also be an ip address.')
le_address.setText('127.0.0.1')
sp_port_rcv = QtGui.QSpinBox()
sp_port_rcv.setMinimum(1024)
sp_port_rcv.setMaximum(65565)
sp_port_rcv.setValue(5955)
sp_port_rcv.setToolTip('Port for command and control.')
sp_port_sub = QtGui.QSpinBox()
sp_port_sub.setMinimum(1024)
sp_port_sub.setMaximum(65565)
sp_port_sub.setValue(5956)
sp_port_sub.setToolTip('Port where the remote debugger publishes information.')
grid1 = QtGui.QGridLayout()
grid1.setSpacing(10)
grid1.addWidget(lbl_address, 1, 0)
grid1.addWidget(le_address, 1, 1)
grid1.addWidget(lbl_post_rcv, 2, 0)
grid1.addWidget(sp_port_rcv, 2, 1)
grid1.addWidget(lbl_port_sub, 3, 0)
grid1.addWidget(sp_port_sub, 3, 1)
grid = QtGui.QVBoxLayout()
grid.setSpacing(10)
grid.addLayout(grid1)
grid.addWidget(self.button_box)
self.setLayout(grid)
QtCore.QObject.connect(self.button_box, QtCore.SIGNAL("accepted()"), self.accept)
QtCore.QObject.connect(self.button_box, QtCore.SIGNAL("rejected()"), self.reject)
QtCore.QMetaObject.connectSlotsByName(self)
self.le_address = le_address
self.sp_port_rcv = sp_port_rcv
self.sp_port_sub = sp_port_sub
def get_values(self):
"""
Return the values selected by the user.
"""
values = {'address': self.le_address.text().strip(),
'rport': self.sp_port_rcv.value(),
'pport': self.sp_port_sub.value()}
return values
| [
"[email protected]"
] | |
e452abec56c616eb8b46b78e240cb845aecc2319 | 6b0d0aec9704d70663fe0edc2a6624a689cc081e | /src/app/pre/wav.py | db52981330702787a0ef82c48d08f68a0e589f2b | [
"BSD-3-Clause"
] | permissive | stefantaubert/tacotron2 | 086d81b38b2c49655df6b0d8d63c633e7531399a | 8475f014391c5066cfe0b92b6c74568639be5e79 | refs/heads/master | 2023-03-29T21:07:02.266973 | 2020-11-25T09:57:40 | 2020-11-25T09:57:40 | 267,858,113 | 5 | 0 | BSD-3-Clause | 2020-05-29T12:56:56 | 2020-05-29T12:56:55 | null | UTF-8 | Python | false | false | 3,818 | py | import os
from functools import partial
from typing import Any
from src.app.pre.ds import get_ds_dir, load_ds_csv
from src.core.common.utils import get_subdir
from src.core.pre.wav import (WavData, WavDataList, normalize, preprocess,
remove_silence, stereo_to_mono, upsample)
_wav_data_csv = "data.csv"
def _get_wav_root_dir(ds_dir: str, create: bool = False):
return get_subdir(ds_dir, "wav", create)
def get_wav_dir(ds_dir: str, wav_name: str, create: bool = False):
return get_subdir(_get_wav_root_dir(ds_dir, create), wav_name, create)
def load_wav_csv(wav_dir: str) -> WavDataList:
path = os.path.join(wav_dir, _wav_data_csv)
return WavDataList.load(WavData, path)
def save_wav_csv(wav_dir: str, wav_data: WavDataList):
path = os.path.join(wav_dir, _wav_data_csv)
wav_data.save(path)
def preprocess_wavs(base_dir: str, ds_name: str, wav_name: str):
print("Preprocessing wavs...")
ds_dir = get_ds_dir(base_dir, ds_name)
wav_dir = get_wav_dir(ds_dir, wav_name)
if os.path.isdir(wav_dir):
print("Already exists.")
else:
data = load_ds_csv(ds_dir)
wav_data = preprocess(data)
os.makedirs(wav_dir)
save_wav_csv(wav_dir, wav_data)
def _wav_op(base_dir: str, ds_name: str, origin_wav_name: str, destination_wav_name: str, op: Any):
ds_dir = get_ds_dir(base_dir, ds_name)
dest_wav_dir = get_wav_dir(ds_dir, destination_wav_name)
if os.path.isdir(dest_wav_dir):
print("Already exists.")
else:
orig_wav_dir = get_wav_dir(ds_dir, origin_wav_name)
assert os.path.isdir(orig_wav_dir)
data = load_wav_csv(orig_wav_dir)
os.makedirs(dest_wav_dir)
wav_data = op(data, dest_wav_dir)
save_wav_csv(dest_wav_dir, wav_data)
def wavs_normalize(base_dir: str, ds_name: str, orig_wav_name: str, dest_wav_name: str):
print("Normalizing wavs...")
op = partial(normalize)
_wav_op(base_dir, ds_name, orig_wav_name, dest_wav_name, op)
def wavs_upsample(base_dir: str, ds_name: str, orig_wav_name: str, dest_wav_name: str, rate: int):
print("Resampling wavs...")
op = partial(upsample, new_rate=rate)
_wav_op(base_dir, ds_name, orig_wav_name, dest_wav_name, op)
def wavs_stereo_to_mono(base_dir: str, ds_name: str, orig_wav_name: str, dest_wav_name: str):
print("Converting wavs from stereo to mono...")
op = partial(stereo_to_mono)
_wav_op(base_dir, ds_name, orig_wav_name, dest_wav_name, op)
def wavs_remove_silence(base_dir: str, ds_name: str, orig_wav_name: str, dest_wav_name: str, chunk_size: int, threshold_start: float, threshold_end: float, buffer_start_ms: float, buffer_end_ms: float):
print("Removing silence in wavs...")
op = partial(remove_silence, chunk_size=chunk_size, threshold_start=threshold_start,
threshold_end=threshold_end, buffer_start_ms=buffer_start_ms, buffer_end_ms=buffer_end_ms)
_wav_op(base_dir, ds_name, orig_wav_name, dest_wav_name, op)
if __name__ == "__main__":
preprocess_wavs(
base_dir="/datasets/models/taco2pt_v5",
ds_name="ljs",
wav_name="22050kHz",
)
preprocess_wavs(
base_dir="/datasets/models/taco2pt_v5",
ds_name="thchs",
wav_name="16000kHz",
)
wavs_normalize(
base_dir="/datasets/models/taco2pt_v5",
ds_name="thchs",
orig_wav_name="16000kHz",
dest_wav_name="16000kHz_normalized",
)
wavs_remove_silence(
base_dir="/datasets/models/taco2pt_v5",
ds_name="thchs",
orig_wav_name="16000kHz_normalized",
dest_wav_name="16000kHz_normalized_nosil",
threshold_start=-20,
threshold_end=-30,
chunk_size=5,
buffer_start_ms=100,
buffer_end_ms=150
)
wavs_upsample(
base_dir="/datasets/models/taco2pt_v5",
ds_name="thchs",
orig_wav_name="16000kHz_normalized_nosil",
dest_wav_name="22050kHz_normalized_nosil",
rate=22050,
)
| [
"[email protected]"
] | |
541a2bb132c30e724fa65dfdccfd3b3de2e89856 | 7f651a7dfa7cd101ddf9dd133ff78bfe996eeb3f | /main.py | 910916d224daa088ba293871bad373666348f2d1 | [
"MIT"
] | permissive | TrendingTechnology/PyPi-Bot | 33071b0e789509dfc267ec25a3e11417d60c1395 | bc2ee98981af4bc9f415a1f968bf872380d017f0 | refs/heads/main | 2023-06-30T08:50:57.641601 | 2021-08-02T13:47:28 | 2021-08-02T13:47:28 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,257 | py | # Author: Fayas (https://github.com/FayasNoushad) (@FayasNoushad)
import os
import requests
from requests.utils import requote_uri
from pyrogram import Client, filters
from pyrogram.types import InlineKeyboardMarkup, InlineKeyboardButton
API = "https://api.abirhasan.wtf/pypi?query="
START_TEXT = """
Hello {},
I am a pypi package search telegram bot.
- Send a pypi package name.
- I will send the information of package.
Made by @FayasNoushad
"""
BUTTONS = [InlineKeyboardButton('⚙ Join Updates Channel ⚙', url='https://telegram.me/FayasNoushad')]
Bot = Client(
"PyPi-Bot",
bot_token = os.environ["BOT_TOKEN"],
api_id = int(os.environ["API_ID"]),
api_hash = os.environ["API_HASH"]
)
@Bot.on_message(filters.private & filters.command(["start", "help", "about"]))
async def start(bot, update):
text = START_TEXT.format(update.from_user.mention)
reply_markup = InlineKeyboardMarkup([BUTTONS])
await update.reply_text(
text=text,
disable_web_page_preview=True,
reply_markup=reply_markup,
quote=True
)
@Bot.on_message(filters.text)
async def pypi_info(bot, update):
try:
query = update.text if update.chat.type == "private" else update.text.split()[1]
text = pypi_text(query)
reply_markup = InlineKeyboardMarkup([pypi_buttons(query), BUTTONS])
await update.reply_text(
text=text,
disable_web_page_preview=True,
reply_markup=reply_markup,
quote=True
)
except:
pass
def pypi(query):
r = requests.get(requote_uri(API + query))
info = r.json()
return info
def pypi_text(query):
info = pypi(query)
text = "--**Information**--\n"
text += f"\n**Package Name:** `{info['PackageName']}`"
text += f"\n**Title:** `{info['Title']}`"
text += f"\n**About:** `{info['About']}`"
text += f"\n**Latest Release Date:** `{info['LatestReleaseDate']}`"
text += f"\n**PiP Command:** `{info['PipCommand']}`"
return text
def pypi_buttons(query):
info = pypi(query)
buttons = [
InlineKeyboardButton(text="PyPi", url=info['PyPi']),
InlineKeyboardButton(text="Home Page", url=info['HomePage'])
]
return buttons
Bot.run()
| [
"[email protected]"
] | |
d255e8072a01057e097ccaa3a705564e60199c9e | 91fe8f479fa921fa84111d19222a5c6aa6eff030 | /basis/progr-py/Gui/ShellGui/packdlg_redirect.py | e74111a94ff6ede688ace45c422255376555b419 | [] | no_license | romanticair/python | 2055c9cdaa46894c9788d5797643283786ed46dd | 6f91fe5e7cbedcdf4b8f7baa7641fd615b4d6141 | refs/heads/master | 2022-11-03T17:17:17.608786 | 2019-07-05T07:07:29 | 2019-07-05T07:07:29 | 195,356,190 | 0 | 1 | null | 2022-10-14T20:51:14 | 2019-07-05T07:00:33 | Python | UTF-8 | Python | false | false | 496 | py | # 将命令行脚本包装到图形界面重定向工具中,输出显示到弹出式窗口中
from tkinter import *
from packdlg import runPackDialog
from Gui.Tools.guiStreams import redirectedGuiFunc
def runPackDialog_Wrapped():      # callback run from mytools.py
    redirectedGuiFunc(runPackDialog)  # wrap the whole callback handler
if __name__ == '__main__':
root = Tk()
Button(root, text='pop', command=runPackDialog_Wrapped).pack(fill=X)
root.mainloop()
| [
"[email protected]"
] | |
0b216fd14819d28ed423883d66d0426efdbf220b | d00e68b2c14b44e7cbf13f7ddca284cdfd21c201 | /tests/v2_validation/cattlevalidationtest/core/test_services_lb_host_routing_balancer.py | 94d5b0c0c6c9e7f67ae8c9ce735a2053a54d1fa2 | [
"Apache-2.0",
"LicenseRef-scancode-warranty-disclaimer"
] | permissive | guangbochen/validation-tests | db92aff9b48c705115be828de1dc645143f4c9c8 | 23e7ab95ce76744483a0657f790b42a88a93436d | refs/heads/master | 2021-09-11T14:16:57.668817 | 2018-04-08T19:01:02 | 2018-04-08T19:01:02 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 57,937 | py | from common_fixtures import * # NOQA
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
STRESS_LB_PORT_RULE_COUNT = os.environ.get(
'STRESS_LB_PORT_RULE_COUNT', "20")
def test_lbservice_host_routing_1(client, socat_containers):
port = "900"
service_scale = 2
lb_scale = 1
service_count = 4
port_rules = []
port_rule = {"hostname": "www.abc1.com",
"path": "/service1.html",
"serviceId": 0,
"sourcePort": port,
"targetPort": "80",
"protocol": "http"
}
port_rules.append(port_rule)
port_rule = {"hostname": "www.abc2.com",
"path": "/service2.html",
"serviceId": 0,
"sourcePort": port,
"targetPort": "80",
"protocol": "http"
}
port_rules.append(port_rule)
port_rule = {"hostname": "www.abc1.com",
"path": "/service1.html",
"serviceId": 1,
"sourcePort": port,
"targetPort": "80",
"protocol": "http"
}
port_rules.append(port_rule)
port_rule = {"hostname": "www.abc2.com",
"path": "/service2.html",
"serviceId": 1,
"sourcePort": port,
"targetPort": "80",
"protocol": "http"
}
port_rules.append(port_rule)
port_rule = {"hostname": "www.abc3.com",
"path": "/service1.html",
"serviceId": 2,
"sourcePort": port,
"targetPort": "80",
"protocol": "http"
}
port_rules.append(port_rule)
port_rule = {"hostname": "www.abc4.com",
"path": "/service2.html",
"serviceId": 2,
"sourcePort": port,
"targetPort": "80",
"protocol": "http"
}
port_rules.append(port_rule)
port_rule = {"hostname": "www.abc3.com",
"path": "/service1.html",
"serviceId": 3,
"sourcePort": port,
"targetPort": "80",
"protocol": "http"
}
port_rules.append(port_rule)
port_rule = {"hostname": "www.abc4.com",
"path": "/service2.html",
"serviceId": 3,
"sourcePort": port,
"targetPort": "80",
"protocol": "http"
}
port_rules.append(port_rule)
env, services, lb_service = create_env_with_multiple_svc_and_lb(
client, service_scale, lb_scale, [port], service_count, port_rules)
wait_for_lb_service_to_become_active(client,
services, lb_service)
validate_lb_service(client,
lb_service, port,
[services[0], services[1]],
"www.abc1.com", "/service1.html")
validate_lb_service(client,
lb_service, port, [services[0], services[1]],
"www.abc2.com", "/service2.html")
validate_lb_service(client,
lb_service, port, [services[2], services[3]],
"www.abc3.com", "/service1.html")
validate_lb_service(client,
lb_service, port, [services[2], services[3]],
"www.abc4.com", "/service2.html")
delete_all(client, [env])
def test_lbservice_host_routing_cross_stack(
client, socat_containers):
port = "901"
service_scale = 2
lb_scale = 1
service_count = 4
port_rules = []
port_rule = {"hostname": "www.abc1.com",
"path": "/service1.html",
"serviceId": 0,
"sourcePort": port,
"targetPort": "80",
"protocol": "http"
}
port_rules.append(port_rule)
port_rule = {"hostname": "www.abc2.com",
"path": "/service2.html",
"serviceId": 0,
"sourcePort": port,
"targetPort": "80",
"protocol": "http"
}
port_rules.append(port_rule)
port_rule = {"hostname": "www.abc1.com",
"path": "/service1.html",
"serviceId": 1,
"sourcePort": port,
"targetPort": "80",
"protocol": "http"
}
port_rules.append(port_rule)
port_rule = {"hostname": "www.abc2.com",
"path": "/service2.html",
"serviceId": 1,
"sourcePort": port,
"targetPort": "80",
"protocol": "http"
}
port_rules.append(port_rule)
port_rule = {"hostname": "www.abc3.com",
"path": "/service1.html",
"serviceId": 2,
"sourcePort": port,
"targetPort": "80",
"protocol": "http"
}
port_rules.append(port_rule)
port_rule = {"hostname": "www.abc4.com",
"path": "/service2.html",
"serviceId": 2,
"sourcePort": port,
"targetPort": "80",
"protocol": "http"
}
port_rules.append(port_rule)
port_rule = {"hostname": "www.abc3.com",
"path": "/service1.html",
"serviceId": 3,
"sourcePort": port,
"targetPort": "80",
"protocol": "http"
}
port_rules.append(port_rule)
port_rule = {"hostname": "www.abc4.com",
"path": "/service2.html",
"serviceId": 3,
"sourcePort": port,
"targetPort": "80",
"protocol": "http"
}
port_rules.append(port_rule)
env, services, lb_service = create_env_with_multiple_svc_and_lb(
client, service_scale, lb_scale, [port], service_count,
port_rules, crosslinking=True)
for service in services:
service = service.activate()
for service in services:
service = client.wait_success(service, 120)
assert service.state == "active"
wait_for_lb_service_to_become_active(client,
services, lb_service)
validate_lb_service(client,
lb_service, port,
[services[0], services[1]],
"www.abc1.com", "/service1.html")
validate_lb_service(client,
lb_service, port, [services[0], services[1]],
"www.abc2.com", "/service2.html")
validate_lb_service(client,
lb_service, port, [services[2], services[3]],
"www.abc3.com", "/service1.html")
validate_lb_service(client,
lb_service, port, [services[2], services[3]],
"www.abc4.com", "/service2.html")
to_delete = [env]
for service in services:
to_delete.append(get_env(client, service))
delete_all(client, to_delete)
def test_lbservice_host_routing_2(client, socat_containers):
port = "902"
service_scale = 2
lb_scale = 1
service_count = 3
port_rules = []
port_rule = {"hostname": "www.abc1.com",
"path": "/service1.html",
"serviceId": 0,
"sourcePort": port,
"targetPort": "80",
"protocol": "http"
}
port_rules.append(port_rule)
port_rule = {"hostname": "www.abc2.com",
"path": "/service2.html",
"serviceId": 0,
"sourcePort": port,
"targetPort": "80",
"protocol": "http"
}
port_rules.append(port_rule)
port_rule = {"hostname": "www.abc1.com",
"path": "/service1.html",
"serviceId": 1,
"sourcePort": port,
"targetPort": "80",
"protocol": "http"
}
port_rules.append(port_rule)
port_rule = {"hostname": "www.abc2.com",
"path": "/service2.html",
"serviceId": 1,
"sourcePort": port,
"targetPort": "80",
"protocol": "http"
}
port_rules.append(port_rule)
port_rule = {"hostname": "www.abc1.com",
"path": "/name.html",
"serviceId": 2,
"sourcePort": port,
"targetPort": "80",
"protocol": "http"
}
port_rules.append(port_rule)
port_rule = {"hostname": "www.abc2.com",
"path": "/name.html",
"serviceId": 2,
"sourcePort": port,
"targetPort": "80",
"protocol": "http"
}
port_rules.append(port_rule)
env, services, lb_service = create_env_with_multiple_svc_and_lb(
client, service_scale, lb_scale, [port], service_count, port_rules)
wait_for_lb_service_to_become_active(client,
services, lb_service)
validate_lb_service(client,
lb_service, port, [services[0], services[1]],
"www.abc1.com", "/service1.html")
validate_lb_service(client,
lb_service, port, [services[0], services[1]],
"www.abc2.com", "/service2.html")
validate_lb_service(client,
lb_service, port, [services[2]],
"www.abc1.com", "/name.html")
validate_lb_service(client,
lb_service, port, [services[2]],
"www.abc2.com", "/name.html")
validate_lb_service_for_no_access(client, lb_service, port,
"www.abc1.com",
"/service2.html")
validate_lb_service_for_no_access(client, lb_service, port,
"www.abc2.com",
"/service1.html")
delete_all(client, [env])
def test_lbservice_host_routing_scale_up(
client, socat_containers):
port = "903"
service_scale = 2
lb_scale = 1
service_count = 3
port_rules = []
port_rule = {"hostname": "www.abc1.com",
"path": "/service1.html",
"serviceId": 0,
"sourcePort": port,
"targetPort": "80",
"protocol": "http"
}
port_rules.append(port_rule)
port_rule = {"hostname": "www.abc2.com",
"path": "/service2.html",
"serviceId": 0,
"sourcePort": port,
"targetPort": "80",
"protocol": "http"
}
port_rules.append(port_rule)
port_rule = {"hostname": "www.abc1.com",
"path": "/service1.html",
"serviceId": 1,
"sourcePort": port,
"targetPort": "80",
"protocol": "http"
}
port_rules.append(port_rule)
port_rule = {"hostname": "www.abc2.com",
"path": "/service2.html",
"serviceId": 1,
"sourcePort": port,
"targetPort": "80",
"protocol": "http"
}
port_rules.append(port_rule)
port_rule = {"hostname": "www.abc1.com",
"path": "/name.html",
"serviceId": 2,
"sourcePort": port,
"targetPort": "80",
"protocol": "http"
}
port_rules.append(port_rule)
port_rule = {"hostname": "www.abc2.com",
"path": "/name.html",
"serviceId": 2,
"sourcePort": port,
"targetPort": "80",
"protocol": "http"
}
port_rules.append(port_rule)
env, services, lb_service = create_env_with_multiple_svc_and_lb(
client, service_scale, lb_scale, [port], service_count, port_rules)
wait_for_lb_service_to_become_active(client,
services, lb_service)
validate_lb_service(client,
lb_service, port, [services[0], services[1]],
"www.abc1.com", "/service1.html")
validate_lb_service(client,
lb_service, port, [services[0], services[1]],
"www.abc2.com", "/service2.html")
validate_lb_service(client,
lb_service, port, [services[2]],
"www.abc1.com", "/name.html")
validate_lb_service(client,
lb_service, port, [services[2]],
"www.abc2.com", "/name.html")
validate_lb_service_for_no_access(client, lb_service, port,
"www.abc1.com",
"/service2.html")
validate_lb_service_for_no_access(client, lb_service, port,
"www.abc2.com",
"/service1.html")
final_service_scale = 3
final_services = []
for service in services:
service = client.update(service, scale=final_service_scale,
name=service.name)
service = client.wait_success(service, 120)
assert service.state == "active"
assert service.scale == final_service_scale
final_services.append(service)
wait_for_lb_service_to_become_active(client,
final_services, lb_service)
validate_lb_service(client, lb_service, port,
[final_services[0], final_services[1]],
"www.abc1.com", "/service1.html")
validate_lb_service(client,
lb_service, port,
[final_services[0], final_services[1]],
"www.abc2.com", "/service2.html")
validate_lb_service(client,
lb_service, port, [final_services[2]],
"www.abc1.com", "/name.html")
validate_lb_service(client,
lb_service, port, [final_services[2]],
"www.abc2.com", "/name.html")
validate_lb_service_for_no_access(client, lb_service, port,
"www.abc1.com",
"/service2.html")
validate_lb_service_for_no_access(client, lb_service, port,
"www.abc2.com", "/service1.html")
delete_all(client, [env])
def test_lbservice_host_routing_scale_down(
client, socat_containers):
port = "904"
service_scale = 3
lb_scale = 1
service_count = 3
port_rules = []
port_rule = {"hostname": "www.abc1.com",
"path": "/service1.html",
"serviceId": 0,
"sourcePort": port,
"targetPort": "80",
"protocol": "http"
}
port_rules.append(port_rule)
port_rule = {"hostname": "www.abc2.com",
"path": "/service2.html",
"serviceId": 0,
"sourcePort": port,
"targetPort": "80",
"protocol": "http"
}
port_rules.append(port_rule)
port_rule = {"hostname": "www.abc1.com",
"path": "/service1.html",
"serviceId": 1,
"sourcePort": port,
"targetPort": "80",
"protocol": "http"
}
port_rules.append(port_rule)
port_rule = {"hostname": "www.abc2.com",
"path": "/service2.html",
"serviceId": 1,
"sourcePort": port,
"targetPort": "80",
"protocol": "http"
}
port_rules.append(port_rule)
port_rule = {"hostname": "www.abc1.com",
"path": "/name.html",
"serviceId": 2,
"sourcePort": port,
"targetPort": "80",
"protocol": "http"
}
port_rules.append(port_rule)
port_rule = {"hostname": "www.abc2.com",
"path": "/name.html",
"serviceId": 2,
"sourcePort": port,
"targetPort": "80",
"protocol": "http"
}
port_rules.append(port_rule)
env, services, lb_service = create_env_with_multiple_svc_and_lb(
client, service_scale, lb_scale, [port], service_count, port_rules)
wait_for_lb_service_to_become_active(client,
services, lb_service)
validate_lb_service(client,
lb_service, port, [services[0], services[1]],
"www.abc1.com", "/service1.html")
validate_lb_service(client,
lb_service, port, [services[0], services[1]],
"www.abc2.com", "/service2.html")
validate_lb_service(client,
lb_service, port, [services[2]],
"www.abc1.com", "/name.html")
validate_lb_service(client,
lb_service, port, [services[2]],
"www.abc2.com", "/name.html")
validate_lb_service_for_no_access(client, lb_service, port,
"www.abc1.com",
"/service2.html")
validate_lb_service_for_no_access(client, lb_service, port,
"www.abc2.com",
"/service1.html")
final_service_scale = 2
final_services = []
for service in services:
service = client.update(service, scale=final_service_scale,
name=service.name)
service = client.wait_success(service, 120)
assert service.state == "active"
assert service.scale == final_service_scale
final_services.append(service)
wait_for_lb_service_to_become_active(client,
final_services, lb_service)
validate_lb_service(client, lb_service, port,
[final_services[0], final_services[1]],
"www.abc1.com", "/service1.html")
validate_lb_service(client, lb_service, port,
[final_services[0], final_services[1]],
"www.abc2.com", "/service2.html")
validate_lb_service(client, lb_service,
port, [final_services[2]],
"www.abc1.com", "/name.html")
validate_lb_service(client, lb_service, port,
[final_services[2]],
"www.abc2.com", "/name.html")
validate_lb_service_for_no_access(client, lb_service, port,
"www.abc1.com",
"/service2.html")
validate_lb_service_for_no_access(client, lb_service, port,
"www.abc2.com",
"/service1.html")
delete_all(client, [env])
def test_lbservice_host_routing_only_path(
client, socat_containers):
port = "905"
service_scale = 2
lb_scale = 1
service_count = 2
port_rules = []
port_rule = {"path": "/service1.html",
"serviceId": 0,
"sourcePort": port,
"targetPort": "80",
"protocol": "http"
}
port_rules.append(port_rule)
port_rule = {"path": "/service2.html",
"serviceId": 1,
"sourcePort": port,
"targetPort": "80",
"protocol": "http"
}
port_rules.append(port_rule)
env, services, lb_service = create_env_with_multiple_svc_and_lb(
client, service_scale, lb_scale, [port], service_count, port_rules)
wait_for_lb_service_to_become_active(client,
services, lb_service)
validate_lb_service(client,
lb_service, port, [services[0]],
"www.abc1.com", "/service1.html")
validate_lb_service(client,
lb_service, port, [services[0]],
"www.abc2.com", "/service1.html")
validate_lb_service(client,
lb_service, port, [services[0]],
None, "/service1.html")
validate_lb_service(client,
lb_service, port, [services[1]],
"www.abc3.com", "/service2.html")
validate_lb_service(client,
lb_service, port, [services[1]],
"www.abc2.com", "/service2.html")
validate_lb_service(client,
lb_service, port, [services[0]],
None, "/service1.html")
validate_lb_service_for_no_access(client, lb_service, port,
"www.abc3.com", "/name.html")
delete_all(client, [env])
def test_lbservice_host_routing_only_host(
client, socat_containers):
port = "906"
service_scale = 2
lb_scale = 1
service_count = 2
port_rules = []
port_rule = {"hostname": "www.abc.com",
"serviceId": 0,
"sourcePort": port,
"targetPort": "80",
"protocol": "http"
}
port_rules.append(port_rule)
port_rule = {"hostname": "www.abc1.com",
"serviceId": 1,
"sourcePort": port,
"targetPort": "80",
"protocol": "http"
}
port_rules.append(port_rule)
env, services, lb_service = create_env_with_multiple_svc_and_lb(
client, service_scale, lb_scale, [port], service_count, port_rules)
wait_for_lb_service_to_become_active(client,
[services[0], services[1]],
lb_service)
validate_lb_service(client,
lb_service, port, [services[0]],
"www.abc.com", "/service1.html")
validate_lb_service(client,
lb_service, port, [services[0]],
"www.abc.com", "/service2.html")
validate_lb_service(client,
lb_service, port, [services[1]],
"www.abc1.com", "/name.html")
validate_lb_service_for_no_access(client, lb_service, port,
"www.abc2.com", "/name.html")
delete_all(client, [env])
def test_lbservice_host_routing_3(client, socat_containers):
port = "907"
service_scale = 2
lb_scale = 1
service_count = 4
port_rules = []
port_rule = {"hostname": "www.abc.com",
"serviceId": 0,
"sourcePort": port,
"targetPort": "80",
"protocol": "http"
}
port_rules.append(port_rule)
port_rule = {"hostname": "www.abc1.com",
"serviceId": 1,
"sourcePort": port,
"targetPort": "80",
"protocol": "http"
}
port_rules.append(port_rule)
port_rule = {"serviceId": 2,
"sourcePort": port,
"targetPort": "80",
"protocol": "http"
}
port_rules.append(port_rule)
port_rule = {"path": "/service1.html",
"serviceId": 3,
"sourcePort": port,
"targetPort": "80",
"protocol": "http"
}
port_rules.append(port_rule)
env, services, lb_service = create_env_with_multiple_svc_and_lb(
client, service_scale, lb_scale, [port], service_count, port_rules)
wait_for_lb_service_to_become_active(client,
services, lb_service)
validate_lb_service(client,
lb_service, port, [services[0]],
"www.abc.com", "/service2.html")
validate_lb_service(client,
lb_service, port, [services[1]],
"www.abc1.com", "/name.html")
validate_lb_service(client,
lb_service, port, [services[2]],
"www.abc2.com", "/name.html")
validate_lb_service(client,
lb_service, port, [services[3]],
"www.abc3.com", "/service1.html")
delete_all(client, [env])
def test_lbservice_edit_host_routing_3(client, socat_containers):
port = "908"
service_scale = 2
lb_scale = 1
service_count = 5
port_rules = []
port_rule = {"hostname": "www.abc.com",
"serviceId": 0,
"sourcePort": port,
"targetPort": "80",
"protocol": "http"
}
port_rules.append(port_rule)
port_rule = {"hostname": "www.abc1.com",
"serviceId": 1,
"sourcePort": port,
"targetPort": "80",
"protocol": "http"
}
port_rules.append(port_rule)
port_rule = {"serviceId": 2,
"sourcePort": port,
"targetPort": "80",
"protocol": "http"
}
port_rules.append(port_rule)
port_rule = {"path": "/service1.html",
"serviceId": 3,
"sourcePort": port,
"targetPort": "80",
"protocol": "http"
}
port_rules.append(port_rule)
env, services, lb_service = create_env_with_multiple_svc_and_lb(
client, service_scale, lb_scale, [port], service_count, port_rules)
service_list = [services[0], services[1], services[2], services[3]]
wait_for_lb_service_to_become_active(client,
service_list, lb_service)
validate_lb_service(client, lb_service,
port, [services[0]],
"www.abc.com", "/service2.html")
validate_lb_service(client,
lb_service, port, [services[1]],
"www.abc1.com", "/name.html")
validate_lb_service(client,
lb_service, port, [services[2]],
"www.abc2.com", "/name.html")
validate_lb_service(client,
lb_service, port, [services[3]],
"www.abc3.com", "/service1.html")
# Edit port_rules
port_rules = []
port_rule = {"hostname": "www.abc.com",
"serviceId": services[0].id,
"sourcePort": port,
"targetPort": "80",
"protocol": "http"
}
port_rules.append(port_rule)
port_rule = {"serviceId": services[2].id,
"sourcePort": port,
"targetPort": "80",
"protocol": "http"
}
port_rules.append(port_rule)
port_rule = {"path": "/service2.html",
"serviceId": services[3].id,
"sourcePort": port,
"targetPort": "80",
"protocol": "http"
}
port_rules.append(port_rule)
port_rule = {"hostname": "www.abc.com",
"serviceId": services[4].id,
"sourcePort": port,
"targetPort": "80",
"protocol": "http"
}
port_rules.append(port_rule)
port_rule = {"hostname": "www.abc1.com",
"serviceId": services[4].id,
"sourcePort": port,
"targetPort": "80",
"protocol": "http"
}
port_rules.append(port_rule)
lb_service = client.update(lb_service,
lbConfig=create_lb_config(port_rules))
service_list = [services[0], services[2], services[3], services[4]]
wait_for_lb_service_to_become_active(client,
service_list, lb_service)
validate_lb_service(client,
lb_service, port, [services[0], services[4]],
"www.abc.com", "/service1.html")
validate_lb_service(client,
lb_service, port, [services[4]],
"www.abc1.com", "/name.html")
validate_lb_service(client,
lb_service, port, [services[2]],
"www.abc2.com", "/name.html")
validate_lb_service(client,
lb_service, port, [services[3]],
"www.abc3.com", "/service2.html")
delete_all(client, [env])
def test_lbservice_edit_host_routing_add_host(
client, socat_containers):
port = "909"
service_scale = 2
lb_scale = 1
service_count = 1
port_rules = []
port_rule = {"hostname": "www.abc.com",
"serviceId": 0,
"sourcePort": port,
"targetPort": "80",
"protocol": "http"
}
port_rules.append(port_rule)
env, services, lb_service = create_env_with_multiple_svc_and_lb(
client, service_scale, lb_scale, [port], service_count, port_rules)
wait_for_lb_service_to_become_active(client,
services, lb_service)
validate_lb_service(client,
lb_service, port, [services[0]],
"www.abc.com", "/service2.html")
validate_lb_service_for_no_access(client, lb_service, port,
"www.abc2.com", "/name.html")
validate_lb_service_for_no_access(client, lb_service, port,
"www.abc3.com", "/name.html")
port_rule = {"hostname": "www.abc2.com",
"serviceId": services[0].id,
"sourcePort": port,
"targetPort": "80",
"protocol": "http"
}
port_rules.append(port_rule)
lb_service = client.update(lb_service,
lbConfig=create_lb_config(port_rules))
wait_for_lb_service_to_become_active(client,
services, lb_service)
validate_lb_service(client,
lb_service, port, [services[0]],
"www.abc.com", "/service2.html")
validate_lb_service(client,
lb_service, port, [services[0]],
"www.abc2.com", "/name.html")
validate_lb_service_for_no_access(client, lb_service, port,
"www.abc3.com", "/name.html")
delete_all(client, [env])
def test_lbservice_edit_host_routing_remove_host(
client, socat_containers):
port = "910"
service_scale = 2
lb_scale = 1
service_count = 1
port_rules = []
port_rule = {"hostname": "www.abc.com",
"serviceId": 0,
"sourcePort": port,
"targetPort": "80",
"protocol": "http"
}
port_rules.append(port_rule)
port_rule = {"hostname": "www.abc2.com",
"serviceId": 0,
"sourcePort": port,
"targetPort": "80",
"protocol": "http"
}
port_rules.append(port_rule)
env, services, lb_service = create_env_with_multiple_svc_and_lb(
client, service_scale, lb_scale, [port], service_count, port_rules)
wait_for_lb_service_to_become_active(client, services,
lb_service)
validate_lb_service(client,
lb_service, port, [services[0]],
"www.abc.com", "/service2.html")
validate_lb_service(client,
lb_service, port, [services[0]],
"www.abc2.com", "/service2.html")
validate_lb_service_for_no_access(client, lb_service, port,
"www.abc3.com", "/name.html")
# Edit port rules
port_rules = []
port_rule = {"hostname": "www.abc.com",
"serviceId": services[0].id,
"sourcePort": port,
"targetPort": "80",
"protocol": "http"
}
port_rules.append(port_rule)
lb_service = client.update(lb_service,
lbConfig=create_lb_config(port_rules))
wait_for_lb_service_to_become_active(client,
services, lb_service)
validate_lb_service(client,
lb_service, port, [services[0]],
"www.abc.com", "/service2.html")
validate_lb_service_for_no_access(client, lb_service, port,
"www.abc2.com", "/name.html")
delete_all(client, [env])
def test_lbservice_edit_host_routing_edit_existing_host(
client, socat_containers):
port = "911"
service_scale = 2
lb_scale = 1
service_count = 1
port_rules = []
port_rule = {"hostname": "www.abc.com",
"serviceId": 0,
"sourcePort": port,
"targetPort": "80",
"protocol": "http"
}
port_rules.append(port_rule)
env, services, lb_service = create_env_with_multiple_svc_and_lb(
client, service_scale, lb_scale, [port], service_count, port_rules)
wait_for_lb_service_to_become_active(client,
services, lb_service)
validate_lb_service(client,
lb_service, port, [services[0]],
"www.abc.com", "/service2.html")
validate_lb_service_for_no_access(client, lb_service, port,
"www.abc2.com", "/name.html")
# Edit port rules
port_rules = []
port_rule = {"hostname": "www.abc2.com",
"serviceId": services[0].id,
"sourcePort": port,
"targetPort": "80",
"protocol": "http"
}
port_rules.append(port_rule)
lb_service = client.update(lb_service,
lbConfig=create_lb_config(port_rules))
wait_for_lb_service_to_become_active(client,
services, lb_service)
validate_lb_service(client,
lb_service, port, [services[0]],
"www.abc2.com", "/service2.html")
validate_lb_service_for_no_access(client, lb_service, port,
"www.abc.com", "/name.html")
delete_all(client, [env])
def test_lbservice_host_routing_multiple_port_1(
client, socat_containers):
port1 = "1000"
port2 = "1001"
port1_target = "80"
port2_target = "81"
service_scale = 2
lb_scale = 1
service_count = 4
port_rules = []
port_rule = {"hostname": "www.abc1.com",
"path": "/service1.html",
"serviceId": 0,
"sourcePort": port1,
"targetPort": port1_target,
"protocol": "http"
}
port_rules.append(port_rule)
port_rule = {"hostname": "www.abc1.com",
"path": "/service3.html",
"serviceId": 0,
"sourcePort": port2,
"targetPort": port2_target,
"protocol": "http"
}
port_rules.append(port_rule)
port_rule = {"hostname": "www.abc2.com",
"serviceId": 1,
"sourcePort": port1,
"targetPort": port1_target,
"protocol": "http"
}
port_rules.append(port_rule)
port_rule = {"hostname": "www.abc2.com",
"serviceId": 1,
"sourcePort": port2,
"targetPort": port2_target,
"protocol": "http"
}
port_rules.append(port_rule)
port_rule = {"path": "/service1.html",
"serviceId": 2,
"sourcePort": port1,
"targetPort": port1_target,
"protocol": "http"
}
port_rules.append(port_rule)
port_rule = {"path": "/service3.html",
"serviceId": 2,
"sourcePort": port2,
"targetPort": port2_target,
"protocol": "http"
}
port_rules.append(port_rule)
port_rule = {"serviceId": 3,
"sourcePort": port1,
"targetPort": port1_target,
"protocol": "http"}
port_rules.append(port_rule)
port_rule = {"serviceId": 3,
"sourcePort": port2,
"targetPort": port2_target,
"protocol": "http"}
port_rules.append(port_rule)
env, services, lb_service = \
create_env_with_multiple_svc_and_lb(
client, service_scale, lb_scale, [port1, port2], service_count,
port_rules)
wait_for_lb_service_to_become_active(client,
services, lb_service)
validate_lb_service(client,
lb_service, port1,
[services[0]],
"www.abc1.com", "/service1.html")
validate_lb_service(client,
lb_service, port1, [services[3]],
"www.abc1.com", "/service2.html")
validate_lb_service(client,
lb_service, port1, [services[1]],
"www.abc2.com", "/service1.html")
validate_lb_service(client,
lb_service, port1, [services[1]],
"www.abc2.com", "/service2.html")
validate_lb_service(client,
lb_service, port2, [services[1]],
"www.abc2.com", "/service3.html")
validate_lb_service(client,
lb_service, port2,
[services[0]],
"www.abc1.com", "/service3.html")
validate_lb_service(client,
lb_service, port2, [services[2]],
"www.abc4.com", "/service3.html")
validate_lb_service(client,
lb_service, port2, [services[3]],
"www.abc3.com", "/service4.html")
delete_all(client, [env])
def test_lbservice_host_routing_multiple_port_2(
client, socat_containers):
port1 = "1002"
port2 = "1003"
port1_target = "80"
port2_target = "81"
service_scale = 2
lb_scale = 1
service_count = 3
port_rules = []
port_rule = {"path": "/81",
"serviceId": 0,
"sourcePort": port1,
"targetPort": port1_target,
"protocol": "http"
}
port_rules.append(port_rule)
port_rule = {"path": "/81/service3.html",
"serviceId": 1,
"sourcePort": port1,
"targetPort": port1_target,
"protocol": "http"
}
port_rules.append(port_rule)
port_rule = {"path": "/service",
"serviceId": 2,
"sourcePort": port1,
"targetPort": port1_target,
"protocol": "http"
}
port_rules.append(port_rule)
port_rule = {"path": "/service",
"serviceId": 2,
"sourcePort": port2,
"targetPort": port2_target,
"protocol": "http"
}
port_rules.append(port_rule)
env, services, lb_service = \
create_env_with_multiple_svc_and_lb(
client, service_scale, lb_scale, [port1, port2], service_count,
port_rules)
wait_for_lb_service_to_become_active(client,
services, lb_service)
validate_lb_service(client,
lb_service, port1,
[services[2]],
"www.abc1.com", "/service1.html")
validate_lb_service(client,
lb_service, port1, [services[0]],
"www.abc1.com", "/81/service4.html")
validate_lb_service(client,
lb_service, port1, [services[1]],
"www.abc1.com", "/81/service3.html")
validate_lb_service(client,
lb_service, port2, [services[2]],
"www.abc1.com", "/service3.html")
validate_lb_service(client,
lb_service, port2, [services[2]],
"www.abc1.com", "/service4.html")
delete_all(client, [env])
def test_lbservice_host_routing_multiple_port_3(
client, socat_containers):
port1 = "1004"
port2 = "1005"
port1_target = "80"
port2_target = "81"
service_scale = 2
lb_scale = 1
service_count = 2
port_rules = []
port_rule = {"serviceId": 0,
"sourcePort": port1,
"targetPort": port1_target,
"protocol": "http"
}
port_rules.append(port_rule)
port_rule = {"serviceId": 1,
"sourcePort": port2,
"targetPort": port2_target,
"protocol": "http"
}
port_rules.append(port_rule)
env, services, lb_service = \
create_env_with_multiple_svc_and_lb(
client, service_scale, lb_scale, [port1, port2], service_count,
port_rules)
wait_for_lb_service_to_become_active(client,
services, lb_service)
validate_lb_service(client,
lb_service, port1,
[services[0]],
"www.abc1.com", "/service1.html")
validate_lb_service(client,
lb_service, port2,
[services[1]],
"www.abc1.com", "/service3.html")
delete_all(client, [env])
def test_lbservice_external_service(client, socat_containers):
port = "1010"
lb_scale = 2
env, lb_service, ext_service, con_list = \
create_env_with_ext_svc_and_lb(client, lb_scale, port)
ext_service = activate_svc(client, ext_service)
lb_service = activate_svc(client, lb_service)
validate_lb_service_for_external_services(client,
lb_service, port, con_list)
delete_all(client, [env])
def test_lbservice_host_routing_tcp_only(client,
socat_containers):
port = "1011"
service_scale = 2
lb_scale = 1
service_count = 2
port_rules = []
port_rule = {"hostname": "www.abc1.com",
"path": "/service1.html",
"serviceId": 0,
"sourcePort": port,
"targetPort": "80",
"protocol": "tcp"
}
port_rules.append(port_rule)
port_rule = {"hostname": "www.abc2.com",
"path": "/service2.html",
"serviceId": 1,
"sourcePort": port,
"targetPort": "80",
"protocol": "tcp"
}
port_rules.append(port_rule)
port_rule = {"hostname": "www.abc2.com",
"path": "/service2.html",
"serviceId": 0,
"sourcePort": port,
"targetPort": "80",
"protocol": "tcp"
}
port_rules.append(port_rule)
env, services, lb_service = create_env_with_multiple_svc_and_lb(
client, service_scale, lb_scale, [port], service_count, port_rules)
wait_for_lb_service_to_become_active(client,
services, lb_service)
validate_lb_service(client,
lb_service, port,
[services[0], services[1]])
delete_all(client, [env])
def test_lbservice_host_routing_tcp_and_http(client,
socat_containers):
port1 = "1012"
port2 = "1013"
service_scale = 2
lb_scale = 1
service_count = 2
port_rules = []
port_rule = {"hostname": "www.abc1.com",
"path": "/service3.html",
"serviceId": 0,
"sourcePort": port1,
"targetPort": "80",
"protocol": "tcp"
}
port_rules.append(port_rule)
port_rule = {"hostname": "www.abc1.com",
"path": "/service3.html",
"serviceId": 0,
"sourcePort": port2,
"targetPort": "81",
"protocol": "http"
}
port_rules.append(port_rule)
port_rule = {"hostname": "www.abc1.com",
"path": "/service4.html",
"serviceId": 1,
"sourcePort": port1,
"targetPort": "80",
"protocol": "tcp"
}
port_rules.append(port_rule)
port_rule = {"hostname": "www.abc1.com",
"path": "/service4.html",
"serviceId": 1,
"sourcePort": port2,
"targetPort": "81",
"protocol": "http"
}
port_rules.append(port_rule)
env, services, lb_service = create_env_with_multiple_svc_and_lb(
client, service_scale, lb_scale, [port1, port2], service_count,
port_rules)
wait_for_lb_service_to_become_active(client,
services, lb_service)
port1 = "1012"
"""
validate_lb_service(client,
lb_service, port1,
[services[0], services[1]])
validate_lb_service(client,
lb_service, port1,
[services[0], services[1]])
"""
validate_lb_service(client,
lb_service, port2,
[services[0]],
"www.abc1.com", "/service3.html")
validate_lb_service(client,
lb_service, port2, [services[1]],
"www.abc1.com", "/service4.html")
validate_lb_service_for_no_access(client, lb_service, port2,
"www.abc2.com",
"/service3.html")
delete_all(client, [env])
def test_lbservice_host_routing_wildcard(
client, socat_containers):
port = "1014"
service_scale = 2
lb_scale = 1
service_count = 3
port_rules = []
port_rule = {"hostname": "*.domain.com",
"serviceId": 0,
"sourcePort": port,
"targetPort": "80",
"protocol": "http"
}
port_rules.append(port_rule)
port_rule = {"hostname": "domain.*",
"serviceId": 1,
"sourcePort": port,
"targetPort": "80",
"protocol": "http"
}
port_rules.append(port_rule)
port_rule = {"hostname": "abc.domain.com",
"serviceId": 2,
"sourcePort": port,
"targetPort": "80",
"protocol": "http"
}
port_rules.append(port_rule)
env, services, lb_service = create_env_with_multiple_svc_and_lb(
client, service_scale, lb_scale, [port], service_count, port_rules)
wait_for_lb_service_to_become_active(client,
services, lb_service)
validate_lb_service(client,
lb_service, port,
[services[2]],
"abc.domain.com", "/name.html")
validate_lb_service(client,
lb_service, port,
[services[0]],
"abc.def.domain.com", "/service1.html")
validate_lb_service(client,
lb_service, port,
[services[1]],
"domain.abc.def.com", "/service1.html")
validate_lb_service(client,
lb_service, port,
[services[1]],
"domain.abc.com", "/name.html")
delete_all(client, [env])
def test_lbservice_host_routing_wildcard_order(
client, socat_containers):
port = "1014"
service_scale = 2
lb_scale = 1
service_count = 5
port_rules = []
port_rule = {"hostname": "*.domain.com",
"serviceId": 0,
"sourcePort": port,
"targetPort": "80",
"protocol": "http"
}
port_rules.append(port_rule)
port_rule = {"hostname": "domain.*",
"serviceId": 1,
"sourcePort": port,
"targetPort": "80",
"protocol": "http"
}
port_rules.append(port_rule)
port_rule = {"hostname": "abc.domain.com",
"serviceId": 2,
"sourcePort": port,
"targetPort": "80",
"protocol": "http"
}
port_rules.append(port_rule)
port_rule = {"hostname": "abc.domain.com",
"path": "/service1.html",
"serviceId": 3,
"sourcePort": port,
"targetPort": "80",
"protocol": "http"
}
port_rules.append(port_rule)
port_rule = {"hostname": "*.domain.com",
"path": "/service1.html",
"serviceId": 4,
"sourcePort": port,
"targetPort": "80",
"protocol": "http"
}
port_rules.append(port_rule)
env, services, lb_service = create_env_with_multiple_svc_and_lb(
client, service_scale, lb_scale, [port], service_count, port_rules)
wait_for_lb_service_to_become_active(client,
services, lb_service)
validate_lb_service(client,
lb_service, port,
[services[4]],
"abc.def.domain.com", "/service1.html")
validate_lb_service(client,
lb_service, port,
[services[0]],
"abc.def.domain.com", "/name.html")
validate_lb_service(client,
lb_service, port,
[services[1]],
"domain.abc.com", "/service1.html")
validate_lb_service(client,
lb_service, port,
[services[1]],
"domain.def.com", "/service1.html")
validate_lb_service(client,
lb_service, port,
[services[2]],
"abc.domain.com", "/name.html")
validate_lb_service(client,
lb_service, port,
[services[3]],
"abc.domain.com", "/service1.html")
delete_all(client, [env])
def test_lbservice_host_routing_priority_override_1(
client, socat_containers):
port = "1015"
service_scale = 2
lb_scale = 1
service_count = 2
port_rules = []
port_rule = {"hostname": "*.com",
"path": "/service1.html",
"serviceId": 0,
"sourcePort": port,
"targetPort": "80",
"protocol": "http",
"priority": 1
}
port_rules.append(port_rule)
port_rule = {"hostname": "abc.domain.com",
"path": "/service1.html",
"serviceId": 1,
"sourcePort": port,
"targetPort": "80",
"protocol": "http",
"priority": 2
}
port_rules.append(port_rule)
env, services, lb_service = create_env_with_multiple_svc_and_lb(
client, service_scale, lb_scale, [port], service_count, port_rules)
wait_for_lb_service_to_become_active(client,
services, lb_service)
validate_lb_service(client,
lb_service, port,
[services[0]],
"abc.domain.com", "/service1.html")
delete_all(client, [env])
def test_lb_with_selector_link_target_portrules(client,
socat_containers):
port = "20001"
# Create Environment
env = create_env(client)
launch_config_svc = {"imageUuid": LB_HOST_ROUTING_IMAGE_UUID,
"labels": {"test1": "value1"}}
port_rule1 = {
"targetPort": "80",
"hostname": "www.abc.com",
"path": "/name.html"}
port_rule2 = {
"targetPort": "80",
"hostname": "www.abc1.com",
"path": "/service1.html"}
# Create Service
random_name = random_str()
service_name = random_name.replace("-", "")
service1 = client.create_service(name=service_name,
stackId=env.id,
launchConfig=launch_config_svc,
scale=1,
lbConfig=create_lb_config([port_rule1]))
service1 = client.wait_success(service1)
assert service1.state == "inactive"
random_name = random_str()
service2_name = random_name.replace("-", "")
service2 = client.create_service(name=service2_name,
stackId=env.id,
launchConfig=launch_config_svc,
scale=1,
lbConfig=create_lb_config([port_rule2]))
service2 = client.wait_success(service2)
assert service2.state == "inactive"
launch_config_lb = {"ports": [port],
"imageUuid": get_haproxy_image()}
port_rule1 = {
"sourcePort": port,
"selector": "test1=value1"}
lb_env = create_env(client)
lb_service = client.create_loadBalancerService(
name="lb-withselectorlinks",
stackId=lb_env.id,
launchConfig=launch_config_lb,
scale=1,
lbConfig=create_lb_config([port_rule1]))
lb_service = client.wait_success(lb_service)
assert lb_service.state == "inactive"
service1 = activate_svc(client, service1)
service2 = activate_svc(client, service2)
lb_service = activate_svc(client, lb_service)
wait_for_lb_service_to_become_active(client,
[service1, service2], lb_service)
validate_lb_service(client,
lb_service, port,
[service1],
"www.abc.com", "/name.html")
validate_lb_service(client,
lb_service, port,
[service2],
"www.abc1.com", "/service1.html")
delete_all(client, [env])
@if_stress
def test_lbservice_edit_add_multiple_port_rules(
client, socat_containers):
port = "90"
count = int(STRESS_LB_PORT_RULE_COUNT)
service_scale = 2
lb_scale = 1
service_count = 1
ports = [port]
port_rules = []
port_rule = {"hostname": "www.abc.com",
"serviceId": 0,
"sourcePort": port,
"targetPort": "80",
"protocol": "http"
}
port_rules.append(port_rule)
launch_config_lb = {"imageUuid": get_haproxy_image()}
env, services, lb_service = create_env_with_multiple_svc_and_lb(
client, service_scale, lb_scale, ports, service_count, port_rules)
wait_for_lb_service_to_become_active(client,
services, lb_service)
validate_lb_service(client,
lb_service, port, [services[0]],
"www.abc.com", "/service2.html")
for i in range(0, count):
port_rule = {"hostname": "www.abc.com"+str(i),
"serviceId": services[0].id,
"sourcePort": port+str(i),
"targetPort": "80",
"protocol": "http"
}
port_rules.append(port_rule)
ports.append(port+str(i))
launch_config_lb["ports"] = ports
lb_service = client.update(lb_service,
launchConfig=launch_config_lb,
lbConfig=create_lb_config(port_rules))
wait_for_lb_service_to_become_active(client,
services, lb_service)
validate_lb_service(client,
lb_service, port, [services[0]],
"www.abc.com", "/service2.html")
for j in range(0, i):
print "Validation after adding " + str(i) + " ports"
validate_lb_service(client,
lb_service, port+str(j), [services[0]],
"www.abc.com"+str(j), "/name.html")
delete_all(client, [env])
| [
"[email protected]"
] | |
70f33bb40b94725d71df75b5591e7a9b56325cca | f9ed608c620093b9f6b5058bcedf7ae610c09c8d | /329-Longest_Increasing_Path_in_a_Matrix.py | 2076972e979562372e23e07d8d2bfd9f51a966ba | [] | no_license | chanyoonzhu/leetcode-python | 9b88d7f2749e1ae3ed597759b1bf9f7fa4912c35 | 085d868ba0458fc8e6b5549aa00fa151c335fa7f | refs/heads/master | 2022-05-24T11:20:35.927915 | 2022-04-16T06:02:33 | 2022-04-16T06:02:33 | 166,224,197 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,114 | py | """
- dfs with memoization
- note: path is strictly increasing -> no loop -> DAG -> can use dfs
- O(mn), O(mn)
"""
class Solution:
def longestIncreasingPath(self, matrix: List[List[int]]) -> int:
self.matrix = matrix
self.dir = [(1, 0), (-1, 0), (0, 1), (0, -1)]
self.m = len(matrix)
self.n = len(matrix[0])
self.res = 0
for r in range(self.m):
for c in range(self.n):
self.dfs(r, c)
return self.res
@lru_cache(None)
def dfs(self, r, c):
connected = 0
for i, j in self.dir:
nr, nc = r + i, c + j
if 0 <= nr < self.m and 0 <= nc < self.n and self.matrix[nr][nc] > self.matrix[r][c]:
connected = max(connected, self.dfs(nr, nc))
connected += 1 # itself
self.res = max(self.res, connected)
return connected
"""
- topological sorting
- O(mn), O(mn)
"""
class Solution:
def longestIncreasingPath(self, matrix: List[List[int]]) -> int:
M, N = len(matrix), len(matrix[0])
indegrees = [[0] * N for _ in range(M)]
DIR = [(1, 0), (-1, 0), (0, 1), (0, -1)]
for r in range(M):
for c in range(N):
for i, j in DIR:
nr, nc = r + i, c + j
if 0 <= nr < M and 0 <= nc < N and matrix[nr][nc] > matrix[r][c]:
indegrees[r][c] += 1
q = deque()
for r in range(M):
for c in range(N):
if indegrees[r][c] == 0:
q.append((r, c))
steps = 0
while q:
new_q = deque()
while q:
r, c = q.popleft()
for i, j in DIR:
nr, nc = r + i, c + j
if 0 <= nr < M and 0 <= nc < N and matrix[nr][nc] < matrix[r][c]:
indegrees[nr][nc] -= 1
if indegrees[nr][nc] == 0:
new_q.append((nr, nc))
q = new_q
steps += 1
return steps | [
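# Illustrative usage sketch (not part of the original solutions): the matrix below is the
# standard LeetCode sample input, and Solution here refers to the second (topological-sort)
# class defined above; the __main__ guard keeps the demo from running on import.
if __name__ == "__main__":
    sample_matrix = [[9, 9, 4],
                     [6, 6, 8],
                     [2, 1, 1]]
    # the longest increasing path is 1 -> 2 -> 6 -> 9, so this prints 4
    print(Solution().longestIncreasingPath(sample_matrix))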
"[email protected]"
] | |
a828a1d10bfc5c4b5cd149e658aca32e30558efa | 7f80554c5013ba7bc66a3ec98f804156d977c277 | /src/readux/urls.py | d95051dab91bc770f6c85b38826d7a40f9f870b8 | [] | no_license | akrahdan/LearnAI | fa89c133dbe3b0c06bfdce720ea6dcb429d1dc57 | fbea836a7fc78c8ab92b313c2afa4bdeef59c362 | refs/heads/main | 2023-07-24T15:07:15.692045 | 2021-08-20T16:39:44 | 2021-08-20T16:39:44 | 376,688,774 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,083 | py | """readux URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include, re_path
from django.conf import settings
from allauth.account.views import confirm_email
from django.conf.urls.static import static
from courses.views import CourseDetailSlugView
from projects.views import ProjectDetailView
from .views import home_page, CourseLeadView, pricing
from files.views import DownloadView, UploadPolicyView, UploadView, UploadCoursePolicy, DownloadCourseView
urlpatterns = [
path('admin/', admin.site.urls),
path('', home_page, name='home'),
path('api/lead/', CourseLeadView.as_view(), name='course_signup'),
path('api/pricing/', pricing, name='pricing'),
path('api/dashboard/', include(('dashboard.urls', 'dashboard'), namespace="dashboard")),
path('api/courses/', include('courses.urls')),
path('api/course/<slug:slug>/', CourseDetailSlugView.as_view()),
#path('auths/', include(('accounts.urls', 'auths'), 'auths')),
path('accounts/', include('allauth.urls')),
path('api/accounts/', include('accounts.urls')),
path('api/billing/', include(('billing.urls', 'billing'), 'billing')),
path('api/instructor/', include(('instructors.urls'))),
path('api/students/', include(('students.urls', 'students'), namespace='students')),
path('api/upload/', UploadView.as_view()),
path('api/upload/policy/', UploadPolicyView.as_view()),
path('api/files/<int:id>/download/', DownloadView.as_view()),
path('api/orders/', include('orders.urls')),
path('rest-auth/', include('rest_auth.urls')),
path('api/auth/', include('auths.urls')),
path('api/analytics/', include('analytics.urls')),
    path('api/projects/', include('projects.urls')),
path('api/project/<slug:slug>/', ProjectDetailView.as_view()),
path('api/categories/', include('categories.urls')),
path('api/project_categories/', include('project_categories.urls')),
re_path(r"^rest-auth/registration/account-confirm-email/(?P<key>[\s\d\w().+-_',:&]+)/$", confirm_email,
name="account_confirm_email"),
path('rest-auth/registration/', include('rest_auth.registration.urls')),
path('api/cart/', include('carts.urls')),
]
# if settings.DEBUG:
# urlpatterns = urlpatterns + static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
# urlpatterns = urlpatterns + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
| [
"[email protected]"
] | |
15ed8211c8d43131be4eeaa704dbd1400bbea598 | 59c55725576bbf0e2f6617507ba2f1db639abb3f | /stock_analytic_account/model/analytic_account.py | ea5556d076df6616c951843e5c8ca6abdca7a083 | [] | no_license | bmya/eficent-odoo-addons | e3426ebaf1f59e52726253fc1dd36a09d9363059 | 5d8ddfa384ab4417f42bda103b71d926848035f6 | refs/heads/7.0 | 2021-01-21T16:48:55.312452 | 2015-11-04T14:11:19 | 2015-11-04T14:11:19 | 45,649,141 | 1 | 3 | null | 2015-11-06T00:35:17 | 2015-11-06T00:35:17 | null | UTF-8 | Python | false | false | 1,957 | py | # -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2015 Eficent (<http://www.eficent.com/>)
# Jordi Ballester Alomar <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv, orm
import openerp.addons.decimal_precision as dp
class account_analytic_account(orm.Model):
_inherit = "account.analytic.account"
_columns = {
'move_ids': fields.one2many('stock.move', 'analytic_account_id',
'Moves for this analytic account',
readonly=True),
'use_reserved_stock': fields.boolean(
'Use reserved stock',
help="Stock with reference to this analytic account "
"is considered to be reserved.")
}
def copy(self, cr, uid, id, default=None, context=None):
if context is None:
context = {}
if default is None:
default = {}
default['move_ids'] = []
res = super(account_analytic_account, self).copy(cr, uid, id, default,
context)
return res
| [
"[email protected]"
] | |
766c006b44f7bca3d96dc1ad604ef9851b7c73be | 0a1a95fe0344c27197b677e8f8d1acc05a9813bd | /tests/test_app/test_static.py | 9c4549b3036f5029a3b59f6d3252f224e800aa5a | [
"MIT"
] | permissive | hirokiky/uiro | 5ddaee966395512919016406c5ed18baed5cb68c | 8436976b21ac9b0eac4243768f5ada12479b9e00 | refs/heads/master | 2023-04-27T00:57:13.953417 | 2013-11-09T02:15:57 | 2013-11-09T02:15:57 | 13,859,983 | 0 | 0 | MIT | 2023-04-15T15:13:52 | 2013-10-25T12:30:05 | Python | UTF-8 | Python | false | false | 411 | py | import pytest
from webtest import TestApp
@pytest.fixture
def target():
from matcha import make_wsgi_app
from uiro.static import generate_static_matching
from .pkgs import static_app
matching = generate_static_matching(static_app)
return TestApp(make_wsgi_app(matching))
def test_static(target):
resp = target.get('/static/static_app/test.txt')
resp.mustcontain('No more work')
| [
"[email protected]"
] | |
b7759d6a6dcb81a63298d8ff7c3583729f1d19eb | 7facdc4644fbe4209b5acdad9f2503bfcfb0d534 | /ensure/_types.py | d0d2db7350653d2171e927886cffa6eccef0f7f8 | [
"Apache-2.0"
] | permissive | KeyWeeUsr/ensure | 2a19d2101418f334bb188d299f5368f96aaf7916 | 47becf82672906d2fcfd4e8e5b0542e43845b3ed | refs/heads/master | 2023-06-01T04:11:19.154208 | 2018-11-06T01:39:11 | 2018-11-06T01:39:11 | 165,532,375 | 0 | 0 | Apache-2.0 | 2019-01-13T17:14:11 | 2019-01-13T17:14:10 | null | UTF-8 | Python | false | false | 1,050 | py | from __future__ import absolute_import, division, print_function, unicode_literals
import sys
from six import add_metaclass
USING_PYTHON2 = True if sys.version_info < (3, 0) else False
if USING_PYTHON2:
str = unicode # noqa
class NumericStringType(type):
_type = str
_cast = float
def __instancecheck__(self, other):
try:
if not isinstance(other, self._type):
raise TypeError()
self._cast(other)
return True
except (TypeError, ValueError):
return False
class NumericByteStringType(NumericStringType):
_type = bytes
class IntegerStringType(NumericStringType):
_cast = int
class IntegerByteStringType(IntegerStringType):
_type = bytes
@add_metaclass(NumericStringType)
class NumericString(str):
pass
@add_metaclass(NumericByteStringType)
class NumericByteString(bytes):
pass
@add_metaclass(IntegerStringType)
class IntegerString(str):
pass
@add_metaclass(IntegerByteStringType)
class IntegerByteString(bytes):
pass
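# Illustrative sketch (not from the original module): these classes are intended to be used
# only through isinstance(), because the metaclass overrides __instancecheck__ to test whether
# a str/bytes value parses as a number. The literals below assume Python 3, where plain string
# literals are unicode.
def _demo_numeric_string_checks():
    assert isinstance("3.14", NumericString)       # parses as float
    assert not isinstance("abc", NumericString)    # float() raises ValueError
    assert isinstance("42", IntegerString)         # parses as int
    assert not isinstance("3.14", IntegerString)   # int() rejects the decimal point
    assert isinstance(b"7", IntegerByteString)     # bytes variant, cast with int()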
| [
"[email protected]"
] | |
4afc9e26c651892b4c66a8e40b134a2277fdb425 | be4759201435054c55ca76d4a973aee8c549e1a6 | /sockets/mn_edge_indices_list_socket.py | 82fca744d5684176868484ad02929b8ee962b360 | [] | no_license | vvFiCKvv/animation-nodes | 75f94549f82702b3ac5f548f009dd2202c694240 | 6988606b8c3601d428fa3fe32c77c7b440eb7c38 | refs/heads/master | 2021-01-17T00:29:13.299665 | 2015-04-25T16:46:20 | 2015-04-25T16:46:20 | 27,539,581 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 657 | py | import bpy
from animation_nodes.mn_execution import nodePropertyChanged
from animation_nodes.mn_node_base import *
class mn_EdgeIndicesListSocket(mn_BaseSocket, mn_SocketProperties):
bl_idname = "mn_EdgeIndicesListSocket"
bl_label = "Edge Indices List Socket"
dataType = "Edge Indices List"
allowedInputTypes = ["Edge Indices List"]
drawColor = (0, 0.55, 0.23, 1)
def drawInput(self, layout, node, text):
layout.label(text)
def getValue(self):
return []
def setStoreableValue(self, data):
pass
def getStoreableValue(self):
pass
def getCopyValueFunctionString(self):
return "return [edgeIndices[:] for edgeIndices in value]"
| [
"[email protected]"
] | |
7887f18e6d71f9eaf61d02aa2d134eb927a02aec | d3638776a2ce455eb42f29c9c06e267392b6815a | /reading/book/migrations/0007_auto_20180703_2156.py | d188bede58a1d3c4590c803e1b5a26ae3aa7e460 | [
"MIT"
] | permissive | Family-TreeSY/reading | abce1b5e6047c72867839303ab0181c7a4997913 | a35d1242ce3a7303fe125c11de8125bd9afbdb3c | refs/heads/master | 2020-03-20T04:53:18.089247 | 2018-07-09T08:51:32 | 2018-07-09T08:51:32 | 137,197,886 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 749 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.3 on 2018-07-03 13:56
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('book', '0006_auto_20180619_2116'),
]
operations = [
migrations.AddField(
model_name='story',
name='html',
field=models.TextField(default='', help_text='\u6b63\u6587\u53ef\u4ee5\u4f7f\u7528markdown', verbose_name='html\u6e32\u67d3\u540e\u7684\u9875\u9762'),
),
migrations.AddField(
model_name='story',
name='is_markdown',
field=models.BooleanField(default=True, verbose_name='\u4f7f\u7528markdown'),
),
]
| [
"[email protected]"
] | |
bb89c0558e9830a7ba414e9cea296ffb578f8509 | e49b654d3db99773390c5b9686df9c99fbf92b2a | /linked_lists/linked_list.py | f018e58590e973d2a1aac0516779018498713c0c | [] | no_license | hao89/diary_of_programming_puzzles | 467e8264d0ad38768ba5ac3cfb45301293d79943 | 0e05d3716f28075f99bbd7b433d16a383209e57c | refs/heads/master | 2021-01-16T00:49:38.956102 | 2015-08-25T13:44:53 | 2015-08-25T13:44:53 | 41,692,587 | 1 | 0 | null | 2015-08-31T18:20:38 | 2015-08-31T18:20:36 | Python | UTF-8 | Python | false | false | 455 | py | class LinkedListNode:
def __init__(self, data):
self.next = None
self.data = data
def __str__(self):
node_str = ""
current_node = self
while current_node:
if current_node.next:
node_str = node_str + str(current_node.data) + ", "
else:
node_str = node_str + str(current_node.data)
current_node = current_node.next
return node_str | [
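# Illustrative usage (not part of the original file): nodes are chained by hand through the
# .next attribute, and __str__ walks the chain, so printing the head shows the whole list.
if __name__ == "__main__":
    head = LinkedListNode(1)
    head.next = LinkedListNode(2)
    head.next.next = LinkedListNode(3)
    print(head)  # 1, 2, 3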
"[email protected]"
] | |
7153a4c17d679b6a69da201b4c53f56cfe0c5619 | 517a904955033092aec11288151d725548226abc | /pandas_tutorial/data_advance/df_column_order.py | bd0d1a7f8f2a20eab540746de7dffb1501d42be3 | [] | no_license | MinSu-Kim/python_tutorial | ae0a4e3570aa4cb411626cefbc031777364764d5 | ed0c08892822d7054161c9e8f98841370868e82d | refs/heads/master | 2021-06-16T16:15:30.349719 | 2021-05-26T04:59:47 | 2021-05-26T04:59:47 | 207,266,202 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,010 | py | import seaborn as sns
print("# titanic 데이터셋의 부분을 선택하여 데이터프레임 만들기")
titanic = sns.load_dataset('titanic')
df = titanic.loc[0:4, 'survived':'age']
print(df, '\n')
print("# 열 이름의 리스트 만들기")
columns = list(df.columns.values) # 기존 열 이름
print("ssss", sorted(columns, reverse=True), type(sorted(columns, reverse=True)))
print("# 열 이름을 알파벳 순으로 정렬하기")
columns_sorted = sorted(columns) # 알파벳 순으로 정렬
df_sorted = df[columns_sorted]
print(df_sorted, '\n')
print(columns_sorted, '\n')
print("# 열 이름을 기존 순서의 정반대 역순으로 정렬하기")
columns_reversed = list(sorted(columns, reverse=True))
df_reversed = df[columns_reversed]
print(df_reversed, '\n')
print(columns_reversed, '\n')
print("# 열 이름을 사용자가 정의한 임의의 순서로 재배치하기")
columns_customed = ['pclass', 'sex', 'age', 'survived']
df_customed = df[columns_customed]
print(df_customed)
| [
"[email protected]"
] | |
fd69e5c0ad13bddd3665e157cdd85e17f6da1920 | d25003d4e1a1cd3b5eca1525c0119da47579f294 | /scripts/sort_double.py | 51093694d8a595573520419157b7d218af437429 | [] | no_license | rd37/GooglePracticeProjects | ceabcb838bd4bd50397b8fdf775e810db320dbb1 | b3543ada39b8c24f688a41cf0b745482013a93d9 | refs/heads/master | 2016-09-06T16:50:41.303580 | 2014-12-12T03:23:23 | 2014-12-12T03:23:23 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 441 | py | '''
Created on Dec 10, 2014
@author: ronaldjosephdesmarais
'''
ints = [5,8.2,1,7,4.1,13,12,4.1,8.2]
print "------use python sorted------"
print sorted(ints)
print "------use dictionary ------"
srt_dict = {}
srt_arr = []
for i in ints:
if i not in srt_dict:
srt_dict[i]=1
else:
srt_dict[i]=srt_dict[i]+1
for i_key in srt_dict:
for i in range(0,srt_dict[i_key]):
srt_arr.append(i_key)
print srt_arr
| [
"[email protected]"
] | |
17b011426ea5dd281920f3b73b76457056e5bd1b | 4ce6fb5c49ee6ec4b5df9e056040382812a8a591 | /product/migrations/0029_auto_20191001_0528.py | 2120012f6b7045350592076be1c5027236969a78 | [] | no_license | yantrashalait/Multronics | 198c807a0bb2b8c1ae7bcc2325436467ee8a90b3 | c85b5a263fe1507c994236bba26ad12d93157622 | refs/heads/master | 2021-02-14T18:28:25.984830 | 2021-01-18T09:19:21 | 2021-01-18T09:19:21 | 244,825,522 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 636 | py | # Generated by Django 2.2.4 on 2019-10-01 05:28
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('product', '0028_aboutiteam'),
]
operations = [
migrations.AddField(
model_name='product',
name='visibility',
field=models.BooleanField(default=True, verbose_name='Make this product visibile?'),
),
migrations.AlterField(
model_name='aboutiteam',
name='logo',
field=models.ImageField(help_text='Image size: width=192px height=31px', upload_to='logo/'),
),
]
| [
"[email protected]"
] | |
24567018d6cc56c197cd0f52a9cf7d6b9311506f | 349d6ff272a4a113cee5b0ab7849f46305ebfb13 | /sc2/game_data.py | 2e69241ddf4f3f9265fd5ee0cf9aa760d4ddda4e | [
"MIT"
] | permissive | raimohanska/python-sc2 | dafec03d73b905b092c92aefd5ee9d896e8df5e1 | fb936be1618b4c8b8bf453d76d3f9894780a0f21 | refs/heads/master | 2021-09-03T04:04:46.630550 | 2018-01-05T12:50:11 | 2018-01-05T12:50:11 | 116,264,519 | 0 | 0 | null | 2018-01-04T13:41:56 | 2018-01-04T13:41:55 | null | UTF-8 | Python | false | false | 2,856 | py | from functools import lru_cache
from .data import Attribute
from .ids.unit_typeid import UnitTypeId
from .ids.ability_id import AbilityId
class GameData(object):
def __init__(self, data):
self.abilities = {a.ability_id: AbilityData(self, a) for a in data.abilities}
self.units = {u.unit_id: UnitTypeData(self, u) for u in data.units if u.available}
self.upgrades = {u.upgrade_id: UpgradeData(self, u) for u in data.upgrades}
@lru_cache(maxsize=256)
def calculate_ability_cost(self, ability):
for unit in self.units.values():
if unit.creation_ability == ability:
return unit.cost
for upgrade in self.upgrades.values():
if upgrade.research_ability == ability:
return upgrade.cost
return Cost(0, 0)
class AbilityData(object):
def __init__(self, game_data, proto):
self._game_data = game_data
self._proto = proto
@property
def id(self):
if self._proto.remaps_to_ability_id:
return AbilityId(self._proto.remaps_to_ability_id)
return AbilityId(self._proto.ability_id)
@property
def cost(self):
return self._game_data.calculate_ability_cost(self.id)
def __repr__(self):
return f"AbilityData(name={self._proto.button_name})"
class UnitTypeData(object):
def __init__(self, game_data, proto):
self._game_data = game_data
self._proto = proto
@property
def name(self):
return self._proto.name
@property
def creation_ability(self):
return self._game_data.abilities[self._proto.ability_id]
@property
def attributes(self):
return self._proto.attributes
    # a regular method rather than a property, because it takes an argument
    def has_attribute(self, attr):
assert isinstance(attr, Attribute)
return attr in self.attributes
@property
def has_minerals(self):
return self._proto.has_minerals
@property
def has_vespene(self):
return self._proto.has_vespene
@property
def cost(self):
return Cost(
self._proto.mineral_cost,
self._proto.vespene_cost
)
class UpgradeData(object):
def __init__(self, game_data, proto):
self._game_data = game_data
self._proto = proto
@property
def name(self):
return self._proto.name
@property
def research_ability(self):
return self._game_data.abilities[self._proto.ability_id]
@property
def cost(self):
return Cost(
self._proto.mineral_cost,
self._proto.vespene_cost
)
class Cost(object):
def __init__(self, minerals, vespene, time=None):
self.minerals = minerals
self.vespene = vespene
self.time = time
def __repr__(self):
return f"Cost({self.minerals}, {self.vespene})"
| [
"[email protected]"
] | |
9f217ec36d8e23186e431bd2f0f1ae0b6ba58f28 | b7e6cdf094baaee9d6e5034c2355641fbf9138d7 | /test2.py | 1d278e6887286acf6ad8073e7a7038fe7af2e13f | [] | no_license | heshibo1994/leetcode-python-2 | 04296c66cd6d1fe58880062aeafdbe9d474b7d2e | 3ea32f03bd453743b9b81de9871fad7ac67ced90 | refs/heads/master | 2020-05-23T21:49:01.367969 | 2019-09-30T03:31:27 | 2019-09-30T03:31:27 | 186,961,972 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 17 | py | print(int("343")) | [
"[email protected]"
] | |
7a286bf190f3a7ccafa0b6a2278c68f4aebdc583 | 40280c446e21c07ac3ffd20c5eda064a05093698 | /easy_module_attribute_getter/custom_transforms.py | 4eb9cbf5ed62de0c0da0fdd380f0b4112685e08a | [
"MIT"
] | permissive | KevinMusgrave/easy-module-attribute-getter | 884fdee1960b792db49e09edc5de0d268fd6ac8a | e0a733c02f2e6a969191a75c79159f45440c969f | refs/heads/master | 2021-07-04T02:18:17.113242 | 2021-01-21T03:32:22 | 2021-01-21T03:32:22 | 218,787,854 | 9 | 2 | null | null | null | null | UTF-8 | Python | false | false | 686 | py | import torchvision.transforms.functional as F
from PIL import Image
class ConvertToBGR(object):
"""
Converts a PIL image from RGB to BGR
"""
def __init__(self):
pass
def __call__(self, img):
r, g, b = img.split()
img = Image.merge("RGB", (b, g, r))
return img
def __repr__(self):
return "{}()".format(self.__class__.__name__)
class Multiplier(object):
def __init__(self, multiple):
self.multiple = multiple
def __call__(self, img):
return img*self.multiple
def __repr__(self):
return "{}(multiple={})".format(self.__class__.__name__, self.multiple) | [
"[email protected]"
] | |
e9aaecada9a17d7d8b636210f8d990f11a900e07 | 16631cf7cd4a70f2cd2750851649d3eff5e17724 | /2022/day15/part2.py | 00daf3f196e692462e068c11a24b226c3febf106 | [] | no_license | kynax/AdventOfCode | 1dd609a3308d733f2dd7d4ea00508d2da73180b9 | 36a339241dd7a31ebe08a73e5efa599e5faeea1a | refs/heads/master | 2022-12-21T13:32:52.591068 | 2022-12-16T22:41:30 | 2022-12-16T22:41:30 | 48,439,585 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,241 | py | import sys
grid = {}
sensitivity = []
sensors = []
beacons = []
for l in sys.stdin:
l = l.strip().split(' ')
sx, sy, bx, by = int(l[2][2:-1]), int(l[3][2:-1]), int(l[8][2:-1]), int(l[9][2:])
grid[(sx,sy)] = 'S'
grid[(bx,by)] = 'B'
dx,dy = abs(sx-bx), abs(sy-by)
md = dx+dy
sensitivity.append((sx,sy,md))
sensors.append((sx,sy))
if (bx,by) not in beacons: beacons.append((bx,by))
minx = min([i[0]-i[2] for i in sensitivity])-1
maxx = max([i[0]+i[2] for i in sensitivity])+1
for row in range(4000000):
intervals = []
for s in sensitivity:
d = abs(s[1] - row)
if d > s[2]:
continue
w = s[2] - d
b,e = s[0] - abs(w), s[0] + abs(w)
if e < b: b,e = e,b
intervals.append((b,e))
ints = sorted(intervals)
nints = [ints[0]]
for i in range(1, len(ints)):
if ints[i][0] <= nints[-1][1]+1:
if ints[i][1] <= nints[-1][1]:
pass # fully included
else:
nints[-1] = (nints[-1][0], ints[i][1])
else:
nints.append(ints[i])
if len(nints) > 1:
print(nints, nints[0][1] + 1, row)
print(4000000 * (nints[0][1]+1) + row)
break | [
"[email protected]"
] | |
29f1b6a21401ee236b971d6979bebb602294ee1b | 89967e55f8ab4037368972dcf30d2aa2cd8cb0f3 | /oop_pedia_classifier.py | 8a0939f92a57883e84b9301842adc075e6e28583 | [] | no_license | petkraw/classifier | 5f487dd51ef70023aa502d69ec402b14bfe6019c | b813ff17013caf4d6d5aa036d6cb45c6c745e3ef | refs/heads/master | 2021-01-21T20:54:22.315291 | 2017-05-18T14:59:12 | 2017-05-18T14:59:12 | 92,289,428 | 0 | 0 | null | 2017-05-24T12:23:13 | 2017-05-24T12:23:13 | null | UTF-8 | Python | false | false | 57,249 | py | # -*- coding: utf-8 -*-
"""
Created on Mon Feb 20 09:56:23 2017
@author: Martin
"""
import json, os
import warnings
import numpy as np
import sys
from matplotlib import pyplot as plt
from sklearn import svm
from sklearn import preprocessing
from sklearn.neural_network import MLPClassifier
from sklearn.externals import joblib
warnings.filterwarnings("ignore", category=DeprecationWarning)
class Sample(object):
"""common class for all samples. that is the vector of scores describing a sample and its unique identifier case and gene.
Attributes:
case: The ID of the case to which the vector belongs as a string.
gene: The name of the gene this vector describes as a string.
gestalt: The gestalt score based on FDNA's analysis of the case's portrait foto as a float.
feature: The feature score based on FDNA's analysis of the case's annotated symptoms as a float.
cadd_phred: the highest CADDphred scores of the gene (has passed filtering; otherwise it is 0) as a float
phenomizer: the phenomizer p-value (*-1) based on the phenomizers analysis of the cases's annotated symptoms in classical mode as a float
boqa: the phenomizer score based on the phenomizers analyisis of the of the case's annotated symptoms in BOQA mode as a float
pathogenicity: a class label as an integer: 1 means pathogenic, 0 means neutral
pedia: an attribute to hold the PEDIA score as a float (initially it is -5)
extom: an attribute to hold the extom score based on patients symptoms and exome (no biometry)
"""
def __init__(self, case='?', gene='?', gestalt=0, feature=0, cadd_phred=0, phenomizer=0, boqa=0, pathogenicity=0, pedia=-5, extom=-5):
self.case = case
self.gene = gene
self.gestalt = gestalt
self.feature = feature
self.cadd_phred = cadd_phred
self.phenomizer = phenomizer
self.boqa = boqa
self.pathogenicity = pathogenicity
self.pedia = pedia
self.extom = extom
def classify(self):
"""a function to classify a sample using the classifier and the scaler provided in the respective pkl-files
"""
clf = joblib.load('pedia_classifier.pkl')
scaler = joblib.load('pedia_scaler.pkl')
        pedia = float(clf.decision_function(scaler.transform(np.array([[self.gestalt, self.feature, self.cadd_phred, self.phenomizer, self.boqa]])))[0])
print(pedia)
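# Illustrative sketch (not part of the original script): a Sample is just the five scores plus
# its identifiers, so a single candidate gene can be scored once the classifier and scaler pkl
# files have been written (see Data.save_SVM below). The score values here are made up for
# demonstration.
def _example_classify_single_sample():
    candidate = Sample(case='demo_case', gene='KMT2D',
                       gestalt=0.8, feature=0.6, cadd_phred=25.0,
                       phenomizer=0.9, boqa=0.7)
    candidate.classify()  # prints the decision_function value (the PEDIA score)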
class Data:
"""Common class for a list of instances of the class Samples
Attributes:
        name: name of the data as a string (unused; the assignment in __init__ is commented out)
samples: a list of samples as instances of class Sample
casedisgene: a list of lists [[case,gene]] containing each case in samples and the respective disease causing gene
"""
def __init__(self, samples=[], casedisgene=[]):
#self.name = name
self.samples = list(samples)
self.casedisgene = casedisgene
def load(self, path):
""" loads the samples attribute with the information of the json files in the same directory as instances of the class Samples"""
print('loading data')
for file_name in os.listdir(path):
if file_name.endswith(".json") and file_name != '34159.json' and file_name != '40536.json':
file_name = os.path.join(path, file_name)
vectors = {}
with open(file_name, encoding='utf-8', errors='ignore') as json_data:
data = json.load(json_data)
pathogene = '?'
#gene_omimID='?'
if len(data['genomicData']) >0:
if 'Gene Name' in data['genomicData'][0]['Test Information']:
pathogene = data['genomicData'][0]['Test Information']['Gene Name']
if pathogene == 'MLL2':
pathogene = 'KMT2D'
elif pathogene == 'MLL':
pathogene = 'KMT2A'
elif pathogene == 'B3GALTL':
pathogene = 'B3GLTC'
elif pathogene == 'CASKIN1':
pathogene = 'KIAA1306'
#gene_omimID=data['genomicData'][0]['Test Information']['Gene Name']
case = data['case_id']
for entry in data['geneList']:
gscore = 0 #gestalt score
if 'gestalt_score' in entry:# and entry['gestalt_score']>0: #sometimes negative; mistake by FDNA?
gscore = entry['gestalt_score']
fscore =0 #feature score
if 'feature_score' in entry:# and entry['feature_score']>0: #sometimes negative; mistake by FDNA?
fscore = entry['feature_score']
vscore = 0 #variant score
if 'cadd_phred_score' in entry:
vscore = entry['cadd_phred_score']
pscore = 0 #phenomizer score
if 'pheno_score' in entry:
pscore = entry['pheno_score']
bscore=0 #boqa score
if 'boqa_score' in entry:
bscore = entry['boqa_score']
gene = entry['gene_symbol']
patho = 0 #pathogenicity, will serve as class label, 1 is pathogenic mutation; 0 is neutral variant
if gene == pathogene:
patho = 1
if pathogene != '?':# and (gscore!=0 or fscore!=0 or vscore!=0 or pscore!=0 or bscore!=0): #nullvectors not allowed
if case + gene in vectors: # if a gene appears several times in the gene List of a case onnly its highest values will be assigned to its Sample
smpl = vectors[case + gene]
if gscore > smpl.gestalt:
smpl.gestalt = gscore
if fscore > smpl.feature:
smpl.feature = fscore
if vscore > smpl.cadd_phred:
smpl.cadd_phred = vscore
if pscore > smpl.phenomizer:
smpl.phenomizer = pscore
if bscore > smpl.boqa:
smpl.boqa = bscore
if case + gene not in vectors:
vectors[case+gene] = Sample(case = case, gene = gene, gestalt = gscore, feature = fscore, cadd_phred = vscore, phenomizer = pscore, boqa = bscore, pathogenicity = patho)
for vector in vectors:
self.samples.append(vectors[vector]) # loads samples with instances of the class Sample
casedisgene = []
cases = []
for smpl in self.samples:
if smpl.pathogenicity == 1:
casedisgene.append([smpl.case, smpl.gene])
cases.append(smpl.case)
for smpl in self.samples:
if smpl.case not in cases:
cases.append(smpl.case)
casedisgene.append([smpl.case, 'healthy?'])
self.casedisgene = casedisgene
def load2(self, path):
""" loads the samples attribute with the information of the json files in the same directory as instances of the class Samples"""
print('loading data')
for file_name in os.listdir():
if file_name.endswith(".json") and file_name != '34159.json' and file_name != '40536.json':
file_name = os.path.join(path, file_name)
vectors = {}
with open(file_name, encoding = 'utf-8', errors = 'ignore') as json_data:
data = json.load(json_data)
pathogene='?'
#gene_omimID='?'
if len(data['genomicData'])>0:
if 'Gene Name' in data['genomicData'][0]['Test Information']:
pathogene = data['genomicData'][0]['Test Information']['Gene Name']
if pathogene == 'MLL2':
pathogene = 'KMT2D'
elif pathogene == 'MLL':
pathogene = 'KMT2A'
elif pathogene == 'B3GALTL':
pathogene = 'B3GLTC'
elif pathogene == 'CASKIN1':
pathogene = 'KIAA1306'
#gene_omimID=data['genomicData'][0]['Test Information']['Gene Name']
case = data['case_id']
for entry in data['geneList']:
if 'feature_score' in entry or 'pheno_score' in entry or 'boqa_score' in entry or 'cadd_phred_score' in entry :
gscore = 0 #gestalt score
# if 'gestalt_score' in entry:# and entry['gestalt_score']>0: #sometimes negative; mistake by FDNA?
# gscore=entry['gestalt_score']
fscore = 0 #feature score
if 'feature_score' in entry:# and entry['feature_score']>0: #sometimes negative; mistake by FDNA?
fscore = entry['feature_score']
vscore = 0 #variant score
if 'cadd_phred_score' in entry:
vscore = entry['cadd_phred_score']
pscore = 0 #phenomizer score
if 'pheno_score' in entry:
pscore = entry['pheno_score']
bscore = 0 #boqa score
if 'boqa_score' in entry:
bscore = entry['boqa_score']
gene = entry['gene_symbol']
patho = 0 #pathogenicity, will serve as class label, 1 is pathogenic mutation; 0 is neutral variant
if gene == pathogene:
patho = 1
if pathogene != '?':# and (gscore!=0 or fscore!=0 or vscore!=0 or pscore!=0 or bscore!=0): #nullvectors not allowed
if case+gene in vectors: # if a gene appears several times in the gene List of a case onnly its highest values will be assigned to its Sample
smpl=vectors[case + gene]
if gscore > smpl.gestalt:
smpl.gestalt = gscore
if fscore > smpl.feature:
smpl.feature = fscore
if vscore > smpl.cadd_phred:
smpl.cadd_phred = vscore
if pscore > smpl.phenomizer:
smpl.phenomizer = pscore
if bscore > smpl.boqa:
smpl.boqa = bscore
if case + gene not in vectors:
vectors[case + gene] = Sample(case = case, gene = gene, gestalt = gscore, feature = fscore, cadd_phred = vscore, phenomizer = pscore, boqa = bscore, pathogenicity = patho)
for vector in vectors:
self.samples.append(vectors[vector]) #loads samples with instances ofthe class Sample
casedisgene = []
cases = []
for smpl in self.samples:
if smpl.pathogenicity == 1:
casedisgene.append([smpl.case, smpl.gene])
cases.append(smpl.case)
for smpl in self.samples:
if smpl.case not in cases:
cases.append(smpl.case)
casedisgene.append([smpl.case, 'healthy?'])
self.casedisgene = casedisgene
def filter_gestalt(self):
new_samples=[]
for smpl in self.samples:
if smpl.feature!=0 or smpl.cadd_phred!=0 or smpl.phenomizer!=0 or smpl.boqa!=0:
new_samples.append(smpl)
self.samples = new_samples
def filter_cadd(self):
new_samples=[]
for smpl in self.samples:
if smpl.feature!=0 or smpl.gestalt!=0 or smpl.phenomizer!=0 or smpl.boqa!=0:
new_samples.append(smpl)
self.samples = new_samples
def filter_cadd_gestalt(self):
new_samples=[]
for smpl in self.samples:
if smpl.feature!=0 or smpl.phenomizer!=0 or smpl.boqa!=0:
new_samples.append(smpl)
self.samples = new_samples
def threshold(self, value, score='gestalt'):
for smpl in self.samples:
if getattr(smpl, score)<value:
setattr(smpl, score, 0)
def numhit(self, num):
""" filters self.samples for only those samples that have num scores featuring
values higher than 0, self.samples will be adjusted accordingly"""
newsamples = []
for smpl in self.samples:
num_scores = 0
if smpl.gestalt != 0:
num_scores += 1
if smpl.feature != 0:
num_scores += 1
if smpl.cadd_phred > 0:
num_scores += 1
if smpl.phenomizer > 0:
num_scores += 1
if smpl.boqa > 0:
num_scores += 1
if num_scores >= num:
newsamples.append(smpl)
self.samples = []
self.samples = newsamples
def bucketize_data(self):
"""A function to prepare 10x cross validation
It returns a list of len 10. each list (bucket) will contain case IDs. All case
IDs featuring a pathogenic mutation in the same gene will be in the same bucket.
The size of the buckets will be as similar as possible
"""
print('creating 10 buckets - same gene same bucket')
self.casedisgene.sort()
buckets = []
for i in range(10):
buckets.append([])
allgenes = [] # a list that will contain the names of all genes that have a pathogenic entry in allscores
        numgenes = [] # a list that will contain the frequencies by which these genes are pathogenically altered in the data; the indices correspond to those of allgenes
for entry in self.casedisgene:
case = entry[0]
gene = entry[1]
# if a gene is not yet assigned to allgenes, an entry will be created, at it
# will get the frequency 1 in numgenes
if gene not in allgenes:
allgenes.append(gene)
numgenes.append(1)
elif gene in allgenes: #if a gene is in allgenes already...
x = 0 # index of that gene
for i in allgenes:
if i == gene:
                        # ...its index in numgenes will be determined and this index will be
                        # increased by 1 (frequency + 1)
numgenes[x] += 1
x += 1
for gene in allgenes:
# minbucket is the minimal number of IDs in a bucket its initial size is the number
# of cases in the data divideb by 10 plus 2
minbucket = len(self.casedisgene) / 10 + 2
for bucket in buckets:
minbucket = min(len(bucket),minbucket) #minbucket is adjusted to the length of the smallest bucket
for bucket in buckets:
if len(bucket) == minbucket: #(one of) the smallest bucket(s) is selected
for entry in self.casedisgene:
case = entry[0]
dgene = entry[1]
if dgene == gene:
bucket.append(case) #all the case IDs with a pathogenic mutation in a certain gene are added to this bucket
break
#print(buckets)
return buckets
def classify_10xSVM(self, C=1):
""" a 10x validation SVM classification of all samples in the instance of Data. The samples' pedia
        attribute will be adjusted accordingly. """
buckets = self.bucketize_data()
print('10 x cross validation')
bn = 1 #bucket number
# 10x cross validation, data will be split according to the ID entries in each bucket,
# that were created by bucketize_data()
for bucket in buckets:
print('computing results for bucket ' + str(bn))
X = []
y = []
for smpl in self.samples:
# only the data will be used for training that are of cases that are NOT in the topical bucket
if smpl.case not in bucket:
X.append([smpl.gestalt, smpl.feature, smpl.cadd_phred,smpl.phenomizer, smpl.boqa]) #feature vector
y.append(smpl.pathogenicity) #class labels
X = np.array(X) #the clf function needs np arrays
scaler = preprocessing.MinMaxScaler().fit(X)
X = scaler.transform(X) #data is scaled to values between 1 and 0 using minmax scaler
y = np.array(y) #the clf function needs np arrays
# the classifier is balanced because class 0 exceeds class 1 by far,
# (only one pathogenic mutation per case, but several hundred genes per case)
clf = svm.SVC(kernel='poly', C=C, degree=2, probability=False, class_weight='balanced')
clf.fit(X, y)
for smpl in self.samples:
# only those samples are tested with the classifier that ARE in the bucket
if smpl.case in bucket:
smpl.pedia = float(clf.decision_function(scaler.transform(np.array([smpl.gestalt, smpl.feature , smpl.cadd_phred, smpl.phenomizer, smpl.boqa]))))
bn += 1
def save_SVM(self, C=1):
""" saves the classifier so that it can be reloaded and quickly used for other purposes"""
print('loading data')
X = []
y = []
for smpl in self.samples:
X.append([smpl.gestalt, smpl.feature, smpl.cadd_phred, smpl.phenomizer, smpl.boqa])
y.append(smpl.pathogenicity) # scalar class label; wrapping it in a list would make y a column vector and trigger a shape warning in clf.fit
X = np.array(X)
scaler = preprocessing.MinMaxScaler().fit(X)
X = scaler.transform(X)
y = np.array(y)
print('training classifier')
clf = svm.SVC(kernel='poly', C=C, degree=2, probability=False, class_weight='balanced')
clf.fit(X, y)
print('saving classifier')
joblib.dump(clf, 'pedia_classifier.pkl', compress=9)
print('saving scaler')
joblib.dump(scaler, 'pedia_scaler.pkl', compress=9)
print('done saving')
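# Illustrative reload of the persisted objects (a sketch, not part of the pipeline itself):
#   clf = joblib.load('pedia_classifier.pkl')
#   scaler = joblib.load('pedia_scaler.pkl')
#   score = float(clf.decision_function(scaler.transform([[gestalt, feature, cadd_phred, phenomizer, boqa]])))
# where the five values are assumed to be the scores of a single candidate gene.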
def classify_10xSVM_extom(self):
""" a 10x validation SVM classification of all samples in the instance of Data. The samples' pedia atrribute will be adjusted accordingly. """
buckets = self.bucketize_data()
print('10 x cross validation')
bn = 1 #bucket number
for bucket in buckets: #10x cross validation, data will be split according to the ID entries in each bucket, that were created by bucketize_data()
print('computing results for bucket ' + str(bn))
X = []
y = []
for smpl in self.samples:
if smpl.case not in bucket: #only the data will be used for training that are of cases that are NOT in the topical bucket
X.append([smpl.feature,smpl.cadd_phred, smpl.phenomizer, smpl.boqa]) #feature vector
y.append(smpl.pathogenicity) #class labels
X = np.array(X) #the clf function needs np arrays
scaler = preprocessing.MinMaxScaler().fit(X)
X = scaler.transform(X) #data is scaled to values between 1 and 0 using minmax scaler
y = np.array(y)#the clf function needs np arrays
clf = svm.SVC(kernel='poly', C=1, degree=2, probability=False, class_weight='balanced') #the classifier is balanced because class 0 exceeds class 1 by far, (only one pathogenic mutation per case,but several hundred genes per case)
clf.fit(X, y)
for smpl in self.samples:
if smpl.case in bucket: #only those samples are tested with the classifier that ARE in the bucket
smpl.extom = float(clf.decision_function(scaler.transform(np.array([smpl.feature, smpl.cadd_phred, smpl.phenomizer, smpl.boqa]))))
bn += 1
def classify_10xSVM_sgt(self): #sgt: specific gene testing
""" a 10x validation SVM classification of all samples in the instance of Data. The samples' pedia atrribute will be adjusted accordingly. """
buckets = self.bucketize_data()
print('10 x cross validation')
bn = 1 #bucket number
for bucket in buckets: #10x cross validation, data will be split according to the ID entries in each bucket, that were created by bucketize_data()
print('computing results for bucket ' + str(bn))
X = []
y = []
for smpl in self.samples:
if smpl.case not in bucket: #only the data will be used for training that are of cases that are NOT in the topical bucket
X.append([smpl.feature,smpl.gestalt, smpl.phenomizer, smpl.boqa]) #feature vector
y.append(smpl.pathogenicity) #class labels
X = np.array(X) #the clf function needs np arrays
scaler = preprocessing.MinMaxScaler().fit(X)
X = scaler.transform(X) #data is scaled to values between 1 and 0 using minmax scaler
y = np.array(y)#the clf function needs np arrays
clf = svm.SVC(kernel = 'poly', C=1, degree=2, probability=False, class_weight='balanced') #the classifier is balanced because class 0 exceeds class 1 by far, (only one pathogenic mutation per case,but several hundred genes per case)
clf.fit(X, y)
for smpl in self.samples:
if smpl.case in bucket: #only those samples are tested with the classifier that ARE in the bucket
smpl.extom = float(clf.decision_function(scaler.transform(np.array([smpl.feature, smpl.gestalt, smpl.phenomizer, smpl.boqa]))))
bn += 1
def classify_10xSVM_sympt(self): # sympt: symptom-based scores only
"""A 10x cross-validation SVM classification using only the feature, phenomizer and boqa scores. The samples' extom attribute will be adjusted accordingly."""
buckets = self.bucketize_data()
print('10 x cross validation')
bn = 1 #bucket number
for bucket in buckets: #10x cross validation, data will be split according to the ID entries in each bucket, that were created by bucketize_data()
print('computing results for bucket ' + str(bn))
X = []
y = []
for smpl in self.samples:
if smpl.case not in bucket: #only the data will be used for training that are of cases that are NOT in the topical bucket
X.append([smpl.feature,smpl.phenomizer, smpl.boqa]) #feature vector
y.append(smpl.pathogenicity) #class labels
X = np.array(X) #the clf function needs np arrays
scaler = preprocessing.MinMaxScaler().fit(X)
X = scaler.transform(X) #data is scaled to values between 1 and 0 using minmax scaler
y = np.array(y)#the clf function needs np arrays
clf = svm.SVC(kernel='poly', C=1, degree=2, probability=False, class_weight='balanced') #the classifier is balanced because class 0 exceeds class 1 by far, (only one pathogenic mutation per case,but several hundred genes per case)
clf.fit(X, y)
for smpl in self.samples:
if smpl.case in bucket: #only those samples are tested with the classifier that ARE in the bucket
smpl.extom = float(clf.decision_function(scaler.transform(np.array([smpl.feature, smpl.phenomizer, smpl.boqa]))))
bn += 1
def classify_10xMLP(self):
""" a 10x validation SVM classification of all samples in the instance of Data. The samples' pedia atrribute will be adjusted accordingly. """
buckets = self.bucketize_data()
print('10 x cross validation')
bn = 1 #bucket number
for bucket in buckets: #10x cross validation, data will be split according to the ID entries in each bucket, that were created by bucketize_data()
print('computing results for bucket ' + str(bn))
X = []
y = []
for smpl in self.samples:
if smpl.case not in bucket: #only the data will be used for training that are of cases that are NOT in the topical bucket
X.append([smpl.gestalt, smpl.feature, smpl.cadd_phred, smpl.phenomizer, smpl.boqa]) #feature vector
y.append(smpl.pathogenicity) #class labels
X=np.array(X) #the clf function needs np arrays
scaler = preprocessing.MinMaxScaler().fit(X)
X=scaler.transform(X) #data is scaled to values between 1 and 0 using minmax scaler
y=np.array(y)#the clf function needs np arrays
clf = MLPClassifier(hidden_layer_sizes=(4, 3), max_iter=10, alpha=1e-4,
solver='sgd', verbose=10, tol=1e-4, random_state=1,
learning_rate_init=.1)
clf.fit(X,y)
for smpl in self.samples:
if smpl.case in bucket: #only those samples are tested with the classifier that ARE in the bucket
smpl.pedia = float(clf.predict(scaler.transform(np.array([smpl.gestalt, smpl.feature , smpl.cadd_phred, smpl.phenomizer, smpl.boqa]))))
bn+=1
def classify_real(self, training_data):
""" SVM classification of all samples in the instance of Data against a given training
data set that is also an instance of class Data """
print('classification')
X = []
y = []
for smpl in training_data.samples:
X.append([smpl.gestalt, smpl.feature, smpl.cadd_phred, smpl.phenomizer, smpl.boqa]) #feature vector
y.append(smpl.pathogenicity) # class labels
X = np.array(X) # the clf function needs np arrays
scaler = preprocessing.MinMaxScaler().fit(X)
X = scaler.transform(X) # data is scaled to values between 1 and 0 using minmax scaler
y = np.array(y) # the clf function needs np arrays
# the classifier is balanced because class 0 exceeds class 1 by far,
# (only one pathogenic mutation per case,but several hundred genes per case)
clf = svm.SVC(kernel='poly', C=1, degree=2, probability=False, class_weight='balanced')
clf.fit(X, y)
for smpl in self.samples:
smpl.pedia = float(clf.decision_function(scaler.transform(np.array([smpl.gestalt, smpl.feature, smpl.cadd_phred, smpl.phenomizer, smpl.boqa]))))
def manhattan(self, ID='all', score='pedia'):
""" Displays the information in Data as a manhattan plot. If the optional variable ID is set to a string matching a case ID, only the results of this case will be displayed."""
genepos={}
chr_sizes=[249250621, 243199373, 198022430, 191154276, 180915260, 171115067, 159138663, 146364022, 141213431, 135534747, 135006516, 133851895, 115169878, 107349540, 102531392, 90354753, 81195210, 78077248, 59128983, 63025520, 48129895, 51304566, 155270560, 59373566, 16571]
for line in open('allgenepositions.txt'):
fields = line[:-1].split('\t')
nm = fields[0]
chro = fields[1]
pos = fields[2]
name = fields[3]
if name not in genepos:
genepos[name]=[chro,pos]
sanos=[]
sanos2=[]
pathos=[]
s_pos=[]
s_pos2=[]
p_pos=[]
names=[]
names_x=[]
names_y=[]
for smpl in self.samples:
if smpl.case==ID or ID=='all':
if smpl.gene not in genepos and smpl.pathogenicity == 1:
print(smpl.gene)
if smpl.gene in genepos:
chrom=genepos[smpl.gene][0][3:]
if chrom=='X':
chrom=23
elif chrom=='Y':
chrom=24
elif chrom=='M':
chrom=25
else:
chrom=int(chrom)
pos=0
for i in range(chrom-1):
pos+=chr_sizes[i]+10**6
pos+=int(genepos[smpl.gene][1])
if smpl.pathogenicity==0:
if chrom%2==0:
sanos2.append(getattr(smpl, score))
s_pos2.append(pos)
else:
sanos.append(getattr(smpl, score))
s_pos.append(pos)
if smpl.pathogenicity==1:
pathos.append(getattr(smpl, score))
p_pos.append(pos)
if smpl.gene in names:
for i in range(len(names)):
if names[i]==smpl.gene:
if names_y[i]<getattr(smpl, score):
names_y[i]=getattr(smpl, score)
if smpl.gene not in names:
names.append(smpl.gene)
names_x.append(pos)
names_y.append(getattr(smpl, score))
plt.scatter(s_pos,sanos, color='#70ACC0', alpha=0.6, marker='o', s=400, label=('neutrals')) #s=30
plt.scatter(s_pos2,sanos2, color='#008B8B', alpha=0.6, marker='o', s=400, label=('neutrals')) #s=30 #385660
plt.scatter(p_pos,pathos, color='#AA1C7D', alpha=0.6, marker='o', s=400, label='pathogenic') #s=30
for i in range(len(names)):
plt.annotate(names[i], xy = (names_x[i], names_y[i]), xytext = (names_x[i], names_y[i]), fontsize=70, color='#AA1C7D')#, textcoords = 'offset points')
plt.xlabel('chromosomal position', fontsize=30)
ticks=[]
tick=0
for i in chr_sizes:
tick+=i/2
ticks.append(tick)
tick+=(i/2)+10**6
plt.xticks(ticks)
plt.ylabel( score+' score', fontsize=30)
plt.legend(loc='upper left', fontsize=25)
frame1=plt.gca()
chr_names=[]
for i in range(1,26):
if i==23:
chr_names.append('X')
elif i==24:
chr_names.append('Y')
elif i==25:
chr_names.append('M')
else:
chr_names.append(str(i))
frame1.axes.xaxis.set_ticklabels(chr_names, fontsize=25)
frame1.axes.tick_params(axis='x',length=0)
frame1.axes.tick_params(axis='y', labelsize=25)
y_min=min([min(sanos),min(sanos2),min(pathos)])
y_max=max([max(sanos),max(sanos2),max(pathos)])
plt.ylim(y_min, y_max+(y_max/10)) #ymin-(ymax/30)
plt.xlim(0,ticks[-1]+(chr_sizes[-1]/2)+10**6)
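# Illustrative usage (the case ID is hypothetical): d.manhattan(ID='40639', score='pedia'),
# followed by plt.show() or plt.savefig(...); with ID='all' every case is plotted at once.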
def ranker(self, col, lab):
"""A function to evaluate (rank) the results of the classification and put into a plot.
only to be used after data was classified."""
# data is what is to be analyzed, it must have the structure of alldatascored in classify()
# col is the color of the plot
# lab is the label of the plot
print('ranking results based on',lab)
data = []
for smpl in self.samples:
data.append([smpl.case, smpl.pedia, smpl.gestalt, smpl.pathogenicity])
n_cases = len(self.casedisgene)
# sorts the data by case ID because it has index 0 in alldatascored, and then by pedia score,
# because it has index 1
data.sort()
# reverses the data so that each case starts with the entry with the highest pedia score
# a list that will contain lists of the IDs of each case and the rank of the respective
# pathogenic variant, ranked by the pedia-score
combined_rank = []
rank = 1
case = data[0][0]
pathoamongdata = False # is the pathogenic gene still among the data (it had not been filtered out?)
npf = 0 # number passed filter
for entry in data:
currcase = entry[0]
patho = entry[-1]
if currcase != case:
if not pathoamongdata:
combined_rank.append([case, 102])
# if the pathogenic gene is no longer present in that case, the case is assigned
# rank 102 (any rank higher than 100 is regarded as having failed)
pathoamongdata = False #pathoamongdata is set back to false
rank = 1
case = currcase
if patho == 1:
combined_rank.append([case, rank]) # assignes the rank of the pathogenic gene to the case
pathoamongdata = True # true because there was a pathogenic gene in that case
npf += 1
rank += 1 # increased by 1 for each iteration, because the list is sorted by case and then by pedia score
combined_performance = []
for i in range(101): # will evaluate ranks in the range 0 to 100
sens = 0
for j in combined_rank:
rank = j[1]
if rank <= i:
sens += 1 # how many cases have a patho rank lower than or equal to i
# the absolute number is divided by the total number of cases,
# so that one has the fraction of cases having a patho rank not higher than i
sens = (sens/n_cases)
# appends sens, so that combined_performance is a list of floats, each float describing the
# fraction of cases that have a patho rank lower than or equal to its index
combined_performance.append(sens)
plt.plot(range(1, len(combined_performance)), combined_performance[1:],
color=col, alpha=0.6, label=lab, linewidth=3)
plt.scatter([1,10,100],[combined_performance[1],combined_performance[10],combined_performance[100]],
color=col, alpha=0.6, marker='o', s=50)
print(lab, [combined_performance[1], combined_performance[10], combined_performance[100]],
'fraction passed filter:', (npf/n_cases))
plt.ylim(0, 1.01) #the last lines of code are only needed to display the results
plt.xlim(0, 100.5)
plt.xlabel('rank-cut-off')
plt.ylabel('Sensitivity')
plt.title('Sensitivity-rank-cut-off-correlation')
plt.legend(loc='lower right')
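# Reading the output: combined_performance[k] is the fraction of cases whose pathogenic
# gene is ranked at position k or better, so combined_performance[1] is the top-1
# sensitivity and combined_performance[10] the top-10 sensitivity.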
def ranker2(self,col,lab, score='pedia'):
"""A function to evaluate (rank) the results of the classification and put into a plot. only to be used after data was classified."""
#data is what is to be analyzed, it must have the structure of alldatascored in classify()
#col is the color of the plot
#lab is the label of the plot
print('ranking results based on',lab)
cases={}
combined_rank=[] #a list that will contain lists of the IDs of each case and the rank of the respective pathogenic variant, ranked by the pedia-score
n_cases = len(self.casedisgene)
npf=0 #number passed filter
for smpl in self.samples:
if smpl.case in cases:
cases[smpl.case].append([getattr(smpl, score), smpl.pathogenicity])
if smpl.case not in cases:
cases[smpl.case]=[[getattr(smpl, score), smpl.pathogenicity]]
for case in cases:
cases[case].sort()
cases[case].reverse()
ranks=(list(enumerate(cases[case])))
for i in ranks:
rank=102
if i[1][1]==1:
rank=i[0]+1
npf+=1
combined_rank.append([case,rank])
combined_performance=[]
for i in range(101): #will evaluate ranks in the range 0 to 100
sens=0
for j in combined_rank:
rank=j[1]
if rank<=i:
sens+=1 #how many cases have a patho rank lower than or equal to i
sens=(sens/n_cases) #the absolute number is divided by the total number of cases, so that one has the fraction of cases having a patho rank not higher than i
combined_performance.append(sens) #appends sens, so that combined_performance is a list of floats, each float describing the fraction of cases that have a patho rank lower than or equal to its index
plt.plot(range(1,len(combined_performance)),combined_performance[1:], color=col, alpha=0.6, label=lab, linewidth=3)
plt.scatter([1,10,100],[combined_performance[1],combined_performance[10],combined_performance[100]], color=col, alpha=0.6, marker='o', s=50)
print(lab,[combined_performance[1],combined_performance[10],combined_performance[100]],'fraction passed filter:',(npf/n_cases))
plt.ylim(0, 1.01) #the last lines of code are only needed to display the results
plt.xlim(0, 100.5)
plt.xlabel('rank-cut-off')
plt.ylabel('Sensitivity')
plt.title('Sensitivity-rank-cut-off-correlation')
plt.legend(loc='lower right')
def ranker_returner(self, lab, score='pedia'):
"""A function to evaluate (rank) the results of the classification and put into a plot.
only to be used after data was classified."""
# lab is the label used in the progress printout; the plotting code is kept below, commented out
print('ranking results based on', lab)
cases = {}
# a list that will contain lists of the IDs of each case and the rank of the respective
# pathogenic variant, ranked by the pedia-score
combined_rank = []
n_cases = len(self.casedisgene)
npf = 0 #number passed filter
for smpl in self.samples:
if smpl.case in cases:
cases[smpl.case].append([getattr(smpl, score), smpl.pathogenicity * (-1)])
if smpl.case not in cases:
cases[smpl.case] = [[getattr(smpl, score), smpl.pathogenicity * (-1)]]
for case in cases:
cases[case].sort()
cases[case].reverse()
ranks = (list(enumerate(cases[case])))
for i in ranks:
rank = 102
if i[1][1] == -1:
rank = i[0] + 1
npf += 1
combined_rank.append([case, rank])
combined_performance = []
for i in range(101): # will evaluate ranks in the range 0 to 100
sens = 0
for j in combined_rank:
rank = j[1]
if rank <= i:
sens += 1 # how many cases have a patho rank lower than or equal to i
# the absolute number is divided by the total number of cases, so that one has
# the fraction of cases having a patho rank not higher than i
sens = (sens/n_cases)
# appends sens, so that combined_performance is a list of floats, each float describing
# the fraction of cases that have a patho rank lower than or equal to its index
combined_performance.append(sens)
# plt.plot(range(1,len(combined_performance)),combined_performance[1:], color=col, alpha=0.6, label=lab, linewidth=3)
# plt.scatter([1,10,100],[combined_performance[1],combined_performance[10],combined_performance[100]], color=col, alpha=0.6, marker='o', s=50)
# print(lab,[combined_performance[1],combined_performance[10],combined_performance[100]],'fraction passed filter:',(npf/n_cases))
# plt.ylim(0, 1.01) #the last lines of code are only needed to display the results
# plt.xlim(0, 100.5)
# plt.xlabel('rank-cut-off')
# plt.ylabel('Sensitivity')
# plt.title('Sensitivity-rank-cut-off-correlation')
# plt.legend(loc='lower right')
return([combined_performance[1], combined_performance[10], combined_performance[100]])
def ranker_returner2(self,lab, score='pedia'):
"""A function to evaluate (rank) the results of the classification and put into a plot. only to be used after data was classified."""
#lab is the label used in the progress printout; the plotting code is kept below, commented out
print('ranking results based on',lab)
cases={}
combined_rank=[] #a list that will contain lists of the IDs of each case and the rank of the respective pathogenic variant, ranked by the pedia-score
n_cases = len(self.casedisgene)
npf=0 #number passed filter
for smpl in self.samples:
if smpl.case in cases:
cases[smpl.case].append([getattr(smpl, score), smpl.pathogenicity*(-1)])
if smpl.case not in cases:
cases[smpl.case]=[[getattr(smpl, score), smpl.pathogenicity*(-1)]]
for case in cases:
cases[case].sort()
cases[case].reverse()
ranks=(list(enumerate(cases[case])))
print(ranks)
for i in ranks:
rank=102
if i[1][1]==-1:
rank=i[0]+1
npf+=1
combined_rank.append([case,rank])
combined_performance=[]
for i in range(101): #will evaluate ranks in the range 0 to 100
sens=0
for j in combined_rank:
rank=j[1]
if rank<=i:
sens+=1 #how many cases have a patho rank lower than or equal to i
sens=(sens/n_cases) #the absolute number is divided by the total number of cases, so that one has the fraction of cases having a patho rank not higher than i
combined_performance.append(sens) #appends sens, so that combined_performance is a list of floats, each float describing the fraction of cases that have a patho rank lower than or equal to its index
# plt.plot(range(1,len(combined_performance)),combined_performance[1:], color=col, alpha=0.6, label=lab, linewidth=3)
# plt.scatter([1,10,100],[combined_performance[1],combined_performance[10],combined_performance[100]], color=col, alpha=0.6, marker='o', s=50)
# print(lab,[combined_performance[1],combined_performance[10],combined_performance[100]],'fraction passed filter:',(npf/n_cases))
# plt.ylim(0, 1.01) #the last lines of code are only needed to display the results
# plt.xlim(0, 100.5)
# plt.xlabel('rank-cut-off')
# plt.ylabel('Sensitivity')
# plt.title('Sensitivity-rank-cut-off-correlation')
# plt.legend(loc='lower right')
print([combined_performance[1],combined_performance[10],combined_performance[100]])
def compare(self, score1='pedia', score2='gestalt', score3='extom'):
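# Sketch of what this method does: for every case it collects the rank of the
# pathogenic gene under score1, score2 and score3 (by default pedia, gestalt and
# extom) and prints the cases in which score1 ranks the pathogenic gene better
# (i.e. at a lower rank) than score3.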
cases={}
rank1=1000
rank2=1000
rank5=1000
ranking={}
for smpl in self.samples:
if smpl.case in cases:
cases[smpl.case].append([getattr(smpl, score1), smpl.pathogenicity])
if smpl.case not in cases:
cases[smpl.case]=[[getattr(smpl, score1), smpl.pathogenicity]]
for case in cases:
cases[case].sort()
cases[case].reverse()
ranks=(list(enumerate(cases[case])))
for i in ranks:
if i[1][1]==1:
#print(i)
ranking[case]=[i[0]+1]
#print(case,ranking[case],[i[0]+1])
cases={}
for smpl in self.samples:
if smpl.case in cases:
cases[smpl.case].append([getattr(smpl, score2), smpl.pathogenicity])
if smpl.case not in cases:
cases[smpl.case]=[[getattr(smpl, score2), smpl.pathogenicity]]
for case in cases:
cases[case].sort()
cases[case].reverse()
ranks=(list(enumerate(cases[case])))
for i in ranks:
if i[1][1]==1:
ranking[case].append(i[0]+1)
cases={}
for smpl in self.samples:
if smpl.case in cases:
cases[smpl.case].append([getattr(smpl, score3), smpl.pathogenicity])
if smpl.case not in cases:
cases[smpl.case]=[[getattr(smpl, score3), smpl.pathogenicity]]
for case in cases:
cases[case].sort()
cases[case].reverse()
ranks=(list(enumerate(cases[case])))
for i in ranks:
if i[1][1]==1:
ranking[case].append(i[0]+1)
for case in ranking:
if ranking[case][0]<ranking[case][2]:
print(str(case),ranking[case])
def ranker3(self,col,lab, score='pedia'):
"""A function to evaluate (rank) the results of the classification and put into a plot. only to be used after data was classified."""
#data is what is to be analyzed, it must have the structure of alldatascored in classify()
#col is the color of the plot
#lab is the label of the plot
print('ranking results based on',lab)
genes={}
cdg={}
for entry in self.casedisgene:
genes[entry[1]]=[]
cdg[entry[0]]=entry[1]
cases={}
combined_rank=[] #a list that will contain lists of the IDs of each case and the rank of the respective pathogenic variant, ranked by the pedia-score
n_cases = len(self.casedisgene)
npf=0 #number passed filter
for smpl in self.samples:
if smpl.case in cases:
cases[smpl.case].append([getattr(smpl, score), smpl.pathogenicity, smpl.gene])
if smpl.case not in cases:
cases[smpl.case]=[[getattr(smpl, score), smpl.pathogenicity, smpl.gene]]
for case in cases:
cases[case].sort()
cases[case].reverse()
ranks=(list(enumerate(cases[case])))
rank=102
for i in ranks:
if i[1][1]==1:
rank=i[0]+1
npf+=1
genes[cdg[case]].append(rank)
#print('genes:',genes)
for gene in genes:
# if genes[gene]==[]:
# genes[gene]=[102]
ranksum=0
for rank in genes[gene]:
ranksum+=rank
ranksum/=len(genes[gene])
combined_rank.append([gene,ranksum])
print(gene, genes[gene], ranksum)
combined_performance=[]
for i in range(101): #will evaluate ranks in the range 0 to 100
sens=0
for j in combined_rank:
rank=j[1]
if rank<=i:
sens+=1 #how many genes have an average patho rank lower than or equal to i
sens=(sens/len(genes)) #the absolute number is divided by the total number of genes, so that one has the fraction of genes having an average patho rank not higher than i
combined_performance.append(sens) #appends sens, so that combined_performance is a list of floats, each float describing the fraction of genes that have a patho rank lower than or equal to its index
plt.plot(range(1,len(combined_performance)),combined_performance[1:], color=col, alpha=0.6, label=lab, linewidth=3)
plt.scatter([1,10,100],[combined_performance[1],combined_performance[10],combined_performance[100]], color=col, alpha=0.6, marker='o', s=50)
print(lab,[combined_performance[1],combined_performance[10],combined_performance[100]],'fraction passed filter:',(npf/n_cases))
plt.ylim(0, 1.01) #the last lines of code are only needed to display the results
plt.xlim(0, 100.5)
plt.xlabel('rank-cut-off')
plt.ylabel('Sensitivity')
plt.title('Sensitivity-rank-cut-off-correlation')
plt.legend(loc='lower right')
def save_jsons(self,path):
'''a function to save the pedia scores in their respective jsons'''
cwd=os.getcwd()
os.chdir(path)
print('saving results')
for file in os.listdir():
if file[-5:]=='.json':
print(file)
with open(file) as json_data:
casedata = json.load(json_data)
for smpl in self.samples:
for i in casedata['geneList']:
if i['gene_symbol']==smpl.gene:
i['pedia_score']=smpl.pedia
with open(file, 'w') as f:
json.dump(casedata, f)
os.chdir(cwd)
print('finished saving')
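# Minimal sketch of the JSON layout this method assumes (field names are taken from the
# code above, the values are hypothetical):
#   {"geneList": [{"gene_symbol": "PTPN11", "pedia_score": 3.2}, ...], ...}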
def hyper_search_helper(self, start=-5, stop=5, step=10, maximum=0, attempts=2, best=[0,[0,0,0]]):
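# Helper for hyper_search: evaluates C-values on a log10 grid between 10**start and
# 10**stop in `step` equal exponent increments, keeps the best-performing exponent
# (judged by the top-10, then top-1, then top-100 sensitivity returned by
# ranker_returner) and returns a narrowed [start, stop] window around it for the
# next round.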
for i in range(0, step + 1, 1):
exp = start + (i / step * (stop - start))
print('evaluating c-value of 10**' + str(exp) + '\nstep ' + str(i + 1) + ' of ' + str(step))
c_value=10**exp
self.classify_10xSVM(c_value)
performance = [exp, self.ranker_returner(lab=('c_value = 10**' + str(exp)))]
if performance[1][1] > best[1][1]:
best = performance
elif performance[1][1] == best[1][1]:
if performance[1][0] > best[1][0]:
best = performance
elif performance[1][0] == best[1][0] and performance[1][2] > best[1][2]:
best = performance
#results.append(performance)
print('best', best)
#print(results)
print('best',best)
if best[0] == maximum:
attempts -= 1
if best[0] != start and best[0] != stop:
result = [best[0] - (2 * ((stop - start) / step)), best[0] + (2 * ((stop-start) / step)), step, attempts, best]
else:
result=[start - ((stop - start)), stop + ((stop - start)), step, attempts, best]
return(result)
def hyper_search(self, start=-5, stop=5, step=10, maximum=0, attempts=2, best=[0,[0,0,0]]):
iteration = 1
while attempts > 0:
print('hyperparameter search round: ' + str(iteration) + ' \nremaining determination attempts ' + str(attempts))
new = self.hyper_search_helper(start, stop, step, maximum, attempts, best)
start = new[0]
stop = new[1]
step = new[2] # not strictly necessary, as step doesn't change in hyper_search_helper
attempts = new[3]
maximum = new[4][0]
best = new[4]
iteration += 1
print('hyperparameter search determined best c-value at ' + str(best))
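# Illustrative call (mirrors the one in main() below): d.hyper_search(start=-3, stop=3)
# repeatedly narrows the C-value search window until, roughly, `attempts` rounds in a
# row confirm the same best exponent.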
def main():
if len(sys.argv) < 2:
sys.exit('Usage: python %s path(simulation data)' % sys.argv[0])
path = sys.argv[1]
print('loading 1KG')
onekg = Data()
onekg.load(path + '/real/train/1KG')
onekg.numhit(0)
print('loading ExAC')
exac = Data()
exac.load(path + '/real/train/ExAC')
exac.numhit(0)
print('loading Iranian')
iran = Data()
iran.load(path + '/real/train/IRAN')
iran.numhit(0)
print('loading test data')
test = Data()
test.load(path + '/real/test')
test.numhit(0)
print('classifying against 1KG')
test.classify_real(onekg)
test.ranker('red', '1KG')
print('classifying against ExAC')
test.classify_real(exac)
test.ranker('blue', 'EXAC')
print('classifying against Iranian')
test.classify_real(iran)
test.ranker('purple', 'Iran')
plt.savefig('sensitivity_rank_cor.png')
results = []
best = [None, [0, 0, 0]]
print('loading 1KG')
onekg = Data()
onekg.load(path + '/1KG/CV')
onekg.save_SVM(C=10 ** (-1.45))
onekg.hyper_search(start=-3, stop=3)
smpl = Sample()
smpl.boqa = 0.5
smpl.phenomizer = 0.7
smpl.cadd_phred = 17
smpl.feature = 0
smpl.gestalt = 1.5
smpl.classify()
print('classifying 1KG by SVM')
onekg.classify_10xSVM()
onekg.manhattan('40639')
plt.savefig('10xSVM.png')
onekg.ranker2('red', 'PEDIA')
onekg.classify_10xSVM_extom()
onekg.ranker2('blue', 'extom', score = 'extom')
onekg.compare()
onekg.classify_10xSVM_extom()
onekg.ranker2('blue', 'extom no filter', score = 'extom')
onekg.ranker2('green', 'gestalt no filter', score = 'gestalt')
onekg2 = Data()
onekg2.load2(path + '/1KG/CV')
print('classifying 1KG by SVM')
onekg2.classify_10xSVM()
onekg2.ranker2('black', 'extom sep. loaded')
plt.savefig('10xSVM_extom.png')
#onekg.ranker2('purple','gestalt',score='gestalt')
#onekg.ranker2('orange','cadd_phred',score='cadd_phred')
onekg.filter_gestalt()
onekg.classify_10xSVM_extom()
onekg.ranker3('blue', 'extom post filter', score = 'extom')
onekg = Data()
onekg.load(path + '/real/train/1KG')
test = Data()
test.load(path + '/real/test')
test.classify_real(onekg)
test.ranker2('red', 'PEDIA')
onekg.filter_gestalt()
test.classify_real(onekg)
test.ranker2('blue', 'extom')
plt.show()
onekgnum = Data()
onekgnum.load(path + '/1KG/CV')
onekgnum.numhit(2)
print('classifying 1KGnum by SVM')
onekgnum.classify_10xSVM()
onekgnum.ranker3('green', 'PEDIA num')
onekgnum.filter_gestalt()
onekgnum.classify_10xSVM_extom()
onekgnum.ranker3('orange', 'extomnum', score = 'extom')
onekg.compare()
plt.show()
scores=[[30, 0], [7, 0], [346, 0], [9, 0], [65, 0], [39, 0], [87, 0], [124, 0], [39, 1], [30, 0], [-1, 0]]
scores.sort()
scores.reverse()
print(list(enumerate(scores)))
print('loading 1KG')
onekg = Data()
onekg.load(path + '/1KG/CV')
onekg.numhit(0)
print('loading ExAC')
exac = Data()
exac.load(path + '/ExAC/CV')
exac.numhit(0)
print('loading Iranian')
iran = Data()
iran.load(path + '/IRAN/CV')
iran.numhit(0)
print('classifying 1KG')
onekg.classify_10xSVM()
onekg.ranker('red','1KG')
print('classifying ExAC')
exac.classify_10xSVM()
exac.ranker('blue','EXAC')
print('classifying Iranian')
iran.classify_10xSVM()
iran.ranker('purple','Iran')
plt.show()
test=Data()
test.load(path + '/1KG/CV')
test.classify_10xSVM()
test.manhattan('97147')
plt.show()
os.chdir(path + '/1KG/CV')
for i in os.listdir():
with open(i) as json_data:
data = json.load(json_data)
print(data['ranks']['combined_rank'], i[:-5])
test=Data()
test.load(path + '/real/test')
cases=[]
patho=0
for smpl in test.samples:
if smpl.case not in cases:
cases.append(smpl.case)
for smpl in test.samples:
if smpl.pathogenicity == 1:
cases.pop(cases.index(smpl.case))
os.chdir(path + '/real/test')
for case in cases:
with open(str(case)+'.json') as json_data:
data = json.load(json_data)
disgene=data['genomicData'][0]['Test Information']['Gene Name']
print(disgene)
for entry in data['geneList']:
#print(entry['gene_symbol'])
if entry['gene_symbol']==disgene:
print('here')
if __name__ == '__main__':
main()
| [
"[email protected]"
] | |
5a9660779063959ecef329d1b58ac42c1dc13e5e | 0da3ebae606295ee3c1613004c6f21650e914841 | /codestreak/extensions.py | 07761de1820e73e03a2ea21169597925d9435969 | [] | no_license | mfwarren/codestreak.io | 38bac87f2ddc6e7cff56a4bc95b6b1ca4a41ef1a | bd37dd7ad55c9926e7a4752afca5986c08145d34 | refs/heads/master | 2020-06-11T06:21:27.012529 | 2019-03-03T15:43:32 | 2019-03-03T15:43:32 | 75,747,414 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 448 | py | # -*- coding: utf-8 -*-
"""Extensions module. Each extension is initialized in the app factory located in app.py."""
from flask_debugtoolbar import DebugToolbarExtension
from flask_migrate import Migrate
from raven.contrib.flask import Sentry
from flask_sqlalchemy import SQLAlchemy
from flask_wtf.csrf import CsrfProtect
csrf_protect = CsrfProtect()
db = SQLAlchemy()
migrate = Migrate()
debug_toolbar = DebugToolbarExtension()
sentry = Sentry()
| [
"[email protected]"
] | |
d911a1de75e301eed643472356197ac68faf3647 | b0fab024e9b7e7bd51c18c5578f0f45314808592 | /sine_competition_url/competition_url.py | 692142fbbe0f49ea5ac2e79373fac3914a120a1b | [] | no_license | dhecar/SINERGIA | a34d98fda84ce8ca8d2f67b89680bbf19c15fe1b | 678cfd41df8045645be130d2f3d51399908b15fd | refs/heads/master | 2021-01-23T11:07:48.724177 | 2017-04-20T10:51:07 | 2017-04-20T10:51:07 | 33,182,317 | 1 | 7 | null | 2015-08-31T20:59:43 | 2015-03-31T11:45:11 | Python | UTF-8 | Python | false | false | 471 | py | import openerp.addons.decimal_precision as dp
from openerp.tools.translate import _
from openerp.osv import fields, osv
import urllib
import re
class competition_url(osv.osv):
_name = 'competition.url'
_description = 'URL for competition'
_table = 'competition_url'
_rec_name = 'url_competition'
_columns = {
'url_competition': fields.char('Url ', size=150),
'regex': fields.char('Expression', size=300),
}
competition_url()
| [
"[email protected]"
] | |
3c2393b7cc3380369cbc4d1b55810fd9d9d82ed4 | c16ea32a4cddb6b63ad3bacce3c6db0259d2bacd | /google/cloud/clouddms/v1/clouddms-v1-py/google/cloud/clouddms_v1/services/data_migration_service/transports/grpc.py | 27875f61ce4a3548b8ad8719aabf93eec1c8092e | [
"Apache-2.0"
] | permissive | dizcology/googleapis-gen | 74a72b655fba2565233e5a289cfaea6dc7b91e1a | 478f36572d7bcf1dc66038d0e76b9b3fa2abae63 | refs/heads/master | 2023-06-04T15:51:18.380826 | 2021-06-16T20:42:38 | 2021-06-16T20:42:38 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 31,544 | py | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import warnings
from typing import Callable, Dict, Optional, Sequence, Tuple, Union
from google.api_core import grpc_helpers # type: ignore
from google.api_core import operations_v1 # type: ignore
from google.api_core import gapic_v1 # type: ignore
import google.auth # type: ignore
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
import grpc # type: ignore
from google.cloud.clouddms_v1.types import clouddms
from google.cloud.clouddms_v1.types import clouddms_resources
from google.longrunning import operations_pb2 # type: ignore
from .base import DataMigrationServiceTransport, DEFAULT_CLIENT_INFO
class DataMigrationServiceGrpcTransport(DataMigrationServiceTransport):
"""gRPC backend transport for DataMigrationService.
Database Migration service
This class defines the same methods as the primary client, so the
primary client can load the underlying transport implementation
and call it.
It sends protocol buffers over the wire using gRPC (which is built on
top of HTTP/2); the ``grpcio`` package must be installed.
"""
_stubs: Dict[str, Callable]
def __init__(self, *,
host: str = 'datamigration.googleapis.com',
credentials: ga_credentials.Credentials = None,
credentials_file: str = None,
scopes: Sequence[str] = None,
channel: grpc.Channel = None,
api_mtls_endpoint: str = None,
client_cert_source: Callable[[], Tuple[bytes, bytes]] = None,
ssl_channel_credentials: grpc.ChannelCredentials = None,
client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None,
quota_project_id: Optional[str] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
) -> None:
"""Instantiate the transport.
Args:
host (Optional[str]):
The hostname to connect to.
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
This argument is ignored if ``channel`` is provided.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is ignored if ``channel`` is provided.
scopes (Optional(Sequence[str])): A list of scopes. This argument is
ignored if ``channel`` is provided.
channel (Optional[grpc.Channel]): A ``Channel`` instance through
which to make calls.
api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
If provided, it overrides the ``host`` argument and tries to create
a mutual TLS channel with client SSL credentials from
``client_cert_source`` or application default SSL credentials.
client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
Deprecated. A callback to provide client SSL certificate bytes and
private key bytes, both in PEM format. It is ignored if
``api_mtls_endpoint`` is None.
ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
for grpc channel. It is ignored if ``channel`` is provided.
client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]):
A callback to provide client certificate bytes and private key bytes,
both in PEM format. It is used to configure mutual TLS channel. It is
ignored if ``channel`` or ``ssl_channel_credentials`` is provided.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
Raises:
google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
creation failed for any reason.
google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
and ``credentials_file`` are passed.
"""
self._grpc_channel = None
self._ssl_channel_credentials = ssl_channel_credentials
self._stubs: Dict[str, Callable] = {}
self._operations_client = None
if api_mtls_endpoint:
warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning)
if client_cert_source:
warnings.warn("client_cert_source is deprecated", DeprecationWarning)
if channel:
# Ignore credentials if a channel was passed.
credentials = False
# If a channel was explicitly provided, set it.
self._grpc_channel = channel
self._ssl_channel_credentials = None
else:
if api_mtls_endpoint:
host = api_mtls_endpoint
# Create SSL credentials with client_cert_source or application
# default SSL credentials.
if client_cert_source:
cert, key = client_cert_source()
self._ssl_channel_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
else:
self._ssl_channel_credentials = SslCredentials().ssl_credentials
else:
if client_cert_source_for_mtls and not ssl_channel_credentials:
cert, key = client_cert_source_for_mtls()
self._ssl_channel_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
# The base transport sets the host, credentials and scopes
super().__init__(
host=host,
credentials=credentials,
credentials_file=credentials_file,
scopes=scopes,
quota_project_id=quota_project_id,
client_info=client_info,
)
if not self._grpc_channel:
self._grpc_channel = type(self).create_channel(
self._host,
credentials=self._credentials,
credentials_file=credentials_file,
scopes=self._scopes,
ssl_credentials=self._ssl_channel_credentials,
quota_project_id=quota_project_id,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
# Wrap messages. This must be done after self._grpc_channel exists
self._prep_wrapped_messages(client_info)
@classmethod
def create_channel(cls,
host: str = 'datamigration.googleapis.com',
credentials: ga_credentials.Credentials = None,
credentials_file: str = None,
scopes: Optional[Sequence[str]] = None,
quota_project_id: Optional[str] = None,
**kwargs) -> grpc.Channel:
"""Create and return a gRPC channel object.
Args:
host (Optional[str]): The host for the channel to use.
credentials (Optional[~.Credentials]): The
authorization credentials to attach to requests. These
credentials identify this application to the service. If
none are specified, the client will attempt to ascertain
the credentials from the environment.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is mutually exclusive with credentials.
scopes (Optional[Sequence[str]]): A optional list of scopes needed for this
service. These are only used when credentials are not specified and
are passed to :func:`google.auth.default`.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
kwargs (Optional[dict]): Keyword arguments, which are passed to the
channel creation.
Returns:
grpc.Channel: A gRPC channel object.
Raises:
google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
and ``credentials_file`` are passed.
"""
self_signed_jwt_kwargs = cls._get_self_signed_jwt_kwargs(host, scopes)
return grpc_helpers.create_channel(
host,
credentials=credentials,
credentials_file=credentials_file,
quota_project_id=quota_project_id,
**self_signed_jwt_kwargs,
**kwargs
)
@property
def grpc_channel(self) -> grpc.Channel:
"""Return the channel designed to connect to this service.
"""
return self._grpc_channel
@property
def operations_client(self) -> operations_v1.OperationsClient:
"""Create the client designed to process long-running operations.
This property caches on the instance; repeated calls return the same
client.
"""
# Sanity check: Only create a new client if we do not already have one.
if self._operations_client is None:
self._operations_client = operations_v1.OperationsClient(
self.grpc_channel
)
# Return the client from cache.
return self._operations_client
@property
def list_migration_jobs(self) -> Callable[
[clouddms.ListMigrationJobsRequest],
clouddms.ListMigrationJobsResponse]:
r"""Return a callable for the list migration jobs method over gRPC.
Lists migration jobs in a given project and location.
Returns:
Callable[[~.ListMigrationJobsRequest],
~.ListMigrationJobsResponse]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if 'list_migration_jobs' not in self._stubs:
self._stubs['list_migration_jobs'] = self.grpc_channel.unary_unary(
'/google.cloud.clouddms.v1.DataMigrationService/ListMigrationJobs',
request_serializer=clouddms.ListMigrationJobsRequest.serialize,
response_deserializer=clouddms.ListMigrationJobsResponse.deserialize,
)
return self._stubs['list_migration_jobs']
@property
def get_migration_job(self) -> Callable[
[clouddms.GetMigrationJobRequest],
clouddms_resources.MigrationJob]:
r"""Return a callable for the get migration job method over gRPC.
Gets details of a single migration job.
Returns:
Callable[[~.GetMigrationJobRequest],
~.MigrationJob]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if 'get_migration_job' not in self._stubs:
self._stubs['get_migration_job'] = self.grpc_channel.unary_unary(
'/google.cloud.clouddms.v1.DataMigrationService/GetMigrationJob',
request_serializer=clouddms.GetMigrationJobRequest.serialize,
response_deserializer=clouddms_resources.MigrationJob.deserialize,
)
return self._stubs['get_migration_job']
@property
def create_migration_job(self) -> Callable[
[clouddms.CreateMigrationJobRequest],
operations_pb2.Operation]:
r"""Return a callable for the create migration job method over gRPC.
Creates a new migration job in a given project and
location.
Returns:
Callable[[~.CreateMigrationJobRequest],
~.Operation]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if 'create_migration_job' not in self._stubs:
self._stubs['create_migration_job'] = self.grpc_channel.unary_unary(
'/google.cloud.clouddms.v1.DataMigrationService/CreateMigrationJob',
request_serializer=clouddms.CreateMigrationJobRequest.serialize,
response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs['create_migration_job']
@property
def update_migration_job(self) -> Callable[
[clouddms.UpdateMigrationJobRequest],
operations_pb2.Operation]:
r"""Return a callable for the update migration job method over gRPC.
Updates the parameters of a single migration job.
Returns:
Callable[[~.UpdateMigrationJobRequest],
~.Operation]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if 'update_migration_job' not in self._stubs:
self._stubs['update_migration_job'] = self.grpc_channel.unary_unary(
'/google.cloud.clouddms.v1.DataMigrationService/UpdateMigrationJob',
request_serializer=clouddms.UpdateMigrationJobRequest.serialize,
response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs['update_migration_job']
@property
def delete_migration_job(self) -> Callable[
[clouddms.DeleteMigrationJobRequest],
operations_pb2.Operation]:
r"""Return a callable for the delete migration job method over gRPC.
Deletes a single migration job.
Returns:
Callable[[~.DeleteMigrationJobRequest],
~.Operation]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if 'delete_migration_job' not in self._stubs:
self._stubs['delete_migration_job'] = self.grpc_channel.unary_unary(
'/google.cloud.clouddms.v1.DataMigrationService/DeleteMigrationJob',
request_serializer=clouddms.DeleteMigrationJobRequest.serialize,
response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs['delete_migration_job']
@property
def start_migration_job(self) -> Callable[
[clouddms.StartMigrationJobRequest],
operations_pb2.Operation]:
r"""Return a callable for the start migration job method over gRPC.
Start an already created migration job.
Returns:
Callable[[~.StartMigrationJobRequest],
~.Operation]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if 'start_migration_job' not in self._stubs:
self._stubs['start_migration_job'] = self.grpc_channel.unary_unary(
'/google.cloud.clouddms.v1.DataMigrationService/StartMigrationJob',
request_serializer=clouddms.StartMigrationJobRequest.serialize,
response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs['start_migration_job']
@property
def stop_migration_job(self) -> Callable[
[clouddms.StopMigrationJobRequest],
operations_pb2.Operation]:
r"""Return a callable for the stop migration job method over gRPC.
Stops a running migration job.
Returns:
Callable[[~.StopMigrationJobRequest],
~.Operation]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if 'stop_migration_job' not in self._stubs:
self._stubs['stop_migration_job'] = self.grpc_channel.unary_unary(
'/google.cloud.clouddms.v1.DataMigrationService/StopMigrationJob',
request_serializer=clouddms.StopMigrationJobRequest.serialize,
response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs['stop_migration_job']
@property
def resume_migration_job(self) -> Callable[
[clouddms.ResumeMigrationJobRequest],
operations_pb2.Operation]:
r"""Return a callable for the resume migration job method over gRPC.
Resume a migration job that is currently stopped and
is resumable (was stopped during CDC phase).
Returns:
Callable[[~.ResumeMigrationJobRequest],
~.Operation]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if 'resume_migration_job' not in self._stubs:
self._stubs['resume_migration_job'] = self.grpc_channel.unary_unary(
'/google.cloud.clouddms.v1.DataMigrationService/ResumeMigrationJob',
request_serializer=clouddms.ResumeMigrationJobRequest.serialize,
response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs['resume_migration_job']
@property
def promote_migration_job(self) -> Callable[
[clouddms.PromoteMigrationJobRequest],
operations_pb2.Operation]:
r"""Return a callable for the promote migration job method over gRPC.
Promote a migration job, stopping replication to the
destination and promoting the destination to be a
standalone database.
Returns:
Callable[[~.PromoteMigrationJobRequest],
~.Operation]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if 'promote_migration_job' not in self._stubs:
self._stubs['promote_migration_job'] = self.grpc_channel.unary_unary(
'/google.cloud.clouddms.v1.DataMigrationService/PromoteMigrationJob',
request_serializer=clouddms.PromoteMigrationJobRequest.serialize,
response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs['promote_migration_job']
@property
def verify_migration_job(self) -> Callable[
[clouddms.VerifyMigrationJobRequest],
operations_pb2.Operation]:
r"""Return a callable for the verify migration job method over gRPC.
Verify a migration job, making sure the destination
can reach the source and that all configuration and
prerequisites are met.
Returns:
Callable[[~.VerifyMigrationJobRequest],
~.Operation]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if 'verify_migration_job' not in self._stubs:
self._stubs['verify_migration_job'] = self.grpc_channel.unary_unary(
'/google.cloud.clouddms.v1.DataMigrationService/VerifyMigrationJob',
request_serializer=clouddms.VerifyMigrationJobRequest.serialize,
response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs['verify_migration_job']
@property
def restart_migration_job(self) -> Callable[
[clouddms.RestartMigrationJobRequest],
operations_pb2.Operation]:
r"""Return a callable for the restart migration job method over gRPC.
Restart a stopped or failed migration job, resetting
the destination instance to its original state and
starting the migration process from scratch.
Returns:
Callable[[~.RestartMigrationJobRequest],
~.Operation]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if 'restart_migration_job' not in self._stubs:
self._stubs['restart_migration_job'] = self.grpc_channel.unary_unary(
'/google.cloud.clouddms.v1.DataMigrationService/RestartMigrationJob',
request_serializer=clouddms.RestartMigrationJobRequest.serialize,
response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs['restart_migration_job']
@property
def generate_ssh_script(self) -> Callable[
[clouddms.GenerateSshScriptRequest],
clouddms.SshScript]:
r"""Return a callable for the generate ssh script method over gRPC.
Generate a SSH configuration script to configure the
reverse SSH connectivity.
Returns:
Callable[[~.GenerateSshScriptRequest],
~.SshScript]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if 'generate_ssh_script' not in self._stubs:
self._stubs['generate_ssh_script'] = self.grpc_channel.unary_unary(
'/google.cloud.clouddms.v1.DataMigrationService/GenerateSshScript',
request_serializer=clouddms.GenerateSshScriptRequest.serialize,
response_deserializer=clouddms.SshScript.deserialize,
)
return self._stubs['generate_ssh_script']
@property
def list_connection_profiles(self) -> Callable[
[clouddms.ListConnectionProfilesRequest],
clouddms.ListConnectionProfilesResponse]:
r"""Return a callable for the list connection profiles method over gRPC.
Retrieve a list of all connection profiles in a given
project and location.
Returns:
Callable[[~.ListConnectionProfilesRequest],
~.ListConnectionProfilesResponse]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if 'list_connection_profiles' not in self._stubs:
self._stubs['list_connection_profiles'] = self.grpc_channel.unary_unary(
'/google.cloud.clouddms.v1.DataMigrationService/ListConnectionProfiles',
request_serializer=clouddms.ListConnectionProfilesRequest.serialize,
response_deserializer=clouddms.ListConnectionProfilesResponse.deserialize,
)
return self._stubs['list_connection_profiles']
@property
def get_connection_profile(self) -> Callable[
[clouddms.GetConnectionProfileRequest],
clouddms_resources.ConnectionProfile]:
r"""Return a callable for the get connection profile method over gRPC.
Gets details of a single connection profile.
Returns:
Callable[[~.GetConnectionProfileRequest],
~.ConnectionProfile]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if 'get_connection_profile' not in self._stubs:
self._stubs['get_connection_profile'] = self.grpc_channel.unary_unary(
'/google.cloud.clouddms.v1.DataMigrationService/GetConnectionProfile',
request_serializer=clouddms.GetConnectionProfileRequest.serialize,
response_deserializer=clouddms_resources.ConnectionProfile.deserialize,
)
return self._stubs['get_connection_profile']
@property
def create_connection_profile(self) -> Callable[
[clouddms.CreateConnectionProfileRequest],
operations_pb2.Operation]:
r"""Return a callable for the create connection profile method over gRPC.
Creates a new connection profile in a given project
and location.
Returns:
Callable[[~.CreateConnectionProfileRequest],
~.Operation]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if 'create_connection_profile' not in self._stubs:
self._stubs['create_connection_profile'] = self.grpc_channel.unary_unary(
'/google.cloud.clouddms.v1.DataMigrationService/CreateConnectionProfile',
request_serializer=clouddms.CreateConnectionProfileRequest.serialize,
response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs['create_connection_profile']
@property
def update_connection_profile(self) -> Callable[
[clouddms.UpdateConnectionProfileRequest],
operations_pb2.Operation]:
r"""Return a callable for the update connection profile method over gRPC.
Update the configuration of a single connection
profile.
Returns:
Callable[[~.UpdateConnectionProfileRequest],
~.Operation]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if 'update_connection_profile' not in self._stubs:
self._stubs['update_connection_profile'] = self.grpc_channel.unary_unary(
'/google.cloud.clouddms.v1.DataMigrationService/UpdateConnectionProfile',
request_serializer=clouddms.UpdateConnectionProfileRequest.serialize,
response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs['update_connection_profile']
@property
def delete_connection_profile(self) -> Callable[
[clouddms.DeleteConnectionProfileRequest],
operations_pb2.Operation]:
r"""Return a callable for the delete connection profile method over gRPC.
Deletes a single Database Migration Service
connection profile. A connection profile can only be
deleted if it is not in use by any active migration
jobs.
Returns:
Callable[[~.DeleteConnectionProfileRequest],
~.Operation]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if 'delete_connection_profile' not in self._stubs:
self._stubs['delete_connection_profile'] = self.grpc_channel.unary_unary(
'/google.cloud.clouddms.v1.DataMigrationService/DeleteConnectionProfile',
request_serializer=clouddms.DeleteConnectionProfileRequest.serialize,
response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs['delete_connection_profile']
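# --- Illustrative usage sketch (added for clarity; not part of the generated
# transport surface). It assumes an already-constructed transport instance and
# shows how the stub properties above are meant to be used: each one returns a
# callable that sends the serialized request over the underlying gRPC channel.
# The `name` request field below follows the usual resource-name pattern of
# this API and is an assumption, not taken from this file.
def _example_delete_connection_profile(transport, profile_name):
    request = clouddms.DeleteConnectionProfileRequest(name=profile_name)
    # The callable returns a long-running operations_pb2.Operation message.
    return transport.delete_connection_profile(request)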
__all__ = (
'DataMigrationServiceGrpcTransport',
)
| [
"bazel-bot-development[bot]@users.noreply.github.com"
] | bazel-bot-development[bot]@users.noreply.github.com |
752e9f1a6a208543137c36cda179ddf64539f177 | b4a0380acd79a21c5596bfa5fac6eb337ef5359a | /build/lib.linux-x86_64-3.8/maskrcnn_benchmark/data/datasets/evaluation/kitchen/__init__.py | 262e29603168969af9a493dd19f6620fd1abb4d8 | [] | no_license | xiaofeng-c/Morphable-Detector | 781104d8a7221eb03c55a67f51f696e46ded4003 | 3e50bb20493c3e0b99d37971e51487124aa08b5b | refs/heads/master | 2023-08-27T20:53:21.606442 | 2021-10-18T22:28:38 | 2021-10-18T22:28:38 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 529 | py | import logging
from .kitchen_eval import do_kitchen_evaluation
def kitchen_evaluation(dataset, predictions, output_folder, box_only, **_):
logger = logging.getLogger("maskrcnn_benchmark.inference")
if box_only:
logger.warning("kitchen evaluation doesn't support box_only, ignored.")
logger.info("performing kitchen evaluation, ignored iou_types.")
return do_kitchen_evaluation(
dataset=dataset,
predictions=predictions,
output_folder=output_folder,
logger=logger,
)
| [
"[email protected]"
] | |
52fe5c0ff1bb7e21c43186b82e52b142647c0566 | 83ed1e2f176133c03a5f6dfa504b8df15ae71efb | /python/secondary/jnet/jnet.py | d29d5ce53337781076ea7ea61b55aca71ca18040 | [] | no_license | jmborr/code | 319db14f28e1dea27f9fc703be629f171e6bd95f | 32720b57699bf01803367566cdc5fff2b6bce810 | refs/heads/master | 2022-03-09T16:11:07.455402 | 2019-10-28T15:03:01 | 2019-10-28T15:03:01 | 23,627,627 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,652 | py | #!/usr/bin/python
import os,sys,re
from jobs.job import pastry
from inputArgs.inputArgs import inpHand
from utilities.small_utilities import Bye,junkName
from utilities.codedir import codedir
from seq.blastManager import blastRun
from seq.fastaManager import importFastaEntry
from seq.msa import gen_msa
from seq.letters import one2three
#create frequency file
def createFreqFile(seq,msa):
'''
seq: query sequence
msa: multiple sequence alignment in a list, in the style of 3590
and e10. msa does not contain the query sequence, and each
template sequence is a list item
'''
aas='ARNDCQEGHILKMFPSTWYV' #amino acid order to output, assumed by jnet
freq=[] #freq[i][j] 0<i<len(seq), 0<j<19
N=len(seq)
for i in range(N): freq.append([0]*20) #initialize
for i in range(N): #add amino acids of query sequence to the frequency table
m=aas.find(seq[i]) #return position in aas corresponding to amino acid seq[i]
if m<0:continue #seq[i] character not found in aas (like "X" or "-")
freq[i][m]+=1
for seq2 in msa: #do same for sequences in the alignment
for i in range(N):
m=aas.find(seq2[i]) #return position in aas corresponding to amino acid seq[i]
if m<0:continue #seq[i] character not found in aas
freq[i][m]+=1
blastffreq=junkName()+'.jnetfreq'
out=open(blastffreq,'w')
for i in range(N):
line='' ; m=0
for j in range(20):m+=freq[i][j] #total number of counts for amino acid at position "i"
for j in range(20):
freq[i][j]=round( (10.0*freq[i][j])/m ) #rounded to nearest integer
line+='%3d'%(freq[i][j])
out.write(line+'\n') #write amino acid frequencies for amino acid at position "i"
out.close()
return blastffreq
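# Illustrative sketch (not part of the original script): a toy call with an
# assumed 3-residue query and two gapped template sequences of the same length.
# It would write a 3-row x 20-column table of rounded relative frequencies and
# return the generated file name:
# freqfile = createFreqFile('ACD', ['AC-', 'A-D'])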
#########################################################
# SCRIPT EXECUTION STARTS HERE
#########################################################
inpHand('Usage: inprosp.py [options]',
' -a _RA_fastaf sequence file in fasta format',
' -b _A_blasttarf tarred blast output (contains xxxxx.blast,xxxxx.pssm). If not provided, jnet.py will do a psiblast run',
' -c _A_outf output file name (def: ./seq.dat)',
).parse(locals(),sys.argv)
currd=os.getcwd() #current directory
if not outf: outf=currd+'/seq.dat' #output file
workd=currd+'/'+junkName() ;os.system('/bin/mkdir -p '+workd) ; os.chdir(workd) #temp directory
header,seq=importFastaEntry(open(fastaf,'r'))
pastry('/bin/cp '+fastaf+' .') ;fastaf=os.path.basename(fastaf)
#Retrieve/create psiblast outputs
if blasttarf:#we passed a *.tar file containing psiblast report and pssm file
blastOuts={'outf':'', 'blast':'', 'chk':'', 'fasta':fastaf, 'pssm':''}
os.system('tar xf '+blasttarf)
blastOuts['blast']=os.popen('ls -1 *.blast').readline().strip() #get name of blast report
blastOuts['pssm']=os.popen('ls -1 *pssm.').readline().strip() #get name of pssm file
else: blastOuts=blastRun(fastaf) #run blast with default options
#create multiple sequence alignment projected to the query sequence (3590 or e10 style)
msa=gen_msa(seq,header,Eco=0.0001,maxId=0.85,minId=0.10,red=0.75,blastOuts=blastOuts)['alignments']
#find frequency table from msa, and output to "freq" file. I did this
#function because PSIBLAST has evolved and the perl script "getfreq"
#provided in the jnet distro does not work. I could use newer script
#"parse_psi -freq" but it requires a psiblast report obtained with
#blastpgp -m 6, instead of blastpgp -m 0. I don't want to run PSIBLAST
#twice and running with -m 6 gives some humongously-sized reports
blastffreq=createFreqFile(seq,msa)
#remove too short sequences, then keep only first M sequences
msa2=[] ; N=len(seq) ; Lhalf=N/2
for seqgapped in msa:
sequngapped=seqgapped.replace('-','')
if len(sequngapped) > Lhalf: msa2.append(sequngapped)
M=int(1000*200/N) #maximum number of sequences to use
msa=msa2[0:M] #reclaim memory space by liberating the gapped sequences list
#output alignment as suitable use for clustalw
rootname=junkName()
fastasf=rootname+'.aln' ; fpt=open(fastasf,'w')
fpt.write('>00000\n'+seq+'\n')
for i in range(len(msa)): fpt.write('>'+'%05d\n'%(i+1)+msa[i]+'\n')
fpt.close()
#run clustalw
os.system('clustalw -OUTORDER=INPUT -INFILE='+fastasf+' -OUTPUT=GCG >/dev/null')
msf=rootname+'.msf'
if not os.path.exists(msf): Bye('ERROR: not msf file generated in jnet.py')
#run perl scripts to create the various inputs required by jnet
pastry('/bin/cp -r '+codedir+'/bin/jnet/perl .')
pastry('/bin/cp -r '+codedir+'/bin/jnet/bin .')
os.system('./perl/msf2jnet '+msf) ; msffa=msf+'.fa' #multiple sequence alignment
os.system('./perl/gethmm '+msf+' >/dev/null') ; msfhm=msf+'.hmmprof' #hidden-Markov model
pssmf=blastOuts['pssm']
os.system('./perl/getpssm '+pssmf+' > '+pssmf+'.jnetpssm') ; pssmf=pssmf+'.jnetpssm'
#run jnet and parse to generate seq.dat
jnetout=junkName()
os.system('./bin/jnet -p '+msffa+' '+msfhm+' '+pssmf+' '+blastffreq+' > '+jnetout)
pattern=re.compile(':\s(\S+)\n') ; final='' ; conf=''
ss2nn={'-':1, 'H':2, 'E':4}
for line in os.popen('grep -P "\sFINAL\t" '+jnetout).readlines():
final+=pattern.search(line).group(1)
for line in os.popen('grep -P "\sCONF\t" '+jnetout).readlines():
conf+=pattern.search(line).group(1)
out=open(outf,'w')
for i in range(N):
out.write( '%5d%6s%5d%5d\n'%( i+1, one2three[seq[i]], ss2nn[final[i]], int(conf[i]) ) )
out.close()
#clean-up working directory
os.chdir(currd)
#os.system('/bin/rm -rf '+workd)
sys.exit(0)
| [
"[email protected]"
] | |
8873e1b784b24057a8e64655dca5dc3c4d1f3d87 | 5603625e865a7cfe415c1aae4035a890aeb23864 | /bin/mnu.py | a178061c01bb7c57e4e3d48aa0bfeed54f50e963 | [] | no_license | msyriac/peakaboo | aa3ac1396c2af0862f9c5891a20a08dddd97068b | 8bb8a50262695733b086984f7d89ff4f04187278 | refs/heads/master | 2021-01-21T13:30:31.434801 | 2018-05-16T18:53:34 | 2018-05-16T18:53:34 | 102,130,912 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,997 | py | import numpy as np
from peakaboo import liuSims as ls
import orphics.tools.io as io
import os,sys
import orphics.analysis.flatMaps as fmaps
from mpi4py import MPI
from orphics.analysis.pipeline import mpi_distribute, MPIStats
import orphics.analysis.flatMaps as fmaps
from enlib import enmap, resample
# Get MPI comm
comm = MPI.COMM_WORLD
rank = comm.Get_rank()
numcores = comm.Get_size()
out_dir = os.environ['WWW']+"peakaboo/"
#file_root = "/gpfs01/astro/workarea/msyriac/data/sims/jia/output/jia_recon_"
file_root = lambda mass,ftype,x,ext: "/gpfs01/astro/workarea/msyriac/data/sims/jia/output/"+mass+"_"+ftype+"_experiment_simple_"+str(x).zfill(9)+"."+ext
Ntot = 500
num_each,each_tasks = mpi_distribute(Ntot,numcores)
mpibox = MPIStats(comm,num_each,tag_start=333)
my_tasks = each_tasks[rank]
LCmassless = ls.LiuConvergence(root_dir="/gpfs01/astro/workarea/msyriac/data/sims/jia/cmb/massless/",zstr="1100.00")
LCmassive = ls.LiuConvergence(root_dir="/gpfs01/astro/workarea/msyriac/data/sims/jia/cmb/massive/",zstr="1100.00")
lbin_edges = np.arange(200,3000,100)
for k,i in enumerate(my_tasks):
#massive = enmap.read_map(file_root+"massive_"+str(i).zfill(9)+".fits")
#massless = enmap.read_map(file_root+"massless_"+str(i).zfill(9)+".fits")
massive = enmap.read_map(file_root("massive","kappa_recon",i,"fits"))
massless = enmap.read_map(file_root("massless","kappa_recon",i,"fits"))
if k==0:
qpower = fmaps.QuickPower(massive.modlmap(),lbin_edges)
massive_input = LCmassive.get_kappa(i+1)
massive_input = enmap.ndmap(resample.resample_fft(massive_input,massive.shape),massive.wcs)
massless_input = LCmassless.get_kappa(i+1)
massless_input = enmap.ndmap(resample.resample_fft(massless_input,massless.shape),massless.wcs)
print massive.shape
print massive_input.shape
cents, pauto_massive = qpower.calc(massive)
cents, pauto_massless = qpower.calc(massless)
cents, pcross_massive = qpower.calc(massive,massive_input)
cents, pcross_massless = qpower.calc(massless,massless_input)
cents, pauto_massive_input = qpower.calc(massive_input)
cents, pauto_massless_input = qpower.calc(massless_input)
lcents,massive_rkk = np.loadtxt(file_root("massive","auto_n0_subbed",i,"fits"),unpack=True)
lcents,massless_rkk = np.loadtxt(file_root("massless","auto_n0_subbed",i,"fits"),unpack=True)
mpibox.add_to_stats("massiveAutoN0",massive_rkk)
mpibox.add_to_stats("masslessAutoN0",massless_rkk)
mpibox.add_to_stats("massiveAuto",pauto_massive)
mpibox.add_to_stats("masslessAuto",pauto_massless)
mpibox.add_to_stats("masslessCross",pcross_massless)
mpibox.add_to_stats("massiveCross",pcross_massive)
mpibox.add_to_stats("massiveInput",pauto_massive_input)
mpibox.add_to_stats("masslessInput",pauto_massless_input)
print rank,i
mpibox.get_stats()
if rank==0:
rm = mpibox.stats["massiveAutoN0"]
rm0 = mpibox.stats["masslessAutoN0"]
mauto = mpibox.stats["massiveAuto"]
m0auto = mpibox.stats["masslessAuto"]
m0cross = mpibox.stats["masslessCross"]
mcross = mpibox.stats["massiveCross"]
mauto_input = mpibox.stats["massiveInput"]
m0auto_input = mpibox.stats["masslessInput"]
def camb_pred(nu):
import orphics.tools.cmb as cmb
# camb_cl_prediction
cambRoot = "data/jia_"+nu
theory = cmb.loadTheorySpectraFromCAMB(cambRoot,unlensedEqualsLensed=False,useTotal=False,TCMB = 2.7255e6,lpad=9000)
ellrange = np.arange(2,3000,1)
clkk_camb = theory.gCl("kk",ellrange)
return ellrange,clkk_camb
ellrange,clkk_camb0 = camb_pred("massless")
pl = io.Plotter(scaleY='log',labelX="$\\ell$",labelY="$C_{\ell}$")
pl.addErr(lcents,rm0['mean'],yerr=rm0['errmean'],marker="o")
pl.addErr(cents,m0cross['mean'],yerr=m0cross['errmean'],marker="^")
pl.add(cents,m0auto_input['mean'],marker="x",ls="none")
pl.add(ellrange,clkk_camb0,label="cl camb",color="k")
pl._ax.set_ylim(1.e-9,1.e-6)
pl.done(out_dir+"massless.png")
ellrange,clkk_camb = camb_pred("massive")
pl = io.Plotter(scaleY='log',labelX="$\\ell$",labelY="$C_{\ell}$")
pl.addErr(lcents,rm['mean'],yerr=rm['errmean'],marker="o")
pl.addErr(cents,mcross['mean'],yerr=mcross['errmean'],marker="^")
pl.add(cents,mauto_input['mean'],marker="x",ls="none")
pl.add(ellrange,clkk_camb,label="cl camb",color="k")
pl._ax.set_ylim(1.e-9,1.e-6)
pl.done(out_dir+"massive.png")
pdiff = (clkk_camb-clkk_camb0)*100./clkk_camb0
pl = io.Plotter(labelX="$\\ell$",labelY="$100\\Delta C_{\ell}/C_{\ell}$")
pl.add(lcents,(rm['mean']-rm0['mean'])*100./rm0['mean'],marker="o",ls="none")
pl.add(cents,(mauto_input['mean']-m0auto_input['mean'])*100./m0auto_input['mean'],marker="x",ls="none")
pl.add(ellrange,pdiff,label="cl camb",color="k")
pl.hline()
#pl._ax.set_ylim(-2,1)
pl._ax.set_xlim(500,3000)
pl.done(out_dir+"mnudiff.png")
| [
"[email protected]"
] | |
50499ed278f1c769e6003b5e965f70ca46dd96e2 | 8972658ca2c64703e8281db89d7a6ac47cbabbf7 | /backend/tests/models.py | db9754be03118136304be7ed51dc6c7b912ed427 | [
"MIT"
] | permissive | denisorehovsky/linkanywhere | 15721824719cc8a959cdddb4178cfe754eb4862d | e21d6725fbe0e74a7301e40f9d9bdbac17c68e68 | refs/heads/master | 2022-07-21T16:16:17.412930 | 2017-08-24T06:32:37 | 2017-08-24T06:32:37 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 567 | py | import uuid
from django.db import models
from linkanywhere.apps.base.behaviors import Published
class TestModel(models.Model):
"""
Base for test models that sets app_label, so they play nicely.
"""
id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)
class Meta:
app_label = 'tests'
abstract = True
class BasicModel(TestModel):
text = models.CharField(max_length=100)
class LikeModel(TestModel):
text = models.CharField(max_length=100)
class PublishedModel(Published, TestModel):
pass
| [
"[email protected]"
] | |
0e8424e290643259e8f4b6854ed2725343d9e5cd | 89c4a43a505df8fdf1f0d7386988c4896c2e631b | /google/ads/googleads/v8/resources/types/ad_group.py | 243d485134ef74e75b402fb5eb6187f67cf4952e | [
"Apache-2.0"
] | permissive | hurricanelennane/google-ads-python | a0a1fed690776a8bb2e81f637eb7eae10fb4992f | 310a488b6fdad9d5beea8fa4b166edce779a2511 | refs/heads/master | 2023-07-04T03:07:53.344466 | 2021-07-16T19:06:36 | 2021-07-16T19:06:36 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,520 | py | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
from google.ads.googleads.v8.common.types import custom_parameter
from google.ads.googleads.v8.common.types import (
explorer_auto_optimizer_setting as gagc_explorer_auto_optimizer_setting,
)
from google.ads.googleads.v8.common.types import (
targeting_setting as gagc_targeting_setting,
)
from google.ads.googleads.v8.enums.types import ad_group_ad_rotation_mode
from google.ads.googleads.v8.enums.types import ad_group_status
from google.ads.googleads.v8.enums.types import ad_group_type
from google.ads.googleads.v8.enums.types import asset_field_type
from google.ads.googleads.v8.enums.types import bidding_source
from google.ads.googleads.v8.enums.types import targeting_dimension
__protobuf__ = proto.module(
package="google.ads.googleads.v8.resources",
marshal="google.ads.googleads.v8",
manifest={"AdGroup",},
)
class AdGroup(proto.Message):
r"""An ad group.
Attributes:
resource_name (str):
Immutable. The resource name of the ad group. Ad group
resource names have the form:
``customers/{customer_id}/adGroups/{ad_group_id}``
id (int):
Output only. The ID of the ad group.
name (str):
The name of the ad group.
This field is required and should not be empty
when creating new ad groups.
It must contain fewer than 255 UTF-8 full-width
characters.
It must not contain any null (code point 0x0),
NL line feed (code point 0xA) or carriage return
(code point 0xD) characters.
status (google.ads.googleads.v8.enums.types.AdGroupStatusEnum.AdGroupStatus):
The status of the ad group.
type_ (google.ads.googleads.v8.enums.types.AdGroupTypeEnum.AdGroupType):
Immutable. The type of the ad group.
ad_rotation_mode (google.ads.googleads.v8.enums.types.AdGroupAdRotationModeEnum.AdGroupAdRotationMode):
The ad rotation mode of the ad group.
base_ad_group (str):
Output only. For draft or experiment ad
groups, this field is the resource name of the
base ad group from which this ad group was
created. If a draft or experiment ad group does
not have a base ad group, then this field is
null.
For base ad groups, this field equals the ad
group resource name.
This field is read-only.
tracking_url_template (str):
The URL template for constructing a tracking
URL.
url_custom_parameters (Sequence[google.ads.googleads.v8.common.types.CustomParameter]):
The list of mappings used to substitute custom parameter
tags in a ``tracking_url_template``, ``final_urls``, or
``mobile_final_urls``.
campaign (str):
Immutable. The campaign to which the ad group
belongs.
cpc_bid_micros (int):
The maximum CPC (cost-per-click) bid.
cpm_bid_micros (int):
The maximum CPM (cost-per-thousand viewable
impressions) bid.
target_cpa_micros (int):
The target CPA (cost-per-acquisition).
cpv_bid_micros (int):
Output only. The CPV (cost-per-view) bid.
target_cpm_micros (int):
Average amount in micros that the advertiser
is willing to pay for every thousand times the
ad is shown.
target_roas (float):
The target ROAS (return-on-ad-spend)
override. If the ad group's campaign bidding
strategy is a standard Target ROAS strategy,
then this field overrides the target ROAS
specified in the campaign's bidding strategy.
Otherwise, this value is ignored.
percent_cpc_bid_micros (int):
The percent cpc bid amount, expressed as a fraction of the
advertised price for some good or service. The valid range
for the fraction is [0,1) and the value stored here is
1,000,000 \* [fraction].
explorer_auto_optimizer_setting (google.ads.googleads.v8.common.types.ExplorerAutoOptimizerSetting):
Settings for the Display Campaign Optimizer,
initially termed "Explorer".
display_custom_bid_dimension (google.ads.googleads.v8.enums.types.TargetingDimensionEnum.TargetingDimension):
Allows advertisers to specify a targeting
dimension on which to place absolute bids. This
is only applicable for campaigns that target
only the display network and not search.
final_url_suffix (str):
URL template for appending params to Final
URL.
targeting_setting (google.ads.googleads.v8.common.types.TargetingSetting):
Setting for targeting related features.
effective_target_cpa_micros (int):
Output only. The effective target CPA (cost-
            per-acquisition). This field is read-only.
effective_target_cpa_source (google.ads.googleads.v8.enums.types.BiddingSourceEnum.BiddingSource):
Output only. Source of the effective target
CPA. This field is read-only.
effective_target_roas (float):
Output only. The effective target ROAS
(return-on-ad-spend). This field is read-only.
effective_target_roas_source (google.ads.googleads.v8.enums.types.BiddingSourceEnum.BiddingSource):
Output only. Source of the effective target
ROAS. This field is read-only.
labels (Sequence[str]):
Output only. The resource names of labels
attached to this ad group.
excluded_parent_asset_field_types (Sequence[google.ads.googleads.v8.enums.types.AssetFieldTypeEnum.AssetFieldType]):
The asset field types that should be excluded
from this ad group. Asset links with these field
types will not be inherited by this ad group
from the upper levels.
"""
resource_name = proto.Field(proto.STRING, number=1,)
id = proto.Field(proto.INT64, number=34, optional=True,)
name = proto.Field(proto.STRING, number=35, optional=True,)
status = proto.Field(
proto.ENUM,
number=5,
enum=ad_group_status.AdGroupStatusEnum.AdGroupStatus,
)
type_ = proto.Field(
proto.ENUM, number=12, enum=ad_group_type.AdGroupTypeEnum.AdGroupType,
)
ad_rotation_mode = proto.Field(
proto.ENUM,
number=22,
enum=ad_group_ad_rotation_mode.AdGroupAdRotationModeEnum.AdGroupAdRotationMode,
)
base_ad_group = proto.Field(proto.STRING, number=36, optional=True,)
tracking_url_template = proto.Field(proto.STRING, number=37, optional=True,)
url_custom_parameters = proto.RepeatedField(
proto.MESSAGE, number=6, message=custom_parameter.CustomParameter,
)
campaign = proto.Field(proto.STRING, number=38, optional=True,)
cpc_bid_micros = proto.Field(proto.INT64, number=39, optional=True,)
cpm_bid_micros = proto.Field(proto.INT64, number=40, optional=True,)
target_cpa_micros = proto.Field(proto.INT64, number=41, optional=True,)
cpv_bid_micros = proto.Field(proto.INT64, number=42, optional=True,)
target_cpm_micros = proto.Field(proto.INT64, number=43, optional=True,)
target_roas = proto.Field(proto.DOUBLE, number=44, optional=True,)
percent_cpc_bid_micros = proto.Field(proto.INT64, number=45, optional=True,)
explorer_auto_optimizer_setting = proto.Field(
proto.MESSAGE,
number=21,
message=gagc_explorer_auto_optimizer_setting.ExplorerAutoOptimizerSetting,
)
display_custom_bid_dimension = proto.Field(
proto.ENUM,
number=23,
enum=targeting_dimension.TargetingDimensionEnum.TargetingDimension,
)
final_url_suffix = proto.Field(proto.STRING, number=46, optional=True,)
targeting_setting = proto.Field(
proto.MESSAGE,
number=25,
message=gagc_targeting_setting.TargetingSetting,
)
effective_target_cpa_micros = proto.Field(
proto.INT64, number=47, optional=True,
)
effective_target_cpa_source = proto.Field(
proto.ENUM,
number=29,
enum=bidding_source.BiddingSourceEnum.BiddingSource,
)
effective_target_roas = proto.Field(proto.DOUBLE, number=48, optional=True,)
effective_target_roas_source = proto.Field(
proto.ENUM,
number=32,
enum=bidding_source.BiddingSourceEnum.BiddingSource,
)
labels = proto.RepeatedField(proto.STRING, number=49,)
excluded_parent_asset_field_types = proto.RepeatedField(
proto.ENUM,
number=54,
enum=asset_field_type.AssetFieldTypeEnum.AssetFieldType,
)
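# --- Illustrative construction sketch (added for clarity; not part of the
# generated module). proto-plus messages accept their fields as keyword
# arguments, so an AdGroup can be built directly; the values below are made up.
def _example_build_ad_group():
    return AdGroup(
        resource_name="customers/1234567890/adGroups/987654321",
        name="Example ad group",
        cpc_bid_micros=1000000,  # bid amounts are expressed in micros (1e6 = 1 unit)
    )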
__all__ = tuple(sorted(__protobuf__.manifest))
| [
"[email protected]"
] | |
9ab61ddea3a8f45f1f40b9490b41e4da6d9a6544 | 786de89be635eb21295070a6a3452f3a7fe6712c | /SConsTools/tags/V00-00-16/src/standardExternalPackage.py | f433403d52307a3f120d028b2d80755d194bb0c7 | [] | no_license | connectthefuture/psdmrepo | 85267cfe8d54564f99e17035efe931077c8f7a37 | f32870a987a7493e7bf0f0a5c1712a5a030ef199 | refs/heads/master | 2021-01-13T03:26:35.494026 | 2015-09-03T22:22:11 | 2015-09-03T22:22:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,349 | py | #===============================================================================
#
# SConscript fuction for standard external package
#
# $Id$
#
#===============================================================================
import os
import sys
from os.path import join as pjoin
from fnmatch import fnmatch
from SCons.Defaults import *
from SCons.Script import *
from SConsTools.trace import *
from SConsTools.dependencies import *
#
# This is an interface package for the external package. We wan to make
# symlinks to the include files, libs and binaries
#
# build package name from prefix and directory
def _absdir ( prefix, dir ):
if not dir :
return None
if prefix and not os.path.isabs( dir ) :
dir = pjoin( prefix, dir )
if not os.path.isdir( dir ) :
dir = None
return dir
def _glob ( dir, patterns ):
if patterns is None :
return os.listdir(dir)
# patterns could be space-separated string of patterns
if isinstance(patterns,(str,unicode)) :
patterns = patterns.split()
if not patterns : return []
result = []
for l in os.listdir(dir) :
for p in patterns :
if fnmatch ( l, p ) : result.append(l)
return result
#
# Define all builders for the external package
#
def standardExternalPackage ( package, **kw ) :
""" Understands following keywords (all are optional):
PREFIX - top directory of the external package
INCDIR - include directory, absolute or relative to PREFIX
PYDIR - Python src directory, absolute or relative to PREFIX
PYDIRSEP - if present and evaluates to True installs python code to a
separate directory arch/$LUSI_ARCH/python/<package>
LIBDIR - libraries directory, absolute or relative to PREFIX
LINKLIBS - library names to link, or all libs if not present
BINDIR - binaries directory, absolute or relative to PREFIX
LINKBINS - binary names to link, or all libs if not present
PKGLIBS - names of libraries that have to be linked for this package
DEPS - names of other packages that we depend upon
"""
pkg = os.path.basename(os.getcwd())
trace ( "Standard SConscript for external package `"+package+"'", "SConscript", 1 )
env = DefaultEnvironment()
prefix = kw.get('PREFIX',None)
trace ( "prefix: %s" % prefix, "standardExternalPackage", 3 )
# link include directory
inc_dir = _absdir ( prefix, kw.get('INCDIR',None) )
if inc_dir :
trace ( "include_dir: %s" % inc_dir, "standardExternalPackage", 5 )
# make 'geninc' directory if not there yet
archinc = Dir(env.subst("$ARCHINCDIR"))
archinc = str(archinc)
if not os.path.isdir( archinc ) : os.makedirs( archinc )
target = pjoin(archinc,package)
if not os.path.lexists(target) : os.symlink ( inc_dir, target )
# link python directory
py_dir = _absdir ( prefix, kw.get('PYDIR',None) )
if py_dir :
trace ( "py_dir: %s" % py_dir, "standardExternalPackage", 5 )
if kw.get('PYDIRSEP',False) :
# make a link to the whole dir
targ = env.Symlink ( Dir(pjoin(env.subst("$PYDIR"),package)), Dir(py_dir) )
env['ALL_TARGETS']['LIBS'].extend ( targ )
else :
# make links for every file in the directory
files = os.listdir(py_dir)
for f in files :
loc = pjoin(py_dir,f)
if not os.path.isdir(loc) :
targ = env.Symlink ( pjoin(env.subst("$PYDIR"),f), loc )
env['ALL_TARGETS']['LIBS'].extend( targ )
# link all libraries
lib_dir = _absdir ( prefix, kw.get('LIBDIR',None) )
if lib_dir :
trace ( "lib_dir: %s" % lib_dir, "standardExternalPackage", 5 )
# make a list of libs to link
libraries = kw.get('LINKLIBS',None)
trace ( "libraries: %s" % libraries, "standardExternalPackage", 5 )
libraries = _glob ( lib_dir, libraries )
trace ( "libraries: %s" % libraries, "standardExternalPackage", 5 )
for f in libraries :
loc = pjoin(lib_dir,f)
if os.path.isfile(loc) :
#targ = env.Install( "$LIBDIR", loc )
targ = env.Symlink ( pjoin(env.subst("$LIBDIR"),f), loc )
trace ( "linklib: %s -> %s" % (str(targ[0]),loc), "standardExternalPackage", 5 )
env['ALL_TARGETS']['LIBS'].extend ( targ )
# link all executables
bin_dir = _absdir ( prefix, kw.get('BINDIR',None) )
if bin_dir :
trace ( "bin_dir: %s" % bin_dir, "standardExternalPackage", 5 )
# make list of binaries to link
binaries = kw.get('LINKBINS',None)
binaries = _glob ( bin_dir, binaries )
for f in binaries :
loc = pjoin(bin_dir,f)
if os.path.isfile(loc) :
targ = env.Symlink ( pjoin(env.subst("$BINDIR"),f), loc )
env['ALL_TARGETS']['BINS'].extend ( targ )
# add my libs to a package tree
setPkgLibs ( env, package, kw.get('PKGLIBS',[]) )
# add packages that I depend on
setPkgDeps ( env, package, kw.get('DEPS',[]) )
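# Illustrative call sketch (not part of this module): a package SConscript would
# typically invoke the builder with the keywords documented above. The package
# name, prefix and patterns here are assumptions, not values used anywhere in
# this tree.
#
#   standardExternalPackage ( 'boost',
#                             PREFIX='/usr/local/boost',
#                             INCDIR='include',
#                             LIBDIR='lib',
#                             LINKLIBS='libboost_*.so*',
#                             PKGLIBS=['boost_regex'],
#                             DEPS=[] )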
| [
"salnikov@b967ad99-d558-0410-b138-e0f6c56caec7"
] | salnikov@b967ad99-d558-0410-b138-e0f6c56caec7 |
80137af34837964be8bf789dbbcf21a7a1f05a3a | 3d386ef093427c227f0ba6637eedfbce044a2e9e | /tfbert/optimization/create_optimizer.py | ac6c1abc5c6bf2aebe07f51b89fe61f37dbec2ae | [] | no_license | HaierAI/tfbert | c3eeb77af70e79e925e72c393a3e8229feaf1a4a | 3779e59a4ebe7458ae732fef547f1168badbba2b | refs/heads/master | 2023-07-09T05:25:19.015760 | 2021-08-16T12:27:37 | 2021-08-16T12:27:37 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,278 | py | # -*- coding:utf-8 -*-
# @FileName :create_optimizer.py
# @Time :2021/1/31 19:58
# @Author :huanghui
import tensorflow.compat.v1 as tf
from .adamw import AdamWeightDecayOptimizer
from .lamb import LAMBOptimizer
from .schedule import lr_schedule
def create_optimizer(
learning_rate,
num_train_steps=None,
num_warmup_steps=None,
optimizer_type='adamw',
epsilon=1e-6,
momentum=0.,
weight_decay=0.01,
decay_method='poly',
mixed_precision=False,
init_loss_scale=2 ** 32
):
if decay_method is not None and num_train_steps is not None and num_warmup_steps is not None:
num_train_steps = int(num_train_steps)
num_warmup_steps = int(num_warmup_steps)
learning_rate = lr_schedule(
learning_rate, num_train_steps, num_warmup_steps,
decay_method=decay_method, optimizer_type=optimizer_type
)
if optimizer_type == 'adamw':
optimizer = AdamWeightDecayOptimizer(
learning_rate=learning_rate,
weight_decay_rate=weight_decay,
beta_1=0.9,
beta_2=0.999,
epsilon=epsilon,
exclude_from_weight_decay=["LayerNorm", "layer_norm", "bias"]
)
elif optimizer_type == 'lamb':
optimizer = LAMBOptimizer(
learning_rate,
weight_decay_rate=weight_decay,
beta_1=0.9,
beta_2=0.999,
epsilon=epsilon,
exclude_from_weight_decay=["LayerNorm", "layer_norm", "bias"]
)
elif optimizer_type == 'adam':
optimizer = tf.train.AdamOptimizer(
learning_rate=learning_rate,
beta1=0.9,
beta2=0.999,
epsilon=epsilon)
elif optimizer_type == 'sgd':
optimizer = tf.train.GradientDescentOptimizer(
learning_rate=learning_rate
)
elif optimizer_type == 'adadelta':
optimizer = tf.train.AdadeltaOptimizer(
learning_rate=learning_rate,
rho=0.95,
epsilon=epsilon,
)
elif optimizer_type == 'adagrad':
optimizer = tf.train.AdagradOptimizer(
learning_rate=learning_rate,
initial_accumulator_value=0.1
)
elif optimizer_type == 'rmsp':
optimizer = tf.train.RMSPropOptimizer(
learning_rate=learning_rate,
decay=0.9,
momentum=momentum,
epsilon=epsilon,
)
else:
raise ValueError('Unsupported optimizer option: %s' % optimizer_type)
if mixed_precision:
loss_scaler = tf.train.experimental.DynamicLossScale(
initial_loss_scale=init_loss_scale, increment_period=1000,
multiplier=2.0)
optimizer = tf.train.experimental.enable_mixed_precision_graph_rewrite(optimizer, loss_scaler)
loss_scale_value = tf.identity(loss_scaler(), name="loss_scale")
return optimizer
def create_train_op(
optimizer,
grads_and_vars,
max_grad=1.0,
mixed_precision=False,
gradient_accumulation_steps=1):
global_step = tf.train.get_or_create_global_step()
if gradient_accumulation_steps > 1:
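        # Descriptive note (added): in this branch the clipped gradients of
        # `gradient_accumulation_steps` consecutive mini-batches are summed into
        # `accum_vars`; the optimizer is applied, and global_step advanced, only
        # on the iteration that completes an accumulation cycle.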
local_step = tf.get_variable(name="local_step", shape=[], dtype=tf.int32, trainable=False,
initializer=tf.zeros_initializer)
batch_finite = tf.get_variable(name="batch_finite", shape=[], dtype=tf.bool, trainable=False,
initializer=tf.ones_initializer)
accum_vars = [tf.get_variable(
name=tvar.name.split(":")[0] + "/accum",
shape=tvar.shape.as_list(),
dtype=tf.float32,
trainable=False,
initializer=tf.zeros_initializer()) for tvar in tf.trainable_variables()]
reset_step = tf.cast(tf.math.equal(local_step % gradient_accumulation_steps, 0), dtype=tf.bool)
local_step = tf.cond(reset_step, lambda: local_step.assign(tf.ones_like(local_step)),
lambda: local_step.assign_add(1))
grads_and_vars_and_accums = [(gv[0], gv[1], accum_vars[i]) for i, gv in enumerate(grads_and_vars) if
gv[0] is not None]
grads, tvars, accum_vars = list(zip(*grads_and_vars_and_accums))
all_are_finite = tf.reduce_all(
[tf.reduce_all(tf.is_finite(g)) for g in grads]) if mixed_precision else tf.constant(
True,
dtype=tf.bool)
batch_finite = tf.cond(reset_step,
lambda: batch_finite.assign(
tf.math.logical_and(tf.constant(True, dtype=tf.bool), all_are_finite)),
lambda: batch_finite.assign(tf.math.logical_and(batch_finite, all_are_finite)))
# This is how the model was pre-trained.
# ensure global norm is a finite number
# to prevent clip_by_global_norm from having a hizzy fit.
(clipped_grads, _) = tf.clip_by_global_norm(
grads, clip_norm=max_grad)
accum_vars = tf.cond(reset_step,
lambda: [accum_vars[i].assign(grad) for i, grad in enumerate(clipped_grads)],
lambda: [accum_vars[i].assign_add(grad) for i, grad in enumerate(clipped_grads)])
def update(accum_vars):
return optimizer.apply_gradients(list(zip(accum_vars, tvars)))
update_step = tf.identity(
tf.cast(tf.math.equal(local_step % gradient_accumulation_steps, 0), dtype=tf.bool),
name="update_step")
update_op = tf.cond(update_step,
lambda: update(accum_vars), lambda: tf.no_op())
new_global_step = tf.cond(tf.math.logical_and(update_step, batch_finite),
lambda: global_step + 1,
lambda: global_step)
new_global_step = tf.identity(new_global_step, name='step_update')
train_op = tf.group(update_op, [global_step.assign(new_global_step)])
else:
grads_and_vars = [(g, v) for g, v in grads_and_vars if g is not None]
grads, tvars = list(zip(*grads_and_vars))
all_are_finite = tf.reduce_all(
[tf.reduce_all(tf.is_finite(g)) for g in grads]) if mixed_precision else tf.constant(True,
dtype=tf.bool)
# This is how the model was pre-trained.
# ensure global norm is a finite number
# to prevent clip_by_global_norm from having a hizzy fit.
(clipped_grads, _) = tf.clip_by_global_norm(
grads, clip_norm=max_grad)
        # Don't pass global_step in here: this AdamW does not advance global_step internally,
        # whereas TF's built-in optimizers (e.g. Adam) do, which would make global_step increase twice.
train_op = optimizer.apply_gradients(
list(zip(clipped_grads, tvars)))
new_global_step = tf.cond(all_are_finite, lambda: global_step + 1, lambda: global_step)
new_global_step = tf.identity(new_global_step, name='step_update')
train_op = tf.group(train_op, [global_step.assign(new_global_step)])
return train_op
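def _example_build_train_op(loss, num_train_steps, num_warmup_steps):
    # Illustrative wiring sketch (added; not used by the library): how the two
    # helpers above are typically combined. `compute_gradients` is assumed to be
    # inherited from tf.train.Optimizer by the returned optimizer; the learning
    # rate below is just a placeholder.
    optimizer = create_optimizer(
        learning_rate=5e-5,
        num_train_steps=num_train_steps,
        num_warmup_steps=num_warmup_steps,
        optimizer_type='adamw')
    grads_and_vars = optimizer.compute_gradients(loss, tf.trainable_variables())
    return create_train_op(optimizer, grads_and_vars, max_grad=1.0)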
| [
"[email protected]"
] | |
84f66a0bf9e3af5d28f84b3115109b132927b474 | d66818f4b951943553826a5f64413e90120e1fae | /hackerearth/Algorithms/Chandu and chandni's secret chat/solution.py | 5347d87bca861f96880c7fd9b656c67c7b40092f | [
"MIT"
] | permissive | HBinhCT/Q-project | 0f80cd15c9945c43e2e17072416ddb6e4745e7fa | 19923cbaa3c83c670527899ece5c3ad31bcebe65 | refs/heads/master | 2023-08-30T08:59:16.006567 | 2023-08-29T15:30:21 | 2023-08-29T15:30:21 | 247,630,603 | 8 | 1 | MIT | 2020-07-22T01:20:23 | 2020-03-16T06:48:02 | Python | UTF-8 | Python | false | false | 605 | py | """
# Sample code to perform I/O:
name = input() # Reading input from STDIN
print('Hi, %s.' % name) # Writing output to STDOUT
# Warning: Printing unwanted or ill-formatted data to output will cause the test cases to fail
"""
# Write your code here
t = int(input())
for _ in range(t):
s, k = input().strip().split()
k = int(k)
idxes = list(range(len(s)))
idxes.sort(key=lambda i: s[i], reverse=True)
idx = idxes[k - 1]
word = ''
for _ in range(len(s)):
word += s[idx]
idx = idxes[idx]
word = word[-1] + word[:-1]
print(word)
| [
"[email protected]"
] | |
b2b80179d0f8ddc7a76ed005cbc3670219bb8091 | 28f0dc2b48ed019dfef08d84e842c5d75e116dfc | /Versions/Release.2.x.x/py/OBSOLETE/BibleTable.py | cced9d9f98a8cb768a4c415715efb06a18c3eb2b | [
"MIT",
"LicenseRef-scancode-public-domain"
] | permissive | garygriswold/SafeBible | 9da0e8d89cb08888b8cf48773b4b3860086c49f7 | 2d378e84cbd6b81641bcccd6ba66699d24208548 | refs/heads/master | 2022-02-25T19:41:10.367183 | 2019-08-22T03:35:02 | 2019-08-22T03:35:02 | 34,028,119 | 0 | 0 | MIT | 2019-10-30T07:11:44 | 2015-04-16T01:40:19 | TSQL | UTF-8 | Python | false | false | 3,942 | py | #
# This program generates SQL statements to create and populate the Bible table
# This was previously called the Version table
#
import io
import os
import json
out = io.open("sql/bible.sql", mode="w", encoding="utf-8")
out.write(u"DROP TABLE IF EXISTS Bible;\n")
out.write(u"CREATE TABLE Bible (\n")
out.write(u" bibleId TEXT NOT NULL PRIMARY KEY,\n") # info.json filename[5:18]
out.write(u" code TEXT NOT NULL,\n") # info.json abbr
out.write(u" abbr TEXT NOT NULL,\n") # info.json abbr char 4-6
out.write(u" iso3 TEXT NOT NULL REFERENCES Language(iso3),\n") # info.json lang
out.write(u" name TEXT NOT NULL,\n") # info.json name
out.write(u" englishName TEXT NULL,\n") # info.json nameEnglish
out.write(u" localizedName TEXT NULL,\n") # Google Translate API
out.write(u" direction TEXT CHECK (direction IN('ltr','rtl')) default('ltr'),\n") # info.json dir
out.write(u" script TEXT NULL,\n") # info.json script
out.write(u" country TEXT NULL REFERENCES Country(code),\n") # info.json countryCode
out.write(u" s3Bucket TEXT NOT NULL,\n") # this program
out.write(u" s3KeyPrefix TEXT NOT NULL,\n") # info.json filename
out.write(u" s3Key TEXT NULL,\n") # %I_%O_%B_%C.html
# I cannot find the program which generated this template: s3KeyTemplate.py
out.write(u" s3CredentialId TEXT NULL,\n") # TBD
out.write(u" otDamId TEXT NULL,\n") # BibleUpdateDamId.py
out.write(u" ntDamId TEXT NULL,\n") # BibleUpdateDamId.py
out.write(u" stylesheet TEXT NOT NULL);\n") # constant stylesheet
prefix2 = "INSERT INTO Bible (bibleId, code, abbr, iso3, name, englishName, direction, script, country, s3Bucket, s3KeyPrefix, s3Key, stylesheet) VALUES"
stylesheet = "BibleApp2.css"
# read and process all info.json files
source = "/Users/garygriswold/ShortSands/DBL/FCBH_info/"
filelist = sorted(os.listdir(source))
for filename in filelist:
#if len(filename) != 28:
#print(len(filename), filename)
#else:
if len(filename) == 28:
#print(filename)
input2 = io.open(source + filename, mode="r", encoding="utf-8")
data = input2.read()
bible = json.loads(data)
bibleId = filename[5:18]
# check type to see if == bible
bType = bible['type']
if bType != 'bible':
print "?? Type = ", bType
# check abbr to see if different from bibleId
code = bible['abbr']
# remove lang code from abbr
abbr = code[3:]
# check that lang == first 3 letters of bibleId
iso3 = bible['lang']
if iso3.upper() != code[0:3]:
print "?? abbr=", code, " iso3=", iso3
iso3 = iso3.lower()
name = bible['name'].replace("'", "''")
englishName = bible['nameEnglish'].replace("'", "''")
direction = bible['dir']
# convert script to iso 15924 code
script = bible.get('script')
validScripts = [None, 'Arab', 'Beng', 'Bugi', 'Cans', 'Cyrl', 'Deva', 'Ethi', 'Geor',
'Hans', 'Hant', 'Java', 'Kore', 'Latn', 'Orya', 'Syrc', 'Taml', 'Thai' ]
#if validScripts.index(script) < 0:
if script in validScripts:
a = 1
else:
if script == 'Latin':
script = 'Latn'
elif script == 'Cyrillic':
script = 'Cyrl'
elif script == 'Arabic':
script = 'Arab'
elif script == 'Devangari':
script = 'Deva'
elif script == 'Devanagari (Nagari)':
script = 'Deva'
elif script == 'CJK':
script = None
else:
print "ERROR: unknown script code", script, filename
script = "'" + script + "'" if script != None else 'null'
country = bible.get('countryCode')
country = "'" + country.upper() + "'" if len(country) > 0 else 'null'
bucket = "dbp-prod"
keyPrefix = filename.replace("info.json", "").replace(":", "/")
s3Key = '%I_%O_%B_%C.html'
out.write("%s ('%s', '%s', '%s', '%s', '%s', '%s', '%s', %s, %s, '%s', '%s', '%s', '%s');\n" %
(prefix2, bibleId, code, abbr, iso3, name, englishName, direction, script, country, bucket, keyPrefix, s3Key, stylesheet))
out.close() | [
"[email protected]"
] | |
1f33447947159a11ecf117ebfd09d4a0232c26ed | 890a6921b9dbc3d849ee51366c76a791761d35d2 | /.qt_for_python/uic/PlacefieldVisualSelectionWidgetBase.py | e5bfd84f65b40625dd5b625a786686f1a3fc1927 | [] | no_license | CommanderPho/Spike3D | 87e1ea17a76080e18e835e9d015e7fe7bb3426e4 | 63e5e78c3bcb28f3dbab02d6354e6eb83cbccc2a | refs/heads/master | 2023-08-17T10:40:44.389682 | 2023-08-16T10:57:12 | 2023-08-16T10:57:12 | 413,545,455 | 2 | 0 | null | 2022-10-22T05:54:57 | 2021-10-04T18:48:06 | Jupyter Notebook | UTF-8 | Python | false | false | 4,107 | py | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'c:\Users\pho\repos\pyPhoPlaceCellAnalysis\src\pyphoplacecellanalysis\GUI\Qt\PlacefieldVisualSelectionControls\PlacefieldVisualSelectionWidgetBase.ui'
#
# Created by: PyQt5 UI code generator 5.15.4
#
# WARNING: Any manual changes made to this file will be lost when pyuic5 is
# run again. Do not edit this file unless you know what you are doing.
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_rootForm(object):
def setupUi(self, rootForm):
rootForm.setObjectName("rootForm")
rootForm.resize(94, 126)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(rootForm.sizePolicy().hasHeightForWidth())
rootForm.setSizePolicy(sizePolicy)
rootForm.setMinimumSize(QtCore.QSize(50, 0))
rootForm.setBaseSize(QtCore.QSize(50, 126))
rootForm.setStyleSheet("background-color: rgb(71, 58, 46);\n"
"border-color: rgb(207, 207, 207);\n"
"background-color: rgba(71, 65, 60, 180);\n"
"color: rgb(244, 244, 244);\n"
"border-color: rgb(0, 0, 0);")
self.gridLayout = QtWidgets.QGridLayout(rootForm)
self.gridLayout.setContentsMargins(0, 0, 0, 0)
self.gridLayout.setSpacing(0)
self.gridLayout.setObjectName("gridLayout")
self.groupBox = QtWidgets.QGroupBox(rootForm)
self.groupBox.setMinimumSize(QtCore.QSize(50, 0))
self.groupBox.setMaximumSize(QtCore.QSize(160, 160))
self.groupBox.setBaseSize(QtCore.QSize(50, 0))
self.groupBox.setAlignment(QtCore.Qt.AlignHCenter|QtCore.Qt.AlignTop)
self.groupBox.setFlat(False)
self.groupBox.setObjectName("groupBox")
self.verticalLayout_2 = QtWidgets.QVBoxLayout(self.groupBox)
self.verticalLayout_2.setContentsMargins(2, 0, 2, 4)
self.verticalLayout_2.setSpacing(2)
self.verticalLayout_2.setObjectName("verticalLayout_2")
self.btnTitle = QtWidgets.QPushButton(self.groupBox)
self.btnTitle.setObjectName("btnTitle")
self.verticalLayout_2.addWidget(self.btnTitle)
self.btnColorButton = ColorButton(self.groupBox)
self.btnColorButton.setEnabled(False)
self.btnColorButton.setMinimumSize(QtCore.QSize(24, 24))
self.btnColorButton.setText("")
self.btnColorButton.setObjectName("btnColorButton")
self.verticalLayout_2.addWidget(self.btnColorButton)
self.chkbtnPlacefield = QtWidgets.QToolButton(self.groupBox)
self.chkbtnPlacefield.setCheckable(True)
self.chkbtnPlacefield.setChecked(False)
self.chkbtnPlacefield.setPopupMode(QtWidgets.QToolButton.DelayedPopup)
self.chkbtnPlacefield.setToolButtonStyle(QtCore.Qt.ToolButtonTextBesideIcon)
self.chkbtnPlacefield.setObjectName("chkbtnPlacefield")
self.verticalLayout_2.addWidget(self.chkbtnPlacefield)
self.chkbtnSpikes = QtWidgets.QToolButton(self.groupBox)
self.chkbtnSpikes.setCheckable(True)
self.chkbtnSpikes.setChecked(False)
self.chkbtnSpikes.setPopupMode(QtWidgets.QToolButton.DelayedPopup)
self.chkbtnSpikes.setToolButtonStyle(QtCore.Qt.ToolButtonTextBesideIcon)
self.chkbtnSpikes.setObjectName("chkbtnSpikes")
self.verticalLayout_2.addWidget(self.chkbtnSpikes)
self.gridLayout.addWidget(self.groupBox, 0, 0, 1, 1)
self.retranslateUi(rootForm)
QtCore.QMetaObject.connectSlotsByName(rootForm)
def retranslateUi(self, rootForm):
_translate = QtCore.QCoreApplication.translate
rootForm.setWindowTitle(_translate("rootForm", "Pf"))
self.groupBox.setTitle(_translate("rootForm", "pf[i]"))
self.btnTitle.setText(_translate("rootForm", "pf[i]"))
self.chkbtnPlacefield.setText(_translate("rootForm", "pf"))
self.chkbtnSpikes.setText(_translate("rootForm", "spikes"))
from pyphoplacecellanalysis.External.pyqtgraph.widgets.ColorButton import ColorButton
| [
"[email protected]"
] | |
274068ca686b8f9e974d3eafdfe1eeb963c21cbf | 0e1e643e864bcb96cf06f14f4cb559b034e114d0 | /Exps_7_v3/doc3d/Ablation4_ch016_ep003_7_10/Gather2_W_fixGood_C_change/train/pyr_2s/L6/step10_a.py | f0754e9c5056e21c626cfe3e46433c46c93290ba | [] | no_license | KongBOy/kong_model2 | 33a94a9d2be5b0f28f9d479b3744e1d0e0ebd307 | 1af20b168ffccf0d5293a393a40a9fa9519410b2 | refs/heads/master | 2022-10-14T03:09:22.543998 | 2022-10-06T11:33:42 | 2022-10-06T11:33:42 | 242,080,692 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 22,317 | py | #############################################################################################################################################################################################################
#############################################################################################################################################################################################################
### 把 kong_model2 加入 sys.path
import os
code_exe_path = os.path.realpath(__file__) ### 目前執行 step10_b.py 的 path
code_exe_path_element = code_exe_path.split("\\") ### 把 path 切分 等等 要找出 kong_model 在第幾層
code_dir = "\\".join(code_exe_path_element[:-1])
kong_layer = code_exe_path_element.index("kong_model2") ### 找出 kong_model2 在第幾層
kong_model2_dir = "\\".join(code_exe_path_element[:kong_layer + 1]) ### 定位出 kong_model2 的 dir
import sys ### 把 kong_model2 加入 sys.path
sys.path.append(kong_model2_dir)
sys.path.append(code_dir)
# print(__file__.split("\\")[-1])
# print(" code_exe_path:", code_exe_path)
# print(" code_exe_path_element:", code_exe_path_element)
# print(" code_dir:", code_dir)
# print(" kong_layer:", kong_layer)
# print(" kong_model2_dir:", kong_model2_dir)
#############################################################################################################################################################################################################
kong_to_py_layer = len(code_exe_path_element) - 1 - kong_layer  ### the -1 in the middle converts a length into an index
# print(" kong_to_py_layer:", kong_to_py_layer)
if (kong_to_py_layer == 0): template_dir = ""
elif(kong_to_py_layer == 2): template_dir = code_exe_path_element[kong_layer + 1][0:]  ### [7:] was to strip the step1x_ prefix; later felt meaningful names are fine to keep, so changed to 0
elif(kong_to_py_layer == 3): template_dir = code_exe_path_element[kong_layer + 1][0:] + "/" + code_exe_path_element[kong_layer + 2][0:]  ### [5:] was to strip mask_; the leading mask_ was only added because a python module name cannot start with a digit. Later felt the automatic ordering is acceptable too, so changed to 0
elif(kong_to_py_layer > 3): template_dir = code_exe_path_element[kong_layer + 1][0:] + "/" + code_exe_path_element[kong_layer + 2][0:] + "/" + "/".join(code_exe_path_element[kong_layer + 3: -1])
# print(" template_dir:", template_dir)  ### example: template_dir: 7_mask_unet/5_os_book_and_paper_have_dtd_hdr_mix_bg_tv_s04_mae
#############################################################################################################################################################################################################
exp_dir = template_dir
#############################################################################################################################################################################################################
from step06_a_datas_obj import *
from step09_2side_L6 import *
from step10_a2_loss_info_obj import *
from step10_b2_exp_builder import Exp_builder
rm_paths = [path for path in sys.path if code_dir in path]
for rm_path in rm_paths: sys.path.remove(rm_path)
rm_moduless = [module for module in sys.modules if "step09" in module]
for rm_module in rm_moduless: del sys.modules[rm_module]
import Exps_7_v3.doc3d.Ablation4_ch016_ep003_7_10.W_w_M_to_C_pyr.pyr_2s.L6.step10_a as W_w_M_to_C_p20_pyr
from Exps_7_v3.doc3d.Ablation4_ch016_ep003_7_10.I_w_M_to_W_pyr.pyr_3s.L5.step10_a import ch032_1side_6__2side_5__3side_2__ep010 as I_w_M_to_W_p20_3s_L5_Good
#############################################################################################################################################################################################################
'''
exp_dir is the name of the folder "one level above" result_dir! It is fine for exp_dir to be nested~
For example: exp_dir = "6_mask_unet/a_name_you_pick", then the result_dirs all live in:
    6_mask_unet/a_name_you_pick/result_a
    6_mask_unet/a_name_you_pick/result_b
    6_mask_unet/a_name_you_pick/...
'''
use_db_obj = type8_blender_kong_doc3d_v2
use_loss_obj = [mae_s001_sobel_k9_s001_loss_info_builder.set_loss_target("UNet_Wz").copy(), mae_s001_sobel_k9_s001_loss_info_builder.set_loss_target("UNet_Wy").copy(), mae_s001_sobel_k9_s001_loss_info_builder.set_loss_target("UNet_Wx").copy(), mae_s001_sobel_k9_s001_loss_info_builder.set_loss_target("UNet_Cx").copy(), mae_s001_sobel_k9_s001_loss_info_builder.set_loss_target("UNet_Cy").copy()] ### the z, y, x order is matched against step07_b_0b_Multi_UNet
#############################################################
### Build an empty Exp_builder so that resul_analyze can draw blank figures
empty = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_1__2side_1_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_1__2side_1_and_1s6_2s6.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="為了resul_analyze畫空白的圖,建一個empty的 Exp_builder")
#############################################################
ch032_1side_1__2side_1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_1__2side_1_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s1__2s1") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_1__2side_1, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_2__2side_1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_2__2side_1_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s2__2s1") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_2__2side_1, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_2__2side_2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_2__2side_2_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s2__2s2") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_2__2side_2, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_3__2side_1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_3__2side_1_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s3__2s1") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_3__2side_1, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_3__2side_2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_3__2side_2_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s3__2s2") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_3__2side_2, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_3__2side_3 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_3__2side_3_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s3__2s3") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_3__2side_3, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_4__2side_1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_4__2side_1_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s4__2s1") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_4__2side_1, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_4__2side_2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_4__2side_2_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s4__2s2") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_4__2side_2, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_4__2side_3 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_4__2side_3_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s4__2s3") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_4__2side_3, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_4__2side_4 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_4__2side_4_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s4__2s4") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_4__2side_4, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_5__2side_1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_1_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s5__2s1") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_5__2side_1, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_5__2side_2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_2_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s5__2s2") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_5__2side_2, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_5__2side_3 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_3_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s5__2s3") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_5__2side_3, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_5__2side_4 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_4_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s5__2s4") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_5__2side_4, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_5__2side_5 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_5_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s5__2s5") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_5__2side_5, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_6__2side_1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_6__2side_1_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s6__2s1") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_6__2side_1, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_6__2side_2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_6__2side_2_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s6__2s2") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_6__2side_2, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_6__2side_3 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_6__2side_3_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s6__2s3") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_6__2side_3, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_6__2side_4 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_6__2side_4_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s6__2s4") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_6__2side_4, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_6__2side_5 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_6__2side_5_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s6__2s5") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_6__2side_5, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_6__2side_6 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_6__2side_6_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s6__2s6") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_6__2side_6, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_7__2side_1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_7__2side_1_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s7__2s1") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_7__2side_1, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_7__2side_2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_7__2side_2_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s7__2s2") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_7__2side_2, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_7__2side_3 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_7__2side_3_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s7__2s3") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_7__2side_3, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_7__2side_4 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_7__2side_4_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s7__2s4") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_7__2side_4, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_7__2side_5 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_7__2side_5_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s7__2s5") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_7__2side_5, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_7__2side_6 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_7__2side_6_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s7__2s6") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_7__2side_6, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_7__2side_7 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_7__2side_7_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s7__2s7") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_7__2side_7, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
#############################################################
if(__name__ == "__main__"):
print("build exps cost time:", time.time() - start_time)
if len(sys.argv) < 2:
############################################################################################################
        ### Run this directly with F5, or with "python step10_b1_exp_obj_load_and_train_and_test.py" and nothing appended after it! That way it won't fall through to the code below, which is meant for step10_b_subprocss.py ~~~
ch032_1side_1__2side_1.build().run()
# print('no argument')
sys.exit()
    ### The following is for step10_b_subprocess.py; it is equivalent to running "python step10_b1_exp_obj_load_and_train_and_test.py some_exp.build().run()" from cmd
eval(sys.argv[1])
| [
"[email protected]"
] | |
db8551c3b00fdaa9cea83beff7f976a27482b764 | 0486b6ccf883e9cd7a24bbd89b5420e7de2172b9 | /DRF Study Material/Django REST Code/gs23/manage.py | 0fd63ec5bb04d9d85aeb9039d1fb86e9be16bd10 | [] | no_license | ajitexl/restfrmaework | 2980203d7faa6c8364288283758d32c8f2a37817 | 9ab203748e623516365d9924dcc68acc786a66e1 | refs/heads/main | 2023-02-03T08:52:00.672047 | 2020-12-10T09:50:51 | 2020-12-10T09:50:51 | 320,222,997 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 682 | py | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
"""Run administrative tasks."""
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'gs23.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
| [
"[email protected]"
] | |
a9d0bc7546f7ca3723a17a3f5fd7ba086d51f28c | 21bf726bf895569a41a8b8d2db6772dc51f46cfd | /OTHERS/Interviews/Akuna.py | abba9d6a6b4e54cda0048899107ec10a4fa00cc0 | [] | no_license | jeffsnguyen/Python-1 | dd924d25337cd6ac21e321d7b2c5ac17c065d94b | 463d32a61a760d076656c73c9f8c9fadf262438d | refs/heads/master | 2022-03-23T09:50:04.476094 | 2019-12-23T12:32:49 | 2019-12-23T12:32:49 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,844 | py | def if_none_trivial(x):
if x==0:
return 0
else:
return 1
def violet_search_icecream_shop(stock, max_capacity,demands,n_days,overnight_fee,price,deliver_fee,total_expense=[],expense=0):
delivery_min = max(0,demands[0]-stock)
delivery_max = max(0,sum(demands) - stock)
for delivery in range(delivery_min,delivery_max+1,1):
expense_today = expense + if_none_trivial(delivery)*deliver_fee + delivery*price
expense_today = expense_today + max(0,(stock+delivery-max_capacity))*overnight_fee
stock_next = stock+delivery-demands[0]
print("***********************")
print("expense until yesterday: ",expense)
print("expense until today: ", expense_today)
print(n_days, "remains")
if n_days>1:
violet_search_icecream_shop(stock_next, max_capacity,demands[1:],n_days-1,overnight_fee,price,deliver_fee,total_expense,expense_today)
else:
print("%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%")
print("total expense",expense_today)
total_expense.append(expense_today)
# yield(expense_today)
total_expense=[]
violet_search_icecream_shop(0,10,[1,2,1,4],4,1,3,4,total_expense=total_expense)
print(total_expense)
print("the optimum cost:", min(total_expense))
from collections import defaultdict
def code_preprocessing(delivery_code):
    # Group delivery codes by prefix; each entry keeps (suffix, original index).
    code_dic = defaultdict(list)
    for i, code in enumerate(delivery_code):
        crude = code.split('-', 1)
        code_dic[crude[0]].append((crude[1], i))
    print(code_dic)
    return code_dic
code_dict = code_preprocessing(["123-2", "2345-1", "123-3", "123-5", "2345-5"])
def swarm_delivery(code_dict):
    # Collect the grouped (suffix, index) lists, one per prefix.
    bee = []
    for key, value in code_dict.items():
        bee.append(value)
    print(bee)
swarm_delivery(code_dict)
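# Hedged illustration (added commentary, not part of the original interview
# notes): with the `return code_dic` restored above, code_preprocessing groups
# codes by prefix and swarm_delivery gathers the grouped (suffix, index) lists.
# The sample codes below are made up purely for demonstration.
if __name__ == "__main__":
    demo_groups = code_preprocessing(["A1-1", "A1-2", "B7-1"])
    # demo_groups maps 'A1' -> [('1', 0), ('2', 1)] and 'B7' -> [('1', 2)]
    swarm_delivery(demo_groups)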
| [
"[email protected]"
] | |
5b771ee4fa02ac609d1a9cff17e724f9d74cdcdc | 255e19ddc1bcde0d3d4fe70e01cec9bb724979c9 | /all-gists/5041434/snippet.py | a978469c85746091bf61c1a14c4ddfde95ab6244 | [
"MIT"
] | permissive | gistable/gistable | 26c1e909928ec463026811f69b61619b62f14721 | 665d39a2bd82543d5196555f0801ef8fd4a3ee48 | refs/heads/master | 2023-02-17T21:33:55.558398 | 2023-02-11T18:20:10 | 2023-02-11T18:20:10 | 119,861,038 | 76 | 19 | null | 2020-07-26T03:14:55 | 2018-02-01T16:19:24 | Python | UTF-8 | Python | false | false | 1,470 | py | import requests
from bs4 import BeautifulSoup, NavigableString
def get_review_text(block):
"""Get just the text of a review from it's DIV"""
strings = []
for possible_text in block.children:
if isinstance(possible_text, NavigableString):
stripped_text = possible_text.strip()
if len(stripped_text) > 0:
strings.append(stripped_text)
return "\n".join(strings)
def get_review_texts(review_html):
"""Get all the reviews on a review page"""
soup = BeautifulSoup(review_html)
table = soup.find(id="productReviews").tr.td
review_blocks = table.find_all("div", recursive=False)
return [get_review_text(block) for block in review_blocks]
def get_review_page_count(review_html):
"""Get the number of review pages"""
soup = BeautifulSoup(review_html)
try:
return int(soup.find("span", class_="paging").find_all("a")[-2].text)
except:
return 1
def get_all_reviews(review_url):
"""Get all the reviews, given a review page URL"""
# sanitize the url
review_url = "/".join(review_url.split("/")[:-1])
first_review_page = requests.get(review_url).text
review_page_count = get_review_page_count(first_review_page)
reviews = []
for i in range(1, review_page_count + 1):
url = review_url + "?pageNumber=%d" % i
review_html = requests.get(url).text
reviews.extend(get_review_texts(review_html))
return reviews | [
"[email protected]"
] | |
dac74fe07f41bad595f3daece43a0047c4795112 | c105570f12f1d56087ffb831f5d34cd763d6c90b | /top/api/rest/HotelRoomImgDeleteRequest.py | 55f04fe4e6cf46864d486cf6bd4e5cf9dc7d3abd | [] | no_license | wjianwei126/Alinone | 01607423833d7736b2fd3c77e9e21f63c69b4e4c | 80144d4657cb049d651c09647eb245405240f12f | refs/heads/master | 2020-12-07T05:14:58.746777 | 2015-05-06T12:48:33 | 2015-05-06T12:48:33 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 337 | py | '''
Created by auto_sdk on 2014-11-09 14:51:18
'''
from top.api.base import RestApi
class HotelRoomImgDeleteRequest(RestApi):
def __init__(self,domain='gw.api.taobao.com',port=80):
RestApi.__init__(self,domain, port)
self.gid = None
self.position = None
def getapiname(self):
return 'taobao.hotel.room.img.delete'
| [
"[email protected]"
] | |
38989ea3cc2d9323d7df74726b4cbe4770c237d1 | 1d0895269d1d93bab6a0b595c806418b1eeda735 | /qiskit/providers/ibmq/api/rest/experiment.py | 21cea49b8c27b3ce319c61a25cd6e4314e06b812 | [
"Apache-2.0"
] | permissive | Qiskit/qiskit-ibmq-provider | 3921bf5f77a9621013ada7ea5e18fa199470650c | 590f68d9ddb42a45c4ac8a8626ea60da85575b21 | refs/heads/master | 2023-06-08T03:17:52.745052 | 2023-06-05T14:20:16 | 2023-06-05T14:20:16 | 163,192,893 | 240 | 182 | Apache-2.0 | 2023-06-05T14:20:18 | 2018-12-26T15:22:11 | Python | UTF-8 | Python | false | false | 5,218 | py | # This code is part of Qiskit.
#
# (C) Copyright IBM 2020.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""Experiment REST adapter."""
import logging
from typing import Dict, Union
from .base import RestAdapterBase
from ..session import RetrySession
logger = logging.getLogger(__name__)
class Experiment(RestAdapterBase):
"""Rest adapter for experiment related endpoints."""
URL_MAP = {
'self': '',
'upload_plots': '/plots'
}
def __init__(self, session: RetrySession, experiment_uuid: str, url_prefix: str = '') -> None:
"""Experiment constructor.
Args:
session: Session to be used in the adaptor.
experiment_uuid: UUID of the experiment.
url_prefix: URL prefix.
"""
super().__init__(session, '{}/experiments/{}'.format(url_prefix, experiment_uuid))
def retrieve(self) -> str:
"""Retrieve the specific experiment.
Returns:
Experiment data.
"""
url = self.get_url('self')
return self.session.get(url).text
def update(self, experiment: str) -> Dict:
"""Update the experiment.
Args:
experiment: Experiment to update.
Returns:
JSON response.
"""
url = self.get_url('self')
return self.session.put(url, data=experiment, headers=self._HEADER_JSON_CONTENT).json()
def delete(self) -> Dict:
"""Delete the experiment.
Returns:
JSON response.
"""
url = self.get_url('self')
return self.session.delete(url).json()
def upload_plot(
self,
plot: Union[bytes, str],
plot_name: str,
sync_upload: bool = True
) -> Dict:
"""Upload a plot for the experiment.
Args:
plot: Plot file name or data to upload.
plot_name: Name of the plot.
sync_upload: By default the server will upload the plot file
to backend storage asynchronously. Set this to False to use
that behavior and not block the upload.
Returns:
JSON response.
"""
url = self.get_url('upload_plots')
headers = {
'x-sync-upload': str(sync_upload)
}
if isinstance(plot, str):
with open(plot, 'rb') as file:
data = {'plot': (plot_name, file)}
response = self.session.post(url, files=data, headers=headers).json()
else:
data = {'plot': (plot_name, plot)} # type: ignore[dict-item]
response = self.session.post(url, files=data, headers=headers).json()
return response
class ExperimentPlot(RestAdapterBase):
"""Rest adapter for experiment plot related endpoints."""
URL_MAP = {
'self': ''
}
def __init__(
self,
session: RetrySession,
experiment_uuid: str,
plot_name: str,
url_prefix: str = '') -> None:
"""Experiment constructor.
Args:
session: Session to be used in the adaptor.
experiment_uuid: UUID of the experiment.
plot_name: Name of the plot.
url_prefix: URL prefix.
"""
super().__init__(session, '{}/experiments/{}/plots/{}'.format(
url_prefix, experiment_uuid, plot_name))
self.plot_name = plot_name
def retrieve(self) -> bytes:
"""Retrieve the specific experiment plot.
Returns:
Plot content.
"""
url = self.get_url('self')
response = self.session.get(url)
return response.content
def delete(self) -> None:
"""Delete this experiment plot."""
url = self.get_url('self')
self.session.delete(url)
def update(
self,
plot: Union[bytes, str],
sync_upload: bool = True
) -> Dict:
"""Update an experiment plot.
Args:
plot: Plot file name or data to upload.
sync_upload: By default the server will upload the plot file
to backend storage asynchronously. Set this to False to use
that behavior and not block the upload.
Returns:
JSON response.
"""
url = self.get_url('self')
headers = {
'x-sync-upload': str(sync_upload)
}
if isinstance(plot, str):
with open(plot, 'rb') as file:
data = {'plot': (self.plot_name, file)}
response = self.session.put(url, files=data, headers=headers).json()
else:
data = {'plot': (self.plot_name, plot)} # type: ignore[dict-item]
response = self.session.put(url, files=data, headers=headers).json()
return response
| [
"[email protected]"
] | |
be022fe3fefde1e08b0c0cafbf8646767f2ba51d | a65103e2f33192d9e6fcf8c8852f263369190175 | /core/models.py | c653f88b69de6db591ec935f5bf162047d706249 | [] | no_license | dhilipsiva/ircman | e23153572d5f8cf09d4ed7d47c47b90050762489 | 767b42f321598b155f2fd74729947ed92f8da160 | refs/heads/master | 2023-07-10T06:42:45.855788 | 2015-07-22T04:17:00 | 2015-07-22T04:17:00 | 35,310,806 | 6 | 0 | null | 2023-09-05T05:15:21 | 2015-05-09T01:58:04 | Python | UTF-8 | Python | false | false | 7,308 | py | #! /usr/bin/env python
# -*- coding: utf-8 -*-
#
# vim: fenc=utf-8
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4
#
#
"""
File name: models.py
Version: 0.1
Author: dhilipsiva <[email protected]>
Date created: 2015-05-09
"""
__author__ = "dhilipsiva"
__status__ = "development"
"""
"""
# Python imports
import datetime
from uuid import uuid4
# Django imports
from django.utils.timezone import utc
from django.contrib.auth.models import AbstractUser
from django.db.models import Model, ForeignKey, DateTimeField, UUIDField, \
CharField, TextField, PositiveIntegerField, BooleanField
def utc_now():
"""
`now` with UTC
"""
return datetime.datetime.utcnow().replace(tzinfo=utc)
class User(AbstractUser):
"""
A custom user so that we can add permissions easily
"""
id = UUIDField(primary_key=True, default=uuid4, editable=False)
socket = UUIDField(default=uuid4, editable=False)
class Meta(AbstractUser.Meta):
abstract = False
def save(self, *args, **kwargs):
if 'pbkdf2_sha256' not in self.password:
self.set_password(self.password)
super(User, self).save(*args, **kwargs)
def to_dict(self, with_sensitive_data=False):
"""
Dictify user
"""
d = {
'id': str(self.id),
'username': self.username,
'firstName': self.first_name,
'lastName': self.last_name,
}
if with_sensitive_data:
d.update({
'socket': str(self.socket),
'email': self.email,
})
return d
def __str__(self):
return self.username
def __repr__(self):
return "<User: %s>" % self.__str__()
class Server(Model):
id = UUIDField(primary_key=True, default=uuid4, editable=False)
host = CharField(max_length=256)
port = PositiveIntegerField(default=6667, blank=True)
is_ssl = BooleanField(default=False)
is_sasl = BooleanField(default=False)
def to_dict(self):
"""
        Dictify server
"""
return {
'id': str(self.id),
'host': self.host,
'port': self.port,
'isSsl': self.is_ssl,
'isSasl': self.is_sasl,
}
def __str__(self):
return self.host
def __repr__(self):
return "<Server: %s>" % self.__str__()
class UserServer(Model):
id = UUIDField(primary_key=True, default=uuid4, editable=False)
user = ForeignKey(User, related_name="user_servers")
server = ForeignKey(Server, related_name="user_servers")
label = CharField(max_length=256, default="My IRC Server")
username = CharField(max_length=256)
password = CharField(max_length=256, null=True, blank=True)
nickname = CharField(max_length=256)
realname = CharField(max_length=256, null=True, blank=True)
def to_dict(self):
"""
        Dictify user server
"""
return {
'id': str(self.id),
'user': str(self.user_id),
'server': str(self.server_id),
'label': self.label,
'username': self.username,
'password': self.password,
'nickname': self.nickname,
'realname': self.realname,
}
def __str__(self):
return "%s - %s" % (self.user, self.server)
def __repr__(self):
return "<UserServer: %s>" % self.__str__()
class Channel(Model):
id = UUIDField(primary_key=True, default=uuid4, editable=False)
server = ForeignKey(Server, related_name="channels")
name = CharField(max_length=256)
def to_dict(self):
"""
        Dictify channel
"""
return {
'id': str(self.id),
'server': str(self.server_id),
'name': self.name,
}
def __str__(self):
return "%s - %s" % (self.server, self.name)
def __repr__(self):
return "<Channel: %s>" % self.__str__()
class UserChannel(Model):
id = UUIDField(primary_key=True, default=uuid4, editable=False)
user_server = ForeignKey(
UserServer, related_name="user_channels", null=True)
channel = ForeignKey(Channel, related_name="user_channels")
nickname = CharField(max_length=256)
password = CharField(max_length=256, null=True, blank=True)
mode = CharField(max_length=16, null=True, blank=True)
def to_dict(self):
"""
        Dictify user channel
"""
return {
"id": str(self.id),
"userServer": str(self.user_server_id),
"channel": str(self.channel_id),
"nickname": self.nickname,
"password": self.password,
"mode": self.mode,
}
def to_dict_deep(self):
"""
Deep `to_dict`
"""
d = self.to_dict()
d['userServer'] = self.user_server.to_dict()
d['channel'] = self.channel.to_dict()
return d
def __str__(self):
return "%s - %s" % (self.channel, self.nickname)
def __repr__(self):
return "<UserChannel: %s>" % self.__str__()
class BaseMessage(Model):
id = UUIDField(primary_key=True, default=uuid4, editable=False)
text = TextField()
created_on = DateTimeField(auto_now_add=True)
def to_dict(self):
"""
        Dictify base message
"""
return {
'id': str(self.id),
'text': self.text,
'createdOn': self.created_on,
}
class Meta:
abstract = True
class Message(BaseMessage):
channel = ForeignKey(Channel, related_name="messages")
user_channel = ForeignKey(UserChannel, related_name="messages")
def to_dict(self):
"""
        Dictify message
"""
d = super(Message, self).to_dict()
d.update({
'channel': str(self.channel_id),
'userChannel': str(self.user_channel_id),
})
return d
def __str__(self):
return "%s" % self.text
def __repr__(self):
return "<Message: %s>" % self.__str__()
class Conversation(Model):
id = UUIDField(primary_key=True, default=uuid4, editable=False)
user_channel_1 = ForeignKey(UserChannel, related_name='+')
user_channel_2 = ForeignKey(UserChannel, related_name='+')
def to_dict(self):
"""
Dictify Conversation
"""
return {
'id': str(self.id),
'userChannel1': str(self.user_channel_1_id),
'userChannel2': str(self.user_channel_2_id),
}
def __str__(self):
return "%s - %s" % (self.user_channel_1_id, self.user_channel_2_id)
def __repr__(self):
return "<Conversation: %s>" % self.__str__()
class PrivateMessage(BaseMessage):
conversation = ForeignKey(Conversation, related_name='private_messages')
user_channel = ForeignKey(UserChannel, related_name='private_messages')
read = BooleanField(default=False)
def to_dict(self):
"""
        Dictify private message
"""
d = super(PrivateMessage, self).to_dict()
d.update({
'conversation': str(self.conversation_id),
'userChannel': str(self.user_channel_id),
'read': self.read,
})
return d
def __repr__(self):
return "<PrivateMessage: %s>" % self.__str__()
| [
"[email protected]"
] | |
26918a548c2f53fe227743b0c90bdc04e5101662 | 0e27a64e42d87e049284ad263b807e4c64057de9 | /tensorflow/python/ops/metrics_impl.py | 392bb5ea18c96e2ac1e85c7af7d70fba4d7bbd61 | [
"Apache-2.0"
] | permissive | shaayaan16/tensorflow | f721776f322e8e780cd569a7df026886317b9ac5 | 1ec32ec396d8efebac37325edf94b8948f1397b4 | refs/heads/master | 2021-01-12T02:04:58.634427 | 2017-06-07T05:10:35 | 2017-06-07T05:10:35 | 78,465,880 | 0 | 0 | null | 2017-01-09T20:25:38 | 2017-01-09T20:25:38 | null | UTF-8 | Python | false | false | 116,067 | py | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Implementation of tf.metrics module."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import confusion_matrix
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import sets
from tensorflow.python.ops import sparse_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
def _local_variable(initial_value, validate_shape=True, name=None):
"""Create variable and add it to `GraphKeys.LOCAL_VARIABLES` collection.
Args:
initial_value: See variables.Variable.__init__.
validate_shape: See variables.Variable.__init__.
name: See variables.Variable.__init__.
Returns:
New variable.
"""
return variables.Variable(
initial_value, trainable=False,
collections=[ops.GraphKeys.LOCAL_VARIABLES],
validate_shape=validate_shape, name=name)
def _remove_squeezable_dimensions(labels, predictions, weights):
"""Internal version of _remove_squeezable_dimensions which handles weights.
Squeezes `predictions` and `labels` if their rank differs by 1.
Squeezes `weights` if its rank is 1 more than the new rank of `predictions`
This will use static shape if available. Otherwise, it will add graph
operations, which could result in a performance hit.
Args:
labels: Label values, a `Tensor` whose dimensions match `predictions`.
predictions: Predicted values, a `Tensor` of arbitrary dimensions.
weights: Optional weight `Tensor`. It will be squeezed if its rank is 1
more than the new rank of `predictions`
Returns:
Tuple of `predictions`, `labels` and `weights`, possibly with the last
dimension squeezed.
"""
labels, predictions = confusion_matrix.remove_squeezable_dimensions(
labels, predictions)
predictions.get_shape().assert_is_compatible_with(labels.get_shape())
if weights is not None:
weights = ops.convert_to_tensor(weights)
predictions_shape = predictions.get_shape()
predictions_rank = predictions_shape.ndims
weights_shape = weights.get_shape()
weights_rank = weights_shape.ndims
if (predictions_rank is not None) and (weights_rank is not None):
# Use static rank.
if weights_rank - predictions_rank == 1:
weights = array_ops.squeeze(weights, [-1])
elif (weights_rank is None) or (
weights_shape.dims[-1].is_compatible_with(1)):
# Use dynamic rank
weights = control_flow_ops.cond(
math_ops.equal(array_ops.rank(weights),
math_ops.add(array_ops.rank(predictions), 1)),
lambda: array_ops.squeeze(weights, [-1]),
lambda: weights)
return labels, predictions, weights
def _maybe_expand_labels(labels, predictions):
"""If necessary, expand `labels` along last dimension to match `predictions`.
Args:
labels: `Tensor` or `SparseTensor` with shape
[D1, ... DN, num_labels] or [D1, ... DN]. The latter implies
num_labels=1, in which case the result is an expanded `labels` with shape
[D1, ... DN, 1].
predictions: `Tensor` with shape [D1, ... DN, num_classes].
Returns:
`labels` with the same rank as `predictions`.
Raises:
ValueError: if `labels` has invalid shape.
"""
with ops.name_scope(None, 'expand_labels', (labels, predictions)) as scope:
labels = sparse_tensor.convert_to_tensor_or_sparse_tensor(labels)
# If sparse, expand sparse shape.
if isinstance(labels, sparse_tensor.SparseTensor):
return control_flow_ops.cond(
math_ops.equal(
array_ops.rank(predictions),
array_ops.size(labels.dense_shape) + 1),
lambda: sparse_ops.sparse_reshape( # pylint: disable=g-long-lambda
labels,
shape=array_ops.concat_v2((labels.dense_shape, (1,)), 0),
name=scope),
lambda: labels)
# Otherwise, try to use static shape.
labels_rank = labels.get_shape().ndims
if labels_rank is not None:
predictions_rank = predictions.get_shape().ndims
if predictions_rank is not None:
if predictions_rank == labels_rank:
return labels
if predictions_rank == labels_rank + 1:
return array_ops.expand_dims(labels, -1, name=scope)
raise ValueError(
'Unexpected labels shape %s for predictions shape %s.' % (
labels.get_shape(), predictions.get_shape()))
# Otherwise, use dynamic shape.
return control_flow_ops.cond(
math_ops.equal(array_ops.rank(predictions), array_ops.rank(labels) + 1),
lambda: array_ops.expand_dims(labels, -1, name=scope),
lambda: labels)
def _create_local(name, shape, collections=None, validate_shape=True,
dtype=dtypes.float32):
"""Creates a new local variable.
Args:
name: The name of the new or existing variable.
shape: Shape of the new or existing variable.
collections: A list of collection names to which the Variable will be added.
validate_shape: Whether to validate the shape of the variable.
dtype: Data type of the variables.
Returns:
The created variable.
"""
# Make sure local variables are added to tf.GraphKeys.LOCAL_VARIABLES
collections = list(collections or [])
collections += [ops.GraphKeys.LOCAL_VARIABLES]
return variables.Variable(
initial_value=array_ops.zeros(shape, dtype=dtype),
name=name,
trainable=False,
collections=collections,
validate_shape=validate_shape)
def _assert_weights_rank(weights, values):
return check_ops.assert_rank_in(weights, (0, array_ops.rank(values)))
def _broadcast_weights(weights, values):
"""Broadcast `weights` to the same shape as `values`.
This returns a version of `weights` following the same broadcast rules as
`multiply(weights, values)`. When computing a weighted average, use this
function to broadcast `weights` before summing them; e.g.,
`reduce_sum(w * v) / reduce_sum(_broadcast_weights(w, v))`.
Args:
weights: `Tensor` whose rank is either 0, or the same rank as `values`, and
must be broadcastable to `values` (i.e., all dimensions must be either
`1`, or the same as the corresponding `values` dimension).
values: `Tensor` of any shape.
Returns:
`weights` broadcast to `values` shape.
Raises:
ValueError: if `weights` rank is invalid.
"""
weights_shape = weights.get_shape()
values_shape = values.get_shape()
if (weights_shape.is_fully_defined() and
values_shape.is_fully_defined() and
weights_shape.is_compatible_with(values_shape)):
return weights
with ops.control_dependencies((_assert_weights_rank(weights, values),)):
return math_ops.multiply(
weights, array_ops.ones_like(values), name='broadcast_weights')
def _safe_div(numerator, denominator, name):
"""Divides two values, returning 0 if the denominator is <= 0.
Args:
numerator: A real `Tensor`.
denominator: A real `Tensor`, with dtype matching `numerator`.
name: Name for the returned op.
Returns:
0 if `denominator` <= 0, else `numerator` / `denominator`
"""
return array_ops.where(
math_ops.greater(denominator, 0),
math_ops.truediv(numerator, denominator),
0,
name=name)
def _safe_scalar_div(numerator, denominator, name):
"""Divides two values, returning 0 if the denominator is 0.
Args:
numerator: A scalar `float64` `Tensor`.
denominator: A scalar `float64` `Tensor`.
name: Name for the returned op.
Returns:
0 if `denominator` == 0, else `numerator` / `denominator`
"""
numerator.get_shape().with_rank_at_most(1)
denominator.get_shape().with_rank_at_most(1)
return control_flow_ops.cond(
math_ops.equal(
array_ops.constant(0.0, dtype=dtypes.float64), denominator),
lambda: array_ops.constant(0.0, dtype=dtypes.float64),
lambda: math_ops.div(numerator, denominator),
name=name)
def mean(values, weights=None, metrics_collections=None,
updates_collections=None, name=None):
"""Computes the (weighted) mean of the given values.
The `mean` function creates two local variables, `total` and `count`
that are used to compute the average of `values`. This average is ultimately
returned as `mean` which is an idempotent operation that simply divides
`total` by `count`.
For estimation of the metric over a stream of data, the function creates an
`update_op` operation that updates these variables and returns the `mean`.
`update_op` increments `total` with the reduced sum of the product of `values`
and `weights`, and it increments `count` with the reduced sum of `weights`.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
values: A `Tensor` of arbitrary dimensions.
weights: Optional `Tensor` whose rank is either 0, or the same rank as
`values`, and must be broadcastable to `values` (i.e., all dimensions must
be either `1`, or the same as the corresponding `values` dimension).
metrics_collections: An optional list of collections that `mean`
should be added to.
updates_collections: An optional list of collections that `update_op`
should be added to.
name: An optional variable_scope name.
Returns:
mean: A `Tensor` representing the current mean, the value of `total` divided
by `count`.
update_op: An operation that increments the `total` and `count` variables
appropriately and whose value matches `mean_value`.
Raises:
ValueError: If `weights` is not `None` and its shape doesn't match `values`,
or if either `metrics_collections` or `updates_collections` are not a list
or tuple.
"""
with variable_scope.variable_scope(name, 'mean', (values, weights)):
values = math_ops.to_float(values)
total = _create_local('total', shape=[])
count = _create_local('count', shape=[])
if weights is None:
num_values = math_ops.to_float(array_ops.size(values))
else:
weights = _broadcast_weights(math_ops.to_float(weights), values)
values = math_ops.multiply(values, weights)
num_values = math_ops.reduce_sum(weights)
total_compute_op = state_ops.assign_add(total, math_ops.reduce_sum(values))
count_compute_op = state_ops.assign_add(count, num_values)
mean_t = _safe_div(total, count, 'value')
with ops.control_dependencies([total_compute_op, count_compute_op]):
update_op = _safe_div(total, count, 'update_op')
if metrics_collections:
ops.add_to_collections(metrics_collections, mean_t)
if updates_collections:
ops.add_to_collections(updates_collections, update_op)
return mean_t, update_op
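# Illustrative sketch (added commentary, not part of the original TensorFlow
# source): the streaming `mean` above keeps two accumulators, `total` and
# `count`, and reports total / count after every update, returning 0 while no
# weight has been accumulated. The pure-Python class below mirrors that
# contract for list inputs; the name `_StreamingMeanSketch` is invented here
# purely for illustration.
class _StreamingMeanSketch(object):
  """Pure-Python analogue of the `total` / `count` bookkeeping in `mean`."""
  def __init__(self):
    self.total = 0.0
    self.count = 0.0
  def update(self, values, weights=None):
    """Accumulates one batch and returns the running mean, like `update_op`."""
    if weights is None:
      weights = [1.0] * len(values)
    self.total += sum(v * w for v, w in zip(values, weights))
    self.count += sum(weights)
    return self.result()
  def result(self):
    """Returns total / count, or 0 if no weight has been accumulated."""
    return self.total / self.count if self.count > 0 else 0.0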
def accuracy(labels, predictions, weights=None, metrics_collections=None,
updates_collections=None, name=None):
"""Calculates how often `predictions` matches `labels`.
The `accuracy` function creates two local variables, `total` and
`count` that are used to compute the frequency with which `predictions`
matches `labels`. This frequency is ultimately returned as `accuracy`: an
idempotent operation that simply divides `total` by `count`.
For estimation of the metric over a stream of data, the function creates an
`update_op` operation that updates these variables and returns the `accuracy`.
Internally, an `is_correct` operation computes a `Tensor` with elements 1.0
where the corresponding elements of `predictions` and `labels` match and 0.0
otherwise. Then `update_op` increments `total` with the reduced sum of the
product of `weights` and `is_correct`, and it increments `count` with the
reduced sum of `weights`.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
labels: The ground truth values, a `Tensor` whose shape matches
`predictions`.
predictions: The predicted values, a `Tensor` of any shape.
weights: Optional `Tensor` whose rank is either 0, or the same rank as
`labels`, and must be broadcastable to `labels` (i.e., all dimensions must
be either `1`, or the same as the corresponding `labels` dimension).
metrics_collections: An optional list of collections that `accuracy` should
be added to.
updates_collections: An optional list of collections that `update_op` should
be added to.
name: An optional variable_scope name.
Returns:
accuracy: A `Tensor` representing the accuracy, the value of `total` divided
by `count`.
update_op: An operation that increments the `total` and `count` variables
appropriately and whose value matches `accuracy`.
Raises:
ValueError: If `predictions` and `labels` have mismatched shapes, or if
`weights` is not `None` and its shape doesn't match `predictions`, or if
either `metrics_collections` or `updates_collections` are not a list or
tuple.
"""
labels, predictions, weights = _remove_squeezable_dimensions(
labels, predictions, weights=weights)
predictions.get_shape().assert_is_compatible_with(labels.get_shape())
if labels.dtype != predictions.dtype:
predictions = math_ops.cast(predictions, labels.dtype)
is_correct = math_ops.to_float(math_ops.equal(predictions, labels))
return mean(is_correct, weights, metrics_collections,
updates_collections, name or 'accuracy')
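# Illustrative sketch (added commentary, not part of the original TensorFlow
# source): `accuracy` above is the weighted mean of an `is_correct` indicator.
# The helper below restates that computation for plain Python sequences; the
# name `_accuracy_sketch` is invented purely for illustration.
def _accuracy_sketch(labels, predictions, weights=None):
  """Weighted fraction of positions where `predictions` equals `labels`."""
  if weights is None:
    weights = [1.0] * len(labels)
  correct = sum(w for label, pred, w in zip(labels, predictions, weights)
                if label == pred)
  total = sum(weights)
  return correct / total if total > 0 else 0.0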
def _confusion_matrix_at_thresholds(
labels, predictions, thresholds, weights=None, includes=None):
"""Computes true_positives, false_negatives, true_negatives, false_positives.
This function creates up to four local variables, `true_positives`,
`true_negatives`, `false_positives` and `false_negatives`.
`true_positive[i]` is defined as the total weight of values in `predictions`
above `thresholds[i]` whose corresponding entry in `labels` is `True`.
`false_negatives[i]` is defined as the total weight of values in `predictions`
at most `thresholds[i]` whose corresponding entry in `labels` is `True`.
`true_negatives[i]` is defined as the total weight of values in `predictions`
at most `thresholds[i]` whose corresponding entry in `labels` is `False`.
`false_positives[i]` is defined as the total weight of values in `predictions`
above `thresholds[i]` whose corresponding entry in `labels` is `False`.
For estimation of these metrics over a stream of data, for each metric the
function respectively creates an `update_op` operation that updates the
variable and returns its value.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
labels: A `Tensor` whose shape matches `predictions`. `labels` will be cast
to `bool`.
predictions: A floating point `Tensor` of arbitrary shape and whose values
are in the range `[0, 1]`.
thresholds: A python list or tuple of float thresholds in `[0, 1]`.
weights: Optional `Tensor` whose rank is either 0, or the same rank as
`labels`, and must be broadcastable to `labels` (i.e., all dimensions must
be either `1`, or the same as the corresponding `labels` dimension).
includes: Tuple of keys to return, from 'tp', 'fn', 'tn', fp'. If `None`,
default to all four.
Returns:
values: Dict of variables of shape `[len(thresholds)]`. Keys are from
`includes`.
    update_ops: Dict of operations that increment the `values`. Keys are from
`includes`.
Raises:
ValueError: If `predictions` and `labels` have mismatched shapes, or if
`weights` is not `None` and its shape doesn't match `predictions`, or if
`includes` contains invalid keys.
"""
all_includes = ('tp', 'fn', 'tn', 'fp')
if includes is None:
includes = all_includes
else:
for include in includes:
if include not in all_includes:
        raise ValueError('Invalid key: %s.' % include)
labels, predictions, weights = _remove_squeezable_dimensions(
labels, predictions, weights)
predictions.get_shape().assert_is_compatible_with(labels.get_shape())
num_thresholds = len(thresholds)
# Reshape predictions and labels.
predictions_2d = array_ops.reshape(predictions, [-1, 1])
labels_2d = array_ops.reshape(
math_ops.cast(labels, dtype=dtypes.bool), [1, -1])
# Use static shape if known.
num_predictions = predictions_2d.get_shape().as_list()[0]
# Otherwise use dynamic shape.
if num_predictions is None:
num_predictions = array_ops.shape(predictions_2d)[0]
thresh_tiled = array_ops.tile(
array_ops.expand_dims(array_ops.constant(thresholds), [1]),
array_ops.stack([1, num_predictions]))
# Tile the predictions after thresholding them across different thresholds.
pred_is_pos = math_ops.greater(
array_ops.tile(array_ops.transpose(predictions_2d), [num_thresholds, 1]),
thresh_tiled)
if ('fn' in includes) or ('tn' in includes):
pred_is_neg = math_ops.logical_not(pred_is_pos)
# Tile labels by number of thresholds
label_is_pos = array_ops.tile(labels_2d, [num_thresholds, 1])
if ('fp' in includes) or ('tn' in includes):
label_is_neg = math_ops.logical_not(label_is_pos)
if weights is not None:
weights = _broadcast_weights(math_ops.to_float(weights), predictions)
weights_tiled = array_ops.tile(array_ops.reshape(
weights, [1, -1]), [num_thresholds, 1])
thresh_tiled.get_shape().assert_is_compatible_with(
weights_tiled.get_shape())
else:
weights_tiled = None
values = {}
update_ops = {}
if 'tp' in includes:
true_p = _create_local('true_positives', shape=[num_thresholds])
is_true_positive = math_ops.to_float(
math_ops.logical_and(label_is_pos, pred_is_pos))
if weights_tiled is not None:
is_true_positive *= weights_tiled
update_ops['tp'] = state_ops.assign_add(
true_p, math_ops.reduce_sum(is_true_positive, 1))
values['tp'] = true_p
if 'fn' in includes:
false_n = _create_local('false_negatives', shape=[num_thresholds])
is_false_negative = math_ops.to_float(
math_ops.logical_and(label_is_pos, pred_is_neg))
if weights_tiled is not None:
is_false_negative *= weights_tiled
update_ops['fn'] = state_ops.assign_add(
false_n, math_ops.reduce_sum(is_false_negative, 1))
values['fn'] = false_n
if 'tn' in includes:
true_n = _create_local('true_negatives', shape=[num_thresholds])
is_true_negative = math_ops.to_float(
math_ops.logical_and(label_is_neg, pred_is_neg))
if weights_tiled is not None:
is_true_negative *= weights_tiled
update_ops['tn'] = state_ops.assign_add(
true_n, math_ops.reduce_sum(is_true_negative, 1))
values['tn'] = true_n
if 'fp' in includes:
false_p = _create_local('false_positives', shape=[num_thresholds])
is_false_positive = math_ops.to_float(
math_ops.logical_and(label_is_neg, pred_is_pos))
if weights_tiled is not None:
is_false_positive *= weights_tiled
update_ops['fp'] = state_ops.assign_add(
false_p, math_ops.reduce_sum(is_false_positive, 1))
values['fp'] = false_p
return values, update_ops
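# Illustrative sketch (added commentary, not part of the original TensorFlow
# source): for a single threshold the four weighted counts above reduce to
# elementwise comparisons. The helper below shows that reduction for
# unweighted 1-D inputs; the name `_counts_at_threshold_sketch` is invented
# purely for illustration.
def _counts_at_threshold_sketch(labels, predictions, threshold):
  """Returns (tp, fn, tn, fp) for boolean labels and float predictions."""
  tp = fn = tn = fp = 0
  for label, prediction in zip(labels, predictions):
    pred_is_pos = prediction > threshold
    if label and pred_is_pos:
      tp += 1
    elif label and not pred_is_pos:
      fn += 1
    elif not label and pred_is_pos:
      fp += 1
    else:
      tn += 1
  return tp, fn, tn, fp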
def auc(labels, predictions, weights=None, num_thresholds=200,
metrics_collections=None, updates_collections=None,
curve='ROC', name=None):
"""Computes the approximate AUC via a Riemann sum.
The `auc` function creates four local variables, `true_positives`,
`true_negatives`, `false_positives` and `false_negatives` that are used to
compute the AUC. To discretize the AUC curve, a linearly spaced set of
thresholds is used to compute pairs of recall and precision values. The area
under the ROC-curve is therefore computed using the height of the recall
  values by the false positive rate, while the area under the PR-curve is
  computed using the height of the precision values by the recall.
This value is ultimately returned as `auc`, an idempotent operation that
computes the area under a discretized curve of precision versus recall values
(computed using the aforementioned variables). The `num_thresholds` variable
controls the degree of discretization with larger numbers of thresholds more
closely approximating the true AUC. The quality of the approximation may vary
dramatically depending on `num_thresholds`.
For best results, `predictions` should be distributed approximately uniformly
in the range [0, 1] and not peaked around 0 or 1. The quality of the AUC
approximation may be poor if this is not the case.
For estimation of the metric over a stream of data, the function creates an
`update_op` operation that updates these variables and returns the `auc`.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
labels: A `bool` `Tensor` whose shape matches `predictions`.
predictions: A floating point `Tensor` of arbitrary shape and whose values
are in the range `[0, 1]`.
weights: Optional `Tensor` whose rank is either 0, or the same rank as
`labels`, and must be broadcastable to `labels` (i.e., all dimensions must
be either `1`, or the same as the corresponding `labels` dimension).
num_thresholds: The number of thresholds to use when discretizing the roc
curve.
metrics_collections: An optional list of collections that `auc` should be
added to.
updates_collections: An optional list of collections that `update_op` should
be added to.
curve: Specifies the name of the curve to be computed, 'ROC' [default] or
'PR' for the Precision-Recall-curve.
name: An optional variable_scope name.
Returns:
auc: A scalar `Tensor` representing the current area-under-curve.
update_op: An operation that increments the `true_positives`,
`true_negatives`, `false_positives` and `false_negatives` variables
appropriately and whose value matches `auc`.
Raises:
ValueError: If `predictions` and `labels` have mismatched shapes, or if
`weights` is not `None` and its shape doesn't match `predictions`, or if
either `metrics_collections` or `updates_collections` are not a list or
tuple.
"""
with variable_scope.variable_scope(
name, 'auc', (labels, predictions, weights)):
if curve != 'ROC' and curve != 'PR':
raise ValueError('curve must be either ROC or PR, %s unknown' %
(curve))
kepsilon = 1e-7 # to account for floating point imprecisions
thresholds = [(i + 1) * 1.0 / (num_thresholds - 1)
for i in range(num_thresholds-2)]
thresholds = [0.0 - kepsilon] + thresholds + [1.0 + kepsilon]
values, update_ops = _confusion_matrix_at_thresholds(
labels, predictions, thresholds, weights)
# Add epsilons to avoid dividing by 0.
epsilon = 1.0e-6
def compute_auc(tp, fn, tn, fp, name):
"""Computes the roc-auc or pr-auc based on confusion counts."""
rec = math_ops.div(tp + epsilon, tp + fn + epsilon)
if curve == 'ROC':
fp_rate = math_ops.div(fp, fp + tn + epsilon)
x = fp_rate
y = rec
else: # curve == 'PR'.
prec = math_ops.div(tp + epsilon, tp + fp + epsilon)
x = rec
y = prec
return math_ops.reduce_sum(math_ops.multiply(
x[:num_thresholds - 1] - x[1:],
(y[:num_thresholds - 1] + y[1:]) / 2.), name=name)
# sum up the areas of all the trapeziums
auc_value = compute_auc(
values['tp'], values['fn'], values['tn'], values['fp'], 'value')
update_op = compute_auc(
update_ops['tp'], update_ops['fn'], update_ops['tn'], update_ops['fp'],
'update_op')
if metrics_collections:
ops.add_to_collections(metrics_collections, auc_value)
if updates_collections:
ops.add_to_collections(updates_collections, update_op)
return auc_value, update_op
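# Illustrative sketch (added commentary, not part of the original TensorFlow
# source): the inner `compute_auc` helper above sums trapezium areas,
# (x[i] - x[i + 1]) * (y[i] + y[i + 1]) / 2, over consecutive points of the
# discretized curve. The standalone function below repeats that arithmetic on
# plain Python lists; the name `_trapezoid_auc_sketch` is invented purely for
# illustration.
def _trapezoid_auc_sketch(x, y):
  """Approximates the area under a curve sampled at matched x / y points."""
  area = 0.0
  for i in range(len(x) - 1):
    area += (x[i] - x[i + 1]) * (y[i] + y[i + 1]) / 2.0
  return area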
def mean_absolute_error(labels, predictions, weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Computes the mean absolute error between the labels and predictions.
The `mean_absolute_error` function creates two local variables,
`total` and `count` that are used to compute the mean absolute error. This
average is weighted by `weights`, and it is ultimately returned as
`mean_absolute_error`: an idempotent operation that simply divides `total` by
`count`.
For estimation of the metric over a stream of data, the function creates an
`update_op` operation that updates these variables and returns the
`mean_absolute_error`. Internally, an `absolute_errors` operation computes the
absolute value of the differences between `predictions` and `labels`. Then
`update_op` increments `total` with the reduced sum of the product of
`weights` and `absolute_errors`, and it increments `count` with the reduced
sum of `weights`
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
labels: A `Tensor` of the same shape as `predictions`.
predictions: A `Tensor` of arbitrary shape.
weights: Optional `Tensor` whose rank is either 0, or the same rank as
`labels`, and must be broadcastable to `labels` (i.e., all dimensions must
be either `1`, or the same as the corresponding `labels` dimension).
metrics_collections: An optional list of collections that
`mean_absolute_error` should be added to.
updates_collections: An optional list of collections that `update_op` should
be added to.
name: An optional variable_scope name.
Returns:
mean_absolute_error: A `Tensor` representing the current mean, the value of
`total` divided by `count`.
update_op: An operation that increments the `total` and `count` variables
appropriately and whose value matches `mean_absolute_error`.
Raises:
ValueError: If `predictions` and `labels` have mismatched shapes, or if
`weights` is not `None` and its shape doesn't match `predictions`, or if
either `metrics_collections` or `updates_collections` are not a list or
tuple.
"""
predictions, labels, weights = _remove_squeezable_dimensions(
labels, predictions, weights)
predictions.get_shape().assert_is_compatible_with(labels.get_shape())
absolute_errors = math_ops.abs(predictions - labels)
return mean(absolute_errors, weights, metrics_collections,
updates_collections, name or 'mean_absolute_error')
def mean_cosine_distance(labels, predictions, dim, weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Computes the cosine distance between the labels and predictions.
The `mean_cosine_distance` function creates two local variables,
`total` and `count` that are used to compute the average cosine distance
between `predictions` and `labels`. This average is weighted by `weights`,
and it is ultimately returned as `mean_distance`, which is an idempotent
operation that simply divides `total` by `count`.
For estimation of the metric over a stream of data, the function creates an
`update_op` operation that updates these variables and returns the
`mean_distance`.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
labels: A `Tensor` of arbitrary shape.
predictions: A `Tensor` of the same shape as `labels`.
dim: The dimension along which the cosine distance is computed.
weights: Optional `Tensor` whose rank is either 0, or the same rank as
`labels`, and must be broadcastable to `labels` (i.e., all dimensions must
be either `1`, or the same as the corresponding `labels` dimension). Also,
dimension `dim` must be `1`.
metrics_collections: An optional list of collections that the metric
value variable should be added to.
updates_collections: An optional list of collections that the metric update
ops should be added to.
name: An optional variable_scope name.
Returns:
mean_distance: A `Tensor` representing the current mean, the value of
`total` divided by `count`.
update_op: An operation that increments the `total` and `count` variables
appropriately.
Raises:
ValueError: If `predictions` and `labels` have mismatched shapes, or if
`weights` is not `None` and its shape doesn't match `predictions`, or if
either `metrics_collections` or `updates_collections` are not a list or
tuple.
"""
labels, predictions, weights = _remove_squeezable_dimensions(
labels, predictions, weights)
predictions.get_shape().assert_is_compatible_with(labels.get_shape())
radial_diffs = math_ops.multiply(predictions, labels)
radial_diffs = math_ops.reduce_sum(radial_diffs,
reduction_indices=[dim,],
keep_dims=True)
mean_distance, update_op = mean(radial_diffs, weights,
None,
None,
name or 'mean_cosine_distance')
mean_distance = math_ops.subtract(1.0, mean_distance)
update_op = math_ops.subtract(1.0, update_op)
if metrics_collections:
ops.add_to_collections(metrics_collections, mean_distance)
if updates_collections:
ops.add_to_collections(updates_collections, update_op)
return mean_distance, update_op
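# Illustrative sketch (added commentary, not part of the original TensorFlow
# source): `mean_cosine_distance` above averages 1 - sum(predictions * labels)
# along `dim`, i.e. one minus the dot product when both vectors are already
# unit-normalized. The helper below shows that per-pair quantity for plain
# Python sequences; the name `_cosine_distance_sketch` is invented purely for
# illustration.
def _cosine_distance_sketch(v1, v2):
  """1 - dot(v1, v2); a cosine distance when both inputs are unit-norm."""
  return 1.0 - sum(a * b for a, b in zip(v1, v2))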
def mean_iou(labels,
predictions,
num_classes,
weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Calculate per-step mean Intersection-Over-Union (mIOU).
Mean Intersection-Over-Union is a common evaluation metric for
semantic image segmentation, which first computes the IOU for each
semantic class and then computes the average over classes.
IOU is defined as follows:
IOU = true_positive / (true_positive + false_positive + false_negative).
The predictions are accumulated in a confusion matrix, weighted by `weights`,
and mIOU is then calculated from it.
For estimation of the metric over a stream of data, the function creates an
`update_op` operation that updates these variables and returns the `mean_iou`.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
labels: A `Tensor` of ground truth labels with shape [batch size] and of
type `int32` or `int64`. The tensor will be flattened if its rank > 1.
predictions: A `Tensor` of prediction results for semantic labels, whose
shape is [batch size] and type `int32` or `int64`. The tensor will be
flattened if its rank > 1.
num_classes: The possible number of labels the prediction task can
have. This value must be provided, since a confusion matrix of
dimension = [num_classes, num_classes] will be allocated.
weights: Optional `Tensor` whose rank is either 0, or the same rank as
`labels`, and must be broadcastable to `labels` (i.e., all dimensions must
be either `1`, or the same as the corresponding `labels` dimension).
metrics_collections: An optional list of collections that `mean_iou`
should be added to.
updates_collections: An optional list of collections `update_op` should be
added to.
name: An optional variable_scope name.
Returns:
mean_iou: A `Tensor` representing the mean intersection-over-union.
update_op: An operation that increments the confusion matrix.
Raises:
ValueError: If `predictions` and `labels` have mismatched shapes, or if
`weights` is not `None` and its shape doesn't match `predictions`, or if
either `metrics_collections` or `updates_collections` are not a list or
tuple.
"""
with variable_scope.variable_scope(
name, 'mean_iou', (predictions, labels, weights)):
# Check if shape is compatible.
predictions.get_shape().assert_is_compatible_with(labels.get_shape())
# Local variable to accumulate the predictions in the confusion matrix.
cm_dtype = dtypes.int64 if weights is not None else dtypes.float64
total_cm = _create_local('total_confusion_matrix',
shape=[num_classes, num_classes], dtype=cm_dtype)
# Cast the type to int64 required by confusion_matrix_ops.
predictions = math_ops.to_int64(predictions)
labels = math_ops.to_int64(labels)
num_classes = math_ops.to_int64(num_classes)
# Flatten the input if its rank > 1.
if predictions.get_shape().ndims > 1:
predictions = array_ops.reshape(predictions, [-1])
if labels.get_shape().ndims > 1:
labels = array_ops.reshape(labels, [-1])
if (weights is not None) and (weights.get_shape().ndims > 1):
weights = array_ops.reshape(weights, [-1])
# Accumulate the prediction to current confusion matrix.
current_cm = confusion_matrix.confusion_matrix(
labels, predictions, num_classes, weights=weights, dtype=cm_dtype)
update_op = state_ops.assign_add(total_cm, current_cm)
def compute_mean_iou(name):
"""Compute the mean intersection-over-union via the confusion matrix."""
sum_over_row = math_ops.to_float(math_ops.reduce_sum(total_cm, 0))
sum_over_col = math_ops.to_float(math_ops.reduce_sum(total_cm, 1))
cm_diag = math_ops.to_float(array_ops.diag_part(total_cm))
denominator = sum_over_row + sum_over_col - cm_diag
# If the value of the denominator is 0, set it to 1 to avoid
# zero division.
denominator = array_ops.where(
math_ops.greater(denominator, 0),
denominator,
array_ops.ones_like(denominator))
iou = math_ops.div(cm_diag, denominator)
return math_ops.reduce_mean(iou, name=name)
mean_iou_v = compute_mean_iou('mean_iou')
if metrics_collections:
ops.add_to_collections(metrics_collections, mean_iou_v)
if updates_collections:
ops.add_to_collections(updates_collections, update_op)
return mean_iou_v, update_op
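# Illustrative sketch (added commentary, not part of the original TensorFlow
# source): `compute_mean_iou` above derives per-class IOU from the accumulated
# confusion matrix as diag / (row_sum + col_sum - diag), treats a class with a
# zero denominator as IOU 0, and averages over all classes. The helper below
# repeats that arithmetic on a plain list-of-lists matrix; the name
# `_mean_iou_from_confusion_matrix_sketch` is invented purely for illustration.
def _mean_iou_from_confusion_matrix_sketch(cm):
  """Mean IOU of a square confusion matrix given as a list of lists."""
  num_classes = len(cm)
  ious = []
  for i in range(num_classes):
    row_sum = sum(cm[i])
    col_sum = sum(cm[j][i] for j in range(num_classes))
    diag = cm[i][i]
    denominator = row_sum + col_sum - diag
    ious.append(diag / denominator if denominator > 0 else 0.0)
  return sum(ious) / num_classes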
def mean_relative_error(labels, predictions, normalizer, weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Computes the mean relative error by normalizing with the given values.
The `mean_relative_error` function creates two local variables,
`total` and `count` that are used to compute the mean relative absolute error.
This average is weighted by `weights`, and it is ultimately returned as
`mean_relative_error`: an idempotent operation that simply divides `total` by
`count`.
For estimation of the metric over a stream of data, the function creates an
`update_op` operation that updates these variables and returns the
`mean_relative_error`. Internally, a `relative_errors` operation divides the
absolute value of the differences between `predictions` and `labels` by the
`normalizer`. Then `update_op` increments `total` with the reduced sum of the
product of `weights` and `relative_errors`, and it increments `count` with the
reduced sum of `weights`.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
labels: A `Tensor` of the same shape as `predictions`.
predictions: A `Tensor` of arbitrary shape.
normalizer: A `Tensor` of the same shape as `predictions`.
weights: Optional `Tensor` whose rank is either 0, or the same rank as
`labels`, and must be broadcastable to `labels` (i.e., all dimensions must
be either `1`, or the same as the corresponding `labels` dimension).
metrics_collections: An optional list of collections that
`mean_relative_error` should be added to.
updates_collections: An optional list of collections that `update_op` should
be added to.
name: An optional variable_scope name.
Returns:
mean_relative_error: A `Tensor` representing the current mean, the value of
`total` divided by `count`.
update_op: An operation that increments the `total` and `count` variables
appropriately and whose value matches `mean_relative_error`.
Raises:
ValueError: If `predictions` and `labels` have mismatched shapes, or if
`weights` is not `None` and its shape doesn't match `predictions`, or if
either `metrics_collections` or `updates_collections` are not a list or
tuple.
"""
labels, predictions, weights = _remove_squeezable_dimensions(
labels, predictions, weights)
predictions.get_shape().assert_is_compatible_with(labels.get_shape())
predictions, normalizer = confusion_matrix.remove_squeezable_dimensions(
predictions, normalizer)
predictions.get_shape().assert_is_compatible_with(normalizer.get_shape())
relative_errors = array_ops.where(
math_ops.equal(normalizer, 0.0),
array_ops.zeros_like(labels),
math_ops.div(math_ops.abs(labels - predictions), normalizer))
return mean(relative_errors, weights, metrics_collections,
updates_collections, name or 'mean_relative_error')
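# Illustrative sketch for `mean_relative_error` (hypothetical values, not part
# of the implementation): with labels = [2., 4.], predictions = [1., 5.] and
# normalizer = labels, the relative errors are |2 - 1| / 2 = 0.5 and
# |4 - 5| / 4 = 0.25, so the unweighted metric converges to
# (0.5 + 0.25) / 2 = 0.375. Entries whose normalizer is 0 contribute a relative
# error of 0 rather than dividing by zero.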
def mean_squared_error(labels, predictions, weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Computes the mean squared error between the labels and predictions.
The `mean_squared_error` function creates two local variables,
`total` and `count` that are used to compute the mean squared error.
This average is weighted by `weights`, and it is ultimately returned as
`mean_squared_error`: an idempotent operation that simply divides `total` by
`count`.
For estimation of the metric over a stream of data, the function creates an
`update_op` operation that updates these variables and returns the
`mean_squared_error`. Internally, a `squared_error` operation computes the
element-wise square of the difference between `predictions` and `labels`. Then
`update_op` increments `total` with the reduced sum of the product of
`weights` and `squared_error`, and it increments `count` with the reduced sum
of `weights`.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
labels: A `Tensor` of the same shape as `predictions`.
predictions: A `Tensor` of arbitrary shape.
weights: Optional `Tensor` whose rank is either 0, or the same rank as
`labels`, and must be broadcastable to `labels` (i.e., all dimensions must
be either `1`, or the same as the corresponding `labels` dimension).
metrics_collections: An optional list of collections that
`mean_squared_error` should be added to.
updates_collections: An optional list of collections that `update_op` should
be added to.
name: An optional variable_scope name.
Returns:
mean_squared_error: A `Tensor` representing the current mean, the value of
`total` divided by `count`.
update_op: An operation that increments the `total` and `count` variables
appropriately and whose value matches `mean_squared_error`.
Raises:
ValueError: If `predictions` and `labels` have mismatched shapes, or if
`weights` is not `None` and its shape doesn't match `predictions`, or if
either `metrics_collections` or `updates_collections` are not a list or
tuple.
"""
labels, predictions, weights = _remove_squeezable_dimensions(
labels, predictions, weights)
predictions.get_shape().assert_is_compatible_with(labels.get_shape())
squared_error = math_ops.square(labels - predictions)
return mean(squared_error, weights, metrics_collections,
updates_collections, name or 'mean_squared_error')
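# Illustrative sketch for `mean_squared_error` (hypothetical values, not part
# of the implementation): with labels = [1., 2., 3.] and
# predictions = [1., 3., 5.], the element-wise squared errors are [0., 1., 4.],
# so the unweighted metric converges to 5 / 3 ~= 1.667.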
def mean_tensor(values, weights=None, metrics_collections=None,
updates_collections=None, name=None):
"""Computes the element-wise (weighted) mean of the given tensors.
In contrast to the `mean` function which returns a scalar with the
mean, this function returns an average tensor with the same shape as the
input tensors.
The `mean_tensor` function creates two local variables,
`total_tensor` and `count_tensor` that are used to compute the average of
`values`. This average is ultimately returned as `mean` which is an idempotent
operation that simply divides `total` by `count`.
For estimation of the metric over a stream of data, the function creates an
`update_op` operation that updates these variables and returns the `mean`.
`update_op` increments `total` with the reduced sum of the product of `values`
and `weights`, and it increments `count` with the reduced sum of `weights`.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
values: A `Tensor` of arbitrary dimensions.
weights: Optional `Tensor` whose rank is either 0, or the same rank as
`values`, and must be broadcastable to `values` (i.e., all dimensions must
be either `1`, or the same as the corresponding `values` dimension).
metrics_collections: An optional list of collections that `mean`
should be added to.
updates_collections: An optional list of collections that `update_op`
should be added to.
name: An optional variable_scope name.
Returns:
mean: A float `Tensor` representing the current mean, the value of `total`
divided by `count`.
update_op: An operation that increments the `total` and `count` variables
appropriately and whose value matches `mean_value`.
Raises:
ValueError: If `weights` is not `None` and its shape doesn't match `values`,
or if either `metrics_collections` or `updates_collections` are not a list
or tuple.
"""
with variable_scope.variable_scope(name, 'mean', (values, weights)):
values = math_ops.to_float(values)
total = _create_local('total_tensor', shape=values.get_shape())
count = _create_local('count_tensor', shape=values.get_shape())
num_values = array_ops.ones_like(values)
if weights is not None:
weights = _broadcast_weights(math_ops.to_float(weights), values)
values = math_ops.multiply(values, weights)
num_values = math_ops.multiply(num_values, weights)
total_compute_op = state_ops.assign_add(total, values)
count_compute_op = state_ops.assign_add(count, num_values)
def compute_mean(total, count, name):
non_zero_count = math_ops.maximum(count,
array_ops.ones_like(count),
name=name)
return math_ops.truediv(total, non_zero_count, name=name)
mean_t = compute_mean(total, count, 'value')
with ops.control_dependencies([total_compute_op, count_compute_op]):
update_op = compute_mean(total, count, 'update_op')
if metrics_collections:
ops.add_to_collections(metrics_collections, mean_t)
if updates_collections:
ops.add_to_collections(updates_collections, update_op)
return mean_t, update_op
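# Illustrative sketch for `mean_tensor` (hypothetical values, not part of the
# implementation): unlike `mean`, this metric keeps the input shape. Feeding
# two batches [[1., 2.]] and [[3., 6.]] through `update_op` leaves the value
# tensor at [[2., 4.]], the element-wise average of everything seen so far.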
def percentage_below(values, threshold, weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Computes the percentage of values less than the given threshold.
The `percentage_below` function creates two local variables,
`total` and `count` that are used to compute the percentage of `values` that
fall below `threshold`. This rate is weighted by `weights`, and it is
ultimately returned as `percentage` which is an idempotent operation that
simply divides `total` by `count`.
For estimation of the metric over a stream of data, the function creates an
`update_op` operation that updates these variables and returns the
`percentage`.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
values: A numeric `Tensor` of arbitrary size.
threshold: A scalar threshold.
weights: Optional `Tensor` whose rank is either 0, or the same rank as
`values`, and must be broadcastable to `values` (i.e., all dimensions must
be either `1`, or the same as the corresponding `values` dimension).
metrics_collections: An optional list of collections that the metric
value variable should be added to.
updates_collections: An optional list of collections that the metric update
ops should be added to.
name: An optional variable_scope name.
Returns:
percentage: A `Tensor` representing the current mean, the value of `total`
divided by `count`.
update_op: An operation that increments the `total` and `count` variables
appropriately.
Raises:
ValueError: If `weights` is not `None` and its shape doesn't match `values`,
or if either `metrics_collections` or `updates_collections` are not a list
or tuple.
"""
is_below_threshold = math_ops.to_float(math_ops.less(values, threshold))
return mean(is_below_threshold,
weights,
metrics_collections,
updates_collections,
name or 'percentage_below_threshold')
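# Illustrative sketch for `percentage_below` (hypothetical values, not part of
# the implementation): with values = [0.2, 0.8, 0.4, 0.9] and threshold = 0.5,
# two of the four entries are below the threshold, so the metric converges to
# 0.5. With weights = [1., 1., 0., 0.] only the first two entries count, and
# exactly one of those is below the threshold, so the result is again 0.5.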
def _count_condition(values, weights=None, metrics_collections=None,
updates_collections=None):
"""Sums the weights of cases where the given values are True.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
values: A `bool` `Tensor` of arbitrary size.
weights: Optional `Tensor` whose rank is either 0, or the same rank as
`values`, and must be broadcastable to `values` (i.e., all dimensions must
be either `1`, or the same as the corresponding `values` dimension).
metrics_collections: An optional list of collections that the metric
value variable should be added to.
updates_collections: An optional list of collections that the metric update
ops should be added to.
Returns:
value_tensor: A `Tensor` representing the current value of the metric.
update_op: An operation that accumulates the error from a batch of data.
Raises:
ValueError: If `weights` is not `None` and its shape doesn't match `values`,
or if either `metrics_collections` or `updates_collections` are not a list
or tuple.
"""
check_ops.assert_type(values, dtypes.bool)
count = _create_local('count', shape=[])
values = math_ops.to_float(values)
if weights is not None:
with ops.control_dependencies((
check_ops.assert_rank_in(weights, (0, array_ops.rank(values))),)):
weights = math_ops.to_float(weights)
values = math_ops.multiply(values, weights)
value_tensor = array_ops.identity(count)
update_op = state_ops.assign_add(count, math_ops.reduce_sum(values))
if metrics_collections:
ops.add_to_collections(metrics_collections, value_tensor)
if updates_collections:
ops.add_to_collections(updates_collections, update_op)
return value_tensor, update_op
def true_positives(labels, predictions, weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Sum the weights of true_positives.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
labels: The ground truth values, a `bool` `Tensor` whose dimensions must
match `predictions`.
predictions: The predicted values, a `bool` `Tensor` of arbitrary
dimensions.
weights: Optional `Tensor` whose rank is either 0, or the same rank as
`labels`, and must be broadcastable to `labels` (i.e., all dimensions must
be either `1`, or the same as the corresponding `labels` dimension).
metrics_collections: An optional list of collections that the metric
value variable should be added to.
updates_collections: An optional list of collections that the metric update
ops should be added to.
name: An optional variable_scope name.
Returns:
value_tensor: A `Tensor` representing the current value of the metric.
update_op: An operation that accumulates the error from a batch of data.
Raises:
ValueError: If `predictions` and `labels` have mismatched shapes, or if
`weights` is not `None` and its shape doesn't match `predictions`, or if
either `metrics_collections` or `updates_collections` are not a list or
tuple.
"""
with variable_scope.variable_scope(
name, 'true_positives', (predictions, labels, weights)):
predictions = ops.convert_to_tensor(predictions)
labels = ops.convert_to_tensor(labels)
predictions.get_shape().assert_is_compatible_with(labels.get_shape())
is_true_positive = math_ops.logical_and(math_ops.equal(labels, 1),
math_ops.equal(predictions, 1))
return _count_condition(is_true_positive, weights, metrics_collections,
updates_collections)
def false_positives(labels, predictions, weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Sum the weights of false positives.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
labels: The ground truth values, a `bool` `Tensor` whose dimensions must
match `predictions`.
predictions: The predicted values, a `bool` `Tensor` of arbitrary
dimensions.
weights: Optional `Tensor` whose rank is either 0, or the same rank as
`labels`, and must be broadcastable to `labels` (i.e., all dimensions must
be either `1`, or the same as the corresponding `labels` dimension).
metrics_collections: An optional list of collections that the metric
value variable should be added to.
updates_collections: An optional list of collections that the metric update
ops should be added to.
name: An optional variable_scope name.
Returns:
value_tensor: A `Tensor` representing the current value of the metric.
update_op: An operation that accumulates the error from a batch of data.
Raises:
ValueError: If `predictions` and `labels` have mismatched shapes, or if
`weights` is not `None` and its shape doesn't match `predictions`, or if
either `metrics_collections` or `updates_collections` are not a list or
tuple.
"""
with variable_scope.variable_scope(
name, 'false_positives', (predictions, labels, weights)):
predictions = ops.convert_to_tensor(predictions)
labels = ops.convert_to_tensor(labels)
predictions.get_shape().assert_is_compatible_with(labels.get_shape())
is_false_positive = math_ops.logical_and(math_ops.equal(labels, 0),
math_ops.equal(predictions, 1))
return _count_condition(is_false_positive, weights, metrics_collections,
updates_collections)
def precision(labels, predictions, weights=None,
metrics_collections=None, updates_collections=None,
name=None):
"""Computes the precision of the predictions with respect to the labels.
The `precision` function creates two local variables,
`true_positives` and `false_positives`, that are used to compute the
precision. This value is ultimately returned as `precision`, an idempotent
operation that simply divides `true_positives` by the sum of `true_positives`
and `false_positives`.
For estimation of the metric over a stream of data, the function creates an
`update_op` operation that updates these variables and returns the
`precision`. `update_op` weights each prediction by the corresponding value in
`weights`.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
labels: The ground truth values, a `bool` `Tensor` whose dimensions must
match `predictions`.
predictions: The predicted values, a `bool` `Tensor` of arbitrary shape.
weights: Optional `Tensor` whose rank is either 0, or the same rank as
`labels`, and must be broadcastable to `labels` (i.e., all dimensions must
be either `1`, or the same as the corresponding `labels` dimension).
metrics_collections: An optional list of collections that `precision` should
be added to.
updates_collections: An optional list of collections that `update_op` should
be added to.
name: An optional variable_scope name.
Returns:
precision: Scalar float `Tensor` with the value of `true_positives`
divided by the sum of `true_positives` and `false_positives`.
update_op: `Operation` that increments `true_positives` and
`false_positives` variables appropriately and whose value matches
`precision`.
Raises:
ValueError: If `predictions` and `labels` have mismatched shapes, or if
`weights` is not `None` and its shape doesn't match `predictions`, or if
either `metrics_collections` or `updates_collections` are not a list or
tuple.
"""
with variable_scope.variable_scope(
name, 'precision', (predictions, labels, weights)):
labels, predictions, weights = _remove_squeezable_dimensions(
labels, predictions, weights)
predictions.get_shape().assert_is_compatible_with(labels.get_shape())
true_p, true_positives_update_op = true_positives(
labels, predictions, weights, metrics_collections=None,
updates_collections=None, name=None)
false_p, false_positives_update_op = false_positives(
labels, predictions, weights, metrics_collections=None,
updates_collections=None, name=None)
def compute_precision(name):
return array_ops.where(
math_ops.greater(true_p + false_p, 0),
math_ops.div(true_p, true_p + false_p),
0,
name)
p = compute_precision('value')
with ops.control_dependencies([true_positives_update_op,
false_positives_update_op]):
update_op = compute_precision('update_op')
if metrics_collections:
ops.add_to_collections(metrics_collections, p)
if updates_collections:
ops.add_to_collections(updates_collections, update_op)
return p, update_op
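# Illustrative sketch for `precision` (hypothetical values, not part of the
# implementation): with labels = [1, 0, 1, 1] and predictions = [1, 1, 1, 0]
# there are 2 true positives and 1 false positive, so the metric converges to
# 2 / (2 + 1) ~= 0.667. If no positives have been predicted at all
# (true_positives + false_positives == 0), the value is defined as 0.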
def precision_at_thresholds(labels, predictions, thresholds,
weights=None,
metrics_collections=None,
updates_collections=None, name=None):
"""Computes precision values for different `thresholds` on `predictions`.
The `precision_at_thresholds` function creates four local variables,
`true_positives`, `true_negatives`, `false_positives` and `false_negatives`
for various values of thresholds. `precision[i]` is defined as the total
weight of values in `predictions` above `thresholds[i]` whose corresponding
entry in `labels` is `True`, divided by the total weight of values in
`predictions` above `thresholds[i]` (`true_positives[i] / (true_positives[i] +
false_positives[i])`).
For estimation of the metric over a stream of data, the function creates an
`update_op` operation that updates these variables and returns the
`precision`.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
labels: A `bool` `Tensor` whose shape matches `predictions`.
predictions: A floating point `Tensor` of arbitrary shape and whose values
are in the range `[0, 1]`.
thresholds: A python list or tuple of float thresholds in `[0, 1]`.
weights: Optional `Tensor` whose rank is either 0, or the same rank as
`labels`, and must be broadcastable to `labels` (i.e., all dimensions must
be either `1`, or the same as the corresponding `labels` dimension).
metrics_collections: An optional list of collections that `auc` should be
added to.
updates_collections: An optional list of collections that `update_op` should
be added to.
name: An optional variable_scope name.
Returns:
precision: A float `Tensor` of shape `[len(thresholds)]`.
update_op: An operation that increments the `true_positives`,
`true_negatives`, `false_positives` and `false_negatives` variables that
are used in the computation of `precision`.
Raises:
ValueError: If `predictions` and `labels` have mismatched shapes, or if
`weights` is not `None` and its shape doesn't match `predictions`, or if
either `metrics_collections` or `updates_collections` are not a list or
tuple.
"""
with variable_scope.variable_scope(name, 'precision_at_thresholds',
(predictions, labels, weights)):
values, update_ops = _confusion_matrix_at_thresholds(
labels, predictions, thresholds, weights, includes=('tp', 'fp'))
tp = values['tp']
fp = values['fp']
# Avoid division by zero.
epsilon = 1e-7
def compute_precision(name):
return math_ops.div(tp, epsilon + tp + fp, name='precision_' + name)
prec = compute_precision('value')
with ops.control_dependencies(update_ops.values()):
update_op = compute_precision('update_op')
if metrics_collections:
ops.add_to_collections(metrics_collections, prec)
if updates_collections:
ops.add_to_collections(updates_collections, update_op)
return prec, update_op
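# Illustrative sketch for `precision_at_thresholds` (hypothetical values, not
# part of the implementation): with predictions = [0.2, 0.6, 0.8],
# labels = [True, False, True] and thresholds = [0.3, 0.7]:
#   threshold 0.3: predictions above it are 0.6 (FP) and 0.8 (TP),
#     so precision[0] ~= 1 / 2
#   threshold 0.7: only 0.8 (TP) is above it, so precision[1] ~= 1.0
# (the small epsilon in the denominator only guards against division by zero).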
def false_negatives(labels, predictions, weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Computes the total number of false positives.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
labels: The ground truth values, a `bool` `Tensor` whose dimensions must
match `predictions`.
predictions: The predicted values, a `bool` `Tensor` of arbitrary
dimensions.
weights: Optional `Tensor` whose rank is either 0, or the same rank as
`labels`, and must be broadcastable to `labels` (i.e., all dimensions must
be either `1`, or the same as the corresponding `labels` dimension).
metrics_collections: An optional list of collections that the metric
value variable should be added to.
updates_collections: An optional list of collections that the metric update
ops should be added to.
name: An optional variable_scope name.
Returns:
value_tensor: A `Tensor` representing the current value of the metric.
update_op: An operation that accumulates the error from a batch of data.
Raises:
ValueError: If `predictions` and `labels` have mismatched shapes, or if
`weights` is not `None` and its shape doesn't match `predictions`, or if
either `metrics_collections` or `updates_collections` are not a list or
tuple.
"""
with variable_scope.variable_scope(
name, 'false_negatives', (predictions, labels, weights)):
predictions = ops.convert_to_tensor(predictions)
labels = ops.convert_to_tensor(labels)
predictions.get_shape().assert_is_compatible_with(labels.get_shape())
is_false_negative = math_ops.logical_and(math_ops.equal(labels, 1),
math_ops.equal(predictions, 0))
return _count_condition(is_false_negative, weights, metrics_collections,
updates_collections)
def recall(labels, predictions, weights=None,
metrics_collections=None, updates_collections=None,
name=None):
"""Computes the recall of the predictions with respect to the labels.
The `recall` function creates two local variables, `true_positives`
and `false_negatives`, that are used to compute the recall. This value is
ultimately returned as `recall`, an idempotent operation that simply divides
`true_positives` by the sum of `true_positives` and `false_negatives`.
For estimation of the metric over a stream of data, the function creates an
`update_op` that updates these variables and returns the `recall`. `update_op`
weights each prediction by the corresponding value in `weights`.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
labels: The ground truth values, a `bool` `Tensor` whose dimensions must
match `predictions`.
predictions: The predicted values, a `bool` `Tensor` of arbitrary shape.
weights: Optional `Tensor` whose rank is either 0, or the same rank as
`labels`, and must be broadcastable to `labels` (i.e., all dimensions must
be either `1`, or the same as the corresponding `labels` dimension).
metrics_collections: An optional list of collections that `recall` should
be added to.
updates_collections: An optional list of collections that `update_op` should
be added to.
name: An optional variable_scope name.
Returns:
recall: Scalar float `Tensor` with the value of `true_positives` divided
by the sum of `true_positives` and `false_negatives`.
update_op: `Operation` that increments `true_positives` and
`false_negatives` variables appropriately and whose value matches
`recall`.
Raises:
ValueError: If `predictions` and `labels` have mismatched shapes, or if
`weights` is not `None` and its shape doesn't match `predictions`, or if
either `metrics_collections` or `updates_collections` are not a list or
tuple.
"""
with variable_scope.variable_scope(
name, 'recall', (predictions, labels, weights)):
labels, predictions, weights = _remove_squeezable_dimensions(
labels, predictions, weights)
predictions.get_shape().assert_is_compatible_with(labels.get_shape())
true_p, true_positives_update_op = true_positives(
labels, predictions, weights, metrics_collections=None,
updates_collections=None, name=None)
false_n, false_negatives_update_op = false_negatives(
labels, predictions, weights, metrics_collections=None,
updates_collections=None, name=None)
def compute_recall(true_p, false_n, name):
return array_ops.where(
math_ops.greater(true_p + false_n, 0),
math_ops.div(true_p, true_p + false_n),
0,
name)
rec = compute_recall(true_p, false_n, 'value')
with ops.control_dependencies([true_positives_update_op,
false_negatives_update_op]):
update_op = compute_recall(true_p, false_n, 'update_op')
if metrics_collections:
ops.add_to_collections(metrics_collections, rec)
if updates_collections:
ops.add_to_collections(updates_collections, update_op)
return rec, update_op
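# Illustrative sketch for `recall` (hypothetical values, not part of the
# implementation): reusing the labels/predictions from the `precision` sketch
# above (labels = [1, 0, 1, 1], predictions = [1, 1, 1, 0]), there are 2 true
# positives and 1 false negative, so the metric converges to
# 2 / (2 + 1) ~= 0.667. When no positive labels have been seen
# (true_positives + false_negatives == 0), the value is defined as 0.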
def _at_k_name(name, k=None, class_id=None):
if k is not None:
name = '%s_at_%d' % (name, k)
else:
name = '%s_at_k' % name
if class_id is not None:
name = '%s_class%d' % (name, class_id)
return name
def _select_class_id(ids, selected_id):
"""Filter all but `selected_id` out of `ids`.
Args:
ids: `int64` `Tensor` or `SparseTensor` of IDs.
selected_id: Int id to select.
Returns:
`SparseTensor` of same dimensions as `ids`. This contains only the entries
equal to `selected_id`.
"""
ids = sparse_tensor.convert_to_tensor_or_sparse_tensor(ids)
if isinstance(ids, sparse_tensor.SparseTensor):
return sparse_ops.sparse_retain(
ids, math_ops.equal(ids.values, selected_id))
# TODO(ptucker): Make this more efficient, maybe add a sparse version of
# tf.equal and tf.reduce_any?
# Shape of filled IDs is the same as `ids` with the last dim collapsed to 1.
ids_shape = array_ops.shape(ids, out_type=dtypes.int64)
ids_last_dim = array_ops.size(ids_shape) - 1
filled_selected_id_shape = math_ops.reduced_shape(
ids_shape, array_ops.reshape(ids_last_dim, [1]))
# Intersect `ids` with the selected ID.
filled_selected_id = array_ops.fill(
filled_selected_id_shape, math_ops.to_int64(selected_id))
result = sets.set_intersection(filled_selected_id, ids)
return sparse_tensor.SparseTensor(
indices=result.indices, values=result.values, dense_shape=ids_shape)
def _maybe_select_class_id(labels, predictions_idx, selected_id=None):
"""If class ID is specified, filter all other classes.
Args:
labels: `int64` `Tensor` or `SparseTensor` with shape
[D1, ... DN, num_labels], where N >= 1 and num_labels is the number of
target classes for the associated prediction. Commonly, N=1 and `labels`
has shape [batch_size, num_labels]. [D1, ... DN] must match
`predictions_idx`.
predictions_idx: `int64` `Tensor` of class IDs, with shape [D1, ... DN, k]
where N >= 1. Commonly, N=1 and `predictions_idx` has shape
[batch size, k].
selected_id: Int id to select.
Returns:
Tuple of `labels` and `predictions_idx`, possibly with classes removed.
"""
if selected_id is None:
return labels, predictions_idx
return (_select_class_id(labels, selected_id),
_select_class_id(predictions_idx, selected_id))
def _sparse_true_positive_at_k(labels,
predictions_idx,
class_id=None,
weights=None,
name=None):
"""Calculates true positives for recall@k and precision@k.
If `class_id` is specified, calculate binary true positives for `class_id`
only.
If `class_id` is not specified, calculate metrics for `k` predicted vs
`n` label classes, where `n` is the 2nd dimension of `labels`.
Args:
labels: `int64` `Tensor` or `SparseTensor` with shape
[D1, ... DN, num_labels], where N >= 1 and num_labels is the number of
target classes for the associated prediction. Commonly, N=1 and `labels`
has shape [batch_size, num_labels]. [D1, ... DN] must match
`predictions_idx`.
predictions_idx: 1-D or higher `int64` `Tensor` with last dimension `k`,
top `k` predicted classes. For rank `n`, the first `n-1` dimensions must
match `labels`.
class_id: Class for which we want binary metrics.
weights: `Tensor` whose rank is either 0, or n-1, where n is the rank of
`labels`. If the latter, it must be broadcastable to `labels` (i.e., all
dimensions must be either `1`, or the same as the corresponding `labels`
dimension).
name: Name of operation.
Returns:
A [D1, ... DN] `Tensor` of true positive counts.
"""
with ops.name_scope(
name, 'true_positives', (predictions_idx, labels, weights)):
labels, predictions_idx = _maybe_select_class_id(
labels, predictions_idx, class_id)
tp = sets.set_size(sets.set_intersection(predictions_idx, labels))
tp = math_ops.to_double(tp)
if weights is not None:
with ops.control_dependencies((_assert_weights_rank(weights, tp),)):
weights = math_ops.to_double(weights)
tp = math_ops.multiply(tp, weights)
return tp
def _streaming_sparse_true_positive_at_k(labels,
predictions_idx,
k=None,
class_id=None,
weights=None,
name=None):
"""Calculates weighted per step true positives for recall@k and precision@k.
If `class_id` is specified, calculate binary true positives for `class_id`
only.
If `class_id` is not specified, calculate metrics for `k` predicted vs
`n` label classes, where `n` is the 2nd dimension of `labels`.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
labels: `int64` `Tensor` or `SparseTensor` with shape
[D1, ... DN, num_labels], where N >= 1 and num_labels is the number of
target classes for the associated prediction. Commonly, N=1 and `labels`
has shape [batch_size, num_labels]. [D1, ... DN] must match
`predictions_idx`.
predictions_idx: 1-D or higher `int64` `Tensor` with last dimension `k`,
top `k` predicted classes. For rank `n`, the first `n-1` dimensions must
match `labels`.
k: Integer, k for @k metric. This is only used for default op name.
class_id: Class for which we want binary metrics.
weights: `Tensor` whose rank is either 0, or n-1, where n is the rank of
`labels`. If the latter, it must be broadcastable to `labels` (i.e., all
dimensions must be either `1`, or the same as the corresponding `labels`
dimension).
name: Name of new variable, and namespace for other dependent ops.
Returns:
A tuple of `Variable` and update `Operation`.
Raises:
ValueError: If `weights` is not `None` and has an incompatible shape.
"""
with ops.name_scope(
name, _at_k_name('true_positive', k, class_id=class_id),
(predictions_idx, labels, weights)) as scope:
tp = _sparse_true_positive_at_k(
predictions_idx=predictions_idx, labels=labels, class_id=class_id,
weights=weights)
batch_total_tp = math_ops.to_double(math_ops.reduce_sum(tp))
var = _local_variable(array_ops.zeros([], dtype=dtypes.float64), name=scope)
return var, state_ops.assign_add(var, batch_total_tp, name='update')
def _sparse_false_negative_at_k(labels,
predictions_idx,
class_id=None,
weights=None):
"""Calculates false negatives for recall@k.
If `class_id` is specified, calculate binary false negatives for `class_id`
only.
If `class_id` is not specified, calculate metrics for `k` predicted vs
`n` label classes, where `n` is the 2nd dimension of `labels`.
Args:
labels: `int64` `Tensor` or `SparseTensor` with shape
[D1, ... DN, num_labels], where N >= 1 and num_labels is the number of
target classes for the associated prediction. Commonly, N=1 and `labels`
has shape [batch_size, num_labels]. [D1, ... DN] must match
`predictions_idx`.
predictions_idx: 1-D or higher `int64` `Tensor` with last dimension `k`,
top `k` predicted classes. For rank `n`, the first `n-1` dimensions must
match `labels`.
class_id: Class for which we want binary metrics.
weights: `Tensor` whose rank is either 0, or n-1, where n is the rank of
`labels`. If the latter, it must be broadcastable to `labels` (i.e., all
dimensions must be either `1`, or the same as the corresponding `labels`
dimension).
Returns:
A [D1, ... DN] `Tensor` of false negative counts.
"""
with ops.name_scope(
None, 'false_negatives', (predictions_idx, labels, weights)):
labels, predictions_idx = _maybe_select_class_id(labels,
predictions_idx,
class_id)
fn = sets.set_size(sets.set_difference(predictions_idx,
labels,
aminusb=False))
fn = math_ops.to_double(fn)
if weights is not None:
with ops.control_dependencies((_assert_weights_rank(weights, fn),)):
weights = math_ops.to_double(weights)
fn = math_ops.multiply(fn, weights)
return fn
def _streaming_sparse_false_negative_at_k(labels,
predictions_idx,
k,
class_id=None,
weights=None,
name=None):
"""Calculates weighted per step false negatives for recall@k.
If `class_id` is specified, calculate binary false negatives for `class_id`
only.
If `class_id` is not specified, calculate metrics for `k` predicted vs
`n` label classes, where `n` is the 2nd dimension of `labels`.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
labels: `int64` `Tensor` or `SparseTensor` with shape
[D1, ... DN, num_labels], where N >= 1 and num_labels is the number of
target classes for the associated prediction. Commonly, N=1 and `labels`
has shape [batch_size, num_labels]. [D1, ... DN] must match
`predictions_idx`.
predictions_idx: 1-D or higher `int64` `Tensor` with last dimension `k`,
top `k` predicted classes. For rank `n`, the first `n-1` dimensions must
match `labels`.
k: Integer, k for @k metric. This is only used for default op name.
class_id: Class for which we want binary metrics.
weights: `Tensor` whose rank is either 0, or n-1, where n is the rank of
`labels`. If the latter, it must be broadcastable to `labels` (i.e., all
dimensions must be either `1`, or the same as the corresponding `labels`
dimension).
name: Name of new variable, and namespace for other dependent ops.
Returns:
A tuple of `Variable` and update `Operation`.
Raises:
ValueError: If `weights` is not `None` and has an incompatible shape.
"""
with ops.name_scope(
name, _at_k_name('false_negative', k, class_id=class_id),
(predictions_idx, labels, weights)) as scope:
fn = _sparse_false_negative_at_k(
predictions_idx=predictions_idx, labels=labels, class_id=class_id,
weights=weights)
batch_total_fn = math_ops.to_double(math_ops.reduce_sum(fn))
var = _local_variable(array_ops.zeros([], dtype=dtypes.float64), name=scope)
return var, state_ops.assign_add(var, batch_total_fn, name='update')
def recall_at_k(labels,
predictions,
k,
class_id=None,
weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Computes recall@k of the predictions with respect to sparse labels.
If `class_id` is specified, we calculate recall by considering only the
entries in the batch for which `class_id` is in the label, and computing
the fraction of them for which `class_id` is in the top-k `predictions`.
If `class_id` is not specified, we'll calculate recall as how often on
average a class among the labels of a batch entry is in the top-k
`predictions`.
`recall_at_k` creates two local variables,
`true_positive_at_<k>` and `false_negative_at_<k>`, that are used to compute
the recall_at_k frequency. This frequency is ultimately returned as
`recall_at_<k>`: an idempotent operation that simply divides
`true_positive_at_<k>` by total (`true_positive_at_<k>` +
`false_negative_at_<k>`).
For estimation of the metric over a stream of data, the function creates an
`update_op` operation that updates these variables and returns the
`recall_at_<k>`. Internally, a `top_k` operation computes a `Tensor`
indicating the top `k` `predictions`. Set operations applied to `top_k` and
`labels` calculate the true positives and false negatives weighted by
`weights`. Then `update_op` increments `true_positive_at_<k>` and
`false_negative_at_<k>` using these values.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
labels: `int64` `Tensor` or `SparseTensor` with shape
[D1, ... DN, num_labels] or [D1, ... DN], where the latter implies
num_labels=1. N >= 1 and num_labels is the number of target classes for
the associated prediction. Commonly, N=1 and `labels` has shape
[batch_size, num_labels]. [D1, ... DN] must match `predictions`. Values
should be in range [0, num_classes), where num_classes is the last
dimension of `predictions`. Values outside this range always count
towards `false_negative_at_<k>`.
predictions: Float `Tensor` with shape [D1, ... DN, num_classes] where
N >= 1. Commonly, N=1 and predictions has shape [batch size, num_classes].
The final dimension contains the logit values for each class. [D1, ... DN]
must match `labels`.
k: Integer, k for @k metric.
class_id: Integer class ID for which we want binary metrics. This should be
in range [0, num_classes), where num_classes is the last dimension of
`predictions`. If class_id is outside this range, the method returns NAN.
weights: `Tensor` whose rank is either 0, or n-1, where n is the rank of
`labels`. If the latter, it must be broadcastable to `labels` (i.e., all
dimensions must be either `1`, or the same as the corresponding `labels`
dimension).
metrics_collections: An optional list of collections that values should
be added to.
updates_collections: An optional list of collections that updates should
be added to.
name: Name of new update operation, and namespace for other dependent ops.
Returns:
recall: Scalar `float64` `Tensor` with the value of `true_positives` divided
by the sum of `true_positives` and `false_negatives`.
update_op: `Operation` that increments `true_positives` and
`false_negatives` variables appropriately, and whose value matches
`recall`.
Raises:
ValueError: If `weights` is not `None` and its shape doesn't match
`predictions`, or if either `metrics_collections` or `updates_collections`
are not a list or tuple.
"""
with ops.name_scope(
name, _at_k_name('recall', k, class_id=class_id),
(predictions, labels, weights)) as scope:
labels = _maybe_expand_labels(labels, predictions)
_, top_k_idx = nn.top_k(predictions, k)
top_k_idx = math_ops.to_int64(top_k_idx)
tp, tp_update = _streaming_sparse_true_positive_at_k(
predictions_idx=top_k_idx, labels=labels, k=k, class_id=class_id,
weights=weights)
fn, fn_update = _streaming_sparse_false_negative_at_k(
predictions_idx=top_k_idx, labels=labels, k=k, class_id=class_id,
weights=weights)
metric = math_ops.div(tp, math_ops.add(tp, fn), name=scope)
update = math_ops.div(
tp_update, math_ops.add(tp_update, fn_update), name='update')
if metrics_collections:
ops.add_to_collections(metrics_collections, metric)
if updates_collections:
ops.add_to_collections(updates_collections, update)
return metric, update
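# Illustrative sketch for `recall_at_k` (hypothetical values, not part of the
# implementation): for a single example whose label set is {0, 2} and whose
# top-2 predicted classes (by logit) are [2, 3], there is 1 true positive
# (class 2) and 1 false negative (class 0), so recall@2 for that example is
# 0.5. The streaming value divides the true-positive count accumulated over all
# examples by the accumulated (true positives + false negatives).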
def recall_at_thresholds(labels, predictions, thresholds,
weights=None, metrics_collections=None,
updates_collections=None, name=None):
"""Computes various recall values for different `thresholds` on `predictions`.
The `recall_at_thresholds` function creates four local variables,
`true_positives`, `true_negatives`, `false_positives` and `false_negatives`
for various values of thresholds. `recall[i]` is defined as the total weight
of values in `predictions` above `thresholds[i]` whose corresponding entry in
`labels` is `True`, divided by the total weight of `True` values in `labels`
(`true_positives[i] / (true_positives[i] + false_negatives[i])`).
For estimation of the metric over a stream of data, the function creates an
`update_op` operation that updates these variables and returns the `recall`.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
labels: A `bool` `Tensor` whose shape matches `predictions`.
predictions: A floating point `Tensor` of arbitrary shape and whose values
are in the range `[0, 1]`.
thresholds: A python list or tuple of float thresholds in `[0, 1]`.
weights: Optional `Tensor` whose rank is either 0, or the same rank as
`labels`, and must be broadcastable to `labels` (i.e., all dimensions must
be either `1`, or the same as the corresponding `labels` dimension).
metrics_collections: An optional list of collections that `recall` should be
added to.
updates_collections: An optional list of collections that `update_op` should
be added to.
name: An optional variable_scope name.
Returns:
recall: A float `Tensor` of shape `[len(thresholds)]`.
update_op: An operation that increments the `true_positives`,
`true_negatives`, `false_positives` and `false_negatives` variables that
are used in the computation of `recall`.
Raises:
ValueError: If `predictions` and `labels` have mismatched shapes, or if
`weights` is not `None` and its shape doesn't match `predictions`, or if
either `metrics_collections` or `updates_collections` are not a list or
tuple.
"""
with variable_scope.variable_scope(name, 'recall_at_thresholds',
(predictions, labels, weights)):
values, update_ops = _confusion_matrix_at_thresholds(
labels, predictions, thresholds, weights, includes=('tp', 'fn'))
tp = values['tp']
fn = values['fn']
# Avoid division by zero.
epsilon = 1e-7
def compute_recall(name):
return math_ops.div(tp, epsilon + tp + fn, name='recall_' + name)
rec = compute_recall('value')
with ops.control_dependencies(update_ops.values()):
update_op = compute_recall('update_op')
if metrics_collections:
ops.add_to_collections(metrics_collections, rec)
if updates_collections:
ops.add_to_collections(updates_collections, update_op)
return rec, update_op
def root_mean_squared_error(labels, predictions, weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Computes the root mean squared error between the labels and predictions.
The `root_mean_squared_error` function creates two local variables,
`total` and `count` that are used to compute the root mean squared error.
This average is weighted by `weights`, and it is ultimately returned as
`root_mean_squared_error`: an idempotent operation that takes the square root
of the division of `total` by `count`.
For estimation of the metric over a stream of data, the function creates an
`update_op` operation that updates these variables and returns the
`root_mean_squared_error`. Internally, a `squared_error` operation computes
the element-wise square of the difference between `predictions` and `labels`.
Then `update_op` increments `total` with the reduced sum of the product of
`weights` and `squared_error`, and it increments `count` with the reduced sum
of `weights`.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
labels: A `Tensor` of the same shape as `predictions`.
predictions: A `Tensor` of arbitrary shape.
weights: Optional `Tensor` whose rank is either 0, or the same rank as
`labels`, and must be broadcastable to `labels` (i.e., all dimensions must
be either `1`, or the same as the corresponding `labels` dimension).
metrics_collections: An optional list of collections that
`root_mean_squared_error` should be added to.
updates_collections: An optional list of collections that `update_op` should
be added to.
name: An optional variable_scope name.
Returns:
root_mean_squared_error: A `Tensor` representing the current mean, the value
of `total` divided by `count`.
update_op: An operation that increments the `total` and `count` variables
appropriately and whose value matches `root_mean_squared_error`.
Raises:
ValueError: If `predictions` and `labels` have mismatched shapes, or if
`weights` is not `None` and its shape doesn't match `predictions`, or if
either `metrics_collections` or `updates_collections` are not a list or
tuple.
"""
labels, predictions, weights = _remove_squeezable_dimensions(
labels, predictions, weights)
predictions.get_shape().assert_is_compatible_with(labels.get_shape())
value_tensor, update_op = mean_squared_error(
labels, predictions, weights, None, None,
name or 'root_mean_squared_error')
rmse = math_ops.sqrt(value_tensor)
with ops.control_dependencies([update_op]):
update_op = math_ops.sqrt(update_op)
if metrics_collections:
ops.add_to_collections(metrics_collections, rmse)
if updates_collections:
ops.add_to_collections(updates_collections, update_op)
return rmse, update_op
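# Illustrative sketch for `root_mean_squared_error` (hypothetical values, not
# part of the implementation): reusing the `mean_squared_error` sketch above
# (labels = [1., 2., 3.], predictions = [1., 3., 5.]), the accumulated MSE is
# 5 / 3, so this metric converges to sqrt(5 / 3) ~= 1.291.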
def sensitivity_at_specificity(
labels, predictions, specificity, weights=None, num_thresholds=200,
metrics_collections=None, updates_collections=None, name=None):
"""Computes the specificity at a given sensitivity.
The `sensitivity_at_specificity` function creates four local
variables, `true_positives`, `true_negatives`, `false_positives` and
`false_negatives` that are used to compute the sensitivity at the given
specificity value. The threshold for the given specificity value is computed
and used to evaluate the corresponding sensitivity.
For estimation of the metric over a stream of data, the function creates an
`update_op` operation that updates these variables and returns the
`sensitivity`. `update_op` increments the `true_positives`, `true_negatives`,
`false_positives` and `false_negatives` counts with the weight of each case
found in the `predictions` and `labels`.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
For additional information about specificity and sensitivity, see the
following: https://en.wikipedia.org/wiki/Sensitivity_and_specificity
Args:
labels: A `bool` `Tensor` whose shape matches `predictions`.
predictions: A floating point `Tensor` of arbitrary shape and whose values
are in the range `[0, 1]`.
specificity: A scalar value in range `[0, 1]`.
weights: Optional `Tensor` whose rank is either 0, or the same rank as
`labels`, and must be broadcastable to `labels` (i.e., all dimensions must
be either `1`, or the same as the corresponding `labels` dimension).
num_thresholds: The number of thresholds to use for matching the given
specificity.
metrics_collections: An optional list of collections that `sensitivity`
should be added to.
updates_collections: An optional list of collections that `update_op` should
be added to.
name: An optional variable_scope name.
Returns:
sensitivity: A scalar `Tensor` representing the sensitivity at the given
`specificity` value.
update_op: An operation that increments the `true_positives`,
`true_negatives`, `false_positives` and `false_negatives` variables
appropriately and whose value matches `sensitivity`.
Raises:
ValueError: If `predictions` and `labels` have mismatched shapes, if
`weights` is not `None` and its shape doesn't match `predictions`, or if
`specificity` is not between 0 and 1, or if either `metrics_collections`
or `updates_collections` are not a list or tuple.
"""
if specificity < 0 or specificity > 1:
raise ValueError('`specificity` must be in the range [0, 1].')
with variable_scope.variable_scope(name, 'sensitivity_at_specificity',
(predictions, labels, weights)):
kepsilon = 1e-7 # to account for floating point imprecisions
thresholds = [(i + 1) * 1.0 / (num_thresholds - 1)
for i in range(num_thresholds-2)]
thresholds = [0.0 - kepsilon] + thresholds + [1.0 + kepsilon]
values, update_ops = _confusion_matrix_at_thresholds(
labels, predictions, thresholds, weights)
tp = values['tp']
fn = values['fn']
tn = values['tn']
fp = values['fp']
def compute_sensitivity_at_specificity(name):
specificities = math_ops.div(tn, tn + fp + kepsilon)
tf_index = math_ops.argmin(math_ops.abs(specificities - specificity), 0)
tf_index = math_ops.cast(tf_index, dtypes.int32)
# Now, we have the implicit threshold, so compute the sensitivity:
return math_ops.div(tp[tf_index],
tp[tf_index] + fn[tf_index] + kepsilon,
name)
sensitivity = compute_sensitivity_at_specificity('value')
with ops.control_dependencies(update_ops.values()):
update_op = compute_sensitivity_at_specificity('update_op')
if metrics_collections:
ops.add_to_collections(metrics_collections, sensitivity)
if updates_collections:
ops.add_to_collections(updates_collections, update_op)
return sensitivity, update_op
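# Illustrative sketch for `sensitivity_at_specificity` (hypothetical values,
# not part of the implementation): the confusion counts are evaluated on a
# grid of `num_thresholds` thresholds spanning [0 - epsilon, 1 + epsilon]. For
# each threshold the specificity tn / (tn + fp) is computed; the threshold
# whose specificity is closest to the requested value is picked via argmin, and
# the sensitivity tp / (tp + fn) at that same threshold is reported. For
# example, if the grid specificities near the target were [..., 0.88, 0.91, ...]
# and the requested specificity is 0.9, the 0.91 threshold is selected (it is
# closer than 0.88) and its sensitivity is returned. A finer grid (larger
# `num_thresholds`) trades compute for a tighter match to the requested
# specificity.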
def _expand_and_tile(tensor, multiple, dim=0, name=None):
"""Slice `tensor` shape in 2, then tile along the sliced dimension.
A new dimension is inserted in shape of `tensor` before `dim`, then values are
tiled `multiple` times along the new dimension.
Args:
tensor: Input `Tensor` or `SparseTensor`.
multiple: Integer, number of times to tile.
dim: Integer, dimension along which to tile.
name: Name of operation.
Returns:
`Tensor` result of expanding and tiling `tensor`.
Raises:
ValueError: if `multiple` is less than 1, or `dim` is not in
`[-rank(tensor), rank(tensor)]`.
"""
if multiple < 1:
raise ValueError('Invalid multiple %s, must be > 0.' % multiple)
with ops.name_scope(
name, 'expand_and_tile', (tensor, multiple, dim)) as scope:
# Sparse.
tensor = sparse_tensor.convert_to_tensor_or_sparse_tensor(tensor)
if isinstance(tensor, sparse_tensor.SparseTensor):
if dim < 0:
expand_dims = array_ops.reshape(
array_ops.size(tensor.dense_shape) + dim, [1])
else:
expand_dims = [dim]
expanded_shape = array_ops.concat_v2(
(array_ops.slice(tensor.dense_shape, [0], expand_dims), [1],
array_ops.slice(tensor.dense_shape, expand_dims, [-1])),
0,
name='expanded_shape')
expanded = sparse_ops.sparse_reshape(
tensor, shape=expanded_shape, name='expand')
if multiple == 1:
return expanded
return sparse_ops.sparse_concat(
dim - 1 if dim < 0 else dim, [expanded] * multiple, name=scope)
# Dense.
expanded = array_ops.expand_dims(
tensor, dim if (dim >= 0) else (dim - 1), name='expand')
if multiple == 1:
return expanded
ones = array_ops.ones_like(array_ops.shape(tensor))
tile_multiples = array_ops.concat_v2(
(ones[:dim], (multiple,), ones[dim:]), 0, name='multiples')
return array_ops.tile(expanded, tile_multiples, name=scope)
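# Illustrative sketch for `_expand_and_tile` (hypothetical values, not part of
# the implementation): for a dense tensor of shape [2, 3],
# _expand_and_tile(tensor, multiple=4, dim=1) first expands to shape [2, 1, 3]
# and then tiles along the new axis, producing shape [2, 4, 3]. The sparse
# branch performs the equivalent reshape and concat on the SparseTensor's
# dense_shape.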
def _num_relevant(labels, k):
"""Computes number of relevant values for each row in labels.
For labels with shape [D1, ... DN, num_labels], this is the minimum of
`num_labels` and `k`.
Args:
labels: `int64` `Tensor` or `SparseTensor` with shape
[D1, ... DN, num_labels], where N >= 1 and num_labels is the number of
target classes for the associated prediction. Commonly, N=1 and `labels`
has shape [batch_size, num_labels].
k: Integer, k for @k metric.
Returns:
Integer `Tensor` of shape [D1, ... DN], where each value is the number of
relevant values for that row.
Raises:
ValueError: if inputs have invalid dtypes or values.
"""
if k < 1:
raise ValueError('Invalid k=%s.' % k)
with ops.name_scope(None, 'num_relevant', (labels,)) as scope:
# For SparseTensor, calculate separate count for each row.
labels = sparse_tensor.convert_to_tensor_or_sparse_tensor(labels)
if isinstance(labels, sparse_tensor.SparseTensor):
return math_ops.minimum(sets.set_size(labels), k, name=scope)
# For dense Tensor, calculate scalar count based on last dimension, and
# tile across labels shape.
labels_shape = array_ops.shape(labels)
labels_size = labels_shape[-1]
num_relevant_scalar = math_ops.minimum(labels_size, k)
return array_ops.fill(labels_shape[0:-1], num_relevant_scalar, name=scope)
def _sparse_average_precision_at_k(labels, predictions, k):
"""Computes average precision@k of predictions with respect to sparse labels.
From en.wikipedia.org/wiki/Information_retrieval#Average_precision, formula
for each row is:
AveP = sum_{i=1...k} P_{i} * rel_{i} / num_relevant_items
A "row" is the elements in dimension [D1, ... DN] of `predictions`, `labels`,
and the result `Tensors`. In the common case, this is [batch_size]. Each row
of the results contains the average precision for that row.
Internally, a `top_k` operation computes a `Tensor` indicating the top `k`
`predictions`. Set operations applied to `top_k` and `labels` calculate the
true positives, which are used to calculate the precision ("P_{i}" term,
above).
Args:
labels: `int64` `Tensor` or `SparseTensor` with shape
[D1, ... DN, num_labels] or [D1, ... DN], where the latter implies
num_labels=1. N >= 1 and num_labels is the number of target classes for
the associated prediction. Commonly, N=1 and `labels` has shape
[batch_size, num_labels]. [D1, ... DN] must match `predictions`. Values
should be in range [0, num_classes), where num_classes is the last
dimension of `predictions`. Values outside this range are ignored.
predictions: Float `Tensor` with shape [D1, ... DN, num_classes] where
N >= 1. Commonly, N=1 and `predictions` has shape
[batch size, num_classes]. The final dimension contains the logit values
for each class. [D1, ... DN] must match `labels`.
k: Integer, k for @k metric. This will calculate an average precision for
range `[1,k]`, as documented above.
Returns:
`float64` `Tensor` of shape [D1, ... DN], where each value is the average
precision for that row.
Raises:
ValueError: if k is invalid.
"""
if k < 1:
raise ValueError('Invalid k=%s.' % k)
with ops.name_scope(
None, 'average_precision', (predictions, labels, k)) as scope:
labels = _maybe_expand_labels(labels, predictions)
# Calculate top k indices to produce [D1, ... DN, k] tensor.
_, predictions_idx = nn.top_k(predictions, k)
predictions_idx = math_ops.to_int64(predictions_idx, name='predictions_idx')
# Expand dims to produce [D1, ... DN, k, 1] tensor. This gives us a separate
# prediction for each k, so we can calculate separate true positive values
# for each k.
predictions_idx_per_k = array_ops.expand_dims(
predictions_idx, -1, name='predictions_idx_per_k')
# Replicate labels k times to produce [D1, ... DN, k, num_labels] tensor.
labels_per_k = _expand_and_tile(
labels, multiple=k, dim=-1, name='labels_per_k')
# The following tensors are all of shape [D1, ... DN, k], containing values
# per row, per k value.
# `relevant_per_k` (int32) - Relevance indicator, 1 if the prediction at
# that k value is correct, 0 otherwise. This is the "rel_{i}" term from
# the formula above.
# `tp_per_k` (int32) - True positive counts.
# `retrieved_per_k` (int32) - Number of predicted values at each k. This is
# the precision denominator.
# `precision_per_k` (float64) - Precision at each k. This is the "P_{i}"
# term from the formula above.
# `relevant_precision_per_k` (float64) - Relevant precisions; i.e.,
# precisions at all k for which relevance indicator is true.
relevant_per_k = _sparse_true_positive_at_k(
labels_per_k, predictions_idx_per_k, name='relevant_per_k')
tp_per_k = math_ops.cumsum(relevant_per_k, axis=-1, name='tp_per_k')
retrieved_per_k = math_ops.cumsum(
array_ops.ones_like(relevant_per_k), axis=-1, name='retrieved_per_k')
precision_per_k = math_ops.div(
math_ops.to_double(tp_per_k), math_ops.to_double(retrieved_per_k),
name='precision_per_k')
relevant_precision_per_k = math_ops.multiply(
precision_per_k, math_ops.to_double(relevant_per_k),
name='relevant_precision_per_k')
# Reduce along k dimension to get the sum, yielding a [D1, ... DN] tensor.
precision_sum = math_ops.reduce_sum(
relevant_precision_per_k, reduction_indices=(-1,), name='precision_sum')
# Divide by number of relevant items to get average precision. These are
# the "num_relevant_items" and "AveP" terms from the formula above.
num_relevant_items = math_ops.to_double(_num_relevant(labels, k))
return math_ops.div(precision_sum, num_relevant_items, name=scope)
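# Illustrative sketch of the AveP formula above (hypothetical values, not part
# of the implementation): for one row with label set {1, 3}, k=3 and top-3
# predicted classes [3, 0, 1]:
#   relevance rel_i   = [1, 0, 1]
#   precision P_i     = [1/1, 1/2, 2/3]
#   num_relevant      = min(2, 3) = 2
#   AveP = (1 * 1 + 0 * 1/2 + 1 * 2/3) / 2 = 5/6 ~= 0.833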
def sparse_average_precision_at_k(labels,
predictions,
k,
weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Computes average precision@k of predictions with respect to sparse labels.
`sparse_average_precision_at_k` creates two local variables,
`average_precision_at_<k>/total` and `average_precision_at_<k>/max`, that
are used to compute the frequency. This frequency is ultimately returned as
`average_precision_at_<k>`: an idempotent operation that simply divides
`average_precision_at_<k>/total` by `average_precision_at_<k>/max`.
For estimation of the metric over a stream of data, the function creates an
`update_op` operation that updates these variables and returns the
`average_precision_at_<k>`. Internally, a `top_k` operation computes a `Tensor`
indicating the top `k` `predictions` per example, from which the per-example
average precision is computed and weighted by `weights`. Then `update_op`
increments `average_precision_at_<k>/total` with the sum of these values and
`average_precision_at_<k>/max` with the sum of the weights (or the batch size,
when `weights` is `None`).
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
labels: `int64` `Tensor` or `SparseTensor` with shape
[D1, ... DN, num_labels] or [D1, ... DN], where the latter implies
num_labels=1. N >= 1 and num_labels is the number of target classes for
the associated prediction. Commonly, N=1 and `labels` has shape
[batch_size, num_labels]. [D1, ... DN] must match `predictions`. Values
should be in range [0, num_classes), where num_classes is the last
dimension of `predictions`. Values outside this range are ignored.
predictions: Float `Tensor` with shape [D1, ... DN, num_classes] where
N >= 1. Commonly, N=1 and `predictions` has shape
[batch size, num_classes]. The final dimension contains the logit values
for each class. [D1, ... DN] must match `labels`.
k: Integer, k for @k metric. This will calculate an average precision for
range `[1,k]`, as documented above.
weights: `Tensor` whose rank is either 0, or n-1, where n is the rank of
`labels`. If the latter, it must be broadcastable to `labels` (i.e., all
dimensions must be either `1`, or the same as the corresponding `labels`
dimension).
metrics_collections: An optional list of collections that values should
be added to.
updates_collections: An optional list of collections that updates should
be added to.
name: Name of new update operation, and namespace for other dependent ops.
Returns:
mean_average_precision: Scalar `float64` `Tensor` with the mean average
precision values.
update: `Operation` that increments variables appropriately, and whose
value matches `metric`.
"""
with ops.name_scope(
name, _at_k_name('average_precision', k),
(predictions, labels, weights)) as scope:
# Calculate per-example average precision, and apply weights.
average_precision = _sparse_average_precision_at_k(
predictions=predictions, labels=labels, k=k)
if weights is not None:
weights = _broadcast_weights(
math_ops.to_double(weights), average_precision)
average_precision = math_ops.multiply(average_precision, weights)
# Create accumulation variables and update ops for max average precision and
# total average precision.
with ops.name_scope(None, 'max', (average_precision,)) as max_scope:
# `max` is the max possible precision. Since max for any row is 1.0:
# - For the unweighted case, this is just the number of rows.
# - For the weighted case, it's the sum of the weights broadcast across
# `average_precision` rows.
max_var = _local_variable(
array_ops.zeros([], dtype=dtypes.float64), name=max_scope)
if weights is None:
batch_max = math_ops.to_double(
array_ops.size(average_precision, name='batch_max'))
else:
batch_max = math_ops.reduce_sum(weights, name='batch_max')
max_update = state_ops.assign_add(max_var, batch_max, name='update')
with ops.name_scope(None, 'total', (average_precision,)) as total_scope:
total_var = _local_variable(
array_ops.zeros([], dtype=dtypes.float64), name=total_scope)
batch_total = math_ops.reduce_sum(average_precision, name='batch_total')
total_update = state_ops.assign_add(total_var, batch_total, name='update')
# Divide total by max to get mean, for both vars and the update ops.
mean_average_precision = _safe_scalar_div(total_var, max_var, name='mean')
update = _safe_scalar_div(total_update, max_update, name=scope)
if metrics_collections:
ops.add_to_collections(metrics_collections, mean_average_precision)
if updates_collections:
ops.add_to_collections(updates_collections, update)
return mean_average_precision, update
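# --- Illustrative usage sketch (not part of the original module) -------------
# How the value/update pair returned above is typically driven over batches in
# TF 1.x. The toy tensors and the Session/local_variables_initializer
# boilerplate are assumptions added for illustration only.
def _sparse_average_precision_usage_sketch():
  import tensorflow as tf
  labels = tf.constant([[2], [0]], dtype=tf.int64)
  predictions = tf.constant([[0.1, 0.2, 0.7],
                             [0.8, 0.1, 0.1]])
  mean_ap, update_op = sparse_average_precision_at_k(labels, predictions, k=2)
  with tf.Session() as sess:
    sess.run(tf.local_variables_initializer())  # metric accumulators are local vars
    sess.run(update_op)                         # run once per batch
    return sess.run(mean_ap)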
def _sparse_false_positive_at_k(labels,
predictions_idx,
class_id=None,
weights=None):
"""Calculates false positives for precision@k.
If `class_id` is specified, calculate binary true positives for `class_id`
only.
If `class_id` is not specified, calculate metrics for `k` predicted vs
  `n` label classes, where `n` is the 2nd dimension of `labels`.
Args:
labels: `int64` `Tensor` or `SparseTensor` with shape
[D1, ... DN, num_labels], where N >= 1 and num_labels is the number of
target classes for the associated prediction. Commonly, N=1 and `labels`
has shape [batch_size, num_labels]. [D1, ... DN] must match
`predictions_idx`.
predictions_idx: 1-D or higher `int64` `Tensor` with last dimension `k`,
top `k` predicted classes. For rank `n`, the first `n-1` dimensions must
match `labels`.
class_id: Class for which we want binary metrics.
weights: `Tensor` whose rank is either 0, or n-1, where n is the rank of
`labels`. If the latter, it must be broadcastable to `labels` (i.e., all
dimensions must be either `1`, or the same as the corresponding `labels`
dimension).
Returns:
A [D1, ... DN] `Tensor` of false positive counts.
"""
with ops.name_scope(
None, 'false_positives', (predictions_idx, labels, weights)):
labels, predictions_idx = _maybe_select_class_id(labels,
predictions_idx,
class_id)
fp = sets.set_size(sets.set_difference(
predictions_idx, labels, aminusb=True))
fp = math_ops.to_double(fp)
if weights is not None:
with ops.control_dependencies((_assert_weights_rank(weights, fp),)):
weights = math_ops.to_double(weights)
        fp = math_ops.multiply(fp, weights)
return fp
def _streaming_sparse_false_positive_at_k(labels,
predictions_idx,
k=None,
class_id=None,
weights=None,
name=None):
"""Calculates weighted per step false positives for precision@k.
If `class_id` is specified, calculate binary true positives for `class_id`
only.
If `class_id` is not specified, calculate metrics for `k` predicted vs
`n` label classes, where `n` is the 2nd dimension of `labels`.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
labels: `int64` `Tensor` or `SparseTensor` with shape
[D1, ... DN, num_labels], where N >= 1 and num_labels is the number of
target classes for the associated prediction. Commonly, N=1 and `labels`
has shape [batch_size, num_labels]. [D1, ... DN] must match
`predictions_idx`.
predictions_idx: 1-D or higher `int64` `Tensor` with last dimension `k`,
top `k` predicted classes. For rank `n`, the first `n-1` dimensions must
match `labels`.
k: Integer, k for @k metric. This is only used for default op name.
class_id: Class for which we want binary metrics.
weights: `Tensor` whose rank is either 0, or n-1, where n is the rank of
`labels`. If the latter, it must be broadcastable to `labels` (i.e., all
dimensions must be either `1`, or the same as the corresponding `labels`
dimension).
name: Name of new variable, and namespace for other dependent ops.
Returns:
A tuple of `Variable` and update `Operation`.
Raises:
    ValueError: If `weights` is not `None` and has an incompatible shape.
"""
with ops.name_scope(
name, _at_k_name('false_positive', k, class_id=class_id),
(predictions_idx, labels, weights)) as scope:
fp = _sparse_false_positive_at_k(
predictions_idx=predictions_idx, labels=labels, class_id=class_id,
weights=weights)
batch_total_fp = math_ops.to_double(math_ops.reduce_sum(fp))
var = _local_variable(array_ops.zeros([], dtype=dtypes.float64), name=scope)
return var, state_ops.assign_add(var, batch_total_fp, name='update')
def sparse_precision_at_k(labels,
predictions,
k,
class_id=None,
weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Computes precision@k of the predictions with respect to sparse labels.
If `class_id` is specified, we calculate precision by considering only the
entries in the batch for which `class_id` is in the top-k highest
`predictions`, and computing the fraction of them for which `class_id` is
indeed a correct label.
If `class_id` is not specified, we'll calculate precision as how often on
average a class among the top-k classes with the highest predicted values
of a batch entry is correct and can be found in the label for that entry.
`sparse_precision_at_k` creates two local variables,
`true_positive_at_<k>` and `false_positive_at_<k>`, that are used to compute
the precision@k frequency. This frequency is ultimately returned as
`precision_at_<k>`: an idempotent operation that simply divides
`true_positive_at_<k>` by total (`true_positive_at_<k>` +
`false_positive_at_<k>`).
For estimation of the metric over a stream of data, the function creates an
`update_op` operation that updates these variables and returns the
`precision_at_<k>`. Internally, a `top_k` operation computes a `Tensor`
indicating the top `k` `predictions`. Set operations applied to `top_k` and
`labels` calculate the true positives and false positives weighted by
`weights`. Then `update_op` increments `true_positive_at_<k>` and
`false_positive_at_<k>` using these values.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
labels: `int64` `Tensor` or `SparseTensor` with shape
[D1, ... DN, num_labels] or [D1, ... DN], where the latter implies
num_labels=1. N >= 1 and num_labels is the number of target classes for
the associated prediction. Commonly, N=1 and `labels` has shape
[batch_size, num_labels]. [D1, ... DN] must match `predictions`. Values
should be in range [0, num_classes), where num_classes is the last
dimension of `predictions`. Values outside this range are ignored.
predictions: Float `Tensor` with shape [D1, ... DN, num_classes] where
N >= 1. Commonly, N=1 and predictions has shape [batch size, num_classes].
The final dimension contains the logit values for each class. [D1, ... DN]
must match `labels`.
k: Integer, k for @k metric.
class_id: Integer class ID for which we want binary metrics. This should be
      in range [0, num_classes), where num_classes is the last dimension of
`predictions`. If `class_id` is outside this range, the method returns
NAN.
weights: `Tensor` whose rank is either 0, or n-1, where n is the rank of
`labels`. If the latter, it must be broadcastable to `labels` (i.e., all
dimensions must be either `1`, or the same as the corresponding `labels`
dimension).
metrics_collections: An optional list of collections that values should
be added to.
updates_collections: An optional list of collections that updates should
be added to.
name: Name of new update operation, and namespace for other dependent ops.
Returns:
precision: Scalar `float64` `Tensor` with the value of `true_positives`
divided by the sum of `true_positives` and `false_positives`.
update_op: `Operation` that increments `true_positives` and
`false_positives` variables appropriately, and whose value matches
`precision`.
Raises:
ValueError: If `weights` is not `None` and its shape doesn't match
`predictions`, or if either `metrics_collections` or `updates_collections`
are not a list or tuple.
"""
with ops.name_scope(name, _at_k_name('precision', k, class_id=class_id),
(predictions, labels, weights)) as scope:
labels = _maybe_expand_labels(labels, predictions)
_, top_k_idx = nn.top_k(predictions, k)
top_k_idx = math_ops.to_int64(top_k_idx)
tp, tp_update = _streaming_sparse_true_positive_at_k(
predictions_idx=top_k_idx, labels=labels, k=k, class_id=class_id,
weights=weights)
fp, fp_update = _streaming_sparse_false_positive_at_k(
predictions_idx=top_k_idx, labels=labels, k=k, class_id=class_id,
weights=weights)
metric = math_ops.div(tp, math_ops.add(tp, fp), name=scope)
update = math_ops.div(
tp_update, math_ops.add(tp_update, fp_update), name='update')
if metrics_collections:
ops.add_to_collections(metrics_collections, metric)
if updates_collections:
ops.add_to_collections(updates_collections, update)
return metric, update
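# --- Illustrative usage sketch (not part of the original module) -------------
# Streaming precision@k restricted to a single class id, mirroring the
# docstring above. The toy tensors and session boilerplate are assumptions
# added for illustration only.
def _sparse_precision_at_k_usage_sketch():
  import tensorflow as tf
  labels = tf.constant([[2], [1]], dtype=tf.int64)
  predictions = tf.constant([[0.1, 0.2, 0.7],
                             [0.1, 0.8, 0.1]])
  precision, update_op = sparse_precision_at_k(
      labels, predictions, k=1, class_id=2)
  with tf.Session() as sess:
    sess.run(tf.local_variables_initializer())
    sess.run(update_op)
    return sess.run(precision)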
def specificity_at_sensitivity(
labels, predictions, sensitivity, weights=None, num_thresholds=200,
metrics_collections=None, updates_collections=None, name=None):
"""Computes the specificity at a given sensitivity.
The `specificity_at_sensitivity` function creates four local
variables, `true_positives`, `true_negatives`, `false_positives` and
`false_negatives` that are used to compute the specificity at the given
sensitivity value. The threshold for the given sensitivity value is computed
and used to evaluate the corresponding specificity.
For estimation of the metric over a stream of data, the function creates an
`update_op` operation that updates these variables and returns the
`specificity`. `update_op` increments the `true_positives`, `true_negatives`,
`false_positives` and `false_negatives` counts with the weight of each case
found in the `predictions` and `labels`.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
For additional information about specificity and sensitivity, see the
following: https://en.wikipedia.org/wiki/Sensitivity_and_specificity
Args:
labels: A `bool` `Tensor` whose shape matches `predictions`.
predictions: A floating point `Tensor` of arbitrary shape and whose values
are in the range `[0, 1]`.
sensitivity: A scalar value in range `[0, 1]`.
weights: Optional `Tensor` whose rank is either 0, or the same rank as
`labels`, and must be broadcastable to `labels` (i.e., all dimensions must
be either `1`, or the same as the corresponding `labels` dimension).
num_thresholds: The number of thresholds to use for matching the given
sensitivity.
metrics_collections: An optional list of collections that `specificity`
should be added to.
updates_collections: An optional list of collections that `update_op` should
be added to.
name: An optional variable_scope name.
Returns:
specificity: A scalar `Tensor` representing the specificity at the given
      `sensitivity` value.
update_op: An operation that increments the `true_positives`,
`true_negatives`, `false_positives` and `false_negatives` variables
appropriately and whose value matches `specificity`.
Raises:
ValueError: If `predictions` and `labels` have mismatched shapes, if
`weights` is not `None` and its shape doesn't match `predictions`, or if
`sensitivity` is not between 0 and 1, or if either `metrics_collections`
or `updates_collections` are not a list or tuple.
"""
if sensitivity < 0 or sensitivity > 1:
raise ValueError('`sensitivity` must be in the range [0, 1].')
with variable_scope.variable_scope(name, 'specificity_at_sensitivity',
(predictions, labels, weights)):
kepsilon = 1e-7 # to account for floating point imprecisions
thresholds = [(i + 1) * 1.0 / (num_thresholds - 1)
for i in range(num_thresholds-2)]
thresholds = [0.0 - kepsilon] + thresholds + [1.0 - kepsilon]
values, update_ops = _confusion_matrix_at_thresholds(
labels, predictions, thresholds, weights)
tp = values['tp']
fn = values['fn']
tn = values['tn']
fp = values['fp']
def compute_specificity_at_sensitivity(name):
"""Computes the specificity at the given sensitivity.
Args:
name: The name of the operation.
Returns:
The specificity using the aggregated values.
"""
sensitivities = math_ops.div(tp, tp + fn + kepsilon)
# We'll need to use this trick until tf.argmax allows us to specify
# whether we should use the first or last index in case of ties.
min_val = math_ops.reduce_min(math_ops.abs(sensitivities - sensitivity))
indices_at_minval = math_ops.equal(
math_ops.abs(sensitivities - sensitivity), min_val)
indices_at_minval = math_ops.to_int64(indices_at_minval)
indices_at_minval = math_ops.cumsum(indices_at_minval)
tf_index = math_ops.argmax(indices_at_minval, 0)
tf_index = math_ops.cast(tf_index, dtypes.int32)
# Now, we have the implicit threshold, so compute the specificity:
return math_ops.div(tn[tf_index],
tn[tf_index] + fp[tf_index] + kepsilon,
name)
specificity = compute_specificity_at_sensitivity('value')
with ops.control_dependencies(update_ops.values()):
update_op = compute_specificity_at_sensitivity('update_op')
if metrics_collections:
ops.add_to_collections(metrics_collections, specificity)
if updates_collections:
ops.add_to_collections(updates_collections, update_op)
return specificity, update_op
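# --- Illustrative usage sketch (not part of the original module) -------------
# Reading specificity at 90% sensitivity from the streaming counts above. The
# toy tensors and session handling are assumptions added for illustration.
def _specificity_at_sensitivity_usage_sketch():
  import tensorflow as tf
  labels = tf.constant([True, False, True, False])
  predictions = tf.constant([0.9, 0.6, 0.7, 0.2])
  specificity, update_op = specificity_at_sensitivity(
      labels, predictions, sensitivity=0.9)
  with tf.Session() as sess:
    sess.run(tf.local_variables_initializer())
    sess.run(update_op)
    return sess.run(specificity)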
| [
"[email protected]"
] | |
86c9b86af42f911790b3c9a9171e90b2a3a6d5ab | 6b8c3974d3ce5f7841e51dcb406666c0c5d92155 | /heat/heat/tests/mistral/test_mistral_cron_trigger.py | f675a7962563de8a3c4633e373b00471eb251b3b | [
"Apache-2.0"
] | permissive | swjang/cloudexchange | bbbf78a2e7444c1070a55378092c17e8ecb27059 | c06ed54f38daeff23166fb0940b27df74c70fc3e | refs/heads/master | 2020-12-29T03:18:43.076887 | 2015-09-21T07:13:22 | 2015-09-21T07:13:22 | 42,845,532 | 1 | 1 | null | 2015-09-21T07:13:22 | 2015-09-21T05:19:35 | C++ | UTF-8 | Python | false | false | 4,428 | py | #
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from heat.common import template_format
from heat.engine import resources
from heat.engine.resources.openstack.mistral import cron_trigger
from heat.engine import scheduler
from heat.tests import common
from heat.tests import utils
stack_template = '''
heat_template_version: 2013-05-23
resources:
cron_trigger:
type: OS::Mistral::CronTrigger
properties:
name: my_cron_trigger
pattern: "* * 0 * *"
workflow: {'name': 'get_first_glance_image', 'input': {} }
count: 3
first_time: "2015-04-08 06:20"
'''
class FakeCronTrigger(object):
def __init__(self, name):
self.name = name
self.next_execution_time = '2015-03-01 00:00:00'
self.remaining_executions = 3
self._data = {'trigger': 'info'}
class MistralCronTriggerTestResource(cron_trigger.CronTrigger):
@classmethod
def is_service_available(cls, context):
return True
class MistralCronTriggerTest(common.HeatTestCase):
def setUp(self):
super(MistralCronTriggerTest, self).setUp()
resources.initialise()
utils.setup_dummy_db()
self.ctx = utils.dummy_context()
t = template_format.parse(stack_template)
self.stack = utils.parse_stack(t)
resource_defns = self.stack.t.resource_definitions(self.stack)
self.rsrc_defn = resource_defns['cron_trigger']
self.client = mock.Mock()
self.patchobject(MistralCronTriggerTestResource, 'client',
return_value=self.client)
def _create_resource(self, name, snippet, stack):
ct = MistralCronTriggerTestResource(name, snippet, stack)
self.client.cron_triggers.create.return_value = FakeCronTrigger(
'my_cron_trigger')
self.client.cron_triggers.get.return_value = FakeCronTrigger(
'my_cron_trigger')
scheduler.TaskRunner(ct.create)()
args = self.client.cron_triggers.create.call_args[1]
self.assertEqual('* * 0 * *', args['pattern'])
self.assertEqual('get_first_glance_image', args['workflow_name'])
self.assertEqual({}, args['workflow_input'])
self.assertEqual('2015-04-08 06:20', args['first_time'])
self.assertEqual(3, args['count'])
self.assertEqual('my_cron_trigger', ct.resource_id)
return ct
def test_create(self):
ct = self._create_resource('trigger', self.rsrc_defn, self.stack)
expected_state = (ct.CREATE, ct.COMPLETE)
self.assertEqual(expected_state, ct.state)
def test_resource_mapping(self):
mapping = cron_trigger.resource_mapping()
self.assertEqual(1, len(mapping))
self.assertEqual(cron_trigger.CronTrigger,
mapping['OS::Mistral::CronTrigger'])
def test_attributes(self):
ct = self._create_resource('trigger', self.rsrc_defn, self.stack)
self.assertEqual('2015-03-01 00:00:00',
ct.FnGetAtt('next_execution_time'))
self.assertEqual(3, ct.FnGetAtt('remaining_executions'))
self.assertEqual({'trigger': 'info'}, ct.FnGetAtt('show'))
def test_delete(self):
ct = self._create_resource('trigger', self.rsrc_defn, self.stack)
scheduler.TaskRunner(ct.delete)()
self.assertEqual((ct.DELETE, ct.COMPLETE), ct.state)
self.client.cron_triggers.delete.assert_called_once_with(
ct.resource_id)
def test_delete_not_found(self):
ct = self._create_resource('trigger', self.rsrc_defn, self.stack)
self.client.cron_triggers.delete.side_effect = (
self.client.mistral_base.APIException(error_code=404))
scheduler.TaskRunner(ct.delete)()
self.assertEqual((ct.DELETE, ct.COMPLETE), ct.state)
self.client.cron_triggers.delete.assert_called_once_with(
ct.resource_id)
| [
"[email protected]"
] | |
8ebc06ef30a7723e5ccdac18c322c90c8fac7485 | dc9cef0a654ba5bf613501c0f11c110231aace18 | /easysteps/assign.py | afa552e6b99e0a85abdb5c6063de71be522fad1a | [] | no_license | mxt123/getstartpython | 36e297b9b20a9a7942e87f691f13c965c417ca7b | 70bf3f4775967449fc7590bbfa470fb6ff5c3b64 | refs/heads/master | 2021-01-10T19:03:08.035051 | 2019-01-20T16:29:58 | 2019-01-20T16:29:58 | 40,926,639 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 61 | py | a = 8
b = 4
print('assign values:\t\t','a = ',a,'\tb =',b)
| [
"="
] | = |
aba17ae60493775a1cbdd3dc41b31bb2ee9afbcd | 669e9241b02bdaa303fbc2fd4023b90d4d179a59 | /Cash Register/base.py | 272ceeded4ef34a761806bab212fe9e20d79a550 | [] | no_license | benjaminpotter/HatchProjects | 0854cf46ae7c3781468116a5d63b703dd54ae68c | 7f6a948d3474c755d071751b725c059e6c7f3553 | refs/heads/master | 2022-01-28T16:58:03.449073 | 2019-08-16T13:47:30 | 2019-08-16T13:47:30 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,146 | py | def setup():
size(400, 400)
bg_color = color(120, 120, 120)
# Denominations for making change, largest to smallest (0.05 = nickel).
bills_and_coins = [100, 50, 20, 10, 5, 2, 1, 0.25, 0.10, 0.05]
def draw_cash_register():
    noStroke()
    fill(50, 50, 50)
    rect(50, 50, 300, 220, 0)
    fill(225, 225, 225)
    rect(87, 130, 225, 35, 0)
    fill(225, 225, 225)
    rect(87, 210, 225, 35, 0)
    fill(225, 225, 225)
    textSize(20)
    text("Cash Register", 135, 85)
    textSize(14)
    text("Cost", 90, 120)
    text("Tendered", 90, 200)
def draw():
    background(bg_color)
    draw_cash_register()
    noLoop()
    # prompt() returns text, so convert to numbers before doing arithmetic.
    cost = float(prompt("Input cost", ""))
    tendered = float(prompt("Input tendered amount", ""))
    # Round the change owed to the nearest 5 cents.
    change = round((tendered - cost) / 0.05) * 0.05
    fill(0, 0, 0)
    text(str(cost), 95, 152)
    text(str(tendered), 95, 232)
    # Greedily count how many of each denomination make up the change.
    res = []
    for i in range(0, 10):
        count = 0
        while change >= bills_and_coins[i]:
            count += 1
            change = round(change - bills_and_coins[i], 2)
        res.append(count)
    answer = ""
    for i in range(0, 10):
        if res[i] > 0:
            answer += str(res[i]) + "x $" + str(bills_and_coins[i]) + "\n"
    text(answer, 70, 325)
| [
"[email protected]"
] | |
4077f2c69b513dfbb09a6279cfbd85f564d84ab5 | 9f9ec8bebfe8b7ac8e60dcaa23153abe976585e6 | /dataCommons/postingAPI/tasks.py | 1ade94032b03b28ce48f6ba157446b40d942de40 | [] | no_license | erikwestra/data-commons | bbf32cd9b4b64ace28bcb049190d8272a23ed891 | e3ed33fad104157ff505bb02bc7ae981f8ba3b11 | refs/heads/master | 2020-04-11T12:03:19.996644 | 2013-02-14T17:08:24 | 2013-02-14T17:08:24 | 8,188,655 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,043 | py | """ dataCommons.postingAPI.tasks
This module implements the background tasks run by the Celery task queuing
system.
"""
import logging
from celery import task
from dataCommons.shared.lib.decorators import print_exceptions_to_stdout
from dataCommons.postingAPI import postingProcessor
#############################################################################
logger = logging.getLogger(__name__)
#############################################################################
@task()
@print_exceptions_to_stdout
def process_postings(parsed_postings):
""" Process the given set of postings.
Note that we simply call the posting processor to do all the work, but
        wrap it up in a Celery task so that the work is queued, and use the
        'print_exceptions_to_stdout' decorator so that any exceptions will be
        logged to stdout rather than written to a separate worker log file (which won't
exist when the system is deployed to Heroku).
"""
postingProcessor.process_postings(parsed_postings)
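# Illustrative sketch (not part of the original module): how API code would
# queue this work instead of calling the posting processor inline. The example
# posting structure below is an assumption; anything the posting processor
# accepts and Celery can serialise would do.
def _enqueue_postings_example():
    parsed_postings = [{"source": "example", "heading": "test posting"}]
    # .delay() hands the work to the Celery queue rather than running it here.
    return process_postings.delay(parsed_postings)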
| [
"[email protected]"
] | |
06c1ae158672d9a651a94f2a70faf79cce3232d5 | fff54b01b46cef0bbc70a6469c88c01c82af5a57 | /network/analyzer/libpcap/actions.py | 33f3310aad9a49bdb10846b36e483c86e64304b9 | [] | no_license | LimeLinux/Packages | e51deae6c0d1406e31f06caa5aaa7749466bef0b | d492e075d8b051df68b98c315ad0628e33a8fac4 | refs/heads/master | 2021-01-11T12:37:22.150638 | 2018-08-30T18:24:32 | 2018-08-30T18:24:32 | 77,054,292 | 5 | 19 | null | 2018-02-02T17:24:06 | 2016-12-21T13:33:45 | Python | UTF-8 | Python | false | false | 853 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Licensed under the GNU General Public License, version 3.
# See the file http://www.gnu.org/licenses/gpl.txt
from pisi.actionsapi import autotools
from pisi.actionsapi import pisitools
from pisi.actionsapi import shelltools
from pisi.actionsapi import get
def setup():
shelltools.export("CFLAGS", "%s -fPIC" % get.CFLAGS())
autotools.autoreconf("-vfi")
autotools.configure("--prefix=/usr \
--enable-ipv6")
def build():
autotools.make("all")
autotools.make("shared")
def install():
autotools.rawInstall('DESTDIR="%s"' % get.installDIR())
# No static libs
pisitools.remove("/usr/lib/*.a")
# it is needed for ppd etc.
pisitools.insinto("/usr/include", "pcap-int.h")
pisitools.dodoc("CHANGES", "CREDITS", "README*", "VERSION", "TODO")
| [
"[email protected]"
] | |
0c3ba85209268e4419995bf3b0e59c8dc4ee5a21 | 1a4bc1a11fdb3f714f22f5e0e826b47aa0569de2 | /projects/project02/tests/q1_1.py | 61dd4039d3bf8affab16b12c4665cbd175e3a540 | [] | no_license | taylorgibson/ma4110-fa21 | 201af7a044fd7d99140c68c48817306c18479610 | a306e1b6e7516def7de968781f6c8c21deebeaf5 | refs/heads/main | 2023-09-05T21:31:44.259079 | 2021-11-18T17:42:15 | 2021-11-18T17:42:15 | 395,439,687 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 243 | py | test = { 'name': 'q1_1',
'points': None,
'suites': [{'cases': [{'code': '>>> type(all_unique_causes) in [np.ndarray, list]\nTrue', 'hidden': False, 'locked': False}], 'scored': True, 'setup': '', 'teardown': '', 'type': 'doctest'}]}
| [
"[email protected]"
] | |
872fea6e32fd13b181d5aee64e9711014a9df0d1 | cbc5e26bb47ae69e80a3649c90275becf25ce404 | /xlsxwriter/test/styles/test_write_num_fmts.py | 1445d94a1fdf72bcc2de972b5c46a5085b48cd0d | [
"BSD-2-Clause-Views",
"BSD-3-Clause",
"MIT"
] | permissive | mst-solar-car/kicad-bom-generator | c3549409c3139f787ad28391372b5cb03791694a | 2aae905056d06f3d25343a8d784049c141d05640 | refs/heads/master | 2021-09-07T14:00:40.759486 | 2018-02-23T23:21:13 | 2018-02-23T23:21:13 | 107,868,801 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,007 | py | ###############################################################################
#
# Tests for XlsxWriter.
#
# Copyright (c), 2013-2017, John McNamara, [email protected]
#
import unittest
from ...compatibility import StringIO
from ...styles import Styles
from ...format import Format
class TestWriteNumFmts(unittest.TestCase):
"""
Test the Styles _write_num_fmts() method.
"""
def setUp(self):
self.fh = StringIO()
self.styles = Styles()
self.styles._set_filehandle(self.fh)
def test_write_num_fmts(self):
"""Test the _write_num_fmts() method"""
xf_format = Format()
xf_format.num_format_index = 164
xf_format.set_num_format('#,##0.0')
self.styles._set_style_properties([[xf_format], None, 0, 1, 0, 0, [], []])
self.styles._write_num_fmts()
exp = """<numFmts count="1"><numFmt numFmtId="164" formatCode="#,##0.0"/></numFmts>"""
got = self.fh.getvalue()
self.assertEqual(got, exp)
| [
"[email protected]"
] | |
84a8d7587333beacb530ca0dc5bd8c795e393d3a | 5cb29431ecbba7b61463c67749794b54201907e1 | /pelicide/runner.py | 688ba9e582ad94aeb3dcda92e73a6b397e49a1ed | [] | no_license | iksteen/pelicide | 6a9a88a1fe2df6acb271c465942820ab76ccfa82 | 5b8a6a919257840fafdcab5c886c81a72b18a6c0 | refs/heads/master | 2021-05-16T02:39:52.910803 | 2016-01-06T11:40:51 | 2016-01-06T11:40:51 | 34,100,676 | 16 | 2 | null | 2019-10-22T23:49:10 | 2015-04-17T06:35:34 | HTML | UTF-8 | Python | false | false | 2,958 | py | import json
import os
import sys
from twisted.internet import defer, protocol
class RunnerProtocol(protocol.ProcessProtocol):
def __init__(self, callback):
self.callback = callback
self.seq = 0
self.buffer = ''
self.pending = set()
def sendCommand(self, command, args=None):
self.seq += 1
self.pending.add(self.seq)
self.transport.write('%d %s %s\n' % (self.seq, command, json.dumps(args)))
return self.seq
def outReceived(self, data):
self.buffer += data
while '\n' in self.buffer:
response, self.buffer = self.buffer.split('\n', 1)
self.process_response(response)
def process_response(self, response):
seq, result, args = response.split(' ', 2)
seq = int(seq)
if seq in self.pending:
self.pending.remove(seq)
args = json.loads(args)
if self.callback is not None:
self.callback(seq, result == '+', args)
def processExited(self, reason):
pending, self.pending = self.pending, set()
while pending:
self.callback(pending.pop(), False, reason)
class Runner(object):
def __init__(self, python, config_path, settings, **kwargs):
reactor = kwargs.get('reactor')
if reactor is None:
from twisted.internet import reactor
self.reactor = reactor
self.python = python
self.config_path = config_path
self.init_settings = settings
self.settings = None
self.d = None
self.pending = {}
def start(self):
self.d = defer.Deferred()
runner = os.path.join(os.path.dirname(__file__), 'pelican-runner.py')
protocol = RunnerProtocol(self.responseReceived)
self.transport = self.reactor.spawnProcess(
protocol,
self.python,
[
self.python,
runner,
self.config_path,
json.dumps(self.init_settings),
],
env=None,
childFDs={
0: 'w',
1: 'r',
2: sys.stderr.fileno(),
},
)
return self.d
def restart(self):
return self.command('quit').addCallback(lambda _: self.start())
def command(self, command, args=None):
if self.transport.proto is None:
self.start()
command_id = self.transport.proto.sendCommand(command, args)
d = defer.Deferred()
self.pending[command_id] = d
return d
def responseReceived(self, command_seq, success, args):
if command_seq == 0:
self.settings = args
if self.d:
self.d.callback(args)
self.d = None
return
d = self.pending.pop(command_seq)
if success:
d.callback(args)
else:
d.errback(RuntimeError(args))
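# Illustrative sketch (not part of the original module): the line-oriented
# request/response framing used between Runner and pelican-runner.py, mirrored
# with plain strings. The command name and payload below are assumptions made
# for illustration only.
def _wire_format_example():
    request = '%d %s %s\n' % (1, 'setting', json.dumps('SITENAME'))
    reply = '1 + "My Site"'
    seq, result, args = reply.split(' ', 2)
    return request, int(seq), result == '+', json.loads(args)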
| [
"[email protected]"
] | |
4824a2ce716332ac9003208ee8e5242619b3e1e8 | fb8f496a0ea0a416a5ee214ca308953392fb08ff | /predict.py | cf0c635b320c85db397e915d78c9e0d26067baed | [] | no_license | Noba1anc3/CH-NER | 32ba6138d4344052844698ee917a5910997ec3ed | 82f3ab07b2bf7a5b7a4debe6c074c9801b8f2fcf | refs/heads/master | 2023-04-05T05:52:29.132299 | 2020-04-05T10:10:36 | 2020-04-05T10:10:36 | 252,642,674 | 2 | 0 | null | 2023-03-24T23:27:38 | 2020-04-03T05:43:07 | HTML | UTF-8 | Python | false | false | 9,658 | py | import numpy as np
import tensorflow as tf
from tensorflow.contrib.crf import viterbi_decode
from copy import deepcopy
import re
from model import BiLSTM_CRF
from utils import train_utils
from data_process import read_dictionary
import utils.config as cf
# Parameter / configuration section
params = cf.ConfigPredict('predict', 'config/params.conf')
params.load_config()
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
config.gpu_options.per_process_gpu_memory_fraction = 0.3
def predict_one_batch(model, ses, seqs):
"""
Created by jty
    Prediction engine: feeds sentence ids and the restored model parameters to the
    network and returns the predicted label ids.
    :param ses: the session to use
    :param seqs: sentence ids
    :return: label_list, seq_len_list -- predicted label ids and sentence lengths
"""
feed_dict, seq_len_list = train_utils.get_feed_dict(model, seqs, drop_keep=1.0)
    # transition_params holds the transition probabilities computed by crf_log_likelihood
log_its, transition_params = ses.run([model.log_its, model.transition_params],
feed_dict=feed_dict)
label_list = []
    # CRF decoding (viterbi_decode) is used by default
for log_it, seq_len in zip(log_its, seq_len_list):
vtb_seq, _ = viterbi_decode(log_it[:seq_len], transition_params)
label_list.append(vtb_seq)
return label_list, seq_len_list
def demo_one(model, ses, sent, batch_size, vocab, shuffle, tag2label):
"""
Created by jty
    Takes sentences, obtains the predicted label ids and converts them to tag labels.
    :param model: the saved model
    :param ses: the session to use
    :param sent: input sentences to run entity extraction on
    :param batch_size: number of sentences predicted per batch
    :param vocab: word2id dictionary
    :param shuffle: defaults to False
    :return: tag -- the predicted tag sequence
"""
    # batch_yield yields the id of every character of the input sentences and maps each label to its tag2label value
label_list = []
for seqs, labels in train_utils.batch_yield(sent, batch_size, vocab, tag2label, shuffle):
label_list_, _ = predict_one_batch(model, ses, seqs)
label_list.extend(label_list_)
label2tag = {}
for tag, label in tag2label.items():
label2tag[label] = tag if label != 0 else label
tag = [label2tag[label] for label in label_list[0]]
return tag
"""
Created by jty
Data post-processing.
Given the predicted tags and the sentence, return the corresponding characters,
i.e. extract the person (PER), location (LOC) and organization (ORG) names.
"""
def get_entity(tag_seq, char_seq):
PER = get_PER_entity(tag_seq, char_seq)
LOC = get_LOC_entity(tag_seq, char_seq)
ORG = get_ORG_entity(tag_seq, char_seq)
return PER, LOC, ORG
# Output the characters corresponding to PER entities
def get_PER_entity(tag_seq, char_seq):
length = len(char_seq)
PER = []
for i, (char, tag) in enumerate(zip(char_seq, tag_seq)):
if tag == 'B-PER':
if 'per' in locals().keys():
PER.append(per)
del per
per = char
if i + 1 == length:
per = per.strip()
per.replace('\n', '')
per.replace('\r', '')
PER.append(per)
if tag == 'I-PER':
per += char
if i + 1 == length:
per = per.strip()
per.replace('\n', '')
per.replace('\r', '')
PER.append(per)
if tag not in ['I-PER', 'B-PER']:
if 'per' in locals().keys():
per=per.strip()
per.replace('\n', '')
per.replace('\r', '')
PER.append(per)
del per
continue
return PER
# Output the characters corresponding to LOC entities
def get_LOC_entity(tag_seq, char_seq):
length = len(char_seq)
LOC = []
for i, (char, tag) in enumerate(zip(char_seq, tag_seq)):
if tag == 'B-LOC':
if 'loc' in locals().keys():
LOC.append(loc)
del loc
loc = char
if i + 1 == length:
loc = loc.strip()
loc.replace('\n', '')
loc.replace('\r', '')
LOC.append(loc)
if tag == 'I-LOC':
loc += char
if i + 1 == length:
loc = loc.strip()
loc.replace('\n', '')
loc.replace('\r', '')
LOC.append(loc)
if tag not in ['I-LOC', 'B-LOC']:
if 'loc' in locals().keys():
loc = loc.strip()
loc.replace('\n', '')
loc.replace('\r', '')
LOC.append(loc)
del loc
continue
return LOC
# Output the characters corresponding to ORG entities
def get_ORG_entity(tag_seq, char_seq):
length = len(char_seq)
ORG = []
for i, (char, tag) in enumerate(zip(char_seq, tag_seq)):
if tag == 'B-ORG':
if 'org' in locals().keys():
ORG.append(org)
del org
org = char
if i + 1 == length:
org = org.strip()
org = org.replace('\n', '')
org = org.replace('\r', '')
ORG.append(org)
if tag == 'I-ORG':
org += char
if i + 1 == length:
org = org.strip()
org = org.replace('\n', '')
org = org.replace('\r', '')
ORG.append(org)
if tag not in ['I-ORG', 'B-ORG']:
if 'org' in locals().keys():
org = org.strip()
org = org.replace('\n', '')
org = org.replace('\r', '')
ORG.append(org)
del org
continue
return ORG
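# Illustrative sketch (not part of the original module): how the extractors
# above turn a BIO tag sequence back into entity strings. The toy characters
# and tags are assumptions added for illustration only.
def _get_entity_example():
    chars = ['周', '恩', '来', '在', '北', '京']
    tags = ['B-PER', 'I-PER', 'I-PER', 'O', 'B-LOC', 'I-LOC']
    return get_entity(tags, chars)  # -> (['周恩来'], ['北京'], [])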
def predict(model, batch_size, vocab, tag2label, demo_sent, shuffle=False):
"""
Created by jty
    Top-level prediction function.
    Input: the saved model, the number of sentences per prediction batch, the word2id
    dictionary, and the sentence entered in the UI for entity extraction.
    Output: the entity extraction result.
    :param model: the saved model
    :param batch_size: number of sentences predicted per batch
    :param vocab: word2id dictionary
    :param shuffle: defaults to False
"""
s_id = 1
sent_id = {}
ckpt_file = tf.train.latest_checkpoint(params.model_path)
print(ckpt_file)
saver = tf.train.Saver()
with tf.Session(config=config) as sess:
# print('============= demo =============')
saver.restore(sess, ckpt_file)
# print('Please input your sentence:')
# demo_sent = input()
#demo_sent = '我在北京上北京大学'
if demo_sent == '' or demo_sent.isspace():
print('See you next time!')
else:
            # Assign a position id to every character of the sentence
for word in demo_sent:
sent_id[s_id] = word
s_id += 1
demo_sent = list(demo_sent.strip())
demo_data = [(demo_sent, ['O'] * len(demo_sent))]
tag = demo_one(model, sess, demo_data, batch_size, vocab, shuffle, tag2label)
PER, LOC, ORG = get_entity(tag, demo_sent)
PER_local = {}
LOC_local = {}
ORG_local = {}
p_id = 1
l_id = 1
o_id = 1
PER_mess = {}
LOC_mess = {}
ORG_mess = {}
            # Extract length and position information for PER entities
i = 1
for word in PER:
PER_local['item'] = word
PER_local['tag'] = 'PER'
PER_local['length'] = len(word)
for j in range(i, len(sent_id)):
if word[0] == sent_id[j]:
PER_local['offset'] = j
i = j + len(word)
break
PER_mess[p_id] = deepcopy(PER_local)
p_id += 1
            # Extract length and position information for LOC entities
i = 1
for word in LOC:
LOC_local['item'] = word
LOC_local['tag'] = 'LOC'
LOC_local['length'] = len(word)
for j in range(i, len(sent_id)):
if word[0] == sent_id[j]:
LOC_local['offset'] = j
i = j + len(word)
break
LOC_mess[l_id] = deepcopy(LOC_local)
l_id += 1
            # Extract length and position information for ORG entities
i = 1
for word in ORG:
ORG_local['item'] = word
ORG_local['tag'] = 'ORG'
ORG_local['length'] = len(word)
for j in range(i, len(sent_id)):
if word[0] == sent_id[j]:
ORG_local['offset'] = j
i = j + len(word)
break
ORG_mess[o_id] = deepcopy(ORG_local)
o_id += 1
#print(PER_mess, LOC_mess, ORG_mess)
return PER_mess, LOC_mess, ORG_mess
def run(demo_sent, flag=False):
embedding_mat = np.random.uniform(-0.25, 0.25, (len(read_dictionary(params.vocab_path)), params.embedding_dim))
embedding_mat = np.float32(embedding_mat)
embeddings = embedding_mat
num_tags = len(params.tag2label)
summary_path = "logs"
model = BiLSTM_CRF(embeddings, params.update_embedding, params.hidden_dim, num_tags, params.clip, summary_path,
params.optimizer)
model.build_graph()
PER_mess, LOC_mess, ORG_mess = predict(model, params.batch_size, read_dictionary(params.vocab_path), params.tag2label, demo_sent)
if flag:
return PER_mess, LOC_mess, ORG_mess
#run('我在北京上北京大学,周恩来是中国总理,我喜欢北京。我在清华大学,毛泽东是中国主席,他去过苏联。') | [
"[email protected]"
] | |
b2d5d706f2f349be03c0c756e348e26475a9300b | ea44a1681e276b3cc85226b53de217f6096a05d4 | /fhir/resources/tests/test_specimen.py | bc833ac238d30077aeb6dc9c5c72f3cf16919802 | [
"BSD-3-Clause"
] | permissive | stephanie-howson/fhir.resources | 69d2a5a6b0fe4387b82e984255b24027b37985c4 | 126e9dc6e14541f74e69ef7c1a0b8a74aa981905 | refs/heads/master | 2020-05-04T22:24:49.826585 | 2019-06-27T15:51:26 | 2019-06-27T15:51:26 | 179,511,579 | 0 | 0 | null | 2019-04-04T14:14:53 | 2019-04-04T14:14:52 | null | UTF-8 | Python | false | false | 13,742 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Generated from FHIR 4.0.0-a53ec6ee1b on 2019-01-17.
# 2019, SMART Health IT.
import os
import pytest
import io
import unittest
import json
from .fixtures import force_bytes
from .. import specimen
from ..fhirdate import FHIRDate
@pytest.mark.usefixtures("base_settings")
class SpecimenTests(unittest.TestCase):
def instantiate_from(self, filename):
datadir = os.environ.get('FHIR_UNITTEST_DATADIR') or ''
with io.open(os.path.join(datadir, filename), 'r', encoding='utf-8') as handle:
js = json.load(handle)
self.assertEqual("Specimen", js["resourceType"])
return specimen.Specimen(js)
def testSpecimen1(self):
inst = self.instantiate_from("specimen-example-serum.json")
self.assertIsNotNone(inst, "Must have instantiated a Specimen instance")
self.implSpecimen1(inst)
js = inst.as_json()
self.assertEqual("Specimen", js["resourceType"])
inst2 = specimen.Specimen(js)
self.implSpecimen1(inst2)
def implSpecimen1(self, inst):
self.assertEqual(force_bytes(inst.accessionIdentifier.system), force_bytes("http://acme.com/labs/accession-ids"))
self.assertEqual(force_bytes(inst.accessionIdentifier.value), force_bytes("20150816-00124"))
self.assertEqual(inst.collection.collectedDateTime.date, FHIRDate("2015-08-16T06:40:17Z").date)
self.assertEqual(inst.collection.collectedDateTime.as_json(), "2015-08-16T06:40:17Z")
self.assertEqual(force_bytes(inst.container[0].type.coding[0].code), force_bytes("SST"))
self.assertEqual(force_bytes(inst.container[0].type.coding[0].display), force_bytes("Serum Separator Tube"))
self.assertEqual(force_bytes(inst.container[0].type.coding[0].system), force_bytes("http://acme.com/labs"))
self.assertEqual(force_bytes(inst.id), force_bytes("sst"))
self.assertEqual(force_bytes(inst.meta.tag[0].code), force_bytes("HTEST"))
self.assertEqual(force_bytes(inst.meta.tag[0].display), force_bytes("test health data"))
self.assertEqual(force_bytes(inst.meta.tag[0].system), force_bytes("http://terminology.hl7.org/CodeSystem/v3-ActReason"))
self.assertEqual(force_bytes(inst.text.status), force_bytes("generated"))
self.assertEqual(force_bytes(inst.type.coding[0].code), force_bytes("119364003"))
self.assertEqual(force_bytes(inst.type.coding[0].display), force_bytes("Serum sample"))
self.assertEqual(force_bytes(inst.type.coding[0].system), force_bytes("http://snomed.info/sct"))
def testSpecimen2(self):
inst = self.instantiate_from("specimen-example-pooled-serum.json")
self.assertIsNotNone(inst, "Must have instantiated a Specimen instance")
self.implSpecimen2(inst)
js = inst.as_json()
self.assertEqual("Specimen", js["resourceType"])
inst2 = specimen.Specimen(js)
self.implSpecimen2(inst2)
def implSpecimen2(self, inst):
self.assertEqual(force_bytes(inst.accessionIdentifier.system), force_bytes("https://vetmed.iastate.edu/vdl"))
self.assertEqual(force_bytes(inst.accessionIdentifier.value), force_bytes("20171120-1234"))
self.assertEqual(inst.collection.collectedDateTime.date, FHIRDate("2017-11-14").date)
self.assertEqual(inst.collection.collectedDateTime.as_json(), "2017-11-14")
self.assertEqual(force_bytes(inst.container[0].type.coding[0].code), force_bytes("RTT"))
self.assertEqual(force_bytes(inst.container[0].type.coding[0].display), force_bytes("Red Top Tube"))
self.assertEqual(force_bytes(inst.container[0].type.coding[0].system), force_bytes("https://vetmed.iastate.edu/vdl"))
self.assertEqual(force_bytes(inst.container[0].type.text), force_bytes("Red Top Blood Collection Tube"))
self.assertEqual(force_bytes(inst.id), force_bytes("pooled-serum"))
self.assertEqual(force_bytes(inst.meta.tag[0].code), force_bytes("HTEST"))
self.assertEqual(force_bytes(inst.meta.tag[0].display), force_bytes("test health data"))
self.assertEqual(force_bytes(inst.meta.tag[0].system), force_bytes("http://terminology.hl7.org/CodeSystem/v3-ActReason"))
self.assertEqual(force_bytes(inst.note[0].text), force_bytes("Pooled serum sample from 30 individuals"))
self.assertEqual(force_bytes(inst.text.status), force_bytes("generated"))
self.assertEqual(force_bytes(inst.type.coding[0].code), force_bytes("Serum sample, pooled"))
self.assertEqual(force_bytes(inst.type.coding[0].display), force_bytes("Serum sample, pooled"))
self.assertEqual(force_bytes(inst.type.coding[0].system), force_bytes("https://vetmed.iastate.edu/vdl"))
self.assertEqual(force_bytes(inst.type.text), force_bytes("Pooled serum sample"))
def testSpecimen3(self):
inst = self.instantiate_from("specimen-example.json")
self.assertIsNotNone(inst, "Must have instantiated a Specimen instance")
self.implSpecimen3(inst)
js = inst.as_json()
self.assertEqual("Specimen", js["resourceType"])
inst2 = specimen.Specimen(js)
self.implSpecimen3(inst2)
def implSpecimen3(self, inst):
self.assertEqual(force_bytes(inst.accessionIdentifier.system), force_bytes("http://lab.acme.org/specimens/2011"))
self.assertEqual(force_bytes(inst.accessionIdentifier.value), force_bytes("X352356"))
self.assertEqual(force_bytes(inst.collection.bodySite.coding[0].code), force_bytes("49852007"))
self.assertEqual(force_bytes(inst.collection.bodySite.coding[0].display), force_bytes("Structure of median cubital vein (body structure)"))
self.assertEqual(force_bytes(inst.collection.bodySite.coding[0].system), force_bytes("http://snomed.info/sct"))
self.assertEqual(force_bytes(inst.collection.bodySite.text), force_bytes("Right median cubital vein"))
self.assertEqual(inst.collection.collectedDateTime.date, FHIRDate("2011-05-30T06:15:00Z").date)
self.assertEqual(inst.collection.collectedDateTime.as_json(), "2011-05-30T06:15:00Z")
self.assertEqual(force_bytes(inst.collection.method.coding[0].code), force_bytes("LNV"))
self.assertEqual(force_bytes(inst.collection.method.coding[0].system), force_bytes("http://terminology.hl7.org/CodeSystem/v2-0488"))
self.assertEqual(force_bytes(inst.collection.quantity.unit), force_bytes("mL"))
self.assertEqual(inst.collection.quantity.value, 6)
self.assertEqual(force_bytes(inst.contained[0].id), force_bytes("hep"))
self.assertEqual(force_bytes(inst.container[0].capacity.unit), force_bytes("mL"))
self.assertEqual(inst.container[0].capacity.value, 10)
self.assertEqual(force_bytes(inst.container[0].description), force_bytes("Green Gel tube"))
self.assertEqual(force_bytes(inst.container[0].identifier[0].value), force_bytes("48736-15394-75465"))
self.assertEqual(force_bytes(inst.container[0].specimenQuantity.unit), force_bytes("mL"))
self.assertEqual(inst.container[0].specimenQuantity.value, 6)
self.assertEqual(force_bytes(inst.container[0].type.text), force_bytes("Vacutainer"))
self.assertEqual(force_bytes(inst.id), force_bytes("101"))
self.assertEqual(force_bytes(inst.identifier[0].system), force_bytes("http://ehr.acme.org/identifiers/collections"))
self.assertEqual(force_bytes(inst.identifier[0].value), force_bytes("23234352356"))
self.assertEqual(force_bytes(inst.meta.tag[0].code), force_bytes("HTEST"))
self.assertEqual(force_bytes(inst.meta.tag[0].display), force_bytes("test health data"))
self.assertEqual(force_bytes(inst.meta.tag[0].system), force_bytes("http://terminology.hl7.org/CodeSystem/v3-ActReason"))
self.assertEqual(force_bytes(inst.note[0].text), force_bytes("Specimen is grossly lipemic"))
self.assertEqual(inst.receivedTime.date, FHIRDate("2011-03-04T07:03:00Z").date)
self.assertEqual(inst.receivedTime.as_json(), "2011-03-04T07:03:00Z")
self.assertEqual(force_bytes(inst.status), force_bytes("available"))
self.assertEqual(force_bytes(inst.text.status), force_bytes("generated"))
self.assertEqual(force_bytes(inst.type.coding[0].code), force_bytes("122555007"))
self.assertEqual(force_bytes(inst.type.coding[0].display), force_bytes("Venous blood specimen"))
self.assertEqual(force_bytes(inst.type.coding[0].system), force_bytes("http://snomed.info/sct"))
def testSpecimen4(self):
inst = self.instantiate_from("specimen-example-urine.json")
self.assertIsNotNone(inst, "Must have instantiated a Specimen instance")
self.implSpecimen4(inst)
js = inst.as_json()
self.assertEqual("Specimen", js["resourceType"])
inst2 = specimen.Specimen(js)
self.implSpecimen4(inst2)
def implSpecimen4(self, inst):
self.assertEqual(force_bytes(inst.accessionIdentifier.system), force_bytes("http://lab.acme.org/specimens/2015"))
self.assertEqual(force_bytes(inst.accessionIdentifier.value), force_bytes("X352356"))
self.assertEqual(inst.collection.collectedDateTime.date, FHIRDate("2015-08-18T07:03:00Z").date)
self.assertEqual(inst.collection.collectedDateTime.as_json(), "2015-08-18T07:03:00Z")
self.assertEqual(force_bytes(inst.container[0].capacity.unit), force_bytes("mls"))
self.assertEqual(inst.container[0].capacity.value, 50)
self.assertEqual(force_bytes(inst.container[0].specimenQuantity.unit), force_bytes("mls"))
self.assertEqual(inst.container[0].specimenQuantity.value, 10)
self.assertEqual(force_bytes(inst.container[0].type.text), force_bytes("Non-sterile specimen container"))
self.assertEqual(force_bytes(inst.id), force_bytes("vma-urine"))
self.assertEqual(force_bytes(inst.meta.tag[0].code), force_bytes("HTEST"))
self.assertEqual(force_bytes(inst.meta.tag[0].display), force_bytes("test health data"))
self.assertEqual(force_bytes(inst.meta.tag[0].system), force_bytes("http://terminology.hl7.org/CodeSystem/v3-ActReason"))
self.assertEqual(force_bytes(inst.processing[0].description), force_bytes("Acidify to pH < 3.0 with 6 N HCl."))
self.assertEqual(force_bytes(inst.processing[0].procedure.coding[0].code), force_bytes("ACID"))
self.assertEqual(force_bytes(inst.processing[0].procedure.coding[0].system), force_bytes("http://terminology.hl7.org/CodeSystem/v2-0373"))
self.assertEqual(inst.processing[0].timeDateTime.date, FHIRDate("2015-08-18T08:10:00Z").date)
self.assertEqual(inst.processing[0].timeDateTime.as_json(), "2015-08-18T08:10:00Z")
self.assertEqual(inst.receivedTime.date, FHIRDate("2015-08-18T07:03:00Z").date)
self.assertEqual(inst.receivedTime.as_json(), "2015-08-18T07:03:00Z")
self.assertEqual(force_bytes(inst.status), force_bytes("available"))
self.assertEqual(force_bytes(inst.text.status), force_bytes("generated"))
self.assertEqual(force_bytes(inst.type.coding[0].code), force_bytes("RANDU"))
self.assertEqual(force_bytes(inst.type.coding[0].display), force_bytes("Urine, Random"))
self.assertEqual(force_bytes(inst.type.coding[0].system), force_bytes("http://terminology.hl7.org/CodeSystem/v2-0487"))
def testSpecimen5(self):
inst = self.instantiate_from("specimen-example-isolate.json")
self.assertIsNotNone(inst, "Must have instantiated a Specimen instance")
self.implSpecimen5(inst)
js = inst.as_json()
self.assertEqual("Specimen", js["resourceType"])
inst2 = specimen.Specimen(js)
self.implSpecimen5(inst2)
def implSpecimen5(self, inst):
self.assertEqual(force_bytes(inst.accessionIdentifier.system), force_bytes("http://lab.acme.org/specimens/2011"))
self.assertEqual(force_bytes(inst.accessionIdentifier.value), force_bytes("X352356-ISO1"))
self.assertEqual(inst.collection.collectedDateTime.date, FHIRDate("2015-08-16T07:03:00Z").date)
self.assertEqual(inst.collection.collectedDateTime.as_json(), "2015-08-16T07:03:00Z")
self.assertEqual(force_bytes(inst.collection.method.coding[0].code), force_bytes("BAP"))
self.assertEqual(force_bytes(inst.collection.method.coding[0].system), force_bytes("http://terminology.hl7.org/CodeSystem/v2-0488"))
self.assertEqual(force_bytes(inst.contained[0].id), force_bytes("stool"))
self.assertEqual(force_bytes(inst.id), force_bytes("isolate"))
self.assertEqual(force_bytes(inst.meta.tag[0].code), force_bytes("HTEST"))
self.assertEqual(force_bytes(inst.meta.tag[0].display), force_bytes("test health data"))
self.assertEqual(force_bytes(inst.meta.tag[0].system), force_bytes("http://terminology.hl7.org/CodeSystem/v3-ActReason"))
self.assertEqual(force_bytes(inst.note[0].text), force_bytes("Patient dropped off specimen"))
self.assertEqual(inst.receivedTime.date, FHIRDate("2015-08-18T07:03:00Z").date)
self.assertEqual(inst.receivedTime.as_json(), "2015-08-18T07:03:00Z")
self.assertEqual(force_bytes(inst.status), force_bytes("available"))
self.assertEqual(force_bytes(inst.text.status), force_bytes("generated"))
self.assertEqual(force_bytes(inst.type.coding[0].code), force_bytes("429951000124103"))
self.assertEqual(force_bytes(inst.type.coding[0].display), force_bytes("Bacterial isolate specimen"))
self.assertEqual(force_bytes(inst.type.coding[0].system), force_bytes("http://snomed.info/sct"))
| [
"[email protected]"
] | |
842861fff402dd09ab5a9f2cfa8e490d1b842ff7 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03696/s669152949.py | 6ffe317a8dbd760aeaf50530708cdf2ac5bd88ad | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 162 | py | N=int(input())
S=input()
A=0 # count of '(' still unmatched (need this many ')' appended)
B=0 # count of ')' with no matching '(' (need this many '(' prepended)
for s in S:
if s=='(':
A+=1
elif s==')' and A>0:
A-=1
else:
B+=1
print('('*B+S+')'*A) | [
"[email protected]"
] | |
3d710bc135abe8b2f05ed2957e0e98fee42cc9fd | 287a10a6f28517003728aebbd7ed097af13a8d18 | /exp_170727_neuroproof/pipeline.py | 569d93db858b5d701c70c85039b00a3960cdd7df | [] | no_license | jhennies/nmmp_experiments | 05c78c6068fa0f6df0002e57529cd7b8d1daa456 | 7c06a5818a5176fa0dc17a42ba22b2262239d91d | refs/heads/master | 2021-04-06T12:01:25.537695 | 2017-09-22T13:42:51 | 2017-09-22T13:42:51 | 83,289,168 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,023 | py |
import os
import vigra
import cPickle as pickle
import numpy as np
import sys
sys.path.append(
'/export/home/jhennies/src/nature_methods_multicut_pipeline_devel/nature_methods_multicut_pipeline/software/')
from multicut_src import DataSet
from multicut_src import lifted_multicut_workflow
from multicut_src import load_dataset
from multicut_src import compute_false_merges
from multicut_src import resolve_merges_with_lifted_edges_global, resolve_merges_with_lifted_edges
from multicut_src import RandomForest
from multicut_src import ExperimentSettings
from multicut_src import merge_small_segments
import logging
logger = logging.getLogger(__name__)
def init_dataset(
meta_folder, name,
raw_filepath, raw_name,
probs_filepath, probs_name,
seg_filepath, seg_name,
gt_filepath=None, gt_name=None,
make_cutouts=False
):
# Init the dataset
ds = DataSet(meta_folder, name)
# Add data
ds.add_raw(raw_filepath, raw_name)
ds.add_input(probs_filepath, probs_name)
ds.add_seg(seg_filepath, seg_name)
if gt_filepath is not None:
ds.add_gt(gt_filepath, gt_name)
# add cutouts for lifted multicut training
if make_cutouts:
shape = ds.shape
z_offset = 10
ds.make_cutout([0, 0, 0], [shape[0], shape[1], z_offset])
ds.make_cutout([0, 0, z_offset], [shape[0], shape[1], shape[2] - z_offset])
ds.make_cutout([0, 0, shape[2] - z_offset], [shape[0], shape[1], shape[2]])
def init_datasets(
meta_folder, names,
raw_path, raw_files, raw_names,
probs_path, probs_files, probs_names,
seg_path, seg_files, seg_names,
gt_path, gt_files, gt_names,
make_cutouts=None
):
assert len(raw_files) == len(raw_names)
assert len(probs_files) == len(raw_files)
assert len(probs_names) == len(raw_files)
assert len(seg_files) == len(raw_files)
assert len(seg_names) == len(raw_files)
assert len(gt_files) == len(raw_files)
assert len(gt_names) == len(raw_files)
if make_cutouts is None:
make_cutouts = [False] * len(raw_files)
for idx, train_name in enumerate(names):
if not os.path.exists(os.path.join(meta_folder, train_name)):
print 'Dataset {} is being created ...'.format(train_name)
raw_file = raw_files[idx]
probs_file = probs_files[idx]
seg_file = seg_files[idx]
gt_file = gt_files[idx]
raw_name = raw_names[idx]
probs_name = probs_names[idx]
seg_name = seg_names[idx]
gt_name = gt_names[idx]
init_dataset(
meta_folder, train_name,
raw_path + raw_file, raw_name,
probs_path + probs_file, probs_name,
seg_path + seg_file, seg_name,
gt_filepath=gt_path + gt_file, gt_name=gt_name,
make_cutouts=make_cutouts[idx]
)
else:
print 'Training set {} exists, nothing to do.'.format(train_name)
def run_lifted_mc(
meta_folder,
train_folder,
ds_train_names,
ds_test_name,
save_path,
results_name,
pre_save_path=None
):
assert os.path.exists(os.path.split(save_path)[0]), "Please choose an existing folder to save your results"
merge_segments = True
compute_mc = True
if os.path.isfile(save_path): # Nothing to do
compute_mc = False
merge_segments = False
else: # Final result needs to be computed
if pre_save_path is not None:
if os.path.isfile(pre_save_path):
compute_mc = False
if compute_mc:
seg_id = 0
feature_list = ['raw', 'prob', 'reg']
feature_list_lifted = ['cluster', 'reg']
gamma = 2.
ds_train = [load_dataset(train_folder, name) for name in ds_train_names if name != ds_test_name]
ds_test = load_dataset(meta_folder, ds_test_name)
mc_nodes, _, _, _ = lifted_multicut_workflow(
ds_train, ds_test,
seg_id, seg_id,
feature_list, feature_list_lifted,
gamma=gamma
)
segmentation = ds_test.project_mc_result(seg_id, mc_nodes)
# Save in case sth in the following function goes wrong
if pre_save_path is not None:
vigra.writeHDF5(segmentation, pre_save_path, results_name, compression='gzip')
if merge_segments:
if not compute_mc:
assert pre_save_path is not None, 'Investigate code, this must not happen!'
segmentation = vigra.readHDF5(pre_save_path, results_name)
# # Relabel with connected components
# segmentation = vigra.analysis.labelVolume(segmentation.astype('uint32'))
# Merge small segments
segmentation = merge_small_segments(segmentation.astype('uint32'), 100)
# Store the final result
vigra.writeHDF5(segmentation, save_path, results_name, compression = 'gzip')
def find_false_merges(
ds_test_name,
ds_train_names,
meta_folder,
test_seg_path, test_seg_key,
train_segs_paths, train_segs_keys
):
ds_train = [load_dataset(meta_folder, name) for name in ds_train_names if name != ds_test_name]
ds_test = load_dataset(meta_folder, ds_test_name)
# Path folders
test_paths_cache_folder = os.path.join(meta_folder, ds_test_name, 'path_data')
train_paths_cache_folder = os.path.join(meta_folder, 'train_path_data')
# logger.info('Starting compute_false_merges...')
_, false_merge_probs, _ = compute_false_merges(
ds_train, ds_test,
train_segs_paths, train_segs_keys,
test_seg_path, test_seg_key,
test_paths_cache_folder,
train_paths_cache_folder
)
with open(os.path.join(test_paths_cache_folder, 'false_paths_predictions.pkl'), 'w') as f:
pickle.dump(false_merge_probs, f)
def resolve_false_merges(
ds_name, ds_names,
meta_folder, rf_cache_folder,
new_nodes_filepath,
pre_seg_filepath, pre_seg_key,
min_prob_thresh, max_prob_thresh,
exclude_objs_with_larger_thresh,
global_resolve=True
):
# Path folders
paths_cache_folder = os.path.join(meta_folder, ds_name, 'path_data')
# TODO Change here
weight_filepath = os.path.join(meta_folder, ds_name,
'probs_to_energies_0_z_16.0_0.5_rawprobreg.h5')
lifted_filepath = os.path.join(meta_folder, ds_name,
'lifted_probs_to_energies_0_3_0.5_2.0.h5')
ds_train = [load_dataset(meta_folder, name) for name in ds_names if name != ds_name]
rf_cache_name = 'rf_merges_%s' % '_'.join([ds.ds_name for ds in ds_train])
ds = load_dataset(meta_folder, ds_name)
seg_id = 0
path_data_filepath = os.path.join(paths_cache_folder, 'paths_ds_{}.h5'.format(ds_name))
# with open(os.path.join(paths_cache_folder, 'paths_ds_{}.pkl'.format(ds_name))) as f:
# path_data = pickle.load(f)
paths = vigra.readHDF5(path_data_filepath, 'all_paths')
if paths.size:
paths = np.array([path.reshape((len(path) / 3, 3)) for path in paths])
paths_to_objs = vigra.readHDF5(path_data_filepath, 'paths_to_objs')
with open(os.path.join(paths_cache_folder, 'false_paths_predictions.pkl')) as f:
false_merge_probs = pickle.load(f)
# Find objects where probability >= min_prob_thresh and <= max_prob_thresh
objs_with_prob_greater_thresh = np.unique(
np.array(paths_to_objs)[
np.logical_and(
false_merge_probs >= min_prob_thresh,
false_merge_probs <= max_prob_thresh
)
]
)
if exclude_objs_with_larger_thresh:
objs_to_exclude = np.unique(
np.array(paths_to_objs)[
false_merge_probs > max_prob_thresh
]
)
objs_with_prob_greater_thresh = np.setdiff1d(objs_with_prob_greater_thresh, objs_to_exclude)
# Extract all paths for each of the found objects
false_paths = {}
for obj in objs_with_prob_greater_thresh:
# print paths_to_objs == obj
false_paths[obj] = np.array(paths)[paths_to_objs == obj]
rf_filepath = os.path.join(rf_cache_folder, rf_cache_name)
# with open(rf_filepath) as f:
# path_rf = pickle.load(f)
path_rf = RandomForest.load_from_file(rf_filepath, 'rf', ExperimentSettings().n_threads)
mc_segmentation = vigra.readHDF5(pre_seg_filepath, pre_seg_key)
mc_weights_all = vigra.readHDF5(weight_filepath, "data")
lifted_weights_all = vigra.readHDF5(lifted_filepath, "data")
if global_resolve:
new_node_labels = resolve_merges_with_lifted_edges_global(
ds, seg_id,
false_paths,
path_rf,
mc_segmentation,
mc_weights_all,
paths_cache_folder=paths_cache_folder,
lifted_weights_all=lifted_weights_all
)
else:
new_node_labels = resolve_merges_with_lifted_edges(
ds, ds_train,
seg_id,
false_paths,
path_rf,
mc_segmentation,
mc_weights_all,
paths_cache_folder=paths_cache_folder,
lifted_weights_all=lifted_weights_all
)
with open(new_nodes_filepath, 'w') as f:
pickle.dump(new_node_labels, f)
def project_new_result(
ds_name, meta_folder,
new_nodes_filepath,
save_path, results_name
):
ds = load_dataset(meta_folder, ds_name)
seg_id = 0
# Load resolving result
with open(new_nodes_filepath) as f:
new_node_labels = pickle.load(f)
# project the result back to the volume
mc_seg = ds.project_mc_result(seg_id, new_node_labels)
# Write the result
    vigra.writeHDF5(mc_seg, save_path, results_name, compression='gzip')
import nifty_with_cplex.graph.rag as nrag
def project_resolved_objects_to_segmentation(
meta_folder, ds_name,
mc_seg_filepath, mc_seg_key,
new_nodes_filepath,
save_path, results_name
):
ds = load_dataset(meta_folder, ds_name)
seg_id = 0
mc_segmentation = vigra.readHDF5(mc_seg_filepath, mc_seg_key)
# Load resolving result
with open(new_nodes_filepath) as f:
resolved_objs = pickle.load(f)
rag = ds.rag(seg_id)
mc_labeling = nrag.gridRagAccumulateLabels(rag, mc_segmentation)
new_label_offset = np.max(mc_labeling) + 1
for obj in resolved_objs:
resolved_nodes = resolved_objs[obj]
for node_id in resolved_nodes:
mc_labeling[node_id] = new_label_offset + resolved_nodes[node_id]
new_label_offset += np.max(resolved_nodes.values()) + 1
mc_segmentation = nrag.projectScalarNodeDataToPixels(rag, mc_labeling, ExperimentSettings().n_threads)
# Write the result
    vigra.writeHDF5(mc_segmentation, save_path, results_name, compression='gzip')
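# Rough call order for the functions above, inferred from what each one reads
# and writes (sketch only; argument values are project-specific):
#   find_false_merges(...)        # writes <path_data>/false_paths_predictions.pkl
#   resolve_false_merges(...)     # reads it, writes the new-node-labels pickle
#   project_new_result(...) or project_resolved_objects_to_segmentation(...)
#                                 # projects the resolved labels back to a volume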
| [
"[email protected]"
] | |
1ee7dc2b9ca208d6002aaa8adfe393e5b25d084f | 88be4d5657d19462eb1d74d2d4d98180b423a889 | /robolearn/torch/policies/weighted_multi_policy_selector.py | e0cb8481457f36ea0e1a6161526cff851f74721d | [
"BSD-3-Clause"
] | permissive | domingoesteban/robolearn | bc58278fe38894f4ca9ec9e657ee13a479a368b7 | 0d20125425c352b80ef2eeed1c0b11ab6497b11a | refs/heads/master | 2020-04-15T22:38:25.343229 | 2019-01-29T17:01:42 | 2019-01-29T17:01:42 | 165,080,647 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,021 | py | from robolearn.torch.core import PyTorchModule
from robolearn.models.policies import ExplorationPolicy
class WeightedMultiPolicySelector(PyTorchModule, ExplorationPolicy):
def __init__(self, multipolicy, idx):
self.save_init_params(locals())
super(WeightedMultiPolicySelector, self).__init__()
ExplorationPolicy.__init__(self, multipolicy.action_dim)
self._multipolicy = multipolicy
self.idx = idx
def get_action(self, *args, **kwargs):
kwargs['pol_idx'] = self.idx
action, policy_info = self._multipolicy.get_action(*args, **kwargs)
return action, policy_info
def get_actions(self, *args, **kwargs):
kwargs['pol_idx'] = self.idx
action, policy_info = self._multipolicy.get_actions(*args, **kwargs)
return action, policy_info
def forward(self, *nn_input, **kwargs):
kwargs['pol_idx'] = self.idx
action, policy_info = self._multipolicy(*nn_input, **kwargs)
return action, policy_info
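# Minimal usage sketch (hypothetical objects): any multi-head policy whose
# get_action/get_actions/forward accept a `pol_idx` keyword can be wrapped.
#
#   selector = WeightedMultiPolicySelector(multipolicy, idx=0)
#   action, info = selector.get_action(obs)  # same as multipolicy.get_action(obs, pol_idx=0)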
| [
"[email protected]"
] | |
8ae9fb9ae54b014300cf7675e7bfdbabcd0e5011 | 836d5f7190f6b4503e758c87c71598f18fdfce14 | /12-Veri-Tabanı/sqlite-database-2/database.py | d5adc5a68ad53c2351607d67672c1ff0cbb2b0b7 | [] | no_license | S-Oktay-Bicici/PYTHON-PROGRAMMING | cf452723fd3e7e8ec2aadc7980208d747c502e9a | 22e864f89544249d6309d6f4570a4104bf47346b | refs/heads/main | 2021-11-30T00:19:21.158084 | 2021-11-16T15:44:29 | 2021-11-16T15:44:29 | 316,716,147 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 446 | py | import sqlite3
veriler = [
("Ahmet Ümit","İstanbul Hatırası"),
("Yaşar Kemal","İnce Memed"),
("Paulo Coelho","Simyacı"),
("Paulo Coelho","Aldatmak")]
db = sqlite3.connect("kitaplar.db")
imlec = db.cursor()
imlec.execute("CREATE TABLE IF NOT EXISTS 'kitaplık tablosu' (yazar,kitap)")
for veri in veriler:
imlec.execute("INSERT INTO 'kitaplık tablosu' VALUES (?,?)",veri)
db.commit()
db.close()
| [
"[email protected]"
] | |
263e5accf9c46da5bf018e6fe716b80de9ee55da | 4e0ff785b993b6bae70745434e61f27ca82e88f0 | /229-Majority-Element-II/solution.py | 1b9494257f4bd6ea4a55db58f2ad57d67a4ef1ec | [] | no_license | NobodyWHU/Leetcode | 2ee557dd77c65c5fa8ca938efb6de3793b4de261 | d284fa3daab02531e5300867463b293d44737e32 | refs/heads/master | 2021-01-23T14:05:28.161062 | 2016-09-23T11:51:51 | 2016-09-23T11:51:51 | 58,898,114 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 621 | py | class Solution(object):
def majorityElement(self, nums):
"""
:type nums: List[int]
:rtype: List[int]
"""
n1 = n2 = None
c1 = c2 = 0
for num in nums:
if n1 == num:
c1 += 1
elif n2 == num:
c2 += 1
elif c1 == 0:
n1, c1 = num, 1
elif c2 == 0:
n2, c2 = num, 1
else:
c1, c2 = c1 - 1, c2 - 1
size = len(nums)
return [n for n in (n1, n2)
if n is not None and nums.count(n) > size / 3]
| [
"[email protected]"
] | |
4490d298cb083a520e91f8cd046242f7439b10be | 60cf5de97160c0c104b447879edd0ea1ca9724e8 | /q29.py | 34fb2528f462f89c7b3226061a2fd7f1d74bc2cd | [] | no_license | VinayHaryan/String | 6f6b7924ab87ac8ea5509edefaa3aeda795b0de0 | 089dcf02a8d26afcae0ac2b23c640be5a6079095 | refs/heads/main | 2023-05-27T22:15:31.792837 | 2021-06-17T08:39:42 | 2021-06-17T08:39:42 | 377,736,749 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,557 | py | '''
RUN LENGTH ENCODING IN PYTHON
given an input string, write a function that returns the
run-length encoded string for the input string
For example, if the input string is ‘wwwwaaadexxxxxx’,
then the function should return ‘w4a3d1e1x6’.
Examples:
Input : str = 'wwwwaaadexxxxxx'
Output : 'w4a3d1e1x6'
'''
# python code for run length encoding
# from collections import OrderedDict
# def runlength(input):
# # generate ordered dictionary of all lower
# # case alphabets, its output will be
# # dict = {'w':0, 'a':0, 'd':0, 'e':0, 'x':0}
# dict = OrderedDict.fromkeys(input,0)
# # now iterate through input string to calculate
#     # frequency of each character, its output will be
# # dict = {'w':4,'a':3,'d':1,'e':1,'x':6}
# for ch in input:
# dict[ch] += 1
# # now iterate through dictionary to make
# # output string from (key,value) pairs
# output = ''
# for key,value in dict.items():
# output = output + key + str(value)
# return output
# # Driver function
# if __name__ == '__main__':
# input="wwwwaaadexxxxxx"
# print (runlength(input))
from collections import OrderedDict
def runlengthencoding(input):
dict = OrderedDict.fromkeys(input,0)
for ch in input:
dict[ch] += 1
output = ''
for key, value in dict.items():
output = output + key + str(value)
return output
# Driver function
if __name__ == '__main__':
input = 'wwwwaaadexxxxxx'
print(runlengthencoding(input)) | [
"[email protected]"
] | |
c9903269bc89b02fe42b2a38a1a00e60006fd0f3 | aa6e1dd07a71a73bc08574b76f9e57a3ce8c8286 | /077.Test_BeeWare_windows/beeware-tutorial/beeware-venv/Lib/site-packages/pip/_internal/wheel_builder.py | fa08016bdfb189f76aa4603f52684ae94fb20a25 | [
"MIT"
] | permissive | IvanaXu/PyTools | 0aff5982f50bb300bfa950405192c78473b69537 | 358ae06eef418fde35f424909d4f13049ca9ec7b | refs/heads/master | 2023-06-07T21:45:44.242363 | 2023-06-06T16:00:25 | 2023-06-06T16:00:25 | 163,940,845 | 60 | 8 | MIT | 2022-12-23T02:49:05 | 2019-01-03T07:54:16 | Python | UTF-8 | Python | false | false | 9,522 | py | """Orchestrator for building wheels from InstallRequirements.
"""
import logging
import os.path
import re
import shutil
from pip._internal.models.link import Link
from pip._internal.operations.build.wheel import build_wheel_pep517
from pip._internal.operations.build.wheel_legacy import build_wheel_legacy
from pip._internal.utils.logging import indent_log
from pip._internal.utils.misc import ensure_dir, hash_file, is_wheel_installed
from pip._internal.utils.setuptools_build import make_setuptools_clean_args
from pip._internal.utils.subprocess import call_subprocess
from pip._internal.utils.temp_dir import TempDirectory
from pip._internal.utils.typing import MYPY_CHECK_RUNNING
from pip._internal.utils.urls import path_to_url
from pip._internal.vcs import vcs
if MYPY_CHECK_RUNNING:
from typing import (
Any, Callable, Iterable, List, Optional, Tuple,
)
from pip._internal.cache import WheelCache
from pip._internal.req.req_install import InstallRequirement
BinaryAllowedPredicate = Callable[[InstallRequirement], bool]
BuildResult = Tuple[List[InstallRequirement], List[InstallRequirement]]
logger = logging.getLogger(__name__)
_egg_info_re = re.compile(r'([a-z0-9_.]+)-([a-z0-9_.!+-]+)', re.IGNORECASE)
def _contains_egg_info(s):
# type: (str) -> bool
"""Determine whether the string looks like an egg_info.
:param s: The string to parse. E.g. foo-2.1
"""
return bool(_egg_info_re.search(s))
def _should_build(
req, # type: InstallRequirement
need_wheel, # type: bool
check_binary_allowed, # type: BinaryAllowedPredicate
):
# type: (...) -> bool
"""Return whether an InstallRequirement should be built into a wheel."""
if req.constraint:
# never build requirements that are merely constraints
return False
if req.is_wheel:
if need_wheel:
logger.info(
'Skipping %s, due to already being wheel.', req.name,
)
return False
if need_wheel:
# i.e. pip wheel, not pip install
return True
# From this point, this concerns the pip install command only
# (need_wheel=False).
if req.editable or not req.source_dir:
return False
if not check_binary_allowed(req):
logger.info(
"Skipping wheel build for %s, due to binaries "
"being disabled for it.", req.name,
)
return False
if not req.use_pep517 and not is_wheel_installed():
# we don't build legacy requirements if wheel is not installed
logger.info(
"Using legacy 'setup.py install' for %s, "
"since package 'wheel' is not installed.", req.name,
)
return False
return True
def should_build_for_wheel_command(
req, # type: InstallRequirement
):
# type: (...) -> bool
return _should_build(
req, need_wheel=True, check_binary_allowed=_always_true
)
def should_build_for_install_command(
req, # type: InstallRequirement
check_binary_allowed, # type: BinaryAllowedPredicate
):
# type: (...) -> bool
return _should_build(
req, need_wheel=False, check_binary_allowed=check_binary_allowed
)
def _should_cache(
req, # type: InstallRequirement
):
# type: (...) -> Optional[bool]
"""
Return whether a built InstallRequirement can be stored in the persistent
wheel cache, assuming the wheel cache is available, and _should_build()
has determined a wheel needs to be built.
"""
if req.editable or not req.source_dir:
# never cache editable requirements
return False
if req.link and req.link.is_vcs:
# VCS checkout. Do not cache
# unless it points to an immutable commit hash.
assert not req.editable
assert req.source_dir
vcs_backend = vcs.get_backend_for_scheme(req.link.scheme)
assert vcs_backend
if vcs_backend.is_immutable_rev_checkout(req.link.url, req.source_dir):
return True
return False
assert req.link
base, ext = req.link.splitext()
if _contains_egg_info(base):
return True
# Otherwise, do not cache.
return False
def _get_cache_dir(
req, # type: InstallRequirement
wheel_cache, # type: WheelCache
):
# type: (...) -> str
"""Return the persistent or temporary cache directory where the built
wheel need to be stored.
"""
cache_available = bool(wheel_cache.cache_dir)
assert req.link
if cache_available and _should_cache(req):
cache_dir = wheel_cache.get_path_for_link(req.link)
else:
cache_dir = wheel_cache.get_ephem_path_for_link(req.link)
return cache_dir
def _always_true(_):
# type: (Any) -> bool
return True
def _build_one(
req, # type: InstallRequirement
output_dir, # type: str
build_options, # type: List[str]
global_options, # type: List[str]
):
# type: (...) -> Optional[str]
"""Build one wheel.
:return: The filename of the built wheel, or None if the build failed.
"""
try:
ensure_dir(output_dir)
except OSError as e:
logger.warning(
"Building wheel for %s failed: %s",
req.name, e,
)
return None
# Install build deps into temporary directory (PEP 518)
with req.build_env:
return _build_one_inside_env(
req, output_dir, build_options, global_options
)
def _build_one_inside_env(
req, # type: InstallRequirement
output_dir, # type: str
build_options, # type: List[str]
global_options, # type: List[str]
):
# type: (...) -> Optional[str]
with TempDirectory(kind="wheel") as temp_dir:
assert req.name
if req.use_pep517:
assert req.metadata_directory
wheel_path = build_wheel_pep517(
name=req.name,
backend=req.pep517_backend,
metadata_directory=req.metadata_directory,
build_options=build_options,
tempd=temp_dir.path,
)
else:
wheel_path = build_wheel_legacy(
name=req.name,
setup_py_path=req.setup_py_path,
source_dir=req.unpacked_source_directory,
global_options=global_options,
build_options=build_options,
tempd=temp_dir.path,
)
if wheel_path is not None:
wheel_name = os.path.basename(wheel_path)
dest_path = os.path.join(output_dir, wheel_name)
try:
wheel_hash, length = hash_file(wheel_path)
shutil.move(wheel_path, dest_path)
logger.info('Created wheel for %s: '
'filename=%s size=%d sha256=%s',
req.name, wheel_name, length,
wheel_hash.hexdigest())
logger.info('Stored in directory: %s', output_dir)
return dest_path
except Exception as e:
logger.warning(
"Building wheel for %s failed: %s",
req.name, e,
)
# Ignore return, we can't do anything else useful.
if not req.use_pep517:
_clean_one_legacy(req, global_options)
return None
def _clean_one_legacy(req, global_options):
# type: (InstallRequirement, List[str]) -> bool
clean_args = make_setuptools_clean_args(
req.setup_py_path,
global_options=global_options,
)
logger.info('Running setup.py clean for %s', req.name)
try:
call_subprocess(clean_args, cwd=req.source_dir)
return True
except Exception:
logger.error('Failed cleaning build dir for %s', req.name)
return False
def build(
requirements, # type: Iterable[InstallRequirement]
wheel_cache, # type: WheelCache
build_options, # type: List[str]
global_options, # type: List[str]
):
# type: (...) -> BuildResult
"""Build wheels.
:return: The list of InstallRequirement that succeeded to build and
the list of InstallRequirement that failed to build.
"""
if not requirements:
return [], []
# Build the wheels.
logger.info(
'Building wheels for collected packages: %s',
', '.join(req.name for req in requirements), # type: ignore
)
with indent_log():
build_successes, build_failures = [], []
for req in requirements:
cache_dir = _get_cache_dir(req, wheel_cache)
wheel_file = _build_one(
req, cache_dir, build_options, global_options
)
if wheel_file:
# Update the link for this.
req.link = Link(path_to_url(wheel_file))
req.local_file_path = req.link.file_path
assert req.link.is_wheel
build_successes.append(req)
else:
build_failures.append(req)
# notify success/failure
if build_successes:
logger.info(
'Successfully built %s',
' '.join([req.name for req in build_successes]), # type: ignore
)
if build_failures:
logger.info(
'Failed to build %s',
' '.join([req.name for req in build_failures]), # type: ignore
)
# Return a list of requirements that failed to build
return build_successes, build_failures
| [
"[email protected]"
] | |
9d5e381b6742e606d841d20ce2e6480a9029a65d | e3af1769d017fa5b20677b1228fd3ab42afc8927 | /projet/IBPackage/operations.py | 481a6a5755012665fb4d06ee741c5fdef5d684ad | [] | no_license | komi24/IB201116 | 08e8692a72badb82eecc79af753e1cf5c4021380 | 924c6540978b0308686eac867c16a3f6d1725f65 | refs/heads/master | 2023-01-19T11:34:32.627342 | 2020-11-20T15:24:31 | 2020-11-20T15:24:31 | 313,242,433 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 258 | py | # -*- coding: utf-8 -*-
def ajoute_2(a):
return a + 2
def addition(a, b):
return a + b
def produit(a, b):
return a * b
# def moyenne(liste, operation, init):
# somme = init
# for i in liste:
# somme = operation(i, somme)
| [
"[email protected]"
] | |
77b73a4c62c781666aa6f58703e8ed6973d129db | c61145e8771724575f67ae5738dd6cbb9626a706 | /blog/models.py | b885ebd7fe878c0266a464c1da35eb04d96169e5 | [] | no_license | Seredyak1/test_task | 1399dd082f4281ca6f72d036f4df4c1c6945dafe | a5d433b827df46ffa95dd6dd91245b204884674f | refs/heads/master | 2020-04-16T08:03:04.521740 | 2019-01-16T09:33:47 | 2019-01-16T09:33:47 | 165,409,648 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,118 | py | from django.contrib.auth.models import User
from django.db import models
class Post(models.Model):
class Meta:
verbose_name_plural = 'Posts'
ordering = ('-updated_at',)
user = models.ForeignKey(User, on_delete=models.CASCADE)
title = models.CharField(max_length=128, blank=True, null=True)
body = models.TextField(blank=True, null=True)
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
def __str__(self):
return self.title
@property
def like_count(self):
"""Show the number of likes in the Post"""
return self.like_set.count()
def add_like(self, user):
"""Add like to this Post"""
Like.objects.get_or_create(user=user, post=self)
def unlike(self, user):
"""Delete like to this Post"""
Like.objects.filter(user=user, post=self).delete()
class Like(models.Model):
user = models.ForeignKey(User, on_delete=models.CASCADE)
post = models.ForeignKey(Post, on_delete=models.CASCADE)
created = models.DateTimeField(auto_now_add=True)
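# Minimal usage sketch (hypothetical shell/view code, not part of the app):
#   post = Post.objects.create(user=author, title="Hello", body="...")
#   post.add_like(reader)   # Like.objects.get_or_create -> at most one like per user/post
#   post.like_count         # -> 1, via the reverse like_set relation
#   post.unlike(reader)     # deletes that user's likes of this post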
| [
"[email protected]"
] | |
8ae1215e7351323fa30e296d34e9cf8d769a78c1 | 6c50175e82974fdb0ccabd544a40e013e6672cb0 | /LoginReg_Bootstrap/settings.py | fbcb01abea32951e3eba4b4872bb9626432a3aa4 | [] | no_license | Jallnutt1/LoginReg_Bootstrap | 9515878688ac6a16efaba18345b90b389a6c6213 | 60532872f1e04a5809f65745665e2f16df0a913e | refs/heads/main | 2023-05-26T05:02:59.124607 | 2021-06-04T01:50:46 | 2021-06-04T01:50:46 | 373,688,654 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,151 | py | """
Django settings for LoginReg_Bootstrap project.
Generated by 'django-admin startproject' using Django 2.2.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '!lgvb3n$coe(bg@64j#p)&r^u6+o&y!vjmh=1c&iph=j%%&ylu'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'LoginReg',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'LoginReg_Bootstrap.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'LoginReg_Bootstrap.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'America/New_York'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
| [
"{ID}+{username}@users.noreply.github.com"
] | {ID}+{username}@users.noreply.github.com |
c006ca77594cce7b285e3cb5b9081c678b8e1f01 | 668dad44beb30cadb170e32a8a7f0a57c42e653c | /denormalize_to_csv.py | d2910eec545e9f6d62c1a4e254eb4424ae66ed54 | [] | no_license | SEL-Columbia/ss_data_analysis | 22b72540732b03836423e18462495b2252a2cca8 | dfb8c2670cddbddbb693e5a3243bc829bccf5ae0 | refs/heads/master | 2016-09-05T12:31:00.546458 | 2013-10-21T23:01:34 | 2013-10-21T23:01:34 | 13,162,453 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,354 | py | import os
import datetime
import csv
import sys
"""
denormalize_to_csv.py
usage: python denormalize_to_csv.py logs_dir
description: Script to take a directory of sharedsolar log files
in csv format and denormalizes them such that they
can be concatenated together into "one big table"
of the same structure without losing any information
(while duplicating some...hence "denormalize")
"""
FIELD_MAP = {
'Time Stamp': 'time_stamp',
'Watts': 'watts',
'Volts': 'volts',
'Amps': 'amps',
'Watt Hours SC20': 'watt_hours_sc20',
'Watt Hours Today': 'watt_hours_today',
'Max Watts': 'max_watts',
'Max Volts': 'max_volts',
'Max Amps': 'max_amps',
'Min Watts': 'min_watts',
'Min Volts': 'min_volts',
'Min Amps': 'min_amps',
'Power Factor': 'power_factor',
'Power Cycle': 'power_cycle',
'Frequency': 'frequency',
'Volt Amps': 'volt_amps',
'Relay Not Closed': 'relay_not_closed',
'Send Rate': 'send_rate',
'Machine ID': 'machine_id',
'Type': 'circuit_type',
'Credit': 'credit'
}
def write_denormalized_csv(logfile, site, ip):
outfile = logfile.replace(".log", ".csv")
with open(logfile,'r') as csvinput:
with open(outfile, 'w') as csvoutput:
first_line = csvinput.readline()
# Simple check for properly formatted file (NOTE: MAINS files will not have a credit field at the end)
if (first_line.startswith("Time Stamp,Watts,Volts,Amps,Watt Hours SC20,Watt Hours Today,Max Watts,Max Volts,Max Amps,Min Watts,Min Volts,Min Amps,Power Factor,Power Cycle,Frequency,Volt Amps,Relay Not Closed,Send Rate,Machine ID,Type")):
# reset read ptr
csvinput.seek(0)
reader = csv.reader(csvinput)
writer = csv.writer(csvoutput, lineterminator='\n')
all = []
has_credit = True
# handle the header row
existing_header_row = next(reader)
new_header_row = []
# add the new denormalized fields to the new header
new_header_row.append('line_num')
new_header_row.append('site_id')
new_header_row.append('ip_addr')
# If the header row doesn't contain the Credit field, add it
if existing_header_row[-1] != 'Credit':
existing_header_row.append('Credit')
has_credit = False
# convert field names
for field in existing_header_row:
if field not in FIELD_MAP:
sys.stderr.write("Erroneous field: %s in file: %s skipping..." % (field, logfile))
else:
new_header_row.append(FIELD_MAP[field])
all.append(new_header_row)
line_num = 0
for row in reader:
row.insert(0, line_num)
row.insert(1, site)
row.insert(2, ip)
# in case there was no credit field in the input file, make it 0
if not has_credit:
row.append("0")
all.append(row)
line_num = line_num + 1
writer.writerows(all)
line_num = 0
else:
sys.stderr.write("Empty or corrupted file: %s\n" % logfile)
def denormalize_to_csv(logs_dir):
for (dirpath,dirnames,filenames) in os.walk(logs_dir):
for f in filenames:
if f.endswith(".log"):
dir_info = dirpath.split("/")
# Note: dir_info contents are blah/Site/YYYY/MM/DD/HH
site = dir_info[-5] # get the site from the dir (site is always 5 dirs up in the path)
ip = f[0:f.find(".")] # get the ip from the filename
full_filename = os.path.join(dirpath, f)
write_denormalized_csv(full_filename, site, ip)
if __name__=="__main__":
import sys
assert len(sys.argv) == 2, \
"Usage: python denormalize_to_csv.py logs_dir"
logs_dir = sys.argv[1]
denormalize_to_csv(logs_dir)
| [
"[email protected]"
] | |
5d2c18d1ea37c56236232061cf2a19e8e6d11fac | ca75f7099b93d8083d5b2e9c6db2e8821e63f83b | /z2/part2/interactive/jm/random_normal_1/561870518.py | e07b9173fe09044ec19d2a4fbff66e5550b7c929 | [
"MIT"
] | permissive | kozakusek/ipp-2020-testy | 210ed201eaea3c86933266bd57ee284c9fbc1b96 | 09aa008fa53d159672cc7cbf969a6b237e15a7b8 | refs/heads/master | 2022-10-04T18:55:37.875713 | 2020-06-09T21:15:37 | 2020-06-09T21:15:37 | 262,290,632 | 0 | 0 | MIT | 2020-06-09T21:15:38 | 2020-05-08T10:10:47 | C | UTF-8 | Python | false | false | 7,951 | py | from part1 import (
gamma_board,
gamma_busy_fields,
gamma_delete,
gamma_free_fields,
gamma_golden_move,
gamma_golden_possible,
gamma_move,
gamma_new,
)
"""
scenario: test_random_actions
uuid: 561870518
"""
"""
random actions, total chaos
"""
board = gamma_new(6, 8, 4, 15)
assert board is not None
assert gamma_move(board, 1, 0, 2) == 1
assert gamma_move(board, 1, 4, 2) == 1
assert gamma_move(board, 2, 3, 7) == 1
assert gamma_move(board, 2, 2, 7) == 1
assert gamma_busy_fields(board, 2) == 2
assert gamma_move(board, 3, 3, 4) == 1
assert gamma_free_fields(board, 3) == 43
board700952916 = gamma_board(board)
assert board700952916 is not None
assert board700952916 == ("..22..\n"
"......\n"
"......\n"
"...3..\n"
"......\n"
"1...1.\n"
"......\n"
"......\n")
del board700952916
board700952916 = None
assert gamma_move(board, 1, 0, 0) == 1
assert gamma_move(board, 2, 2, 4) == 1
assert gamma_free_fields(board, 2) == 41
assert gamma_move(board, 3, 1, 7) == 1
assert gamma_move(board, 3, 0, 5) == 1
assert gamma_move(board, 4, 3, 3) == 1
assert gamma_move(board, 4, 0, 5) == 0
assert gamma_move(board, 1, 5, 5) == 1
assert gamma_move(board, 1, 4, 3) == 1
assert gamma_move(board, 2, 4, 7) == 1
assert gamma_move(board, 3, 3, 5) == 1
assert gamma_move(board, 3, 2, 0) == 1
assert gamma_move(board, 4, 4, 4) == 1
assert gamma_move(board, 4, 1, 1) == 1
assert gamma_move(board, 1, 3, 5) == 0
assert gamma_free_fields(board, 1) == 31
assert gamma_move(board, 2, 5, 1) == 1
assert gamma_busy_fields(board, 3) == 5
assert gamma_move(board, 4, 4, 1) == 1
assert gamma_move(board, 1, 6, 2) == 0
assert gamma_busy_fields(board, 1) == 5
assert gamma_move(board, 2, 1, 3) == 1
assert gamma_move(board, 3, 3, 2) == 1
assert gamma_move(board, 3, 1, 5) == 1
assert gamma_move(board, 4, 3, 0) == 1
assert gamma_golden_possible(board, 1) == 1
assert gamma_move(board, 2, 4, 1) == 0
assert gamma_move(board, 2, 1, 7) == 0
assert gamma_free_fields(board, 2) == 25
assert gamma_move(board, 3, 2, 2) == 1
assert gamma_move(board, 3, 2, 0) == 0
assert gamma_free_fields(board, 3) == 24
assert gamma_move(board, 4, 6, 1) == 0
assert gamma_golden_possible(board, 4) == 1
assert gamma_move(board, 1, 7, 0) == 0
assert gamma_move(board, 2, 6, 3) == 0
assert gamma_move(board, 3, 1, 0) == 1
assert gamma_move(board, 4, 6, 5) == 0
board137299271 = gamma_board(board)
assert board137299271 is not None
assert board137299271 == (".3222.\n"
"......\n"
"33.3.1\n"
"..234.\n"
".2.41.\n"
"1.331.\n"
".4..42\n"
"1334..\n")
del board137299271
board137299271 = None
assert gamma_move(board, 1, 2, 1) == 1
assert gamma_move(board, 1, 3, 5) == 0
assert gamma_move(board, 2, 0, 4) == 1
assert gamma_move(board, 2, 5, 4) == 1
assert gamma_busy_fields(board, 2) == 8
assert gamma_move(board, 3, 2, 5) == 1
assert gamma_move(board, 4, 0, 5) == 0
assert gamma_move(board, 4, 1, 2) == 1
assert gamma_move(board, 1, 5, 4) == 0
assert gamma_golden_possible(board, 1) == 1
assert gamma_move(board, 2, 6, 5) == 0
assert gamma_move(board, 2, 5, 6) == 1
assert gamma_move(board, 3, 1, 4) == 1
assert gamma_move(board, 3, 5, 7) == 1
assert gamma_move(board, 4, 4, 6) == 1
assert gamma_move(board, 1, 6, 3) == 0
assert gamma_move(board, 1, 2, 2) == 0
assert gamma_move(board, 2, 2, 5) == 0
assert gamma_move(board, 2, 4, 4) == 0
assert gamma_golden_possible(board, 2) == 1
assert gamma_golden_move(board, 2, 1, 1) == 1
assert gamma_move(board, 3, 7, 0) == 0
assert gamma_move(board, 3, 0, 7) == 1
assert gamma_golden_possible(board, 4) == 1
assert gamma_move(board, 1, 1, 3) == 0
assert gamma_move(board, 1, 5, 5) == 0
assert gamma_move(board, 2, 6, 0) == 0
assert gamma_move(board, 4, 1, 3) == 0
assert gamma_move(board, 4, 2, 3) == 1
assert gamma_move(board, 1, 1, 7) == 0
assert gamma_move(board, 1, 1, 5) == 0
assert gamma_move(board, 2, 1, 0) == 0
assert gamma_free_fields(board, 2) == 12
assert gamma_move(board, 3, 1, 0) == 0
assert gamma_move(board, 4, 1, 3) == 0
assert gamma_move(board, 4, 0, 6) == 1
assert gamma_move(board, 1, 6, 1) == 0
assert gamma_move(board, 1, 1, 3) == 0
assert gamma_move(board, 2, 6, 1) == 0
assert gamma_move(board, 2, 0, 4) == 0
assert gamma_move(board, 3, 6, 1) == 0
assert gamma_move(board, 3, 2, 4) == 0
assert gamma_move(board, 4, 2, 5) == 0
assert gamma_move(board, 1, 4, 1) == 0
assert gamma_move(board, 1, 4, 5) == 1
assert gamma_move(board, 2, 1, 2) == 0
assert gamma_busy_fields(board, 2) == 10
assert gamma_move(board, 3, 1, 0) == 0
assert gamma_move(board, 3, 2, 3) == 0
assert gamma_busy_fields(board, 3) == 13
assert gamma_move(board, 4, 0, 4) == 0
assert gamma_busy_fields(board, 1) == 7
assert gamma_golden_possible(board, 1) == 1
assert gamma_move(board, 2, 1, 0) == 0
assert gamma_free_fields(board, 2) == 10
assert gamma_move(board, 3, 2, 0) == 0
assert gamma_move(board, 3, 0, 6) == 0
assert gamma_busy_fields(board, 3) == 13
assert gamma_move(board, 4, 1, 3) == 0
assert gamma_move(board, 1, 2, 2) == 0
assert gamma_move(board, 1, 5, 4) == 0
assert gamma_move(board, 2, 2, 7) == 0
assert gamma_golden_possible(board, 2) == 0
assert gamma_move(board, 3, 4, 7) == 0
assert gamma_move(board, 3, 0, 4) == 0
assert gamma_move(board, 4, 3, 5) == 0
assert gamma_move(board, 4, 1, 5) == 0
assert gamma_busy_fields(board, 4) == 8
assert gamma_golden_possible(board, 4) == 1
assert gamma_move(board, 2, 0, 5) == 0
assert gamma_move(board, 2, 2, 7) == 0
assert gamma_busy_fields(board, 2) == 10
assert gamma_move(board, 3, 3, 5) == 0
assert gamma_busy_fields(board, 3) == 13
assert gamma_golden_possible(board, 4) == 1
assert gamma_move(board, 1, 1, 4) == 0
assert gamma_move(board, 2, 4, 1) == 0
assert gamma_move(board, 3, 0, 4) == 0
assert gamma_move(board, 4, 0, 3) == 1
assert gamma_golden_move(board, 4, 6, 5) == 0
assert gamma_move(board, 1, 1, 0) == 0
board301797101 = gamma_board(board)
assert board301797101 is not None
assert board301797101 == ("332223\n"
"4...42\n"
"333311\n"
"232342\n"
"42441.\n"
"14331.\n"
".21.42\n"
"1334..\n")
del board301797101
board301797101 = None
assert gamma_move(board, 2, 1, 3) == 0
assert gamma_golden_possible(board, 2) == 0
assert gamma_move(board, 3, 3, 5) == 0
assert gamma_move(board, 3, 0, 0) == 0
assert gamma_free_fields(board, 3) == 9
assert gamma_move(board, 1, 4, 7) == 0
assert gamma_move(board, 2, 6, 1) == 0
assert gamma_free_fields(board, 2) == 9
assert gamma_move(board, 3, 0, 1) == 1
assert gamma_move(board, 4, 0, 5) == 0
assert gamma_move(board, 1, 3, 5) == 0
assert gamma_move(board, 2, 0, 5) == 0
assert gamma_golden_possible(board, 2) == 0
assert gamma_move(board, 3, 3, 2) == 0
assert gamma_move(board, 3, 1, 7) == 0
assert gamma_move(board, 4, 3, 5) == 0
assert gamma_move(board, 4, 2, 7) == 0
assert gamma_move(board, 1, 0, 0) == 0
assert gamma_move(board, 2, 4, 6) == 0
assert gamma_golden_possible(board, 2) == 0
assert gamma_move(board, 3, 6, 2) == 0
assert gamma_golden_possible(board, 3) == 1
assert gamma_move(board, 4, 1, 3) == 0
assert gamma_move(board, 4, 4, 5) == 0
assert gamma_move(board, 1, 5, 0) == 1
assert gamma_golden_possible(board, 1) == 1
assert gamma_move(board, 2, 6, 1) == 0
assert gamma_move(board, 2, 2, 1) == 0
assert gamma_move(board, 3, 6, 1) == 0
assert gamma_busy_fields(board, 3) == 14
assert gamma_golden_possible(board, 3) == 1
assert gamma_move(board, 4, 6, 3) == 0
assert gamma_busy_fields(board, 4) == 9
assert gamma_move(board, 1, 6, 1) == 0
assert gamma_move(board, 1, 1, 4) == 0
board493510436 = gamma_board(board)
assert board493510436 is not None
assert board493510436 == ("332223\n"
"4...42\n"
"333311\n"
"232342\n"
"42441.\n"
"14331.\n"
"321.42\n"
"1334.1\n")
del board493510436
board493510436 = None
assert gamma_move(board, 2, 6, 3) == 0
assert gamma_move(board, 3, 0, 4) == 0
assert gamma_move(board, 4, 2, 5) == 0
gamma_delete(board)
| [
"[email protected]"
] | |
4660ea0d2890f4a7ae7e8f48cbe1f776c8393822 | de428c011b56db862f05ec0ceab17b85f83f94b1 | /pythongame/scenes_game/player_environment_interactions.py | bfdfdb4eea733e83b0f61229bf7c9e6e1f382640 | [] | no_license | risooonho/python-2d-game | c6d1fceaf09c72a6f7573230a4a899bf79164b7f | 24b02646ed56f9017069b243b774e0ee46951aea | refs/heads/master | 2021-05-17T06:02:13.538699 | 2020-02-15T23:59:54 | 2020-02-15T23:59:54 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,646 | py | import sys
from typing import Optional, Any, List, Tuple
from pythongame.core.game_data import CONSUMABLES, PORTALS
from pythongame.core.game_state import GameState, NonPlayerCharacter, LootableOnGround, Portal, WarpPoint, \
ConsumableOnGround, ItemOnGround, Chest, Shrine
from pythongame.core.game_state import WorldEntity
from pythongame.core.item_data import build_item_name, create_item_description, get_item_data
from pythongame.core.math import boxes_intersect, is_x_and_y_within_distance, \
get_manhattan_distance_between_rects
from pythongame.core.npc_behaviors import has_npc_dialog
from pythongame.core.view.game_world_view import EntityActionText, EntityActionTextStyle
from pythongame.scenes_game.game_engine import GameEngine
class PlayerInteractionsState:
def __init__(self):
self.entity_to_interact_with: Any = None
def handle_nearby_entities(self, player_entity: WorldEntity, game_state: GameState, game_engine: GameEngine):
self.entity_to_interact_with = None
player_position = player_entity.get_position()
distance_to_closest_entity = sys.maxsize
for npc in game_state.non_player_characters:
if has_npc_dialog(npc.npc_type):
close_to_player = is_x_and_y_within_distance(player_position, npc.world_entity.get_position(), 75)
distance = get_manhattan_distance_between_rects(player_entity.rect(), npc.world_entity.rect())
if close_to_player and distance < distance_to_closest_entity:
self.entity_to_interact_with = npc
distance_to_closest_entity = distance
lootables_on_ground: List[LootableOnGround] = list(game_state.items_on_ground)
lootables_on_ground += game_state.consumables_on_ground
for lootable in lootables_on_ground:
if boxes_intersect(player_entity.rect(), lootable.world_entity.rect()):
self.entity_to_interact_with = lootable
distance_to_closest_entity = 0
for portal in game_state.portals:
close_to_player = is_x_and_y_within_distance(player_position, portal.world_entity.get_position(), 75)
distance = get_manhattan_distance_between_rects(player_entity.rect(), portal.world_entity.rect())
if close_to_player:
game_engine.handle_being_close_to_portal(portal)
if close_to_player and distance < distance_to_closest_entity:
self.entity_to_interact_with = portal
distance_to_closest_entity = distance
for warp_point in game_state.warp_points:
close_to_player = is_x_and_y_within_distance(player_position, warp_point.world_entity.get_position(), 75)
distance = get_manhattan_distance_between_rects(player_entity.rect(), warp_point.world_entity.rect())
if close_to_player and distance < distance_to_closest_entity:
self.entity_to_interact_with = warp_point
distance_to_closest_entity = distance
for chest in game_state.chests:
close_to_player = is_x_and_y_within_distance(player_position, chest.world_entity.get_position(), 75)
distance = get_manhattan_distance_between_rects(player_entity.rect(), chest.world_entity.rect())
if close_to_player and distance < distance_to_closest_entity:
self.entity_to_interact_with = chest
distance_to_closest_entity = distance
for shrine in game_state.shrines:
close_to_player = is_x_and_y_within_distance(player_position, shrine.world_entity.get_position(), 75)
distance = get_manhattan_distance_between_rects(player_entity.rect(), shrine.world_entity.rect())
if close_to_player and distance < distance_to_closest_entity:
self.entity_to_interact_with = shrine
distance_to_closest_entity = distance
def get_entity_to_interact_with(self):
return self.entity_to_interact_with
def get_entity_action_text(self, is_shift_key_held_down: bool) -> Optional[EntityActionText]:
if self.entity_to_interact_with is None:
return None
return _get_entity_action_text(self.entity_to_interact_with, is_shift_key_held_down)
def _get_entity_action_text(ready_entity: Any, is_shift_key_held_down: bool) -> Optional[EntityActionText]:
if isinstance(ready_entity, NonPlayerCharacter):
return EntityActionText(ready_entity.world_entity, "...", [])
elif isinstance(ready_entity, LootableOnGround):
name, style = _get_loot_name(ready_entity)
if is_shift_key_held_down:
loot_details = _get_loot_details(ready_entity)
else:
loot_details = []
return EntityActionText(ready_entity.world_entity, name, loot_details, style=style)
elif isinstance(ready_entity, Portal):
if ready_entity.is_enabled:
data = PORTALS[ready_entity.portal_id]
return EntityActionText(ready_entity.world_entity, data.destination_name, [])
else:
return EntityActionText(ready_entity.world_entity, "???", [])
elif isinstance(ready_entity, WarpPoint):
return EntityActionText(ready_entity.world_entity, "Warp", [])
elif isinstance(ready_entity, Chest):
return EntityActionText(ready_entity.world_entity, "Open", [])
elif isinstance(ready_entity, Shrine):
if ready_entity.has_been_used:
return None
else:
return EntityActionText(ready_entity.world_entity, "Touch", [])
else:
raise Exception("Unhandled entity: " + str(ready_entity))
def _get_loot_name(lootable: LootableOnGround) -> Tuple[str, EntityActionTextStyle]:
if isinstance(lootable, ConsumableOnGround):
name = CONSUMABLES[lootable.consumable_type].name
return name, EntityActionTextStyle.PLAIN
if isinstance(lootable, ItemOnGround):
name = build_item_name(lootable.item_id)
if lootable.item_id.suffix_id is not None:
style = EntityActionTextStyle.LOOT_RARE
elif get_item_data(lootable.item_id).is_unique:
style = EntityActionTextStyle.LOOT_UNIQUE
else:
style = EntityActionTextStyle.PLAIN
return name, style
def _get_loot_details(lootable: LootableOnGround) -> List[str]:
if isinstance(lootable, ConsumableOnGround):
return [CONSUMABLES[lootable.consumable_type].description]
if isinstance(lootable, ItemOnGround):
# TODO Render suffix lines differently?
return [line.text for line in create_item_description(lootable.item_id)]
| [
"[email protected]"
] | |
f4018757458a86a63df44d42374c69ea3d612194 | de4d3fed2b538587124ad855c8ba2f30933e7edf | /backend/sparepart_main/sparepart_main/asgi.py | 597f5430d993ea910e06c11dd1b1488e41205dd3 | [] | no_license | zahydakhan/project_spare | aaea130edefa95630f73b3026de6c32800b0bc7f | 850374c270fd5ad2897bf9b6f0afb93b9e171059 | refs/heads/master | 2023-03-11T17:13:13.103574 | 2021-02-23T06:40:52 | 2021-02-23T06:40:52 | 339,530,703 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 405 | py | """
ASGI config for sparepart_main project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'sparepart_main.settings')
application = get_asgi_application()
| [
"[email protected]"
] | |
4913c2722dadc4eab70e690b9fb6b88e0097a781 | f576f0ea3725d54bd2551883901b25b863fe6688 | /sdk/applicationinsights/azure-mgmt-applicationinsights/azure/mgmt/applicationinsights/v2022_06_15/aio/_application_insights_management_client.py | 8b411d38bef302c4b99c3ad09e97a57e5267f2e1 | [
"LicenseRef-scancode-generic-cla",
"MIT",
"LGPL-2.1-or-later"
] | permissive | Azure/azure-sdk-for-python | 02e3838e53a33d8ba27e9bcc22bd84e790e4ca7c | c2ca191e736bb06bfbbbc9493e8325763ba990bb | refs/heads/main | 2023-09-06T09:30:13.135012 | 2023-09-06T01:08:06 | 2023-09-06T01:08:06 | 4,127,088 | 4,046 | 2,755 | MIT | 2023-09-14T21:48:49 | 2012-04-24T16:46:12 | Python | UTF-8 | Python | false | false | 4,161 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from copy import deepcopy
from typing import Any, Awaitable, TYPE_CHECKING
from azure.core.rest import AsyncHttpResponse, HttpRequest
from azure.mgmt.core import AsyncARMPipelineClient
from .. import models as _models
from ..._serialization import Deserializer, Serializer
from ._configuration import ApplicationInsightsManagementClientConfiguration
from .operations import WebTestsOperations
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from azure.core.credentials_async import AsyncTokenCredential
class ApplicationInsightsManagementClient: # pylint: disable=client-accepts-api-version-keyword
"""Composite Swagger for Application Insights Management Client.
:ivar web_tests: WebTestsOperations operations
:vartype web_tests:
azure.mgmt.applicationinsights.v2022_06_15.aio.operations.WebTestsOperations
:param credential: Credential needed for the client to connect to Azure. Required.
:type credential: ~azure.core.credentials_async.AsyncTokenCredential
:param subscription_id: The ID of the target subscription. Required.
:type subscription_id: str
:param base_url: Service URL. Default value is "https://management.azure.com".
:type base_url: str
:keyword api_version: Api Version. Default value is "2022-06-15". Note that overriding this
default value may result in unsupported behavior.
:paramtype api_version: str
"""
def __init__(
self,
credential: "AsyncTokenCredential",
subscription_id: str,
base_url: str = "https://management.azure.com",
**kwargs: Any
) -> None:
self._config = ApplicationInsightsManagementClientConfiguration(
credential=credential, subscription_id=subscription_id, **kwargs
)
self._client: AsyncARMPipelineClient = AsyncARMPipelineClient(base_url=base_url, config=self._config, **kwargs)
client_models = {k: v for k, v in _models.__dict__.items() if isinstance(v, type)}
self._serialize = Serializer(client_models)
self._deserialize = Deserializer(client_models)
self._serialize.client_side_validation = False
self.web_tests = WebTestsOperations(self._client, self._config, self._serialize, self._deserialize)
def _send_request(self, request: HttpRequest, **kwargs: Any) -> Awaitable[AsyncHttpResponse]:
"""Runs the network request through the client's chained policies.
>>> from azure.core.rest import HttpRequest
>>> request = HttpRequest("GET", "https://www.example.org/")
<HttpRequest [GET], url: 'https://www.example.org/'>
>>> response = await client._send_request(request)
<AsyncHttpResponse: 200 OK>
For more information on this code flow, see https://aka.ms/azsdk/dpcodegen/python/send_request
:param request: The network request you want to make. Required.
:type request: ~azure.core.rest.HttpRequest
:keyword bool stream: Whether the response payload will be streamed. Defaults to False.
:return: The response of your network call. Does not do error handling on your response.
:rtype: ~azure.core.rest.AsyncHttpResponse
"""
request_copy = deepcopy(request)
request_copy.url = self._client.format_url(request_copy.url)
return self._client.send_request(request_copy, **kwargs)
async def close(self) -> None:
await self._client.close()
async def __aenter__(self) -> "ApplicationInsightsManagementClient":
await self._client.__aenter__()
return self
async def __aexit__(self, *exc_details: Any) -> None:
await self._client.__aexit__(*exc_details)
| [
"[email protected]"
] | |
58917f325490ea31f0266642594353b9a3a355ea | 15f321878face2af9317363c5f6de1e5ddd9b749 | /solutions_python/Problem_135/2992.py | b11dac90d43eb4458792e75c04fcfe2c80575061 | [] | no_license | dr-dos-ok/Code_Jam_Webscraper | c06fd59870842664cd79c41eb460a09553e1c80a | 26a35bf114a3aa30fc4c677ef069d95f41665cc0 | refs/heads/master | 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 874 | py |
def read_case(file):
answer = int(file.readline()) - 1
for skip in range(4):
line = file.readline()
if skip is answer:
result = set(line.split())
return result
def read_input(filename):
with open(filename, "r") as in_file:
n_cases = int(in_file.readline().split()[0])
for case in range(n_cases):
yield case + 1, read_case(in_file), read_case(in_file)
def solve_case(first, second):
answer = first & second
if len(answer) == 1:
return "".join(answer)
elif answer:
return "Bad magician!"
else:
return "Volunteer cheated!"
cases = read_input("A-small-attempt0.in")
outfile = open("output.txt", "w+")
for case, first, second in cases:
result = solve_case(first, second)
outfile.write("Case #{}: {}\n".format(case, result)) | [
"[email protected]"
] | |
70ec64d86ed5c0c271e847db24f5640fba8b206c | d6f95f4347c2bd934393603819787acf70aaf4eb | /2018年11月15日福建省/gg.py | af58924762c19caec1070c8e9830667202198a39 | [] | no_license | moto-faith/work | 531804bca7b6ecb6d9776ed2086bbf9952e2043b | e77e40dbbb7dbb80bd2bc2584a6d1d020f92d2b4 | refs/heads/master | 2020-04-08T11:20:37.533419 | 2019-03-18T08:09:27 | 2019-03-18T08:09:27 | 159,302,505 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,942 | py | #!/usr/bin/env python
#coding=utf-8
import time
import datetime
import re
import json
import requests
import time
import redis
import sys
from urlparse import urljoin
from db import DB
reload (sys)
import copy
import MySQLdb
sys.setdefaultencoding ("utf-8")
import htmlparser
from PIL import Image
def handle_post(post):
post = copy.deepcopy(post)
for k,v in post.iteritems():
print k,v
if isinstance(v, unicode):
v = v.encode("utf8")
if not isinstance(v,str) and not isinstance(v, int) and not isinstance(v, float):
v = json.dumps(v)
try:v = MySQLdb.escape_string(v)
except:pass
post.update({k:v})
return post
db = DB ().create ('mysql://zhxg:[email protected]:3306/sjk')
table = "list_info"
result1 = "list_model_filter"
urls = db.table(table).where('''siteName = "福建省公共资源交易中心"''').find()
dict_page_info = [url for url in urls if url is not None]
print "********-->", len (dict_page_info)
for str_urls in dict_page_info:
dict_post = str_urls
# print isinstance(dict_post,dict)
# dict_post = json.loads(dict_post)
# for k,v in dict_post.items():
# print k,v
# dd = dict_post.get("detailUrl")
dict_post["tf"]="1"
dict_post["irepeat"]="1"
dict_post["service"]="勘察设计"
dict_post["industry"]="industry"
dic = handle_post (dict_post)
try:
db.table (result1).add (dic)
except Exception as e:
print e
# for k,v in dict_post.items():
# print k,v
# detailUrl = dict_post.get ("detailUrl")
if __name__ == "__main__":
pass | [
"[email protected]"
] |